gt stringclasses 1 value | context stringlengths 2.49k 119k |
|---|---|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module defines standard transformations which transforms a structure into
another structure. Standard transformations operate in a structure-wide manner,
rather than site-specific manner.
All transformations should inherit the AbstractTransformation ABC.
"""
import logging
from fractions import Fraction
from typing import Optional, Union
from numpy import around
from pymatgen.analysis.bond_valence import BVAnalyzer
from pymatgen.analysis.elasticity.strain import Deformation
from pymatgen.analysis.ewald import EwaldMinimizer, EwaldSummation
from pymatgen.analysis.structure_matcher import StructureMatcher
from pymatgen.core.composition import Composition
from pymatgen.core.operations import SymmOp
from pymatgen.core.periodic_table import get_el_sp
from pymatgen.core.structure import Lattice, Structure
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from pymatgen.transformations.site_transformations import (
PartialRemoveSitesTransformation,
)
from pymatgen.transformations.transformation_abc import AbstractTransformation
logger = logging.getLogger(__name__)
class RotationTransformation(AbstractTransformation):
    """
    Applies a rotation, given as an axis and an angle, to a structure.
    """

    def __init__(self, axis, angle, angle_in_radians=False):
        """
        Args:
            axis (3x1 array): Axis of rotation, e.g., [1, 0, 0]
            angle (float): Angle to rotate
            angle_in_radians (bool): Set to True if angle is supplied in radians.
                Else degrees are assumed.
        """
        self.axis = axis
        self.angle = angle
        self.angle_in_radians = angle_in_radians
        # Pre-build the symmetry operation once; it is reused for every structure.
        self._symmop = SymmOp.from_axis_angle_and_translation(self.axis, self.angle, self.angle_in_radians)

    def apply_transformation(self, structure):
        """
        Apply the rotation to a copy of the input structure.

        Args:
            structure (Structure): Input Structure

        Returns:
            Rotated Structure.
        """
        rotated = structure.copy()
        rotated.apply_operation(self._symmop)
        return rotated

    def __str__(self):
        unit = "radians" if self.angle_in_radians else "degrees"
        return "Rotation Transformation about axis {} with angle = {:.4f} {}".format(self.axis, self.angle, unit)

    def __repr__(self):
        return str(self)

    @property
    def inverse(self):
        """
        Returns:
            Inverse Transformation (rotation by -angle about the same axis).
        """
        return RotationTransformation(self.axis, -self.angle, self.angle_in_radians)

    @property
    def is_one_to_many(self):
        """Returns: False"""
        return False
class OxidationStateDecorationTransformation(AbstractTransformation):
    """
    Decorates a structure with user-supplied oxidation states.
    """

    def __init__(self, oxidation_states):
        """
        Args:
            oxidation_states (dict): Oxidation states supplied as a dict,
                e.g., {"Li":1, "O":-2}
        """
        self.oxidation_states = oxidation_states

    def apply_transformation(self, structure):
        """
        Add the stored oxidation states, by element, to a copy of the structure.

        Args:
            structure (Structure): Input Structure

        Returns:
            Oxidation state decorated Structure.
        """
        decorated = structure.copy()
        decorated.add_oxidation_state_by_element(self.oxidation_states)
        return decorated

    @property
    def inverse(self):
        """Returns: None"""
        return None

    @property
    def is_one_to_many(self):
        """Returns: False"""
        return False
class AutoOxiStateDecorationTransformation(AbstractTransformation):
    """
    Automatically decorates a structure with oxidation states using a
    bond-valence (BVAnalyzer) approach.
    """

    def __init__(
        self,
        symm_tol=0.1,
        max_radius=4,
        max_permutations=100000,
        distance_scale_factor=1.015,
    ):
        """
        Args:
            symm_tol (float): Symmetry tolerance used to determine which sites are
                symmetrically equivalent. Set to 0 to turn off symmetry.
            max_radius (float): Maximum radius in Angstrom used to find nearest
                neighbors.
            max_permutations (int): Maximum number of permutations of oxidation
                states to test.
            distance_scale_factor (float): A scale factor to be applied. This is
                useful for scaling distances, esp in the case of
                calculation-relaxed structures, which may tend to under (GGA) or
                over bind (LDA). The default of 1.015 works for GGA. For
                experimental structure, set this to 1.
        """
        self.symm_tol = symm_tol
        self.max_radius = max_radius
        self.max_permutations = max_permutations
        self.distance_scale_factor = distance_scale_factor
        # The BVAnalyzer does the actual oxidation-state assignment work.
        self.analyzer = BVAnalyzer(symm_tol, max_radius, max_permutations, distance_scale_factor)

    def apply_transformation(self, structure):
        """
        Estimate and attach oxidation states to each site of the structure.

        Args:
            structure (Structure): Input Structure

        Returns:
            Oxidation state decorated Structure.
        """
        return self.analyzer.get_oxi_state_decorated_structure(structure)

    @property
    def inverse(self):
        """Returns: None"""
        return None

    @property
    def is_one_to_many(self):
        """Returns: False"""
        return False
class OxidationStateRemovalTransformation(AbstractTransformation):
    """
    Strips all oxidation states from a structure.
    """

    def __init__(self):
        """No arg needed."""

    def apply_transformation(self, structure):
        """
        Return a copy of the structure with oxidation states removed.

        Args:
            structure (Structure): Input Structure

        Returns:
            Non-oxidation state decorated Structure.
        """
        undecorated = structure.copy()
        undecorated.remove_oxidation_states()
        return undecorated

    @property
    def inverse(self):
        """Returns: None"""
        return None

    @property
    def is_one_to_many(self):
        """Returns: False"""
        return False
class SupercellTransformation(AbstractTransformation):
    """
    The SupercellTransformation generates a supercell from a structure by
    applying an integer scaling matrix to the lattice vectors.

    (The previous docstring incorrectly described this class as applying a
    rotation; it was a copy-paste from RotationTransformation.)
    """

    def __init__(self, scaling_matrix=((1, 0, 0), (0, 1, 0), (0, 0, 1))):
        """
        Args:
            scaling_matrix: A matrix of transforming the lattice vectors.
                Defaults to the identity matrix. Has to be all integers. e.g.,
                [[2,1,0],[0,3,0],[0,0,1]] generates a new structure with
                lattice vectors a" = 2a + b, b" = 3b, c" = c where a, b, and c
                are the lattice vectors of the original structure.
        """
        self.scaling_matrix = scaling_matrix

    @staticmethod
    def from_scaling_factors(scale_a=1, scale_b=1, scale_c=1):
        """
        Convenience method to get a SupercellTransformation from a simple
        series of three numbers for scaling each lattice vector. Equivalent to
        calling the normal with [[scale_a, 0, 0], [0, scale_b, 0],
        [0, 0, scale_c]]

        Args:
            scale_a: Scaling factor for lattice direction a. Defaults to 1.
            scale_b: Scaling factor for lattice direction b. Defaults to 1.
            scale_c: Scaling factor for lattice direction c. Defaults to 1.

        Returns:
            SupercellTransformation.
        """
        return SupercellTransformation([[scale_a, 0, 0], [0, scale_b, 0], [0, 0, scale_c]])

    def apply_transformation(self, structure):
        """
        Apply the transformation.

        Args:
            structure (Structure): Input Structure

        Returns:
            Supercell Structure.
        """
        # Structure.__mul__ implements supercell generation from a scaling matrix.
        return structure * self.scaling_matrix

    def __str__(self):
        return "Supercell Transformation with scaling matrix " + "{}".format(self.scaling_matrix)

    def __repr__(self):
        return self.__str__()

    @property
    def inverse(self):
        """
        Raises: NotImplementedError
            Supercell reduction back to the original cell is not implemented.
        """
        raise NotImplementedError()

    @property
    def is_one_to_many(self):
        """
        Returns: False
        """
        return False
class SubstitutionTransformation(AbstractTransformation):
    """
    This transformation substitutes species for one another.
    """

    def __init__(self, species_map):
        """
        Args:
            species_map: A dict or list of tuples containing the species mapping in
                string-string pairs. E.g., {"Li":"Na"} or [("Fe2+","Mn2+")].
                Multiple substitutions can be done. Overloaded to accept
                sp_and_occu dictionary E.g. {"Si: {"Ge":0.75, "C":0.25}},
                which substitutes a single species with multiple species to
                generate a disordered structure.
        """
        self.species_map = species_map
        # Normalize list-of-pairs values into dicts for disordered substitutions.
        self._species_map = dict(species_map)
        for orig_sp, new_sp in self._species_map.items():
            if isinstance(new_sp, (tuple, list)):
                self._species_map[orig_sp] = dict(new_sp)

    def apply_transformation(self, structure):
        """
        Apply the substitution to a copy of the structure.

        Args:
            structure (Structure): Input Structure

        Returns:
            Substituted Structure.
        """
        converted = {}
        for orig_sp, new_sp in self._species_map.items():
            if isinstance(new_sp, dict):
                # Disordered substitution: resolve each target species string.
                converted[get_el_sp(orig_sp)] = {get_el_sp(sub): occu for sub, occu in new_sp.items()}
            else:
                converted[get_el_sp(orig_sp)] = get_el_sp(new_sp)
        substituted = structure.copy()
        substituted.replace_species(converted)
        return substituted

    def __str__(self):
        pairs = ["{}->{}".format(k, v) for k, v in self._species_map.items()]
        return "Substitution Transformation :" + ", ".join(pairs)

    def __repr__(self):
        return str(self)

    @property
    def inverse(self):
        """
        Returns:
            Inverse Transformation.

        NOTE(review): assumes a one-to-one species mapping; dict-valued
        (disordered) mappings are unhashable and cannot be inverted here.
        """
        return SubstitutionTransformation({v: k for k, v in self._species_map.items()})

    @property
    def is_one_to_many(self):
        """Returns: False"""
        return False
class RemoveSpeciesTransformation(AbstractTransformation):
    """
    Remove all occurrences of some species from a structure.
    """

    def __init__(self, species_to_remove):
        """
        Args:
            species_to_remove: List of species to remove. E.g., ["Li", "Mn"]
        """
        self.species_to_remove = species_to_remove

    def apply_transformation(self, structure):
        """
        Return a copy of the structure with the listed species deleted.

        Args:
            structure (Structure): Input Structure

        Returns:
            Structure with species removed.
        """
        stripped = structure.copy()
        # Remove one species at a time, resolving each string to an
        # Element/Species object first.
        for symbol in self.species_to_remove:
            stripped.remove_species([get_el_sp(symbol)])
        return stripped

    def __str__(self):
        return "Remove Species Transformation :" + ", ".join(self.species_to_remove)

    def __repr__(self):
        return str(self)

    @property
    def inverse(self):
        """Returns: None"""
        return None

    @property
    def is_one_to_many(self):
        """Returns: False"""
        return False
class PartialRemoveSpecieTransformation(AbstractTransformation):
    """
    Remove fraction of specie from a structure.

    Requires an oxidation state decorated structure for ewald sum to be
    computed.

    Given that the solution to selecting the right removals is NP-hard, there
    are several algorithms provided with varying degrees of accuracy and speed.
    Please see
    :class:`pymatgen.transformations.site_transformations.PartialRemoveSitesTransformation`.
    """

    ALGO_FAST = 0
    ALGO_COMPLETE = 1
    ALGO_BEST_FIRST = 2
    ALGO_ENUMERATE = 3

    def __init__(self, specie_to_remove, fraction_to_remove, algo=ALGO_FAST):
        """
        Args:
            specie_to_remove: Species to remove. Must have oxidation state E.g.,
                "Li+"
            fraction_to_remove: Fraction of specie to remove. E.g., 0.5
            algo: This parameter allows you to choose the algorithm to perform
                ordering. Use one of PartialRemoveSpecieTransformation.ALGO_*
                variables to set the algo.
        """
        self.specie_to_remove = specie_to_remove
        self.fraction_to_remove = fraction_to_remove
        self.algo = algo

    def apply_transformation(self, structure, return_ranked_list=False):
        """
        Delegate to PartialRemoveSitesTransformation on the sites occupied
        purely by the target species.

        Args:
            structure: input structure
            return_ranked_list (bool/int): Boolean stating whether or not
                multiple structures are returned. If return_ranked_list is
                an int, that number of structures is returned.

        Returns:
            Depending on return_ranked_list, either a transformed structure
            or a list of dictionaries, where each dictionary is of the form
            {"structure" = .... , "other_arguments"}. The key
            "transformation" is reserved for the transformation that was
            actually applied to the structure.
        """
        sp = get_el_sp(self.specie_to_remove)
        pure_sp = Composition({sp: 1})
        # Only sites occupied entirely by the target species are candidates.
        specie_indices = [i for i, site in enumerate(structure) if site.species == pure_sp]
        trans = PartialRemoveSitesTransformation([specie_indices], [self.fraction_to_remove], algo=self.algo)
        return trans.apply_transformation(structure, return_ranked_list)

    @property
    def is_one_to_many(self):
        """Returns: True"""
        return True

    def __str__(self):
        spec_str = [
            "Species = {}".format(self.specie_to_remove),
            "Fraction to remove = {}".format(self.fraction_to_remove),
            "ALGO = {}".format(self.algo),
        ]
        return "PartialRemoveSpecieTransformation : " + ", ".join(spec_str)

    def __repr__(self):
        return str(self)

    @property
    def inverse(self):
        """Returns: None"""
        return None
class OrderDisorderedStructureTransformation(AbstractTransformation):
    """
    Order a disordered structure. The disordered structure must be oxidation
    state decorated for ewald sum to be computed. No attempt is made to perform
    symmetry determination to reduce the number of combinations.

    Hence, attempting to performing ordering on a large number of disordered
    sites may be extremely expensive. The time scales approximately with the
    number of possible combinations. The algorithm can currently compute
    approximately 5,000,000 permutations per minute.

    Also, simple rounding of the occupancies are performed, with no attempt
    made to achieve a target composition. This is usually not a problem for
    most ordering problems, but there can be times where rounding errors may
    result in structures that do not have the desired composition.
    This second step will be implemented in the next iteration of the code.

    If multiple fractions for a single species are found for different sites,
    these will be treated separately if the difference is above a threshold
    tolerance. currently this is .1

    For example, if a fraction of .25 Li is on sites 0,1,2,3 and .5 on sites
    4, 5, 6, 7 then 1 site from [0,1,2,3] will be filled and 2 sites from [4,5,6,7]
    will be filled, even though a lower energy combination might be found by
    putting all lithium in sites [4,5,6,7].

    USE WITH CARE.
    """

    ALGO_FAST = 0
    ALGO_COMPLETE = 1
    ALGO_BEST_FIRST = 2

    def __init__(self, algo=ALGO_FAST, symmetrized_structures=False, no_oxi_states=False):
        """
        Args:
            algo (int): Algorithm to use.
            symmetrized_structures (bool): Whether the input structures are
                instances of SymmetrizedStructure, and that their symmetry
                should be used for the grouping of sites.
            no_oxi_states (bool): Whether to remove oxidation states prior to
                ordering.
        """
        self.algo = algo
        # Populated by apply_transformation; holds all candidate orderings.
        self._all_structures = []
        self.no_oxi_states = no_oxi_states
        self.symmetrized_structures = symmetrized_structures

    def apply_transformation(self, structure, return_ranked_list=False):
        """
        For this transformation, the apply_transformation method will return
        only the ordered structure with the lowest Ewald energy, to be
        consistent with the method signature of the other transformations.
        However, all structures are stored in the all_structures attribute in
        the transformation object for easy access.

        Args:
            structure: Oxidation state decorated disordered structure to order
            return_ranked_list (bool): Whether or not multiple structures are
                returned. If return_ranked_list is a number, that number of
                structures is returned.

        Returns:
            Depending on returned_ranked list, either a transformed structure
            or a list of dictionaries, where each dictionary is of the form
            {"structure" = .... , "other_arguments"}
            the key "transformation" is reserved for the transformation that
            was actually applied to the structure.
            This transformation is parsed by the alchemy classes for generating
            a more specific transformation history. Any other information will
            be stored in the transformation_parameters dictionary in the
            transmuted structure class.
        """
        # int(True) == 1, int(False) == 0; max() below guarantees at least one.
        try:
            num_to_return = int(return_ranked_list)
        except ValueError:
            num_to_return = 1
        num_to_return = max(1, num_to_return)
        if self.no_oxi_states:
            # Replace each species with a dummy "0+" decorated version so the
            # Ewald machinery (which requires oxidation states) still works.
            structure = Structure.from_sites(structure)
            for i, site in enumerate(structure):
                structure[i] = {"%s0+" % k.symbol: v for k, v in site.species.items()}
        equivalent_sites = []
        exemplars = []
        # generate list of equivalent sites to order
        # equivalency is determined by sp_and_occu and symmetry
        # if symmetrized structure is true
        for i, site in enumerate(structure):
            if site.is_ordered:
                continue
            for j, ex in enumerate(exemplars):
                sp = ex.species
                if not site.species.almost_equals(sp):
                    continue
                if self.symmetrized_structures:
                    sym_equiv = structure.find_equivalent_sites(ex)
                    sym_test = site in sym_equiv
                else:
                    sym_test = True
                if sym_test:
                    equivalent_sites[j].append(i)
                    break
            else:
                # No matching exemplar found: this site starts a new group.
                equivalent_sites.append([i])
                exemplars.append(site)
        # generate the list of manipulations and input structure
        s = Structure.from_sites(structure)
        m_list = []
        for g in equivalent_sites:
            # Total (fractional) occupancy of each species over the group.
            total_occupancy = sum([structure[i].species for i in g], Composition())
            total_occupancy = dict(total_occupancy.items())
            # round total occupancy to possible values
            for k, v in total_occupancy.items():
                if abs(v - round(v)) > 0.25:
                    raise ValueError("Occupancy fractions not consistent " "with size of unit cell")
                total_occupancy[k] = int(round(v))
            # start with an ordered structure: fill the whole group with the
            # species of largest |oxidation state|, then manipulate from there.
            initial_sp = max(total_occupancy.keys(), key=lambda x: abs(x.oxi_state))
            for i in g:
                s[i] = initial_sp
            # determine the manipulations
            for k, v in total_occupancy.items():
                if k == initial_sp:
                    continue
                # [charge ratio, count, candidate indices, replacement species]
                m = [
                    k.oxi_state / initial_sp.oxi_state if initial_sp.oxi_state else 0,
                    v,
                    list(g),
                    k,
                ]
                m_list.append(m)
            # determine the number of empty sites
            empty = len(g) - sum(total_occupancy.values())
            if empty > 0.5:
                # None species marks a vacancy (site deletion later).
                m_list.append([0, empty, list(g), None])
        matrix = EwaldSummation(s).total_energy_matrix
        ewald_m = EwaldMinimizer(matrix, m_list, num_to_return, self.algo)
        self._all_structures = []
        lowest_energy = ewald_m.output_lists[0][0]
        num_atoms = sum(structure.composition.values())
        for output in ewald_m.output_lists:
            s_copy = s.copy()
            # do deletions afterwards because they screw up the indices of the
            # structure
            del_indices = []
            for manipulation in output[1]:
                if manipulation[1] is None:
                    del_indices.append(manipulation[0])
                else:
                    s_copy[manipulation[0]] = manipulation[1]
            s_copy.remove_sites(del_indices)
            if self.no_oxi_states:
                # Undo the dummy "0+" decoration added above.
                s_copy.remove_oxidation_states()
            self._all_structures.append(
                {
                    "energy": output[0],
                    "energy_above_minimum": (output[0] - lowest_energy) / num_atoms,
                    "structure": s_copy.get_sorted_structure(),
                }
            )
        if return_ranked_list:
            return self._all_structures[:num_to_return]
        return self._all_structures[0]["structure"]

    def __str__(self):
        return "Order disordered structure transformation"

    def __repr__(self):
        return self.__str__()

    @property
    def inverse(self):
        """
        Returns: None
        """
        return None

    @property
    def is_one_to_many(self):
        """
        Returns: True
        """
        return True

    @property
    def lowest_energy_structure(self):
        """
        :return: Lowest energy structure found. Only valid after
            apply_transformation has been called.
        """
        return self._all_structures[0]["structure"]
class PrimitiveCellTransformation(AbstractTransformation):
    """
    This class finds the primitive cell of the input structure.
    It returns a structure that is not necessarily orthogonalized
    Author: Will Richards
    """

    def __init__(self, tolerance=0.5):
        """
        Args:
            tolerance (float): Tolerance for each coordinate of a particular
                site. For example, [0.5, 0, 0.5] in cartesian coordinates will be
                considered to be on the same coordinates as [0, 0, 0] for a
                tolerance of 0.5. Defaults to 0.5.
        """
        self.tolerance = tolerance

    def apply_transformation(self, structure):
        """
        Returns most primitive cell for structure.

        Args:
            structure: A structure

        Returns:
            The most primitive structure found. The returned structure is
            guaranteed to have len(new structure) <= len(structure).
        """
        # Delegates entirely to Structure's primitive-cell finder.
        return structure.get_primitive_structure(tolerance=self.tolerance)

    def __str__(self):
        return "Primitive cell transformation"

    def __repr__(self):
        return str(self)

    @property
    def inverse(self):
        """Returns: None"""
        return None

    @property
    def is_one_to_many(self):
        """Returns: False"""
        return False
class ConventionalCellTransformation(AbstractTransformation):
    """
    This class finds the conventional cell of the input structure.
    """

    def __init__(self, symprec=0.01, angle_tolerance=5, international_monoclinic=True):
        """
        Args:
            symprec (float): tolerance as in SpacegroupAnalyzer
            angle_tolerance (float): angle tolerance as in SpacegroupAnalyzer
            international_monoclinic (bool): whether to use beta (True) or alpha (False)
                as the non-right-angle in the unit cell
        """
        self.symprec = symprec
        self.angle_tolerance = angle_tolerance
        self.international_monoclinic = international_monoclinic

    def apply_transformation(self, structure):
        """
        Returns the conventional standard cell for the structure.

        (The previous docstring incorrectly said "most primitive cell" —
        a copy-paste from PrimitiveCellTransformation.)

        Args:
            structure: A structure

        Returns:
            The same structure in a conventional standard setting
        """
        sga = SpacegroupAnalyzer(structure, symprec=self.symprec, angle_tolerance=self.angle_tolerance)
        return sga.get_conventional_standard_structure(international_monoclinic=self.international_monoclinic)

    def __str__(self):
        return "Conventional cell transformation"

    def __repr__(self):
        return self.__str__()

    @property
    def inverse(self):
        """
        Returns: None
        """
        return None

    @property
    def is_one_to_many(self):
        """
        Returns: False
        """
        return False
class PerturbStructureTransformation(AbstractTransformation):
    """
    This transformation perturbs a structure by a specified distance in random
    directions. Used for breaking symmetries.
    """

    def __init__(
        self,
        distance: float = 0.01,
        min_distance: Optional[Union[int, float]] = None,
    ):
        """
        Args:
            distance: Distance of perturbation in angstroms. All sites
                will be perturbed by exactly that distance in a random
                direction.
            min_distance: if None, all displacements will be equidistant. If int
                or float, perturb each site a distance drawn from the uniform
                distribution between 'min_distance' and 'distance'.
        """
        self.distance = distance
        self.min_distance = min_distance

    def apply_transformation(self, structure: Structure) -> Structure:
        """
        Perturb every site of a copy of the structure.

        Args:
            structure: Input Structure

        Returns:
            Structure with sites perturbed.
        """
        perturbed = structure.copy()
        perturbed.perturb(self.distance, min_distance=self.min_distance)
        return perturbed

    def __str__(self):
        return "PerturbStructureTransformation : " + "Min_distance = {}".format(self.min_distance)

    def __repr__(self):
        return str(self)

    @property
    def inverse(self):
        """Returns: None (random perturbation is not invertible)."""
        return None

    @property
    def is_one_to_many(self):
        """Returns: False"""
        return False
class DeformStructureTransformation(AbstractTransformation):
    """
    This transformation deforms a structure by a deformation gradient matrix
    """

    def __init__(self, deformation=((1, 0, 0), (0, 1, 0), (0, 0, 1))):
        """
        Args:
            deformation (array): deformation gradient for the transformation
        """
        self._deform = Deformation(deformation)
        # Keep a plain-list copy for serialization/repr purposes.
        self.deformation = self._deform.tolist()

    def apply_transformation(self, structure):
        """
        Apply the deformation gradient to the structure.

        Args:
            structure (Structure): Input Structure

        Returns:
            Deformed Structure.
        """
        return self._deform.apply_to_structure(structure)

    def __str__(self):
        return "DeformStructureTransformation : " + "Deformation = {}".format(str(self.deformation))

    def __repr__(self):
        return str(self)

    @property
    def inverse(self):
        """
        Returns:
            Inverse Transformation (deformation by the inverse gradient).
        """
        return DeformStructureTransformation(self._deform.inv)

    @property
    def is_one_to_many(self):
        """Returns: False"""
        return False
class DiscretizeOccupanciesTransformation(AbstractTransformation):
    """
    Discretizes the site occupancies in a disordered structure; useful for
    grouping similar structures or as a pre-processing step for order-disorder
    transformations.
    """

    def __init__(self, max_denominator=5, tol=None, fix_denominator=False):
        """
        Args:
            max_denominator:
                An integer maximum denominator for discretization. A higher
                denominator allows for finer resolution in the site occupancies.
            tol:
                A float that sets the maximum difference between the original and
                discretized occupancies before throwing an error. If None, it is
                set to 1 / (4 * max_denominator).
            fix_denominator(bool):
                If True, will enforce a common denominator for all species.
                This prevents a mix of denominators (for example, 1/3, 1/4)
                that might require large cell sizes to perform an enumeration.
                'tol' needs to be > 1.0 in some cases.
        """
        self.max_denominator = max_denominator
        self.tol = tol if tol is not None else 1 / (4 * max_denominator)
        self.fix_denominator = fix_denominator

    def apply_transformation(self, structure):
        """
        Discretizes the site occupancies in the structure.

        Args:
            structure: disordered Structure to discretize occupancies

        Returns:
            A new disordered Structure with occupancies discretized
        """
        if structure.is_ordered:
            return structure
        new_species = [dict(sp) for sp in structure.species_and_occu]
        for occ_dict in new_species:
            for specie in occ_dict:
                old_occ = occ_dict[specie]
                if self.fix_denominator:
                    # Snap onto multiples of 1/max_denominator (common denominator).
                    new_occ = around(old_occ * self.max_denominator) / self.max_denominator
                else:
                    # Best rational approximation with denominator <= max_denominator.
                    new_occ = float(Fraction(old_occ).limit_denominator(self.max_denominator))
                if round(abs(old_occ - new_occ), 6) > self.tol:
                    raise RuntimeError("Cannot discretize structure within tolerance!")
                occ_dict[specie] = new_occ
        return Structure(structure.lattice, new_species, structure.frac_coords)

    def __str__(self):
        return "DiscretizeOccupanciesTransformation"

    def __repr__(self):
        return str(self)

    @property
    def inverse(self):
        """Returns: None"""
        return None

    @property
    def is_one_to_many(self):
        """Returns: False"""
        return False
class ChargedCellTransformation(AbstractTransformation):
    """
    The ChargedCellTransformation applies a charge to a structure (or defect
    object).
    """

    def __init__(self, charge=0):
        """
        Args:
            charge: A integer charge to apply to the structure.
                Defaults to zero. Has to be a single integer. e.g. 2
        """
        self.charge = charge

    def apply_transformation(self, structure):
        """
        Set the overall charge on a copy of the structure.

        Args:
            structure (Structure): Input Structure

        Returns:
            Charged Structure.
        """
        charged = structure.copy()
        charged.set_charge(self.charge)
        return charged

    def __str__(self):
        return "Structure with charge " + "{}".format(self.charge)

    def __repr__(self):
        return str(self)

    @property
    def inverse(self):
        """
        Raises: NotImplementedError
        """
        raise NotImplementedError()

    @property
    def is_one_to_many(self):
        """Returns: False"""
        return False
class ScaleToRelaxedTransformation(AbstractTransformation):
    """
    Takes the unrelaxed and relaxed structure and applies its site and volume
    relaxation to a structurally similar structures (e.g. bulk: NaCl and PbTe
    (rock-salt), slab: Sc(10-10) and Mg(10-10) (hcp), GB: Mo(001) sigma 5 GB,
    Fe(001) sigma 5). Useful for finding an initial guess of a set of similar
    structures closer to its most relaxed state.
    """

    def __init__(self, unrelaxed_structure, relaxed_structure, species_map=None):
        """
        Args:
            unrelaxed_structure (Structure): Initial, unrelaxed structure
            relaxed_structure (Structure): Relaxed structure
            species_map (dict): A dict or list of tuples containing the species mapping in
                string-string pairs. The first species corresponds to the relaxed
                structure while the second corresponds to the species in the
                structure to be scaled. E.g., {"Li":"Na"} or [("Fe2+","Mn2+")].
                Multiple substitutions can be done. Overloaded to accept
                sp_and_occu dictionary E.g. {"Si: {"Ge":0.75, "C":0.25}},
                which substitutes a single species with multiple species to
                generate a disordered structure.
        """
        # Get the ratio matrix for lattice relaxation which can be
        # applied to any similar structure to simulate volumetric relaxation
        relax_params = list(relaxed_structure.lattice.abc)
        relax_params.extend(relaxed_structure.lattice.angles)
        unrelax_params = list(unrelaxed_structure.lattice.abc)
        unrelax_params.extend(unrelaxed_structure.lattice.angles)
        # Per-parameter relaxed/unrelaxed ratios in the fixed order
        # (a, b, c, alpha, beta, gamma); consumed in the same order below.
        self.params_percent_change = []
        for i, p in enumerate(relax_params):
            self.params_percent_change.append(relax_params[i] / unrelax_params[i])
        self.unrelaxed_structure = unrelaxed_structure
        self.relaxed_structure = relaxed_structure
        self.species_map = species_map

    def apply_transformation(self, structure):
        """
        Returns a copy of structure with lattice parameters
        and sites scaled to the same degree as the relaxed_structure.

        Arg:
            structure (Structure): A structurally similar structure in
                regards to crystal and site positions.
        """
        if self.species_map is None:
            # Infer the species correspondence between the reference and the
            # structure to be scaled via electronegativity-based matching.
            match = StructureMatcher()
            s_map = match.get_best_electronegativity_anonymous_mapping(self.unrelaxed_structure, structure)
        else:
            s_map = self.species_map
        # Scale each of (a, b, c, alpha, beta, gamma) by its stored ratio.
        params = list(structure.lattice.abc)
        params.extend(structure.lattice.angles)
        new_lattice = Lattice.from_parameters(*[p * self.params_percent_change[i] for i, p in enumerate(params)])
        # Sites come from the relaxed reference, with species mapped onto the
        # target structure's chemistry.
        species, frac_coords = [], []
        for site in self.relaxed_structure:
            species.append(s_map[site.specie])
            frac_coords.append(site.frac_coords)
        return Structure(new_lattice, species, frac_coords)

    def __str__(self):
        return "ScaleToRelaxedTransformation"

    def __repr__(self):
        return self.__str__()

    @property
    def inverse(self):
        """
        Returns: None
        """
        return None

    @property
    def is_one_to_many(self):
        """
        Returns: False
        """
        return False
| |
# Copyright 2013 NEC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.compute import base
from tempest.common import tempest_fixtures as fixtures
from tempest.common.utils import data_utils
from tempest import test
class AggregatesAdminV3Test(base.BaseV3ComputeAdminTest):
"""
Tests Aggregates API that require admin privileges
"""
_host_key = 'os-extended-server-attributes:host'
@classmethod
def setUpClass(cls):
super(AggregatesAdminV3Test, cls).setUpClass()
cls.client = cls.aggregates_admin_client
cls.aggregate_name_prefix = 'test_aggregate_'
cls.az_name_prefix = 'test_az_'
resp, hosts_all = cls.hosts_admin_client.list_hosts()
hosts = map(lambda x: x['host_name'],
filter(lambda y: y['service'] == 'compute', hosts_all))
cls.host = hosts[0]
    @test.attr(type='gate')
    def test_aggregate_create_delete(self):
        # Create and delete an aggregate.
        aggregate_name = data_utils.rand_name(self.aggregate_name_prefix)
        resp, aggregate = self.client.create_aggregate(name=aggregate_name)
        # 201 Created; a new aggregate has no availability zone by default.
        self.assertEqual(201, resp.status)
        self.assertEqual(aggregate_name, aggregate['name'])
        self.assertIsNone(aggregate['availability_zone'])
        resp, _ = self.client.delete_aggregate(aggregate['id'])
        self.assertEqual(204, resp.status)
        # Block until the aggregate is actually gone to avoid leaking state.
        self.client.wait_for_resource_deletion(aggregate['id'])
    @test.attr(type='gate')
    def test_aggregate_create_delete_with_az(self):
        # Create and delete an aggregate.
        aggregate_name = data_utils.rand_name(self.aggregate_name_prefix)
        az_name = data_utils.rand_name(self.az_name_prefix)
        resp, aggregate = self.client.create_aggregate(
            name=aggregate_name, availability_zone=az_name)
        # 201 Created; the supplied availability zone should be echoed back.
        self.assertEqual(201, resp.status)
        self.assertEqual(aggregate_name, aggregate['name'])
        self.assertEqual(az_name, aggregate['availability_zone'])
        resp, _ = self.client.delete_aggregate(aggregate['id'])
        self.assertEqual(204, resp.status)
        # Block until the aggregate is actually gone to avoid leaking state.
        self.client.wait_for_resource_deletion(aggregate['id'])
    @test.attr(type='gate')
    def test_aggregate_create_verify_entry_in_list(self):
        # Create an aggregate and ensure it is listed.
        aggregate_name = data_utils.rand_name(self.aggregate_name_prefix)
        resp, aggregate = self.client.create_aggregate(name=aggregate_name)
        self.addCleanup(self.client.delete_aggregate, aggregate['id'])
        resp, aggregates = self.client.list_aggregates()
        self.assertEqual(200, resp.status)
        # Compare on (id, availability_zone) pairs so unrelated fields in the
        # listing don't affect the check.
        self.assertIn((aggregate['id'], aggregate['availability_zone']),
                      map(lambda x: (x['id'], x['availability_zone']),
                          aggregates))
    @test.attr(type='gate')
    def test_aggregate_create_update_metadata_get_details(self):
        # Create an aggregate and ensure its details are returned.
        aggregate_name = data_utils.rand_name(self.aggregate_name_prefix)
        resp, aggregate = self.client.create_aggregate(name=aggregate_name)
        self.addCleanup(self.client.delete_aggregate, aggregate['id'])
        resp, body = self.client.get_aggregate(aggregate['id'])
        self.assertEqual(200, resp.status)
        self.assertEqual(aggregate['name'], body['name'])
        self.assertEqual(aggregate['availability_zone'],
                         body['availability_zone'])
        # A freshly created aggregate starts with empty metadata.
        self.assertEqual({}, body["metadata"])
        # set the metadata of the aggregate
        meta = {"key": "value"}
        resp, body = self.client.set_metadata(aggregate['id'], meta)
        self.assertEqual(200, resp.status)
        self.assertEqual(meta, body["metadata"])
        # verify the metadata has been set
        resp, body = self.client.get_aggregate(aggregate['id'])
        self.assertEqual(200, resp.status)
        self.assertEqual(meta, body["metadata"])
    @test.attr(type='gate')
    def test_aggregate_create_update_with_az(self):
        # Update an aggregate and ensure properties are updated correctly
        # Lock the availability_zone resource so parallel tests don't race.
        self.useFixture(fixtures.LockFixture('availability_zone'))
        aggregate_name = data_utils.rand_name(self.aggregate_name_prefix)
        az_name = data_utils.rand_name(self.az_name_prefix)
        resp, aggregate = self.client.create_aggregate(
            name=aggregate_name, availability_zone=az_name)
        self.addCleanup(self.client.delete_aggregate, aggregate['id'])
        self.assertEqual(201, resp.status)
        self.assertEqual(aggregate_name, aggregate['name'])
        self.assertEqual(az_name, aggregate['availability_zone'])
        self.assertIsNotNone(aggregate['id'])
        aggregate_id = aggregate['id']
        new_aggregate_name = aggregate_name + '_new'
        new_az_name = az_name + '_new'
        resp, resp_aggregate = self.client.update_aggregate(aggregate_id,
                                                            new_aggregate_name,
                                                            new_az_name)
        self.assertEqual(200, resp.status)
        self.assertEqual(new_aggregate_name, resp_aggregate['name'])
        self.assertEqual(new_az_name, resp_aggregate['availability_zone'])
        # The updated name/AZ should be visible in the aggregate listing.
        resp, aggregates = self.client.list_aggregates()
        self.assertEqual(200, resp.status)
        self.assertIn((aggregate_id, new_aggregate_name, new_az_name),
                      map(lambda x:
                          (x['id'], x['name'], x['availability_zone']),
                          aggregates))
    @test.attr(type='gate')
    def test_aggregate_add_remove_host(self):
        # Add an host to the given aggregate and remove.
        # NOTE(review): the lock presumably serializes tests that mutate
        # host/AZ state — confirm against the fixture's usage elsewhere.
        self.useFixture(fixtures.LockFixture('availability_zone'))
        aggregate_name = data_utils.rand_name(self.aggregate_name_prefix)
        resp, aggregate = self.client.create_aggregate(name=aggregate_name)
        self.addCleanup(self.client.delete_aggregate, aggregate['id'])
        resp, body = self.client.add_host(aggregate['id'], self.host)
        self.assertEqual(202, resp.status)
        self.assertEqual(aggregate_name, body['name'])
        self.assertEqual(aggregate['availability_zone'],
                         body['availability_zone'])
        self.assertIn(self.host, body['hosts'])
        resp, body = self.client.remove_host(aggregate['id'], self.host)
        self.assertEqual(202, resp.status)
        self.assertEqual(aggregate_name, body['name'])
        self.assertEqual(aggregate['availability_zone'],
                         body['availability_zone'])
        self.assertNotIn(self.host, body['hosts'])
@test.attr(type='gate')
def test_aggregate_add_host_list(self):
# Add an host to the given aggregate and list.
self.useFixture(fixtures.LockFixture('availability_zone'))
aggregate_name = data_utils.rand_name(self.aggregate_name_prefix)
resp, aggregate = self.client.create_aggregate(name=aggregate_name)
self.addCleanup(self.client.delete_aggregate, aggregate['id'])
self.client.add_host(aggregate['id'], self.host)
self.addCleanup(self.client.remove_host, aggregate['id'], self.host)
resp, aggregates = self.client.list_aggregates()
aggs = filter(lambda x: x['id'] == aggregate['id'], aggregates)
self.assertEqual(1, len(aggs))
agg = aggs[0]
self.assertEqual(aggregate_name, agg['name'])
self.assertIsNone(agg['availability_zone'])
self.assertIn(self.host, agg['hosts'])
@test.attr(type='gate')
def test_aggregate_add_host_get_details(self):
# Add an host to the given aggregate and get details.
self.useFixture(fixtures.LockFixture('availability_zone'))
aggregate_name = data_utils.rand_name(self.aggregate_name_prefix)
resp, aggregate = self.client.create_aggregate(name=aggregate_name)
self.addCleanup(self.client.delete_aggregate, aggregate['id'])
self.client.add_host(aggregate['id'], self.host)
self.addCleanup(self.client.remove_host, aggregate['id'], self.host)
resp, body = self.client.get_aggregate(aggregate['id'])
self.assertEqual(aggregate_name, body['name'])
self.assertIsNone(body['availability_zone'])
self.assertIn(self.host, body['hosts'])
    @test.attr(type='gate')
    def test_aggregate_add_host_create_server_with_az(self):
        # Add an host to the given aggregate and create a server.
        self.useFixture(fixtures.LockFixture('availability_zone'))
        aggregate_name = data_utils.rand_name(self.aggregate_name_prefix)
        az_name = data_utils.rand_name(self.az_name_prefix)
        resp, aggregate = self.client.create_aggregate(
            name=aggregate_name, availability_zone=az_name)
        self.addCleanup(self.client.delete_aggregate, aggregate['id'])
        self.client.add_host(aggregate['id'], self.host)
        self.addCleanup(self.client.remove_host, aggregate['id'], self.host)
        server_name = data_utils.rand_name('test_server_')
        admin_servers_client = self.servers_admin_client
        resp, server = self.create_test_server(name=server_name,
                                               availability_zone=az_name,
                                               wait_until='ACTIVE')
        resp, body = admin_servers_client.get_server(server['id'])
        # Scheduling into the aggregate's AZ must land the server on the
        # host that was added to the aggregate.
        self.assertEqual(self.host, body[self._host_key])
| |
from __future__ import unicode_literals
import itertools
import os
import random
import unicodedata
from collections import defaultdict
from django.conf import settings
from django.core.files import File
from django.template.defaultfilters import slugify
from django.utils.six import moves
from faker import Factory
from faker.providers import BaseProvider
from payments import PaymentStatus
from prices import Price
from ...discount.models import Sale, Voucher
from ...order import OrderStatus
from ...order.models import DeliveryGroup, Order, OrderedItem, Payment
from ...product.models import (AttributeChoiceValue, Category, Product,
ProductAttribute, ProductClass, ProductImage,
ProductVariant, Stock, StockLocation)
from ...shipping.models import ANY_COUNTRY, ShippingMethod
from ...userprofile.models import Address, User
from ...userprofile.utils import store_user_address
# Shared faker instance used by all generators in this module.
fake = Factory.create()
# Name of the single StockLocation all generated stock is assigned to.
STOCK_LOCATION = 'default'
# Category used when a schema entry does not name one.
DEFAULT_CATEGORY = 'Default'
# Regions for which shipping prices are generated (incl. the catch-all).
DELIVERY_REGIONS = [ANY_COUNTRY, 'US', 'PL', 'DE', 'GB']
# Blueprint for generated product classes.  Per entry:
#   category                 - category name the products are put in
#   product_attributes       - attribute name -> list of choice values
#   variant_attributes       - per-variant attribute name -> choice values
#   images_dir               - subdirectory of the placeholder dir
#   is_shipping_required     - whether products need shipping
#   different_variant_prices - give each variant its own price override
DEFAULT_SCHEMA = {
    'T-Shirt': {
        'category': 'Apparel',
        'product_attributes': {
            'Color': ['Blue', 'White'],
            'Collar': ['Round', 'V-Neck', 'Polo'],
            'Brand': ['Saleor']
        },
        'variant_attributes': {
            'Size': ['XS', 'S', 'M', 'L', 'XL', 'XXL']
        },
        'images_dir': 't-shirts/',
        'is_shipping_required': True
    },
    'Mugs': {
        'category': 'Accessories',
        'product_attributes': {
            'Brand': ['Saleor']
        },
        'variant_attributes': {},
        'images_dir': 'mugs/',
        'is_shipping_required': True
    },
    'Coffee': {
        'category': 'Groceries',
        'product_attributes': {
            'Coffee Genre': ['Arabica', 'Robusta'],
            'Brand': ['Saleor']
        },
        'variant_attributes': {
            'Box Size': ['100g', '250g', '500g', '1kg']
        },
        'different_variant_prices': True,
        'images_dir': 'coffee/',
        'is_shipping_required': True
    },
    'Candy': {
        'category': 'Groceries',
        'product_attributes': {
            'Flavor': ['Sour', 'Sweet'],
            'Brand': ['Saleor']
        },
        'variant_attributes': {
            'Candy Box Size': ['100g', '250g', '500g']
        },
        'images_dir': 'candy/',
        'different_variant_prices': True,
        'is_shipping_required': True
    },
    'E-books': {
        'category': 'Books',
        'product_attributes': {
            'Author': ['John Doe', 'Milionare Pirate'],
            'Publisher': ['Mirumee Press', 'Saleor Publishing'],
            'Language': ['English', 'Pirate']
        },
        'variant_attributes': {},
        'images_dir': 'books/',
        'is_shipping_required': False
    },
    'Books': {
        'category': 'Books',
        'product_attributes': {
            'Author': ['John Doe', 'Milionare Pirate'],
            'Publisher': ['Mirumee Press', 'Saleor Publishing'],
            'Language': ['English', 'Pirate']
        },
        'variant_attributes': {
            'Cover': ['Soft', 'Hard']
        },
        'images_dir': 'books/',
        'different_variant_prices': True,
        'is_shipping_required': True
    }
}
def create_attributes_and_values(attribute_data):
    """Create a ProductAttribute (plus its choice values) per schema entry."""
    created = []
    for name, value_names in attribute_data.items():
        attribute = create_attribute(slug=slugify(name), name=name)
        for value_name in value_names:
            create_attribute_value(attribute, name=value_name)
        created.append(attribute)
    return created
def create_product_class_with_attributes(name, schema):
    """Get/create a ProductClass and attach its schema-defined attributes."""
    product_attributes_schema = schema.get('product_attributes', {})
    variant_attributes_schema = schema.get('variant_attributes', {})
    is_shipping_required = schema.get('is_shipping_required', True)
    product_class = get_or_create_product_class(
        name=name, is_shipping_required=is_shipping_required)
    product_attributes = create_attributes_and_values(
        product_attributes_schema)
    variant_attributes = create_attributes_and_values(
        variant_attributes_schema)
    product_class.product_attributes.add(*product_attributes)
    product_class.variant_attributes.add(*variant_attributes)
    return product_class
def create_product_classes_by_schema(root_schema):
    """Build one product class per schema entry; return (class, schema) pairs."""
    return [(create_product_class_with_attributes(class_name, class_schema),
             class_schema)
            for class_name, class_schema in root_schema.items()]
def set_product_attributes(product, product_class):
    """Assign a random value for each product-level attribute and save."""
    product.attributes = {
        str(attribute.pk): str(random.choice(attribute.values.all()).pk)
        for attribute in product_class.product_attributes.all()}
    product.save(update_fields=['attributes'])
def set_variant_attributes(variant, product_class):
    """Assign a not-yet-used value per variant attribute and save.

    Looks at the attribute choices already used by sibling variants and
    picks a random unused one for each variant attribute.  If any
    attribute has no values left, the function returns early WITHOUT
    saving, leaving the variant's attributes unchanged.
    """
    attr_dict = {}
    existing_variants = variant.product.variants.values_list('attributes',
                                                             flat=True)
    # attribute id -> list of value ids already taken by sibling variants
    existing_variant_attributes = defaultdict(list)
    for variant_attrs in existing_variants:
        for attr_id, value_id in variant_attrs.items():
            existing_variant_attributes[attr_id].append(value_id)
    for product_attribute in product_class.variant_attributes.all():
        available_values = product_attribute.values.exclude(
            pk__in=[int(pk) for pk
                    in existing_variant_attributes[str(product_attribute.pk)]])
        if not available_values:
            return
        value = random.choice(available_values)
        attr_dict[str(product_attribute.pk)] = str(value.pk)
    variant.attributes = attr_dict
    variant.save(update_fields=['attributes'])
def get_variant_combinations(product):
    """Return every variant attribute combination for *product*."""
    # Returns all possible variant combinations
    # For example: product class has two variant attributes: Size, Color
    # Size has available values: [S, M], Color has values [Red, Green]
    # All combinations will be generated (S, Red), (S, Green), (M, Red),
    # (M, Green)
    # Output is list of dicts, where key is product attribute id and value is
    # attribute value id. Casted to string.
    variant_attr_map = {attr: attr.values.all()
                        for attr
                        in product.product_class.variant_attributes.all()}
    all_combinations = itertools.product(*variant_attr_map.values())
    return [{str(attr_value.attribute.pk): str(attr_value.pk)
             for attr_value in combination}
            for combination in all_combinations]
def get_price_override(schema, combinations_num, current_price):
    """Return descending per-variant prices, or [] if the schema opts out."""
    if not schema.get('different_variant_prices'):
        return []
    overrides = [current_price + fake.price()
                 for _ in range(combinations_num)]
    overrides.sort(reverse=True)
    return overrides
def create_products_by_class(product_class, schema,
                             placeholder_dir, how_many=10, create_images=True,
                             stdout=None):
    """Create *how_many* products of *product_class* with variants/images."""
    category_name = schema.get('category') or DEFAULT_CATEGORY
    category = get_or_create_category(category_name)
    for dummy in range(how_many):
        product = create_product(product_class=product_class)
        set_product_attributes(product, product_class)
        product.categories.add(category)
        if create_images:
            class_placeholders = os.path.join(
                placeholder_dir, schema['images_dir'])
            # 1-4 images per product
            create_product_images(
                product, random.randrange(1, 5), class_placeholders)
        variant_combinations = get_variant_combinations(product)
        prices = get_price_override(
            schema, len(variant_combinations), product.price)
        # zip_longest pads with None when there are no price overrides.
        variants_with_prices = moves.zip_longest(
            variant_combinations, prices)
        # SKU suffixes are numbered from 1337.
        for i, variant_price in enumerate(variants_with_prices, start=1337):
            attr_combination, price = variant_price
            sku = '%s-%s' % (product.pk, i)
            create_variant(
                product, attributes=attr_combination, sku=sku,
                price_override=price)
        if not variant_combinations:
            # Create min one variant for products without variant level attrs
            sku = '%s-%s' % (product.pk, fake.random_int(1000, 100000))
            create_variant(product, sku=sku)
        if stdout is not None:
            stdout.write('Product: %s (%s), %s variant(s)' % (
                product, product_class.name, len(variant_combinations) or 1))
def create_products_by_schema(placeholder_dir, how_many, create_images,
                              stdout=None, schema=DEFAULT_SCHEMA):
    """Create products for every product class described by *schema*."""
    classes_with_schemas = create_product_classes_by_schema(schema)
    for product_class, class_schema in classes_with_schemas:
        create_products_by_class(
            product_class, class_schema, placeholder_dir,
            how_many=how_many, create_images=create_images, stdout=stdout)
class SaleorProvider(BaseProvider):
    """Faker provider adding store-specific fake-data helpers."""
    def price(self):
        # Random positive two-digit decimal in the store's default currency.
        return Price(fake.pydecimal(2, 2, positive=True),
                     currency=settings.DEFAULT_CURRENCY)
    def delivery_region(self):
        # One of the supported delivery regions (incl. the catch-all).
        return random.choice(DELIVERY_REGIONS)
    def shipping_method(self):
        # A random existing ShippingMethod row.
        return random.choice(ShippingMethod.objects.all())
# Register the custom provider so fake.price() etc. become available.
fake.add_provider(SaleorProvider)
def get_email(first_name, last_name):
    """Build an ASCII example.com address from a (possibly accented) name."""
    def _to_ascii(value):
        # NFD decomposition splits off combining accents, which the ASCII
        # encode step with 'ignore' then drops.
        return unicodedata.normalize('NFD', value).encode('ascii', 'ignore')
    local_part = '%s.%s' % (_to_ascii(first_name).lower().decode('utf-8'),
                            _to_ascii(last_name).lower().decode('utf-8'))
    return local_part + '@example.com'
def get_or_create_category(name, **kwargs):
    """Fetch or create a Category named *name* with fake description/slug."""
    category_defaults = {
        'description': fake.text()}
    category_defaults.update(kwargs)
    category_defaults['slug'] = fake.slug(name)
    category, _ = Category.objects.get_or_create(
        name=name, defaults=category_defaults)
    return category
def get_or_create_product_class(name, **kwargs):
    """Fetch or create a ProductClass by name; kwargs are create defaults."""
    product_class, _ = ProductClass.objects.get_or_create(
        name=name, defaults=kwargs)
    return product_class
def create_product(**kwargs):
    """Create a Product with fake name/price/description (overridable)."""
    data = {
        'name': fake.company(),
        'price': fake.price(),
        'description': '\n\n'.join(fake.paragraphs(5))}
    data.update(kwargs)
    return Product.objects.create(**data)
def create_stock(variant, **kwargs):
    """Create a Stock record for *variant* at the default stock location."""
    location, _ = StockLocation.objects.get_or_create(name=STOCK_LOCATION)
    data = {
        'variant': variant,
        'location': location,
        'quantity': fake.random_int(1, 50)}
    data.update(kwargs)
    return Stock.objects.create(**data)
def create_variant(product, **kwargs):
    """Create a ProductVariant (and an initial Stock record for it)."""
    data = {'product': product}
    data.update(kwargs)
    variant = ProductVariant.objects.create(**data)
    create_stock(variant)
    return variant
def create_product_image(product, placeholder_dir):
    """Attach a random placeholder image to *product* and return it.

    Fixes the original bug where ``ProductImage(...).save()`` was
    returned: ``Model.save()`` returns None, so the function always
    returned None instead of the created image.
    """
    placeholder_root = os.path.join(settings.PROJECT_ROOT, placeholder_dir)
    img_path = '%s/%s' % (placeholder_dir,
                          random.choice(os.listdir(placeholder_root)))
    image = ProductImage(
        product=product,
        image=File(open(img_path, 'rb')))
    image.save()
    return image
def create_attribute(**kwargs):
    """Get or create a ProductAttribute; defaults derive from a random word."""
    word = fake.word()
    data = {
        'slug': word,
        'name': word.title()}
    data.update(kwargs)
    return ProductAttribute.objects.get_or_create(**data)[0]
def create_attribute_value(attribute, **kwargs):
    """Get or create a choice value for *attribute*; slug follows the name."""
    data = {
        'attribute': attribute,
        'name': fake.word()}
    data.update(kwargs)
    data['slug'] = slugify(data['name'])
    return AttributeChoiceValue.objects.get_or_create(**data)[0]
def create_product_images(product, how_many, placeholder_dir):
    """Attach *how_many* random placeholder images to *product*."""
    for _ in range(how_many):
        create_product_image(product, placeholder_dir)
def create_address():
    """Create and return an Address filled with fake data."""
    return Address.objects.create(
        first_name=fake.first_name(),
        last_name=fake.last_name(),
        street_address_1=fake.street_address(),
        city=fake.city(),
        postal_code=fake.postcode(),
        country=fake.country_code())
def create_fake_user():
    """Create an active user whose default addresses are a fresh fake one."""
    address = create_address()
    email = get_email(address.first_name, address.last_name)
    user = User.objects.create_user(email=email, password='password')
    user.addresses.add(address)
    # Use the same address for both billing and shipping defaults.
    user.default_billing_address = address
    user.default_shipping_address = address
    user.is_active = True
    user.save()
    return user
def create_payment(delivery_group):
    """Create a fake Payment for the group's order in a random status."""
    order = delivery_group.order
    status = random.choice(
        [PaymentStatus.WAITING, PaymentStatus.PREAUTH, PaymentStatus.CONFIRMED])
    payment = Payment.objects.create(
        order=order,
        status=status,
        variant='default',
        transaction_id=str(fake.random_int(1, 100000)),
        currency=settings.DEFAULT_CURRENCY,
        total=order.get_total().gross,
        delivery=delivery_group.shipping_price.gross,
        customer_ip_address=fake.ipv4(),
        billing_first_name=order.billing_address.first_name,
        billing_last_name=order.billing_address.last_name,
        billing_address_1=order.billing_address.street_address_1,
        billing_city=order.billing_address.city,
        billing_postcode=order.billing_address.postal_code,
        billing_country_code=order.billing_address.country)
    if status == PaymentStatus.CONFIRMED:
        # Confirmed payments are treated as fully captured.
        payment.captured_amount = payment.total
        payment.save()
    return payment
def create_delivery_group(order):
    """Create a DeliveryGroup for *order* using a random shipping method."""
    region = order.shipping_address.country
    if region not in DELIVERY_REGIONS:
        # Fall back to the catch-all region for unsupported countries.
        region = ANY_COUNTRY
    shipping_method = fake.shipping_method()
    shipping_country = shipping_method.price_per_country.get_or_create(
        country_code=region, defaults={'price': fake.price()})[0]
    delivery_group = DeliveryGroup.objects.create(
        status=random.choice(['new', 'shipped']),
        order=order,
        shipping_method_name=str(shipping_country),
        shipping_price=shipping_country.price)
    return delivery_group
def create_order_line(delivery_group):
    """Add a random product (its first variant) as a line in the group."""
    product = Product.objects.all().order_by('?')[0]
    variant = product.variants.all()[0]
    quantity = random.randrange(1, 5)
    return OrderedItem.objects.create(
        delivery_group=delivery_group,
        product=product,
        product_name=product.name,
        product_sku=variant.sku,
        quantity=quantity,
        unit_price_net=product.price.net,
        unit_price_gross=product.price.gross)
def create_order_lines(delivery_group, how_many=10):
    """Lazily create *how_many* order lines for *delivery_group*."""
    return (create_order_line(delivery_group) for _ in range(how_many))
def create_fake_order():
    """Create a complete fake order: user, delivery group, lines, payment."""
    # Half of the orders are anonymous (no user account attached).
    user = random.choice([None, User.objects.filter(
        is_superuser=False).order_by('?').first()])
    if user:
        user_data = {
            'user': user,
            'billing_address': user.default_billing_address,
            'shipping_address': user.default_shipping_address}
    else:
        address = create_address()
        user_data = {
            'billing_address': address,
            'shipping_address': address,
            'user_email': get_email(
                address.first_name, address.last_name)}
    order = Order.objects.create(**user_data)
    order.change_status(OrderStatus.PAYMENT_PENDING)
    delivery_group = create_delivery_group(order)
    # Materializes 1-4 lines from the lazy generator while summing.
    lines = create_order_lines(delivery_group, random.randrange(1, 5))
    order.total = sum(
        [line.get_total() for line in lines], delivery_group.shipping_price)
    order.save()
    payment = create_payment(delivery_group)
    if payment.status == PaymentStatus.CONFIRMED:
        order.change_status(OrderStatus.FULLY_PAID)
        if random.choice([True, False]):
            order.change_status(OrderStatus.SHIPPED)
    return order
def create_fake_sale():
    """Create a percentage Sale applied to four random products."""
    sale = Sale.objects.create(
        name='Happy %s day!' % fake.word(),
        type=Sale.PERCENTAGE,
        value=random.choice([10, 20, 30, 40, 50]))
    discounted = Product.objects.all().order_by('?')[:4]
    for product in discounted:
        sale.products.add(product)
    return sale
def create_users(how_many=10):
    """Yield a progress message for each fake user created."""
    for _ in range(how_many):
        new_user = create_fake_user()
        yield 'User: %s' % (new_user.email,)
def create_orders(how_many=10):
    """Yield a progress message for each fake order created."""
    for _ in range(how_many):
        new_order = create_fake_order()
        yield 'Order: %s' % (new_order,)
def create_product_sales(how_many=5):
    """Yield a progress message for each fake sale created."""
    for _ in range(how_many):
        new_sale = create_fake_sale()
        yield 'Sale: %s' % (new_sale,)
def create_shipping_methods():
    """Create the two demo carriers, yielding a message for each."""
    for carrier_name in ('UPC', 'DHL'):
        shipping_method = ShippingMethod.objects.create(name=carrier_name)
        shipping_method.price_per_country.create(price=fake.price())
        yield 'Shipping method #%d' % shipping_method.id
def create_vouchers():
    """Create the demo vouchers (idempotent), yielding a message for each."""
    # 100% shipping discount voucher.
    voucher, created = Voucher.objects.get_or_create(
        code='FREESHIPPING', defaults={
            'type': Voucher.SHIPPING_TYPE,
            'name': 'Free shipping',
            'discount_value_type': Voucher.DISCOUNT_VALUE_PERCENTAGE,
            'discount_value': 100})
    if created:
        yield 'Voucher #%d' % voucher.id
    else:
        yield 'Shipping voucher already exists'
    # Fixed-amount discount voucher for orders above the limit.
    voucher, created = Voucher.objects.get_or_create(
        code='DISCOUNT', defaults={
            'type': Voucher.VALUE_TYPE,
            'name': 'Big order discount',
            'discount_value_type': Voucher.DISCOUNT_VALUE_FIXED,
            'discount_value': 25,
            'limit': 200})
    if created:
        yield 'Voucher #%d' % voucher.id
    else:
        yield 'Value voucher already exists'
def set_featured_products(how_many=8):
    """Mark *how_many* random products as featured."""
    featured_pks = Product.objects.order_by('?')[:how_many].values_list(
        'pk', flat=True)
    Product.objects.filter(pk__in=featured_pks).update(is_featured=True)
    yield 'Featured products created'
def add_address_to_admin(email):
    """Attach a fresh fake address to the user with *email* as defaults."""
    address = create_address()
    admin = User.objects.get(email=email)
    store_user_address(admin, address, True, True)
| |
# Copyright (c) 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Compute-related Utilities and helpers."""
import itertools
import string
import traceback
from oslo.config import cfg
from nova import block_device
from nova.compute import flavors
from nova.compute import power_state
from nova.compute import task_states
from nova import exception
from nova.i18n import _LW
from nova.network import model as network_model
from nova import notifications
from nova import objects
from nova.objects import base as obj_base
from nova.openstack.common import log
from nova import rpc
from nova import utils
from nova.virt import driver
# Global oslo.config handle; the 'host' option is defined in nova.netconf.
CONF = cfg.CONF
CONF.import_opt('host', 'nova.netconf')
LOG = log.getLogger(__name__)
def exception_to_dict(fault):
    """Converts exceptions to a dict for use in notifications."""
    # TODO(johngarbutt) move to nova/exception.py to share with wrap_exception
    # Default to 500 unless the exception carries its own HTTP-style code.
    code = 500
    if hasattr(fault, "kwargs"):
        code = fault.kwargs.get('code', 500)
    # get the message from the exception that was thrown
    # if that does not exist, use the name of the exception class itself
    try:
        message = fault.format_message()
    # These exception handlers are broad so we don't fail to log the fault
    # just because there is an unexpected error retrieving the message
    except Exception:
        try:
            # NOTE: `unicode` means this module targets Python 2.
            message = unicode(fault)
        except Exception:
            message = None
    if not message:
        message = fault.__class__.__name__
    # NOTE(dripton) The message field in the database is limited to 255 chars.
    # MySQL silently truncates overly long messages, but PostgreSQL throws an
    # error if we don't truncate it.
    u_message = unicode(message)[:255]
    fault_dict = dict(exception=fault)
    fault_dict["message"] = u_message
    fault_dict["code"] = code
    return fault_dict
def _get_fault_details(exc_info, error_code):
    """Return the formatted traceback text for 500-class faults, else ''."""
    if not exc_info or error_code != 500:
        return unicode('')
    tb = exc_info[2]
    if not tb:
        return unicode('')
    return unicode(''.join(traceback.format_tb(tb)))
def add_instance_fault_from_exc(context, instance, fault, exc_info=None):
    """Adds the specified fault to the database."""
    fault_obj = objects.InstanceFault(context=context)
    fault_obj.host = CONF.host
    fault_obj.instance_uuid = instance['uuid']
    # Fill message/code (and the exception itself) from the fault.
    fault_obj.update(exception_to_dict(fault))
    fault_obj.details = _get_fault_details(exc_info, fault_obj.code)
    fault_obj.create()
def get_device_name_for_instance(context, instance, bdms, device):
    """Validates (or generates) a device name for instance.

    This method is a wrapper for get_next_device_name that gets the list
    of used devices and the root device from a block device mapping.
    """
    mappings = block_device.instance_block_mapping(instance, bdms)
    return get_next_device_name(instance, mappings.values(),
                                mappings['root'], device)
def default_device_names_for_instance(instance, root_device_name,
                                      *block_device_lists):
    """Generate missing device names for an instance."""
    # Start from the names already assigned, plus the root device.
    dev_list = [bdm.device_name
                for bdm in itertools.chain(*block_device_lists)
                if bdm.device_name]
    if root_device_name not in dev_list:
        dev_list.append(root_device_name)
    for bdm in itertools.chain(*block_device_lists):
        dev = bdm.device_name
        if not dev:
            # Pick the next free name and persist it on the mapping.
            dev = get_next_device_name(instance, dev_list,
                                       root_device_name)
            bdm.device_name = dev
            bdm.save()
            dev_list.append(dev)
def get_next_device_name(instance, device_name_list,
                         root_device_name=None, device=None):
    """Validates (or generates) a device name for instance.

    If device is not set, it will generate a unique device appropriate
    for the instance. It uses the root_device_name (if provided) and
    the list of used devices to find valid device names. If the device
    name is valid but applicable to a different backend (for example
    /dev/vdc is specified but the backend uses /dev/xvdc), the device
    name will be converted to the appropriate format.
    """
    req_prefix = None
    req_letter = None
    if device:
        try:
            # Split the requested name into e.g. ('/dev/vd', 'c').
            req_prefix, req_letter = block_device.match_device(device)
        except (TypeError, AttributeError, ValueError):
            raise exception.InvalidDevicePath(path=device)
    if not root_device_name:
        root_device_name = block_device.DEFAULT_ROOT_DEV_NAME
    try:
        prefix = block_device.match_device(root_device_name)[0]
    except (TypeError, AttributeError, ValueError):
        raise exception.InvalidDevicePath(path=root_device_name)
    # NOTE(vish): remove this when xenapi is setting default_root_device
    if driver.compute_driver_matches('xenapi.XenAPIDriver'):
        prefix = '/dev/xvd'
    if req_prefix != prefix:
        # The backend's prefix wins over the one the caller requested.
        LOG.debug("Using %(prefix)s instead of %(req_prefix)s",
                  {'prefix': prefix, 'req_prefix': req_prefix})
    used_letters = set()
    for device_path in device_name_list:
        letter = block_device.get_device_letter(device_path)
        used_letters.add(letter)
    # NOTE(vish): remove this when xenapi is properly setting
    #             default_ephemeral_device and default_swap_device
    if driver.compute_driver_matches('xenapi.XenAPIDriver'):
        flavor = flavors.extract_flavor(instance)
        if flavor['ephemeral_gb']:
            used_letters.add('b')
        if flavor['swap']:
            used_letters.add('c')
    if not req_letter:
        req_letter = _get_unused_letter(used_letters)
    if req_letter in used_letters:
        raise exception.DevicePathInUse(path=device)
    return prefix + req_letter
def _get_unused_letter(used_letters):
doubles = [first + second for second in string.ascii_lowercase
for first in string.ascii_lowercase]
all_letters = set(list(string.ascii_lowercase) + doubles)
letters = list(all_letters - used_letters)
# NOTE(vish): prepend ` so all shorter sequences sort first
letters.sort(key=lambda x: x.rjust(2, '`'))
return letters[0]
def get_image_metadata(context, image_api, image_id_or_uri, instance):
    """Build image metadata for *instance*, merging the instance's system
    metadata over the base image's properties (instance values win).
    """
    # If the base image is still available, get its metadata
    try:
        image = image_api.get(context, image_id_or_uri)
    except (exception.ImageNotAuthorized,
            exception.ImageNotFound,
            exception.Invalid) as e:
        # Best effort: fall back to instance metadata only.
        LOG.warning(_LW("Can't access image %(image_id)s: %(error)s"),
                    {"image_id": image_id_or_uri, "error": e},
                    instance=instance)
        image_system_meta = {}
    else:
        flavor = flavors.extract_flavor(instance)
        image_system_meta = utils.get_system_metadata_from_image(image, flavor)
    # Get the system metadata from the instance
    system_meta = utils.instance_sys_meta(instance)
    # Merge the metadata from the instance with the image's, if any
    system_meta.update(image_system_meta)
    # Convert the system metadata to image metadata
    return utils.get_image_from_system_metadata(system_meta)
def notify_usage_exists(notifier, context, instance_ref, current_period=False,
                        ignore_missing_network_data=True,
                        system_metadata=None, extra_usage_info=None):
    """Generates 'exists' notification for an instance for usage auditing
    purposes.

    :param notifier: a messaging.Notifier

    :param current_period: if True, this will generate a usage for the
        current usage period; if False, this will generate a usage for the
        previous audit period.

    :param ignore_missing_network_data: if True, log any exceptions generated
        while getting network info; if False, raise the exception.
    :param system_metadata: system_metadata DB entries for the instance,
        if not None.  *NOTE*: Currently unused here in trunk, but needed for
        potential custom modifications.
    :param extra_usage_info: Dictionary containing extra values to add or
        override in the notification if not None.
    """
    audit_start, audit_end = notifications.audit_period_bounds(current_period)
    bw = notifications.bandwidth_usage(instance_ref, audit_start,
                                       ignore_missing_network_data)
    if system_metadata is None:
        system_metadata = utils.instance_sys_meta(instance_ref)
    # add image metadata to the notification:
    image_meta = notifications.image_meta(system_metadata)
    extra_info = dict(audit_period_beginning=str(audit_start),
                      audit_period_ending=str(audit_end),
                      bandwidth=bw, image_meta=image_meta)
    if extra_usage_info:
        # Caller-supplied values override the computed audit info.
        extra_info.update(extra_usage_info)
    notify_about_instance_usage(notifier, context, instance_ref, 'exists',
            system_metadata=system_metadata, extra_usage_info=extra_info)
def notify_about_instance_usage(notifier, context, instance, event_suffix,
                                network_info=None, system_metadata=None,
                                extra_usage_info=None, fault=None):
    """Send a notification about an instance.

    :param notifier: a messaging.Notifier
    :param event_suffix: Event type like "delete.start" or "exists"
    :param network_info: Networking information, if provided.
    :param system_metadata: system_metadata DB entries for the instance,
        if provided.
    :param extra_usage_info: Dictionary containing extra values to add or
        override in the notification.
    :param fault: exception to include in the payload, if provided.
    """
    if not extra_usage_info:
        extra_usage_info = {}
    usage_info = notifications.info_from_instance(context, instance,
            network_info, system_metadata, **extra_usage_info)
    if fault:
        # NOTE(johngarbutt) mirrors the format in wrap_exception
        fault_payload = exception_to_dict(fault)
        LOG.debug(fault_payload["message"], instance=instance)
        usage_info.update(fault_payload)
    # Error-type events are emitted at error level, everything else at info.
    if event_suffix.endswith("error"):
        method = notifier.error
    else:
        method = notifier.info
    method(context, 'compute.instance.%s' % event_suffix, usage_info)
def notify_about_aggregate_update(context, event_suffix, aggregate_payload):
    """Send a notification about aggregate update.

    :param event_suffix: Event type like "create.start" or "create.end"
    :param aggregate_payload: payload for aggregate update
    """
    # Prefer the aggregate id; fall back to its name.
    identifier = (aggregate_payload.get('aggregate_id', None) or
                  aggregate_payload.get('name', None))
    if not identifier:
        LOG.debug("No aggregate id or name specified for this "
                  "notification and it will be ignored")
        return
    notifier = rpc.get_notifier(service='aggregate', host=identifier)
    notifier.info(context, 'aggregate.%s' % event_suffix, aggregate_payload)
def notify_about_host_update(context, event_suffix, host_payload):
    """Send a notification about host update.

    :param event_suffix: Event type like "create.start" or "create.end"
    :param host_payload: payload for host update. It is a dict and there
                         should be at least the 'host_name' key in this
                         dict.
    """
    host_identifier = host_payload.get('host_name')
    if not host_identifier:
        # Without a host name there is nothing to key the notification on.
        LOG.warn(_LW("No host name specified for the notification of "
                     "HostAPI.%s and it will be ignored"), event_suffix)
        return
    notifier = rpc.get_notifier(service='api', host=host_identifier)
    notifier.info(context, 'HostAPI.%s' % event_suffix, host_payload)
def get_nw_info_for_instance(instance):
    """Return the instance's cached NetworkInfo, hydrating raw data if
    needed; an empty NetworkInfo when no cache exists.
    """
    if isinstance(instance, obj_base.NovaObject):
        if instance.info_cache is None:
            return network_model.NetworkInfo.hydrate([])
        return instance.info_cache.network_info
    # FIXME(comstud): Transitional while we convert to objects.
    info_cache = instance['info_cache'] or {}
    nw_info = info_cache.get('network_info') or []
    if not isinstance(nw_info, network_model.NetworkInfo):
        nw_info = network_model.NetworkInfo.hydrate(nw_info)
    return nw_info
def has_audit_been_run(context, conductor, host, timestamp=None):
    """Return True if an instance-usage audit task log exists for the
    audit period preceding *timestamp* (default: the last completed one).
    """
    begin, end = utils.last_completed_audit_period(before=timestamp)
    task_log = conductor.task_log_get(context, "instance_usage_audit",
                                      begin, end, host)
    return bool(task_log)
def start_instance_usage_audit(context, conductor, begin, end, host,
                               num_instances):
    """Record the start of an instance-usage-audit run in the task log."""
    task_name = "instance_usage_audit"
    conductor.task_log_begin_task(context, task_name, begin, end, host,
                                  num_instances,
                                  "Instance usage audit started...")
def finish_instance_usage_audit(context, conductor, begin, end, host, errors,
                                message):
    """Record completion of an instance-usage-audit run in the task log."""
    task_name = "instance_usage_audit"
    conductor.task_log_end_task(
        context, task_name, begin, end, host, errors, message)
def usage_volume_info(vol_usage):
    """Build a volume-usage notification payload from a vol_usage record."""
    def _str_or_empty(value):
        return str(value) if value else ''

    tot = vol_usage['tot_last_refreshed']
    curr = vol_usage['curr_last_refreshed']
    # Prefer the most recent refresh time; fall back to whichever is set
    # (curr must be set when tot is not).
    if tot and curr:
        last_refreshed_time = max(tot, curr)
    elif tot:
        last_refreshed_time = tot
    else:
        last_refreshed_time = curr
    # Totals are the sum of the archived and the current counters.
    return dict(
        volume_id=vol_usage['volume_id'],
        tenant_id=vol_usage['project_id'],
        user_id=vol_usage['user_id'],
        availability_zone=vol_usage['availability_zone'],
        instance_id=vol_usage['instance_uuid'],
        last_refreshed=_str_or_empty(last_refreshed_time),
        reads=vol_usage['tot_reads'] + vol_usage['curr_reads'],
        read_bytes=vol_usage['tot_read_bytes'] +
        vol_usage['curr_read_bytes'],
        writes=vol_usage['tot_writes'] + vol_usage['curr_writes'],
        write_bytes=vol_usage['tot_write_bytes'] +
        vol_usage['curr_write_bytes'])
def get_reboot_type(task_state, current_power_state):
    """Checks if the current instance state requires a HARD reboot."""
    # Anything not currently running can only be recovered by a hard reboot.
    if current_power_state != power_state.RUNNING:
        return 'HARD'
    soft_states = (task_states.REBOOT_STARTED,
                   task_states.REBOOT_PENDING,
                   task_states.REBOOTING)
    return 'SOFT' if task_state in soft_states else 'HARD'
class EventReporter(object):
    """Context manager to report instance action events."""

    def __init__(self, context, event_name, *instance_uuids):
        self.context = context
        self.event_name = event_name
        self.instance_uuids = instance_uuids

    def __enter__(self):
        # Open one action event per instance covered by this reporter.
        for instance_uuid in self.instance_uuids:
            objects.InstanceActionEvent.event_start(
                self.context, instance_uuid, self.event_name,
                want_result=False)
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Close every event, attaching failure details when one occurred.
        for instance_uuid in self.instance_uuids:
            objects.InstanceActionEvent.event_finish_with_failure(
                self.context, instance_uuid, self.event_name, exc_val=exc_val,
                exc_tb=exc_tb, want_result=False)
        # False: never suppress an exception raised in the managed block.
        return False
def periodic_task_spacing_warn(config_option_name):
    """Decorator to warn about an upcoming breaking change in methods which
    use the @periodic_task decorator.

    Some methods using the @periodic_task decorator specify spacing=0 or
    None to mean "do not call this method", but the decorator itself uses
    0/None to mean "call at the default rate".

    Starting with the K release the Nova methods will be changed to conform
    to the Oslo decorator. This decorator should be present wherever a
    spacing value from user-supplied config is passed to @periodic_task, and
    there is also a check to skip the method if the value is zero. It will
    log a warning if the spacing value from config is 0/None.
    """
    # TODO(gilliard) remove this decorator, its usages and the early returns
    # near them after the K release.
    def wrapper(f):
        if hasattr(f, "_periodic_spacing"):
            spacing = f._periodic_spacing
            if spacing == 0 or spacing is None:
                LOG.warning(_LW("Value of 0 or None specified for %s."
                    " This behaviour will change in meaning in the K release, to"
                    " mean 'call at the default rate' rather than 'do not call'."
                    " To keep the 'do not call' behaviour, use a negative value."),
                    config_option_name)
        # The decorated function is always returned unmodified.
        return f
    return wrapper
| |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import copy
import datetime
import unittest
from unittest.mock import ANY, patch
import httplib2
from googleapiclient.errors import HttpError
from airflow import DAG
from airflow.exceptions import AirflowException
from airflow.providers.google.cloud.operators.mlengine import (
MLEngineCreateModelOperator, MLEngineCreateVersionOperator, MLEngineDeleteModelOperator,
MLEngineDeleteVersionOperator, MLEngineGetModelOperator, MLEngineListVersionsOperator,
MLEngineManageModelOperator, MLEngineManageVersionOperator, MLEngineSetDefaultVersionOperator,
MLEngineStartBatchPredictionJobOperator, MLEngineStartTrainingJobOperator,
MLEngineTrainingJobFailureOperator,
)
# Fixed execution date shared by every test DAG run below.
DEFAULT_DATE = datetime.datetime(2017, 6, 6)
# Common identifiers reused across the operator tests in this module.
TEST_PROJECT_ID = "test-project-id"
TEST_MODEL_NAME = "test-model-name"
TEST_VERSION_NAME = "test-version"
TEST_GCP_CONN_ID = "test-gcp-conn-id"
TEST_DELEGATE_TO = "test-delegate-to"
# Minimal model resource body accepted by the model operators.
TEST_MODEL = {
    'name': TEST_MODEL_NAME,
}
# Minimal version resource body accepted by the version operators.
TEST_VERSION = {
    'name': 'v1',
    'deploymentUri': 'gs://some-bucket/jobs/test_training/model.pb',
    'runtimeVersion': '1.6'
}
class TestMLEngineBatchPredictionOperator(unittest.TestCase):
    """Tests for MLEngineStartBatchPredictionJobOperator."""
    # Prediction input deliberately missing the model-origin fields
    # (modelName / versionName / uri); each test adds exactly one of them.
    INPUT_MISSING_ORIGIN = {
        'dataFormat': 'TEXT',
        'inputPaths': ['gs://legal-bucket/fake-input-path/*'],
        'outputPath': 'gs://legal-bucket/fake-output-path',
        'region': 'us-east1',
    }
    # Job resource the mocked hook returns for a successful prediction job.
    SUCCESS_MESSAGE_MISSING_INPUT = {
        'jobId': 'test_prediction',
        'predictionOutput': {
            'outputPath': 'gs://fake-output-path',
            'predictionCount': 5000,
            'errorCount': 0,
            'nodeHours': 2.78
        },
        'state': 'SUCCEEDED'
    }
    # Baseline constructor kwargs for the operator under test.
    BATCH_PREDICTION_DEFAULT_ARGS = {
        'project_id': 'test-project',
        'job_id': 'test_prediction',
        'region': 'us-east1',
        'data_format': 'TEXT',
        'input_paths': ['gs://legal-bucket-dash-Capital/legal-input-path/*'],
        'output_path':
            'gs://12_legal_bucket_underscore_number/legal-output-path',
        'task_id': 'test-prediction'
    }
    def setUp(self):
        """Build a minimal daily DAG the operators can be attached to."""
        super().setUp()
        self.dag = DAG(
            'test_dag',
            default_args={
                'owner': 'airflow',
                'start_date': DEFAULT_DATE,
                'end_date': DEFAULT_DATE,
            },
            schedule_interval='@daily')
    @patch('airflow.providers.google.cloud.operators.mlengine.MLEngineHook')
    def test_success_with_model(self, mock_hook):
        """Job is submitted with predictionInput.modelName when a model is given."""
        input_with_model = self.INPUT_MISSING_ORIGIN.copy()
        input_with_model['modelName'] = \
            'projects/test-project/models/test_model'
        success_message = self.SUCCESS_MESSAGE_MISSING_INPUT.copy()
        success_message['predictionInput'] = input_with_model
        hook_instance = mock_hook.return_value
        # get_job raising 404 means "no existing job", so create_job runs.
        hook_instance.get_job.side_effect = HttpError(
            resp=httplib2.Response({
                'status': 404
            }), content=b'some bytes')
        hook_instance.create_job.return_value = success_message
        prediction_task = MLEngineStartBatchPredictionJobOperator(
            job_id='test_prediction',
            project_id='test-project',
            region=input_with_model['region'],
            data_format=input_with_model['dataFormat'],
            input_paths=input_with_model['inputPaths'],
            output_path=input_with_model['outputPath'],
            model_name=input_with_model['modelName'].split('/')[-1],
            dag=self.dag,
            task_id='test-prediction')
        prediction_output = prediction_task.execute(None)
        mock_hook.assert_called_once_with('google_cloud_default', None)
        hook_instance.create_job.assert_called_once_with(
            project_id='test-project',
            job={
                'jobId': 'test_prediction',
                'predictionInput': input_with_model
            },
            use_existing_job_fn=ANY
        )
        self.assertEqual(success_message['predictionOutput'], prediction_output)
    @patch('airflow.providers.google.cloud.operators.mlengine.MLEngineHook')
    def test_success_with_version(self, mock_hook):
        """Job is submitted with predictionInput.versionName when given."""
        input_with_version = self.INPUT_MISSING_ORIGIN.copy()
        input_with_version['versionName'] = \
            'projects/test-project/models/test_model/versions/test_version'
        success_message = self.SUCCESS_MESSAGE_MISSING_INPUT.copy()
        success_message['predictionInput'] = input_with_version
        hook_instance = mock_hook.return_value
        # get_job raising 404 means "no existing job", so create_job runs.
        hook_instance.get_job.side_effect = HttpError(
            resp=httplib2.Response({
                'status': 404
            }), content=b'some bytes')
        hook_instance.create_job.return_value = success_message
        prediction_task = MLEngineStartBatchPredictionJobOperator(
            job_id='test_prediction',
            project_id='test-project',
            region=input_with_version['region'],
            data_format=input_with_version['dataFormat'],
            input_paths=input_with_version['inputPaths'],
            output_path=input_with_version['outputPath'],
            model_name=input_with_version['versionName'].split('/')[-3],
            version_name=input_with_version['versionName'].split('/')[-1],
            dag=self.dag,
            task_id='test-prediction')
        prediction_output = prediction_task.execute(None)
        mock_hook.assert_called_once_with('google_cloud_default', None)
        hook_instance.create_job.assert_called_once_with(
            project_id='test-project',
            job={
                'jobId': 'test_prediction',
                'predictionInput': input_with_version
            },
            use_existing_job_fn=ANY
        )
        self.assertEqual(success_message['predictionOutput'], prediction_output)
    @patch('airflow.providers.google.cloud.operators.mlengine.MLEngineHook')
    def test_success_with_uri(self, mock_hook):
        """Job is submitted with predictionInput.uri for a savedModel URI."""
        input_with_uri = self.INPUT_MISSING_ORIGIN.copy()
        input_with_uri['uri'] = 'gs://my_bucket/my_models/savedModel'
        success_message = self.SUCCESS_MESSAGE_MISSING_INPUT.copy()
        success_message['predictionInput'] = input_with_uri
        hook_instance = mock_hook.return_value
        # get_job raising 404 means "no existing job", so create_job runs.
        hook_instance.get_job.side_effect = HttpError(
            resp=httplib2.Response({
                'status': 404
            }), content=b'some bytes')
        hook_instance.create_job.return_value = success_message
        prediction_task = MLEngineStartBatchPredictionJobOperator(
            job_id='test_prediction',
            project_id='test-project',
            region=input_with_uri['region'],
            data_format=input_with_uri['dataFormat'],
            input_paths=input_with_uri['inputPaths'],
            output_path=input_with_uri['outputPath'],
            uri=input_with_uri['uri'],
            dag=self.dag,
            task_id='test-prediction')
        prediction_output = prediction_task.execute(None)
        mock_hook.assert_called_once_with('google_cloud_default', None)
        hook_instance.create_job.assert_called_once_with(
            project_id='test-project',
            job={
                'jobId': 'test_prediction',
                'predictionInput': input_with_uri
            },
            use_existing_job_fn=ANY
        )
        self.assertEqual(success_message['predictionOutput'], prediction_output)
    def test_invalid_model_origin(self):
        """Ambiguous or missing model origins raise AirflowException."""
        # Test that both uri and model is given
        task_args = self.BATCH_PREDICTION_DEFAULT_ARGS.copy()
        task_args['uri'] = 'gs://fake-uri/saved_model'
        task_args['model_name'] = 'fake_model'
        with self.assertRaises(AirflowException) as context:
            MLEngineStartBatchPredictionJobOperator(**task_args).execute(None)
        self.assertEqual('Ambiguous model origin: Both uri and '
                         'model/version name are provided.',
                         str(context.exception))
        # Test that both uri and model/version is given
        task_args = self.BATCH_PREDICTION_DEFAULT_ARGS.copy()
        task_args['uri'] = 'gs://fake-uri/saved_model'
        task_args['model_name'] = 'fake_model'
        task_args['version_name'] = 'fake_version'
        with self.assertRaises(AirflowException) as context:
            MLEngineStartBatchPredictionJobOperator(**task_args).execute(None)
        self.assertEqual('Ambiguous model origin: Both uri and '
                         'model/version name are provided.',
                         str(context.exception))
        # Test that a version is given without a model
        task_args = self.BATCH_PREDICTION_DEFAULT_ARGS.copy()
        task_args['version_name'] = 'bare_version'
        with self.assertRaises(AirflowException) as context:
            MLEngineStartBatchPredictionJobOperator(**task_args).execute(None)
        self.assertEqual('Missing model: Batch prediction expects a model '
                         'name when a version name is provided.',
                         str(context.exception))
        # Test that none of uri, model, model/version is given
        task_args = self.BATCH_PREDICTION_DEFAULT_ARGS.copy()
        with self.assertRaises(AirflowException) as context:
            MLEngineStartBatchPredictionJobOperator(**task_args).execute(None)
        self.assertEqual(
            'Missing model origin: Batch prediction expects a '
            'model, a model & version combination, or a URI to a savedModel.',
            str(context.exception))
    @patch('airflow.providers.google.cloud.operators.mlengine.MLEngineHook')
    def test_http_error(self, mock_hook):
        """A non-404 HttpError from create_job propagates to the caller."""
        http_error_code = 403
        input_with_model = self.INPUT_MISSING_ORIGIN.copy()
        input_with_model['modelName'] = \
            'projects/experimental/models/test_model'
        hook_instance = mock_hook.return_value
        hook_instance.create_job.side_effect = HttpError(
            resp=httplib2.Response({
                'status': http_error_code
            }),
            content=b'Forbidden')
        with self.assertRaises(HttpError) as context:
            prediction_task = MLEngineStartBatchPredictionJobOperator(
                job_id='test_prediction',
                project_id='test-project',
                region=input_with_model['region'],
                data_format=input_with_model['dataFormat'],
                input_paths=input_with_model['inputPaths'],
                output_path=input_with_model['outputPath'],
                model_name=input_with_model['modelName'].split('/')[-1],
                dag=self.dag,
                task_id='test-prediction')
            prediction_task.execute(None)
            mock_hook.assert_called_once_with('google_cloud_default', None)
            hook_instance.create_job.assert_called_once_with(
                'test-project', {
                    'jobId': 'test_prediction',
                    'predictionInput': input_with_model
                }, ANY)
        self.assertEqual(http_error_code, context.exception.resp.status)
    @patch('airflow.providers.google.cloud.operators.mlengine.MLEngineHook')
    def test_failed_job_error(self, mock_hook):
        """A job finishing in FAILED state raises RuntimeError with its message."""
        hook_instance = mock_hook.return_value
        hook_instance.create_job.return_value = {
            'state': 'FAILED',
            'errorMessage': 'A failure message'
        }
        task_args = self.BATCH_PREDICTION_DEFAULT_ARGS.copy()
        task_args['uri'] = 'a uri'
        with self.assertRaises(RuntimeError) as context:
            MLEngineStartBatchPredictionJobOperator(**task_args).execute(None)
        self.assertEqual('A failure message', str(context.exception))
class TestMLEngineTrainingOperator(unittest.TestCase):
    """Tests for MLEngineStartTrainingJobOperator."""
    # Baseline constructor kwargs for the training operator under test.
    TRAINING_DEFAULT_ARGS = {
        'project_id': 'test-project',
        'job_id': 'test_training',
        'package_uris': ['gs://some-bucket/package1'],
        'training_python_module': 'trainer',
        'training_args': '--some_arg=\'aaa\'',
        'region': 'us-east1',
        'scale_tier': 'STANDARD_1',
        'task_id': 'test-training'
    }
    # Job resource the operator is expected to submit via the hook.
    TRAINING_INPUT = {
        'jobId': 'test_training',
        'trainingInput': {
            'scaleTier': 'STANDARD_1',
            'packageUris': ['gs://some-bucket/package1'],
            'pythonModule': 'trainer',
            'args': '--some_arg=\'aaa\'',
            'region': 'us-east1'
        }
    }
    @patch('airflow.providers.google.cloud.operators.mlengine.MLEngineHook')
    def test_success_create_training_job(self, mock_hook):
        """Operator submits exactly the expected training job resource."""
        success_response = self.TRAINING_INPUT.copy()
        success_response['state'] = 'SUCCEEDED'
        hook_instance = mock_hook.return_value
        hook_instance.create_job.return_value = success_response
        training_op = MLEngineStartTrainingJobOperator(
            **self.TRAINING_DEFAULT_ARGS)
        training_op.execute(None)
        mock_hook.assert_called_once_with(
            gcp_conn_id='google_cloud_default', delegate_to=None)
        # Make sure only 'create_job' is invoked on hook instance
        self.assertEqual(len(hook_instance.mock_calls), 1)
        hook_instance.create_job.assert_called_once_with(
            project_id='test-project', job=self.TRAINING_INPUT, use_existing_job_fn=ANY)
    @patch('airflow.providers.google.cloud.operators.mlengine.MLEngineHook')
    def test_success_create_training_job_with_optional_args(self, mock_hook):
        """Optional runtime/python version and job_dir are forwarded to the job."""
        training_input = copy.deepcopy(self.TRAINING_INPUT)
        training_input['trainingInput']['runtimeVersion'] = '1.6'
        training_input['trainingInput']['pythonVersion'] = '3.5'
        training_input['trainingInput']['jobDir'] = 'gs://some-bucket/jobs/test_training'
        success_response = self.TRAINING_INPUT.copy()
        success_response['state'] = 'SUCCEEDED'
        hook_instance = mock_hook.return_value
        hook_instance.create_job.return_value = success_response
        training_op = MLEngineStartTrainingJobOperator(
            runtime_version='1.6',
            python_version='3.5',
            job_dir='gs://some-bucket/jobs/test_training',
            **self.TRAINING_DEFAULT_ARGS)
        training_op.execute(None)
        mock_hook.assert_called_once_with(gcp_conn_id='google_cloud_default', delegate_to=None)
        # Make sure only 'create_job' is invoked on hook instance
        self.assertEqual(len(hook_instance.mock_calls), 1)
        hook_instance.create_job.assert_called_once_with(
            project_id='test-project', job=training_input, use_existing_job_fn=ANY)
    @patch('airflow.providers.google.cloud.operators.mlengine.MLEngineHook')
    def test_http_error(self, mock_hook):
        """An HttpError from create_job propagates to the caller."""
        http_error_code = 403
        hook_instance = mock_hook.return_value
        hook_instance.create_job.side_effect = HttpError(
            resp=httplib2.Response({
                'status': http_error_code
            }),
            content=b'Forbidden')
        with self.assertRaises(HttpError) as context:
            training_op = MLEngineStartTrainingJobOperator(
                **self.TRAINING_DEFAULT_ARGS)
            training_op.execute(None)
            mock_hook.assert_called_once_with(
                gcp_conn_id='google_cloud_default', delegate_to=None)
            # Make sure only 'create_job' is invoked on hook instance
            self.assertEqual(len(hook_instance.mock_calls), 1)
            hook_instance.create_job.assert_called_once_with(
                project_id='test-project', job=self.TRAINING_INPUT, use_existing_job_fn=ANY)
        self.assertEqual(http_error_code, context.exception.resp.status)
    @patch('airflow.providers.google.cloud.operators.mlengine.MLEngineHook')
    def test_failed_job_error(self, mock_hook):
        """A training job finishing in FAILED state raises RuntimeError."""
        failure_response = self.TRAINING_INPUT.copy()
        failure_response['state'] = 'FAILED'
        failure_response['errorMessage'] = 'A failure message'
        hook_instance = mock_hook.return_value
        hook_instance.create_job.return_value = failure_response
        with self.assertRaises(RuntimeError) as context:
            training_op = MLEngineStartTrainingJobOperator(
                **self.TRAINING_DEFAULT_ARGS)
            training_op.execute(None)
            mock_hook.assert_called_once_with(
                gcp_conn_id='google_cloud_default', delegate_to=None)
            # Make sure only 'create_job' is invoked on hook instance
            self.assertEqual(len(hook_instance.mock_calls), 1)
            hook_instance.create_job.assert_called_once_with(
                project_id='test-project', job=self.TRAINING_INPUT, use_existing_job_fn=ANY)
        self.assertEqual('A failure message', str(context.exception))
class TestMLEngineTrainingJobFailureOperator(unittest.TestCase):
    """Tests for MLEngineTrainingJobFailureOperator (cancels a training job)."""
    # Baseline constructor kwargs for the cancel operator under test.
    TRAINING_DEFAULT_ARGS = {
        'project_id': 'test-project',
        'job_id': 'test_training',
        'task_id': 'test-training'
    }
    @patch('airflow.providers.google.cloud.operators.mlengine.MLEngineHook')
    def test_success_cancel_training_job(self, mock_hook):
        """Operator cancels the job identified by project_id/job_id."""
        success_response = {}
        hook_instance = mock_hook.return_value
        hook_instance.cancel_job.return_value = success_response
        cancel_training_op = MLEngineTrainingJobFailureOperator(
            **self.TRAINING_DEFAULT_ARGS)
        cancel_training_op.execute(None)
        mock_hook.assert_called_once_with(
            gcp_conn_id='google_cloud_default', delegate_to=None)
        # Make sure only 'cancel_job' is invoked on hook instance
        self.assertEqual(len(hook_instance.mock_calls), 1)
        hook_instance.cancel_job.assert_called_once_with(
            project_id=self.TRAINING_DEFAULT_ARGS['project_id'], job_id=self.TRAINING_DEFAULT_ARGS['job_id'])
    @patch('airflow.providers.google.cloud.operators.mlengine.MLEngineHook')
    def test_http_error(self, mock_hook):
        """An HttpError from cancel_job propagates to the caller."""
        http_error_code = 403
        hook_instance = mock_hook.return_value
        hook_instance.cancel_job.side_effect = HttpError(
            resp=httplib2.Response({
                'status': http_error_code
            }),
            content=b'Forbidden')
        with self.assertRaises(HttpError) as context:
            cancel_training_op = MLEngineTrainingJobFailureOperator(
                **self.TRAINING_DEFAULT_ARGS)
            cancel_training_op.execute(None)
            mock_hook.assert_called_once_with(
                gcp_conn_id='google_cloud_default', delegate_to=None)
            # Make sure only 'cancel_job' is invoked on hook instance
            self.assertEqual(len(hook_instance.mock_calls), 1)
            hook_instance.cancel_job.assert_called_once_with(
                project_id=self.TRAINING_DEFAULT_ARGS['project_id'], job_id=self.TRAINING_DEFAULT_ARGS['job_id'])
        self.assertEqual(http_error_code, context.exception.resp.status)
class TestMLEngineModelOperator(unittest.TestCase):
    """Tests for the deprecated multi-operation MLEngineManageModelOperator."""
    @patch('airflow.providers.google.cloud.operators.mlengine.MLEngineHook')
    def test_success_create_model(self, mock_hook):
        """operation='create' delegates to hook.create_model."""
        task = MLEngineManageModelOperator(
            task_id="task-id",
            project_id=TEST_PROJECT_ID,
            model=TEST_MODEL,
            operation="create",
            gcp_conn_id=TEST_GCP_CONN_ID,
            delegate_to=TEST_DELEGATE_TO
        )
        task.execute(None)
        mock_hook.assert_called_once_with(delegate_to=TEST_DELEGATE_TO, gcp_conn_id=TEST_GCP_CONN_ID)
        mock_hook.return_value.create_model.assert_called_once_with(
            project_id=TEST_PROJECT_ID, model=TEST_MODEL
        )
    @patch('airflow.providers.google.cloud.operators.mlengine.MLEngineHook')
    def test_success_get_model(self, mock_hook):
        """operation='get' delegates to hook.get_model and returns its result."""
        task = MLEngineManageModelOperator(
            task_id="task-id",
            project_id=TEST_PROJECT_ID,
            model=TEST_MODEL,
            operation="get",
            gcp_conn_id=TEST_GCP_CONN_ID,
            delegate_to=TEST_DELEGATE_TO
        )
        result = task.execute(None)
        mock_hook.assert_called_once_with(delegate_to=TEST_DELEGATE_TO, gcp_conn_id=TEST_GCP_CONN_ID)
        mock_hook.return_value.get_model.assert_called_once_with(
            project_id=TEST_PROJECT_ID, model_name=TEST_MODEL_NAME
        )
        self.assertEqual(mock_hook.return_value.get_model.return_value, result)
    @patch('airflow.providers.google.cloud.operators.mlengine.MLEngineHook')
    def test_fail(self, mock_hook):
        """An unknown operation raises ValueError at execute time."""
        task = MLEngineManageModelOperator(
            task_id="task-id",
            project_id=TEST_PROJECT_ID,
            model=TEST_MODEL,
            operation="invalid",
            gcp_conn_id=TEST_GCP_CONN_ID,
            delegate_to=TEST_DELEGATE_TO
        )
        with self.assertRaises(ValueError):
            task.execute(None)
class TestMLEngineCreateModelOperator(unittest.TestCase):
    """Tests for MLEngineCreateModelOperator."""
    @patch('airflow.providers.google.cloud.operators.mlengine.MLEngineHook')
    def test_success_create_model(self, mock_hook):
        """Operator forwards project_id and model body to hook.create_model."""
        task = MLEngineCreateModelOperator(
            task_id="task-id",
            project_id=TEST_PROJECT_ID,
            model=TEST_MODEL,
            gcp_conn_id=TEST_GCP_CONN_ID,
            delegate_to=TEST_DELEGATE_TO
        )
        task.execute(None)
        mock_hook.assert_called_once_with(delegate_to=TEST_DELEGATE_TO, gcp_conn_id=TEST_GCP_CONN_ID)
        mock_hook.return_value.create_model.assert_called_once_with(
            project_id=TEST_PROJECT_ID, model=TEST_MODEL
        )
class TestMLEngineGetModelOperator(unittest.TestCase):
    """Tests for MLEngineGetModelOperator."""
    @patch('airflow.providers.google.cloud.operators.mlengine.MLEngineHook')
    def test_success_get_model(self, mock_hook):
        """Operator fetches the model via the hook and returns it."""
        task = MLEngineGetModelOperator(
            task_id="task-id",
            project_id=TEST_PROJECT_ID,
            model_name=TEST_MODEL_NAME,
            gcp_conn_id=TEST_GCP_CONN_ID,
            delegate_to=TEST_DELEGATE_TO
        )
        result = task.execute(None)
        mock_hook.assert_called_once_with(delegate_to=TEST_DELEGATE_TO, gcp_conn_id=TEST_GCP_CONN_ID)
        mock_hook.return_value.get_model.assert_called_once_with(
            project_id=TEST_PROJECT_ID, model_name=TEST_MODEL_NAME
        )
        self.assertEqual(mock_hook.return_value.get_model.return_value, result)
class TestMLEngineDeleteModelOperator(unittest.TestCase):
    """Tests for MLEngineDeleteModelOperator."""
    @patch('airflow.providers.google.cloud.operators.mlengine.MLEngineHook')
    def test_success_delete_model(self, mock_hook):
        """Operator deletes the model, forwarding the delete_contents flag."""
        task = MLEngineDeleteModelOperator(
            task_id="task-id",
            project_id=TEST_PROJECT_ID,
            model_name=TEST_MODEL_NAME,
            gcp_conn_id=TEST_GCP_CONN_ID,
            delegate_to=TEST_DELEGATE_TO,
            delete_contents=True
        )
        task.execute(None)
        mock_hook.assert_called_once_with(delegate_to=TEST_DELEGATE_TO, gcp_conn_id=TEST_GCP_CONN_ID)
        mock_hook.return_value.delete_model.assert_called_once_with(
            project_id=TEST_PROJECT_ID, model_name=TEST_MODEL_NAME, delete_contents=True
        )
class TestMLEngineVersionOperator(unittest.TestCase):
    """Tests for the deprecated multi-operation MLEngineManageVersionOperator."""
    # Baseline constructor kwargs for the version operator under test.
    VERSION_DEFAULT_ARGS = {
        'project_id': 'test-project',
        'model_name': 'test-model',
        'task_id': 'test-version'
    }
    @patch('airflow.providers.google.cloud.operators.mlengine.MLEngineHook')
    def test_success_create_version(self, mock_hook):
        """Default operation creates the version via hook.create_version."""
        success_response = {'name': 'some-name', 'done': True}
        hook_instance = mock_hook.return_value
        hook_instance.create_version.return_value = success_response
        training_op = MLEngineManageVersionOperator(
            version=TEST_VERSION,
            **self.VERSION_DEFAULT_ARGS)
        training_op.execute(None)
        mock_hook.assert_called_once_with(gcp_conn_id='google_cloud_default', delegate_to=None)
        # Make sure only 'create_version' is invoked on hook instance
        self.assertEqual(len(hook_instance.mock_calls), 1)
        hook_instance.create_version.assert_called_once_with(
            project_id='test-project', model_name='test-model', version_spec=TEST_VERSION)
class TestMLEngineCreateVersion(unittest.TestCase):
    """Tests for MLEngineCreateVersionOperator."""
    @patch('airflow.providers.google.cloud.operators.mlengine.MLEngineHook')
    def test_success(self, mock_hook):
        """Operator forwards model name and version spec to hook.create_version."""
        task = MLEngineCreateVersionOperator(
            task_id="task-id",
            project_id=TEST_PROJECT_ID,
            model_name=TEST_MODEL_NAME,
            version=TEST_VERSION,
            gcp_conn_id=TEST_GCP_CONN_ID,
            delegate_to=TEST_DELEGATE_TO,
        )
        task.execute(None)
        mock_hook.assert_called_once_with(delegate_to=TEST_DELEGATE_TO, gcp_conn_id=TEST_GCP_CONN_ID)
        mock_hook.return_value.create_version.assert_called_once_with(
            project_id=TEST_PROJECT_ID,
            model_name=TEST_MODEL_NAME,
            version_spec=TEST_VERSION
        )
    def test_missing_model_name(self):
        """Constructing without a model name raises AirflowException."""
        with self.assertRaises(AirflowException):
            MLEngineCreateVersionOperator(
                task_id="task-id",
                project_id=TEST_PROJECT_ID,
                model_name=None,
                version=TEST_VERSION,
                gcp_conn_id=TEST_GCP_CONN_ID,
                delegate_to=TEST_DELEGATE_TO,
            )
    def test_missing_version(self):
        """Constructing without a version spec raises AirflowException."""
        with self.assertRaises(AirflowException):
            MLEngineCreateVersionOperator(
                task_id="task-id",
                project_id=TEST_PROJECT_ID,
                model_name=TEST_MODEL_NAME,
                version=None,
                gcp_conn_id=TEST_GCP_CONN_ID,
                delegate_to=TEST_DELEGATE_TO,
            )
class TestMLEngineSetDefaultVersion(unittest.TestCase):
    """Tests for MLEngineSetDefaultVersionOperator."""
    @patch('airflow.providers.google.cloud.operators.mlengine.MLEngineHook')
    def test_success(self, mock_hook):
        """Operator sets the default version via hook.set_default_version."""
        task = MLEngineSetDefaultVersionOperator(
            task_id="task-id",
            project_id=TEST_PROJECT_ID,
            model_name=TEST_MODEL_NAME,
            version_name=TEST_VERSION_NAME,
            gcp_conn_id=TEST_GCP_CONN_ID,
            delegate_to=TEST_DELEGATE_TO,
        )
        task.execute(None)
        mock_hook.assert_called_once_with(delegate_to=TEST_DELEGATE_TO, gcp_conn_id=TEST_GCP_CONN_ID)
        mock_hook.return_value.set_default_version.assert_called_once_with(
            project_id=TEST_PROJECT_ID,
            model_name=TEST_MODEL_NAME,
            version_name=TEST_VERSION_NAME
        )
    def test_missing_model_name(self):
        """Constructing without a model name raises AirflowException."""
        with self.assertRaises(AirflowException):
            MLEngineSetDefaultVersionOperator(
                task_id="task-id",
                project_id=TEST_PROJECT_ID,
                model_name=None,
                version_name=TEST_VERSION_NAME,
                gcp_conn_id=TEST_GCP_CONN_ID,
                delegate_to=TEST_DELEGATE_TO,
            )
    def test_missing_version_name(self):
        """Constructing without a version name raises AirflowException."""
        with self.assertRaises(AirflowException):
            MLEngineSetDefaultVersionOperator(
                task_id="task-id",
                project_id=TEST_PROJECT_ID,
                model_name=TEST_MODEL_NAME,
                version_name=None,
                gcp_conn_id=TEST_GCP_CONN_ID,
                delegate_to=TEST_DELEGATE_TO,
            )
class TestMLEngineListVersions(unittest.TestCase):
    """Tests for MLEngineListVersionsOperator."""
    @patch('airflow.providers.google.cloud.operators.mlengine.MLEngineHook')
    def test_success(self, mock_hook):
        """Operator lists versions of the given model via hook.list_versions."""
        task = MLEngineListVersionsOperator(
            task_id="task-id",
            project_id=TEST_PROJECT_ID,
            model_name=TEST_MODEL_NAME,
            gcp_conn_id=TEST_GCP_CONN_ID,
            delegate_to=TEST_DELEGATE_TO,
        )
        task.execute(None)
        mock_hook.assert_called_once_with(delegate_to=TEST_DELEGATE_TO, gcp_conn_id=TEST_GCP_CONN_ID)
        mock_hook.return_value.list_versions.assert_called_once_with(
            project_id=TEST_PROJECT_ID,
            model_name=TEST_MODEL_NAME,
        )
    def test_missing_model_name(self):
        """Constructing without a model name raises AirflowException."""
        with self.assertRaises(AirflowException):
            MLEngineListVersionsOperator(
                task_id="task-id",
                project_id=TEST_PROJECT_ID,
                model_name=None,
                gcp_conn_id=TEST_GCP_CONN_ID,
                delegate_to=TEST_DELEGATE_TO,
            )
class TestMLEngineDeleteVersion(unittest.TestCase):
    """Tests for MLEngineDeleteVersionOperator."""
    @patch('airflow.providers.google.cloud.operators.mlengine.MLEngineHook')
    def test_success(self, mock_hook):
        """Operator deletes the named version via hook.delete_version."""
        task = MLEngineDeleteVersionOperator(
            task_id="task-id",
            project_id=TEST_PROJECT_ID,
            model_name=TEST_MODEL_NAME,
            version_name=TEST_VERSION_NAME,
            gcp_conn_id=TEST_GCP_CONN_ID,
            delegate_to=TEST_DELEGATE_TO,
        )
        task.execute(None)
        mock_hook.assert_called_once_with(delegate_to=TEST_DELEGATE_TO, gcp_conn_id=TEST_GCP_CONN_ID)
        mock_hook.return_value.delete_version.assert_called_once_with(
            project_id=TEST_PROJECT_ID,
            model_name=TEST_MODEL_NAME,
            version_name=TEST_VERSION_NAME
        )
    def test_missing_version_name(self):
        """Constructing without a version name raises AirflowException."""
        with self.assertRaises(AirflowException):
            MLEngineDeleteVersionOperator(
                task_id="task-id",
                project_id=TEST_PROJECT_ID,
                model_name=TEST_MODEL_NAME,
                version_name=None,
                gcp_conn_id=TEST_GCP_CONN_ID,
                delegate_to=TEST_DELEGATE_TO,
            )
    def test_missing_model_name(self):
        """Constructing without a model name raises AirflowException."""
        with self.assertRaises(AirflowException):
            MLEngineDeleteVersionOperator(
                task_id="task-id",
                project_id=TEST_PROJECT_ID,
                model_name=None,
                version_name=TEST_VERSION_NAME,
                gcp_conn_id=TEST_GCP_CONN_ID,
                delegate_to=TEST_DELEGATE_TO,
            )
# Allow running this test module directly with `python <file>`.
if __name__ == '__main__':
    unittest.main()
| |
#encoding=utf-8
'''
Created on Dec 8, 2015
@author: lowitty
'''
import logging, sys, os, re
logSbcHander = logging.getLogger('server.SbcHandler')
from xml.etree import ElementTree
from threading import Lock
from com.ericsson.xn.server.parser.SbcParser import SbcNodeInfo
# Module-level lock; presumably guards concurrent access to the shared
# node XML file by handler instances -- TODO confirm against callers.
lock = Lock()
# Log the interpreter version once at import time, e.g. "2.7".
versionTuple = sys.version_info[:2]
version = '.'.join(repr(v) for v in versionTuple)
logSbcHander.info('Python version is: ' + str(version))
class SbcSetOperations():
    def __init__(self, line, xmlPath, logPath):
        """Store the command and paths, then execute the command immediately.

        :param line: raw command string, e.g. "get_stat <froId>"
        :param xmlPath: path to the node-state XML file read/updated
        :param logPath: path to the log location (stored, not used here)
        """
        # Response lines accumulated by the command handlers.
        self.returnStr = []
        # First words of the command lines this handler supports.
        self.avaiableSetCMDs = ['tcp_channel', 'li_activate', 'li_deactivate', 'pm_update',
                                'get_stat', 'get_config', 'get_appl_trace']
        self.xmlPath = xmlPath
        self.logPath = logPath
        self.line = line
        # Parsed XML tree of the node configuration; set by performAction().
        self.er = None
        #pkg_path = 'com' + os.path.sep + 'ericsson' + os.path.sep + 'xn' + os.path.sep + 'server' + os.path.sep + 'handler'
        #self.pardir = os.path.dirname(os.path.abspath(__file__)).split(pkg_path)[0]
        # NOTE: the command runs as a side effect of construction.
        self.performAction()
def performAction(self):
try:
self.er = ElementTree.parse(self.xmlPath)
except Exception as e:
logSbcHander.error('Read the XML as DOM failed, Error Info: ' + str(e))
if(self.checkCmd()):
if(self.line.startswith('tcp_channel add')):
self.addChannel()
elif(self.line.startswith('tcp_channel remove')):
self.removeChannel()
elif(self.line.startswith('li_activate')):
self.liActive()
elif(self.line.startswith('li_deactivate')):
self.liDeactive()
elif(self.line.startswith('pm_update')):
self.pmUpdate()
elif(self.line.startswith('get_config')):
self.getConfig()
elif(self.line.startswith('get_appl_trace')):
self.getApplTrace()
elif(self.line.startswith('get_stat')):
self.getStat()
else:
msg = 'OPERATION FAILED DUE TO COMMAND NOT SUPPORT.'
self.returnStr.append(msg)
logSbcHander.error(msg)
else:
msg = 'OPERATION FAILED DUE TO COMMAND NOT SUPPORT.'
self.returnStr.append(msg)
logSbcHander.error(msg)
    def getStat(self):
        """Handle "get_stat <froId>": append the per-channel counter table.

        Fails with a FROID MISMATCH message when the requested froId does
        not match the one stored in the node XML.
        """
        lineSplit = self.line.split()
        nodeInfoInstance = SbcNodeInfo(self.xmlPath)
        node = nodeInfoInstance.getNodeInfoMap()
        if(lineSplit[1] != node['nodeParas']['froID']):
            msg = 'OPERATION FAILED DUE TO FROID MISMATCH.'
            self.returnStr.append(msg)
            logSbcHander.error(msg)
        else:
            froID = node['nodeParas']['froID']
            sortedKeys = sorted(node['channels'].keys())
            self.returnStr.append('LIC ID: ' + str(froID))
            self.returnStr.append('Channel | sentPackets | sentOctets | droppedPackets | droppedOctets | sentTunnelCreateReqs | successfulTunnelCreates')
            # NOTE(review): the right-alignment below calls len() directly on
            # the channel key and counter values -- this assumes SbcNodeInfo
            # returns them as strings (len() would raise on ints); confirm.
            for k in sortedKeys:
                v = node['channels'][k]
                #' ' * (7- len(str(k))) + str(k) + ' | ' + ' ' * (11 - len(str(v[0]))) + str(v[0]) + ' | ' + ' ' * (10 - len(str(v[1]))) + str(v[1]) + ' | ' + ' ' * (14 - len(str(v[2]))) + str(v[2]) + ' | ' + ' ' * (13 - len(str(v[3]))) + str(v[3]) + ' | ' + ' ' * (20 - len(str(v[4]))) + str(v[4]) + ' | ' + ' ' * (23 - len(str(v[5]))) + str(v[5]) + '\n'
                self.returnStr.append(' ' * (7 - len(k)) + str(k) + ' | ' + ' ' * (11 - len(v['c1'])) + str(v['c1']) + ' | ' + ' ' * (10 - len(v['c2'])) + str(v['c2']) + ' | ' + ' ' * (14 - len(v['c3'])) + str(v['c3']) + ' | ' + ' ' * (13 - len(v['c4'])) + str(v['c4']) + ' | ' + ' ' * (20 - len(v['c5'])) + str(v['c5']) + ' | ' + ' ' * (23 - len(v['c6'])) + str(v['c6']))
            logSbcHander.info('GET COUNTER SUCCESSFULLY.')
def checkCmd(self):
if(self.line.split()[0].strip() not in self.avaiableSetCMDs):
return False
return True
def addChannel(self):
    """Handle the add-channel command held in self.line.

    Validates, in order: command length (14 tokens), FRO id, channel id
    (a new integer in [0, 63], at most 8 channels total), interface state,
    the five required -key value options, IP formats and port ranges.
    On success a new <channel> element is appended to the XML tree and
    written back to disk.  Every outcome is reported through
    self.returnStr and the module logger.

    Bug fixes vs. the previous version:
      * a non-numeric channel id no longer raises NameError (the range
        check ran even when int() had failed);
      * a non-numeric port no longer raises NameError (execution fell
        through to the range check with lPort/rPort unbound);
      * success messages are no longer appended after a failed XML write;
      * dict.has_key() (removed in Python 3) replaced by the `in` operator.
    """
    lineSplit = self.line.split()
    nodeInfoInstance = SbcNodeInfo(self.xmlPath)
    node = nodeInfoInstance.getNodeInfoMap()
    if 14 != len(lineSplit):
        msg = 'OPERATION FAILED DUE TO COMMAND LENGTH IS WRONG.'
        self.returnStr.append(msg)
        logSbcHander.error(msg)
        return
    if lineSplit[2] != node['nodeParas']['froID']:
        msg = 'OPERATION FAILED DUE TO FROID MISMATCH.'
        self.returnStr.append(msg)
        logSbcHander.error(msg)
        return
    # Channel id must parse as an int in [0, 63].  The range check lives
    # inside the try so a non-numeric id is reported, not a NameError.
    isChannelIDValid = True
    try:
        newChannelID = int(lineSplit[3])
        if newChannelID < 0 or newChannelID > 63:
            isChannelIDValid = False
    except Exception:
        isChannelIDValid = False
    if (lineSplit[3] in node['channels']) or (not isChannelIDValid) or len(node['channels']) > 7:
        msg = 'OPERATION FAILED DUE TO CHANNEL ID EXIST OR NOT IN [0, 63] OR ALREADY 8 CHANNELS.'
        self.returnStr.append(msg)
        logSbcHander.error(msg)
        return
    if 'CONFIGURED' != node['nodeParas']['state']:
        msg = 'OPERATION FAILED DUE TO CANNOT ADD CHANNEL WHEN INTERFACE IS CLOSED .'
        self.returnStr.append(msg)
        logSbcHander.error(msg)
        return
    # Tokens 4..13 are "-key value" pairs; strip the leading dash.
    cmds = {}
    for index in range(4, 14, 2):
        cmds[lineSplit[index].lower().strip()[1:]] = lineSplit[index + 1].strip()
    required = ('licid', 'localip', 'localport', 'remoteip', 'remoteport')
    if not all(key in cmds for key in required):
        # Command format is wrong (a mandatory option is missing).
        msg = 'OPERATION FAILED DUE TO COMMAND FORMAT IS WRONG.'
        self.returnStr.append(msg)
        logSbcHander.error(msg)
        return
    # Dotted-quad IPv4 check for both endpoints.
    pat = r'^(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$'
    prog = re.compile(pat)
    if not (prog.match(cmds['localip']) and prog.match(cmds['remoteip'])):
        msg = 'OPERATION FAILED DUE TO IP FORMAT IS WRONG.'
        self.returnStr.append(msg)
        logSbcHander.error(msg)
        return
    try:
        lPort = int(cmds['localport'])
        rPort = int(cmds['remoteport'])
    except Exception:
        # Local port or Remote port is not a number.
        msg = 'OPERATION FAILED DUE TO PORT IS NOT NUMBER.'
        self.returnStr.append(msg)
        logSbcHander.error(msg)
        return
    if not (-1 < lPort < 65536 and -1 < rPort < 65536):
        # Local port or Remote port is out of range.
        msg = 'OPERATION FAILED DUE TO PORT IS INVALID NUMBER.'
        self.returnStr.append(msg)
        logSbcHander.error(msg)
        return
    # Can check if IP already in other channels, will not check in this version.
    try:
        newChannel = ElementTree.Element('channel')
        fields = (('channelId', lineSplit[3]),
                  ('licId', cmds['licid']),
                  ('localIp', cmds['localip']),
                  ('localTcpPort', cmds['localport']),
                  ('remoteIp', cmds['remoteip']),
                  ('remoteTcpPort', cmds['remoteport']),
                  ('c1', '0'), ('c2', '0'), ('c3', '0'),
                  ('c4', '0'), ('c5', '0'), ('c6', '0'))
        for tag, text in fields:
            sub = ElementTree.SubElement(newChannel, tag)
            sub.text = text
            sub.tail = "\n\t\t"
        newChannel.tail = "\n\t\t"
        newChannel.text = "\n\t\t"
        self.er.getroot().append(newChannel)
        self.writeBack2XmlFile()
    except Exception as e:
        msg = ('OPERATION FAILED DUE TO set to xml file failed, ERROR: ' + str(e)).upper()
        self.returnStr.append(msg)
        logSbcHander.error(msg)
        return
    self.returnStr.append('OPERATION SUCCESSFUL - VPP: 0')
    self.returnStr.append('OPERATION SUCCESSFUL - VPP: 1')
    logSbcHander.info('ADD CHANNEL SUCCESSFULLY.')
def removeChannel(self):
    """Handle the remove-channel command held in self.line.

    Validates command length (4 tokens), FRO id, interface state and the
    existence of the channel id, then deletes the matching <channel>
    element from the XML tree and writes it back.  Outcomes are reported
    through self.returnStr and the module logger.

    Fixes vs. the previous version: dict.has_key() (removed in Python 3)
    replaced by `in`; the truncated success log message 'R SUCCESSFULLY.'
    now reads 'REMOVE CHANNEL SUCCESSFULLY.'.
    """
    lineSplit = self.line.split()
    nodeInfoInstance = SbcNodeInfo(self.xmlPath)
    node = nodeInfoInstance.getNodeInfoMap()
    if 4 != len(lineSplit):
        msg = 'OPERATION FAILED DUE TO COMMAND LENGTH IS WRONG.'
        self.returnStr.append(msg)
        logSbcHander.error(msg)
        return
    if lineSplit[2] != node['nodeParas']['froID']:
        msg = 'OPERATION FAILED DUE TO FROID MISMATCH.'
        self.returnStr.append(msg)
        logSbcHander.error(msg)
        return
    if 'CONFIGURED' != node['nodeParas']['state']:
        msg = 'OPERATION FAILED DUE TO INTERFACE NOT UP.'
        self.returnStr.append(msg)
        logSbcHander.error(msg)
        return
    if lineSplit[3] not in node['channels']:
        msg = 'OPERATION FAILED DUE TO CHANNEL ID NOT EXIST.'
        self.returnStr.append(msg)
        logSbcHander.error(msg)
        return
    try:
        # Remove the first <channel> whose channelId matches.
        channels = self.er.findall('./channel')
        for channel in channels:
            if channel.find('./channelId').text == lineSplit[3]:
                self.er.getroot().remove(channel)
                break
        self.writeBack2XmlFile()
        self.returnStr.append('OPERATION SUCCESSFUL - VPP: 0')
        self.returnStr.append('OPERATION SUCCESSFUL - VPP: 1')
        logSbcHander.info('REMOVE CHANNEL SUCCESSFULLY.')
    except Exception as e:
        msg = ('OPERATION FAILED DUE TO Write Back To XML File Failed, ERROR: ' + str(e)).upper()
        self.returnStr.append(msg)
        logSbcHander.error(msg)
def liActive(self):
    """Activate the LI interface: set the node state to CONFIGURED and
    persist the XML tree.  The command in self.line must have 18 tokens
    with the FRO id as the second one.  Results are reported through
    self.returnStr and the module logger."""
    tokens = self.line.split()
    node = SbcNodeInfo(self.xmlPath).getNodeInfoMap()
    if len(tokens) != 18:
        msg = 'OPERATION FAILED DUE TO COMMAND FORMAT IS WRONG.'
        self.returnStr.append(msg)
        logSbcHander.error(msg)
        return
    if tokens[1] != node['nodeParas']['froID']:
        msg = 'OPERATION FAILED DUE TO FROID MISMATCH.'
        self.returnStr.append(msg)
        logSbcHander.error(msg)
        return
    if 'CONFIGURED' == node['nodeParas']['state']:
        # NOTE(review): an already-active interface produces no response at
        # all — this mirrors the historical behavior of this handler.
        return
    try:
        self.er.find('./state').text = 'CONFIGURED'
        self.writeBack2XmlFile()
        self.returnStr.append('OPERATION SUCCESSFUL - VPP: 0')
        self.returnStr.append('OPERATION SUCCESSFUL - VPP: 1')
        logSbcHander.info('ACTIVE INTERFACE SUCCESSFULLY.')
    except Exception as e:
        msg = ('OPERATION FAILED DUE TO Write Back To XML File Failed, ERROR: ' + str(e)).upper()
        self.returnStr.append(msg)
        logSbcHander.error(msg)
def liDeactive(self):
    """Deactivate the LI interface: set the node state to NOT_CONFIGURED
    and persist the XML tree.  The command in self.line must have exactly
    2 tokens (verb and FRO id); deactivation is refused while channels
    still exist.  Results go to self.returnStr and the module logger.

    Fix vs. the previous version: the success log message wrongly said
    'ACTIVE INTERFACE SUCCESSFULLY.' for a deactivation.
    """
    lineSplit = self.line.split()
    nodeInfoInstance = SbcNodeInfo(self.xmlPath)
    node = nodeInfoInstance.getNodeInfoMap()
    if 2 != len(lineSplit):
        msg = 'OPERATION FAILED DUE TO COMMAND FORMAT IS WRONG.'
        self.returnStr.append(msg)
        logSbcHander.error(msg)
        return
    if lineSplit[1] != node['nodeParas']['froID']:
        msg = 'OPERATION FAILED DUE TO FROID MISMATCH.'
        self.returnStr.append(msg)
        logSbcHander.error(msg)
        return
    if len(node['channels']) > 0:
        # NOTE(review): 'CHANELLS' typo kept byte-identical — clients may
        # match on the exact response text.
        msg = 'OPERATION FAILED DUE TO CANNOT CLOSE INTERFACE WHEN CHANELLS EXIST ON NODE.'
        self.returnStr.append(msg)
        logSbcHander.error(msg)
        return
    if 'NOT_CONFIGURED' != node['nodeParas']['state']:
        try:
            self.er.find('./state').text = 'NOT_CONFIGURED'
            self.writeBack2XmlFile()
            self.returnStr.append('OPERATION SUCCESSFUL - VPP: 0')
            self.returnStr.append('OPERATION SUCCESSFUL - VPP: 1')
            logSbcHander.info('DEACTIVE INTERFACE SUCCESSFULLY.')
        except Exception as e:
            msg = ('OPERATION FAILED DUE TO Write Back To XML File Failed, ERROR: ' + str(e)).upper()
            self.returnStr.append(msg)
            logSbcHander.error(msg)
def pmUpdate(self):
    """Placeholder for the pm-update command; intentionally a no-op
    (no response lines are produced)."""
    pass
'''
Ch_li> get_config 2
OPERATION SUCCESSFUL -
BGF: 2
VPP: 0 config: SET vpp state: CONFIGURED lastReturnCode: 0 lastReturnMsg: <VPP: 0>
VPP: 1 config: SET vpp state: CONFIGURED lastReturnCode: 0 lastReturnMsg: <VPP: 1>
-neId: BGF01
-nextHopIp: 10.166.89.97
-vlanId: 150
-portLocation: 0
-tos: 0
-pbits: 0
-checkTime: 30
VPP: 0 tcpChannelId: 0 config: SET vpp state: CONFIGURED lastReturnCode: 0 lastReturnMsg: <VPP: 0>
VPP: 1 tcpChannelId: 0 config: SET vpp state: CONFIGURED lastReturnCode: 0 lastReturnMsg: <VPP: 1>
-licId: LIC
-localIp: 10.166.89.100
-localTcpPort: 30000
-remoteIp: 10.185.127.90
-remoteTcpPort: 30001
VPP: 0 tcpChannelId: 1 config: SET vpp state: CONFIGURED lastReturnCode: 0 lastReturnMsg: <VPP: 0>
VPP: 1 tcpChannelId: 1 config: SET vpp state: CONFIGURED lastReturnCode: 0 lastReturnMsg: <VPP: 1>
-licId: LIC
-localIp: 10.166.89.100
-localTcpPort: 30001
-remoteIp: 10.185.127.90
-remoteTcpPort: 30001
VPP: 0 tcpChannelId: 2 config: SET vpp state: CONFIGURED lastReturnCode: 0 lastReturnMsg: <VPP: 0>
VPP: 1 tcpChannelId: 2 config: SET vpp state: CONFIGURED lastReturnCode: 0 lastReturnMsg: <VPP: 1>
-licId: LIC
-localIp: 10.166.89.100
-localTcpPort: 30002
-remoteIp: 10.185.127.90
-remoteTcpPort: 30001
VPP: 0 tcpChannelId: 3 config: SET vpp state: CONFIGURED lastReturnCode: 0 lastReturnMsg: <VPP: 0>
VPP: 1 tcpChannelId: 3 config: SET vpp state: CONFIGURED lastReturnCode: 0 lastReturnMsg: <VPP: 1>
-licId: LIC
-localIp: 10.166.89.100
-localTcpPort: 30003
-remoteIp: 10.185.127.90
-remoteTcpPort: 30001
VPP: 0 tcpChannelId: 4 config: SET vpp state: CONFIGURED lastReturnCode: 0 lastReturnMsg: <VPP: 0>
VPP: 1 tcpChannelId: 4 config: SET vpp state: CONFIGURED lastReturnCode: 0 lastReturnMsg: <VPP: 1>
-licId: LIC
-localIp: 10.166.89.100
-localTcpPort: 30004
-remoteIp: 10.185.127.90
-remoteTcpPort: 30001
VPP: 0 tcpChannelId: 5 config: SET vpp state: CONFIGURED lastReturnCode: 0 lastReturnMsg: <VPP: 0>
VPP: 1 tcpChannelId: 5 config: SET vpp state: CONFIGURED lastReturnCode: 0 lastReturnMsg: <VPP: 1>
-licId: LIC
-localIp: 10.166.89.100
-localTcpPort: 30005
-remoteIp: 10.185.127.90
-remoteTcpPort: 30001
VPP: 0 tcpChannelId: 6 config: SET vpp state: CONFIGURED lastReturnCode: 0 lastReturnMsg: <VPP: 0>
VPP: 1 tcpChannelId: 6 config: SET vpp state: CONFIGURED lastReturnCode: 0 lastReturnMsg: <VPP: 1>
-licId: LIC
-localIp: 10.166.89.100
-localTcpPort: 30006
-remoteIp: 10.185.127.90
-remoteTcpPort: 30001
VPP: 0 tcpChannelId: 7 config: SET vpp state: CONFIGURED lastReturnCode: 0 lastReturnMsg: <VPP: 0>
VPP: 1 tcpChannelId: 7 config: SET vpp state: CONFIGURED lastReturnCode: 0 lastReturnMsg: <VPP: 1>
-licId: LIC
-localIp: 10.166.89.101
-localTcpPort: 30001
-remoteIp: 10.185.127.91
-remoteTcpPort: 30001
No of TCP channels: 8
'''
def getConfig(self):
    """Render the node configuration (node parameters plus every TCP
    channel, sorted by channel id) into self.returnStr, in the fixed
    report layout expected by the CLI client.  Validation failures are
    reported through self.returnStr and the module logger."""
    tokens = self.line.split()
    node = SbcNodeInfo(self.xmlPath).getNodeInfoMap()
    if len(tokens) != 2:
        msg = 'OPERATION FAILED DUE TO COMMAND FORMAT IS WRONG.'
        self.returnStr.append(msg)
        logSbcHander.error(msg)
        return
    paras = node['nodeParas']
    if tokens[1] != paras['froID']:
        msg = 'OPERATION FAILED DUE TO FROID MISMATCH.'
        self.returnStr.append(msg)
        logSbcHander.error(msg)
        return
    out = self.returnStr
    state = paras['state']
    out.extend(['', 'OPERATION SUCCESSFUL -', ''])
    out.append('BGF: ' + paras['froID'])
    out.append('')
    out.append('VPP: 0 config: SET vpp state: ' + state + ' lastReturnCode: 0 lastReturnMsg: <VPP: 0>')
    out.append('VPP: 1 config: SET vpp state: ' + state + ' lastReturnCode: 0 lastReturnMsg: <VPP: 1>')
    for field in ('neId', 'nextHopIp', 'vlanId', 'portLocation', 'tos', 'pbits', 'checkTime'):
        out.append('\t-' + field + ':\t\t' + paras[field])
    out.append('')
    # One section per channel, in sorted channel-id order.
    for chanId in sorted(node['channels'].keys()):
        chan = node['channels'][chanId]
        out.append('VPP: 0 tcpChannelId: ' + chanId + ' config: SET vpp state: CONFIGURED lastReturnCode: 0 lastReturnMsg: <VPP: 0>')
        out.append('VPP: 1 tcpChannelId: ' + chanId + ' config: SET vpp state: CONFIGURED lastReturnCode: 0 lastReturnMsg: <VPP: 1>')
        for field in ('licId', 'localIp', 'localTcpPort', 'remoteIp', 'remoteTcpPort'):
            out.append('\t-' + field + ':\t\t' + chan[field])
        out.append('')
    out.append('No of TCP channels: ' + str(len(node['channels'])))
    out.append('')
def getApplTrace(self):
    """Return the application trace: replace self.returnStr with the
    stripped lines of the log file at self.logPath.

    The shared `lock` guards the file read.  Fix vs. the previous
    version: the lock was acquired before open()/readlines() without a
    finally clause, so any I/O error left the lock held forever
    (deadlocking every later reader/writer) and leaked the file handle.
    """
    global lock
    lineSplit = self.line.split()
    nodeInfoInstance = SbcNodeInfo(self.xmlPath)
    node = nodeInfoInstance.getNodeInfoMap()
    if 2 != len(lineSplit):
        msg = 'OPERATION FAILED DUE TO COMMAND FORMAT IS WRONG.'
        self.returnStr.append(msg)
        logSbcHander.error(msg)
        return
    if lineSplit[1] != node['nodeParas']['froID']:
        msg = 'OPERATION FAILED DUE TO FROID MISMATCH.'
        self.returnStr.append(msg)
        logSbcHander.error(msg)
        return
    try:
        lock.acquire()
        try:
            f = open(self.logPath, 'r')
            try:
                lines = f.readlines()
            finally:
                f.close()
        finally:
            # Bug fix: always release, even when open()/readlines() raise.
            lock.release()
        self.returnStr = [k.strip() for k in lines]
        logSbcHander.info('SBC GET LOG SUCCESSFULLY.')
    except Exception as e:
        msg = "OPERATION FAILED DUE TO ERROR: " + str(e).upper()
        self.returnStr.append(msg)
        logSbcHander.error(msg)
def writeBack2XmlFile(self):
    """Serialize the ElementTree self.er back to self.xmlPath under the
    shared `lock`.  On Python 2.7 an XML declaration is emitted (the
    xml_declaration/method keywords are not accepted by older versions).

    Fix vs. the previous version: the lock was released only on the
    success path, so an I/O error during write() left it held forever.
    """
    global version, lock
    lock.acquire()
    try:
        if '2.7' == version:
            self.er.write(self.xmlPath, encoding='utf-8', xml_declaration=True, method='xml')
        else:
            self.er.write(self.xmlPath, encoding='utf-8')
    finally:
        # Bug fix: release even when write() raises.
        lock.release()
def getActionResult(self):
    """Return the accumulated response lines, each UTF-8 encoded."""
    return [line.encode('utf-8') for line in self.returnStr]
| |
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
from collections import defaultdict
from twitter.common.collections import OrderedSet
from pants.backend.jvm.ivy_utils import IvyArtifact as IvyUtilArtifact
from pants.backend.jvm.ivy_utils import IvyInfo, IvyModule, IvyModuleRef
from pants.backend.jvm.targets.exclude import Exclude
from pants.backend.jvm.targets.jar_dependency import IvyArtifact, JarDependency
from pants.backend.jvm.targets.jar_library import JarLibrary
from pants.backend.jvm.targets.java_library import JavaLibrary
from pants.backend.jvm.targets.jvm_binary import JvmBinary
from pants.backend.jvm.targets.scala_library import ScalaLibrary
from pants.backend.jvm.tasks.ivy_resolve import IvyResolve
from pants.base.cache_manager import VersionedTargetSet
from pants.util.contextutil import temporary_dir
from pants_test.jvm.jvm_tool_task_test_base import JvmToolTaskTestBase
class IvyResolveTest(JvmToolTaskTestBase):
    """Tests for the class IvyResolve.

    Only change vs. the previous version: the deprecated unittest alias
    ``assertEquals`` is replaced by ``assertEqual`` throughout.
    """

    @classmethod
    def task_type(cls):
        """The task class under test."""
        return IvyResolve

    def setUp(self):
        """Disable nailgun and turn off the resolution artifact cache."""
        super(IvyResolveTest, self).setUp()
        self.set_options(use_nailgun=False)
        self.set_options_for_scope('cache.{}'.format(self.options_scope),
                                   read_from=None,
                                   write_to=None)

    def resolve(self, targets):
        """Given some targets, execute a resolve, and return the resulting compile_classpath."""
        context = self.context(target_roots=targets)
        self.create_task(context, 'unused').execute()
        return context.products.get_data('compile_classpath', None)

    #
    # Test section
    #
    def test_resolve_specific(self):
        """A jar dep lands only on the jar_library that declares it."""
        # Create a jar_library with a single dep, and another library with no deps.
        dep = JarDependency('commons-lang', 'commons-lang', '2.5')
        jar_lib = self.make_target('//:a', JarLibrary, jars=[dep])
        scala_lib = self.make_target('//:b', ScalaLibrary)
        # Confirm that the deps were added to the appropriate targets.
        compile_classpath = self.resolve([jar_lib, scala_lib])
        self.assertEqual(1, len(compile_classpath.get_for_target(jar_lib)))
        self.assertEqual(0, len(compile_classpath.get_for_target(scala_lib)))

    def test_resolve_conflicted(self):
        """An evicted jar's classpath entries match the winning version's."""
        # Create jar_libraries with different versions of the same dep: this will cause
        # a pre-ivy "eviction" in IvyUtils.generate_ivy, but the same case can be triggered
        # due to an ivy eviction where the declared version loses to a transitive version.
        losing_dep = JarDependency('com.google.guava', 'guava', '16.0',
                                   artifacts=[IvyArtifact('guava16.0', classifier='default')])
        winning_dep = JarDependency('com.google.guava', 'guava', '16.0.1',
                                    artifacts=[IvyArtifact('guava16.0.1', classifier='default')])
        losing_lib = self.make_target('//:a', JarLibrary, jars=[losing_dep])
        winning_lib = self.make_target('//:b', JarLibrary, jars=[winning_dep])
        # Confirm that the same artifact was added to each target.
        context = self.context(target_roots=[losing_lib, winning_lib])

        def artifact_path(name):
            return os.path.join(self.build_root, 'ivy_artifact', name)

        symlink_map = {artifact_path('bogus0'): artifact_path('bogus0'),
                       artifact_path('bogus1'): artifact_path('bogus1'),
                       artifact_path('unused'): artifact_path('unused')}
        context.products.safe_create_data('ivy_resolve_symlink_map', lambda: symlink_map)
        task = self.create_task(context, 'unused')

        def mock_ivy_resolve(targets, *args, **kw):
            if targets:
                cache_manager = task.create_cache_manager(False)
                vts = VersionedTargetSet(cache_manager, cache_manager.wrap_targets(targets))
                cache_key = vts.cache_key.hash
            else:
                cache_key = None
            return ([], cache_key)

        task.ivy_resolve = mock_ivy_resolve

        def mock_generate_ivy_jar_products(cache_key_ignored):
            ivy_products = defaultdict(list)
            ivy_info = IvyInfo()
            # Guava 16.0 would be evicted by Guava 16.0.1.  But in a real
            # resolve, it's possible that before it was evicted, it would
            # generate some resolution data.
            artifact_1 = IvyUtilArtifact(artifact_path('bogus0'), 'default')
            unused_artifact = IvyUtilArtifact(artifact_path('unused'), 'default')
            # Because guava 16.0 was evicted, it has no artifacts
            guava_0 = IvyModule(IvyModuleRef('com.google.guava', 'guava', '16.0'),
                                [], [])
            guava_1 = IvyModule(IvyModuleRef('com.google.guava', 'guava', '16.0.1'),
                                [artifact_1], [])
            ivy_info.add_module(guava_0)
            ivy_info.add_module(guava_1)
            artifact_dep_1 = IvyUtilArtifact(artifact_path('bogus1'), 'default')
            # Because fake#dep 16.0 was evicted before it was resolved,
            # its deps are never examined, so we don't call add_module.
            guava_dep_0 = IvyModule(IvyModuleRef('com.google.fake', 'dep', '16.0.0'),
                                    [], [guava_0.ref])
            guava_dep_1 = IvyModule(IvyModuleRef('com.google.fake', 'dep', '16.0.1'),
                                    [artifact_dep_1], [guava_1.ref])
            ivy_info.add_module(guava_dep_0)
            ivy_info.add_module(guava_dep_1)
            # Add an unrelated module to ensure that it's not returned
            unrelated_parent = IvyModuleRef('com.google.other', 'parent', '1.0')
            unrelated = IvyModule(IvyModuleRef('com.google.unrelated', 'unrelated', '1.0'),
                                  [unused_artifact], [unrelated_parent])
            ivy_info.add_module(unrelated)
            ivy_products['default'] = [ivy_info]
            return ivy_products

        task._generate_ivy_jar_products = mock_generate_ivy_jar_products
        task.execute()
        compile_classpath = context.products.get_data('compile_classpath', None)
        losing_cp = compile_classpath.get_for_target(losing_lib)
        winning_cp = compile_classpath.get_for_target(winning_lib)
        self.assertEqual(losing_cp, winning_cp)
        self.assertEqual(OrderedSet([(u'default', artifact_path(u'bogus0')),
                                     (u'default', artifact_path(u'bogus1'))]),
                         winning_cp)

    def test_resolve_multiple_artifacts1(self):
        """A classifier'd artifact adds the classified jar alongside the regular one."""
        no_classifier = JarDependency('junit', 'junit', rev='4.12')
        classifier_and_no_classifier = JarDependency('junit', 'junit', rev='4.12', classifier='sources',
                                                     artifacts=[IvyArtifact('junit')])
        no_classifier_lib = self.make_target('//:a', JarLibrary, jars=[no_classifier])
        classifier_and_no_classifier_lib = self.make_target('//:b', JarLibrary, jars=[classifier_and_no_classifier])
        compile_classpath = self.resolve([no_classifier_lib, classifier_and_no_classifier_lib])
        no_classifier_cp = compile_classpath.get_for_target(no_classifier_lib)
        classifier_and_no_classifier_cp = compile_classpath.get_for_target(classifier_and_no_classifier_lib)
        sources_jar = 'junit-4.12-sources.jar'
        regular_jar = 'junit-4.12.jar'
        self.assertIn(sources_jar, (os.path.basename(j[-1]) for j in classifier_and_no_classifier_cp))
        self.assertIn(regular_jar, (os.path.basename(j[-1]) for j in classifier_and_no_classifier_cp))
        self.assertNotIn(sources_jar, (os.path.basename(j[-1]) for j in no_classifier_cp))
        self.assertIn(regular_jar, (os.path.basename(j[-1]) for j in no_classifier_cp))

    def test_resolve_multiple_artifacts2(self):
        """Two deps on the same module with different classifiers both resolve."""
        no_classifier2 = JarDependency('org.apache.avro', 'avro', rev='1.7.7')
        classifier = JarDependency('org.apache.avro', 'avro', rev='1.7.7', classifier='tests')
        lib = self.make_target('//:c', JarLibrary, jars=[no_classifier2, classifier])
        compile_classpath = self.resolve([lib])
        cp = compile_classpath.get_for_target(lib)
        tests_jar = 'avro-1.7.7-tests.jar'
        regular_jar = 'avro-1.7.7.jar'
        self.assertIn(tests_jar, list((os.path.basename(j[-1]) for j in cp)))
        self.assertIn(regular_jar, list((os.path.basename(j[-1]) for j in cp)))
        # TODO(Eric Ayers): I can't replicate the test in test_resolve_multiple_artifacts1
        # probably because the previous example creates a unique key for the jar_dependency for //:b
        # with a classifier.

    def test_excludes_in_java_lib_excludes_all_from_jar_lib(self):
        """An exclude on one target removes the jar from every target's classpath."""
        junit_dep = JarDependency('junit', 'junit', rev='4.12')
        junit_jar_lib = self.make_target('//:a', JarLibrary, jars=[junit_dep])
        excluding_target = self.make_target('//:b', JavaLibrary, excludes=[Exclude('junit', 'junit')])
        compile_classpath = self.resolve([junit_jar_lib, excluding_target])
        junit_jar_cp = compile_classpath.get_for_target(junit_jar_lib)
        excluding_cp = compile_classpath.get_for_target(excluding_target)
        self.assertEqual(0, len(junit_jar_cp))
        self.assertEqual(0, len(excluding_cp))

    def test_mapjars_excludes_excludes_all_in_jar_dependencies_even_with_soft_excludes(self):
        """A JvmBinary exclude empties the jar_dependencies product even with soft excludes."""
        junit_dep = JarDependency('junit', 'junit', rev='4.12')
        junit_jar_lib = self.make_target('//:junit_lib', JarLibrary, jars=[junit_dep])
        excluding_target = self.make_target('//:excluding_bin', JvmBinary, dependencies=[junit_jar_lib],
                                            excludes=[Exclude('junit', 'junit')],
                                            configurations=['default'])
        self.set_options(soft_excludes=True)
        context = self.context(target_roots=[junit_jar_lib, excluding_target])
        context.products.require('jar_dependencies', predicate=lambda t: isinstance(t, JvmBinary))
        with temporary_dir() as workdir:
            self.create_task(context, workdir).execute()
        jardepmap = context.products.get('jar_dependencies')
        self.assertTrue(jardepmap.empty(), 'jardepmap')

    def test_resolve_no_deps(self):
        """Resolve a library with no deps, and confirm that the empty product is created."""
        target = self.make_target('//:a', ScalaLibrary)
        self.assertTrue(self.resolve([target]))

    def test_resolve_symlinked_cache(self):
        """Test to make sure resolve works when --ivy-cache-dir is a symlinked path.

        When ivy returns the path to a resolved jar file, it might be the realpath to the jar file,
        not the symlink'ed path we are expecting for --ivy-cache-dir.  Make sure that resolve correctly
        recognizes these as belonging in the cache dir and lookups for either the symlinked cache
        dir or the realpath to the cache dir are recognized.
        """
        with temporary_dir() as realcachedir:
            with temporary_dir() as symlinkdir:
                symlink_cache_dir = os.path.join(symlinkdir, 'symlinkedcache')
                os.symlink(realcachedir, symlink_cache_dir)
                self.set_options_for_scope('ivy', cache_dir=symlink_cache_dir)
                dep = JarDependency('commons-lang', 'commons-lang', '2.5')
                jar_lib = self.make_target('//:a', JarLibrary, jars=[dep])
                # Confirm that the deps were added to the appropriate targets.
                compile_classpath = self.resolve([jar_lib])
                self.assertEqual(1, len(compile_classpath.get_for_target(jar_lib)))
| |
import theano
import copy
from theano import Op
from theano.gof import local_optimizer
from theano.sandbox.cuda import cuda_available, GpuOp
from theano.sandbox.cuda.basic_ops import GpuFlatten
from theano.tensor.extra_ops import CumsumOp
if cuda_available:
from theano.sandbox.cuda import CudaNdarrayType
from theano.sandbox.cuda.basic_ops import host_from_gpu, gpu_from_host, HostFromGpu
from theano.sandbox.cuda.opt import register_opt as register_gpu_opt
class GpuCumsum(CumsumOp, GpuOp):
"""
Parameters
----------
axis
Can not be None. If you want the array flatten, do it before.
"""
SUPPORTED_NDIMS = 3
__props__ = ('axis', 'max_threads_dim0', 'max_grid_size1', 'max_grid_size2')
def __init__(self, axis):
    """
    Parameters
    ----------
    axis : int
        Axis along which the cumulative sum is computed (must not be
        None; flatten the array first if a flat cumsum is wanted).
    """
    self.axis = axis
    # Device limits; left as None here and filled in lazily by
    # make_thunk() from the selected GPU's properties.
    self.max_threads_dim0 = None
    self.max_grid_size1 = None
    self.max_grid_size2 = None

# We must reuse the same method, not reimplement and call it.
# Otherwise DebugMode will print many warnings.
perform = Op.perform
def make_node(self, x):
    """Construct the Apply node for a GPU cumsum over ``x``.

    Parameters
    ----------
    x : Variable of CudaNdarrayType
        float32 input of at most SUPPORTED_NDIMS (3) dimensions.

    Raises
    ------
    TypeError
        If ``x`` is not a CudaNdarrayType variable.
    NotImplementedError
        If ``x`` has more than 3 dimensions.
    ValueError
        If ``self.axis`` is out of bounds for ``x.ndim``.
    """
    assert x.dtype == 'float32'
    if not isinstance(x.type, CudaNdarrayType):
        raise TypeError('x must be a CudaNdarrayType', x)
    if x.ndim > GpuCumsum.SUPPORTED_NDIMS:
        raise NotImplementedError('Only cumsum on 1D, 2D and 3D array are supported right now!')
    if self.axis >= x.ndim or self.axis < -x.ndim:
        # Bug fix: the format string used field {1} with a single argument,
        # so raising this error itself raised IndexError.
        raise ValueError('axis(={0}) out of bounds'.format(self.axis))
    return theano.Apply(self, [x], [x.type()])
def make_thunk(self, node, storage_map, compute_map, no_recycling):
    """Query the GPU's limits once, cache them on the op, then delegate.

    copy.copy(node) is shallow, and the assert below confirms node_.op is
    the very same op object — so the max_threads_dim0 / max_grid_size1 /
    max_grid_size2 values written here are cached on this op for later
    c_code() generation.
    """
    node_ = copy.copy(node)
    assert node.op is node_.op
    if node_.op.max_threads_dim0 is None or node_.op.max_grid_size1 is None or node_.op.max_grid_size2 is None:
        cuda = theano.sandbox.cuda
        device_id = cuda.use.device_number
        if device_id is None:
            # No device selected yet: initialize CUDA without moving any
            # computation or shared values to the GPU.
            cuda.use("gpu",
                     force=False,
                     default_to_move_computation_to_gpu=False,
                     move_shared_float32_to_gpu=False,
                     enable_cuda=False,
                     test_driver=True)
            device_id = cuda.use.device_number
        cuda_ndarray = theano.sandbox.cuda.cuda_ndarray.cuda_ndarray
        prop = cuda_ndarray.device_properties(device_id)
        node_.op.max_threads_dim0 = prop['maxThreadsDim0']
        node_.op.max_grid_size1 = prop['maxGridSize1']
        node_.op.max_grid_size2 = prop['maxGridSize2']
    return super(GpuCumsum, node_.op).make_thunk(node_, storage_map,
                                                 compute_map, no_recycling)
def __str__(self):
    """Return e.g. ``GpuCumsum{1}`` — the op name with its axis."""
    return "{0}{{{1}}}".format(self.__class__.__name__, self.axis)
def c_code_cache_version(self):
    # Bump this tuple whenever the generated C/CUDA code changes, so
    # Theano recompiles instead of reusing a stale cached module.
    return (9,)
def c_support_code_apply(self, node, nodename):
    """Return the CUDA kernels and host helper used by this apply node.

    The ``%(nodename)s`` placeholders are substituted via ``locals()`` so
    every apply node gets uniquely named symbols.  The CUDA text itself is
    runtime-relevant (it is compiled), so it is left untouched.
    """
    return """
    __device__
    void k_reductionPhase_%(nodename)s(float* partialCumSum) {
        // Traverse down from leaves to root building partial sums at internal nodes in the tree.
        for (unsigned int stride = 1; stride <= blockDim.x; stride *= 2) {
            __syncthreads();
            unsigned int index = (threadIdx.x + 1) * (stride * 2) - 1;
            if(index < blockDim.x*2) {
                partialCumSum[index] += partialCumSum[index - stride];
            }
        }
    }

    __device__
    void k_reversePhase_%(nodename)s(float* partialCumSum) {
        // Traverse back up the tree building the scan from the partial sums
        for (unsigned int stride = exp2(ceil(log2((float)blockDim.x))); stride > 0; stride /= 2) {
            __syncthreads();
            unsigned int index = (threadIdx.x + 1) * (stride * 2) - 1;
            if(index + stride < blockDim.x*2) {
                partialCumSum[index + stride] += partialCumSum[index];
            }
        }
    }

    __device__
    void k_fetchData_%(nodename)s(float* partialCumSum, float* input, int globalThreadID, dim3 dataStrides, int offsetY, int offsetZ) {
        // blockIdx.y and blockIdx.z represents the current independent cumsum
        int idY = blockIdx.y + offsetY;
        int idZ = blockIdx.z + offsetZ;
        int offset = idY * dataStrides.y + idZ * dataStrides.z;
        int idx_even = (globalThreadID*2    ) * dataStrides.x + offset;
        int idx_odd  = (globalThreadID*2 + 1) * dataStrides.x + offset;
        partialCumSum[threadIdx.x*2]     = input[idx_even];
        partialCumSum[threadIdx.x*2 + 1] = input[idx_odd];
    }

    __device__
    void k_pushData_%(nodename)s(float* partialCumSum, float* output, int globalThreadID, dim3 dataStrides, int offsetY, int offsetZ) {
        __syncthreads();
        // blockIdx.y and blockIdx.z represents the current independent cumsum
        int idY = blockIdx.y + offsetY;
        int idZ = blockIdx.z + offsetZ;
        int offset = idY * dataStrides.y + idZ * dataStrides.z;
        int idx_even = (globalThreadID*2    ) * dataStrides.x + offset;
        int idx_odd  = (globalThreadID*2 + 1) * dataStrides.x + offset;
        output[idx_even] = partialCumSum[threadIdx.x*2];
        output[idx_odd]  = partialCumSum[threadIdx.x*2 + 1];
    }

    __global__
    void k_cumadd_%(nodename)s(float* input, float* output, dim3 inputStrides, dim3 outputStrides, int offsetY, int offsetZ, int beforeLastElementIdx, int lastElementIdx) {
        int idY = blockIdx.y + offsetY;
        int idZ = blockIdx.z + offsetZ;
        int dataOffsetY_input = idY * inputStrides.y + idZ * inputStrides.z;
        int dataOffsetY_output = idY * outputStrides.y + idZ * outputStrides.z;
        int idx_last_input = lastElementIdx*inputStrides.x + dataOffsetY_input;
        int idx_last_output = lastElementIdx*outputStrides.x + dataOffsetY_output;
        int idx_beforelast = beforeLastElementIdx*outputStrides.x + dataOffsetY_output;
        output[idx_last_output] = input[idx_last_input] + output[idx_beforelast];
    }

    __global__
    void k_finalCumSum_%(nodename)s(float* output, float* blockSum, int nbElementsPerCumsum, dim3 dataStrides, int offsetY, int offsetZ) {
        int globalThreadID = (blockIdx.x + 1) * blockDim.x + threadIdx.x;

        // Check if current has data to process.
        if (globalThreadID >= ceil(nbElementsPerCumsum/2.0)) {
            return;
        }

        int idY = blockIdx.y + offsetY;
        int idZ = blockIdx.z + offsetZ;
        const float currentBlockSum = blockSum[blockIdx.x*(gridDim.y*gridDim.z) + idY*gridDim.z + idZ];

        int offset = idY * dataStrides.y + idZ * dataStrides.z;
        int idx_even = (globalThreadID*2    ) * dataStrides.x + offset;
        int idx_odd  = (globalThreadID*2 + 1) * dataStrides.x + offset;
        output[idx_even] += currentBlockSum;
        output[idx_odd] += currentBlockSum;
    }

    __global__
    void k_blockCumSum_%(nodename)s(float* input, float* output, int nbElementsPerCumsum, dim3 inputStrides, dim3 outputStrides, int offsetY, int offsetZ, float* blockSum) {
        // Regarding blockIdx and threadIdx, 'Cumsum' is always performed along the X axis.
        // The Y and Z axis of the grid will contain all independent cumsums of the 2D/3D case.

        int globalThreadID = blockIdx.x * blockDim.x + threadIdx.x;

        // Check if current thread has data to process.
        if (globalThreadID >= ceil(nbElementsPerCumsum/2.0)) {
            return;
        }

        extern __shared__ float partialCumSum[];

        // Load data in shared memory
        k_fetchData_%(nodename)s(partialCumSum, input, globalThreadID, inputStrides, offsetY, offsetZ);

        // Use a dichotomy approach to compute the cumsum (i.e. balanced binary tree).
        // The tree is sweeped from the leaves to the root and from the root to the leaves.
        // Similar to http://www.umiacs.umd.edu/~ramani/cmsc828e_gpusci/ScanTalk.pdf
        k_reductionPhase_%(nodename)s(partialCumSum);
        k_reversePhase_%(nodename)s(partialCumSum);

        // Write the final output to global memory
        k_pushData_%(nodename)s(partialCumSum, output, globalThreadID, outputStrides, offsetY, offsetZ);

        if (blockSum != NULL){
            if (threadIdx.x == blockDim.x - 1) {
                blockSum[blockIdx.x*(gridDim.y*gridDim.z) + (blockIdx.y + offsetY)*gridDim.z + blockIdx.z + offsetZ] = partialCumSum[threadIdx.x*2 + 1];
            }
        }
    }

    int cumSum_%(nodename)s(CudaNdarray* input, CudaNdarray* output, int axis, int maxThreads, int maxGridY, int maxGridZ) {
        int shape[3] = { 1, 1, 1 };
        dim3 inputStrides(0, 0, 0);
        dim3 outputStrides(0, 0, 0);

        switch (CudaNdarray_NDIM(input))
        {
        case 1:
            shape[0] = CudaNdarray_HOST_DIMS(input)[0];
            inputStrides.x = CudaNdarray_HOST_STRIDES(input)[0];
            outputStrides.x = CudaNdarray_HOST_STRIDES(output)[0];
            break;
        case 2:
            shape[0] = CudaNdarray_HOST_DIMS(input)[0];
            shape[1] = CudaNdarray_HOST_DIMS(input)[1];
            inputStrides.x = CudaNdarray_HOST_STRIDES(input)[0];
            inputStrides.y = CudaNdarray_HOST_STRIDES(input)[1];
            outputStrides.x = CudaNdarray_HOST_STRIDES(output)[0];
            outputStrides.y = CudaNdarray_HOST_STRIDES(output)[1];
            break;
        case 3:
            shape[0] = CudaNdarray_HOST_DIMS(input)[0];
            shape[1] = CudaNdarray_HOST_DIMS(input)[1];
            shape[2] = CudaNdarray_HOST_DIMS(input)[2];
            inputStrides.x = CudaNdarray_HOST_STRIDES(input)[0];
            inputStrides.y = CudaNdarray_HOST_STRIDES(input)[1];
            inputStrides.z = CudaNdarray_HOST_STRIDES(input)[2];
            outputStrides.x = CudaNdarray_HOST_STRIDES(output)[0];
            outputStrides.y = CudaNdarray_HOST_STRIDES(output)[1];
            outputStrides.z = CudaNdarray_HOST_STRIDES(output)[2];
            break;
        default:
            return -1;
        }

        if (shape[axis] <= 1) {
            CudaNdarray_CopyFromCudaNdarray(output, input);
            return 0;
        }

        // Perform cumsum on array of even size.
        int nbElementsPerCumsum = shape[axis] - (shape[axis] %% 2);

        // Determine how many elements can be processed in one block.
        int dimBlockX = ceil( min(nbElementsPerCumsum, 2*maxThreads) / 2.0);

        // Determine how many blocks are needed in total.
        int dimGridX = ceil(nbElementsPerCumsum / (2.0*dimBlockX));  // Nb. of blocks needed per cumsum.
        int dimGridY;  // Nb. of independent cumsums (width).
        int dimGridZ;  // Nb. of independent cumsums (height).

        int tmp;
        switch (axis)
        {
        case 0:
            dimGridY = shape[1];
            dimGridZ = shape[2];
            break;
        case 1:
            dimGridY = shape[0];
            dimGridZ = shape[2];

            tmp = inputStrides.x;
            inputStrides.x = inputStrides.y;
            inputStrides.y = tmp;

            tmp = outputStrides.x;
            outputStrides.x = outputStrides.y;
            outputStrides.y = tmp;
            break;
        case 2:
            dimGridY = shape[1];
            dimGridZ = shape[0];

            tmp = inputStrides.x;
            inputStrides.x = inputStrides.z;
            inputStrides.z = tmp;

            tmp = outputStrides.x;
            outputStrides.x = outputStrides.z;
            outputStrides.z = tmp;
            break;
        default:
            return -1;
        }

        const int shapeBlockSum[2] = { dimGridX, dimGridY*dimGridZ };
        CudaNdarray* deviceBlockSum = (CudaNdarray*) CudaNdarray_NewDims(2, shapeBlockSum);

        // Perform `maxGridY`*`maxGridZ` cumsums in parallel.
        for (int offsetY = 0; offsetY < dimGridY; offsetY += maxGridY){
            int localDimGridY = min(dimGridY - offsetY, maxGridY);
            for (int offsetZ = 0; offsetZ < dimGridZ; offsetZ += maxGridZ){
                int localDimGridZ = min(dimGridZ - offsetZ, maxGridZ);
                dim3 dimGrid(dimGridX, localDimGridY, localDimGridZ);
                dim3 dimBlock(dimBlockX, 1, 1);  // One cumsum per block.
                int sharedBytes = (2*dimBlockX) * sizeof(float);

                k_blockCumSum_%(nodename)s<<<dimGrid, dimBlock, sharedBytes>>>
                (
                    CudaNdarray_DEV_DATA(input),
                    CudaNdarray_DEV_DATA(output),
                    nbElementsPerCumsum,
                    inputStrides,
                    outputStrides,
                    offsetY,
                    offsetZ,
                    CudaNdarray_DEV_DATA(deviceBlockSum)
                );

                if (dimGridX > 1) {
                    // Do a cumsum over the blockSum (recursive).
                    if (cumSum_%(nodename)s(deviceBlockSum, deviceBlockSum, 0, maxThreads, maxGridY, maxGridZ) == -1){
                        Py_DECREF(deviceBlockSum);
                        return -1;
                    }

                    // Since there are more than one block (i.e. `dimGridX > 1`)
                    //  report partial cumsums of previous blocks to subsequents ones.
                    dim3 dimGrid(dimGridX, localDimGridY, localDimGridZ);
                    dim3 dimBlock(dimBlockX, 1, 1);
                    k_finalCumSum_%(nodename)s<<<dimGrid, dimBlock>>>
                    (
                        CudaNdarray_DEV_DATA(output),
                        CudaNdarray_DEV_DATA(deviceBlockSum),
                        nbElementsPerCumsum,
                        outputStrides,
                        offsetY,
                        offsetZ
                    );
                }

                // If shape[axis] is odd, the last element is compute manually
                if (shape[axis] != nbElementsPerCumsum){
                    dim3 dimGrid(1, localDimGridY, localDimGridZ);
                    dim3 dimBlock(1, 1, 1);
                    k_cumadd_%(nodename)s<<<dimGrid, dimBlock>>>
                    (
                        CudaNdarray_DEV_DATA(input),
                        CudaNdarray_DEV_DATA(output),
                        inputStrides,
                        outputStrides,
                        offsetY,
                        offsetZ,
                        shape[axis]-2,
                        shape[axis]-1
                    );
                }
            }
        }
        Py_DECREF(deviceBlockSum);
        CNDA_THREAD_SYNC;
        return 0;
    }
    """ % locals()
    def c_code(self, node, nodename, inames, onames, sub):
        """Return the CUDA host-side C code computing the cumulative sum.

        The emitted code validates (or allocates) the output ``z`` so its
        shape matches the input ``x``, then calls the ``cumSum_<nodename>``
        helper generated elsewhere in this Op's C support code, passing the
        GPU limits that make_thunk() filled in.

        Raises
        ------
        NotImplementedError
            If the GPU limit attributes were not set by make_thunk().
        """
        x, = inames
        z, = onames
        # We assume array has been already flattened if needed.
        axis = self.axis if self.axis is not None else 0
        fail = sub['fail']

        # These three limits depend on the selected GPU and are only known
        # once make_thunk() has run.
        max_threads_dim0 = self.max_threads_dim0
        max_grid_size1 = self.max_grid_size1
        max_grid_size2 = self.max_grid_size2
        if max_threads_dim0 is None or max_grid_size1 is None or max_grid_size2 is None:
            raise NotImplementedError("GpuCumsum.c_code should not be called "
                                      "directly. It should be called by "
                                      "make_thunk() that add some information "
                                      "related to the selected GPU.")

        code = """
        const int* shape = CudaNdarray_HOST_DIMS(%(x)s);
        bool needAllocation = !%(z)s || CudaNdarray_NDIM(%(x)s) != CudaNdarray_NDIM(%(z)s);

        int axis = %(axis)s;
        if (axis < 0) {
            // Convert negative axis to positive axis.
            axis += CudaNdarray_NDIM(%(x)s);
        }

        // If output is already allocated, check if its shape matches the input's one.
        if (!needAllocation) {
            for (int i= 0; i < CudaNdarray_NDIM(%(x)s); ++i) {
                if (CudaNdarray_HOST_DIMS(%(x)s)[i] != CudaNdarray_HOST_DIMS(%(z)s)[i]) {
                    needAllocation = true;
                }
            }
        }

        if (needAllocation){
            Py_XDECREF(%(z)s);
            %(z)s = (CudaNdarray*) CudaNdarray_NewDims(CudaNdarray_NDIM(%(x)s), shape);
        }

        if (!%(z)s) {
            %(fail)s;
        }

        { // Namespace for kernel calls //
            if (cumSum_%(nodename)s(%(x)s, %(z)s, axis, %(max_threads_dim0)s, %(max_grid_size1)s, %(max_grid_size2)s) == -1){
                %(fail)s;
            }

            cudaError_t sts = cudaGetLastError();
            if (cudaSuccess != sts)
            {
                PyErr_Format(PyExc_RuntimeError,
                             "Cuda error: %%s: %%s.\\n",
                             "cumSum_%(nodename)s",
                             cudaGetErrorString(sts));
                %(fail)s;
            }
        }
        """ % locals()

        return code
def values_eq_approx_high_tol(a, b):
    """Compare two arrays with a loosened tolerance for large inputs.

    Needed so DebugMode does not raise spurious mismatch errors caused by
    rounding: with big inputs the GPU kernel accumulates values in a
    different order than the reference implementation.
    """
    # For float32 the default rtol is 1e-5; relax it once rounding noise
    # can realistically accumulate.
    rtol = 5e-5 if a.size > 100000 else None
    return CudaNdarrayType.values_eq_approx(a, b, rtol=rtol)
@register_gpu_opt()
@local_optimizer([CumsumOp])
def use_gpu_cumsum(node):
    """Rewrite a float32 CumsumOp whose input lives on the GPU as GpuCumsum."""
    if type(node.op) is not CumsumOp:
        return None
    inp = node.inputs[0]
    if inp.dtype != 'float32':
        return None
    if not (inp.owner and isinstance(inp.owner.op, HostFromGpu)):
        return None

    axis = node.op.axis
    # GpuCumsum only supports a limited number of dimensions when an
    # explicit axis is requested.
    if axis is not None and inp.ndim > GpuCumsum.SUPPORTED_NDIMS:
        return None

    x = gpu_from_host(inp)
    if axis is None and x.ndim > 1:
        # ``gpu_cumsum`` assume array has been flattened if needed.
        x = GpuFlatten()(x)
    if axis is None:
        axis = 0

    ret = host_from_gpu(GpuCumsum(axis)(x))
    # Loosen DebugMode's comparison: the GPU sums in a different order.
    ret.values_eq_approx = values_eq_approx_high_tol
    return [ret]
| |
import unittest
import textwrap
import antlr3
import testbase
class t042ast(testbase.ANTLRTest):
    """Tests for ANTLR3 AST construction.

    Each ``testRxx`` case parses a small snippet with rule ``rxx`` of the
    t042ast grammar and checks the string form of the resulting tree (or
    the rule's return value).
    """

##     def lexerClass(self, base):
##         class TLexer(base):
##             def reportError(self, re):
##                 # no error recovery yet, just crash!
##                 raise re

##         return TLexer

    def parserClass(self, base):
        # Wrap the generated parser so any recognition error aborts the
        # test immediately instead of being recovered from.
        class TParser(base):
            def recover(self, input, re):
                # no error recovery yet, just crash!
                raise

        return TParser

    def parse(self, text, method, rArgs=(), **kwargs):
        """Compile the grammar and invoke rule ``method`` on ``text``.

        ``rArgs`` are positional arguments for the rule; any remaining
        keyword arguments are set as attributes on the parser before the
        rule runs (used e.g. to pass the ``flag`` grammar parameter).
        """
        self.compileGrammar() #options='-trace')

        cStream = antlr3.StringStream(text)
        self.lexer = self.getLexer(cStream)
        tStream = antlr3.CommonTokenStream(self.lexer)
        self.parser = self.getParser(tStream)

        for attr, val in kwargs.items():
            setattr(self.parser, attr, val)

        return getattr(self.parser, method)(*rArgs)

    def testR1(self):
        r = self.parse("1 + 2", 'r1')
        self.assertEqual(
            r.tree.toStringTree(),
            '(+ 1 2)'
            )

    def testR2a(self):
        r = self.parse("assert 2+3;", 'r2')
        self.assertEqual(
            r.tree.toStringTree(),
            '(assert (+ 2 3))'
            )

    def testR2b(self):
        r = self.parse("assert 2+3 : 5;", 'r2')
        self.assertEqual(
            r.tree.toStringTree(),
            '(assert (+ 2 3) 5)'
            )

    def testR3a(self):
        r = self.parse("if 1 fooze", 'r3')
        self.assertEqual(
            r.tree.toStringTree(),
            '(if 1 fooze)'
            )

    def testR3b(self):
        r = self.parse("if 1 fooze else fooze", 'r3')
        self.assertEqual(
            r.tree.toStringTree(),
            '(if 1 fooze fooze)'
            )

    def testR4a(self):
        r = self.parse("while 2 fooze", 'r4')
        self.assertEqual(
            r.tree.toStringTree(),
            '(while 2 fooze)'
            )

    def testR5a(self):
        r = self.parse("return;", 'r5')
        self.assertEqual(
            r.tree.toStringTree(),
            'return'
            )

    def testR5b(self):
        r = self.parse("return 2+3;", 'r5')
        self.assertEqual(
            r.tree.toStringTree(),
            '(return (+ 2 3))'
            )

    def testR6a(self):
        r = self.parse("3", 'r6')
        self.assertEqual(
            r.tree.toStringTree(),
            '3'
            )

    def testR6b(self):
        r = self.parse("3 a", 'r6')
        self.assertEqual(
            r.tree.toStringTree(),
            '3 a'
            )

    def testR7(self):
        r = self.parse("3", 'r7')
        self.assertIsNone(r.tree)

    def testR8(self):
        r = self.parse("var foo:bool", 'r8')
        self.assertEqual(
            r.tree.toStringTree(),
            '(var bool foo)'
            )

    def testR9(self):
        r = self.parse("int foo;", 'r9')
        self.assertEqual(
            r.tree.toStringTree(),
            '(VARDEF int foo)'
            )

    def testR10(self):
        r = self.parse("10", 'r10')
        self.assertEqual(
            r.tree.toStringTree(),
            '10.0'
            )

    def testR11a(self):
        r = self.parse("1+2", 'r11')
        self.assertEqual(
            r.tree.toStringTree(),
            '(EXPR (+ 1 2))'
            )

    def testR11b(self):
        r = self.parse("", 'r11')
        self.assertEqual(
            r.tree.toStringTree(),
            'EXPR'
            )

    def testR12a(self):
        r = self.parse("foo", 'r12')
        self.assertEqual(
            r.tree.toStringTree(),
            'foo'
            )

    def testR12b(self):
        r = self.parse("foo, bar, gnurz", 'r12')
        self.assertEqual(
            r.tree.toStringTree(),
            'foo bar gnurz'
            )

    def testR13a(self):
        r = self.parse("int foo;", 'r13')
        self.assertEqual(
            r.tree.toStringTree(),
            '(int foo)'
            )

    def testR13b(self):
        r = self.parse("bool foo, bar, gnurz;", 'r13')
        self.assertEqual(
            r.tree.toStringTree(),
            '(bool foo bar gnurz)'
            )

    def testR14a(self):
        r = self.parse("1+2 int", 'r14')
        self.assertEqual(
            r.tree.toStringTree(),
            '(EXPR (+ 1 2) int)'
            )

    def testR14b(self):
        r = self.parse("1+2 int bool", 'r14')
        self.assertEqual(
            r.tree.toStringTree(),
            '(EXPR (+ 1 2) int bool)'
            )

    def testR14c(self):
        r = self.parse("int bool", 'r14')
        self.assertEqual(
            r.tree.toStringTree(),
            '(EXPR int bool)'
            )

    def testR14d(self):
        r = self.parse("fooze fooze int bool", 'r14')
        self.assertEqual(
            r.tree.toStringTree(),
            '(EXPR fooze fooze int bool)'
            )

    def testR14e(self):
        r = self.parse("7+9 fooze fooze int bool", 'r14')
        self.assertEqual(
            r.tree.toStringTree(),
            '(EXPR (+ 7 9) fooze fooze int bool)'
            )

    def testR15(self):
        r = self.parse("7", 'r15')
        self.assertEqual(
            r.tree.toStringTree(),
            '7 7'
            )

    def testR16a(self):
        r = self.parse("int foo", 'r16')
        self.assertEqual(
            r.tree.toStringTree(),
            '(int foo)'
            )

    def testR16b(self):
        r = self.parse("int foo, bar, gnurz", 'r16')
        self.assertEqual(
            r.tree.toStringTree(),
            '(int foo) (int bar) (int gnurz)'
            )

    def testR17a(self):
        r = self.parse("for ( fooze ; 1 + 2 ; fooze ) fooze", 'r17')
        self.assertEqual(
            r.tree.toStringTree(),
            '(for fooze (+ 1 2) fooze fooze)'
            )

    def testR18a(self):
        r = self.parse("for", 'r18')
        self.assertEqual(
            r.tree.toStringTree(),
            'BLOCK'
            )

    def testR19a(self):
        r = self.parse("for", 'r19')
        self.assertEqual(
            r.tree.toStringTree(),
            'for'
            )

    def testR20a(self):
        r = self.parse("for", 'r20')
        self.assertEqual(
            r.tree.toStringTree(),
            'FOR'
            )

    def testR21a(self):
        r = self.parse("for", 'r21')
        self.assertEqual(
            r.tree.toStringTree(),
            'BLOCK'
            )

    def testR22a(self):
        r = self.parse("for", 'r22')
        self.assertEqual(
            r.tree.toStringTree(),
            'for'
            )

    def testR23a(self):
        r = self.parse("for", 'r23')
        self.assertEqual(
            r.tree.toStringTree(),
            'FOR'
            )

    def testR24a(self):
        r = self.parse("fooze 1 + 2", 'r24')
        self.assertEqual(
            r.tree.toStringTree(),
            '(fooze (+ 1 2))'
            )

    def testR25a(self):
        r = self.parse("fooze, fooze2 1 + 2", 'r25')
        self.assertEqual(
            r.tree.toStringTree(),
            '(fooze (+ 1 2))'
            )

    def testR26a(self):
        r = self.parse("fooze, fooze2", 'r26')
        self.assertEqual(
            r.tree.toStringTree(),
            '(BLOCK fooze fooze2)'
            )

    def testR27a(self):
        r = self.parse("fooze 1 + 2", 'r27')
        self.assertEqual(
            r.tree.toStringTree(),
            '(fooze (fooze (+ 1 2)))'
            )

    def testR28(self):
        r = self.parse("foo28a", 'r28')
        self.assertIsNone(r.tree)

    def testR29(self):
        self.assertRaises(RuntimeError, self.parse, "", 'r29')

    # FIXME: broken upstream?
##     def testR30(self):
##         try:
##             r = self.parse("fooze fooze", 'r30')
##             self.fail(r.tree.toStringTree())
##         except RuntimeError:
##             pass

    def testR31a(self):
        r = self.parse("public int gnurz = 1 + 2;", 'r31', flag=0)
        self.assertEqual(
            r.tree.toStringTree(),
            '(VARDEF gnurz public int (+ 1 2))'
            )

    def testR31b(self):
        r = self.parse("public int gnurz = 1 + 2;", 'r31', flag=1)
        self.assertEqual(
            r.tree.toStringTree(),
            '(VARIABLE gnurz public int (+ 1 2))'
            )

    def testR31c(self):
        r = self.parse("public int gnurz = 1 + 2;", 'r31', flag=2)
        self.assertEqual(
            r.tree.toStringTree(),
            '(FIELD gnurz public int (+ 1 2))'
            )

    def testR32a(self):
        r = self.parse("gnurz 32", 'r32', [1], flag=2)
        self.assertEqual(
            r.tree.toStringTree(),
            'gnurz'
            )

    def testR32b(self):
        r = self.parse("gnurz 32", 'r32', [2], flag=2)
        self.assertEqual(
            r.tree.toStringTree(),
            '32'
            )

    def testR32c(self):
        r = self.parse("gnurz 32", 'r32', [3], flag=2)
        self.assertIsNone(r.tree)

    def testR33a(self):
        r = self.parse("public private fooze", 'r33')
        self.assertEqual(
            r.tree.toStringTree(),
            'fooze'
            )

    def testR34a(self):
        r = self.parse("public class gnurz { fooze fooze2 }", 'r34')
        self.assertEqual(
            r.tree.toStringTree(),
            '(class gnurz public fooze fooze2)'
            )

    def testR34b(self):
        r = self.parse("public class gnurz extends bool implements int, bool { fooze fooze2 }", 'r34')
        self.assertEqual(
            r.tree.toStringTree(),
            '(class gnurz public (extends bool) (implements int bool) fooze fooze2)'
            )

    def testR35(self):
        self.assertRaises(RuntimeError, self.parse, "{ extends }", 'r35')

    def testR36a(self):
        r = self.parse("if ( 1 + 2 ) fooze", 'r36')
        self.assertEqual(
            r.tree.toStringTree(),
            '(if (EXPR (+ 1 2)) fooze)'
            )

    def testR36b(self):
        r = self.parse("if ( 1 + 2 ) fooze else fooze2", 'r36')
        self.assertEqual(
            r.tree.toStringTree(),
            '(if (EXPR (+ 1 2)) fooze fooze2)'
            )

    def testR37(self):
        r = self.parse("1 + 2 + 3", 'r37')
        self.assertEqual(
            r.tree.toStringTree(),
            '(+ (+ 1 2) 3)'
            )

    def testR38(self):
        r = self.parse("1 + 2 + 3", 'r38')
        self.assertEqual(
            r.tree.toStringTree(),
            '(+ (+ 1 2) 3)'
            )

    def testR39a(self):
        r = self.parse("gnurz[1]", 'r39')
        self.assertEqual(
            r.tree.toStringTree(),
            '(INDEX gnurz 1)'
            )

    def testR39b(self):
        r = self.parse("gnurz(2)", 'r39')
        self.assertEqual(
            r.tree.toStringTree(),
            '(CALL gnurz 2)'
            )

    def testR39c(self):
        r = self.parse("gnurz.gnarz", 'r39')
        self.assertEqual(
            r.tree.toStringTree(),
            '(FIELDACCESS gnurz gnarz)'
            )

    def testR39d(self):
        r = self.parse("gnurz.gnarz.gnorz", 'r39')
        self.assertEqual(
            r.tree.toStringTree(),
            '(FIELDACCESS (FIELDACCESS gnurz gnarz) gnorz)'
            )

    def testR40(self):
        r = self.parse("1 + 2 + 3;", 'r40')
        self.assertEqual(
            r.tree.toStringTree(),
            '(+ 1 2 3)'
            )

    def testR41(self):
        r = self.parse("1 + 2 + 3;", 'r41')
        self.assertEqual(
            r.tree.toStringTree(),
            '(3 (2 1))'
            )

    def testR42(self):
        r = self.parse("gnurz, gnarz, gnorz", 'r42')
        self.assertEqual(
            r.tree.toStringTree(),
            'gnurz gnarz gnorz'
            )

    def testR43(self):
        r = self.parse("gnurz, gnarz, gnorz", 'r43')
        self.assertIsNone(r.tree)
        self.assertEqual(
            r.res,
            ['gnurz', 'gnarz', 'gnorz']
            )

    def testR44(self):
        r = self.parse("gnurz, gnarz, gnorz", 'r44')
        self.assertEqual(
            r.tree.toStringTree(),
            '(gnorz (gnarz gnurz))'
            )

    def testR45(self):
        r = self.parse("gnurz", 'r45')
        self.assertEqual(
            r.tree.toStringTree(),
            'gnurz'
            )

    def testR46(self):
        r = self.parse("gnurz, gnarz, gnorz", 'r46')
        self.assertIsNone(r.tree)
        self.assertEqual(
            r.res,
            ['gnurz', 'gnarz', 'gnorz']
            )

    def testR47(self):
        r = self.parse("gnurz, gnarz, gnorz", 'r47')
        self.assertEqual(
            r.tree.toStringTree(),
            'gnurz gnarz gnorz'
            )

    def testR48(self):
        r = self.parse("gnurz, gnarz, gnorz", 'r48')
        self.assertEqual(
            r.tree.toStringTree(),
            'gnurz gnarz gnorz'
            )

    def testR49(self):
        r = self.parse("gnurz gnorz", 'r49')
        self.assertEqual(
            r.tree.toStringTree(),
            '(gnurz gnorz)'
            )

    def testR50(self):
        r = self.parse("gnurz", 'r50')
        self.assertEqual(
            r.tree.toStringTree(),
            '(1.0 gnurz)'
            )

    def testR51(self):
        r = self.parse("gnurza gnurzb gnurzc", 'r51')
        self.assertEqual(
            r.res.toStringTree(),
            'gnurzb'
            )

    def testR52(self):
        r = self.parse("gnurz", 'r52')
        self.assertEqual(
            r.res.toStringTree(),
            'gnurz'
            )

    def testR53(self):
        r = self.parse("gnurz", 'r53')
        self.assertEqual(
            r.res.toStringTree(),
            'gnurz'
            )

    def testR54(self):
        r = self.parse("gnurza 1 + 2 gnurzb", 'r54')
        self.assertEqual(
            r.tree.toStringTree(),
            '(+ 1 2)'
            )

    def testR55a(self):
        r = self.parse("public private 1 + 2", 'r55')
        self.assertEqual(
            r.tree.toStringTree(),
            'public private (+ 1 2)'
            )

    def testR55b(self):
        r = self.parse("public fooze", 'r55')
        self.assertEqual(
            r.tree.toStringTree(),
            'public fooze'
            )

    def testR56(self):
        r = self.parse("a b c d", 'r56')
        self.assertEqual(
            r.tree.toStringTree(),
            'foo'
            )

    def testR57(self):
        r = self.parse("a b c d", 'r57')
        self.assertEqual(
            r.tree.toStringTree(),
            'foo'
            )

    def testR59(self):
        r = self.parse("a b c fooze", 'r59')
        self.assertEqual(
            r.tree.toStringTree(),
            '(a fooze) (b fooze) (c fooze)'
            )
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| |
from functools import partial
from django.core.exceptions import ValidationError
from cyder.base.tests import ModelTestMixin
from cyder.core.ctnr.models import Ctnr
from cyder.core.system.models import System
from cyder.cydns.tests.utils import create_reverse_domain, create_zone, DNSTest
from cyder.cydns.ip.utils import ip_to_reverse_name
from cyder.cydns.domain.models import Domain
from cyder.cydns.ptr.models import PTR
from cyder.cydns.ip.models import Ip
from cyder.cydhcp.interface.static_intr.models import StaticInterface
from cyder.cydhcp.network.models import Network
from cyder.cydhcp.range.models import Range
from cyder.cydhcp.vrf.models import Vrf
class PTRTests(DNSTest, ModelTestMixin):
    """Tests for PTR (reverse DNS) record creation, update and deletion,
    including container/range membership and conflicts with static
    interfaces.
    """

    def setUp(self):
        # Build the fixtures shared by every test: one vrf, the
        # 128.in-addr.arpa and 8.ip6.arpa reverse zones, one container with
        # an IPv4 static range, a handful of forward domains, and an IPv6
        # network/range under ``self.osu_block``.
        super(PTRTests, self).setUp()

        Vrf.objects.create(name='test_vrf')

        self._128 = create_zone('128.in-addr.arpa')
        create_zone('8.ip6.arpa')

        self.c1 = Ctnr.objects.create(name='test_ctnr1')

        self.n = Network.objects.create(
            vrf=Vrf.objects.get(name='test_vrf'), ip_type='4',
            network_str='128.193.0.0/24')
        self.r = Range.objects.create(
            network=self.n, range_type='st', start_str='128.193.0.2',
            end_str='128.193.0.100')
        self.c1.ranges.add(self.r)

        for name in ('edu', 'oregonstate.edu', 'bar.oregonstate.edu',
                     'nothing', 'nothing.nothing', 'nothing.nothing.nothing'):
            d = Domain.objects.create(name=name)
            self.c1.domains.add(d)

        create_reverse_domain('8.6.2.0', ip_type='6')

        self.osu_block = "8620:105:F000:"
        self.create_network_range(
            network_str="8620:105::/32", start_str='8620:105:F000::1',
            end_str='8620:105:F000::1000', ip_type='6')

    def create_network_range(self, network_str, start_str, end_str,
                             range_type="st", ip_type='4', domain=None):
        """Create a Network plus a Range inside it and add the range to the
        test container ``self.c1``.
        """
        if domain is None:
            domain = Domain.objects.get(name="oregonstate.edu")

        n = Network.objects.create(
            vrf=Vrf.objects.get(name='test_vrf'), ip_type=ip_type,
            network_str=network_str)
        r = Range.objects.create(
            network=n, range_type=range_type, start_str=start_str,
            end_str=end_str, domain=domain, ip_type=ip_type)
        self.c1.ranges.add(r)

    def create_ptr(self, **kwargs):
        """Create a PTR, defaulting its container to ``self.c1``."""
        kwargs.setdefault('ctnr', self.c1)
        return PTR.objects.create(**kwargs)

    @property
    def objs(self):
        """Create objects for test_create_delete."""
        return (
            self.create_ptr(
                ip_str='128.123.123.2', ip_type='4', fqdn='a.oregonstate.edu'),
            self.create_ptr(
                ip_str='128.123.123.45', ip_type='4',
                fqdn='bbbbbbbbbbbbbb.nothing.nothing'),
            self.create_ptr(
                ip_str='128.123.123.197', ip_type='4',
                fqdn='c-c-c-c-c-c.nothing'),
            self.create_ptr(
                ip_str='128.123.123.254', ip_type='4', fqdn='d1d.edu'),
        )

    def test_no_domain(self):
        # An fqdn under a domain that does not exist must be rejected.
        for fqdn in ('lol.foo', 'oregonstate.com', 'me.oregondfastate.edu'):
            self.assertRaises(
                ValidationError, self.create_ptr,
                ip_str='244.123.123.123', ip_type='4', fqdn=fqdn)

    def test_invalid_name(self):
        # Malformed fqdns must be rejected on both create and update, for
        # both IPv4 and IPv6 PTRs.
        ptr_v4 = self.create_ptr(
            ip_str='128.123.123.99', ip_type='4', fqdn='foo.oregonstate.edu')
        ptr_v6 = self.create_ptr(
            ip_str=(self.osu_block + ':1'), ip_type='6',
            fqdn='foo.oregonstate.edu')

        bad_fqdns = (
            '2134!@#$!@', 'asdflj..com', 'A' * 257, '.oregonstate.edu',
            '%.s#.com')

        for fqdn in bad_fqdns:
            self.assertRaises(
                ValidationError, self.create_ptr,
                ip_str='128.123.123.123', ip_type='4', fqdn=fqdn)

            self.assertRaises(
                ValidationError, self.do_generic_update,
                ptr_v4, fqdn=fqdn)

            self.assertRaises(
                ValidationError, self.create_ptr,
                ip_str=(self.osu_block + ':2'), ip_type='6', fqdn=fqdn)

            self.assertRaises(
                ValidationError, self.do_generic_update,
                ptr_v6, fqdn=fqdn)

    def test_invalid_ip(self):
        # Malformed ip_str values must be rejected on both create and
        # update, for both IPv4 and IPv6 PTRs.
        ptr_v4 = self.create_ptr(
            ip_str='128.123.123.99', ip_type='4', fqdn='foo.oregonstate.edu')

        bad_ipv4_ips = (
            '123.123', 'asdfasdf', 32141243, '128.123.123.123.123', '....',
            '1234.', None, False, True)

        for ip_str in bad_ipv4_ips:
            self.assertRaises(
                ValidationError, self.create_ptr,
                fqdn='oregonstate.edu', ip_str=ip_str, ip_type='4')

            self.assertRaises(
                ValidationError, self.do_generic_update,
                ptr_v4, ip_str=ip_str)

        ptr_v6 = self.create_ptr(
            ip_str=(self.osu_block + ':1'), ip_type='6',
            fqdn='foo.oregonstate.edu')

        bad_ipv6_ips = (
            '123.123.123.123.', '123:!23:!23:', ':::', None, True, False,
            lambda x: x, '8::9:9:1', '11:9:9::1', '8.9.9.1', '11.9.9.1')

        for ip_str in bad_ipv6_ips:
            self.assertRaises(
                ValidationError, self.create_ptr,
                ip_str=ip_str, fqdn='oregonstate.edu', ip_type='6')

            self.assertRaises(
                ValidationError, self.do_generic_update,
                ptr_v6, ip_str=ip_str)

    def test_no_reverse_domain(self):
        # IPs with no matching reverse zone must be rejected.
        self.assertRaises(
            ValidationError, self.create_ptr,
            fqdn='oregonstate.edu', ip_str='8.9.9.1', ip_type='4')

        self.assertRaises(
            ValidationError, self.create_ptr,
            fqdn='oregonstate.edu', ip_str='11.9.9.1', ip_type='4')

    def do_generic_remove(self, ip_str, fqdn, ip_type):
        """Create a PTR, delete it, and check it is really gone."""
        ptr = PTR.objects.create(
            ip_str=ip_str, fqdn=fqdn, ip_type=ip_type, ctnr=self.c1)

        ptr.delete()

        # Recompute the ip_upper/ip_lower pair to query by the same key the
        # PTR would have been stored under.
        ip = Ip(ip_str=ip_str, ip_type=ip_type)
        ip.clean_ip()

        self.assertFalse(PTR.objects.filter(
            fqdn=fqdn, ip_upper=ip.ip_upper, ip_lower=ip.ip_lower).exists())

    def test_remove_ipv4(self):
        self.create_network_range(
            network_str='128.255.1.0/16', start_str='128.255.1.1',
            end_str='128.255.233.254')

        self.do_generic_remove(
            ip_str='128.255.233.244', ip_type='4',
            fqdn='asdf34foo.bar.oregonstate.edu')
        self.do_generic_remove(
            ip_str='128.255.11.13', ip_type='4',
            fqdn='fo124kfasdfko.bar.oregonstate.edu')
        self.do_generic_remove(
            ip_str='128.255.9.1', ip_type='4',
            fqdn='or1fdsaflkegonstate.edu')
        self.do_generic_remove(
            ip_str='128.255.1.7', ip_type='4',
            fqdn='12.bar.oregonstate.edu')
        self.do_generic_remove(
            ip_str='128.255.1.3', ip_type='4',
            fqdn='fcwoo.bar.oregonstate.edu')
        self.do_generic_remove(
            ip_str='128.255.1.2', ip_type='4',
            fqdn='asffad124jfasf-oregonstate.edu')

    def test_remove_ipv6(self):
        self.do_generic_remove(
            ip_str=(self.osu_block + ":1"), ip_type='6',
            fqdn='asdf34foo.bar.oregonstate.edu')
        self.do_generic_remove(
            ip_str=(self.osu_block + ":2"), ip_type='6',
            fqdn='fo124kfasdfko.bar.oregonstate.edu')
        self.do_generic_remove(
            ip_str=(self.osu_block + ":8"), ip_type='6',
            fqdn='or1fdsaflkegonstate.edu')
        self.do_generic_remove(
            ip_str=(self.osu_block + ":8"), ip_type='6',
            fqdn='12.bar.oregonstate.edu')
        self.do_generic_remove(
            ip_str=(self.osu_block + ":20"), ip_type='6',
            fqdn='fcwoo.bar.oregonstate.edu')
        self.do_generic_remove(
            ip_str=(self.osu_block + ":ad"), ip_type='6',
            fqdn='asffad124jfasf-oregonstate.edu')

    def do_generic_update(self, ptr, fqdn=None, ip_str=None):
        """Apply the given field changes to ``ptr``, save, and verify the
        database row matches the in-memory object.
        """
        if fqdn is not None:
            ptr.fqdn = fqdn
        if ip_str is not None:
            ptr.ip_str = ip_str
        ptr.save()

        db_ptr = PTR.objects.get(
            fqdn=ptr.fqdn, ip_upper=ptr.ip_upper, ip_lower=ptr.ip_lower)
        self.assertEqual(ptr.fqdn, db_ptr.fqdn)
        self.assertEqual(ptr.ip_str, db_ptr.ip_str)

    def test_update_ipv4(self):
        self.create_network_range(
            network_str='128.193.1.0/24', start_str='128.193.1.1',
            end_str='128.193.1.100')

        ptr = self.create_ptr(
            ip_str='128.193.1.1', ip_type='4', fqdn='oregonstate.edu')

        self.do_generic_update(ptr, fqdn='nothing.nothing.nothing')
        self.do_generic_update(ptr, fqdn='google.edu')
        self.do_generic_update(ptr, fqdn='bar.oregonstate.edu')

    def test_update_ipv6(self):
        ptr = self.create_ptr(
            ip_str=(self.osu_block + ':1'), ip_type='6',
            fqdn='oregonstate.edu')

        self.do_generic_update(ptr, fqdn="nothing.nothing.nothing")
        self.do_generic_update(ptr, fqdn="google.edu")
        self.do_generic_update(ptr, fqdn="bar.oregonstate.edu")

    def test_ctnr_range(self):
        """Test that a PTR is allowed only in its IP's range's containers"""
        c2 = Ctnr.objects.create(name='test_ctnr2')
        r = self.r
        self.c1.ranges.add(r)

        self.create_ptr(
            fqdn='www1.oregonstate.edu', ip_str='128.193.0.2', ip_type='4',
            ctnr=self.c1)

        with self.assertRaises(ValidationError):
            self.create_ptr(
                fqdn='www2.oregonstate.edu', ip_str='128.193.0.3', ip_type='4',
                ctnr=c2)

    def test_target_existence(self):
        """Test that a PTR's target is not required to exist"""
        self.create_ptr(
            ip_str='128.193.0.2', fqdn='nonexistent.oregonstate.edu',
            ip_type='4')

    def test_domain_ctnr(self):
        """Test that a PTR's container is independent of its domain's container
        """
        self.c1.domains.add(Domain.objects.get(name='oregonstate.edu'))

        c2 = Ctnr.objects.create(name='test_ctnr2')
        c2.ranges.add(self.r)

        self.create_ptr(
            ip_str='128.193.0.2', fqdn='foo1.oregonstate.edu',
            ip_type='4', ctnr=self.c1)
        self.create_ptr(
            ip_str='128.193.0.3', fqdn='foo2.oregonstate.edu',
            ip_type='4', ctnr=c2)

    def test_target_resembles_ip(self):
        """Test that a PTR's target cannot resemble an IP address"""
        for fqdn in ('10.234.30.253', '128.193.0.3', 'fe80::e1c9:1:228d:d8'):
            with self.assertRaises(ValidationError):
                self.create_ptr(ip_str='128.193.0.2', fqdn=fqdn,
                                ip_type='4')

    def test_same_ip_as_static_intr(self):
        """Test that a PTR and a static inteface cannot share an IP

        (It doesn't matter whether the static interface is enabled.)
        """
        def create_si(dns_enabled):
            s = System.objects.create(name='test_system')
            return StaticInterface.objects.create(
                mac='be:ef:fa:ce:12:34', label='foo1',
                domain=Domain.objects.get(name='oregonstate.edu'),
                ip_str='128.193.0.2', ip_type='4', system=s,
                ctnr=self.c1, dns_enabled=dns_enabled)
        create_si_enabled = partial(create_si, True)
        create_si_enabled.name = "StaticInterface with DNS enabled"
        create_si_disabled = partial(create_si, False)
        create_si_disabled.name = "StaticInterface with DNS disabled"

        def create_ptr():
            return self.create_ptr(
                ip_str='128.193.0.2', ip_type='4', fqdn='foo2.oregonstate.edu')
        create_ptr.name = 'PTR'

        self.assertObjectsConflict((create_si_enabled, create_ptr))
        self.assertObjectsConflict((create_si_disabled, create_ptr))

    def test_same_ip(self):
        """Test that two PTRs cannot have the same IP"""
        self.create_ptr(
            ip_str='128.193.0.2', ip_type='4', fqdn='foo1.oregonstate.edu')

        with self.assertRaises(ValidationError):
            self.create_ptr(
                ip_str='128.193.0.2', ip_type='4', fqdn='foo2.oregonstate.edu')

    def test_ptr_in_dynamic_range(self):
        """Test that the IP cannot be in a dynamic range"""
        self.create_network_range(
            network_str='128.193.1.0/24', start_str='128.193.1.2',
            end_str='128.193.1.100', range_type='dy')

        with self.assertRaises(ValidationError):
            self.create_ptr(
                ip_str='128.193.1.2', ip_type='4', fqdn='foo.oregonstate.edu')
| |
#!/usr/bin/env python3
# Copyright (c) 2013, AT&T Labs, Yun Mao <yunmao@gmail.com>
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""pylint error checking."""
from io import StringIO
import json
import re
import sys
from pylint import lint
from pylint.reporters import text
ignore_codes = [
    # Note(maoy): E1103 is error code related to partial type inference
    "E1103"
]

ignore_messages = [
    # Note(fengqian): this message is the pattern of [E0611].
    # It should be ignored because use six module to keep py3.X compatibility.
    "No name 'urllib' in module '_MovedItems'",

    # Note(xyang): these error messages are for the code [E1101].
    # They should be ignored because 'sha256' and 'sha224' are functions in
    # 'hashlib'.
    "Module 'hashlib' has no 'sha256' member",
    "Module 'hashlib' has no 'sha224' member",
]

# Errors reported against these path prefixes are ignored entirely.
ignore_modules = ["os_brick/tests/",
                  "tools/lintstack.head.py"]

KNOWN_PYLINT_EXCEPTIONS_FILE = "tools/pylint_exceptions"


class LintOutput(object):
    """One parsed pylint error line, with helpers to group and filter them."""

    # Cache of the most recently read source file so consecutive errors
    # from the same file do not re-read it from disk.
    _cached_filename = None
    _cached_content = None

    def __init__(self, filename, lineno, line_content, code, message,
                 lintoutput):
        self.filename = filename
        self.lineno = lineno
        self.line_content = line_content
        self.code = code
        self.message = message
        self.lintoutput = lintoutput

    @classmethod
    def from_line(cls, line):
        """Parse one line of pylint output into a LintOutput.

        Returns None when the line is not an error report: pylint also
        emits module headers ("************* Module foo") and summary
        lines, which previously crashed this parser with AttributeError.
        """
        m = re.search(r"(\S+):(\d+): \[(\S+)(, \S+)?] (.*)", line)
        if m is None:
            return None
        matched = m.groups()
        filename, lineno, code, message = (matched[0], int(matched[1]),
                                           matched[2], matched[-1])
        # Read the offending source line (cached per file) so the error key
        # can include the actual code content.
        if cls._cached_filename != filename:
            with open(filename) as f:
                cls._cached_content = list(f.readlines())
            cls._cached_filename = filename
        line_content = cls._cached_content[lineno - 1].rstrip()
        return cls(filename, lineno, line_content, code, message,
                   line.rstrip())

    @classmethod
    def from_msg_to_dict(cls, msg):
        """Convert pylint output to a unique-error dictionary.

        From the output of pylint msg, to a dict, where each key
        is a unique error identifier, value is a list of LintOutput.
        Unparseable and explicitly ignored lines are skipped.
        """
        result = {}
        for line in msg.splitlines():
            obj = cls.from_line(line)
            if obj is None or obj.is_ignored():
                continue
            result.setdefault(obj.key(), []).append(obj)
        return result

    def is_ignored(self):
        """Return True if this error is suppressed by code or module rules."""
        if self.code in ignore_codes:
            return True
        if any(self.filename.startswith(name) for name in ignore_modules):
            return True
        return False

    def key(self):
        """Return a (message, source-line) pair identifying this error."""
        if self.code in ["E1101", "E1103"]:
            # These two types of errors are like Foo class has no member bar.
            # We discard the source code so that the error will be ignored
            # next time another Foo.bar is encountered.
            return self.message, ""
        return self.message, self.line_content.strip()

    def json(self):
        """Serialize this error's attributes as a JSON object string."""
        return json.dumps(self.__dict__)

    def review_str(self):
        """Return a human-readable multi-line description of this error."""
        return ("File %(filename)s\nLine %(lineno)d:%(line_content)s\n"
                "%(code)s: %(message)s" %
                {'filename': self.filename,
                 'lineno': self.lineno,
                 'line_content': self.line_content,
                 'code': self.code,
                 'message': self.message})
class ErrorKeys(object):
    """Serialize and load the set of known-error keys."""

    @classmethod
    def print_json(cls, errors, output=sys.stdout):
        """Write each error key as one JSON line, after a header comment."""
        print("# automatically generated by tools/lintstack.py", file=output)
        for i in sorted(errors.keys()):
            print(json.dumps(i), file=output)

    @classmethod
    def from_file(cls, filename):
        """Read back a set of error-key tuples written by print_json."""
        keys = set()
        # Use a context manager: the original left the file handle open.
        with open(filename) as f:
            for line in f:
                if line and line[0] != "#":
                    d = json.loads(line)
                    keys.add(tuple(d))
        return keys
def run_pylint():
    """Run pylint on os_brick in errors-only mode and return its raw output."""
    output = StringIO()
    reporter = text.ParseableTextReporter(output=output)
    # -E: report errors only; --include-ids puts the message id on each line.
    lint.Run(["--include-ids=y", "-E", "os_brick"],
             reporter=reporter, exit=False)
    result = output.getvalue()
    output.close()
    return result
def generate_error_keys(msg=None):
    """Record the current pylint errors in the known-exceptions file.

    If ``msg`` is None, pylint is run to produce the error output.
    """
    print("Generating", KNOWN_PYLINT_EXCEPTIONS_FILE)
    pylint_output = run_pylint() if msg is None else msg
    grouped = LintOutput.from_msg_to_dict(pylint_output)
    with open(KNOWN_PYLINT_EXCEPTIONS_FILE, "w") as exc_file:
        ErrorKeys.print_json(grouped, output=exc_file)
def validate(newmsg=None):
    """Check the current pylint errors against the known-exceptions file.

    Prints every error not present in the exceptions file and exits with
    status 1 when any are found. If ``newmsg`` is None, pylint is run to
    produce the error output.
    """
    print("Loading", KNOWN_PYLINT_EXCEPTIONS_FILE)
    known = ErrorKeys.from_file(KNOWN_PYLINT_EXCEPTIONS_FILE)
    if newmsg is None:
        print("Running pylint. Be patient...")
        newmsg = run_pylint()
    errors = LintOutput.from_msg_to_dict(newmsg)

    print("Unique errors reported by pylint: was %d, now %d."
          % (len(known), len(errors)))

    passed = True
    for err_key, err_list in errors.items():
        if err_key in known:
            continue
        # New, unknown error: show the raw pylint line(s).
        for err in err_list:
            print(err.lintoutput)
            print()
        passed = False

    if not passed:
        print("Please fix the errors above. If you believe they are false "
              "positives, run 'tools/lintstack.py generate' to overwrite.")
        sys.exit(1)

    print("Congrats! pylint check passed.")
    redundant = known - set(errors.keys())
    if redundant:
        print("Extra credit: some known pylint exceptions disappeared.")
        for i in sorted(redundant):
            print(json.dumps(i))
        print("Consider regenerating the exception file if you will.")
def usage():
    """Print command-line usage for lintstack."""
    print("""Usage: tools/lintstack.py [generate|validate]
    To generate pylint_exceptions file: tools/lintstack.py generate
    To validate the current commit: tools/lintstack.py
    """)
def main():
    """Entry point: dispatch to generate/validate based on the first argument.

    With no argument the current commit is validated; an unrecognized
    argument prints usage.
    """
    option = sys.argv[1] if len(sys.argv) > 1 else "validate"
    if option == "generate":
        generate_error_keys()
    elif option == "validate":
        validate()
    else:
        usage()


if __name__ == "__main__":
    main()
| |
"""A base class session manager."""
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
import uuid
try:
import sqlite3
except ImportError:
# fallback on pysqlite2 if Python was build without sqlite
from pysqlite2 import dbapi2 as sqlite3
from tornado import gen, web
from traitlets.config.configurable import LoggingConfigurable
from ipython_genutils.py3compat import unicode_type
from traitlets import Instance
from notebook.utils import maybe_future
class SessionManager(LoggingConfigurable):
kernel_manager = Instance('notebook.services.kernels.kernelmanager.MappingKernelManager')
contents_manager = Instance('notebook.services.contents.manager.ContentsManager')
# Session database initialized below
_cursor = None
_connection = None
_columns = {'session_id', 'path', 'name', 'type', 'kernel_id'}
@property
def cursor(self):
"""Start a cursor and create a database called 'session'"""
if self._cursor is None:
self._cursor = self.connection.cursor()
self._cursor.execute("""CREATE TABLE session
(session_id, path, name, type, kernel_id)""")
return self._cursor
@property
def connection(self):
"""Start a database connection"""
if self._connection is None:
self._connection = sqlite3.connect(':memory:')
self._connection.row_factory = sqlite3.Row
return self._connection
def close(self):
"""Close the sqlite connection"""
if self._cursor is not None:
self._cursor.close()
self._cursor = None
    def __del__(self):
        """Close connection once SessionManager closes"""
        # Releasing the cursor here avoids leaking the in-memory database
        # handle when the manager is garbage-collected.
        self.close()
@gen.coroutine
def session_exists(self, path):
"""Check to see if the session of a given name exists"""
exists = False
self.cursor.execute("SELECT * FROM session WHERE path=?", (path,))
row = self.cursor.fetchone()
if row is not None:
# Note, although we found a row for the session, the associated kernel may have
# been culled or died unexpectedly. If that's the case, we should delete the
# row, thereby terminating the session. This can be done via a call to
# row_to_model that tolerates that condition. If row_to_model returns None,
# we'll return false, since, at that point, the session doesn't exist anyway.
model = yield maybe_future(self.row_to_model(row, tolerate_culled=True))
if model is not None:
exists = True
raise gen.Return(exists)
def new_session_id(self):
"Create a uuid for a new session"
return unicode_type(uuid.uuid4())
@gen.coroutine
def create_session(self, path=None, name=None, type=None, kernel_name=None, kernel_id=None):
"""Creates a session and returns its model"""
session_id = self.new_session_id()
if kernel_id is not None and kernel_id in self.kernel_manager:
pass
else:
kernel_id = yield self.start_kernel_for_session(session_id, path, name, type, kernel_name)
result = yield maybe_future(
self.save_session(session_id, path=path, name=name, type=type, kernel_id=kernel_id)
)
# py2-compat
raise gen.Return(result)
@gen.coroutine
def start_kernel_for_session(self, session_id, path, name, type, kernel_name):
"""Start a new kernel for a given session."""
# allow contents manager to specify kernels cwd
kernel_path = self.contents_manager.get_kernel_path(path=path)
kernel_id = yield maybe_future(
self.kernel_manager.start_kernel(path=kernel_path, kernel_name=kernel_name)
)
# py2-compat
raise gen.Return(kernel_id)
@gen.coroutine
def save_session(self, session_id, path=None, name=None, type=None, kernel_id=None):
"""Saves the items for the session with the given session_id
Given a session_id (and any other of the arguments), this method
creates a row in the sqlite session database that holds the information
for a session.
Parameters
----------
session_id : str
uuid for the session; this method must be given a session_id
path : str
the path for the given session
name: str
the name of the session
type: string
the type of the session
kernel_id : str
a uuid for the kernel associated with this session
Returns
-------
model : dict
a dictionary of the session model
"""
self.cursor.execute("INSERT INTO session VALUES (?,?,?,?,?)",
(session_id, path, name, type, kernel_id)
)
result = yield maybe_future(self.get_session(session_id=session_id))
raise gen.Return(result)
@gen.coroutine
def get_session(self, **kwargs):
"""Returns the model for a particular session.
Takes a keyword argument and searches for the value in the session
database, then returns the rest of the session's info.
Parameters
----------
**kwargs : keyword argument
must be given one of the keywords and values from the session database
(i.e. session_id, path, name, type, kernel_id)
Returns
-------
model : dict
returns a dictionary that includes all the information from the
session described by the kwarg.
"""
if not kwargs:
raise TypeError("must specify a column to query")
conditions = []
for column in kwargs.keys():
if column not in self._columns:
raise TypeError("No such column: %r", column)
conditions.append("%s=?" % column)
query = "SELECT * FROM session WHERE %s" % (' AND '.join(conditions))
self.cursor.execute(query, list(kwargs.values()))
try:
row = self.cursor.fetchone()
except KeyError:
# The kernel is missing, so the session just got deleted.
row = None
if row is None:
q = []
for key, value in kwargs.items():
q.append("%s=%r" % (key, value))
raise web.HTTPError(404, u'Session not found: %s' % (', '.join(q)))
model = yield maybe_future(self.row_to_model(row))
raise gen.Return(model)
@gen.coroutine
def update_session(self, session_id, **kwargs):
"""Updates the values in the session database.
Changes the values of the session with the given session_id
with the values from the keyword arguments.
Parameters
----------
session_id : str
a uuid that identifies a session in the sqlite3 database
**kwargs : str
the key must correspond to a column title in session database,
and the value replaces the current value in the session
with session_id.
"""
yield maybe_future(self.get_session(session_id=session_id))
if not kwargs:
# no changes
return
sets = []
for column in kwargs.keys():
if column not in self._columns:
raise TypeError("No such column: %r" % column)
sets.append("%s=?" % column)
query = "UPDATE session SET %s WHERE session_id=?" % (', '.join(sets))
self.cursor.execute(query, list(kwargs.values()) + [session_id])
def kernel_culled(self, kernel_id):
"""Checks if the kernel is still considered alive and returns true if its not found. """
return kernel_id not in self.kernel_manager
@gen.coroutine
def row_to_model(self, row, tolerate_culled=False):
"""Takes sqlite database session row and turns it into a dictionary"""
kernel_culled = yield maybe_future(self.kernel_culled(row['kernel_id']))
if kernel_culled:
# The kernel was culled or died without deleting the session.
# We can't use delete_session here because that tries to find
# and shut down the kernel - so we'll delete the row directly.
#
# If caller wishes to tolerate culled kernels, log a warning
# and return None. Otherwise, raise KeyError with a similar
# message.
self.cursor.execute("DELETE FROM session WHERE session_id=?",
(row['session_id'],))
msg = "Kernel '{kernel_id}' appears to have been culled or died unexpectedly, " \
"invalidating session '{session_id}'. The session has been removed.".\
format(kernel_id=row['kernel_id'],session_id=row['session_id'])
if tolerate_culled:
self.log.warning(msg + " Continuing...")
raise gen.Return(None)
raise KeyError(msg)
kernel_model = yield maybe_future(self.kernel_manager.kernel_model(row['kernel_id']))
model = {
'id': row['session_id'],
'path': row['path'],
'name': row['name'],
'type': row['type'],
'kernel': kernel_model
}
if row['type'] == 'notebook':
# Provide the deprecated API.
model['notebook'] = {'path': row['path'], 'name': row['name']}
raise gen.Return(model)
@gen.coroutine
def list_sessions(self):
"""Returns a list of dictionaries containing all the information from
the session database"""
c = self.cursor.execute("SELECT * FROM session")
result = []
# We need to use fetchall() here, because row_to_model can delete rows,
# which messes up the cursor if we're iterating over rows.
for row in c.fetchall():
try:
model = yield maybe_future(self.row_to_model(row))
result.append(model)
except KeyError:
pass
raise gen.Return(result)
@gen.coroutine
def delete_session(self, session_id):
"""Deletes the row in the session database with given session_id"""
session = yield maybe_future(self.get_session(session_id=session_id))
yield maybe_future(self.kernel_manager.shutdown_kernel(session['kernel']['id']))
self.cursor.execute("DELETE FROM session WHERE session_id=?", (session_id,))
| |
# MySQL Connector/Python - MySQL driver written in Python.
# Copyright (c) 2012, 2015, Oracle and/or its affiliates. All rights reserved.
# MySQL Connector/Python is licensed under the terms of the GPLv2
# <http://www.gnu.org/licenses/old-licenses/gpl-2.0.html>, like most
# MySQL Connectors. There are special exceptions to the terms and
# conditions of the GPLv2 as it is applied to this software, see the
# FOSS License Exception
# <http://www.mysql.com/about/legal/licensing/foss-exception.html>.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""Module implementing low-level socket communication with MySQL servers.
"""
from collections import deque
import socket
import struct
import sys
import zlib
try:
    import ssl
except ImportError:
    # If import fails, we don't have SSL support; catch only ImportError so
    # unrelated failures (KeyboardInterrupt, SystemExit) are not swallowed.
    pass
from . import constants, errors
from .catch23 import PY2, init_bytearray, struct_unpack
def _strioerror(err):
    """Render an IOError/OSError as '<errno> <strerror>'.

    Falls back to str(err) when the exception carries no errno.
    """
    if err.errno:
        return '{errno} {strerr}'.format(errno=err.errno, strerr=err.strerror)
    return str(err)
def _prepare_packets(buf, pktnr):
    """Split *buf* into MySQL wire packets, numbering them from *pktnr*.

    Payloads longer than the protocol maximum are emitted as a sequence of
    full-sized packets followed by one trailing (possibly empty) packet.
    """
    packets = []
    limit = constants.MAX_PACKET_LENGTH
    remaining = len(buf)
    while remaining > limit:
        # A maximum-length packet always carries the 3-byte length 0xffffff.
        packets.append(b'\xff\xff\xff' + struct.pack('<B', pktnr) + buf[:limit])
        buf = buf[limit:]
        remaining = len(buf)
        pktnr += 1
    packets.append(struct.pack('<I', remaining)[0:3]
                   + struct.pack('<B', pktnr) + buf)
    return packets
class BaseMySQLSocket(object):
    """Base class for MySQL socket communication

    This class should not be used directly but overloaded, changing the
    at least the open_connection()-method. Examples of subclasses are
      mysql.connector.network.MySQLTCPSocket
      mysql.connector.network.MySQLUnixSocket
    """

    def __init__(self):
        self.sock = None  # holds the socket connection
        self._connection_timeout = None
        # Starts at -1 so the first next_packet_number access yields 0.
        self._packet_number = -1
        self._packet_queue = deque()
        self.recvsize = 8192

    @property
    def next_packet_number(self):
        """Increments the packet number"""
        self._packet_number = self._packet_number + 1
        if self._packet_number > 255:
            # Sequence ids are a single byte on the wire; wrap around.
            self._packet_number = 0
        return self._packet_number

    def open_connection(self):
        """Open the socket"""
        raise NotImplementedError

    def get_address(self):
        """Get the location of the socket"""
        raise NotImplementedError

    def shutdown(self):
        """Shut down the socket before closing it"""
        try:
            self.sock.shutdown(socket.SHUT_RDWR)
            self.sock.close()
            del self._packet_queue
        except (socket.error, AttributeError):
            # Best-effort: the socket may never have been opened.
            pass

    def close_connection(self):
        """Close the socket"""
        try:
            self.sock.close()
            del self._packet_queue
        except (socket.error, AttributeError):
            pass

    def send_plain(self, buf, packet_number=None):
        """Send packets to the MySQL server"""
        if packet_number is None:
            self.next_packet_number  # pylint: disable=W0104
        else:
            self._packet_number = packet_number
        packets = _prepare_packets(buf, self._packet_number)
        for packet in packets:
            try:
                if PY2:
                    self.sock.sendall(buffer(packet))  # pylint: disable=E0602
                else:
                    self.sock.sendall(packet)
            except IOError as err:
                # 2055: Lost connection to MySQL server
                raise errors.OperationalError(
                    errno=2055, values=(self.get_address(), _strioerror(err)))
            except AttributeError:
                # 2006: MySQL server has gone away (no socket object)
                raise errors.OperationalError(errno=2006)

    send = send_plain

    def send_compressed(self, buf, packet_number=None):
        """Send compressed packets to the MySQL server"""
        if packet_number is None:
            self.next_packet_number  # pylint: disable=W0104
        else:
            self._packet_number = packet_number
        pktnr = self._packet_number
        pllen = len(buf)
        zpkts = []
        maxpktlen = constants.MAX_PACKET_LENGTH
        if pllen > maxpktlen:
            pkts = _prepare_packets(buf, pktnr)
            if PY2:
                tmpbuf = bytearray()
                for pkt in pkts:
                    tmpbuf += pkt
                tmpbuf = buffer(tmpbuf)  # pylint: disable=E0602
            else:
                tmpbuf = b''.join(pkts)
            del pkts
            seqid = 0
            # First compressed packet covers the initial 16KB slice.
            zbuf = zlib.compress(tmpbuf[:16384])
            header = (struct.pack('<I', len(zbuf))[0:3]
                      + struct.pack('<B', seqid)
                      + b'\x00\x40\x00')
            if PY2:
                header = buffer(header)  # pylint: disable=E0602
            zpkts.append(header + zbuf)
            tmpbuf = tmpbuf[16384:]
            pllen = len(tmpbuf)
            seqid = seqid + 1
            while pllen > maxpktlen:
                zbuf = zlib.compress(tmpbuf[:maxpktlen])
                header = (struct.pack('<I', len(zbuf))[0:3]
                          + struct.pack('<B', seqid)
                          + b'\xff\xff\xff')
                if PY2:
                    header = buffer(header)  # pylint: disable=E0602
                zpkts.append(header + zbuf)
                tmpbuf = tmpbuf[maxpktlen:]
                pllen = len(tmpbuf)
                seqid = seqid + 1
            if tmpbuf:
                zbuf = zlib.compress(tmpbuf)
                header = (struct.pack('<I', len(zbuf))[0:3]
                          + struct.pack('<B', seqid)
                          + struct.pack('<I', pllen)[0:3])
                if PY2:
                    header = buffer(header)  # pylint: disable=E0602
                zpkts.append(header + zbuf)
            del tmpbuf
        else:
            pkt = (struct.pack('<I', pllen)[0:3] +
                   struct.pack('<B', pktnr) + buf)
            if PY2:
                pkt = buffer(pkt)  # pylint: disable=E0602
            pllen = len(pkt)
            if pllen > 50:
                zbuf = zlib.compress(pkt)
                zpkts.append(struct.pack('<I', len(zbuf))[0:3]
                             + struct.pack('<B', 0)
                             + struct.pack('<I', pllen)[0:3]
                             + zbuf)
            else:
                # Tiny payloads are sent uncompressed; an uncompressed-length
                # field of 0 signals "not compressed" to the server.
                header = (struct.pack('<I', pllen)[0:3]
                          + struct.pack('<B', 0)
                          + struct.pack('<I', 0)[0:3])
                if PY2:
                    header = buffer(header)  # pylint: disable=E0602
                zpkts.append(header + pkt)
        for zip_packet in zpkts:
            try:
                self.sock.sendall(zip_packet)
            except IOError as err:
                raise errors.OperationalError(
                    errno=2055, values=(self.get_address(), _strioerror(err)))
            except AttributeError:
                raise errors.OperationalError(errno=2006)

    def recv_plain(self):
        """Receive packets from the MySQL server"""
        try:
            # Read the header of the MySQL packet, 4 bytes
            packet = bytearray(b'')
            packet_len = 0
            while packet_len < 4:
                chunk = self.sock.recv(4 - packet_len)
                if not chunk:
                    # 2013: Lost connection during query
                    raise errors.InterfaceError(errno=2013)
                packet += chunk
                packet_len = len(packet)

            # Save the packet number and payload length
            self._packet_number = packet[3]
            if PY2:
                payload_len = struct.unpack_from(
                    "<I",
                    buffer(packet[0:3] + b'\x00'))[0]  # pylint: disable=E0602
            else:
                payload_len = struct.unpack("<I", packet[0:3] + b'\x00')[0]

            # Read the payload directly into the pre-sized buffer (zero-copy).
            rest = payload_len
            packet.extend(bytearray(payload_len))
            packet_view = memoryview(packet)  # pylint: disable=E0602
            packet_view = packet_view[4:]
            while rest:
                read = self.sock.recv_into(packet_view, rest)
                if read == 0 and rest > 0:
                    raise errors.InterfaceError(errno=2013)
                packet_view = packet_view[read:]
                rest -= read
            return packet
        except IOError as err:
            raise errors.OperationalError(
                errno=2055, values=(self.get_address(), _strioerror(err)))

    def recv_py26_plain(self):
        """Receive packets from the MySQL server"""
        try:
            # Read the header of the MySQL packet, 4 bytes
            header = bytearray(b'')
            header_len = 0
            while header_len < 4:
                chunk = self.sock.recv(4 - header_len)
                if not chunk:
                    raise errors.InterfaceError(errno=2013)
                header += chunk
                header_len = len(header)

            # Save the packet number and payload length
            self._packet_number = header[3]
            payload_len = struct_unpack("<I", header[0:3] + b'\x00')[0]

            # Read the payload (py2.6 has no usable recv_into for bytearray).
            rest = payload_len
            payload = init_bytearray(b'')
            while rest > 0:
                chunk = self.sock.recv(rest)
                if not chunk:
                    raise errors.InterfaceError(errno=2013)
                payload += chunk
                rest = payload_len - len(payload)
            return header + payload
        except IOError as err:
            raise errors.OperationalError(
                errno=2055, values=(self.get_address(), _strioerror(err)))

    if sys.version_info[0:2] == (2, 6):
        recv = recv_py26_plain
        recv_plain = recv_py26_plain
    else:
        recv = recv_plain

    def _split_zipped_payload(self, packet_bunch):
        """Split compressed payload"""
        # Each MySQL packet inside the decompressed stream is
        # 3-byte length + 1-byte sequence id + payload.
        while packet_bunch:
            payload_length = struct_unpack("<I",
                                           packet_bunch[0:3] + b'\x00')[0]
            self._packet_queue.append(packet_bunch[0:payload_length + 4])
            packet_bunch = packet_bunch[payload_length + 4:]

    def recv_compressed(self):
        """Receive compressed packets from the MySQL server"""
        try:
            return self._packet_queue.popleft()
        except IndexError:
            pass

        header = bytearray(b'')
        packets = []
        try:
            abyte = self.sock.recv(1)
            while abyte and len(header) < 7:
                header += abyte
                abyte = self.sock.recv(1)
            while header:
                if len(header) < 7:
                    raise errors.InterfaceError(errno=2013)

                # Compressed-header layout: 3-byte compressed length,
                # 1-byte sequence id, 3-byte uncompressed length.
                zip_payload_length = struct_unpack("<I",
                                                   header[0:3] + b'\x00')[0]
                payload_length = struct_unpack("<I", header[4:7] + b'\x00')[0]
                zip_payload = init_bytearray(abyte)
                while len(zip_payload) < zip_payload_length:
                    chunk = self.sock.recv(zip_payload_length
                                           - len(zip_payload))
                    if len(chunk) == 0:
                        raise errors.InterfaceError(errno=2013)
                    zip_payload = zip_payload + chunk
                if payload_length == 0:
                    # Uncompressed-length 0 means the payload is sent as-is.
                    self._split_zipped_payload(zip_payload)
                    return self._packet_queue.popleft()
                packets.append(header + zip_payload)
                if payload_length != 16384:
                    break
                header = init_bytearray(b'')
                abyte = self.sock.recv(1)
                while abyte and len(header) < 7:
                    header += abyte
                    abyte = self.sock.recv(1)
        except IOError as err:
            raise errors.OperationalError(
                errno=2055, values=(self.get_address(), _strioerror(err)))

        tmp = init_bytearray(b'')
        for packet in packets:
            # Bug fix: previously read 'header[4:7]' (the stale header of the
            # *last* packet) for every packet; each packet carries its own
            # uncompressed length in bytes 4:7.
            payload_length = struct_unpack("<I", packet[4:7] + b'\x00')[0]
            if payload_length == 0:
                # Bug fix: bytearray.append() takes a single int; use '+=' to
                # extend with the raw (uncompressed) payload bytes.
                tmp += packet[7:]
            else:
                if PY2:
                    tmp += zlib.decompress(
                        buffer(packet[7:]))  # pylint: disable=E0602
                else:
                    tmp += zlib.decompress(packet[7:])

        self._split_zipped_payload(tmp)
        del tmp

        try:
            return self._packet_queue.popleft()
        except IndexError:
            pass

    def set_connection_timeout(self, timeout):
        """Set the connection timeout"""
        self._connection_timeout = timeout

    # pylint: disable=C0103
    def switch_to_ssl(self, ca, cert, key, verify_cert=False):
        """Switch the socket to use SSL"""
        if not self.sock:
            # 2048: Invalid connection handle
            raise errors.InterfaceError(errno=2048)

        try:
            if verify_cert:
                cert_reqs = ssl.CERT_REQUIRED
            else:
                cert_reqs = ssl.CERT_NONE

            self.sock = ssl.wrap_socket(
                self.sock, keyfile=key, certfile=cert, ca_certs=ca,
                cert_reqs=cert_reqs, do_handshake_on_connect=False,
                ssl_version=ssl.PROTOCOL_TLSv1)
            self.sock.do_handshake()
        except NameError:
            # 'ssl' failed to import at module load time.
            raise errors.NotSupportedError(
                "Python installation has no SSL support")
        except (ssl.SSLError, IOError) as err:
            raise errors.InterfaceError(
                errno=2055, values=(self.get_address(), _strioerror(err)))
        except NotImplementedError as err:
            raise errors.InterfaceError(str(err))
    # pylint: enable=C0103
class MySQLUnixSocket(BaseMySQLSocket):
    """MySQL socket class using UNIX sockets

    Opens a connection through the UNIX socket of the MySQL Server.
    """

    def __init__(self, unix_socket='/tmp/mysql.sock'):
        """Remember the socket file location; the connection opens lazily."""
        super(MySQLUnixSocket, self).__init__()
        self.unix_socket = unix_socket

    def get_address(self):
        """For a UNIX-domain endpoint the socket file path is the address."""
        return self.unix_socket

    def open_connection(self):
        """Create an AF_UNIX stream socket and connect to the server file."""
        try:
            self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
            self.sock.settimeout(self._connection_timeout)
            self.sock.connect(self.unix_socket)
        except IOError as err:
            # 2002: can't connect through the socket file.
            raise errors.InterfaceError(
                errno=2002, values=(self.get_address(), _strioerror(err)))
        except Exception as err:
            raise errors.InterfaceError(str(err))
class MySQLTCPSocket(BaseMySQLSocket):
    """MySQL socket class using TCP/IP

    Opens a TCP/IP connection to the MySQL Server.
    """
    def __init__(self, host='127.0.0.1', port=3306, force_ipv6=False):
        super(MySQLTCPSocket, self).__init__()
        self.server_host = host
        self.server_port = port
        # When True, an IPv6 address is preferred during address resolution.
        self.force_ipv6 = force_ipv6
        # Address family chosen by open_connection() (0 until connected).
        self._family = 0

    def get_address(self):
        # "host:port" form used in error messages.
        return "{0}:{1}".format(self.server_host, self.server_port)

    def open_connection(self):
        """Open the TCP/IP connection to the MySQL server
        """
        # Get address information
        addrinfo = [None] * 5
        try:
            addrinfos = socket.getaddrinfo(self.server_host,
                                           self.server_port,
                                           0, socket.SOCK_STREAM,
                                           socket.SOL_TCP)
            # If multiple results we favor IPv4, unless IPv6 was forced.
            # NOTE(review): with force_ipv6 set, an IPv4 entry that appears
            # before any IPv6 entry still matches the elif branch below —
            # preserved as-is from the original selection logic.
            for info in addrinfos:
                if self.force_ipv6 and info[0] == socket.AF_INET6:
                    addrinfo = info
                    break
                elif info[0] == socket.AF_INET:
                    addrinfo = info
                    break
            if self.force_ipv6 and addrinfo[0] is None:
                raise errors.InterfaceError(
                    "No IPv6 address found for {0}".format(self.server_host))
            if addrinfo[0] is None:
                # Neither IPv4 nor IPv6 matched; fall back to the first entry.
                addrinfo = addrinfos[0]
        except IOError as err:
            # 2003: Can't connect to MySQL server.
            raise errors.InterfaceError(
                errno=2003, values=(self.get_address(), _strioerror(err)))
        else:
            (self._family, socktype, proto, _, sockaddr) = addrinfo

        # Instantiate the socket and connect
        try:
            self.sock = socket.socket(self._family, socktype, proto)
            self.sock.settimeout(self._connection_timeout)
            self.sock.connect(sockaddr)
        except IOError as err:
            raise errors.InterfaceError(
                errno=2003, values=(self.get_address(), _strioerror(err)))
        except Exception as err:
            raise errors.OperationalError(str(err))
| |
#------------------------------------------------------------------------------
# Copyright 2013 Esri
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#------------------------------------------------------------------------------
# Name: Base.py
# Description: Base class used by MDCS/All Raster Solutions components.
# Version: 20151209
# Requirements: ArcGIS 10.1 SP1
# Author: Esri Imagery Workflows team
#------------------------------------------------------------------------------
#!/usr/bin/env python
import os
import sys
import arcpy
if (sys.version_info[0] < 3): # _winreg has been renamed as (winreg) in python3+
from _winreg import *
else:
from winreg import *
from datetime import datetime
from xml.dom import minidom
try:
import MDCS_UC
except Exception as inf:
print ('User-Code functions disabled.')
class DynaInvoke:
    """Dynamically invokes a named callable (typically an arcpy geoprocessing
    tool) by evaluating its name in the caller's scope, after padding or
    trimming the supplied argument list to the callable's arity.

    NOTE(review): this class relies on eval() of self.m_name resolving in the
    enclosing module scope; the name must refer to a trusted, already-imported
    callable.
    """
    # log status types enums
    const_general_text = 0
    const_warning_text = 1
    const_critical_text = 2
    const_status_text = 3
    # ends

    def __init__(self, name, args, evnt_fnc_update_args = None, log = None):
        # name: dotted/plain name of the callable to invoke via eval().
        # args: positional argument list (mutated in place by init()).
        # evnt_fnc_update_args: optional hook(args, name) that may rewrite
        #   the args or return None to skip the invocation entirely.
        # log: optional callable(msg, msg_type) used instead of print().
        self.m_name = name
        self.m_args = args
        self.m_evnt_update_args = evnt_fnc_update_args
        self.m_log = log

    def _message(self, msg, msg_type):
        # Route to the injected logger when present; fall back to stdout.
        if (self.m_log):
            return self.m_log(msg, msg_type)
        print (msg)

    def init(self):
        """Pad or trim self.m_args to match the target callable's declared
        argument count. Returns False when the target cannot be inspected."""
        try:
            # Number of declared parameters of the target function.
            arg_count = eval('%s.__code__.co_argcount' % (self.m_name))
        except Exception as exp:
            self._message(str(exp), self.const_critical_text)
            return False
        len_args = len(self.m_args)
        if (len_args < arg_count):
            # '#' is the arcpy convention for "use the default value".
            self._message('Args less than required, filling with default (#)', self.const_warning_text)
            for i in range (len_args, arg_count):
                self.m_args.append('#')
        elif (len_args > arg_count):
            self._message ('More args supplied than required to function (%s)' % (self.m_name), self.const_warning_text)
            self.m_args = self.m_args[:arg_count]
        return True

    def invoke(self):
        """Call the target with self.m_args; returns True on success,
        False on failure (the final status is always logged)."""
        result = 'OK'
        try:
            if (self.m_evnt_update_args is not None):
                usr_args = self.m_evnt_update_args(self.m_args, self.m_name)
                if (usr_args is None):      # set to (None) to skip fnc invocation, it's treated as a non-error.
                    return True
                if (usr_args is not None and
                        len(usr_args) == len(self.m_args)):     # user is only able to update the contents, not to trim or expand args.
                    self._message ('Original args may have been updated through custom code.', self.const_warning_text)
                    self.m_args = usr_args
            self._message ('Calling (%s)' % (self.m_name), self.const_general_text)
            ret = eval ('%s(*self.m_args)' % (self.m_name))     # gp-tools return NULL?
            return True
        except Exception as exp:
            result = 'FAILED'
            self._message (str(exp), self.const_critical_text)
            return False
        finally:
            # Runs for both outcomes; reports the final status.
            self._message ('Status: %s' % (result), self.const_general_text)
class Base(object):
    # begin - constants
const_general_text = 0
const_warning_text = 1
const_critical_text = 2
const_status_text = 3
const_cmd_default_text = "#defaults"
const_geodatabase_ext = '.GDB'
const_geodatabase_SDE_ext = '.SDE'
# base init codes. (const_strings)
const_init_ret_version = 'version'
const_init_ret_sde = 'sde'
const_init_ret_patch = 'patch'
# ends
#version specific
const_ver_len = 4
CMAJOR = 0
CMINOR = 1
CSP = 2
CBUILD = 3
CVERSION_ATTRIB = 'version'
# ends
# externally user defined functions specific
CCLASS_NAME = 'UserCode'
CMODULE_NAME = 'MDCS_UC'
# ends
# log status (codes)
CCMD_STATUS_OK = 'OK'
CCMD_STATUS_FAILED = 'Failed!'
# ends
#ends
    def __init__(self):
        # Logger object (set later via setLog); None falls back to print().
        self.m_log = None
        # Parsed MDCS XML configuration (minidom Document); set externally.
        self.m_doc = None
        # the following variables could be overridden by the command-line to replace respective values in XML config file.
        self.m_workspace = ''
        self.m_geodatabase = ''
        self.m_mdName = ''      # mosaic dataset name.
        # ends
        self.m_sources = ''
        self.m_gdbName = ''
        self.m_geoPath = ''
        self.m_config = ''
        self.m_commands = ''
        self.m_sources = ''     # source data paths for adding new rasters.
        self.m_dynamic_params = {}
        # art file update specific variables
        self.m_art_apply_changes = ''
        self.m_art_ws = ''
        self.m_art_ds = ''
        # ends
        # To keep track of the last objectID before any new data items could be added.
        self.m_last_AT_ObjectID = 0     # by default, take in all the previous records for any operation.
        # SDE specific variables
        self.m_IsSDE = False
        self.m_SDE_database_user = ''
        # ends
        # set MDCS code base path (also derives the parameter folder paths)
        self.m_code_base = ''
        self.setCodeBase(os.path.dirname(__file__))
        # ends
        # client_callback_ptrs
        self.m_cli_callback_ptr = None
        self.m_cli_msg_callback_ptr = None
        # ends
    def init(self):     # return (status [true|false], reason)
        """Validate the loaded XML config (version/patch checks) and derive
        workspace, geodatabase and mosaic-dataset settings from it.

        Returns a (bool, reason) tuple on most paths, but a bare False when
        m_doc is unset or the version check raises — callers must handle both.
        """
        if (self.m_doc == None):
            return False
        # version check.
        try:
            # NOTE: 'min'/'max' shadow the builtins within this method.
            min = self.getXMLXPathValue("Application/ArcGISVersion/Product/Min", "Min").split('.')
            max = self.getXMLXPathValue("Application/ArcGISVersion/Product/Max", "Max").split('.')
            if (len(min) == self.const_ver_len):        # version check is disabled if no values have been defined in the MDCS for min and max.
                CMAJOR = 0
                CBUILD = self.const_ver_len
                if (len(max) != self.const_ver_len):
                    max = [0, 0, 0, 0]  # zero up max if max version isn't defined / has errors.
                # Normalize each version component to an int ('' -> 0).
                for n in range(CMAJOR, CBUILD):
                    if (min[n] == ''):
                        min[n] = 0
                    if (max[n] == ''):
                        max[n] = 0
                    min[n] = int(min[n])
                    max[n] = int(max[n])
                if (self.CheckMDCSVersion(min, max) == False):
                    return (False, self.const_init_ret_version)         # version check failed.
        except Exception as inst:
            self.log('Version check failure/' + str(inst), self.const_critical_text)
            return False
        # ends
        # ArcGIS patch test.
        if (self.isArcGISPatched() == False):
            self.log('An ArcGIS patch required to run MDCS is not yet installed. Unable to proceed.', self.const_critical_text)
            return (False, self.const_init_ret_patch)
        # ends
        self.setUserDefinedValues()     # replace user defined dynamic variables in config file with values provided at the command-line.
        # Command-line overrides (m_workspace etc.) win over the XML values.
        if (self.m_workspace == ''):
            self.m_workspace = self.prefixFolderPath(self.getAbsPath(self.getXMLNodeValue(self.m_doc, "WorkspacePath")), self.const_workspace_path_)
        if (self.m_geodatabase == ''):
            self.m_geodatabase = self.getXMLNodeValue(self.m_doc, "Geodatabase")
        if (self.m_mdName == ''):
            self.m_mdName = self.getXMLXPathValue("Application/Workspace/MosaicDataset/Name", "Name")
        const_len_ext = len(self.const_geodatabase_ext)
        ext = self.m_geodatabase[-const_len_ext:].upper()
        if (ext != self.const_geodatabase_ext and
                ext != self.const_geodatabase_SDE_ext):
            self.m_geodatabase += self.const_geodatabase_ext.lower()    # if no extension specified, defaults to '.gdb'
        self.m_gdbName = self.m_geodatabase[:len(self.m_geodatabase) - const_len_ext]   # .gdb
        self.m_geoPath = os.path.join(self.m_workspace, self.m_geodatabase)
        self.m_commands = self.getXMLNodeValue(self.m_doc, "Command")
        if (ext == self.const_geodatabase_SDE_ext):
            self.m_IsSDE = True
            try:
                # SDE connections need the "<database>.<user>." prefix for
                # fully-qualified table names.
                self.log('Reading SDE connection properties from (%s)' % (self.m_geoPath))
                conProperties = arcpy.Describe(self.m_geoPath).connectionProperties
                self.m_SDE_database_user = ('%s.%s.') % (conProperties.database, conProperties.user)
            except Exception as inst:
                self.log(str(inst), self.const_critical_text)
                return (False, self.const_init_ret_sde)
        return (True, 'OK')
def invokeDynamicFnCallback(self, args, fn_name = None):
if (fn_name == None):
return args
fn = fn_name.lower()
if (self.invoke_cli_callback(fn_name, args)):
return args
return None
# cli callback ptrs
def invoke_cli_callback(self, fname, args):
if (not self.m_cli_callback_ptr is None):
return self.m_cli_callback_ptr(fname, args)
return args
def invoke_cli_msg_callback(self, mtype, args):
if (not self.m_cli_msg_callback_ptr is None):
return self.m_cli_msg_callback_ptr(mtype, args)
return args
# ends
def setCodeBase(self, path):
if (os.path.exists(path) == False):
return None
self.m_code_base = path
self.const_statistics_path_ = os.path.join(self.m_code_base, '..\\..\\parameter\\Statistics')
self.const_raster_function_templates_path_ = os.path.join(self.m_code_base, '..\\..\\parameter\\RasterFunctionTemplates')
self.const_raster_type_path_ = os.path.join(self.m_code_base, '..\\..\\parameter\\Rastertype')
self.const_workspace_path_ = os.path.join(self.m_code_base, '..\\..\\') #.gdb output
self.const_import_geometry_features_path_ = os.path.join(self.m_code_base, '..\\..\\parameter')
return self.m_code_base
def getXMLXPathValue(self, xPath, key):
nodes = self.m_doc.getElementsByTagName(key)
for node in nodes:
parents = []
c = node
while(c.parentNode != None):
parents.insert(0, c.nodeName)
c = c.parentNode
p = '/'.join(parents)
if (p == xPath):
if (node.hasChildNodes() == False):
return ''
return str(node.firstChild.data).strip()
return ''
def setLog(self, log):
self.m_log = log
return True
def isLog(self):
return (not self.m_log == None)
def log(self, msg, level = const_general_text):
if (self.m_log != None):
return self.m_log.Message(msg, level)
errorTypeText = 'msg'
if (level > self.const_general_text):
errorTypeText = 'warning'
elif(level == self.const_critical_text):
errorTypeText = 'critical'
print ('log-' + errorTypeText + ': ' + msg)
return True
# user defined functions implementation code
def isUser_Function(self, name):
try:
frame = sys._getframe(0).f_globals
module = frame[self.CMODULE_NAME]
cls = getattr(module, self.CCLASS_NAME)
instance = cls()
fnc = getattr(instance, name)
except:
return False
return True
def invoke_user_function(self, name, data): # MDCS is always passed on which is the MD Configuration Script XML DOM
ret = False
try:
frame = sys._getframe(0).f_globals # default to first stack.
module = frame[self.CMODULE_NAME]
cls = getattr(module, self.CCLASS_NAME)
instance = cls()
fnc = getattr(instance, name)
try:
ret = fnc(data)
except Exception as inf:
self.log('Executing user defined function (%s)' % (name), self.const_critical_text)
self.log(str(inf), self.const_critical_text)
return False
except Exception as inf:
self.log('Error: please check if user function (%s) is found in class (%s) of MDCS_UC module.' % (name, CCLASS_NAME), self.const_critical_text)
self.log(str(inf), self.const_critical_text)
return False
return ret
#ends
    def processEnv(self, node, pos, json):      # support fnc for 'SE' command.
        """Flatten a sibling chain of XML elements (plus one nested level)
        into *json*, a dict keyed by a stringified position.

        Each json[pos] bucket collects parallel 'key'/'val'/'type' lists plus
        the shared 'parent' node name; type 'c' marks a plain child, 'p' a
        node that has nested children processed under a new position key.
        Mutates *json* in place and always returns True.
        """
        while(node.nextSibling != None):
            if(node.nodeType != minidom.Node.TEXT_NODE):
                k = str(pos)
                if ((k in json.keys()) == False):
                    # First element seen at this position: create the bucket.
                    json[k] = {'key' : [], 'val' : [], 'type' : [] }
                json[k]['key'].append(node.nodeName)
                v = ''
                if (node.firstChild != None):
                    v = node.firstChild.nodeValue.strip()
                json[k]['val'].append(v)
                json[k]['parent'] = node.parentNode.nodeName
                json[k]['type'].append('c')
                if (node.firstChild != None):
                    if (node.firstChild.nextSibling != None):
                        # Element has child elements: recurse under a fresh
                        # position key and retag this entry as a parent ('p').
                        pos = len(json)
                        json[k]['type'][len(json[k]['type']) - 1] = 'p'
                        self.processEnv(node.firstChild.nextSibling, pos, json)
                        pos = 0         # defaults to root always, assuming only 1 level deep xml.
            node = node.nextSibling
        return True
def getAbsPath(self, input):
absPath = input
if (os.path.exists(absPath) == True):
absPath = os.path.abspath(input)
return absPath
def prefixFolderPath(self, input, prefix):
_file = input.strip()
_p, _f = os.path.split(_file)
_indx = _p.lower().find('.gdb')
if (_p == '' or _indx >= 0):
if (_indx >= 0):
_f = _p + '\\' + _f
_file = os.path.join(prefix, _f)
return _file
    def isArcGISPatched(self):      # return values [true | false]
        """Return True when the ArcGIS install satisfies the <Patch> node in
        the config: either the installed version exceeds the patch's target
        version, or the named patch is present under the Windows registry's
        ESRI Updates key. Windows/arcpy specific (winreg OpenKey/EnumKey).
        """
        # if we're running on python 3+, it's assumed we're on (ArcGIS Pro) and there's no need to check for patches.
        if (sys.version_info[0] >= 3):
            return True
        # if the patch XML node is not properly formatted in structure/with values, MDCS returns an error and will abort the operation.
        patch_node = self.getXMLNode(self.m_doc, "Patch")
        if (patch_node ==''):
            # No <Patch> requirement in the config: nothing to verify.
            return True
        if (patch_node.attributes.length == 0):
            return False
        if ((self.CVERSION_ATTRIB in patch_node.attributes.keys()) == False):
            return False
        target_ver = patch_node.attributes.getNamedItem(self.CVERSION_ATTRIB).nodeValue.strip()
        if (len(target_ver) == 0):
            return False
        # The patch's <Name> text is used as a substring to search for in the
        # registry's installed-updates entries.
        search_key = ''
        patch_desc_node = patch_node.firstChild.nextSibling
        while (patch_desc_node != None):
            node_name = patch_desc_node.nodeName
            if (node_name == 'Name'):
                if (patch_desc_node.hasChildNodes() == True):
                    search_key = patch_desc_node.firstChild.nodeValue
                break
            patch_desc_node = patch_desc_node.nextSibling.nextSibling
        if (len(search_key) == 0):      # if no patch description could be found, return False
            return False
        # Normalize the target version to 4 int components (pad with zeros).
        ver = (target_ver + '.0.0.0.0').split('.')
        for n in range(self.CMAJOR, self.CBUILD + 1):
            if (ver[n] == ''):
                ver[n] = 0
            ver[n] = int(ver[n])
        ver = ver[:4]       # accept only the first 4 digits.
        # Compare zero-padded concatenations so e.g. 10.2.1 > 10.2.0.4111.
        target_v_str = installed_v_str = ''
        for i in range (self.CMAJOR, self.CBUILD + 1):
            target_v_str += "%04d" % ver[i]
        installed_ver = self.getDesktopVersion()
        for i in range (self.CMAJOR, self.CBUILD + 1):
            installed_v_str += "%04d" % installed_ver[i]
        tVersion = int(target_v_str)
        iVersion = int(installed_v_str)
        if (iVersion > tVersion):       # the first priority is to check for the patch version against the installed version
            return True     # if the installed ArcGIS version is greater than the patch's, it's OK to proceed.
        # if the installed ArcGIS version is lower than the intended target patch version, continue with the registry key check for the
        # possible patches installed.
        # HKEY_LOCAL_MACHINE\SOFTWARE\Wow6432Node\ESRI\Desktop10.2\Updates
        CPRODUCT_NAME = 'ProductName'
        CVERSION = 'Version'
        setupInfo = arcpy.GetInstallInfo()
        if ((CVERSION in setupInfo.keys()) == False or
                (CPRODUCT_NAME in setupInfo.keys()) == False):
            return False
        key = setupInfo[CPRODUCT_NAME] + setupInfo[CVERSION]
        try:
            reg_path = "Software\\Wow6432Node\\ESRI\\%s\\Updates" % (key)
            arcgis = OpenKey(
                HKEY_LOCAL_MACHINE, reg_path)
            i = 0
            while 1:
                # Iterate installed-update subkeys until EnumKey raises.
                name = EnumKey(arcgis, i)
                arcgis_sub = OpenKey(
                    HKEY_LOCAL_MACHINE, reg_path + '\\' + name)
                try:
                    value, type = QueryValueEx(arcgis_sub, "Name")
                    if (type == 1):     # reg_sz
                        if (value.lower().find(search_key.lower()) >= 0):
                            return True     # return true if the value is found!
                except:
                    pass
                i += 1
        except:
            # EnumKey exhausted or registry path missing: patch not found.
            pass
        return False
def getDesktopVersion(self):
    """Return the installed ArcGIS Desktop version as a list of ints.

    Normally ``[major, minor, sp, build]``; when the install already
    reports a three-part version string (major, minor, revision/SP) the
    SP number is not appended separately, giving
    ``[major, minor, revision, build]``.
    """
    d = arcpy.GetInstallInfo()
    version = []
    buildNumber = 0
    spNumber = 0
    CVERSION = 'version'
    CBUILDNUMBER = 'buildnumber'
    CSPNUMBER = 'spnumber'
    for k in d:
        key = k.lower()
        if key not in (CVERSION, CBUILDNUMBER, CSPNUMBER):
            continue
        try:
            if key == CVERSION:
                # e.g. '10.2.1' -> [10, 2, 1]
                version.extend(int(x) for x in d[k].split('.'))
            elif key == CBUILDNUMBER:
                buildNumber = int(d[k])
            elif key == CSPNUMBER:
                spNumber = int(d[k])  # could be 'N/A'
        except (ValueError, TypeError):
            # Non-numeric pieces (e.g. SP == 'N/A') are skipped and the
            # zero defaults apply. (The previous code set an unused
            # 'ValError' flag here.)
            pass
    # On a full install the version string may already carry
    # (major, minor, revision/SP); only add the SP number when missing.
    CMAJOR_MINOR_REVISION = 3
    if len(version) < CMAJOR_MINOR_REVISION:
        version.append(spNumber)
    version.append(buildNumber)
    return version
def CheckMDCSVersion(self, min, max, print_err_msg=True):
    """Return True when the installed ArcGIS Desktop version lies within
    the [min, max] bounds.

    min/max are lists of const_ver_len ints (major, minor, sp, build).
    A max component of 0 disables that upper-bound check; a min build of
    0 disables the lower build check.

    NOTE(review): 'min'/'max' shadow the builtins but are kept for caller
    compatibility. The lower-bound test compares components independently
    (elif chain) rather than lexicographically — e.g. installed 11.1
    against min 10.2 would fail on the minor digit. Presumably acceptable
    for the 10.x-only era this targets; confirm before reuse.
    """
    # if python version is >= 3, it's assumed we're being run from ArcGIS Pro,
    # where any version is accepted (min 1.0.0.0, no upper bound).
    if (sys.version_info[0] >= 3):
        min = [1, 0, 0, 0]
        max = [0, 0, 0, 0]
    if (len(min) != self.const_ver_len or
            len(max) != self.const_ver_len):
        return False
    # component indices within the version lists
    CMAJOR = 0
    CMINOR = 1
    CSP = 2
    CBUILD = 3
    min_major = min[CMAJOR]
    min_minor = min[CMINOR]
    min_sp = min[CSP]
    min_build = min[CBUILD]
    max_major = max[CMAJOR]
    max_minor = max[CMINOR]
    max_cp = max[CSP]
    max_build = max[CBUILD]
    try:
        version = self.getDesktopVersion()
        # NOTE: if fewer than const_ver_len components are reported, the
        # check is silently skipped and True is returned below.
        if (len(version) >= self.const_ver_len):  # major, minor, sp, build
            inst_major = version[CMAJOR]
            inst_minor = version[CMINOR]
            inst_sp = version[CSP]
            inst_build = version[CBUILD]
            ver_failed = False
            # Upper bounds first (0 == unbounded), then lower bounds.
            if (max_major > 0 and
                    inst_major > max_major):
                ver_failed = True
            elif (max_minor > 0 and
                    inst_minor > max_minor):
                ver_failed = True
            elif (max_cp > 0 and
                    inst_sp > max_cp):
                ver_failed = True
            elif (max_build > 0 and
                    inst_build > max_build):
                ver_failed = True
            elif (inst_major < min_major):
                ver_failed = True
            elif (inst_minor < min_minor):
                ver_failed = True
            elif (inst_sp < min_sp):
                ver_failed = True
            elif (min_build > 0 and
                    inst_build < min_build):
                ver_failed = True
            if (ver_failed):
                if (print_err_msg == True):
                    self.log('MDCS can\'t proceed due to ArcGIS version incompatiblity.', self.const_critical_text)
                    self.log('ArcGIS Desktop version is (%s.%s.%s.%s). MDCS min and max versions are (%s.%s.%s.%s) and (%s.%s.%s.%s) respectively.' % \
                        (inst_major, inst_minor, inst_sp, inst_build, min_major, min_minor, min_sp, min_build, max_major, max_minor, max_cp, max_build), self.const_critical_text)
                return False
    except Exception as inst:
        self.log('Version check failed: (%s)' % (str(inst)), self.const_critical_text)
        return False
    return True
def getXMLNodeValue(self, doc, nodeName):
    """Return the text content of the first <nodeName> element in *doc*.

    Returns '' when doc is None, no such element exists, the element has
    no children, or its first child is not a text node.
    """
    if doc is None:
        return ''
    matches = doc.getElementsByTagName(nodeName)
    if matches is None or matches.length == 0:
        return ''
    first = matches[0]
    if not first.hasChildNodes():
        return ''
    if first.firstChild.nodeType != minidom.Node.TEXT_NODE:
        return ''
    return first.firstChild.data
def updateART(self, doc, workspace, dataset):
    """Update a raster-type (ART) XML document in place with a new
    workspace path and/or raster dataset name.

    Returns True on success, False when inputs are unusable or a DOM
    error occurs. Passing both arguments empty is a no-op -> False.
    """
    if (doc == None):
        return False
    if (workspace.strip() == ''
            and dataset.strip() == ''):
        return False  # nothing to do.
    try:
        # <NameString> holds 'key=value;key=value' pairs; patch the
        # Workspace / RasterDataset entries and mirror the new values into
        # the neighbouring <PathName>/<Name> elements.
        nodeName = 'NameString'
        node_list = doc.getElementsByTagName(nodeName)
        for node in node_list:
            if (node.hasChildNodes() == True):
                vals = node.firstChild.nodeValue.split(';')
                upd_buff = []
                for v in vals:
                    vs = v.split('=')
                    for vs_ in vs:
                        vs_ = vs_.lower()
                        # NOTE(review): find(...) > 0 (not >= 0) relies on
                        # the key text never starting at index 0 — the keys
                        # appear to carry a leading space after the ';'
                        # split; confirm against real ART files.
                        if (vs_.find('workspace') > 0):
                            if (workspace != ''):
                                vs[1] = ' ' + workspace
                                # Sibling hop by two skips the whitespace
                                # text node of pretty-printed XML.
                                if (node.nextSibling != None):
                                    if (node.nextSibling.nextSibling.nodeName == 'PathName'):
                                        node.nextSibling.nextSibling.firstChild.nodeValue = workspace
                        elif (vs_.find('rasterdataset') > 0):
                            if (dataset != ''):
                                vs[1] = ' ' + dataset
                                if (node.previousSibling != None):
                                    if (node.previousSibling.previousSibling.nodeName == 'Name'):
                                        node.previousSibling.previousSibling.firstChild.nodeValue = dataset
                    upd_buff.append('='.join(vs))
                if (len(upd_buff) > 0):
                    upd_nodeValue = ';'.join(upd_buff)
                    node.firstChild.nodeValue = upd_nodeValue
        # <ConnectionProperties>: set the value of the 'Database' key/value
        # pair to the new workspace.
        nodeName = 'ConnectionProperties'
        node_list = doc.getElementsByTagName(nodeName)
        found = False
        for node in node_list:  # only one node should exist.
            for n in node.firstChild.nextSibling.childNodes:
                if (n.nextSibling != None):
                    if (n.nextSibling.firstChild != None):
                        if (n.nextSibling.firstChild.nextSibling.nodeName.lower() == 'key'):
                            if (n.nextSibling.firstChild.nextSibling.firstChild.nodeValue.lower() == 'database'):
                                n.nextSibling.firstChild.nextSibling.nextSibling.nextSibling.firstChild.nodeValue = workspace
                                found = True
                                break;
            if (found == True):
                break
    except Exception as inst:
        self.log(str(inst), self.const_critical_text)
        return False
    return True
def getInternalPropValue(self, dic, key):
    """Return dic[key] when present, else '' (never raises KeyError)."""
    # dict.get replaces the previous 'key in dic.keys()' double lookup.
    return dic.get(key, '')
def setUserDefinedValues(self):
    """Expand $KEY$ placeholders in every text node of the config DOM.

    Replacement values come from self.m_dynamic_params (keyed upper-case).
    Node-value syntax: optional 'default;' prefix before the payload;
    '\\$' escapes a literal dollar sign; an unknown key that makes up the
    whole value falls back to the default.
    """
    nodes = self.m_doc.getElementsByTagName('*')
    for node in nodes:
        if (node.firstChild != None):
            v = node.firstChild.data.strip()
            if (v.find('$') == -1):  # nothing to substitute
                continue
            usr_key = v
            default = ''
            # 'default;$KEY$' form: text before the first ';' is the fallback.
            d = v.split(';')
            if (len(d) > 1):
                default = d[0].strip()
                usr_key = d[1].strip()
            revalue = []
            # Scan for $...$ delimited tokens: 'first' indexes just past the
            # opening '$', 'second' indexes the closing '$'.
            first = usr_key.find('$')
            first += 1
            second = first + usr_key[first + 1:].find('$') + 1
            if (first > 1):
                # keep any literal text preceding the first '$'
                revalue.append(usr_key[0:first - 1])
            while (second >= 0):
                uValue = usr_key[first:second]
                if (uValue.upper() in self.m_dynamic_params.keys()):
                    revalue.append(self.m_dynamic_params[uValue.upper()])
                else:
                    if (uValue.find('\$') >= 0):
                        # backslash-escaped '\$' becomes a literal '$'
                        uValue = uValue.replace('\$', '$')
                    else:
                        if (default == ''):
                            default = uValue
                        if (first == 1
                                and second == (len(usr_key) - 1)):
                            # whole value is a single unknown $KEY$ -> default
                            uValue = default
                    revalue.append(uValue)
                first = second + 1
                indx = usr_key[first + 1:].find('$')
                if (indx == -1):
                    if (first != len(usr_key)):
                        # trailing literal text after the last '$'
                        revalue.append(usr_key[first:len(usr_key)])
                    break
                second = first + indx + 1
            updateVal = ''.join(revalue)
            node.firstChild.data = updateVal
def getXMLNode(self, doc, nodeName):
    """Return the first <nodeName> element of *doc* (the node itself,
    not its text).

    Returns '' when doc is None, no such element exists, the element has
    no children, or its first child is not a text node.
    """
    if doc is None:
        return ''
    matches = doc.getElementsByTagName(nodeName)
    if matches is None or matches.length == 0:
        return ''
    first = matches[0]
    if not first.hasChildNodes():
        return ''
    if first.firstChild.nodeType != minidom.Node.TEXT_NODE:
        return ''
    return first
def foundLockFiles(self, folder_path):
    """Return True if *folder_path* contains a '.lock' file owned by the
    current process (the pid is the 4th dot-separated token of the name).
    """
    current_pid = os.getpid()
    for name in os.listdir(folder_path):
        if not name.lower().endswith('.lock'):
            continue
        tokens = name.split('.')
        if current_pid == int(tokens[3]):  # token index 3 == process id
            return True
    return False
def waitForLockRelease(self, folder_path_):
    """Wait (bounded) for this process' .lock files in *folder_path_* to
    disappear.

    Returns: seconds waited (0 when the first check already passes),
    -1 on timeout, -2 when the path does not exist.

    NOTE(review): this busy-waits without sleeping between checks;
    consider time.sleep() if CPU usage matters.
    """
    if (os.path.exists(folder_path_) == False):
        self.log('lock file path does not exist!. Quitting...', self.const_critical_text)
        return -2  # path does not exist error code!
    t0 = datetime.now()
    duration_req_sec_ = 3        # re-check interval in seconds
    max_time_to_wait_sec_ = 10   # give up after this many seconds
    tot_count_sec_ = 0
    while True:
        if (tot_count_sec_ == 0):
            # try to see if we could get lucky on the first try, else enter periodic check.
            if (self.foundLockFiles(folder_path_) == False):
                break;
        t1 = datetime.now() - t0
        if (t1.seconds > duration_req_sec_):
            if (self.foundLockFiles(folder_path_) == False):
                break;
            tot_count_sec_ += duration_req_sec_
            if (tot_count_sec_ > max_time_to_wait_sec_):
                self.log('lock file release timed out!. Quitting...', self.const_critical_text)
                tot_count_sec_ = -1
                break;
            t0 = datetime.now()  # restart the interval timer
    return tot_count_sec_
| |
from collections import namedtuple
from decimal import Decimal as D
from . import availability, prices
# A container bundling the three policies a strategy resolves for a product.
PurchaseInfo = namedtuple('PurchaseInfo', 'price availability stockrecord')
class Selector(object):
    """
    Responsible for returning the appropriate strategy class for a given
    user/session.

    Three calling conventions are supported:

    #) request + user — determining prices/availability for a normal
       user browsing the site.
    #) user only — offline processes that have no request instance but
       know which user to price for.
    #) nothing — offline processes tied to no particular user, e.g.
       determining a price to store in a search index.
    """

    def strategy(self, request=None, user=None, **kwargs):
        """
        Return an instantiated strategy instance.
        """
        # Backwards-compatible default: pick the first stockrecord and
        # charge zero tax.
        return Default(request)
class Base(object):
    """
    The base strategy class.

    Given a product, a strategy is responsible for returning a
    ``PurchaseInfo`` instance which bundles:

    - the stockrecord chosen for this customer
    - a pricing policy instance
    - an availability policy instance
    """

    def __init__(self, request=None):
        self.request = request
        # Only remember the user when the request carries an
        # authenticated one.
        self.user = None
        if request and request.user.is_authenticated():
            self.user = request.user

    def fetch_for_product(self, product, stockrecord=None):
        """
        Return a ``PurchaseInfo`` instance for the given product.

        ``PurchaseInfo`` is a named tuple with attributes ``price``
        (pricing policy), ``availability`` (availability policy) and
        ``stockrecord`` (the record being used). When a stockrecord is
        supplied, the info is computed against that record.
        """
        raise NotImplementedError(
            "A strategy class must define a fetch_for_product method "
            "for returning the availability and pricing "
            "information."
        )

    def fetch_for_parent(self, product):
        """
        Given a parent product, fetch a ``StockInfo`` instance.
        """
        raise NotImplementedError(
            "A strategy class must define a fetch_for_parent method "
            "for returning the availability and pricing "
            "information."
        )

    def fetch_for_line(self, line, stockrecord=None):
        """
        Return a ``PurchaseInfo`` instance for a basket line.

        Exists so that projects can factor basket-line attributes (e.g.
        the SKUs of a "bundle" product's contents) into availability.
        Oscar itself doesn't know what to do with line options, so this
        default simply delegates to ``fetch_for_product``.
        """
        return self.fetch_for_product(line.product)
class Structured(Base):
    """
    A strategy class which breaks the ``PurchaseInfo`` computation into
    three separately overridable steps:

    #) stockrecord selection
    #) pricing policy
    #) availability policy
    """

    def fetch_for_product(self, product, stockrecord=None):
        """
        Assemble and return the ``PurchaseInfo`` instance.

        Not intended to be overridden.
        """
        record = stockrecord
        if record is None:
            record = self.select_stockrecord(product)
        return PurchaseInfo(
            price=self.pricing_policy(product, record),
            availability=self.availability_policy(product, record),
            stockrecord=record)

    def fetch_for_parent(self, product):
        # A parent's purchase info derives from its children's records.
        children_stock = self.select_children_stockrecords(product)
        return PurchaseInfo(
            price=self.parent_pricing_policy(product, children_stock),
            availability=self.parent_availability_policy(
                product, children_stock),
            stockrecord=None)

    def select_stockrecord(self, product):
        """
        Pick the stockrecord to use for the given product.
        """
        raise NotImplementedError(
            "A structured strategy class must define a "
            "'select_stockrecord' method")

    def select_children_stockrecords(self, product):
        """
        Return (child, stockrecord) tuples for all children of a product.
        """
        return [(child, self.select_stockrecord(child))
                for child in product.children.all()]

    def pricing_policy(self, product, stockrecord):
        """
        Return the pricing policy for the given product/stockrecord.
        """
        raise NotImplementedError(
            "A structured strategy class must define a "
            "'pricing_policy' method")

    def parent_pricing_policy(self, product, children_stock):
        raise NotImplementedError(
            "A structured strategy class must define a "
            "'parent_pricing_policy' method")

    def availability_policy(self, product, stockrecord):
        """
        Return the availability policy for the given product/stockrecord.
        """
        raise NotImplementedError(
            "A structured strategy class must define a "
            "'availability_policy' method")

    def parent_availability_policy(self, product, children_stock):
        raise NotImplementedError(
            "A structured strategy class must define a "
            "'parent_availability_policy' method")
# Mixins - these can be used to construct the appropriate strategy class
class UseFirstStockRecord(object):
    """
    Stockrecord selection mixin for use with the ``Structured`` base strategy.

    Picks the first (normally the only) stockrecord to fulfil a product —
    backwards compatible with Oscar<0.6, where only one stockrecord per
    product was permitted.
    """

    def select_stockrecord(self, product):
        stockrecords = product.stockrecords.all()
        try:
            return stockrecords[0]
        except IndexError:
            return None
class StockRequired(object):
    """
    Availability policy mixin for use with the ``Structured`` base strategy.

    Permits purchase only when stock is available, for product classes
    that track stock at all.
    """

    def availability_policy(self, product, stockrecord):
        if not stockrecord:
            return availability.Unavailable()
        if product.get_product_class().track_stock:
            return availability.StockRequired(
                stockrecord.net_stock_level)
        return availability.Available()

    def parent_availability_policy(self, product, children_stock):
        # A parent product is available as soon as any child is.
        child_available = any(
            self.availability_policy(product, stockrecord).is_available_to_buy
            for _, stockrecord in children_stock)
        if child_available:
            return availability.Available()
        return availability.Unavailable()
class NoTax(object):
    """
    Pricing policy mixin for use with the ``Structured`` base strategy.

    Charges zero tax and takes the price from the stockrecord's
    ``price_excl_tax``.
    """

    def pricing_policy(self, product, stockrecord):
        # Without a stockrecord (or a price on it) there is no price.
        if not stockrecord or stockrecord.price_excl_tax is None:
            return prices.Unavailable()
        return prices.FixedPrice(
            currency=stockrecord.price_currency,
            excl_tax=stockrecord.price_excl_tax,
            tax=D('0.00'))

    def parent_pricing_policy(self, product, children_stock):
        records = [stockrecord for _, stockrecord in children_stock
                   if stockrecord is not None]
        if not records:
            return prices.Unavailable()
        # The price is taken from the first child's record.
        first = records[0]
        return prices.FixedPrice(
            currency=first.price_currency,
            excl_tax=first.price_excl_tax,
            tax=D('0.00'))
class FixedRateTax(object):
    """
    Pricing policy mixin for use with the ``Structured`` base strategy. This
    mixin applies a fixed rate tax to the base price from the product's
    stockrecord. The price_incl_tax is quantized to two decimal places.
    Rounding behaviour is Decimal's default.
    """
    rate = D('0.08875')  # Subclass and specify the correct rate
    exponent = D('0.01')  # Default to two decimal places

    def pricing_policy(self, product, stockrecord):
        if not stockrecord:
            return prices.Unavailable()
        rate = self.get_rate(product, stockrecord)
        exponent = self.get_exponent(stockrecord)
        tax = (stockrecord.price_excl_tax * rate).quantize(exponent)
        return prices.TaxInclusiveFixedPrice(
            currency=stockrecord.price_currency,
            excl_tax=stockrecord.price_excl_tax,
            tax=tax)

    def parent_pricing_policy(self, product, children_stock):
        stockrecords = [x[1] for x in children_stock if x[1] is not None]
        if not stockrecords:
            return prices.Unavailable()
        # We take price from first record
        stockrecord = stockrecords[0]
        rate = self.get_rate(product, stockrecord)
        exponent = self.get_exponent(stockrecord)
        tax = (stockrecord.price_excl_tax * rate).quantize(exponent)
        # BUG FIX: this used to return prices.FixedPrice even though the
        # tax is known (computed above), while pricing_policy() returns
        # TaxInclusiveFixedPrice for the identical situation. Use the
        # tax-inclusive class for consistency.
        return prices.TaxInclusiveFixedPrice(
            currency=stockrecord.price_currency,
            excl_tax=stockrecord.price_excl_tax,
            tax=tax)

    def get_rate(self, product, stockrecord):
        """
        Hook for plugging in support for varying tax rates based on the
        product.

        TODO: Needs tests.
        """
        return self.rate

    def get_exponent(self, stockrecord):
        """
        Hook for plugging in support for a varying exponent based on the
        currency.

        TODO: Needs tests.
        """
        return self.exponent
class DeferredTax(object):
    """
    Pricing policy mixin for use with the ``Structured`` base strategy.

    Leaves the product tax unspecified — suitable for territories where
    tax isn't known until late in the checkout process.
    """

    def pricing_policy(self, product, stockrecord):
        if not stockrecord:
            return prices.Unavailable()
        return prices.FixedPrice(
            currency=stockrecord.price_currency,
            excl_tax=stockrecord.price_excl_tax)

    def parent_pricing_policy(self, product, children_stock):
        records = [stockrecord for _, stockrecord in children_stock
                   if stockrecord is not None]
        if not records:
            return prices.Unavailable()
        # The price is taken from the first child's record.
        first = records[0]
        return prices.FixedPrice(
            currency=first.price_currency,
            excl_tax=first.price_excl_tax)
# Example strategy composed of above mixins. For real projects, it's likely
# you'll want to use a different pricing mixin as you'll probably want to
# charge tax!
class Default(UseFirstStockRecord, StockRequired, NoTax, Structured):
    """
    Default stock/price strategy that uses the first found stockrecord for a
    product, ensures that stock is available (unless the product class
    indicates that we don't need to track stock) and charges zero tax.
    """
    # No body needed: all behaviour comes from the mixins.
class UK(UseFirstStockRecord, StockRequired, FixedRateTax, Structured):
    """
    Sample strategy for the UK that:

    - uses the first stockrecord for each product (effectively assuming
      there is only one).
    - requires that a product has stock available to be bought
    - applies a fixed rate of tax on all products

    This is just a sample strategy used for internal development. It is not
    recommended to be used in production, especially as the tax rate is
    hard-coded.
    """
    # Use UK VAT rate (as of December 2013); consumed by FixedRateTax.
    rate = D('0.20')
class US(UseFirstStockRecord, StockRequired, DeferredTax, Structured):
    """
    Sample strategy for the US.

    - uses the first stockrecord for each product (effectively assuming
      there is only one).
    - requires that a product has stock available to be bought
    - doesn't apply a tax to product prices (normally this will be done
      after the shipping address is entered).

    This is just a sample one used for internal development. It is not
    recommended to be used in production.
    """
    # BUG FIX: the docstring used to sit *after* this assignment, which made
    # it a dead string expression (US.__doc__ was None). 'rate' is unused by
    # the DeferredTax mixin but is kept for backwards compatibility.
    rate = D('0.0875')
| |
# pylint: disable=bad-indentation,bad-whitespace,dangerous-default-value,invalid-name,missing-docstring,old-style-class,pointless-statement,redefined-builtin,redefined-outer-name,reimported,unused-argument,g-bad-import-order,g-doc-args,g-doc-exception,g-doc-return-or-yield,g-import-not-at-top,g-inconsistent-quotes,g-inline-comment-too-close,g-no-space-after-comment,g-wrong-blank-lines
"""Generic (shallow and deep) copying operations.
Interface summary:
import copy
x = copy.copy(y) # make a shallow copy of y
x = copy.deepcopy(y) # make a deep copy of y
For module specific errors, copy.Error is raised.
The difference between shallow and deep copying is only relevant for
compound objects (objects that contain other objects, like lists or
class instances).
- A shallow copy constructs a new compound object and then (to the
extent possible) inserts *the same objects* into it that the
original contains.
- A deep copy constructs a new compound object and then, recursively,
inserts *copies* into it of the objects found in the original.
Two problems often exist with deep copy operations that don't exist
with shallow copy operations:
a) recursive objects (compound objects that, directly or indirectly,
contain a reference to themselves) may cause a recursive loop
b) because deep copy copies *everything* it may copy too much, e.g.
administrative data structures that should be shared even between
copies
Python's deep copy operation avoids these problems by:
a) keeping a table of objects already copied during the current
copying pass
b) letting user-defined classes override the copying operation or the
set of components copied
This version does not copy types like module, class, function, method,
nor stack trace, stack frame, nor file, socket, window, nor array, nor
any similar types.
Classes can use the same interfaces to control copying that they use
to control pickling: they can define methods called __getinitargs__(),
__getstate__() and __setstate__(). See the documentation for module
"pickle" for information on these methods.
"""
import types
import weakref
from copy_reg import dispatch_table
class Error(Exception):
    """Raised for module-specific copy failures."""
error = Error # backward compatibility
# Jython exposes its string-keyed map type as PyStringMap; on CPython the
# import fails and the dispatch tables simply skip that type.
try:
    from org.python.core import PyStringMap
except ImportError:
    PyStringMap = None
__all__ = ["Error", "copy", "deepcopy"]
def copy(x):
    """Shallow copy operation on arbitrary Python objects.

    See the module's __doc__ string for more info.
    """
    cls = type(x)

    # 1. Type-specific copier registered in the module dispatch table.
    copier = _copy_dispatch.get(cls)
    if copier:
        return copier(x)

    # 2. A __copy__ hook defined on the class.
    copier = getattr(cls, "__copy__", None)
    if copier:
        return copier(x)

    # 3. Fall back to the pickle reduction protocol, trying the
    # copy_reg dispatch table, then __reduce_ex__, then __reduce__.
    reductor = dispatch_table.get(cls)
    if reductor:
        rv = reductor(x)
    else:
        for attr, call_args in (("__reduce_ex__", (2,)), ("__reduce__", ())):
            reductor = getattr(x, attr, None)
            if reductor:
                rv = reductor(*call_args)
                break
        else:
            raise Error("un(shallow)copyable object of type %s" % cls)

    return _reconstruct(x, rv, 0)
# Registry mapping a type to its shallow-copy function; 'd' is a short-lived
# alias used only while the table is being populated (deleted below).
_copy_dispatch = d = {}

def _copy_immutable(x):
    # Immutable objects are their own shallow copy.
    return x
# NOTE: Python 2 only — long, xrange, types.ClassType do not exist on 3.x.
for t in (type(None), int, long, float, bool, str, tuple,
          frozenset, type, xrange, types.ClassType,
          types.BuiltinFunctionType, type(Ellipsis),
          types.FunctionType, weakref.ref):
    d[t] = _copy_immutable
for name in ("ComplexType", "UnicodeType", "CodeType"):
    # These types can be absent depending on the build configuration.
    t = getattr(types, name, None)
    if t is not None:
        d[t] = _copy_immutable

def _copy_with_constructor(x):
    # Containers whose constructor accepts an instance of the same type.
    return type(x)(x)
for t in (list, dict, set):
    d[t] = _copy_with_constructor

def _copy_with_copy_method(x):
    return x.copy()
if PyStringMap is not None:
    d[PyStringMap] = _copy_with_copy_method

def _copy_inst(x):
    # Shallow-copy an old-style class instance, honouring the pickle-style
    # hooks __copy__, __getinitargs__, __getstate__ and __setstate__.
    if hasattr(x, '__copy__'):
        return x.__copy__()
    if hasattr(x, '__getinitargs__'):
        args = x.__getinitargs__()
        y = x.__class__(*args)
    else:
        # Create an empty instance without running __init__.
        y = _EmptyClass()
        y.__class__ = x.__class__
    if hasattr(x, '__getstate__'):
        state = x.__getstate__()
    else:
        state = x.__dict__
    if hasattr(y, '__setstate__'):
        y.__setstate__(state)
    else:
        y.__dict__.update(state)
    return y
d[types.InstanceType] = _copy_inst

del d
def deepcopy(x, memo=None, _nil=[]):
    """Deep copy operation on arbitrary Python objects.

    See the module's __doc__ string for more info.
    """
    if memo is None:
        memo = {}
    d = id(x)
    # _nil is a unique sentinel so that values of None stored in memo are
    # distinguishable from "not present".
    y = memo.get(d, _nil)
    if y is not _nil:
        return y
    cls = type(x)
    copier = _deepcopy_dispatch.get(cls)
    if copier:
        y = copier(x, memo)
    else:
        try:
            issc = issubclass(cls, type)
        except TypeError: # cls is not a class (old Boost; see SF #502085)
            issc = 0
        if issc:
            # Classes (instances of a metaclass) are treated as atomic.
            y = _deepcopy_atomic(x, memo)
        else:
            copier = getattr(x, "__deepcopy__", None)
            if copier:
                y = copier(memo)
            else:
                # Fall back to the pickle reduction protocol:
                # copy_reg table, then __reduce_ex__, then __reduce__.
                reductor = dispatch_table.get(cls)
                if reductor:
                    rv = reductor(x)
                else:
                    reductor = getattr(x, "__reduce_ex__", None)
                    if reductor:
                        rv = reductor(2)
                    else:
                        reductor = getattr(x, "__reduce__", None)
                        if reductor:
                            rv = reductor()
                        else:
                            raise Error(
                                "un(deep)copyable object of type %s" % cls)
                y = _reconstruct(x, rv, 1, memo)
    memo[d] = y
    _keep_alive(x, memo) # Make sure x lives at least as long as d
    return y
# Registry mapping a type to its deep-copy function; 'd' is a short-lived
# alias used while the table is populated (deleted further below).
_deepcopy_dispatch = d = {}

def _deepcopy_atomic(x, memo):
    # Atomic/immutable values are their own deep copy.
    return x
d[type(None)] = _deepcopy_atomic
d[type(Ellipsis)] = _deepcopy_atomic
d[int] = _deepcopy_atomic
d[long] = _deepcopy_atomic        # Python 2 only
d[float] = _deepcopy_atomic
d[bool] = _deepcopy_atomic
try:
    # complex may be compiled out of some builds
    d[complex] = _deepcopy_atomic
except NameError:
    pass
d[str] = _deepcopy_atomic
try:
    # Python 2 only; absent on builds without unicode
    d[unicode] = _deepcopy_atomic
except NameError:
    pass
try:
    d[types.CodeType] = _deepcopy_atomic
except AttributeError:
    pass
d[type] = _deepcopy_atomic
d[xrange] = _deepcopy_atomic      # Python 2 only
d[types.ClassType] = _deepcopy_atomic
d[types.BuiltinFunctionType] = _deepcopy_atomic
d[types.FunctionType] = _deepcopy_atomic
d[weakref.ref] = _deepcopy_atomic
def _deepcopy_list(x, memo):
    """Deep-copy a list, registering the (still empty) copy in memo
    *before* recursing so that self-referential lists terminate."""
    result = []
    memo[id(x)] = result
    for item in x:
        result.append(deepcopy(item, memo))
    return result
d[list] = _deepcopy_list
def _deepcopy_tuple(x, memo):
    # Copy the elements first; the recursion may itself populate
    # memo[id(x)] when the tuple (indirectly) contains itself.
    y = []
    for a in x:
        y.append(deepcopy(a, memo))
    d = id(x)
    try:
        # Recursion already produced the final tuple — reuse it.
        return memo[d]
    except KeyError:
        pass
    # If every element kept its identity, the tuple is effectively atomic
    # and the original can be reused; otherwise freeze the copied list.
    for i in range(len(x)):
        if x[i] is not y[i]:
            y = tuple(y)
            break
    else:
        y = x
    memo[d] = y
    return y
d[tuple] = _deepcopy_tuple
def _deepcopy_dict(x, memo):
    y = {}
    # Register before recursing so cyclic references terminate.
    memo[id(x)] = y
    for key, value in x.iteritems():  # Python 2 dict API
        y[deepcopy(key, memo)] = deepcopy(value, memo)
    return y
d[dict] = _deepcopy_dict
if PyStringMap is not None:
    # Jython's string-keyed map copies like a regular dict.
    d[PyStringMap] = _deepcopy_dict
def _deepcopy_method(x, memo): # Copy instance methods
    # Python 2 bound-method attributes: im_func / im_self / im_class.
    # Only the bound instance (im_self) is deep-copied.
    return type(x)(x.im_func, deepcopy(x.im_self, memo), x.im_class)
_deepcopy_dispatch[types.MethodType] = _deepcopy_method
def _keep_alive(x, memo):
    """Keep a reference to *x* alive for the lifetime of *memo*.

    Objects are memoised by id(), so possibly-temporary objects must be
    anchored somewhere or their ids could be recycled. The anchor list is
    stored under the memo's own id — a key nothing else should use,
    unless someone tries to deepcopy the memo itself...
    """
    memo.setdefault(id(memo), []).append(x)
def _deepcopy_inst(x, memo):
    # Deep-copy an old-style (Python 2) class instance, honouring the
    # __deepcopy__/__getinitargs__/__getstate__/__setstate__ hooks.
    if hasattr(x, '__deepcopy__'):
        return x.__deepcopy__(memo)
    if hasattr(x, '__getinitargs__'):
        args = x.__getinitargs__()
        args = deepcopy(args, memo)
        y = x.__class__(*args)
    else:
        # Create an empty instance without running __init__.
        y = _EmptyClass()
        y.__class__ = x.__class__
    # Register before copying the state so cyclic references terminate.
    memo[id(x)] = y
    if hasattr(x, '__getstate__'):
        state = x.__getstate__()
    else:
        state = x.__dict__
    state = deepcopy(state, memo)
    if hasattr(y, '__setstate__'):
        y.__setstate__(state)
    else:
        y.__dict__.update(state)
    return y
d[types.InstanceType] = _deepcopy_inst
def _reconstruct(x, info, deep, memo=None):
    """Rebuild a copy of x from a pickle-style reduce value.

    info is either a string (atomic: return x itself) or a tuple
    ``(callable, args[, state[, listiter[, dictiter]]])`` as produced by
    __reduce__/__reduce_ex__; *deep* selects deep vs. shallow treatment
    of args, state and items.
    """
    if isinstance(info, str):
        return x
    assert isinstance(info, tuple)
    if memo is None:
        memo = {}
    n = len(info)
    assert n in (2, 3, 4, 5)
    callable, args = info[:2]  # NOTE(review): shadows the builtin 'callable'
    if n > 2:
        state = info[2]
    else:
        state = {}
    if n > 3:
        listiter = info[3]
    else:
        listiter = None
    if n > 4:
        dictiter = info[4]
    else:
        dictiter = None
    if deep:
        args = deepcopy(args, memo)
    y = callable(*args)
    # Register before restoring state so self-references resolve to y.
    memo[id(x)] = y
    if state:
        if deep:
            state = deepcopy(state, memo)
        if hasattr(y, '__setstate__'):
            y.__setstate__(state)
        else:
            # A 2-tuple state is (instance __dict__, slot values).
            if isinstance(state, tuple) and len(state) == 2:
                state, slotstate = state
            else:
                slotstate = None
            if state is not None:
                y.__dict__.update(state)
            if slotstate is not None:
                for key, value in slotstate.iteritems():  # Python 2 API
                    setattr(y, key, value)
    # Sequence-like objects re-append their items...
    if listiter is not None:
        for item in listiter:
            if deep:
                item = deepcopy(item, memo)
            y.append(item)
    # ...and mapping-like objects re-assign their pairs.
    if dictiter is not None:
        for key, value in dictiter:
            if deep:
                key = deepcopy(key, memo)
                value = deepcopy(value, memo)
            y[key] = value
    return y
del d      # drop the temporary alias of _deepcopy_dispatch
del types  # no longer needed at module level

# Helper for instance creation without calling __init__
class _EmptyClass:
    pass
def _test():
    # Ad-hoc smoke test. Python 2 only: long literals (2L), print
    # statements, map() returning a list, and the 'repr' module.
    l = [None, 1, 2L, 3.14, 'xyzzy', (1, 2L), [3.14, 'abc'],
         {'abc': 'ABC'}, (), [], {}]
    l1 = copy(l)
    print l1==l
    l1 = map(copy, l)
    print l1==l
    l1 = deepcopy(l)
    print l1==l
    class C:
        # Exercises the __getstate__/__setstate__/__deepcopy__ hooks.
        def __init__(self, arg=None):
            self.a = 1
            self.arg = arg
            if __name__ == '__main__':
                import sys
                file = sys.argv[0]
            else:
                file = __file__
            self.fp = open(file)
            self.fp.close()
        def __getstate__(self):
            return {'a': self.a, 'arg': self.arg}
        def __setstate__(self, state):
            for key, value in state.iteritems():
                setattr(self, key, value)
        def __deepcopy__(self, memo=None):
            new = self.__class__(deepcopy(self.arg, memo))
            new.a = self.a
            return new
    c = C('argument sketch')
    l.append(c)
    l2 = copy(l)
    print l == l2
    print l
    print l2
    l2 = deepcopy(l)
    print l == l2
    print l
    print l2
    # Self-referential structure: l contains a dict that points back to l.
    l.append({l[1]: l, 'xyz': l[2]})
    l3 = copy(l)
    import repr
    print map(repr.repr, l)
    print map(repr.repr, l1)
    print map(repr.repr, l2)
    print map(repr.repr, l3)
    l3 = deepcopy(l)
    import repr
    print map(repr.repr, l)
    print map(repr.repr, l1)
    print map(repr.repr, l2)
    print map(repr.repr, l3)
    class odict(dict):
        # NOTE(review): mutable default argument — shared across calls.
        def __init__(self, d = {}):
            self.a = 99
            dict.__init__(self, d)
        def __setitem__(self, k, i):
            dict.__setitem__(self, k, i)
            self.a
    o = odict({"A" : "B"})
    x = deepcopy(o)
    print(o, x)

if __name__ == '__main__':
    _test()
| |
# Authors: Nicolas Goix <nicolas.goix@telecom-paristech.fr>
# Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# License: BSD 3 clause
from __future__ import division
from warnings import warn
import numpy as np
from scipy.sparse import issparse
from .bagging import BaseBagging
from ..externals import six
from ..tree import ExtraTreeRegressor
from ..utils import check_random_state, check_array
__all__ = ["IsolationForest"]
class IsolationForest(BaseBagging):
"""Isolation Forest Algorithm
Return the anomaly score of each sample using the IsolationForest algorithm
The IsolationForest 'isolates' observations by randomly selecting a feature
and then randomly selecting a split value between the maximum and minimum
values of the selected feature.
Since recursive partitioning can be represented by a tree structure, the
number of splittings required to isolate a sample is equivalent to the path
length from the root node to the terminating node.
This path length, averaged over a forest of such random trees, is a
measure of abnormality and our decision function.
Random partitioning produces noticeably shorter paths for anomalies.
Hence, when a forest of random trees collectively produce shorter path
lengths for particular samples, they are highly likely to be anomalies.
Parameters
----------
n_estimators : int, optional (default=100)
The number of base estimators in the ensemble.
max_samples : int or float, optional (default="auto")
The number of samples to draw from X to train each base estimator.
- If int, then draw `max_samples` samples.
- If float, then draw `max_samples * X.shape[0]` samples.
- If "auto", then `max_samples=min(256, n_samples)`.
If max_samples is larger than the number of samples provided,
all samples will be used for all trees (no sampling).
max_features : int or float, optional (default=1.0)
The number of features to draw from X to train each base estimator.
- If int, then draw `max_features` features.
- If float, then draw `max_features * X.shape[1]` features.
bootstrap : boolean, optional (default=False)
Whether samples are drawn with replacement.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
Attributes
----------
estimators_ : list of DecisionTreeClassifier
The collection of fitted sub-estimators.
estimators_samples_ : list of arrays
The subset of drawn samples (i.e., the in-bag samples) for each base
estimator.
max_samples_ : integer
The actual number of samples
References
----------
.. [1] Liu, Fei Tony, Ting, Kai Ming and Zhou, Zhi-Hua. "Isolation forest."
Data Mining, 2008. ICDM'08. Eighth IEEE International Conference on.
.. [2] Liu, Fei Tony, Ting, Kai Ming and Zhou, Zhi-Hua. "Isolation-based
anomaly detection." ACM Transactions on Knowledge Discovery from
Data (TKDD) 6.1 (2012): 3.
"""
def __init__(self,
             n_estimators=100,
             max_samples="auto",
             max_features=1.,
             bootstrap=False,
             n_jobs=1,
             random_state=None,
             verbose=0):
    # Build the bagging ensemble around fully-random ExtraTreeRegressors:
    # each base tree splits on a single randomly-drawn feature at a random
    # threshold, which is what makes the forest an *isolation* forest.
    super(IsolationForest, self).__init__(
        base_estimator=ExtraTreeRegressor(
            max_features=1,
            splitter='random',
            random_state=random_state),
        # here above max_features has no links with self.max_features
        bootstrap=bootstrap,
        bootstrap_features=False,
        n_estimators=n_estimators,
        max_samples=max_samples,
        max_features=max_features,
        n_jobs=n_jobs,
        random_state=random_state,
        verbose=verbose)
    def _set_oob_score(self, X, y):
        """Disabled: out-of-bag scoring is not defined for isolation forests."""
        raise NotImplementedError("OOB score not supported by iforest")
def fit(self, X, y=None, sample_weight=None):
"""Fit estimator.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
The input samples. Use ``dtype=np.float32`` for maximum
efficiency. Sparse matrices are also supported, use sparse
``csc_matrix`` for maximum efficiency.
Returns
-------
self : object
Returns self.
"""
# ensure_2d=False because there are actually unit test checking we fail
# for 1d.
X = check_array(X, accept_sparse=['csc'], ensure_2d=False)
if issparse(X):
# Pre-sort indices to avoid that each individual tree of the
# ensemble sorts the indices.
X.sort_indices()
rnd = check_random_state(self.random_state)
y = rnd.uniform(size=X.shape[0])
# ensure that max_sample is in [1, n_samples]:
n_samples = X.shape[0]
if isinstance(self.max_samples, six.string_types):
if self.max_samples == 'auto':
max_samples = min(256, n_samples)
else:
raise ValueError('max_samples (%s) is not supported.'
'Valid choices are: "auto", int or'
'float' % self.max_samples)
elif isinstance(self.max_samples, six.integer_types):
if self.max_samples > n_samples:
warn("max_samples (%s) is greater than the "
"total number of samples (%s). max_samples "
"will be set to n_samples for estimation."
% (self.max_samples, n_samples))
max_samples = n_samples
else:
max_samples = self.max_samples
else: # float
if not (0. < self.max_samples <= 1.):
raise ValueError("max_samples must be in (0, 1]")
max_samples = int(self.max_samples * X.shape[0])
self.max_samples_ = max_samples
max_depth = int(np.ceil(np.log2(max(max_samples, 2))))
super(IsolationForest, self)._fit(X, y, max_samples,
max_depth=max_depth,
sample_weight=sample_weight)
return self
    def predict(self, X):
        """Predict anomaly score of X with the IsolationForest algorithm.
        The anomaly score of an input sample is computed as
        the mean anomaly score of the trees in the forest.
        The measure of normality of an observation given a tree is the depth
        of the leaf containing this observation, which is equivalent to
        the number of splittings required to isolate this point. In case of
        several observations n_left in the leaf, the average path length of
        a n_left samples isolation tree is added.
        Parameters
        ----------
        X : array-like or sparse matrix of shape (n_samples, n_features)
            The input samples. Internally, it will be converted to
            ``dtype=np.float32`` and if a sparse matrix is provided
            to a sparse ``csr_matrix``.
        Returns
        -------
        scores : array of shape (n_samples,)
            The anomaly score of the input samples.
            The lower, the more normal.
        """
        # code structure from ForestClassifier/predict_proba
        # Check data
        X = self.estimators_[0]._validate_X_predict(X, check_input=True)
        n_samples = X.shape[0]
        # Per-sample, per-tree leaf sizes and path depths. Fortran order so
        # the per-tree column writes in the loop below are contiguous.
        n_samples_leaf = np.zeros((n_samples, self.n_estimators), order="f")
        depths = np.zeros((n_samples, self.n_estimators), order="f")
        for i, tree in enumerate(self.estimators_):
            leaves_index = tree.apply(X)
            node_indicator = tree.decision_path(X)
            n_samples_leaf[:, i] = tree.tree_.n_node_samples[leaves_index]
            # Depth = number of nodes on the root->leaf path minus one.
            depths[:, i] = np.asarray(node_indicator.sum(axis=1)).reshape(-1) - 1
        # Credit unfinished isolation inside a leaf of size n with the average
        # path length of an n-sample isolation tree.
        depths += _average_path_length(n_samples_leaf)
        # Anomaly score as defined by Liu et al.: 2 ** (-E[h(x)] / c(m)),
        # normalized by the average path length for max_samples_ points.
        scores = 2 ** (-depths.mean(axis=1) / _average_path_length(self.max_samples_))
        return scores
def decision_function(self, X):
"""Average of the decision functions of the base classifiers.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
The training input samples. Sparse matrices are accepted only if
they are supported by the base estimator.
Returns
-------
score : array, shape (n_samples,)
The decision function of the input samples.
"""
# minus as bigger is better (here less abnormal):
return - self.predict(X)
def _average_path_length(n_samples_leaf):
""" The average path length in a n_samples iTree, which is equal to
the average path length of an unsuccessful BST search since the
latter has the same structure as an isolation tree.
Parameters
----------
n_samples_leaf : array-like of shape (n_samples, n_estimators), or int.
The number of training samples in each test sample leaf, for
each estimators.
Returns
-------
average_path_length : array, same shape as n_samples_leaf
"""
if isinstance(n_samples_leaf, six.integer_types):
if n_samples_leaf <= 1:
return 1.
else:
return 2. * (np.log(n_samples_leaf) + 0.5772156649) - 2. * (
n_samples_leaf - 1.) / n_samples_leaf
else:
n_samples_leaf_shape = n_samples_leaf.shape
n_samples_leaf = n_samples_leaf.reshape((1, -1))
average_path_length = np.zeros(n_samples_leaf.shape)
mask = (n_samples_leaf <= 1)
not_mask = np.logical_not(mask)
average_path_length[mask] = 1.
average_path_length[not_mask] = 2. * (
np.log(n_samples_leaf[not_mask]) + 0.5772156649) - 2. * (
n_samples_leaf[not_mask] - 1.) / n_samples_leaf[not_mask]
return average_path_length.reshape(n_samples_leaf_shape)
| |
from django.contrib.gis.gdal import OGRGeometry, OGRGeomType, \
OGRException, OGRIndexError, SpatialReference, CoordTransform, \
GDAL_VERSION
from django.utils import unittest
from django.contrib.gis.geometry.test_data import TestDataMixin
class OGRGeomTest(unittest.TestCase, TestDataMixin):
    """
    Tests the OGRGeometry and OGRGeomType wrappers from
    django.contrib.gis.gdal, using the geometry fixtures that
    TestDataMixin exposes as ``self.geometries``.
    """
    def test00a_geomtype(self):
        "Testing OGRGeomType object."
        # OGRGeomType should initialize on all these inputs.
        try:
            g = OGRGeomType(1)
            g = OGRGeomType(7)
            g = OGRGeomType('point')
            g = OGRGeomType('GeometrycollectioN')
            g = OGRGeomType('LINearrING')
            g = OGRGeomType('Unknown')
        except:
            self.fail('Could not create an OGRGeomType object!')
        # Should raise OGRException on these invalid inputs.
        self.assertRaises(OGRException, OGRGeomType, 23)
        self.assertRaises(OGRException, OGRGeomType, 'fooD')
        self.assertRaises(OGRException, OGRGeomType, 9)
        # Equivalence can take strings, ints, and other OGRGeomTypes
        self.assertEqual(True, OGRGeomType(1) == OGRGeomType(1))
        self.assertEqual(True, OGRGeomType(7) == 'GeometryCollection')
        self.assertEqual(True, OGRGeomType('point') == 'POINT')
        self.assertEqual(False, OGRGeomType('point') == 2)
        self.assertEqual(True, OGRGeomType('unknown') == 0)
        self.assertEqual(True, OGRGeomType(6) == 'MULtiPolyGON')
        self.assertEqual(False, OGRGeomType(1) != OGRGeomType('point'))
        self.assertEqual(True, OGRGeomType('POINT') != OGRGeomType(6))
        # Testing the Django field name equivalent property.
        self.assertEqual('PointField', OGRGeomType('Point').django)
        self.assertEqual('GeometryField', OGRGeomType('Unknown').django)
        self.assertEqual(None, OGRGeomType('none').django)
        # 'Geometry' initialization implies an unknown geometry type.
        gt = OGRGeomType('Geometry')
        self.assertEqual(0, gt.num)
        self.assertEqual('Unknown', gt.name)
    def test00b_geomtype_25d(self):
        "Testing OGRGeomType object with 25D types."
        wkb25bit = OGRGeomType.wkb25bit
        self.assertTrue(OGRGeomType(wkb25bit + 1) == 'Point25D')
        self.assertTrue(OGRGeomType('MultiLineString25D') == (5 + wkb25bit))
        self.assertEqual('GeometryCollectionField', OGRGeomType('GeometryCollection25D').django)
    def test01a_wkt(self):
        "Testing WKT output."
        for g in self.geometries.wkt_out:
            geom = OGRGeometry(g.wkt)
            self.assertEqual(g.wkt, geom.wkt)
    def test01a_ewkt(self):
        "Testing EWKT input/output."
        for ewkt_val in ('POINT (1 2 3)', 'LINEARRING (0 0,1 1,2 1,0 0)'):
            # First with ewkt output when no SRID in EWKT
            self.assertEqual(ewkt_val, OGRGeometry(ewkt_val).ewkt)
            # Now test consumption with an SRID specified.
            ewkt_val = 'SRID=4326;%s' % ewkt_val
            geom = OGRGeometry(ewkt_val)
            self.assertEqual(ewkt_val, geom.ewkt)
            self.assertEqual(4326, geom.srs.srid)
    def test01b_gml(self):
        "Testing GML output."
        for g in self.geometries.wkt_out:
            geom = OGRGeometry(g.wkt)
            exp_gml = g.gml
            if GDAL_VERSION >= (1, 8):
                # In GDAL 1.8, the non-conformant GML tag <gml:GeometryCollection> was
                # replaced with <gml:MultiGeometry>.
                exp_gml = exp_gml.replace('GeometryCollection', 'MultiGeometry')
            self.assertEqual(exp_gml, geom.gml)
    def test01c_hex(self):
        "Testing HEX input/output."
        for g in self.geometries.hex_wkt:
            geom1 = OGRGeometry(g.wkt)
            self.assertEqual(g.hex, geom1.hex)
            # Constructing w/HEX
            geom2 = OGRGeometry(g.hex)
            self.assertEqual(geom1, geom2)
    def test01d_wkb(self):
        "Testing WKB input/output."
        from binascii import b2a_hex
        for g in self.geometries.hex_wkt:
            geom1 = OGRGeometry(g.wkt)
            wkb = geom1.wkb
            self.assertEqual(b2a_hex(wkb).upper(), g.hex)
            # Constructing w/WKB.
            geom2 = OGRGeometry(wkb)
            self.assertEqual(geom1, geom2)
    def test01e_json(self):
        "Testing GeoJSON input/output."
        from django.contrib.gis.gdal.prototypes.geom import GEOJSON
        # Skip entirely when this GDAL build has no GeoJSON support.
        if not GEOJSON: return
        for g in self.geometries.json_geoms:
            geom = OGRGeometry(g.wkt)
            if not hasattr(g, 'not_equal'):
                self.assertEqual(g.json, geom.json)
                self.assertEqual(g.json, geom.geojson)
            self.assertEqual(OGRGeometry(g.wkt), OGRGeometry(geom.json))
    def test02_points(self):
        "Testing Point objects."
        prev = OGRGeometry('POINT(0 0)')
        for p in self.geometries.points:
            if not hasattr(p, 'z'): # No 3D
                pnt = OGRGeometry(p.wkt)
                self.assertEqual(1, pnt.geom_type)
                self.assertEqual('POINT', pnt.geom_name)
                self.assertEqual(p.x, pnt.x)
                self.assertEqual(p.y, pnt.y)
                self.assertEqual((p.x, p.y), pnt.tuple)
    def test03_multipoints(self):
        "Testing MultiPoint objects."
        for mp in self.geometries.multipoints:
            mgeom1 = OGRGeometry(mp.wkt) # First one from WKT
            self.assertEqual(4, mgeom1.geom_type)
            self.assertEqual('MULTIPOINT', mgeom1.geom_name)
            mgeom2 = OGRGeometry('MULTIPOINT') # Creating empty multipoint
            mgeom3 = OGRGeometry('MULTIPOINT')
            for g in mgeom1:
                mgeom2.add(g) # adding each point from the multipoints
                mgeom3.add(g.wkt) # should take WKT as well
            self.assertEqual(mgeom1, mgeom2) # they should equal
            self.assertEqual(mgeom1, mgeom3)
            self.assertEqual(mp.coords, mgeom2.coords)
            self.assertEqual(mp.n_p, mgeom2.point_count)
    def test04_linestring(self):
        "Testing LineString objects."
        prev = OGRGeometry('POINT(0 0)')
        for ls in self.geometries.linestrings:
            linestr = OGRGeometry(ls.wkt)
            self.assertEqual(2, linestr.geom_type)
            self.assertEqual('LINESTRING', linestr.geom_name)
            self.assertEqual(ls.n_p, linestr.point_count)
            self.assertEqual(ls.coords, linestr.tuple)
            self.assertEqual(True, linestr == OGRGeometry(ls.wkt))
            self.assertEqual(True, linestr != prev)
            self.assertRaises(OGRIndexError, linestr.__getitem__, len(linestr))
            prev = linestr
            # Testing the x, y properties.
            x = [tmpx for tmpx, tmpy in ls.coords]
            y = [tmpy for tmpx, tmpy in ls.coords]
            self.assertEqual(x, linestr.x)
            self.assertEqual(y, linestr.y)
    def test05_multilinestring(self):
        "Testing MultiLineString objects."
        prev = OGRGeometry('POINT(0 0)')
        for mls in self.geometries.multilinestrings:
            mlinestr = OGRGeometry(mls.wkt)
            self.assertEqual(5, mlinestr.geom_type)
            self.assertEqual('MULTILINESTRING', mlinestr.geom_name)
            self.assertEqual(mls.n_p, mlinestr.point_count)
            self.assertEqual(mls.coords, mlinestr.tuple)
            self.assertEqual(True, mlinestr == OGRGeometry(mls.wkt))
            self.assertEqual(True, mlinestr != prev)
            prev = mlinestr
            for ls in mlinestr:
                self.assertEqual(2, ls.geom_type)
                self.assertEqual('LINESTRING', ls.geom_name)
            self.assertRaises(OGRIndexError, mlinestr.__getitem__, len(mlinestr))
    def test06_linearring(self):
        "Testing LinearRing objects."
        prev = OGRGeometry('POINT(0 0)')
        for rr in self.geometries.linearrings:
            lr = OGRGeometry(rr.wkt)
            #self.assertEqual(101, lr.geom_type.num)
            self.assertEqual('LINEARRING', lr.geom_name)
            self.assertEqual(rr.n_p, len(lr))
            self.assertEqual(True, lr == OGRGeometry(rr.wkt))
            self.assertEqual(True, lr != prev)
            prev = lr
    def test07a_polygons(self):
        "Testing Polygon objects."
        # Testing `from_bbox` class method
        bbox = (-180,-90,180,90)
        p = OGRGeometry.from_bbox( bbox )
        self.assertEqual(bbox, p.extent)
        prev = OGRGeometry('POINT(0 0)')
        for p in self.geometries.polygons:
            poly = OGRGeometry(p.wkt)
            self.assertEqual(3, poly.geom_type)
            self.assertEqual('POLYGON', poly.geom_name)
            self.assertEqual(p.n_p, poly.point_count)
            self.assertEqual(p.n_i + 1, len(poly))
            # Testing area & centroid.
            self.assertAlmostEqual(p.area, poly.area, 9)
            x, y = poly.centroid.tuple
            self.assertAlmostEqual(p.centroid[0], x, 9)
            self.assertAlmostEqual(p.centroid[1], y, 9)
            # Testing equivalence
            self.assertEqual(True, poly == OGRGeometry(p.wkt))
            self.assertEqual(True, poly != prev)
            if p.ext_ring_cs:
                ring = poly[0]
                self.assertEqual(p.ext_ring_cs, ring.tuple)
                self.assertEqual(p.ext_ring_cs, poly[0].tuple)
                self.assertEqual(len(p.ext_ring_cs), ring.point_count)
            for r in poly:
                self.assertEqual('LINEARRING', r.geom_name)
    def test07b_closepolygons(self):
        "Testing closing Polygon objects."
        # Both rings in this geometry are not closed.
        poly = OGRGeometry('POLYGON((0 0, 5 0, 5 5, 0 5), (1 1, 2 1, 2 2, 2 1))')
        self.assertEqual(8, poly.point_count)
        print "\nBEGIN - expecting IllegalArgumentException; safe to ignore.\n"
        try:
            c = poly.centroid
        except OGRException:
            # Should raise an OGR exception, rings are not closed
            pass
        else:
            self.fail('Should have raised an OGRException!')
        print "\nEND - expecting IllegalArgumentException; safe to ignore.\n"
        # Closing the rings -- doesn't work on GDAL versions 1.4.1 and below:
        # http://trac.osgeo.org/gdal/ticket/1673
        if GDAL_VERSION <= (1, 4, 1): return
        poly.close_rings()
        self.assertEqual(10, poly.point_count) # Two closing points should've been added
        self.assertEqual(OGRGeometry('POINT(2.5 2.5)'), poly.centroid)
    def test08_multipolygons(self):
        "Testing MultiPolygon objects."
        prev = OGRGeometry('POINT(0 0)')
        for mp in self.geometries.multipolygons:
            mpoly = OGRGeometry(mp.wkt)
            self.assertEqual(6, mpoly.geom_type)
            self.assertEqual('MULTIPOLYGON', mpoly.geom_name)
            if mp.valid:
                self.assertEqual(mp.n_p, mpoly.point_count)
                self.assertEqual(mp.num_geom, len(mpoly))
                self.assertRaises(OGRIndexError, mpoly.__getitem__, len(mpoly))
                for p in mpoly:
                    self.assertEqual('POLYGON', p.geom_name)
                    self.assertEqual(3, p.geom_type)
            self.assertEqual(mpoly.wkt, OGRGeometry(mp.wkt).wkt)
    def test09a_srs(self):
        "Testing OGR Geometries with Spatial Reference objects."
        for mp in self.geometries.multipolygons:
            # Creating a geometry w/spatial reference
            sr = SpatialReference('WGS84')
            mpoly = OGRGeometry(mp.wkt, sr)
            self.assertEqual(sr.wkt, mpoly.srs.wkt)
            # Ensuring that SRS is propagated to clones.
            klone = mpoly.clone()
            self.assertEqual(sr.wkt, klone.srs.wkt)
            # Ensuring all children geometries (polygons and their rings) all
            # return the assigned spatial reference as well.
            for poly in mpoly:
                self.assertEqual(sr.wkt, poly.srs.wkt)
                for ring in poly:
                    self.assertEqual(sr.wkt, ring.srs.wkt)
            # Ensuring SRS propagate in topological ops.
            a = OGRGeometry(self.geometries.topology_geoms[0].wkt_a, sr)
            b = OGRGeometry(self.geometries.topology_geoms[0].wkt_b, sr)
            diff = a.difference(b)
            union = a.union(b)
            self.assertEqual(sr.wkt, diff.srs.wkt)
            self.assertEqual(sr.srid, union.srs.srid)
            # Instantiating w/an integer SRID
            mpoly = OGRGeometry(mp.wkt, 4326)
            self.assertEqual(4326, mpoly.srid)
            mpoly.srs = SpatialReference(4269)
            self.assertEqual(4269, mpoly.srid)
            self.assertEqual('NAD83', mpoly.srs.name)
            # Incrementing through the multipolygon after the spatial reference
            # has been re-assigned.
            for poly in mpoly:
                self.assertEqual(mpoly.srs.wkt, poly.srs.wkt)
                poly.srs = 32140
                for ring in poly:
                    # Changing each ring in the polygon
                    self.assertEqual(32140, ring.srs.srid)
                    self.assertEqual('NAD83 / Texas South Central', ring.srs.name)
                    ring.srs = str(SpatialReference(4326)) # back to WGS84
                    self.assertEqual(4326, ring.srs.srid)
                    # Using the `srid` property.
                    ring.srid = 4322
                    self.assertEqual('WGS 72', ring.srs.name)
                    self.assertEqual(4322, ring.srid)
    def test09b_srs_transform(self):
        "Testing transform()."
        orig = OGRGeometry('POINT (-104.609 38.255)', 4326)
        trans = OGRGeometry('POINT (992385.4472045 481455.4944650)', 2774)
        # Using an srid, a SpatialReference object, and a CoordTransform object
        # or transformations.
        t1, t2, t3 = orig.clone(), orig.clone(), orig.clone()
        t1.transform(trans.srid)
        t2.transform(SpatialReference('EPSG:2774'))
        ct = CoordTransform(SpatialReference('WGS84'), SpatialReference(2774))
        t3.transform(ct)
        # Testing use of the `clone` keyword.
        k1 = orig.clone()
        k2 = k1.transform(trans.srid, clone=True)
        self.assertEqual(k1, orig)
        self.assertNotEqual(k1, k2)
        prec = 3
        for p in (t1, t2, t3, k2):
            self.assertAlmostEqual(trans.x, p.x, prec)
            self.assertAlmostEqual(trans.y, p.y, prec)
    def test09c_transform_dim(self):
        "Testing coordinate dimension is the same on transformed geometries."
        ls_orig = OGRGeometry('LINESTRING(-104.609 38.255)', 4326)
        ls_trans = OGRGeometry('LINESTRING(992385.4472045 481455.4944650)', 2774)
        prec = 3
        ls_orig.transform(ls_trans.srs)
        # Making sure the coordinate dimension is still 2D.
        self.assertEqual(2, ls_orig.coord_dim)
        self.assertAlmostEqual(ls_trans.x[0], ls_orig.x[0], prec)
        self.assertAlmostEqual(ls_trans.y[0], ls_orig.y[0], prec)
    def test10_difference(self):
        "Testing difference()."
        for i in xrange(len(self.geometries.topology_geoms)):
            a = OGRGeometry(self.geometries.topology_geoms[i].wkt_a)
            b = OGRGeometry(self.geometries.topology_geoms[i].wkt_b)
            d1 = OGRGeometry(self.geometries.diff_geoms[i].wkt)
            d2 = a.difference(b)
            self.assertEqual(d1, d2)
            self.assertEqual(d1, a - b) # __sub__ is difference operator
            a -= b # testing __isub__
            self.assertEqual(d1, a)
    def test11_intersection(self):
        "Testing intersects() and intersection()."
        for i in xrange(len(self.geometries.topology_geoms)):
            a = OGRGeometry(self.geometries.topology_geoms[i].wkt_a)
            b = OGRGeometry(self.geometries.topology_geoms[i].wkt_b)
            i1 = OGRGeometry(self.geometries.intersect_geoms[i].wkt)
            self.assertEqual(True, a.intersects(b))
            i2 = a.intersection(b)
            self.assertEqual(i1, i2)
            self.assertEqual(i1, a & b) # __and__ is intersection operator
            a &= b # testing __iand__
            self.assertEqual(i1, a)
    def test12_symdifference(self):
        "Testing sym_difference()."
        for i in xrange(len(self.geometries.topology_geoms)):
            a = OGRGeometry(self.geometries.topology_geoms[i].wkt_a)
            b = OGRGeometry(self.geometries.topology_geoms[i].wkt_b)
            d1 = OGRGeometry(self.geometries.sdiff_geoms[i].wkt)
            d2 = a.sym_difference(b)
            self.assertEqual(d1, d2)
            self.assertEqual(d1, a ^ b) # __xor__ is symmetric difference operator
            a ^= b # testing __ixor__
            self.assertEqual(d1, a)
    def test13_union(self):
        "Testing union()."
        for i in xrange(len(self.geometries.topology_geoms)):
            a = OGRGeometry(self.geometries.topology_geoms[i].wkt_a)
            b = OGRGeometry(self.geometries.topology_geoms[i].wkt_b)
            u1 = OGRGeometry(self.geometries.union_geoms[i].wkt)
            u2 = a.union(b)
            self.assertEqual(u1, u2)
            self.assertEqual(u1, a | b) # __or__ is union operator
            a |= b # testing __ior__
            self.assertEqual(u1, a)
    def test14_add(self):
        "Testing GeometryCollection.add()."
        # Can't insert a Point into a MultiPolygon.
        mp = OGRGeometry('MultiPolygon')
        pnt = OGRGeometry('POINT(5 23)')
        self.assertRaises(OGRException, mp.add, pnt)
        # GeometryCollection.add may take an OGRGeometry (if another collection
        # of the same type all child geoms will be added individually) or WKT.
        for mp in self.geometries.multipolygons:
            mpoly = OGRGeometry(mp.wkt)
            mp1 = OGRGeometry('MultiPolygon')
            mp2 = OGRGeometry('MultiPolygon')
            mp3 = OGRGeometry('MultiPolygon')
            for poly in mpoly:
                mp1.add(poly) # Adding a geometry at a time
                mp2.add(poly.wkt) # Adding WKT
            mp3.add(mpoly) # Adding a MultiPolygon's entire contents at once.
            for tmp in (mp1, mp2, mp3): self.assertEqual(mpoly, tmp)
    def test15_extent(self):
        "Testing `extent` property."
        # The xmin, ymin, xmax, ymax of the MultiPoint should be returned.
        mp = OGRGeometry('MULTIPOINT(5 23, 0 0, 10 50)')
        self.assertEqual((0.0, 0.0, 10.0, 50.0), mp.extent)
        # Testing on the 'real world' Polygon.
        poly = OGRGeometry(self.geometries.polygons[3].wkt)
        ring = poly.shell
        x, y = ring.x, ring.y
        xmin, ymin = min(x), min(y)
        xmax, ymax = max(x), max(y)
        self.assertEqual((xmin, ymin, xmax, ymax), poly.extent)
    def test16_25D(self):
        "Testing 2.5D geometries."
        pnt_25d = OGRGeometry('POINT(1 2 3)')
        self.assertEqual('Point25D', pnt_25d.geom_type.name)
        self.assertEqual(3.0, pnt_25d.z)
        self.assertEqual(3, pnt_25d.coord_dim)
        ls_25d = OGRGeometry('LINESTRING(1 1 1,2 2 2,3 3 3)')
        self.assertEqual('LineString25D', ls_25d.geom_type.name)
        self.assertEqual([1.0, 2.0, 3.0], ls_25d.z)
        self.assertEqual(3, ls_25d.coord_dim)
    def test17_pickle(self):
        "Testing pickle support."
        import cPickle
        g1 = OGRGeometry('LINESTRING(1 1 1,2 2 2,3 3 3)', 'WGS84')
        g2 = cPickle.loads(cPickle.dumps(g1))
        self.assertEqual(g1, g2)
        self.assertEqual(4326, g2.srs.srid)
        self.assertEqual(g1.srs.wkt, g2.srs.wkt)
    def test18_ogrgeometry_transform_workaround(self):
        "Testing coordinate dimensions on geometries after transformation."
        # A bug in GDAL versions prior to 1.7 changes the coordinate
        # dimension of a geometry after it has been transformed.
        # This test ensures that the bug workarounds employed within
        # `OGRGeometry.transform` indeed work.
        wkt_2d = "MULTILINESTRING ((0 0,1 1,2 2))"
        wkt_3d = "MULTILINESTRING ((0 0 0,1 1 1,2 2 2))"
        srid = 4326
        # For both the 2D and 3D MultiLineString, ensure _both_ the dimension
        # of the collection and the component LineString have the expected
        # coordinate dimension after transform.
        geom = OGRGeometry(wkt_2d, srid)
        geom.transform(srid)
        self.assertEqual(2, geom.coord_dim)
        self.assertEqual(2, geom[0].coord_dim)
        self.assertEqual(wkt_2d, geom.wkt)
        geom = OGRGeometry(wkt_3d, srid)
        geom.transform(srid)
        self.assertEqual(3, geom.coord_dim)
        self.assertEqual(3, geom[0].coord_dim)
        self.assertEqual(wkt_3d, geom.wkt)
    def test19_equivalence_regression(self):
        "Testing equivalence methods with non-OGRGeometry instances."
        self.assertNotEqual(None, OGRGeometry('POINT(0 0)'))
        self.assertEqual(False, OGRGeometry('LINESTRING(0 0, 1 1)') == 3)
def suite():
    """Return a TestSuite containing all OGR geometry tests."""
    tests = unittest.TestSuite()
    tests.addTest(unittest.makeSuite(OGRGeomTest))
    return tests
def run(verbosity=2):
    """Run the OGR geometry test suite with the given runner verbosity."""
    runner = unittest.TextTestRunner(verbosity=verbosity)
    runner.run(suite())
| |
from django.conf import settings
from django.contrib.auth import get_user_model
from django.contrib.auth.models import Group, Permission as DjangoPermission
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import MultipleObjectsReturned
from django.db.models import Q
from django.test import TestCase
from django.urls import reverse
import authority
from authority import permissions
from authority.models import Permission
from authority.exceptions import NotAModel, UnsavedModelInstance
# Load the form
from authority.forms import UserPermissionForm # noqa
# Concrete user model for this project (may be a custom model -- the fixture
# name below suggests so).
User = get_user_model()
# Fixture files loaded by every TestCase in this module.
FIXTURES = ["tests_custom.json"]
# Lookup used throughout to fetch the fixture's primary test user.
QUERY = Q(email="jezdez@github.com")
class UserPermission(permissions.BasePermission):
    """Authority permission class for User objects (label ``user_permission``)."""
    checks = ("browse",)
    label = "user_permission"
# Register the permission class so authority checks resolve for User.
authority.utils.register(User, UserPermission)
class GroupPermission(permissions.BasePermission):
    """Authority permission class for Group objects (label ``group_permission``)."""
    checks = ("browse",)
    label = "group_permission"
# Register the permission class so authority checks resolve for Group.
authority.utils.register(Group, GroupPermission)
class DjangoPermissionChecksTestCase(TestCase):
    """
    Django permission objects have certain methods that are always present,
    test those here.
    self.user will be given:
        - django permission add_user (test_add)
        - authority to delete_user which is him (test_delete)
    These permissions are given in the test case and not in the fixture, for
    later reference.
    """
    fixtures = FIXTURES
    def setUp(self):
        self.user = User.objects.get(QUERY)
        self.check = UserPermission(self.user)
    def test_no_permission(self):
        # With nothing assigned, every check must come back False.
        self.assertFalse(self.check.add_user())
        self.assertFalse(self.check.delete_user())
        self.assertFalse(self.check.delete_user(self.user))
    def test_add(self):
        # setup: grant the plain Django model permission.
        perm = DjangoPermission.objects.get(codename="add_user")
        self.user.user_permissions.add(perm)
        # test
        self.assertTrue(self.check.add_user())
    def test_delete(self):
        # Grant a per-object authority permission on the user himself.
        perm = Permission(
            user=self.user,
            content_object=self.user,
            codename="user_permission.delete_user",
            approved=True,
        )
        perm.save()
        # test: generic delete is still denied, object-level delete allowed.
        self.assertFalse(self.check.delete_user())
        self.assertTrue(self.check.delete_user(self.user))
class AssignBehaviourTest(TestCase):
    """
    self.user will be given:
        - permission add_user (test_add),
        - permission delete_user for him (test_delete),
        - all existing codenames permissions: a/b/c/d (test_all),
    """
    fixtures = FIXTURES
    def setUp(self):
        self.user = User.objects.get(QUERY)
        self.group1, _ = Group.objects.get_or_create(name="Test Group 1")
        self.group2, _ = Group.objects.get_or_create(name="Test Group 2")
        # BUG FIX: this previously used the name "Test Group 2" again, so
        # get_or_create returned the *same* row as group2 and the tests below
        # never actually exercised a third, distinct group.
        self.group3, _ = Group.objects.get_or_create(name="Test Group 3")
        self.check = UserPermission(self.user)
    def test_add(self):
        result = self.check.assign(check="add_user")
        self.assertTrue(isinstance(result[0], DjangoPermission))
        self.assertTrue(self.check.add_user())
    def test_assign_to_group(self):
        result = UserPermission(group=self.group1).assign(
            check="delete_user", content_object=self.user
        )
        self.assertIsInstance(result, list)
        self.assertIsInstance(result[0], Permission)
        self.assertTrue(UserPermission(group=self.group1).delete_user(self.user))
    def test_assign_to_group_does_not_overwrite_other_group_permission(self):
        # Assigning the same check to two groups must leave both grants intact.
        UserPermission(group=self.group1).assign(
            check="delete_user", content_object=self.user
        )
        UserPermission(group=self.group2).assign(
            check="delete_user", content_object=self.user
        )
        self.assertTrue(UserPermission(group=self.group2).delete_user(self.user))
        self.assertTrue(UserPermission(group=self.group1).delete_user(self.user))
    def test_assign_to_group_does_not_fail_when_two_group_perms_exist(self):
        # Pre-create identical permission rows for two different groups; a
        # third group's assign() must not trip over the duplicates.
        for group in self.group1, self.group2:
            perm = Permission(
                group=group,
                content_object=self.user,
                codename="user_permission.delete_user",
                approved=True,
            )
            perm.save()
        try:
            UserPermission(group=self.group3).assign(
                check="delete_user", content_object=self.user
            )
        except MultipleObjectsReturned:
            self.fail("assign() should not have raised this exception")
    def test_delete(self):
        result = self.check.assign(content_object=self.user, check="delete_user",)
        self.assertTrue(isinstance(result[0], Permission))
        self.assertFalse(self.check.delete_user())
        self.assertTrue(self.check.delete_user(self.user))
    def test_all(self):
        # assign() without an explicit check grants every declared codename.
        result = self.check.assign(content_object=self.user)
        self.assertTrue(isinstance(result, list))
        self.assertTrue(self.check.browse_user(self.user))
        self.assertTrue(self.check.delete_user(self.user))
        self.assertTrue(self.check.add_user(self.user))
        self.assertTrue(self.check.change_user(self.user))
class GenericAssignBehaviourTest(TestCase):
    """
    Tests assign() with generic (non model-specific) check names.
    self.user will be given:
        - permission add (test_add),
        - permission delete for him (test_delete),
    """
    fixtures = FIXTURES
    def setUp(self):
        self.user = User.objects.get(QUERY)
        self.check = UserPermission(self.user)
    def test_add(self):
        created = self.check.assign(check="add", generic=True)
        self.assertIsInstance(created[0], DjangoPermission)
        self.assertTrue(self.check.add_user())
    def test_delete(self):
        created = self.check.assign(
            check="delete", content_object=self.user, generic=True,
        )
        self.assertIsInstance(created[0], Permission)
        self.assertFalse(self.check.delete_user())
        self.assertTrue(self.check.delete_user(self.user))
class AssignExceptionsTest(TestCase):
    """
    Tests that exceptions are thrown if assign() was called with inconsistent
    arguments.
    """
    fixtures = FIXTURES
    def setUp(self):
        self.user = User.objects.get(QUERY)
        self.check = UserPermission(self.user)
    def test_unsaved_model(self):
        # assign() must reject model instances that have not been saved yet.
        # (Idiom fix: assertRaises replaces the try/except/self.fail() dance
        # and reports a clearer failure message.)
        with self.assertRaises(UnsavedModelInstance):
            self.check.assign(content_object=User())
    def test_not_model_content_object(self):
        # assign() must reject content objects that are not model instances.
        with self.assertRaises(NotAModel):
            self.check.assign(content_object="fail")
class SmartCachingTestCase(TestCase):
    """
    The base test case for all tests that have to do with smart caching.
    """
    fixtures = FIXTURES
    def setUp(self):
        # Create a user.
        self.user = User.objects.get(QUERY)
        # Create a group.
        self.group = Group.objects.create()
        self.group.user_set.add(self.user)
        # Make the checks
        self.user_check = UserPermission(user=self.user)
        self.group_check = GroupPermission(group=self.group)
        # Ensure we are using the smart cache.
        # NOTE(review): this mutates global settings without restoring them in
        # tearDown -- confirm no cross-test leakage (override_settings would
        # be safer).
        settings.AUTHORITY_USE_SMART_CACHE = True
    def tearDown(self):
        ContentType.objects.clear_cache()
    def _old_user_permission_check(self):
        # This is what the old, pre-cache system would check to see if a user
        # had a given permission.
        return Permission.objects.user_permissions(
            self.user, "foo", self.user, approved=True, check_groups=True,
        )
    def _old_group_permission_check(self):
        # This is what the old, pre-cache system would check to see if a group
        # had a given permission.
        return Permission.objects.group_permissions(
            self.group, "foo", self.group, approved=True,
        )
class PerformanceTest(SmartCachingTestCase):
    """
    Tests that permission are actually cached and that the number of queries
    stays constant.
    """
    def test_has_user_perms(self):
        # Show that when calling has_user_perms multiple times no additional
        # queries are done.
        # Make sure the has_user_perms check does not get short-circuited.
        assert not self.user.is_superuser
        assert self.user.is_active
        # Regardless of how many times has_user_perms is called, the number of
        # queries is the same.
        # Content type and permissions (2 queries)
        with self.assertNumQueries(3):
            for _ in range(5):
                # Need to assert it so the query actually gets executed.
                assert not self.user_check.has_user_perms(
                    "foo", self.user, True, False,
                )
    def test_group_has_perms(self):
        # Group checks should stay at a constant query count as well.
        with self.assertNumQueries(2):
            for _ in range(5):
                assert not self.group_check.has_group_perms("foo", self.group, True,)
    def test_has_user_perms_check_group(self):
        # Regardless of the number groups permissions, it should only take one
        # query to check both users and groups.
        # Content type and permissions (2 queries)
        with self.assertNumQueries(3):
            self.user_check.has_user_perms(
                "foo", self.user, approved=True, check_groups=True,
            )
    def test_invalidate_user_permissions_cache(self):
        # Show that calling invalidate_permissions_cache will cause extra
        # queries.
        # For each time invalidate_permissions_cache gets called, you
        # will need to do one query to get content type and one to get
        # the permissions.
        with self.assertNumQueries(6):
            for _ in range(5):
                assert not self.user_check.has_user_perms(
                    "foo", self.user, True, False,
                )
            # Invalidate the cache to show that a query will be generated when
            # checking perms again.
            self.user_check.invalidate_permissions_cache()
            ContentType.objects.clear_cache()
            # One query to re generate the cache.
            for _ in range(5):
                assert not self.user_check.has_user_perms(
                    "foo", self.user, True, False,
                )
    def test_invalidate_group_permissions_cache(self):
        # Show that calling invalidate_permissions_cache will cause extra
        # queries.
        # For each time invalidate_permissions_cache gets called, you
        # will need to do one query to get content type and one to get
        # the permissions.
        with self.assertNumQueries(4):
            for _ in range(5):
                assert not self.group_check.has_group_perms("foo", self.group, True,)
            # Invalidate the cache to show that a query will be generated when
            # checking perms again.
            self.group_check.invalidate_permissions_cache()
            ContentType.objects.clear_cache()
            # One query to re generate the cache.
            for _ in range(5):
                assert not self.group_check.has_group_perms("foo", self.group, True,)
    def test_has_user_perms_check_group_multiple(self):
        # Create a permission with just a group.
        Permission.objects.create(
            content_type=Permission.objects.get_content_type(User),
            object_id=self.user.pk,
            codename="foo",
            group=self.group,
            approved=True,
        )
        # By creating the Permission objects the Content type cache
        # gets created.
        # Check the number of queries.
        with self.assertNumQueries(2):
            assert self.user_check.has_user_perms("foo", self.user, True, True)
        # Create a second group.
        new_group = Group.objects.create(name="new_group")
        new_group.user_set.add(self.user)
        # Create a permission object for it.
        Permission.objects.create(
            content_type=Permission.objects.get_content_type(User),
            object_id=self.user.pk,
            codename="foo",
            group=new_group,
            approved=True,
        )
        self.user_check.invalidate_permissions_cache()
        # Make sure it is the same number of queries.
        with self.assertNumQueries(2):
            assert self.user_check.has_user_perms("foo", self.user, True, True)
class GroupPermissionCacheTestCase(SmartCachingTestCase):
    """
    Tests that peg expected behaviour of the cached group permission checks.
    """
    def test_has_user_perms_with_groups(self):
        perms = self._old_user_permission_check()
        self.assertEqual([], list(perms))
        # Use the new cached user perms to show that the user does not have the
        # perms.
        can_foo_with_group = self.user_check.has_user_perms(
            "foo", self.user, approved=True, check_groups=True,
        )
        self.assertFalse(can_foo_with_group)
        # Create a permission with just that group.
        perm = Permission.objects.create(
            content_type=Permission.objects.get_content_type(User),
            object_id=self.user.pk,
            codename="foo",
            group=self.group,
            approved=True,
        )
        # Old permission check
        perms = self._old_user_permission_check()
        self.assertEqual([perm], list(perms))
        # Invalidate the cache.
        self.user_check.invalidate_permissions_cache()
        can_foo_with_group = self.user_check.has_user_perms(
            "foo", self.user, approved=True, check_groups=True,
        )
        self.assertTrue(can_foo_with_group)
    def test_has_group_perms_no_user(self):
        # Make sure calling has_user_perms on a permission that does not have a
        # user does not throw any errors.
        # NOTE(review): self.user (not self.group) is passed here -- appears
        # intentional to exercise the no-user path; confirm.
        can_foo_with_group = self.group_check.has_group_perms(
            "foo", self.user, approved=True,
        )
        self.assertFalse(can_foo_with_group)
        perms = self._old_group_permission_check()
        self.assertEqual([], list(perms))
        # Create a permission with just that group.
        perm = Permission.objects.create(
            content_type=Permission.objects.get_content_type(Group),
            object_id=self.group.pk,
            codename="foo",
            group=self.group,
            approved=True,
        )
        # Old permission check
        perms = self._old_group_permission_check()
        self.assertEqual([perm], list(perms))
        # Invalidate the cache.
        self.group_check.invalidate_permissions_cache()
        can_foo_with_group = self.group_check.has_group_perms(
            "foo", self.group, approved=True,
        )
        self.assertTrue(can_foo_with_group)
class AddPermissionTestCase(TestCase):
    """Requesting a permission the user may not grant must yield HTTP 403, not 500."""

    def test_add_permission_permission_denied_is_403(self):
        user = get_user_model().objects.create(username="foo", email="foo@example.com",)
        user.set_password("pw")
        user.save()
        # NOTE(review): the user's username is "foo" but the login below uses
        # the email address -- presumably an email-based authentication
        # backend is configured for these tests; confirm against settings.
        assert self.client.login(username="foo@example.com", password="pw")
        url = reverse(
            "authority-add-permission-request",
            kwargs={"app_label": "foo", "module_name": "Bar", "pk": 1,},
        )
        r = self.client.get(url)
        # PermissionDenied must surface as 403 Forbidden.
        self.assertEqual(r.status_code, 403)
| |
"""
"""
from subprocess import Popen, PIPE
import json, types, re
MEGACLI='/opt/MegaRAID/MegaCli/MegaCli64'
class Adapter:
    """One MegaRAID adapter (controller), populated from 'MegaCli -AdpAllInfo'."""

    def __init__(self):
        # Identity / firmware details parsed from the adapter banner.
        self.device_id = 0
        self.product_name = ''
        self.serial_number = ''
        self.fw_package_build = ''
        self.fw_version = ''
        self.bios_version = ''
        self.webbios_version = ''
        self.preboot_cli_version = ''
        self.boot_block_version = ''
        self.sas_address = ''
        # Presence flags for optional hardware components.
        self.bbu_present = False
        self.alarm_present = False
        self.nvram_present = False
        self.serial_debugger_present = False
        self.flash_present = False
        self.memory_size = ''

    def load(self, adapter_id):
        """Populate this instance by parsing 'MegaCli -AdpAllInfo -a<adapter_id>'.

        Returns 0 (falsy sentinel) when MegaCli could not be executed;
        otherwise returns None after filling in the attributes.
        """
        try:
            ret = megacli('-AdpAllInfo -a%i -NoLog' % adapter_id)
        except OSError:
            print 'Failed to get adapter information (MegaCli -AdpAllInfo)'
            return 0
        # Each interesting output line is 'Label : value'; match on a known
        # prefix and keep the text after the first ':'.
        for line in ret.readlines():
            if line.startswith('Adapter #'):
                # Header line, e.g. 'Adapter #0' -- value starts at column 9.
                self.device_id = int(line[9:].strip())
            if line.startswith('Product Name'):
                offset = line.find(':')
                self.product_name = line[offset+1:].strip()
            elif line.startswith('Serial No'):
                offset = line.find(':')
                self.serial_number = line[offset+1:].strip()
            elif line.startswith('FW Package Build'):
                offset = line.find(':')
                self.fw_package_build = line[offset+1:].strip()
            elif line.startswith('FW Version'):
                offset = line.find(':')
                self.fw_version = line[offset+1:].strip()
            elif line.startswith('BIOS Version'):
                offset = line.find(':')
                self.bios_version = line[offset+1:].strip()
            elif line.startswith('WebBIOS Version'):
                offset = line.find(':')
                self.webbios_version = line[offset+1:].strip()
            elif line.startswith('Preboot CLI Version'):
                offset = line.find(':')
                self.preboot_cli_version = line[offset+1:].strip()
            elif line.startswith('Boot Block Version'):
                offset = line.find(':')
                self.boot_block_version = line[offset+1:].strip()
            elif line.startswith('SAS Address'):
                offset = line.find(':')
                self.sas_address = line[offset+1:].strip()
            elif line.startswith('BBU'):
                # Hardware-presence lines carry 'Present'/'Absent' style
                # values; str2bool() strips and maps them to booleans.
                offset = line.find(':')
                self.bbu_present = str2bool(line[offset+1:])
            elif line.startswith('Alarm'):
                offset = line.find(':')
                self.alarm_present = str2bool(line[offset+1:])
            elif line.startswith('NVRAM'):
                offset = line.find(':')
                self.nvram_present = str2bool(line[offset+1:])
            elif line.startswith('Serial Debugger'):
                offset = line.find(':')
                self.serial_debugger_present = str2bool(line[offset+1:])
            elif line.startswith('Flash'):
                offset = line.find(':')
                self.flash_present = str2bool(line[offset+1:])
            elif line.startswith('Memory Size'):
                offset = line.find(':')
                self.memory_size = line[offset+1:].strip()

    def json(self):
        """Serialise all attributes as a JSON object string."""
        return json.dumps(self.__dict__)

    def __getitem__(self, item):
        # Dict-style access to attributes, e.g. adapter['fw_version'].
        return self.__dict__[item]

    def __str__(self):
        # Human-readable dump; field order mirrors MegaCli's own output.
        ret = """Device ID : %d
Product Name : %s
Serial Number : %s
FW Package Build : %s
FW Version : %s
BIOS Version : %s
WebBIOS Version : %s
Preboot CLI Version : %s
Boot Block Version : %s
SAS Address : %s
BBU Present : %s
Alarm Present : %s
NVRAM Present : %s
Serial Debugger Present : %s
Flash Present : %s
Memory Size : %s""" % (self.device_id, self.product_name, \
            self.serial_number, self.fw_package_build, self.fw_version, \
            self.bios_version, self.webbios_version, self.preboot_cli_version, \
            self.boot_block_version, self.sas_address, self.bbu_present, \
            self.alarm_present, self.nvram_present, self.serial_debugger_present, \
            self.flash_present, self.memory_size)
        return ret
class Enclosure:
    """One drive enclosure attached to an adapter, populated from 'MegaCli -EncInfo'."""

    def __init__(self):
        self.device_id = 0
        # Set externally by enc_list() after construction.
        self.adapter_id = 0
        self.number_of_slots = 0
        self.number_of_power_supplies = 0
        self.number_of_fans = 0
        self.number_of_temperature_sensors = 0
        self.number_of_alarms = 0
        self.number_of_sim_modules = 0
        self.number_of_physical_drives = 0
        self.status = ''
        self.position = 0
        self.connector_name = ''
        self.partner_device_id = ''

    def load_from_text(self, input):
        """Populate fields from a block of '-EncInfo' output lines.

        MegaCli indents enclosure attributes with four spaces, hence the
        leading whitespace in the prefixes below.
        """
        for line in input:
            if line.startswith('    Device ID'):
                offset = line.find(':')
                self.device_id = int(line[offset+1:].strip())
            if line.startswith('    Number of Slots'):
                offset = line.find(':')
                self.number_of_slots = int(line[offset+1:].strip())
            elif line.startswith('    Number of Power Supplies'):
                offset = line.find(':')
                self.number_of_power_supplies = int(line[offset+1:].strip())
            elif line.startswith('    Number of Fans'):
                offset = line.find(':')
                self.number_of_fans = int(line[offset+1:].strip())
            elif line.startswith('    Number of Temperature Sensors'):
                offset = line.find(':')
                self.number_of_temperature_sensors = int(line[offset+1:].strip())
            elif line.startswith('    Number of Alarms'):
                offset = line.find(':')
                self.number_of_alarms = int(line[offset+1:].strip())
            elif line.startswith('    Number of SIM Modules'):
                offset = line.find(':')
                self.number_of_sim_modules = int(line[offset+1:].strip())
            elif line.startswith('    Number of Physical Drives'):
                offset = line.find(':')
                self.number_of_physical_drives = int(line[offset+1:].strip())
            elif line.startswith('    Status'):
                offset = line.find(':')
                self.status = line[offset+1:].strip()
            elif line.startswith('    Position'):
                offset = line.find(':')
                self.position = line[offset+1:].strip()
            elif line.startswith('    Connector Name'):
                offset = line.find(':')
                self.connector_name = line[offset+1:].strip()
            elif line.startswith('    Partner Device Id'):
                offset = line.find(':')
                self.partner_device_id = line[offset+1:].strip()

    def json(self):
        """Serialise all attributes as a JSON object string."""
        return json.dumps(self.__dict__)

    def __getitem__(self, item):
        # Dict-style access to attributes.
        return self.__dict__[item]

    def __str__(self):
        # Human-readable dump; field order mirrors MegaCli's own output.
        ret = """Device ID : %i
Number of Slots : %i
Number of Power Supplies : %i
Number of Fans : %i
Number of Temperature Sensors : %i
Number of Alarms : %i
Number of SIM Modules : %i
Number of Physical Drives : %i
Status : %s
Position : %s
Connector Name : %s
Partner Device Id : %s""" % (self.device_id, self.number_of_slots, \
            self.number_of_power_supplies, self.number_of_fans, \
            self.number_of_temperature_sensors, self.number_of_alarms, \
            self.number_of_sim_modules, self.number_of_physical_drives, \
            self.status, self.position, self.connector_name, self.partner_device_id)
        return ret
class PhysicalDevice:
    """One physical drive behind an adapter, populated from 'MegaCli -PdList/-PdInfo'.

    Also wraps the per-drive MegaCli actions (locate LED, online/offline,
    hot-spare management, removal preparation).
    """

    def __init__(self):
        # Addressing: adapter / enclosure / slot identify the drive to MegaCli.
        self.adapter_id = 0
        self.enclosure_id = 0
        self.slot_id = 0
        self.device_id = 0
        self.sequence_number = 0
        # Error counters reported by the controller.
        self.media_errors = 0
        self.other_errors = 0
        self.predictive_failures = 0
        self.last_predictive_seq_number = 0
        self.pd_type = ''
        # Sizes become bare float GB values once load_from_text() runs.
        self.raw_size = ''
        self.non_coerced_size = ''
        self.coerced_size = ''
        self.firmware_state = ''
        self.sas_address = ''
        self.connected_port_number = ''
        self.inquiry_data = ''
        self.fde_capable = ''
        self.fde_enable = ''
        self.secured = ''
        self.locked = ''
        self.foreign_state = ''
        self.device_speed = ''
        self.link_speed = ''
        self.media_type = ''

    def led_on(self):
        """Start blinking the drive's locate LED; returns False if MegaCli failed."""
        try:
            ret = megacli('-PdLocate -Start -PhysDrv[%i:%i] -a%i'
                % (self.enclosure_id, self.slot_id, self.adapter_id))
        except OSError:
            print 'Failed to turn location LED on (MegaCli -PdLocate -Start)'
            return False
        return True

    def led_off(self):
        """Stop blinking the drive's locate LED; returns False if MegaCli failed."""
        try:
            ret = megacli('-PdLocate -Stop -PhysDrv[%i:%i] -a%i'
                % (self.enclosure_id, self.slot_id, self.adapter_id))
        except OSError:
            # NOTE(review): message says "on" but this is the Stop path.
            print 'Failed to turn location LED on (MegaCli -PdLocate -Stop)'
            return False
        return True

    def is_configured(self):
        # NOTE(review): despite its name this returns True only when the
        # firmware state contains both 'Unconfigured' and 'good', i.e. it
        # identifies UNconfigured(good) drives.  pd_list_configured() relies
        # on this inversion ('not p.is_configured()'), so renaming requires a
        # coordinated change -- TODO confirm the intent before touching it.
        if 'Unconfigured' in self.firmware_state and 'good' in self.firmware_state:
            return True
        else:
            return False

    def make_JBOD(self):
        """ Makes drive JBOD Just a Bunch Of Disks"""
        args = '-PDMakeJBOD -PhysDrv[%i:%i] -a%i' % (self.enclosure_id, self.slot_id, self.adapter_id)
        megacli(args)
        self.reload()

    def set_offline(self):
        """ Set firmware state to Offline """
        args = '-PDOffline -PhysDrv [%i:%i] -a%i' % (self.enclosure_id, self.slot_id, self.adapter_id)
        megacli(args)
        self.reload()

    def set_online(self):
        """ Set firmware state to Online """
        args = '-PDOnline -PhysDrv [%i:%i] -a%i' % (self.enclosure_id, self.slot_id, self.adapter_id)
        megacli(args)
        self.reload()

    def mark_missing(self):
        """ Mark drive as missing """
        args = '-PDMarkMissing -PhysDrv [%i:%i] -a%i' % (self.enclosure_id, self.slot_id, self.adapter_id)
        megacli(args)
        self.reload()

    def prepare_for_removal(self):
        """ Prepare drive for removal (spins the drive down) """
        args = '-PdPrpRmv -PhysDrv [%i:%i] -a%i' % (self.enclosure_id, self.slot_id, self.adapter_id)
        megacli(args)
        self.reload()

    def bad_to_good(self):
        """ Changes drive in state Unconfigured-Bad to Unconfigured-Good. """
        args = '-PDMakeGood -PhysDrv[%i:%i] -a%i' % (self.enclosure_id, self.slot_id, self.adapter_id)
        megacli(args)
        self.reload()

    def set_global_hot_spare(self):
        """ Set drive as global hot spare """
        args = '-PDHSP -Set -PhysDrv [%i:%i] -a%i' % (self.enclosure_id, self.slot_id, self.adapter_id)
        megacli(args)
        self.reload()

    def remove_global_hot_spare(self):
        """ Unset drive as global hot spare """
        args = '-PDHSP -Rmv -PhysDrv [%i:%i] -a%i' % (self.enclosure_id, self.slot_id, self.adapter_id)
        megacli(args)
        self.reload()

    def auto_remove(self):
        """ This method calls all the other necessary methods in order to
        prepare a drive for replacement: offline -> missing -> spun down.
        """
        self.set_offline()
        self.mark_missing()
        self.prepare_for_removal()
        self.reload()

    def load_from_text(self, input):
        """Populate fields from a block of '-PdList'/'-PdInfo' output lines.

        Lines look like 'Label: value'; branch on known prefixes and keep the
        text after the first ':'.  Branch order matters where one label is a
        prefix of another.
        """
        for line in input:
            if line.startswith('Enclosure Device ID'):
                offset = line.find(':')
                self.enclosure_id = int(line[offset+1:].strip())
            if line.startswith('Slot Number'):
                offset = line.find(':')
                self.slot_id = int(line[offset+1:].strip())
            elif line.startswith('Device Id'):
                offset = line.find(':')
                self.device_id = int(line[offset+1:].strip())
            elif line.startswith('Sequence Number'):
                offset = line.find(':')
                self.sequence_number = int(line[offset+1:].strip())
            elif line.startswith('Media Error Count'):
                offset = line.find(':')
                self.media_errors = int(line[offset+1:].strip())
            elif line.startswith('Other Error Count'):
                offset = line.find(':')
                self.other_errors = int(line[offset+1:].strip())
            elif line.startswith('Predictive Failure Count'):
                offset = line.find(':')
                self.predictive_failures = int(line[offset+1:].strip())
            elif line.startswith('Last Predictive Failure Event Seq Number'):
                offset = line.find(':')
                self.last_predictive_seq_number = int(line[offset+1:].strip())
            elif line.startswith('PD Type'):
                offset = line.find(':')
                self.pd_type = line[offset+1:].strip()
            elif line.startswith('Raw Size'):
                # e.g. 'Raw Size: 232.885 GB [0x1d1c5970 Sectors]' -> 232.885.
                # Assumes exactly 4 characters (' GB ') sit between the value
                # and the '[' -- TODO confirm for TB-sized drives.
                offset = line.find(':')
                delim = line.find('[') - 4
                self.raw_size = float(line[offset+1:delim].strip())
            elif line.startswith('Non Coerced Size'):
                offset = line.find(':')
                delim = line.find('[') - 4
                self.non_coerced_size = float(line[offset+1:delim].strip())
            elif line.startswith('Coerced Size'):
                offset = line.find(':')
                delim = line.find('[') - 4
                self.coerced_size = float(line[offset+1:delim].strip())
            elif line.startswith('Firmware state'):
                offset = line.find(':')
                self.firmware_state = line[offset+1:].strip()
            elif line.startswith('SAS Address'):
                offset = line.find(':')
                self.sas_address = line[offset+1:].strip()
            elif line.startswith('Connected Port Number'):
                offset = line.find(':')
                self.connected_port_number = line[offset+1:].strip()
            elif line.startswith('Inquiry Data'):
                offset = line.find(':')
                self.inquiry_data = line[offset+1:].strip()
            elif line.startswith('FDE Capable'):
                offset = line.find(':')
                self.fde_capable = line[offset+1:].strip()
            elif line.startswith('FDE Enable'):
                offset = line.find(':')
                self.fde_enable = line[offset+1:].strip()
            elif line.startswith('Secured'):
                offset = line.find(':')
                self.secured = line[offset+1:].strip()
            elif line.startswith('Locked'):
                offset = line.find(':')
                self.locked = line[offset+1:].strip()
            elif line.startswith('Foreign State'):
                offset = line.find(':')
                self.foreign_state = line[offset+1:].strip()
            elif line.startswith('Device Speed'):
                offset = line.find(':')
                self.device_speed = line[offset+1:].strip()
            elif line.startswith('Link Speed'):
                offset = line.find(':')
                self.link_speed = line[offset+1:].strip()
            elif line.startswith('Media Type'):
                offset = line.find(':')
                self.media_type = line[offset+1:].strip()

    def reload(self):
        """Re-read this drive's state via 'MegaCli -PdInfo' and re-parse it.

        Returns [] when MegaCli could not be executed.
        """
        try:
            ret = megacli('-PdInfo -PhysDrv[%i:%i] -a%i' % (self.enclosure_id, \
                self.slot_id, self.adapter_id))
        except OSError:
            print 'Failed to get physical device information (MegaCli -PdInfo \
-PhysDrv[%i:%i] -a%i)' % (self.enclosure_id, self.slot_id, \
                self.adapter_id)
            return []
        #self.adapter_id = adapter_id
        ret_lines = ret.readlines()
        self.load_from_text(ret_lines)

    def json(self):
        """Serialise all attributes as a JSON object string."""
        return json.dumps(self.__dict__)

    def __getitem__(self, item):
        # Dict-style access to attributes.
        return self.__dict__[item]

    def __str__(self):
        # Human-readable dump; field order mirrors MegaCli's own output.
        ret = """Adapter ID: %s
Enclosure Device ID: %s
Slot Number: %s
Device Id: %s
Sequence Number: %s
Media Error Count: %s
Other Error Count: %s
Predictive Failure Count: %s
Last Predictive Failure Event Seq Number: %s
PD Type: %s
Raw Size: %s
Non Coerced Size: %s
Coerced Size: %s
Firmware state: %s
SAS Address(0): %s
Connected Port Number: %s
Inquiry Data: %s
FDE Capable: %s
FDE Enable: %s
Secured: %s
Locked: %s
Foreign State: %s
Device Speed: %s
Link Speed: %s
Media Type: %s""" % (self.adapter_id, self.enclosure_id, self.slot_id, self.device_id, \
            self.sequence_number, self.media_errors, self.other_errors, \
            self.predictive_failures, \
            self.last_predictive_seq_number, \
            self.pd_type, self.raw_size, self.non_coerced_size, \
            self.coerced_size, \
            self.firmware_state, self.sas_address, self.connected_port_number, \
            self.inquiry_data, self.fde_capable, self.fde_enable, \
            self.secured, self.locked, self.foreign_state, self.device_speed, \
            self.link_speed, self.media_type)
        return ret
class VirtualDrive:
    """One logical/virtual drive (LD) on an adapter, populated from 'MegaCli -LdInfo'."""

    def __init__(self):
        self.virtualdisk_id = 0
        # Set externally by vd_list() after construction.
        self.adapter_id = 0
        self.name = ''
        # Raw MegaCli description, e.g. 'Primary-5, Secondary-0, ...';
        # decode_raid_level() maps it to/from the numeric level.
        self.raid_level = ''
        # Size in GB as a string (unit stripped in load_from_text).
        self.size = ''
        self.state = ''
        # Stripe size in KB as a string.
        self.stripe_size = ''
        self.number_of_drives = 0
        self.span_depth = 0
        self.default_cache_policy = ''
        self.current_cache_policy = ''
        self.access_policy = ''
        self.disk_cache_policy = ''
        self.encryption = ''

    def convert_raid_level(self, raid_level):
        """Start an online RAID-level migration (LDRecon) to *raid_level* (0, 1 or 5)."""
        if raid_level in [0,1,5]:
            args = '-LDRecon -Start -r%i -L%i -a%i' % (raid_level, self.virtualdisk_id, self.adapter_id)
        else:
            # Raises this module's NotImplemented exception class (which
            # shadows the builtin of the same name).
            raise NotImplemented('RAID %i is not yet implemented. Currently supported raid levels are 0,1,5' % raid_level)
        megacli(args)
        self.reload()

    def set_wb(self):
        """Set the LD's write policy to WriteBack."""
        args = '-LDSetProp WB -L%i -a%i -NoLog' % (self.virtualdisk_id, self.adapter_id)
        megacli(args)
        self.reload()

    def setNoCacheBadBBU(self):
        """Disable write caching while the BBU is bad (NoCacheBadBBU property)."""
        args = '-LDSetProp NoCacheBadBBU -L%i -a%i' % (self.virtualdisk_id, self.adapter_id)
        megacli(args)
        self.reload()

    def destroy(self):
        """Delete this LD's configuration (CfgLdDel) -- destructive."""
        args = '-CfgLdDel -L%i -a%i' % (self.virtualdisk_id, self.adapter_id)
        megacli(args)
        self.reload()

    def view_rebuild_progress(self):
        """Query reconstruction progress for this LD.

        NOTE(review): MegaCli's output goes to the discarded stdout pipe, and
        'ShowProg L%i' looks like it is missing the leading dashes
        ('-ShowProg -L%i') -- TODO confirm the intended MegaCli syntax.
        """
        args = '-LDRecon ShowProg L%i -a%i' % (self.virtualdisk_id, self.adapter_id)
        megacli(args)
        self.reload()

    def extend(self, drives):
        """Reconstruct the LD, adding *drives* (a list of 'E:S' strings)."""
        if not type(drives) == types.ListType:
            raise TypeError("drives must be in list format ['E:S', 'E:S', '...']")
        _raid_level = decode_raid_level(self.raid_level)
        drive_syntax = re.compile(r'^[0-9]+:[0-9]+$')
        for drive in drives:
            if not re.match(drive_syntax, drive):
                raise AttributeError('Invalid drive format. E:S required')
            _enclosure = drive.split(':')[0]
            _slot = drive.split(':')[-1]
            # NOTE(review): _enclosure/_slot are strings while p.enclosure_id
            # and p.slot_id are ints, so the equality below can never hold;
            # and the raise sits INSIDE the loop, so it fires on the first
            # non-matching device.  A for/else with int() conversions looks
            # like the intent (same pattern as createvd()) -- TODO confirm.
            for p in pd_list(self.adapter_id):
                if _enclosure == p.enclosure_id and _slot == p.slot_id and p.firmware_state.startswith('Unconfigured'):
                    break
                raise DriveError('Drive %s is either already in use or does not exist' % drive)
        args = '-LDRecon -Start -r%i -Add -PhysDrv[%s] -L%i -a%i' % (_raid_level, ','.join(drives), self.virtualdisk_id, self.adapter_id)
        megacli(args)
        self.reload()

    def load_from_text(self, input):
        """Populate fields from a block of '-LdInfo' output lines."""
        for line in input:
            if line.startswith('Virtual Drive'):
                # Header like 'Virtual Drive: 0 (Target Id: 0)' -- the id sits
                # between the ':' and the '('.
                delim = line.find('(')
                offset = line.find(':')
                self.virtualdisk_id = int(line[offset+1:delim].strip())
            if line.startswith('Name'):
                offset = line.find(':')
                self.name = line[offset+1:].strip()
            elif line.startswith('RAID Level'):
                offset = line.find(':')
                self.raid_level = line[offset+1:].strip()
            elif line.startswith('Size'):
                # 'Size: 1.818 TB' style lines -- strip the ' GB' suffix.
                delim = line.find(' GB')
                offset = line.find(':')
                self.size = line[offset+1:delim].strip()
            elif line.startswith('State'):
                offset = line.find(':')
                self.state = line[offset+1:].strip()
            elif line.startswith('Strip Size'):
                delim = line.find(' KB')
                offset = line.find(':')
                self.stripe_size = line[offset+1:delim].strip()
            elif line.startswith('Number Of Drives'):
                offset = line.find(':')
                self.number_of_drives = int(line[offset+1:].strip())
            elif line.startswith('Span Depth'):
                offset = line.find(':')
                self.span_depth = int(line[offset+1:].strip())
            elif line.startswith('Default Cache Policy'):
                offset = line.find(':')
                self.default_cache_policy = line[offset+1:].strip()
            elif line.startswith('Current Cache Policy'):
                offset = line.find(':')
                self.current_cache_policy = line[offset+1:].strip()
            elif line.startswith('Current Access Policy'):
                offset = line.find(':')
                self.access_policy = line[offset+1:].strip()
            elif line.startswith('Disk Cache Policy'):
                offset = line.find(':')
                self.disk_cache_policy = line[offset+1:].strip()
            elif line.startswith('Encryption'):
                offset = line.find(':')
                self.encryption = line[offset+1:].strip()

    def reload(self):
        """Re-read this LD's state via 'MegaCli -LdInfo' and re-parse it.

        Returns [] when MegaCli could not be executed.
        """
        try:
            ret = megacli('-LdInfo -L%i -a%i' % (self.virtualdisk_id, self.adapter_id))
        except OSError:
            print 'Failed to get Virtual Drive information (MegaCli -LdInfo -L%i -a%i' % (self.virtualdisk_id, self.adapter_id)
            return []
        #self.adapter_id = adapter_id
        ret_lines = ret.readlines()
        self.load_from_text(ret_lines)

    def json(self):
        """Serialise all attributes as a JSON object string."""
        return json.dumps(self.__dict__)

    def __getitem__(self, item):
        # Dict-style access to attributes.
        return self.__dict__[item]

    def __str__(self):
        # Human-readable dump; field order mirrors MegaCli's own output.
        ret = """Virtual Drive: %d
Name: %s
RAID Level: %s
Size: %s
State: %s
Strip Size: %s
Number Of Drives: %d
Span Depth: %d
Default Cache Policy: %s
Current Cache Policy: %s
Access Policy: %s
Disk Cache Policy: %s
Encryption: %s""" % (self.virtualdisk_id, self.name, self.raid_level, \
            self.size, self.state, self.stripe_size, self.number_of_drives, \
            self.span_depth, self.default_cache_policy, self.current_cache_policy, \
            self.access_policy, self.disk_cache_policy, self.encryption)
        return ret
class BBU():
    """Battery backup unit status, populated from 'MegaCli -AdpBbuCmd -GetBbuStatus'.

    NOTE(review): reload() reads self.adapter_id but __init__ never sets it;
    bbu_list() attaches the attribute after construction -- TODO initialise
    it here for safety.
    """

    def __init__(self):
        self.battery_type = ''
        # Electrical readings (strings, with units, as printed by MegaCli).
        self.voltage = ''
        self.current = ''
        self.temperature = ''
        self.state = ''
        self.charging_status = ''
        self.voltage_status = ''
        # Learn-cycle and health flags.
        self.learn_cycle_requested = False
        self.learn_cycle_active = False
        self.learn_cycle_status = ''
        self.learn_cycle_timeout = False
        self.i2c_errors_detected = False
        self.battery_pack_missing = False
        self.battery_replacement_required = False
        self.remaining_capacity_low = False
        self.periodic_learn_required = False
        self.transparent_learn = False
        self.about_to_fail = False
        self.microcode_update_required = False
        # Gas-gauge / charger details.
        self.gas_gauge_status = ''
        self.relative_charge = ''
        self.charger_system_state = 0
        self.charger_system_ctrl = 0
        self.charging_current = ''
        self.absolute_charge = ''
        self.max_error = ''

    def load_from_text(self, input):
        """Populate fields from a block of '-GetBbuStatus' output lines.

        MegaCli indents nested status fields with two spaces; the leading
        whitespace in the prefixes below distinguishes e.g. the top-level
        'Voltage:' reading from the indented '  Voltage' status flag, so the
        branch order and exact prefixes are significant.
        """
        for line in input:
            if line.startswith('BatteryType'):
                offset = line.find(':')
                self.battery_type = line[offset+1:].strip()
            if line.startswith('Voltage:'):
                offset = line.find(':')
                self.voltage = line[offset+1:].strip()
            elif line.startswith('Current:'):
                offset = line.find(':')
                self.current = line[offset+1:].strip()
            elif line.startswith('Temperature'):
                offset = line.find(':')
                self.temperature = line[offset+1:].strip()
            elif line.startswith('Battery State'):
                offset = line.find(':')
                self.state = line[offset+1:].strip()
            elif line.startswith('  Charging Status'):
                offset = line.find(':')
                self.charging_status = line[offset+1:].strip()
            elif line.startswith('  Voltage'):
                offset = line.find(':')
                self.voltage_status = line[offset+1:].strip()
            elif line.startswith('  Learn Cycle Requested'):
                offset = line.find(':')
                self.learn_cycle_requested = str2bool(line[offset+1:].strip())
            elif line.startswith('  Learn Cycle Active'):
                offset = line.find(':')
                self.learn_cycle_active = str2bool(line[offset+1:].strip())
            elif line.startswith('  Learn Cycle Status'):
                offset = line.find(':')
                self.learn_cycle_status = line[offset+1:].strip()
            elif line.startswith('  Learn Cycle Timeout'):
                offset = line.find(':')
                self.learn_cycle_timeout = str2bool(line[offset+1:].strip())
            elif line.startswith('  I2c Errors Detected'):
                offset = line.find(':')
                self.i2c_errors_detected = str2bool(line[offset+1:].strip())
            elif line.startswith('  Battery Pack Missing'):
                offset = line.find(':')
                self.battery_pack_missing = str2bool(line[offset+1:].strip())
            elif line.startswith('  Battery Replacement required'):
                offset = line.find(':')
                self.battery_replacement_required = str2bool(line[offset+1:].strip())
            elif line.startswith('  Remaining Capacity Low'):
                offset = line.find(':')
                self.remaining_capacity_low = str2bool(line[offset+1:].strip())
            elif line.startswith('  Periodic Learn Required'):
                offset = line.find(':')
                self.periodic_learn_required = str2bool(line[offset+1:].strip())
            elif line.startswith('  Transparent Learn'):
                offset = line.find(':')
                self.transparent_learn = str2bool(line[offset+1:].strip())
            elif line.startswith('  Pack is about to fail'):
                offset = line.find(':')
                self.about_to_fail = str2bool(line[offset+1:].strip())
            elif line.startswith('  Module microcode update required'):
                offset = line.find(':')
                self.microcode_update_required = str2bool(line[offset+1:].strip())
            elif line.startswith('BBU GasGauge Status'):
                offset = line.find(':')
                self.gas_gauge_status = line[offset+1:].strip()
            elif line.startswith('  Relative State of Charge'):
                offset = line.find(':')
                self.relative_charge = line[offset+1:].strip()
            elif line.startswith('  Charger System State'):
                offset = line.find(':')
                self.charger_system_state = int(line[offset+1:].strip())
            elif line.startswith('  Charger System Ctrl'):
                offset = line.find(':')
                self.charger_system_ctrl = int(line[offset+1:].strip())
            elif line.startswith('  Charging current'):
                offset = line.find(':')
                self.charging_current = line[offset+1:].strip()
            elif line.startswith('  Absolute state of charge'):
                offset = line.find(':')
                self.absolute_charge = line[offset+1:].strip()
            elif line.startswith('  Max Error'):
                offset = line.find(':')
                self.max_error = line[offset+1:].strip()

    def reload(self):
        """Re-read the BBU status via 'MegaCli -AdpBbuCmd' and re-parse it.

        Returns [] when MegaCli could not be executed.
        """
        try:
            ret = megacli('-AdpBbuCmd -GetBbuStatus -a%i' % self.adapter_id)
        except OSError:
            print 'Failed to get BBU information (MegaCli --AdpBbuCmd -GetBbuStatus -a%i)' % self.adapter_id
            return []
        #self.adapter_id = adapter_id
        ret_lines = ret.readlines()
        self.load_from_text(ret_lines)

    def json(self):
        """Serialise all attributes as a JSON object string."""
        return json.dumps(self.__dict__)

    def __getitem__(self, item):
        # Dict-style access to attributes.
        return self.__dict__[item]

    def __str__(self):
        # Human-readable dump; field order mirrors MegaCli's own output.
        ret = """Battery Type: %s
Voltage: %s
Current: %s
Temperature: %s
State: %s
Charging Status: %s
Voltage status: %s
Learn Cycle Requested: %s
Learn Cycle Active: %s
Learn Cycle Status: %s
Learn Cycle Timeout: %s
I2C Errors Detected: %s
Battery Pack Missing: %s
Battery Replacement Required: %s
Remaining Capacity Low: %s
Periodic Learn Required: %s
Transparent Learn: %s
About to Fail: %s
Microcode Update Required: %s
Gas Gauge Status: %s
Relative Charge: %s
Charger State: %s
Charger Ctrl: %s
Charge Current: %s
Absolute Charge: %s
Max Error: %s
""" % (self.battery_type, self.voltage, self.current, \
            self.temperature, self.state, self.charging_status, self.voltage_status, \
            self.learn_cycle_requested, self.learn_cycle_active, self.learn_cycle_status, \
            self.learn_cycle_timeout, self.i2c_errors_detected, self.battery_pack_missing, \
            self.battery_replacement_required, self.remaining_capacity_low, \
            self.periodic_learn_required, self.transparent_learn, self.about_to_fail, \
            self.microcode_update_required, self.gas_gauge_status, self.relative_charge, \
            self.charger_system_state, self.charger_system_ctrl, self.charging_current, \
            self.absolute_charge, self.max_error)
        return ret
def adp_list():
    """Return a list of Adapter objects, one per controller MegaCli reports."""
    try:
        ret = megacli('-AdpCount -NoLog')
    except OSError:
        print 'Failed to get adapter count (MegaCli -AdpCount)'
        return []
    adp_count = 0
    for line in ret.readlines():
        if line.startswith('Controller Count'):
            offset = line.find(':')
            # The value is printed like 'Controller Count : 2.' -- drop the dot.
            adp_count = int(line[offset+1:].replace('.','').strip())
    # NOTE: this local list shadows the function's own name inside the body.
    adp_list = []
    adp = Adapter()
    for adp_id in range(0, adp_count):
        adp.load(adp_id)
        adp_list.append(adp)
        # Fresh instance so the next iteration does not mutate the stored one.
        adp = Adapter()
    return adp_list
def enc_list(adapter_id):
    """Return a list of Enclosure objects for the given adapter."""
    try:
        ret = megacli('-EncInfo -a%i' % adapter_id)
    except OSError:
        print 'Failed to get enclosure information (MegaCli -EncInfo)'
        return []
    ret_lines = ret.readlines()
    enc_list = []
    enc = Enclosure()
    enc.adapter_id = adapter_id
    # Go through all lines looking for the Enclosure identifier line
    start_matcher = re.compile(r'.*Enclosure\ [0-9]+:.*')
    for line in range(0, len(ret_lines)):
        if re.match(start_matcher, ret_lines[line]):
            # Feed the enclosure's block of text to the Enclosure instance.
            # The 23-line window assumes MegaCli's fixed per-enclosure layout
            # -- TODO confirm against other MegaCli versions.
            enc.load_from_text(ret_lines[line:line+23])
            # Add Enclosure to the enc_list and create new instance
            enc_list.append(enc)
            enc = Enclosure()
            enc.adapter_id = adapter_id
    return enc_list
def pd_list(adapter_id):
    """Return a list of PhysicalDevice objects for the given adapter."""
    try:
        ret = megacli('-PdList -a%i' % adapter_id)
    except OSError:
        print 'Failed to get physical device information (MegaCli -PdList)'
        return []
    ret_lines = ret.readlines()
    pd_list = []
    pd = PhysicalDevice()
    pd.adapter_id = adapter_id
    # Go through all lines looking for the first line in the disk info
    for line in range(0, len(ret_lines)):
        if ret_lines[line].startswith('Enclosure Device ID'):
            # Feed disk info to the PhysicalDevice object.  The 46-line
            # window assumes MegaCli's fixed per-drive layout -- TODO confirm
            # against other MegaCli versions.
            pd.load_from_text(ret_lines[line:line+46])
            # Add PhysicalDevice to the pd_list and reset it
            pd_list.append(pd)
            pd = PhysicalDevice()
            pd.adapter_id = adapter_id
    return pd_list
def vd_list(adapter_id):
    """Return a list of VirtualDrive objects for the given adapter."""
    try:
        ret = megacli('-LdInfo -Lall -a%i' % adapter_id)
    except OSError:
        print 'Failed to get virtual drive information (MegaCli -LDInfo -Lall)'
        return []
    ret_lines = ret.readlines()
    vd_list = []
    vd = VirtualDrive()
    vd.adapter_id = adapter_id
    # Go through all lines looking for the Virtual Drive line
    for line in range(0, len(ret_lines)):
        if ret_lines[line].startswith('Virtual Drive'):
            # Feed the virtual drive's block of text to the VirtualDrive
            # object.  The 15-line window assumes MegaCli's fixed per-LD
            # layout -- TODO confirm against other MegaCli versions.
            vd.load_from_text(ret_lines[line:line+15])
            # Add VirtualDrive to the vd_list and create a new one
            vd_list.append(vd)
            vd = VirtualDrive()
            vd.adapter_id = adapter_id
    return vd_list
def bbu_list(adapter_id):
    """Return a single-element list holding the adapter's BBU status object."""
    try:
        ret = megacli('-AdpBbuCmd -GetBbuStatus -a%i' % adapter_id)
    except OSError:
        print 'Failed to retrieve BBU information (MegaCli64 -AdpBbuCmd -GetBbuStatus -a%i)' % adapter_id
        return []
    ret_lines = ret.readlines()
    bbu_list = []
    bbu = BBU()
    # BBU.__init__ does not define adapter_id; it is attached here so that
    # BBU.reload() can re-query the right adapter.
    bbu.adapter_id = adapter_id
    bbu.load_from_text(ret_lines)
    bbu_list.append(bbu)
    return bbu_list
def createvd(raid_level, drives, adapter):
if not type(drives) == types.ListType:
raise TypeError("drives must be in list format ['E:S', 'E:S', '...']")
if not adapter in [a.device_id for a in adp_list()]:
raise AttributeError('Invalid adapter: %s does not exist' % adapter)
drive_syntax = re.compile(r'^[0-9]+:[0-9]+$')
for drive in drives:
if not re.match(drive_syntax, drive):
raise AttributeError('Invalid drive format. E:S required')
_enclosure = drive.split(':')[0]
_slot = drive.split(':')[-1]
for p in pd_list(adapter):
if _enclosure == p.enclosure_id and _slot == p.slot_id and p.firmware_state.startswith('Unconfigured'):
break
raise DriveError('Drive %s is either already in use or does not exist' % drive)
if raid_level in [0,1,5]:
args = '-CfgLdAdd -r%i [%s] -a%i' % (raid_level, ','.join(drives), adapter)
else:
print 'try manually running: MegaCli64 -CfgSpanAdd -r%i -Array0[E:S,E:S] -Array1[E:S,E:S] -a%i' % (self.raid_level, self.adapter_id)
raise NotImplemented('RAID %i is not yet implemented. Currently supported raid levels are 0,1,5' % raid_level)
megacli(args)
def pd_list_unconfigured(drives=None):
    """Return ['E:S', ...] for every unconfigured physical drive.

    drives -- optional list of PhysicalDevice instances to filter; when
              omitted, all drives on all adapters are examined.
    """
    if drives:
        if not type(drives) == types.ListType:
            raise TypeError('pd_list_unconfigured method requires argument in form of list of PhysicalDevice instances')
        for drive in drives:
            if not type(drive) == types.InstanceType:
                raise TypeError('pd_list_unconfigured method requires argument in form of list of PhysicalDevice instances')
    else:
        # BUGFIX: the comprehension clauses were in the wrong order
        # ('pd_list(i)' evaluated before 'i' was bound -> NameError).
        drives = [ x for i in range(0, len(adp_list())) for x in pd_list(i) ]
    # BUGFIX: PhysicalDevice has no is_unconfigured() method (AttributeError).
    # is_configured() is the method that -- despite its name -- returns True
    # for 'Unconfigured ... good' firmware states, which is what we want here.
    # returns in form of ['E:S', '...']
    unconfigured = [ '%i:%i' % (p.enclosure_id, p.slot_id) for p in drives if p.is_configured() ]
    return unconfigured
def pd_list_configured():
    """Return ['E:S', ...] for every configured physical drive on all adapters."""
    # BUGFIX: the comprehension clauses were in the wrong order
    # ('pd_list(i)' evaluated before 'i' was bound -> NameError).
    drives = [ x for i in range(0, len(adp_list())) for x in pd_list(i) ]
    # is_configured() actually matches UNconfigured(good) drives (see its
    # implementation), so negating it keeps configured ones.
    configured = [ '%i:%i' % (p.enclosure_id, p.slot_id) for p in drives if not p.is_configured() ]
    return configured
def str2bool(value):
    """Map a MegaCli status word to a boolean.

    'Present', 'OK' and 'Yes' (surrounding whitespace ignored) are True;
    anything else is False.  The parameter was renamed from 'str', which
    shadowed the builtin.
    """
    if value.strip() in ['Present', 'OK', 'Yes']:
        return True
    else:
        return False
def megacli(args):
    """Run the MegaCli binary with *args* and return its stdout file object.

    Raises OSError if the shell itself cannot be spawned; MegaCli's own
    exit status is not checked.
    """
    command = '%s %s' % (MEGACLI, args)
    process = Popen(command, shell=True, stdout=PIPE)
    return process.stdout
def decode_raid_level(description):
    """Translate between MegaCli RAID descriptions and numeric levels.

    Accepts either a numeric level (0, 1, 5, 6, 10) or the corresponding
    'Primary-..., Secondary-..., RAID Level Qualifier-...' string and returns
    the other representation.  Raises KeyError for unknown inputs.
    """
    pairs = (
        (0, 'Primary-0, Secondary-0, RAID Level Qualifier-0'),
        (1, 'Primary-1, Secondary-0, RAID Level Qualifier-0'),
        (5, 'Primary-5, Secondary-0, RAID Level Qualifier-3'),
        (6, 'Primary-6, Secondary-0, RAID Level Qualifier-3'),
        (10, 'Primary-1, Secondary-3, RAID Level Qualifier-0'),
    )
    # Build a single bidirectional lookup table.
    mapping = {}
    for level, text in pairs:
        mapping[level] = text
        mapping[text] = level
    return mapping[description]
class DriveError(Exception):
    """Raised when a requested physical drive is in use or does not exist."""

    def __init__(self, message):
        # Keep the text on the instance so callers can inspect it.
        self.message = message

    def __str__(self):
        # Render with repr() so the message is quoted, matching historic output.
        return '%r' % (self.message,)
class NotImplemented(Exception):
    """Raised for RAID levels this wrapper does not support.

    NOTE(review): the class name shadows Python's builtin NotImplemented
    singleton inside this module; renaming would break existing raisers, so
    the name is kept as-is.
    """

    def __init__(self, message):
        # Keep the text on the instance so callers can inspect it.
        self.message = message

    def __str__(self):
        # Render with repr() so the message is quoted, matching historic output.
        return '%r' % (self.message,)
| |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Saves and restore variables inside traced @tf.functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import uuid
from tensorflow.core.protobuf import saver_pb2
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_spec
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_io_ops
from tensorflow.python.ops import io_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.training.saving import checkpoint_options
from tensorflow.python.training.saving import saveable_hook
from tensorflow.python.training.saving import saveable_object
from tensorflow.python.training.saving import saveable_object_util
from tensorflow.python.util import nest
class _SingleDeviceSaver(object):
  """Saves and restores checkpoints from the current device."""

  def __init__(self, saveable_objects):
    """Specify a list of `SaveableObject`s to save and restore.

    Args:
      saveable_objects: A list of `SaveableObject`s.

    Raises:
      ValueError: if any entry is not a `SaveableObject`.
    """
    saveables = list(saveable_objects)
    for candidate in saveables:
      if not isinstance(candidate, saveable_object.SaveableObject):
        raise ValueError(
            "Expected a list of SaveableObjects, got %s." % (candidate,))
    self._saveable_objects = saveables

  def save(self, file_prefix, options=None):
    """Save the saveable objects to a checkpoint with `file_prefix`.

    Args:
      file_prefix: A string or scalar string Tensor containing the prefix to
        save under.
      options: Optional `CheckpointOptions` object.

    Returns:
      An `Operation`, or None when executing eagerly.
    """
    options = options or checkpoint_options.CheckpointOptions()
    names, slices, values = [], [], []
    for saveable in self._saveable_objects:
      for spec in saveable.specs:
        names.append(spec.name)
        slices.append(spec.slice_spec)
        values.append(spec.tensor)
    # Write from the configured I/O device, defaulting to the CPU.
    with ops.device(options.experimental_io_device or "cpu:0"):
      return io_ops.save_v2(file_prefix, names, slices, values)

  def restore(self, file_prefix, options=None):
    """Restore the saveable objects from a checkpoint with `file_prefix`.

    Args:
      file_prefix: A string or scalar string Tensor containing the prefix for
        files to read from.
      options: Optional `CheckpointOptions` object.

    Returns:
      A dictionary mapping from SaveableObject names to restore operations.
    """
    options = options or checkpoint_options.CheckpointOptions()
    # Nested name structure used to regroup the flat RestoreV2 results
    # per saveable, plus flat (name, slice, dtype) triples for the op.
    tensor_structure = [[spec.name for spec in saveable.specs]
                        for saveable in self._saveable_objects]
    restore_specs = [(spec.name, spec.slice_spec, spec.dtype)
                     for saveable in self._saveable_objects
                     for spec in saveable.specs]
    names, slices, spec_dtypes = zip(*restore_specs)
    # Read on the configured I/O device, defaulting to the CPU.
    with ops.device(options.experimental_io_device or "cpu:0"):
      flat_tensors = io_ops.restore_v2(
          file_prefix, names, slices, spec_dtypes)
    grouped_tensors = nest.pack_sequence_as(tensor_structure, flat_tensors)
    return {
        saveable.name: saveable.restore(tensors, restored_shapes=None)
        for saveable, tensors in zip(self._saveable_objects, grouped_tensors)
    }
def sharded_filename(filename_tensor, shard, num_shards):
  """Append sharding information to a filename.
  Args:
    filename_tensor: A string tensor.
    shard: Integer. The shard for the filename.
    num_shards: An int Tensor for the number of shards.
  Returns:
    A string tensor.
  """
  # Thin wrapper over the ShardedFilename op; shard names follow the
  # "part-?????-of-?????" pattern referenced in MultiDeviceSaver.save.
  return gen_io_ops.sharded_filename(filename_tensor, shard, num_shards)
class MultiDeviceSaver(object):
  """Saves checkpoints directly from multiple devices.
  Note that this is a low-level utility which stores Tensors in the keys
  specified by `SaveableObject`s. Higher-level utilities for object-based
  checkpointing are built on top of it.
  """
  def __init__(self, saveable_objects):
    """Specify a list of `SaveableObject`s to save and restore.
    Args:
      saveable_objects: A list of `SaveableObject`s.
        Objects extending `SaveableObject` will be saved and restored, and
        objects extending `SaveableHook` will be called into at save and
        restore time.
    Raises:
      ValueError: if an entry is neither a SaveableObject nor a SaveableHook.
    """
    self._before_save_callbacks = []
    self._after_restore_callbacks = []
    saveable_objects = list(saveable_objects)
    # Group saveables by owning device; each group gets its own
    # _SingleDeviceSaver and produces one shard of the checkpoint.
    saveables_by_device = {}
    for saveable in saveable_objects:
      is_saveable = isinstance(saveable, saveable_object.SaveableObject)
      is_hook = isinstance(saveable, saveable_hook.SaveableHook)
      if not is_saveable and not is_hook:
        raise ValueError(
            "Expected a dictionary of SaveableObjects, got {}."
            .format(saveable))
      if is_hook:
        # Hooks contribute callbacks only; they hold no tensors themselves.
        self._before_save_callbacks.append(saveable.before_save)
        self._after_restore_callbacks.append(saveable.after_restore)
      if is_saveable:
        saveables_by_device.setdefault(saveable.device, []).append(saveable)
    self._single_device_savers = {
        device: _SingleDeviceSaver(saveables)
        for device, saveables in saveables_by_device.items()}
  def to_proto(self):
    """Serializes to a SaverDef referencing the current graph."""
    # Graph-mode entry point: a scalar string placeholder is wired through
    # the traced save/restore functions so the SaverDef can be driven by
    # feeding a checkpoint prefix.
    filename_tensor = array_ops.placeholder(
        shape=[], dtype=dtypes.string, name="saver_filename")
    save_tensor = self._traced_save(filename_tensor)
    restore_op = self._traced_restore(filename_tensor).op
    return saver_pb2.SaverDef(
        filename_tensor_name=filename_tensor.name,
        save_tensor_name=save_tensor.name,
        restore_op_name=restore_op.name,
        version=saver_pb2.SaverDef.V2)
  @def_function.function(
      input_signature=(tensor_spec.TensorSpec(shape=(), dtype=dtypes.string),),
      autograph=False)
  def _traced_save(self, file_prefix):
    # tf.function wrapper around save(); returns the prefix only after the
    # save op has run (enforced by the control dependency).
    save_op = self.save(file_prefix)
    with ops.device("cpu:0"):
      with ops.control_dependencies([save_op]):
        return array_ops.identity(file_prefix)
  @def_function.function(
      input_signature=(tensor_spec.TensorSpec(shape=(), dtype=dtypes.string),),
      autograph=False)
  def _traced_restore(self, file_prefix):
    # tf.function wrapper around restore(); returns the prefix only after
    # every restore op has run.
    restore_ops = self.restore(file_prefix)
    with ops.device("cpu:0"):
      with ops.control_dependencies(restore_ops.values()):
        return array_ops.identity(file_prefix)
  def save(self, file_prefix, options=None):
    """Save the saveable objects to a checkpoint with `file_prefix`.
    Args:
      file_prefix: A string or scalar string Tensor containing the prefix to
        save under.
      options: Optional `CheckpointOptions` object.
    Returns:
      An `Operation`, or None when executing eagerly.
    """
    options = options or checkpoint_options.CheckpointOptions()
    for callback in self._before_save_callbacks:
      callback()
    # IMPLEMENTATION DETAILS: most clients should skip.
    #
    # Suffix for any well-formed "checkpoint_prefix", when sharded.
    # Transformations:
    # * Users pass in "save_path" in save() and restore(). Say "myckpt".
    # * checkpoint_prefix gets fed <save_path><sharded_suffix>.
    #
    # Example:
    #   During runtime, a temporary directory is first created, which contains
    #   files
    #
    #     <train dir>/myckpt_temp/
    #        part-?????-of-?????{.index, .data-00000-of-00001}
    #
    #   Before .save() finishes, they will be (hopefully, atomically) renamed to
    #
    #     <train dir>/
    #        myckpt{.index, .data-?????-of-?????}
    #
    #   Filesystems with eventual consistency (such as S3), don't need a
    #   temporary location. Using a temporary directory in those cases might
    #   cause situations where files are not available during copy.
    #
    # Users only need to interact with the user-specified prefix, which is
    # "<train dir>/myckpt" in this case.  Save() and Restore() work with the
    # prefix directly, instead of any physical pathname.  (On failure and
    # subsequent restore, an outdated and orphaned temporary directory can be
    # safely removed.)
    with ops.device("CPU"):
      # S3 prefixes skip the temp-directory dance (see comment above); the
      # uuid makes the temp directory unique per traced save.
      sharded_suffix = array_ops.where(
          string_ops.regex_full_match(file_prefix, "^s3://.*"),
          constant_op.constant(".part"),
          constant_op.constant("_temp_%s/part" % uuid.uuid4().hex))
      tmp_checkpoint_prefix = string_ops.string_join(
          [file_prefix, sharded_suffix])
    num_shards = len(self._single_device_savers)
    sharded_saves = []
    sharded_prefixes = []
    num_shards_tensor = constant_op.constant(num_shards, name="num_shards")
    last_device = None
    # Sorted for deterministic shard numbering across runs.
    for shard, (device, saver) in enumerate(
        sorted(self._single_device_savers.items())):
      last_device = device
      with ops.device(saveable_object_util.set_cpu0(device)):
        shard_prefix = sharded_filename(tmp_checkpoint_prefix, shard,
                                        num_shards_tensor)
      sharded_prefixes.append(shard_prefix)
      with ops.device(device):
        # _SingleDeviceSaver will use the CPU device when necessary, but initial
        # read operations should be placed on the SaveableObject's device.
        sharded_saves.append(saver.save(shard_prefix, options))
    with ops.control_dependencies(sharded_saves):
      # Merge on the io_device if specified, otherwise co-locates the merge op
      # with the last device used.
      merge_device = (options.experimental_io_device or
                      saveable_object_util.set_cpu0(last_device))
      with ops.device(merge_device):
        # V2 format write path consists of a metadata merge step.  Once merged,
        # attempts to delete the temporary directory, "<user-fed prefix>_temp".
        return gen_io_ops.merge_v2_checkpoints(
            sharded_prefixes, file_prefix, delete_old_dirs=True)
  def restore(self, file_prefix, options=None):
    """Restore the saveable objects from a checkpoint with `file_prefix`.
    Args:
      file_prefix: A string or scalar string Tensor containing the prefix for
        files to read from.
      options: Optional `CheckpointOptions` object.
    Returns:
      A dictionary mapping from SaveableObject names to restore operations.
    """
    options = options or checkpoint_options.CheckpointOptions()
    restore_ops = {}
    # Sort by device name to avoid propagating non-deterministic dictionary
    # ordering in some Python versions.
    for device, saver in sorted(self._single_device_savers.items()):
      with ops.device(device):
        restore_ops.update(saver.restore(file_prefix, options))
    for callback in self._after_restore_callbacks:
      callback()
    return restore_ops
| |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test RangeDataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from tensorflow.contrib.data.python.ops import dataset_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_dataset_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
class RangeDatasetTest(test.TestCase):
  """Tests for `Dataset.range` and iterator checkpoint save/restore."""

  def tearDown(self):
    # Remove all checkpoint files written by the save/restore tests.
    prefix = self._iterator_checkpoint_prefix()
    pattern = prefix + "*"
    files = gfile.Glob(pattern)
    # BUG FIX: `map(gfile.Remove, files)` is lazy under Python 3 and would
    # delete nothing; iterate explicitly so cleanup actually happens.
    for checkpoint_file in files:
      gfile.Remove(checkpoint_file)

  def testStop(self):
    stop = array_ops.placeholder(dtypes.int64, shape=[])
    iterator = dataset_ops.Dataset.range(stop).make_initializable_iterator()
    init_op = iterator.initializer
    get_next = iterator.get_next()
    with self.test_session() as sess:
      sess.run(init_op, feed_dict={stop: 5})
      for i in range(5):
        self.assertEqual(i, sess.run(get_next))
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(get_next)

  def testStartStop(self):
    start = array_ops.placeholder(dtypes.int64, shape=[])
    stop = array_ops.placeholder(dtypes.int64, shape=[])
    iterator = dataset_ops.Dataset.range(start,
                                         stop).make_initializable_iterator()
    init_op = iterator.initializer
    get_next = iterator.get_next()
    with self.test_session() as sess:
      sess.run(init_op, feed_dict={start: 2, stop: 5})
      for i in range(2, 5):
        self.assertEqual(i, sess.run(get_next))
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(get_next)

  def testStartStopStep(self):
    start = array_ops.placeholder(dtypes.int64, shape=[])
    stop = array_ops.placeholder(dtypes.int64, shape=[])
    step = array_ops.placeholder(dtypes.int64, shape=[])
    iterator = dataset_ops.Dataset.range(start, stop,
                                         step).make_initializable_iterator()
    init_op = iterator.initializer
    get_next = iterator.get_next()
    with self.test_session() as sess:
      sess.run(init_op, feed_dict={start: 2, stop: 10, step: 2})
      for i in range(2, 10, 2):
        self.assertEqual(i, sess.run(get_next))
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(get_next)

  def testZeroStep(self):
    start = array_ops.placeholder(dtypes.int64, shape=[])
    stop = array_ops.placeholder(dtypes.int64, shape=[])
    step = array_ops.placeholder(dtypes.int64, shape=[])
    iterator = dataset_ops.Dataset.range(start, stop,
                                         step).make_initializable_iterator()
    init_op = iterator.initializer
    with self.test_session() as sess:
      # A zero step is rejected at initialization time.
      with self.assertRaises(errors.InvalidArgumentError):
        sess.run(init_op, feed_dict={start: 2, stop: 10, step: 0})

  def testNegativeStep(self):
    start = array_ops.placeholder(dtypes.int64, shape=[])
    stop = array_ops.placeholder(dtypes.int64, shape=[])
    step = array_ops.placeholder(dtypes.int64, shape=[])
    iterator = dataset_ops.Dataset.range(start, stop,
                                         step).make_initializable_iterator()
    init_op = iterator.initializer
    get_next = iterator.get_next()
    with self.test_session() as sess:
      sess.run(init_op, feed_dict={start: 2, stop: 10, step: -1})
      # This for loop is a no-op but will ensure that the implementation is
      # consistent with range if it ever changes.
      for i in range(2, 10, -1):
        self.assertEqual(i, sess.run(get_next))
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(get_next)

  def testStopLessThanStart(self):
    start = array_ops.placeholder(dtypes.int64, shape=[])
    stop = array_ops.placeholder(dtypes.int64, shape=[])
    iterator = dataset_ops.Dataset.range(start,
                                         stop).make_initializable_iterator()
    init_op = iterator.initializer
    get_next = iterator.get_next()
    with self.test_session() as sess:
      sess.run(init_op, feed_dict={start: 10, stop: 2})
      # This for loop is a no-op but will ensure that the implementation is
      # consistent with range if it ever changes.
      for i in range(10, 2):
        self.assertEqual(i, sess.run(get_next))
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(get_next)

  def testStopLessThanStartWithPositiveStep(self):
    start = array_ops.placeholder(dtypes.int64, shape=[])
    stop = array_ops.placeholder(dtypes.int64, shape=[])
    step = array_ops.placeholder(dtypes.int64, shape=[])
    iterator = dataset_ops.Dataset.range(start, stop,
                                         step).make_initializable_iterator()
    init_op = iterator.initializer
    get_next = iterator.get_next()
    with self.test_session() as sess:
      sess.run(init_op, feed_dict={start: 10, stop: 2, step: 2})
      # This for loop is a no-op but will ensure that the implementation is
      # consistent with range if it ever changes.
      for i in range(10, 2, 2):
        self.assertEqual(i, sess.run(get_next))
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(get_next)

  def testStopLessThanStartWithNegativeStep(self):
    start = array_ops.placeholder(dtypes.int64, shape=[])
    stop = array_ops.placeholder(dtypes.int64, shape=[])
    step = array_ops.placeholder(dtypes.int64, shape=[])
    iterator = dataset_ops.Dataset.range(start, stop,
                                         step).make_initializable_iterator()
    init_op = iterator.initializer
    get_next = iterator.get_next()
    with self.test_session() as sess:
      sess.run(init_op, feed_dict={start: 10, stop: 2, step: -1})
      for i in range(10, 2, -1):
        self.assertEqual(i, sess.run(get_next))
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(get_next)

  def testEnumerateDataset(self):
    """enumerate() pairs a running int64 index with each element."""
    components = (["a", "b"], [1, 2], [37.0, 38])
    start = constant_op.constant(20, dtype=dtypes.int64)
    iterator = (dataset_ops.Dataset.from_tensor_slices(components).enumerate(
        start=start).make_initializable_iterator())
    init_op = iterator.initializer
    get_next = iterator.get_next()
    self.assertEqual(dtypes.int64, get_next[0].dtype)
    self.assertEqual((), get_next[0].shape)
    self.assertEqual([tensor_shape.TensorShape([])] * 3,
                     [t.shape for t in get_next[1]])
    with self.test_session() as sess:
      sess.run(init_op)
      self.assertEqual((20, (b"a", 1, 37.0)), sess.run(get_next))
      self.assertEqual((21, (b"b", 2, 38.0)), sess.run(get_next))
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(get_next)

  def _iterator_checkpoint_prefix(self):
    # Common path prefix for iterator checkpoints; tearDown globs on it.
    return os.path.join(self.get_temp_dir(), "iterator")

  def testSaveRestore(self):
    """An iterator resumes from a checkpoint, across and within sessions."""

    def _build_graph(start, stop):
      iterator = dataset_ops.Dataset.range(start,
                                           stop).make_initializable_iterator()
      init_op = iterator.initializer
      get_next = iterator.get_next()
      path = self._iterator_checkpoint_prefix()
      save_op = gen_dataset_ops.save_iterator(iterator._iterator_resource, path)
      restore_op = gen_dataset_ops.restore_iterator(iterator._iterator_resource,
                                                    path)
      return init_op, get_next, save_op, restore_op

    # Saving and restoring in different sessions.
    start = 2
    stop = 10
    break_point = 5
    with ops.Graph().as_default() as g:
      init_op, get_next, save_op, _ = _build_graph(start, stop)
      with self.test_session(graph=g) as sess:
        sess.run(variables.global_variables_initializer())
        sess.run(init_op)
        for i in range(start, break_point):
          self.assertEqual(i, sess.run(get_next))
        sess.run(save_op)
    with ops.Graph().as_default() as g:
      init_op, get_next, _, restore_op = _build_graph(start, stop)
      with self.test_session(graph=g) as sess:
        sess.run(init_op)
        sess.run(restore_op)
        for i in range(break_point, stop):
          self.assertEqual(i, sess.run(get_next))
        with self.assertRaises(errors.OutOfRangeError):
          sess.run(get_next)
    # Saving and restoring in same session.
    with ops.Graph().as_default() as g:
      init_op, get_next, save_op, restore_op = _build_graph(start, stop)
      with self.test_session(graph=g) as sess:
        sess.run(variables.global_variables_initializer())
        sess.run(init_op)
        for i in range(start, break_point):
          self.assertEqual(i, sess.run(get_next))
        sess.run(save_op)
        sess.run(restore_op)
        for i in range(break_point, stop):
          self.assertEqual(i, sess.run(get_next))
        with self.assertRaises(errors.OutOfRangeError):
          sess.run(get_next)

  def testMultipleSaves(self):
    """A checkpoint can be overwritten and resumed from repeatedly."""

    def _build_graph(start, stop):
      iterator = dataset_ops.Dataset.range(start,
                                           stop).make_initializable_iterator()
      init_op = iterator.initializer
      get_next = iterator.get_next()
      path = self._iterator_checkpoint_prefix()
      save_op = gen_dataset_ops.save_iterator(iterator._iterator_resource, path)
      restore_op = gen_dataset_ops.restore_iterator(iterator._iterator_resource,
                                                    path)
      return init_op, get_next, save_op, restore_op

    start = 2
    stop = 10
    break_point1 = 5
    break_point2 = 7
    with ops.Graph().as_default() as g:
      init_op, get_next, save_op, _ = _build_graph(start, stop)
      with self.test_session(graph=g) as sess:
        sess.run(variables.global_variables_initializer())
        sess.run(init_op)
        for i in range(start, break_point1):
          self.assertEqual(i, sess.run(get_next))
        sess.run(save_op)
    with ops.Graph().as_default() as g:
      init_op, get_next, save_op, restore_op = _build_graph(start, stop)
      with self.test_session(graph=g) as sess:
        sess.run(init_op)
        sess.run(restore_op)
        for i in range(break_point1, break_point2):
          self.assertEqual(i, sess.run(get_next))
        sess.run(save_op)
    # (A redundant `break_point2 = 7` reassignment was removed here.)
    with ops.Graph().as_default() as g:
      init_op, get_next, save_op, restore_op = _build_graph(start, stop)
      with self.test_session(graph=g) as sess:
        sess.run(init_op)
        sess.run(restore_op)
        for i in range(break_point2, stop):
          self.assertEqual(i, sess.run(get_next))
        with self.assertRaises(errors.OutOfRangeError):
          sess.run(get_next)

  def testSaveRestoreWithRepeat(self):
    """Save/restore preserves position inside a repeated dataset."""

    def _build_graph(start, stop, num_epochs):
      iterator = dataset_ops.Dataset.range(
          start, stop).repeat(num_epochs).make_initializable_iterator()
      init_op = iterator.initializer
      get_next = iterator.get_next()
      path = self._iterator_checkpoint_prefix()
      save_op = gen_dataset_ops.save_iterator(iterator._iterator_resource, path)
      restore_op = gen_dataset_ops.restore_iterator(iterator._iterator_resource,
                                                    path)
      return init_op, get_next, save_op, restore_op

    start = 2
    stop = 10
    num_epochs = 5
    break_range = 5
    break_epoch = 3
    with ops.Graph().as_default() as g:
      init_op, get_next, save_op, restore_op = _build_graph(
          start, stop, num_epochs)
      with self.test_session(graph=g) as sess:
        sess.run(variables.global_variables_initializer())
        sess.run(init_op)
        # Note: There is no checkpoint saved currently so a NotFoundError is
        # raised.
        with self.assertRaises(errors.NotFoundError):
          sess.run(restore_op)
        for _ in range(break_epoch - 1):
          for i in range(start, stop):
            self.assertEqual(i, sess.run(get_next))
        for i in range(start, break_range):
          self.assertEqual(i, sess.run(get_next))
        sess.run(save_op)
    with ops.Graph().as_default() as g:
      init_op, get_next, _, restore_op = _build_graph(start, stop, num_epochs)
      with self.test_session(graph=g) as sess:
        sess.run(init_op)
        sess.run(restore_op)
        for i in range(break_range, stop):
          self.assertEqual(i, sess.run(get_next))
        for _ in range(break_epoch, num_epochs):
          for i in range(start, stop):
            self.assertEqual(i, sess.run(get_next))
        with self.assertRaises(errors.OutOfRangeError):
          sess.run(get_next)

  def testSaveRestoreExhaustedIterator(self):
    """A fully-consumed iterator restores as still exhausted."""

    def _build_graph(start, stop, num_epochs):
      iterator = dataset_ops.Dataset.range(
          start, stop).repeat(num_epochs).make_initializable_iterator()
      init_op = iterator.initializer
      get_next = iterator.get_next()
      path = self._iterator_checkpoint_prefix()
      save_op = gen_dataset_ops.save_iterator(iterator._iterator_resource, path)
      restore_op = gen_dataset_ops.restore_iterator(iterator._iterator_resource,
                                                    path)
      return init_op, get_next, save_op, restore_op

    start = 2
    stop = 10
    num_epochs = 5
    with ops.Graph().as_default() as g:
      init_op, get_next, save_op, restore_op = _build_graph(
          start, stop, num_epochs)
      with self.test_session(graph=g) as sess:
        sess.run(variables.global_variables_initializer())
        sess.run(init_op)
        # Note: There is no checkpoint saved currently so a NotFoundError is
        # raised.
        with self.assertRaises(errors.NotFoundError):
          sess.run(restore_op)
        for _ in range(num_epochs):
          for i in range(start, stop):
            self.assertEqual(i, sess.run(get_next))
        with self.assertRaises(errors.OutOfRangeError):
          sess.run(get_next)
        sess.run(save_op)
    with ops.Graph().as_default() as g:
      init_op, get_next, _, restore_op = _build_graph(start, stop, num_epochs)
      with self.test_session(graph=g) as sess:
        sess.run(init_op)
        sess.run(restore_op)
        with self.assertRaises(errors.OutOfRangeError):
          sess.run(get_next)
# Standard TensorFlow test entry point when run as a script.
if __name__ == "__main__":
  test.main()
| |
import ARM
from Goal import Goal
from GoalEnvironmentProperties import GoalEnvironmentProperties
from GoalParameters import GoalParameters
from CairisHTTPError import ObjectNotFoundHTTPError, MalformedJSONHTTPError, ARMHTTPError, MissingParameterHTTPError, OverwriteNotAllowedHTTPError
from alternative.KaosModel import KaosModel
from ValueType import ValueType
from ValueTypeParameters import ValueTypeParameters
from data.CairisDAO import CairisDAO
from tools.JsonConverter import json_serialize, json_deserialize
from tools.ModelDefinitions import GoalEnvironmentPropertiesModel, GoalModel
from tools.SessionValidator import check_required_keys, get_fonts
__author__ = 'Robin Quetin'
class GoalDAO(CairisDAO):
  """Data access object for goals, goal types and goal values.

  Mediates between the web API layer and the CAIRIS database proxy,
  translating ARM database exceptions into CairisHTTPError subclasses.
  """
  def __init__(self, session_id):
    CairisDAO.__init__(self, session_id)

  def get_goals(self, constraint_id=-1, coloured=False, simplify=True):
    """Fetch goals from the database.

    :param constraint_id: constraint filter forwarded to the proxy (-1 = all)
    :param coloured: when True, fetch colour-annotated goals
    :param simplify: when True, strip per-goal environment dictionaries
    :return: dictionary of goals, keyed by goal name (see get_goal_by_name)
    """
    try:
      if coloured:
        goals = self.db_proxy.getColouredGoals(constraint_id)
      else:
        goals = self.db_proxy.getGoals(constraint_id)
    except ARM.DatabaseProxyException as ex:
      self.close()
      raise ARMHTTPError(ex)
    if simplify:
      for key, value in goals.items():
        goals[key] = self.simplify(value)
    return goals

  def get_goal_by_name(self, name, coloured=False, simplify=True):
    """Return the goal named *name*.

    :raise ObjectNotFoundHTTPError: if no such goal exists
    """
    found_goal = None
    goals = self.get_goals(coloured=coloured, simplify=False)
    if goals is not None:
      found_goal = goals.get(name)
    if found_goal is None:
      self.close()
      raise ObjectNotFoundHTTPError('The provided goal name')
    if simplify:
      found_goal = self.simplify(found_goal)
    return found_goal

  def add_goal(self, goal):
    """Persist a new goal.

    :raise OverwriteNotAllowedHTTPError: if a goal with that name exists
    :return: the database id of the new goal
    """
    goalParams = GoalParameters(
      goalName=goal.theName,
      goalOrig=goal.theOriginator,
      tags=goal.theTags,
      properties=goal.theEnvironmentProperties
    )
    if not self.check_existing_goal(goal.theName):
      goal_id = self.db_proxy.addGoal(goalParams)
    else:
      self.close()
      raise OverwriteNotAllowedHTTPError('The provided goal name')
    return goal_id

  def update_goal(self, goal, name):
    """Overwrite the goal currently stored under *name* with *goal*."""
    old_goal = self.get_goal_by_name(name, simplify=False)
    # Keep the existing database id so the row is updated, not recreated.
    id = old_goal.theId
    params = GoalParameters(
      goalName=goal.theName,
      goalOrig=goal.theOriginator,
      tags=goal.theTags,
      properties=goal.theEnvironmentProperties
    )
    params.setId(id)
    try:
      self.db_proxy.updateGoal(params)
    except ARM.DatabaseProxyException as ex:
      self.close()
      raise ARMHTTPError(ex)

  def delete_goal(self, name):
    """Delete the goal named *name*.

    :raise ObjectNotFoundHTTPError: if no such goal exists
    """
    found_goal = self.get_goal_by_name(name)
    try:
      self.db_proxy.deleteGoal(found_goal.theId)
    except ARM.DatabaseProxyException as ex:
      self.close()
      raise ARMHTTPError(ex)
    except ARM.ARMException as ex:
      self.close()
      raise ARMHTTPError(ex)

  def check_existing_goal(self, name):
    """Return True if a goal named *name* already exists."""
    try:
      self.db_proxy.nameCheck(name, 'goal')
      return False
    except ARM.ARMException as ex:
      # nameCheck raises when the name is taken; that specific message
      # means "exists", any other ARM error is surfaced as an HTTP error.
      if str(ex.value).find('already exists') > -1:
        return True
      self.close()
      raise ARMHTTPError(ex)

  def get_goal_model(self, environment_name):
    """Render the KAOS goal model for an environment as Graphviz dot code."""
    # apFontName is unused here; only fontName/fontSize feed the model.
    fontName, fontSize, apFontName = get_fonts(session_id=self.session_id)
    try:
      associationDictionary = self.db_proxy.goalModel(environment_name)
      associations = KaosModel(associationDictionary.values(), environment_name, db_proxy=self.db_proxy, font_name=fontName,
                               font_size=fontSize)
      dot_code = associations.graph()
      return dot_code
    except ARM.DatabaseProxyException as ex:
      self.close()
      raise ARMHTTPError(ex)

  # region Goal types
  def get_goal_types(self, environment_name=''):
    """Return the goal-type value types defined for an environment."""
    try:
      goal_types = self.db_proxy.getValueTypes('goal_type', environment_name)
      return goal_types
    except ARM.DatabaseProxyException as ex:
      raise ARMHTTPError(ex)
    except ARM.ARMException as ex:
      raise ARMHTTPError(ex)

  def get_goal_type_by_name(self, name, environment_name=''):
    """Return the goal type named *name*.

    :raise ObjectNotFoundHTTPError: if no types exist or none matches
    """
    found_type = None
    goal_types = self.get_goal_types(environment_name=environment_name)
    if goal_types is None or len(goal_types) < 1:
      raise ObjectNotFoundHTTPError('Goal types')
    idx = 0
    while found_type is None and idx < len(goal_types):
      if goal_types[idx].theName == name:
        found_type = goal_types[idx]
      idx += 1
    if found_type is None:
      raise ObjectNotFoundHTTPError('The provided goal type name')
    return found_type

  def add_goal_type(self, goal_type, environment_name=''):
    """Persist a new goal type.

    :raise OverwriteNotAllowedHTTPError: if the type name already exists
    :return: the database id of the new value type
    """
    assert isinstance(goal_type, ValueType)
    type_exists = self.check_existing_goal_type(goal_type.theName, environment_name=environment_name)
    if type_exists:
      raise OverwriteNotAllowedHTTPError(obj_name='The goal type')
    params = ValueTypeParameters(
      vtName=goal_type.theName,
      vtDesc=goal_type.theDescription,
      vType='goal_type',
      envName=environment_name,
      vtScore=goal_type.theScore,
      vtRat=goal_type.theRationale
    )
    try:
      return self.db_proxy.addValueType(params)
    except ARM.DatabaseProxyException as ex:
      raise ARMHTTPError(ex)
    except ARM.ARMException as ex:
      raise ARMHTTPError(ex)

  def update_goal_type(self, goal_type, name, environment_name=''):
    """Overwrite the goal type stored under *name* with *goal_type*."""
    assert isinstance(goal_type, ValueType)
    found_type = self.get_goal_type_by_name(name, environment_name)
    params = ValueTypeParameters(
      vtName=goal_type.theName,
      vtDesc=goal_type.theDescription,
      vType='goal_type',
      envName=environment_name,
      vtScore=goal_type.theScore,
      vtRat=goal_type.theRationale
    )
    params.setId(found_type.theId)
    try:
      self.db_proxy.updateValueType(params)
    except ARM.DatabaseProxyException as ex:
      raise ARMHTTPError(ex)
    except ARM.ARMException as ex:
      raise ARMHTTPError(ex)

  def delete_goal_type(self, name, environment_name=''):
    """Delete the goal type named *name*."""
    found_type = self.get_goal_type_by_name(name, environment_name)
    try:
      self.db_proxy.deleteGoalType(found_type.theId)
    except ARM.DatabaseProxyException as ex:
      raise ARMHTTPError(ex)
    except ARM.ARMException as ex:
      raise ARMHTTPError(ex)

  def check_existing_goal_type(self, name, environment_name):
    """Return True if a goal type named *name* exists in the environment."""
    try:
      self.get_goal_type_by_name(name, environment_name)
      return True
    except ObjectNotFoundHTTPError:
      return False
  # endregion

  # region Goal values
  def get_goal_values(self, environment_name=''):
    """Return the goal-value value types defined for an environment."""
    try:
      goal_values = self.db_proxy.getValueTypes('goal_value', environment_name)
      return goal_values
    except ARM.DatabaseProxyException as ex:
      raise ARMHTTPError(ex)
    except ARM.ARMException as ex:
      raise ARMHTTPError(ex)

  def get_goal_value_by_name(self, name, environment_name=''):
    """Return the goal value named *name*.

    :raise ObjectNotFoundHTTPError: if no values exist or none matches
    """
    found_value = None
    goal_values = self.get_goal_values(environment_name=environment_name)
    if goal_values is None or len(goal_values) < 1:
      raise ObjectNotFoundHTTPError('Goal values')
    idx = 0
    while found_value is None and idx < len(goal_values):
      if goal_values[idx].theName == name:
        found_value = goal_values[idx]
      idx += 1
    if found_value is None:
      raise ObjectNotFoundHTTPError('The provided goal value name')
    return found_value

  def update_goal_value(self, goal_value, name, environment_name=''):
    """Overwrite the goal value stored under *name* with *goal_value*."""
    assert isinstance(goal_value, ValueType)
    found_value = self.get_goal_value_by_name(name, environment_name)
    params = ValueTypeParameters(
      vtName=goal_value.theName,
      vtDesc=goal_value.theDescription,
      vType='goal_value',
      envName=environment_name,
      vtScore=goal_value.theScore,
      vtRat=goal_value.theRationale
    )
    params.setId(found_value.theId)
    try:
      self.db_proxy.updateValueType(params)
    except ARM.DatabaseProxyException as ex:
      raise ARMHTTPError(ex)
    except ARM.ARMException as ex:
      raise ARMHTTPError(ex)

  def check_existing_goal_value(self, name, environment_name):
    """Return True if a goal value named *name* exists in the environment."""
    try:
      self.get_goal_value_by_name(name, environment_name)
      return True
    except ObjectNotFoundHTTPError:
      return False
  # endregion

  def convert_properties(self, real_props=None, fake_props=None):
    """Convert goal environment properties between internal and JSON forms.

    Exactly one of *real_props* / *fake_props* must be supplied:
    - real_props: GoalEnvironmentProperties whose tuple-valued
      association/refinement entries are converted to lists (presumably so
      they serialize cleanly to JSON -- TODO confirm against callers).
    - fake_props: dicts from deserialized JSON, rebuilt as
      GoalEnvironmentProperties with tuple-valued entries.

    :raise MissingParameterHTTPError: if both arguments are None
    :return: list of converted property objects
    """
    new_props = []
    if real_props is not None:
      for real_prop in real_props:
        assert isinstance(real_prop, GoalEnvironmentProperties)
        new_concern_assocs = []
        for concern_assoc in real_prop.theConcernAssociations:
          new_concern_assocs.append(list(concern_assoc))
        new_goal_refinements = []
        for goal_refinement in real_prop.theGoalRefinements:
          new_goal_refinements.append(list(goal_refinement))
        new_subgoal_refinements = []
        for subgoal_refinement in real_prop.theSubGoalRefinements:
          new_subgoal_refinements.append(list(subgoal_refinement))
        # Mutates the incoming property objects in place.
        real_prop.theConcernAssociations = new_concern_assocs
        real_prop.theGoalRefinements = new_goal_refinements
        real_prop.theSubGoalRefinements = new_subgoal_refinements
        new_props.append(real_prop)
    elif fake_props is not None:
      for fake_prop in fake_props:
        check_required_keys(fake_prop, GoalEnvironmentPropertiesModel.required)
        new_concern_assocs = []
        for concern_assoc in fake_prop['theConcernAssociations']:
          new_concern_assocs.append(tuple(concern_assoc))
        new_goal_refinements = []
        for goal_refinement in fake_prop['theGoalRefinements']:
          new_goal_refinements.append(tuple(goal_refinement))
        new_subgoal_refinements = []
        for subgoal_refinement in fake_prop['theSubGoalRefinements']:
          new_subgoal_refinements.append(tuple(subgoal_refinement))
        new_prop = GoalEnvironmentProperties(
          environmentName=fake_prop['theEnvironmentName'],
          lbl=fake_prop['theLabel'],
          definition=fake_prop['theDefinition'],
          category=fake_prop['theCategory'],
          priority=fake_prop['thePriority'],
          fitCriterion=fake_prop['theFitCriterion'],
          issue=fake_prop['theIssue'],
          goalRefinements=new_goal_refinements,
          subGoalRefinements=new_subgoal_refinements,
          concs=fake_prop['theConcerns'],
          cas=new_concern_assocs,
        )
        new_props.append(new_prop)
    else:
      self.close()
      raise MissingParameterHTTPError(param_names=['real_props', 'fake_props'])
    return new_props

  def from_json(self, request):
    """Build a Goal from the JSON body of a request.

    :raise MalformedJSONHTTPError: if the body is not valid JSON or does
      not deserialize to a Goal
    :return: the deserialized Goal
    """
    self.logger.debug('Request data: %s', request.data)
    json = request.get_json(silent=True)
    if json is False or json is None:
      self.close()
      raise MalformedJSONHTTPError(data=request.get_data())
    json_dict = json['object']
    check_required_keys(json_dict, GoalModel.required)
    # Marker consumed by json_deserialize to rebuild a Goal instance.
    json_dict['__python_obj__'] = Goal.__module__+'.'+Goal.__name__
    # Environment properties are converted separately and re-attached below.
    props_list = json_dict.pop('theEnvironmentProperties', [])
    json_dict.pop('theEnvironmentDictionary', None)
    real_props = self.convert_properties(fake_props=props_list)
    new_json_goal = json_serialize(json_dict)
    new_json_goal = json_deserialize(new_json_goal)
    new_json_goal.theEnvironmentProperties = real_props
    if not isinstance(new_json_goal, Goal):
      self.close()
      raise MalformedJSONHTTPError(data=request.get_data())
    else:
      return new_json_goal

  def simplify(self, goal):
    """
    Simplifies the Goal object by removing the environment properties
    :param goal: The Goal to simplify
    :type goal: Goal
    :return: The simplified Goal
    :rtype: Goal
    """
    goal.theEnvironmentProperties = self.convert_properties(real_props=goal.theEnvironmentProperties)
    assert isinstance(goal, Goal)
    # Drop the (non-serializable) environment dictionary attribute entirely.
    goal.theEnvironmentDictionary = {}
    delattr(goal, 'theEnvironmentDictionary')
    return goal
| |
# -*- coding: utf-8 -*-
'''
Support for iptables
'''
# Import python libs
import os
import sys
import shlex
# Import salt libs
import salt.utils
from salt.state import STATE_INTERNAL_KEYWORDS as _STATE_INTERNAL_KEYWORDS
from salt.exceptions import SaltException
def __virtual__():
    '''
    Load this module only when the iptables binary is present.
    '''
    return 'iptables' if salt.utils.which('iptables') else False
def _conf():
    '''
    Return the distro-specific location of the iptables config file,
    or False when no known location exists for this distro.
    '''
    os_family = __grains__['os_family']
    if os_family == 'RedHat':
        return '/etc/sysconfig/iptables'
    if os_family == 'Arch':
        return '/etc/iptables/iptables.rules'
    if __grains__['os'] == 'Gentoo':
        return '/var/lib/iptables/rules-save'
    return False
def version():
    '''
    Return version from iptables --version
    CLI Example:
    .. code-block:: bash
        salt '*' iptables.version
    '''
    # Output looks like 'iptables v1.x.y'; the second token is the version.
    out = __salt__['cmd.run']('iptables --version')
    return out.split()[1]
def build_rule(table=None, chain=None, command=None, position='', full=None,
               **kwargs):
    '''
    Build a well-formatted iptables rule string from kwargs.

    Options must be given in their long form (``--jump`` rather than ``-j``)
    since ``--`` is prepended to multi-character keywords; a single-character
    keyword gets a single ``-``.

    ``table`` and ``chain`` are only required when ``full`` is True; then
    ``command`` is required as well and the complete iptables command line is
    returned. ``command`` may be a short option ('I') or a long option
    (`--insert`). A ``position`` (as used with `-I` or `-D`) is only emitted
    when ``full`` is True.

    If `connstate` is passed in, it will automatically be changed to `state`.

    CLI Examples:
    .. code-block:: bash
        salt '*' iptables.build_rule match=state connstate=RELATED,ESTABLISHED \\
        jump=ACCEPT
        salt '*' iptables.build_rule filter INPUT command=I position=3 \\
        full=True match=state state=RELATED,ESTABLISHED jump=ACCEPT
    '''
    if 'target' in kwargs:
        # 'target' is an alias for 'jump'
        kwargs['jump'] = kwargs.pop('target')
    # Strip salt-internal state keywords plus arguments handled positionally.
    for ignore in list(_STATE_INTERNAL_KEYWORDS) + ['chain', 'save', 'table']:
        kwargs.pop(ignore, None)
    rule = ''
    if 'if' in kwargs:
        rule += '-i {0} '.format(kwargs.pop('if'))
    if 'proto' in kwargs:
        # 'proto' is intentionally left in kwargs here; it is consumed
        # again below to emit the matching '-m <proto>' extension.
        rule += '-p {0} '.format(kwargs['proto'])
    if 'match' in kwargs:
        rule += '-m {0} '.format(kwargs.pop('match'))
    # 'state' itself is dropped; only 'connstate' renders as --state.
    kwargs.pop('state', None)
    if 'connstate' in kwargs:
        rule += '--state {0} '.format(kwargs.pop('connstate'))
    if 'proto' in kwargs:
        rule += '-m {0} '.format(kwargs.pop('proto'))
    if 'dport' in kwargs:
        rule += '--dport {0} '.format(kwargs.pop('dport'))
    if 'sport' in kwargs:
        rule += '--sport {0} '.format(kwargs.pop('sport'))
    if 'jump' in kwargs:
        kwargs['j'] = kwargs.pop('jump')
    for opt in kwargs:
        dash = '-' if len(opt) == 1 else '--'
        rule += '{0}{1} {2} '.format(dash, opt, kwargs[opt])
    if full is True:
        if not table:
            return 'Error: Table needs to be specified'
        if not chain:
            return 'Error: Chain needs to be specified'
        if not command:
            return 'Error: Command needs to be specified'
        # Single-letter iptables commands take one dash, long options two.
        flag = '-' if command in 'ACDIRLSFZNXPE' else '--'
        return 'iptables -t {0} {1}{2} {3} {4} {5}'.format(
            table, flag, command, chain, position, rule)
    return rule
def get_saved_rules(conf_file=None):
    '''
    Return a data structure of the rules found in the conf file
    CLI Example:
    .. code-block:: bash
        salt '*' iptables.get_saved_rules
    '''
    return _parse_conf(conf_file=conf_file)
def get_rules():
    '''
    Return a data structure of the current, in-memory rules
    CLI Example:
    .. code-block:: bash
        salt '*' iptables.get_rules
    '''
    # in_mem shells out to iptables-save rather than reading a conf file
    return _parse_conf(in_mem=True)
def get_saved_policy(table='filter', chain=None, conf_file=None):
    '''
    Return the policy saved in the conf file for the specified table/chain
    CLI Examples:
    .. code-block:: bash
        salt '*' iptables.get_saved_policy filter INPUT
        salt '*' iptables.get_saved_policy filter INPUT conf_file=/etc/iptables.saved
    '''
    if not chain:
        return 'Error: Chain needs to be specified'
    return _parse_conf(conf_file)[table][chain]['policy']
def get_policy(table='filter', chain=None):
    '''
    Return the in-memory policy for the specified table/chain
    CLI Example:
    .. code-block:: bash
        salt '*' iptables.get_policy filter INPUT
    '''
    if not chain:
        return 'Error: Chain needs to be specified'
    return _parse_conf(in_mem=True)[table][chain]['policy']
def set_policy(table='filter', chain=None, policy=None):
    '''
    Set the current policy for the specified table/chain
    CLI Example:
    .. code-block:: bash
        salt '*' iptables.set_policy filter INPUT ACCEPT
    '''
    if not chain:
        return 'Error: Chain needs to be specified'
    if not policy:
        return 'Error: Policy needs to be specified'
    return __salt__['cmd.run'](
        'iptables -t {0} -P {1} {2}'.format(table, chain, policy))
def save(filename=None):
    '''
    Save the current in-memory rules to disk.

    If no filename is given, the distro-specific default from ``_conf()`` is
    used; if there is no known default for this distro either, an error
    string is returned (previously this fell through to
    ``os.path.dirname(None)`` and raised a TypeError).

    CLI Example:
    .. code-block:: bash
        salt '*' iptables.save /etc/sysconfig/iptables
    '''
    if _conf() and not filename:
        filename = _conf()
    if not filename:
        return 'Error: Filename needs to be specified; no default config location is known for this distribution'
    parent_dir = os.path.dirname(filename)
    if not os.path.isdir(parent_dir):
        os.makedirs(parent_dir)
    # NOTE: relies on cmd.run using a shell for the output redirection
    cmd = 'iptables-save > {0}'.format(filename)
    out = __salt__['cmd.run'](cmd)
    return out
def check(table='filter', chain=None, rule=None):
    '''
    Check for the existance of a rule in the table and chain
    This function accepts a rule in a standard iptables command format,
    starting with the chain. Trying to force users to adapt to a new
    method of creating rules would be irritating at best, and we
    already have a parser that can handle it.
    CLI Example:
    .. code-block:: bash
        salt '*' iptables.check filter INPUT rule='-m state --state RELATED,ESTABLISHED -j ACCEPT'
    '''
    if not chain:
        return 'Error: Chain needs to be specified'
    if not rule:
        return 'Error: Rule needs to be specified'
    if __grains__['os_family'] == 'RedHat':
        # No `iptables -C` here; search the iptables-save output instead.
        saved = __salt__['cmd.run']('iptables-save')
        out = saved.find('-A {1} {2}'.format(table, chain, rule))
        if out != -1:
            # found: normalize to empty output so the check below succeeds
            out = ''
    else:
        out = __salt__['cmd.run'](
            'iptables -t {0} -C {1} {2}'.format(table, chain, rule))
    # empty output means the rule exists
    return True if not out else out
def append(table='filter', chain=None, rule=None):
    '''
    Append a rule to the specified table/chain.
    This function accepts a rule in a standard iptables command format,
    starting with the chain. Trying to force users to adapt to a new
    method of creating rules would be irritating at best, and we
    already have a parser that can handle it.
    CLI Example:
    .. code-block:: bash
        salt '*' iptables.append filter INPUT rule='-m state --state RELATED,ESTABLISHED -j ACCEPT'
    '''
    if not chain:
        return 'Error: Chain needs to be specified'
    if not rule:
        return 'Error: Rule needs to be specified'
    return __salt__['cmd.run'](
        'iptables -t {0} -A {1} {2}'.format(table, chain, rule))
def insert(table='filter', chain=None, position=None, rule=None):
    '''
    Insert a rule into the specified table/chain, at the specified position.
    This function accepts a rule in a standard iptables command format,
    starting with the chain. Trying to force users to adapt to a new
    method of creating rules would be irritating at best, and we
    already have a parser that can handle it.
    CLI Examples:
    .. code-block:: bash
        salt '*' iptables.insert filter INPUT position=3 rule='-m state --state RELATED,ESTABLISHED -j ACCEPT'
    '''
    if not chain:
        return 'Error: Chain needs to be specified'
    if not position:
        return 'Error: Position needs to be specified or use append (-A)'
    if not rule:
        return 'Error: Rule needs to be specified'
    return __salt__['cmd.run'](
        'iptables -t {0} -I {1} {2} {3}'.format(table, chain, position, rule))
def delete(table, chain=None, position=None, rule=None):
    '''
    Delete a rule from the specified table/chain, specifying either the rule
    in its entirety, or the rule's position in the chain.
    This function accepts a rule in a standard iptables command format,
    starting with the chain. Trying to force users to adapt to a new
    method of creating rules would be irritating at best, and we
    already have a parser that can handle it.
    CLI Examples:
    .. code-block:: bash
        salt '*' iptables.delete filter INPUT position=3
        salt '*' iptables.delete filter INPUT rule='-m state --state RELATED,ESTABLISHED -j ACCEPT'
    '''
    if position and rule:
        return 'Error: Only specify a position or a rule, not both'
    # Guard against building 'iptables -t X -D None None'; siblings such as
    # append()/insert() validate chain, this previously did not.
    if not chain:
        return 'Error: Chain needs to be specified'
    if not position and not rule:
        return 'Error: Position or rule needs to be specified'
    if position:
        rule = position
    cmd = 'iptables -t {0} -D {1} {2}'.format(table, chain, rule)
    out = __salt__['cmd.run'](cmd)
    return out
def flush(table='filter'):
    '''
    Flush all chains in the specified table.
    CLI Example:
    .. code-block:: bash
        salt '*' iptables.flush filter
    '''
    return __salt__['cmd.run']('iptables -t {0} -F'.format(table))
def _parse_conf(conf_file=None, in_mem=False):
    '''
    Parse iptables rules (from a saved config file, or from the in-memory
    ruleset via ``iptables-save``) into a nested dict keyed by table, then
    chain, holding policy, counters, rules and rule comments.

    If a file is not passed in, and the correct one for this OS is not
    detected, a SaltException is raised.
    '''
    if _conf() and not conf_file and not in_mem:
        conf_file = _conf()
    rules = ''
    if conf_file:
        with salt.utils.fopen(conf_file, 'r') as ifile:
            rules = ifile.read()
    elif in_mem:
        cmd = 'iptables-save'
        rules = __salt__['cmd.run'](cmd)
    else:
        raise SaltException('A file was not found to parse')
    ret = {}
    table = ''
    for line in rules.splitlines():
        if line.startswith('*'):
            # Table header, e.g. '*filter'
            table = line.replace('*', '')
            ret[table] = {}
        elif line.startswith(':'):
            # Chain definition, e.g. ':INPUT ACCEPT [12:3024]'
            comps = line.split()
            chain = comps[0].replace(':', '')
            ret[table][chain] = {}
            ret[table][chain]['policy'] = comps[1]
            counters = comps[2].replace('[', '').replace(']', '')
            (pcount, bcount) = counters.split(':')
            ret[table][chain]['packet count'] = pcount
            ret[table][chain]['byte count'] = bcount
            ret[table][chain]['rules'] = []
            ret[table][chain]['rules_comment'] = {}
        elif line.startswith('-A'):
            parser = _parser()
            parsed_args = []
            if sys.version.startswith('2.6'):
                # optparse (Python 2.6) returns (options, args), not a Namespace
                (opts, args) = parser.parse_args(shlex.split(line))
                parsed_args = vars(opts)
            else:
                parsed_args = vars(parser.parse_args(shlex.split(line)))
            ret_args = {}
            chain = parsed_args['append']
            for arg in parsed_args:
                # BUGFIX: this previously used `arg is not 'append'`, a string
                # identity comparison that only works because of interning.
                if parsed_args[arg] and arg != 'append':
                    ret_args[arg] = parsed_args[arg]
            if parsed_args['comment'] is not None:
                comment = parsed_args['comment'][0].strip('"')
                ret[table][chain[0]]['rules_comment'][comment] = ret_args
            ret[table][chain[0]]['rules'].append(ret_args)
    return ret
def _parser():
    '''
    This function contains _all_ the options I could find in man 8 iptables,
    listed in the first section that I found them in. They will not all be used
    by all parts of the module; use them intelligently and appropriately.

    Returns an argument parser (argparse.ArgumentParser, or
    optparse.OptionParser on Python 2.6) where every option is registered
    with action='append', so each parsed value comes back as a list.
    '''
    add_arg = None
    # Python 2.6 has no argparse in the stdlib; fall back to optparse there.
    if sys.version.startswith('2.6'):
        import optparse
        parser = optparse.OptionParser()
        add_arg = parser.add_option
    else:
        import argparse
        parser = argparse.ArgumentParser()
        add_arg = parser.add_argument
    # COMMANDS
    add_arg('-A', '--append', dest='append', action='append')
    add_arg('-D', '--delete', dest='delete', action='append')
    add_arg('-I', '--insert', dest='insert', action='append')
    add_arg('-R', '--replace', dest='replace', action='append')
    add_arg('-L', '--list', dest='list', action='append')
    add_arg('-F', '--flush', dest='flush', action='append')
    add_arg('-Z', '--zero', dest='zero', action='append')
    add_arg('-N', '--new-chain', dest='new-chain', action='append')
    add_arg('-X', '--delete-chain', dest='delete-chain', action='append')
    add_arg('-P', '--policy', dest='policy', action='append')
    add_arg('-E', '--rename-chain', dest='rename-chain', action='append')
    # PARAMETERS
    add_arg('-p', '--protocol', dest='protocol', action='append')
    add_arg('-s', '--source', dest='source', action='append')
    add_arg('-d', '--destination', dest='destination', action='append')
    add_arg('-j', '--jump', dest='jump', action='append')
    add_arg('-g', '--goto', dest='goto', action='append')
    add_arg('-i', '--in-interface', dest='in-interface', action='append')
    add_arg('-o', '--out-interface', dest='out-interface', action='append')
    add_arg('-f', '--fragment', dest='fragment', action='append')
    add_arg('-c', '--set-counters', dest='set-counters', action='append')
    # MATCH EXTENSIONS
    add_arg('-m', '--match', dest='match', action='append')
    ## addrtype
    add_arg('--src-type', dest='src-type', action='append')
    add_arg('--dst-type', dest='dst-type', action='append')
    add_arg('--limit-iface-in', dest='limit-iface-in', action='append')
    add_arg('--limit-iface-out', dest='limit-iface-out', action='append')
    ## ah
    add_arg('--ahspi', dest='ahspi', action='append')
    ## cluster
    add_arg('--cluster-total-nodes', dest='cluster-total-nodes', action='append')
    add_arg('--cluster-local-node', dest='cluster-local-node', action='append')
    add_arg('--cluster-local-nodemask', dest='cluster-local-nodemask', action='append')
    add_arg('--cluster-hash-seed', dest='cluster-hash-seed', action='append')
    add_arg('--h-length', dest='h-length', action='append')
    add_arg('--mangle-mac-s', dest='mangle-mac-s', action='append')
    add_arg('--mangle-mac-d', dest='mangle-mac-d', action='append')
    ## comment
    add_arg('--comment', dest='comment', action='append')
    ## connbytes
    add_arg('--connbytes', dest='connbytes', action='append')
    add_arg('--connbytes-dir', dest='connbytes-dir', action='append')
    add_arg('--connbytes-mode', dest='connbytes-mode', action='append')
    ## connlimit
    add_arg('--connlimit-above', dest='connlimit-above', action='append')
    add_arg('--connlimit-mask', dest='connlimit-mask', action='append')
    ## connmark
    add_arg('--mark', dest='mark', action='append')
    ## conntrack
    add_arg('--ctstate', dest='ctstate', action='append')
    add_arg('--ctproto', dest='ctproto', action='append')
    add_arg('--ctorigsrc', dest='ctorigsrc', action='append')
    add_arg('--ctorigdst', dest='ctorigdst', action='append')
    add_arg('--ctreplsrc', dest='ctreplsrc', action='append')
    add_arg('--ctrepldst', dest='ctrepldst', action='append')
    add_arg('--ctorigsrcport', dest='ctorigsrcport', action='append')
    add_arg('--ctorigdstport', dest='ctorigdstport', action='append')
    add_arg('--ctreplsrcport', dest='ctreplsrcport', action='append')
    add_arg('--ctrepldstport', dest='ctrepldstport', action='append')
    add_arg('--ctstatus', dest='ctstatus', action='append')
    add_arg('--ctexpire', dest='ctexpire', action='append')
    ## dccp
    add_arg('--sport', '--source-port', dest='source_port', action='append')
    add_arg('--dport', '--destination-port', dest='destination_port', action='append')
    add_arg('--dccp-types', dest='dccp-types', action='append')
    add_arg('--dccp-option', dest='dccp-option', action='append')
    ## dscp
    add_arg('--dscp', dest='dscp', action='append')
    add_arg('--dscp-class', dest='dscp-class', action='append')
    ## ecn
    add_arg('--ecn-tcp-cwr', dest='ecn-tcp-cwr', action='append')
    add_arg('--ecn-tcp-ece', dest='ecn-tcp-ece', action='append')
    add_arg('--ecn-ip-ect', dest='ecn-ip-ect', action='append')
    ## esp
    add_arg('--espspi', dest='espspi', action='append')
    ## hashlimit
    add_arg('--hashlimit-upto', dest='hashlimit-upto', action='append')
    add_arg('--hashlimit-above', dest='hashlimit-above', action='append')
    add_arg('--hashlimit-burst', dest='hashlimit-burst', action='append')
    add_arg('--hashlimit-mode', dest='hashlimit-mode', action='append')
    add_arg('--hashlimit-srcmask', dest='hashlimit-srcmask', action='append')
    add_arg('--hashlimit-dstmask', dest='hashlimit-dstmask', action='append')
    add_arg('--hashlimit-name', dest='hashlimit-name', action='append')
    add_arg('--hashlimit-htable-size', dest='hashlimit-htable-size', action='append')
    add_arg('--hashlimit-htable-max', dest='hashlimit-htable-max', action='append')
    add_arg('--hashlimit-htable-expire', dest='hashlimit-htable-expire', action='append')
    add_arg('--hashlimit-htable-gcinterval', dest='hashlimit-htable-gcinterval', action='append')
    ## helper
    add_arg('--helper', dest='helper', action='append')
    ## icmp
    add_arg('--icmp-type', dest='icmp-type', action='append')
    ## iprange
    add_arg('--src-range', dest='src-range', action='append')
    add_arg('--dst-range', dest='dst-range', action='append')
    ## length
    add_arg('--length', dest='length', action='append')
    ## limit
    add_arg('--limit', dest='limit', action='append')
    add_arg('--limit-burst', dest='limit-burst', action='append')
    ## mac
    add_arg('--mac-source', dest='mac-source', action='append')
    ## multiport
    add_arg('--sports', '--source-ports', dest='source-ports', action='append')
    add_arg('--dports', '--destination-ports', dest='destination-ports', action='append')
    add_arg('--ports', dest='ports', action='append')
    ## owner
    add_arg('--uid-owner', dest='uid-owner', action='append')
    add_arg('--gid-owner', dest='gid-owner', action='append')
    add_arg('--socket-exists', dest='socket-exists', action='append')
    ## physdev
    add_arg('--physdev-in', dest='physdev-in', action='append')
    add_arg('--physdev-out', dest='physdev-out', action='append')
    add_arg('--physdev-is-in', dest='physdev-is-in', action='append')
    add_arg('--physdev-is-out', dest='physdev-is-out', action='append')
    add_arg('--physdev-is-bridged', dest='physdev-is-bridged', action='append')
    ## pkttype
    add_arg('--pkt-type', dest='pkt-type', action='append')
    ## policy
    add_arg('--dir', dest='dir', action='append')
    add_arg('--pol', dest='pol', action='append')
    add_arg('--strict', dest='strict', action='append')
    add_arg('--reqid', dest='reqid', action='append')
    add_arg('--spi', dest='spi', action='append')
    add_arg('--proto', dest='proto', action='append')
    add_arg('--mode', dest='mode', action='append')
    add_arg('--tunnel-src', dest='tunnel-src', action='append')
    add_arg('--tunnel-dst', dest='tunnel-dst', action='append')
    add_arg('--next', dest='next', action='append')
    ## quota
    add_arg('--quota', dest='quota', action='append')
    ## rateest
    add_arg('--rateest1', dest='rateest1', action='append')
    add_arg('--rateest2', dest='rateest2', action='append')
    add_arg('--rateest-delta', dest='rateest-delta', action='append')
    add_arg('--rateest1-bps', dest='rateest1-bps', action='append')
    add_arg('--rateest2-bps', dest='rateest2-bps', action='append')
    add_arg('--rateest1-pps', dest='rateest1-pps', action='append')
    add_arg('--rateest2-pps', dest='rateest2-pps', action='append')
    add_arg('--rateest1-lt', dest='rateest1-lt', action='append')
    add_arg('--rateest1-gt', dest='rateest1-gt', action='append')
    add_arg('--rateest1-eq', dest='rateest1-eq', action='append')
    add_arg('--rateest-name', dest='rateest-name', action='append')
    add_arg('--rateest-interval', dest='rateest-interval', action='append')
    add_arg('--rateest-ewma', dest='rateest-ewma', action='append')
    ## realm
    add_arg('--realm', dest='realm', action='append')
    ## recent
    add_arg('--set', dest='set', action='append')
    add_arg('--name', dest='name', action='append')
    add_arg('--rsource', dest='rsource', action='append')
    add_arg('--rdest', dest='rdest', action='append')
    add_arg('--rcheck', dest='rcheck', action='append')
    add_arg('--update', dest='update', action='append')
    add_arg('--remove', dest='remove', action='append')
    add_arg('--seconds', dest='seconds', action='append')
    add_arg('--hitcount', dest='hitcount', action='append')
    add_arg('--rttl', dest='rttl', action='append')
    ## sctp
    add_arg('--chunk-types', dest='chunk-types', action='append')
    ## set
    add_arg('--match-set', dest='match-set', action='append')
    ## socket
    add_arg('--transparent', dest='transparent', action='append')
    ## state
    add_arg('--state', dest='state', action='append')
    ## statistic
    add_arg('--probability', dest='probability', action='append')
    add_arg('--every', dest='every', action='append')
    add_arg('--packet', dest='packet', action='append')
    ## string
    add_arg('--algo', dest='algo', action='append')
    add_arg('--from', dest='from', action='append')
    add_arg('--to', dest='to', action='append')
    add_arg('--string', dest='string', action='append')
    add_arg('--hex-string', dest='hex-string', action='append')
    ## tcp
    add_arg('--tcp-flags', dest='tcp-flags', action='append')
    add_arg('--syn', dest='syn', action='append')
    add_arg('--tcp-option', dest='tcp-option', action='append')
    ## tcpmss
    add_arg('--mss', dest='mss', action='append')
    ## time
    add_arg('--datestart', dest='datestart', action='append')
    add_arg('--datestop', dest='datestop', action='append')
    add_arg('--monthdays', dest='monthdays', action='append')
    add_arg('--weekdays', dest='weekdays', action='append')
    add_arg('--utc', dest='utc', action='append')
    add_arg('--localtz', dest='localtz', action='append')
    ## tos
    add_arg('--tos', dest='tos', action='append')
    ## ttl
    add_arg('--ttl-eq', dest='ttl-eq', action='append')
    add_arg('--ttl-gt', dest='ttl-gt', action='append')
    add_arg('--ttl-lt', dest='ttl-lt', action='append')
    ## u32
    add_arg('--u32', dest='u32', action='append')
    # CHECKSUM
    add_arg('--checksum-fill', dest='checksum-fill', action='append')
    # CLASSIFY
    add_arg('--set-class', dest='set-class', action='append')
    # CLUSTERIP
    add_arg('--new', dest='new', action='append')
    add_arg('--hashmode', dest='hashmode', action='append')
    add_arg('--clustermac', dest='clustermac', action='append')
    add_arg('--total-nodes', dest='total-nodes', action='append')
    add_arg('--local-node', dest='local-node', action='append')
    add_arg('--hash-init', dest='hash-init', action='append')
    # CONNMARK
    add_arg('--set-xmark', dest='set-xmark', action='append')
    add_arg('--save-mark', dest='save-mark', action='append')
    add_arg('--restore-mark', dest='restore-mark', action='append')
    add_arg('--and-mark', dest='and-mark', action='append')
    add_arg('--or-mark', dest='or-mark', action='append')
    add_arg('--xor-mark', dest='xor-mark', action='append')
    add_arg('--set-mark', dest='set-mark', action='append')
    # DNAT
    add_arg('--to-destination', dest='to-destination', action='append')
    add_arg('--random', dest='random', action='append')
    add_arg('--persistent', dest='persistent', action='append')
    # DSCP
    add_arg('--set-dscp', dest='set-dscp', action='append')
    add_arg('--set-dscp-class', dest='set-dscp-class', action='append')
    # ECN
    add_arg('--ecn-tcp-remove', dest='ecn-tcp-remove', action='append')
    # LOG
    add_arg('--log-level', dest='log-level', action='append')
    add_arg('--log-prefix', dest='log-prefix', action='append')
    add_arg('--log-tcp-sequence', dest='log-tcp-sequence', action='append')
    add_arg('--log-tcp-options', dest='log-tcp-options', action='append')
    add_arg('--log-ip-options', dest='log-ip-options', action='append')
    add_arg('--log-uid', dest='log-uid', action='append')
    # NFLOG
    add_arg('--nflog-group', dest='nflog-group', action='append')
    add_arg('--nflog-prefix', dest='nflog-prefix', action='append')
    add_arg('--nflog-range', dest='nflog-range', action='append')
    add_arg('--nflog-threshold', dest='nflog-threshold', action='append')
    # NFQUEUE
    add_arg('--queue-num', dest='queue-num', action='append')
    add_arg('--queue-balance', dest='queue-balance', action='append')
    # RATEEST
    add_arg('--rateest-ewmalog', dest='rateest-ewmalog', action='append')
    # REDIRECT
    add_arg('--to-ports', dest='to-ports', action='append')
    # REJECT
    add_arg('--reject-with', dest='reject-with', action='append')
    # SAME
    add_arg('--nodst', dest='nodst', action='append')
    # SECMARK
    add_arg('--selctx', dest='selctx', action='append')
    # SET
    add_arg('--add-set', dest='add-set', action='append')
    add_arg('--del-set', dest='del-set', action='append')
    # SNAT
    add_arg('--to-source', dest='to-source', action='append')
    # TCPMSS
    add_arg('--set-mss', dest='set-mss', action='append')
    add_arg('--clamp-mss-to-pmtu', dest='clamp-mss-to-pmtu', action='append')
    # TCPOPTSTRIP
    add_arg('--strip-options', dest='strip-options', action='append')
    # TOS
    add_arg('--set-tos', dest='set-tos', action='append')
    add_arg('--and-tos', dest='and-tos', action='append')
    add_arg('--or-tos', dest='or-tos', action='append')
    add_arg('--xor-tos', dest='xor-tos', action='append')
    # TPROXY
    add_arg('--on-port', dest='on-port', action='append')
    add_arg('--on-ip', dest='on-ip', action='append')
    add_arg('--tproxy-mark', dest='tproxy-mark', action='append')
    # TTL
    add_arg('--ttl-set', dest='ttl-set', action='append')
    add_arg('--ttl-dec', dest='ttl-dec', action='append')
    add_arg('--ttl-inc', dest='ttl-inc', action='append')
    # ULOG
    add_arg('--ulog-nlgroup', dest='ulog-nlgroup', action='append')
    add_arg('--ulog-prefix', dest='ulog-prefix', action='append')
    add_arg('--ulog-cprange', dest='ulog-cprange', action='append')
    add_arg('--ulog-qthreshold', dest='ulog-qthreshold', action='append')
    return parser
| |
"""
Vincenty's methods for computing ground distance and reckoning
"""
from __future__ import annotations
import typing
import logging
from math import nan, pi
from copy import copy
try:
from numpy import (
ndarray,
atleast_1d,
sqrt,
tan,
sin,
cos,
isnan,
arctan as atan,
arctan2 as atan2,
arcsin as asin,
radians,
degrees,
)
except ImportError:
from math import sqrt, tan, sin, cos, isnan, atan, atan2, asin, radians, degrees # type: ignore
ndarray = typing.Any # type: ignore
from .ellipsoid import Ellipsoid
from .utils import sign
__all__ = ["vdist", "vreckon", "track2"]
def vdist(
    Lat1: float | ndarray,
    Lon1: float | ndarray,
    Lat2: float | ndarray,
    Lon2: float | ndarray,
    ell: Ellipsoid = None,
) -> tuple[ndarray, ndarray]:
    """
    Using the reference ellipsoid, compute the distance between two points
    within a few millimeters of accuracy, compute forward azimuth,
    and compute backward azimuth, all using a vectorized version of
    Vincenty's algorithm:
    Example:
    dist_m, azimuth_deg = vdist(lat1, lon1, lat2, lon2, ell)
    Parameters
    ----------
    Lat1 : float
        Geodetic latitude of first point (degrees)
    Lon1 : float
        Geodetic longitude of first point (degrees)
    Lat2 : float
        Geodetic latitude of second point (degrees)
    Lon2 : float
        Geodetic longitude of second point (degrees)
    ell : Ellipsoid, optional
        reference ellipsoid
    Results
    -------
    dist_m : float
        distance (meters)
    az : float
        azimuth (degrees) clockwise from first point to second point (forward)
    Original algorithm source:
    T. Vincenty, "Direct and Inverse Solutions of Geodesics on the Ellipsoid
    with Application of Nested Equations", Survey Review, vol. 23, no. 176,
    April 1975, pp 88-93.
    Available at: http://www.ngs.noaa.gov/PUBS_LIB/inverse.pdf
    Notes:
    1. lat1,lon1,lat2,lon2 can be any (identical) size/shape. Outputs will have the same size and shape.
    2. Error correcting code, convergence failure traps, antipodal corrections, polar error corrections, WGS84 ellipsoid parameters, testing, and comments: Michael Kleder, 2004.
    3. Azimuth implementation (including quadrant abiguity resolution) and code vectorization, Michael Kleder, Sep 2005.
    4. Vectorization is convergence sensitive; that is, quantities which have already converged to within tolerance are not recomputed during subsequent iterations (while other quantities are still converging).
    5. Vincenty describes his distance algorithm as precise to within 0.01 millimeters, subject to the ellipsoidal model.
    6. For distance calculations, essentially antipodal points are treated as exactly antipodal, potentially reducing accuracy slightly.
    7. Distance failures for points exactly at the poles are eliminated by moving the points by 0.6 millimeters.
    8. The Vincenty distance algorithm was transcribed verbatim by Peter Cederholm, August 12, 2003. It was modified and translated to English by Michael Kleder. Mr. Cederholm's website is http://www.plan.aau.dk/~pce/
    9. Distances agree with the Mapping Toolbox, version 2.2 (R14SP3) with a max relative difference of about 5e-9, except when the two points are nearly antipodal, and except when one point is near the equator and the two longitudes are nearly 180 degrees apart. This function (vdist) is more accurate in such cases.
    For example, note this difference (as of this writing):
    ```python
    vdist(0.2,305,15,125)
    ```
    > 18322827.0131551
    ```python
    distance(0.2,305,15,125,[6378137 0.08181919])
    ```
    > 0
    10. Azimuths FROM the north pole (either forward starting at the north pole or backward when ending at the north pole) are set to 180 degrees by convention.
    Azimuths FROM the south pole are set to 0 degrees by convention.
    11. Azimuths agree with the Mapping Toolbox, version 2.2 (R14SP3) to within about a hundred-thousandth of a degree, except when traversing to or from a pole, where the convention for this function is described in (10), and except in the cases noted above in (9).
    12. No warranties; use at your own risk.
    """
    if ell is None:
        ell = Ellipsoid()
    # %% Input check:
    # The try/except NameError pattern selects between the numpy path
    # (atleast_1d available) and the pure-math scalar fallback.
    try:
        Lat1 = atleast_1d(Lat1)
        Lon1 = atleast_1d(Lon1)
        Lat2 = atleast_1d(Lat2)
        Lon2 = atleast_1d(Lon2)
        if (abs(Lat1) > 90).any() | (abs(Lat2) > 90).any():
            raise ValueError("Input latitudes must be in [-90, 90] degrees.")
    except NameError:
        if (abs(Lat1) > 90) | (abs(Lat2) > 90):
            raise ValueError("Input latitudes must be in [-90, 90] degrees.")
    # %% Supply WGS84 earth ellipsoid axis lengths in meters:
    a = ell.semimajor_axis
    b = ell.semiminor_axis
    f = ell.flattening
    # %% convert inputs in degrees to radians:
    lat1 = radians(Lat1)
    lon1 = radians(Lon1)
    lat2 = radians(Lat2)
    lon2 = radians(Lon2)
    # %% correct for errors at exact poles by adjusting 0.6 millimeters:
    # TypeError is raised by boolean-indexing a scalar -> scalar fallback.
    try:
        i = abs(pi / 2 - abs(lat1)) < 1e-10
        lat1[i] = sign(lat1[i]) * (pi / 2 - 1e-10)
        i = abs(pi / 2 - abs(lat2)) < 1e-10
        lat2[i] = sign(lat2[i]) * (pi / 2 - 1e-10)
    except TypeError:
        if abs(pi / 2 - abs(lat1)) < 1e-10:
            lat1 = sign(lat1) * (pi / 2 - 1e-10)
        if abs(pi / 2 - abs(lat2)) < 1e-10:
            lat2 = sign(lat2) * (pi / 2 - 1e-10)
    # Reduced latitudes on the auxiliary sphere (Vincenty's U1, U2).
    U1 = atan((1 - f) * tan(lat1))
    U2 = atan((1 - f) * tan(lat2))
    lon1 = lon1 % (2 * pi)
    lon2 = lon2 % (2 * pi)
    # L: absolute longitude difference, folded into [0, pi].
    L = abs(lon2 - lon1)
    try:
        L[L > pi] = 2 * pi - L[L > pi]
    except TypeError:
        if L > pi:
            L = 2 * pi - L
    lamb = copy(L)  # NOTE: program will fail without copy!
    itercount = 0
    warninggiven = False
    notdone = True
    # Iterate lambda (longitude difference on auxiliary sphere) to convergence.
    while notdone:  # force at least one execution
        itercount += 1
        if itercount > 50:
            # Near-antipodal points may never converge; clamp and bail out.
            if not warninggiven:
                logging.warning("Essentially antipodal points--precision may be reduced slightly.")
            lamb = pi
            break
        lambdaold = copy(lamb)
        sinsigma = sqrt(
            (cos(U2) * sin(lamb)) ** 2 + (cos(U1) * sin(U2) - sin(U1) * cos(U2) * cos(lamb)) ** 2
        )
        cossigma = sin(U1) * sin(U2) + cos(U1) * cos(U2) * cos(lamb)
        # eliminate rare imaginary portions at limit of numerical precision:
        sinsigma = sinsigma.real
        cossigma = cossigma.real
        sigma = atan2(sinsigma, cossigma)
        # Array path first; fall back to scalar handling when boolean
        # indexing or division raises.
        try:
            sinAlpha = cos(U1) * cos(U2) * sin(lamb) / sin(sigma)
            alpha = asin(sinAlpha)
            alpha[isnan(sinAlpha)] = 0
            alpha[(sinAlpha > 1) | (abs(sinAlpha - 1) < 1e-16)] = pi / 2
        except (ZeroDivisionError, TypeError, ValueError):
            try:
                sinAlpha = cos(U1) * cos(U2) * sin(lamb) / sin(sigma)
            except ZeroDivisionError:
                sinAlpha = 0.0
            if isnan(sinAlpha):
                alpha = 0.0
            elif sinAlpha > 1 or abs(sinAlpha - 1) < 1e-16:
                alpha = pi / 2
            else:
                alpha = asin(sinAlpha)
        cos2sigmam = cos(sigma) - 2 * sin(U1) * sin(U2) / cos(alpha) ** 2
        C = f / 16 * cos(alpha) ** 2 * (4 + f * (4 - 3 * cos(alpha) ** 2))
        lamb = L + (1 - C) * f * sin(alpha) * (
            sigma + C * sin(sigma) * (cos2sigmam + C * cos(sigma) * (-1 + 2.0 * cos2sigmam ** 2))
        )
        # print(f'then, lambda(21752) = {lamb[21752],20})
        # correct for convergence failure for essentially antipodal points
        try:
            i = (lamb > pi).any()
        except AttributeError:
            i = lamb > pi
        if i:
            logging.warning(
                "Essentially antipodal points encountered. Precision may be reduced slightly."
            )
            warninggiven = True
            lambdaold = pi
            lamb = pi
        try:
            notdone = (abs(lamb - lambdaold) > 1e-12).any()
        except AttributeError:
            notdone = abs(lamb - lambdaold) > 1e-12
    # Vincenty's series expansion for the ellipsoidal distance correction.
    u2 = cos(alpha) ** 2 * (a ** 2 - b ** 2) / b ** 2
    A = 1 + u2 / 16384 * (4096 + u2 * (-768 + u2 * (320 - 175 * u2)))
    B = u2 / 1024 * (256 + u2 * (-128 + u2 * (74 - 47 * u2)))
    deltasigma = (
        B
        * sin(sigma)
        * (
            cos2sigmam
            + B
            / 4
            * (
                cos(sigma) * (-1 + 2 * cos2sigmam ** 2)
                - B / 6 * cos2sigmam * (-3 + 4 * sin(sigma) ** 2) * (-3 + 4 * cos2sigmam ** 2)
            )
        )
    )
    dist_m = b * A * (sigma - deltasigma)
    # %% From point #1 to point #2
    # correct sign of lambda for azimuth calcs:
    lamb = abs(lamb)
    try:
        i = sign(sin(lon2 - lon1)) * sign(sin(lamb)) < 0
        lamb[i] = -lamb[i]
    except TypeError:
        if sign(sin(lon2 - lon1)) * sign(sin(lamb)) < 0:
            lamb = -lamb
    numer = cos(U2) * sin(lamb)
    denom = cos(U1) * sin(U2) - sin(U1) * cos(U2) * cos(lamb)
    a12 = atan2(numer, denom)
    a12 %= 2 * pi
    az = degrees(a12)
    # squeeze()[()] collapses 1-element arrays back to Python scalars;
    # AttributeError means the math-module (scalar) path was taken.
    try:
        return dist_m.squeeze()[()], az.squeeze()[()]
    except AttributeError:
        return dist_m, az
def vreckon(
    Lat1: float | ndarray, Lon1: float | ndarray, Rng: ndarray, Azim: ndarray, ell: Ellipsoid = None
) -> tuple[ndarray, ndarray]:
    """
    This is the Vincenty "forward" solution.
    Computes points at a specified azimuth and range in an ellipsoidal earth.
    Using the reference ellipsoid, travel a given distance along a given azimuth starting at a given initial point,
    and return the endpoint within a few millimeters of accuracy, using Vincenty's algorithm.
    Example:
    lat2, lon2 = vreckon(lat1, lon1, ground_range_m, azimuth_deg)
    Parameters
    ----------
    Lat1 : float
        initial geodetic latitude (degrees)
    Lon1 : float
        initial geodetic longitude (degrees)
    Rng : float
        ground distance (meters)
    Azim : float
        initial azimuth (degrees) clockwise from north.
    ell : Ellipsoid, optional
        reference ellipsoid
    Results
    -------
    Lat2 : float
        final geodetic latitude (degrees)
    Lon2 : float
        final geodetic longitude (degrees)
    Original algorithm: T. Vincenty, "Direct and Inverse Solutions of Geodesics on the Ellipsoid with Application of Nested Equations", Survey Review, vol. 23, no. 176, April 1975, pp 88-93. http://www.ngs.noaa.gov/PUBS_LIB/inverse.pdf
    Notes:
    1. The Vincenty reckoning algorithm was transcribed verbatim into JavaScript by Chris Veness.
    It was modified and translated to Matlab by Michael Kleder.
    Mr. Veness's website is: http://www.movable-type.co.uk/scripts/latlong-vincenty-direct.html
    2. Error correcting code, polar error corrections, WGS84 ellipsoid parameters, testing, and comments by Michael Kleder.
    3. By convention, when starting at a pole, the longitude of the initial point (otherwise meaningless) determines the longitude line along which to traverse, and hence the longitude of the final point.
    4. The convention noted in (3) above creates a discrepancy with VDIST when the initial or final point is at a pole. In the VDIST
    function, when traversing from a pole, the azimuth is 0 when
    heading away from the south pole and 180 when heading away from the north pole. In contrast, this VRECKON function uses the azimuth as noted in (3) above when traversing away form a pole.
    5. In testing, where the traversal subtends no more than 178 degrees, this function correctly inverts the VDIST function to within 0.2 millimeters of distance, 5e-10 degrees of forward azimuth,
    and 5e-10 degrees of reverse azimuth. Precision reduces as test
    points approach antipodal because the precision of VDIST is reduced
    for nearly antipodal points. (A warning is given by VDIST.)
    6. Tested but no warranty. Use at your own risk.
    7. Ver 1.0, Michael Kleder, November 2007. Ver 2.0, Joaquim Luis, September 2008
    Added ellipsoid and vectorized whenever possible. Also, lon2 is always converted to the [-180 180] interval.
    Joaquim Luis
    """
    # Validate inputs. The try/except NameError pattern supports a pure-math
    # (numpy-less) fallback: if atleast_1d is not importable, scalar checks run.
    try:
        Lat1 = atleast_1d(Lat1)
        Lon1 = atleast_1d(Lon1)
        Rng = atleast_1d(Rng)
        Azim = atleast_1d(Azim)
        if (abs(Lat1) > 90).any():
            raise ValueError("Input lat. must be between -90 and 90 deg., inclusive.")
        if (Rng < 0).any():
            raise ValueError("Ground distance must be positive")
    except NameError:
        if abs(Lat1) > 90:
            raise ValueError("Input lat. must be between -90 and 90 deg., inclusive.")
        if Rng < 0:
            raise ValueError("Ground distance must be positive")
    # Select ellipsoid parameters (defaults to WGS84 when no ellipsoid given).
    if ell is not None:
        a = ell.semimajor_axis
        b = ell.semiminor_axis
        f = ell.flattening
    else:  # Supply WGS84 earth ellipsoid axis lengths in meters:
        a = 6378137  # semimajor axis
        b = 6356752.31424518  # semiminor axis (m)
        f = (a - b) / a  # flattening
    lat1 = radians(Lat1)  # initial latitude in radians
    lon1 = radians(Lon1)  # initial longitude in radians
    # correct for errors at exact poles by adjusting 0.6 millimeters:
    # (array path first; TypeError falls back to the scalar path)
    try:
        i = abs(pi / 2 - abs(lat1)) < 1e-10
        lat1[i] = sign(lat1[i]) * (pi / 2 - (1e-10))
    except TypeError:
        if abs(pi / 2 - abs(lat1)) < 1e-10:
            lat1 = sign(lat1) * (pi / 2 - (1e-10))
    alpha1 = radians(Azim)  # initial azimuth in radians
    sinAlpha1 = sin(alpha1)
    cosAlpha1 = cos(alpha1)
    # Reduced latitude and equatorial-crossing quantities (Vincenty direct setup).
    tanU1 = (1 - f) * tan(lat1)
    cosU1 = 1 / sqrt(1 + tanU1 ** 2)
    sinU1 = tanU1 * cosU1
    sigma1 = atan2(tanU1, cosAlpha1)
    sinAlpha = cosU1 * sinAlpha1
    cosSqAlpha = 1 - sinAlpha * sinAlpha
    # Series expansion coefficients A and B (Vincenty eqs. 3-4).
    uSq = cosSqAlpha * (a ** 2 - b ** 2) / b ** 2
    A = 1 + uSq / 16384 * (4096 + uSq * (-768 + uSq * (320 - 175 * uSq)))
    B = uSq / 1024 * (256 + uSq * (-128 + uSq * (74 - 47 * uSq)))
    sigma = Rng / (b * A)
    sigmaP = 2 * pi
    sinSigma = nan
    cosSigma = nan
    cos2SigmaM = nan
    # Iterate sigma (angular distance on the auxiliary sphere) until it
    # converges to 1e-12 rad; .any() path handles array inputs.
    try:
        i = (abs(sigma - sigmaP) > 1e-12).any()
    except AttributeError:
        i = abs(sigma - sigmaP) > 1e-12
    while i:
        cos2SigmaM = cos(2 * sigma1 + sigma)
        sinSigma = sin(sigma)
        cosSigma = cos(sigma)
        deltaSigma = (
            B
            * sinSigma
            * (
                cos2SigmaM
                + B
                / 4
                * (
                    cosSigma * (-1 + 2 * cos2SigmaM * cos2SigmaM)
                    - B
                    / 6
                    * cos2SigmaM
                    * (-3 + 4 * sinSigma * sinSigma)
                    * (-3 + 4 * cos2SigmaM * cos2SigmaM)
                )
            )
        )
        sigmaP = sigma
        sigma = Rng / (b * A) + deltaSigma
        try:
            i = (abs(sigma - sigmaP) > 1e-12).any()
        except AttributeError:
            i = abs(sigma - sigmaP) > 1e-12
    # Destination latitude (Vincenty eq. 8) and longitude difference (eqs. 10-11).
    tmp = sinU1 * sinSigma - cosU1 * cosSigma * cosAlpha1
    lat2 = atan2(
        sinU1 * cosSigma + cosU1 * sinSigma * cosAlpha1,
        (1 - f) * sqrt(sinAlpha * sinAlpha + tmp ** 2),
    )
    lamb = atan2(sinSigma * sinAlpha1, cosU1 * cosSigma - sinU1 * sinSigma * cosAlpha1)
    C = f / 16 * cosSqAlpha * (4 + f * (4 - 3 * cosSqAlpha))
    L = lamb - (
        f
        * (1 - C)
        * sinAlpha
        * (sigma + C * sinSigma * (cos2SigmaM + C * cosSigma * (-1 + 2 * cos2SigmaM * cos2SigmaM)))
    )
    lon2 = degrees(lon1 + L)
    # Truncates angles into the [-pi pi] range
    # if lon2 > pi:
    #    lon2 = pi*((absolute(lon2)/pi) -
    #       2*ceil(((absolute(lon2)/pi)-1)/2)) * sign(lon2)
    lon2 = lon2 % 360  # follow [0, 360) convention
    # .squeeze()[()] collapses 0-d arrays back to Python scalars for scalar input.
    try:
        return degrees(lat2).squeeze()[()], lon2.squeeze()[()]
    except AttributeError:
        return degrees(lat2), lon2
def track2(
    lat1: ndarray,
    lon1: ndarray,
    lat2: ndarray,
    lon2: ndarray,
    ell: Ellipsoid = None,
    npts: int = 100,
    deg: bool = True,
) -> tuple[list[ndarray], list[ndarray]]:
    """
    computes great circle tracks starting at the point lat1, lon1 and ending at lat2, lon2

    Parameters
    ----------
    Lat1 : float
        Geodetic latitude of first point (degrees)
    Lon1 : float
        Geodetic longitude of first point (degrees)
    Lat2 : float
        Geodetic latitude of second point (degrees)
    Lon2 : float
        Geodetic longitude of second point (degrees)
    ell : Ellipsoid, optional
        reference ellipsoid
    npts : int, optional
        number of points (default is 100)
    deg : bool, optional
        degrees input/output  (False: radians in/out)

    Results
    -------
    lats : list of float
        latitudes of points along track
    lons : list of float
        longitudes of points along track

    Raises
    ------
    ValueError
        if npts < 2, or if the endpoints are antipodal (route undefined)

    Based on code posted to the GMT mailing list in Dec 1999 by Jim Levens and by Jeff Whitaker <jeffrey.s.whitaker@noaa.gov>
    """
    if ell is None:
        ell = Ellipsoid()
    if npts < 2:
        raise ValueError("npts must be greater than 1")
    if npts == 2:
        # trivial track: just the two endpoints, in the caller's units
        return [lat1, lat2], [lon1, lon2]
    if deg:
        rlat1 = radians(lat1)
        rlon1 = radians(lon1)
        rlat2 = radians(lat2)
        rlon2 = radians(lon2)
    else:
        rlat1, rlon1, rlat2, rlon2 = lat1, lon1, lat2, lon2
        # vdist()/vreckon() operate in degrees; convert the radian inputs once
        # here (previously the radian values were passed through unconverted,
        # and the degree outputs were then double-converted at the end).
        lat1, lon1, lat2, lon2 = degrees(lat1), degrees(lon1), degrees(lat2), degrees(lon2)
    # great-circle arc length via the haversine formula (radians)
    gcarclen = 2.0 * asin(
        sqrt(
            (sin((rlat1 - rlat2) / 2)) ** 2
            + cos(rlat1) * cos(rlat2) * (sin((rlon1 - rlon2) / 2)) ** 2
        )
    )
    # check to see if points are antipodal (if so, route is undefined).
    if abs(gcarclen - pi) < 1e-12:
        raise ValueError(
            "cannot compute intermediate points on a great circle whose endpoints are antipodal"
        )
    # pass the chosen ellipsoid to every geodesic call (previously it was
    # omitted from the first vdist() and from vreckon(), silently falling
    # back to WGS84 even when the caller supplied another ellipsoid)
    distance, azimuth = vdist(lat1, lon1, lat2, lon2, ell=ell)
    incdist = distance / (npts - 1)
    latpt = lat1
    lonpt = lon1
    lons = [lonpt]
    lats = [latpt]
    # step along the geodesic, re-aiming at the endpoint after each leg
    for _ in range(npts - 2):
        latptnew, lonptnew = vreckon(latpt, lonpt, incdist, azimuth, ell=ell)
        azimuth = vdist(latptnew, lonptnew, lat2, lon2, ell=ell)[1]
        lats.append(latptnew)
        lons.append(lonptnew)
        latpt = latptnew
        lonpt = lonptnew
    lons.append(lon2)
    lats.append(lat2)
    if not deg:
        # intermediate points were produced in degrees; return radians
        lats = list(map(radians, lats))
        lons = list(map(radians, lons))
    return lats, lons
| |
#!/usr/bin/env python
from python_qt_binding import QtWidgets, QtCore
from python_qt_binding.QtWidgets import QWidget, QPushButton, QColorDialog
from python_qt_binding.QtCore import Slot
from python_qt_binding.QtGui import QColor
from frame_editor.commands import *
from frame_editor.interface import Interface
import rospkg
import os
class FrameEditor_StyleWidget(Interface):
    """Qt panel for editing the visual style of the active frame.

    Registers itself as an observer on the frame editor and shows/hides the
    geometry inputs (mesh path, diameter, length/width/height, color) that
    apply to the active frame's style ("mesh", "sphere", "cube", ...).
    NOTE: Python 2 source (print statements).
    """
    def __init__(self, frame_editor):
        # Subscribe to editor updates so update() is called on changes.
        self.editor = frame_editor
        self.editor.observers.append(self)
        self.old_frame = None

        self.layout = QtWidgets.QGridLayout()
        self.widget = QWidget()
        self.widget.setLayout(self.layout)

        # A QLineEdit (not a QLabel) is used here, presumably so the mesh
        # path remains selectable/copyable -- TODO confirm.
        self.mesh_label = QtWidgets.QLineEdit("File:")
        self.mesh_label.setSizePolicy(QtWidgets.QSizePolicy.Ignored, QtWidgets.QSizePolicy.Fixed)
        self.mesh_button = QtWidgets.QPushButton("Open")
        self.mesh_button.clicked.connect(lambda: self.btn_open_mesh_clicked())
        self.diameter_label = QtWidgets.QLabel("Diameter:")
        self.diameter_spinbox = QtWidgets.QDoubleSpinBox()
        self.diameter_spinbox.editingFinished.connect(lambda: self.diameter_changed())
        self.length_label = QtWidgets.QLabel("Length:")
        self.length_spinbox = QtWidgets.QDoubleSpinBox()
        self.length_spinbox.editingFinished.connect(lambda: self.length_changed())
        self.width_label = QtWidgets.QLabel("Width:")
        self.width_spinbox = QtWidgets.QDoubleSpinBox()
        self.width_spinbox.editingFinished.connect(lambda: self.width_changed())
        self.height_label = QtWidgets.QLabel("Height:")
        self.height_spinbox = QtWidgets.QDoubleSpinBox()
        self.height_spinbox.editingFinished.connect(lambda: self.height_changed())
        self.color_label = QtWidgets.QLabel()
        self.color_label.setAutoFillBackground(True)
        self.update_color_label(None)
        self.color_button = QtWidgets.QPushButton("Set Color")
        self.color_button.clicked.connect(lambda: self.btn_color_clicked())

        # Grid layout: one editable property per row.
        self.layout.addWidget(self.mesh_label, 0, 0)
        self.layout.addWidget(self.mesh_button, 0, 1)
        self.layout.addWidget(self.diameter_label, 1, 0)
        self.layout.addWidget(self.diameter_spinbox, 1, 1)
        self.layout.addWidget(self.length_label, 2, 0)
        self.layout.addWidget(self.length_spinbox, 2, 1)
        self.layout.addWidget(self.width_label, 3, 0)
        self.layout.addWidget(self.width_spinbox, 3, 1)
        self.layout.addWidget(self.height_label, 4, 0)
        self.layout.addWidget(self.height_spinbox, 4, 1)
        self.layout.addWidget(self.color_label, 5, 0)
        self.layout.addWidget(self.color_button, 5, 1)
        print "init"
        self.update_widget(None)
    def get_widget(self):
        """Return the top-level QWidget for embedding in the host UI."""
        return self.widget
    def update(self, editor, level, elements):
        """Observer callback: refresh on frame switch (level bit 2) or
        on property change of the current frame (level bit 4)."""
        if level & 2:
            ## Check for change
            if self.editor.active_frame is not self.old_frame:
                self.update_widget(self.editor.active_frame)
                self.update_values(self.editor.active_frame)
                self.update_color_label(self.editor.active_frame)
        elif level & 4:
            if self.editor.active_frame is not None:
                self.update_values(self.editor.active_frame)
                self.update_color_label(self.editor.active_frame)
    def update_widget(self, frame):
        """Show only the input rows relevant to the frame's style;
        disable the whole panel when there is no styled frame."""
        ## Clear layout
        #while self.layout.count():
        #    child = self.layout.takeAt(0)
        #    child.widget().deleteLater()
        # Hide everything first, then selectively re-show below.
        self.mesh_label.hide()
        self.mesh_button.hide()
        self.diameter_label.hide()
        self.diameter_spinbox.hide()
        self.length_label.hide()
        self.length_spinbox.hide()
        self.width_label.hide()
        self.width_spinbox.hide()
        self.height_label.hide()
        self.height_spinbox.hide()
        if frame is None or frame.style == "none":
            self.widget.setEnabled(False)
            return
        if frame.style == "mesh":
            self.mesh_label.show()
            self.mesh_button.show()
        elif frame.style == "sphere":
            self.diameter_label.show()
            self.diameter_spinbox.show()
        else:
            # plane-like styles get length/width; cubes additionally height
            self.length_label.show()
            self.length_spinbox.show()
            self.width_label.show()
            self.width_spinbox.show()
            if frame.style == "cube":
                self.height_label.show()
                self.height_spinbox.show()
        self.widget.setEnabled(True)
    def update_values(self, frame):
        """Copy the frame's current geometry values into the input widgets."""
        if frame is None or frame.style == "none":
            return
        if frame.style == "mesh":
            self.mesh_label.setText(frame.path)
        elif frame.style == "sphere":
            self.diameter_spinbox.setValue(frame.diameter)
        else:
            self.length_spinbox.setValue(frame.length)
            self.width_spinbox.setValue(frame.width)
            if frame.style == "cube":
                self.height_spinbox.setValue(frame.height)
    def update_color_label(self, frame):
        """Tint the color swatch label with the frame's RGBA color
        (frame.color components are floats in [0, 1]); grey if no frame."""
        if frame is None:
            values = "{}, {}, {}, {}".format(200, 200, 200, 255)
        else:
            values = "{}, {}, {}, {}".format(frame.color[0]*255, frame.color[1]*255, frame.color[2]*255, frame.color[3]*255)
        self.color_label.setStyleSheet("QLabel { background-color: rgba("+values+"); }")
    # NOTE(review): the @Slot(float) signatures take no float argument; the
    # slots read the spinbox values directly -- confirm the decorators' intent.
    @Slot(float)
    def diameter_changed(self):
        # Issue an undoable command only if the value actually changed.
        if self.editor.active_frame.diameter != self.diameter_spinbox.value():
            self.editor.command(Command_SetGeometry(self.editor, self.editor.active_frame, "diameter", self.diameter_spinbox.value()))
    @Slot(float)
    def length_changed(self):
        if self.editor.active_frame.length != self.length_spinbox.value():
            self.editor.command(Command_SetGeometry(self.editor, self.editor.active_frame, "length", self.length_spinbox.value()))
    @Slot(float)
    def width_changed(self):
        if self.editor.active_frame.width != self.width_spinbox.value():
            self.editor.command(Command_SetGeometry(self.editor, self.editor.active_frame, "width", self.width_spinbox.value()))
    @Slot(float)
    def height_changed(self):
        if self.editor.active_frame.height != self.height_spinbox.value():
            self.editor.command(Command_SetGeometry(self.editor, self.editor.active_frame, "height", self.height_spinbox.value()))
    @Slot(bool)
    def btn_open_mesh_clicked(self):
        """Pick a mesh file and store it as package-relative path when the
        file lives inside a locatable ROS package, absolute path otherwise."""
        path = QtWidgets.QFileDialog.getOpenFileName(None, 'Open Mesh', '/home', 'Mesh Files (*.stl)')[0]
        try:
            rospackage = rospkg.get_package_name(path)
            if rospackage is None:
                QtWidgets.QMessageBox.warning(self.widget, "Saving absolute path to mesh",
                    "Cannot find rospackage with selected mesh in it!\nSaving absolute path to mesh instead!")
                #print "WARNING cannot find rospackage with mesh in it, saving absolute path"
                self.editor.command(Command_SetGeometry(self.editor, self.editor.active_frame, "package", ""))
                self.editor.command(Command_SetGeometry(self.editor, self.editor.active_frame, "path", path))
            else:
                rel_path = os.path.relpath(path , rospkg.RosPack().get_path(rospackage))
                print "Saving: package:", rospackage, "+ relative path:", rel_path
                self.editor.command(Command_SetGeometry(self.editor, self.editor.active_frame, "package", rospackage))
                self.editor.command(Command_SetGeometry(self.editor, self.editor.active_frame, "path", rel_path))
        # NOTE(review): bare except -- presumably meant to catch
        # rospkg.ResourceNotFound for unsourced packages; confirm and narrow.
        except:
            QtWidgets.QMessageBox.warning(self.widget, "Saving absolute path to mesh",
                "The found rospackage with selected mesh in it is not sourced in your ROS workspace!\n"+
                "Cannot resolve the packagepath\nSaving absolute path to mesh instead!")
            #print "The package found is not sourced within the current workspace, saving absolute path instead!"
            self.editor.command(Command_SetGeometry(self.editor, self.editor.active_frame, "package", ""))
            self.editor.command(Command_SetGeometry(self.editor, self.editor.active_frame, "path", path))
    @Slot(bool)
    def btn_color_clicked(self):
        """Open a color dialog preloaded with the frame's color (0-255 scale)
        and apply the chosen RGBA back to the frame via an undoable command."""
        frame = self.editor.active_frame
        color = QtWidgets.QColorDialog.getColor(
            QColor(frame.color[0]*255,
                frame.color[1]*255,
                frame.color[2]*255,
                frame.color[3]*255),
            None,
            "Select Color",
            options=QtWidgets.QColorDialog.ShowAlphaChannel)
        self.editor.command(Command_SetStyleColor(self.editor, frame, color.getRgbF()))
# eof
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Read the phone calls list and extract the ones that have no phonebook entry
Do a backward search with the used number and if a name has been found add the entry to the given phonebook
@Werner Kuehn - Use at your own risk
29.01.2016 Add alternate number search
09.02.2016 Fixed duplicate phonebook entries. Handling of type 2 calls
17.02.2016 Append numbers to existing phonebook entries
18.02.2016 Remove quickdial entry
17.03.2016 Changed html.parser to html.parser.HTMLParser()
21.03.2016 Added config file
23.03.2016 Fixed phone book entry names handling for html special characters
08.04.2016 0.2.0 WK Added fritzCallMon.py, made fritzBackwardSearch module callable
27.04.2016 0.2.2 WK Enhanced search by removing numbers at the end in case someone has dialed more numbers
03.08.2016 0.2.3 WK Fix duplicate phonebook entries caused by following call of Type 10
27.12.2016 0.2.4 WK Improve search by adding zero at the end
25.07.2017 0.2.5 WK Correct html conversion in dastelefonbuch
09.08.2017 0.2.6 WK Add area code length into fuzzy search. Avoid adding pre-dial numbers into the phone book
27.08.2017 0.2.7 WK Replace & in phonebook name with u. as AVM hasn't fixed this problem yet
"""
__version__ = '0.2.7'
import argparse
import configparser
import copy
import datetime
import html.parser
import logging
import os
import re
from xml.etree.ElementTree import fromstring, tostring
import certifi
import urllib3
import xmltodict
from bs4 import BeautifulSoup
from fritzconnection import FritzConnection
logger = logging.getLogger(__name__)
args = argparse.Namespace()
args.logfile = ''
class FritzCalls(object):
    """Access the FRITZ!Box call list and reverse-search unknown numbers.

    Downloads the current call list via TR-064 (X_AVM-DE_OnTel) and offers
    helpers to extract callers without a phonebook name and to resolve them
    through the dasoertliche.de backward search.
    """
    def __init__(self, connection, notfoundfile):
        """connection: established FritzConnection; notfoundfile: path (or
        1-element list from argparse nargs=1) of the negative-result cache."""
        self.areaCode = (connection.call_action('X_VoIP', 'GetVoIPCommonAreaCode'))['NewVoIPAreaCode']
        self.notfoundfile = notfoundfile
        if notfoundfile and type(notfoundfile) is list:
            # argparse nargs=1 wraps the value in a list
            self.notfoundfile = notfoundfile[0]
        self.http = urllib3.PoolManager(cert_reqs='CERT_REQUIRED', ca_certs=certifi.where())
        callURLList = connection.call_action('X_AVM-DE_OnTel', 'GetCallList')
        response = self.http.request('GET', callURLList['NewCallListURL'])
        self.calldict = xmltodict.parse(response.data)['root']
    # not working yet
    # NOTE(review): mutates self.calldict['Call'] while iterating over it,
    # which raises at runtime -- hence the "not working yet" above.
    def remove_known(self): # remove all callers listed by name
        for i in self.calldict['Call']:
            remove = True
            callentry = self.calldict['Call'][i]
            if (callentry['Type'] in ('1', '2') and callentry['Caller'] != None and callentry['Caller'].isdigit()) \
                    or (callentry['Type'] == '3' and callentry['Called'] != None and callentry['Called'].isdigit()):
                if callentry['Name'] == None or callentry['Name'].startswith(
                        callentry['Caller']) or callentry['Name'].startswith(
                        callentry['Called']):
                    remove = False
            if remove:
                del self.calldict['Call'][i]
    def get_unknown(self): # get list of callers not listed with their name
        """Return {number: alternate_number_or_''} for calls of the last 7 days
        whose entry has no resolved name. Type 1/2 = incoming, 3 = outgoing."""
        numberlist = {}
        for callentry in self.calldict['Call']:
            # call list is newest-first; stop once entries are older than 7 days
            if datetime.datetime.strptime(callentry['Date'], "%d.%m.%y %H:%M") < datetime.datetime.today() - datetime.timedelta(days=7):
                break
            number = None
            if callentry['Type'] in ('1', '2') and callentry['Caller'] != None and callentry['Caller'].isdigit():
                number = callentry['Caller']
            elif callentry['Type'] == '3' and callentry['Called'] != None and callentry['Called'].isdigit():
                number = callentry['Called']
            if number:
                # a Name that just repeats the number means "unresolved"
                if callentry['Name'] == None or callentry['Name'].startswith(number):
                    numberlist[number] = ''
                    if callentry['Name'] != None and callentry['Name'].startswith(number):
                        # "number (alternate)" -- keep the alternate number too
                        startAlternate = callentry['Name'].find('(')
                        numberlist[number] = callentry['Name'][startAlternate+1:len(callentry['Name'])-1]
        return numberlist
    def get_names(self, searchlist, nameNotFoundList):
        """Backward-search every number in searchlist.

        Returns {number: name} for hits; appends misses to nameNotFoundList
        (mutated in place). Local numbers get the box's area code prepended;
        failed lookups are retried with shortened ("fuzzy") numbers down to
        area-code length + 3 digits.
        """
        foundlist = {}
        for number in searchlist:
            origNumber = number
            # remove international numbers
            if number.startswith("00"):
                fullNumber = ""
                logger.info("Ignoring international number {}".format(number))
                nameNotFoundList.append(number)
            # remove pre-dial number
            elif number.startswith("010"):
                # strip carrier pre-dial: cut everything up to the next '0'
                nextZero = number.find('0', 3)
                number = number[nextZero:]
                fullNumber = number
            else:
                # add the area code for local numbers
                m = re.search('^[1-9][0-9]+', number)
                if m:
                    fullNumber = '{}{}'.format(self.areaCode, number)
                else:
                    fullNumber = number
            name = None
            numberLogged = False
            numberSaved = False
            l_onkz = FritzBackwardSearch().get_ONKz_length(fullNumber)
            while (name == None and len(fullNumber) >= (l_onkz + 3)):
                name = self.dasoertliche(fullNumber)
                # if not name and searchlist[number] != '':
                #    name = self.dasoertliche(searchlist[number])
                if not name:
                    logger.info('{} not found'.format(fullNumber))
                    nameNotFoundList.append(fullNumber)
                    if fullNumber != number and not numberLogged:
                        nameNotFoundList.append(number)
                    if origNumber != number and not numberLogged:
                        nameNotFoundList.append(origNumber)
                    numberLogged = True
                    # don't do fuzzy search for mobile numbers and 0800
                    if fullNumber[0:3] in ("015", "016", "017") or fullNumber[0:4] in ("0800"):
                        fullNumber = ""
                    # NOTE(review): the next two branches are identical; the
                    # elif was presumably meant to shorten differently when the
                    # number already ends in "0" -- confirm the intended rule.
                    elif fullNumber[-1] == "0":
                        fullNumber = fullNumber[:-2]+"0"
                    else:
                        fullNumber = fullNumber[:-2]+"0"
                else:
                    foundlist[fullNumber] = name
                    if fullNumber != number and not numberSaved:
                        foundlist[number] = name
                        numberSaved = True
        return foundlist
    def dasoertliche(self, number):
        """Reverse-search `number` on dasoertliche.de; return the display name
        (HTML-scraped, '&' replaced by 'u.') or None if there is no hit."""
        url = 'https://www.dasoertliche.de/Controller?form_name=search_inv&ph={}'.format(number)
        headers = {
            'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.90 Safari/537.36'}
        response = self.http.request('GET', url, headers=headers)
        content = response.data.decode("utf-8", "ignore") \
            .replace('\t', '').replace('\n', '').replace('\r', '').replace('&nbsp;', ' ')
        soup = BeautifulSoup(content, 'html.parser')
        name = soup.find('span', class_='st-treff-name')
        if name:
            logger.info('{} = dasoertliche({})'.format(number, name.get_text().encode(
                'ascii', 'xmlcharrefreplace').decode('ascii')))
            # '&' -> 'u.' because AVM's phonebook mishandles ampersands
            return name.get_text().encode('ascii', 'xmlcharrefreplace').decode('ascii').replace(' & ', ' u. ')
class FritzPhonebook(object):
    """Wrapper around one AVM OnTel phonebook (TR-064 X_AVM-DE_OnTel).

    Resolves the phonebook by display name, caches its XML tree and offers
    lookup/append/add operations on contacts.
    """
    def __init__(self, connection, name):
        """connection: established FritzConnection; name: phonebook display
        name (or 1-element list from argparse nargs=1). Exits if not found."""
        self.connection = connection
        if name and isinstance(name, list):
            # argparse nargs=1 wraps the value in a list
            name = name[0]
        bookNumbers = self.connection.call_action('X_AVM-DE_OnTel', 'GetPhonebookList')['NewPhonebookList'].split(",")
        self.bookNumber = -1
        for number in bookNumbers:
            a = connection.call_action('X_AVM-DE_OnTel', 'GetPhonebook', NewPhonebookID=number)
            if a['NewPhonebookName'] == name:
                self.bookNumber = number
                logger.debug("PhonebookNumber = {}".format(number))
                break
        if self.bookNumber == -1:
            # was: logger.error(msg, True) -- the stray extra argument broke
            # the log record formatting and swallowed the message
            logger.error('Phonebook: {} not found !'.format(name))
            exit(1)
        self.get_phonebook()
    def get_phonebook(self):
        """Download the phonebook XML and cache it as an ElementTree.

        The FRITZ!Box export marks the entry index only in an XML comment
        (<!-- idx:N -->); rewrite it into a real <idx>N</idx> element so it
        survives parsing.
        """
        self.http = urllib3.PoolManager()
        response = self.http.request('GET', self.connection.call_action(
            'X_AVM-DE_OnTel', 'GetPhonebook', NewPhonebookID=self.bookNumber)['NewPhonebookURL'])
        self.phonebook = fromstring(
            re.sub(r"!-- idx:(\d+) --", lambda m: "idx>"+m.group(1)+"</idx", response.data.decode("utf-8")))
    def get_entry(self, name=None, number=None, uid=None, id=None):
        """Find a contact by exactly one criterion: name, number, uniqueid
        or entry id. Returns {'contact_id': ..., 'contact': Element} or None.
        """
        for contact in self.phonebook.iter('contact'):
            if name is not None:
                for realName in contact.iter('realName'):
                    # html.unescape() replaces HTMLParser().unescape(),
                    # which was removed in Python 3.9
                    if html.unescape(realName.text) == html.unescape(name):
                        for idx in contact.iter('idx'):
                            return {'contact_id': idx.text, 'contact': contact}
            elif number is not None:
                for realNumber in contact.iter('number'):
                    if realNumber.text == number:
                        for idx in contact.iter('idx'):
                            return {'contact_id': idx.text, 'contact': contact}
            elif uid is not None:
                for uniqueid in contact.iter('uniqueid'):
                    if uniqueid.text == uid:
                        for idx in contact.iter('idx'):
                            return {'contact_id': idx.text, 'contact': contact}
            elif id is not None:
                # direct fetch by entry id -- no iteration over the cache needed
                phone_entry = fromstring(self.connection.call_action(
                    'X_AVM-DE_OnTel', 'GetPhonebookEntry', NewPhonebookID=self.bookNumber,
                    NewPhonebookEntryID=id)['NewPhonebookEntryData'])
                return {'contact_id': id, 'contact': phone_entry}
    def append_entry(self, entry, phone_number):
        """Add phone_number as an additional 'home' number to an existing
        contact (entry as returned by get_entry) and write it back to the box.
        """
        phonebookEntry = self.get_entry(id=entry['contact_id'])['contact']
        for realName in phonebookEntry.iter('realName'):
            # work around AVM's double-escaped ampersands
            realName.text = realName.text.replace('&amp;', '&')
        newnumber = None
        for number in phonebookEntry.iter('number'):
            if 'quickdial' in number.attrib:
                # never copy a quickdial assignment onto the new number
                del number.attrib['quickdial']
            newnumber = copy.deepcopy(number)
            newnumber.text = phone_number
            newnumber.set('type', 'home')
            newnumber.set('prio', '1')
        if newnumber is not None:
            for telephony in phonebookEntry.iter('telephony'):
                telephony.append(newnumber)
            self.connection.call_action('X_AVM-DE_OnTel', 'SetPhonebookEntry',
                                        NewPhonebookEntryData='<?xml version="1.0" encoding="utf-8"?>' +
                                        tostring(phonebookEntry).decode("utf-8"),
                                        NewPhonebookID=self.bookNumber, NewPhonebookEntryID=entry['contact_id'])
    def add_entry(self, phone_number, name):
        """Create a brand-new contact with a single 'home' number and
        refresh the cached phonebook afterwards."""
        phonebookEntry = fromstring(
            '<contact><person><realName></realName></person><telephony><number type="home" prio="1"></number></telephony></contact>')
        for number in phonebookEntry.iter('number'):
            number.text = phone_number
            number.set('type', 'home')
            number.set('prio', '1')
        for realName in phonebookEntry.iter('realName'):
            realName.text = html.unescape(name)
        self.connection.call_action('X_AVM-DE_OnTel', 'SetPhonebookEntry',
                                    NewPhonebookEntryData='<?xml version="1.0" encoding="utf-8"?>' +
                                    tostring(phonebookEntry).decode("utf-8"),
                                    NewPhonebookID=self.bookNumber, NewPhonebookEntryID='')
        self.get_phonebook()
    def add_entry_list(self, entry_list):
        """Upsert a {number: name} mapping: append the number when a contact
        with that name exists, otherwise create a new contact."""
        if entry_list:
            for number, name in entry_list.items():
                entry = self.get_entry(name=name)
                if entry:
                    self.append_entry(entry, number)
                else:
                    self.add_entry(number, name)
class FritzBackwardSearch(object):
    """Resolve unknown caller numbers from the FRITZ!Box call list via
    backward search and add the hits to a configured phonebook.

    Configuration comes from fritzBackwardSearch.ini next to this file,
    optionally overridden on the command line. Numbers that could not be
    resolved are cached in a "not found" file to avoid repeated lookups.
    """
    def __init__(self):
        fname = os.path.join(os.path.dirname(__file__), 'fritzBackwardSearch.ini')
        if os.path.isfile(fname):
            self.prefs = self.__read_configuration__(fname)
        else:
            # was: logger.error(msg, True) -- the stray extra argument broke
            # the log record formatting; use lazy %-style args instead
            logger.error('%s not found', fname)
            exit(1)
        self.__init_logging__()
        global args
        args = self.__get_cli_arguments__()
        self.__read_ONKz__()
        self.connection = FritzConnection(
            address=args.address,
            port=args.port,
            user=args.username,
            password=args.password)
        self.phonebook = FritzPhonebook(self.connection, name=args.phonebook)
        self.notfoundfile = args.notfoundfile
        if args.notfoundfile and isinstance(args.notfoundfile, list):
            # argparse nargs=1 wraps the value in a list
            self.notfoundfile = args.notfoundfile[0]
        try:
            with open(self.notfoundfile, 'r') as notfound:
                self.nameNotFoundList = notfound.read().splitlines()
        except OSError:
            # first run: create an empty cache file (closing the handle,
            # which the previous open(...).read() chain leaked)
            open(self.notfoundfile, 'w').close()
            self.nameNotFoundList = []
    def __init_logging__(self):
        """Configure root logging from the ini file (logfile + loglevel)."""
        numeric_level = getattr(logging, self.prefs['loglevel'].upper(), None)
        if not isinstance(numeric_level, int):
            raise ValueError('Invalid log level: %s' % self.prefs['loglevel'])
        logging.basicConfig(
            filename=self.prefs['logfile'],
            level=numeric_level,
            format=('%(asctime)s %(levelname)s [%(name)s:%(lineno)s] %(message)s'),
            datefmt='%Y-%m-%d %H:%M:%S',
        )
    def __read_configuration__(self, filename): # read configuration from the configuration file and prepare a preferences dict
        """Return the [DEFAULT] section of the ini file as a plain dict."""
        cfg = configparser.ConfigParser()
        cfg.read(filename)
        preferences = {}
        for name, value in cfg.items('DEFAULT'):
            preferences[name] = value
        logger.debug(preferences)
        return preferences
    def __read_ONKz__(self): # read area code numbers
        """Load the tab-separated German area code (ONKz) table into
        self.onkz. A missing file is tolerated: get_ONKz_length() then
        falls back to its default length."""
        self.onkz = []
        fname = args.areacodefile
        if os.path.isfile(fname):
            with open(fname, 'r') as csvfile:
                for row in csvfile:
                    self.onkz.append(row.strip().split('\t'))
        else:
            # (a bare no-op `exit` statement used to follow here; the program
            # is designed to continue with the default area-code length)
            logger.error('%s not found', fname)
    def get_ONKz_length(self, phone_number):
        """Return the length of the area code prefixing phone_number."""
        for row in self.onkz:
            if phone_number[0:len(row[0])] == row[0]:
                return len(row[0])
        # return 4 as default length if not found (e.g. 0800)
        return 4
    # ---------------------------------------------------------
    # cli-section:
    # ---------------------------------------------------------
    def __get_cli_arguments__(self):
        """Build the argparse CLI; ini values serve as defaults."""
        parser = argparse.ArgumentParser(description='Update phonebook with caller list')
        parser.add_argument('-p', '--password',
                            nargs=1, default=self.prefs['password'],
                            help='Fritzbox authentication password')
        parser.add_argument('-u', '--username',
                            nargs=1, default=self.prefs['fritz_username'],
                            help='Fritzbox authentication username')
        parser.add_argument('-i', '--ip-address',
                            nargs=1, default=self.prefs['fritz_ip_address'],
                            dest='address',
                            help='IP-address of the FritzBox to connect to. '
                                 'Default: %s' % self.prefs['fritz_ip_address'])
        parser.add_argument('--port',
                            nargs=1, default=self.prefs['fritz_tcp_port'],
                            help='Port of the FritzBox to connect to. '
                                 'Default: %s' % self.prefs['fritz_tcp_port'])
        parser.add_argument('--phonebook',
                            nargs=1, default=self.prefs['fritz_phone_book'],
                            help='Existing phone book the numbers should be added to. '
                                 'Default: %s' % self.prefs['fritz_phone_book'])
        parser.add_argument('-l', '--logfile',
                            nargs=1, default=self.prefs['logfile'],
                            help='Path/Log file name. '
                                 'Default: %s' % self.prefs['logfile'])
        parser.add_argument('-a', '--areacodefile',
                            nargs=1, default=self.prefs['area_code_file'],
                            help='Path/file name where the area codes are listed. '
                                 'Default: %s' % self.prefs['area_code_file'])
        parser.add_argument(
            '-n', '--notfoundfile', nargs=1, default=self.prefs['name_not_found_file'],
            help='Path/file name where the numbers not found during backward search are saved to in order to prevent further unnessessary searches. '
                 'Default: %s' % self.prefs['name_not_found_file'])
        parser.add_argument('-s', '--searchnumber',
                            nargs='?', default='',
                            help='Phone number(s) to search for.')
        parser.add_argument('-v', '--version',
                            action='version', version=__version__,
                            help='Print the program version')
        return parser.parse_args()
    def runSearch(self, s=''):
        """Run one search pass.

        s: optional extra number (or tuple of numbers) to search for in
        addition to the unknown callers from the recent call list.
        Returns a newline-joined string of resolved names ('Nicht gefunden'
        if nothing matched).
        """
        if self.prefs['password'] != '':
            # ini password overrides any CLI value
            args.password = self.prefs['password']
        if args.password == '':
            logger.error('No password given')
            exit(1)
        if args.password and isinstance(args.password, list):
            args.password = args.password[0].rstrip()
        calls = FritzCalls(self.connection, notfoundfile=args.notfoundfile)
        unknownCallers = calls.get_unknown()
        searchnumber = []
        nameList = ''
        # merge explicitly requested numbers (CLI and parameter)
        if args.searchnumber:
            if isinstance(args.searchnumber, tuple):
                searchnumber += args.searchnumber
            else:
                searchnumber.append(args.searchnumber)
        if s:
            if isinstance(s, tuple):
                searchnumber += s
            else:
                searchnumber.append(s)
        if searchnumber:
            for number in searchnumber:
                logger.info("Searching for {}".format(number))
                contact = self.phonebook.get_entry(number=number)
                if not contact:
                    if number in self.nameNotFoundList:
                        logger.info('{} already in nameNotFoundList'.format(number))
                    else:
                        unknownCallers[number] = ''
                else:
                    for realName in contact['contact'].iter('realName'):
                        logger.info('{} = {}({})'.format(number, args.phonebook, realName.text))
                        nameList += realName.text.replace('&amp; ', '& ')+'\n'
        else:
            logger.error("Searchnumber nicht gesetzt")
        # skip numbers already known to be unresolvable, then persist any
        # new misses back to the cache file
        nameNotFoundList_length = len(self.nameNotFoundList)
        unknownCallers = set(unknownCallers.keys()).difference(set(self.nameNotFoundList))
        logger.debug("Length unknownCallers = {}".format(len(unknownCallers)))
        knownCallers = calls.get_names(unknownCallers, self.nameNotFoundList)
        if len(self.nameNotFoundList) > nameNotFoundList_length:
            with open(self.notfoundfile, "w") as outfile:
                outfile.write("\n".join(self.nameNotFoundList))
        self.phonebook.add_entry_list(knownCallers)
        if s in knownCallers:
            nameList += knownCallers[s].replace('&amp; ', '& ')+'\n'
        elif not nameList:
            nameList = 'Nicht gefunden'
        return nameList
if __name__ == '__main__':
    # Script entry point: run one search pass over the recent caller list.
    FBS = FritzBackwardSearch()
    # to search for a number specify it in here:
    # FBS.runSearch(s=('111', '1550'))
    # FBS.runSearch(s=('333'))
    FBS.runSearch()
| |
from simulate import *
import matplotlib.pyplot as plt
class possum(simulate):
    """
    Class for creating polarization and
    faraday rotation spectra.

    Frequency Coverages:
        _createWSRT()
            Frequency range for the Westerbork
            Synthesis Radio Telescope
            310 - 380 MHz
        _createASKAP12()
            ASKAP12 frequency coverage
            700 - 1300 MHz
            1500 - 1800 MHz
        _createASKAP36()
            ASKAP36 frequency coverage
            1130 - 1430 MHz
    """

    def __init__(self):
        self.__c = 2.99e+08    # speed of light in m/s
        self.__mhz = 1.0e+06   # Hz per MHz

    def _createWSRT(self, *args):
        """
        Create the WSRT frequency spectrum:
            310 - 380 MHz (400 channels), stored in self.nu_
        """
        self.nu_ = self._createFrequency(310., 380., nchan=400)

    def _createASKAP12(self, *args):
        """
        Create the ASKAP12 frequency range:
            700 - 1300 MHz (600 channels)
            1500 - 1800 MHz (300 channels)

        To call:
            _createASKAP12()

        Parameters:
            [None]

        Postcondition:
            self.nu_ holds the concatenated band [Hz]
        """
        band12 = self._createFrequency(700., 1300., nchan=600)
        band3 = self._createFrequency(1500., 1800., nchan=300)
        self.nu_ = np.concatenate((band12, band3))

    def _createASKAP36(self, *args):
        """
        Create the ASKAP36 frequency range:
            1130 - 1430 MHz (300 channels), stored in self.nu_
        """
        self.nu_ = self._createFrequency(1130., 1430., nchan=300)

    def _createFrequency(self, numin=700., numax=1800., nchan=100, store=False):
        """
        Creates an array of evenly spaced frequencies.
        numin and numax are in [MHz]; the returned array is in [Hz].

        To call:
            _createFrequency(numin, numax, nchan)

        Parameters:
            numin   lowest frequency [MHz]
            numax   highest frequency [MHz], inclusive
            nchan   number of channels (default was the float 100.;
                    now an int, same value)
            store   if True store in self.nu_ instead of returning

        Postcondition:
            returns (or stores) an ndarray of nchan frequencies [Hz]
        """
        # Convert MHz to Hz
        numax = numax * self.__mhz
        numin = numin * self.__mhz
        # linspace(numin, numax, n) == arange(n)*(numax-numin)/(n-1) + numin,
        # i.e. identical to the previous hand-rolled grid
        nu = np.linspace(numin, numax, int(nchan))
        if store:
            self.nu_ = nu
        else:
            return nu

    def _createNspec(self, flux, depth, chi, sig=0):
        """
        Function for generating N faraday spectra
        and merging into one polarization spectrum:
            P(lambda^2) = sum_i flux_i * exp(2j*(chi_i + depth_i*lambda^2))

        To call:
            createNspec(flux, depth, chi, sig)

        Parameters:
            flux    [float or array] component fluxes
            depth   [float or array] Faraday depths
            chi     [float or array] intrinsic polarization angles
            sig     [float] Gaussian noise level (0 = no noise)

        Postcondition:
            self.polarization_ holds the complex spectrum over self.nu_
        """
        # Explicit array broadcasting replaces the former (deprecated)
        # np.asmatrix/matrix-product formulation; results are identical.
        lamsq = np.square(self.__c / np.asarray(self.nu_))   # lambda^2 per channel
        flux = np.atleast_1d(np.asarray(flux))
        depth = np.atleast_1d(np.asarray(depth))
        chi = np.atleast_1d(np.asarray(chi))
        # phase[i, j] = chi_i + depth_i * lambda_j^2
        phase = chi[:, None] + depth[:, None] * lamsq[None, :]
        # sum the N components per channel
        P = np.sum(flux[:, None] * np.exp(2j * phase), axis=0)
        # Add Gaussian noise
        if sig != 0:
            P += self._addNoise(sig, P.size)
        # Store the polarization
        self.polarization_ = P

    def _createFaradaySpectrum(self, philo=-250, phihi=250):
        """
        Function for creating the Faraday spectrum by discrete Faraday
        transform of self.polarization_ over depths philo..phihi (inclusive).

        Postcondition:
            self.phi_      Faraday depth axis
            self.faraday_  spectrum normalized to unit peak amplitude
        """
        phi = np.arange(philo, phihi + 1)
        lamsq = np.square(self.__c / self.nu_)
        # mean lambda^2 reference (the former local name 'chiSq' was misleading)
        lamsq0 = np.mean(lamsq)
        # vectorized transform: kernel[k, j] = exp(-2j*phi_k*(lambda_j^2 - <lambda^2>))
        kernel = np.exp(-2j * np.outer(phi, lamsq - lamsq0))
        faraday = kernel.dot(self.polarization_) / len(self.nu_)
        self.phi_ = phi
        self.faraday_ = faraday / np.abs(faraday).max()

    def _addNoise(self, sigma, N):
        """
        Function for adding independent real and
        imaginary Gaussian noise.

        To call:
            _addNoise(sigma, N)

        Parameters:
            sigma   standard deviation per component
            N       number of samples

        Returns:
            complex ndarray of length N
        """
        noiseReal = np.random.normal(scale=sigma, size=N)
        noiseImag = 1j * np.random.normal(scale=sigma, size=N)
        return noiseReal + noiseImag
# ======================================================
# Try to recreate figure 21 in Farnsworth et. al (2011)
#
# Haven't been able to get the large offset;
# peak appears between the two RM components
# ======================================================
if __name__ == '__main__':
    spec = possum()
    # X_ is produced by the base class's _simulateNspec (not shown here)
    spec._simulateNspec(5)
    # Plot real/imaginary parts and the amplitude of spectrum #1
    plt.plot(spec.X_[1,:,0], 'r-', label='real')
    plt.plot(spec.X_[1,:,1], 'b-', label='imag')
    plt.plot(np.abs(spec.X_[1,:,0] + 1j*spec.X_[1,:,1]), 'k--', label='abs')
    plt.legend(loc='best')
    plt.show()
| |
"""Tests for the WLED config flow."""
import aiohttp
from wled import WLEDConnectionError
from homeassistant import data_entry_flow
from homeassistant.components.wled import config_flow
from homeassistant.config_entries import SOURCE_USER, SOURCE_ZEROCONF
from homeassistant.const import CONF_HOST, CONF_MAC, CONF_NAME
from homeassistant.core import HomeAssistant
from . import init_integration
from tests.async_mock import MagicMock, patch
from tests.common import load_fixture
from tests.test_util.aiohttp import AiohttpClientMocker
async def test_show_user_form(hass: HomeAssistant) -> None:
    """Test that the user set up form is served."""
    result = await hass.config_entries.flow.async_init(
        config_flow.DOMAIN, context={"source": SOURCE_USER},
    )

    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "user"
async def test_show_zeroconf_confirm_form(hass: HomeAssistant) -> None:
    """Test that the zeroconf confirmation form is served."""
    handler = config_flow.WLEDFlowHandler()
    handler.hass = hass
    handler.context = {"source": SOURCE_ZEROCONF, CONF_NAME: "test"}

    result = await handler.async_step_zeroconf_confirm()

    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "zeroconf_confirm"
    assert result["description_placeholders"] == {CONF_NAME: "test"}
async def test_show_zerconf_form(
    hass: HomeAssistant, aioclient_mock: AiohttpClientMocker
) -> None:
    """Test that the zeroconf confirmation form is served."""
    aioclient_mock.get(
        "http://192.168.1.123:80/json/",
        text=load_fixture("wled/rgb.json"),
        headers={"Content-Type": "application/json"},
    )

    handler = config_flow.WLEDFlowHandler()
    handler.hass = hass
    handler.context = {"source": SOURCE_ZEROCONF}

    discovery_info = {
        "host": "192.168.1.123",
        "hostname": "example.local.",
        "properties": {},
    }
    result = await handler.async_step_zeroconf(discovery_info)

    assert handler.context[CONF_HOST] == "192.168.1.123"
    assert handler.context[CONF_NAME] == "example"
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "zeroconf_confirm"
    assert result["description_placeholders"] == {CONF_NAME: "example"}
@patch("homeassistant.components.wled.WLED.update", side_effect=WLEDConnectionError)
async def test_connection_error(
    update_mock: MagicMock, hass: HomeAssistant, aioclient_mock: AiohttpClientMocker
) -> None:
    """Test we show user form on WLED connection error."""
    # Both the patched library call and the raw HTTP request fail.
    aioclient_mock.get("http://example.com/json/", exc=aiohttp.ClientError)

    result = await hass.config_entries.flow.async_init(
        config_flow.DOMAIN,
        context={"source": SOURCE_USER},
        data={CONF_HOST: "example.com"},
    )

    # The user form is re-shown with a base connection error.
    assert result["errors"] == {"base": "connection_error"}
    assert result["step_id"] == "user"
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
@patch("homeassistant.components.wled.WLED.update", side_effect=WLEDConnectionError)
async def test_zeroconf_connection_error(
    update_mock: MagicMock, hass: HomeAssistant, aioclient_mock: AiohttpClientMocker
) -> None:
    """Test we abort zeroconf flow on WLED connection error."""
    aioclient_mock.get("http://192.168.1.123/json/", exc=aiohttp.ClientError)

    result = await hass.config_entries.flow.async_init(
        config_flow.DOMAIN,
        context={"source": SOURCE_ZEROCONF},
        data={"host": "192.168.1.123", "hostname": "example.local.", "properties": {}},
    )

    # Discovery flows abort (rather than showing an error form).
    assert result["reason"] == "connection_error"
    assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
@patch("homeassistant.components.wled.WLED.update", side_effect=WLEDConnectionError)
async def test_zeroconf_confirm_connection_error(
    update_mock: MagicMock, hass: HomeAssistant, aioclient_mock: AiohttpClientMocker
) -> None:
    """Test we abort zeroconf flow on WLED connection error."""
    aioclient_mock.get("http://192.168.1.123:80/json/", exc=aiohttp.ClientError)

    # Context pre-populated as if the confirm step had been reached.
    result = await hass.config_entries.flow.async_init(
        config_flow.DOMAIN,
        context={
            "source": SOURCE_ZEROCONF,
            CONF_HOST: "example.com",
            CONF_NAME: "test",
        },
        data={"host": "192.168.1.123", "hostname": "example.com.", "properties": {}},
    )

    assert result["reason"] == "connection_error"
    assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
@patch("homeassistant.components.wled.WLED.update", side_effect=WLEDConnectionError)
async def test_zeroconf_no_data(
    update_mock: MagicMock, hass: HomeAssistant, aioclient_mock: AiohttpClientMocker
) -> None:
    """Test we abort if zeroconf provides no data."""
    flow = config_flow.WLEDFlowHandler()
    flow.hass = hass

    # Call the step directly with no discovery info at all.
    result = await flow.async_step_zeroconf()

    assert result["reason"] == "connection_error"
    assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
async def test_user_device_exists_abort(
    hass: HomeAssistant, aioclient_mock: AiohttpClientMocker
) -> None:
    """Test we abort the user flow if the WLED device is already configured."""
    await init_integration(hass, aioclient_mock)

    user_input = {CONF_HOST: "192.168.1.123"}
    result = await hass.config_entries.flow.async_init(
        config_flow.DOMAIN, context={"source": SOURCE_USER}, data=user_input
    )

    assert result["reason"] == "already_configured"
    assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
async def test_zeroconf_device_exists_abort(
    hass: HomeAssistant, aioclient_mock: AiohttpClientMocker
) -> None:
    """Test we abort zeroconf flow if WLED device already configured."""
    await init_integration(hass, aioclient_mock)

    discovery_info = {
        "host": "192.168.1.123",
        "hostname": "example.local.",
        "properties": {},
    }
    result = await hass.config_entries.flow.async_init(
        config_flow.DOMAIN, context={"source": SOURCE_ZEROCONF}, data=discovery_info
    )

    assert result["reason"] == "already_configured"
    assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
async def test_zeroconf_with_mac_device_exists_abort(
    hass: HomeAssistant, aioclient_mock: AiohttpClientMocker
) -> None:
    """Test we abort zeroconf flow if WLED device already configured."""
    await init_integration(hass, aioclient_mock)

    discovery_info = {
        "host": "192.168.1.123",
        "hostname": "example.local.",
        "properties": {CONF_MAC: "aabbccddeeff"},
    }
    result = await hass.config_entries.flow.async_init(
        config_flow.DOMAIN, context={"source": SOURCE_ZEROCONF}, data=discovery_info
    )

    assert result["reason"] == "already_configured"
    assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
async def test_full_user_flow_implementation(
    hass: HomeAssistant, aioclient_mock: AiohttpClientMocker
) -> None:
    """Test the full manual user flow from start to finish."""
    # NOTE: annotation on aioclient_mock added for consistency with every
    # other test in this module; runtime behavior is unchanged.
    aioclient_mock.get(
        "http://192.168.1.123:80/json/",
        text=load_fixture("wled/rgb.json"),
        headers={"Content-Type": "application/json"},
    )

    result = await hass.config_entries.flow.async_init(
        config_flow.DOMAIN, context={"source": SOURCE_USER},
    )

    assert result["step_id"] == "user"
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM

    # Submitting a host completes the flow and creates a config entry.
    result = await hass.config_entries.flow.async_configure(
        result["flow_id"], user_input={CONF_HOST: "192.168.1.123"}
    )

    assert result["data"][CONF_HOST] == "192.168.1.123"
    assert result["data"][CONF_MAC] == "aabbccddeeff"
    assert result["title"] == "192.168.1.123"
    assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
async def test_full_zeroconf_flow_implementation(
    hass: HomeAssistant, aioclient_mock: AiohttpClientMocker
) -> None:
    """Test the full zeroconf discovery flow from start to finish."""
    aioclient_mock.get(
        "http://192.168.1.123:80/json/",
        text=load_fixture("wled/rgb.json"),
        headers={"Content-Type": "application/json"},
    )

    handler = config_flow.WLEDFlowHandler()
    handler.hass = hass
    handler.context = {"source": SOURCE_ZEROCONF}

    discovery_info = {
        "host": "192.168.1.123",
        "hostname": "example.local.",
        "properties": {},
    }
    result = await handler.async_step_zeroconf(discovery_info)

    assert handler.context[CONF_HOST] == "192.168.1.123"
    assert handler.context[CONF_NAME] == "example"
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "zeroconf_confirm"
    assert result["description_placeholders"] == {CONF_NAME: "example"}

    result = await handler.async_step_zeroconf_confirm(user_input={})

    assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
    assert result["title"] == "example"
    assert result["data"][CONF_HOST] == "192.168.1.123"
    assert result["data"][CONF_MAC] == "aabbccddeeff"
| |
from __future__ import absolute_import
import abc
import sys
import threading
import typing as tp # NOQA
import warnings
import numpy
import chainer
from chainer import backend
from chainer.backends import _cpu
from chainer.backends import cuda
from chainer.backends import intel64
from chainer import types # NOQA
from chainer import utils
import chainerx
# Per-thread state shared between DeviceResident and _ToDeviceVisitor.
_thread_local = threading.local()

# Used in _ToDeviceVisitor to detect GPU-to-GPU (cupy-to-cupy) device transfer.
# It is usually `None`.
# Assign `False` to enable GPU-to-GPU detection. If detected, `True` will be
# assigned. `None` should be assigned again after retrieving the result.
_thread_local.flag_gpu_to_gpu = None
class DeviceResident(utils.enable_final(meta_base=abc.ABCMeta)):

    """A base class of objects with multi-device hierarchy."""

    # Default device before any transfer has happened.
    _device = _cpu.CpuDevice()

    def __init__(self):
        # Store overridden to_device family method names.
        # _ToDeviceVisitor consults this tuple to dispatch to user-overridden
        # to_cpu/to_gpu/to_intel64 implementations for backward compatibility.
        self._overridden_to_methods = tuple([
            m for m in ('to_cpu', 'to_gpu', 'to_intel64')
            if _is_to_device_method_overridden(self, m)])

    def device_resident_accept(self, visitor):
        """Applies the visitor to all the device objects in this instance.

        Args:
            visitor(~chainer.device_resident.DeviceResidentsVisitor): Visitor.

        This method should be overridden if the concrete class has custom
        sub-hierarchy of device resident objects.
        """
        visitor.visit_device_resident(self)

    @property
    def device(self):
        """:class:`~chainer.backend.Device` instance."""
        return self._device

    @property
    def xp(self):
        # type: () -> types.Xp
        """Array module corresponding to the device.

        Depending on the device in which this object resides, this property
        returns :mod:`numpy`, :mod:`cupy` or :mod:`chainerx`.
        """
        device = self.device
        if device is None:
            return None
        return device.xp

    @utils.final(action=DeprecationWarning)
    def to_cpu(self):
        # type: () -> 'DeviceResident'
        """Copies parameter variables and persistent values to CPU.

        .. deprecated:: v7.0.0
            Use :meth:`to_device` instead.

        This method does not handle non-registered attributes. If some of such
        attributes must be copied to CPU, the link implementation should
        override :meth:`~DeviceResident.device_resident_accept` to do so.

        Returns: self
        """
        visitor = _ToDeviceVisitor(
            backend.CpuDevice(),
            entry_method_info=('to_cpu', {}),
            starting_device_resident=self)
        self.__to_device(visitor)
        return self

    @utils.final(action=DeprecationWarning)
    def to_gpu(
            self,
            device=None,  # type: tp.Optional[types.CudaDeviceSpec]
    ):
        # type: (...) -> 'DeviceResident'
        """Copies parameter variables and persistent values to GPU.

        .. deprecated:: v7.0.0
            Use :meth:`to_device` instead.

        This method does not handle non-registered attributes. If some of such
        attributes must be copied to GPU, the link implementation must
        override :meth:`~DeviceResident.device_resident_accept` to do so.

        .. warning::

            This method does not transfer the parameters if they are already
            on GPU. Use ``to_device`` to perform inter-GPU transfer.

        Args:
            device: Target device specifier. If omitted, the current device is
                used.

        Returns: self
        """
        cuda.check_cuda_available()
        cuda_device = cuda._get_device_or_current(device)
        device = chainer.backends.cuda.GpuDevice(cuda_device)
        # skip_between_cupy_devices preserves the documented "does nothing if
        # already on GPU" behavior of the deprecated to_gpu.
        visitor = _ToDeviceVisitor(
            device,
            entry_method_info=('to_gpu', {'device': device.device}),
            skip_between_cupy_devices=True,
            starting_device_resident=self)
        self.__to_device(visitor)
        return self

    @utils.final(action=DeprecationWarning)
    def to_intel64(self):
        # type: () -> 'DeviceResident'
        """Copies parameter variables and persistent values to CPU.

        .. deprecated:: v7.0.0
            Use :meth:`to_device` instead.
        """
        intel64.check_ideep_available()
        visitor = _ToDeviceVisitor(
            chainer.get_device(intel64.Intel64Device()),
            entry_method_info=('to_intel64', {}),
            starting_device_resident=self)
        self.__to_device(visitor)
        return self

    @utils.final
    def to_chx(self):
        """Converts parameter variables and persistent values to ChainerX \
without any copy.

        This method does not handle non-registered attributes. If some of such
        attributes must be copied to ChainerX, the link implementation must
        override this method to do so.

        Returns: self
        """
        if not chainerx.is_available():
            raise RuntimeError('ChainerX is not available.')

        # Already backed by ChainerX: nothing to convert.
        if self.xp is chainerx:
            return self

        self.device_resident_accept(_ToChxVisitor())
        return self

    @utils.final
    def from_chx(self):
        """Converts parameter variables and persistent values from ChainerX \
to NumPy/CuPy devices without any copy."""
        if self._device.xp is chainerx:
            self._device = self._device.fallback_device

        self.device_resident_accept(_FromChxVisitor())
        return self

    def __to_device(self, to_device_visitor):
        # Common dispatch used by all to_* entry points above.
        self.device_resident_accept(to_device_visitor)

    @utils.final
    def to_device(
            self,
            device  # type: types.DeviceSpec
    ):
        # type: (...) -> 'DeviceResident'
        """Copies parameter variables and persistent values to the specified \
device.

        This method does not handle non-registered attributes. If some of such
        attributes must be copied to the device, the link implementation must
        override this method to do so.

        Args:
            device: Target device specifier. See
                :func:`~chainer.get_device` for available values.

        Returns: self
        """
        device = chainer.get_device(device)
        self.__to_device(_ToDeviceVisitor(device))
        return self
def _is_to_device_method_overridden(device_resident, method_name):
    """Return True if the given to_device family method is overridden."""
    bound_func = getattr(device_resident, method_name, None).__func__
    base_func = getattr(DeviceResident, method_name)
    if sys.version_info < (3,):
        # Python 2 returns an unbound method; unwrap to the raw function.
        base_func = base_func.__func__
    return bound_func is not base_func
class DeviceResidentsVisitor(object):

    """Base class of visitors that visits device resident objects recursively.

    .. seealso::
        :class:`chainer.DeviceResident`
    """

    def visit_device_resident(self, device_resident):
        """Processes a :class:`~chainer.DeviceResident` instance."""
        raise NotImplementedError()

    def visit_array(self, arr):
        """Processes an array and returns a new one.

        If the visitor does not create a new array, it can simply return the
        original array.
        """
        raise NotImplementedError()

    def visit_variable(self, param):
        """Processes a :class:`~chainer.Variable` or a \
:class:`~chainer.Parameter`."""
        raise NotImplementedError()
class _ToDeviceVisitor(DeviceResidentsVisitor):
    # A visitor that implements recursive to_device().
    # For backward compatibility, if any of to_cpu/to_gpu/to_intel64 are
    # overridden on a device resident, this visitor calls it instead of
    # `visit_device_resident`. That's true even if `to_device` was originally
    # called.

    def __init__(
            self, device, entry_method_info=None,
            skip_between_cupy_devices=False,
            starting_device_resident=None):

        assert isinstance(device, chainer.backend.Device)

        # `entry_method_info` is for backward compatibility workaround for
        # overridden methods.
        # It indicates which method originally causes this visitor.
        # If it is any of the to_??? method names, descendant resident's
        # respective method will be called if it's overridden
        # (instead of `device_resident_accept`).
        if entry_method_info is not None:
            # Suggested device specifier per deprecated method; used only to
            # build the DeprecationWarning message below.
            device_names = {
                'to_cpu': '@numpy',
                'to_gpu': '@cupy:N',
                'to_intel64': '@intel64',
            }
            assert len(entry_method_info) == 2
            method = entry_method_info[0]
            assert method in device_names
            warnings.warn(
                '{} is deprecated. '
                'Please use to_device(\'{}\') instead.'.format(
                    method, device_names[method]),
                DeprecationWarning)

        # starting_device_resident is also for backward compatibility
        # workaround for overridden methods.
        # It is a DeviceResident if to_xxx methods were initially called
        # on this visitor. This is used to avoid infinite accept-visit loop
        # that would occur by calling to_xxx methods.
        assert (starting_device_resident is None
                or isinstance(starting_device_resident, DeviceResident))

        self._device = device
        self._entry_method_info = entry_method_info
        self._skip_between_cupy_devices = skip_between_cupy_devices
        self._starting_device_resident = starting_device_resident

    def visit_device_resident(self, device_resident):
        # Update the resident's device first; the override dispatch below is
        # only about propagating data to descendants.
        device_resident._device = self._device

        # Backward compatibility workaround for overridden methods
        if device_resident._overridden_to_methods:
            # Skip this device resident, if the visitor was initially
            # triggered from it.
            if device_resident is self._starting_device_resident:
                return

            if self._entry_method_info is not None:
                # Deprecated method is being called: e.g. to_cpu and to_gpu.
                method_name, kwargs = self._entry_method_info
            else:
                # to_device is being called
                method_name, kwargs = (
                    self._device_to_method_name_and_kwargs(self._device))
            if method_name in device_resident._overridden_to_methods:
                to_method = getattr(device_resident, method_name)
                to_method(**kwargs)
                return

    def _device_to_method_name_and_kwargs(self, device):
        # Converts a device instance to the corresponding combination of
        # to_??? method name and kwargs.

        # chainerx
        if device.xp is chainerx:
            return None, {}
        # cupy
        if device.xp is cuda.cupy:
            return 'to_gpu', {'device': device.device.id}
        # numpy
        assert device.xp is numpy
        if isinstance(device, _cpu.CpuDevice):
            return 'to_cpu', {}
        # intel64
        assert isinstance(device, intel64.Intel64Device)
        return 'to_intel64', {}

    def visit_array(self, arr):
        assert isinstance(arr, chainer.get_array_types())
        device = backend.get_device_from_array(arr)
        if self._skip_visiting(device):
            # GPU-to-GPU transfer suppressed (legacy to_gpu semantics).
            self._warn_to_gpu(device, self._device)
            return arr
        return self._device.send(arr)

    def visit_variable(self, param):
        assert isinstance(param, chainer.Variable)
        device = param.device
        if self._skip_visiting(device):
            self._warn_to_gpu(device, self._device)
            return
        param.to_device(self._device)

    def _skip_visiting(self, obj_device):
        # True only for cupy-to-cupy transfers when legacy to_gpu was used.
        return (
            self._skip_between_cupy_devices
            and isinstance(self._device, backend.GpuDevice)
            and isinstance(obj_device, backend.GpuDevice))

    @staticmethod
    def _warn_to_gpu(src_device, dst_device):
        src_id = src_device.device.id
        dst_id = dst_device.device.id
        if src_id != dst_id:
            if _thread_local.flag_gpu_to_gpu is None:
                warnings.warn('''\
You are trying to transfer a DeviceResident to GPU-{dst} which is already on \
GPU-{src}.
`DeviceResident.to_gpu` does nothing if the DeviceResident is already on GPU.
You can use `DeviceResident.to_device()` method to perform inter-GPU transfer.
'''.format(dst=dst_id, src=src_id), RuntimeWarning)
            else:
                # Detection mode: record instead of warning (see module-level
                # comment on _thread_local.flag_gpu_to_gpu).
                assert isinstance(_thread_local.flag_gpu_to_gpu, bool)
                _thread_local.flag_gpu_to_gpu = True
class _ToChxVisitor(DeviceResidentsVisitor):
    # A visitor that recursively calls to_chx().

    def visit_device_resident(self, device_resident):
        # Swap the resident's device for its ChainerX counterpart, keeping
        # the current device as the fallback device.
        device_resident._device = backend.ChainerxDevice.from_fallback_device(
            device_resident._device)

    def visit_array(self, arr):
        assert isinstance(arr, chainer.get_array_types())
        return backend.to_chx(arr)

    def visit_variable(self, param):
        assert isinstance(param, chainer.Variable)
        param.to_chx()
class _FromChxVisitor(DeviceResidentsVisitor):
    # A visitor that recursively calls from_chx().

    def visit_device_resident(self, device_resident):
        # Restore the fallback (NumPy/CuPy) device; non-ChainerX devices are
        # left untouched.
        if isinstance(device_resident._device, backend.ChainerxDevice):
            device_resident._device = device_resident._device.fallback_device

    def visit_array(self, arr):
        assert isinstance(arr, chainer.get_array_types())
        return backend.from_chx(arr)

    def visit_variable(self, param):
        assert isinstance(param, chainer.Variable)
        param.from_chx()
| |
# Snake
# by KidsCanCode 2014
# A Pygame snake clone
import pygame
import sys
import random
from os import path
# Color and board configuration constants.
BLACK = (0, 0, 0)
WHITE = (255, 255, 255)
RED = (255, 0, 0)
DARKRED = (155, 0, 0)
GREEN = (0, 255, 0)
DARKGREEN = (0, 155, 0)
DARKGRAY = (40, 40, 40)
BGCOLOR = BLACK

FPS = 15
# WIDTH & HEIGHT need to be multiples of CELLSIZE
WIDTH = 640
HEIGHT = 480
CELLSIZE = 20
# Use integer (floor) division: these are grid cell counts and are passed to
# random.randrange(), which requires integer arguments. Plain '/' yields
# floats on Python 3 and breaks Apple/Snake placement.
CELLWIDTH = WIDTH // CELLSIZE
CELLHEIGHT = HEIGHT // CELLSIZE
class Coord:
    # a utility object to hold X/Y grid coordinates
    def __init__(self, x=0, y=0):
        self.x = x
        self.y = y

    def __repr__(self):
        # Debug-friendly representation; no caller relies on the default repr.
        return "Coord(%s, %s)" % (self.x, self.y)
class Apple:
    # apple object for the snake to eat
    def __init__(self):
        # Place the apple on a random grid cell. randrange() already excludes
        # its stop value, so use the full grid extent; the previous
        # `CELLWIDTH-1` / `CELLHEIGHT-1` meant an apple could never appear in
        # the last column or row.
        self.loc = Coord(random.randrange(0, CELLWIDTH),
                         random.randrange(0, CELLHEIGHT))

    def draw(self):
        # convert grid coordinates to pixels and draw a solid red cell
        x = self.loc.x * CELLSIZE
        y = self.loc.y * CELLSIZE
        apple_rect = pygame.Rect(x, y, CELLSIZE, CELLSIZE)
        pygame.draw.rect(screen, RED, apple_rect)
class Snake:
    # snake object - made up of a list of coordinates; coords[0] is the head
    def __init__(self):
        # load the sound effects from the 'snd' directory next to this file
        snd_dir = path.join(path.dirname(__file__), 'snd')
        self.eat_snd = pygame.mixer.Sound(path.join(snd_dir, "eat.wav"))
        self.eat_snd.set_volume(0.2)
        self.hit_snd = pygame.mixer.Sound(path.join(snd_dir, "hit.wav"))
        self.hit_snd.set_volume(0.2)
        # pick a random spot for the snake to start (not too close to the wall)
        x = random.randrange(5, CELLWIDTH-5)
        y = random.randrange(5, CELLHEIGHT-5)
        # this list will hold the coordinates of the snake's body
        self.coords = []
        # the snake starts with 3 segments to the left of the head
        for i in range(3):
            self.coords.append(Coord(x-i, y))
        # start moving right; self.dir is always one of 'u', 'd', 'l', 'r'
        self.dir = 'r'

    def draw(self):
        # draw the snake on the screen
        for coord in self.coords:
            x = coord.x * CELLSIZE
            y = coord.y * CELLSIZE
            # each segment is two squares (dark outer / light inner)
            segment_rect = pygame.Rect(x, y, CELLSIZE, CELLSIZE)
            pygame.draw.rect(screen, DARKGREEN, segment_rect)
            inside_rect = pygame.Rect(x+4, y+4, CELLSIZE-8, CELLSIZE-8)
            pygame.draw.rect(screen, GREEN, inside_rect)

    def move(self):
        # move the snake by adding a new head segment in the current
        # direction; the caller is responsible for trimming the tail
        # (run_game deletes the last segment unless an apple was eaten)
        if self.dir == 'u':
            new_head = Coord(self.coords[0].x, self.coords[0].y-1)
        elif self.dir == 'd':
            new_head = Coord(self.coords[0].x, self.coords[0].y+1)
        elif self.dir == 'l':
            new_head = Coord(self.coords[0].x-1, self.coords[0].y)
        elif self.dir == 'r':
            new_head = Coord(self.coords[0].x+1, self.coords[0].y)
        # insert the new coord at the front of the coord list
        self.coords.insert(0, new_head)

    def eat(self, apple):
        # check if the snake's head is on the apple; plays the eat sound
        if self.coords[0].x == apple.loc.x and self.coords[0].y == apple.loc.y:
            self.eat_snd.play()
            return True
        return False

    def hit(self):
        # check if snake hit an edge of the board
        if self.coords[0].x == -1:
            return True
        if self.coords[0].x == CELLWIDTH:
            return True
        if self.coords[0].y == -1:
            return True
        if self.coords[0].y == CELLHEIGHT:
            return True
        # check if snake hit itself (head vs. every body segment)
        for snake_body in self.coords[1:]:
            if snake_body.x == self.coords[0].x and snake_body.y == self.coords[0].y:
                return True
        return False
# initialize pygame, the mixer, and the main window (module-level globals
# used by the draw helpers and the game loop below)
pygame.init()
pygame.mixer.init()
screen = pygame.display.set_mode((WIDTH, HEIGHT))
clock = pygame.time.Clock()
pygame.display.set_caption("Snake")
def run_game():
    # Run one full game; returns the final score (segments grown).
    snake = Snake()
    apple = Apple()
    while True:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                pygame.quit()
                sys.exit()
            elif event.type == pygame.KEYDOWN:
                if event.key == pygame.K_ESCAPE:
                    pygame.quit()
                    sys.exit()
                # don't move in the opposite direction
                elif event.key == pygame.K_LEFT and snake.dir != 'r':
                    snake.dir = 'l'
                elif event.key == pygame.K_RIGHT and snake.dir != 'l':
                    snake.dir = 'r'
                elif event.key == pygame.K_UP and snake.dir != 'd':
                    snake.dir = 'u'
                elif event.key == pygame.K_DOWN and snake.dir != 'u':
                    snake.dir = 'd'

        if snake.eat(apple):
            # make a new apple; keeping the tail makes the snake grow by one
            apple = Apple()
        else:
            # remove the last segment from the snake (it stays the same size)
            del snake.coords[-1]
        snake.move()
        if snake.hit():
            # dead - play the hit sound and return the score
            snake.hit_snd.play()
            return len(snake.coords) - 3

        # Update screen
        screen.fill(BGCOLOR)
        draw_grid()
        # score = current length minus the 3 starting segments
        draw_score(len(snake.coords) - 3)
        snake.draw()
        apple.draw()
        pygame.display.flip()
        clock.tick(FPS)
def draw_grid():
    """Draw the dark background grid: vertical lines, then horizontal."""
    for grid_x in range(0, WIDTH, CELLSIZE):
        pygame.draw.line(screen, DARKGRAY, (grid_x, 0), (grid_x, HEIGHT))
    for grid_y in range(0, HEIGHT, CELLSIZE):
        pygame.draw.line(screen, DARKGRAY, (0, grid_y), (WIDTH, grid_y))
def draw_text(text, size, x, y):
    """Render `text` in white Arial with its midtop anchored at (x, y)."""
    font = pygame.font.Font(pygame.font.match_font('arial'), size)
    surface = font.render(text, True, WHITE)
    rect = surface.get_rect()
    rect.midtop = (x, y)
    screen.blit(surface, rect)
def draw_score(score):
    """Show the current score near the top-right corner."""
    label = 'Score: %s' % score
    draw_text(label, 18, WIDTH-70, 10)
def wait_for_key():
    # Non-blocking poll for a key release; returns the key code or None.
    # Esc (or a window close) exits the program immediately.
    # Note: we look for KEYUP events, not KEYPRESS.
    if len(pygame.event.get(pygame.QUIT)) > 0:
        pygame.quit()
        sys.exit()
    keyup_events = pygame.event.get(pygame.KEYUP)
    if len(keyup_events) == 0:
        # no key released since the last poll
        return None
    if keyup_events[0].key == pygame.K_ESCAPE:
        pygame.quit()
        sys.exit()
    return keyup_events[0].key
def show_start_screen():
    # Display the starting screen until any key is released.
    screen.fill(BGCOLOR)
    draw_text("SNAKE", 64, WIDTH/2, HEIGHT/4)
    draw_text("Move with the arrow keys", 24, WIDTH/2, HEIGHT/2)
    draw_text("Press a key to begin", 24, WIDTH/2, HEIGHT*3/4)
    pygame.display.update()
    # drain any pending key release, then poll until one arrives
    # (wait_for_key is non-blocking and returns None when no key was released)
    wait_for_key()
    while True:
        if wait_for_key():
            # clear the event queue before starting the game
            pygame.event.get()
            return
def show_go_screen(score):
    # Display the Game Over screen with the final score until a key press.
    screen.fill(BGCOLOR)
    draw_text("GAME OVER", 58, WIDTH/2, HEIGHT/4)
    text = "Score: %s" % score
    draw_text(text, 24, WIDTH/2, HEIGHT/2)
    draw_text("Press a key to begin", 24, WIDTH/2, HEIGHT*3/4)
    pygame.display.update()
    # pause for a moment so a held key isn't consumed, then wait for key
    pygame.time.wait(500)
    wait_for_key()
    while True:
        if wait_for_key():
            # clear the event queue before restarting
            pygame.event.get()
            return
# Main program: title screen, then alternate games and game-over screens
# forever (exit happens inside wait_for_key/run_game via sys.exit).
show_start_screen()
while True:
    score = run_game()
    show_go_screen(score)
| |
# -*- coding: utf-8 -*-

"""
    Project Tracking & Management
"""

# web2py request globals: which controller/function is being served
module = request.controller
resourcename = request.function

if not settings.has_module(module):
    raise HTTP(404, body="Module disabled: %s" % module)

# True when the deployment template enables task-mode project tracking
mode_task = settings.get_project_mode_task()
# =============================================================================
def index():
    """ Module's Home Page """

    if not mode_task:
        # Bypass home page & go direct to filterable list of Projects
        s3_redirect_default(URL(f="project"))
    else:
        # Bypass home page & go direct to browsing Tasks for a Project
        s3_redirect_default(URL(f="project", vars={"tasks": 1}))
# =============================================================================
def create():
    """ Redirect to project/create """

    # Convenience endpoint: /create -> /project/create
    redirect(URL(f="project", args="create"))
# -----------------------------------------------------------------------------
def project():
""" RESTful CRUD controller """
if "tasks" in get_vars:
# Open-Tasks-For-Project Selector
return open_tasks_for_project()
# Pre-process
def prep(r):
# Location Filter
s3db.gis_location_filter(r)
component = r.component
component_name = component.name if component else None
hr_group = r.get_vars.get("group")
if r.method == "datalist":
# Set list_fields for renderer (project_project_list_layout)
s3db.configure("project_project",
list_fields = ["name",
"description",
"location.location_id",
"start_date",
"organisation_id",
"organisation_id$logo",
"modified_by",
]
)
# Filter human resource records if "group" in get_vars
elif component_name == "human_resource":
type_field = FS("human_resource.type")
if hr_group == "staff":
query = (type_field == 1)
elif hr_group == "volunteer":
query = (type_field == 2)
else:
query = None
if query:
r.resource.add_component_filter("human_resource", query)
if r.interactive:
htable = s3db.hrm_human_resource
htable.person_id.comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Person"),
T("Select the person assigned to this role for this project."),
)
)
if not component or component_name == "activity":
# Filter Themes/Activity Types based on Sector
if r.record:
table = s3db.project_sector_project
query = (table.project_id == r.id) & \
(table.deleted == False)
rows = db(query).select(table.sector_id)
sector_ids = [row.sector_id for row in rows]
set_theme_requires(sector_ids)
if not r.component:
if r.method in ("create", "update"):
# Context from a Profile page?"
location_id = get_vars.get("(location)", None)
if location_id:
field = s3db.project_location.location_id
field.default = location_id
field.readable = field.writable = False
organisation_id = get_vars.get("(organisation)", None)
if organisation_id:
field = r.table.organisation_id
field.default = organisation_id
field.readable = field.writable = False
if r.id:
r.table.human_resource_id.represent = \
s3db.hrm_HumanResourceRepresent(show_link=True)
elif r.get_vars.get("project.status_id", None):
stable = s3db.project_status
status = get_vars.get("project.status_id")
row = db(stable.name == status).select(stable.id,
limitby=(0, 1)).first()
if row:
r.table.status_id.default = row.id
r.table.status_id.writable = False
elif component_name == "organisation":
if r.method != "update":
allowed_roles = dict(settings.get_project_organisation_roles())
if settings.get_template() == "DRRPP":
# Partner NS should only come via sync from RMS
allowed_roles.pop(9, None)
lead_role = 1
otable = s3db.project_organisation
query = (otable.project_id == r.id) & \
(otable.role == lead_role) & \
(otable.deleted != True)
row = db(query).select(otable.id,
limitby=(0, 1)).first()
if row:
# Project has already a lead organisation
# => exclude lead_role in component add-form
allowed_roles.pop(lead_role, None)
otable.role.requires = IS_EMPTY_OR(IS_IN_SET(allowed_roles))
elif component_name == "activity":
# Filter Activity Type based on Sector
set_activity_type_requires("project_activity_activity_type", sector_ids)
elif component_name == "goal":
if r.method != "update":
ctable = r.component.table
field = ctable.weighting
field.readable = field.writable = False
ctable.current_status.readable = False
ctable.overall_status.readable = False
elif component_name == "outcome":
if r.method != "update":
ctable = r.component.table
field = ctable.weighting
field.readable = field.writable = False
ctable.current_status.readable = False
ctable.overall_status.readable = False
if settings.get_project_goals():
#ctable =
# Filter to just those for this Project & make mandatory
r.component.table.goal_id.requires = IS_ONE_OF(db, "project_goal.id",
s3db.project_goal_represent,
sort=True,
filterby="project_id",
filter_opts=[r.id],
)
# Not working for embedded create form
#if r.method == "create":
field = r.component.table.weighting
field.readable = field.writable = False
r.component.table.current_status.readable = False
r.component.table.overall_status.readable = False
elif component_name == "output":
if r.method != "update":
ctable = r.component.table
field = ctable.weighting
field.readable = field.writable = False
ctable.current_status.readable = False
ctable.overall_status.readable = False
if settings.get_project_outcomes():
# Filter to just those for this Project & make mandatory
r.component.table.outcome_id.requires = IS_ONE_OF(db, "project_outcome.id",
s3db.project_outcome_represent,
sort=True,
filterby="project_id",
filter_opts=[r.id],
)
elif component_name == "indicator":
if r.method != "update":
ctable = r.component.table
field = ctable.weighting
field.readable = field.writable = False
ctable.current_status.readable = False
ctable.overall_status.readable = False
if settings.get_project_outputs():
# Filter to just those for this Project & make mandatory
r.component.table.output_id.requires = IS_ONE_OF(db, "project_output.id",
s3db.project_output_represent,
sort=True,
filterby="project_id",
filter_opts=[r.id],
)
elif settings.get_project_outcomes():
# Filter to just those for this Project & make mandatory
r.component.table.outcome_id.requires = IS_ONE_OF(db, "project_outcome.id",
s3db.project_outcome_represent,
sort=True,
filterby="project_id",
filter_opts=[r.id],
)
elif settings.get_project_goals():
# Filter to just those for this Project & make mandatory
r.component.table.goal_id.requires = IS_ONE_OF(db, "project_goal.id",
s3db.project_goal_represent,
sort=True,
filterby="project_id",
filter_opts=[r.id],
)
elif component_name == "indicator_data":
# Filter to just those for this Project & make mandatory
r.component.table.indicator_id.requires = IS_ONE_OF(db, "project_indicator.id",
s3db.project_indicator_represent,
sort=True,
filterby="project_id",
filter_opts=[r.id],
)
# Have a filter for indicator in indicator data report
#if r.method == "report":
# from s3 import S3OptionsFilter
# filter_widgets = [S3OptionsFilter("indicator_id",
# label = T("Indicator"),
# ),
# ]
#else:
# filter_widgets = None
#r.component.configure(filter_widgets = filter_widgets)
elif component_name == "task":
if not auth.s3_has_role("STAFF"):
# Hide fields which are meant for staff members
# (avoid confusion both of inputters & recipients)
unwanted_fields = ["source",
"pe_id",
"date_due",
"time_estimated",
"time_actual",
"status",
]
ttable = component.table
for fieldname in unwanted_fields:
field = ttable[fieldname]
field.readable = field.writable = False
if "open" in r.get_vars:
# Show only the Open Tasks for this Project (unused?)
statuses = s3.project_task_active_statuses
query = FS("status").belongs(statuses)
r.resource.add_component_filter("task", query)
# Filter activities and milestones to the current project
options_filter = {"filterby": "project_id",
"filter_opts": (r.id,),
}
fields = []
if settings.get_project_activities():
fields.append(s3db.project_task_activity.activity_id)
if settings.get_project_milestones():
fields.append(s3db.project_task_milestone.milestone_id)
for f in fields:
requires = f.requires
if isinstance(requires, IS_EMPTY_OR):
requires = requires.other
if hasattr(requires, "set_filter"):
requires.set_filter(**options_filter)
elif component_name == "beneficiary":
# Filter the location selector to the project's locations
component.table.project_location_id.requires = \
IS_EMPTY_OR(IS_ONE_OF(db, "project_location.id",
s3db.project_location_represent,
sort=True,
filterby="project_id",
filter_opts=[r.id],
)
)
elif component_name == "human_resource":
htable = s3db.hrm_human_resource
htable.person_id.represent = \
s3db.pr_PersonRepresent(show_link=True)
# These values are defined in hrm_type_opts
human_resource_id = r.table.human_resource_id
filter_opts = None
if hr_group:
crud_strings = s3.crud_strings
if hr_group == "staff":
filter_opts = (1,)
human_resource_id.label = T("Staff")
crud_strings["project_human_resource"] = crud_strings["hrm_staff"]
elif hr_group == "volunteer":
filter_opts = (2,)
human_resource_id.label = T("Volunteer")
crud_strings["project_human_resource"] = crud_strings["hrm_volunteer"]
if filter_opts:
# Use the group to filter the form widget when
# adding a new record
human_resource_id.requires = \
IS_ONE_OF(db, "hrm_human_resource.id",
s3db.hrm_human_resource_represent,
filterby="type",
filter_opts=filter_opts,
orderby="hrm_human_resource.person_id",
sort=True
)
elif component_name == "document":
# Hide unnecessary fields
dtable = component.table
dtable.organisation_id.readable = \
dtable.organisation_id.writable = False
dtable.person_id.readable = \
dtable.person_id.writable = False
dtable.location_id.readable = \
dtable.location_id.writable = False
return True
s3.prep = prep
# Post-process
def postp(r, output):
if r.interactive:
component_name = r.component_name
if not r.component:
# Set the minimum end_date to the same as the start_date
s3.jquery_ready.append(
'''S3.start_end_date('project_project_start_date','project_project_end_date')''')
if mode_task:
read_url = URL(args=["[id]", "task"])
update_url = URL(args=["[id]", "task"])
s3_action_buttons(r,
read_url=read_url,
update_url=update_url)
elif component_name == "beneficiary":
# Set the minimum end_date to the same as the start_date
s3.jquery_ready.append(
'''S3.start_end_date('project_beneficiary_date','project_beneficiary_end_date')''')
elif component_name == "indicator_data" and r.record and \
isinstance(output, dict):
report_link = A(current.T("Show Report"),
_href=r.url(method="report"),
_class="action-btn",
)
showadd_btn = output.get("showadd_btn", "")
output["showadd_btn"] = TAG[""](showadd_btn, report_link)
elif component_name == "task" and r.component_id:
# Put Comments in rfooter
s3db.project_ckeditor()
s3.rfooter = LOAD("project", "comments.load",
args=[r.component_id],
ajax=True)
return output
s3.postp = postp
return s3_rest_controller(module, "project",
csv_template = "project",
hide_filter = {None: False,
#"indicator_data": False,
"_default": True,
},
rheader = s3db.project_rheader,
)
# -----------------------------------------------------------------------------
def open_tasks_for_project():
    """
        Simplified controller to select a project and open the
        list of open tasks for it
    """

    def prep(r):
        # Re-label the list as a task-selection page & lock down editing
        tablename = "project_project"
        s3.crud_strings[tablename].title_list = T("Open Tasks for Project")
        s3.crud_labels.READ = s3.crud_labels.UPDATE = T("Select")
        s3db.configure(tablename,
                       deletable = False,
                       listadd = False,
                       )
        return True
    s3.prep = prep

    # Post-process
    def postp(r, output):
        # Point both action buttons at the project's task list
        if r.interactive and not r.component:
            target = URL(f="task", vars={"project": "[id]"})
            s3_action_buttons(r,
                              deletable = False,
                              read_url = target,
                              update_url = target,
                              )
        return output
    s3.postp = postp

    return s3_rest_controller(module, "project",
                              hide_filter = False,
                              )
# -----------------------------------------------------------------------------
def set_theme_requires(sector_ids):
    """
        Filters the theme_id based on the sector_id
    """

    ttable = s3db.project_theme
    tstable = s3db.project_theme_sector

    # All themes linked to the project's sectors or to no sectors
    rows = db().select(ttable.id,
                       tstable.sector_id,
                       left=tstable.on(ttable.id == tstable.theme_id))
    valid_sectors = sector_ids or []
    theme_ids = []
    for row in rows:
        linked_sector = row.project_theme_sector.sector_id
        if not linked_sector or linked_sector in valid_sectors:
            theme_ids.append(row.project_theme.id)

    s3db.project_theme_project.theme_id.requires = IS_EMPTY_OR(
        IS_ONE_OF(db, "project_theme.id",
                  s3base.S3Represent(lookup="project_theme"),
                  filterby="id",
                  filter_opts=theme_ids,
                  sort=True,
                  )
        )
# -----------------------------------------------------------------------------
def set_activity_type_requires(tablename, sector_ids):
    """
        Filters the activity_type_id based on the sector_id
    """

    attable = s3db.project_activity_type
    if sector_ids:
        atstable = s3db.project_activity_type_sector
        # All activity_types linked to the project's sectors or to no sector
        rows = db().select(attable.id,
                           atstable.sector_id,
                           left=atstable.on(attable.id == atstable.activity_type_id))
        activity_type_ids = []
        for row in rows:
            linked_sector = row.project_activity_type_sector.sector_id
            if not linked_sector or linked_sector in sector_ids:
                activity_type_ids.append(row.project_activity_type.id)
    else:
        activity_type_ids = []

    s3db[tablename].activity_type_id.requires = IS_EMPTY_OR(
        IS_ONE_OF(db, "project_activity_type.id",
                  s3base.S3Represent(lookup="project_activity_type"),
                  filterby="id",
                  filter_opts=activity_type_ids,
                  sort=True,
                  )
        )
# =============================================================================
def sector():
    """ RESTful CRUD controller (Sectors are defined in the org module) """
    return s3_rest_controller("org", "sector")
# -----------------------------------------------------------------------------
def status():
    """ RESTful CRUD controller for Project Statuses """
    return s3_rest_controller()
# -----------------------------------------------------------------------------
def theme():
    """ RESTful CRUD controller for Project Themes """
    return s3_rest_controller()
# -----------------------------------------------------------------------------
def theme_project():
    """
        RESTful CRUD controller for the Theme<>Project link table
        - not normally exposed to users via a menu
    """
    return s3_rest_controller()
# -----------------------------------------------------------------------------
def theme_sector():
    """ RESTful CRUD controller for options.s3json lookups """

    if auth.permission.format != "s3json":
        return ""

    # Pre-process: serve "options" requests only
    s3.prep = lambda r: r.method == "options"

    return s3_rest_controller()
# -----------------------------------------------------------------------------
def hazard():
    """ RESTful CRUD controller for Hazards """
    return s3_rest_controller()
# -----------------------------------------------------------------------------
def framework():
    """ RESTful CRUD controller for Frameworks """
    # Truncate long text in datatables at 160 chars; no filter form
    return s3_rest_controller(dtargs={"dt_text_maximum_len": 160},
                              hide_filter=True,
                              )
# =============================================================================
def organisation():
    """ RESTful CRUD controller for Project Organisations """

    if settings.get_project_multiple_organisations():
        # e.g. IFRC
        # Records are maintained via the Project's tab, not standalone
        s3db.configure("project_organisation",
                       insertable=False,
                       editable=False,
                       deletable=False)

        #list_btn = A(T("Funding Report"),
        #             _href=URL(c="project", f="organisation",
        #                      args="report", vars=get_vars),
        #             _class="action-btn")

        return s3_rest_controller(#list_btn=list_btn,
                                  )
    else:
        # e.g. DRRPP
        tabs = [(T("Basic Details"), None),
                (T("Projects"), "project"),
                (T("Contacts"), "human_resource"),
                ]
        rheader = lambda r: s3db.org_rheader(r, tabs)
        # NOTE(review): relies on the global 'resourcename' provided by the
        # web2py controller environment - confirm
        return s3_rest_controller("org", resourcename,
                                  rheader=rheader)
# =============================================================================
def beneficiary_type():
    """ RESTful CRUD controller for Beneficiary Types """
    return s3_rest_controller()
# -----------------------------------------------------------------------------
def beneficiary():
    """ RESTful CRUD controller for Beneficiaries """

    # Normally only used in Report
    # - make changes as component of Project
    s3db.configure("project_beneficiary",
                   deletable = False,
                   editable = False,
                   insertable = False,
                   )

    # NOTE(review): 'list_btn' is built but not passed to the controller
    # below - presumably dead code; confirm
    list_btn = A(T("Beneficiary Report"),
                 _href=URL(c="project", f="beneficiary",
                           args="report", vars=get_vars),
                 _class="action-btn")

    #def prep(r):
    #    if r.method in ("create", "create.popup", "update", "update.popup"):
    #        # Coming from Profile page?
    #        location_id = r.get_vars.get("~.(location)", None)
    #        if location_id:
    #            field = r.table.location_id
    #            field.default = location_id
    #            field.readable = field.writable = False
    #    if r.record:
    #        field = r.table.location_id
    #        field.comment = None
    #        field.writable = False
    #    return True
    #s3.prep = prep

    return s3_rest_controller(hide_filter=False)
# =============================================================================
def activity_type():
    """ RESTful CRUD controller for Activity Types """
    return s3_rest_controller()
# -----------------------------------------------------------------------------
def activity_type_sector():
    """ RESTful CRUD controller for options.s3json lookups """

    if auth.permission.format != "s3json":
        return ""

    # Pre-process: serve "options" requests only
    s3.prep = lambda r: r.method == "options"

    return s3_rest_controller()
# -----------------------------------------------------------------------------
def activity_organisation():
    """ RESTful CRUD controller for options.s3json lookups """

    if auth.permission.format != "s3json":
        return ""

    # Pre-process: serve "options" requests only
    s3.prep = lambda r: r.method == "options"

    return s3_rest_controller()
# -----------------------------------------------------------------------------
def activity():
    """ RESTful CRUD controller for Project Activities """

    table = s3db.project_activity

    if "project_id" in get_vars:
        # Coming from a Project context: fix the project & hide the widget
        field = table.project_id
        field.default = get_vars.project_id
        field.writable = False
        field.comment = None

    # Pre-process
    def prep(r):
        if r.interactive:
            if r.component is not None:
                if r.component_name == "document":
                    # Hide fields not relevant when attaching to an Activity
                    doc_table = s3db.doc_document
                    doc_table.organisation_id.readable = doc_table.organisation_id.writable = False
                    doc_table.person_id.readable = doc_table.person_id.writable = False
                    doc_table.location_id.readable = doc_table.location_id.writable = False
        return True
    s3.prep = prep

    def postp(r, output):
        if r.interactive:
            if not r.component:
                # Keep end_date >= start date in the form
                s3.jquery_ready.append(
'''S3.start_end_date('project_activity_date','project_activity_end_date')''')
        return output
    s3.postp = postp

    return s3_rest_controller(csv_template = "activity",
                              hide_filter = False,
                              rheader = s3db.project_rheader,
                              )
# -----------------------------------------------------------------------------
def location():
    """
        RESTful CRUD controller for Project Locations

        - interactive requests filter activity types to the project's
          sectors & hide irrelevant document fields
        - "plain" representation renders custom map-popup contents
    """

    table = s3db.project_location

    # Pre-process
    def prep(r):
        if r.interactive:
            if r.record:
                # Limit activity types to the project's sectors
                table = s3db.project_sector_project
                query = (table.project_id == r.record.project_id) & \
                        (table.deleted == False)
                rows = db(query).select(table.sector_id)
                sector_ids = [row.sector_id for row in rows]
            else:
                sector_ids = []
            set_activity_type_requires("project_activity_type_location", sector_ids)

            if r.component_name == "document":
                # Hide fields not relevant when attaching to a Location
                table = db.doc_document
                table.organisation_id.readable = table.organisation_id.writable = False
                table.person_id.readable = table.person_id.writable = False
                table.location_id.readable = table.location_id.writable = False

        return True
    s3.prep = prep

    # Post-process
    def postp(r, output):
        if r.representation == "plain":
            # Replace the Map Popup contents with custom content
            item = TABLE()
            if settings.get_project_community():
                # The Community is the primary resource
                record = r.record
                table.id.readable = False
                table.location_id.readable = False
                fields = [table[f] for f in table.fields if table[f].readable]
                for field in fields:
                    data = record[field]
                    if data:
                        represent = field.represent
                        if represent:
                            item.append(TR(TD(field.label),
                                           TD(represent(data))))
                        else:
                            item.append(TR(TD(field.label), TD(data)))
                # Append the location hierarchy (most specific level first)
                hierarchy = gis.get_location_hierarchy()
                gtable = s3db.gis_location
                location = db(gtable.id == record.location_id).select(gtable.L1,
                                                                      gtable.L2,
                                                                      gtable.L3,
                                                                      gtable.L4,
                                                                      ).first()
                if location:
                    for field in ["L4", "L3", "L2", "L1"]:
                        if field in hierarchy and location[field]:
                            item.append(TR(TD(hierarchy[field]),
                                           TD(location[field])))
                output["item"] = item
            else:
                # The Project is the primary resource
                project_id = r.record.project_id
                ptable = s3db.project_project
                query = (ptable.id == project_id)
                project = db(query).select(limitby=(0, 1)).first()
                ptable.id.readable = False
                fields = [ptable[f] for f in ptable.fields if ptable[f].readable]
                for field in fields:
                    # Compare by field name: Field.__eq__ builds a DAL Query
                    # object, which is always truthy, so comparing the Field
                    # itself to "currency" would match every field
                    if field.name == "currency":
                        # Don't display Currency if no Budget
                        if not project["budget"]:
                            continue
                    data = project[field]
                    if data:
                        represent = field.represent
                        if represent:
                            item.append(TR(TD(field.label),
                                           TD(represent(data))))
                        else:
                            item.append(TR(TD(field.label), TD(data)))
                title = s3.crud_strings["project_project"].title_display
                # Assume authorised to see details
                popup_url = URL(f="project", args=[project_id])
                details_btn = A(T("Open"),
                                _href=popup_url,
                                _class="btn",
                                _id="details-btn",
                                _target="_blank")
                output = dict(item = item,
                              title = title,
                              details_btn = details_btn,
                              )
        return output
    s3.postp = postp

    return s3_rest_controller(interactive_report=True,
                              rheader=s3db.project_rheader,
                              hide_filter=False,
                              csv_template="location",
                              )
# -----------------------------------------------------------------------------
def demographic():
    """ RESTful CRUD controller (Demographics live in the stats module) """
    return s3_rest_controller("stats", "demographic")
# -----------------------------------------------------------------------------
def demographic_data():
    """ RESTful CRUD controller, delegated to the shared stats controller """
    return s3db.stats_demographic_data_controller()
# -----------------------------------------------------------------------------
def location_contact():
    """ RESTful CRUD controller for Community Contacts """
    return s3_rest_controller(hide_filter=False)
# -----------------------------------------------------------------------------
def report():
    """
        RESTful CRUD controller over project_activity

        @ToDo: Why is this needed? To have no rheader?
    """
    return s3_rest_controller(module, "activity")
# -----------------------------------------------------------------------------
def partners():
    """
        RESTful CRUD controller for Organisations filtered by Type
    """

    # Pre-filter the org list by type name via the request vars
    # @ToDo: This could need to be a deployment setting
    get_vars["organisation_type.name"] = \
        "Academic,Bilateral,Government,Intergovernmental,NGO,UN agency"

    # Load model
    table = s3db.org_organisation

    # Modify CRUD Strings
    s3.crud_strings.org_organisation = Storage(
        label_create = T("Create Partner Organization"),
        title_display = T("Partner Organization Details"),
        title_list = T("Partner Organizations"),
        title_update = T("Edit Partner Organization"),
        title_upload = T("Import Partner Organizations"),
        label_list_button = T("List Partner Organizations"),
        label_delete_button = T("Delete Partner Organization"),
        msg_record_created = T("Partner Organization added"),
        msg_record_modified = T("Partner Organization updated"),
        msg_record_deleted = T("Partner Organization deleted"),
        msg_list_empty = T("No Partner Organizations currently registered")
        )

    return s3db.org_organisation_controller()
# =============================================================================
def task():
    """ RESTful CRUD controller, delegated to the shared task controller """
    return s3db.project_task_controller()
# =============================================================================
def task_project():
    """ RESTful CRUD controller for options.s3json lookups """

    if auth.permission.format != "s3json":
        return ""

    # Pre-process: serve "options" requests only
    s3.prep = lambda r: r.method == "options"

    return s3_rest_controller()
# =============================================================================
def task_activity():
    """ RESTful CRUD controller for options.s3json lookups """

    if auth.permission.format != "s3json":
        return ""

    # Pre-process: serve "options" requests only
    s3.prep = lambda r: r.method == "options"

    return s3_rest_controller()
# =============================================================================
def task_milestone():
    """ RESTful CRUD controller for options.s3json lookups """

    if auth.permission.format != "s3json":
        return ""

    # Pre-process: serve "options" requests only
    s3.prep = lambda r: r.method == "options"

    return s3_rest_controller()
# =============================================================================
def task_tag():
    """ RESTful CRUD controller for options.s3json lookups """

    # Pre-process: serve options.s3json requests only
    s3.prep = lambda r: r.method == "options" and r.representation == "s3json"

    return s3_rest_controller()
# =============================================================================
def role():
    """ RESTful CRUD controller for Project Roles """
    return s3_rest_controller()
# =============================================================================
def member():
    """ RESTful CRUD Controller for Project Members """
    return s3_rest_controller()
# =============================================================================
def milestone():
    """ RESTful CRUD controller """

    project_id = get_vars.get("project_id")
    if project_id is not None:
        # Coming from a Project context: fix the project & hide the widget
        field = s3db.project_milestone.project_id
        field.default = project_id
        field.writable = False
        field.comment = None

    return s3_rest_controller()
# =============================================================================
def tag():
    """ RESTful CRUD controller for Tags """
    return s3_rest_controller()
# =============================================================================
def time():
    """ RESTful CRUD controller for Logged Time """
    # NOTE: this controller function shadows the stdlib 'time' module name
    # within this file (web2py controller convention)

    # Load model to get normal CRUD strings
    table = s3db.project_time
    hide_filter = False

    if "mine" in get_vars:
        # Display this user's Logged Hours in reverse-order
        hide_filter = True
        s3.crud_strings["project_time"].title_list = T("My Logged Hours")
        person_id = auth.s3_logged_in_person()
        if person_id:
            # @ToDo: Use URL filter instead, but the Search page will have
            #        to populate it's widgets based on the URL filter
            s3.filter = (table.person_id == person_id)
            # Log time with just this user's open tasks visible
            ttable = db.project_task
            query = (ttable.pe_id == auth.user.pe_id) & \
                    (ttable.deleted == False)
            if "update" not in request.args:
                # Only log time against Open Tasks
                query &= (ttable.status.belongs(s3db.project_task_active_statuses))
            dbset = db(query)
            table.task_id.requires = IS_ONE_OF(dbset, "project_task.id",
                                               s3db.project_task_represent_w_project
                                               )
        list_fields = ["id",
                       "date",
                       "hours",
                       (T("Project"), "task_id$task_project.project_id"),
                       (T("Activity"), "task_id$task_activity.activity_id"),
                       "task_id",
                       "comments",
                       ]
        if settings.get_project_milestones():
            # Use the field in this format to get the custom represent
            list_fields.insert(5, (T("Milestone"), "task_id$task_milestone.milestone_id"))

        s3db.configure("project_time",
                       list_fields = list_fields,
                       orderby = "project_time.date desc",
                       )

    elif "week" in get_vars:
        # Filter to the specified number of weeks
        weeks = int(get_vars.get("week", 1))
        now = request.utcnow
        week = datetime.timedelta(days=7)
        delta = week * weeks
        s3.filter = (table.date > (now - delta))

    elif "month" in get_vars:
        # Filter to the specified number of months
        # NOTE: a "month" is approximated as 4 weeks here
        months = int(get_vars.get("month", 1))
        now = request.utcnow
        month = datetime.timedelta(weeks=4)
        delta = month * months
        s3.filter = (table.date > (now - delta))

    return s3_rest_controller(hide_filter=hide_filter)
# =============================================================================
# Programmes
# =============================================================================
def programme():
    """ RESTful controller for Programmes """
    return s3_rest_controller()
def programme_project():
    """ RESTful controller for Programmes <> Projects """
    # Serve only options.s3json lookup requests
    s3.prep = lambda r: r.method == "options" and r.representation == "s3json"
    return s3_rest_controller()
# =============================================================================
# Planning
# =============================================================================
def goal():
    """ RESTful controller for Goals """
    return s3_rest_controller()
def outcome():
    """ RESTful controller for Outcomes """
    return s3_rest_controller()
def output():
    """ RESTful controller for Outputs """
    return s3_rest_controller()
def indicator():
    """ RESTful CRUD controller for Indicators """
    return s3_rest_controller()
def indicator_data():
    """ RESTful CRUD controller for Indicator Data """
    return s3_rest_controller()
# =============================================================================
# Community Volunteers
# =============================================================================
#def human_resource():
# """ RESTful controller for Project <> Staff Assignments """
# return s3_rest_controller()
def person():
    """ RESTful controller for Community Volunteers """
    # @ToDo: Filter
    return s3db.vol_person_controller()
def volunteer():
    """ RESTful controller for Community Volunteers """
    # @ToDo: Filter
    #s3.filter = FS("type") == 2
    return s3db.vol_volunteer_controller()
# =============================================================================
# Comments
# =============================================================================
def comment_parse(comment, comments, task_id=None):
    """
        Parse a Comment

        @param: comment - a gluon.sql.Row: the current comment
        @param: comments - a gluon.sql.Rows: full list of comments
        @param: task_id - a reference ID: optional task commented on

        Returns an LI element for the comment, with any replies
        nested inside it as a UL (built recursively).
    """

    author = B(T("Anonymous"))
    if comment.created_by:
        # Look up the author's name & email to build a Gravatar link
        utable = s3db.auth_user
        ptable = s3db.pr_person
        ltable = s3db.pr_person_user
        query = (utable.id == comment.created_by)
        left = [ltable.on(ltable.user_id == utable.id),
                ptable.on(ptable.pe_id == ltable.pe_id)]
        row = db(query).select(utable.email,
                               ptable.first_name,
                               ptable.middle_name,
                               ptable.last_name,
                               left=left, limitby=(0, 1)).first()
        if row:
            person = row.pr_person
            user = row[utable._tablename]
            username = s3_fullname(person)
            email = user.email.strip().lower()
            import hashlib
            # Encode before hashing: hashlib requires bytes on Python 3
            # (also avoids shadowing the hash() builtin)
            email_hash = hashlib.md5(email.encode("utf-8")).hexdigest()
            url = "http://www.gravatar.com/%s" % email_hash
            author = B(A(username, _href=url, _target="top"))

    if not task_id and comment.task_id:
        # Show which task the comment refers to
        table = s3db.project_task
        task = "re: %s" % table[comment.task_id].name
        header = DIV(author, " ", task)
        task_id = comment.task_id
    else:
        header = author

    thread = LI(DIV(s3base.s3_avatar_represent(comment.created_by),
                    DIV(DIV(header,
                            _class="comment-header"),
                        DIV(XML(comment.body),
                            _class="comment-body"),
                        _class="comment-text"),
                    DIV(DIV(comment.created_on,
                            _class="comment-date"),
                        DIV(A(T("Reply"),
                              _class="action-btn"),
                            _onclick="comment_reply(%i);" % comment.id,
                            _class="comment-reply"),
                        _class="fright"),
                    _id="comment-%i" % comment.id,
                    _task_id=task_id,
                    _class="comment-box"))

    # Add the children (replies) of this thread
    # (renamed from 'id' to avoid shadowing the id() builtin)
    children = UL(_class="children")
    parent_id = comment.id
    have_children = False
    for child in comments:
        if child.parent == parent_id:
            have_children = True
            children.append(comment_parse(child, comments, task_id=task_id))
    if have_children:
        thread.append(children)

    return thread
# -----------------------------------------------------------------------------
def comments():
    """
        Function accessed by AJAX from rfooter to handle Comments

        Expects the task ID as the first request argument;
        returns the comment threads plus a form to add a new comment.
    """

    try:
        task_id = request.args[0]
    except IndexError:
        # Narrowed from a bare except: only a missing argument is expected
        raise HTTP(400)

    table = s3db.project_comment
    field = table.task_id
    field.default = task_id
    field.writable = field.readable = False

    # Create S3Request for S3SQLForm
    r = s3_request(prefix="project",
                   name="comment",
                   # Override task_id
                   args=[],
                   vars=None,
                   # Override .loads
                   extension="html")

    # Customise resource
    r.customise_resource()

    # Form to add a new Comment
    form = s3base.S3SQLCustomForm("parent", "task_id", "body")(r)

    # List of existing Comments
    comments = db(field == task_id).select(table.id,
                                           table.parent,
                                           table.body,
                                           table.created_by,
                                           table.created_on)
    output = UL(_id="comments")
    for comment in comments:
        if not comment.parent:
            # Show top-level threads at top-level
            thread = comment_parse(comment, comments, task_id=task_id)
            output.append(thread)

    script = "".join((
'''$('#comments').collapsible({xoffset:'-5',yoffset:'50',imagehide:img_path+'arrow-down.png',imageshow:img_path+'arrow-right.png',defaulthide:false})
$('#project_comment_parent__row1').hide()
$('#project_comment_parent__row').hide()
$('#project_comment_body').ckeditor(ck_config)
$('#submit_record__row input').click(function(){
$('#comment-form').hide()
$('#project_comment_body').ckeditorGet().destroy()
return true
})'''))

    # No layout in this output!
    #s3.jquery_ready.append(script)

    output = DIV(output,
                 DIV(H4(T("New Post"),
                        _id="comment-title"),
                     form,
                     _id="comment-form",
                     _class="clear"),
                 SCRIPT(script))

    return XML(output)
def comment():
    """ RESTful CRUD controller for Comments """
    return s3_rest_controller()
# =============================================================================
# Campaigns
# =============================================================================
def campaign():
    """ RESTful CRUD controller for Campaigns """
    return s3_rest_controller()
# -----------------------------------------------------------------------------
def campaign_keyword():
    """ RESTful CRUD controller for Campaign Keywords """
    return s3_rest_controller()
# -----------------------------------------------------------------------------
def campaign_message():
    """ RESTful CRUD controller for Campaign Messages """
    return s3_rest_controller()
# -----------------------------------------------------------------------------
def campaign_response():
    """ RESTful CRUD controller for Campaign Responses """
    return s3_rest_controller()
# -----------------------------------------------------------------------------
def campaign_response_summary():
    """ RESTful CRUD controller for Campaign Response Summaries """
    return s3_rest_controller()
# END =========================================================================
| |
import os
import sys
import textwrap
import pytest
from tests.lib import (
assert_all_changes, pyversion, _create_test_package,
_change_test_package_version,
)
from tests.lib.local_repos import local_checkout
def test_no_upgrade_unless_requested(script):
    """
    No upgrade if not specifically requested.
    """
    # NOTE(review): installs INITools from the index like the sibling tests,
    # which all carry @pytest.mark.network - presumably this one should too;
    # confirm
    script.pip('install', 'INITools==0.1', expect_error=True)
    result = script.pip('install', 'INITools', expect_error=True)
    assert not result.files_created, (
        'pip install INITools upgraded when it should not have'
    )
@pytest.mark.network
def test_upgrade_to_specific_version(script):
    """
    It does upgrade to specific version requested.
    """
    script.pip('install', 'INITools==0.1', expect_error=True)
    result = script.pip('install', 'INITools==0.2', expect_error=True)
    assert result.files_created, (
        'pip install with specific version did not upgrade'
    )
    # 0.1 metadata must be removed and 0.2 metadata added
    assert (
        script.site_packages / 'INITools-0.1-py%s.egg-info' %
        pyversion in result.files_deleted
    )
    assert (
        script.site_packages / 'INITools-0.2-py%s.egg-info' %
        pyversion in result.files_created
    )
@pytest.mark.network
def test_upgrade_if_requested(script):
    """
    And it does upgrade if requested.
    """
    script.pip('install', 'INITools==0.1', expect_error=True)
    result = script.pip('install', '--upgrade', 'INITools', expect_error=True)
    assert result.files_created, 'pip install --upgrade did not upgrade'
    # The old 0.1 metadata must not reappear among the created files
    assert (
        script.site_packages / 'INITools-0.1-py%s.egg-info' %
        pyversion not in result.files_created
    )
def test_upgrade_with_newest_already_installed(script, data):
    """
    If the newest version of a package is already installed, the package should
    not be reinstalled and the user should be informed.
    """
    # Uses local find-links only, so no network marker needed
    script.pip('install', '-f', data.find_links, '--no-index', 'simple')
    result = script.pip(
        'install', '--upgrade', '-f', data.find_links, '--no-index', 'simple'
    )
    assert not result.files_created, 'simple upgraded when it should not have'
    assert 'already up-to-date' in result.stdout, result.stdout
@pytest.mark.network
def test_upgrade_force_reinstall_newest(script):
    """
    Force reinstallation of a package even if it is already at its newest
    version if --force-reinstall is supplied.
    """
    result = script.pip('install', 'INITools')
    assert script.site_packages / 'initools' in result.files_created, (
        sorted(result.files_created.keys())
    )
    result2 = script.pip(
        'install', '--upgrade', '--force-reinstall', 'INITools'
    )
    # Reinstall must update files even though the version is unchanged
    assert result2.files_updated, 'upgrade to INITools 0.3 failed'
    result3 = script.pip('uninstall', 'initools', '-y', expect_error=True)
    # After uninstall, only build dir & cache may differ from pristine state
    assert_all_changes(result, result3, [script.venv / 'build', 'cache'])
@pytest.mark.network
def test_uninstall_before_upgrade(script):
    """
    Automatic uninstall-before-upgrade.
    """
    result = script.pip('install', 'INITools==0.2', expect_error=True)
    assert script.site_packages / 'initools' in result.files_created, (
        sorted(result.files_created.keys())
    )
    result2 = script.pip('install', 'INITools==0.3', expect_error=True)
    assert result2.files_created, 'upgrade to INITools 0.3 failed'
    result3 = script.pip('uninstall', 'initools', '-y', expect_error=True)
    # After uninstall, only build dir & cache may differ from pristine state
    assert_all_changes(result, result3, [script.venv / 'build', 'cache'])
@pytest.mark.network
def test_uninstall_before_upgrade_from_url(script):
    """
    Automatic uninstall-before-upgrade from URL.
    """
    result = script.pip('install', 'INITools==0.2', expect_error=True)
    assert script.site_packages / 'initools' in result.files_created, (
        sorted(result.files_created.keys())
    )
    # Install a newer version directly from a sdist URL
    result2 = script.pip(
        'install',
        'http://pypi.python.org/packages/source/I/INITools/INITools-'
        '0.3.tar.gz',
        expect_error=True,
    )
    assert result2.files_created, 'upgrade to INITools 0.3 failed'
    result3 = script.pip('uninstall', 'initools', '-y', expect_error=True)
    assert_all_changes(result, result3, [script.venv / 'build', 'cache'])
@pytest.mark.network
def test_upgrade_to_same_version_from_url(script):
    """
    Installing an already-installed version from a URL without --upgrade
    must not uninstall and reinstall it.
    """
    first_result = script.pip('install', 'INITools==0.3', expect_error=True)
    created = first_result.files_created
    assert script.site_packages / 'initools' in created, sorted(created.keys())
    repeat_result = script.pip(
        'install',
        'http://pypi.python.org/packages/source/I/INITools/INITools-'
        '0.3.tar.gz',
        expect_error=True,
    )
    assert not repeat_result.files_updated, 'INITools 0.3 reinstalled same version'
    clean_result = script.pip('uninstall', 'initools', '-y', expect_error=True)
    assert_all_changes(
        first_result, clean_result, [script.venv / 'build', 'cache']
    )
@pytest.mark.network
def test_upgrade_from_reqs_file(script):
    """Pinned installs can be upgraded via an unpinned requirements file."""
    reqs_path = script.scratch_path.join("test-req.txt")
    # First install with pinned versions...
    reqs_path.write(textwrap.dedent("""\
        PyLogo<0.4
        # and something else to test out:
        INITools==0.3
        """))
    install_result = script.pip(
        'install', '-r', script.scratch_path / 'test-req.txt'
    )
    # ...then upgrade with the pins removed.
    reqs_path.write(textwrap.dedent("""\
        PyLogo
        # and something else to test out:
        INITools
        """))
    script.pip(
        'install', '--upgrade', '-r', script.scratch_path / 'test-req.txt'
    )
    uninstall_result = script.pip(
        'uninstall', '-r', script.scratch_path / 'test-req.txt', '-y'
    )
    assert_all_changes(
        install_result,
        uninstall_result,
        [script.venv / 'build', 'cache', script.scratch / 'test-req.txt'],
    )
def test_uninstall_rollback(script, data):
    """
    A failing install (test package whose setup.py is crafted to fail) is
    rolled back, leaving the previously installed version intact.
    """
    ok_result = script.pip(
        'install', '-f', data.find_links, '--no-index', 'broken==0.1'
    )
    assert script.site_packages / 'broken.py' in ok_result.files_created, list(
        ok_result.files_created.keys()
    )
    bad_result = script.pip(
        'install', '-f', data.find_links, '--no-index', 'broken===0.2broken',
        expect_error=True,
    )
    assert bad_result.returncode == 1, str(bad_result)
    # The old version must still be importable after the rollback.
    version_check = script.run(
        'python', '-c', "import broken; print(broken.VERSION)"
    )
    assert version_check.stdout == '0.1\n'
    assert_all_changes(
        ok_result.files_after,
        bad_result,
        [script.venv / 'build'],
    )
# Issue #530 - temporarily disable flaky test
# NOTE: a bare `@pytest.mark.skipif` carries no condition and does not
# reliably skip on modern pytest; spell the unconditional skip explicitly.
@pytest.mark.skipif("True")
def test_editable_git_upgrade(script):
    """
    Test installing an editable git package from a repository, upgrading the
    repository, installing again, and check it gets the newer version

    :param script: test-environment fixture used to run pip and scripts.
    """
    version_pkg_path = _create_test_package(script)
    script.pip(
        'install', '-e',
        '%s#egg=version_pkg' % ('git+file://' + version_pkg_path),
    )
    version = script.run('version_pkg')
    assert '0.1' in version.stdout
    # Advance the repository, then reinstall the editable checkout.
    _change_test_package_version(script, version_pkg_path)
    script.pip(
        'install', '-e',
        '%s#egg=version_pkg' % ('git+file://' + version_pkg_path),
    )
    version2 = script.run('version_pkg')
    assert 'some different version' in version2.stdout, (
        "Output: %s" % (version2.stdout)
    )
@pytest.mark.network
def test_should_not_install_always_from_cache(script):
    """
    If there is an old cached package, pip should download the newer version
    Related to issue #175
    """
    script.pip('install', 'INITools==0.2', expect_error=True)
    script.pip('uninstall', '-y', 'INITools')
    result = script.pip('install', 'INITools==0.1', expect_error=True)
    # The cached 0.2 metadata must not appear; the requested 0.1 must.
    egg_info_02 = 'INITools-0.2-py%s.egg-info' % pyversion
    egg_info_01 = 'INITools-0.1-py%s.egg-info' % pyversion
    assert script.site_packages / egg_info_02 not in result.files_created
    assert script.site_packages / egg_info_01 in result.files_created
@pytest.mark.network
def test_install_with_ignoreinstalled_requested(script):
    """`pip install -I` completely ignores an old conflicting package."""
    script.pip('install', 'INITools==0.1', expect_error=True)
    result = script.pip('install', '-I', 'INITools==0.3', expect_error=True)
    assert result.files_created, 'pip install -I did not install'
    # both the old and new metadata should be present.
    for version in ('0.1', '0.3'):
        egg_info = 'INITools-%s-py%s.egg-info' % (version, pyversion)
        assert os.path.exists(script.site_packages_path / egg_info)
@pytest.mark.network
def test_upgrade_vcs_req_with_no_dists_found(script, tmpdir):
    """It can upgrade a VCS requirement that has no distributions otherwise."""
    requirement = "%s#egg=pip-test-package" % local_checkout(
        "git+http://github.com/pypa/pip-test-package.git",
        tmpdir.join("cache"),
    )
    script.pip("install", requirement)
    upgrade_result = script.pip("install", "-U", requirement)
    assert not upgrade_result.returncode
@pytest.mark.network
def test_upgrade_vcs_req_with_dist_found(script):
    """It can upgrade a VCS requirement that has distributions on the index."""
    # TODO(pnasrat) Using local_checkout fails on windows - oddness with the
    # test path urls/git.
    requirement = "%s#egg=pretend" % (
        "git+git://github.com/alex/pretend@e7f26ad7dbcb4a02a4995aade4"
        "743aad47656b27"
    )
    script.pip("install", requirement)
    upgrade_result = script.pip("install", "-U", requirement)
    # The upgrade must come from the VCS URL, not from PyPI.
    assert "pypi.python.org" not in upgrade_result.stdout, upgrade_result.stdout
class TestUpgradeSetuptools(object):
    """
    Tests for upgrading to setuptools (using pip from src tree)

    The tests use a *fixed* set of packages from our test packages dir
    note: virtualenv-1.9.1 contains distribute-0.6.34
    note: virtualenv-1.10 contains setuptools-0.9.7
    """
    def prep_ve(self, script, version, pip_src, distribute=False):
        """Create a virtualenv of the given version, optionally based on
        distribute, and install pip from source into it.

        Leaves the venv's bin directory in ``self.ve_bin`` for the tests.
        """
        self.script = script
        self.script.pip_install_local('virtualenv==%s' % version)
        args = ['virtualenv', self.script.scratch_path / 'VE']
        if distribute:
            args.insert(1, '--distribute')
        if version == "1.9.1" and not distribute:
            # setuptools 0.6 didn't support PYTHONDONTWRITEBYTECODE
            del self.script.environ["PYTHONDONTWRITEBYTECODE"]
        self.script.run(*args)
        if sys.platform == 'win32':
            bindir = "Scripts"
        else:
            bindir = "bin"
        self.ve_bin = self.script.scratch_path / 'VE' / bindir
        # Replace the venv's bundled pip with the source tree under test.
        self.script.run(self.ve_bin / 'pip', 'uninstall', '-y', 'pip')
        self.script.run(
            self.ve_bin / 'python', 'setup.py', 'install',
            cwd=pip_src,
            expect_stderr=True,
        )

    @pytest.mark.skipif("sys.version_info >= (3,0)")
    def test_py2_from_setuptools_6_to_setuptools_7(
            self, script, data, virtualenv):
        self.prep_ve(script, '1.9.1', virtualenv.pip_source_dir)
        result = self.script.run(
            self.ve_bin / 'pip', 'install', '--no-use-wheel', '--no-index',
            '--find-links=%s' % data.find_links, '-U', 'setuptools'
        )
        assert (
            "Found existing installation: setuptools 0.6rc11" in result.stdout
        )
        result = self.script.run(self.ve_bin / 'pip', 'list')
        assert "setuptools (0.9.8)" in result.stdout

    def test_py2_py3_from_distribute_6_to_setuptools_7(
            self, script, data, virtualenv):
        self.prep_ve(
            script, '1.9.1', virtualenv.pip_source_dir, distribute=True
        )
        result = self.script.run(
            self.ve_bin / 'pip', 'install', '--no-index',
            '--find-links=%s' % data.find_links, '-U', 'setuptools'
        )
        assert (
            "Found existing installation: distribute 0.6.34" in result.stdout
        )
        result = self.script.run(self.ve_bin / 'pip', 'list')
        assert "setuptools (0.9.8)" in result.stdout
        assert "distribute (0.7.3)" not in result.stdout

    def test_from_setuptools_7_to_setuptools_7(self, script, data, virtualenv):
        self.prep_ve(script, '1.10', virtualenv.pip_source_dir)
        result = self.script.run(
            self.ve_bin / 'pip', 'install', '--no-index',
            '--find-links=%s' % data.find_links, '-U', 'setuptools'
        )
        assert "Found existing installation: setuptools 0.9.7" in result.stdout
        result = self.script.run(self.ve_bin / 'pip', 'list')
        assert "setuptools (0.9.8)" in result.stdout

    def test_from_setuptools_7_to_setuptools_7_using_wheel(
            self, script, data, virtualenv):
        self.prep_ve(script, '1.10', virtualenv.pip_source_dir)
        result = self.script.run(
            self.ve_bin / 'pip', 'install', '--use-wheel', '--no-index',
            '--find-links=%s' % data.find_links, '-U', 'setuptools'
        )
        assert "Found existing installation: setuptools 0.9.7" in result.stdout
        # only wheels use dist-info
        assert 'setuptools-0.9.8.dist-info' in str(result.files_created)
        result = self.script.run(self.ve_bin / 'pip', 'list')
        assert "setuptools (0.9.8)" in result.stdout

    # disabling intermittent travis failure:
    # https://github.com/pypa/pip/issues/1379
    @pytest.mark.skipif("hasattr(sys, 'pypy_version_info')")
    def test_from_setuptools_7_to_setuptools_7_with_distribute_7_installed(
            self, script, data, virtualenv):
        self.prep_ve(
            script, '1.9.1', virtualenv.pip_source_dir, distribute=True
        )
        # The outputs of the next two setup steps are not inspected, so
        # don't bind them (the previous `result =` assignments were unused).
        self.script.run(
            self.ve_bin / 'pip', 'install', '--no-index',
            '--find-links=%s' % data.find_links, '-U', 'setuptools'
        )
        self.script.run(
            self.ve_bin / 'pip', 'install', '--no-index',
            '--find-links=%s' % data.find_links, 'setuptools==0.9.6'
        )
        result = self.script.run(self.ve_bin / 'pip', 'list')
        assert "setuptools (0.9.6)" in result.stdout
        assert "distribute (0.7.3)" not in result.stdout
        result = self.script.run(
            self.ve_bin / 'pip', 'install', '--no-index',
            '--find-links=%s' % data.find_links, '-U', 'setuptools'
        )
        assert "Found existing installation: setuptools 0.9.6" in result.stdout
        result = self.script.run(self.ve_bin / 'pip', 'list')
        assert "setuptools (0.9.8)" in result.stdout
| |
# coding=utf-8
# Copyright 2012 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2012 NTT DOCOMO, INC.
# Copyright (c) 2011 University of Southern California / ISI
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for the base baremetal driver class."""
import mock
import mox
from oslo.config import cfg
from nova.compute import flavors
from nova.compute import power_state
from nova.compute import task_states
from nova import db as main_db
from nova import exception
from nova import objects
from nova.openstack.common import jsonutils
from nova import test
from nova.tests.image import fake as fake_image
from nova.tests import utils
from nova.tests.virt.baremetal.db import base as bm_db_base
from nova.tests.virt.baremetal.db import utils as bm_db_utils
from nova.virt.baremetal import baremetal_states
from nova.virt.baremetal import db
from nova.virt.baremetal import driver as bm_driver
from nova.virt.baremetal import fake
from nova.virt import fake as fake_virt
CONF = cfg.CONF
# Config overrides applied by every test case via self.flags().
COMMON_FLAGS = dict(
    firewall_driver='nova.virt.baremetal.fake.FakeFirewallDriver',
    host='test_host',
)
# Overrides for the [baremetal] config group: wire every pluggable driver
# to its in-memory fake so tests never touch real hardware.
BAREMETAL_FLAGS = dict(
    driver='nova.virt.baremetal.fake.FakeDriver',
    flavor_extra_specs=['cpu_arch:test', 'test_spec:test_value'],
    power_manager='nova.virt.baremetal.fake.FakePowerManager',
    vif_driver='nova.virt.baremetal.fake.FakeVifDriver',
    volume_driver='nova.virt.baremetal.fake.FakeVolumeDriver',
    group='baremetal',
)
class BareMetalDriverNoDBTestCase(test.NoDBTestCase):
    """Driver tests that need no database and use all-fake sub-drivers."""
    def setUp(self):
        super(BareMetalDriverNoDBTestCase, self).setUp()
        for flag_group in (COMMON_FLAGS, BAREMETAL_FLAGS):
            self.flags(**flag_group)
        self.driver = bm_driver.BareMetalDriver(None)

    def test_validate_driver_loading(self):
        # Each configured class path must have been resolved to its fake.
        checks = (
            (self.driver.driver, fake.FakeDriver),
            (self.driver.vif_driver, fake.FakeVifDriver),
            (self.driver.volume_driver, fake.FakeVolumeDriver),
            (self.driver.firewall_driver, fake.FakeFirewallDriver),
        )
        for obj, expected_cls in checks:
            self.assertIsInstance(obj, expected_cls)

    def test_driver_capabilities(self):
        caps = self.driver.capabilities
        self.assertTrue(caps['has_imagecache'])
        self.assertFalse(caps['supports_recreate'])
class BareMetalDriverWithDBTestCase(bm_db_base.BMDBTestCase):
    """BareMetalDriver tests that exercise the baremetal DB layer.

    All sub-drivers are the in-memory fakes configured by BAREMETAL_FLAGS;
    mox is used to script expectations on them where ordering matters.
    """
    def setUp(self):
        super(BareMetalDriverWithDBTestCase, self).setUp()
        self.flags(**COMMON_FLAGS)
        self.flags(**BAREMETAL_FLAGS)
        fake_image.stub_out_image_service(self.stubs)
        self.context = utils.get_test_admin_context()
        self.driver = bm_driver.BareMetalDriver(fake_virt.FakeVirtAPI())
        self.addCleanup(fake_image.FakeImageService_reset)
    def _create_node(self, node_info=None, nic_info=None, ephemeral=True):
        """Create a bm node with NICs in the DB and return a dict bundling
        the node row, a test instance, and canned kwargs for calling
        spawn/destroy/rebuild on the driver.

        :param node_info: optional node row values (defaults to a 2-cpu,
            2048MB node with id 123 on 'test_host')
        :param nic_info: optional list of NIC dicts (address/datapath/port)
        :param ephemeral: whether the test flavor has an ephemeral disk
        """
        result = {}
        if node_info is None:
            node_info = bm_db_utils.new_bm_node(
                id=123,
                service_host='test_host',
                cpus=2,
                memory_mb=2048,
            )
        if nic_info is None:
            nic_info = [
                {'address': '01:23:45:67:89:01', 'datapath_id': '0x1',
                 'port_no': 1},
                {'address': '01:23:45:67:89:02', 'datapath_id': '0x2',
                 'port_no': 2},
            ]
        result['node_info'] = node_info
        result['nic_info'] = nic_info
        result['node'] = db.bm_node_create(self.context, node_info)
        for nic in nic_info:
            db.bm_interface_create(
                self.context,
                result['node']['id'],
                nic['address'],
                nic['datapath_id'],
                nic['port_no'],
            )
        if ephemeral:
            result['instance'] = utils.get_test_instance()
        else:
            flavor = utils.get_test_flavor(options={'ephemeral_gb': 0})
            result['instance'] = utils.get_test_instance(flavor=flavor)
        result['instance']['node'] = result['node']['uuid']
        result['spawn_params'] = dict(
            admin_password='test_pass',
            block_device_info=None,
            context=self.context,
            image_meta=utils.get_test_image_info(
                None, result['instance']),
            injected_files=[('/fake/path', 'hello world')],
            instance=result['instance'],
            network_info=utils.get_test_network_info(),
        )
        result['destroy_params'] = dict(
            context=self.context,
            instance=result['instance'],
            network_info=result['spawn_params']['network_info'],
            block_device_info=result['spawn_params']['block_device_info'],
        )
        # rebuild() takes an Instance object rather than a dict.
        instance = objects.Instance._from_db_object(
            self.context, objects.Instance(), result['instance'])
        instance.node = result['node']['uuid']
        result['rebuild_params'] = dict(
            context=self.context,
            instance=instance,
            image_meta=utils.get_test_image_info(None, result['instance']),
            injected_files=[('/fake/path', 'hello world')],
            admin_password='test_pass',
            bdms={},
            detach_block_devices=self.mox.CreateMockAnything(),
            attach_block_devices=self.mox.CreateMockAnything(),
            network_info=result['spawn_params']['network_info'],
            block_device_info=result['spawn_params']['block_device_info'],
        )
        return result
    def test_get_host_stats(self):
        node = self._create_node()
        stats = self.driver.get_host_stats()
        self.assertIsInstance(stats, list)
        self.assertEqual(len(stats), 1)
        stats = stats[0]
        self.assertEqual(stats['cpu_arch'], 'test')
        self.assertEqual(stats['test_spec'], 'test_value')
        self.assertEqual(stats['hypervisor_type'], 'baremetal')
        self.assertEqual(stats['hypervisor_hostname'], node['node']['uuid'])
        self.assertEqual(stats['host'], 'test_host')
        self.assertEqual(stats['vcpus'], 2)
        self.assertEqual(stats['host_memory_total'], 2048)
    def test_spawn_ok(self):
        node = self._create_node()
        self.driver.spawn(**node['spawn_params'])
        row = db.bm_node_get(self.context, node['node']['id'])
        self.assertEqual(row['task_state'], baremetal_states.ACTIVE)
        self.assertEqual(row['instance_uuid'], node['instance']['uuid'])
        self.assertEqual(row['instance_name'], node['instance']['hostname'])
        instance = main_db.instance_get_by_uuid(self.context,
                node['instance']['uuid'])
        self.assertEqual(instance['default_ephemeral_device'], '/dev/sda1')
    def test_set_default_ephemeral_device(self):
        instance = objects.Instance(context=self.context)
        instance.system_metadata = flavors.save_flavor_info(
            {}, flavors.get_default_flavor())
        instance.system_metadata['instance_type_ephemeral_gb'] = 1
        with mock.patch.object(instance, 'save') as mock_save:
            self.driver._set_default_ephemeral_device(instance)
            mock_save.assert_called_once_with()
        self.assertEqual('/dev/sda1', instance.default_ephemeral_device)
    def test_spawn_no_ephemeral_ok(self):
        # With no ephemeral disk, no default ephemeral device is assigned.
        node = self._create_node(ephemeral=False)
        self.driver.spawn(**node['spawn_params'])
        row = db.bm_node_get(self.context, node['node']['id'])
        self.assertEqual(row['task_state'], baremetal_states.ACTIVE)
        self.assertEqual(row['instance_uuid'], node['instance']['uuid'])
        self.assertEqual(row['instance_name'], node['instance']['hostname'])
        instance = main_db.instance_get_by_uuid(self.context,
                node['instance']['uuid'])
        self.assertIsNone(instance['default_ephemeral_device'])
    def _test_rebuild(self, ephemeral):
        # Spawn, then rebuild; the node must stay on the same instance and
        # record the preserve_ephemeral flag.
        node = self._create_node(ephemeral=ephemeral)
        self.driver.spawn(**node['spawn_params'])
        after_spawn = db.bm_node_get(self.context, node['node']['id'])
        instance = node['rebuild_params']['instance']
        instance.task_state = task_states.REBUILDING
        instance.save(expected_task_state=[None])
        self.driver.rebuild(preserve_ephemeral=ephemeral,
                            **node['rebuild_params'])
        after_rebuild = db.bm_node_get(self.context, node['node']['id'])
        self.assertEqual(after_rebuild['task_state'], baremetal_states.ACTIVE)
        self.assertEqual(after_rebuild['preserve_ephemeral'], ephemeral)
        self.assertEqual(after_spawn['instance_uuid'],
                         after_rebuild['instance_uuid'])
    def test_rebuild_ok(self):
        self._test_rebuild(ephemeral=False)
    def test_rebuild_preserve_ephemeral(self):
        self._test_rebuild(ephemeral=True)
    def test_macs_from_nic_for_instance(self):
        node = self._create_node()
        expected = set([nic['address'] for nic in node['nic_info']])
        self.assertEqual(
            expected, self.driver.macs_for_instance(node['instance']))
    def test_macs_for_instance_after_spawn(self):
        node = self._create_node()
        self.driver.spawn(**node['spawn_params'])
        expected = set([nic['address'] for nic in node['nic_info']])
        self.assertEqual(
            expected, self.driver.macs_for_instance(node['instance']))
    def test_macs_for_instance(self):
        node = self._create_node()
        expected = set(['01:23:45:67:89:01', '01:23:45:67:89:02'])
        self.assertEqual(
            expected, self.driver.macs_for_instance(node['instance']))
    def test_macs_for_instance_no_interfaces(self):
        # Nodes cannot boot with no MACs, so we raise an error if that happens.
        node = self._create_node(nic_info=[])
        self.assertRaises(exception.NovaException,
                          self.driver.macs_for_instance, node['instance'])
    def test_spawn_node_already_associated(self):
        # A node already bound to another instance cannot be spawned on.
        node = self._create_node()
        db.bm_node_update(self.context, node['node']['id'],
                          {'instance_uuid': '1234-5678'})
        self.assertRaises(exception.NovaException,
                          self.driver.spawn, **node['spawn_params'])
        row = db.bm_node_get(self.context, node['node']['id'])
        self.assertIsNone(row['task_state'])
    def test_spawn_node_in_use(self):
        node = self._create_node()
        self.driver.spawn(**node['spawn_params'])
        self.assertRaises(exception.NovaException,
                          self.driver.spawn, **node['spawn_params'])
    def test_spawn_node_not_found(self):
        node = self._create_node()
        db.bm_node_update(self.context, node['node']['id'],
                          {'uuid': 'hide-this-node'})
        self.assertRaises(exception.NovaException,
                          self.driver.spawn, **node['spawn_params'])
        row = db.bm_node_get(self.context, node['node']['id'])
        self.assertIsNone(row['task_state'])
    def test_spawn_fails(self):
        node = self._create_node()
        self.mox.StubOutWithMock(fake.FakePowerManager, 'activate_node')
        fake.FakePowerManager.activate_node().AndRaise(test.TestingException)
        self.mox.ReplayAll()
        self.assertRaises(test.TestingException,
                          self.driver.spawn, **node['spawn_params'])
        row = db.bm_node_get(self.context, node['node']['id'])
        self.assertEqual(row['task_state'], baremetal_states.DELETED)
    def test_spawn_prepared(self):
        # If the node reaches PREPARED and activation then fails, the error
        # update also fails, so the node must stay in PREPARED.
        node = self._create_node()
        def update_2prepared(context, node, instance, state):
            # Side effect: verify we were BUILDING, then move to PREPARED.
            row = db.bm_node_get(context, node['id'])
            self.assertEqual(row['task_state'], baremetal_states.BUILDING)
            db.bm_node_update(
                context, node['id'],
                {'task_state': baremetal_states.PREPARED})
        self.mox.StubOutWithMock(fake.FakeDriver, 'activate_node')
        self.mox.StubOutWithMock(bm_driver, '_update_state')
        bm_driver._update_state(
            self.context,
            mox.IsA(node['node']),
            node['instance'],
            baremetal_states.PREPARED).WithSideEffects(update_2prepared)
        fake.FakeDriver.activate_node(
            self.context,
            mox.IsA(node['node']),
            node['instance']).AndRaise(test.TestingException)
        bm_driver._update_state(
            self.context,
            mox.IsA(node['node']),
            node['instance'],
            baremetal_states.ERROR).AndRaise(test.TestingException)
        self.mox.ReplayAll()
        self.assertRaises(test.TestingException,
                          self.driver.spawn, **node['spawn_params'])
        row = db.bm_node_get(self.context, node['node']['id'])
        self.assertEqual(row['task_state'], baremetal_states.PREPARED)
    def test_spawn_fails_to_cleanup(self):
        # Activation fails and the cleanup deactivate also fails: ERROR.
        node = self._create_node()
        self.mox.StubOutWithMock(fake.FakePowerManager, 'activate_node')
        self.mox.StubOutWithMock(fake.FakePowerManager, 'deactivate_node')
        fake.FakePowerManager.deactivate_node().AndReturn(None)
        fake.FakePowerManager.activate_node().AndRaise(test.TestingException)
        fake.FakePowerManager.deactivate_node().AndRaise(test.TestingException)
        self.mox.ReplayAll()
        self.assertRaises(test.TestingException,
                          self.driver.spawn, **node['spawn_params'])
        row = db.bm_node_get(self.context, node['node']['id'])
        self.assertEqual(row['task_state'], baremetal_states.ERROR)
    def test_spawn_destroy_images_on_deploy(self):
        node = self._create_node()
        self.driver.driver.destroy_images = mock.MagicMock()
        self.driver.spawn(**node['spawn_params'])
        row = db.bm_node_get(self.context, node['node']['id'])
        self.assertEqual(row['task_state'], baremetal_states.ACTIVE)
        self.assertEqual(row['instance_uuid'], node['instance']['uuid'])
        self.assertEqual(row['instance_name'], node['instance']['hostname'])
        instance = main_db.instance_get_by_uuid(self.context,
                node['instance']['uuid'])
        self.assertIsNotNone(instance)
        self.assertEqual(1, self.driver.driver.destroy_images.call_count)
    def test_destroy_ok(self):
        node = self._create_node()
        self.driver.spawn(**node['spawn_params'])
        self.driver.destroy(**node['destroy_params'])
        row = db.bm_node_get(self.context, node['node']['id'])
        self.assertEqual(row['task_state'], baremetal_states.DELETED)
        self.assertIsNone(row['instance_uuid'])
        self.assertIsNone(row['instance_name'])
    def test_destroy_fails(self):
        # First deactivate (during spawn path) succeeds, second one fails.
        node = self._create_node()
        self.mox.StubOutWithMock(fake.FakePowerManager, 'deactivate_node')
        fake.FakePowerManager.deactivate_node().AndReturn(None)
        fake.FakePowerManager.deactivate_node().AndRaise(test.TestingException)
        self.mox.ReplayAll()
        self.driver.spawn(**node['spawn_params'])
        self.assertRaises(test.TestingException,
                          self.driver.destroy, **node['destroy_params'])
        row = db.bm_node_get(self.context, node['node']['id'])
        self.assertEqual(row['task_state'], baremetal_states.ERROR)
        self.assertEqual(row['instance_uuid'], node['instance']['uuid'])
    def test_get_available_resources(self):
        node = self._create_node()
        resources = self.driver.get_available_resource(node['node']['uuid'])
        self.assertEqual(resources['memory_mb'],
                         node['node_info']['memory_mb'])
        self.assertEqual(resources['memory_mb_used'], 0)
        self.assertEqual(resources['supported_instances'],
                '[["test", "baremetal", "baremetal"]]')
        self.assertEqual(resources['stats'],
                         '{"cpu_arch": "test", "baremetal_driver": '
                         '"nova.virt.baremetal.fake.FakeDriver", '
                         '"test_spec": "test_value"}')
        # Memory is fully consumed while an instance is spawned.
        self.driver.spawn(**node['spawn_params'])
        resources = self.driver.get_available_resource(node['node']['uuid'])
        self.assertEqual(resources['memory_mb_used'],
                         node['node_info']['memory_mb'])
        self.driver.destroy(**node['destroy_params'])
        resources = self.driver.get_available_resource(node['node']['uuid'])
        self.assertEqual(resources['memory_mb_used'], 0)
        stats = jsonutils.loads(resources['stats'])
        self.assertEqual(stats['test_spec'], 'test_value')
    def test_get_available_nodes(self):
        self.assertEqual(0, len(self.driver.get_available_nodes()))
        self.assertEqual(0, len(self.driver.get_available_nodes(refresh=True)))
        node1 = self._create_node()
        self.assertEqual(1, len(self.driver.get_available_nodes()))
        node1['instance']['hostname'] = 'test-host-1'
        self.driver.spawn(**node1['spawn_params'])
        self.assertEqual(1, len(self.driver.get_available_nodes()))
        self.assertEqual([node1['node']['uuid']],
                         self.driver.get_available_nodes())
    def test_list_instances(self):
        self.assertEqual([], self.driver.list_instances())
        node1 = self._create_node()
        self.assertEqual([], self.driver.list_instances())
        node_info = bm_db_utils.new_bm_node(
            id=456,
            service_host='test_host',
            cpus=2,
            memory_mb=2048,
        )
        nic_info = [
            {'address': 'cc:cc:cc', 'datapath_id': '0x1',
             'port_no': 1},
            {'address': 'dd:dd:dd', 'datapath_id': '0x2',
             'port_no': 2},
        ]
        node2 = self._create_node(node_info=node_info, nic_info=nic_info)
        self.assertEqual([], self.driver.list_instances())
        node1['instance']['hostname'] = 'test-host-1'
        node2['instance']['hostname'] = 'test-host-2'
        self.driver.spawn(**node1['spawn_params'])
        self.assertEqual(['test-host-1'],
                         self.driver.list_instances())
        self.driver.spawn(**node2['spawn_params'])
        self.assertEqual(['test-host-1', 'test-host-2'],
                         self.driver.list_instances())
        self.driver.destroy(**node1['destroy_params'])
        self.assertEqual(['test-host-2'],
                         self.driver.list_instances())
        self.driver.destroy(**node2['destroy_params'])
        self.assertEqual([], self.driver.list_instances())
    def test_get_info_no_such_node(self):
        node = self._create_node()
        self.assertRaises(exception.InstanceNotFound,
                          self.driver.get_info,
                          node['instance'])
    def test_get_info_ok(self):
        node = self._create_node()
        db.bm_node_associate_and_update(self.context, node['node']['uuid'],
                {'instance_uuid': node['instance']['uuid'],
                 'instance_name': node['instance']['hostname'],
                 'task_state': baremetal_states.ACTIVE})
        res = self.driver.get_info(node['instance'])
        self.assertEqual(res['state'], power_state.RUNNING)
    def test_get_info_with_defunct_pm(self):
        # test fix for bug 1178378
        node = self._create_node()
        db.bm_node_associate_and_update(self.context, node['node']['uuid'],
                {'instance_uuid': node['instance']['uuid'],
                 'instance_name': node['instance']['hostname'],
                 'task_state': baremetal_states.ACTIVE})
        # fake the power manager and don't get a power state
        self.mox.StubOutWithMock(fake.FakePowerManager, 'is_power_on')
        fake.FakePowerManager.is_power_on().AndReturn(None)
        self.mox.ReplayAll()
        res = self.driver.get_info(node['instance'])
        # prior to the fix, returned power_state was SHUTDOWN
        self.assertEqual(res['state'], power_state.NOSTATE)
        self.mox.VerifyAll()
    def test_attach_volume(self):
        connection_info = {'_fake_connection_info': None}
        instance = utils.get_test_instance()
        mountpoint = '/dev/sdd'
        self.mox.StubOutWithMock(self.driver.volume_driver, 'attach_volume')
        self.driver.volume_driver.attach_volume(connection_info,
                                                instance,
                                                mountpoint)
        self.mox.ReplayAll()
        self.driver.attach_volume(None, connection_info, instance, mountpoint)
    def test_detach_volume(self):
        connection_info = {'_fake_connection_info': None}
        instance = utils.get_test_instance()
        mountpoint = '/dev/sdd'
        self.mox.StubOutWithMock(self.driver.volume_driver, 'detach_volume')
        self.driver.volume_driver.detach_volume(connection_info,
                                                instance,
                                                mountpoint)
        self.mox.ReplayAll()
        self.driver.detach_volume(connection_info, instance, mountpoint)
    def test_attach_block_devices(self):
        connection_info_1 = {'_fake_connection_info_1': None}
        connection_info_2 = {'_fake_connection_info_2': None}
        block_device_mapping = [{'connection_info': connection_info_1,
                                 'mount_device': '/dev/sde'},
                                {'connection_info': connection_info_2,
                                 'mount_device': '/dev/sdf'}]
        block_device_info = {'block_device_mapping': block_device_mapping}
        instance = utils.get_test_instance()
        self.mox.StubOutWithMock(self.driver, 'attach_volume')
        self.driver.attach_volume(None, connection_info_1, instance,
                                  '/dev/sde')
        self.driver.attach_volume(None, connection_info_2, instance,
                                  '/dev/sdf')
        self.mox.ReplayAll()
        self.driver._attach_block_devices(instance, block_device_info)
    def test_detach_block_devices(self):
        connection_info_1 = {'_fake_connection_info_1': None}
        connection_info_2 = {'_fake_connection_info_2': None}
        block_device_mapping = [{'connection_info': connection_info_1,
                                 'mount_device': '/dev/sde'},
                                {'connection_info': connection_info_2,
                                 'mount_device': '/dev/sdf'}]
        block_device_info = {'block_device_mapping': block_device_mapping}
        instance = utils.get_test_instance()
        self.mox.StubOutWithMock(self.driver, 'detach_volume')
        self.driver.detach_volume(connection_info_1, instance, '/dev/sde')
        self.driver.detach_volume(connection_info_2, instance, '/dev/sdf')
        self.mox.ReplayAll()
        self.driver._detach_block_devices(instance, block_device_info)
| |
import os
import stat
import unittest
import six
from conans.model.ref import ConanFileReference, PackageReference
from conans.test.utils.tools import NO_SETTINGS_PACKAGE_ID, TestClient
from conans.util.files import load
base_conanfile = '''
from conans import ConanFile
class TestSystemReqs(ConanFile):
name = "Test"
version = "0.1"
options = {"myopt": [True, False]}
default_options = "myopt=True"
def system_requirements(self):
self.output.info("*+Running system requirements+*")
%GLOBAL%
return "Installed my stuff"
'''
class SystemReqsTest(unittest.TestCase):
def test_force_system_reqs_rerun(self):
client = TestClient()
files = {'conanfile.py': base_conanfile.replace("%GLOBAL%", "")}
client.save(files)
client.run("create . user/channel")
self.assertIn("*+Running system requirements+*", client.out)
client.run("install Test/0.1@user/channel")
self.assertNotIn("*+Running system requirements+*", client.out)
ref = ConanFileReference.loads("Test/0.1@user/channel")
pfs = client.cache.package_layout(ref).packages()
pid = os.listdir(pfs)[0]
reqs_file = client.cache.package_layout(ref).system_reqs_package(PackageReference(ref, pid))
os.unlink(reqs_file)
client.run("install Test/0.1@user/channel")
self.assertIn("*+Running system requirements+*", client.out)
self.assertTrue(os.path.exists(reqs_file))
def test_local_system_requirements(self):
client = TestClient()
files = {'conanfile.py': base_conanfile.replace("%GLOBAL%", "")}
client.save(files)
client.run("install .")
self.assertIn("*+Running system requirements+*", client.out)
files = {'conanfile.py': base_conanfile.replace("%GLOBAL%", "self.run('fake command!')")}
client.save(files)
with six.assertRaisesRegex(self, Exception, "Command failed"):
client.run("install .")
def test_per_package(self):
client = TestClient()
files = {'conanfile.py': base_conanfile.replace("%GLOBAL%", "")}
client.save(files)
client.run("export . user/testing")
client.run("install Test/0.1@user/testing --build missing")
self.assertIn("*+Running system requirements+*", client.out)
ref = ConanFileReference.loads("Test/0.1@user/testing")
self.assertFalse(os.path.exists(client.cache.package_layout(ref).system_reqs()))
pref = PackageReference(ref, "f0ba3ca2c218df4a877080ba99b65834b9413798")
load_file = load(client.cache.package_layout(ref).system_reqs_package(pref))
self.assertIn("Installed my stuff", load_file)
# Run again
client.run("install Test/0.1@user/testing --build missing")
self.assertNotIn("*+Running system requirements+*", client.out)
self.assertFalse(os.path.exists(client.cache.package_layout(ref).system_reqs()))
load_file = load(client.cache.package_layout(ref).system_reqs_package(pref))
self.assertIn("Installed my stuff", load_file)
# Run with different option
client.run("install Test/0.1@user/testing -o myopt=False --build missing")
self.assertIn("*+Running system requirements+*", client.out)
self.assertFalse(os.path.exists(client.cache.package_layout(ref).system_reqs()))
pref2 = PackageReference(ref, NO_SETTINGS_PACKAGE_ID)
load_file = load(client.cache.package_layout(ref).system_reqs_package(pref2))
self.assertIn("Installed my stuff", load_file)
# remove packages
client.run("remove Test* -f -p 544")
layout1 = client.cache.package_layout(pref.ref)
layout2 = client.cache.package_layout(pref2.ref)
self.assertTrue(os.path.exists(layout1.system_reqs_package(pref)))
client.run("remove Test* -f -p f0ba3ca2c218df4a877080ba99b65834b9413798")
self.assertFalse(os.path.exists(layout1.system_reqs_package(pref)))
self.assertTrue(os.path.exists(layout2.system_reqs_package(pref2)))
client.run("remove Test* -f -p %s" % NO_SETTINGS_PACKAGE_ID)
self.assertFalse(os.path.exists(layout1.system_reqs_package(pref)))
self.assertFalse(os.path.exists(layout2.system_reqs_package(pref2)))
    def test_global(self):
        """With global_system_requirements=True a single reference-wide
        system_reqs log is shared by every package_id, so changing options
        must not trigger a second run."""
        client = TestClient()
        files = {
            'conanfile.py': base_conanfile.replace("%GLOBAL%",
                                                   "self.global_system_requirements=True")
        }
        client.save(files)
        client.run("export . user/testing")
        client.run("install Test/0.1@user/testing --build missing")
        self.assertIn("*+Running system requirements+*", client.out)
        ref = ConanFileReference.loads("Test/0.1@user/testing")
        pref = PackageReference(ref, "a527106fd9f2e3738a55b02087c20c0a63afce9d")
        # Global mode: no per-package file, only the reference-wide one
        self.assertFalse(os.path.exists(client.cache.package_layout(ref).system_reqs_package(pref)))
        load_file = load(client.cache.package_layout(ref).system_reqs())
        self.assertIn("Installed my stuff", load_file)
        # Run again
        client.run("install Test/0.1@user/testing --build missing")
        self.assertNotIn("*+Running system requirements+*", client.out)
        self.assertFalse(os.path.exists(client.cache.package_layout(ref).system_reqs_package(pref)))
        load_file = load(client.cache.package_layout(ref).system_reqs())
        self.assertIn("Installed my stuff", load_file)
        # Run with different option: still cached because the log is shared
        client.run("install Test/0.1@user/testing -o myopt=False --build missing")
        self.assertNotIn("*+Running system requirements+*", client.out)
        pref2 = PackageReference(ref, "54c9626b48cefa3b819e64316b49d3b1e1a78c26")
        self.assertFalse(os.path.exists(client.cache.package_layout(ref).system_reqs_package(pref)))
        self.assertFalse(os.path.exists(client.cache.package_layout(ref).system_reqs_package(pref2)))
        load_file = load(client.cache.package_layout(ref).system_reqs())
        self.assertIn("Installed my stuff", load_file)
        # remove packages: removing all binaries also clears the global log
        client.run("remove Test* -f -p")
        self.assertFalse(os.path.exists(client.cache.package_layout(ref).system_reqs_package(pref)))
        self.assertFalse(os.path.exists(client.cache.package_layout(ref).system_reqs_package(pref2)))
        self.assertFalse(os.path.exists(client.cache.package_layout(ref).system_reqs()))
    def test_wrong_output(self):
        """system_requirements() returning None must still create the
        per-package log file, just with empty content."""
        client = TestClient()
        files = {
            'conanfile.py':
                base_conanfile.replace("%GLOBAL%", "").replace('"Installed my stuff"', 'None')
        }
        client.save(files)
        client.run("export . user/testing")
        client.run("install Test/0.1@user/testing --build missing")
        self.assertIn("*+Running system requirements+*", client.out)
        ref = ConanFileReference.loads("Test/0.1@user/testing")
        self.assertFalse(os.path.exists(client.cache.package_layout(ref).system_reqs()))
        pref = PackageReference(ref, "f0ba3ca2c218df4a877080ba99b65834b9413798")
        load_file = load(client.cache.package_layout(pref.ref).system_reqs_package(pref))
        self.assertEqual('', load_file)
    def test_remove_system_reqs(self):
        """'conan remove --system-reqs' clears the cached log (forcing a
        re-run on next create) and supports exact and wildcard patterns."""
        ref = ConanFileReference.loads("Test/0.1@user/channel")
        client = TestClient()
        files = {'conanfile.py': base_conanfile.replace("%GLOBAL%", "")}
        client.save(files)
        system_reqs_path = os.path.dirname(client.cache.package_layout(ref).system_reqs())
        # create package to populate system_reqs folder
        self.assertFalse(os.path.exists(system_reqs_path))
        client.run("create . user/channel")
        self.assertIn("*+Running system requirements+*", client.out)
        self.assertTrue(os.path.exists(system_reqs_path))
        # a new build must not remove or re-run
        client.run("create . user/channel")
        self.assertNotIn("*+Running system requirements+*", client.out)
        self.assertTrue(os.path.exists(system_reqs_path))
        # remove system_reqs global
        client.run("remove --system-reqs Test/0.1@user/channel")
        self.assertIn("Cache system_reqs from Test/0.1@user/channel has been removed",
                      client.out)
        self.assertFalse(os.path.exists(system_reqs_path))
        # re-create system_reqs folder: removal forces system_requirements()
        # to run again
        client.run("create . user/channel")
        self.assertIn("*+Running system requirements+*", client.out)
        self.assertTrue(os.path.exists(system_reqs_path))
        # Wildcard system_reqs removal
        ref_other = ConanFileReference.loads("Test/0.1@user/channel_other")
        system_reqs_path_other = os.path.dirname(client.cache.package_layout(ref_other).system_reqs())
        client.run("create . user/channel_other")
        client.run("remove --system-reqs '*'")
        self.assertIn("Cache system_reqs from Test/0.1@user/channel has been removed",
                      client.out)
        self.assertIn("Cache system_reqs from Test/0.1@user/channel_other has been removed",
                      client.out)
        self.assertFalse(os.path.exists(system_reqs_path))
        self.assertFalse(os.path.exists(system_reqs_path_other))
        # Check that wildcard isn't triggered randomly
        client.run("create . user/channel_other")
        client.run("remove --system-reqs Test/0.1@user/channel")
        self.assertIn("Cache system_reqs from Test/0.1@user/channel has been removed",
                      client.out)
        self.assertNotIn("Cache system_reqs from Test/0.1@user/channel_other has been removed",
                         client.out)
        self.assertFalse(os.path.exists(system_reqs_path))
        self.assertTrue(os.path.exists(system_reqs_path_other))
        # Check partial wildcard
        client.run("create . user/channel")
        client.run("remove --system-reqs Test/0.1@user/channel_*")
        self.assertNotIn("Cache system_reqs from Test/0.1@user/channel has been removed",
                         client.out)
        self.assertIn("Cache system_reqs from Test/0.1@user/channel_other has been removed",
                      client.out)
        self.assertTrue(os.path.exists(system_reqs_path))
        self.assertFalse(os.path.exists(system_reqs_path_other))
    def test_invalid_remove_reqs(self):
        """Error handling of 'conan remove --system-reqs': missing pattern,
        unknown reference, and the unsupported -p combination."""
        client = TestClient()
        with six.assertRaisesRegex(self, Exception,
                                   "ERROR: Please specify a valid pattern or reference to be cleaned"):
            client.run("remove --system-reqs")
        # wrong file reference should be treated as error
        with six.assertRaisesRegex(self, Exception, "ERROR: Unable to remove system_reqs: "
                                                    "foo/version@bar/testing does not exist"):
            client.run("remove --system-reqs foo/version@bar/testing")
        # package is not supported with system_reqs
        with six.assertRaisesRegex(self, Exception, "ERROR: '-t' and '-p' parameters "
                                                    "can't be used at the same time"):
            client.run("remove --system-reqs foo/bar@foo/bar "
                       "-p f0ba3ca2c218df4a877080ba99b65834b9413798")
    def test_permission_denied_remove_system_reqs(self):
        """A non-writable system_reqs folder must produce a friendly error
        message instead of a traceback, and leave the folder untouched."""
        ref = ConanFileReference.loads("Test/0.1@user/channel")
        client = TestClient()
        files = {'conanfile.py': base_conanfile.replace("%GLOBAL%", "")}
        client.save(files)
        system_reqs_path = os.path.dirname(client.cache.package_layout(ref).system_reqs())
        # create package to populate system_reqs folder
        self.assertFalse(os.path.exists(system_reqs_path))
        client.run("create . user/channel")
        self.assertIn("*+Running system requirements+*", client.out)
        self.assertTrue(os.path.exists(system_reqs_path))
        # remove write permission
        current = stat.S_IMODE(os.lstat(system_reqs_path).st_mode)
        os.chmod(system_reqs_path, current & ~stat.S_IWRITE)
        # friendly message for permission error
        with six.assertRaisesRegex(self, Exception, "ERROR: Unable to remove system_reqs:"):
            client.run("remove --system-reqs Test/0.1@user/channel")
        self.assertTrue(os.path.exists(system_reqs_path))
        # restore write permission so the temporal folder can be deleted later
        os.chmod(system_reqs_path, current | stat.S_IWRITE)
    def test_duplicate_remove_system_reqs(self):
        """Removing system_reqs twice in a row must succeed both times
        (second removal of a non-existent folder is not an error)."""
        ref = ConanFileReference.loads("Test/0.1@user/channel")
        client = TestClient()
        files = {'conanfile.py': base_conanfile.replace("%GLOBAL%", "")}
        client.save(files)
        system_reqs_path = os.path.dirname(client.cache.package_layout(ref).system_reqs())
        # create package to populate system_reqs folder
        self.assertFalse(os.path.exists(system_reqs_path))
        client.run("create . user/channel")
        self.assertIn("*+Running system requirements+*", client.out)
        self.assertTrue(os.path.exists(system_reqs_path))
        # a new build must not remove or re-run
        client.run("create . user/channel")
        self.assertNotIn("*+Running system requirements+*", client.out)
        self.assertTrue(os.path.exists(system_reqs_path))
        # remove system_reqs global
        client.run("remove --system-reqs Test/0.1@user/channel")
        self.assertIn("Cache system_reqs from Test/0.1@user/channel has been removed",
                      client.out)
        self.assertFalse(os.path.exists(system_reqs_path))
        # try to remove system_reqs global again
        client.run("remove --system-reqs Test/0.1@user/channel")
        self.assertIn("Cache system_reqs from Test/0.1@user/channel has been removed",
                      client.out)
        self.assertFalse(os.path.exists(system_reqs_path))
        # re-create system_reqs folder
        client.run("create . user/channel")
        self.assertIn("*+Running system requirements+*", client.out)
        self.assertTrue(os.path.exists(system_reqs_path))
| |
"""
The form module contains the main form, field, group and sequence classes
"""
import re
import warnings
from peak.util.proxies import ObjectWrapper
from webob.multidict import UnicodeMultiDict
import schemaish, validatish
from formish import util
from dottedish import dotted, unflatten, set as dottedish_set
from formish import validation
from formish import widgets
from formish.renderer import _default_renderer
UNSET = object()
def mroattrs(cls, attr):
    """
    Yield the distinct values of a class attribute along the MRO, starting
    with (and including) the given class and ordered by specificity
    (deepest subclass first). Falsy and repeated values are skipped.
    """
    emitted = set()
    for klass in cls.__mro__:
        candidate = getattr(klass, attr, None)
        if candidate and candidate not in emitted:
            emitted.add(candidate)
            yield candidate
def container_factory(parent_key, item_key):
    """
    Return the empty container appropriate for *item_key*: a list when the
    key is numeric (a sequence index), otherwise a dict. *parent_key* is
    unused but required by the dottedish factory signature.
    """
    return [] if item_key.isdigit() else {}
def is_int(v):
    """
    Return True if *v* can be converted to an int, False otherwise.

    (The previous docstring claimed an error was raised; this function has
    always returned a bool.) TypeError is also caught so non-string inputs
    such as None are reported as "not an int" rather than raising.
    """
    try:
        int(v)
        return True
    except (ValueError, TypeError):
        return False
class Action(object):
    """
    An action that can be added to a form.

    :arg name: a valid html id used to look the action up
    :arg value: the 'value' of the submit button, i.e. the text people see
    :arg callback: a callable with the signature (request, form, *args)
    """
    def __init__(self, name=None, value=None, callback=None):
        # Guard first: a truthy name must be a valid identifier.
        if name and not util.valid_identifier(name):
            raise validation.FormError('Invalid action name %r.'% name)
        self.name = name
        self.value = value
        self.callback = callback
def _cssname(self):
""" Returns a hyphenated identifier using the form name and field name """
if self.form.name:
return '%s-%s'% (self.form.name, '-'.join(self.name.split('.')))
return '%s'% ('-'.join(self.name.split('.')))
def _classes(self):
    """ Works out the space-separated list of css classes that should be
    applied to the field: its type/widget lineage, required/error flags and
    any widget-supplied class. """
    schema_types = mroattrs(self.attr.__class__,'type')
    widget_types = mroattrs(self.widget.widget.__class__,'type')
    # Collapse digits and '*' in the css name to 'n' so every item of a
    # sequence shares one class. Raw string: '\*' in a plain string is an
    # invalid escape sequence in Python 3.
    classes = ['field',re.sub(r'[0-9\*]+','n',_cssname(self))]
    for t in schema_types:
        classes.append('type-%s'%t.lower())
    for t in widget_types:
        classes.append('widget-%s'%t.lower())
    if self.required:
        classes.append('required')
    if self.widget.css_class is not None:
        classes.append(self.widget.css_class)
    # self.error is a TemplatedString; str() of it is '' when no error.
    if str(self.error):
        classes.append('error')
    if getattr(self,'contains_error',None):
        classes.append('contains-error')
    return ' '.join(classes)
def starify(name):
    """
    Replace any integer segments in a dotted key with stars. Used when
    applying defaults and widgets to sequence item fields.
    """
    segments = ['*' if is_int(segment) else segment
                for segment in name.split('.')]
    return '.'.join(segments)
def fall_back_renderer(renderer, name, widget, vars):
    """
    Try to find the template in the widget's own directory first, then fall
    back to the top-level formish directory. This allows a field level widget
    to override its container by including the changed version in the widgets
    directory with the same name.
    """
    # 'import mako' alone does not guarantee the 'exceptions' submodule is
    # loaded; import it explicitly before referencing mako.exceptions below.
    import mako.exceptions
    try:
        return renderer('/formish/widgets/%s/%s.html'%(widget,name), vars)
    except mako.exceptions.TopLevelLookupException:
        return renderer('/formish/%s.html'%(name), vars)
class TemplatedString(object):
    """
    A callable, templated string: stringifies to its value (empty string for
    falsy values) and, when called, renders the owning field's template for
    the named attribute.
    """
    def __init__(self, obj, attr_name, val):
        self.obj = obj
        self.attr_name = attr_name
        self.val = val

    def __str__(self):
        return unicode(self.val) if self.val else ''

    # repr intentionally mirrors str.
    __repr__ = __str__

    def __nonzero__(self):
        # Python 2 truthiness hook: truthy iff the wrapped value is.
        return bool(self.val)

    def __call__(self):
        widget_type, widget = self.obj.widget.template.split('.')
        template_name = '%s/%s' % (widget_type, self.attr_name)
        return fall_back_renderer(self.obj.form.renderer, template_name,
                                  widget, {'field': self.obj})
class RenderableProperty(object):
    """
    Property descriptor whose read access wraps the underlying value in a
    RenderableObjectWrapper, so templates can both use and call it.
    """
    def __init__(self, property_name, getter, setter=None, deleter=None):
        self.property_name = property_name
        self.getter = getter
        self.setter = setter
        self.deleter = deleter

    def __get__(self, instance, owner):
        value = self.getter(instance)
        return RenderableObjectWrapper(value, instance, self.property_name)

    def __set__(self, instance, value):
        self.setter(instance, value)

    def __delete__(self, instance):
        self.deleter(instance)
class RenderableObjectWrapper(ObjectWrapper):
    """
    Proxy object that, when called, renders itself using a template. The name
    of the template is derived from the form item and the name of the property.
    """

    # Non-proxied attributes: declaring them on the class lets ObjectWrapper
    # store assignments on the wrapper itself rather than the proxied object.
    # The double underscore triggers per-class name mangling.
    __form_item = None
    __property_name = None

    def __init__(self, obj, form_item, property_name):
        ObjectWrapper.__init__(self, obj)
        self.__form_item = form_item
        self.__property_name = property_name

    def __call__(self):
        # XXX This is a hack to make it work for a Form property. A Form
        # doesn't look enough like any other form item right now to work
        # otherwise.
        if isinstance(self.__form_item, Form):
            return self.__form_item.renderer(
                '/formish/form/%s.html' % self.__property_name,
                {'form': self.__form_item})
        # So, we're a field (doesn't seem to support container form items yet).
        widget_type, widget = self.__form_item.widget.template.split('.')
        name = '%s/%s'%(widget_type,self.__property_name)
        vars = {'field':self.__form_item}
        return fall_back_renderer(self.__form_item.form.renderer, name, widget, vars)
class Field(object):
    """
    A wrapper for a schema field type that includes form information.

    The Schema Type Attribute does not have any bindings to the form library,
    it can be used on its own. We bind the Schema Attribute to a Field in
    order to include form related information.

    :method __call__: returns a serialisation for this field using the form's renderer - read only
    """

    # Template discriminator used by renderers.
    type = 'field'

    def __init__(self, name, attr, form):
        """
        :arg name: Name for the field
        :arg attr: Schema attr to bind to the field
        :type attr: schemaish.attr.*
        :param form: The form the field belongs to.
        :type form: formish.Form instance.
        """
        self.name = name
        # Last segment of the dotted name, e.g. 'age' for 'person.age'.
        self.nodename = name.split('.')[-1]
        self.attr = attr
        self.form = form

    def __repr__(self):
        return 'formish.Field(name=%r, attr=%r)'% (self.name, self.attr)

    @property
    def title(self):
        """ The Field schema's title - derived from name if not specified """
        try:
            # A per-field override registered on the form wins.
            return self.form.get_item_data(self.name,'title')
        except KeyError:
            if self.attr.title is not None:
                return self.attr.title
            else:
                return util.title_from_name(self.name.split('.')[-1])

    def description():
        # Function-namespace idiom: builds a RenderableProperty that ends up
        # bound as the 'description' class property just below.
        def get(self):
            try:
                return self.form.get_item_data(self.name, 'description')
            except KeyError:
                return self.attr.description
        return RenderableProperty('description', get)
    description = description()

    @property
    def cssname(self):
        """ cssname identifier for the field """
        return _cssname(self)

    @property
    def classes(self):
        """ Works out a list of classes that should be applied to the field """
        return _classes(self)

    @property
    def value(self):
        """Convert the request_data to a value object for the form or None."""
        if '*' in self.name:
            # '*' marks a sequence template field: serialise its defaults.
            return self.widget.to_request_data(self, self.defaults)
        return self.form.request_data.get(self.name, None)

    @property
    def required(self):
        """ Does this field have a Not Empty validator of some sort """
        return validatish.validation_includes(self.attr.validator, validatish.Required)

    @property
    def defaults(self):
        """Get the defaults from the form."""
        if '*' not in self.name:
            defaults = self.form.defaults.get(self.name, None)
        else:
            # Template ('*') fields read their default from the item data.
            defaults = self.form.get_item_data(self.name, 'default', None)
        return defaults

    @property
    def error(self):
        """ Lazily get the error from the form.errors when needed """
        error = self.form.errors.get(self.name, None)
        if error is not None:
            val = str(error)
        else:
            val = ''
        return TemplatedString(self, 'error', val)

    def _get_errors(self):
        """ Lazily get the error from the form.errors when needed """
        return self.form.errors.get(self.name, None)

    def _set_errors(self, v):
        self.form.errors[self.name] = v

    errors = property(_get_errors, _set_errors)

    @property
    def widget(self):
        """ return the fields widget bound with extra params. """
        # Loop on the name to work out if any '*' widgets are used
        try:
            widget_type = self.form.get_item_data(starify(self.name),'widget')
        except KeyError:
            # No widget registered: pick a default by schema type and cache
            # the choice on the form.
            if self.attr.type == 'Boolean':
                if self.required is True:
                    widget_type = widgets.Checkbox()
                else:
                    widget_type = widgets.RadioChoice(options = [(True,'True'),(False,'False')], none_option=(None,'None'))
            elif self.attr.type == 'File':
                widget_type = widgets.FileUpload()
            else:
                widget_type = widgets.Input()
            self.form.set_item_data(starify(self.name),'widget',widget_type)
        return BoundWidget(widget_type, self)

    @property
    def contains_error(self):
        """ Check to see if any child elements have errors """
        for k in self.form.errors.keys():
            if k != self.name and k.startswith(self.name):
                return True
        return False

    @property
    def contained_errors(self):
        # (dotted-subkey, error) pairs for every error below this field.
        contained_errors = []
        for k in self.form.errors.keys():
            if k.startswith(self.name):
                contained_errors.append( (k[len(self.name)+1:],self.form.errors[k]) )
        return contained_errors

    def __call__(self):
        """ returns a serialisation for this field using the form's renderer """
        widget_type, widget = self.widget.template.split('.')
        renderer = self.form.renderer
        name = 'field/main'
        vars = {'field':self}
        return fall_back_renderer(renderer, name, widget, vars)

    def label(self):
        widget_type, widget = self.widget.template.split('.')
        """ returns the templated title """
        # NOTE(review): the string above follows a statement so it is not a
        # real docstring; left in place to keep the code unchanged.
        renderer = self.form.renderer
        name = 'field/label'
        vars = {'field':self}
        return fall_back_renderer(renderer, name, widget, vars)

    def seqdelete(self):
        widget_type, widget = self.widget.template.split('.')
        """ creates a seq delete hook if this is an item in a updateable sequence """
        parentkey = '.'.join(self.name.split('.')[:-1])
        if not parentkey:
            return ''
        parent = self.form.get_field(parentkey)
        # Only render the hook when the parent sequence allows add/remove.
        if getattr(parent.widget,'addremove',False) == False:
            return ''
        renderer = self.form.renderer
        name = 'field/seqdelete'
        vars = {'field':self}
        return fall_back_renderer(renderer, name, widget, vars)

    def seqgrab(self):
        widget_type, widget = self.widget.template.split('.')
        """ creates a seq grab hook if this is an item in a updateable sequence """
        parentkey = '.'.join(self.name.split('.')[:-1])
        if not parentkey:
            return ''
        parent = self.form.get_field(parentkey)
        # Only render the hook when the parent sequence is sortable.
        if getattr(parent.widget,'sortable',False) == False:
            return ''
        renderer = self.form.renderer
        name = 'field/seqgrab'
        vars = {'field':self}
        return fall_back_renderer(renderer, name, widget, vars)

    def inputs(self):
        """ returns the templated widget """
        widget_type, widget = self.widget.template.split('.')
        renderer = self.form.renderer
        name = 'field/inputs'
        vars = {'field':self}
        return fall_back_renderer(renderer, name, widget, vars)
class CollectionFieldsWrapper(ObjectWrapper):
    """
    Allow fields attr of a form to be accessed (as a generator) but also callable
    """

    # Declared on the class so ObjectWrapper stores the attribute on the
    # wrapper itself rather than the proxied iterator.
    collection = None

    def __init__(self, collection):
        # Proxy an iterator over the collection's lazily-bound child fields.
        ObjectWrapper.__init__(self, iter(collection.collection_fields()))
        self.collection = collection

    def __call__(self):
        widget_type, widget = self.collection.widget.template.split('.')
        renderer = self.collection.form.renderer
        name = '%s/fields'%widget_type
        vars = {'field':self.collection}
        return fall_back_renderer(renderer, name, widget, vars)
class Collection(object):
    """
    A wrapper for a schema group type that includes form information.

    The Schema structure does not have any bindings to the form library, it
    can be used on its own. We bind the schema Structure Attribute to a Group
    which includes form information.
    """

    # Overridden by subclasses ('group' or 'sequence').
    type = None

    def __init__(self, name, attr, form):
        """
        :arg name: Name for the Collection
        :arg attr: Schema attr to bind to the field
        :type attr: schemaish.attr.*
        :param form: The form the field belongs to.
        :type form: formish.Form instance.
        """
        self.name = name
        if name is not None:
            self.nodename = name.split('.')[-1]
        else:
            self.nodename = ''
        self.attr = attr
        self.form = form
        # Cache of already-bound child fields, keyed by attribute name.
        self._fields = {}
        # Construct a title
        self.title = self.attr.title
        if self.title is None and name is not None:
            self.title = util.title_from_name(self.name.split('.')[-1])

    @property
    def template_type(self):
        """ Returns the template type to use for this item """
        if self.attr.type == 'Structure':
            name = 'structure'
        elif self.attr.type == 'Sequence' and self.widget.type == 'SequenceDefault':
            name = 'sequence'
        else:
            name = 'field'
        return name

    def description():
        # Function-namespace idiom building the renderable 'description'
        # class property bound just below.
        def get(self):
            return self.attr.description
        return RenderableProperty('description', get)
    description = description()

    @property
    def cssname(self):
        """ Hyphenated css identifier for this collection """
        return _cssname(self)

    @property
    def classes(self):
        """
        Works out a list of classes that can be applied to the field """
        return _classes(self)

    @property
    def value(self):
        """Convert the request_data to a value object for the form or None."""
        return self.form.request_data.get(self.name, [''])

    @property
    def required(self):
        """ Does this field have a Not Empty validator of some sort """
        return validatish.validation_includes(self.attr.validator, validatish.Required)

    @property
    def defaults(self):
        """Get the defaults from the form."""
        defaults = self.form.defaults.get(self.name, None)
        return defaults

    @property
    def error(self):
        """ Lazily get the error from the form.errors when needed """
        val = self.form.errors.get(self.name, None)
        return TemplatedString(self, 'error', val)

    def _get_errors(self):
        """ Lazily get the error from the form.errors when needed """
        return self.form.errors.get(self.name, None)

    def _set_errors(self, v):
        self.form.errors[self.name] = v

    errors = property(_get_errors, _set_errors)

    @property
    def contains_error(self):
        """ Check to see if any child elements have errors """
        for k in self.form.errors.keys():
            if k != self.name and k.startswith(self.name):
                return True
        return False

    @property
    def contained_errors(self):
        # (dotted-subkey, error) pairs for every error below this collection.
        contained_errors = []
        for k in self.form.errors.keys():
            if k.startswith(self.name):
                contained_errors.append( (k[len(self.name)+1:],self.form.errors[k]) )
        return contained_errors

    @property
    def widget(self):
        """ return the fields widget bound with extra params. """
        try:
            w = self.form.get_item_data(starify(self.name),'widget')
            # Item data may hold a raw widget or an already-bound one.
            if not isinstance(w, BoundWidget):
                widget_type = BoundWidget(self.form.get_item_data(starify(self.name),'widget'),self)
            else:
                widget_type = w
        except KeyError:
            # No widget registered: fall back to the structural default and
            # cache it on the form.
            if self.type == 'group':
                widget_type = BoundWidget(widgets.StructureDefault(),self)
            else:
                widget_type = BoundWidget(widgets.SequenceDefault(),self)
            self.form.set_item_data(starify(self.name),'widget',widget_type)
        return widget_type

    def get_field(self, name):
        """ recursively get dotted field names """
        segments = name.split('.')
        if segments[0] == '*':
            # Bind '*' to a fake field.
            field = self.bind('*', self.attr)
        else:
            # Find named field.
            for field in self.fields:
                if field.name.split('.')[-1] == segments[0]:
                    break
            else:
                field = None
        # Raise a nice error if the field was not found.
        if not field:
            raise KeyError('%s %r has no field %r' % (self.__class__.__name__,
                                                      self.name, segments[0]))
        # Return field or recurse.
        if len(segments) == 1:
            return field
        else:
            return field.get_field('.'.join(segments[1:]))

    def __getitem__(self, key):
        return FormAccessor(self.form, '%s.%s'% (self.name, key))

    @property
    def attrs(self):
        """ The schemaish attrs below this collection """
        return self.attr.attrs

    def collection_fields(self):
        # Lazily bind each (name, attr) schema pair to a form field/group.
        for attr in self.attrs:
            yield self.bind(attr[0], attr[1])

    @property
    def fields(self):
        """
        Iterate through the fields, lazily bind the schema to the fields
        before returning.
        """
        return CollectionFieldsWrapper(self)

    @property
    def allfields(self):
        # Flattened, depth-first list of all leaf fields below this node.
        fields = []
        for field in self.fields:
            if hasattr(field,'allfields'):
                fields.extend( field.allfields )
            else:
                fields.append(field)
        return fields

    def bind(self, attr_name, attr):
        """
        return cached bound schema as a field; Otherwise bind the attr to a
        Group or Field as appropriate and store on the _fields cache

        :param attr_name: Form Field/Group identifier
        :type attr_name: Python identifier string
        :param attr: Attribute to bind
        :type attr: Schema attribute
        """
        try:
            return self._fields[attr_name]
        except KeyError:
            if self.name is None:
                keyprefix = attr_name
            else:
                keyprefix = '%s.%s'% (self.name, attr_name)
            if isinstance(attr, schemaish.Sequence):
                bound_field = Sequence(keyprefix, attr, self.form)
            elif isinstance(attr, schemaish.Structure):
                bound_field = Group(keyprefix, attr, self.form)
            else:
                bound_field = Field(keyprefix, attr, self.form)
            self._fields[attr_name] = bound_field
            return bound_field

    def __call__(self):
        """ returns a serialisation for this field using the form's renderer """
        widget_type, widget = self.widget.template.split('.')
        renderer = self.form.renderer
        name = '%s/main'%widget_type
        vars = {'field':self}
        return fall_back_renderer(renderer, name, widget, vars)

    def label(self):
        """ returns the templated title """
        widget_type, widget = self.widget.template.split('.')
        renderer = self.form.renderer
        name = '%s/label'%widget_type
        vars = {'field':self}
        return fall_back_renderer(renderer, name, widget, vars)

    def seqgrab(self):
        widget_type, widget = self.widget.template.split('.')
        """ creates a seq grab hook if this is an item in a updateable sequence """
        parentkey = '.'.join(self.name.split('.')[:-1])
        if not parentkey:
            return ''
        parent = self.form.get_field(parentkey)
        # Only render the hook when the parent sequence is sortable.
        if getattr(parent.widget,'sortable',False) == False:
            return ''
        renderer = self.form.renderer
        name = 'field/seqgrab'
        vars = {'field':self}
        return fall_back_renderer(renderer, name, widget, vars)

    def seqdelete(self):
        widget_type, widget = self.widget.template.split('.')
        """ creates a seq delete hook if this is an item in a updateable sequence """
        parentkey = '.'.join(self.name.split('.')[:-1])
        if not parentkey:
            return ''
        parent = self.form.get_field(parentkey)
        # Only render the hook when the parent sequence allows add/remove.
        if getattr(parent.widget,'addremove',False) == False:
            return ''
        renderer = self.form.renderer
        name = 'field/seqdelete'
        vars = {'field':self}
        return fall_back_renderer(renderer, name, widget, vars)

    def inputs(self):
        """ returns the templated widget """
        widget_type, widget = self.widget.template.split('.')
        renderer = self.form.renderer
        name = '%s/inputs'%widget_type
        vars = {'field':self}
        return fall_back_renderer(renderer, name, widget, vars)

    def __repr__(self):
        return 'formish.%s(name=%r, attr=%r)'% (self.type.title(), self.name, self.attr)
class Group(Collection):
    """
    A group is a basic collection with a different template: it renders
    using the 'structure' templates.
    """
    type = 'group'
    template = 'structure'
class Sequence(Collection):
    """
    A sequence is a collection with a variable number of fields depending on
    request data, default data or the widget's min/max values.
    """
    type = 'sequence'
    template = 'sequence'

    @property
    def num_fields(self):
        # Item count: from defaults before a submission, from the raw
        # request data afterwards.
        if not self.form._request_data:
            if self.defaults is not None:
                num_fields = len(self.defaults)
            else:
                num_fields = 0
        else:
            num_fields = len(self.form._request_data.get(self.name, []))
        return num_fields

    def collection_fields(self):
        """
        For sequences we check to see if the name is numeric. As names cannot be numeric normally, the first iteration loops
        on a fields values and spits out a
        """
        # Work out how many fields are in the sequence.
        # XXX We can't use self.form.request_data here because to build the
        # request_data we need to recurse throught the fields ... which calls
        # Sequence.fields ... which tries to build the request data ... which
        # calls Sequence.fields, etc, etc. Bang!
        if not self.form._request_data:
            if self.defaults is not None:
                num_fields = len(self.defaults)
            else:
                num_fields = 0
            # Index just past the last non-empty default, so that
            # min_empty_start_fields can top the sequence up with slots that
            # are genuinely empty.
            num_nonempty_fields = 0
            if self.widget is not None:
                empty_checker = self.widget.empty_checker
                if self.defaults is not None:
                    for n,d in enumerate(self.defaults):
                        if not empty_checker(d):
                            num_nonempty_fields=n+1
            min_start_fields = None
            min_empty_start_fields = None
            if self.widget is not None:
                min_start_fields = getattr(self.widget, 'min_start_fields', None)
                min_empty_start_fields = getattr(self.widget, 'min_empty_start_fields', None)
            if min_start_fields is not None and num_fields < min_start_fields:
                num_fields = min_start_fields
            if min_empty_start_fields is not None and (num_fields - num_nonempty_fields) < min_empty_start_fields:
                num_fields = num_nonempty_fields + min_empty_start_fields
        else:
            num_fields = len(self.form._request_data.get(self.name, []))
        # xrange: Python 2. Each numeric position is bound to the item attr.
        for num in xrange(num_fields):
            field = self.bind(num, self.attr.attr)
            yield field

    @property
    def template(self):
        # Shadows the class attribute above: a '*' pseudo-field used as the
        # client-side template for newly added items.
        return self.bind('*', self.attr.attr)

    def metadata(self):
        """ returns the metadata """
        widget_type, widget = self.widget.template.split('.')
        renderer = self.form.renderer
        name = '%s/metadata'%widget_type
        vars = {'field':self}
        return fall_back_renderer(renderer, name, widget, vars)
class BoundWidget(object):
    """
    Binds a widget to its field so the widget can render itself. Attribute
    reads and writes are delegated to the wrapped widget; 'widget' and
    'field' themselves live in the instance dict (set via __dict__ to dodge
    the __setattr__ delegation).
    """
    def __init__(self, widget, field):
        if hasattr(field.form, 'empty'):
            widget.empty = field.form.empty
        self.__dict__['widget'] = widget
        self.__dict__['field'] = field

    def __getattr__(self, name):
        return getattr(self.widget, name)

    def __setattr__(self, name, value):
        setattr(self.widget, name, value)

    def __call__(self, **kw):
        widget_type, widget = self.widget.template.split('.')
        if self.widget.readonly == True:
            template = 'readonly'
        else:
            template = 'widget'
        vars = {'field':self.field}
        vars.update(kw)
        path = '/formish/widgets/%s/%s.html'%(widget, template)
        return self.field.form.renderer(path, vars)

    def __repr__(self):
        return 'BoundWidget(widget=%r, field=%r)'%(self.widget, self.field)
class FormFieldsWrapper(ObjectWrapper):
    """
    Allow fields attr of a form to be accessed (as a generator) but also callable
    """

    # Declared on the class so ObjectWrapper stores the attribute on the
    # wrapper itself rather than the proxied generator.
    form = None

    def __init__(self, form):
        self.form = form
        ObjectWrapper.__init__(self, form.structure.fields)

    def keys(self):
        # Dotted names of the form's top-level fields.
        keys = []
        for f in self.form.fields:
            keys.append(f.name)
        return keys

    def __call__(self,fields=None):
        return self.form.renderer('/formish/form/fields.html', {'form':self.form,'fields':fields})
def tryint(v):
    """Return *v* converted to an int when possible, otherwise *v* unchanged.

    TypeError is also caught so non-string, non-numeric values pass through
    untouched instead of raising.
    """
    try:
        return int(v)
    except (ValueError, TypeError):
        return v
class ErrorDict(dict):
    """
    Dict of errors keyed by dotted field name; iteration follows the form's
    field order (via form.allfields) instead of insertion order.
    """
    def __init__(self, form):
        # dict.__init__ is deliberately not called: the dict starts empty.
        self.form = form

    def keys(self):
        return list(self.__iter__())

    def iteritems(self):
        for key in self:
            yield (key, self[key])

    def items(self):
        return list(self.iteritems())

    def __iter__(self):
        for field in self.form.allfields:
            if field.name in self:
                fieldvalue = self[field.name]
                # NOTE(review): this yields the stored VALUE rather than
                # field.name, which looks inconsistent with keys()/iteritems()
                # above (they treat yielded items as keys) — confirm intent.
                yield fieldvalue
                if isinstance(fieldvalue, dict):
                    keys = []
                    for k in fieldvalue.keys():
                        keys.append( [tryint(k) for k in k.split('.')] )
                    keys.sort()
                    for k in keys:
                        # NOTE(review): k may contain ints (from tryint) and
                        # str.join raises TypeError on non-string items —
                        # verify with a nested numeric-key error.
                        yield '.'.join(k)
class Form(object):
"""
The definition of a form
The Form type is the container for all the information a form needs to
render and validate data.
"""
SUPPORTED_METHODS = ['get', 'post']
renderer = _default_renderer
_name = None
_request_data = None
base_classes = ['formish-form']
def __init__(self, structure, name=None, defaults=None, errors=None,
action_url=None, renderer=None, method='post',
add_default_action=True, include_charset=True,
empty=UNSET, error_summary=None,error_summary_message=None,classes=None):
"""
Create a new form instance
:arg structure: Schema Structure attribute to bind to the the form
:type structure: schemaish.Structure
:arg name: Optional form name used to identify multiple forms on the same page
:type name: str "valid html id"
:arg defaults: Default values for the form
:type defaults: dict
:arg errors: Errors to store on the form for redisplay
:type errors: dict
:arg action_url: Use if you don't want the form to post to itself
:type action_url: string "url or path"
:arg renderer: Something that returns a form serialization when called
:type renderer: callable
:arg method: Option method, default POST
:type method: string
:arg error_summary: None, 'message', 'list'
:type error_summary: string
"""
if method.lower() not in self.SUPPORTED_METHODS:
raise ValueError("method must be one of GET or POST")
# allow a single schema items to be used on a form
if not isinstance(structure, schemaish.Structure):
structure = schemaish.Structure([structure])
self.structure = Group(None, structure, self)
self.item_data = {}
self.name = name
if defaults is None:
defaults = self.structure.attr.default or {}
if errors is None:
errors = ErrorDict(self)
self.defaults = defaults
self.errors = errors
self._alert = None
self.error_summary = error_summary
self.error_summary_message = error_summary_message
self._actions = []
if classes is not None:
if isinstance(classes, basestring):
self.classes = self.base_classes + [classes]
else:
self.classes = self.base_classes + classes
else:
self.classes = list(self.base_classes)
if add_default_action:
self.add_action( None, 'Submit' )
self.action_url = action_url
if renderer is not None:
self.renderer = renderer
self.method = method
self.widget = widgets.StructureDefault()
self.include_charset = include_charset
if empty is not UNSET:
self.empty = empty
def alert():
    # Property-factory idiom: the temporary function builds the get/set
    # pair and is immediately replaced by its return value on the line
    # below, leaving ``alert`` as a RenderableProperty on the class.
    def get(self):
        # backing attribute set in __init__
        return self._alert
    def set(self, alert):
        self._alert = alert
    return RenderableProperty('alert', get, set)
alert = alert()
@staticmethod
def _error_warning():
    """Emit the deprecation warning for the legacy ``Form.error`` API.

    ``stacklevel=3`` points the warning at the caller of the deprecated
    property rather than at this helper.
    """
    message = ("Form.error has been deprecated and will change behaviour "
               "in the future. Please use Form.alert to display a general "
               "form error message.")
    warnings.warn(message, DeprecationWarning, stacklevel=3)
def _get_error(self):
    """Deprecated read access; warns and forwards to ``alert``."""
    self._error_warning()
    return self.alert

def _set_error(self, error):
    """Deprecated write access; warns and forwards to ``alert``."""
    self._error_warning()
    self.alert = error

# Legacy alias kept for backwards compatibility with old callers.
error = property(_get_error, _set_error)
def __repr__(self):
    """Build an eval-style representation, omitting unset optional state."""
    parts = ['%r'% self.structure.attr, 'name=%r'% self.name]
    if self.defaults._o != {}:
        parts.append('defaults=%r'% self.defaults._o)
    if self.errors != {}:
        parts.append('errors=%r'% self.errors)
    if self.action_url:
        parts.append('action_url=%r'% self.action_url)
    return 'formish.Form(%s)'% ', '.join(parts)
def add_action(self, name=None, value=None, callback=None):
    """
    Add an action callable to the form

    :arg name: The identifier for this action
    :type name: string
    :arg value: Used as the value of the action (for buttons, the value
        is the text shown on the button)
    :arg callback: A function to call if this action is triggered
    :type callback: callable
    """
    # Unnamed (default) actions may repeat; named ones must be unique.
    taken = [existing.name for existing in self._actions]
    if name and name in taken:
        raise ValueError('Action with name %r already exists.'% name)
    self._actions.append(Action(name, value, callback))
def action(self, request, *args):
    """
    Find and call the action callback for the action found in the request

    :arg request: request which is used to find the action and also
        passed through to the callback
    :type request: webob.Request
    :arg args: extra positional arguments passed through to the callback
    """
    if not self._actions:
        raise validation.NoActionError('The form does not have any actions')
    request_data = getattr(request, self.method.upper())
    # Dispatch to the first action whose name appears in the submitted
    # data; otherwise fall back to the first registered action.
    for candidate in self._actions:
        if candidate.name in request_data:
            return candidate.callback(request, self, *args)
    return self._actions[0].callback(request, self, *args)
def get_unvalidated_data(self, request_data, raise_exceptions=True, skip_read_only_defaults=False):
    """
    Convert the request object into a nested dict in the correct structure
    of the schema but without applying the schema's validation.

    :arg request_data: Webob style request data
    :arg raise_exceptions: Whether to raise exceptions or return errors
    """
    data = self.widget.from_request_data(
        self.structure, request_data,
        skip_read_only_defaults=skip_read_only_defaults)
    # Conversion records problems on self.errors rather than raising.
    error_count = len(self.errors.keys())
    if raise_exceptions and error_count:
        raise validation.FormError(
            'Tried to access data but conversion from request failed with %s errors (%s)'% \
            (error_count, self.errors))
    return data
def _get_request_data(self):
    """
    Retrieve previously set request_data or return the defaults in
    request_data format.
    """
    # Lazily build (and cache) request data from the defaults on first
    # access; always hand out a dotted wrapper.
    if self._request_data is None:
        self._request_data = dotted(
            self.widget.to_request_data(self.structure, self._defaults))
    return dotted(self._request_data)

def _set_request_data(self, request_data):
    """
    Assign raw request data to the form

    :arg request_data: raw request data (e.g. request.POST)
    :type request_data: Dictionary (dotted or nested or MultiDict)
    """
    self._request_data = dotted(request_data)

request_data = property(_get_request_data, _set_request_data)
def _set_defaults(self, data):
    """Assign default data and invalidate any cached request data."""
    self._defaults = data
    self._request_data = None

def _get_defaults(self):
    """Return the raw default data wrapped for dotted access."""
    return dotted(self._defaults)

defaults = property(_get_defaults, _set_defaults)
def _set_request(self, request):
    """
    Assign a webob request to the form, deriving the form's request data
    from it.

    :arg request: the incoming request; its GET or POST data (depending
        on ``self.method``) becomes the form's request data
    :type request: webob.Request
    """
    self._request = request
    request_data = getattr(request, self.method.upper())
    # Decode request data according to the request's charset.
    request_data = UnicodeMultiDict(request_data,
                                    encoding=util.get_post_charset(request))
    # Remove the sequence factory data from the request
    # ('*' keys are templates for dynamically-added sequence items).
    for k in request_data.keys():
        if '*' in k:
            request_data.pop(k)
    # We need the _request_data to be populated so sequences know how many
    # items they have (i.e. .fields method on a sequence uses the number of
    # values on the _request_data)
    # Convert request data to a dottedish friendly representation
    request_data = _unflatten_request_data(request_data)
    # NOTE: assigned twice on purpose -- pre_parse_incoming_request_data
    # runs while _request_data is already populated (see comment above),
    # then its result replaces it.
    self._request_data = dotted(request_data)
    self._request_data = dotted(self.widget.pre_parse_incoming_request_data(self.structure,request_data))

def _get_request(self):
    """Return the request previously assigned via the property setter."""
    return self._request

request = property(_get_request, _set_request)
def name_from_request(self, request):
    """Return the submitted form-name marker from the request data."""
    submitted = getattr(request, self.method.upper())
    return submitted.get('__formish_form__')
def validate(self, request, failure_callable=None, success_callable=None, skip_read_only_defaults=False, check_form_name=True):
    """
    Validate the form data in the request.

    By default, this method returns either a dict of data or raises an
    exception if validation fails. However, if either success_callable or
    failure_callable are provided then the appropriate callback will be
    called, and the callback's result will be returned instead.

    :arg request: the HTTP request
    :type request: webob.Request
    :arg failure_callable: Optional callback to call on failure.
    :arg success_callable: Optional callback to call on success.
    :returns: Python dict of converted and validated data.
    :raises: formish.FormError, raised on validation failure.
    """
    # Check this request was submitted by this form.
    self.request = request
    if check_form_name == True and (self.name != self.name_from_request(request)):
        raise Exception("request does not match form name")
    try:
        data = self._validate(request, skip_read_only_defaults=skip_read_only_defaults)
    except validation.FormError:
        if failure_callable is not None:
            return failure_callable(request, self)
        raise
    if success_callable is not None:
        return success_callable(request, data)
    return data
def _validate(self, request, skip_read_only_defaults=False):
    """
    Get the data without raising exceptions and then validate the data. If
    there are errors, raise them; otherwise return the data
    """
    # XXX Should this happen after the basic stuff has happened?
    # Reset errors; both conversion and schema validation repopulate it.
    self.errors = {}
    data = self.get_unvalidated_data(self._request_data, raise_exceptions=False, skip_read_only_defaults=skip_read_only_defaults)
    try:
        self.structure.attr.validate(data)
    # Python 2 except syntax; file predates py3.
    except schemaish.attr.Invalid, e:
        # Merge schema errors in without clobbering conversion errors.
        for key, value in e.error_dict.items():
            if key not in self.errors:
                self.errors[key] = value
    if len(self.errors.keys()) > 0:
        err_msg = 'Tried to access data but conversion from request failed with %s errors'
        raise validation.FormError(err_msg% (len(self.errors.keys())))
    return data
def set_item_data(self, key, name, value):
    """
    Allow the setting of certain attributes on item_data, a dictionary
    used to associate data with fields.

    :raises KeyError: for attribute names outside the allowed set
    """
    allowed = ('title', 'widget', 'description', 'default')
    if name not in allowed:
        raise KeyError('Cannot set data onto this attribute')
    if name == 'default' and '*' not in key:
        # Concrete defaults live on the form's defaults dict ...
        dottedish_set(self.defaults,key,value,container_factory=container_factory)
    else:
        # ... everything else (and wildcard defaults) goes in item_data.
        self.item_data.setdefault(key, {})[name] = value
def get_item_data(self, key, name, default=UNSET):
    """
    Access item data associated with a field key and an attribute name
    (e.g. title, widget, description).

    :raises KeyError: when no default is supplied and ``name`` is missing
    """
    entry = self.item_data.get(key, {})
    if default is UNSET:
        # No fallback supplied: a missing attribute is a hard error.
        return entry[name]
    return entry.get(name, default)
def get_item_data_values(self, name=None):
    """
    Get all of the item data values, optionally narrowed to the single
    attribute ``name`` where a field defines it.
    """
    result = dotted({})
    for key, attrs in self.item_data.items():
        if name is not None and name in attrs:
            result[key] = attrs[name]
        else:
            result[key] = attrs
    return result
def __getitem__(self, key):
    # Dictionary-style access returns an accessor bound to the field key,
    # e.g. form['x'].title = 'X' stores item data for field 'x'.
    return FormAccessor(self, key)
@property
def fields(self):
    """
    Return a generator that yields all of the fields at the top level of
    the form (e.g. if a field is a subsection or sequence, it will be up to
    the application to iterate that field's fields).
    """
    return FormFieldsWrapper(self)
def _has_upload_fields(self):
    """Return True if any field in the form is a file upload field."""
    return any(isinstance(f.attr, schemaish.File) for f in self.allfields)
@property
def allfields(self):
    """
    Flat list of every field in the form, recursing into any field that
    itself exposes an ``allfields`` attribute (subsections, sequences).
    """
    flattened = []
    for field in self.fields:
        if hasattr(field, 'allfields'):
            flattened.extend(field.allfields)
        else:
            flattened.append(field)
    return flattened
def get_field(self, name):
    """
    Get a field by dotted field name

    :arg name: Dotted name e.g. names.0.firstname; a leading '*' segment
        binds the wildcard template field instead of a concrete one.
    :returns: the matching field, or None if no segment matches
    """
    # XXX GET FIELD NEEDS TO CACHE THE * FIELDS
    segments = name.split('.')
    for field in self.fields:
        if segments[0] == '*':
            # Wildcard: bind a template field and recurse into the rest
            # of the dotted path if there is one.
            b = self.bind('*',field.attr)
            if len(segments) == 1:
                return b
            else:
                return b.get_field('.'.join(segments[1:]))
        # Compare only the last segment of the field's own dotted name.
        if field.name.split('.')[-1] == segments[0]:
            if len(segments) == 1:
                return field
            else:
                return field.get_field('.'.join(segments[1:]))
def __call__(self):
    """
    Calling the Form generates a serialisation using the form's renderer
    (the full form template).
    """
    return self.renderer('/formish/form/main.html', {'form':self})
def header(self):
    """ Return just the header part of the template """
    return self.renderer('/formish/form/header.html', {'form':self})
def footer(self):
    """ Return just the footer part of the template """
    return self.renderer('/formish/form/footer.html', {'form':self})
def metadata(self):
    """ Return just the metadata part of the template """
    return self.renderer('/formish/form/metadata.html', {'form':self})
def error_list(self):
    """ Return just the error list part of the template """
    return self.renderer('/formish/form/error_list.html', {'form':self})
def actions(self):
    """ Return just the actions part of the template """
    return self.renderer('/formish/form/actions.html', {'form':self})
def _unflatten_request_data(request_data):
    """
    Unflatten the request data into nested dicts and lists.
    """
    # Build an ordered list of unique keys ourselves: webob's MultiDict
    # yields a key once per value, and getall() below then fetches every
    # value for a key in one go. (If request_data is not ordered this is
    # fairly pointless, but harmless.)
    ordered_keys = []
    for key in request_data:
        if key not in ordered_keys:
            ordered_keys.append(key)
    pairs = ((key, request_data.getall(key)) for key in ordered_keys)
    return unflatten(pairs, container_factory=container_factory)
class FormAccessor(object):
    """
    Helps in setting item_data on a form

    :arg form: The form instance we're setting data on
    :arg key: The dotted key of the field we want to set/get an attribute on e.g. ['x.y']
    :arg prefix: A prefix used internally for recursion and allowing ['x']['y'] type access
    """

    def __init__(self, form, key, prefix=None):
        # Write through __dict__ directly: __setattr__ is overridden below
        # to forward onto the form's item data.
        if prefix is None:
            full_key = key
        else:
            full_key = '%s.%s'% (prefix, key)
        self.__dict__['form'] = form
        self.__dict__['key'] = full_key

    def __setattr__(self, name, value):
        self.form.set_item_data(self.key, name, value)

    def __getattr__(self, name):
        bound = self.form.get_field(self.key)
        if name == 'field':
            return bound
        return getattr(bound, name)

    def __getitem__(self, key):
        # Chained access extends the dotted key, e.g. form['x']['y'].
        return FormAccessor(self.form, key, prefix=self.key)

    def __call__(self):
        return self.form.get_field(self.key)()
| |
# encoding: utf-8
'''
A non-blocking REST API for Salt
================================
.. py:currentmodule:: salt.netapi.rest_tornado.saltnado
:depends: - tornado Python module
:configuration: All authentication is done through Salt's :ref:`external auth
<acl-eauth>` system which requires additional configuration not described
here.
In order to run rest_tornado with the salt-master
add the following to the Salt master config file.
.. code-block:: yaml
rest_tornado:
# can be any port
port: 8000
# address to bind to (defaults to 0.0.0.0)
address: 0.0.0.0
# socket backlog
backlog: 128
ssl_crt: /etc/pki/api/certs/server.crt
# no need to specify ssl_key if cert and key
# are in one single file
ssl_key: /etc/pki/api/certs/server.key
debug: False
disable_ssl: False
webhook_disable_auth: False
.. _rest_tornado-auth:
Authentication
--------------
Authentication is performed by passing a session token with each request.
Tokens are generated via the :py:class:`SaltAuthHandler` URL.
The token may be sent in one of two ways:
* Include a custom header named :mailheader:`X-Auth-Token`.
* Sent via a cookie. This option is a convenience for HTTP clients that
automatically handle cookie support (such as browsers).
.. seealso:: You can bypass the session handling via the :py:class:`RunSaltAPIHandler` URL.
Usage
-----
Commands are sent to a running Salt master via this module by sending HTTP
requests to the URLs detailed below.
.. admonition:: Content negotiation
This REST interface is flexible in what data formats it will accept as well
as what formats it will return (e.g., JSON, YAML, x-www-form-urlencoded).
* Specify the format of data in the request body by including the
:mailheader:`Content-Type` header.
* Specify the desired data format for the response body with the
:mailheader:`Accept` header.
Data sent in :http:method:`post` and :http:method:`put` requests must be in
the format of a list of lowstate dictionaries. This allows multiple commands to
be executed in a single HTTP request.
.. glossary::
lowstate
A dictionary containing various keys that instruct Salt which command
to run, where that command lives, any parameters for that command, any
authentication credentials, what returner to use, etc.
Salt uses the lowstate data format internally in many places to pass
command data between functions. Salt also uses lowstate for the
:ref:`LocalClient() <python-api>` Python API interface.
The following example (in JSON format) causes Salt to execute two commands::
[{
"client": "local",
"tgt": "*",
"fun": "test.fib",
"arg": ["10"]
},
{
"client": "runner",
"fun": "jobs.lookup_jid",
"jid": "20130603122505459265"
}]
Multiple commands in a Salt API request will be executed in serial, and there is
no guarantee that all commands will run. Meaning that if test.fib (from the
example above) had an exception, the API would still execute "jobs.lookup_jid".
Responses to these lowstates are an in-order list of dicts containing the
return data, a yaml response could look like::
- ms-1: true
ms-2: true
- ms-1: foo
ms-2: bar
In the event of an exception while executing a command the return for that lowstate
will be a string, for example if no minions matched the first lowstate we would get
a return like::
- No minions matched the target. No command was sent, no jid was assigned.
- ms-1: true
ms-2: true
.. admonition:: x-www-form-urlencoded
Sending JSON or YAML in the request body is simple and most flexible,
however sending data in urlencoded format is also supported with the
caveats below. It is the default format for HTML forms, many JavaScript
libraries, and the :command:`curl` command.
For example, the equivalent to running ``salt '*' test.ping`` is sending
``fun=test.ping&arg&client=local&tgt=*`` in the HTTP request body.
Caveats:
* Only a single command may be sent per HTTP request.
* Repeating the ``arg`` parameter multiple times will cause those
parameters to be combined into a single list.
Note, some popular frameworks and languages (notably jQuery, PHP, and
Ruby on Rails) will automatically append empty brackets onto repeated
parameters. E.g., ``arg=one``, ``arg=two`` will be sent as ``arg[]=one``,
``arg[]=two``. This is not supported; send JSON or YAML instead.
.. |req_token| replace:: a session token from :py:class:`~SaltAuthHandler`.
.. |req_accept| replace:: the desired response format.
.. |req_ct| replace:: the format of the request body.
.. |res_ct| replace:: the format of the response body; depends on the
:mailheader:`Accept` request header.
.. |200| replace:: success
.. |400| replace:: bad request
.. |401| replace:: authentication required
.. |406| replace:: requested Content-Type not available
.. |500| replace:: internal server error
'''
# pylint: disable=W0232
# Import Python libs
from __future__ import absolute_import
import time
import math
import fnmatch
import logging
from copy import copy
from collections import defaultdict
# pylint: disable=import-error
import yaml
import tornado.httpserver
import tornado.ioloop
import tornado.web
import tornado.gen
from tornado.concurrent import Future
from zmq.eventloop import ioloop, zmqstream
import salt.ext.six as six
# pylint: enable=import-error
# instantiate the zmq IOLoop (specialized poller)
ioloop.install()
# salt imports
import salt.netapi
import salt.utils
import salt.utils.event
from salt.utils.event import tagify
import salt.client
import salt.runner
import salt.auth
from salt.exceptions import EauthAuthenticationError
json = salt.utils.import_json()
logger = logging.getLogger()
# The clients rest_cherrypi supports. We want to mimic the interface, but not
# necessarily use the same API under the hood
# # all of these require coordinating minion stuff
# - "local" (done)
# - "local_async" (done)
# - "local_batch" (done)
# # master side
# - "runner" (done)
# - "wheel" (need async api...)
class SaltClientsMixIn(object):
    '''
    MixIn class to container all of the salt clients that the API needs
    '''
    # Class-level cache shared by every handler instance so the (relatively
    # expensive) salt client construction happens once per process.
    # TODO: load this proactively, instead of waiting for a request
    __saltclients = None

    @property
    def saltclients(self):
        # Lazily build the client-name -> callable map on first access.
        if SaltClientsMixIn.__saltclients is None:
            local_client = salt.client.get_local_client(mopts=self.application.opts)
            # TODO: refreshing clients using cachedict
            SaltClientsMixIn.__saltclients = {
                'local': local_client.run_job,
                # not the actual client we'll use.. but its what we'll use to get args
                'local_batch': local_client.cmd_batch,
                'local_async': local_client.run_job,
                'runner': salt.runner.RunnerClient(opts=self.application.opts).async,
                'runner_async': None,  # empty, since we use the same client as `runner`
                }
        return SaltClientsMixIn.__saltclients
# Header and cookie names used to carry the session token (mirrors the
# rest_cherrypy interface).
AUTH_TOKEN_HEADER = 'X-Auth-Token'
AUTH_COOKIE_NAME = 'session_id'
class TimeoutException(Exception):
    '''Set on a future when its wait on the event bus times out.'''
class Any(Future):
    '''
    Future that wraps other futures to "block" until one is done
    '''
    def __init__(self, futures):  # pylint: disable=E1002
        super(Any, self).__init__()
        for wrapped in futures:
            wrapped.add_done_callback(self.done_callback)

    def done_callback(self, future):
        # First completed future wins; ignore the rest so we never try to
        # set a result on an already-finished Future.
        if self.done():
            return
        self.set_result(future)
class EventListener(object):
    '''
    Class responsible for listening to the salt master event bus and updating
    futures. This is the core of what makes this async, this allows us to do
    non-blocking work in the main processes and "wait" for an event to happen
    '''

    def __init__(self, mod_opts, opts):
        self.mod_opts = mod_opts
        self.opts = opts
        self.event = salt.utils.event.get_event(
            'master',
            opts['sock_dir'],
            opts['transport'],
            opts=opts,
            listen=True,
            )
        # tag -> list of futures
        self.tag_map = defaultdict(list)
        # request_obj -> list of (tag, future)
        self.request_map = defaultdict(list)
        # map of future -> timeout_callback
        self.timeout_map = {}
        # Wire the event bus subscription socket into tornado's IO loop so
        # _handle_event_socket_recv fires as events arrive.
        self.stream = zmqstream.ZMQStream(
            self.event.sub,
            io_loop=tornado.ioloop.IOLoop.current(),
            )
        self.stream.on_recv(self._handle_event_socket_recv)

    def clean_timeout_futures(self, request):
        '''
        Remove all futures that were waiting for request `request` since it is done waiting
        '''
        if request not in self.request_map:
            return
        for tag, future in self.request_map[request]:
            # timeout the future
            self._timeout_future(tag, future)
            # remove the timeout
            if future in self.timeout_map:
                tornado.ioloop.IOLoop.current().remove_timeout(self.timeout_map[future])
                del self.timeout_map[future]
        del self.request_map[request]

    def get_event(self,
                  request,
                  tag='',
                  callback=None,
                  timeout=None
                  ):
        '''
        Get an event (async of course) return a future that will get it later
        '''
        # if the request finished, no reason to allow event fetching, since we
        # can't send back to the client
        if request._finished:
            future = Future()
            future.set_exception(TimeoutException())
            return future

        future = Future()
        if callback is not None:
            # Run the caller's callback on the IO loop once the future
            # resolves (either with an event or a TimeoutException).
            def handle_future(future):
                tornado.ioloop.IOLoop.current().add_callback(callback, future)
            future.add_done_callback(handle_future)
        # add this tag and future to the callbacks
        self.tag_map[tag].append(future)
        self.request_map[request].append((tag, future))

        if timeout:
            timeout_future = tornado.ioloop.IOLoop.current().call_later(timeout, self._timeout_future, tag, future)
            self.timeout_map[future] = timeout_future

        return future

    def _timeout_future(self, tag, future):
        '''
        Timeout a specific future
        '''
        if tag not in self.tag_map:
            return
        if not future.done():
            future.set_exception(TimeoutException())
        self.tag_map[tag].remove(future)
        # Drop empty tag buckets so the map doesn't grow unbounded.
        if len(self.tag_map[tag]) == 0:
            del self.tag_map[tag]

    def _handle_event_socket_recv(self, raw):
        '''
        Callback for events on the event sub socket
        '''
        mtag, data = self.event.unpack(raw[0], self.event.serial)
        # see if we have any futures that need this info:
        # Tags are prefix-matched, so a listener on '' sees every event.
        for tag_prefix, futures in six.iteritems(self.tag_map):
            if mtag.startswith(tag_prefix):
                for future in futures:
                    if future.done():
                        continue
                    future.set_result({'data': data, 'tag': mtag})
                    # NOTE(review): removes from the list being iterated --
                    # later futures in the same bucket may be skipped for
                    # this event; verify intent.
                    self.tag_map[tag_prefix].remove(future)
                    if future in self.timeout_map:
                        tornado.ioloop.IOLoop.current().remove_timeout(self.timeout_map[future])
                        del self.timeout_map[future]
# TODO: move to a utils function within salt-- the batching stuff is a bit tied together
def get_batch_size(batch, num_minions):
    '''
    Return the batch size that you should have

    :arg batch: either an absolute count (e.g. '3') or a percentage of the
        targeted minions (e.g. '10%')
    :type batch: string
    :arg num_minions: total number of minions being targeted
    :type num_minions: int
    :returns: int batch size, or None if ``batch`` could not be parsed
        (an error is printed in that case, preserving historic behaviour)
    '''
    def partition(pct):
        # Convert a percentage into an absolute number of minions.
        return pct / 100.0 * num_minions
    try:
        if '%' in batch:
            res = partition(float(batch.strip('%')))
            # Round a fractional result up so a positive percentage never
            # yields a batch of zero minions.
            if res < 1:
                return int(math.ceil(res))
            return int(res)
        return int(batch)
    except ValueError:
        # Best-effort diagnostics only -- callers historically receive None.
        # (Fixed: the two literals previously concatenated to "formof".)
        print(('Invalid batch data sent: {0}\nData must be in the form '
               'of %10, 10% or 3').format(batch))
class BaseSaltAPIHandler(tornado.web.RequestHandler, SaltClientsMixIn):  # pylint: disable=W0223
    # Supported response serializations; matched against the Accept header
    # in prepare(), first match wins.
    ct_out_map = (
        ('application/json', json.dumps),
        ('application/x-yaml', yaml.safe_dump),
    )

    def _verify_client(self, client):
        '''
        Verify that the client is in fact one we have
        '''
        if client not in self.saltclients:
            self.set_status(400)
            self.write("400 Invalid Client: Client not found in salt clients")
            self.finish()
            # NOTE(review): this finishes the response but does not signal
            # the caller to stop -- callers continue executing; verify.

    def initialize(self):
        '''
        Initialize the handler before requests are called
        '''
        # Lazily attach a single shared EventListener to the application on
        # the first request that needs it.
        if not hasattr(self.application, 'event_listener'):
            logger.critical('init a listener')
            self.application.event_listener = EventListener(
                self.application.mod_opts,
                self.application.opts,
            )

    @property
    def token(self):
        '''
        The token used for the request
        '''
        # find the token (cookie or headers); the header takes precedence
        if AUTH_TOKEN_HEADER in self.request.headers:
            return self.request.headers[AUTH_TOKEN_HEADER]
        else:
            return self.get_cookie(AUTH_COOKIE_NAME)

    def _verify_auth(self):
        '''
        Boolean whether the request is auth'd
        '''
        return self.token and bool(self.application.auth.get_tok(self.token))

    def prepare(self):
        '''
        Run before get/posts etc. Pre-flight checks:
            - verify that we can speak back to them (compatible accept header)
        '''
        # verify the content type
        found = False
        for content_type, dumper in self.ct_out_map:
            if fnmatch.fnmatch(content_type, self.request.headers.get('Accept', '*/*')):
                found = True
                break

        # better return message?
        if not found:
            self.send_error(406)
        # NOTE(review): after a 406 this still falls through and assigns the
        # last (content_type, dumper) pair from the loop -- confirm whether
        # execution should stop here instead.

        self.content_type = content_type
        self.dumper = dumper

        # do the common parts
        self.start = time.time()
        self.connected = True

        self.lowstate = self._get_lowstate()

    def timeout_futures(self):
        '''
        timeout a session
        '''
        # TODO: set a header or something??? so we know it was a timeout
        self.application.event_listener.clean_timeout_futures(self)

    def on_finish(self):
        '''
        When the job has been done, lets cleanup
        '''
        # timeout all the futures
        self.timeout_futures()

    def on_connection_close(self):
        '''
        If the client disconnects, lets close out
        '''
        self.finish()

    def serialize(self, data):
        '''
        Serlialize the output based on the Accept header
        '''
        self.set_header('Content-Type', self.content_type)
        return self.dumper(data)

    def _form_loader(self, _):
        '''
        function to get the data from the urlencoded forms
        ignore the data passed in and just get the args from wherever they are
        '''
        data = {}
        for key, val in six.iteritems(self.request.arguments):
            # Single-valued keys are unwrapped from their one-element list.
            if len(val) == 1:
                data[key] = val[0]
            else:
                data[key] = val
        return data

    def deserialize(self, data):
        '''
        Deserialize the data based on request content type headers
        '''
        ct_in_map = {
            'application/x-www-form-urlencoded': self._form_loader,
            'application/json': json.loads,
            'application/x-yaml': yaml.safe_load,
            'text/yaml': yaml.safe_load,
            # because people are terrible and don't mean what they say
            'text/plain': json.loads
        }

        try:
            # KeyError -> unknown Content-Type (406); ValueError -> body
            # failed to parse (400).
            return ct_in_map[self.request.headers['Content-Type']](data)
        except KeyError:
            self.send_error(406)
        except ValueError:
            self.send_error(400)

    def _get_lowstate(self):
        '''
        Format the incoming data into a lowstate object
        '''
        if not self.request.body:
            return
        data = self.deserialize(self.request.body)
        self.raw_data = copy(data)

        if self.request.headers.get('Content-Type') == 'application/x-www-form-urlencoded':
            # Urlencoded bodies carry a single lowstate; normalise 'arg' to
            # a list and wrap the dict.
            if 'arg' in data and not isinstance(data['arg'], list):
                data['arg'] = [data['arg']]
            lowstate = [data]
        else:
            lowstate = data
        return lowstate
class SaltAuthHandler(BaseSaltAPIHandler):  # pylint: disable=W0223
    '''
    Handler for login requests
    '''
    def get(self):
        '''
        All logins are done over post, this is a parked endpoint

        .. http:get:: /login

            :status 401: |401|
            :status 406: |406|

        **Example request:**

        .. code-block:: bash

            curl -i localhost:8000/login

        .. code-block:: http

            GET /login HTTP/1.1
            Host: localhost:8000
            Accept: application/json

        **Example response:**

        .. code-block:: http

            HTTP/1.1 401 Unauthorized
            Content-Type: application/json
            Content-Length: 58

            {"status": "401 Unauthorized", "return": "Please log in"}
        '''
        # Credentials must be POSTed; GET just advertises that a session
        # is required.
        self.set_status(401)
        self.set_header('WWW-Authenticate', 'Session')

        ret = {'status': '401 Unauthorized',
               'return': 'Please log in'}

        self.write(self.serialize(ret))

    # TODO: make async? Underlying library isn't... and we ARE making disk calls :(
    def post(self):
        '''
        :ref:`Authenticate <rest_tornado-auth>` against Salt's eauth system

        .. http:post:: /login

            :reqheader X-Auth-Token: |req_token|
            :reqheader Accept: |req_accept|
            :reqheader Content-Type: |req_ct|

            :form eauth: the eauth backend configured for the user
            :form username: username
            :form password: password

            :status 200: |200|
            :status 400: |400|
            :status 401: |401|
            :status 406: |406|
            :status 500: |500|

        **Example request:**

        .. code-block:: bash

            curl -si localhost:8000/login \\
                    -H "Accept: application/json" \\
                    -d username='saltuser' \\
                    -d password='saltpass' \\
                    -d eauth='pam'

        .. code-block:: http

            POST / HTTP/1.1
            Host: localhost:8000
            Content-Length: 42
            Content-Type: application/x-www-form-urlencoded
            Accept: application/json

            username=saltuser&password=saltpass&eauth=pam

        **Example response:**

        .. code-block:: http

            HTTP/1.1 200 OK
            Content-Type: application/json
            Content-Length: 206
            X-Auth-Token: 6d1b722e
            Set-Cookie: session_id=6d1b722e; expires=Sat, 17 Nov 2012 03:23:52 GMT; Path=/

            {"return": {
                "token": "6d1b722e",
                "start": 1363805943.776223,
                "expire": 1363849143.776224,
                "user": "saltuser",
                "eauth": "pam",
                "perms": [
                    "grains.*",
                    "status.*",
                    "sys.*",
                    "test.*"
                ]
            }}
        '''
        # get_arguments() returns a list per field, so a missing field
        # surfaces as an IndexError below.
        try:
            creds = {'username': self.get_arguments('username')[0],
                     'password': self.get_arguments('password')[0],
                     'eauth': self.get_arguments('eauth')[0],
                     }
        # if any of the args are missing, its a bad request
        except IndexError:
            self.send_error(400)
            return

        token = self.application.auth.mk_token(creds)
        if 'token' not in token:
            # TODO: nicer error message
            # 'Could not authenticate using provided credentials')
            self.send_error(401)
            # return since we don't want to execute any more
            return

        # Grab eauth config for the current backend for the current user
        try:
            perms = self.application.opts['external_auth'][token['eauth']][token['name']]
        # If we can't find the creds, then they aren't authorized
        except KeyError:
            self.send_error(401)
            return

        except (AttributeError, IndexError):
            logging.debug("Configuration for external_auth malformed for "
                          "eauth '{0}', and user '{1}'."
                          .format(token.get('eauth'), token.get('name')), exc_info=True)
            # TODO better error -- 'Configuration for external_auth could not be read.'
            self.send_error(500)
            return

        ret = {'return': [{
            'token': token['token'],
            'expire': token['expire'],
            'start': token['start'],
            'user': token['name'],
            'eauth': token['eauth'],
            'perms': perms,
            }]}

        self.write(self.serialize(ret))
class SaltAPIHandler(BaseSaltAPIHandler, SaltClientsMixIn): # pylint: disable=W0223
'''
Main API handler for base "/"
'''
def get(self):
    '''
    An endpoint to determine salt-api capabilities

    .. http:get:: /

        :reqheader Accept: |req_accept|

        :status 200: |200|
        :status 401: |401|
        :status 406: |406|

    **Example request:**

    .. code-block:: bash

        curl -i localhost:8000

    .. code-block:: http

        GET / HTTP/1.1
        Host: localhost:8000
        Accept: application/json

    **Example response:**

    .. code-block:: http

        HTTP/1.1 200 OK
        Content-Type: application/json
        Content-Length: 83

        {"clients": ["local", "local_batch", "local_async", "runner", "runner_async"], "return": "Welcome"}
    '''
    # Advertise the configured client interfaces.
    ret = {"clients": list(self.saltclients.keys()),
           "return": "Welcome"}
    self.write(self.serialize(ret))
@tornado.web.asynchronous
def post(self):
    '''
    Send one or more Salt commands (lowstates) in the request body

    .. http:post:: /

        :reqheader X-Auth-Token: |req_token|
        :reqheader Accept: |req_accept|
        :reqheader Content-Type: |req_ct|

        :resheader Content-Type: |res_ct|

        :status 200: |200|
        :status 401: |401|
        :status 406: |406|

        :term:`lowstate` data describing Salt commands must be sent in the
        request body.

    **Example request:**

    .. code-block:: bash

        curl -si https://localhost:8000 \\
                -H "Accept: application/x-yaml" \\
                -H "X-Auth-Token: d40d1e1e" \\
                -d client=local \\
                -d tgt='*' \\
                -d fun='test.ping' \\
                -d arg

    .. code-block:: http

        POST / HTTP/1.1
        Host: localhost:8000
        Accept: application/x-yaml
        X-Auth-Token: d40d1e1e
        Content-Length: 36
        Content-Type: application/x-www-form-urlencoded

        fun=test.ping&arg&client=local&tgt=*

    **Example response:**

    Responses are an in-order list of the lowstate's return data. In the
    event of an exception running a command the return will be a string
    instead of a mapping.

    .. code-block:: http

        HTTP/1.1 200 OK
        Content-Length: 200
        Allow: GET, HEAD, POST
        Content-Type: application/x-yaml

        return:
        - ms-0: true
            ms-1: true
            ms-2: true
            ms-3: true
            ms-4: true

    .. admonition:: multiple commands

        Note that if multiple :term:`lowstate` structures are sent, the Salt
        API will execute them in serial, and will not stop execution upon failure
        of a previous job. If you need to have commands executed in order and
        stop on failure please use compound command execution.
    '''
    # if you aren't authenticated, redirect to login
    if not self._verify_auth():
        self.redirect('/login')
        return

    # Hand off to the coroutine that routes each lowstate to its client.
    self.disbatch()
@tornado.gen.coroutine
def disbatch(self):
    '''
    Disbatch all lowstates to the appropriate clients

    Auth must have been verified before this point
    '''
    ret = []

    # check clients before going, we want to throw 400 if one is bad
    for low in self.lowstate:
        client = low.get('client')
        self._verify_client(client)

    for low in self.lowstate:
        # make sure that the chunk has a token, if not we can't do auth per-request
        # Note: this means that you can send different tokens per lowstate
        # as long as the base token (to auth with the API) is valid
        if 'token' not in low:
            low['token'] = self.token

        # disbatch to the correct handler
        # (routed by name: client 'local' -> self._disbatch_local, etc.)
        try:
            chunk_ret = yield getattr(self, '_disbatch_{0}'.format(low['client']))(low)
            ret.append(chunk_ret)
        except Exception as ex:
            # One failed lowstate produces an error string in its slot
            # rather than aborting the remaining lowstates.
            ret.append('Unexpected exception while handling request: {0}'.format(ex))
            logger.error('Unexpected exception while handling request:', exc_info=True)

    self.write(self.serialize({'return': ret}))
    self.finish()
@tornado.gen.coroutine
def _disbatch_local_batch(self, chunk):
    '''
    Disbatch local client batched commands
    '''
    f_call = salt.utils.format_call(self.saltclients['local_batch'], chunk)

    # ping all the minions (to see who we have to talk to)
    # Don't catch any exception, since we won't know what to do, we'll
    # let the upper level deal with this one
    ping_ret = yield self._disbatch_local({'tgt': chunk['tgt'],
                                           'fun': 'test.ping',
                                           'expr_form': f_call['kwargs']['expr_form']})

    chunk_ret = {}

    # A non-dict ping result means no usable minion list (e.g. an error
    # string); bail out with an empty result.
    if not isinstance(ping_ret, dict):
        raise tornado.gen.Return(chunk_ret)
    minions = list(ping_ret.keys())

    maxflight = get_batch_size(f_call['kwargs']['batch'], len(minions))
    inflight_futures = []
    # override the expr_form (each sub-job targets an explicit minion list)
    f_call['kwargs']['expr_form'] = 'list'

    # do this batch: keep at most ``maxflight`` sub-jobs in flight until
    # every minion has been dispatched and every future resolved.
    while len(minions) > 0 or len(inflight_futures) > 0:
        # if you have more to go, lets disbatch jobs
        while len(inflight_futures) < maxflight and len(minions) > 0:
            minion_id = minions.pop(0)
            batch_chunk = dict(chunk)
            batch_chunk['tgt'] = [minion_id]
            batch_chunk['expr_form'] = 'list'
            future = self._disbatch_local(batch_chunk)
            inflight_futures.append(future)

        # if we have nothing to wait for, don't wait
        if len(inflight_futures) == 0:
            continue

        # wait until someone is done
        finished_future = yield Any(inflight_futures)
        try:
            b_ret = finished_future.result()
        except TimeoutException:
            break
        chunk_ret.update(b_ret)
        inflight_futures.remove(finished_future)

    raise tornado.gen.Return(chunk_ret)
@tornado.gen.coroutine
def _disbatch_local(self, chunk):
    '''
    Dispatch local client commands

    Publishes the job via the local client, then waits until either all
    targeted minions have returned or the job is no longer running
    anywhere. Returns a dict of ``{minion_id: return}`` (or an error
    string when publish fails / is unauthorized).
    '''
    chunk_ret = {}

    f_call = salt.utils.format_call(self.saltclients['local'], chunk)
    # fire a job off
    try:
        pub_data = self.saltclients['local'](*f_call.get('args', ()), **f_call.get('kwargs', {}))
    except EauthAuthenticationError:
        raise tornado.gen.Return('Not authorized to run this job')

    # if the job didn't publish, lets not wait around for nothing
    # TODO: set header??
    if 'jid' not in pub_data:
        raise tornado.gen.Return('No minions matched the target. No command was sent, no jid was assigned.')

    # seed minions_remaining with the pub_data; the same (mutable) list
    # is shared with job_not_running/all_returns so late-joining minions
    # discovered by find_job are also waited on
    minions_remaining = pub_data['minions']

    # when this master has syndics below it, give the syndic layer a
    # minimum time to propagate the job before we start collecting
    syndic_min_wait = None
    if self.application.opts['order_masters']:
        syndic_min_wait = tornado.gen.sleep(self.application.opts['syndic_wait'])

    job_not_running = self.job_not_running(pub_data['jid'],
                                           chunk['tgt'],
                                           f_call['kwargs']['expr_form'],
                                           minions_remaining=minions_remaining
                                           )

    # if we have a min_wait, do that
    if syndic_min_wait is not None:
        yield syndic_min_wait
    # we are completed when either all minions return or the job isn't running anywhere
    chunk_ret = yield self.all_returns(pub_data['jid'],
                                       finish_futures=[job_not_running],
                                       minions_remaining=minions_remaining,
                                       )

    raise tornado.gen.Return(chunk_ret)
@tornado.gen.coroutine
def all_returns(self,
                jid,
                finish_futures=None,
                minions_remaining=None,
                ):
    '''
    Return a future which will complete once all returns are completed
    (according to minions_remaining), or one of the passed in "finish_futures" completes

    Resolves to a dict of ``{minion_id: return_data}`` collected from
    ``salt/job/<jid>/ret`` events. ``minions_remaining`` is mutated in
    place as returns arrive (it may also be grown concurrently by
    job_not_running via the shared list).
    '''
    if finish_futures is None:
        finish_futures = []
    if minions_remaining is None:
        minions_remaining = []

    ret_tag = tagify([jid, 'ret'], 'job')
    chunk_ret = {}
    while True:
        ret_event = self.application.event_listener.get_event(self,
                                                              tag=ret_tag,
                                                              )
        # race the next return event against the caller's finish futures
        f = yield Any([ret_event] + finish_futures)
        if f in finish_futures:
            # a finish condition fired first (e.g. job no longer running)
            raise tornado.gen.Return(chunk_ret)
        event = f.result()
        chunk_ret[event['data']['id']] = event['data']['return']
        # its possible to get a return that wasn't in the minion_remaining list
        try:
            minions_remaining.remove(event['data']['id'])
        except ValueError:
            pass
        if len(minions_remaining) == 0:
            raise tornado.gen.Return(chunk_ret)
@tornado.gen.coroutine
def job_not_running(self,
                    jid,
                    tgt,
                    tgt_type,
                    minions_remaining=None,
                    ):
    '''
    Return a future which will complete once jid (passed in) is no longer
    running on tgt

    Repeatedly publishes ``saltutil.find_job`` against the target and
    watches the ping job's return events. The coroutine resolves (with
    True) only after a full gather_job_timeout window passes with no
    minion reporting the job as running. Minions seen running the job
    are appended to the shared ``minions_remaining`` list so the caller
    also waits for their returns.
    '''
    if minions_remaining is None:
        minions_remaining = []

    ping_pub_data = self.saltclients['local'](tgt,
                                              'saltutil.find_job',
                                              [jid],
                                              expr_form=tgt_type)
    ping_tag = tagify([ping_pub_data['jid'], 'ret'], 'job')

    minion_running = False
    while True:
        try:
            event = yield self.application.event_listener.get_event(self,
                                                                    tag=ping_tag,
                                                                    timeout=self.application.opts['gather_job_timeout'],
                                                                    )
        except TimeoutException:
            if not minion_running:
                # nobody reported the job running within the window -> done
                raise tornado.gen.Return(True)
            else:
                # someone was still running it; re-ping and watch the new
                # find_job's returns for another window
                ping_pub_data = self.saltclients['local'](tgt,
                                                          'saltutil.find_job',
                                                          [jid],
                                                          expr_form=tgt_type)
                ping_tag = tagify([ping_pub_data['jid'], 'ret'], 'job')
                minion_running = False
                continue

        # Minions can return, we want to see if the job is running...
        # an empty 'return' means that minion is NOT running the jid
        if event['data'].get('return', {}) == {}:
            continue
        minion_running = True
        id_ = event['data']['id']
        if id_ not in minions_remaining:
            minions_remaining.append(event['data']['id'])
@tornado.gen.coroutine
def _disbatch_local_async(self, chunk):
    '''
    Disbatch local client_async commands

    Publishes the job and resolves immediately with the publish data
    (jid and matched minions) instead of waiting for returns.
    '''
    client = self.saltclients['local_async']
    call_spec = salt.utils.format_call(client, chunk)
    call_args = call_spec.get('args', ())
    call_kwargs = call_spec.get('kwargs', {})
    # fire a job off; no waiting for minion returns here
    pub_data = client(*call_args, **call_kwargs)
    raise tornado.gen.Return(pub_data)
@tornado.gen.coroutine
def _disbatch_runner(self, chunk):
    '''
    Disbatch runner client commands

    Fires the runner named by ``chunk['fun']`` on the master and waits
    for its return event on the event bus. Resolves with the runner's
    return data, or a timeout message string if no return event arrives
    within the listener's timeout.
    '''
    # NOTE: the runner client is called with (fun, chunk) directly; a
    # previously-built format_call dict was unused and has been removed.
    pub_data = self.saltclients['runner'](chunk['fun'], chunk)
    tag = pub_data['tag'] + '/ret'
    try:
        event = yield self.application.event_listener.get_event(self, tag=tag)

        # only return the return data
        raise tornado.gen.Return(event['data']['return'])
    except TimeoutException:
        raise tornado.gen.Return('Timeout waiting for runner to execute')
@tornado.gen.coroutine
def _disbatch_runner_async(self, chunk):
    '''
    Disbatch runner client_async commands

    Fires the runner named by ``chunk['fun']`` and resolves immediately
    with the publish data (including the event tag), without waiting for
    the runner to finish. Note it uses the same 'runner' client as the
    synchronous variant -- only the waiting differs.
    '''
    # NOTE: the runner client is called with (fun, chunk) directly; a
    # previously-built format_call dict was unused and has been removed.
    pub_data = self.saltclients['runner'](chunk['fun'], chunk)
    raise tornado.gen.Return(pub_data)
class MinionSaltAPIHandler(SaltAPIHandler):  # pylint: disable=W0223
    '''
    A convenience endpoint for minion related functions
    '''
    @tornado.web.asynchronous
    def get(self, mid=None):  # pylint: disable=W0221
        '''
        A convenience URL for getting lists of minions or getting minion
        details

        .. http:get:: /minions/(mid)

            :reqheader X-Auth-Token: |req_token|
            :reqheader Accept: |req_accept|

            :status 200: |200|
            :status 401: |401|
            :status 406: |406|

        **Example request:**

        .. code-block:: bash

            curl -i localhost:8000/minions/ms-3

        .. code-block:: http

            GET /minions/ms-3 HTTP/1.1
            Host: localhost:8000
            Accept: application/x-yaml

        **Example response:**

        .. code-block:: http

            HTTP/1.1 200 OK
            Content-Length: 129005
            Content-Type: application/x-yaml

            return:
            - ms-3:
                grains.items:
                    ...
        '''
        # if you aren't authenticated, redirect to login
        if not self._verify_auth():
            self.redirect('/login')
            return

        # a GET is just a grains.items call against the requested minion
        # (or all minions when no mid is given)
        self.lowstate = [{
            'client': 'local',
            'tgt': mid or '*',
            'fun': 'grains.items',
        }]
        self.disbatch()

    @tornado.web.asynchronous
    def post(self):
        '''
        Start an execution command and immediately return the job id

        .. http:post:: /minions

            :reqheader X-Auth-Token: |req_token|
            :reqheader Accept: |req_accept|
            :reqheader Content-Type: |req_ct|

            :resheader Content-Type: |res_ct|

            :status 200: |200|
            :status 401: |401|
            :status 406: |406|

        :term:`lowstate` data describing Salt commands must be sent in the
        request body. The ``client`` option will be set to
        :py:meth:`~salt.client.LocalClient.local_async`.

        **Example request:**

        .. code-block:: bash

            curl -sSi localhost:8000/minions \\
                -H "Accept: application/x-yaml" \\
                -d tgt='*' \\
                -d fun='status.diskusage'

        .. code-block:: http

            POST /minions HTTP/1.1
            Host: localhost:8000
            Accept: application/x-yaml
            Content-Length: 26
            Content-Type: application/x-www-form-urlencoded

            tgt=*&fun=status.diskusage

        **Example response:**

        .. code-block:: http

            HTTP/1.1 202 Accepted
            Content-Length: 86
            Content-Type: application/x-yaml

            return:
            - jid: '20130603122505459265'
              minions: [ms-4, ms-3, ms-2, ms-1, ms-0]
        '''
        # if you aren't authenticated, redirect to login
        if not self._verify_auth():
            self.redirect('/login')
            return

        # verify that all lowstates are the correct client type: this
        # endpoint only fires async local jobs
        for low in self.lowstate:
            # if you didn't specify, its fine
            if 'client' not in low:
                low['client'] = 'local_async'
                continue
            # if you specified something else, we don't do that
            if low.get('client') != 'local_async':
                self.set_status(400)
                self.write('We don\'t serve your kind here')
                self.finish()
                return

        self.disbatch()
class JobsSaltAPIHandler(SaltAPIHandler):  # pylint: disable=W0223
    '''
    A convenience endpoint for job cache data
    '''
    @tornado.web.asynchronous
    def get(self, jid=None):  # pylint: disable=W0221
        '''
        A convenience URL for getting lists of previously run jobs or getting
        the return from a single job

        .. http:get:: /jobs/(jid)

            List jobs or show a single job from the job cache.

            :status 200: |200|
            :status 401: |401|
            :status 406: |406|

        **Example request:**

        .. code-block:: bash

            curl -i localhost:8000/jobs

        .. code-block:: http

            GET /jobs HTTP/1.1
            Host: localhost:8000
            Accept: application/x-yaml

        **Example response:**

        .. code-block:: http

            HTTP/1.1 200 OK
            Content-Length: 165
            Content-Type: application/x-yaml

            return:
            - '20121130104633606931':
                Arguments:
                - '3'
                Function: test.fib
                Start Time: 2012, Nov 30 10:46:33.606931
                Target: jerry
                Target-type: glob

        **Example request:**

        .. code-block:: bash

            curl -i localhost:8000/jobs/20121130104633606931

        .. code-block:: http

            GET /jobs/20121130104633606931 HTTP/1.1
            Host: localhost:8000
            Accept: application/x-yaml

        **Example response:**

        .. code-block:: http

            HTTP/1.1 200 OK
            Content-Length: 73
            Content-Type: application/x-yaml

            info:
            - Arguments:
                - '3'
              Function: test.fib
              Minions:
                - jerry
              Start Time: 2012, Nov 30 10:46:33.606931
              Target: '*'
              Target-type: glob
              User: saltdev
              jid: '20121130104633606931'
            return:
            - jerry:
                - - 0
                  - 1
                  - 1
                  - 2
                  - 6.9141387939453125e-06
        '''
        # if you aren't authenticated, redirect to login
        if not self._verify_auth():
            self.redirect('/login')
            return

        # with a jid: detail of that one job; without: list all jobs.
        # Both are served by the jobs runner on the master.
        if jid:
            self.lowstate = [{
                'fun': 'jobs.list_job',
                'jid': jid,
                'client': 'runner',
            }]
        else:
            self.lowstate = [{
                'fun': 'jobs.list_jobs',
                'client': 'runner',
            }]

        self.disbatch()
class RunSaltAPIHandler(SaltAPIHandler):  # pylint: disable=W0223
    '''
    Endpoint to run commands without normal session handling
    '''
    @tornado.web.asynchronous
    def post(self):
        '''
        Run commands bypassing the :ref:`normal session handling
        <rest_cherrypy-auth>`

        .. http:post:: /run

            This entry point is primarily for "one-off" commands. Each request
            must pass full Salt authentication credentials. Otherwise this URL
            is identical to the :py:meth:`root URL (/) <LowDataAdapter.POST>`.

            :term:`lowstate` data describing Salt commands must be sent in the
            request body.

            :status 200: |200|
            :status 401: |401|
            :status 406: |406|

        **Example request:**

        .. code-block:: bash

            curl -sS localhost:8000/run \\
                -H 'Accept: application/x-yaml' \\
                -d client='local' \\
                -d tgt='*' \\
                -d fun='test.ping' \\
                -d username='saltdev' \\
                -d password='saltdev' \\
                -d eauth='pam'

        .. code-block:: http

            POST /run HTTP/1.1
            Host: localhost:8000
            Accept: application/x-yaml
            Content-Length: 75
            Content-Type: application/x-www-form-urlencoded

            client=local&tgt=*&fun=test.ping&username=saltdev&password=saltdev&eauth=pam

        **Example response:**

        .. code-block:: http

            HTTP/1.1 200 OK
            Content-Length: 73
            Content-Type: application/x-yaml

            return:
            - ms-0: true
              ms-1: true
              ms-2: true
              ms-3: true
              ms-4: true
        '''
        # No session/token check here by design: each lowstate chunk must
        # carry its own eauth credentials, which are validated by the
        # underlying Salt client when the chunk is dispatched.
        self.disbatch()
class EventsSaltAPIHandler(SaltAPIHandler):  # pylint: disable=W0223
    '''
    Expose the Salt event bus

    The event bus on the Salt master exposes a large variety of things, notably
    when executions are started on the master and also when minions ultimately
    return their results. This URL provides a real-time window into a running
    Salt infrastructure.

    .. seealso:: :ref:`events`
    '''
    @tornado.gen.coroutine
    def get(self):
        r'''
        An HTTP stream of the Salt master event bus

        This stream is formatted per the Server Sent Events (SSE) spec. Each
        event is formatted as JSON.

        .. http:get:: /events

            :status 200: |200|
            :status 401: |401|
            :status 406: |406|

        **Example request:**

        .. code-block:: bash

            curl -NsS localhost:8000/events

        .. code-block:: http

            GET /events HTTP/1.1
            Host: localhost:8000

        **Example response:**

        .. code-block:: http

            HTTP/1.1 200 OK
            Connection: keep-alive
            Cache-Control: no-cache
            Content-Type: text/event-stream;charset=utf-8

            retry: 400
            data: {'tag': '', 'data': {'minions': ['ms-4', 'ms-3', 'ms-2', 'ms-1', 'ms-0']}}

            data: {'tag': '20130802115730568475', 'data': {'jid': '20130802115730568475', 'return': True, 'retcode': 0, 'success': True, 'cmd': '_return', 'fun': 'test.ping', 'id': 'ms-1'}}

        The event stream can be easily consumed via JavaScript:

        .. code-block:: javascript

            # Note, you must be authenticated!
            var source = new EventSource('/events');
            source.onopen = function() { console.debug('opening') };
            source.onerror = function(e) { console.debug('error!', e) };
            source.onmessage = function(e) { console.debug(e.data) };

        Or using CORS:

        .. code-block:: javascript

            var source = new EventSource('/events', {withCredentials: true});

        Some browser clients lack CORS support for the ``EventSource()`` API. Such
        clients may instead pass the :mailheader:`X-Auth-Token` value as an URL
        parameter:

        .. code-block:: bash

            curl -NsS localhost:8000/events/6d1b722e

        It is also possible to consume the stream via the shell.

        Records are separated by blank lines; the ``data:`` and ``tag:``
        prefixes will need to be removed manually before attempting to
        unserialize the JSON.

        curl's ``-N`` flag turns off input buffering which is required to
        process the stream incrementally.

        Here is a basic example of printing each event as it comes in:

        .. code-block:: bash

            curl -NsS localhost:8000/events |\
                while IFS= read -r line ; do
                    echo $line
                done

        Here is an example of using awk to filter events based on tag:

        .. code-block:: bash

            curl -NsS localhost:8000/events |\
                awk '
                    BEGIN { RS=""; FS="\\n" }
                    $1 ~ /^tag: salt\/job\/[0-9]+\/new$/ { print $0 }
                '
            tag: salt/job/20140112010149808995/new
            data: {"tag": "salt/job/20140112010149808995/new", "data": {"tgt_type": "glob", "jid": "20140112010149808995", "tgt": "jerry", "_stamp": "2014-01-12_01:01:49.809617", "user": "shouse", "arg": [], "fun": "test.ping", "minions": ["jerry"]}}
            tag: 20140112010149808995
            data: {"tag": "20140112010149808995", "data": {"fun_args": [], "jid": "20140112010149808995", "return": true, "retcode": 0, "success": true, "cmd": "_return", "_stamp": "2014-01-12_01:01:49.819316", "fun": "test.ping", "id": "jerry"}}
        '''
        # if you aren't authenticated, redirect to login
        if not self._verify_auth():
            self.redirect('/login')
            return

        # set the streaming headers (SSE: keep connection open, no caching)
        self.set_header('Content-Type', 'text/event-stream')
        self.set_header('Cache-Control', 'no-cache')
        self.set_header('Connection', 'keep-alive')

        # SSE 'retry' field: client reconnect delay in milliseconds
        self.write(u'retry: {0}\n'.format(400))
        self.flush()

        # stream events until the listener times out (no tag filter ->
        # every event on the master bus)
        while True:
            try:
                event = yield self.application.event_listener.get_event(self)
                self.write(u'tag: {0}\n'.format(event.get('tag', '')))
                self.write(u'data: {0}\n\n'.format(json.dumps(event)))
                self.flush()
            except TimeoutException:
                break
class WebhookSaltAPIHandler(SaltAPIHandler):  # pylint: disable=W0223
    '''
    A generic web hook entry point that fires an event on Salt's event bus

    External services can POST data to this URL to trigger an event in Salt.
    For example, Amazon SNS, Jenkins-CI or Travis-CI, or GitHub web hooks.

    .. note:: Be mindful of security

        Salt's Reactor can run any code. A Reactor SLS that responds to a hook
        event is responsible for validating that the event came from a trusted
        source and contains valid data.

        **This is a generic interface and securing it is up to you!**

        This URL requires authentication however not all external services can
        be configured to authenticate. For this reason authentication can be
        selectively disabled for this URL. Follow best practices -- always use
        SSL, pass a secret key, configure the firewall to only allow traffic
        from a known source, etc.

    The event data is taken from the request body. The
    :mailheader:`Content-Type` header is respected for the payload.

    The event tag is prefixed with ``salt/netapi/hook`` and the URL path is
    appended to the end. For example, a ``POST`` request sent to
    ``/hook/mycompany/myapp/mydata`` will produce a Salt event with the tag
    ``salt/netapi/hook/mycompany/myapp/mydata``.

    The following is an example ``.travis.yml`` file to send notifications to
    Salt of successful test runs:

    .. code-block:: yaml

        language: python
        script: python -m unittest tests
        after_success:
            - 'curl -sS http://saltapi-url.example.com:8000/hook/travis/build/success -d branch="${TRAVIS_BRANCH}" -d commit="${TRAVIS_COMMIT}"'

    .. seealso:: :ref:`events`, :ref:`reactor`
    '''
    def post(self, tag_suffix=None):  # pylint: disable=W0221
        '''
        Fire an event in Salt with a custom event tag and data

        .. http:post:: /hook

            :status 200: |200|
            :status 401: |401|
            :status 406: |406|
            :status 413: request body is too large

        **Example request:**

        .. code-block:: bash

            curl -sS localhost:8000/hook -d foo='Foo!' -d bar='Bar!'

        .. code-block:: http

            POST /hook HTTP/1.1
            Host: localhost:8000
            Content-Length: 16
            Content-Type: application/x-www-form-urlencoded

            foo=Foo&bar=Bar!

        **Example response**:

        .. code-block:: http

            HTTP/1.1 200 OK
            Content-Length: 14
            Content-Type: application/json

            {"success": true}

        As a practical example, an internal continuous-integration build
        server could send an HTTP POST request to the URL
        ``http://localhost:8000/hook/mycompany/build/success`` which contains
        the result of a build and the SHA of the version that was built as
        JSON. That would then produce the following event in Salt that could be
        used to kick off a deployment via Salt's Reactor::

            Event fired at Fri Feb 14 17:40:11 2014
            *************************
            Tag: salt/netapi/hook/mycompany/build/success
            Data:
            {'_stamp': '2014-02-14_17:40:11.440996',
                'headers': {
                    'X-My-Secret-Key': 'F0fAgoQjIT@W',
                    'Content-Length': '37',
                    'Content-Type': 'application/json',
                    'Host': 'localhost:8000',
                    'Remote-Addr': '127.0.0.1'},
                'post': {'revision': 'aa22a3c4b2e7', 'result': True}}

        Salt's Reactor could listen for the event:

        .. code-block:: yaml

            reactor:
              - 'salt/netapi/hook/mycompany/build/*':
                - /srv/reactor/react_ci_builds.sls

        And finally deploy the new build:

        .. code-block:: yaml

            {% set secret_key = data.get('headers', {}).get('X-My-Secret-Key') %}
            {% set build = data.get('post', {}) %}

            {% if secret_key == 'F0fAgoQjIT@W' and build.result == True %}
            deploy_my_app:
              cmd.state.sls:
                - tgt: 'application*'
                - arg:
                  - myapp.deploy
                - kwarg:
                    pillar:
                      revision: {{ revision }}
            {% endif %}
        '''
        # auth can be disabled for this endpoint only, via mod_opts
        disable_auth = self.application.mod_opts.get('webhook_disable_auth')
        if not disable_auth and not self._verify_auth():
            self.redirect('/login')
            return

        # if you have the tag, prefix
        tag = 'salt/netapi/hook'
        if tag_suffix:
            tag += tag_suffix

        # TODO: consolidate??
        self.event = salt.utils.event.get_event(
            'master',
            self.application.opts['sock_dir'],
            self.application.opts['transport'],
            opts=self.application.opts,
            listen=False)

        # fire the raw request body, query args and headers onto the bus
        ret = self.event.fire_event({
            'post': self.raw_data,
            'get': dict(self.request.query_arguments),
            # In Tornado >= v4.0.3, the headers come
            # back as an HTTPHeaders instance, which
            # is a dictionary. We must cast this as
            # a dictionary in order for msgpack to
            # serialize it.
            'headers': dict(self.request.headers),
        }, tag)

        self.write(self.serialize({'success': ret}))
| |
import time
from typing import Dict, List, Tuple
from testinfra.host import Host
from conftest import Email, Lines, Net, Vagrant, corresponding_hostname, for_host_types
class Test01GenericCore:
@for_host_types('pi', 'ubuntu')
def test_00_base_config(self, hostname: str, hosts: Dict[str, Host]) -> None:
    """The debconf locale settings should be British English UTF-8."""
    debconf_output = Lines(hosts[hostname].check_output('debconf-show locales'), hostname)
    expected_patterns = (
        r'[^a-zA-Z]*locales/locales_to_be_generated: en_GB.UTF-8 UTF-8',
        r'[^a-zA-Z]*locales/default_environment_locale: en_GB.UTF-8',
    )
    for pattern in expected_patterns:
        assert debconf_output.contains(pattern)
@for_host_types('pi', 'ubuntu')
def test_01_packages(self, hostname: str, hosts: Dict[str, Host]) -> None:
    """Spot-check that the package-install step ran."""
    # 'etckeeper' is installed by the setup script and is not part of the
    # base image, so its presence shows the install step succeeded.
    assert hosts[hostname].package('etckeeper').is_installed
# 02-pi-new-user is a no-op in the testbed.
@for_host_types('pi', 'ubuntu')
def test_03_cleanup_users(self, hostname: str, hosts: Dict[str, Host]) -> None:
    """Spot-check that account lockdown ran on representative users."""
    host = hosts[hostname]
    # We pick the most important user - root - and a few that are changed
    with host.sudo():
        assert host.user('root').password == '!'
        # system accounts get both a nologin shell and a locked password
        for account in ('systemd-timesync', 'messagebus'):
            user = host.user(account)
            assert user.shell == '/usr/sbin/nologin'
            assert user.password == '!*'
@for_host_types('pi', 'ubuntu')
def test_04_vars(self, hostname: str, hosts: Dict[str, Host], addrs: Dict[str, str]) -> None:
    """The per-host variable files should match the testbed topology."""
    host = hosts[hostname]
    expected_contents = {
        '/etc/pi-server/lan-ip': addrs[hostname],
        '/etc/pi-server/lan-iface': 'eth1',
        '/etc/pi-server/fqdn': hostname + '.testbed',
        '/etc/pi-server/email-target': 'fake@fake.testbed',
        '/etc/pi-server/email-smtp-server': addrs['internet'],
        '/etc/pi-server/email-smtp-port': '1025',
    }
    for path, value in expected_contents.items():
        assert host.file(path).content_string.strip() == value
# 05-network is tested by test_base's reachability and routing tests.
@for_host_types('pi', 'ubuntu')
def test_06_email(self, email: Email, hostname: str, hosts: Dict[str, Host]) -> None:
    """Tests the notification-email script and the SSH-login email hook."""
    host = hosts[hostname]
    client_ip = host.client_ip()
    email.clear()

    # SSH login emails are on by default, so we expect one email for logging in, and one for
    # the command we actually ran.
    host.check_output('/etc/pi-server/send-notification-email foo bar')
    # give the fake SMTP server time to receive both messages
    time.sleep(10)
    email.assert_emails([
        {
            'from': 'notification@%s.testbed' % hostname,
            'to': 'fake@fake.testbed',
            'subject': '[%s] SSH login: vagrant from %s' % (hostname, client_ip),
            # the PAM variables must appear somewhere in the body
            'body_re': r'(.*\n)*PAM_USER=vagrant\nPAM_RHOST=%s\n(.*\n)*' % client_ip.replace(
                '.', r'\.'),
        },
        {
            'from': 'notification@%s.testbed' % hostname,
            'to': 'fake@fake.testbed',
            'subject': '[%s] foo' % hostname,
            'body_re': r'bar\n\n',
        },
    ], only_from=hostname)

    # Disable SSH login emails from our address, and we should only get one email.
    with host.disable_login_emails():
        email.clear()

        host.check_output('/etc/pi-server/send-notification-email foo bar')
        time.sleep(10)
        email.assert_emails([
            {
                'from': 'notification@%s.testbed' % hostname,
                'to': 'fake@fake.testbed',
                'subject': '[%s] foo' % hostname,
                'body_re': r'bar\n\n',
            },
        ], only_from=hostname)
# 07-sshd is partially tested by the fact we can still log in at all, and partially
# by the email-at-login behaviour.
@for_host_types('pi', 'ubuntu')
def test_08_firewall(
        self, vagrant: Vagrant, net: Net, hostname: str, hosts: Dict[str, Host]) -> None:
    """Tests the firewall's forwarding toggle and the port-control script.

    Part 1 toggles the allow-forwarding flag file and reboots to verify the
    FORWARD chain policy follows it. Part 2 exercises the port script's
    validation, runtime open/close, and open-at-boot persistence, shadowing
    the boot-open files so the host's real config is untouched.
    """
    host = hosts[hostname]

    # Part 1 - test forwarding
    allow_forwarding_initially = host.file('/etc/pi-server/firewall/allow-forwarding').exists
    try:
        # Forwarding disabled
        with host.sudo():
            host.check_output('rm -f /etc/pi-server/firewall/allow-forwarding')
        vagrant.reboot(hostname)
        with host.sudo():
            # iptables -C returns 0 when the rule exists, 1 when it doesn't
            host.check_output('iptables -C FORWARD -j DROP')
            host.run_expect([1], 'iptables -C FORWARD -j ACCEPT')

        # Forwarding enabled
        with host.sudo():
            host.check_output('touch /etc/pi-server/firewall/allow-forwarding')
        vagrant.reboot(hostname)
        with host.sudo():
            host.check_output('iptables -C FORWARD -j ACCEPT')
            host.run_expect([1], 'iptables -C FORWARD -j DROP')
    finally:
        # Restore initial state
        with host.sudo():
            if allow_forwarding_initially:
                host.check_output('touch /etc/pi-server/firewall/allow-forwarding')
            else:
                host.check_output('rm -f /etc/pi-server/firewall/allow-forwarding')
        vagrant.reboot(hostname)

    # Part 2 - test port state manipulation
    router = corresponding_hostname(hostname, 'router')
    port_script = '/etc/pi-server/firewall/port'

    # Helpers wrapping the port script's query subcommands; each checks
    # both the exit code and the printed Yes/No answer.
    def is_open(port: int, protocol: str) -> bool:
        cmd = host.run('%s is-open %d %s' % (port_script, port, protocol))
        return cmd.rc == 0 and cmd.stdout == 'Yes\n'

    def is_closed(port: int, protocol: str) -> bool:
        cmd = host.run('%s is-open %d %s' % (port_script, port, protocol))
        return cmd.rc == 1 and cmd.stdout == 'No\n'

    def opens_at_boot(port: int, protocol: str) -> bool:
        cmd = host.run('%s opens-at-boot %d %s' % (port_script, port, protocol))
        return cmd.rc == 0 and cmd.stdout == 'Yes\n'

    def doesnt_open_at_boot(port: int, protocol: str) -> bool:
        cmd = host.run('%s opens-at-boot %d %s' % (port_script, port, protocol))
        return cmd.rc == 1 and cmd.stdout == 'No\n'

    with host.shadow_file('/etc/pi-server/firewall/iptables-tcp-open-boot'), \
            host.shadow_file('/etc/pi-server/firewall/iptables-udp-open-boot'):
        # Base state - only hardcoded ports open at boot
        vagrant.reboot(hostname)
        net.assert_ports_open(
            {router: {
                hostname: {'tcp': {22}, 'udp': set()}}})

        # Port validation
        assert host.check_output('%s is-valid 0' % port_script) == 'Yes'
        assert host.check_output('%s is-valid 1234' % port_script) == 'Yes'
        assert host.check_output('%s is-valid 65535' % port_script) == 'Yes'
        host.run_expect([1], '%s is-valid -1' % port_script)
        host.run_expect([1], '%s is-valid 65536' % port_script)
        host.run_expect([1], '%s is-valid a' % port_script)
        host.run_expect([1], '%s is-valid 10a' % port_script)
        host.run_expect([1], '%s is-valid' % port_script)

        # Protocol validation
        assert host.check_output('%s is-valid 1234 tcp' % port_script) == 'Yes'
        assert host.check_output('%s is-valid 1234 udp' % port_script) == 'Yes'
        host.run_expect([1], '%s is-valid 1234 foo' % port_script)

        # Run servers on the ports we're interested in (nc listeners in
        # detached tmux sessions so they outlive this shell)
        host.check_output('tmux new-session -s s1 -d nc -l -p 1995')
        host.check_output('tmux new-session -s s2 -d nc -l -k -u -p 1996')
        host.check_output('tmux new-session -s s3 -d nc -l -p 1997')
        host.check_output('tmux new-session -s s4 -d nc -l -p 1998')
        host.check_output('tmux new-session -s s5 -d nc -l -k -u -p 1999')

        # Base state
        assert is_open(22, 'tcp')  # hardcoded
        assert is_closed(22, 'udp')
        assert is_closed(1995, 'tcp')
        assert is_closed(1996, 'udp')
        assert is_closed(1997, 'tcp')
        assert is_closed(1998, 'tcp')
        assert is_closed(1999, 'udp')
        assert doesnt_open_at_boot(22, 'tcp')  # hardcoded
        assert doesnt_open_at_boot(22, 'udp')
        assert doesnt_open_at_boot(1995, 'tcp')
        assert doesnt_open_at_boot(1996, 'udp')
        assert doesnt_open_at_boot(1997, 'tcp')
        assert doesnt_open_at_boot(1998, 'tcp')
        assert doesnt_open_at_boot(1999, 'udp')

        # Open and close at runtime (1997 is opened then closed again)
        host.check_output('%s open 1997 tcp' % port_script)
        host.check_output('%s open 1997 tcp' % port_script)  # idempotent
        host.check_output('%s open 1998 tcp' % port_script)
        host.check_output('%s open 1999 udp' % port_script)
        host.check_output('%s close 1997 tcp' % port_script)
        assert is_open(22, 'tcp')  # hardcoded
        assert is_closed(22, 'udp')
        assert is_closed(1995, 'tcp')
        assert is_closed(1996, 'udp')
        assert is_closed(1997, 'tcp')
        assert is_open(1998, 'tcp')
        assert is_open(1999, 'udp')

        host.run_expect([1], '%s close 1997 tcp' % port_script)  # extra close does nothing
        assert is_open(22, 'tcp')  # hardcoded
        assert is_closed(22, 'udp')
        assert is_closed(1995, 'tcp')
        assert is_closed(1996, 'udp')
        assert is_closed(1997, 'tcp')
        assert is_open(1998, 'tcp')
        assert is_open(1999, 'udp')

        net.assert_ports_open({
            router: {
                hostname: {
                    'tcp': {22, 1998},
                    'udp': set(),  # for some reason this can't detect the open UDP port
                }
            }
        })

        # File manipulation (open-at-boot state only; runtime state unchanged)
        assert doesnt_open_at_boot(22, 'tcp')  # hardcoded
        assert doesnt_open_at_boot(22, 'udp')
        assert doesnt_open_at_boot(1995, 'tcp')
        assert doesnt_open_at_boot(1996, 'udp')
        assert doesnt_open_at_boot(1997, 'tcp')
        assert doesnt_open_at_boot(1998, 'tcp')
        assert doesnt_open_at_boot(1999, 'udp')
        host.check_output('%s open-at-boot 1995 tcp' % port_script)
        host.check_output('%s open-at-boot 1996 udp' % port_script)
        host.check_output('%s open-at-boot 1997 tcp' % port_script)
        host.check_output('%s open-at-boot 1997 tcp' % port_script)  # idempotent
        host.check_output('%s dont-open-at-boot 1997 tcp' % port_script)
        assert doesnt_open_at_boot(22, 'tcp')  # hardcoded
        assert doesnt_open_at_boot(22, 'udp')
        assert opens_at_boot(1995, 'tcp')
        assert opens_at_boot(1996, 'udp')
        assert doesnt_open_at_boot(1997, 'tcp')
        assert doesnt_open_at_boot(1998, 'tcp')
        assert doesnt_open_at_boot(1999, 'udp')

        host.check_output(
            '%s dont-open-at-boot 1997 tcp' % port_script)  # extra don't open does nothing
        assert doesnt_open_at_boot(22, 'tcp')  # hardcoded
        assert doesnt_open_at_boot(22, 'udp')
        assert opens_at_boot(1995, 'tcp')
        assert opens_at_boot(1996, 'udp')
        assert doesnt_open_at_boot(1997, 'tcp')
        assert doesnt_open_at_boot(1998, 'tcp')
        assert doesnt_open_at_boot(1999, 'udp')
        assert is_open(22, 'tcp')  # hardcoded
        assert is_closed(22, 'udp')
        assert is_closed(1995, 'tcp')
        assert is_closed(1996, 'udp')
        assert is_closed(1997, 'tcp')
        assert is_open(1998, 'tcp')
        assert is_open(1999, 'udp')

        # Check that open at boot works: after reboot only the boot-open
        # files (1995/tcp, 1996/udp) plus hardcoded 22/tcp should be open
        vagrant.reboot(hostname)
        host.check_output('tmux new-session -s s1 -d nc -l -p 1995')
        host.check_output('tmux new-session -s s2 -d nc -l -k -u -p 1996')
        host.check_output('tmux new-session -s s3 -d nc -l -p 1997')
        host.check_output('tmux new-session -s s4 -d nc -l -p 1998')
        host.check_output('tmux new-session -s s5 -d nc -l -k -u -p 1999')
        assert doesnt_open_at_boot(22, 'tcp')  # hardcoded
        assert doesnt_open_at_boot(22, 'udp')
        assert opens_at_boot(1995, 'tcp')
        assert opens_at_boot(1996, 'udp')
        assert doesnt_open_at_boot(1997, 'tcp')
        assert doesnt_open_at_boot(1998, 'tcp')
        assert doesnt_open_at_boot(1999, 'udp')
        assert is_open(22, 'tcp')  # hardcoded
        assert is_closed(22, 'udp')
        assert is_open(1995, 'tcp')
        assert is_open(1996, 'udp')
        assert is_closed(1997, 'tcp')
        assert is_closed(1998, 'tcp')
        assert is_closed(1999, 'udp')
        net.assert_ports_open({
            router: {
                hostname: {
                    'tcp': {22, 1995},
                    'udp': set(),  # for some reason this can't detect the open UDP port
                }
            }
        })

    # Restore original state (shadowed files are restored by the context
    # manager; the reboot re-applies the original boot configuration)
    vagrant.reboot(hostname)
    @for_host_types('pi', 'ubuntu')
    def test_09_cron(self, hostname: str, hosts: Dict[str, Host], email: Email) -> None:
        """This tests the cron system, not any particular cronjob.

        Two fake long-running systemd services are installed and listed in
        pause-on-cron.d, so we can observe that the cron wrapper stops them
        for the duration of a run and restarts them afterwards.  Three test
        cronjobs are installed: two 'safe' ones (run by the pi-server wrapper,
        writing to ${LOG}) and one plain run-parts 'normal' one.
        """
        host = hosts[hostname]
        # Minimal unit that stays alive for an hour; enough to observe
        # stop/start behaviour around a cron run.
        systemd_template = """[Unit]
Description=Fake service
After=network.target
[Service]
ExecStart=/bin/sleep 1h
Restart=always
User=root
Group=root
[Install]
WantedBy=multi-user.target
"""
        with host.shadow_file('/etc/systemd/system/fake1.service') as fake1_file, \
                host.shadow_file('/etc/systemd/system/fake2.service') as fake2_file, \
                host.shadow_dir('/etc/pi-server/cron/cron-normal.d') as normal_dir, \
                host.shadow_dir('/etc/pi-server/cron/cron-safe.d') as safe_dir, \
                host.shadow_dir('/etc/pi-server/cron/pause-on-cron.d') as pause_dir, \
                host.shadow_file('/etc/pi-server/cron/last-run.log') as safe_log_file, \
                host.shadow_file('/cron-test-normal-output') as normal_out, \
                host.disable_login_emails():
            # Install and start the two fake services.
            with host.sudo():
                fake1_file.write(systemd_template)
                fake2_file.write(systemd_template)
                host.check_output('systemctl daemon-reload')
                host.check_output('systemctl start fake1.service')
                host.check_output('systemctl start fake2.service')
            fake1_service = host.service('fake1.service')
            fake2_service = host.service('fake2.service')
            assert fake1_service.is_running
            assert fake2_service.is_running

            # Register both services to be paused while crons run.
            with host.sudo():
                pause_dir.file('fake1.service').write('')
                pause_dir.file('fake2.service').write('')

            safe_cron1 = safe_dir.file('safe1')
            safe_cron2 = safe_dir.file('safe2')
            normal_cron = normal_dir.file('normal')

            # Successful run
            email.clear()
            run_stamp = 'good'
            with host.sudo():
                safe_cron1.write('sleep 5\necho \'safe1 %s\' >> "${LOG}"' % run_stamp)
                safe_cron2.write('sleep 5\necho \'safe2 %s\' >> "${LOG}"' % run_stamp)
                normal_cron.write('#!/bin/bash\nsleep 5\necho \'normal %s\' > \'%s\'' % (
                    run_stamp, normal_out.path))
                # run-parts only executes executable files.
                host.check_output('chmod a+x %s' % normal_cron.path)
            with host.run_crons():
                # While the crons are running, the paused services must be down.
                time.sleep(5)
                assert not fake1_service.is_running
                assert not fake2_service.is_running
            # ...and must be restarted once the run is over.
            assert fake1_service.is_running
            assert fake2_service.is_running
            safe_log = Lines(safe_log_file.content_string)
            assert safe_log.contains(r'Stopping services...')
            assert safe_log.contains(r'Stopped services$')
            assert safe_log.contains(r"STARTED 'safe1' at .*")
            assert safe_log.contains(r'safe1 good')
            assert safe_log.contains(r"FINISHED 'safe1' at .*")
            assert safe_log.contains(r"STARTED 'safe2' at .*")
            assert safe_log.contains(r'safe2 good')
            assert safe_log.contains(r"FINISHED 'safe2' at .*")
            assert safe_log.contains(r'Starting services...')
            assert safe_log.contains(r'Started services')
            assert normal_out.content_string == 'normal good\n'
            # Give any failure email time to arrive before asserting none came.
            time.sleep(15)
            email.assert_emails([], only_from=hostname)

            # Run with failures
            email.clear()
            run_stamp = 'bad'
            with host.sudo():
                # safe1 succeeds but writes to stdout (captured by cron email);
                # safe2 fails (exit status of 'false').
                safe_cron1.write(
                    'sleep 5\necho \'safe1 %s\' >> "${LOG}"\necho \'safe1 echo\'' % run_stamp)
                safe_cron2.write('sleep 5\necho \'safe2 %s\' >> "${LOG}"\nfalse' % run_stamp)
                normal_cron.write(
                    '#!/bin/bash\nsleep 5\necho \'normal %s\' > \'%s\'\necho \'normal echo\'' %
                    (run_stamp, normal_out.path))
            with host.run_crons():
                time.sleep(5)
                assert not fake1_service.is_running
                assert not fake2_service.is_running
            assert fake1_service.is_running
            assert fake2_service.is_running
            safe_log = Lines(safe_log_file.content_string)
            assert safe_log.contains(r'Stopping services...')
            assert safe_log.contains(r'Stopped services$')
            assert safe_log.contains(r"STARTED 'safe1' at .*")
            assert safe_log.contains(r'safe1 bad')
            assert safe_log.contains(r"FINISHED 'safe1' at .*")
            assert safe_log.contains(r"STARTED 'safe2' at .*")
            assert safe_log.contains(r'safe2 bad')
            # safe2 failed, so it never reaches FINISHED and is reported instead.
            assert not safe_log.contains(r"FINISHED 'safe2' at .*")
            assert safe_log.contains(r"Couldn't run safe2\.")
            assert safe_log.contains(r'Starting services...')
            assert safe_log.contains(r'Started services')
            assert normal_out.content_string == 'normal bad\n'
            time.sleep(15)
            # Expect the wrapper's failure notification plus the stock cron
            # email carrying the stray stdout of safe1/normal.
            email.assert_emails([
                {
                    'from': 'notification@%s.testbed' % hostname,
                    'to': 'fake@fake.testbed',
                    'subject': '[%s] Cron failed' % hostname,
                    'body_re': r"Couldn't run safe2.\n\n",
                },
                {
                    'from': '',
                    'to': '',
                    'subject': (('Cron <root@%s> test -x /usr/sbin/anacron || '
                                 '( cd / && run-parts --report /etc/cron.daily )') % hostname),
                    'body_re': r"/etc/cron.daily/pi-server:\nsafe1 echo\nnormal echo\n",
                },
            ], only_from=hostname)

            # Disable running
            with host.shadow_file('/etc/pi-server/cron/cron-disabled'):
                email.clear()
                run_stamp = 'disabled'
                with host.sudo():
                    safe_log_file.clear()
                    normal_out.clear()
                    safe_cron1.write('sleep 5\necho \'safe1 %s\' >> "${LOG}"' % run_stamp)
                    safe_cron2.write('sleep 5\necho \'safe2 %s\' >> "${LOG}"' % run_stamp)
                    normal_cron.write(
                        '#!/bin/bash\nsleep 5\necho \'normal %s\' > \'%s\'' % (
                            run_stamp, normal_out.path))
                with host.run_crons():
                    pass
                # Nothing ran: services untouched, logs/outputs still empty.
                assert fake1_service.is_running
                assert fake2_service.is_running
                assert safe_log_file.content_string == '\n'
                assert normal_out.content_string == '\n'
                time.sleep(15)
                email.assert_emails([], only_from=hostname)

            # Cleanup
            with host.sudo():
                host.check_output('systemctl stop fake1.service')
                host.check_output('systemctl stop fake2.service')
            assert not fake1_service.is_running
            assert not fake2_service.is_running
        with host.sudo():
            # Shadowed unit files are gone now; reload to forget them.
            host.check_output('systemctl daemon-reload')
    @for_host_types('pi', 'ubuntu')
    def test_10_automatic_updates(
            self, hostname: str,
            hosts: Dict[str, Host],
            addrs: Dict[str, str],
            email: Email) -> None:
        """Tests the unattended-update cronjob against a local aptly repo.

        Publishes test packages from the fake 'internet' host, points the
        target's sources.list at it, and checks the nothing-to-do, upgraded,
        and held-back (not upgraded) cases including their notification emails.
        """
        host = hosts[hostname]
        internet = hosts['internet']
        with host.shadow_file('/etc/apt/sources.list') as sources_list, \
                host.disable_login_emails():
            # Publish v1 of the test package and install it from our repo.
            internet.check_output('aptly repo add main aptly/pi-server-test_1_all.deb')
            internet.check_output('aptly publish update main')
            with host.sudo():
                sources_list.write(
                    'deb [trusted=yes] http://%s:8080/ main main' % addrs['internet'])
                host.check_output('apt-get update')
                host.check_output('apt-get install pi-server-test')

            # Nothing to update
            email.clear()
            with host.run_crons(disable_sources_list=False):
                pass
            assert host.package('pi-server-test').is_installed
            assert host.package('pi-server-test').version == '1'
            assert not host.package('pi-server-test2').is_installed
            email.assert_emails([], only_from=hostname)

            # One package to update
            internet.check_output('aptly repo add main aptly/pi-server-test_1.1_all.deb')
            internet.check_output('aptly publish update main')
            email.clear()
            with host.run_crons(disable_sources_list=False):
                pass
            assert host.package('pi-server-test').is_installed
            assert host.package('pi-server-test').version == '1.1'
            assert not host.package('pi-server-test2').is_installed
            email.assert_emails([{
                'from': 'notification@%s.testbed' % hostname,
                'to': 'fake@fake.testbed',
                'subject': '[%s] Installed 1 update' % hostname,
                'body_re': (r"(.*\n)*1 upgraded, 0 newly installed, "
                            r"0 to remove and 0 not upgraded.\n(.*\n)*"),
            }], only_from=hostname)

            # One not upgraded
            # test 1.2 depends on test2, so plain upgrade holds it back.
            internet.check_output('aptly repo add main aptly/pi-server-test_1.2_all.deb')
            internet.check_output('aptly repo add main aptly/pi-server-test2_1_all.deb')
            internet.check_output('aptly publish update main')
            email.clear()
            with host.run_crons(disable_sources_list=False):
                pass
            assert host.package('pi-server-test').is_installed
            assert host.package('pi-server-test').version == '1.1'
            assert not host.package('pi-server-test2').is_installed
            email.assert_emails([{
                'from': 'notification@%s.testbed' % hostname,
                'to': 'fake@fake.testbed',
                'subject': '[%s] 1 package not updated' % hostname,
                'body_re': (r"(.*\n)*0 upgraded, 0 newly installed, "
                            r"0 to remove and 1 not upgraded.\n(.*\n)*"),
            }], only_from=hostname)

            # Manual dist-upgrade
            with host.sudo():
                host.check_output('apt-get -y dist-upgrade')

            # Nothing to update
            email.clear()
            with host.run_crons(disable_sources_list=False):
                pass
            assert host.package('pi-server-test').is_installed
            assert host.package('pi-server-test').version == '1.2'
            assert host.package('pi-server-test2').is_installed
            assert host.package('pi-server-test2').version == '1'
            email.assert_emails([], only_from=hostname)

            # Cleanup
            with host.sudo():
                host.check_output('apt-get -y remove pi-server-test pi-server-test2')
            internet.check_output("aptly repo remove main 'Name (% *)'")
            internet.check_output('aptly publish update main')
        with host.sudo():
            # sources.list has been restored; refresh the package index.
            host.check_output('apt-get update')
    @for_host_types('pi', 'ubuntu')
    def test_11_disk_usage(
            self, hostname: str,
            hosts: Dict[str, Host],
            email: Email) -> None:
        """Tests the disk-usage alert cronjob.

        The alert should only fire while the root partition is >90% full,
        which we force by creating a large file.
        """
        host = hosts[hostname]
        with host.disable_login_emails():
            # Lots of space
            email.clear()
            with host.run_crons():
                pass
            email.assert_emails([], only_from=hostname)

            # Not much space
            try:
                # Fill the root partition to just above the alert threshold.
                host.make_bigfile('bigfile', '/')
                email.clear()
                with host.run_crons():
                    pass
                email.assert_emails([{
                    'from': 'notification@%s.testbed' % hostname,
                    'to': 'fake@fake.testbed',
                    'subject': '[%s] Storage space alert' % hostname,
                    'body_re': (r'A partition is above 90% full.\n(.*\n)*'
                                r'(/dev/sda1|/dev/mapper/vagrant--vg-root).*9[0-9]%.*/\n(.*\n)*'),
                }], only_from=hostname)
            finally:
                # Always free the space again, even if the assertions failed.
                host.check_output('rm -f bigfile')

            # Lots of space again
            email.clear()
            with host.run_crons():
                pass
            email.assert_emails([], only_from=hostname)
@for_host_types('pi', 'ubuntu')
def test_12_nginx(self, hostname: str, hosts: Dict[str, Host]) -> None:
"""This just installs the nginx service, not any sites."""
host = hosts[hostname]
assert host.service('nginx').is_enabled
assert host.service('nginx').is_running
| |
# coding=utf-8
# pylint: disable-msg=E1101,W0612
import pytest
import pandas as pd
import numpy as np
from pandas import (Series, date_range, isna, Index, Timestamp)
from pandas.compat import lrange, range
from pandas.core.dtypes.common import is_integer
from pandas.core.indexing import IndexingError
from pandas.tseries.offsets import BDay
from pandas.util.testing import (assert_series_equal)
import pandas.util.testing as tm
def test_getitem_boolean(test_data):
s = test_data.series
mask = s > s.median()
# passing list is OK
result = s[list(mask)]
expected = s[mask]
assert_series_equal(result, expected)
tm.assert_index_equal(result.index, s.index[mask])
def test_getitem_boolean_empty():
    """Boolean/label indexing with empty series and empty/misaligned masks.

    Fixes: the original defined a throwaway ``f`` twice (shadowing, flake8
    F811) and used the legacy callable form of ``pytest.raises``; the
    context-manager form is the supported idiom.
    """
    s = Series([], dtype=np.int64)
    s.index.name = 'index_name'
    s = s[s.isna()]
    assert s.index.name == 'index_name'
    assert s.dtype == np.int64

    # GH5877
    # indexing with empty series
    s = Series(['A', 'B'])
    expected = Series(np.nan, index=['C'], dtype=object)
    result = s[Series(['C'], dtype=object)]
    assert_series_equal(result, expected)

    s = Series(['A', 'B'])
    expected = Series(dtype=object, index=Index([], dtype='int64'))
    result = s[Series([], dtype=object)]
    assert_series_equal(result, expected)

    # invalid because of the boolean indexer
    # that's empty or not-aligned
    with pytest.raises(IndexingError):
        s[Series([], dtype=bool)]

    with pytest.raises(IndexingError):
        s[Series([True], dtype=bool)]
def test_getitem_boolean_object(test_data):
    """Object-dtype boolean masks behave like bool masks, but NaNs raise."""
    ser = test_data.series
    bool_mask = ser > ser.median()
    obj_mask = bool_mask.astype(object)

    # getitem: the object mask selects the same rows as the bool mask
    assert_series_equal(ser[obj_mask], ser[bool_mask])

    # setitem: both masks write to the same positions
    via_obj = ser.copy()
    via_bool = ser.copy()
    via_obj[obj_mask] = 5
    via_bool[bool_mask] = 5
    assert_series_equal(via_obj, via_bool)

    # nans raise exception
    obj_mask[5:10] = np.nan
    pytest.raises(Exception, ser.__getitem__, obj_mask)
    pytest.raises(Exception, ser.__setitem__, obj_mask, 5)
def test_getitem_setitem_boolean_corner(test_data):
    """A boolean mask whose index doesn't line up with the series raises."""
    ts = test_data.ts
    mask_shifted = ts.shift(1, freq=BDay()) > ts.median()

    # these used to raise...?? (both plain and .loc access paths)
    for op, args in ((ts.__getitem__, (mask_shifted,)),
                     (ts.__setitem__, (mask_shifted, 1)),
                     (ts.loc.__getitem__, (mask_shifted,)),
                     (ts.loc.__setitem__, (mask_shifted, 1))):
        pytest.raises(Exception, op, *args)
def test_setitem_boolean(test_data):
mask = test_data.series > test_data.series.median()
# similar indexed series
result = test_data.series.copy()
result[mask] = test_data.series * 2
expected = test_data.series * 2
assert_series_equal(result[mask], expected[mask])
# needs alignment
result = test_data.series.copy()
result[mask] = (test_data.series * 2)[0:5]
expected = (test_data.series * 2)[0:5].reindex_like(test_data.series)
expected[-mask] = test_data.series[mask]
assert_series_equal(result[mask], expected[mask])
def test_get_set_boolean_different_order(test_data):
ordered = test_data.series.sort_values()
# setting
copy = test_data.series.copy()
copy[ordered > 0] = 0
expected = test_data.series.copy()
expected[expected > 0] = 0
assert_series_equal(copy, expected)
# getting
sel = test_data.series[ordered > 0]
exp = test_data.series[test_data.series > 0]
assert_series_equal(sel, exp)
def test_where_unsafe():
    """Mask-assignment dtype behaviour: preserved, upcast, or rejected.

    Fixes: ``lrange`` came from the removed ``pandas.compat`` shim — on
    Python 3 it is just ``list(range(...))``; repeated throwaway ``f``
    definitions replaced with ``pytest.raises`` context managers.
    """
    # unsafe dtype changes: same-kind values keep the original dtype
    for dtype in [np.int8, np.int16, np.int32, np.int64, np.float16,
                  np.float32, np.float64]:
        s = Series(np.arange(10), dtype=dtype)
        mask = s < 5
        s[mask] = list(range(2, 7))
        expected = Series(list(range(2, 7)) + list(range(5, 10)), dtype=dtype)
        assert_series_equal(s, expected)
        assert s.dtype == expected.dtype

    # these are allowed operations, but are upcasted
    for dtype in [np.int64, np.float64]:
        s = Series(np.arange(10), dtype=dtype)
        mask = s < 5
        values = [2.5, 3.5, 4.5, 5.5, 6.5]
        s[mask] = values
        expected = Series(values + list(range(5, 10)), dtype='float64')
        assert_series_equal(s, expected)
        assert s.dtype == expected.dtype

    # GH 9731
    s = Series(np.arange(10), dtype='int64')
    mask = s > 5
    values = [2.5, 3.5, 4.5, 5.5]
    s[mask] = values
    expected = Series(list(range(6)) + values, dtype='float64')
    assert_series_equal(s, expected)

    # can't do these as we are forced to change the itemsize of the input
    # to something we cannot
    for dtype in [np.int8, np.int16, np.int32, np.float16, np.float32]:
        s = Series(np.arange(10), dtype=dtype)
        mask = s < 5
        values = [2.5, 3.5, 4.5, 5.5, 6.5]
        pytest.raises(Exception, s.__setitem__, tuple(mask), values)

    # GH3235
    s = Series(np.arange(10), dtype='int64')
    mask = s < 5
    s[mask] = list(range(2, 7))
    expected = Series(list(range(2, 7)) + list(range(5, 10)), dtype='int64')
    assert_series_equal(s, expected)
    assert s.dtype == expected.dtype

    s = Series(np.arange(10), dtype='int64')
    mask = s > 5
    s[mask] = [0] * 4
    expected = Series([0, 1, 2, 3, 4, 5] + [0] * 4, dtype='int64')
    assert_series_equal(s, expected)

    # wrong-length replacement lists are rejected
    s = Series(np.arange(10))
    mask = s > 5
    with pytest.raises(ValueError):
        s[mask] = [5, 4, 3, 2, 1]

    with pytest.raises(ValueError):
        s[mask] = [0] * 5

    # dtype changes
    s = Series([1, 2, 3, 4])
    result = s.where(s > 2, np.nan)
    expected = Series([np.nan, np.nan, 3, 4])
    assert_series_equal(result, expected)

    # GH 4667
    # setting with None changes dtype
    s = Series(range(10)).astype(float)
    s[8] = None
    result = s[8]
    assert isna(result)

    s = Series(range(10)).astype(float)
    s[s > 8] = None
    result = s[isna(s)]
    expected = Series(np.nan, index=[9])
    assert_series_equal(result, expected)
def test_where_raise_on_error_deprecation():
    """gh-14968: passing raise_on_error to where/mask warns FutureWarning."""
    s = Series(np.random.randn(5))
    cond = s > 0

    for method in (s.where, s.mask):
        with tm.assert_produces_warning(FutureWarning):
            method(cond, raise_on_error=True)
def test_where():
    """Basic Series.where: filtering, replacement, and cond alignment."""
    s = Series(np.random.randn(5))
    cond = s > 0

    # where + dropna keeps exactly the rows boolean indexing keeps
    assert_series_equal(s.where(cond).dropna(), s[cond])

    # replacing the masked-out values with -s yields abs()
    assert_series_equal(s.where(cond, -s), s.abs())

    # the result has the same shape and is a new object
    filtered = s.where(cond)
    assert filtered.shape == s.shape
    assert filtered is not s

    # test alignment: a shorter cond Series aligns, missing rows are False
    cond = Series([True, False, False, True, False], index=s.index)
    s2 = -(s.abs())

    expected = s2[cond].reindex(s2.index[:3]).reindex(s2.index)
    assert_series_equal(s2.where(cond[:3]), expected)

    expected = s2.abs()
    expected.iloc[0] = s2[0]
    assert_series_equal(s2.where(cond[:3], -s2), expected)
def test_where_error():
    """Invalid cond/other arguments to where, and mask-setitem length checks."""
    s = Series(np.random.randn(5))
    cond = s > 0

    # a scalar cond and a misaligned ndarray cond are both rejected
    pytest.raises(ValueError, s.where, 1)
    pytest.raises(ValueError, s.where, cond[:3].values, -s)

    # GH 2745: list-of-bool setitem
    s = Series([1, 2])
    s[[True, False]] = [0, 1]
    assert_series_equal(s, Series([0, 2]))

    # failures: replacement values of the wrong length
    for bad_values in ([0, 2, 3], []):
        pytest.raises(ValueError, s.__setitem__,
                      tuple([[[True, False]]]), bad_values)
@pytest.mark.parametrize('klass', [list, tuple, np.array, Series])
def test_where_array_like(klass):
    # see gh-15414: any boolean array-like works as the condition
    ser = Series([1, 2, 3])
    cond = [False, True, True]
    expected = Series([np.nan, 2, 3])

    assert_series_equal(ser.where(klass(cond)), expected)
@pytest.mark.parametrize('cond', [
    [1, 0, 1],
    Series([2, 5, 7]),
    ["True", "False", "True"],
    [Timestamp("2017-01-01"), pd.NaT, Timestamp("2017-01-02")]
])
def test_where_invalid_input(cond):
    # see gh-15414: only boolean arrays accepted
    ser = Series([1, 2, 3])

    with tm.assert_raises_regex(ValueError,
                                "Boolean array expected for the condition"):
        ser.where(cond)

    with tm.assert_raises_regex(ValueError,
                                "Array conditional must be same shape as self"):
        ser.where([True])
def test_where_ndframe_align():
    """List/array conds must match in length; Series conds align instead."""
    msg = "Array conditional must be same shape as self"
    ser = Series([1, 2, 3])

    # too-short raw cond raises; wrapped in a Series it aligns (NaN -> False)
    short_cond = [True]
    with tm.assert_raises_regex(ValueError, msg):
        ser.where(short_cond)
    tm.assert_series_equal(ser.where(Series(short_cond)),
                           Series([1, np.nan, np.nan]))

    # too-long raw cond raises; wrapped in a Series it aligns too
    long_cond = np.array([False, True, False, True])
    with tm.assert_raises_regex(ValueError, msg):
        ser.where(long_cond)
    tm.assert_series_equal(ser.where(Series(long_cond)),
                           Series([np.nan, 2, np.nan]))
def test_where_setitem_invalid():
    """GH 2702: invalid list assignment through slice/list/scalar indexers.

    Fix: the original redefined a throwaway ``f`` five times (flake8 F811)
    and used the legacy callable form of ``pytest.raises``; the
    context-manager form is the supported idiom.
    """
    # slice
    s = Series(list('abc'))
    with pytest.raises(ValueError):
        s[0:3] = list(range(27))

    s[0:3] = list(range(3))
    expected = Series([0, 1, 2])
    assert_series_equal(s.astype(np.int64), expected)

    # slice with step
    s = Series(list('abcdef'))
    with pytest.raises(ValueError):
        s[0:4:2] = list(range(27))

    s = Series(list('abcdef'))
    s[0:4:2] = list(range(2))
    expected = Series([0, 'b', 1, 'd', 'e', 'f'])
    assert_series_equal(s, expected)

    # neg slices
    s = Series(list('abcdef'))
    with pytest.raises(ValueError):
        s[:-1] = list(range(27))

    s[-3:-1] = list(range(2))
    expected = Series(['a', 'b', 'c', 0, 1, 'f'])
    assert_series_equal(s, expected)

    # list
    s = Series(list('abc'))
    with pytest.raises(ValueError):
        s[[0, 1, 2]] = list(range(27))

    s = Series(list('abc'))
    with pytest.raises(ValueError):
        s[[0, 1, 2]] = list(range(2))

    # scalar
    s = Series(list('abc'))
    s[0] = list(range(10))
    expected = Series([list(range(10)), 'b', 'c'])
    assert_series_equal(s, expected)
@pytest.mark.parametrize('size', range(2, 6))
@pytest.mark.parametrize('mask', [
    [True, False, False, False, False],
    [True, False],
    [False]
])
@pytest.mark.parametrize('item', [
    # Fix: np.float was a deprecated alias for the builtin float (removed in
    # NumPy 1.24); np.float64 is the explicit, equivalent dtype.
    2.0, np.nan, np.finfo(np.float64).max, np.finfo(np.float64).min
])
# Test numpy arrays, lists and tuples as the input to be
# broadcast
@pytest.mark.parametrize('box', [
    lambda x: np.array([x]),
    lambda x: [x],
    lambda x: (x,)
])
def test_broadcast(size, mask, item, box):
    """A single-element value broadcasts across a boolean selection for
    setitem, where, and mask."""
    selection = np.resize(mask, size)

    data = np.arange(size, dtype=float)

    # Construct the expected series by taking the source
    # data or item based on the selection
    expected = Series([item if use_item else data[
        i] for i, use_item in enumerate(selection)])

    s = Series(data)
    s[selection] = box(item)
    assert_series_equal(s, expected)

    s = Series(data)
    result = s.where(~selection, box(item))
    assert_series_equal(result, expected)

    s = Series(data)
    result = s.mask(selection, box(item))
    assert_series_equal(result, expected)
def test_where_inplace():
    """where(..., inplace=True) matches the out-of-place result."""
    s = Series(np.random.randn(5))
    cond = s > 0

    inplace_result = s.copy()
    inplace_result.where(cond, inplace=True)
    assert_series_equal(inplace_result.dropna(), s[cond])
    assert_series_equal(inplace_result, s.where(cond))

    inplace_result = s.copy()
    inplace_result.where(cond, -s, inplace=True)
    assert_series_equal(inplace_result, s.where(cond, -s))
def test_where_dups():
    """GH 4550 / GH 4548: where and masked updates on a duplicated index."""
    # GH 4550
    # where used to crash with dups in the index
    half = Series(list(range(3)))
    comb = pd.concat([half, Series(list(range(3)))])

    result = comb.where(comb < 2)
    expected = Series([0, 1, np.nan, 0, 1, np.nan],
                      index=[0, 1, 2, 0, 1, 2])
    assert_series_equal(result, expected)

    # GH 4548
    # inplace updating not working with dups
    comb[comb < 1] = 5
    expected = Series([5, 1, 2, 5, 1, 2], index=[0, 1, 2, 0, 1, 2])
    assert_series_equal(comb, expected)

    comb[comb < 2] += 10
    expected = Series([5, 11, 2, 5, 11, 2], index=[0, 1, 2, 0, 1, 2])
    assert_series_equal(comb, expected)
def test_where_numeric_with_string():
    """GH 9280: string replacements force an object-dtype result."""
    s = pd.Series([1, 2, 3])

    # scalar, list, and ndarray string replacements all behave the same
    for other in ('X', ['X', 'Y', 'Z'], np.array(['X', 'Y', 'Z'])):
        w = s.where(s > 1, other)

        assert not is_integer(w[0])
        assert is_integer(w[1])
        assert is_integer(w[2])
        assert isinstance(w[0], str)
        assert w.dtype == 'object'
def test_where_timedelta_coerce():
    """Incompatible replacements coerce a timedelta series to int/object."""
    s = Series([1, 2], dtype='timedelta64[ns]')
    expected = Series([10, 10])
    mask = np.array([False, False])

    # int/float scalars and lists all coerce to the plain numeric result
    for other in ([10, 10], 10, 10.0, [10.0, 10.0]):
        assert_series_equal(s.where(mask, other), expected)

    # a NaN in the replacement forces object dtype
    rs = s.where(mask, [10.0, np.nan])
    expected = Series([10, None], dtype='object')
    assert_series_equal(rs, expected)
def test_where_datetime_conversion():
    """Incompatible replacements coerce a datetime series; GH 15701 keeps tz."""
    s = Series(date_range('20130102', periods=2))
    expected = Series([10, 10])
    mask = np.array([False, False])

    # int/float scalars and lists all coerce to the plain numeric result
    for other in ([10, 10], 10, 10.0, [10.0, 10.0]):
        assert_series_equal(s.where(mask, other), expected)

    # a NaN in the replacement forces object dtype
    rs = s.where(mask, [10.0, np.nan])
    expected = Series([10, None], dtype='object')
    assert_series_equal(rs, expected)

    # GH 15701
    timestamps = ['2016-12-31 12:00:04+00:00',
                  '2016-12-31 12:00:04.010000+00:00']
    s = Series([pd.Timestamp(t) for t in timestamps])
    rs = s.where(Series([False, True]))
    assert_series_equal(rs, Series([pd.NaT, s[1]]))
def test_where_dt_tz_values(tz_naive_fixture):
ser1 = pd.Series(pd.DatetimeIndex(['20150101', '20150102', '20150103'],
tz=tz_naive_fixture))
ser2 = pd.Series(pd.DatetimeIndex(['20160514', '20160515', '20160516'],
tz=tz_naive_fixture))
mask = pd.Series([True, True, False])
result = ser1.where(mask, ser2)
exp = pd.Series(pd.DatetimeIndex(['20150101', '20150102', '20160516'],
tz=tz_naive_fixture))
assert_series_equal(exp, result)
def test_mask():
    """mask(cond) is where(~cond); invalid arguments raise as for where."""
    # compare with tested results in test_where
    s = Series(np.random.randn(5))
    cond = s > 0

    assert_series_equal(s.where(~cond, np.nan), s.mask(cond))
    assert_series_equal(s.where(~cond), s.mask(cond))
    assert_series_equal(s.where(~cond, -s), s.mask(cond, -s))

    # a shorter cond Series aligns identically through both paths
    cond = Series([True, False, False, True, False], index=s.index)
    s2 = -(s.abs())
    assert_series_equal(s2.where(~cond[:3]), s2.mask(cond[:3]))
    assert_series_equal(s2.where(~cond[:3], -s2), s2.mask(cond[:3], -s2))

    # invalid cond arguments
    pytest.raises(ValueError, s.mask, 1)
    pytest.raises(ValueError, s.mask, cond[:3].values, -s)

    # dtype changes
    s = Series([1, 2, 3, 4])
    result = s.mask(s > 2, np.nan)
    assert_series_equal(result, Series([1, 2, np.nan, np.nan]))
def test_mask_inplace():
    """mask(..., inplace=True) matches the out-of-place result."""
    s = Series(np.random.randn(5))
    cond = s > 0

    masked = s.copy()
    masked.mask(cond, inplace=True)
    assert_series_equal(masked.dropna(), s[~cond])
    assert_series_equal(masked, s.mask(cond))

    masked = s.copy()
    masked.mask(cond, -s, inplace=True)
    assert_series_equal(masked, s.mask(cond, -s))
| |
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A library to train Inception using multiple replicas with synchronous update.
Please see accompanying README.md for details and instructions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import os.path
import time
import numpy as np
import tensorflow as tf
from inception import image_processing
from inception import inception_model as inception
from inception.slim import slim
# Command-line configuration for distributed training.  All values are read
# through the global FLAGS object at runtime.
FLAGS = tf.app.flags.FLAGS

tf.app.flags.DEFINE_string('job_name', '', 'One of "ps", "worker"')
tf.app.flags.DEFINE_string('ps_hosts', '',
                           """Comma-separated list of hostname:port for the """
                           """parameter server jobs. e.g. """
                           """'machine1:2222,machine2:1111,machine2:2222'""")
tf.app.flags.DEFINE_string('worker_hosts', '',
                           """Comma-separated list of hostname:port for the """
                           """worker jobs. e.g. """
                           """'machine1:2222,machine2:1111,machine2:2222'""")

tf.app.flags.DEFINE_string('train_dir', '/tmp/imagenet_train',
                           """Directory where to write event logs """
                           """and checkpoint.""")
tf.app.flags.DEFINE_integer('max_steps', 1000000, 'Number of batches to run.')
tf.app.flags.DEFINE_string('subset', 'train', 'Either "train" or "validation".')
tf.app.flags.DEFINE_boolean('log_device_placement', False,
                            'Whether to log device placement.')

# Task ID is used to select the chief and also to access the local_step for
# each replica to check staleness of the gradients in sync_replicas_optimizer.
tf.app.flags.DEFINE_integer(
    'task_id', 0, 'Task ID of the worker/replica running the training.')

# More details can be found in the sync_replicas_optimizer class:
# tensorflow/python/training/sync_replicas_optimizer.py
tf.app.flags.DEFINE_integer('num_replicas_to_aggregate', -1,
                            """Number of gradients to collect before """
                            """updating the parameters.""")
tf.app.flags.DEFINE_integer('save_interval_secs', 10 * 60,
                            'Save interval seconds.')
tf.app.flags.DEFINE_integer('save_summaries_secs', 180,
                            'Save summaries interval seconds.')

# **IMPORTANT**
# Please note that this learning rate schedule is heavily dependent on the
# hardware architecture, batch size and any changes to the model architecture
# specification. Selecting a finely tuned learning rate schedule is an
# empirical process that requires some experimentation. Please see README.md
# more guidance and discussion.
#
# Learning rate decay factor selected from https://arxiv.org/abs/1604.00981
tf.app.flags.DEFINE_float('initial_learning_rate', 0.045,
                          'Initial learning rate.')
tf.app.flags.DEFINE_float('num_epochs_per_decay', 2.0,
                          'Epochs after which learning rate decays.')
tf.app.flags.DEFINE_float('learning_rate_decay_factor', 0.94,
                          'Learning rate decay factor.')

# Constants dictating the learning rate schedule.
RMSPROP_DECAY = 0.9                # Decay term for RMSProp.
RMSPROP_MOMENTUM = 0.9             # Momentum in RMSProp.
RMSPROP_EPSILON = 1.0              # Epsilon term for RMSProp.
def train(target, dataset, cluster_spec):
  """Train Inception on a dataset for a number of steps.

  Args:
    target: Session target (as produced by tf.train.Server) to connect to.
    dataset: Dataset object providing training examples and the class count.
    cluster_spec: tf.train.ClusterSpec describing the ps/worker cluster.
  """
  # Number of workers and parameter servers are inferred from the workers and
  # ps hosts string.
  num_workers = len(cluster_spec.as_dict()['worker'])
  num_parameter_servers = len(cluster_spec.as_dict()['ps'])
  # If no value is given, num_replicas_to_aggregate defaults to be the number of
  # workers.
  if FLAGS.num_replicas_to_aggregate == -1:
    num_replicas_to_aggregate = num_workers
  else:
    num_replicas_to_aggregate = FLAGS.num_replicas_to_aggregate

  # Both should be greater than 0 in a distributed training.
  assert num_workers > 0 and num_parameter_servers > 0, (' num_workers and '
                                                         'num_parameter_servers'
                                                         ' must be > 0.')

  # Choose worker 0 as the chief. Note that any worker could be the chief
  # but there should be only one chief.
  is_chief = (FLAGS.task_id == 0)

  # Ops are assigned to worker by default.
  with tf.device('/job:worker/task:%d' % FLAGS.task_id):
    # Variables and its related init/assign ops are assigned to ps.
    with slim.scopes.arg_scope(
        [slim.variables.variable, slim.variables.global_step],
        device=slim.variables.VariableDeviceChooser(num_parameter_servers)):
      # Create a variable to count the number of train() calls. This equals the
      # number of updates applied to the variables.
      global_step = slim.variables.global_step()

      # Calculate the learning rate schedule.
      num_batches_per_epoch = (dataset.num_examples_per_epoch() /
                               FLAGS.batch_size)
      # Decay steps need to be divided by the number of replicas to aggregate.
      decay_steps = int(num_batches_per_epoch * FLAGS.num_epochs_per_decay /
                        num_replicas_to_aggregate)

      # Decay the learning rate exponentially based on the number of steps.
      lr = tf.train.exponential_decay(FLAGS.initial_learning_rate,
                                      global_step,
                                      decay_steps,
                                      FLAGS.learning_rate_decay_factor,
                                      staircase=True)
      # Add a summary to track the learning rate.
      tf.scalar_summary('learning_rate', lr)

      # Create an optimizer that performs gradient descent.
      opt = tf.train.RMSPropOptimizer(lr,
                                      RMSPROP_DECAY,
                                      momentum=RMSPROP_MOMENTUM,
                                      epsilon=RMSPROP_EPSILON)

      # Input pipeline with training-time data augmentation.
      images, labels = image_processing.distorted_inputs(
          dataset,
          batch_size=FLAGS.batch_size,
          num_preprocess_threads=FLAGS.num_preprocess_threads)

      # Number of classes in the Dataset label set plus 1.
      # Label 0 is reserved for an (unused) background class.
      num_classes = dataset.num_classes() + 1
      logits = inception.inference(images, num_classes, for_training=True)
      # Add classification loss.
      inception.loss(logits, labels)

      # Gather all of the losses including regularization losses.
      losses = tf.get_collection(slim.losses.LOSSES_COLLECTION)
      losses += tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)

      total_loss = tf.add_n(losses, name='total_loss')

      if is_chief:
        # Compute the moving average of all individual losses and the
        # total loss.
        loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')
        loss_averages_op = loss_averages.apply(losses + [total_loss])

        # Attach a scalar summmary to all individual losses and the total loss;
        # do the same for the averaged version of the losses.
        for l in losses + [total_loss]:
          loss_name = l.op.name
          # Name each loss as '(raw)' and name the moving average version of the
          # loss as the original loss name.
          tf.scalar_summary(loss_name + ' (raw)', l)
          tf.scalar_summary(loss_name, loss_averages.average(l))

        # Add dependency to compute loss_averages.
        with tf.control_dependencies([loss_averages_op]):
          total_loss = tf.identity(total_loss)

      # Track the moving averages of all trainable variables.
      # Note that we maintain a 'double-average' of the BatchNormalization
      # global statistics.
      # This is not needed when the number of replicas are small but important
      # for synchronous distributed training with tens of workers/replicas.
      exp_moving_averager = tf.train.ExponentialMovingAverage(
          inception.MOVING_AVERAGE_DECAY, global_step)

      variables_to_average = (
          tf.trainable_variables() + tf.moving_average_variables())

      # Add histograms for model variables.
      for var in variables_to_average:
        tf.histogram_summary(var.op.name, var)

      # Create synchronous replica optimizer.
      opt = tf.train.SyncReplicasOptimizer(
          opt,
          replicas_to_aggregate=num_replicas_to_aggregate,
          replica_id=FLAGS.task_id,
          total_num_replicas=num_workers,
          variable_averages=exp_moving_averager,
          variables_to_average=variables_to_average)

      batchnorm_updates = tf.get_collection(slim.ops.UPDATE_OPS_COLLECTION)
      assert batchnorm_updates, 'Batchnorm updates are missing'
      batchnorm_updates_op = tf.group(*batchnorm_updates)
      # Add dependency to compute batchnorm_updates.
      with tf.control_dependencies([batchnorm_updates_op]):
        total_loss = tf.identity(total_loss)

      # Compute gradients with respect to the loss.
      grads = opt.compute_gradients(total_loss)

      # Add histograms for gradients.
      for grad, var in grads:
        if grad is not None:
          tf.histogram_summary(var.op.name + '/gradients', grad)

      apply_gradients_op = opt.apply_gradients(grads, global_step=global_step)

      with tf.control_dependencies([apply_gradients_op]):
        train_op = tf.identity(total_loss, name='train_op')

      # Get chief queue_runners, init_tokens and clean_up_op, which is used to
      # synchronize replicas.
      # More details can be found in sync_replicas_optimizer.
      chief_queue_runners = [opt.get_chief_queue_runner()]
      init_tokens_op = opt.get_init_tokens_op()
      clean_up_op = opt.get_clean_up_op()

      # Create a saver.
      saver = tf.train.Saver()

      # Build the summary operation based on the TF collection of Summaries.
      summary_op = tf.merge_all_summaries()

      # Build an initialization operation to run below.
      init_op = tf.initialize_all_variables()

      # We run the summaries in the same thread as the training operations by
      # passing in None for summary_op to avoid a summary_thread being started.
      # Running summaries and training operations in parallel could run out of
      # GPU memory.
      sv = tf.train.Supervisor(is_chief=is_chief,
                               logdir=FLAGS.train_dir,
                               init_op=init_op,
                               summary_op=None,
                               global_step=global_step,
                               saver=saver,
                               save_model_secs=FLAGS.save_interval_secs)

      tf.logging.info('%s Supervisor' % datetime.now())

      sess_config = tf.ConfigProto(
          allow_soft_placement=True,
          log_device_placement=FLAGS.log_device_placement)

      # Get a session.
      sess = sv.prepare_or_wait_for_session(target, config=sess_config)

      # Start the queue runners.
      queue_runners = tf.get_collection(tf.GraphKeys.QUEUE_RUNNERS)
      sv.start_queue_runners(sess, queue_runners)
      tf.logging.info('Started %d queues for processing input data.',
                      len(queue_runners))

      # Only the chief initializes the sync tokens that pace the replicas.
      if is_chief:
        sv.start_queue_runners(sess, chief_queue_runners)
        sess.run(init_tokens_op)

      # Train, checking for Nans. Concurrently run the summary operation at a
      # specified interval. Note that the summary_op and train_op never run
      # simultaneously in order to prevent running out of GPU memory.
      next_summary_time = time.time() + FLAGS.save_summaries_secs
      while not sv.should_stop():
        try:
          start_time = time.time()
          loss_value, step = sess.run([train_op, global_step])
          assert not np.isnan(loss_value), 'Model diverged with loss = NaN'
          if step > FLAGS.max_steps:
            break
          duration = time.time() - start_time

          if step % 30 == 0:
            examples_per_sec = FLAGS.batch_size / float(duration)
            format_str = ('Worker %d: %s: step %d, loss = %.2f'
                          '(%.1f examples/sec; %.3f  sec/batch)')
            tf.logging.info(format_str %
                            (FLAGS.task_id, datetime.now(), step, loss_value,
                             examples_per_sec, duration))

          # Determine if the summary_op should be run on the chief worker.
          if is_chief and next_summary_time < time.time():
            tf.logging.info('Running Summary operation on the chief.')
            summary_str = sess.run(summary_op)
            sv.summary_computed(sess, summary_str)
            tf.logging.info('Finished running Summary operation.')

            # Determine the next time for running the summary.
            next_summary_time += FLAGS.save_summaries_secs
        except:
          # NOTE(review): bare except is deliberate here — any failure
          # (including KeyboardInterrupt) must run clean_up_op on the chief
          # before re-raising, or the other replicas hang on the sync queues.
          if is_chief:
            tf.logging.info('About to execute sync_clean_up_op!')
            sess.run(clean_up_op)
          raise

      # Stop the supervisor.  This also waits for service threads to finish.
      sv.stop()

      # Save after the training ends.
      if is_chief:
        saver.save(sess,
                   os.path.join(FLAGS.train_dir, 'model.ckpt'),
                   global_step=global_step)
| |
"""
sentry.tagstore.snuba.backend
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2018 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import functools
from collections import defaultdict
from datetime import timedelta
from dateutil.parser import parse as parse_datetime
from django.utils import timezone
import six
from sentry.tagstore import TagKeyStatus
from sentry.tagstore.base import TagStorage
from sentry.tagstore.exceptions import (
GroupTagKeyNotFound,
GroupTagValueNotFound,
TagKeyNotFound,
TagValueNotFound,
)
from sentry.tagstore.types import TagKey, TagValue, GroupTagKey, GroupTagValue
from sentry.utils import snuba
from sentry.utils.dates import to_timestamp
# Column used for the first_seen/last_seen min/max aggregations in snuba.
SEEN_COLUMN = 'timestamp'
# Raw snuba rows carry datetimes as strings; these transformers convert them
# back into datetime objects (applied by fix_tag_value_data below).
tag_value_data_transformers = {
    'first_seen': parse_datetime,
    'last_seen': parse_datetime,
}
def fix_tag_value_data(data):
    """Normalize a raw snuba row in place, converting string-typed datetime
    fields (first_seen/last_seen) into datetime objects, and return it."""
    for field, transform in tag_value_data_transformers.items():
        if field in data:
            data[field] = transform(data[field])
    return data
class SnubaTagStorage(TagStorage):
    """TagStorage implementation that answers tag queries by aggregating raw
    event data in Snuba on the fly, instead of reading denormalized tag
    tables.  Write-oriented and Django-search-only methods raise
    NotImplementedError.

    The name-mangled helpers (``__get_tag_key`` etc.) hold the shared query
    logic; the public methods map the TagStorage API onto them, with
    ``group_id is None`` selecting the project-wide (TagKey/TagValue) flavor
    and a concrete group id selecting the Group* flavor.
    """

    def get_time_range(self, days=90):
        """
        Returns the default (start, end) time range for querying snuba.
        """
        # TODO this should use the per-project retention figure to limit
        # the query to looking at only the retention window for the project.
        end = timezone.now()
        return (end - timedelta(days=days), end)

    def __get_tag_key(self, project_id, group_id, environment_id, key):
        """Aggregate values_seen/count for one tag key.

        Returns a TagKey, or a GroupTagKey when group_id is not None.
        Raises TagKeyNotFound / GroupTagKeyNotFound when no events match.
        """
        start, end = self.get_time_range()
        tag = 'tags[{}]'.format(key)
        filters = {
            'project_id': [project_id],
            'environment': [environment_id],
        }
        if group_id is not None:
            filters['issue'] = [group_id]
        # Only events where the tag is actually set.
        conditions = [[tag, '!=', '']]
        aggregations = [
            ['uniq', tag, 'values_seen'],
            ['count()', '', 'count']
        ]
        result = snuba.query(start, end, [], conditions, filters, aggregations,
                             referrer='tagstore.__get_tag_key')
        if result is None or result['count'] == 0:
            # Raises the exception class itself (no message/instance args).
            raise TagKeyNotFound if group_id is None else GroupTagKeyNotFound
        else:
            data = {
                'key': key,
                'values_seen': result['values_seen'],
                'count': result['count'],
            }
            if group_id is None:
                return TagKey(**data)
            else:
                return GroupTagKey(group_id=group_id, **data)

    def __get_tag_key_and_top_values(self, project_id, group_id, environment_id,
                                     key, limit=3, raise_on_empty=True):
        """Like __get_tag_key, but also attaches the top ``limit`` values
        (by count) as TagValue/GroupTagValue instances in ``top_values``.

        ``totals=True`` makes snuba return overall aggregates alongside the
        per-value rows.
        """
        start, end = self.get_time_range()
        tag = 'tags[{}]'.format(key)
        filters = {
            'project_id': [project_id],
            'environment': [environment_id],
        }
        if group_id is not None:
            filters['issue'] = [group_id]
        conditions = [[tag, '!=', '']]
        aggregations = [
            ['uniq', tag, 'values_seen'],
            ['count()', '', 'count'],
            ['min', SEEN_COLUMN, 'first_seen'],
            ['max', SEEN_COLUMN, 'last_seen'],
        ]
        result, totals = snuba.query(
            start, end, [tag], conditions, filters, aggregations,
            orderby='-count', limit=limit, totals=True,
            referrer='tagstore.__get_tag_key_and_top_values'
        )
        if raise_on_empty and (result is None or totals['count'] == 0):
            raise TagKeyNotFound if group_id is None else GroupTagKeyNotFound
        else:
            # Pick constructors once so the same assembly code serves both
            # the project-wide and per-group flavors.
            if group_id is None:
                key_ctor = TagKey
                value_ctor = TagValue
            else:
                key_ctor = functools.partial(GroupTagKey, group_id=group_id)
                value_ctor = functools.partial(GroupTagValue, group_id=group_id)
            top_values = [
                value_ctor(
                    key=key,
                    value=value,
                    times_seen=data['count'],
                    first_seen=parse_datetime(data['first_seen']),
                    last_seen=parse_datetime(data['last_seen']),
                ) for value, data in six.iteritems(result)
            ]
            return key_ctor(
                key=key,
                values_seen=totals['values_seen'],
                count=totals['count'],
                top_values=top_values
            )

    def __get_tag_keys(self, project_id, group_id, environment_id, limit=1000):
        """Return the set of TagKey/GroupTagKey for all tag keys seen on
        matching events, ordered (pre-set) by number of distinct values."""
        start, end = self.get_time_range()
        filters = {
            'project_id': [project_id],
            'environment': [environment_id],
        }
        if group_id is not None:
            filters['issue'] = [group_id]
        aggregations = [
            ['uniq', 'tags_value', 'values_seen'],
            ['count()', '', 'count']
        ]
        # TODO should this be sorted by count() descending, rather than the
        # number of unique values
        result = snuba.query(start, end, ['tags_key'], [], filters,
                             aggregations, limit=limit, orderby='-values_seen',
                             referrer='tagstore.__get_tag_keys')
        if group_id is None:
            ctor = TagKey
        else:
            ctor = functools.partial(GroupTagKey, group_id=group_id)
        # Keys with zero distinct values are dropped.
        return set([
            ctor(
                key=key,
                values_seen=data['values_seen'],
                count=data['count'],
            ) for key, data in six.iteritems(result) if data['values_seen']
        ])

    def __get_tag_value(self, project_id, group_id, environment_id, key, value):
        """Aggregate stats for one (key, value) pair.

        Returns a TagValue, or a GroupTagValue when group_id is not None.
        Raises TagValueNotFound / GroupTagValueNotFound when no events match.
        """
        start, end = self.get_time_range()
        tag = 'tags[{}]'.format(key)
        filters = {
            'project_id': [project_id],
            'environment': [environment_id],
        }
        if group_id is not None:
            filters['issue'] = [group_id]
        conditions = [[tag, '=', value]]
        aggregations = [
            ['count()', '', 'times_seen'],
            ['min', SEEN_COLUMN, 'first_seen'],
            ['max', SEEN_COLUMN, 'last_seen'],
        ]
        data = snuba.query(start, end, [], conditions, filters, aggregations,
                           referrer='tagstore.__get_tag_value')
        if not data['times_seen'] > 0:
            raise TagValueNotFound if group_id is None else GroupTagValueNotFound
        else:
            data.update({
                'key': key,
                'value': value,
            })
            if group_id is None:
                return TagValue(**fix_tag_value_data(data))
            else:
                return GroupTagValue(group_id=group_id, **fix_tag_value_data(data))

    def get_tag_key(self, project_id, environment_id, key, status=TagKeyStatus.VISIBLE):
        """Project-wide tag key with its top values; only VISIBLE supported."""
        assert status is TagKeyStatus.VISIBLE
        return self.__get_tag_key_and_top_values(project_id, None, environment_id, key)

    def get_tag_keys(self, project_id, environment_id, status=TagKeyStatus.VISIBLE):
        """Set of project-wide tag keys; only VISIBLE supported."""
        assert status is TagKeyStatus.VISIBLE
        return self.__get_tag_keys(project_id, None, environment_id)

    def get_tag_value(self, project_id, environment_id, key, value):
        """Project-wide stats for a single (key, value) pair."""
        return self.__get_tag_value(project_id, None, environment_id, key, value)

    def get_tag_values(self, project_id, environment_id, key):
        """All project-wide values for a key (no limit, empty set if none)."""
        # `key` is rebound here to the TagKey result object.
        key = self.__get_tag_key_and_top_values(project_id, None, environment_id, key,
                                                limit=None, raise_on_empty=False)
        return set(key.top_values)

    def get_group_tag_key(self, project_id, group_id, environment_id, key):
        """Per-group tag key with its top 9 values."""
        return self.__get_tag_key_and_top_values(project_id, group_id, environment_id, key, limit=9)

    def get_group_tag_keys(self, project_id, group_id, environment_id, limit=None):
        """Set of tag keys seen on a single group."""
        return self.__get_tag_keys(project_id, group_id, environment_id, limit=limit)

    def get_group_tag_value(self, project_id, group_id, environment_id, key, value):
        """Per-group stats for a single (key, value) pair."""
        return self.__get_tag_value(project_id, group_id, environment_id, key, value)

    def get_group_tag_values(self, project_id, group_id, environment_id, key):
        """All values for a key on one group (no limit, empty set if none)."""
        # `key` is rebound here to the GroupTagKey result object.
        key = self.__get_tag_key_and_top_values(project_id, group_id, environment_id, key,
                                                limit=None, raise_on_empty=False)
        return set(key.top_values)

    def get_group_list_tag_value(self, project_id, group_id_list, environment_id, key, value):
        """Stats for one (key, value) pair across many groups at once.

        Returns a dict mapping group id -> GroupTagValue (only groups with
        matching events appear).
        """
        start, end = self.get_time_range()
        tag = 'tags[{}]'.format(key)
        filters = {
            'project_id': [project_id],
            'environment': [environment_id],
            'issue': group_id_list,
        }
        conditions = [
            [tag, '=', value]
        ]
        aggregations = [
            ['count()', '', 'times_seen'],
            ['min', SEEN_COLUMN, 'first_seen'],
            ['max', SEEN_COLUMN, 'last_seen'],
        ]
        result = snuba.query(start, end, ['issue'], conditions, filters, aggregations,
                             referrer='tagstore.get_group_list_tag_value')
        return {
            issue: GroupTagValue(
                group_id=issue,
                key=key,
                value=value,
                **fix_tag_value_data(data)
            ) for issue, data in six.iteritems(result)
        }

    def get_group_tag_value_count(self, project_id, group_id, environment_id, key):
        """Number of events on the group where the tag is set (plain int)."""
        start, end = self.get_time_range()
        tag = 'tags[{}]'.format(key)
        filters = {
            'project_id': [project_id],
            'environment': [environment_id],
            'issue': [group_id],
        }
        conditions = [[tag, '!=', '']]
        aggregations = [['count()', '', 'count']]
        return snuba.query(start, end, [], conditions, filters, aggregations,
                           referrer='tagstore.get_group_tag_value_count')

    def get_top_group_tag_values(self, project_id, group_id, environment_id, key, limit=3):
        """Top ``limit`` GroupTagValues for the key, ordered by count."""
        tag = self.__get_tag_key_and_top_values(project_id, group_id, environment_id, key, limit)
        return tag.top_values

    def __get_release(self, project_id, group_id, first=True):
        """Return the earliest (first=True) or latest release tag value seen
        on matching events, or None when there are none."""
        start, end = self.get_time_range()
        filters = {
            'project_id': [project_id],
        }
        conditions = [['tags[sentry:release]', 'IS NOT NULL', None]]
        if group_id is not None:
            filters['issue'] = [group_id]
        aggregations = [['min' if first else 'max', SEEN_COLUMN, 'seen']]
        orderby = 'seen' if first else '-seen'
        result = snuba.query(start, end, ['tags[sentry:release]'], conditions, filters,
                             aggregations, limit=1, orderby=orderby,
                             referrer='tagstore.__get_release')
        if not result:
            return None
        else:
            # NOTE: dict.keys()[0] relies on Python 2 (keys() returns a list).
            return result.keys()[0]

    def get_first_release(self, project_id, group_id):
        """Earliest release version string seen for the group, or None."""
        return self.__get_release(project_id, group_id, True)

    def get_last_release(self, project_id, group_id):
        """Latest release version string seen for the group, or None."""
        return self.__get_release(project_id, group_id, False)

    def get_release_tags(self, project_ids, environment_id, versions):
        """Set of TagValue stats for the given release version strings
        across the given projects."""
        start, end = self.get_time_range()
        filters = {
            'project_id': project_ids,
            'environment': [environment_id],
        }
        # NB we add release as a condition rather than a filter because
        # this method is already dealing with version strings rather than
        # release ids which would need to be translated by the snuba util.
        tag = 'sentry:release'
        col = 'tags[{}]'.format(tag)
        conditions = [[col, 'IN', versions]]
        aggregations = [
            ['count()', '', 'times_seen'],
            ['min', SEEN_COLUMN, 'first_seen'],
            ['max', SEEN_COLUMN, 'last_seen'],
        ]
        result = snuba.query(start, end, ['project_id', col],
                             conditions, filters, aggregations,
                             referrer='tagstore.get_release_tags')
        # Result is nested: project_id -> release value -> aggregate data.
        values = []
        for project_data in six.itervalues(result):
            for value, data in six.iteritems(project_data):
                values.append(
                    TagValue(
                        key=tag,
                        value=value,
                        **fix_tag_value_data(data)
                    )
                )
        return set(values)

    def get_group_ids_for_users(self, project_ids, event_users, limit=100):
        """Set of group ids the given EventUsers appear in, most recent
        activity first (before the set() discards the order)."""
        start, end = self.get_time_range()
        filters = {
            'project_id': project_ids,
        }
        conditions = [
            ['tags[sentry:user]', 'IN', filter(None, [eu.tag_value for eu in event_users])],
        ]
        aggregations = [['max', SEEN_COLUMN, 'last_seen']]
        result = snuba.query(start, end, ['issue'], conditions, filters,
                             aggregations, limit=limit, orderby='-last_seen',
                             referrer='tagstore.get_group_ids_for_users')
        return set(result.keys())

    def get_group_tag_values_for_users(self, event_users, limit=100):
        """GroupTagValues ('sentry:user' tag) for the given EventUsers,
        across the users' projects."""
        start, end = self.get_time_range()
        filters = {
            'project_id': [eu.project_id for eu in event_users]
        }
        conditions = [
            ['tags[sentry:user]', 'IN', filter(None, [eu.tag_value for eu in event_users])]
        ]
        aggregations = [
            ['count()', '', 'times_seen'],
            ['min', SEEN_COLUMN, 'first_seen'],
            ['max', SEEN_COLUMN, 'last_seen'],
        ]
        result = snuba.query(start, end, ['issue', 'user_id'], conditions, filters,
                             aggregations, orderby='-last_seen', limit=limit,
                             referrer='tagstore.get_group_tag_values_for_users')
        # Result is nested: issue -> user tag value -> aggregate data.
        values = []
        for issue, users in six.iteritems(result):
            for name, data in six.iteritems(users):
                values.append(
                    GroupTagValue(
                        group_id=issue,
                        key='sentry:user',
                        value=name,
                        **fix_tag_value_data(data)
                    )
                )
        return values

    def get_groups_user_counts(self, project_id, group_ids, environment_id):
        """defaultdict(int) of group id -> distinct user count (zero counts
        are omitted, the defaultdict supplies 0 on lookup)."""
        start, end = self.get_time_range()
        filters = {
            'project_id': [project_id],
            'environment': [environment_id],
            'issue': group_ids,
        }
        aggregations = [['uniq', 'tags[sentry:user]', 'count']]
        result = snuba.query(start, end, ['issue'], None, filters, aggregations,
                             referrer='tagstore.get_groups_user_counts')
        return defaultdict(int, {k: v for k, v in result.items() if v})

    def get_tag_value_paginator(self, project_id, environment_id, key, query=None,
                                order_by='-last_seen'):
        """SequencePaginator over project-wide values of a tag key,
        optionally substring-filtered by ``query``.  Only '-last_seen'
        ordering is supported."""
        from sentry.api.paginator import SequencePaginator
        if not order_by == '-last_seen':
            raise ValueError("Unsupported order_by: %s" % order_by)
        conditions = []
        if query:
            conditions.append(['tags_value', 'LIKE', '%{}%'.format(query)])
        start, end = self.get_time_range()
        results = snuba.query(
            start=start,
            end=end,
            groupby=['tags_value'],
            filter_keys={
                'project_id': [project_id],
                'environment': [environment_id],
                'tags_key': [key],
            },
            aggregations=[
                ['count()', '', 'times_seen'],
                ['min', 'timestamp', 'first_seen'],
                ['max', 'timestamp', 'last_seen'],
            ],
            conditions=conditions,
            orderby=order_by,
            # TODO: This means they can't actually paginate all TagValues.
            limit=1000,
            referrer='tagstore.get_tag_value_paginator',
        )
        tag_values = [
            TagValue(
                key=key,
                value=value,
                **fix_tag_value_data(data)
            ) for value, data in six.iteritems(results)
        ]
        desc = order_by.startswith('-')
        score_field = order_by.lstrip('-')
        # Score by the ordering field as epoch milliseconds.
        return SequencePaginator(
            [(int(to_timestamp(getattr(tv, score_field)) * 1000), tv) for tv in tag_values],
            reverse=desc
        )

    def get_group_tag_value_iter(self, project_id, group_id, environment_id, key, callbacks=()):
        """List (not a true iterator) of GroupTagValues for one key on one
        group, capped at 1000; each callback is invoked once with the full
        list before it is returned."""
        start, end = self.get_time_range()
        results = snuba.query(
            start=start,
            end=end,
            groupby=['tags_value'],
            filter_keys={
                'project_id': [project_id],
                'environment': [environment_id],
                'tags_key': [key],
                'issue': [group_id],
            },
            aggregations=[
                ['count()', '', 'times_seen'],
                ['min', 'timestamp', 'first_seen'],
                ['max', 'timestamp', 'last_seen'],
            ],
            orderby='-first_seen',  # Closest thing to pre-existing `-id` order
            # TODO: This means they can't actually iterate all GroupTagValues.
            limit=1000,
            referrer='tagstore.get_group_tag_value_iter',
        )
        group_tag_values = [
            GroupTagValue(
                group_id=group_id,
                key=key,
                value=value,
                **fix_tag_value_data(data)
            ) for value, data in six.iteritems(results)
        ]
        for cb in callbacks:
            cb(group_tag_values)
        return group_tag_values

    def get_group_tag_value_paginator(self, project_id, group_id, environment_id, key,
                                      order_by='-id'):
        """SequencePaginator over a group's values for one key; '-id' is
        mapped onto '-first_seen' since snuba rows have no unique id."""
        from sentry.api.paginator import SequencePaginator
        if order_by in ('-last_seen', '-first_seen'):
            pass
        elif order_by == '-id':
            # Snuba has no unique id per GroupTagValue so we'll substitute `-first_seen`
            order_by = '-first_seen'
        else:
            raise ValueError("Unsupported order_by: %s" % order_by)
        group_tag_values = self.get_group_tag_value_iter(
            project_id, group_id, environment_id, key
        )
        desc = order_by.startswith('-')
        score_field = order_by.lstrip('-')
        # Score by the ordering field as epoch milliseconds.
        return SequencePaginator(
            [(int(to_timestamp(getattr(gtv, score_field)) * 1000), gtv)
             for gtv in group_tag_values],
            reverse=desc
        )

    def get_group_tag_value_qs(self, project_id, group_id, environment_id, key, value=None):
        # This method is not implemented because it is only used by the Django
        # search backend.
        raise NotImplementedError

    def get_event_tag_qs(self, project_id, environment_id, key, value):
        # This method is not implemented because it is only used by the Django
        # search backend.
        raise NotImplementedError

    def get_group_event_filter(self, project_id, group_id, environment_id, tags):
        """Return {'event_id__in': set} matching all given tag pairs on the
        group (capped at 1000 most recent), or None when nothing matches."""
        start, end = self.get_time_range()
        filters = {
            'project_id': [project_id],
            'environment': [environment_id],
            'issue': [group_id],
        }
        # All tag pairs must match (nested list = AND of conditions).
        conditions = [[['tags[{}]'.format(k), '=', v] for (k, v) in tags.items()]]
        result = snuba.raw_query(start, end, selected_columns=['event_id'],
                                 conditions=conditions, orderby='-timestamp', filter_keys=filters,
                                 limit=1000, referrer='tagstore.get_group_event_filter')
        event_id_set = set(row['event_id'] for row in result['data'])
        if not event_id_set:
            return None
        return {'event_id__in': event_id_set}

    def get_group_ids_for_search_filter(
            self, project_id, environment_id, tags, candidates=None, limit=1000):
        # This method is not implemented since the `group.id` column doesn't
        # exist in Snuba. This logic is implemented in the search backend
        # instead.
        raise NotImplementedError
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
import os
import sys
from importlib import import_module
from saml2.s_utils import factory
from saml2.s_utils import do_ava
from saml2 import saml
from saml2 import extension_elements_to_elements
from saml2 import SAMLError
from saml2.saml import NAME_FORMAT_UNSPECIFIED
import logging
logger = logging.getLogger(__name__)
class UnknownNameFormat(SAMLError):
    """Error type for an unrecognized SAML attribute name format."""
    pass
class ConverterError(SAMLError):
    """Raised when attribute conversion fails, e.g. a map without "to"/"fro"
    specifications or an attribute with no local name (see d_to_local_name)."""
    pass
def load_maps(dirspec):
    """Load attribute maps from every ``*.py`` module in *dirspec*.

    :param dirspec: a directory specification
    :return: a dictionary keyed by each map's "identifier"; the value is the
        raw map dictionary, which carries the "to" and "fro" mappings.
    """
    maps = {}
    if dirspec not in sys.path:
        sys.path.insert(0, dirspec)
    for filename in os.listdir(dirspec):
        if not filename.endswith(".py"):
            continue
        module = import_module(filename[:-3])
        for name, candidate in module.__dict__.items():
            # Skip dunder module attributes; keep dicts that look like maps.
            if name.startswith("__"):
                continue
            if isinstance(candidate, dict) and "to" in candidate and "fro" in candidate:
                maps[candidate["identifier"]] = candidate
    return maps
def _ac_from_map(item):
    # Build one AttributeConverter from a raw attribute-map dictionary.
    atco = AttributeConverter(item["identifier"])
    atco.from_dict(item)
    return atco


def ac_factory(path=""):
    """Attribute Converter factory.

    :param path: The path to a directory where the attribute maps are expected
        to reside.  When empty, the bundled ``saml2.attributemaps`` modules
        are used instead.
    :return: A list of AttributeConverter instances
    """
    # The two branches previously duplicated the converter-construction code;
    # it now lives once in _ac_from_map().
    acs = []
    if path:
        if path not in sys.path:
            sys.path.insert(0, path)
        for fil in os.listdir(path):
            if fil.endswith(".py"):
                mod = import_module(fil[:-3])
                for key, item in mod.__dict__.items():
                    if key.startswith("__"):
                        continue
                    if isinstance(item,
                                  dict) and "to" in item and "fro" in item:
                        acs.append(_ac_from_map(item))
    else:
        from saml2 import attributemaps
        for typ in attributemaps.__all__:
            mod = import_module(".%s" % typ, "saml2.attributemaps")
            for key, item in mod.__dict__.items():
                if key.startswith("__"):
                    continue
                if isinstance(item, dict) and "to" in item and "fro" in item:
                    acs.append(_ac_from_map(item))
    return acs
def ac_factory_II(path):
    """Alias for :func:`ac_factory` kept for backward compatibility."""
    return ac_factory(path)
# def ava_fro(acs, statement):
# """ Translates attributes according to their name_formats into the local
# names.
#
# :param acs: AttributeConverter instances
# :param statement: A SAML statement
# :return: A dictionary with attribute names replaced with local names.
# """
# if not statement:
# return {}
#
# acsdic = dict([(ac.name_format, ac) for ac in acs])
# acsdic[None] = acsdic[NAME_FORMAT_URI]
# return dict([acsdic[a.name_format].ava_from(a) for a in statement])
def to_local(acs, statement, allow_unknown_attributes=False):
    """ Replaces the attribute names in a attribute value assertion with the
    equivalent name from a local name format.

    :param acs: List of Attribute Converters
    :param statement: The Attribute Statement
    :param allow_unknown_attributes: If unknown attributes are allowed
    :return: A key,values dictionary
    """
    # This body was a line-for-line duplicate of list_to_local() applied to
    # statement.attribute; delegate so the conversion logic lives in exactly
    # one place.
    return list_to_local(acs, statement.attribute, allow_unknown_attributes)
def list_to_local(acs, attrlist, allow_unknown_attributes=False):
    """ Replaces the attribute names in a list of Attributes with the
    equivalent name from a local name format.

    :param acs: List of Attribute Converters
    :param attrlist: List of Attributes
    :param allow_unknown_attributes: If unknown attributes are allowed
    :return: A key,values dictionary
    """
    if not acs:
        acs = [AttributeConverter()]
        # NOTE(review): "" maps to the converter *list* here, so an attribute
        # whose name_format is "" would reach a list, not a converter —
        # confirm whether acs[0] was intended.
        acsd = {"": acs}
    else:
        acsd = dict([(a.name_format, a) for a in acs])

    ava = {}
    for attr in attrlist:
        try:
            convert = acsd[attr.name_format].ava_from
        except KeyError:
            # No converter registered for this name format.
            if not (attr.name_format == NAME_FORMAT_UNSPECIFIED or
                    allow_unknown_attributes):
                logger.info("Unsupported attribute name format: %s" % (
                    attr.name_format,))
                continue
            convert = acs[0].lcd_ava_from
        try:
            name, values = convert(attr)
        except KeyError:
            if not allow_unknown_attributes:
                logger.info("Unknown attribute name: %s" % (attr,))
                continue
            name, values = acs[0].lcd_ava_from(attr)
        except AttributeError:
            continue
        # Merge values for attributes that occur more than once.
        if name in ava:
            ava[name].extend(values)
        else:
            ava[name] = values
    return ava
def from_local(acs, ava, name_format):
    """Convert a local ava dictionary using the converter registered for
    *name_format*.

    :param acs: List of AttributeConverter instances
    :param ava: Dictionary of local attribute names and values
    :param name_format: The wanted name format
    :return: The converter's to_() result, or None when no converter matches
    """
    for converter in acs:
        if converter.name_format == name_format:
            return converter.to_(ava)
    return None
def from_local_name(acs, attr, name_format):
    """
    :param acs: List of AttributeConverter instances
    :param attr: attribute name as string
    :param name_format: Which name-format it should be translated to
    :return: An Attribute instance from the matching converter, or the
        unchanged *attr* when no converter handles *name_format*
    """
    for converter in acs:
        if converter.name_format == name_format:
            return converter.to_format(attr)
    return attr
def to_local_name(acs, attr):
    """
    :param acs: List of AttributeConverter instances
    :param attr: an Attribute instance
    :return: The local attribute name from the first converter that can map
        it; falls back to the attribute's friendly_name
    """
    for converter in acs:
        local = converter.from_format(attr)
        if local:
            return local
    return attr.friendly_name
def get_local_name(acs, attr, name_format):
    """Look up the local name registered for *attr* under *name_format*.

    Returns None when no converter matches; raises KeyError when a converter
    matches but has no mapping for *attr*.
    """
    for converter in acs:
        if converter.name_format == name_format:
            return converter._fro[attr]
    return None
def d_to_local_name(acs, attr):
    """
    :param acs: List of AttributeConverter instances
    :param attr: an Attribute dictionary
    :return: The local attribute name
    :raises ConverterError: when no converter maps the attribute and it has
        no "friendly_name" entry either
    """
    for converter in acs:
        local = converter.d_from_format(attr)
        if local:
            return local
    # if everything else fails this might be good enough
    try:
        return attr["friendly_name"]
    except KeyError:
        raise ConverterError("Could not find local name for %s" % attr)
class AttributeConverter(object):
    """ Converts from an attribute statement to a key,value dictionary and
    vice-versa.

    ``_fro`` maps lower-cased remote (protocol) attribute names to local
    names; ``_to`` maps lower-cased local names back to remote names.
    """

    def __init__(self, name_format=""):
        self.name_format = name_format
        self._to = None
        self._fro = None

    def adjust(self):
        """ If one of the transformations is not defined it is expected to
        be the mirror image of the other.
        """
        if self._fro is None and self._to is not None:
            self._fro = dict(
                [(value.lower(), key) for key, value in self._to.items()])
        # BUG FIX: this branch previously read ``self.fro`` (a nonexistent
        # attribute, raising AttributeError) and used ``value.lower`` without
        # calling it, which would have produced method objects as dict keys.
        if self._to is None and self._fro is not None:
            self._to = dict(
                [(value.lower(), key) for key, value in self._fro.items()])

    def from_dict(self, mapdict):
        """ Import the attribute map from a dictionary

        :param mapdict: The dictionary
        :raises ConverterError: when neither "to" nor "fro" is present
        """
        self.name_format = mapdict["identifier"]
        try:
            self._fro = dict(
                [(k.lower(), v) for k, v in mapdict["fro"].items()])
        except KeyError:
            pass
        try:
            self._to = dict([(k.lower(), v) for k, v in mapdict["to"].items()])
        except KeyError:
            pass
        if self._fro is None and self._to is None:
            raise ConverterError("Missing specifications")
        if self._fro is None or self._to is None:
            self.adjust()

    def lcd_ava_from(self, attribute):
        """
        If nothing else works, this should: use the friendly name or raw
        name and the attribute's plain text values.

        :param attribute: An Attribute Instance
        :return: A (name, values) tuple
        """
        try:
            name = attribute.friendly_name.strip()
        except AttributeError:
            name = attribute.name.strip()
        values = []
        for value in attribute.attribute_value:
            if not value.text:
                values.append('')
            else:
                values.append(value.text.strip())
        return name, values

    def fail_safe_fro(self, statement):
        """ In case there is not formats defined or if the name format is
        undefined

        :param statement: AttributeStatement instance
        :return: A dictionary with names and values
        """
        result = {}
        for attribute in statement.attribute:
            # Only handle attributes whose format is absent or unspecified.
            if attribute.name_format and \
                    attribute.name_format != NAME_FORMAT_UNSPECIFIED:
                continue
            try:
                name = attribute.friendly_name.strip()
            except AttributeError:
                name = attribute.name.strip()
            result[name] = []
            for value in attribute.attribute_value:
                if not value.text:
                    result[name].append('')
                else:
                    result[name].append(value.text.strip())
        return result

    def ava_from(self, attribute, allow_unknown=False):
        """ Convert an Attribute instance to a (local name, values) tuple.

        :param attribute: An Attribute instance
        :param allow_unknown: Fall back to the raw or friendly name when the
            attribute is not in the map, instead of raising KeyError.
        :return: A (name, values) tuple
        """
        try:
            attr = self._fro[attribute.name.strip().lower()]
        except AttributeError:
            attr = attribute.friendly_name.strip().lower()
        except KeyError:
            if allow_unknown:
                try:
                    attr = attribute.name.strip().lower()
                except AttributeError:
                    attr = attribute.friendly_name.strip().lower()
            else:
                raise
        val = []
        for value in attribute.attribute_value:
            if value.extension_elements:
                # Complex values: rebuild the extension elements and collect
                # their attributes/text into a nested dict keyed by tag.
                ext = extension_elements_to_elements(value.extension_elements,
                                                     [saml])
                for ex in ext:
                    cval = {}
                    for key, (name, typ, mul) in ex.c_attributes.items():
                        exv = getattr(ex, name)
                        if exv:
                            cval[name] = exv
                    if ex.text:
                        cval["value"] = ex.text.strip()
                    val.append({ex.c_tag: cval})
            elif not value.text:
                val.append('')
            else:
                val.append(value.text.strip())
        return attr, val

    def fro(self, statement):
        """ Get the attributes and the attribute values.

        :param statement: The AttributeStatement.
        :return: A dictionary containing attributes and values
        """
        if not self.name_format:
            return self.fail_safe_fro(statement)
        result = {}
        for attribute in statement.attribute:
            if attribute.name_format and self.name_format and \
                    attribute.name_format != self.name_format:
                continue
            try:
                (key, val) = self.ava_from(attribute)
            except (KeyError, AttributeError):
                pass
            else:
                result[key] = val
        return result

    def to_format(self, attr):
        """ Creates an Attribute instance with name, name_format and
        friendly_name

        :param attr: The local name of the attribute
        :return: An Attribute instance
        """
        try:
            return factory(saml.Attribute,
                           name=self._to[attr],
                           name_format=self.name_format,
                           friendly_name=attr)
        except KeyError:
            # Unmapped local name: emit it verbatim without a format.
            return factory(saml.Attribute, name=attr)

    def from_format(self, attr):
        """ Find out the local name of an attribute

        :param attr: An saml.Attribute instance
        :return: The local attribute name or "" if no mapping could be made
        """
        if attr.name_format:
            if self.name_format == attr.name_format:
                try:
                    return self._fro[attr.name.lower()]
                except KeyError:
                    pass
        else:  # don't know the name format so try all I have
            try:
                return self._fro[attr.name.lower()]
            except KeyError:
                pass
        return ""

    def d_from_format(self, attr):
        """ Find out the local name of an attribute

        :param attr: An Attribute dictionary
        :return: The local attribute name or "" if no mapping could be made
        """
        if attr["name_format"]:
            if self.name_format == attr["name_format"]:
                try:
                    return self._fro[attr["name"].lower()]
                except KeyError:
                    pass
        else:  # don't know the name format so try all I have
            try:
                return self._fro[attr["name"].lower()]
            except KeyError:
                pass
        return ""

    def to_(self, attrvals):
        """ Create a list of Attribute instances.

        :param attrvals: A dictionary of attributes and values
        :return: A list of Attribute instances
        """
        attributes = []
        for key, value in attrvals.items():
            key = key.lower()
            try:
                attributes.append(factory(saml.Attribute,
                                          name=self._to[key],
                                          name_format=self.name_format,
                                          friendly_name=key,
                                          attribute_value=do_ava(value)))
            except KeyError:
                attributes.append(factory(saml.Attribute,
                                          name=key,
                                          attribute_value=do_ava(value)))
        return attributes
class AttributeConverterNOOP(AttributeConverter):
    """ Does a NOOP conversion, that is no conversion is made """

    def __init__(self, name_format=""):
        AttributeConverter.__init__(self, name_format)

    def to_(self, attrvals):
        """ Create a list of Attribute instances.

        Local names are used as-is (lower-cased), no mapping is applied.

        :param attrvals: A dictionary of attributes and values
        :return: A list of Attribute instances
        """
        return [
            factory(saml.Attribute,
                    name=name.lower(),
                    name_format=self.name_format,
                    attribute_value=do_ava(value))
            for name, value in attrvals.items()
        ]
| |
# -*- coding: utf-8 -*-
'''
Module with api interface for zerto
'''
import base64
import json
import requests
from errors import ( # NOQA
ZertoError,
ZertoUnsupportedApi,
ZertoServiceError,
ZertoAuthError,
Zerto4xx,
ZertoBadRequest,
ZertoUnauthorized,
ZertoForbidden,
ZertoNotFound,
ZertoMethodNotAllowed,
ZertoFailure,
)
from constants import ( # NOQA
ZertoConstant,
ZertoConstantDict,
AuthenticationMethod,
authentication_method,
CommitPolicy,
commit_policy,
EntityType,
entity_type,
EventCategory,
event_category,
EventType,
event_type,
PairingStatus,
pairing_status,
SiteType,
site_type,
TaskState,
task_state,
VMType,
vm_type,
VPGPriority,
vpg_priority,
VPGStatus,
vpg_status,
VPGSubStatus,
vpg_sub_status,
VRAStatus,
vra_status,
)
from zertoobject import ZertoObject # NOQA
from alert import Alert # NOQA
from localsite import LocalSite # NOQA
from peersite import PeerSite # NOQA
from event import Event # NOQA
from task import Task # NOQA
from vm import VM # NOQA
from vpg import VPG # NOQA
from vra import VRA # NOQA
from zorg import ZORG # NOQA
from serviceprofile import ServiceProfile # NOQA
from virtualization_site import VirtualizationSite # NOQA
class Zerto(object):
    def __init__(self, url):
        """
        :param url: Base URL of the Zerto API server
        """
        self.url = url
        # Session token obtained by get_session(); sent as the
        # 'x-zerto-session' header once set (see _do_request).
        self.session = None
        # Known API base paths; refreshed by get_apis().
        self.paths = ['v1']
def get_url(self, path):
if not self.url:
raise ValueError('Invalid url')
base_path = '/'.join(path.strip('/').split('/')[:2])
if base_path not in self.paths:
raise ZertoUnsupportedApi(path)
url = '{0}/{1}'.format(self.url.rstrip('/'), path.lstrip('/'))
return url
def _do_request(self, method, path, data=None, headers=None, **kwargs):
url = self.get_url(path)
kwargs = dict([(k, v) for k, v in kwargs.iteritems() if v is not None])
if data is not None:
kwargs['data'] = data
if headers is None:
headers = {}
if self.session:
headers = {
'x-zerto-session': self.session,
'content-type': 'application/json',
}
if headers:
kwargs['headers'] = headers
if 'verify' not in kwargs:
kwargs['verify'] = False
req = getattr(requests, method.lower())(url, **kwargs)
if req.status_code == 200:
return req
try:
result = req.json()
except:
result = {}
if isinstance(result, dict):
req.errcode = result.get('errorCode')
req.errmsg = result.get('errorMessage')
else:
req.errcode = None
req.errmsg = '{0}'.format(result)
params = kwargs.get('params')
if 400 <= req.status_code < 500:
if req.status_code == 400:
errcls = ZertoBadRequest
elif req.status_code == 401:
errcls = ZertoUnauthorized
elif req.status_code == 403:
errcls = ZertoForbidden
elif req.status_code == 404:
errcls = ZertoNotFound
elif req.status_code == 405:
errcls = ZertoMethodNotAllowed
else:
errcls = Zerto4xx
raise errcls(
req.status_code, req.errcode, req.errmsg,
method.upper(), path, params, data)
if 500 <= req.status_code < 600:
raise ZertoFailure(
req.status_code, req.errcode, req.errmsg,
method.upper(), path, params, data)
raise ZertoServiceError(
req.status_code, req.errcode, req.errmsg,
method.upper(), path, params, data)
    def get_request(self, path, **kwargs):
        """Convenience wrapper for _do_request('GET', ...)."""
        return self._do_request('GET', path, **kwargs)
    def post_request(self, path, data=None, **kwargs):
        """Convenience wrapper for _do_request('POST', ...)."""
        return self._do_request('POST', path, data, **kwargs)
    def put_request(self, path, data=None, **kwargs):
        """Convenience wrapper for _do_request('PUT', ...)."""
        return self._do_request('PUT', path, data, **kwargs)
    def delete_request(self, path, **kwargs):
        """Convenience wrapper for _do_request('DELETE', ...)."""
        return self._do_request('DELETE', path, **kwargs)
def get_apis(self, **kwargs):
headers = {'content-type': 'application/json'}
if 'headers' in kwargs:
headers.update(kwargs.pop('headers'))
req = self.get_request('v1', headers=headers, **kwargs)
self.paths = list(sorted(['v1'] + [
i['href'].split('/', 3)[-1].strip('/')
for i in req.json()
]))
return req.json()
    def get_session(self, user, password, method=None):
        """Authenticate against the Zerto API and store the session token.

        Tries Windows authentication (method code 0, the default) first, then
        falls back to AuthenticationMethod 1 (VirtualizationManager), unless
        *method* pins one of them.  On success the token is stored in
        ``self.session`` and sent as the 'x-zerto-session' header by
        _do_request on subsequent calls.

        :param method: optional AuthenticationMethod instance or a key into
            the authentication_method mapping
        :raises ZertoAuthError: on an invalid method or failed authentication
        """
        if not self.paths:
            self.get_apis()
        # NOTE: b64encode on a plain str is Python 2 semantics; under
        # Python 3 this would need explicit encoding.
        headers = {
            'Authorization': base64.b64encode(
                '{0}:{1}'.format(user, password))
        }
        session = None
        path = 'v1/session/add'
        if method is not None and not isinstance(method, AuthenticationMethod):
            try:
                method = authentication_method[method]
            except KeyError:
                raise ZertoAuthError(
                    'Invalid authentication method {0}'.format(method))
        if method is None or method.code == 0:
            # Default is windows authentication
            try:
                req = self.post_request(path, headers=headers)
                if req.status_code == requests.codes.ok:
                    session = req.headers.get('x-zerto-session')
            except ZertoUnauthorized:
                pass
        if not session and (method is None or method.code == 1):
            # Try or retry AuthenticationMethod 1 (VirtualizationManager)
            headers['content-type'] = 'application/json'
            try:
                req = self.post_request(
                    path,
                    json.dumps({'AuthenticationMethod': 1}),
                    headers=headers,
                )
                if req.status_code == requests.codes.ok:
                    session = req.headers.get('x-zerto-session')
            except ZertoUnauthorized:
                pass
        if not session:
            raise ZertoAuthError('Invalid user name and/or password')
        self.session = session
def get_localsite(self, status=None):
if status:
req = self.get_request('v1/localsite/pairingstatuses')
return req.json()
req = self.get_request('v1/localsite')
return LocalSite(**req.json())
def get_peersites(self, siteid=None, status=None, **kwargs):
if status:
req = self.get_request('v1/peersites/pairingstatuses')
return req.json()
elif siteid is not None:
req = self.get_request('v1/peersites/{0}'.format(siteid))
return PeerSite(**req.json())
req = self.get_request('v1/peersites', params=(kwargs or None))
return list([PeerSite(**res) for res in req.json()])
def get_alert(self, alert=None):
    """Return one Alert by identifier, or all alerts as a list."""
    if alert is not None:
        req = self.get_request('v1/alerts/{0}'.format(alert))
        return Alert(**req.json())
    req = self.get_request('v1/alerts')
    # Drop the redundant list() around the comprehension.
    return [Alert(**res) for res in req.json()]
def get_event(self, event=None, **kwargs):
    """Retrieve a specific event by id, or all events (filtered by **kwargs)."""
    if event is not None:
        req = self.get_request('v1/events/{0}'.format(event))
        return Event(**req.json())
    req = self.get_request('v1/events', params=(kwargs or None))
    # Drop the redundant list() around the comprehension.
    return [Event(**res) for res in req.json()]
def get_event_categories(self):
    """Return the list of known event categories."""
    return self.get_request('v1/events/categories').json()
def get_event_entities(self):
    """Return the list of known event entities."""
    return self.get_request('v1/events/entities').json()
def get_event_types(self):
    """Return the list of known event types."""
    return self.get_request('v1/events/types').json()
def get_serviceprofiles(self, serviceprofile=None, **kwargs):
    """Return one ServiceProfile by id, or all profiles (filtered by **kwargs)."""
    if serviceprofile is not None:
        req = self.get_request(
            'v1/serviceprofiles/{0}'.format(serviceprofile))
        return ServiceProfile(**req.json())
    req = self.get_request(
        'v1/serviceprofiles', params=(kwargs or None))
    # Drop the redundant list() around the comprehension.
    return [ServiceProfile(**res) for res in req.json()]
def get_task(self, task=None, **kwargs):
    """Return one Task by id, or all tasks (filtered by **kwargs)."""
    if task is not None:
        req = self.get_request('v1/tasks/{0}'.format(task))
        return Task(**req.json())
    req = self.get_request('v1/tasks', params=(kwargs or None))
    # Drop the redundant list() around the comprehension.
    return [Task(**res) for res in req.json()]
def get_virtualization_site(self, siteid=None):
    """Return one VirtualizationSite by id, or all sites as a list."""
    if siteid is not None:
        req = self.get_request(
            'v1/virtualizationsites/{0}'.format(siteid))
        return VirtualizationSite(**req.json())
    req = self.get_request('v1/virtualizationsites')
    # Drop the redundant list() around the comprehension.
    return [VirtualizationSite(**res) for res in req.json()]
def get_vm(self, vmid=None, **kwargs):
    """Retrieve a specific VM by id, or all VMs (filtered by **kwargs)."""
    if vmid is not None:
        req = self.get_request('v1/vms/{0}'.format(vmid))
        return VM(**req.json())
    req = self.get_request('v1/vms', params=(kwargs or None))
    # Drop the redundant list() around the comprehension.
    return [VM(**res) for res in req.json()]
def get_vpg(self, vpgid=None, **kwargs):
    """Retrieve a specific VPG by id, or all VPGs (filtered by **kwargs)."""
    if vpgid is not None:
        req = self.get_request('v1/vpgs/{0}'.format(vpgid))
        return VPG(**req.json())
    req = self.get_request('v1/vpgs', params=(kwargs or None))
    # Drop the redundant list() around the comprehension.
    return [VPG(**res) for res in req.json()]
def get_vra(self, vraid=None, **kwargs):
    """Return one VRA by id, or all VRAs (filtered by **kwargs)."""
    if vraid is not None:
        req = self.get_request('v1/vras/{0}'.format(vraid))
        return VRA(**req.json())
    req = self.get_request('v1/vras', params=(kwargs or None))
    # Drop the redundant list() around the comprehension.
    return [VRA(**res) for res in req.json()]
def get_zorg(self, zorgid=None):
    """Return one ZORG by id, or all ZORGs as a list."""
    if zorgid is not None:
        req = self.get_request('v1/zorgs/{0}'.format(zorgid))
        return ZORG(**req.json())
    req = self.get_request('v1/zorgs')
    # Drop the redundant list() around the comprehension.
    return [ZORG(**res) for res in req.json()]
def get_resources_report(self, **kwargs):
    """Return resources-report samples.

    Recognized query parameters (forwarded via **kwargs):
    fromTimeString, toTimeString, startIndex, count, filter.
    When 'filter' is supplied the filtered endpoint is used.
    """
    if 'filter' in kwargs:
        endpoint = 'v1/ZvmService/ResourcesReport/getSamplesWithFilter'
    else:
        endpoint = 'v1/ZvmService/ResourcesReport/getSamples'
    req = self.get_request(endpoint, params=(kwargs or None))
    return req.json()
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
| |
'''
Created on 04.03.2012
@author: michi
'''
from datetime import date
from abc import ABCMeta, abstractmethod, abstractproperty
import datetime
class XType:
    """Abstract base describing an (extended) value type.

    Concrete subclasses report their type *group* (one of the class
    constants below) via the abstract ``group`` property.

    NOTE(review): Python 2 code -- ``__metaclass__`` is ignored on
    Python 3 and ``unicode`` does not exist there.
    """
    __metaclass__ = ABCMeta
    # Type-group constants returned by the ``group`` property.
    CUSTOM = 1
    NUMBER = 2
    STRING = 3
    BOOL = 4
    COMPLEX = 5
    TEMPORAL = 6
    MIXED = 7
    def __init__(self, canBeNone=None, defaultValue=None):
        # None means "unspecified"; treat it as "value may be None".
        if canBeNone is None:
            canBeNone = True
        self.canBeNone = canBeNone
        self.defaultValue = defaultValue
        self.canBeEdited = True
        self.forceInteraction = False
    @abstractproperty
    def group(self):
        """Return the XType group constant of this type."""
        pass
    def value2String(self, value):
        """Render *value* as a unicode string (Python 2 only)."""
        return unicode(value)
class BoolType(XType):
    """Boolean type.

    NOTE(review): the ``boolNames`` constructor argument is accepted but
    never stored -- confirm whether it was meant to carry display names
    for True/False.
    """
    def __init__(self, boolNames=None):
        XType.__init__(self)
        self.defaultValue = False
    @property
    def group(self):
        return XType.BOOL
    @staticmethod
    def castToBool(value):
        """Coerce *value* to bool.

        Numbers use bool(); strings (Python 2 ``basestring``) recognize
        'true'/'yes'/'false'/'no' case-insensitively, then try a float
        conversion; anything else falls back to truthiness.
        """
        if isinstance(value, (float, int)):
            return bool(value)
        if isinstance(value, basestring):
            if value.lower() in ('true','yes'):
                return True
            if value.lower() in ('false','no'):
                return False
            try:
                numeric = float(value)
                return bool(numeric)
            except ValueError:
                pass
        if value:
            return True
        return False
class NumberType(XType):
    """Numeric type (int or float) with formatting configuration."""
    def __init__(self, pyTypeOfNumber):
        XType.__init__(self)
        # The concrete Python number type (int or float).
        self.pyType = pyTypeOfNumber
        self.strNumberFormat = ''
        self.strPrefix = ''
        self.strSuffix = ''
        self.minValue = -100000
        self.maxValue = 100000
        self.decimalsSeparator = '.'
        self.thousandsSeparator = None
        self.decimalsCount = None
        if self.pyType is float:
            self.defaultValue = 0.0
        if self.pyType is int:
            self.defaultValue = 0
    def value2String(self, value):
        """Format *value* using strNumberFormat if set, else formatNumber()."""
        if self.strNumberFormat:
            number = ('{0:' + self.strNumberFormat + '}').format(value)
        else:
            number = NumberType.formatNumber(value,
                                             self.decimalsCount,
                                             self.decimalsSeparator,
                                             self.thousandsSeparator)
        return (self.strPrefix + number + self.strSuffix)
    @property
    def group(self):
        return XType.NUMBER
    @staticmethod
    def intWithThousandsSeparator(x, sep=None):
        """Render integer *x* with *sep* between thousands groups.

        NOTE(review): Python 2 only -- ``0L`` is a syntax error on 3.x.
        """
        if sep is None:
            sep = ','
        if type(x) not in [type(0), type(0L)]:
            raise TypeError("Parameter must be an integer.")
        if x < 0:
            # Recurse on the magnitude and re-attach the sign.
            return '-' + NumberType.intWithThousandsSeparator(-x, sep)
        result = ''
        while x >= 1000:
            x, r = divmod(x, 1000)
            result = "%s%03d%s" % (sep, r, result)
        return "%d%s" % (x, result)
    def viewToModel(self, viewValue):
        # Identity hook; subclasses may convert display -> model values.
        return viewValue
    def modelToView(self, modelValue):
        # Identity hook; subclasses may convert model -> display values.
        return modelValue
    @staticmethod
    def formatNumber(x, decimalsCount=None, decimalsSeparator=None,
            thousandsSeparator=None, zeroFill=None, decimalsZeroFill=None):
        """Format number *x* with optional decimal count and separators.

        Raises TypeError for non-numbers and NotImplementedError when the
        unsupported zero-fill options are given.
        """
        if not isinstance(x, (float, int)):
            raise TypeError("formatNumber needs float|int")
        if zeroFill is not None or decimalsZeroFill is not None:
            raise NotImplementedError("Zerofill and decimalsZeroFill currently not supported")
        preDecimals = '0'
        decimals = ''
        if decimalsCount is None:
            strVersion = str(x)
        else:
            strVersion = ("{0:." + str(decimalsCount) + "f}").format(x)
        if "." in strVersion:
            preDecimals, decimals = strVersion.split('.')
        else:
            preDecimals = strVersion
        if decimalsCount is None:
            decimalsCount = 0
        if decimalsSeparator is None:
            decimalsSeparator = '.'
        if thousandsSeparator:
            preDecimals = NumberType.intWithThousandsSeparator(int(preDecimals),
                                                               thousandsSeparator)
        if not decimals:
            return preDecimals
        else:
            return "{0}{1}{2}".format(preDecimals,decimalsSeparator,decimals)
class StringType(XType):
    """Plain (unicode) string type with optional length bounds."""
    def __init__(self):
        XType.__init__(self)
        self.minLength = 0
        self.maxLength = 10000000
        # Python 2 only: empty unicode string as default.
        self.defaultValue = unicode()
        self.hints = []
    @property
    def group(self):
        return XType.STRING
    def value2String(self, value):
        # Already a string; return unchanged.
        return value
class ColorType(StringType):
    """String type holding a colour value (no extra constraints)."""
    pass
class FilesystemPathType(StringType):
    """String type holding a filesystem path."""
    def __init__(self):
        super(FilesystemPathType, self).__init__()
        # When True, the referenced path must exist on disk.
        self.mustExist = False
class FilePathType(FilesystemPathType):
    """Filesystem path that designates a file."""
    pass
class DirectoryPathType(FilesystemPathType):
    """Filesystem path that designates a directory."""
    pass
class ImagePathType(FilePathType):
    """File path that designates an image file."""
    pass
class UnitType(NumberType):
    """Number type with an attached measurement-unit string.

    The unit is folded into strPrefix/strSuffix so NumberType's
    value2String renders it automatically.
    """
    # Placement of the unit string relative to the number.
    PREPEND = 1
    APPEND = 2
    VALUE_2_UNIT_SPACE = ' '
    def __init__(self, unit=None, pyTypeOfNumber=None):
        if pyTypeOfNumber is None:
            pyTypeOfNumber = float
        super(UnitType, self).__init__(pyTypeOfNumber)
        self._unit = unit
        self._unitStrPosition = UnitType.APPEND
        self._value2UnitSpace = 0
        if unit is not None:
            # Go through the property setter to build prefix/suffix.
            self.unit = unit
    @property
    def unit(self):
        return self._unit
    @unit.setter
    def unit(self, unit):
        # Rebuild the prefix/suffix strings whenever the unit changes.
        self._unit = unit
        if self._unitStrPosition == UnitType.APPEND:
            self.strPrefix = ''
            self.strSuffix = self.getUnitString(unit, self._value2UnitSpace,
                                        self._unitStrPosition)
        elif self._unitStrPosition == UnitType.PREPEND:
            self.strSuffix = ''
            self.strPrefix = self.getUnitString(unit, self.value2UnitSpace,
                                      self._unitStrPosition)
    @staticmethod
    def getUnitString(unit,value2UnitSpace, position):
        """Build '<spaces><unit>' (APPEND) or '<unit><spaces>' (PREPEND)."""
        if value2UnitSpace == 0:
            return unit
        if position == UnitType.APPEND:
            parts = []
            for i in range(value2UnitSpace):
                parts.append(' ')
            parts.append(unit)
        else:
            parts = []
            parts.append(unit)
            for i in range(value2UnitSpace):
                parts.append(' ')
        return unicode("").join(parts)
    @property
    def unitStrPosition(self):
        return self._unitStrPosition
    @unitStrPosition.setter
    def unitStrPosition(self, position):
        self._unitStrPosition = position
        # Reassigning triggers the unit setter, refreshing the strings.
        self.unit = self.unit
    @property
    def value2UnitSpace(self):
        return self._value2UnitSpace
    @value2UnitSpace.setter
    def value2UnitSpace(self, space):
        self._value2UnitSpace = space
        self.unit = self.unit
class DateType(XType):
    """Temporal type; defaults to today's date.

    NOTE(review): ``defaultValue`` is bound to date.today() at
    construction time, not re-evaluated later.
    """
    def __init__(self):
        XType.__init__(self)
        self.minDate = None
        self.maxDate = None
        self.defaultValue = date.today()
    @property
    def group(self):
        return XType.TEMPORAL
    def value2String(self, value):
        return unicode(value)
class ComplexType(XType):
    """Base for structured (container-like) types."""
    def __init__(self, canBeNone=None, defaultValue=None):
        XType.__init__(self, canBeNone=canBeNone, defaultValue=defaultValue)
    @property
    def group(self):
        return XType.COMPLEX
class OneOfAListType(XType):
    """Type whose value must be one of ``possibleValues``."""
    def __init__(self, canBeNone=None, defaultValue=None):
        XType.__init__(self, canBeNone=canBeNone, defaultValue=defaultValue)
        self.possibleValues = ()
        self.xTypeOfItems = None
    @property
    def group(self):
        return XType.MIXED
    @property
    def itemType(self):
        # Explicit item type wins; otherwise inferred from the first value.
        # NOTE(review): native2XType expects a *type object*; passing a
        # value here appears to return None -- confirm intent.  Raises
        # IndexError when possibleValues is empty.
        if self.xTypeOfItems:
            return self.xTypeOfItems
        return native2XType(self.possibleValues[0])
class NamedFieldType(ComplexType):
    """Composite type whose fields are accessed by ordered name.

    Field names live (in insertion order) in ``__keys``; the matching
    XTypes live in ``__xTypeMap`` keyed by position.
    """
    def __init__(self, canBeNone=None, defaultValue=None):
        ComplexType.__init__(self, canBeNone=canBeNone,
                             defaultValue=defaultValue)
        self.defaultValue = {}
        self.__xTypeMap = {}
        self.__keys = []
    def addKey(self, name, xType):
        """Append field *name* with type *xType*.

        Fix: the old code keyed the map with ``self.__keys.index(name)``,
        which is the FIRST occurrence of *name*; adding a duplicate name
        overwrote the first entry while still growing the key list,
        desynchronizing names from types.  Keying by the position of the
        newly appended key keeps both structures aligned.
        """
        self.__keys.append(name)
        self.__xTypeMap[len(self.__keys) - 1] = xType
    def keyType(self, key):
        """Return the XType for *key* (a field name or integer position)."""
        if isinstance(key, basestring):
            return self.__xTypeMap[self.__keys.index(key)]
        elif isinstance(key, int):
            return self.__xTypeMap[key]
    def keys(self):
        return self.__keys
    def keyName(self, index):
        return self.__keys[index]
    @property
    def xTypeMap(self):
        return self.__xTypeMap
    def __getitem__(self, key):
        return self.__xTypeMap[self.__keys.index(key)]
    def __setitem__(self, name, xType):
        # Same positional-key fix as addKey.
        self.__keys.append(name)
        self.__xTypeMap[len(self.__keys) - 1] = xType
    def __contains__(self, item):
        if isinstance(item, XType):
            # Fix: membership must be tested against the stored XTypes
            # (dict values); the old code tested the int position keys,
            # so this branch could never be True.
            return item in self.__xTypeMap.values()
        return item in self.__keys
    def __len__(self):
        return len(self.__keys)
    def __iter__(self):
        return self.__keys.__iter__()
    @classmethod
    def create(cls, keys=None, **kwargs):
        """Alternate constructor: build from a name -> XType mapping."""
        keys = kwargs if keys is None else keys
        xtype = cls.__new__(cls)
        xtype.__init__()
        for key in keys:
            xtype.addKey(key, keys[key])
        return xtype
class SequenceType(ComplexType):
    """Type describing an ordered collection of same-typed items."""
    def __init__(self, itemType, canBeNone=None, defaultValue=None):
        ComplexType.__init__(self, canBeNone=canBeNone,
                             defaultValue=defaultValue)
        self.defaultValue = []
        # Optional length constraints (None means unconstrained).
        self.maxLength = None
        self.minLength = None
        self.defaultLength = 0
        self.defaultItem = None
        # XType describing each element of the sequence.
        self.itemType = itemType
class DictType(NamedFieldType):
    """Named-field type backed by a plain dict."""
    pass
class ObjectInstanceType(NamedFieldType):
    """Named-field type describing instances of a concrete class *cls*."""
    def __init__(self, cls, canBeNone=None, defaultValue=None):
        # Fix: the defaultValue argument was previously discarded
        # (None was hard-coded in the parent call).
        NamedFieldType.__init__(self, canBeNone=canBeNone,
                                defaultValue=defaultValue)
        # NamedFieldType.__init__ resets defaultValue to {}; restore the
        # caller's value when one was actually supplied.
        if defaultValue is not None:
            self.defaultValue = defaultValue
        self.cls = cls
def native2XType(type_):
    """Map a native Python type object to a fresh XType instance.

    Falls through (implicitly returning None) for unmapped types.
    NOTE(review): Python 2 only -- references ``unicode``.
    """
    if type_ in (int, float):
        return NumberType(type_)
    if type_ is bool:
        return BoolType()
    if type_ in (str, unicode):
        return StringType()
    if type_ in (dict, list, tuple, set):
        return ComplexType()
    if type_ in (datetime.datetime, datetime.date):
        return DateType()
| |
import datetime
import unittest
from decimal import Decimal
import django.test
from django import forms
from django.db import models
from django.core.exceptions import ValidationError
from models import Foo, Bar, Whiz, BigD, BigS, Image, BigInt, Post, NullBooleanModel, BooleanModel
# If PIL available, do these tests.
if Image:
from imagefield import \
ImageFieldTests, \
ImageFieldTwoDimensionsTests, \
ImageFieldNoDimensionsTests, \
ImageFieldOneDimensionTests, \
ImageFieldDimensionsFirstTests, \
ImageFieldUsingFileTests, \
TwoImageFieldTests
class BasicFieldTests(django.test.TestCase):
    """Basic regression tests for models.Field behaviour."""
    def test_show_hidden_initial(self):
        """
        Regression test for #12913. Make sure fields with choices respect
        show_hidden_initial as a kwarg to models.Field.formfield()
        """
        choices = [(0, 0), (1, 1)]
        model_field = models.Field(choices=choices)
        form_field = model_field.formfield(show_hidden_initial=True)
        self.assertTrue(form_field.show_hidden_initial)
        form_field = model_field.formfield(show_hidden_initial=False)
        self.assertFalse(form_field.show_hidden_initial)
    def test_nullbooleanfield_blank(self):
        """
        Regression test for #13071: NullBooleanField should not throw
        a validation error when given a value of None.
        """
        nullboolean = NullBooleanModel(nbfield=None)
        try:
            nullboolean.full_clean()
        # 'except X as e' replaces the deprecated 'except X, e' comma
        # syntax (valid from Python 2.6, mandatory on Python 3).
        except ValidationError as e:
            self.fail("NullBooleanField failed validation with value of None: %s" % e.messages)
class DecimalFieldTests(django.test.TestCase):
    """Conversion, formatting and lookup tests for models.DecimalField."""
    def test_to_python(self):
        """to_python accepts ints and numeric strings, rejects garbage."""
        f = models.DecimalField(max_digits=4, decimal_places=2)
        self.assertEqual(f.to_python(3), Decimal("3"))
        self.assertEqual(f.to_python("3.14"), Decimal("3.14"))
        self.assertRaises(ValidationError, f.to_python, "abc")
    def test_default(self):
        """A Decimal default is returned unchanged."""
        f = models.DecimalField(default=Decimal("0.00"))
        self.assertEqual(f.get_default(), Decimal("0.00"))
    def test_format(self):
        """_format renders to the declared number of decimal places."""
        f = models.DecimalField(max_digits=5, decimal_places=1)
        self.assertEqual(f._format(f.to_python(2)), u'2.0')
        self.assertEqual(f._format(f.to_python('2.6')), u'2.6')
        self.assertEqual(f._format(None), None)
    def test_get_db_prep_lookup(self):
        """None is passed through to the DB layer as [None]."""
        from django.db import connection
        f = models.DecimalField(max_digits=5, decimal_places=1)
        self.assertEqual(f.get_db_prep_lookup('exact', None, connection=connection), [None])
    def test_filter_with_strings(self):
        """
        We should be able to filter decimal fields using strings (#8023)
        """
        Foo.objects.create(id=1, a='abc', d=Decimal("12.34"))
        self.assertEqual(list(Foo.objects.filter(d=u'1.23')), [])
    def test_save_without_float_conversion(self):
        """
        Ensure decimals don't go through a corrupting float conversion during
        save (#5079).
        """
        bd = BigD(d="12.9")
        bd.save()
        bd = BigD.objects.get(pk=bd.pk)
        self.assertEqual(bd.d, Decimal("12.9"))
    def test_lookup_really_big_value(self):
        """
        Ensure that really big values can be used in a filter statement, even
        with older Python versions.
        """
        # This should not crash. That counts as a win for our purposes.
        Foo.objects.filter(d__gte=100000000000)
class ForeignKeyTests(django.test.TestCase):
    """Tests for ForeignKey behaviour."""
    def test_callable_default(self):
        """Test the use of a lazy callable for ForeignKey.default"""
        # Bar.b's default is expected to lazily resolve to the Foo
        # created here -- see the Bar model definition (not in this file).
        a = Foo.objects.create(id=1, a='abc', d=Decimal("12.34"))
        b = Bar.objects.create(b="bcd")
        self.assertEqual(b.a, a)
class DateTimeFieldTests(unittest.TestCase):
    """Microsecond parsing for DateTimeField and TimeField."""
    def test_datetimefield_to_python_usecs(self):
        """DateTimeField.to_python should support usecs"""
        field = models.DateTimeField()
        cases = [
            ('2001-01-02 03:04:05.000006',
             datetime.datetime(2001, 1, 2, 3, 4, 5, 6)),
            ('2001-01-02 03:04:05.999999',
             datetime.datetime(2001, 1, 2, 3, 4, 5, 999999)),
        ]
        for raw, expected in cases:
            self.assertEqual(field.to_python(raw), expected)
    def test_timefield_to_python_usecs(self):
        """TimeField.to_python should support usecs"""
        field = models.TimeField()
        cases = [
            ('01:02:03.000004', datetime.time(1, 2, 3, 4)),
            ('01:02:03.999999', datetime.time(1, 2, 3, 999999)),
        ]
        for raw, expected in cases:
            self.assertEqual(field.to_python(raw), expected)
class BooleanFieldTests(unittest.TestCase):
    """Conversion tests for BooleanField / NullBooleanField."""
    def _test_get_db_prep_lookup(self, f):
        # Truthy/falsy inputs map to [True]/[False]; None passes through.
        from django.db import connection
        self.assertEqual(f.get_db_prep_lookup('exact', True, connection=connection), [True])
        self.assertEqual(f.get_db_prep_lookup('exact', '1', connection=connection), [True])
        self.assertEqual(f.get_db_prep_lookup('exact', 1, connection=connection), [True])
        self.assertEqual(f.get_db_prep_lookup('exact', False, connection=connection), [False])
        self.assertEqual(f.get_db_prep_lookup('exact', '0', connection=connection), [False])
        self.assertEqual(f.get_db_prep_lookup('exact', 0, connection=connection), [False])
        self.assertEqual(f.get_db_prep_lookup('exact', None, connection=connection), [None])
    def _test_to_python(self, f):
        # to_python must return real bools, not ints.
        self.assertTrue(f.to_python(1) is True)
        self.assertTrue(f.to_python(0) is False)
    def test_booleanfield_get_db_prep_lookup(self):
        self._test_get_db_prep_lookup(models.BooleanField())
    def test_nullbooleanfield_get_db_prep_lookup(self):
        self._test_get_db_prep_lookup(models.NullBooleanField())
    def test_booleanfield_to_python(self):
        self._test_to_python(models.BooleanField())
    def test_nullbooleanfield_to_python(self):
        self._test_to_python(models.NullBooleanField())
    def test_booleanfield_choices_blank(self):
        """
        Test that BooleanField with choices and defaults doesn't generate a
        formfield with the blank option (#9640, #10549).
        """
        choices = [(1, u'Si'), (2, 'No')]
        f = models.BooleanField(choices=choices, default=1, null=True)
        self.assertEqual(f.formfield().choices, [('', '---------')] + choices)
        f = models.BooleanField(choices=choices, default=1, null=False)
        self.assertEqual(f.formfield().choices, choices)
    def test_return_type(self):
        # Values fetched back from the database must be real bools.
        b = BooleanModel()
        b.bfield = True
        b.save()
        b2 = BooleanModel.objects.get(pk=b.pk)
        self.assertTrue(isinstance(b2.bfield, bool))
        self.assertEqual(b2.bfield, True)
        b3 = BooleanModel()
        b3.bfield = False
        b3.save()
        b4 = BooleanModel.objects.get(pk=b3.pk)
        self.assertTrue(isinstance(b4.bfield, bool))
        self.assertEqual(b4.bfield, False)
        b = NullBooleanModel()
        b.nbfield = True
        b.save()
        b2 = NullBooleanModel.objects.get(pk=b.pk)
        self.assertTrue(isinstance(b2.nbfield, bool))
        self.assertEqual(b2.nbfield, True)
        b3 = NullBooleanModel()
        b3.nbfield = False
        b3.save()
        b4 = NullBooleanModel.objects.get(pk=b3.pk)
        self.assertTrue(isinstance(b4.nbfield, bool))
        self.assertEqual(b4.nbfield, False)
        # http://code.djangoproject.com/ticket/13293
        # Verify that when an extra clause exists, the boolean
        # conversions are applied with an offset
        b5 = BooleanModel.objects.all().extra(
            select={'string_length': 'LENGTH(string)'})[0]
        self.assertFalse(isinstance(b5.pk, bool))
class ChoicesTests(django.test.TestCase):
    """Tests for choice display resolution via get_FIELD_display."""
    def test_choices_and_field_display(self):
        """
        Check that get_choices and get_flatchoices interact with
        get_FIELD_display to return the expected values (#7913).
        """
        self.assertEqual(Whiz(c=1).get_c_display(), 'First') # A nested value
        self.assertEqual(Whiz(c=0).get_c_display(), 'Other') # A top level value
        self.assertEqual(Whiz(c=9).get_c_display(), 9) # Invalid value
        self.assertEqual(Whiz(c=None).get_c_display(), None) # Blank value
        self.assertEqual(Whiz(c='').get_c_display(), '') # Empty value
class SlugFieldTests(django.test.TestCase):
    """Tests for SlugField length handling."""
    def test_slugfield_max_length(self):
        """
        Make sure SlugField honors max_length (#9706)
        """
        # 200-character slug must survive a save/reload round trip.
        bs = BigS.objects.create(s = 'slug'*50)
        bs = BigS.objects.get(pk=bs.pk)
        self.assertEqual(bs.s, 'slug'*50)
class ValidationTest(django.test.TestCase):
    """Field.clean() validation behaviour for the core field types."""
    def test_charfield_raises_error_on_empty_string(self):
        f = models.CharField()
        self.assertRaises(ValidationError, f.clean, "", None)
    def test_charfield_cleans_empty_string_when_blank_true(self):
        f = models.CharField(blank=True)
        self.assertEqual('', f.clean('', None))
    def test_integerfield_cleans_valid_string(self):
        f = models.IntegerField()
        self.assertEqual(2, f.clean('2', None))
    def test_integerfield_raises_error_on_invalid_intput(self):
        f = models.IntegerField()
        self.assertRaises(ValidationError, f.clean, "a", None)
    def test_charfield_with_choices_cleans_valid_choice(self):
        f = models.CharField(max_length=1, choices=[('a','A'), ('b','B')])
        self.assertEqual('a', f.clean('a', None))
    def test_charfield_with_choices_raises_error_on_invalid_choice(self):
        f = models.CharField(choices=[('a','A'), ('b','B')])
        self.assertRaises(ValidationError, f.clean, "not a", None)
    def test_choices_validation_supports_named_groups(self):
        # Choices may be nested under a group label.
        f = models.IntegerField(choices=(('group',((10,'A'),(20,'B'))),(30,'C')))
        self.assertEqual(10, f.clean(10, None))
    def test_nullable_integerfield_raises_error_with_blank_false(self):
        f = models.IntegerField(null=True, blank=False)
        self.assertRaises(ValidationError, f.clean, None, None)
    def test_nullable_integerfield_cleans_none_on_null_and_blank_true(self):
        f = models.IntegerField(null=True, blank=True)
        self.assertEqual(None, f.clean(None, None))
    def test_integerfield_raises_error_on_empty_input(self):
        f = models.IntegerField(null=False)
        self.assertRaises(ValidationError, f.clean, None, None)
        self.assertRaises(ValidationError, f.clean, '', None)
    def test_charfield_raises_error_on_empty_input(self):
        f = models.CharField(null=False)
        self.assertRaises(ValidationError, f.clean, None, None)
    def test_datefield_cleans_date(self):
        f = models.DateField()
        self.assertEqual(datetime.date(2008, 10, 10), f.clean('2008-10-10', None))
    def test_boolean_field_doesnt_accept_empty_input(self):
        f = models.BooleanField()
        self.assertRaises(ValidationError, f.clean, None, None)
class BigIntegerFieldTests(django.test.TestCase):
    """Round-trip, limit and coercion tests for BigIntegerField."""
    def test_limits(self):
        # Ensure that values that are right at the limits can be saved
        # and then retrieved without corruption.
        maxval = 9223372036854775807
        minval = -maxval - 1
        BigInt.objects.create(value=maxval)
        qs = BigInt.objects.filter(value__gte=maxval)
        self.assertEqual(qs.count(), 1)
        self.assertEqual(qs[0].value, maxval)
        BigInt.objects.create(value=minval)
        qs = BigInt.objects.filter(value__lte=minval)
        self.assertEqual(qs.count(), 1)
        self.assertEqual(qs[0].value, minval)
    def test_types(self):
        # NOTE(review): 'long' is Python 2 only.
        b = BigInt(value = 0)
        self.assertTrue(isinstance(b.value, (int, long)))
        b.save()
        self.assertTrue(isinstance(b.value, (int, long)))
        b = BigInt.objects.all()[0]
        self.assertTrue(isinstance(b.value, (int, long)))
    def test_coercing(self):
        # String input must be coerced to int on both create and lookup.
        BigInt.objects.create(value ='10')
        b = BigInt.objects.get(value = '10')
        self.assertEqual(b.value, 10)
class TypeCoercionTests(django.test.TestCase):
    """
    Test that database lookups can accept the wrong types and convert
    them with no error: especially on Postgres 8.3+ which does not do
    automatic casting at the DB level. See #10015.
    """
    def test_lookup_integer_in_charfield(self):
        # assertEqual replaces the deprecated assertEquals alias.
        self.assertEqual(Post.objects.filter(title=9).count(), 0)
    def test_lookup_integer_in_textfield(self):
        self.assertEqual(Post.objects.filter(body=24).count(), 0)
| |
###unique
#Copyright 2005-2008 J. David Gladstone Institutes, San Francisco California
#Author Nathan Salomonis - nsalomonis@gmail.com
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is furnished
#to do so, subject to the following conditions:
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
#INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
#PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
#HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
#OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
#SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""This module contains instructions for recalling operating system file and directory paths,
eliminating redundant list entries, removing unecessary file paths from py2app or py2exe
and reading the propper Ensembl database version to allow for version specific access."""
import sys, string
import os.path, platform
import unique ### Import itself as a reference to it's location
dirfile = unique
# Packager-specific archive path segments that must be stripped from
# resolved paths (py2app per Python version, py2exe, cx_Freeze,
# PyInstaller) -- first for GO-Elite ...
py2app_adj = '/GO_Elite.app/Contents/Resources/Python/site-packages.zip'
py2app_adj1 = '/GO_Elite.app/Contents/Resources/lib/python2.4/site-packages.zip'
py2app_adj2 = '/GO_Elite.app/Contents/Resources/lib/python2.5/site-packages.zip'
py2app_adj3 = '/GO_Elite.app/Contents/Resources/lib/python2.6/site-packages.zip'
py2app_adj4 = '/GO_Elite.app/Contents/Resources/lib/python2.7/site-packages.zip'
py2exe_adj = '\\library.zip' ###py2exe
cx_Freeze_adj = '/library.zip'
pyinstaller_adj = '/GO_Elite.app/Contents/MacOS'
py2app_ge_dirs = [py2app_adj,py2exe_adj,py2app_adj1,py2app_adj2,py2app_adj3,py2app_adj4,cx_Freeze_adj,pyinstaller_adj]
# ... and again for AltAnalyze (same variables deliberately rebound).
py2app_adj = '/AltAnalyze.app/Contents/Resources/Python/site-packages.zip'
py2app_adj1 = '/AltAnalyze.app/Contents/Resources/lib/python2.4/site-packages.zip'
py2app_adj2 = '/AltAnalyze.app/Contents/Resources/lib/python2.5/site-packages.zip'
py2app_adj3 = '/AltAnalyze.app/Contents/Resources/lib/python2.6/site-packages.zip'
py2app_adj4 = '/AltAnalyze.app/Contents/Resources/lib/python2.7/site-packages.zip'
py2exe_adj = '\\library.zip' ###py2exe
cx_Freeze_adj = '/library.zip'
pyinstaller_adj = '/AltAnalyze.app/Contents/MacOS'
pyinstaller_adj2 = '/AltAnalyze.app/Contents/Resources'
py2app_aa_dirs = [py2app_adj,py2app_adj1,py2exe_adj,py2app_adj2,py2app_adj3,py2app_adj4,cx_Freeze_adj,pyinstaller_adj,pyinstaller_adj2]
py2app_dirs = py2app_ge_dirs + py2app_aa_dirs
# Also cover the AltAnalyzeViewer.app variants of the AltAnalyze paths.
for i in py2app_aa_dirs:
    i = string.replace(i,'AltAnalyze.app','AltAnalyzeViewer.app')
    py2app_dirs.append(i)
# Determine the application root: next to the frozen executable under
# PyInstaller on linux/posix, else this module's directory.
if ('linux' in sys.platform or 'posix' in sys.platform) and getattr(sys, 'frozen', False): ### For PyInstaller
    application_path = os.path.dirname(sys.executable)
    #application_path = sys._MEIPASS ### should be the same as the above
else:
    application_path = os.path.dirname(__file__)
# Strip trailing 'AltAnalyze?...' / 'GO_Elite?...' log-name suffixes.
if 'AltAnalyze?' in application_path:
    application_path = string.replace(application_path,'//','/')
    application_path = string.replace(application_path,'\\','/') ### If /// present
    application_path = string.split(application_path,'AltAnalyze?')[0]
if 'GO_Elite?' in application_path:
    application_path = string.replace(application_path,'//','/')
    application_path = string.replace(application_path,'\\','/') ### If /// present
    application_path = string.split(application_path,'GO_Elite?')[0]
def filepath(filename):
    """Resolve *filename* to a path under the application root.

    Strips packager archive segments, inserts the current gene database
    version into Databases/AltDatabase paths, and normalizes slashes.
    """
    #dir=os.path.dirname(dirfile.__file__) #directory file is input as a variable under the main
    dir = application_path
    if filename== '': ### Windows will actually recognize '' as the AltAnalyze root in certain situations but not others
        fn = dir
    elif ':' in filename:
        # Already an absolute Windows-style path; use as-is.
        fn = filename
    else:
        try: dir_list = os.listdir(filename); fn = filename ### test to see if the path can be found (then it is the full path)
        except Exception: fn=os.path.join(dir,filename)
    if '/Volumes/' in filename: filenames = string.split(filename,'/Volumes/'); fn = '/Volumes/'+filenames[-1]
    for py2app_dir in py2app_dirs: fn = string.replace(fn,py2app_dir,'')
    if 'Databases' in fn or 'AltDatabase' in fn:
        getCurrentGeneDatabaseVersion()
        fn = correctGeneDatabaseDir(fn)
    fn = string.replace(fn,'.txt.txt','.txt')
    fn = string.replace(fn,'//','/')
    fn = string.replace(fn,'//','/') ### If /// present
    return fn
def read_directory(sub_dir):
    """List the entries of *sub_dir* below the application root.

    Applies database-version rewriting and drops macOS '.DS_Store'.
    """
    dir=application_path
    for py2app_dir in py2app_dirs: dir = string.replace(dir,py2app_dir,'')
    if 'Databases' in sub_dir or 'AltDatabase' in sub_dir:
        getCurrentGeneDatabaseVersion()
        sub_dir = correctGeneDatabaseDir(sub_dir)
    try: dir_list = os.listdir(dir+sub_dir)
    except Exception: dir_list = os.listdir(sub_dir) ### For linux
    try: dir_list.remove('.DS_Store') ### This is needed on a mac
    except Exception: null=[]
    return dir_list
def returnDirectories(sub_dir):
    """Like read_directory, but aborts noisily when the path is missing.

    NOTE(review): the final failure branch uses a Python 2 print
    statement and then references the undefined name ``bad_exit`` to
    abort via NameError -- deliberate but crude.
    """
    dir=application_path
    if 'Databases' in sub_dir or 'AltDatabase' in sub_dir:
        getCurrentGeneDatabaseVersion()
        sub_dir = correctGeneDatabaseDir(sub_dir)
    for py2app_dir in py2app_dirs:
        dir = string.replace(dir,py2app_dir,'')
    try: dir_list = os.listdir(dir + sub_dir)
    except Exception:
        try: dir_list = os.listdir(sub_dir) ### For linux
        except Exception: print dir, sub_dir; bad_exit
    return dir_list
def returnDirectoriesNoReplace(sub_dir):
    """List *sub_dir* without applying database-version rewriting.

    Falls back to the bare path, then to the path minus its leading
    character (relative form on linux).
    """
    dir=application_path
    for py2app_dir in py2app_dirs:
        dir = string.replace(dir,py2app_dir,'')
    try: dir_list = os.listdir(dir + sub_dir)
    except Exception:
        try: dir_list = os.listdir(sub_dir) ### For linux
        except Exception: dir_list = os.listdir(sub_dir[1:]) ### For linux
    return dir_list
def refDir():
    """Return the application root with packager archive segments removed.

    Bug fix: the loop iterated over ``py2app_dirs`` but replaced the
    fixed module global ``py2app_adj`` on every pass instead of the loop
    variable, so only that single segment was ever stripped.
    """
    reference_dir=application_path #directory file is input as a variable under the main
    for py2app_dir in py2app_dirs:
        reference_dir = string.replace(reference_dir, py2app_dir, '')
    return reference_dir
def whatProgramIsThis():
    """Identify the hosting program from the reference directory.

    Returns a (program_name, database_dir) tuple; AltAnalyze is the
    fallback when neither marker is present.
    """
    reference_dir = refDir()
    if 'AltAnalyze' in reference_dir:
        return 'AltAnalyze', 'AltDatabase/goelite/'
    if 'GO-Elite' in reference_dir:
        return 'GO-Elite', 'Databases/'
    return 'AltAnalyze', 'AltDatabase/goelite/'
def correctGeneDatabaseDir(fn):
    """Insert the current gene database version directory into path *fn*.

    Relies on the module global ``gene_database_dir`` being set first by
    getCurrentGeneDatabaseVersion(); any error returns *fn* unchanged.
    """
    try:
        proceed = 'no'
        alt_version = 'AltDatabase/'+gene_database_dir
        elite_version = 'Databases/'+gene_database_dir
        fn=string.replace(fn,'//','/'); fn=string.replace(fn,'\\','/')
        # Only rewrite when the version dir is not already in the path
        # and the path is not an EnsMart archive.
        if (alt_version not in fn) and (elite_version not in fn): proceed = 'yes' ### If the user creates that contains EnsMart
        if gene_database_dir not in fn: proceed = 'yes'
        if 'EnsMart' in fn: proceed = 'no'
        if proceed == 'yes':
            fn = string.replace(fn,'Databases','Databases/'+gene_database_dir)
            if 'AltDatabase/affymetrix' not in fn and 'NoVersion' not in fn and 'AltDatabase/primer3' not in fn and 'AltDatabase/TreeView' not in fn and 'AltDatabase/kallisto' not in fn:
                if 'AltDatabase' in fn:
                    fn = string.replace(fn,'AltDatabase','AltDatabase/'+gene_database_dir)
        fn = string.replace(fn,'NoVersion','') ### When the text 'NoVersion' is in a filepath, is tells the program to ignore it for adding the database version
    except Exception: null = ''
    return fn
def getCurrentGeneDatabaseVersion():
    """Read Config/version.txt and set the global ``gene_database_dir``.

    Each line is '<db_dir>\\t<date>'; the last line wins.  On any failure
    gene_database_dir is set to ''.  Fix: the original left the file
    handle open; a context manager now guarantees it is closed.
    """
    global gene_database_dir
    try:
        filename = 'Config/version.txt'; fn=filepath(filename)
        with open(fn,'r') as version_file:
            for line in version_file.readlines():
                gene_database_dir, previous_date = string.split(line,'\t')
    except Exception: gene_database_dir=''
    return gene_database_dir
def unique(s):
    """Return a new list with duplicate elements of *s* removed.

    Tries three strategies in decreasing order of speed -- dict hashing,
    sort-and-scan, quadratic membership scan -- falling back when the
    elements are unhashable or unorderable (ActiveState recipe 52560).

    Fix: the hashing branch now returns ``list(u.keys())`` so the result
    is a real list on Python 3 as well (``dict.keys()`` is a view there,
    whereas Python 2 returned a list).
    """
    n = len(s)
    if n == 0: return []
    u = {}
    try:
        for x in s: u[x] = 1
    except TypeError: del u # elements unhashable; move on to the next method
    else: return list(u.keys())
    try: t = list(s); t.sort()
    except TypeError: del t # elements unorderable; move on to the next method
    else:
        # Scan the sorted copy, compacting unique values in place.
        assert n > 0
        last = t[0]; lasti = i = 1
        while i < n:
            if t[i] != last: t[lasti] = last = t[i]; lasti += 1
            i += 1
        return t[:lasti]
    # Last resort: O(n**2) membership scan, preserves first occurrences.
    u = []
    for x in s:
        if x not in u: u.append(x)
    return u
def dictionary(s):
    """Map each element of *s* to an empty list.

    Unhashable elements are converted to tuples before being used as keys.
    """
    result = {}
    for item in s:
        try:
            result[item] = []
        except TypeError:
            result[tuple(item)] = []
    return result
def unique_db(s):
    """Return the elements of *s* with duplicates removed.

    Unhashable elements are stored (and returned) as tuples.
    """
    seen = {}
    for item in s:
        try:
            seen[item] = []
        except TypeError:
            seen[tuple(item)] = []
    result = []
    for key in seen:
        result.append(key)
    return result
def list(d):
    """Return the items of iterable *d* as a new list.

    NOTE: intentionally shadows the builtin ``list`` for the rest of
    this module, matching the original API.
    """
    return [item for item in d]
if __name__ == '__main__':
    # Ad-hoc manual checks (Python 2 print statements); each filepath()
    # call exits immediately, so only the first line effectively runs.
    fn = filepath('/home/nsalomonis/Desktop/GO-Elite_v.1.2.4-Ubuntu-1/GO_Elite?42197/GO-Elite_report-20120512-151332.log')
    print fn; sys.exit()
    fn = filepath('BuildDBs/Amadeus/symbol-Metazoan-Amadeus.txt')
    print fn;sys.exit()
    unique_db([1,2,3,4,4,4,5])
| |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.test import SimpleTestCase
from django.utils.six.moves import xrange
from likert_field.templatetags.likert_star_tools import render_stars
from likert_field.templatetags.likert_fa_stars import fa_stars3, fa_stars4
from likert_field.templatetags.likert_bs_stars import (
bs_stars2, bs_stars3, bs_stars3_bsr)
class StarToolsTestCase(SimpleTestCase):
    """Tests for likert_star_tools.render_stars."""
    def setUp(self):
        # Single-character markers make the counting assertions simple.
        self.star_set = {
            'star': 's',
            'unlit': 'u',
            'noanswer': 'n'
        }
    def test_render_stars(self):
        max_test_stars = 50
        for max_stars in xrange(1, max_test_stars + 1):
            for num in xrange(max_stars + 1):
                stars = render_stars(num, max_stars, self.star_set)
                self.assertEqual(len(stars), max_stars)
                self.assertEqual(stars.count(self.star_set['star']), num)
                self.assertEqual(
                    stars.count(self.star_set['unlit']), max_stars - num)
    def test_render_stars_none(self):
        """
        By design items with no answer are stored as NULL which are converted
        to None by the ORM. They are rendered as a ban icon which looks
        similar enough to the empty set.
        """
        stars = render_stars(None, 5, self.star_set)
        self.assertEqual(len(stars), 1)
        self.assertEqual(stars.count(self.star_set['noanswer']), 1)
    def test_render_stars_blank(self):
        """
        If your database is storing numbers as strings you might need this.
        Empty strings holding non answered items are rendered as a ban symbol.
        """
        stars = render_stars('', 5, self.star_set)
        self.assertEqual(len(stars), 1)
        self.assertEqual(stars.count(self.star_set['noanswer']), 1)
    def test_num_greater_than_max_error(self):
        """
        When the number of stars scored exceeds the maximum stars displayed,
        just display the maximum stars allowed
        """
        num = 4
        max_stars = 3
        self.assertTrue(num > max_stars)
        stars = render_stars(4, 3, self.star_set)
        self.assertEqual(len(stars), max_stars)
        self.assertEqual(stars.count(self.star_set['star']), max_stars)
        self.assertEqual(stars.count(self.star_set['unlit']), 0)
    def test_render_string_numbers(self):
        """
        String representations of integers are rendered in the usual manner
        """
        max_test_stars = 50
        for max_stars in xrange(1, max_test_stars + 1):
            for num in xrange(max_stars + 1):
                # Fix: keep the loop counters as ints.  The original
                # rebound num/max_stars to str inside the body, forcing
                # int() calls in the assertions and leaving type-shifted
                # loop variables between iterations.
                stars = render_stars(str(num), str(max_stars), self.star_set)
                self.assertEqual(len(stars), max_stars)
                self.assertEqual(stars.count(self.star_set['star']), num)
                self.assertEqual(
                    stars.count(self.star_set['unlit']),
                    max_stars - num)
class BootstrapTestCase(SimpleTestCase):
    """Rendering through the Bootstrap 2/3 star template tags."""

    def test_bs_stars2_render(self):
        lit = "<i class='icon-star likert-star'></i>"
        unlit = "<i class='icon-star-empty likert-star'></i>"
        # Default maximum is five stars.
        self.assertEqual(bs_stars2(3), lit * 3 + unlit * 2)

    def test_bs_stars2_render_parameters(self):
        lit = "<i class='icon-star likert-star'></i>"
        unlit = "<i class='icon-star-empty likert-star'></i>"
        self.assertEqual(bs_stars2(1, 3), lit + unlit * 2)

    def test_bs_stars2_render_noanswer(self):
        expected = "<i class='icon-ban-circle likert-star'></i>"
        self.assertEqual(bs_stars2(None), expected)

    def test_bs_stars3_render(self):
        lit = "<i class='glyphicon glyphicon-star likert-star'></i>"
        unlit = "<i class='glyphicon glyphicon-star-empty likert-star'></i>"
        self.assertEqual(bs_stars3(1), lit + unlit * 4)

    def test_bs_stars3_render_parameters(self):
        lit = "<i class='glyphicon glyphicon-star likert-star'></i>"
        unlit = "<i class='glyphicon glyphicon-star-empty likert-star'></i>"
        self.assertEqual(bs_stars3(1, 2), lit + unlit)

    def test_bs_stars3_render_noanswer(self):
        expected = (
            "<i class='glyphicon glyphicon-ban-circle likert-star'></i>")
        self.assertEqual(bs_stars3(None), expected)

    def test_bs_stars3_bsr_render_noanswer(self):
        """The BSR variant is the same as bs_stars3 except NULL handler"""
        expected = (
            "<i class='glyphicon glyphicon-minus-sign likert-star'></i>")
        self.assertEqual(bs_stars3_bsr(None), expected)
class FontAwesomeTestCase(SimpleTestCase):
    """Rendering through the Font Awesome 3/4 star template tags."""

    def test_fa_stars3_render(self):
        lit = "<i class='icon-star likert-star'></i>"
        unlit = "<i class='icon-star-empty likert-star'></i>"
        # Default maximum is five stars.
        self.assertEqual(fa_stars3(2), lit * 2 + unlit * 3)

    def test_fa_stars3_render_parameters(self):
        lit = "<i class='icon-star likert-star'></i>"
        unlit = "<i class='icon-star-empty likert-star'></i>"
        self.assertEqual(fa_stars3(1, 7), lit + unlit * 6)

    def test_fa_stars3_render_noanswer(self):
        expected = "<i class='icon-ban-circle likert-star'></i>"
        self.assertEqual(fa_stars3(None), expected)

    def test_fa_stars4_render(self):
        lit = "<i class='fa fa-star likert-star'></i>"
        unlit = "<i class='fa fa-star-o likert-star'></i>"
        self.assertEqual(fa_stars4(2), lit * 2 + unlit * 3)

    def test_fa_stars4_render_parameters(self):
        lit = "<i class='fa fa-star likert-star'></i>"
        unlit = "<i class='fa fa-star-o likert-star'></i>"
        self.assertEqual(fa_stars4(1, 4), lit + unlit * 3)

    def test_fa_stars4_render_noanswer(self):
        expected = "<i class='fa fa-ban likert-star'></i>"
        self.assertEqual(fa_stars4(None), expected)
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from msrest.polling import LROPoller, NoPolling
from msrestazure.polling.arm_polling import ARMPolling
from .. import models
class VirtualNetworksOperations(object):
    """VirtualNetworksOperations operations.
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    :ivar api_version: Client API version. Constant value: "2017-03-01".
    """
    # NOTE: AutoRest-generated client; hand edits will be lost on regeneration.
    models = models
    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        # API version is pinned for this generated client.
        self.api_version = "2017-03-01"
        self.config = config
    def _delete_initial(
            self, resource_group_name, virtual_network_name, custom_headers=None, raw=False, **operation_config):
        """Send the initial DELETE request once; polling is handled by delete()."""
        # Construct URL
        url = self.delete.metadata['url']
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct and send request
        request = self._client.delete(url, query_parameters)
        response = self._client.send(request, header_parameters, stream=False, **operation_config)
        # 200/202 = accepted for (async) deletion, 204 = nothing to delete.
        if response.status_code not in [200, 202, 204]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp
        if raw:
            client_raw_response = ClientRawResponse(None, response)
            return client_raw_response
    def delete(
            self, resource_group_name, virtual_network_name, custom_headers=None, raw=False, polling=True, **operation_config):
        """Deletes the specified virtual network.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param virtual_network_name: The name of the virtual network.
        :type virtual_network_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: The poller return type is ClientRawResponse, the
         direct response alongside the deserialized response
        :param polling: True for ARMPolling, False for no polling, or a
         polling object for personal polling strategy
        :return: An instance of LROPoller that returns None or
         ClientRawResponse<None> if raw==True
        :rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or
         ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[None]]
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        raw_result = self._delete_initial(
            resource_group_name=resource_group_name,
            virtual_network_name=virtual_network_name,
            custom_headers=custom_headers,
            raw=True,
            **operation_config
        )
        def get_long_running_output(response):
            # DELETE has no response body; only wrap when raw was requested.
            if raw:
                client_raw_response = ClientRawResponse(None, response)
                return client_raw_response
        lro_delay = operation_config.get(
            'long_running_operation_timeout',
            self.config.long_running_operation_timeout)
        if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}'}
    def get(
            self, resource_group_name, virtual_network_name, expand=None, custom_headers=None, raw=False, **operation_config):
        """Gets the specified virtual network by resource group.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param virtual_network_name: The name of the virtual network.
        :type virtual_network_name: str
        :param expand: Expands referenced resources.
        :type expand: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: VirtualNetwork or ClientRawResponse if raw=true
        :rtype: ~azure.mgmt.network.v2017_03_01.models.VirtualNetwork or
         ~msrest.pipeline.ClientRawResponse
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = self.get.metadata['url']
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        if expand is not None:
            query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct and send request
        request = self._client.get(url, query_parameters)
        response = self._client.send(request, header_parameters, stream=False, **operation_config)
        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('VirtualNetwork', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}'}
    def _create_or_update_initial(
            self, resource_group_name, virtual_network_name, parameters, custom_headers=None, raw=False, **operation_config):
        """Send the initial PUT request once; polling is handled by create_or_update()."""
        # Construct URL
        url = self.create_or_update.metadata['url']
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct body
        body_content = self._serialize.body(parameters, 'VirtualNetwork')
        # Construct and send request
        request = self._client.put(url, query_parameters)
        response = self._client.send(
            request, header_parameters, body_content, stream=False, **operation_config)
        # 200 = updated, 201 = created; both carry a VirtualNetwork body.
        if response.status_code not in [200, 201]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('VirtualNetwork', response)
        if response.status_code == 201:
            deserialized = self._deserialize('VirtualNetwork', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized
    def create_or_update(
            self, resource_group_name, virtual_network_name, parameters, custom_headers=None, raw=False, polling=True, **operation_config):
        """Creates or updates a virtual network in the specified resource group.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param virtual_network_name: The name of the virtual network.
        :type virtual_network_name: str
        :param parameters: Parameters supplied to the create or update virtual
         network operation
        :type parameters:
         ~azure.mgmt.network.v2017_03_01.models.VirtualNetwork
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: The poller return type is ClientRawResponse, the
         direct response alongside the deserialized response
        :param polling: True for ARMPolling, False for no polling, or a
         polling object for personal polling strategy
        :return: An instance of LROPoller that returns VirtualNetwork or
         ClientRawResponse<VirtualNetwork> if raw==True
        :rtype:
         ~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2017_03_01.models.VirtualNetwork]
         or
         ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.network.v2017_03_01.models.VirtualNetwork]]
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        raw_result = self._create_or_update_initial(
            resource_group_name=resource_group_name,
            virtual_network_name=virtual_network_name,
            parameters=parameters,
            custom_headers=custom_headers,
            raw=True,
            **operation_config
        )
        def get_long_running_output(response):
            # Final poll response carries the resulting VirtualNetwork.
            deserialized = self._deserialize('VirtualNetwork', response)
            if raw:
                client_raw_response = ClientRawResponse(deserialized, response)
                return client_raw_response
            return deserialized
        lro_delay = operation_config.get(
            'long_running_operation_timeout',
            self.config.long_running_operation_timeout)
        if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}'}
    def list_all(
            self, custom_headers=None, raw=False, **operation_config):
        """Gets all virtual networks in a subscription.
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: An iterator like instance of VirtualNetwork
        :rtype:
         ~azure.mgmt.network.v2017_03_01.models.VirtualNetworkPaged[~azure.mgmt.network.v2017_03_01.models.VirtualNetwork]
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        def internal_paging(next_link=None, raw=False):
            # Fetch one page: build the first URL, or follow the server-supplied next_link.
            if not next_link:
                # Construct URL
                url = self.list_all.metadata['url']
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
            else:
                url = next_link
                query_parameters = {}
            # Construct headers
            header_parameters = {}
            header_parameters['Content-Type'] = 'application/json; charset=utf-8'
            if self.config.generate_client_request_id:
                header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
            if custom_headers:
                header_parameters.update(custom_headers)
            if self.config.accept_language is not None:
                header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
            # Construct and send request
            request = self._client.get(url, query_parameters)
            response = self._client.send(
                request, header_parameters, stream=False, **operation_config)
            if response.status_code not in [200]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp
            return response
        # Deserialize response
        deserialized = models.VirtualNetworkPaged(internal_paging, self._deserialize.dependencies)
        if raw:
            # header_dict stays empty here; the pager is simply re-wrapped.
            header_dict = {}
            client_raw_response = models.VirtualNetworkPaged(internal_paging, self._deserialize.dependencies, header_dict)
            return client_raw_response
        return deserialized
    list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/virtualNetworks'}
    def list(
            self, resource_group_name, custom_headers=None, raw=False, **operation_config):
        """Gets all virtual networks in a resource group.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: An iterator like instance of VirtualNetwork
        :rtype:
         ~azure.mgmt.network.v2017_03_01.models.VirtualNetworkPaged[~azure.mgmt.network.v2017_03_01.models.VirtualNetwork]
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        def internal_paging(next_link=None, raw=False):
            # Fetch one page: build the first URL, or follow the server-supplied next_link.
            if not next_link:
                # Construct URL
                url = self.list.metadata['url']
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
            else:
                url = next_link
                query_parameters = {}
            # Construct headers
            header_parameters = {}
            header_parameters['Content-Type'] = 'application/json; charset=utf-8'
            if self.config.generate_client_request_id:
                header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
            if custom_headers:
                header_parameters.update(custom_headers)
            if self.config.accept_language is not None:
                header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
            # Construct and send request
            request = self._client.get(url, query_parameters)
            response = self._client.send(
                request, header_parameters, stream=False, **operation_config)
            if response.status_code not in [200]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp
            return response
        # Deserialize response
        deserialized = models.VirtualNetworkPaged(internal_paging, self._deserialize.dependencies)
        if raw:
            # header_dict stays empty here; the pager is simply re-wrapped.
            header_dict = {}
            client_raw_response = models.VirtualNetworkPaged(internal_paging, self._deserialize.dependencies, header_dict)
            return client_raw_response
        return deserialized
    list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks'}
    def check_ip_address_availability(
            self, resource_group_name, virtual_network_name, ip_address=None, custom_headers=None, raw=False, **operation_config):
        """Checks whether a private IP address is available for use.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param virtual_network_name: The name of the virtual network.
        :type virtual_network_name: str
        :param ip_address: The private IP address to be verified.
        :type ip_address: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: IPAddressAvailabilityResult or ClientRawResponse if raw=true
        :rtype:
         ~azure.mgmt.network.v2017_03_01.models.IPAddressAvailabilityResult or
         ~msrest.pipeline.ClientRawResponse
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = self.check_ip_address_availability.metadata['url']
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        if ip_address is not None:
            query_parameters['ipAddress'] = self._serialize.query("ip_address", ip_address, 'str')
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct and send request
        request = self._client.get(url, query_parameters)
        response = self._client.send(request, header_parameters, stream=False, **operation_config)
        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('IPAddressAvailabilityResult', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized
    check_ip_address_availability.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/CheckIPAddressAvailability'}
    def list_usage(
            self, resource_group_name, virtual_network_name, custom_headers=None, raw=False, **operation_config):
        """Lists usage stats.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param virtual_network_name: The name of the virtual network.
        :type virtual_network_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: An iterator like instance of VirtualNetworkUsage
        :rtype:
         ~azure.mgmt.network.v2017_03_01.models.VirtualNetworkUsagePaged[~azure.mgmt.network.v2017_03_01.models.VirtualNetworkUsage]
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        def internal_paging(next_link=None, raw=False):
            # Fetch one page: build the first URL, or follow the server-supplied next_link.
            if not next_link:
                # Construct URL
                url = self.list_usage.metadata['url']
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
                    'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
            else:
                url = next_link
                query_parameters = {}
            # Construct headers
            header_parameters = {}
            header_parameters['Content-Type'] = 'application/json; charset=utf-8'
            if self.config.generate_client_request_id:
                header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
            if custom_headers:
                header_parameters.update(custom_headers)
            if self.config.accept_language is not None:
                header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
            # Construct and send request
            request = self._client.get(url, query_parameters)
            response = self._client.send(
                request, header_parameters, stream=False, **operation_config)
            if response.status_code not in [200]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp
            return response
        # Deserialize response
        deserialized = models.VirtualNetworkUsagePaged(internal_paging, self._deserialize.dependencies)
        if raw:
            # header_dict stays empty here; the pager is simply re-wrapped.
            header_dict = {}
            client_raw_response = models.VirtualNetworkUsagePaged(internal_paging, self._deserialize.dependencies, header_dict)
            return client_raw_response
        return deserialized
    list_usage.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/usages'}
| |
# -*- coding: utf-8 -*-
from flask import current_app as app, g, request, url_for
from flask.ext.restful import abort, fields, marshal
from werkzeug.exceptions import NotFound
from shiva.constants import HTTP
from shiva.exceptions import (InvalidFileTypeError, IntegrityError,
ObjectExistsError)
from shiva.http import Resource
from shiva.models import Album, Artist, db, Track, User, Playlist
from shiva.resources.fields import (ForeignKeyField, InstanceURI, TrackFiles,
ManyToManyField, PlaylistField)
from shiva.utils import parse_bool, get_list, get_by_name
class ArtistResource(Resource):
    """ The resource responsible for artists. """
    db_model = Artist

    def get_resource_fields(self):
        """Marshalling schema for a single artist."""
        return {
            'id': fields.String(attribute='pk'),
            'name': fields.String,
            'slug': fields.String,
            'uri': InstanceURI('artists'),
            'image': fields.String(default=app.config['DEFAULT_ARTIST_IMAGE']),
            'events_uri': fields.String(attribute='events'),
        }

    def post(self):
        """Create an artist from form data; 400 on empty name, 409 on duplicates."""
        name = request.form.get('name', '').strip()
        if not name:
            abort(HTTP.BAD_REQUEST)
        image_url = request.form.get('image_url')
        try:
            artist = self.create(name, image_url)
        except (IntegrityError, ObjectExistsError):
            abort(HTTP.CONFLICT)
        response = marshal(artist, self.get_resource_fields())
        headers = {'Location': url_for('artists', id=artist.pk)}
        return response, 201, headers

    def create(self, name, image_url):
        """Persist and return a new Artist row."""
        artist = Artist(name=name, image=image_url)
        db.session.add(artist)
        db.session.commit()
        return artist

    def update(self, artist):
        """Apply submitted form fields to *artist*; an empty name is a 400."""
        if 'name' in request.form:
            name = request.form.get('name', '').strip()
            if not name:
                abort(HTTP.BAD_REQUEST)
            artist.name = name
        # Bug fix: this used to test for 'image' while reading 'image_url',
        # so clients sending 'image_url' were ignored and clients sending
        # 'image' had the image reset to None. Check the key actually read,
        # matching post() above and AlbumResource.update()'s 'cover_url'.
        if 'image_url' in request.form:
            artist.image = request.form.get('image_url')
        return artist

    def get_full_tree(self, artist):
        """Marshal *artist* together with nested albums and album-less tracks."""
        _artist = marshal(artist, self.get_resource_fields())
        _artist['albums'] = []
        albums = AlbumResource()
        for album in artist.albums:
            _artist['albums'].append(albums.get_full_tree(album))
        # Tracks with no album are listed separately from the album tree.
        no_album = artist.tracks.filter_by(albums=None).all()
        track_fields = TrackResource().get_resource_fields()
        _artist['no_album_tracks'] = marshal(no_album, track_fields)
        return _artist
class AlbumResource(Resource):
    """ The resource responsible for albums. """
    db_model = Album

    def get_resource_fields(self):
        """Marshalling schema for a single album."""
        artist_schema = {
            'id': fields.String(attribute='pk'),
            'uri': InstanceURI('artists'),
        }
        return {
            'id': fields.String(attribute='pk'),
            'name': fields.String,
            'slug': fields.String,
            'year': fields.Integer,
            'uri': InstanceURI('albums'),
            'artists': ManyToManyField(Artist, artist_schema),
            'cover': fields.String(default=app.config['DEFAULT_ALBUM_COVER']),
        }

    def post(self):
        """Create an album from form data; an empty name is a 400."""
        name = request.form.get('name', '').strip()
        if not name:
            abort(HTTP.BAD_REQUEST)
        album = self.create(name=name,
                            year=request.form.get('year'),
                            cover_url=request.form.get('cover_url'))
        body = marshal(album, self.get_resource_fields())
        return body, 201, {'Location': url_for('albums', id=album.pk)}

    def create(self, name, year, cover_url):
        """Persist and return a new Album row."""
        album = Album(name=name, year=year, cover=cover_url)
        db.session.add(album)
        db.session.commit()
        return album

    def update(self, album):
        """
        Updates an album object with the given attributes. The `artists`
        attribute, however, is treated as a calculated value so it cannot be
        set through a PUT request. It has to be done through the Track model.
        """
        form = request.form
        if 'name' in form:
            if not form.get('name', '').strip():
                abort(HTTP.BAD_REQUEST)
            album.name = form.get('name')
        if 'year' in form:
            album.year = form.get('year')
        if 'cover_url' in form:
            album.cover = form.get('cover_url')
        return album

    def get_filters(self):
        """Query-string filters supported by the album listing."""
        return (('artist', 'artist_filter'),)

    def artist_filter(self, queryset, artist_pk):
        """Restrict *queryset* to albums by the given artist primary key."""
        try:
            pk = artist_pk if int(artist_pk) > 0 else None
        except ValueError:
            abort(HTTP.BAD_REQUEST)
        return queryset.join(Album.artists).filter(Artist.pk == pk)

    def get_full_tree(self, album):
        """Marshal *album* together with its fully-marshalled tracks."""
        tree = marshal(album, self.get_resource_fields())
        track_resource = TrackResource()
        tree['tracks'] = [
            track_resource.get_full_tree(track)
            for track in album.tracks.order_by(Track.ordinal, Track.title)
        ]
        return tree
class TrackResource(Resource):
    """The resource responsible for tracks."""

    db_model = Track

    def get_resource_fields(self):
        """Marshalling schema for a track."""
        return {
            'id': fields.String(attribute='pk'),
            'uri': InstanceURI('tracks'),
            'files': TrackFiles,
            'bitrate': fields.Integer,
            'length': fields.Integer,
            'title': fields.String,
            'slug': fields.String,
            'artists': ManyToManyField(Artist, {
                'id': fields.String(attribute='pk'),
                'uri': InstanceURI('artists'),
            }),
            'albums': ManyToManyField(Album, {
                'id': fields.String(attribute='pk'),
                'uri': InstanceURI('albums'),
            }),
            'ordinal': fields.Integer,
        }

    def post(self):
        """Create a track from an uploaded file plus optional form metadata.

        Aborts with 400 when no file is attached and 409 when the track
        already exists.
        """
        params = {
            'title': request.form.get('title', '').strip(),
            'artists': request.form.getlist('artist_id'),
            'albums': request.form.getlist('album_id'),
            'ordinal': request.form.get('ordinal'),
        }
        if 'track' not in request.files:
            abort(HTTP.BAD_REQUEST)
        try:
            track = self.create(**params)
        except (IntegrityError, ObjectExistsError):
            abort(HTTP.CONFLICT)
        response = marshal(track, self.get_resource_fields())
        headers = {'Location': url_for('tracks', id=track.pk)}
        return response, 201, headers

    def create(self, title, artists, albums, ordinal):
        """Store the uploaded file and create the Track with its relations."""
        UploadHandler = app.config.get('UPLOAD_HANDLER')
        try:
            # The bound exception was unused; plain `except` form is also
            # compatible with both Python 2 and 3.
            handler = UploadHandler(track=request.files.get('track'))
        except InvalidFileTypeError:
            abort(HTTP.UNSUPPORTED_MEDIA_TYPE)
        handler.save()
        hash_file = parse_bool(request.args.get('hash_file', True))
        no_metadata = parse_bool(request.args.get('no_metadata', False))
        track = Track(path=handler.path, hash_file=hash_file,
                      no_metadata=no_metadata)
        db.session.add(track)
        # If an artist (or album) is given as argument, it will take precedence
        # over whatever the file's metadata say.
        artist_list = []
        if artists:
            try:
                artist_list.extend(get_list(Artist, artists))
            except ValueError:
                abort(HTTP.BAD_REQUEST)
        else:
            if handler.artist:
                artist_list.append(get_by_name(Artist, handler.artist))
        album_list = []
        if albums:
            try:
                album_list.extend(get_list(Album, albums))
            except ValueError:
                abort(HTTP.BAD_REQUEST)
        else:
            if handler.album:
                # BUG FIX: the album derived from file metadata was appended
                # to artist_list, linking it to the track as an artist.
                album_list.append(get_by_name(Album, handler.album))
        for artist in artist_list:
            db.session.add(artist)
            artist.tracks.append(track)
        for album in album_list:
            db.session.add(album)
            album.tracks.append(track)
        db.session.commit()
        return track

    def update(self, track):
        """Apply submitted PUT form fields to `track`.

        Only fields present in the form are overwritten, consistent with the
        update() methods of the other resources in this module.
        """
        if 'title' in request.form:
            track.title = request.form.get('title')
        if 'ordinal' in request.form:
            track.ordinal = request.form.get('ordinal')
        # The track attribute cannot be updated. A new track has to be created
        # with the new value instead.
        if 'track' in request.form:
            abort(HTTP.BAD_REQUEST)
        for artist_pk in request.form.getlist('artist_id'):
            try:
                artist = Artist.query.get(artist_pk)
                track.artists.append(artist)
            except Exception:  # best effort: skip unknown/invalid artist ids
                pass
        for album_pk in request.form.getlist('album_id'):
            try:
                album = Album.query.get(album_pk)
                track.albums.append(album)
            except Exception:  # best effort: skip unknown/invalid album ids
                pass
        return track

    def get_filters(self):
        """Query-string filters supported by this resource."""
        return (
            ('artist', 'artist_filter'),
            ('album', 'album_filter'),
        )

    def artist_filter(self, queryset, artist_pk):
        """Filter tracks by artist pk (non-positive ids match NULL)."""
        try:
            pk = artist_pk if int(artist_pk) > 0 else None
        except ValueError:
            abort(HTTP.BAD_REQUEST)
        return queryset.filter(Track.artist_pk == pk)

    def album_filter(self, queryset, album_pk):
        """Filter tracks by album pk (non-positive ids match NULL)."""
        try:
            pk = album_pk if int(album_pk) > 0 else None
        except ValueError:
            abort(HTTP.BAD_REQUEST)
        return queryset.filter_by(album_pk=pk)

    def get_full_tree(self, track, include_scraped=False,
                      include_related=True):
        """
        Retrieves the full tree for a track. If the include_related option is
        not set then a normal track structure will be retrieved. If it's set,
        external resources that need to be scraped, like lyrics, will also be
        included. Also related objects like artist and album will be expanded
        to provide all their respective information.

        This is disabled by default to avoid DoS'ing lyrics' websites when
        requesting many tracks at once.
        """
        resource_fields = self.get_resource_fields()
        if include_related:
            artist = ArtistResource()
            resource_fields['artists'] = ManyToManyField(
                Artist,
                artist.get_resource_fields())
            album = AlbumResource()
            resource_fields['albums'] = ManyToManyField(
                Album,
                album.get_resource_fields())
        _track = marshal(track, resource_fields)
        if include_scraped:
            lyrics = LyricsResource()
            try:
                _track['lyrics'] = lyrics.get_for(track)
            except NotFound:
                _track['lyrics'] = None
        # tabs = TabsResource()
        # _track['tabs'] = tabs.get()
        return _track
class PlaylistResource(Resource):
    """
    Playlists are just a logical collection of tracks. Tracks must not be
    necessarily related between them in any way.

    To access a user's playlists filter by user id:

        /playlists?user_id=6
    """

    db_model = Playlist

    def get_resource_fields(self):
        """Marshalling schema for a playlist."""
        return {
            'id': fields.String(attribute='pk'),
            'name': fields.String,
            'user': ForeignKeyField(User, {
                'id': fields.String(attribute='pk'),
                'uri': InstanceURI('users'),
            }),
            'read_only': fields.Boolean,
            'creation_date': fields.DateTime,
            'length': fields.Integer,
            'tracks': PlaylistField({
                'id': fields.String(attribute='pk'),
                'uri': InstanceURI('tracks'),
            }),
        }

    def post(self):
        """Create a playlist owned by the authenticated user."""
        if g.user is None:
            abort(HTTP.BAD_REQUEST)
        name = request.form.get('name', '').strip()
        if not name:
            abort(HTTP.BAD_REQUEST)
        # NOTE(review): a submitted 'read_only' arrives as a string here while
        # the default is the bool True; update() runs it through parse_bool —
        # confirm whether that was intended here as well.
        read_only = request.form.get('read_only', True)
        playlist = self.create(name=name, read_only=read_only, user=g.user)
        response = marshal(playlist, self.get_resource_fields())
        headers = {'Location': url_for('playlists', id=playlist.pk)}
        return response, 201, headers

    def create(self, name, read_only, user):
        """Persist and return a new Playlist."""
        playlist = Playlist(name=name, read_only=read_only, user=user)
        db.session.add(playlist)
        db.session.commit()
        return playlist

    def update(self, playlist):
        """Apply submitted PUT form fields to `playlist`."""
        if 'name' in request.form:
            playlist.name = request.form.get('name')
        if 'read_only' in request.form:
            # BUG FIX: this assigned to the undefined name `playlists`,
            # raising NameError instead of updating the playlist.
            playlist.read_only = parse_bool(request.form.get('read_only'))
        return playlist
class PlaylistTrackResource(Resource):
    """Adds/removes tracks to/from an existing playlist via POST verbs."""

    def post(self, id, verb):
        """Dispatch POST /playlists/<id>/<verb> to `<verb>_track`."""
        # BUG FIX: getattr() without a default raises AttributeError for an
        # unknown verb, which made the BAD_REQUEST branch unreachable.
        handler = getattr(self, '%s_track' % verb, None)
        if not handler:
            abort(HTTP.BAD_REQUEST)
        playlist = self.get_playlist(id)
        if not playlist:
            abort(HTTP.NOT_FOUND)
        return handler(playlist)

    def add_track(self, playlist):
        """Insert the track given by form field 'track' at form field 'index'."""
        if 'track' not in request.form:
            abort(HTTP.BAD_REQUEST)
        track = self.get_track(request.form.get('track'))
        if not track:
            abort(HTTP.BAD_REQUEST)
        try:
            playlist.insert(request.form.get('index'), track)
        except ValueError:
            abort(HTTP.BAD_REQUEST)
        return self.Response('')

    def remove_track(self, playlist):
        """Remove the track at form field 'index' from the playlist."""
        if 'index' not in request.form:
            abort(HTTP.BAD_REQUEST)
        try:
            playlist.remove_at(request.form.get('index'))
        except (ValueError, IndexError):
            abort(HTTP.BAD_REQUEST)
        return self.Response('')

    def get_playlist(self, playlist_id):
        """Return the Playlist for `playlist_id`, or None when lookup fails."""
        try:
            playlist = Playlist.query.get(playlist_id)
        except Exception:  # was a bare except:; keep best-effort semantics
            playlist = None
        return playlist

    def get_track(self, track_id):
        """Return the Track for `track_id`, or None when lookup fails."""
        try:
            track = Track.query.get(track_id)
        except Exception:  # was a bare except:; keep best-effort semantics
            track = None
        return track
class UserResource(Resource):
    """ The resource responsible for users.

    The special id 'me' refers to the authenticated user and only supports
    GET; mutating 'me' is rejected with 405.
    """
    db_model = User
    def get_resource_fields(self):
        """Marshalling schema for a user (no sensitive fields exposed)."""
        return {
            'id': fields.String(attribute='pk'),
            'display_name': fields.String,
            'creation_date': fields.DateTime,
        }
    def get(self, id=None):
        """Return the user; 'me' resolves to the authenticated user."""
        if id == 'me':
            return marshal(g.user, self.get_resource_fields())
        return super(UserResource, self).get(id)
    def get_all(self):
        # Only public profiles are listable.
        return self.db_model.query.filter_by(is_public=True)
    def post(self, id=None):
        """Create a user. Requires authentication and a non-empty email."""
        if id == 'me':
            abort(HTTP.METHOD_NOT_ALLOWED)
        if g.user is None:
            abort(HTTP.METHOD_NOT_ALLOWED)
        email = request.form.get('email')
        if not email:
            abort(HTTP.BAD_REQUEST)
        display_name = request.form.get('display_name')
        # Accounts without a password can never be active.
        is_active = False
        password = request.form.get('password')
        if password:
            is_active = parse_bool(request.form.get('is_active', False))
        # FIXME: Check permissions
        is_admin = parse_bool(request.form.get('admin', False))
        try:
            user = self.create(display_name=display_name, email=email,
                               password=password, is_active=is_active,
                               is_admin=is_admin)
        except (IntegrityError, ObjectExistsError):
            # Duplicate email (or equivalent uniqueness violation).
            abort(HTTP.CONFLICT)
        response = marshal(user, self.get_resource_fields())
        headers = {'Location': url_for('users', id=user.pk)}
        return response, 201, headers
    def create(self, display_name, email, password, is_active, is_admin):
        """Persist and return a new User."""
        user = User(display_name=display_name, email=email, password=password,
                    is_active=is_active, is_admin=is_admin)
        db.session.add(user)
        db.session.commit()
        return user
    def put(self, id=None):
        """Update a user; 'me' cannot be modified through this endpoint."""
        if id == 'me':
            abort(HTTP.METHOD_NOT_ALLOWED)
        return super(UserResource, self).put(id)
    def update(self, user):
        """Apply submitted PUT form fields to `user`.

        An empty email aborts with 400; clearing the password also
        deactivates the account.
        """
        if 'email' in request.form:
            email = request.form.get('email', '').strip()
            if not email:
                abort(HTTP.BAD_REQUEST)
            user.email = email
        if 'display_name' in request.form:
            user.display_name = request.form.get('display_name')
        if 'password' in request.form:
            user.password = request.form.get('password')
            if user.password == '':
                user.is_active = False
            else:
                if 'is_active' in request.form:
                    user.is_active = parse_bool(request.form.get('is_active'))
        if 'is_admin' in request.form:
            user.is_admin = parse_bool(request.form.get('is_admin'))
        return user
    def delete(self, id=None):
        """Delete a user; 'me' cannot be deleted through this endpoint."""
        if id == 'me':
            abort(HTTP.METHOD_NOT_ALLOWED)
        return super(UserResource, self).delete(id)
| |
#######################################
### Cleverly Beacon Backend ###
#######################################
import webapp2
from google.appengine.ext import db
import json
import os
from google.appengine.ext.webapp import template
# Proximity thresholds stored in Trigger.triggerwhen.
TRIGGER_IMMEDIATE = 0
TRIGGER_NEAR = 1
TRIGGER_FAR = 2
# Payload types stored in Trigger.linktype.
LINK_IMAGE = 0
LINK_VIDEO = 1
LINK_ADVERT = 2
LINK_COUPON = 3
def render_template(self, template_name, template_values):
    """Render `template_name`.html (located next to this module) into the
    handler's response using `template_values`."""
    base_dir = os.path.dirname(__file__)
    template_path = os.path.join(base_dir, template_name) + '.html'
    self.response.out.write(template.render(template_path, template_values))
class MainHandler(webapp2.RequestHandler):
    """Root handler; serves a plain-text landing/liveness message."""
    def get(self):
        self.response.write('Beacon Dev Backend')
class Beacon(db.Model):
    """A physical beacon and the trigger groups it activates."""
    nickname = db.StringProperty()  # human-friendly label
    beaconuuid = db.StringProperty()  # hardware UUID broadcast by the beacon
    groupids = db.ListProperty(int)  # ids of Group entities attached to this beacon
    description = db.TextProperty()
class Group(db.Model):
    """A named collection of triggers that beacons can reference."""
    nickname = db.StringProperty()  # human-friendly label
    triggerids = db.ListProperty(int)  # ids of Trigger entities in this group
    description = db.TextProperty()
class Trigger(db.Model):
    """A piece of content delivered when a beacon fires at a given range."""
    nickname = db.StringProperty()
    linktype = db.IntegerProperty() # 0 for image, 1 for video, 2 for advert, 3 for coupon
    description = db.StringProperty()
    triggerlink = db.StringProperty()  # URL of the content to deliver
    triggerwhen = db.IntegerProperty() # 0 for immediate, 1 for near and 2 for far
class SingleBeacon(webapp2.RequestHandler):
    """Renders the edit form for one beacon; the key 'add' means a new one."""

    def get(self, key):
        beacon = Beacon.get_by_id(int(key)) if key != 'add' else None
        if beacon:
            beaconjson = {"valid": True, "nickname": beacon.nickname,
                          "id": key, "notnew": True, "uuid": beacon.beaconuuid}
            groups = []
            # Mark every known group as selected/unselected for this beacon.
            for group in Group.all():
                entry = {"nickname": group.nickname, "id": group.key().id()}
                entry["valid"] = group.key().id() in beacon.groupids
                groups.append(entry)
            beaconjson["groups"] = groups
        else:
            # Unknown id or explicit 'add': show an empty creation form.
            beaconjson = {"valid": True, "nickname": None, "groups": None,
                          "notnew": False}
        render_template(self, "single_beacon", {"beacon": beaconjson})
class SingleGroup(webapp2.RequestHandler):
    """Renders the edit form for one group; the key 'add' means a new one."""

    def get(self, key):
        group = Group.get_by_id(int(key)) if key != 'add' else None
        if group:
            groupjson = {"id": key, "nickname": group.nickname,
                         "valid": True, "notnew": True}
            triggers = []
            # Mark every known trigger as selected/unselected for this group.
            for trigger in Trigger.all():
                entry = {"nickname": trigger.nickname, "id": trigger.key().id()}
                entry["valid"] = trigger.key().id() in group.triggerids
                triggers.append(entry)
            groupjson["triggers"] = triggers
        else:
            # Unknown id or explicit 'add': show an empty creation form.
            groupjson = {"nickname": None, "triggers": None,
                         "notnew": False, "valid": True}
        render_template(self, "single_group", {"group": groupjson})
class SingleTrigger(webapp2.RequestHandler):
    """Renders the edit form for one trigger; the key 'add' means a new one."""

    def get(self, key):
        if key != 'add':
            trigger = Trigger.get_by_id(int(key))
            if trigger:
                triggerjson = {"id": key, "nickname": trigger.nickname,
                               "linktype": trigger.linktype,
                               "triggerlink": trigger.triggerlink,
                               "valid": True, "notnew": True}
                groups = []
                for group in Group.all():
                    groupjson = {"nickname": group.nickname,
                                 "id": group.key().id()}
                    # BUG FIX: `key` is the URL path fragment (a string) while
                    # triggerids holds ints, so the membership test was always
                    # False and no group ever showed as selected.
                    groupjson["valid"] = int(key) in group.triggerids
                    groups.append(groupjson)
                triggerjson["groups"] = groups
            else:
                triggerjson = {"nickname": None, "groups": None,
                               "notnew": False, "valid": True}
        else:
            triggerjson = {"nickname": None, "groups": None,
                           "notnew": False, "valid": True}
        render_template(self, "single_trigger", {"trigger": triggerjson})
class ListBeacons(webapp2.RequestHandler):
    """Lists every beacon with its groups and the triggers those groups hold."""

    def get(self):
        beacondetails = []
        for beacon in Beacon.all():
            valuepair = {'nickname': beacon.nickname,
                         'beaconuuid': beacon.beaconuuid,
                         'description': beacon.description}
            groups = []
            # BUG FIX: `triggers` was initialised once outside the beacon
            # loop, so every beacon shared one ever-growing list and showed
            # the triggers of all previously processed beacons.
            triggers = []
            for g in beacon.groupids:
                gdb = Group.get_by_id(int(g))
                groups.append({'nickname': gdb.nickname, 'id': g})
                for t in gdb.triggerids:
                    tdb = Trigger.get_by_id(int(t))
                    triggers.append({'nickname': tdb.nickname, 'id': t})
            valuepair['groups'] = groups
            valuepair['triggers'] = triggers
            beacondetails.append(valuepair)
        render_template(self, 'manage_beacons', {'beacons': beacondetails})
class AddBeacon(webapp2.RequestHandler):
    """Creates a new beacon or updates an existing one from the posted form."""

    def post(self):
        beaconid = self.request.get("beaconid")
        nickname = self.request.get("nickname")
        beaconuuid = self.request.get("beaconuuid")
        groupids = [int(g) for g in self.request.get_all("groupid")]
        description = self.request.get("description")
        try:
            # Missing/non-numeric beaconid means "create new".
            beacon = Beacon.get_by_id(int(beaconid))
        except Exception:
            beacon = None
        if beacon is None:
            beacon = Beacon(nickname=nickname, beaconuuid=beaconuuid,
                            groupids=groupids, description=description)
        else:
            beacon.nickname = nickname
            beacon.beaconuuid = beaconuuid
            beacon.groupids = groupids
            beacon.description = description
        beacon.put()
        self.redirect("/beacons")
class ListGroups(webapp2.RequestHandler):
    """Lists every group with the beacons that use it and the triggers it holds."""

    def get(self):
        groupdetails = []
        for group in Group.all():
            gid = group.key().id()
            detail = {'nickname': group.nickname,
                      'description': group.description,
                      'groupid': gid}
            # Beacons that reference this group.
            detail['beacons'] = [
                {'id': beacon.key().id(), 'nickname': beacon.nickname}
                for beacon in Beacon.all() if gid in beacon.groupids
            ]
            # Triggers contained in this group.
            detail['triggers'] = [
                {'id': tid, 'nickname': Trigger.get_by_id(tid).nickname}
                for tid in group.triggerids
            ]
            groupdetails.append(detail)
        render_template(self, 'manage_groups', {'groups': groupdetails})
class AddGroup(webapp2.RequestHandler):
    """Creates a new group or updates an existing one from the posted form."""

    def post(self):
        groupid = self.request.get("groupid")
        nickname = self.request.get("nickname")
        # (the unused `beaconids` local from the original was dropped)
        triggerids = [int(t) for t in self.request.get_all("triggerid")]
        description = self.request.get("description")
        try:
            # Missing/non-numeric groupid means "create new".
            group = Group.get_by_id(int(groupid))
        except Exception:
            group = None
        if group is None:
            group = Group(nickname=nickname, description=description,
                          triggerids=triggerids)
        else:
            group.nickname = nickname
            group.triggerids = triggerids
            group.description = description
        group.put()
        self.redirect("/groups")
class ListTriggers(webapp2.RequestHandler):
    """Lists every trigger with the groups and beacons that reference it."""

    def get(self):
        triggerdetails = []
        for trigger in Trigger.all():
            tid = trigger.key().id()
            valuepair = {'id': tid,
                         'description': trigger.description,
                         'nickname': trigger.nickname}
            trigrp = [{'id': group.key().id(), 'nickname': group.nickname}
                      for group in Group.all() if tid in group.triggerids]
            tribea = []
            for beacon in Beacon.all():
                for groupid in beacon.groupids:
                    if tid in Group.get_by_id(groupid).triggerids:
                        tribea.append({'id': beacon.key().id(),
                                       'nickname': beacon.nickname})
                        # BUG FIX: without this break a beacon was listed once
                        # per group of its that contains the trigger.
                        break
            valuepair['groups'] = trigrp
            valuepair['beacons'] = tribea
            triggerdetails.append(valuepair)
        render_template(self, 'manage_triggers', {'triggers': triggerdetails})
class AddTrigger(webapp2.RequestHandler):
    """Creates a new trigger or updates an existing one from the posted form."""

    def post(self):
        triggerid = self.request.get("triggerid")
        nickname = self.request.get("nickname")
        description = self.request.get("description")
        trigtype = self.request.get("linktype")
        triglink = self.request.get("link")
        trigwhen = self.request.get("triggerwhen")
        try:
            # Missing/non-numeric triggerid means "create new".
            trigger = Trigger.get_by_id(int(triggerid))
        except Exception:
            trigger = None
        if trigger is None:
            trigger = Trigger(nickname=nickname, description=description,
                              triggerlink=triglink, linktype=int(trigtype),
                              triggerwhen=int(trigwhen))
        else:
            trigger.nickname = nickname
            trigger.description = description
            # BUG FIX: the update path assigned a non-existent `triggertype`
            # attribute and stored raw strings into the IntegerProperty
            # fields; mirror the create path instead.
            trigger.linktype = int(trigtype)
            trigger.triggerlink = triglink
            trigger.triggerwhen = int(trigwhen)
        trigger.put()
        self.redirect("/triggers")
class DumpData(webapp2.RequestHandler):
    """Serialises every beacon, group and trigger for the /api/all endpoint."""

    def get(self):
        beacons = [{"id": beacon.beaconuuid, "groupids": beacon.groupids}
                   for beacon in Beacon.all()]
        # BUG FIX: the id came from the stale `beacon` loop variable instead
        # of the group being serialised.
        groups = [{"id": group.key().id(), "triggerids": group.triggerids}
                  for group in Group.all()]
        triggers = [{"id": trigger.key().id(),
                     "linktype": trigger.linktype,
                     "triggerlink": trigger.triggerlink,
                     "triggerwhen": trigger.triggerwhen}
                    for trigger in Trigger.all()]
        data = {"beacons": beacons, "groups": groups, "triggers": triggers}
        # Emit real JSON (the module already imports json) instead of the
        # Python repr of the dict, which is not parseable by API clients.
        self.response.headers['Content-Type'] = 'application/json'
        self.response.write(json.dumps(data))
class TestTemplate(webapp2.RequestHandler):
    """Renders single_beacon with hard-coded fixture data for template debugging."""
    def get(self):
        values={}
        # NOTE(review): some fixture entries use 'name' instead of 'nickname' —
        # presumably deliberate to exercise missing-key handling; confirm.
        groups=[{'id':'1213','nickname':'Fiction','valid':False},{'id':'1431','name':'Entrance'}]
        triggers=[{'id':'1213','nickname':'Discount'},{'id':'1213','name':'New Dan Brown'}]
        beacons=[{'nickname':'Awesome Beacon #1','description':'Some description here','groups':groups,'grouptriggers':triggers},
                 {'nickname':'Awesome Beacon #2','description':'Some description here','groups':triggers,'grouptriggers':groups}]
        #values.update({
        #    'beacons':beacons
        #    })
        beacon={'valid':True,'notnew':True, 'nickname':'beacon 1','uuid':'1213123','groups':groups,'id':5136918324969472}
        values={'beacon':beacon}
        render_template(self,'single_beacon',values)
# URL routing table. The parameterised '/beacons/(.*)' style routes coexist
# with the literal '/beacons' listings because webapp2 matches the full path.
app = webapp2.WSGIApplication([
    ('/', MainHandler),
    ('/addtrigger',AddTrigger),
    ('/addgroup',AddGroup),
    ('/addbeacon',AddBeacon),
    ('/beacons/(.*)',SingleBeacon),
    ('/groups/(.*)',SingleGroup),
    ('/triggers/(.*)',SingleTrigger),
    ('/beacons',ListBeacons),
    ('/groups',ListGroups),
    ('/triggers',ListTriggers),
    ('/test',TestTemplate),
    ('/api/all',DumpData)
], debug=True)
| |
from __future__ import absolute_import
from __future__ import print_function
import argparse
from mimic3models import parse_utils
import json
import numpy as np
def check_decreasing(a, k, eps):
    """Return True iff the last `k` steps of sequence `a` are non-increasing.

    A step counts as non-increasing when the later value exceeds the earlier
    one by at most `eps`. Returns False when `a` has fewer than k+1 elements.
    """
    if k >= len(a):
        return False
    tail = a[len(a) - k - 1:]
    return all(later <= earlier + eps for earlier, later in zip(tail, tail[1:]))
def process_single(filename, verbose, select):
    """Decide whether the training run logged in `filename` should be re-run.

    Returns a dict with the resume command and summary metrics, or None when
    the log cannot be parsed or the run is filtered out by the heuristics
    below (only applied when `select` is true).
    """
    if verbose:
        print("Processing log file: {}".format(filename))
    with open(filename, 'r') as fin:
        log = fin.read()
    task = parse_utils.parse_task(log)
    if task is None:
        print("Task is not detected: {}".format(filename))
        return None
    if verbose:
        print("\ttask = {}".format(task))
    # Pick the validation metric appropriate for the detected task.
    if task == 'multitask' or task == 'pheno':
        metric = 'ave_auc_macro'
    elif task == 'ihm' or task == 'decomp':
        metric = 'AUC of ROC'
    elif task == 'los':
        metric = 'Cohen kappa score'
    else:
        assert False
    train_metrics, val_metrics = parse_utils.parse_metrics(log, metric)
    if len(train_metrics) == 0:
        print("Less than one epoch: {}".format(filename))
        return None
    last_train = train_metrics[-1]
    last_val = val_metrics[-1]
    if verbose:
        print("\tlast train = {}, last val = {}".format(last_train, last_val))
    # Heuristic 1: skip runs that look overfit — high train metric paired
    # with a comparatively low validation metric (task-specific thresholds).
    rerun = True
    if task == 'ihm':
        if last_val < 0.83 and last_train > 0.88:
            rerun = False
        if last_val < 0.84 and last_train > 0.89:
            rerun = False
        if last_val < 0.85 and last_train > 0.9:
            rerun = False
    elif task == 'decomp':
        if last_val < 0.85 and last_train > 0.89:
            rerun = False
        if last_val < 0.87 and last_train > 0.9:
            rerun = False
        if last_val < 0.88 and last_train > 0.92:
            rerun = False
    elif task == 'pheno' or task == 'multitask':
        if last_val < 0.75 and last_train > 0.77:
            rerun = False
        if last_val < 0.76 and last_train > 0.79:
            rerun = False
    elif task == 'los':
        if last_val < 0.35 and last_train > 0.42:
            rerun = False
        if last_val < 0.38 and last_train > 0.44:
            rerun = False
    else:
        assert False
    # check if val_metrics is decreasing
    if task in ['ihm', 'decomp', 'pheno', 'multitask']:
        n_decreases = 3
    else:  # 'los'
        n_decreases = 5
    if check_decreasing(val_metrics, n_decreases, 0.001):
        rerun = False
    # check if maximum value for validation was very early
    if task in ['ihm', 'decomp', 'pheno', 'multitask']:
        tol = 0.01
    else:  # 'los'
        tol = 0.03
    val_max = max(val_metrics)
    val_max_pos = np.argmax(val_metrics)
    if len(val_metrics) - val_max_pos >= 8 and val_max - last_val > tol:
        rerun = False
    # --no-select disables all filtering: everything is re-run.
    if not select:
        rerun = True
    if verbose:
        print("\trerun = {}".format(rerun))
    if not rerun:
        return None
    # need to rerun
    last_state = parse_utils.parse_last_state(log)
    if last_state is None:
        print("Last state is not parsed: {}".format(filename))
        return None
    n_epochs = parse_utils.parse_epoch(last_state)
    if verbose:
        print("\tlast state = {}".format(last_state))
    network = parse_utils.parse_network(log)
    prefix = parse_utils.parse_prefix(log)
    # Bump the numeric suffix of the prefix (r -> r2 -> r3 ...) so the
    # resumed run does not overwrite the original artifacts.
    if prefix == '':
        prefix = 'r2'
    elif not str.isdigit(prefix[-1]):
        prefix += '2'
    else:
        prefix = prefix[:-1] + str(int(prefix[-1]) + 1)
    dim = parse_utils.parse_dim(log)
    size_coef = parse_utils.parse_size_coef(log)
    depth = parse_utils.parse_depth(log)
    ihm_C = parse_utils.parse_ihm_C(log)
    decomp_C = parse_utils.parse_decomp_C(log)
    los_C = parse_utils.parse_los_C(log)
    pheno_C = parse_utils.parse_pheno_C(log)
    dropout = parse_utils.parse_dropout(log)
    partition = parse_utils.parse_partition(log)
    deep_supervision = parse_utils.parse_deep_supervision(log)
    target_repl_coef = parse_utils.parse_target_repl_coef(log)
    batch_size = parse_utils.parse_batch_size(log)
    # Rebuild the command line that resumes training from the last state.
    command = "python -u main.py --network {} --prefix {} --dim {}"\
        " --depth {} --epochs 100 --batch_size {} --timestep 1.0"\
        " --load_state {}".format(network, prefix, dim, depth, batch_size, last_state)
    if network.find('channel') != -1:
        command += ' --size_coef {}'.format(size_coef)
    if ihm_C:
        command += ' --ihm_C {}'.format(ihm_C)
    if decomp_C:
        command += ' --decomp_C {}'.format(decomp_C)
    if los_C:
        command += ' --los_C {}'.format(los_C)
    if pheno_C:
        command += ' --pheno_C {}'.format(pheno_C)
    if dropout > 0.0:
        command += ' --dropout {}'.format(dropout)
    if partition:
        command += ' --partition {}'.format(partition)
    if deep_supervision:
        command += ' --deep_supervision'
    if (target_repl_coef is not None) and target_repl_coef > 0.0:
        command += ' --target_repl_coef {}'.format(target_repl_coef)
    return {"command": command,
            "train_max": np.max(train_metrics),
            "train_max_pos": np.argmax(train_metrics),
            "val_max": np.max(val_metrics),
            "val_max_pos": np.argmax(val_metrics),
            "last_train": last_train,
            "last_val": last_val,
            "n_epochs": n_epochs,
            "filename": filename}
def main():
    """Scan the given log files, decide which runs to resume, and write
    rerun_output.json, rerun.sh and rerun_filenames.txt."""
    argparser = argparse.ArgumentParser()
    argparser.add_argument('logs', type=str, nargs='+')
    argparser.add_argument('--verbose', type=int, default=0)
    argparser.add_argument('--select', dest='select', action='store_true')
    argparser.add_argument('--no-select', dest='select', action='store_false')
    argparser.set_defaults(select=True)
    args = argparser.parse_args()
    if not isinstance(args.logs, list):
        args.logs = [args.logs]
    rerun = []
    for log in args.logs:
        if log.find(".log") == -1:  # not a log file or is a not renamed log file
            continue
        ret = process_single(log, args.verbose, args.select)
        if ret:
            rerun += [ret]
    # Highest last-validation-metric first.
    rerun = sorted(rerun, key=lambda x: x["last_val"], reverse=True)
    print("Need to rerun {} / {} models".format(len(rerun), len(args.logs)))
    print("Saving the results in rerun_output.json")
    with open("rerun_output.json", 'w') as fout:
        json.dump(rerun, fout)
    # BUG FIX: the message claimed "rerun_commands.sh" but the file actually
    # written has always been "rerun.sh".
    print("Saving commands in rerun.sh")
    with open("rerun.sh", 'w') as fout:
        for a in rerun:
            fout.write(a['command'] + '\n')
    print("Saving filenames in rerun_filenames.txt")
    with open("rerun_filenames.txt", 'w') as fout:
        for a in rerun:
            fout.write(a['filename'] + '\n')
# Script entry point.
if __name__ == '__main__':
    main()
| |
from __future__ import absolute_import, division, print_function, with_statement
import collections
from contextlib import closing
import errno
import gzip
import logging
import os
import re
import socket
import sys
from tornado.httpclient import AsyncHTTPClient
from tornado.httputil import HTTPHeaders
from tornado.ioloop import IOLoop
from tornado.log import gen_log
from tornado.simple_httpclient import SimpleAsyncHTTPClient, _DEFAULT_CA_CERTS
from tornado.test.httpclient_test import ChunkHandler, CountdownHandler, HelloWorldHandler
from tornado.test import httpclient_test
from tornado.testing import AsyncHTTPTestCase, AsyncHTTPSTestCase, AsyncTestCase, bind_unused_port, ExpectLog
from tornado.test.util import unittest, skipOnTravis
from tornado.web import RequestHandler, Application, asynchronous, url
class SimpleHTTPClientCommonTestCase(httpclient_test.HTTPClientCommonTestCase):
    """Runs the shared HTTP client test suite against SimpleAsyncHTTPClient."""
    def get_http_client(self):
        # force_instance so we don't pick up a cached client of another class
        client = SimpleAsyncHTTPClient(io_loop=self.io_loop,
                                       force_instance=True)
        self.assertTrue(isinstance(client, SimpleAsyncHTTPClient))
        return client
class TriggerHandler(RequestHandler):
    """Queues its own finish callback so tests can complete requests on demand."""
    def initialize(self, queue, wake_callback):
        self.queue = queue
        self.wake_callback = wake_callback
    @asynchronous
    def get(self):
        logging.debug("queuing trigger")
        self.queue.append(self.finish)
        # ?wake=false keeps the test's wait() blocked (used by timeout tests).
        if self.get_argument("wake", "true") == "true":
            self.wake_callback()
class HangHandler(RequestHandler):
    """Never finishes the request; used to exercise client-side timeouts."""
    @asynchronous
    def get(self):
        pass
class ContentLengthHandler(RequestHandler):
    """Writes 'ok' with a caller-controlled (possibly bogus) Content-Length."""
    def get(self):
        self.set_header("Content-Length", self.get_argument("value"))
        self.write("ok")
class HeadHandler(RequestHandler):
    """HEAD response advertising a 7-byte body that is never sent."""
    def head(self):
        self.set_header("Content-Length", "7")
class OptionsHandler(RequestHandler):
    """Minimal OPTIONS endpoint with a CORS header and a tiny body."""
    def options(self):
        self.set_header("Access-Control-Allow-Origin", "*")
        self.write("ok")
class NoContentHandler(RequestHandler):
    """Returns 204; with ?error=1 it also sends an (invalid) Content-Length."""
    def get(self):
        if self.get_argument("error", None):
            self.set_header("Content-Length", "7")
        self.set_status(204)
class SeeOtherPostHandler(RequestHandler):
    """Redirects POSTs to /see_other_get with the status code given in the body."""
    def post(self):
        redirect_code = int(self.request.body)
        assert redirect_code in (302, 303), "unexpected body %r" % self.request.body
        self.set_header("Location", "/see_other_get")
        self.set_status(redirect_code)
class SeeOtherGetHandler(RequestHandler):
    """Target of the see-other redirect; the follow-up GET must carry no body."""
    def get(self):
        if self.request.body:
            raise Exception("unexpected body %r" % self.request.body)
        self.write("ok")
class HostEchoHandler(RequestHandler):
    """Echoes the request's Host header back to the client."""
    def get(self):
        self.write(self.request.headers["Host"])
class SimpleHTTPClientTestMixin(object):
def get_app(self):
# callable objects to finish pending /trigger requests
self.triggers = collections.deque()
return Application([
url("/trigger", TriggerHandler, dict(queue=self.triggers,
wake_callback=self.stop)),
url("/chunk", ChunkHandler),
url("/countdown/([0-9]+)", CountdownHandler, name="countdown"),
url("/hang", HangHandler),
url("/hello", HelloWorldHandler),
url("/content_length", ContentLengthHandler),
url("/head", HeadHandler),
url("/options", OptionsHandler),
url("/no_content", NoContentHandler),
url("/see_other_post", SeeOtherPostHandler),
url("/see_other_get", SeeOtherGetHandler),
url("/host_echo", HostEchoHandler),
], gzip=True)
def test_singleton(self):
# Class "constructor" reuses objects on the same IOLoop
self.assertTrue(SimpleAsyncHTTPClient(self.io_loop) is
SimpleAsyncHTTPClient(self.io_loop))
# unless force_instance is used
self.assertTrue(SimpleAsyncHTTPClient(self.io_loop) is not
SimpleAsyncHTTPClient(self.io_loop,
force_instance=True))
# different IOLoops use different objects
io_loop2 = IOLoop()
self.assertTrue(SimpleAsyncHTTPClient(self.io_loop) is not
SimpleAsyncHTTPClient(io_loop2))
def test_connection_limit(self):
with closing(self.create_client(max_clients=2)) as client:
self.assertEqual(client.max_clients, 2)
seen = []
# Send 4 requests. Two can be sent immediately, while the others
# will be queued
for i in range(4):
client.fetch(self.get_url("/trigger"),
lambda response, i=i: (seen.append(i), self.stop()))
self.wait(condition=lambda: len(self.triggers) == 2)
self.assertEqual(len(client.queue), 2)
# Finish the first two requests and let the next two through
self.triggers.popleft()()
self.triggers.popleft()()
self.wait(condition=lambda: (len(self.triggers) == 2 and
len(seen) == 2))
self.assertEqual(set(seen), set([0, 1]))
self.assertEqual(len(client.queue), 0)
# Finish all the pending requests
self.triggers.popleft()()
self.triggers.popleft()()
self.wait(condition=lambda: len(seen) == 4)
self.assertEqual(set(seen), set([0, 1, 2, 3]))
self.assertEqual(len(self.triggers), 0)
def test_redirect_connection_limit(self):
# following redirects should not consume additional connections
with closing(self.create_client(max_clients=1)) as client:
client.fetch(self.get_url('/countdown/3'), self.stop,
max_redirects=3)
response = self.wait()
response.rethrow()
def test_default_certificates_exist(self):
open(_DEFAULT_CA_CERTS).close()
def test_gzip(self):
# All the tests in this file should be using gzip, but this test
# ensures that it is in fact getting compressed.
# Setting Accept-Encoding manually bypasses the client's
# decompression so we can see the raw data.
response = self.fetch("/chunk", use_gzip=False,
headers={"Accept-Encoding": "gzip"})
self.assertEqual(response.headers["Content-Encoding"], "gzip")
self.assertNotEqual(response.body, b"asdfqwer")
# Our test data gets bigger when gzipped. Oops. :)
self.assertEqual(len(response.body), 34)
f = gzip.GzipFile(mode="r", fileobj=response.buffer)
self.assertEqual(f.read(), b"asdfqwer")
def test_max_redirects(self):
response = self.fetch("/countdown/5", max_redirects=3)
self.assertEqual(302, response.code)
# We requested 5, followed three redirects for 4, 3, 2, then the last
# unfollowed redirect is to 1.
self.assertTrue(response.request.url.endswith("/countdown/5"))
self.assertTrue(response.effective_url.endswith("/countdown/2"))
self.assertTrue(response.headers["Location"].endswith("/countdown/1"))
def test_header_reuse(self):
# Apps may reuse a headers object if they are only passing in constant
# headers like user-agent. The header object should not be modified.
headers = HTTPHeaders({'User-Agent': 'Foo'})
self.fetch("/hello", headers=headers)
self.assertEqual(list(headers.get_all()), [('User-Agent', 'Foo')])
def test_see_other_redirect(self):
for code in (302, 303):
response = self.fetch("/see_other_post", method="POST", body="%d" % code)
self.assertEqual(200, response.code)
self.assertTrue(response.request.url.endswith("/see_other_post"))
self.assertTrue(response.effective_url.endswith("/see_other_get"))
# request is the original request, is a POST still
self.assertEqual("POST", response.request.method)
@skipOnTravis
def test_request_timeout(self):
response = self.fetch('/trigger?wake=false', request_timeout=0.1)
self.assertEqual(response.code, 599)
self.assertTrue(0.099 < response.request_time < 0.15, response.request_time)
self.assertEqual(str(response.error), "HTTP 599: Timeout")
# trigger the hanging request to let it clean up after itself
self.triggers.popleft()()
@unittest.skipIf(not socket.has_ipv6, 'ipv6 support not present')
def test_ipv6(self):
try:
self.http_server.listen(self.get_http_port(), address='::1')
except socket.gaierror as e:
if e.args[0] == socket.EAI_ADDRFAMILY:
# python supports ipv6, but it's not configured on the network
# interface, so skip this test.
return
raise
url = self.get_url("/hello").replace("localhost", "[::1]")
# ipv6 is currently disabled by default and must be explicitly requested
self.http_client.fetch(url, self.stop)
response = self.wait()
self.assertEqual(response.code, 599)
self.http_client.fetch(url, self.stop, allow_ipv6=True)
response = self.wait()
self.assertEqual(response.body, b"Hello world!")
def test_multiple_content_length_accepted(self):
response = self.fetch("/content_length?value=2,2")
self.assertEqual(response.body, b"ok")
response = self.fetch("/content_length?value=2,%202,2")
self.assertEqual(response.body, b"ok")
response = self.fetch("/content_length?value=2,4")
self.assertEqual(response.code, 599)
response = self.fetch("/content_length?value=2,%202,3")
self.assertEqual(response.code, 599)
def test_head_request(self):
response = self.fetch("/head", method="HEAD")
self.assertEqual(response.code, 200)
self.assertEqual(response.headers["content-length"], "7")
self.assertFalse(response.body)
def test_options_request(self):
response = self.fetch("/options", method="OPTIONS")
self.assertEqual(response.code, 200)
self.assertEqual(response.headers["content-length"], "2")
self.assertEqual(response.headers["access-control-allow-origin"], "*")
self.assertEqual(response.body, b"ok")
def test_no_content(self):
response = self.fetch("/no_content")
self.assertEqual(response.code, 204)
# 204 status doesn't need a content-length, but tornado will
# add a zero content-length anyway.
self.assertEqual(response.headers["Content-length"], "0")
# 204 status with non-zero content length is malformed
response = self.fetch("/no_content?error=1")
self.assertEqual(response.code, 599)
def test_host_header(self):
host_re = re.compile(b"^localhost:[0-9]+$")
response = self.fetch("/host_echo")
self.assertTrue(host_re.match(response.body))
url = self.get_url("/host_echo").replace("http://", "http://me:secret@")
self.http_client.fetch(url, self.stop)
response = self.wait()
self.assertTrue(host_re.match(response.body), response.body)
    def test_connection_refused(self):
        """Connecting to a port with no listener yields error code 599 with
        an error message containing ECONNREFUSED (except on cygwin)."""
        # Grab a port that is known to be free, then close the socket so
        # nothing is listening when the client connects.
        server_socket, port = bind_unused_port()
        server_socket.close()
        with ExpectLog(gen_log, ".*", required=False):
            self.http_client.fetch("http://localhost:%d/" % port, self.stop)
            response = self.wait()
        self.assertEqual(599, response.code)

        if sys.platform != 'cygwin':
            # cygwin returns EPERM instead of ECONNREFUSED here
            self.assertTrue(str(errno.ECONNREFUSED) in str(response.error),
                            response.error)
            # This is usually "Connection refused".
            # On windows, strerror is broken and returns "Unknown error".
            expected_message = os.strerror(errno.ECONNREFUSED)
            self.assertTrue(expected_message in str(response.error),
                            response.error)
class SimpleHTTPClientTestCase(SimpleHTTPClientTestMixin, AsyncHTTPTestCase):
    """Runs the shared client test mixin over plain HTTP."""

    def setUp(self):
        super(SimpleHTTPClientTestCase, self).setUp()
        self.http_client = self.create_client()

    def create_client(self, **kwargs):
        """Return a dedicated (non-shared) client instance for this test."""
        return SimpleAsyncHTTPClient(
            self.io_loop, force_instance=True, **kwargs)
class SimpleHTTPSClientTestCase(SimpleHTTPClientTestMixin, AsyncHTTPSTestCase):
    """Runs the shared client test mixin over HTTPS."""

    def setUp(self):
        super(SimpleHTTPSClientTestCase, self).setUp()
        self.http_client = self.create_client()

    def create_client(self, **kwargs):
        """Return a client that skips cert validation for the test server."""
        insecure_defaults = dict(validate_cert=False)
        return SimpleAsyncHTTPClient(self.io_loop, force_instance=True,
                                     defaults=insecure_defaults,
                                     **kwargs)
class CreateAsyncHTTPClientTestCase(AsyncTestCase):
    """Verifies how max_clients is resolved between configure() defaults
    and constructor arguments."""

    def setUp(self):
        super(CreateAsyncHTTPClientTestCase, self).setUp()
        self.saved = AsyncHTTPClient._save_configuration()

    def tearDown(self):
        AsyncHTTPClient._restore_configuration(self.saved)
        super(CreateAsyncHTTPClientTestCase, self).tearDown()

    def _assert_max_clients(self, expected, **kwargs):
        """Instantiate a throwaway client and verify its max_clients."""
        with closing(AsyncHTTPClient(
                self.io_loop, force_instance=True, **kwargs)) as client:
            self.assertEqual(client.max_clients, expected)

    def test_max_clients(self):
        AsyncHTTPClient.configure(SimpleAsyncHTTPClient)
        self._assert_max_clients(10)
        self._assert_max_clients(11, max_clients=11)

        # Now configure max_clients statically and try overriding it
        # with each way max_clients can be passed
        AsyncHTTPClient.configure(SimpleAsyncHTTPClient, max_clients=12)
        self._assert_max_clients(12)
        self._assert_max_clients(13, max_clients=13)
        self._assert_max_clients(14, max_clients=14)
class HTTP100ContinueTestCase(AsyncHTTPTestCase):
    """Verifies that the client skips an informational 100 (Continue)
    response and delivers the real 200 response that follows it.

    The "app" is a bare HTTPServer callback (not a full Application) that
    writes both responses by hand, chained via stream write callbacks so
    the ordering on the wire is deterministic.
    """
    def respond_100(self, request):
        # Write the interim 100 response first; once flushed, the write
        # callback sends the real response.
        self.request = request
        self.request.connection.stream.write(
            b"HTTP/1.1 100 CONTINUE\r\n\r\n",
            self.respond_200)

    def respond_200(self):
        # Final response; close the stream once the write completes.
        self.request.connection.stream.write(
            b"HTTP/1.1 200 OK\r\nContent-Length: 1\r\n\r\nA",
            self.request.connection.stream.close)

    def get_app(self):
        # Not a full Application, but works as an HTTPServer callback
        return self.respond_100

    def test_100_continue(self):
        res = self.fetch('/')
        self.assertEqual(res.body, b'A')
class HostnameMappingTestCase(AsyncHTTPTestCase):
    """Exercises the hostname_mapping option, which redirects plain
    hostnames and (host, port) pairs to alternate addresses without DNS."""

    def setUp(self):
        super(HostnameMappingTestCase, self).setUp()
        mapping = {
            'www.example.com': '127.0.0.1',
            ('foo.example.com', 8000): ('127.0.0.1', self.get_http_port()),
        }
        self.http_client = SimpleAsyncHTTPClient(
            self.io_loop, hostname_mapping=mapping)

    def get_app(self):
        return Application([url("/hello", HelloWorldHandler), ])

    def _fetch_and_check(self, request_url):
        """Fetch through the mapped client and verify the hello body."""
        self.http_client.fetch(request_url, self.stop)
        response = self.wait()
        response.rethrow()
        self.assertEqual(response.body, b'Hello world!')

    def test_hostname_mapping(self):
        self._fetch_and_check(
            'http://www.example.com:%d/hello' % self.get_http_port())

    def test_port_mapping(self):
        self._fetch_and_check('http://foo.example.com:8000/hello')
| |
from sqlalchemy.ext.compiler import compiles
from sqlalchemy import types as sqltypes
from sqlalchemy import schema
from ..util.compat import string_types
from .. import util
from .impl import DefaultImpl
from .base import ColumnNullable, ColumnName, ColumnDefault, \
ColumnType, AlterColumn, format_column_name, \
format_server_default
from .base import alter_table
from ..autogenerate import compare
class MySQLImpl(DefaultImpl):
    """Alembic DDL implementation for the MySQL dialect.

    MySQL cannot alter a single column attribute in isolation: every
    change is emitted as a full CHANGE/MODIFY COLUMN specification.  This
    class routes ``alter_column()`` to the appropriate construct and
    corrects several MySQL reflection quirks (implicit FK indexes,
    unique-index/unique-constraint duality, implicit RESTRICT) during
    autogenerate comparison.
    """
    __dialect__ = 'mysql'

    # MySQL DDL statements implicitly commit, so migrations cannot be
    # wrapped in a transaction.
    transactional_ddl = False

    def alter_column(self, table_name, column_name,
                     nullable=None,
                     server_default=False,
                     name=None,
                     type_=None,
                     schema=None,
                     existing_type=None,
                     existing_server_default=None,
                     existing_nullable=None,
                     autoincrement=None,
                     existing_autoincrement=None,
                     **kw
                     ):
        """Emit the MySQL construct matching the requested column change.

        A rename requires CHANGE COLUMN; other attribute changes use
        MODIFY COLUMN; a default-only change uses the lighter
        ALTER COLUMN ... SET/DROP DEFAULT form.  ``server_default`` uses
        ``False`` (not ``None``) as its "unspecified" sentinel because
        ``None`` is a meaningful value (DROP DEFAULT).
        """
        if name is not None:
            # CHANGE COLUMN must restate the whole column spec, so fall
            # back to the existing_* values for anything unspecified.
            self._exec(
                MySQLChangeColumn(
                    table_name, column_name,
                    schema=schema,
                    newname=name,
                    nullable=nullable if nullable is not None else
                    existing_nullable
                    if existing_nullable is not None
                    else True,
                    type_=type_ if type_ is not None else existing_type,
                    default=server_default if server_default is not False
                    else existing_server_default,
                    autoincrement=autoincrement if autoincrement is not None
                    else existing_autoincrement
                )
            )
        elif nullable is not None or \
                type_ is not None or \
                autoincrement is not None:
            # No rename, but a spec attribute changed: MODIFY COLUMN,
            # again filled out with existing_* fallbacks.
            self._exec(
                MySQLModifyColumn(
                    table_name, column_name,
                    schema=schema,
                    newname=name if name is not None else column_name,
                    nullable=nullable if nullable is not None else
                    existing_nullable
                    if existing_nullable is not None
                    else True,
                    type_=type_ if type_ is not None else existing_type,
                    default=server_default if server_default is not False
                    else existing_server_default,
                    autoincrement=autoincrement if autoincrement is not None
                    else existing_autoincrement
                )
            )
        elif server_default is not False:
            # Only the default changed; no full column spec required.
            self._exec(
                MySQLAlterDefault(
                    table_name, column_name, server_default,
                    schema=schema,
                )
            )

    def compare_server_default(self, inspector_column,
                               metadata_column,
                               rendered_metadata_default,
                               rendered_inspector_default):
        """Return True when the rendered defaults differ for autogenerate."""
        # partially a workaround for SQLAlchemy issue #3023; if the
        # column were created without "NOT NULL", MySQL may have added
        # an implicit default of '0' which we need to skip
        if metadata_column.type._type_affinity is sqltypes.Integer and \
                inspector_column.primary_key and \
                not inspector_column.autoincrement and \
                not rendered_metadata_default and \
                rendered_inspector_default == "'0'":
            return False
        else:
            return rendered_inspector_default != rendered_metadata_default

    def correct_for_autogen_constraints(self, conn_unique_constraints,
                                        conn_indexes,
                                        metadata_unique_constraints,
                                        metadata_indexes):
        """Mutate the reflected/metadata collections in place so that
        MySQL's implicit indexes and unique-index/constraint duality do
        not show up as spurious autogenerate diffs.
        """
        # TODO: if SQLA 1.0, make use of "duplicates_index"
        # metadata
        removed = set()
        for idx in list(conn_indexes):
            if idx.unique:
                continue
            # MySQL puts implicit indexes on FK columns, even if
            # composite and even if MyISAM, so can't check this too easily.
            # the name of the index may be the column name or it may
            # be the name of the FK constraint.
            for col in idx.columns:
                if idx.name == col.name:
                    conn_indexes.remove(idx)
                    removed.add(idx.name)
                    break
                for fk in col.foreign_keys:
                    if fk.name == idx.name:
                        conn_indexes.remove(idx)
                        removed.add(idx.name)
                        break
                if idx.name in removed:
                    break

        # then remove indexes from the "metadata_indexes"
        # that we've removed from reflected, otherwise they come out
        # as adds (see #202)
        for idx in list(metadata_indexes):
            if idx.name in removed:
                metadata_indexes.remove(idx)

        # then dedupe unique indexes vs. constraints, since MySQL
        # doesn't really have unique constraints as a separate construct.
        # but look in the metadata and try to maintain constructs
        # that already seem to be defined one way or the other
        # on that side. See #276
        metadata_uq_names = set([
            cons.name for cons in metadata_unique_constraints
            if cons.name is not None])

        unnamed_metadata_uqs = set([
            compare._uq_constraint_sig(cons).sig
            for cons in metadata_unique_constraints
            if cons.name is None
        ])

        metadata_ix_names = set([
            cons.name for cons in metadata_indexes if cons.unique])
        conn_uq_names = dict(
            (cons.name, cons) for cons in conn_unique_constraints
        )
        conn_ix_names = dict(
            (cons.name, cons) for cons in conn_indexes if cons.unique
        )

        # A name reflected as BOTH a unique constraint and a unique index
        # is one physical MySQL object; keep whichever form the metadata
        # declares and discard the other reflected duplicate.
        for overlap in set(conn_uq_names).intersection(conn_ix_names):
            if overlap not in metadata_uq_names:
                if compare._uq_constraint_sig(conn_uq_names[overlap]).sig \
                        not in unnamed_metadata_uqs:
                    conn_unique_constraints.discard(conn_uq_names[overlap])
            elif overlap not in metadata_ix_names:
                conn_indexes.discard(conn_ix_names[overlap])

    def correct_for_autogen_foreignkeys(self, conn_fks, metadata_fks):
        """Normalize reflected FK ondelete/onupdate so an explicit
        RESTRICT in the model does not diff against MySQL's implicit one.
        """
        conn_fk_by_sig = dict(
            (compare._fk_constraint_sig(fk).sig, fk) for fk in conn_fks
        )
        metadata_fk_by_sig = dict(
            (compare._fk_constraint_sig(fk).sig, fk) for fk in metadata_fks
        )

        for sig in set(conn_fk_by_sig).intersection(metadata_fk_by_sig):
            mdfk = metadata_fk_by_sig[sig]
            cnfk = conn_fk_by_sig[sig]
            # MySQL considers RESTRICT to be the default and doesn't
            # report on it. if the model has explicit RESTRICT and
            # the conn FK has None, set it to RESTRICT
            if mdfk.ondelete is not None and \
                    mdfk.ondelete.lower() == 'restrict' and \
                    cnfk.ondelete is None:
                cnfk.ondelete = 'RESTRICT'
            if mdfk.onupdate is not None and \
                    mdfk.onupdate.lower() == 'restrict' and \
                    cnfk.onupdate is None:
                cnfk.onupdate = 'RESTRICT'
class MySQLAlterDefault(AlterColumn):
    """DDL element for MySQL ``ALTER COLUMN ... SET/DROP DEFAULT``.

    ``default`` of ``None`` compiles to DROP DEFAULT (see the compiler
    below); any other value compiles to SET DEFAULT.
    """
    def __init__(self, name, column_name, default, schema=None):
        # NOTE: super(AlterColumn, self) deliberately skips
        # AlterColumn.__init__ (whose signature differs) and calls its
        # base constructor directly with just name/schema.
        super(AlterColumn, self).__init__(name, schema=schema)
        self.column_name = column_name
        self.default = default
class MySQLChangeColumn(AlterColumn):
    """DDL element for MySQL ``CHANGE COLUMN``, which renames a column and
    must restate its full specification (type, nullability, default,
    autoincrement).
    """
    def __init__(self, name, column_name, schema=None,
                 newname=None,
                 type_=None,
                 nullable=None,
                 default=False,
                 autoincrement=None):
        # NOTE: super(AlterColumn, self) deliberately skips
        # AlterColumn.__init__ (whose signature differs) and calls its
        # base constructor directly with just name/schema.
        super(AlterColumn, self).__init__(name, schema=schema)
        self.column_name = column_name
        self.nullable = nullable
        self.newname = newname
        # ``False`` means "no default specified"; ``None`` is meaningful.
        self.default = default
        self.autoincrement = autoincrement

        # MySQL cannot CHANGE/MODIFY without the complete column spec,
        # so the type is mandatory here.
        if type_ is None:
            raise util.CommandError(
                "All MySQL CHANGE/MODIFY COLUMN operations "
                "require the existing type."
            )

        self.type_ = sqltypes.to_instance(type_)
class MySQLModifyColumn(MySQLChangeColumn):
    """Same payload as MySQLChangeColumn but compiled as MODIFY COLUMN
    (alter attributes without renaming)."""
    pass
@compiles(ColumnNullable, 'mysql')
@compiles(ColumnName, 'mysql')
@compiles(ColumnDefault, 'mysql')
@compiles(ColumnType, 'mysql')
def _mysql_doesnt_support_individual(element, compiler, **kw):
    # MySQL requires the full column specification via CHANGE/MODIFY
    # COLUMN; the generic single-attribute ALTER constructs can never be
    # compiled for this dialect.
    raise NotImplementedError(
        "Individual alter column constructs not supported by MySQL"
    )
@compiles(MySQLAlterDefault, "mysql")
def _mysql_alter_default(element, compiler, **kw):
    """Render ``ALTER TABLE ... ALTER COLUMN <col> SET DEFAULT <v>`` or
    ``... DROP DEFAULT`` when the element's default is None."""
    if element.default is not None:
        action = "SET DEFAULT %s" % format_server_default(
            compiler, element.default)
    else:
        action = "DROP DEFAULT"
    return "%s ALTER COLUMN %s %s" % (
        alter_table(compiler, element.table_name, element.schema),
        format_column_name(compiler, element.column_name),
        action,
    )
@compiles(MySQLModifyColumn, "mysql")
def _mysql_modify_column(element, compiler, **kw):
    """Render ``ALTER TABLE ... MODIFY <col> <full column spec>``."""
    prefix = alter_table(compiler, element.table_name, element.schema)
    colname = format_column_name(compiler, element.column_name)
    colspec = _mysql_colspec(
        compiler,
        nullable=element.nullable,
        server_default=element.default,
        type_=element.type_,
        autoincrement=element.autoincrement
    )
    return "%s MODIFY %s %s" % (prefix, colname, colspec)
@compiles(MySQLChangeColumn, "mysql")
def _mysql_change_column(element, compiler, **kw):
    """Render ``ALTER TABLE ... CHANGE <old> <new> <full column spec>``."""
    prefix = alter_table(compiler, element.table_name, element.schema)
    old_name = format_column_name(compiler, element.column_name)
    new_name = format_column_name(compiler, element.newname)
    colspec = _mysql_colspec(
        compiler,
        nullable=element.nullable,
        server_default=element.default,
        type_=element.type_,
        autoincrement=element.autoincrement
    )
    return "%s CHANGE %s %s %s" % (prefix, old_name, new_name, colspec)
def _render_value(compiler, expr):
    """Render a server default: plain strings become single-quoted SQL
    literals; anything else is compiled as a SQL expression."""
    if not isinstance(expr, string_types):
        return compiler.sql_compiler.process(expr)
    return "'%s'" % expr
def _mysql_colspec(compiler, nullable, server_default, type_,
                   autoincrement):
    """Build the full column specification string required by MySQL's
    CHANGE/MODIFY COLUMN: type, nullability, optional AUTO_INCREMENT and
    optional DEFAULT clause."""
    parts = [
        compiler.dialect.type_compiler.process(type_),
        "NULL" if nullable else "NOT NULL",
    ]
    if autoincrement:
        parts.append("AUTO_INCREMENT")
    # ``False`` means "unspecified" and ``None`` means "no default";
    # identity checks keep values like 0 rendering correctly.
    if server_default is not False and server_default is not None:
        parts.append("DEFAULT %s" % _render_value(compiler, server_default))
    return " ".join(parts)
@compiles(schema.DropConstraint, "mysql")
def _mysql_drop_constraint(element, compiler, **kw):
    """Redefine SQLAlchemy's drop constraint to
    raise errors for invalid constraint type."""
    constraint = element.element
    droppable = (schema.ForeignKeyConstraint,
                 schema.PrimaryKeyConstraint,
                 schema.UniqueConstraint)
    if isinstance(constraint, droppable):
        return compiler.visit_drop_constraint(element, **kw)
    if isinstance(constraint, schema.CheckConstraint):
        raise NotImplementedError(
            "MySQL does not support CHECK constraints.")
    raise NotImplementedError(
        "No generic 'DROP CONSTRAINT' in MySQL - "
        "please specify constraint type")
| |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""SignatureDef utility functions implementation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_shape
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.saved_model import utils
def build_signature_def(inputs=None, outputs=None, method_name=None):
  """Utility function to build a SignatureDef protocol buffer.

  Args:
    inputs: Inputs of the SignatureDef defined as a proto map of string to
        tensor info.
    outputs: Outputs of the SignatureDef defined as a proto map of string to
        tensor info.
    method_name: Method name of the SignatureDef as a string.

  Returns:
    A SignatureDef protocol buffer constructed based on the supplied
    arguments.
  """
  signature_def = meta_graph_pb2.SignatureDef()
  for key in (inputs or {}):
    signature_def.inputs[key].CopyFrom(inputs[key])
  for key in (outputs or {}):
    signature_def.outputs[key].CopyFrom(outputs[key])
  if method_name is not None:
    signature_def.method_name = method_name
  return signature_def
def regression_signature_def(examples, predictions):
  """Creates regression signature from given examples and predictions.

  Args:
    examples: `Tensor`.
    predictions: `Tensor`.

  Returns:
    A regression-flavored signature_def.

  Raises:
    ValueError: If examples or predictions is `None`.
  """
  if examples is None:
    raise ValueError('examples cannot be None for regression.')
  if predictions is None:
    raise ValueError('predictions cannot be None for regression.')

  signature_inputs = {
      signature_constants.REGRESS_INPUTS:
          utils.build_tensor_info(examples)
  }
  signature_outputs = {
      signature_constants.REGRESS_OUTPUTS:
          utils.build_tensor_info(predictions)
  }
  return build_signature_def(signature_inputs, signature_outputs,
                             signature_constants.REGRESS_METHOD_NAME)
def classification_signature_def(examples, classes, scores):
  """Creates classification signature from given examples and predictions.

  Args:
    examples: `Tensor`.
    classes: `Tensor` or `None`.
    scores: `Tensor` or `None`.

  Returns:
    A classification-flavored signature_def.

  Raises:
    ValueError: If examples is `None`, or if classes and scores are both
      `None`.
  """
  if examples is None:
    raise ValueError('examples cannot be None for classification.')
  if classes is None and scores is None:
    raise ValueError('classes and scores cannot both be None for '
                     'classification.')

  signature_inputs = {
      signature_constants.CLASSIFY_INPUTS: utils.build_tensor_info(examples)
  }
  # Outputs are optional individually, but at least one must be present
  # (checked above).
  signature_outputs = {}
  if classes is not None:
    signature_outputs[signature_constants.CLASSIFY_OUTPUT_CLASSES] = (
        utils.build_tensor_info(classes))
  if scores is not None:
    signature_outputs[signature_constants.CLASSIFY_OUTPUT_SCORES] = (
        utils.build_tensor_info(scores))
  return build_signature_def(signature_inputs, signature_outputs,
                             signature_constants.CLASSIFY_METHOD_NAME)
def predict_signature_def(inputs, outputs):
  """Creates prediction signature from given inputs and outputs.

  Args:
    inputs: dict of string to `Tensor`.
    outputs: dict of string to `Tensor`.

  Returns:
    A prediction-flavored signature_def.

  Raises:
    ValueError: If inputs or outputs is `None` or empty.
  """
  if inputs is None or not inputs:
    raise ValueError('inputs cannot be None or empty for prediction.')
  # Fix: the original only rejected `outputs is None`, although its error
  # message already promised "None or empty"; an empty dict now fails the
  # same way, consistent with the inputs check above.
  if outputs is None or not outputs:
    raise ValueError('outputs cannot be None or empty for prediction.')
  signature_inputs = {key: utils.build_tensor_info(tensor)
                      for key, tensor in inputs.items()}
  signature_outputs = {key: utils.build_tensor_info(tensor)
                       for key, tensor in outputs.items()}

  signature_def = build_signature_def(
      signature_inputs, signature_outputs,
      signature_constants.PREDICT_METHOD_NAME)

  return signature_def
def _get_shapes_from_tensor_info_dict(tensor_info_dict):
  """Returns a map of keys to TensorShape objects.

  Args:
    tensor_info_dict: map with TensorInfo proto as values.

  Returns:
    Map with corresponding TensorShape objects as values.
  """
  shapes = {}
  for name, info in tensor_info_dict.items():
    shapes[name] = tensor_shape.TensorShape(info.tensor_shape)
  return shapes
def _get_types_from_tensor_info_dict(tensor_info_dict):
  """Returns a map of keys to DType objects.

  Args:
    tensor_info_dict: map with TensorInfo proto as values.

  Returns:
    Map with corresponding DType objects as values.
  """
  return dict(
      (name, dtypes.DType(info.dtype))
      for name, info in tensor_info_dict.items())
def get_signature_def_input_shapes(signature):
  """Returns map of input names to their shapes.

  Args:
    signature: SignatureDef proto.

  Returns:
    Map from string to TensorShape objects.
  """
  return _get_shapes_from_tensor_info_dict(signature.inputs)
def get_signature_def_input_types(signature):
  """Returns map of input names to their types.

  (Docstring corrected: this accessor reads ``signature.inputs``, not the
  outputs as the previous copy-pasted summary claimed.)

  Args:
    signature: SignatureDef proto.

  Returns:
    Map from string to DType objects.
  """
  return _get_types_from_tensor_info_dict(signature.inputs)
def get_signature_def_output_shapes(signature):
  """Returns map of output names to their shapes.

  Args:
    signature: SignatureDef proto.

  Returns:
    Map from string to TensorShape objects.
  """
  return _get_shapes_from_tensor_info_dict(signature.outputs)
def get_signature_def_output_types(signature):
  """Returns map of output names to their types.

  Args:
    signature: SignatureDef proto.

  Returns:
    Map from string to DType objects.
  """
  return _get_types_from_tensor_info_dict(signature.outputs)
| |
# -*- coding: utf-8 -*-
"""Utilities for text input preprocessing.
May benefit from a fast Cython rewrite.
"""
from __future__ import absolute_import
from __future__ import division
import string
import sys
import numpy as np
from six.moves import range
from six.moves import zip
from collections import OrderedDict
import warnings
if sys.version_info < (3,):
maketrans = string.maketrans
else:
maketrans = str.maketrans
def text_to_word_sequence(text,
                          filters='!"#$%&()*+,-./:;<=>?@[\\]^_`{|}~\t\n',
                          lower=True, split=" "):
    """Converts a text to a sequence of words (or tokens).

    # Arguments
        text: Input text (string).
        filters: Sequence of characters to filter out; each is replaced
            by the split marker before tokenizing.
        lower: Whether to convert the input to lowercase.
        split: Sentence split marker (string).

    # Returns
        A list of words (or tokens), with empty tokens dropped.
    """
    if lower:
        text = text.lower()
    translate_map = maketrans(filters, split * len(filters))
    tokens = text.translate(translate_map).split(split)
    return [token for token in tokens if token]
def one_hot(text, n,
            filters='!"#$%&()*+,-./:;<=>?@[\\]^_`{|}~\t\n',
            lower=True,
            split=' '):
    """Hashes each word of `text` into an integer bucket in `[1, n-1]`.

    Tokenization is delegated to `text_to_word_sequence`; distinct words
    may collide into the same bucket since plain `hash` is used.
    """
    words = text_to_word_sequence(text,
                                  filters=filters,
                                  lower=lower,
                                  split=split)
    return [abs(hash(word)) % (n - 1) + 1 for word in words]
class Tokenizer(object):
    """Text tokenization utility class.

    This class allows to vectorize a text corpus, by turning each
    text into either a sequence of integers (each integer being the index
    of a token in a dictionary) or into a vector where the coefficient
    for each token could be binary, based on word count, based on tf-idf...

    # Arguments
        num_words: the maximum number of words to keep, based
            on word frequency. Only the most common `num_words` words will
            be kept.
        filters: a string where each element is a character that will be
            filtered from the texts. The default is all punctuation, plus
            tabs and line breaks, minus the `'` character.
        lower: boolean. Whether to convert the texts to lowercase.
        split: character or string to use for token splitting.
        char_level: if True, every character will be treated as a token.

    By default, all punctuation is removed, turning the texts into
    space-separated sequences of words
    (words maybe include the `'` character). These sequences are then
    split into lists of tokens. They will then be indexed or vectorized.

    `0` is a reserved index that won't be assigned to any word.
    """

    def __init__(self, num_words=None,
                 filters='!"#$%&()*+,-./:;<=>?@[\\]^_`{|}~\t\n',
                 lower=True,
                 split=' ',
                 char_level=False,
                 **kwargs):
        # Legacy support
        if 'nb_words' in kwargs:
            warnings.warn('The `nb_words` argument in `Tokenizer` '
                          'has been renamed `num_words`.')
            num_words = kwargs.pop('nb_words')
        if kwargs:
            raise TypeError('Unrecognized keyword arguments: ' + str(kwargs))

        # word -> total occurrence count, in first-seen order
        self.word_counts = OrderedDict()
        # word -> number of documents that contain the word
        self.word_docs = {}
        self.filters = filters
        self.split = split
        self.lower = lower
        self.num_words = num_words
        self.document_count = 0
        self.char_level = char_level

    def fit_on_texts(self, texts):
        """Updates internal vocabulary based on a list of texts.

        Required before using `texts_to_sequences` or `texts_to_matrix`.

        # Arguments
            texts: can be a list of strings,
                or a generator of strings (for memory-efficiency)
        """
        self.document_count = 0
        for text in texts:
            self.document_count += 1
            # In char_level mode the raw text is iterated character by
            # character; otherwise it is tokenized into words.
            seq = text if self.char_level else text_to_word_sequence(text,
                                                                     self.filters,
                                                                     self.lower,
                                                                     self.split)
            for w in seq:
                if w in self.word_counts:
                    self.word_counts[w] += 1
                else:
                    self.word_counts[w] = 1
            # Document frequency: each word counted at most once per text.
            for w in set(seq):
                if w in self.word_docs:
                    self.word_docs[w] += 1
                else:
                    self.word_docs[w] = 1

        wcounts = list(self.word_counts.items())
        wcounts.sort(key=lambda x: x[1], reverse=True)
        sorted_voc = [wc[0] for wc in wcounts]
        # note that index 0 is reserved, never assigned to an existing word
        self.word_index = dict(list(zip(sorted_voc, list(range(1, len(sorted_voc) + 1)))))

        # index -> document frequency, keyed by the ranks computed above
        self.index_docs = {}
        for w, c in list(self.word_docs.items()):
            self.index_docs[self.word_index[w]] = c

    def fit_on_sequences(self, sequences):
        """Updates internal vocabulary based on a list of sequences.

        Required before using `sequences_to_matrix`
        (if `fit_on_texts` was never called).

        # Arguments
            sequences: A list of sequence.
                A "sequence" is a list of integer word indices.
        """
        self.document_count = len(sequences)
        self.index_docs = {}
        for seq in sequences:
            seq = set(seq)
            for i in seq:
                if i not in self.index_docs:
                    self.index_docs[i] = 1
                else:
                    self.index_docs[i] += 1

    def texts_to_sequences(self, texts):
        """Transforms each text in texts in a sequence of integers.

        Only top "num_words" most frequent words will be taken into account.
        Only words known by the tokenizer will be taken into account.

        # Arguments
            texts: A list of texts (strings).

        # Returns
            A list of sequences.
        """
        res = []
        for vect in self.texts_to_sequences_generator(texts):
            res.append(vect)
        return res

    def texts_to_sequences_generator(self, texts):
        """Transforms each text in texts in a sequence of integers.

        Only top "num_words" most frequent words will be taken into account.
        Only words known by the tokenizer will be taken into account.

        # Arguments
            texts: A list of texts (strings).

        # Yields
            Yields individual sequences.
        """
        num_words = self.num_words
        for text in texts:
            seq = text if self.char_level else text_to_word_sequence(text,
                                                                     self.filters,
                                                                     self.lower,
                                                                     self.split)
            vect = []
            for w in seq:
                i = self.word_index.get(w)
                if i is not None:
                    # Indices >= num_words (rarer words) are dropped;
                    # unknown words are dropped entirely.
                    if num_words and i >= num_words:
                        continue
                    else:
                        vect.append(i)
            yield vect

    def texts_to_matrix(self, texts, mode='binary'):
        """Convert a list of texts to a Numpy matrix.

        # Arguments
            texts: list of strings.
            mode: one of "binary", "count", "tfidf", "freq".

        # Returns
            A Numpy matrix.
        """
        sequences = self.texts_to_sequences(texts)
        return self.sequences_to_matrix(sequences, mode=mode)

    def sequences_to_matrix(self, sequences, mode='binary'):
        """Converts a list of sequences into a Numpy matrix.

        # Arguments
            sequences: list of sequences
                (a sequence is a list of integer word indices).
            mode: one of "binary", "count", "tfidf", "freq"

        # Returns
            A Numpy matrix.

        # Raises
            ValueError: In case of invalid `mode` argument,
                or if the Tokenizer requires to be fit to sample data.
        """
        if not self.num_words:
            # NOTE(review): self.word_index only exists after
            # fit_on_texts(); presumably an AttributeError here is the
            # "must fit first" failure mode — confirm intended.
            if self.word_index:
                num_words = len(self.word_index) + 1
            else:
                raise ValueError('Specify a dimension (num_words argument), '
                                 'or fit on some text data first.')
        else:
            num_words = self.num_words

        if mode == 'tfidf' and not self.document_count:
            raise ValueError('Fit the Tokenizer on some data '
                             'before using tfidf mode.')

        x = np.zeros((len(sequences), num_words))
        for i, seq in enumerate(sequences):
            if not seq:
                continue
            counts = {}
            for j in seq:
                # Indices outside the kept vocabulary are ignored.
                if j >= num_words:
                    continue
                if j not in counts:
                    counts[j] = 1.
                else:
                    counts[j] += 1
            for j, c in list(counts.items()):
                if mode == 'count':
                    x[i][j] = c
                elif mode == 'freq':
                    x[i][j] = c / len(seq)
                elif mode == 'binary':
                    x[i][j] = 1
                elif mode == 'tfidf':
                    # Use weighting scheme 2 in
                    # https://en.wikipedia.org/wiki/Tf%E2%80%93idf
                    tf = 1 + np.log(c)
                    idf = np.log(1 + self.document_count /
                                 (1 + self.index_docs.get(j, 0)))
                    x[i][j] = tf * idf
                else:
                    raise ValueError('Unknown vectorization mode:', mode)
        return x
| |
# -*- coding: utf-8 -*-
import os,sys
import numpy as np
from PyQt4 import QtCore, QtGui
from PyQt4.QtCore import SIGNAL, SLOT, QObject
from slitlets import Slitlets, ra_read, dec_read
from slitmask import SlitMask
from rsmt_gui import Ui_MainWindow
from infotab import InfoTab
from catalogtab import CatalogTab
from slittab import SlitTab
from reftab import RefTab
from optimizetab import OptimizeTab
from finalizetab import FinalizeTab
# added these two import to avoid a seg fault
from pyraf import iraf
from iraf import pysalt
from ImageDisplay import ImageDisplay
class SlitMaskGui(QtGui.QMainWindow, InfoTab, CatalogTab, OptimizeTab, SlitTab, RefTab, FinalizeTab):
    def __init__(self, parent=None, infile=None, inimage=None, center_ra=None, center_dec=None, position_angle=None):
        """Build the main slit-mask window, preload the optional
        catalogue/image inputs, and wire every UI signal to its handler.

        parent         -- optional parent widget
        infile         -- optional object catalogue to load at startup
        inimage        -- optional image to push to the display tool
        center_ra/dec  -- optional mask centre; computed from the
                          catalogue when omitted
        position_angle -- optional mask position angle
        """
        QtGui.QWidget.__init__(self, parent)
        #set up the main UI
        self.ui = Ui_MainWindow()
        self.ui.setupUi(self)
        # OptimizeTab.__init__(self,self.ui)
        #set up the slitmask
        self.slitmask=SlitMask(center_ra=center_ra, center_dec=center_dec, position_angle=position_angle )
        self.slitlets=self.slitmask.slitlets
        #setup the image interaction
        self.imagedisplay=ImageDisplay(target='pySlitMask:5909')
        self.position_angle=position_angle
        if inimage:
            self.loadimage(inimage)
        #set up some variables that will be needed later
        self.xmlfile=None
        self.fcfile=None
        #read in the input data if available
        self.infile=infile
        if infile:
            # Catalogue mode: load the file, derive the mask centre from
            # the catalogue if none was given, and populate the header
            # fields from the loaded mask.
            self.ui.radioButtonInfo_Catalogue.setChecked(True)
            self.setmode2cat()
            self.entercatalog(infile)
            print self.slitmask.center_ra, self.slitmask.center_dec
            if self.slitmask.center_ra is None and self.slitmask.center_dec is None:
                self.slitmask.set_MaskPosition()
            self.displayfootprint()
            #if self.slitmask.center_ra and self.slitmask.center_dec:
            #self.imagedisplay.rssregion(self.slitmask.center_ra, self.slitmask.center_dec)
            self.ui.lineEditMain_CenRA.setText(str(self.slitmask.center_ra))
            self.ui.lineEditMain_CenDEC.setText(str(self.slitmask.center_dec))
            self.ui.lineEditMain_PA.setText(str(self.slitmask.position_angle))
            self.ui.lineEditMain_Equinox.setText(str(self.slitmask.equinox))
            self.ui.lineEditMain_TargetName.setText(self.slitmask.target_name)
            self.ui.lineEditMain_MaskName.setText(self.slitmask.mask_name)
            self.ui.lineEditInfo_Creator.setText(self.slitmask.creator)
            self.ui.lineEditInfo_Proposer.setText(self.slitmask.proposer)
            self.ui.lineEditInfo_ProposalCode.setText(self.slitmask.proposal_code)
        else:
            # Manual mode: no catalogue yet; enable loading one later.
            self.ui.radioButtonInfo_Catalogue.setChecked(False)
            self.ui.radioButtonInfo_Manual.setChecked(True)
            self.setmode2manual()
            self.ui.toolButtonCat_Load.setEnabled(True)
            #self.displayfootprint()
        # self.opttab = OptimizeTab()
        # setup default values for the optimizer
        self.opt_yspacing = 1.
        self.opt_iter = 10
        self.ui.lineEditOpt_Yspacing.setText(str(self.opt_yspacing))
        self.ui.lineEditOpt_Niter.setText(str(self.opt_iter))
        # self.slitmask.outFoV()
        # print self.slitlets.data['fov_flag']
        # self.updatetabs()
        #Listen to different signals
        #menu items
        QtCore.QObject.connect(self.ui.actionLoad_Catalogue, QtCore.SIGNAL("triggered()"), self.loadcatalog)
        QtCore.QObject.connect(self.ui.actionLoad_Image, QtCore.SIGNAL("triggered()"), self.loadimage)
        #main tabs
        QtCore.QObject.connect(self.ui.lineEditMain_CenRA, QtCore.SIGNAL("editingFinished()"), self.loadCenRA)
        QtCore.QObject.connect(self.ui.lineEditMain_CenDEC, QtCore.SIGNAL("editingFinished()"), self.loadCenDEC)
        QtCore.QObject.connect(self.ui.lineEditMain_PA, QtCore.SIGNAL("editingFinished()"), self.loadpositionangle)
        QtCore.QObject.connect(self.ui.lineEditMain_Equinox, QtCore.SIGNAL("editingFinished()"), self.loadequinox)
        QtCore.QObject.connect(self.ui.lineEditMain_TargetName, QtCore.SIGNAL("editingFinished()"), self.loadtargetname)
        QtCore.QObject.connect(self.ui.lineEditMain_MaskName, QtCore.SIGNAL("editingFinished()"), self.loadmaskname)
        #info tabs
        QtCore.QObject.connect(self.ui.lineEditInfo_ProposalCode, QtCore.SIGNAL("editingFinished()"), self.loadproposalcode)
        QtCore.QObject.connect(self.ui.lineEditInfo_Proposer, QtCore.SIGNAL("editingFinished()"), self.loadproposer)
        QtCore.QObject.connect(self.ui.lineEditInfo_Creator, QtCore.SIGNAL("editingFinished()"), self.loadcreator)
        # QtCore.QObject.connect(self.slitmask, SIGNAL('xmlloaded'), self.setcreator)
        QtCore.QObject.connect(self.ui.radioButtonInfo_Catalogue, QtCore.SIGNAL("clicked()"), self.setmode2cat)
        QtCore.QObject.connect(self.ui.radioButtonInfo_Manual, QtCore.SIGNAL("clicked()"), self.setmode2manual)
        QtCore.QObject.connect(self.ui.checkBoxInfo_CentroidOn, QtCore.SIGNAL("clicked()"), self.setmodecentroiding)
        #catalog tabs
        QtCore.QObject.connect(self.ui.toolButtonCat_Load, QtCore.SIGNAL("clicked(bool)"), self.loadcatalog)
        QtCore.QObject.connect(self.ui.pushButtonCat_AddSlits, QtCore.SIGNAL("clicked(bool)"), self.addslitfromcatalog)
        QtCore.QObject.connect(self.ui.pushButtonCat_Clear, QtCore.SIGNAL("clicked()"), self.clearContents)
        #slit tab
        QtCore.QObject.connect(self.ui.pushButtonSlit_ClearSlits, QtCore.SIGNAL("clicked()"), self.clearslittable)
        QtCore.QObject.connect(self.ui.pushButtonSlit_AddSlitImage, QtCore.SIGNAL("clicked()"), self.addslitletsfromimage)
        QtCore.QObject.connect(self.ui.pushButtonSlit_AddSlitfromCat, QtCore.SIGNAL("clicked()"), self.addslitletsfromcatalogue)
        QtCore.QObject.connect(self.ui.pushButtonSlit_AddSlit, QtCore.SIGNAL("clicked()"), self.addslitmanually)
        QtCore.QObject.connect(self.ui.pushButtonSlit_DeleteSlit, QtCore.SIGNAL("clicked()"), self.deleteslitmanually)
        QtCore.QObject.connect(self.ui.pushButtonSlit_DeleteSlitImage, QtCore.SIGNAL("clicked()"), self.deleteslitfromimage)
        QtCore.QObject.connect(self.ui.tableWidgetSlits, QtCore.SIGNAL("itemSelectionChanged()"), self.setposition)
        QtCore.QObject.connect(self.ui.tableWidgetSlits, QtCore.SIGNAL("cellChanged(int, int)"), self.slitchanged)
        #optimize tab
        QtCore.QObject.connect(self.ui.pushButtonOpt_Optimize, QtCore.SIGNAL("clicked()"), self.optimize)
        QtCore.QObject.connect(self.ui.lineEditOpt_Yspacing, QtCore.SIGNAL("editingFinished()"), self.setoptimizer_yspacing)
        QtCore.QObject.connect(self.ui.lineEditOpt_Niter, QtCore.SIGNAL("editingFinished()"), self.setoptimizer_iter)
        QtCore.QObject.connect(self.ui.checkBoxOpt_IncRefstars, QtCore.SIGNAL("stateChanged(int)"), self.includerefstars)
        QtCore.QObject.connect(self.ui.lineEditOpt_NumRefstars, QtCore.SIGNAL("editingFinished()"), self.setnumrefstars)
        #ref stars
        QtCore.QObject.connect(self.ui.pushButtonRef_ClearRefstars, QtCore.SIGNAL("clicked()"), self.clearrefstartable)
        QtCore.QObject.connect(self.ui.pushButtonRef_AddRefstarImage, QtCore.SIGNAL("clicked()"), self.addslitletsfromimage)
        QtCore.QObject.connect(self.ui.pushButtonRef_AddRefstarsfromCat, QtCore.SIGNAL("clicked()"), self.addrefstarsfromcatalogue)
        QtCore.QObject.connect(self.ui.pushButtonRef_AddRefstar, QtCore.SIGNAL("clicked()"), self.addrefstarmanually)
        QtCore.QObject.connect(self.ui.pushButtonRef_DeleteRefstar, QtCore.SIGNAL("clicked()"), self.deleterefstarmanually)
        QtCore.QObject.connect(self.ui.pushButtonRef_DeleteRefstar_2, QtCore.SIGNAL("clicked()"), self.deleteslitfromimage)
        # NOTE(review): the leading space in " itemSelectionChanged()" looks
        # like a typo and may prevent this connection — confirm.
        QtCore.QObject.connect(self.ui.tableWidgetRefstars, QtCore.SIGNAL(" itemSelectionChanged()"), self.setrefposition)
        QtCore.QObject.connect(self.ui.tableWidgetRefstars, QtCore.SIGNAL("cellChanged(int, int)"), self.refchanged)
        # finalize tab
        QtCore.QObject.connect(self.ui.pushButtonFin_Validate, QtCore.SIGNAL("clicked(bool)"),self.validator)
        QtCore.QObject.connect(self.ui.pushButtonFin_WriteXML, QtCore.SIGNAL("clicked(bool)"), self.writexml)
        QtCore.QObject.connect(self.ui.toolButtonFin_WriteRSMT, QtCore.SIGNAL("clicked(bool)"), self.writersmt)
        QtCore.QObject.connect(self.ui.pushButtonFin_CreateFChart_Current, QtCore.SIGNAL("clicked(bool)"), self.writeFC_Current)
        QtCore.QObject.connect(self.ui.pushButtonFin_CreateFChart_DSS, QtCore.SIGNAL("clicked(bool)"), self.writeFC_DSS)
        self.ui.tabWidget.setCurrentIndex(0)
    def clearContents(self):
        """Drop the loaded slitlet data and empty the catalog table widget."""
        self.slitlets.data = None
        self.ui.tableWidgetCat.clearContents()
        self.ui.tableWidgetCat.setRowCount(0)
        #TODO: Set the number of rows to the current data length
# loads mask coordinates
def loadCenRA(self):
self.slitmask.validated = False
self.slitmask.add_center_ra(ra_read(self.ui.lineEditMain_CenRA.text()))
if self.slitmask.center_ra == None:
palette = self.setPalette('error')
self.ui.lineEditMain_CenRA.setPalette(palette)
else:
palette = self.setPalette('normal')
self.ui.lineEditMain_CenRA.setPalette(palette)
self.ui.lineEditMain_CenRA.setText(str(self.slitmask.center_ra))
self.slitmask.outFoV()
self.updatetabs()
def loadCenDEC(self):
self.slitmask.validated = False
self.slitmask.add_center_dec(dec_read(self.ui.lineEditMain_CenDEC.text()))
if self.slitmask.center_dec == None:
palette = self.setPalette('error')
self.ui.lineEditMain_CenDEC.setPalette(palette)
else:
palette = self.setPalette('normal')
self.ui.lineEditMain_CenDEC.setPalette(palette)
self.ui.lineEditMain_CenDEC.setText(str(self.slitmask.center_dec))
self.slitmask.outFoV()
self.updatetabs()
def loadpositionangle(self):
#print self.ui.lineEditMain_PA.text()
self.slitmask.validated = False
self.slitmask.add_position_angle(dec_read(self.ui.lineEditMain_PA.text()))
if self.slitmask.position_angle == None:
palette = self.setPalette('error')
self.ui.lineEditMain_PA.setPalette(palette)
else:
palette = self.setPalette('normal')
self.ui.lineEditMain_PA.setPalette(palette)
self.ui.lineEditMain_PA.setText(str(self.slitmask.position_angle))
self.slitmask.outFoV()
self.imagedisplay.rotate(self.slitmask.position_angle)
self.updatetabs()
def loadequinox(self):
self.slitmask.validated = False
self.slitmask.add_equinox(dec_read(self.ui.lineEditMain_Equinox.text()))
if self.slitmask.equinox == None:
palette = self.setPalette('error')
self.ui.lineEditMain_Equinox.setPalette(palette)
else:
palette = self.setPalette('normal')
self.ui.lineEditMain_Equinox.setPalette(palette)
self.ui.lineEditMain_Equinox.setText(str(self.slitmask.equinox))
self.slitmask.outFoV()
# load info from the main window
def loadtargetname(self):
self.slitmask.target_name=str(self.ui.lineEditMain_TargetName.text()).strip()
if self.slitmask.validated:
if len(self.slitmask.target_name)==0: self.slitmask.validated=False
def loadmaskname(self):
self.slitmask.mask_name=str(self.ui.lineEditMain_MaskName.text()).strip()
if self.slitmask.validated:
if len(self.slitmask.mask_name)==0: self.slitmask.validated=False
    def loadValue(self):
        """Generic change handler: refresh every tab after an edit."""
        self.updatetabs()
def updatetabs(self):
"""Update all of the information after changes to the slitlet class"""
print 'Updating tabs'
self.updatecatalogtable()
self.updateslittable()
self.updaterefstartable()
self.imagedisplay.deleteregions()
self.displayslits()
# self.displayfootprint()
def displayslits(self):
"""Add the slits to the image """
ids = np.where(self.slitlets.data['inmask_flag']==1)[0]
fout = open('tmp.reg', 'w')
fout.write('# Region file format: DS9 version 4.1\n# Filename: sgpR.fits\n')
fout.write('global color=green dashlist=8 3 width=1 font="helvetica 10 normal roman" select=1 highlite=1 dash=0 fixed=0 edit=1 move=1 delete=1 include=1 source=1\n')
fout.write('fk5\n')
for i in ids:
fout.write(self.slitlets.asregion(i,self.slitmask.position_angle)+'\n')
# show spectral footprint:
#fout.write(self.slitlets.asregionspec(i,self.slitmask.position_angle)+'\n')
fout.close()
self.imagedisplay.regionfromfile('tmp.reg')
def displayall(self):
"""Add the slits to the image """
ids=np.where(self.slitlets.data['inmask_flag']==1)[0]
fout=open('oth.reg', 'w')
fout.write('# Region file format: DS9 version 4.1\n# Filename: sgpR.fits\n')
fout.write('global color=green dashlist=8 3 width=1 font="helvetica 10 normal roman" select=1 highlite=1 dash=0 fixed=0 edit=1 move=1 delete=1 include=1 source=1\n')
fout.write('fk5\n')
for i in ids:
#fout.write(self.slitlets.asregion(i,self.slitmask.position_angle)+'\n')
# show spectral footprint:
fout.write(self.slitlets.asregionspec(i,self.slitmask.position_angle)+'\n')
# **** also stars, need to be careful about shape
fout.close()
self.imagedisplay.regionfromfile('tmp.reg')
def displayfootprint(self):
"""Add the RSS footprint to the image"""
# fout=open('tmpfp.reg', 'w')
# fout.write('# Region file format: DS9 version 4.1\n# Filename: sgpR.fits\n')
#
## fout.write('global color=yellow dashlist=8 3 width=2 font="helvetica 10 normal roman" select=0 highlite=0 dash=1 fixed=0 edit=0 move=0 delete=0 include=1 source=1\n')
# fout.write('fk5\n')
# fout.write('circle(%f,%f,4\') # color=yellow dashlist=8 3 width=2 select=0 highlite=0 dash=1 fixed=0 edit=0 move=1 delete=0 background include=1 source=0\n' % (self.slitmask.center_ra, self.slitmask.center_dec))
# fout.close()
# self.imagedisplay.regionfromfile('tmpfp.reg')
self.imagedisplay.rssregion(self.slitmask.center_ra, self.slitmask.center_dec)
    def loadimage(self, inimage=None):
        """Display *inimage* in the viewer; if none is given, prompt the
        user with a file-open dialog.

        NOTE(review): the display call reads ``self.position_angle`` while
        the rest of this class uses ``self.slitmask.position_angle`` --
        confirm the attribute exists on this object, otherwise this raises
        AttributeError.
        """
        if not inimage:
            #launch a file IO dialog
            ldir = os.getcwd()
            inimage = QtGui.QFileDialog.getOpenFileName(caption="Open Catalog", directory=ldir)
        self.inimage=str(inimage)
        self.imagedisplay.display(inimage, pa=self.position_angle)
def deleteslitfromimage(self):
#download the slits in the image
newslits=self.imagedisplay.getregions()
#loop through the list and see which one is missing
try:
index=np.where(self.slitlets.data['inmask_flag']==1)[0]
except:
return
#check to see if it is in the mask
for i in index:
sra=self.slitlets.data['targ_ra'][i]
sdec=self.slitlets.data['targ_dec'][i]
found=False
for k in newslits.keys():
ra = float(newslits[k][0][0])
dec = float(newslits[k][0][1])
if abs(sra-ra) < 0.0003 and abs(sdec-dec) < 0.0003:
found=True
if not found:
self.slitlets.data['inmask_flag'][i]=0
#update the tabs
self.updatetabs()
    def addslitletsfromimage(self):
        """Download the slits from the image and add them to the slitlet or catalog
        If catalog is selected, it will search the catalog for a corresponding object and center the slit on that object
        **TODO**
        If manual centroided is selected, it will centroid around that value in the image and use that position
        If manual uncentroided is selected, it will just use the slit position
        """
        #download the slits in the image
        newslits=self.imagedisplay.getregions()
        #loop through the objects--if they are already in the catalog check to see if they
        #need updating. If not, then add them to the catalog
        for i in newslits:
            if newslits[i][1]:
                #if it is tagged we assume it is already in the slitmask
                pass
            else:
                # untagged region: take the RA/Dec of the region centre and
                # add the matching catalog target to the mask
                ra = float(newslits[i][0][0])
                dec = float(newslits[i][0][1])
                #width=str(newslits[i][0])
                #height=str(newslits[i][0])
                #tilt=str(newslits[i][0])
                #TODO: This searches that catalog and adds the target that matches the slit drawn
                sid=self.slitlets.findtarget(ra,dec)
                self.slitlets.addtomask(sid)
        self.updatetabs()
def setPalette(self,mode):
palette = QtGui.QPalette()
if mode == 'error':
brush = QtGui.QBrush(QtGui.QColor(255, 148, 148))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush)
return palette
if mode == 'normal':
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush)
return palette
if __name__ == "__main__":
    # Optional positional arguments: [catalog file] [image file]
    infile = sys.argv[1] if len(sys.argv) > 1 else None
    inimage = sys.argv[2] if len(sys.argv) == 3 else None
    app = QtGui.QApplication([])
    myapp = SlitMaskGui(infile=infile, inimage=inimage)
    myapp.show()
    sys.exit(app.exec_())
| |
import mock
import unittest
import urllib
import urlparse
from dropbox.session import DropboxSession, DropboxSessionOAuth2
class ConfigurableSessionTest(unittest.TestCase):
    """Base case: builds sessions via the subclass-provided ``Session``."""

    def _create_generic_session(self, rest_client, consumer_key='a',
                                consumer_secret='b', access_type='app_folder'):
        """Instantiate ``self.Session`` with throwaway test credentials."""
        return self.Session(consumer_key, consumer_secret, access_type,
                            rest_client=rest_client)
class OAuth1Mixin(ConfigurableSessionTest):
    # Mixin selecting the OAuth1 session implementation for the shared tests.
    Session = DropboxSession
class OAuth2Mixin(ConfigurableSessionTest):
    # Mixin selecting the OAuth2 session implementation for the shared tests.
    Session = DropboxSessionOAuth2
class BaseTestClientUsage(object):
    """Assertions shared by the OAuth1 and OAuth2 session test classes."""

    def test_API_CONTENT_HOST(self):
        sess = self._create_generic_session(None)
        self.assertEqual(sess.API_CONTENT_HOST, 'api-content.dropbox.com')

    def test_API_HOST(self):
        sess = self._create_generic_session(None)
        self.assertEqual(sess.API_HOST, 'api.dropbox.com')

    def test_build_url_simple(self):
        sess = self._create_generic_session(None)
        url = sess.build_url('api.dropbox.com', '/dropbox/metadata')
        self.assertEqual(url, 'https://api.dropbox.com/1/dropbox/metadata')

    def test_build_url_params(self):
        sess = self._create_generic_session(None)
        params = {'foo': 'bar', 'baz': '1 2'}
        url = sess.build_url('api.dropbox.com', '/dropbox/metadata', params)
        expected = 'https://api.dropbox.com/1/dropbox/metadata?' + urllib.urlencode(params)
        self.assertEqual(url, expected)

    def test_root_app_folder(self):
        sess = self._create_generic_session(None)
        self.assertEqual(sess.root, 'sandbox')

    def test_root_dropbox(self):
        sess = self._create_generic_session(None, access_type='dropbox')
        self.assertEqual(sess.root, 'dropbox')
class TestClientUsageOAuth1(OAuth1Mixin, BaseTestClientUsage):
    """Shared client-usage assertions plus the OAuth1 token setters."""
    Session = DropboxSession

    def test_set_token(self):
        key, secret = 'c', 'd'
        sess = self._create_generic_session(mock.Mock())
        sess.set_token(key, secret)
        self.assertEqual(sess.token.key, key)
        self.assertEqual(sess.token.secret, secret)

    def test_set_request_token(self):
        key, secret = 'c', 'd'
        sess = self._create_generic_session(mock.Mock())
        sess.set_request_token(key, secret)
        self.assertEqual(sess.request_token.key, key)
        self.assertEqual(sess.request_token.secret, secret)
class TestClientUsageOAuth2(OAuth2Mixin, BaseTestClientUsage):
    """Shared client-usage assertions plus the OAuth2 code-exchange flow."""
    Session = DropboxSessionOAuth2

    def test_build_authorize_url(self):
        sess = self._create_generic_session(mock.Mock())
        url = sess.build_authorize_url('hello', 'melon')
        query = urlparse.parse_qs(urlparse.urlsplit(url).query)
        self.assertEqual(query, {'response_type': ['code'],
                                 'client_id': ['a'],
                                 'redirect_uri': ['hello'],
                                 'state': ['melon']})

    def test_obtain_request_token(self):
        # NOTE: despite the name, this exercises obtain_access_token --
        # OAuth2 has no request-token step.
        rest = mock.Mock()
        rest.POST.return_value = {'access_token': 'a token'}
        sess = self._create_generic_session(rest)
        tok = sess.obtain_access_token('a code', 'redir')
        self.assertEqual(tok, 'a token')
        self.assertEqual(sess.token, 'a token')
        self.assertTrue(rest.POST.called)
        sent = rest.POST.call_args[1]["params"]
        self.assertEqual(sent, {'grant_type': 'authorization_code',
                                'code': 'a code',
                                'client_id': 'a',
                                'client_secret': 'b',
                                'redirect_uri': 'redir'})

    def test_linked(self):
        sess = self._create_generic_session(mock.Mock())
        self.assertFalse(sess.is_linked())
        sess.set_token('key')
        self.assertTrue(sess.is_linked())
class TestOAuth1Session(OAuth1Mixin):
    """OAuth1-specific tests: request/access token negotiation, authorize
    URL construction, and token-response parsing failures."""

    def test_obtain_access_token_no_request_token(self):
        """obtain_access_token without a prior request token must raise."""
        mock_rest_client = mock.Mock()
        sess = self._create_generic_session(mock_rest_client)
        self.assertRaises(Exception, sess.obtain_access_token)

    def test_obtain_request_token(self):
        """A request token is fetched, signed with PLAINTEXT, stored and
        returned."""
        # setup mocks
        request_token = ('a', 'b')
        new_request_token_res = dict(oauth_token=request_token[0],
                                     oauth_token_secret=request_token[1])
        mock_rest_client = mock.Mock()
        mock_response = mock.Mock()
        mock_rest_client.POST.return_value = mock_response
        mock_response.read.return_value = urllib.urlencode(
            new_request_token_res)
        # make call
        sess = self._create_generic_session(mock_rest_client)
        rt = sess.obtain_request_token()
        # assert correctness
        url = 'https://api.dropbox.com/1/oauth/request_token'
        mock_rest_client.POST.assert_called_with(
            url, headers={}, params=mock.ANY,
            raw_response=True)
        # TODO: maybe we can be less strict about the exact oauth headers
        _, kwargs = mock_rest_client.POST.call_args
        params = kwargs['params']
        self.assertEqual(params['oauth_consumer_key'], sess.consumer_creds.key)
        self.assertEqual(params['oauth_version'], '1.0')
        self.assertEqual(params['oauth_signature_method'], 'PLAINTEXT')
        self.assertEqual(
            params['oauth_signature'], '%s&' % sess.consumer_creds.secret)
        self.assertEqual(rt.key, request_token[0])
        self.assertEqual(rt.secret, request_token[1])
        self.assertEqual(sess.request_token.key, request_token[0])
        self.assertEqual(sess.request_token.secret, request_token[1])

    def _obtain_access_token(self, call):
        """Shared driver: *call* performs the access-token exchange given a
        session and request token; the signed POST and stored token are then
        verified."""
        class request_token:
            key = 'a'
            secret = 'b'
        access_token = ('a', 'b')
        new_access_token_res = dict(oauth_token=access_token[0],
                                    oauth_token_secret=access_token[1])
        mock_rest_client = mock.Mock()
        mock_response = mock.Mock()
        mock_rest_client.POST.return_value = mock_response
        mock_response.read.return_value = urllib.urlencode(
            new_access_token_res)
        # make call
        sess = self._create_generic_session(mock_rest_client)
        at = call(sess, request_token)
        # assert correctness
        url = 'https://api.dropbox.com/1/oauth/access_token'
        mock_rest_client.POST.assert_called_with(
            url, headers={}, params=mock.ANY,
            raw_response=True)
        # TODO: maybe we can be less strict about the exact oauth headers
        _, kwargs = mock_rest_client.POST.call_args
        params = kwargs['params']
        self.assertEqual(params['oauth_consumer_key'], sess.consumer_creds.key)
        self.assertEqual(params['oauth_version'], '1.0')
        self.assertEqual(params['oauth_signature_method'], 'PLAINTEXT')
        self.assertEqual(params['oauth_signature'],
                         '%s&%s' % (sess.consumer_creds.secret, request_token.secret))
        self.assertEqual(params['oauth_token'], request_token.key)
        self.assertEqual(frozenset(params),
                         frozenset(['oauth_consumer_key',
                                    'oauth_timestamp',
                                    'oauth_nonce',
                                    'oauth_version',
                                    'oauth_signature_method',
                                    'oauth_signature',
                                    'oauth_token']))
        self.assertEqual(at.key, access_token[0])
        self.assertEqual(at.secret, access_token[1])
        self.assertEqual(sess.token.key, access_token[0])
        self.assertEqual(sess.token.secret, access_token[1])

    def test_obtain_access_token_passed_in_request_token(self):
        """Request token supplied explicitly to obtain_access_token."""
        def call(sess, request_token):
            return sess.obtain_access_token(request_token=request_token)
        self._obtain_access_token(call)

    def test_obtain_access_token_set_request_token(self):
        """Request token supplied beforehand via set_request_token."""
        def call(sess, request_token):
            sess.set_request_token(request_token.key, request_token.secret)
            return sess.obtain_access_token()
        self._obtain_access_token(call)

    def test_build_authorize_url(self):
        """Authorize URL carries the token and callback as query params."""
        mock_rest_client = mock.Mock()
        sess = self._create_generic_session(mock_rest_client)
        class request_token:
            key = 'a'
            secret = 'b'
        callback = 'http://www.dropbox.com/callback'
        ret = sess.build_authorize_url(request_token, callback)
        # TODO: a better test would be to parse out the encoded params
        # and compare, or compare url objects
        self.assertEqual('https://www.dropbox.com/1/oauth/authorize?' +
                         urllib.urlencode({'oauth_token': request_token.key,
                                           'oauth_callback': callback}),
                         ret)

    def test_is_linked(self):
        """is_linked flips to True once an access token is set."""
        sess = self._create_generic_session(None)
        self.assertFalse(sess.is_linked())
        sess.set_token('a', 'b')
        self.assertTrue(sess.is_linked())

    def _parse_token_fail(self, return_value):
        """Shared driver: obtaining a request token must raise when the
        server response body is *return_value*."""
        mock_rest_client = mock.Mock()
        mock_response = mock.Mock()
        mock_rest_client.POST.return_value = mock_response
        mock_response.read.return_value = return_value
        sess = self._create_generic_session(mock_rest_client)
        self.assertRaises(Exception, sess.obtain_request_token)

    def test_parse_token_fail_empty(self):
        self._parse_token_fail('')

    # BUG FIX: this test previously reused the name
    # test_parse_token_fail_empty, shadowing the definition above so that
    # only one of the two ever ran.
    def test_parse_token_fail_empty_querystring(self):
        self._parse_token_fail(urllib.urlencode({}))

    def test_parse_token_fail_no_oauth_token(self):
        self._parse_token_fail(urllib.urlencode({'something': '1'}))

    def test_parse_token_fail_no_oauth_token_secret(self):
        self._parse_token_fail(urllib.urlencode({'oauth_token': '1'}))
| |
'''
Unit tests for yedit
'''
import os
import sys
import unittest
import mock
# Removing invalid variable names for tests so that I can
# keep them brief
# pylint: disable=invalid-name,no-name-in-module
# Disable import-error b/c our libraries aren't loaded in jenkins
# pylint: disable=import-error
# place yedit in our path
yedit_path = os.path.join('/'.join(os.path.realpath(__file__).split('/')[:-4]), 'library') # noqa: E501
sys.path.insert(0, yedit_path)
from yedit import Yedit, YeditException # noqa: E402
# pylint: disable=too-many-public-methods
# Silly pylint, moar tests!
class YeditTest(unittest.TestCase):
    '''
    Test class for yedit
    '''
    # baseline document rewritten to *filename* before every test by setUp
    data = {'a': 'a',
            'b': {'c': {'d': [{'e': 'x'}, 'f', 'g']}},
            } # noqa: E124
    filename = 'yedit_test.yml'

    def setUp(self):
        ''' setup method will create a file and set to known configuration '''
        yed = Yedit(YeditTest.filename)
        yed.yaml_dict = YeditTest.data
        yed.write()

    def test_load(self):
        ''' Testing that loading the file reproduces the known data '''
        yed = Yedit('yedit_test.yml')
        self.assertEqual(yed.yaml_dict, self.data)

    def test_write(self):
        ''' Testing a simple write '''
        yed = Yedit('yedit_test.yml')
        yed.put('key1', 1)
        yed.write()
        self.assertTrue('key1' in yed.yaml_dict)
        self.assertEqual(yed.yaml_dict['key1'], 1)

    def test_write_x_y_z(self):
        '''Testing a write of multilayer key'''
        yed = Yedit('yedit_test.yml')
        yed.put('x.y.z', 'modified')
        yed.write()
        yed.load()
        self.assertEqual(yed.get('x.y.z'), 'modified')

    def test_delete_a(self):
        '''Testing a simple delete '''
        yed = Yedit('yedit_test.yml')
        yed.delete('a')
        yed.write()
        yed.load()
        self.assertTrue('a' not in yed.yaml_dict)

    def test_delete_b_c(self):
        '''Testing delete of layered key '''
        yed = Yedit('yedit_test.yml', separator=':')
        yed.delete('b:c')
        yed.write()
        yed.load()
        self.assertTrue('b' in yed.yaml_dict)
        self.assertFalse('c' in yed.yaml_dict['b'])

    def test_create(self):
        '''Testing a create '''
        os.unlink(YeditTest.filename)
        yed = Yedit('yedit_test.yml')
        yed.create('foo', 'bar')
        yed.write()
        yed.load()
        self.assertTrue('foo' in yed.yaml_dict)
        self.assertTrue(yed.yaml_dict['foo'] == 'bar')

    def test_create_content(self):
        '''Testing a create with content '''
        content = {"foo": "bar"}
        yed = Yedit("yedit_test.yml", content)
        yed.write()
        yed.load()
        self.assertTrue('foo' in yed.yaml_dict)
        # NOTE(review): the second argument of assertTrue is the failure
        # *message*, not an expected value; assertEqual was likely intended.
        self.assertTrue(yed.yaml_dict['foo'], 'bar')

    def test_array_insert(self):
        '''Testing a put into an existing array index'''
        yed = Yedit("yedit_test.yml", separator=':')
        yed.put('b:c:d[0]', 'inject')
        self.assertTrue(yed.get('b:c:d[0]') == 'inject')

    def test_array_insert_first_index(self):
        '''Testing that writing index 0 leaves the element at index 1 intact'''
        yed = Yedit("yedit_test.yml", separator=':')
        yed.put('b:c:d[0]', 'inject')
        self.assertTrue(yed.get('b:c:d[1]') == 'f')

    def test_array_insert_second_index(self):
        '''Testing that writing index 0 leaves the element at index 2 intact'''
        yed = Yedit("yedit_test.yml", separator=':')
        yed.put('b:c:d[0]', 'inject')
        self.assertTrue(yed.get('b:c:d[2]') == 'g')

    def test_dict_array_dict_access(self):
        '''Testing access through a dict -> array -> dict path'''
        yed = Yedit("yedit_test.yml", separator=':')
        yed.put('b:c:d[0]', [{'x': {'y': 'inject'}}])
        self.assertTrue(yed.get('b:c:d[0]:[0]:x:y') == 'inject')

    def test_dict_array_dict_replace(self):
        '''Testing a replace through a dict -> array -> dict path'''
        yed = Yedit("yedit_test.yml", separator=':')
        yed.put('b:c:d[0]', [{'x': {'y': 'inject'}}])
        yed.put('b:c:d[0]:[0]:x:y', 'testing')
        self.assertTrue('b' in yed.yaml_dict)
        self.assertTrue('c' in yed.yaml_dict['b'])
        self.assertTrue('d' in yed.yaml_dict['b']['c'])
        self.assertTrue(isinstance(yed.yaml_dict['b']['c']['d'], list))
        self.assertTrue(isinstance(yed.yaml_dict['b']['c']['d'][0], list))
        self.assertTrue(isinstance(yed.yaml_dict['b']['c']['d'][0][0], dict))
        self.assertTrue('y' in yed.yaml_dict['b']['c']['d'][0][0]['x'])
        self.assertTrue(yed.yaml_dict['b']['c']['d'][0][0]['x']['y'] == 'testing') # noqa: E501

    def test_dict_array_dict_remove(self):
        '''Testing multilevel delete'''
        yed = Yedit("yedit_test.yml", separator=':')
        yed.put('b:c:d[0]', [{'x': {'y': 'inject'}}])
        yed.delete('b:c:d[0]:[0]:x:y')
        self.assertTrue('b' in yed.yaml_dict)
        self.assertTrue('c' in yed.yaml_dict['b'])
        self.assertTrue('d' in yed.yaml_dict['b']['c'])
        self.assertTrue(isinstance(yed.yaml_dict['b']['c']['d'], list))
        self.assertTrue(isinstance(yed.yaml_dict['b']['c']['d'][0], list))
        self.assertTrue(isinstance(yed.yaml_dict['b']['c']['d'][0][0], dict))
        self.assertFalse('y' in yed.yaml_dict['b']['c']['d'][0][0]['x'])

    def test_key_exists_in_dict(self):
        '''Testing exist in dict'''
        yed = Yedit("yedit_test.yml", separator=':')
        yed.put('b:c:d[0]', [{'x': {'y': 'inject'}}])
        self.assertTrue(yed.exists('b:c', 'd'))

    def test_key_exists_in_list(self):
        '''Testing exist in list'''
        yed = Yedit("yedit_test.yml", separator=':')
        yed.put('b:c:d[0]', [{'x': {'y': 'inject'}}])
        self.assertTrue(yed.exists('b:c:d', [{'x': {'y': 'inject'}}]))
        self.assertFalse(yed.exists('b:c:d', [{'x': {'y': 'test'}}]))

    def test_update_to_list_with_index(self):
        '''Testing update to list with index'''
        yed = Yedit("yedit_test.yml", separator=':')
        yed.put('x:y:z', [1, 2, 3])
        yed.update('x:y:z', [5, 6], index=2)
        self.assertTrue(yed.get('x:y:z') == [1, 2, [5, 6]])
        self.assertTrue(yed.exists('x:y:z', [5, 6]))
        self.assertFalse(yed.exists('x:y:z', 4))

    def test_update_to_list_with_curr_value(self):
        '''Testing update to list with curr_value'''
        yed = Yedit("yedit_test.yml", separator=':')
        yed.put('x:y:z', [1, 2, 3])
        yed.update('x:y:z', [5, 6], curr_value=3)
        self.assertTrue(yed.get('x:y:z') == [1, 2, [5, 6]])
        self.assertTrue(yed.exists('x:y:z', [5, 6]))
        self.assertFalse(yed.exists('x:y:z', 4))

    def test_update_to_list(self):
        '''Testing update to list'''
        yed = Yedit("yedit_test.yml", separator=':')
        yed.put('x:y:z', [1, 2, 3])
        yed.update('x:y:z', [5, 6])
        self.assertTrue(yed.get('x:y:z') == [1, 2, 3, [5, 6]])
        self.assertTrue(yed.exists('x:y:z', [5, 6]))
        self.assertFalse(yed.exists('x:y:z', 4))

    def test_append_twice_to_list(self):
        '''Testing append to list'''
        yed = Yedit("yedit_test.yml", separator=':')
        yed.put('x:y:z', [1, 2, 3])
        yed.append('x:y:z', [5, 6])
        yed.append('x:y:z', [5, 6])
        self.assertTrue(yed.get('x:y:z') == [1, 2, 3, [5, 6], [5, 6]])
        self.assertFalse(yed.exists('x:y:z', 4))

    def test_add_item_to_dict(self):
        '''Testing update to dict'''
        yed = Yedit("yedit_test.yml", separator=':')
        yed.put('x:y:z', {'a': 1, 'b': 2})
        yed.update('x:y:z', {'c': 3, 'd': 4})
        self.assertTrue(yed.get('x:y:z') == {'a': 1, 'b': 2, 'c': 3, 'd': 4})
        self.assertTrue(yed.exists('x:y:z', {'c': 3}))

    def test_first_level_dict_with_none_value(self):
        '''test dict value with none value'''
        yed = Yedit(content={'a': None}, separator=":")
        yed.put('a:b:c', 'test')
        self.assertTrue(yed.get('a:b:c') == 'test')
        # NOTE(review): assertTrue(x, msg) -- second argument is a message;
        # assertEqual was likely intended here.
        self.assertTrue(yed.get('a:b'), {'c': 'test'})

    def test_adding_yaml_variable(self):
        '''test that a jinja-style value round-trips unchanged'''
        yed = Yedit("yedit_test.yml", separator=':')
        yed.put('z:y', '{{test}}')
        self.assertTrue(yed.get('z:y') == '{{test}}')

    def test_keys_with_underscore(self):
        '''test keys containing underscores'''
        yed = Yedit("yedit_test.yml", separator=':')
        yed.put('z_:y_y', {'test': '{{test}}'})
        self.assertTrue(yed.get('z_:y_y') == {'test': '{{test}}'})

    def test_first_level_array_update(self):
        '''test update on top level array'''
        yed = Yedit(content=[{'a': 1}, {'b': 2}, {'b': 3}], separator=':')
        yed.update('', {'c': 4})
        self.assertTrue({'c': 4} in yed.get(''))

    def test_first_level_array_delete(self):
        '''test remove top level key'''
        yed = Yedit(content=[{'a': 1}, {'b': 2}, {'b': 3}])
        yed.delete('')
        self.assertTrue({'b': 3} not in yed.get(''))

    def test_first_level_array_get(self):
        '''test get of an entire top level array'''
        yed = Yedit(content=[{'a': 1}, {'b': 2}, {'b': 3}])
        yed.get('')
        self.assertTrue([{'a': 1}, {'b': 2}, {'b': 3}] == yed.yaml_dict)

    def test_pop_list_item(self):
        '''test popping a matching item from a top level list'''
        yed = Yedit(content=[{'a': 1}, {'b': 2}, {'b': 3}], separator=':')
        yed.pop('', {'b': 2})
        self.assertTrue([{'a': 1}, {'b': 3}] == yed.yaml_dict)

    def test_pop_list_item_2(self):
        '''test popping by value from a larger top level list'''
        z = list(range(10))
        yed = Yedit(content=z, separator=':')
        yed.pop('', 5)
        z.pop(5)
        self.assertTrue(z == yed.yaml_dict)

    def test_pop_dict_key(self):
        '''test popping a key from a nested dict'''
        yed = Yedit(content={'a': {'b': {'c': 1, 'd': 2}}}, separator='#')
        yed.pop('a#b', 'c')
        self.assertTrue({'a': {'b': {'d': 2}}} == yed.yaml_dict)

    def test_accessing_path_with_unexpected_objects(self):
        '''test providing source path objects that differ from current object state'''
        yed = Yedit(content={'a': {'b': {'c': ['d', 'e']}}})
        with self.assertRaises(YeditException):
            yed.put('a.b.c.d', 'x')

    def test_creating_new_objects_with_embedded_list(self):
        '''test creating new objects with an embedded list in the creation path'''
        yed = Yedit(content={'a': {'b': 12}})
        with self.assertRaises(YeditException):
            yed.put('new.stuff[0].here', 'value')

    def test_creating_new_objects_with_trailing_list(self):
        '''test creating new object(s) where the final piece is a list'''
        yed = Yedit(content={'a': {'b': 12}})
        with self.assertRaises(YeditException):
            yed.put('new.stuff.here[0]', 'item')

    def test_empty_key_with_int_value(self):
        '''test editing top level with not list or dict'''
        yed = Yedit(content={'a': {'b': 12}})
        result = yed.put('', 'b')
        self.assertFalse(result[0])

    def test_setting_separator(self):
        '''test assigning the separator attribute after construction'''
        yed = Yedit(content={'a': {'b': 12}})
        yed.separator = ':'
        self.assertEqual(yed.separator, ':')

    def test_remove_all(self):
        '''test removing all data'''
        data = Yedit.remove_entry({'a': {'b': 12}}, '')
        self.assertTrue(data)

    def test_remove_list_entry(self):
        '''test removing list entry'''
        data = {'a': {'b': [{'c': 3}]}}
        results = Yedit.remove_entry(data, 'a.b[0]')
        self.assertTrue(results)
        # NOTE(review): assertTrue(x, msg) -- second argument is a message;
        # assertEqual was likely intended here.
        self.assertTrue(data, {'a': {'b': []}})

    def test_parse_value_string_true(self):
        '''test parse_value keeps 'true' as a string when asked'''
        results = Yedit.parse_value('true', 'str')
        self.assertEqual(results, 'true')

    def test_parse_value_bool_true(self):
        '''test parse_value converts 'true' to a boolean'''
        results = Yedit.parse_value('true', 'bool')
        self.assertTrue(results)

    def test_parse_value_bool_exception(self):
        '''test parse_value raises on a non-boolean string'''
        with self.assertRaises(YeditException):
            Yedit.parse_value('TTT', 'bool')

    @mock.patch('yedit.Yedit.write')
    def test_run_ansible_basic(self, mock_write):
        '''run_ansible with in-memory content and no src reports no change'''
        params = {
            'src': None,
            'backup': False,
            'separator': '.',
            'state': 'present',
            'edits': [],
            'value': None,
            'key': None,
            'content': {'a': {'b': {'c': 1}}},
            'content_type': '',
        }
        results = Yedit.run_ansible(params)
        # NOTE(review): side_effect is assigned *after* run_ansible has
        # already been called, so it has no effect on this test -- confirm
        # whether it should be moved above the call.
        mock_write.side_effect = [
            (True, params['content']),
        ]
        self.assertFalse(results['changed'])

    @mock.patch('yedit.Yedit.write')
    def test_run_ansible_and_write(self, mock_write):
        '''run_ansible with a src file reports a change'''
        params = {
            'src': '/tmp/test',
            'backup': False,
            'separator': '.',
            'state': 'present',
            'edits': [],
            'value': None,
            'key': None,
            'content': {'a': {'b': {'c': 1}}},
            'content_type': '',
        }
        results = Yedit.run_ansible(params)
        # NOTE(review): as above, this side_effect is set after the call.
        mock_write.side_effect = [
            (True, params['content']),
        ]
        self.assertTrue(results['changed'])

    def tearDown(self):
        '''TearDown method'''
        os.unlink(YeditTest.filename)
| |
from common_fixtures import * # NOQA
import traceback
import logging
# seconds to wait for a machine/host to reach the expected state
DEFAULT_TIMEOUT = 900
# Digital Ocean configurations
access_key = os.environ.get('DIGITALOCEAN_KEY')
image_name = "ubuntu-14-10-x64"
region = "sfo1"
size = "1gb"
# Digital Ocean default configurations
default_size = "512mb"
default_image_name = "ubuntu-14-04-x64"
default_region = "nyc3"
# Digital Ocean Error Messages
error_msg_auth_failure = "401"
error_msg_invalid_region = "digitalocean requires a valid region"
# skip every machine test unless a Digital Ocean API token is configured
if_machine_digocean = pytest.mark.skipif(
    not os.environ.get('DIGITALOCEAN_KEY'),
    reason='DIGITALOCEAN_KEY is not set')
# Get logger
logger = logging.getLogger(__name__)
@pytest.fixture(scope='session', autouse=True)
def register_host(admin_client):
    """Register the api.host setting derived from the cattle URL (scheme
    stripped) once per test session."""
    full_url = cattle_url()
    # everything after the '//' of the scheme
    api_host = full_url[full_url.index("//") + 2:]
    admin_client.create_setting(name="api.host", value=api_host)
@if_machine_digocean
def test_machine_labels(client):
    """Create a droplet with host labels and run the full life cycle,
    verifying every label is propagated to the registered host."""
    name = random_str()
    labels = {"abc": "def",
              "foo": "bar",
              "spam": "eggs"}
    create_args = {"name": name,
                   "digitaloceanConfig": {"accessToken": access_key,
                                          "image": image_name,
                                          "region": region,
                                          "size": size
                                          },
                   "labels": labels
                   }
    expected_values = {"image": image_name,
                       "region": region,
                       "size": size,
                       }
    try:
        digital_ocean_machine_life_cycle(client,
                                         create_args,
                                         expected_values,
                                         labels)
    finally:
        # always clean up the droplet, even when an assertion fails
        delete_host_in_digital_ocean(name)
@if_machine_digocean
def test_digital_ocean_machine_all_params(client):
    """Life cycle of a droplet created with image, region and size all
    specified explicitly."""
    name = random_str()
    do_config = {"accessToken": access_key,
                 "image": image_name,
                 "region": region,
                 "size": size}
    create_args = {"name": name, "digitaloceanConfig": do_config}
    expected_values = {"image": image_name,
                       "region": region,
                       "size": size}
    try:
        digital_ocean_machine_life_cycle(client, create_args, expected_values)
    finally:
        delete_host_in_digital_ocean(name)
@if_machine_digocean
def test_digital_ocean_machine_accesstoken(client):
    """Life cycle of a droplet created with only an access token; the
    provider's default image/region/size are expected."""
    name = random_str()
    # BUG FIX: the machine must be created under *name* -- previously a
    # second random_str() was used here, so the finally-block below tried
    # to delete a host name that was never created, leaking the droplet.
    create_args = {"name": name,
                   "digitaloceanConfig": {"accessToken": access_key,
                                          }
                   }
    expected_values = {"image": default_image_name,
                       "region": default_region,
                       "size": default_size
                       }
    try:
        digital_ocean_machine_life_cycle(client, create_args, expected_values)
    finally:
        delete_host_in_digital_ocean(name)
@if_machine_digocean
def test_digital_ocean_machine_parallel(client):
    """Create two droplets concurrently, verify both machines and hosts
    become active, then remove them and confirm destruction upstream."""
    create_args = {"name": None,
                   "digitaloceanConfig": {"accessToken": access_key
                                          }
                   }
    machines = []
    try:
        # Create 2 Digital Ocean Machines in parallel
        for n in range(0, 2):
            name = random_str() + "-parallel-" + str(n)
            create_args["name"] = name
            machine = client.create_machine(**create_args)
            machines.append(machine)
        # Check if both the machine and host get to "active" state
        for machine in machines:
            machine = client.wait_success(machine, timeout=DEFAULT_TIMEOUT)
            assert machine.state == 'active'
            machine = wait_for_host(client, machine)
            host = machine.hosts()[0]
            assert host.state == 'active'
        for machine in machines:
            machine = client.wait_success(machine.remove())
            assert machine.state == 'removed'
            host = machine.hosts()[0]
            assert host.state == 'removed'
            wait_for_host_destroy_in_digital_ocean(
                host.ipAddresses()[0].address)
    finally:
        # best-effort cleanup of both droplets regardless of test outcome
        for machine in machines:
            delete_host_in_digital_ocean(machine.name)
@if_machine_digocean
def test_digital_ocean_machine_invalid_access_token(client):
    """An invalid access token must drive the machine to 'error' with a 401
    in its transitioning message and register no hosts."""
    name = random_str()
    create_args = {"name": name,
                   "digitaloceanConfig": {"accessToken": "1234abcdefg",
                                          "image": image_name,
                                          "region": region,
                                          "size": size
                                          }
                   }
    # Create a Digital Ocean Machine with invalid access token
    machine = client.create_machine(**create_args)
    machine = wait_for_condition(client,
                                 machine,
                                 lambda x: x.state == 'error',
                                 lambda x: 'Machine state is ' + x.state
                                 )
    assert error_msg_auth_failure in machine.transitioningMessage
    hosts = machine.hosts()
    assert len(hosts) == 0
    machine = client.wait_success(machine.remove())
    assert machine.state == 'removed'
@if_machine_digocean
def test_digital_ocean_machine_invalid_region(client):
    """A machine created with a nonexistent region must go to 'error' with
    an invalid-region message and never produce a host.
    """
    name = random_str()
    create_args = {"name": name,
                   "digitaloceanConfig": {"accessToken": access_key,
                                          "image": image_name,
                                          "region": "abc",
                                          "size": size
                                          }
                   }
    # Create a Digital Ocean Machine with an invalid region
    machine = client.create_machine(**create_args)
    machine = wait_for_condition(client,
                                 machine,
                                 lambda x: x.state == 'error',
                                 lambda x: 'Machine state is ' + x.state
                                 )
    assert error_msg_invalid_region in machine.transitioningMessage
    # No host should be associated with a machine that failed provisioning.
    hosts = machine.hosts()
    assert len(hosts) == 0
    machine = client.wait_success(machine.remove())
    assert machine.state == 'removed'
def digital_ocean_machine_life_cycle(client, configs, expected_values,
                                     labels=None):
    """Drives a full create/verify/delete cycle for a Digital Ocean machine.

    Args:
        client: cattle API client.
        configs (dict): kwargs passed to ``client.create_machine``.
        expected_values (dict): expected droplet 'image', 'size' and
            'region' slugs.
        labels (dict): when given, every host label must be present here
            with a matching value.
    """
    # Create a Digital Ocean Machine
    machine = client.create_machine(**configs)
    machine = client.wait_success(machine, timeout=DEFAULT_TIMEOUT)
    assert machine.state == 'active'
    # Wait until host shows up with some physicalHostId
    machine = wait_for_host(client, machine)
    host = machine.hosts()[0]
    assert host.state == 'active'
    assert machine.accountId == host.accountId
    # Check that the droplet that is being created in Digital Ocean has the
    # correct configurations
    droplet = check_host_in_digital_ocean(host.ipAddresses()[0].address)
    if labels is not None:
        for label in host.hostLabels():
            assert label.key in labels
            assert labels[label.key] == label.value
    assert droplet is not None
    assert droplet["name"] == machine.name
    assert droplet["image"]["slug"] == expected_values["image"]
    assert droplet["size_slug"] == expected_values["size"]
    assert droplet["region"]["slug"] == expected_values["region"]
    # Remove the machine and make sure that the host
    # and the machine get removed
    machine = client.wait_success(machine.remove())
    assert machine.state == 'removed'
    host = client.reload(machine.hosts()[0])
    assert host.state == 'removed'
    wait_for_host_destroy_in_digital_ocean(host.ipAddresses()[0].address)
def wait_for_host(client, machine):
    """Waits until `machine` has exactly one host and that host is 'active'.

    Returns:
        The machine object (the active host can be fetched via
        ``machine.hosts()[0]``).
    """
    wait_for_condition(client,
                       machine,
                       lambda x: len(x.hosts()) == 1,
                       lambda x: 'Number of hosts associated with machine ' +
                                 str(len(x.hosts())),
                       DEFAULT_TIMEOUT)
    host = machine.hosts()[0]
    host = wait_for_condition(client,
                              host,
                              lambda x: x.state == 'active',
                              lambda x: 'Host state is ' + x.state
                              )
    return machine
def check_host_in_digital_ocean(ipaddress):
    """Looks up a droplet by IPv4 address through the Digital Ocean API.

    Args:
        ipaddress (str): IPv4 address to match against the droplet list.

    Returns:
        dict: the droplet record, or None when no droplet owns `ipaddress`.
    """
    url = 'https://api.digitalocean.com/v2/droplets'
    headers = {'Authorization': "Bearer " + access_key}
    r = requests.get(url, headers=headers)
    response = r.json()
    r.close()
    droplet_list = response["droplets"]
    matched_droplet = None
    for droplet in droplet_list:
        # NOTE(review): only the first v4 network entry is compared —
        # assumes the relevant address is listed first; confirm.
        if droplet["networks"]["v4"][0]["ip_address"] == ipaddress:
            matched_droplet = droplet
            break
    return matched_droplet
def delete_host_in_digital_ocean(name):
    """Best-effort deletion of the droplet named `name`.

    Any failure is logged (with traceback) but never raised, so a cleanup
    call cannot mask the original test failure.
    """
    try:
        url = 'https://api.digitalocean.com/v2/droplets'
        headers = {'Authorization': "Bearer " + access_key}
        r = requests.get(url, headers=headers)
        response = r.json()
        r.close()
        droplet_list = response["droplets"]
        for droplet in droplet_list:
            if droplet["name"] == name:
                url = 'https://api.digitalocean.com/v2/droplets/' + \
                    str(droplet["id"])
                headers = {'Authorization': "Bearer " + access_key}
                try:
                    r = requests.delete(url, headers=headers)
                finally:
                    # Always release the DELETE response connection.
                    r.close()
    except Exception:
        error_msg = "Error encountered when trying to delete machine - " + name
        logger.error(msg=error_msg)
        logger.error(msg=traceback.format_exc())
def wait_for_host_destroy_in_digital_ocean(ipaddress, timeout=300):
    """Polls Digital Ocean until the droplet owning `ipaddress` disappears.

    Args:
        ipaddress (str): droplet public IPv4 address.
        timeout (int): seconds to wait before giving up.

    Raises:
        Exception: if the droplet still exists after `timeout` seconds.
    """
    start = time.time()
    time_elapsed = 0
    host = check_host_in_digital_ocean(ipaddress)
    while host is not None:
        # A droplet pending destruction is reported as locked.
        assert host["locked"] is True
        time.sleep(2)
        host = check_host_in_digital_ocean(ipaddress)
        time_elapsed = time.time() - start
        if time_elapsed > timeout:
            # Bug fix: the message previously embedded the literal text
            # "str(time_elapsed)" instead of the value, and claimed to be
            # waiting for host *creation* rather than destruction.
            time_elapsed_msg = ("Timeout waiting for host to be destroyed - "
                                + str(time_elapsed) + " seconds")
            logger.error(msg=time_elapsed_msg)
            raise Exception(time_elapsed_msg)
| |
# Copyright 2014 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ddt
import mock
from novaclient import exceptions as nova_exception
from novaclient import utils
from novaclient.v2 import servers as nova_servers
from manila.compute import nova
from manila import context
from manila import exception
from manila import test
from manila.volume import cinder
class Volume(object):
    """Minimal stand-in for a volume object used by the fakes below."""

    def __init__(self, volume_id):
        # The fake exposes the same identifier under both attributes.
        self.id = self.name = volume_id
class Network(object):
    """Minimal stand-in for a network object used by the fakes below."""

    def __init__(self, net_id):
        self.id = net_id
        label = 'fake_label_%s' % net_id
        self.label = label
class FakeNovaClient(object):
    """In-memory stand-in for a novaclient instance.

    ``Servers`` and ``Volumes`` resolve any attribute that is not
    explicitly defined to ``None`` (via ``__getattr__``), which lets the
    tests mock individual manager methods one at a time.
    """

    class Servers(object):
        def get(self, instance_id):
            return dict(id=instance_id)

        def list(self, *args, **kwargs):
            return [dict(id='id1'), dict(id='id2')]

        def create(self, *args, **kwargs):
            return dict(id='created_id')

        def __getattr__(self, item):
            # Unknown manager methods resolve to None so they can be mocked.
            return None

    class Volumes(object):
        def get(self, volume_id):
            return Volume(volume_id)

        def list(self, detailed, *args, **kwargs):
            return [dict(id='id1'), dict(id='id2')]

        def create(self, *args, **kwargs):
            return dict(id='created_id')

        def __getattr__(self, item):
            return None

    class Networks(object):
        def get(self, net_id):
            return Network(net_id)

    class FixedIPs(object):
        def get(self, fixed_ip):
            return {'address': fixed_ip}

        def reserve(self, fixed_ip):
            return None

        def unreserve(self, fixed_ip):
            return None

    def __init__(self):
        self.servers = self.Servers()
        self.volumes = self.Volumes()
        # The keypairs manager shares the servers fake in these tests.
        self.keypairs = self.servers
        self.networks = self.Networks()
        self.fixed_ips = self.FixedIPs()
@nova.translate_server_exception
def decorated_by_translate_server_exception(self, context, instance_id, exc):
    """Probe function used to exercise the translate_server_exception
    decorator: raises `exc(instance_id)` when `exc` is given, otherwise
    returns 'OK'.
    """
    if exc:
        raise exc(instance_id)
    else:
        return 'OK'
@ddt.ddt
class TranslateServerExceptionTestCase(test.TestCase):
    """Verifies translate_server_exception maps novaclient errors onto the
    corresponding manila exceptions."""

    def test_translate_server_exception(self):
        # No exception raised: the decorated function's value passes through.
        result = decorated_by_translate_server_exception(
            'foo_self', 'foo_ctxt', 'foo_instance_id', None)
        self.assertEqual('OK', result)

    def test_translate_server_exception_not_found(self):
        # nova NotFound -> manila InstanceNotFound.
        self.assertRaises(
            exception.InstanceNotFound,
            decorated_by_translate_server_exception,
            'foo_self', 'foo_ctxt', 'foo_instance_id', nova_exception.NotFound)

    def test_translate_server_exception_bad_request(self):
        # nova BadRequest -> manila InvalidInput.
        self.assertRaises(
            exception.InvalidInput,
            decorated_by_translate_server_exception,
            'foo_self', 'foo_ctxt', 'foo_instance_id',
            nova_exception.BadRequest)

    @ddt.data(
        nova_exception.HTTPNotImplemented,
        nova_exception.RetryAfterException,
        nova_exception.Unauthorized,
        nova_exception.Forbidden,
        nova_exception.MethodNotAllowed,
        nova_exception.OverLimit,
        nova_exception.RateLimit,
    )
    def test_translate_server_exception_other_exception(self, exc):
        # Any other nova client error becomes a generic ManilaException.
        self.assertRaises(
            exception.ManilaException,
            decorated_by_translate_server_exception,
            'foo_self', 'foo_ctxt', 'foo_instance_id', exc)
@ddt.ddt
class NovaApiTestCase(test.TestCase):
    """Tests for manila.compute.nova.API, run against FakeNovaClient.

    setUp patches nova.novaclient to return the fake client and replaces
    _untranslate_server_summary_view with the identity function, so each
    API method's pass-through behavior can be asserted directly.
    """

    def setUp(self):
        super(NovaApiTestCase, self).setUp()
        self.api = nova.API()
        self.novaclient = FakeNovaClient()
        self.ctx = context.get_admin_context()
        self.mock_object(nova, 'novaclient',
                         mock.Mock(return_value=self.novaclient))
        self.mock_object(nova, '_untranslate_server_summary_view',
                         lambda server: server)

    def test_server_create(self):
        result = self.api.server_create(self.ctx, 'server_name', 'fake_image',
                                        'fake_flavor', None, None, None)
        self.assertEqual('created_id', result['id'])

    def test_server_delete(self):
        self.mock_object(self.novaclient.servers, 'delete')
        self.api.server_delete(self.ctx, 'id1')
        self.novaclient.servers.delete.assert_called_once_with('id1')

    def test_server_get(self):
        instance_id = 'instance_id1'
        result = self.api.server_get(self.ctx, instance_id)
        self.assertEqual(instance_id, result['id'])

    def test_server_get_by_name_or_id(self):
        instance_id = 'instance_id1'
        server = {'id': instance_id, 'fake_key': 'fake_value'}
        self.mock_object(utils, 'find_resource',
                         mock.Mock(return_value=server))
        result = self.api.server_get_by_name_or_id(self.ctx, instance_id)
        self.assertEqual(instance_id, result['id'])
        utils.find_resource.assert_called_once_with(mock.ANY, instance_id)

    @ddt.data(
        {'nova_e': nova_exception.NotFound(404),
         'manila_e': exception.InstanceNotFound},
        {'nova_e': nova_exception.BadRequest(400),
         'manila_e': exception.InvalidInput},
    )
    @ddt.unpack
    def test_server_get_failed(self, nova_e, manila_e):
        # Client construction itself raises; the API must translate it.
        nova.novaclient.side_effect = nova_e
        instance_id = 'instance_id'
        self.assertRaises(manila_e, self.api.server_get, self.ctx, instance_id)

    def test_server_list(self):
        self.assertEqual([{'id': 'id1'}, {'id': 'id2'}],
                         self.api.server_list(self.ctx))

    def test_server_pause(self):
        self.mock_object(self.novaclient.servers, 'pause')
        self.api.server_pause(self.ctx, 'id1')
        self.novaclient.servers.pause.assert_called_once_with('id1')

    def test_server_unpause(self):
        self.mock_object(self.novaclient.servers, 'unpause')
        self.api.server_unpause(self.ctx, 'id1')
        self.novaclient.servers.unpause.assert_called_once_with('id1')

    def test_server_suspend(self):
        self.mock_object(self.novaclient.servers, 'suspend')
        self.api.server_suspend(self.ctx, 'id1')
        self.novaclient.servers.suspend.assert_called_once_with('id1')

    def test_server_resume(self):
        self.mock_object(self.novaclient.servers, 'resume')
        self.api.server_resume(self.ctx, 'id1')
        self.novaclient.servers.resume.assert_called_once_with('id1')

    def test_server_reboot_hard(self):
        # Default reboot is a hard reboot.
        self.mock_object(self.novaclient.servers, 'reboot')
        self.api.server_reboot(self.ctx, 'id1')
        self.novaclient.servers.reboot.assert_called_once_with(
            'id1', nova_servers.REBOOT_HARD)

    def test_server_reboot_soft(self):
        self.mock_object(self.novaclient.servers, 'reboot')
        self.api.server_reboot(self.ctx, 'id1', True)
        self.novaclient.servers.reboot.assert_called_once_with(
            'id1', nova_servers.REBOOT_SOFT)

    def test_server_rebuild(self):
        self.mock_object(self.novaclient.servers, 'rebuild')
        self.api.server_rebuild(self.ctx, 'id1', 'fake_image')
        self.novaclient.servers.rebuild.assert_called_once_with('id1',
                                                                'fake_image',
                                                                None)

    def test_instance_volume_attach(self):
        self.mock_object(self.novaclient.volumes, 'create_server_volume')
        self.api.instance_volume_attach(self.ctx, 'instance_id',
                                        'vol_id', 'device')
        self.novaclient.volumes.create_server_volume.\
            assert_called_once_with('instance_id', 'vol_id', 'device')

    def test_instance_volume_detach(self):
        self.mock_object(self.novaclient.volumes, 'delete_server_volume')
        self.api.instance_volume_detach(self.ctx, 'instance_id',
                                        'att_id')
        self.novaclient.volumes.delete_server_volume.\
            assert_called_once_with('instance_id', 'att_id')

    def test_instance_volumes_list(self):
        self.mock_object(
            self.novaclient.volumes, 'get_server_volumes',
            mock.Mock(return_value=[Volume('id1'), Volume('id2')]))
        # Reuse the nova fake as the cinder client as well.
        self.cinderclient = self.novaclient
        self.mock_object(cinder, 'cinderclient',
                         mock.Mock(return_value=self.novaclient))
        result = self.api.instance_volumes_list(self.ctx, 'instance_id')
        self.assertEqual(2, len(result))
        self.assertEqual('id1', result[0].id)
        self.assertEqual('id2', result[1].id)

    def test_server_update(self):
        self.mock_object(self.novaclient.servers, 'update')
        self.api.server_update(self.ctx, 'id1', 'new_name')
        self.novaclient.servers.update.assert_called_once_with('id1',
                                                               name='new_name')

    def test_update_server_volume(self):
        self.mock_object(self.novaclient.volumes, 'update_server_volume')
        self.api.update_server_volume(self.ctx, 'instance_id', 'att_id',
                                      'new_vol_id')
        self.novaclient.volumes.update_server_volume.\
            assert_called_once_with('instance_id', 'att_id', 'new_vol_id')

    def test_keypair_create(self):
        self.mock_object(self.novaclient.keypairs, 'create')
        self.api.keypair_create(self.ctx, 'keypair_name')
        self.novaclient.keypairs.create.assert_called_once_with('keypair_name')

    def test_keypair_import(self):
        self.mock_object(self.novaclient.keypairs, 'create')
        self.api.keypair_import(self.ctx, 'keypair_name', 'fake_pub_key')
        self.novaclient.keypairs.create.\
            assert_called_once_with('keypair_name', 'fake_pub_key')

    def test_keypair_delete(self):
        self.mock_object(self.novaclient.keypairs, 'delete')
        self.api.keypair_delete(self.ctx, 'fake_keypair_id')
        self.novaclient.keypairs.delete.\
            assert_called_once_with('fake_keypair_id')

    def test_keypair_list(self):
        self.assertEqual([{'id': 'id1'}, {'id': 'id2'}],
                         self.api.keypair_list(self.ctx))

    def test_fixed_ip_get(self):
        fixed_ip = 'fake_fixed_ip'
        result = self.api.fixed_ip_get(self.ctx, fixed_ip)
        self.assertIsInstance(result, dict)
        self.assertEqual(fixed_ip, result['address'])

    def test_fixed_ip_reserve(self):
        fixed_ip = 'fake_fixed_ip'
        result = self.api.fixed_ip_reserve(self.ctx, fixed_ip)
        self.assertIsNone(result)

    def test_fixed_ip_unreserve(self):
        fixed_ip = 'fake_fixed_ip'
        result = self.api.fixed_ip_unreserve(self.ctx, fixed_ip)
        self.assertIsNone(result)

    def test_network_get(self):
        net_id = 'fake_net_id'
        net = self.api.network_get(self.ctx, net_id)
        self.assertIsInstance(net, dict)
        self.assertEqual(net_id, net['id'])
class ToDictTestCase(test.TestCase):
    """Tests for the nova._to_dict helper."""

    def test_dict_provided(self):
        # A plain dict must be returned unchanged.
        fake_dict = {'foo_key': 'foo_value', 'bar_key': 'bar_value'}
        result = nova._to_dict(fake_dict)
        self.assertEqual(fake_dict, result)

    def test_obj_provided_with_to_dict_method(self):
        # An object exposing to_dict() delegates to that method.
        expected = {'foo': 'bar'}

        class FakeObj(object):
            def __init__(self):
                self.fake_attr = 'fake_attr_value'

            def to_dict(self):
                return expected

        fake_obj = FakeObj()
        result = nova._to_dict(fake_obj)
        self.assertEqual(expected, result)

    def test_obj_provided_without_to_dict_method(self):
        # Without to_dict(), the instance attributes are used.
        expected = {'foo': 'bar'}

        class FakeObj(object):
            def __init__(self):
                self.foo = expected['foo']

        fake_obj = FakeObj()
        result = nova._to_dict(fake_obj)
        self.assertEqual(expected, result)
| |
#!/usr/bin/env python3
# Copyright (c) 2016-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the bumpfee RPC.
Verifies that the bumpfee RPC creates replacement transactions successfully when
its preconditions are met, and returns appropriate errors in other cases.
This module consists of around a dozen individual test cases implemented in the
top-level functions named as test_<test_case_description>. The test functions
can be disabled or reordered if needed for debugging. If new test cases are
added in the future, they should try to follow the same convention and not
make assumptions about execution order.
"""
from test_framework.blocktools import send_to_witness
from test_framework.test_framework import VergeTestFramework
from test_framework import blocktools
from test_framework.messages import BIP125_SEQUENCE_NUMBER
from test_framework.mininode import CTransaction
from test_framework.util import *
import io
# Passphrase used to encrypt node 1's wallet for test_locked_wallet_fails,
# and how long (seconds) the unlocked state is kept alive during the run.
WALLET_PASSPHRASE = "test"
WALLET_PASSPHRASE_TIMEOUT = 3600
class BumpFeeTest(VergeTestFramework):
    """Functional-test driver: funds an RBF-enabled node, then runs every
    top-level test_* case in this file against it."""

    def set_test_params(self):
        self.num_nodes = 2
        self.setup_clean_chain = True
        # Node i runs with -walletrbf=i: node 0 has RBF off, node 1 has it on.
        self.extra_args = [["-deprecatedrpc=addwitnessaddress", "-walletrbf={}".format(i)]
                           for i in range(self.num_nodes)]

    def run_test(self):
        # Encrypt wallet for test_locked_wallet_fails test
        self.nodes[1].node_encrypt_wallet(WALLET_PASSPHRASE)
        self.start_node(1)
        self.nodes[1].walletpassphrase(WALLET_PASSPHRASE, WALLET_PASSPHRASE_TIMEOUT)
        connect_nodes_bi(self.nodes, 0, 1)
        self.sync_all()
        peer_node, rbf_node = self.nodes
        rbf_node_address = rbf_node.getnewaddress()
        # fund rbf node with 10 coins of 0.001 XSH (100,000 satoshis)
        self.log.info("Mining blocks...")
        peer_node.generate(110)
        self.sync_all()
        for i in range(25):
            peer_node.sendtoaddress(rbf_node_address, 0.001)
        self.sync_all()
        peer_node.generate(1)
        self.sync_all()
        assert_equal(rbf_node.getbalance(), Decimal("0.025"))
        self.log.info("Running tests")
        dest_address = peer_node.getnewaddress()
        # The cases below are intentionally order-independent (see module
        # docstring); each one creates the transactions it needs.
        test_simple_bumpfee_succeeds(rbf_node, peer_node, dest_address)
        test_segwit_bumpfee_succeeds(rbf_node, dest_address)
        test_nonrbf_bumpfee_fails(peer_node, dest_address)
        test_notmine_bumpfee_fails(rbf_node, peer_node, dest_address)
        test_bumpfee_with_descendant_fails(rbf_node, rbf_node_address, dest_address)
        test_small_output_fails(rbf_node, dest_address)
        test_dust_to_fee(rbf_node, dest_address)
        test_settxfee(rbf_node, dest_address)
        test_rebumping(rbf_node, dest_address)
        test_rebumping_not_replaceable(rbf_node, dest_address)
        test_unconfirmed_not_spendable(rbf_node, rbf_node_address)
        test_bumpfee_metadata(rbf_node, dest_address)
        test_locked_wallet_fails(rbf_node, dest_address)
        self.log.info("Success")
def test_simple_bumpfee_succeeds(rbf_node, peer_node, dest_address):
    """Happy path: bumpfee replaces an opt-in RBF tx and links the wallet
    records of the original and replacement transactions."""
    rbfid = spend_one_input(rbf_node, dest_address)
    rbftx = rbf_node.gettransaction(rbfid)
    sync_mempools((rbf_node, peer_node))
    assert rbfid in rbf_node.getrawmempool() and rbfid in peer_node.getrawmempool()
    bumped_tx = rbf_node.bumpfee(rbfid)
    assert_equal(bumped_tx["errors"], [])
    # The replacement must pay strictly more fee than the original.
    assert bumped_tx["fee"] - abs(rbftx["fee"]) > 0
    # check that bumped_tx propagates, original tx was evicted and has a wallet conflict
    sync_mempools((rbf_node, peer_node))
    assert bumped_tx["txid"] in rbf_node.getrawmempool()
    assert bumped_tx["txid"] in peer_node.getrawmempool()
    assert rbfid not in rbf_node.getrawmempool()
    assert rbfid not in peer_node.getrawmempool()
    oldwtx = rbf_node.gettransaction(rbfid)
    assert len(oldwtx["walletconflicts"]) > 0
    # check wallet transaction replaces and replaced_by values
    bumpedwtx = rbf_node.gettransaction(bumped_tx["txid"])
    assert_equal(oldwtx["replaced_by_txid"], bumped_tx["txid"])
    assert_equal(bumpedwtx["replaces_txid"], rbfid)
def test_segwit_bumpfee_succeeds(rbf_node, dest_address):
    """bumpfee works on an RBF transaction that spends a segwit output."""
    # Create a transaction with segwit output, then create an RBF transaction
    # which spends it, and make sure bumpfee can be called on it.
    segwit_in = next(u for u in rbf_node.listunspent() if u["amount"] == Decimal("0.001"))
    segwit_out = rbf_node.getaddressinfo(rbf_node.getnewaddress())
    rbf_node.addwitnessaddress(segwit_out["address"])
    segwitid = send_to_witness(
        use_p2wsh=False,
        node=rbf_node,
        utxo=segwit_in,
        pubkey=segwit_out["pubkey"],
        encode_p2sh=False,
        amount=Decimal("0.0009"),
        sign=True)
    # Spend the segwit output with an explicit BIP125 sequence to opt in.
    rbfraw = rbf_node.createrawtransaction([{
        'txid': segwitid,
        'vout': 0,
        "sequence": BIP125_SEQUENCE_NUMBER
    }], {dest_address: Decimal("0.0005"),
         rbf_node.getrawchangeaddress(): Decimal("0.0003")})
    rbfsigned = rbf_node.signrawtransactionwithwallet(rbfraw)
    rbfid = rbf_node.sendrawtransaction(rbfsigned["hex"])
    assert rbfid in rbf_node.getrawmempool()
    bumped_tx = rbf_node.bumpfee(rbfid)
    assert bumped_tx["txid"] in rbf_node.getrawmempool()
    assert rbfid not in rbf_node.getrawmempool()
def test_nonrbf_bumpfee_fails(peer_node, dest_address):
    """bumpfee must reject a transaction that did not opt in to RBF."""
    # cannot replace a non RBF transaction (from node which did not enable RBF)
    not_rbfid = peer_node.sendtoaddress(dest_address, Decimal("0.00090000"))
    assert_raises_rpc_error(-4, "not BIP 125 replaceable", peer_node.bumpfee, not_rbfid)
def test_notmine_bumpfee_fails(rbf_node, peer_node, dest_address):
    """bumpfee must refuse a tx containing an input from another wallet."""
    # cannot bump fee unless the tx has only inputs that we own.
    # here, the rbftx has a peer_node coin and then adds a rbf_node input
    # Note that this test depends upon the RPC code checking input ownership prior to change outputs
    # (since it can't use fundrawtransaction, it lacks a proper change output)
    utxos = [node.listunspent()[-1] for node in (rbf_node, peer_node)]
    inputs = [{
        "txid": utxo["txid"],
        "vout": utxo["vout"],
        "address": utxo["address"],
        "sequence": BIP125_SEQUENCE_NUMBER
    } for utxo in utxos]
    output_val = sum(utxo["amount"] for utxo in utxos) - Decimal("0.001")
    rawtx = rbf_node.createrawtransaction(inputs, {dest_address: output_val})
    # Each wallet signs its own input in turn.
    signedtx = rbf_node.signrawtransactionwithwallet(rawtx)
    signedtx = peer_node.signrawtransactionwithwallet(signedtx["hex"])
    rbfid = rbf_node.sendrawtransaction(signedtx["hex"])
    assert_raises_rpc_error(-4, "Transaction contains inputs that don't belong to this wallet",
                            rbf_node.bumpfee, rbfid)
def test_bumpfee_with_descendant_fails(rbf_node, rbf_node_address, dest_address):
    """bumpfee must refuse when the tx has an in-wallet descendant."""
    # cannot bump fee if the transaction has a descendant
    # parent is send-to-self, so we don't have to check which output is change when creating the child tx
    parent_id = spend_one_input(rbf_node, rbf_node_address)
    tx = rbf_node.createrawtransaction([{"txid": parent_id, "vout": 0}], {dest_address: 0.00020000})
    tx = rbf_node.signrawtransactionwithwallet(tx)
    rbf_node.sendrawtransaction(tx["hex"])
    assert_raises_rpc_error(-8, "Transaction has descendants in the wallet", rbf_node.bumpfee, parent_id)
def test_small_output_fails(rbf_node, dest_address):
    """A totalFee that would push the change output below the dust limit
    must be rejected (50000 is accepted, 50001 is not)."""
    # cannot bump fee with a too-small output
    rbfid = spend_one_input(rbf_node, dest_address)
    rbf_node.bumpfee(rbfid, {"totalFee": 50000})
    rbfid = spend_one_input(rbf_node, dest_address)
    assert_raises_rpc_error(-4, "Change output is too small", rbf_node.bumpfee, rbfid, {"totalFee": 50001})
def test_dust_to_fee(rbf_node, dest_address):
    """When the bump leaves a dust-sized change output, that output is
    dropped entirely and its value absorbed into the fee."""
    # check that if output is reduced to dust, it will be converted to fee
    # the bumped tx sets fee=49,900, but it converts to 50,000
    rbfid = spend_one_input(rbf_node, dest_address)
    fulltx = rbf_node.getrawtransaction(rbfid, 1)
    # (32-byte p2sh-pwpkh output size + 148 p2pkh spend estimate) * 10k(discard_rate) / 1000 = 1800
    # P2SH outputs are slightly "over-discarding" due to the IsDust calculation assuming it will
    # be spent as a P2PKH.
    bumped_tx = rbf_node.bumpfee(rbfid, {"totalFee": 50000-1800})
    full_bumped_tx = rbf_node.getrawtransaction(bumped_tx["txid"], 1)
    assert_equal(bumped_tx["fee"], Decimal("0.00050000"))
    assert_equal(len(fulltx["vout"]), 2)
    assert_equal(len(full_bumped_tx["vout"]), 1)  #change output is eliminated
def test_settxfee(rbf_node, dest_address):
    """bumpfee honors the wallet's paytxfee (settxfee) as the target feerate."""
    # check that bumpfee reacts correctly to the use of settxfee (paytxfee)
    rbfid = spend_one_input(rbf_node, dest_address)
    requested_feerate = Decimal("0.00025000")
    rbf_node.settxfee(requested_feerate)
    bumped_tx = rbf_node.bumpfee(rbfid)
    actual_feerate = bumped_tx["fee"] * 1000 / rbf_node.getrawtransaction(bumped_tx["txid"], True)["vsize"]
    # Assert that the difference between the requested feerate and the actual
    # feerate of the bumped transaction is small.
    assert_greater_than(Decimal("0.00001000"), abs(requested_feerate - actual_feerate))
    rbf_node.settxfee(Decimal("0.00000000"))  # unset paytxfee
def test_rebumping(rbf_node, dest_address):
    """Re-bumping the replaced tx fails, but the replacement can be bumped."""
    # check that re-bumping the original tx fails, but bumping the bumper succeeds
    rbfid = spend_one_input(rbf_node, dest_address)
    bumped = rbf_node.bumpfee(rbfid, {"totalFee": 2000})
    assert_raises_rpc_error(-4, "already bumped", rbf_node.bumpfee, rbfid, {"totalFee": 3000})
    rbf_node.bumpfee(bumped["txid"], {"totalFee": 3000})
def test_rebumping_not_replaceable(rbf_node, dest_address):
    """A bump created with replaceable=False cannot itself be bumped."""
    # check that re-bumping a non-replaceable bump tx fails
    rbfid = spend_one_input(rbf_node, dest_address)
    bumped = rbf_node.bumpfee(rbfid, {"totalFee": 10000, "replaceable": False})
    assert_raises_rpc_error(-4, "Transaction is not BIP 125 replaceable", rbf_node.bumpfee, bumped["txid"],
                            {"totalFee": 20000})
def test_unconfirmed_not_spendable(rbf_node, rbf_node_address):
    """Outputs of both the bump tx and the replaced tx stay unspendable while
    unconfirmed; the original's output becomes spendable once mined."""
    # check that unconfirmed outputs from bumped transactions are not spendable
    rbfid = spend_one_input(rbf_node, rbf_node_address)
    rbftx = rbf_node.gettransaction(rbfid)["hex"]
    assert rbfid in rbf_node.getrawmempool()
    bumpid = rbf_node.bumpfee(rbfid)["txid"]
    assert bumpid in rbf_node.getrawmempool()
    assert rbfid not in rbf_node.getrawmempool()
    # check that outputs from the bump transaction are not spendable
    # due to the replaces_txid check in CWallet::AvailableCoins
    assert_equal([t for t in rbf_node.listunspent(minconf=0, include_unsafe=False) if t["txid"] == bumpid], [])
    # submit a block with the rbf tx to clear the bump tx out of the mempool,
    # then invalidate the block so the rbf tx will be put back in the mempool.
    # This makes it possible to check whether the rbf tx outputs are
    # spendable before the rbf tx is confirmed.
    block = submit_block_with_tx(rbf_node, rbftx)
    # Can not abandon conflicted tx
    assert_raises_rpc_error(-5, 'Transaction not eligible for abandonment', lambda: rbf_node.abandontransaction(txid=bumpid))
    rbf_node.invalidateblock(block.hash)
    # Call abandon to make sure the wallet doesn't attempt to resubmit
    # the bump tx and hope the wallet does not rebroadcast before we call.
    rbf_node.abandontransaction(bumpid)
    assert bumpid not in rbf_node.getrawmempool()
    assert rbfid in rbf_node.getrawmempool()
    # check that outputs from the rbf tx are not spendable before the
    # transaction is confirmed, due to the replaced_by_txid check in
    # CWallet::AvailableCoins
    assert_equal([t for t in rbf_node.listunspent(minconf=0, include_unsafe=False) if t["txid"] == rbfid], [])
    # check that the main output from the rbf tx is spendable after confirmed
    rbf_node.generate(1)
    assert_equal(
        sum(1 for t in rbf_node.listunspent(minconf=0, include_unsafe=False)
            if t["txid"] == rbfid and t["address"] == rbf_node_address and t["spendable"]), 1)
def test_bumpfee_metadata(rbf_node, dest_address):
    """The wallet "comment" and "to" metadata must carry over to the bump."""
    rbfid = rbf_node.sendtoaddress(dest_address, Decimal("0.00100000"), "comment value", "to value")
    bumped_tx = rbf_node.bumpfee(rbfid)
    bumped_wtx = rbf_node.gettransaction(bumped_tx["txid"])
    assert_equal(bumped_wtx["comment"], "comment value")
    assert_equal(bumped_wtx["to"], "to value")
def test_locked_wallet_fails(rbf_node, dest_address):
    """bumpfee on a locked, encrypted wallet fails with a passphrase error."""
    rbfid = spend_one_input(rbf_node, dest_address)
    rbf_node.walletlock()
    assert_raises_rpc_error(-13, "Please enter the wallet passphrase with walletpassphrase first.",
                            rbf_node.bumpfee, rbfid)
def spend_one_input(node, dest_address):
    """Builds, signs and broadcasts an opt-in-RBF tx that spends exactly one
    0.001 coin: 0.0005 to `dest_address`, 0.00049 change, 0.00001 fee.

    Returns:
        str: the txid of the broadcast transaction.
    """
    tx_input = dict(
        sequence=BIP125_SEQUENCE_NUMBER, **next(u for u in node.listunspent() if u["amount"] == Decimal("0.00100000")))
    rawtx = node.createrawtransaction(
        [tx_input], {dest_address: Decimal("0.00050000"),
                     node.getrawchangeaddress(): Decimal("0.00049000")})
    signedtx = node.signrawtransactionwithwallet(rawtx)
    txid = node.sendrawtransaction(signedtx["hex"])
    return txid
def submit_block_with_tx(node, tx):
    """Mines a block containing only the coinbase plus `tx` (a raw hex
    string) on top of the current tip and submits it to `node`.

    Returns:
        The block object, with hashes computed.
    """
    ctx = CTransaction()
    ctx.deserialize(io.BytesIO(hex_str_to_bytes(tx)))
    tip = node.getbestblockhash()
    height = node.getblockcount() + 1
    # mediantime + 1 keeps the new block's timestamp valid.
    block_time = node.getblockheader(tip)["mediantime"] + 1
    block = blocktools.create_block(int(tip, 16), blocktools.create_coinbase(height), block_time)
    block.vtx.append(ctx)
    block.rehash()
    block.hashMerkleRoot = block.calc_merkle_root()
    blocktools.add_witness_commitment(block)
    block.solve()
    node.submitblock(bytes_to_hex_str(block.serialize(True)))
    return block
if __name__ == "__main__":
    # Standard test-framework entry point.
    BumpFeeTest().main()
| |
"""Method for sharing global sensor and port configuration values between
modules.
"""
import serial
# Baseline serial-port settings; per-port config sections override these
# (see _load_ports).
portdefaults = {
    "port_baud": 9600,
    "port_stopbits": serial.STOPBITS_ONE,
    "port_parity": serial.PARITY_NONE,
    "port_timeout": 0.01,
    "virtual": False,
    "delimiter": r"\s",
    "encoding": "UTF-8"
}
"""dict: default parameters passed to the :class:`serial.Serial` constructor for
communicating with the serial port.
"""
def _get_parser(config):
    """Returns a :class:`~configparser.ConfigParser` instance for the specified
    file.

    Args:
        config (ConfigParser): instance from which to extract the sensor list
            and port information. `str` is also allowed, in which case it
            should be the path to the config file to load.

    Returns:
        ConfigParser: the given instance, or a new parser loaded from the
        file path.
    """
    try:
        from configparser import ConfigParser
    except ImportError: # pragma: no cover
        #Renaming of modules to lower case in python 3.
        from ConfigParser import ConfigParser
    if isinstance(config, str):
        parser = ConfigParser()
        #Bug fix: readfp(open(config)) leaked the file handle and readfp is
        #deprecated (removed in Python 3.12). Use a context manager and
        #read_file, falling back to readfp for legacy parsers.
        with open(config) as stream:
            if hasattr(parser, "read_file"):
                parser.read_file(stream)
            else: # pragma: no cover
                parser.readfp(stream)
    else: # pragma: no cover
        parser = config
    return parser
# Module-level cache populated once by _load_sensors (see reset_config).
_sensors = {}
"""dict: keys are sensor names, values are :class:`liveserial.config.Sensor`
instances.
"""
_sensors_parsed = False
"""bool: when True, we have already scanned the config file for sensor
settings.
"""
def _load_sensors(config):
    """Loads all the sensors from the specified config file.

    Results are cached in the module-level `_sensors` dict; the config is
    only scanned once per session (see `reset_config`).

    Args:
        config (ConfigParser): instance from which to extract the sensor list.
            `str` is also allowed, in which case it
            should be the path to the config file to load.
    """
    global _sensors, _sensors_parsed
    if not _sensors_parsed:
        parser = _get_parser(config)
        if parser is not None:
            #Now that we have the thread, we can add configuration for each of the
            #sensors in the config file.
            from fnmatch import fnmatch
            for section in parser.sections():
                if fnmatch(section, "sensor.*"):
                    name = section[len("sensor."):]
                    #NOTE(review): `Sensor` is assumed to be defined elsewhere
                    #in this module — confirm.
                    _sensors[name] = Sensor(None,name,**dict(parser.items(section)))
        _sensors_parsed = True
def sensors(config, sensor=None, port=None, monitor=None):
    """Returns the :class:`liveserial.config.Sensor` instance(s) matching a
    port or a sensor name.

    Args:
        config (ConfigParser): instance from which to extract the sensor list
            and port information. `str` is also allowed, in which case it
            should be the path to the config file to load.
        sensor (str): name of a single sensor to look up; ignored when
            `port` is given.
        port (str): name of the port to configure for.
        monitor: monitor whose port overrides each matching sensor's
            configured port when given.

    Returns:
        dict of name -> Sensor when `port` is given; a single Sensor (or
        None) when `sensor` is given; None when neither is supplied.
    """
    _load_sensors(config)
    global _sensors
    #Bug fix: `result` was previously unbound when both `sensor` and `port`
    #were None, raising UnboundLocalError instead of returning None.
    result = None
    if port is not None:
        result = {}
        for sensor_n, instance in _sensors.items():
            if instance.port == port:
                instance.monitor = monitor
                if monitor is not None:
                    instance.port = monitor.port
                result[sensor_n] = instance
    elif sensor is not None:
        result = _sensors.get(sensor)
    return result
# Module-level cache populated once by _load_ports (see reset_config).
_ports = {}
"""dict: keys are port names, values are updated parameter dictionaries that can
be passed to the :class:`liveserial.monitor.ComMonitorThread` constructor.
"""
_ports_parsed = False
"""bool: when True, we have already examined the config file for port settings.
"""
def _load_ports(config):
    """Loads the configured port information from the specified configuraton.

    Results are cached in the module-level `_ports` dict; the config is
    only examined once per session (see `reset_config`).

    Args:
        config (ConfigParser): instance (or `str` path) from which to
            extract the "port.*" sections.
    """
    global _ports, _ports_parsed
    if _ports_parsed:
        return
    parser = _get_parser(config)
    if parser is None: # pragma: no cover
        _ports_parsed = True
        return
    #Hoisted out of the section loop: both imports are loop-invariant (the
    #original re-executed `import re` for every section).
    import re
    from fnmatch import fnmatch
    for section in parser.sections():
        if fnmatch(section, "port.*"):
            name = section[len("port."):]
        else: # pragma: no cover
            continue
        params = portdefaults.copy()
        for option in params:
            #Override the default using the config value unless it doesn't
            #exist.
            if parser.has_option(section, option):
                params[option] = parser.get(section, option)
        #Python's bool is interesting because bool('0') => True. So, we test
        #explicitly here for the option value the user set.
        if not isinstance(params["virtual"], bool):
            if re.match(r"\b\d+\b", params["virtual"]):
                params["virtual"] = bool(int(params["virtual"]))
            elif re.match(r"[a-z]", params["virtual"][0], re.I):
                params["virtual"] = params["virtual"][0].lower() == 't'
        _ports[name] = params
    _ports_parsed = True
def ports(config, port):
    """Returns the port configuration dictionary for the specified port name.

    Args:
        config (ConfigParser): instance from which to extract the port
            information. `str` is also allowed, in which case it
            should be the path to the config file to load.
        port (str): name of the port to return configuration for.
    """
    _load_ports(config)
    known = _ports.get(port)
    if known is None: # pragma: no cover
        #Unconfigured port: hand back a fresh copy of the defaults.
        return portdefaults.copy()
    return known
# Module-level cache populated once by script() (see reset_config).
_script = {}
"""dict: keys are command-line arguments usually accepted by the script when it
is run. Values are configured option values from the config file.
"""
_script_parsed = False
"""bool: when True, the script options have been parsed already.
"""
def script(config):
    """Returns the config options configured globally for the script.

    Results are cached in the module-level `_script` dict after the first
    call (see `reset_config`).

    Args:
        config (ConfigParser): instance from which to extract the port
            information. `str` is also allowed, in which case it
            should be the path to the config file to load.
    """
    global _script, _script_parsed
    if not _script_parsed:
        parser = _get_parser(config)
        if parser is not None:
            for section in parser.sections():
                if section == "global":
                    _script = dict(parser.items("global"))
            #We also need to handle the types, since all the options are just
            #strings by default.
            from liveserial.livemon import script_options
            for name in _script:
                optname = "-{}".format(name)
                if optname in script_options and "type" in script_options[optname]:
                    caster = script_options[optname]["type"]
                    _script[name] = caster(_script[name])
        _script_parsed = True
    return _script
def _config_split(value, delim, cast=None):
"""Splits the specified value using `delim` and optionally casting the
resulting items.
Args:
value (str): config option to split.
delim (str): string to split the option value on.
cast (function): to apply to each item after the split operation.
"""
if value is None:
return
if delim is None: # pragma: no cover
vals = value.split()
else:
vals = value.split(delim)
if cast is not None:
return list(map(cast, vals))
else:
return vals
# Module-level cache of "plot.*" config sections; populated lazily by
# plot() the first time it is called with a parseable config.
_plot = {}
"""dict: of plot options; keys are ['line', 'axes', 'figure', 'label', 'ticks'];
values are dicts of matplotlib option values.
"""
_plot_parsed = False
"""bool: when True, we have parsed plot options already.
"""
def plot(config, element):
    """Return the matplotlib options configured for one plot element.

    Args:
        config (ConfigParser): instance from which to extract the plot
            options; a `str` path to a config file is also accepted.
        element (str): one of ['line', 'axes', 'figure', 'label', 'ticks'];
            specifies which part of the plot the options will apply to.
    """
    global _plot, _plot_parsed
    if not _plot_parsed:
        parser = _get_parser(config)
        if parser is not None:
            from fnmatch import fnmatch
            for section in parser.sections():
                # Only sections named "plot.<element>" carry plot options.
                if not fnmatch(section, "plot.*"):  # pragma: no cover
                    continue
                _plot[section[len("plot."):]] = dict(parser.items(section))
            _plot_parsed = True
    return _plot.get(element, {})
def reset_config():
    """Clear every cached config dictionary and parsed flag so a session
    can continue with a different config file.
    """
    global _sensors, _ports, _plot, _script
    global _sensors_parsed, _ports_parsed, _plot_parsed, _script_parsed
    _sensors, _sensors_parsed = {}, False
    _ports, _ports_parsed = {}, False
    _plot, _plot_parsed = {}, False
    _script, _script_parsed = {}, False
def _parse_transform(function):
    """Parses the transform function's fqdn to return the function that can
    actually transform the data.

    Args:
        function (str): fully-qualified name of the transform function
            (e.g. ``numpy.mean``); only `numpy` names are resolved.

    Returns:
        callable: the resolved function, or implicitly `None` when the
        name does not mention `numpy`.
    """
    # SECURITY NOTE(review): `eval` executes arbitrary code taken from the
    # config file; the "numpy" substring check is not a sandbox. Consider
    # resolving via getattr on the numpy module instead — confirm callers.
    if "numpy" in function: # pragma: no cover
        import numpy as np
        return eval(function)
class Sensor(object):
    """Represents the configuration of a sensor on the serial port.

    Args:
        monitor (ComMonitorThread): parent instance that this sensor is being logged
            with.
        name (str): name of the sensor in the configuration file.
        key (str): sensor key as it will be written to the serial stream, or `None`
            if there isn't a key in the stream (i.e., only values).
        value_index (list): column index/indices of the value that will
            be plotted.
        dtype (list): of `str` or `type`; items must belong to ['key', int, float,
            str]. Represents the order in which values are found in a single line of
            data read from the serial port. Thus `W 129229 0.928379` would be given by
            ["key", int, float].
        label (str): for plots, what to put on the y-axis. Defaults to `name`.
        port (str): name of the port to read this sensor from. Defaults to
            :data:`ComMonitorThread.port`.
        logging (str): comma-separated list of columns indices (zero-based) to
            include in the log file. If not specified, then the default is to
            include *all* data columns in the log file.
        columns (str): comma-separated list of columns headings for the CSV file;
            these are written in the first row of the file. If excluded, they
            default to `Time` and `Value1`, `Value2`, etc.
        legends (str): if the comma-separated list in `value_index` includes more
            than one index, multiple lines are plotted on the same subplot. In that
            case, `legends` allows a comma-separated list of legend labels to be
            provided for each of those lines.
        sensors (str): comma-separated list of sensor names to include in the data
            vector that will be passed to `function` to be aggregated to a single
            value. This only applies to the case of aggregate sensors.
        function (str): name of a function to use to transform the data. Only
            applies to the case of aggregate sensors.
        kwargs (dict): additional keyword arguments supported that do not require
            special processing (i.e., are just simple string values).

    Attributes:
        options (dict): additional keyword arguments (or configurable options) for
            the sensor.
    """
    def __init__(self, monitor, name, key=None, value_index=None,
                 dtype=["key", "int", "float"], label=None, port=None,
                 logging=None, columns=None, legends=None, function=None,
                 sensors=None, **kwargs):
        # NOTE(review): the mutable default for `dtype` is only ever read
        # (split/iterated), never mutated, so sharing it across calls is safe.
        self.monitor = monitor
        self.name = name
        self.key = key
        #We analyze the string values set for the dtypes to return the python
        #`types` that can cast strings to actual type values.
        self.dtype = []
        self._keyloc = None
        from six import string_types
        if isinstance(dtype, string_types):
            dtype = dtype.split(',')
        for i, sentry in enumerate(dtype):
            if sentry == "key":
                # Remember which column carries the sensor key so that
                # _cast/parse can skip it when casting values.
                self._keyloc = i
                if key is None and port != "aggregate":
                    #For aggregate ports, we relax this condition since the
                    #sensors being aggregated have the dtypes specified
                    #correctly.
                    raise ValueError("You must specify a sensor key if 'key' "
                                     "is in the 'dtype' option list.")
            else:
                # SECURITY NOTE(review): `eval` on a config-supplied type
                # name executes arbitrary code; expected values are only
                # 'int', 'float' and 'str'. Consider a whitelist lookup.
                caster = eval(sentry)
                self.dtype.append(caster)
        self.value_index = _config_split(value_index, ',', int)
        self.label = name if label is None else label
        self.port = monitor.port if port is None else port
        self.logging = _config_split(logging, ',', int)
        self.columns = _config_split(columns, ',')
        self.legends = _config_split(legends, ',')
        self.sensors = _config_split(sensors, ',')
        if function is not None:
            self.transform = _parse_transform(function)
        else:
            self.transform = None
        self.options = kwargs
    def _cast(self, raw):
        """Casts all the values in the given list to their relevant data
        types. Assumes that the list has the correct format.

        Args:
            raw (list): string values from the split line to cast.

        Returns:
            list: cast values (key column excluded); `None` when a value
            fails to cast; implicitly `None` when `raw` has the wrong length.
        """
        # The key column (if any) is extra relative to the dtype list.
        if (len(raw) != len(self.dtype)
            + (1 if self._keyloc is not None else 0)): # pragma: no cover
            return
        try:
            vals = []
            for iv, v in enumerate(raw):
                if iv != self._keyloc:
                    # Apply casters in declared order, skipping the key slot.
                    vals.append(self.dtype[len(vals)](v))
            #Previously, we were changing the order of the columns based on the
            #value index to make it easier for the plotter. Since we allow
            #multiple values to be plotted on the same subplot now, it is easier
            #to just not mangle them in the first place.
            return vals
        except ValueError: # pragma: no cover
            return None
    def parse(self, raw):
        """Parses a single line read from the serial port and returns a tuple of
        values.

        Args:
            raw (list): of split ASCII-encoded strings from the serial port.

        Returns:
            list: of values parsed using :attr:`Sensor.dtype` casting.
            None: if the `key` was not found in the correct location.
        """
        result = None
        if self._keyloc is not None:
            # Keyed sensor: only lines whose key column matches belong to us.
            if raw[self._keyloc] == self.key:
                result = self._cast(raw)
        elif self.key is None:
            # Key-less sensor: every line is a candidate.
            result = self._cast(raw)
        return result
| |
import warnings
from collections import OrderedDict, defaultdict
import numpy as np
import pandas as pd
from .coding import strings, times, variables
from .coding.variables import SerializationWarning
from .core import duck_array_ops, indexing
from .core.pycompat import dask_array_type
from .core.variable import IndexVariable, Variable, as_variable
class NativeEndiannessArray(indexing.ExplicitlyIndexedNDArrayMixin):
    """Decode arrays on the fly from non-native to native endianness

    This is useful for decoding arrays from netCDF3 files (which are all
    big endian) into native endianness, so they can be used with Cython
    functions, such as those found in bottleneck and pandas.

    >>> x = np.arange(5, dtype='>i2')

    >>> x.dtype
    dtype('>i2')

    >>> NativeEndiannessArray(x).dtype
    dtype('int16')

    >>> NativeEndiannessArray(x)[:].dtype
    dtype('int16')
    """

    def __init__(self, array):
        # Wrap so arbitrary explicit-indexing keys are supported lazily.
        self.array = indexing.as_indexable(array)

    @property
    def dtype(self):
        # Rebuild the dtype from kind + itemsize, which drops the explicit
        # byte-order prefix and yields the native-endian equivalent.
        return np.dtype(self.array.dtype.kind + str(self.array.dtype.itemsize))

    def __getitem__(self, key):
        # Conversion to native byte order happens only on access.
        return np.asarray(self.array[key], dtype=self.dtype)
class BoolTypeArray(indexing.ExplicitlyIndexedNDArrayMixin):
    """Decode arrays on the fly from integer to boolean datatype

    This is useful for decoding boolean arrays from integer typed netCDF
    variables.

    >>> x = np.array([1, 0, 1, 1, 0], dtype='i1')

    >>> x.dtype
    dtype('int8')

    >>> BoolTypeArray(x).dtype
    dtype('bool')

    >>> BoolTypeArray(x)[:].dtype
    dtype('bool')
    """

    def __init__(self, array):
        # Wrap so arbitrary explicit-indexing keys are supported lazily.
        self.array = indexing.as_indexable(array)

    @property
    def dtype(self):
        return np.dtype('bool')

    def __getitem__(self, key):
        # Conversion to bool happens only on access.
        return np.asarray(self.array[key], dtype=self.dtype)
def _var_as_tuple(var):
return var.dims, var.data, var.attrs.copy(), var.encoding.copy()
def maybe_encode_nonstring_dtype(var, name=None):
    """Cast a variable to the dtype requested in ``var.encoding['dtype']``.

    Only applies to non-string target dtypes ('S1' and str are handled by
    the string coders). Warns when float data is about to be rounded into
    an integer dtype with no _FillValue available for NaNs.
    """
    if ('dtype' in var.encoding and
            var.encoding['dtype'] not in ('S1', str)):
        dims, data, attrs, encoding = _var_as_tuple(var)
        # pop() so the consumed 'dtype' key does not survive in encoding.
        dtype = np.dtype(encoding.pop('dtype'))
        if dtype != var.dtype:
            if np.issubdtype(dtype, np.integer):
                if (np.issubdtype(var.dtype, np.floating) and
                        '_FillValue' not in var.attrs):
                    warnings.warn('saving variable %s with floating '
                                  'point data as an integer dtype without '
                                  'any _FillValue to use for NaNs' % name,
                                  SerializationWarning, stacklevel=10)
                # Round first so the integer cast doesn't simply truncate.
                data = duck_array_ops.around(data)[...]
            data = data.astype(dtype=dtype)
        var = Variable(dims, data, attrs, encoding)
    return var
def maybe_default_fill_value(var):
    """Give float variables a NaN ``_FillValue`` when none was specified."""
    has_fill = ('_FillValue' in var.attrs or '_FillValue' in var.encoding)
    if not has_fill and np.issubdtype(var.dtype, np.floating):
        # make NaN the fill value for float types:
        var.attrs['_FillValue'] = var.dtype.type(np.nan)
    return var
def maybe_encode_bools(var):
    """Encode a boolean variable as int8 ('i1') for netCDF storage.

    netCDF has no boolean type, so bool data is stored as bytes with an
    attribute ``dtype='bool'`` recording the original type so it can be
    round-tripped on decode (see BoolTypeArray). Variables that already
    declare a dtype in attrs or encoding are left untouched.
    """
    # Use np.bool_ rather than the np.bool alias: np.bool was deprecated in
    # NumPy 1.20 and removed in 1.24, so the old comparison raises
    # AttributeError on modern NumPy. The comparison itself is unchanged.
    if ((var.dtype == np.bool_) and
            ('dtype' not in var.encoding) and ('dtype' not in var.attrs)):
        dims, data, attrs, encoding = _var_as_tuple(var)
        attrs['dtype'] = 'bool'
        data = data.astype(dtype='i1', copy=True)
        var = Variable(dims, data, attrs, encoding)
    return var
def _infer_dtype(array, name=None):
"""Given an object array with no missing values, infer its dtype from its
first element
"""
if array.dtype.kind != 'O':
raise TypeError('infer_type must be called on a dtype=object array')
if array.size == 0:
return np.dtype(float)
element = array[(0,) * array.ndim]
if isinstance(element, (bytes, str)):
return strings.create_vlen_dtype(type(element))
dtype = np.array(element).dtype
if dtype.kind != 'O':
return dtype
raise ValueError('unable to infer dtype on variable {!r}; xarray '
'cannot serialize arbitrary Python objects'
.format(name))
def ensure_not_multiindex(var, name=None):
    """Raise if ``var`` wraps a pandas MultiIndex, which is not serializable."""
    if not isinstance(var, IndexVariable):
        return
    if not isinstance(var.to_index(), pd.MultiIndex):
        return
    raise NotImplementedError(
        'variable {!r} is a MultiIndex, which cannot yet be '
        'serialized to netCDF files '
        '(https://github.com/pydata/xarray/issues/1077). Use '
        'reset_index() to convert MultiIndex levels into coordinate '
        'variables instead.'.format(name))
def _copy_with_dtype(data, dtype):
"""Create a copy of an array with the given dtype.
We use this instead of np.array() to ensure that custom object dtypes end
up on the resulting array.
"""
result = np.empty(data.shape, dtype)
result[...] = data
return result
def ensure_dtype_not_object(var, name=None):
    """Convert a dtype=object variable into a concrete serializable dtype.

    Infers a real dtype (bytes, unicode or float) from the data and fills
    missing values with a type-appropriate fill value, since object arrays
    cannot be written to netCDF.
    """
    # TODO: move this from conventions to backends? (it's not CF related)
    if var.dtype.kind == 'O':
        dims, data, attrs, encoding = _var_as_tuple(var)
        if isinstance(data, dask_array_type):
            warnings.warn(
                'variable {} has data in the form of a dask array with '
                'dtype=object, which means it is being loaded into memory '
                'to determine a data type that can be safely stored on disk. '
                'To avoid this, coerce this variable to a fixed-size dtype '
                'with astype() before saving it.'.format(name),
                SerializationWarning)
            # Inference needs concrete values, so the dask array is computed.
            data = data.compute()
        missing = pd.isnull(data)
        if missing.any():
            # nb. this will fail for dask.array data
            non_missing_values = data[~missing]
            inferred_dtype = _infer_dtype(non_missing_values, name)
            # There is no safe bit-pattern for NA in typical binary string
            # formats, we so can't set a fill_value. Unfortunately, this means
            # we can't distinguish between missing values and empty strings.
            if strings.is_bytes_dtype(inferred_dtype):
                fill_value = b''
            elif strings.is_unicode_dtype(inferred_dtype):
                fill_value = u''
            else:
                # insist on using float for numeric values
                if not np.issubdtype(inferred_dtype, np.floating):
                    inferred_dtype = np.dtype(float)
                fill_value = inferred_dtype.type(np.nan)
            data = _copy_with_dtype(data, dtype=inferred_dtype)
            data[missing] = fill_value
        else:
            data = _copy_with_dtype(data, dtype=_infer_dtype(data, name))
        # vlen string dtypes stay object-kind but carry metadata; anything
        # else must have been converted away from object by now.
        assert data.dtype.kind != 'O' or data.dtype.metadata
        var = Variable(dims, data, attrs, encoding)
    return var
def encode_cf_variable(var, needs_copy=True, name=None):
    """
    Converts an Variable into an Variable which follows some
    of the CF conventions:

        - Nans are masked using _FillValue (or the deprecated missing_value)
        - Rescaling via: scale_factor and add_offset
        - datetimes are converted to the CF 'units since time' format
        - dtype encodings are enforced.

    Parameters
    ----------
    var : xarray.Variable
        A variable holding un-encoded data.

    Returns
    -------
    out : xarray.Variable
        A variable which has been encoded as described above.
    """
    ensure_not_multiindex(var, name=name)

    # Coder order matters: decode_cf_variable applies the inverse coders in
    # the reverse order.
    for coder in [times.CFDatetimeCoder(),
                  times.CFTimedeltaCoder(),
                  variables.CFScaleOffsetCoder(),
                  variables.CFMaskCoder(),
                  variables.UnsignedIntegerCoder()]:
        var = coder.encode(var, name=name)

    # TODO(shoyer): convert all of these to use coders, too:
    var = maybe_encode_nonstring_dtype(var, name=name)
    var = maybe_default_fill_value(var)
    var = maybe_encode_bools(var)
    var = ensure_dtype_not_object(var, name=name)
    return var
def decode_cf_variable(name, var, concat_characters=True, mask_and_scale=True,
                       decode_times=True, decode_endianness=True,
                       stack_char_dim=True, use_cftime=None):
    """
    Decodes a variable which may hold CF encoded information.

    This includes variables that have been masked and scaled, which
    hold CF style time variables (this is almost always the case if
    the dataset has been serialized) and which have strings encoded
    as character arrays.

    Parameters
    ----------
    name: str
        Name of the variable. Used for better error messages.
    var : Variable
        A variable holding potentially CF encoded information.
    concat_characters : bool
        Should character arrays be concatenated to strings, for
        example: ['h', 'e', 'l', 'l', 'o'] -> 'hello'
    mask_and_scale: bool
        Lazily scale (using scale_factor and add_offset) and mask
        (using _FillValue). If the _Unsigned attribute is present
        treat integer arrays as unsigned.
    decode_times : bool
        Decode cf times ('hours since 2000-01-01') to np.datetime64.
    decode_endianness : bool
        Decode arrays from non-native to native endianness.
    stack_char_dim : bool
        Whether to stack characters into bytes along the last dimension of this
        array. Passed as an argument because we need to look at the full
        dataset to figure out if this is appropriate.
    use_cftime: bool, optional
        Only relevant if encoded dates come from a standard calendar
        (e.g. 'gregorian', 'proleptic_gregorian', 'standard', or not
        specified). If None (default), attempt to decode times to
        ``np.datetime64[ns]`` objects; if this is not possible, decode times to
        ``cftime.datetime`` objects. If True, always decode times to
        ``cftime.datetime`` objects, regardless of whether or not they can be
        represented using ``np.datetime64[ns]`` objects. If False, always
        decode times to ``np.datetime64[ns]`` objects; if this is not possible
        raise an error.

    Returns
    -------
    out : Variable
        A variable holding the decoded equivalent of var.
    """
    var = as_variable(var)
    original_dtype = var.dtype

    if concat_characters:
        if stack_char_dim:
            var = strings.CharacterArrayCoder().decode(var, name=name)
        var = strings.EncodedStringCoder().decode(var)

    if mask_and_scale:
        # The reverse of the coder order used by encode_cf_variable.
        for coder in [variables.UnsignedIntegerCoder(),
                      variables.CFMaskCoder(),
                      variables.CFScaleOffsetCoder()]:
            var = coder.decode(var, name=name)

    if decode_times:
        for coder in [times.CFTimedeltaCoder(),
                      times.CFDatetimeCoder(use_cftime=use_cftime)]:
            var = coder.decode(var, name=name)

    dimensions, data, attributes, encoding = (
        variables.unpack_for_decoding(var))

    # TODO(shoyer): convert everything below to use coders

    if decode_endianness and not data.dtype.isnative:
        # do this last, so it's only done if we didn't already unmask/scale
        data = NativeEndiannessArray(data)
        original_dtype = data.dtype

    # Record the on-disk dtype so it can be restored on encode.
    encoding.setdefault('dtype', original_dtype)

    if 'dtype' in attributes and attributes['dtype'] == 'bool':
        # Inverse of maybe_encode_bools: the marker attribute is consumed.
        del attributes['dtype']
        data = BoolTypeArray(data)

    if not isinstance(data, dask_array_type):
        # Wrap so indexing of backend arrays stays lazy.
        data = indexing.LazilyOuterIndexedArray(data)

    return Variable(dimensions, data, attributes, encoding=encoding)
def _update_bounds_attributes(variables):
"""Adds time attributes to time bounds variables.
Variables handling time bounds ("Cell boundaries" in the CF
conventions) do not necessarily carry the necessary attributes to be
decoded. This copies the attributes from the time variable to the
associated boundaries.
See Also:
http://cfconventions.org/Data/cf-conventions/cf-conventions-1.7/
cf-conventions.html#cell-boundaries
https://github.com/pydata/xarray/issues/2565
"""
# For all time variables with bounds
for v in variables.values():
attrs = v.attrs
has_date_units = 'units' in attrs and 'since' in attrs['units']
if has_date_units and 'bounds' in attrs:
if attrs['bounds'] in variables:
bounds_attrs = variables[attrs['bounds']].attrs
bounds_attrs.setdefault('units', attrs['units'])
if 'calendar' in attrs:
bounds_attrs.setdefault('calendar', attrs['calendar'])
def decode_cf_variables(variables, attributes, concat_characters=True,
                        mask_and_scale=True, decode_times=True,
                        decode_coords=True, drop_variables=None,
                        use_cftime=None):
    """
    Decode several CF encoded variables.

    See: decode_cf_variable
    """
    # Map each dimension name to the variables using it, so we can decide
    # whether a trailing character dimension may be stacked into strings.
    dimensions_used_by = defaultdict(list)
    for v in variables.values():
        for d in v.dims:
            dimensions_used_by[d].append(v)

    def stackable(dim):
        # figure out if a dimension can be concatenated over
        if dim in variables:
            return False
        for v in dimensions_used_by[dim]:
            if v.dtype.kind != 'S' or dim != v.dims[-1]:
                return False
        return True

    coord_names = set()

    if isinstance(drop_variables, str):
        drop_variables = [drop_variables]
    elif drop_variables is None:
        drop_variables = []
    drop_variables = set(drop_variables)

    # Time bounds coordinates might miss the decoding attributes
    if decode_times:
        _update_bounds_attributes(variables)

    new_vars = OrderedDict()
    for k, v in variables.items():
        if k in drop_variables:
            continue
        stack_char_dim = (concat_characters and v.dtype == 'S1' and
                          v.ndim > 0 and stackable(v.dims[-1]))
        new_vars[k] = decode_cf_variable(
            k, v, concat_characters=concat_characters,
            mask_and_scale=mask_and_scale, decode_times=decode_times,
            stack_char_dim=stack_char_dim, use_cftime=use_cftime)
        if decode_coords:
            # Promote names listed in a variable's 'coordinates' attribute
            # to coordinates, moving the attribute into encoding — but only
            # when every listed name actually exists in the dataset.
            var_attrs = new_vars[k].attrs
            if 'coordinates' in var_attrs:
                coord_str = var_attrs['coordinates']
                var_coord_names = coord_str.split()
                if all(k in variables for k in var_coord_names):
                    new_vars[k].encoding['coordinates'] = coord_str
                    del var_attrs['coordinates']
                    coord_names.update(var_coord_names)

    if decode_coords and 'coordinates' in attributes:
        # Copy before popping so the caller's attributes are not mutated.
        attributes = OrderedDict(attributes)
        coord_names.update(attributes.pop('coordinates').split())

    return new_vars, attributes, coord_names
def decode_cf(obj, concat_characters=True, mask_and_scale=True,
              decode_times=True, decode_coords=True, drop_variables=None,
              use_cftime=None):
    """Decode the given Dataset or Datastore according to CF conventions into
    a new Dataset.

    Parameters
    ----------
    obj : Dataset or DataStore
        Object to decode.
    concat_characters : bool, optional
        Should character arrays be concatenated to strings, for
        example: ['h', 'e', 'l', 'l', 'o'] -> 'hello'
    mask_and_scale: bool, optional
        Lazily scale (using scale_factor and add_offset) and mask
        (using _FillValue).
    decode_times : bool, optional
        Decode cf times (e.g., integers since 'hours since 2000-01-01') to
        np.datetime64.
    decode_coords : bool, optional
        Use the 'coordinates' attribute on variable (or the dataset itself) to
        identify coordinates.
    drop_variables: string or iterable, optional
        A variable or list of variables to exclude from being parsed from the
        dataset. This may be useful to drop variables with problems or
        inconsistent values.
    use_cftime: bool, optional
        Only relevant if encoded dates come from a standard calendar
        (e.g. 'gregorian', 'proleptic_gregorian', 'standard', or not
        specified). If None (default), attempt to decode times to
        ``np.datetime64[ns]`` objects; if this is not possible, decode times to
        ``cftime.datetime`` objects. If True, always decode times to
        ``cftime.datetime`` objects, regardless of whether or not they can be
        represented using ``np.datetime64[ns]`` objects. If False, always
        decode times to ``np.datetime64[ns]`` objects; if this is not possible
        raise an error.

    Returns
    -------
    decoded : Dataset
    """
    # Imported here to avoid circular imports at module load time.
    from .core.dataset import Dataset
    from .backends.common import AbstractDataStore

    if isinstance(obj, Dataset):
        vars = obj._variables
        attrs = obj.attrs
        extra_coords = set(obj.coords)
        file_obj = obj._file_obj
        encoding = obj.encoding
    elif isinstance(obj, AbstractDataStore):
        vars, attrs = obj.load()
        extra_coords = set()
        file_obj = obj
        encoding = obj.get_encoding()
    else:
        raise TypeError('can only decode Dataset or DataStore objects')

    vars, attrs, coord_names = decode_cf_variables(
        vars, attrs, concat_characters, mask_and_scale, decode_times,
        decode_coords, drop_variables=drop_variables, use_cftime=use_cftime)
    ds = Dataset(vars, attrs=attrs)
    # Only names that survived decoding (e.g. weren't dropped) may be
    # marked as coordinates.
    ds = ds.set_coords(coord_names.union(extra_coords).intersection(vars))
    ds._file_obj = file_obj
    ds.encoding = encoding
    return ds
def cf_decoder(variables, attributes,
               concat_characters=True, mask_and_scale=True,
               decode_times=True):
    """
    Decode a set of CF encoded variables and attributes.

    See Also, decode_cf_variable

    Parameters
    ----------
    variables : dict
        A dictionary mapping from variable name to xarray.Variable
    attributes : dict
        A dictionary mapping from attribute name to value
    concat_characters : bool
        Should character arrays be concatenated to strings, for
        example: ['h', 'e', 'l', 'l', 'o'] -> 'hello'
    mask_and_scale: bool
        Lazily scale (using scale_factor and add_offset) and mask
        (using _FillValue).
    decode_times : bool
        Decode cf times ('hours since 2000-01-01') to np.datetime64.

    Returns
    -------
    decoded_variables : dict
        A dictionary mapping from variable name to xarray.Variable objects.
    decoded_attributes : dict
        A dictionary mapping from attribute name to values.
    """
    # Coordinate names are intentionally discarded; callers that need them
    # should use decode_cf_variables directly.
    decoded_vars, decoded_attrs, _ = decode_cf_variables(
        variables, attributes, concat_characters, mask_and_scale,
        decode_times)
    return decoded_vars, decoded_attrs
def _encode_coordinates(variables, attributes, non_dim_coord_names):
    """Attach 'coordinates' attributes recording non-dimension coordinates.

    Coordinates used by a variable are listed in that variable's
    'coordinates' attribute (CF convention); any remaining ones go into a
    global 'coordinates' attribute so xarray can round-trip them.
    """
    # calculate global and variable specific coordinates
    non_dim_coord_names = set(non_dim_coord_names)

    for name in list(non_dim_coord_names):
        if isinstance(name, str) and ' ' in name:
            # 'coordinates' is a space-separated list, so a name with a
            # space cannot be represented in it.
            warnings.warn(
                'coordinate {!r} has a space in its name, which means it '
                'cannot be marked as a coordinate on disk and will be '
                'saved as a data variable instead'.format(name),
                SerializationWarning, stacklevel=6)
            non_dim_coord_names.discard(name)

    global_coordinates = non_dim_coord_names.copy()
    variable_coordinates = defaultdict(set)
    for coord_name in non_dim_coord_names:
        target_dims = variables[coord_name].dims
        for k, v in variables.items():
            # A coordinate is attached to every variable whose dimensions
            # are a superset of the coordinate's dimensions.
            if (k not in non_dim_coord_names and k not in v.dims and
                    set(target_dims) <= set(v.dims)):
                variable_coordinates[k].add(coord_name)
                global_coordinates.discard(coord_name)

    # Shallow copies so attribute edits below don't mutate the originals.
    variables = OrderedDict((k, v.copy(deep=False))
                            for k, v in variables.items())

    # These coordinates are saved according to CF conventions
    for var_name, coord_names in variable_coordinates.items():
        attrs = variables[var_name].attrs
        if 'coordinates' in attrs:
            raise ValueError('cannot serialize coordinates because variable '
                             "%s already has an attribute 'coordinates'"
                             % var_name)
        attrs['coordinates'] = ' '.join(map(str, coord_names))

    # These coordinates are not associated with any particular variables, so we
    # save them under a global 'coordinates' attribute so xarray can roundtrip
    # the dataset faithfully. Because this serialization goes beyond CF
    # conventions, only do it if necessary.
    # Reference discussion:
    # http://mailman.cgd.ucar.edu/pipermail/cf-metadata/2014/057771.html
    if global_coordinates:
        attributes = OrderedDict(attributes)
        if 'coordinates' in attributes:
            raise ValueError('cannot serialize coordinates because the global '
                             "attribute 'coordinates' already exists")
        attributes['coordinates'] = ' '.join(map(str, global_coordinates))

    return variables, attributes
def encode_dataset_coordinates(dataset):
    """Encode coordinates on the given dataset object into variable specific
    and global attributes.

    When possible, this is done according to CF conventions.

    Parameters
    ----------
    dataset : Dataset
        Object to encode.

    Returns
    -------
    variables : dict
    attrs : dict
    """
    # Dimension coordinates are implicit on disk; only the others need
    # explicit 'coordinates' attributes.
    non_dim = set(dataset.coords).difference(dataset.dims)
    return _encode_coordinates(dataset._variables, dataset.attrs,
                               non_dim_coord_names=non_dim)
def cf_encoder(variables, attributes):
    """
    A function which takes a dicts of variables and attributes
    and encodes them to conform to CF conventions as much
    as possible. This includes masking, scaling, character
    array handling, and CF-time encoding.

    Decode a set of CF encoded variables and attributes.

    See Also, decode_cf_variable

    Parameters
    ----------
    variables : dict
        A dictionary mapping from variable name to xarray.Variable
    attributes : dict
        A dictionary mapping from attribute name to value

    Returns
    -------
    encoded_variables : dict
        A dictionary mapping from variable name to xarray.Variable,
    encoded_attributes : dict
        A dictionary mapping from attribute name to value

    See also: encode_cf_variable
    """
    encoded = OrderedDict()
    for name, variable in variables.items():
        encoded[name] = encode_cf_variable(variable, name=name)
    return encoded, attributes
| |
# This file is part of beets.
# Copyright 2013, Jakob Schnitzer.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Converts tracks or albums to external directory
"""
import logging
import os
import threading
from subprocess import Popen
import tempfile
from string import Template
import pipes
from beets.plugins import BeetsPlugin
from beets import ui, util
from beetsplug.embedart import _embed
from beets import config
# Shared beets logger used for all convert-plugin messages.
log = logging.getLogger('beets')
# Sink for transcoder stderr so encoder noise never reaches the console.
DEVNULL = open(os.devnull, 'wb')
# Serializes directory creation across converter threads (see convert_item).
_fs_lock = threading.Lock()
_temp_files = []  # Keep track of temporary transcoded files for deletion.
# Some convenient alternate names for formats.
ALIASES = {
    u'wma': u'windows media',
    u'vorbis': u'ogg',
}
def _destination(dest_dir, item, keep_new, path_formats):
"""Return the path under `dest_dir` where the file should be placed
(possibly after conversion).
"""
dest = item.destination(basedir=dest_dir, path_formats=path_formats)
if keep_new:
# When we're keeping the converted file, no extension munging
# occurs.
return dest
else:
# Otherwise, replace the extension.
_, ext = get_format()
return os.path.splitext(dest)[0] + ext
def get_format():
    """Get the currently configured format command and extension.

    Returns:
        tuple: ``(command, extension)`` where ``command`` is the transcode
        command split into a list of UTF-8 encoded argument strings and
        ``extension`` is the dot-prefixed, UTF-8 encoded file extension.

    Raises:
        ui.UserError: if the configured format is missing either its
            "command" or "extension" field.
    """
    # NOTE(review): this module targets Python 2 (`unicode` builtin).
    format = config['convert']['format'].get(unicode).lower()
    format = ALIASES.get(format, format)
    format_info = config['convert']['formats'][format].get(dict)
    # Convenience and backwards-compatibility shortcuts.
    keys = config['convert'].keys()
    if 'command' in keys:
        # A top-level 'command' overrides the per-format one.
        format_info['command'] = config['convert']['command'].get(unicode)
    elif 'opts' in keys:
        # Undocumented option for backwards compatibility with < 1.3.1.
        format_info['command'] = u'ffmpeg -i $source -y {0} $dest'.format(
            config['convert']['opts'].get(unicode)
        )
    if 'extension' in keys:
        format_info['extension'] = config['convert']['extension'].get(unicode)
    try:
        return [a.encode('utf8') for a in format_info['command'].split()], \
            (u'.' + format_info['extension']).encode('utf8')
    except KeyError:
        raise ui.UserError(
            u'convert: format {0} needs "command" and "extension" fields'
            .format(format)
        )
def encode(source, dest):
    """Transcode the audio file at `source` into `dest` with the configured
    command. On a non-zero exit status the partial output is deleted.
    """
    quiet = config['convert']['quiet'].get()
    if not quiet:
        log.info(u'Started encoding {0}'.format(util.displayable_path(source)))
    command, _ = get_format()
    opts = []
    for arg in command:
        # Substitute the $source/$dest placeholders; safe_substitute leaves
        # unknown placeholders intact instead of raising.
        opts.append(Template(arg).safe_substitute({
            'source': source,
            'dest': dest,
        }))
    log.debug(u'convert: executing: {0}'.format(
        u' '.join(pipes.quote(o.decode('utf8', 'ignore')) for o in opts)
    ))
    encode = Popen(opts, close_fds=True, stderr=DEVNULL)
    encode.wait()
    if encode.returncode != 0:
        # Something went wrong (probably Ctrl+C), remove temporary files
        log.info(u'Encoding {0} failed. Cleaning up...'
                 .format(util.displayable_path(source)))
        util.remove(dest)
        util.prune_dirs(os.path.dirname(dest))
        return
    if not quiet:
        log.info(u'Finished encoding {0}'.format(
            util.displayable_path(source))
        )
def should_transcode(item):
    """Determine whether the item should be transcoded as part of
    conversion (i.e., its bitrate is high or it has the wrong format).
    """
    maxbr = config['convert']['max_bitrate'].get(int)
    format_name = config['convert']['format'].get(unicode)
    # Transcode on format mismatch, or when the item's bitrate (bits/s)
    # meets or exceeds the configured maximum (configured in kbit/s).
    return format_name.lower() != item.format.lower() or \
        item.bitrate >= 1000 * maxbr
def convert_item(dest_dir, keep_new, path_formats):
    """Pipeline-stage coroutine: receives library items via `yield` and
    copies or transcodes each one into `dest_dir`, then updates tags,
    database paths and (optionally) embedded album art.
    """
    while True:
        item = yield
        dest = _destination(dest_dir, item, keep_new, path_formats)
        if os.path.exists(util.syspath(dest)):
            log.info(u'Skipping {0} (target file exists)'.format(
                util.displayable_path(item.path)
            ))
            continue
        # Ensure that only one thread tries to create directories at a
        # time. (The existence check is not atomic with the directory
        # creation inside this function.)
        with _fs_lock:
            util.mkdirall(dest)
        # When keeping the new file in the library, we first move the
        # current (pristine) file to the destination. We'll then copy it
        # back to its old path or transcode it to a new path.
        if keep_new:
            log.info(u'Moving to {0}'.
                     format(util.displayable_path(dest)))
            util.move(item.path, dest)
        if not should_transcode(item):
            # No transcoding necessary.
            log.info(u'Copying {0}'.format(util.displayable_path(item.path)))
            if keep_new:
                util.copy(dest, item.path)
            else:
                util.copy(item.path, dest)
        else:
            if keep_new:
                # Retarget the library entry at the transcoded file, which
                # gets the configured format's extension.
                _, ext = get_format()
                item.path = os.path.splitext(item.path)[0] + ext
                encode(dest, item.path)
            else:
                encode(item.path, dest)
        # Write tags from the database to the converted file.
        if not keep_new:
            item.path = dest
        item.write()
        # If we're keeping the transcoded file, read it again (after
        # writing) to get new bitrate, duration, etc.
        if keep_new:
            item.read()
            item.store()  # Store new path and audio data.
        if config['convert']['embed']:
            album = item.get_album()
            if album:
                artpath = album.artpath
                if artpath:
                    _embed(artpath, [item])
def convert_on_import(lib, item):
    """Transcode a file automatically after it is imported into the
    library.

    The transcode goes to a temporary file that replaces the item's path;
    the temp file is remembered in `_temp_files` for later cleanup.
    """
    if should_transcode(item):
        _, ext = get_format()
        fd, dest = tempfile.mkstemp(ext)
        # mkstemp opens the file; close our handle before the encoder writes.
        os.close(fd)
        _temp_files.append(dest) # Delete the transcode later.
        encode(item.path, dest)
        item.path = dest
        item.write()
        item.read() # Load new audio information data.
        item.store()
def convert_func(lib, opts, args):
    """Handler for the `convert` command: list the matching items, ask the
    user for confirmation, then convert them on parallel pipeline threads.
    """
    # Command-line options take precedence over configured values.
    dest = opts.dest if opts.dest is not None else \
        config['convert']['dest'].get()
    if not dest:
        raise ui.UserError('no convert destination set')
    dest = util.bytestring_path(dest)
    threads = opts.threads if opts.threads is not None else \
        config['convert']['threads'].get(int)
    keep_new = opts.keep_new
    if not config['convert']['paths']:
        path_formats = ui.get_path_formats()
    else:
        path_formats = ui.get_path_formats(config['convert']['paths'])
    # Show the user what will be converted before asking to proceed.
    ui.commands.list_items(lib, ui.decargs(args), opts.album, None)
    if not ui.input_yn("Convert? (Y/n)"):
        return
    if opts.album:
        items = (i for a in lib.albums(ui.decargs(args)) for i in a.items())
    else:
        items = iter(lib.items(ui.decargs(args)))
    # One converter coroutine per thread; the pipeline fans items out.
    convert = [convert_item(dest, keep_new, path_formats)
               for i in range(threads)]
    pipe = util.pipeline.Pipeline([items, convert])
    pipe.run_parallel()
class ConvertPlugin(BeetsPlugin):
    """Beets plugin providing the `convert` command and an optional
    automatic-transcode stage for imports.
    """
    def __init__(self):
        super(ConvertPlugin, self).__init__()
        # Default configuration, including per-format transcode commands.
        self.config.add({
            u'dest': None,
            u'threads': util.cpu_count(),
            u'format': u'mp3',
            u'formats': {
                u'aac': {
                    u'command': u'ffmpeg -i $source -y -acodec libfaac '
                                u'-aq 100 $dest',
                    u'extension': u'm4a',
                },
                u'alac': {
                    u'command': u'ffmpeg -i $source -y -acodec alac $dest',
                    u'extension': u'm4a',
                },
                u'flac': {
                    u'command': u'ffmpeg -i $source -y -acodec flac $dest',
                    u'extension': u'flac',
                },
                u'mp3': {
                    u'command': u'ffmpeg -i $source -y -aq 2 $dest',
                    u'extension': u'mp3',
                },
                u'opus': {
                    u'command': u'ffmpeg -i $source -y -acodec libopus -vn '
                                u'-ab 96k $dest',
                    u'extension': u'opus',
                },
                u'ogg': {
                    u'command': u'ffmpeg -i $source -y -acodec libvorbis -vn '
                                u'-aq 2 $dest',
                    u'extension': u'ogg',
                },
                u'windows media': {
                    u'command': u'ffmpeg -i $source -y -acodec wmav2 '
                                u'-vn $dest',
                    u'extension': u'wma',
                },
            },
            u'max_bitrate': 500,
            u'auto': False,
            u'quiet': False,
            u'embed': True,
            u'paths': {},
        })
        self.import_stages = [self.auto_convert]
    def commands(self):
        """Expose the `convert` subcommand and its options."""
        cmd = ui.Subcommand('convert', help='convert to external location')
        cmd.parser.add_option('-a', '--album', action='store_true',
                              help='choose albums instead of tracks')
        cmd.parser.add_option('-t', '--threads', action='store', type='int',
                              help='change the number of threads, \
                              defaults to maximum available processors')
        cmd.parser.add_option('-k', '--keep-new', action='store_true',
                              dest='keep_new', help='keep only the converted \
                              and move the old files')
        cmd.parser.add_option('-d', '--dest', action='store',
                              help='set the destination directory')
        cmd.func = convert_func
        return [cmd]
    def auto_convert(self, config, task):
        """Import pipeline stage: transcode freshly imported items when the
        `auto` option is enabled.

        NOTE(review): the `config` parameter here is the import-session
        object passed by beets (it exposes `.lib`), not this module's
        global `config` — confirm against the beets plugin API.
        """
        if self.config['auto']:
            for item in task.imported_items():
                convert_on_import(config.lib, item)
@ConvertPlugin.listen('import_task_files')
def _cleanup(task, session):
    """Delete temporary transcodes left behind once the importer has
    moved a task's files to their final location.
    """
    for old in task.old_paths:
        if old not in _temp_files:
            continue
        if os.path.isfile(old):
            util.remove(old)
        _temp_files.remove(old)
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010-2011 OpenStack, LLC
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests for database migrations. This test case reads the configuration
file test_migrations.conf for database connection settings
to use in the tests. For each connection found in the config file,
the test case runs a series of test cases to ensure that migrations work
properly both upgrading and downgrading, and that no data loss occurs
if possible.
"""
import commands
import ConfigParser
import os
import urlparse
from migrate.versioning import repository
import sqlalchemy
import nova.db.migration as migration
import nova.db.sqlalchemy.migrate_repo
from nova.db.sqlalchemy.migration import versioning_api as migration_api
from nova import log as logging
from nova import test
LOG = logging.getLogger(__name__)
def _mysql_get_connect_string(user="openstack_citest",
                              passwd="openstack_citest",
                              database="openstack_citest"):
    """
    Build the SQLAlchemy URI for the canonical CI MySQL database; if a
    connection with these very specific defaults succeeds, the MySQL
    tests run, otherwise they are skipped.
    """
    return "mysql://%s:%s@localhost/%s" % (user, passwd, database)
def _is_mysql_avail(user="openstack_citest",
                    passwd="openstack_citest",
                    database="openstack_citest"):
    """Probe for a reachable MySQL server using the CI credentials.

    Returns True only when a connection can actually be opened; any
    failure at all reads as "unavailable" so MySQL-only tests are
    skipped rather than erroring out.
    """
    uri = _mysql_get_connect_string(user=user, passwd=passwd,
                                    database=database)
    try:
        # Intentional catch-all below: the MySQL driver may not even be
        # importable, and that must also count as "not available".
        connection = sqlalchemy.create_engine(uri).connect()
    except Exception:
        return False
    connection.close()
    return True
def _have_mysql():
    """Decide whether the MySQL-backed tests should run.

    If NOVA_TEST_MYSQL_PRESENT is set it is authoritative ('' and
    'true', case-insensitively, mean yes); otherwise probe the server.
    """
    flag = os.environ.get('NOVA_TEST_MYSQL_PRESENT')
    if flag is not None:
        return flag.lower() in ('', 'true')
    return _is_mysql_avail()
class TestMigrations(test.TestCase):
    """Test sqlalchemy-migrate migrations"""
    # Class-level cache of connection strings keyed by config-file name;
    # populated once by the first setUp() and shared across tests.
    TEST_DATABASES = {}
    DEFAULT_CONFIG_FILE = os.path.join(os.path.dirname(__file__),
                                       'test_migrations.conf')
    # Test machines can set the NOVA_TEST_MIGRATIONS_CONF variable
    # to override the location of the config file for migration testing
    CONFIG_FILE_PATH = os.environ.get('NOVA_TEST_MIGRATIONS_CONF',
                                      DEFAULT_CONFIG_FILE)
    # Locate the migrate repository relative to the installed package.
    MIGRATE_FILE = nova.db.sqlalchemy.migrate_repo.__file__
    REPOSITORY = repository.Repository(
                                os.path.abspath(os.path.dirname(MIGRATE_FILE)))
    def setUp(self):
        """Read DB connection config (once), build engines, and reset
        every test database to a blank slate."""
        super(TestMigrations, self).setUp()
        self.snake_walk = False
        # Load test databases from the config file. Only do this
        # once. No need to re-run this on each test...
        LOG.debug('config_path is %s' % TestMigrations.CONFIG_FILE_PATH)
        if not TestMigrations.TEST_DATABASES:
            if os.path.exists(TestMigrations.CONFIG_FILE_PATH):
                cp = ConfigParser.RawConfigParser()
                try:
                    cp.read(TestMigrations.CONFIG_FILE_PATH)
                    defaults = cp.defaults()
                    for key, value in defaults.items():
                        TestMigrations.TEST_DATABASES[key] = value
                    self.snake_walk = cp.getboolean('walk_style', 'snake_walk')
                except ConfigParser.ParsingError, e:
                    self.fail("Failed to read test_migrations.conf config "
                              "file. Got error: %s" % e)
            else:
                self.fail("Failed to find test_migrations.conf config "
                          "file.")
        self.engines = {}
        for key, value in TestMigrations.TEST_DATABASES.items():
            self.engines[key] = sqlalchemy.create_engine(value)
        # We start each test case with a completely blank slate.
        self._reset_databases()
    def tearDown(self):
        """Destroy the test data stores and drop any engine added by
        test_mysql_innodb so it does not leak into other tests."""
        # We destroy the test data store between each test case,
        # and recreate it, which ensures that we have no side-effects
        # from the tests
        self._reset_databases()
        # remove these from the list so they aren't used in the migration tests
        if "mysqlcitest" in self.engines:
            del self.engines["mysqlcitest"]
        if "mysqlcitest" in TestMigrations.TEST_DATABASES:
            del TestMigrations.TEST_DATABASES["mysqlcitest"]
        super(TestMigrations, self).tearDown()
    def _reset_databases(self):
        """Drop and recreate every configured test database (sqlite,
        mysql, postgresql) via shell commands."""
        def execute_cmd(cmd=None):
            # Run a shell command and fail the test on non-zero exit.
            status, output = commands.getstatusoutput(cmd)
            LOG.debug(output)
            self.assertEqual(0, status)
        for key, engine in self.engines.items():
            conn_string = TestMigrations.TEST_DATABASES[key]
            conn_pieces = urlparse.urlparse(conn_string)
            if conn_string.startswith('sqlite'):
                # We can just delete the SQLite database, which is
                # the easiest and cleanest solution
                db_path = conn_pieces.path.strip('/')
                if os.path.exists(db_path):
                    os.unlink(db_path)
                # No need to recreate the SQLite DB. SQLite will
                # create it for us if it's not there...
            elif conn_string.startswith('mysql'):
                # We can execute the MySQL client to destroy and re-create
                # the MYSQL database, which is easier and less error-prone
                # than using SQLAlchemy to do this via MetaData...trust me.
                database = conn_pieces.path.strip('/')
                loc_pieces = conn_pieces.netloc.split('@')
                host = loc_pieces[1]
                auth_pieces = loc_pieces[0].split(':')
                user = auth_pieces[0]
                password = ""
                if len(auth_pieces) > 1:
                    if auth_pieces[1].strip():
                        password = "-p\"%s\"" % auth_pieces[1]
                sql = ("drop database if exists %(database)s; "
                       "create database %(database)s;") % locals()
                cmd = ("mysql -u \"%(user)s\" %(password)s -h %(host)s "
                       "-e \"%(sql)s\"") % locals()
                execute_cmd(cmd)
            elif conn_string.startswith('postgresql'):
                database = conn_pieces.path.strip('/')
                loc_pieces = conn_pieces.netloc.split('@')
                host = loc_pieces[1]
                auth_pieces = loc_pieces[0].split(':')
                user = auth_pieces[0]
                password = ""
                if len(auth_pieces) > 1:
                    if auth_pieces[1].strip():
                        password = auth_pieces[1]
                # Write the password to ~/.pgpass so psql can log in
                # without prompting.
                cmd = ("touch ~/.pgpass;"
                       "chmod 0600 ~/.pgpass;"
                       "sed -i -e"
                       "'1{s/^.*$/\*:\*:\*:%(user)s:%(password)s/};"
                       "1!d' ~/.pgpass") % locals()
                execute_cmd(cmd)
                # Block new connections, kill existing ones, then drop
                # and recreate the database.
                sql = ("UPDATE pg_catalog.pg_database SET datallowconn=false "
                       "WHERE datname='%(database)s';") % locals()
                cmd = ("psql -U%(user)s -h%(host)s -c\"%(sql)s\"") % locals()
                execute_cmd(cmd)
                sql = ("SELECT pg_catalog.pg_terminate_backend(procpid) "
                       "FROM pg_catalog.pg_stat_activity "
                       "WHERE datname='%(database)s';") % locals()
                cmd = ("psql -U%(user)s -h%(host)s -c\"%(sql)s\"") % locals()
                execute_cmd(cmd)
                sql = ("drop database if exists %(database)s;") % locals()
                cmd = ("psql -U%(user)s -h%(host)s -c\"%(sql)s\"") % locals()
                execute_cmd(cmd)
                sql = ("create database %(database)s;") % locals()
                cmd = ("psql -U%(user)s -h%(host)s -c\"%(sql)s\"") % locals()
                execute_cmd(cmd)
    def test_walk_versions(self):
        """
        Walks all version scripts for each tested database, ensuring
        that there are no errors in the version scripts for each engine
        """
        for key, engine in self.engines.items():
            self._walk_versions(engine, self.snake_walk)
    def test_mysql_connect_fail(self):
        """
        Test that we can trigger a mysql connection failure and we fail
        gracefully to ensure we don't break people without mysql
        """
        if _is_mysql_avail(user="openstack_cifail"):
            self.fail("Shouldn't have connected")
    @test.skip_unless(_have_mysql(), "mysql not available")
    def test_mysql_innodb(self):
        """
        Test that table creation on mysql only builds InnoDB tables
        """
        # add this to the global lists to make reset work with it, it's removed
        # automaticaly in tearDown so no need to clean it up here.
        connect_string = _mysql_get_connect_string()
        engine = sqlalchemy.create_engine(connect_string)
        self.engines["mysqlcitest"] = engine
        TestMigrations.TEST_DATABASES["mysqlcitest"] = connect_string
        # build a fully populated mysql database with all the tables
        self._reset_databases()
        self._walk_versions(engine, False, False)
        uri = _mysql_get_connect_string(database="information_schema")
        connection = sqlalchemy.create_engine(uri).connect()
        # sanity check
        total = connection.execute("SELECT count(*) "
                                   "from information_schema.TABLES "
                                   "where TABLE_SCHEMA='openstack_citest'")
        self.assertTrue(total.scalar() > 0, "No tables found. Wrong schema?")
        # Every table except migrate_version must use the InnoDB engine.
        noninnodb = connection.execute("SELECT count(*) "
                                       "from information_schema.TABLES "
                                       "where TABLE_SCHEMA='openstack_citest' "
                                       "and ENGINE!='InnoDB' "
                                       "and TABLE_NAME!='migrate_version'")
        count = noninnodb.scalar()
        self.assertEqual(count, 0, "%d non InnoDB tables created" % count)
    def _walk_versions(self, engine=None, snake_walk=False, downgrade=True):
        """Upgrade (and optionally downgrade) through every migration
        version; snake_walk re-tests each step with a down/up bounce."""
        # Determine latest version script from the repo, then
        # upgrade from 1 through to the latest, with no data
        # in the databases. This just checks that the schema itself
        # upgrades successfully.
        # Place the database under version control
        migration_api.version_control(engine, TestMigrations.REPOSITORY,
                                      migration.INIT_VERSION)
        self.assertEqual(migration.INIT_VERSION,
                         migration_api.db_version(engine,
                                                  TestMigrations.REPOSITORY))
        migration_api.upgrade(engine, TestMigrations.REPOSITORY,
                              migration.INIT_VERSION + 1)
        LOG.debug('latest version is %s' % TestMigrations.REPOSITORY.latest)
        for version in xrange(migration.INIT_VERSION + 2,
                              TestMigrations.REPOSITORY.latest + 1):
            # upgrade -> downgrade -> upgrade
            self._migrate_up(engine, version)
            if snake_walk:
                self._migrate_down(engine, version - 1)
                self._migrate_up(engine, version)
        if downgrade:
            # Now walk it back down to 0 from the latest, testing
            # the downgrade paths.
            for version in reversed(
                xrange(migration.INIT_VERSION + 1,
                       TestMigrations.REPOSITORY.latest)):
                # downgrade -> upgrade -> downgrade
                self._migrate_down(engine, version)
                if snake_walk:
                    self._migrate_up(engine, version + 1)
                    self._migrate_down(engine, version)
    def _migrate_down(self, engine, version):
        """Downgrade to ``version`` and assert the DB reports it."""
        migration_api.downgrade(engine,
                                TestMigrations.REPOSITORY,
                                version)
        self.assertEqual(version,
                         migration_api.db_version(engine,
                                                  TestMigrations.REPOSITORY))
    def _migrate_up(self, engine, version):
        """Upgrade to ``version`` and assert the DB reports it."""
        migration_api.upgrade(engine,
                              TestMigrations.REPOSITORY,
                              version)
        self.assertEqual(version,
                         migration_api.db_version(engine,
                                                  TestMigrations.REPOSITORY))
| |
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
"""
A plugin for generating WCS axes overlay in the loaded image.
**Plugin Type: Local**
``WCSAxes`` is a local plugin, which means it is associated with a channel.
An instance can be opened for each channel.
**Usage**
As long as image as a valid WCS, WCS axes will be displayed.
Use plugin GUI or configuration file to customize axes display.
"""
import numpy as np
from ginga import colors
from ginga.GingaPlugin import LocalPlugin
from ginga.gw import Widgets
__all__ = ['WCSAxes']
class WCSAxes(LocalPlugin):
    """Local plugin drawing a WCS (RA/DEC) axes overlay on a channel."""
    def __init__(self, fv, fitsimage):
        # superclass defines some variables for us, like logger
        super(WCSAxes, self).__init__(fv, fitsimage)
        self.layertag = 'wcsaxes-canvas'
        self.colornames = colors.get_colors()
        self.linestyles = ['solid', 'dash']
        # Pull display defaults from the per-plugin preferences file.
        prefs = self.fv.get_preferences()
        self.settings = prefs.create_category('plugin_WCSAxes')
        self.settings.add_defaults(linecolor='cyan', alpha=1,
                                   linestyle='solid', linewidth=1,
                                   n_ra_lines=10, n_dec_lines=10,
                                   show_label=True, fontsize=8, label_offset=4)
        self.settings.load(onError='silent')
        linecolor = self.settings.get('linecolor', 'cyan')
        alpha = self.settings.get('alpha', 1)
        linestyle = self.settings.get('linestyle', 'solid')
        linewidth = self.settings.get('linewidth', 1)
        num_ra = self.settings.get('n_ra_lines', 10)
        num_dec = self.settings.get('n_dec_lines', 10)
        show_label = self.settings.get('show_label', True)
        fontsize = self.settings.get('fontsize', 8)
        txt_off = self.settings.get('label_offset', 4)
        # Private drawing canvas holding a single WCSAxes compound object.
        self.dc = fv.get_draw_classes()
        canvas = self.dc.DrawingCanvas()
        canvas.enable_draw(False)
        canvas.set_surface(self.fitsimage)
        self.canvas = canvas
        self.axes = self.dc.WCSAxes(
            linewidth=linewidth, linestyle=linestyle, color=linecolor,
            alpha=alpha, fontsize=fontsize)
        self.axes.num_ra = num_ra
        self.axes.num_dec = num_dec
        self.axes.show_label = show_label
        self.axes.txt_off = txt_off
        self.canvas.add(self.axes)
        self.gui_up = False
    def build_gui(self, container):
        """Construct the plugin GUI: General, Lines and Labels frames
        plus Close/Help buttons; widget refs are collected in self.w."""
        top = Widgets.VBox()
        top.set_border_width(4)
        vbox, sw, orientation = Widgets.get_oriented_box(container,
            orientation=self.settings.get('orientation', None))
        vbox.set_border_width(4)
        vbox.set_spacing(2)
        fr = Widgets.Frame('General')
        captions = (('Line color:', 'label', 'Line colors', 'combobox'),
                    ('Alpha:', 'label', 'Alpha', 'entryset'))
        w, b = Widgets.build_info(captions, orientation=orientation)
        self.w.update(b)
        combobox = b.line_colors
        for name in self.colornames:
            combobox.append_text(name)
        combobox.set_index(self.colornames.index(self.axes.color))
        combobox.add_callback('activated', self.set_linecolor_cb)
        b.alpha.set_text(str(self.axes.alpha))
        b.alpha.set_tooltip('Line transparency (alpha)')
        b.alpha.add_callback('activated', lambda *args: self.set_alpha())
        fr.set_widget(w)
        vbox.add_widget(fr, stretch=0)
        fr = Widgets.Frame('Lines')
        captions = (('Line style:', 'label', 'Line styles', 'combobox'),
                    ('# RA lines:', 'label', 'Num RA', 'entryset'),
                    ('# DEC lines:', 'label', 'Num DEC', 'entryset'))
        w, b = Widgets.build_info(captions, orientation=orientation)
        self.w.update(b)
        combobox = b.line_styles
        for name in self.linestyles:
            combobox.append_text(name)
        combobox.set_index(self.linestyles.index(self.axes.linestyle))
        combobox.add_callback('activated', self.set_linestyle_cb)
        b.num_ra.set_text(str(self.axes.num_ra))
        b.num_ra.set_tooltip('Number of lines drawn for RA')
        b.num_ra.add_callback('activated', lambda *args: self.set_num_ra())
        b.num_dec.set_text(str(self.axes.num_dec))
        b.num_dec.set_tooltip('Number of lines drawn for DEC')
        b.num_dec.add_callback('activated', lambda *args: self.set_num_dec())
        fr.set_widget(w)
        vbox.add_widget(fr, stretch=0)
        fr = Widgets.Frame('Labels')
        captions = (('Show label', 'checkbutton'),
                    ('Font size:', 'label', 'Font size', 'entryset'),
                    ('Text offset:', 'label', 'Text offset', 'entryset'),
                    ('RA angle:', 'label', 'RA angle', 'entryset'),
                    ('DEC angle:', 'label', 'DEC angle', 'entryset'))
        w, b = Widgets.build_info(captions, orientation=orientation)
        self.w.update(b)
        b.show_label.set_state(self.axes.show_label)
        b.show_label.set_tooltip('Show/hide label')
        b.show_label.add_callback('activated', self.toggle_label_cb)
        b.font_size.set_text(str(self.axes.fontsize))
        b.font_size.set_tooltip('Labels font size')
        b.font_size.add_callback(
            'activated', lambda *args: self.set_fontsize())
        b.text_offset.set_text(str(self.axes.txt_off))
        b.text_offset.set_tooltip('Labels text offset in pixels')
        b.text_offset.add_callback(
            'activated', lambda *args: self.set_txt_off())
        b.ra_angle.set_text(str(self.axes.ra_angle))
        b.ra_angle.set_tooltip('Orientation in deg of RA labels')
        b.ra_angle.add_callback('activated', lambda *args: self.set_ra_angle())
        b.dec_angle.set_text(str(self.axes.dec_angle))
        b.dec_angle.set_tooltip('Orientation in deg of DEC labels')
        b.dec_angle.add_callback(
            'activated', lambda *args: self.set_dec_angle())
        fr.set_widget(w)
        vbox.add_widget(fr, stretch=0)
        top.add_widget(sw, stretch=1)
        btns = Widgets.HBox()
        btns.set_spacing(3)
        btn = Widgets.Button("Close")
        btn.add_callback('activated', lambda w: self.close())
        btns.add_widget(btn, stretch=0)
        btn = Widgets.Button("Help")
        btn.add_callback('activated', lambda w: self.help())
        btns.add_widget(btn, stretch=0)
        btns.add_widget(Widgets.Label(''), stretch=1)
        top.add_widget(btns, stretch=0)
        container.add_widget(top, stretch=1)
        self.gui_up = True
    def redo(self):
        """Refresh GUI fields that the drawing may have recomputed."""
        if not self.gui_up:
            return
        # Need this here so GUI accurately updates values from drawing.
        self.w.ra_angle.set_text(str(self.axes.ra_angle))
        self.w.dec_angle.set_text(str(self.axes.dec_angle))
    def set_linecolor_cb(self, w, index):
        """Combobox callback: apply the chosen grid line color."""
        self.axes.color = self.colornames[index]
        self.axes.sync_state()
        self.canvas.update_canvas()
        return True
    def set_alpha(self):
        """Apply the alpha entry; revert the field on invalid input."""
        try:
            a = float(self.w.alpha.get_text())
            if (a < 0) or (a > 1):
                raise ValueError
        except ValueError:
            self.w.alpha.set_text(str(self.axes.alpha))
        else:
            self.axes.alpha = a
            self.axes.sync_state()
            self.canvas.update_canvas()
        return True
    def set_linestyle_cb(self, w, index):
        """Combobox callback: apply the chosen grid line style."""
        self.axes.linestyle = self.linestyles[index]
        self.axes.sync_state()
        self.canvas.update_canvas()
        return True
    def set_num_ra(self):
        """Apply the RA line count (1-50); revert on invalid input."""
        try:
            n = int(self.w.num_ra.get_text())
            if n < 1 or n > 50:
                raise ValueError
        except ValueError:
            self.w.num_ra.set_text(str(self.axes.num_ra))
        else:
            self.axes.num_ra = n
            self.axes._cur_image = None  # Force redraw
            self.canvas.update_canvas()
        return True
    def set_num_dec(self):
        """Apply the DEC line count (1-50); revert on invalid input."""
        try:
            n = int(self.w.num_dec.get_text())
            if n < 1 or n > 50:
                raise ValueError
        except ValueError:
            self.w.num_dec.set_text(str(self.axes.num_dec))
        else:
            self.axes.num_dec = n
            self.axes._cur_image = None  # Force redraw
            self.canvas.update_canvas()
        return True
    def toggle_label_cb(self, w, val):
        """Checkbutton callback: show or hide the coordinate labels."""
        self.axes.show_label = val
        # Toggling label off and switch image causes axes not to have labels
        # at all, which causes toggling it back on to not work without complete
        # rebuild.
        if (val and not np.any([obj.kind == 'text'
                                for obj in self.axes.objects])):
            self.axes._cur_image = None  # Force redraw
        else:
            self.axes.sync_state()
        self.canvas.update_canvas()
    def set_fontsize(self):
        """Apply the label font size (8-72); revert on invalid input."""
        try:
            val = int(self.w.font_size.get_text())
            if val < 8 or val > 72:
                raise ValueError
        except ValueError:
            self.w.font_size.set_text(str(self.axes.fontsize))
        else:
            self.axes.fontsize = val
            self.axes.sync_state()
            self.canvas.update_canvas()
        return True
    def set_txt_off(self):
        """Apply the label pixel offset; revert on invalid input."""
        try:
            val = int(self.w.text_offset.get_text())
            if abs(val) > 50:  # No point putting the label so far away
                raise ValueError
        except ValueError:
            self.w.text_offset.set_text(str(self.axes.txt_off))
        else:
            self.axes.txt_off = val
            self.axes._cur_image = None  # Force redraw
            self.canvas.update_canvas()
        return True
    def set_ra_angle(self):
        """Apply the RA label angle; 'none' means auto-orient."""
        s = self.w.ra_angle.get_text()
        if s.lower() == 'none':
            a = None
        else:
            try:
                a = float(s)
            except ValueError:
                self.w.ra_angle.set_text(str(self.axes.ra_angle))
                return
        self.axes.ra_angle = a
        self.axes.sync_state()
        self.canvas.update_canvas()
        return True
    def set_dec_angle(self):
        """Apply the DEC label angle; 'none' means auto-orient."""
        s = self.w.dec_angle.get_text()
        if s.lower() == 'none':
            a = None
        else:
            try:
                a = float(s)
            except ValueError:
                self.w.dec_angle.set_text(str(self.axes.dec_angle))
                return
        self.axes.dec_angle = a
        self.axes.sync_state()
        self.canvas.update_canvas()
        return True
    def close(self):
        """Ask the shell to stop this plugin instance."""
        self.fv.stop_local_plugin(self.chname, str(self))
        return True
    def start(self):
        """Install our overlay canvas into the channel viewer."""
        # insert canvas, if not already
        p_canvas = self.fitsimage.get_canvas()
        try:
            p_canvas.get_object_by_tag(self.layertag)
        except KeyError:
            # Add ruler layer
            p_canvas.add(self.canvas, tag=self.layertag)
        self.resume()
    def stop(self):
        """Remove the overlay canvas and release image references."""
        # so we don't hang on to a large image
        self.axes._cur_image = None
        # remove the canvas from the image
        p_canvas = self.fitsimage.get_canvas()
        try:
            p_canvas.delete_object_by_tag(self.layertag)
        except Exception:
            pass
        self.gui_up = False
        self.fv.show_status("")
    def __str__(self):
        # Lowercase plugin name; must match the module name for the
        # shell to locate this plugin.
        return 'wcsaxes'
# Append module docstring with config doc for auto insert by Sphinx.
from ginga.util.toolbox import generate_cfg_example  # noqa
# __doc__ is None when docstrings are stripped (python -OO); guard so
# the in-place append does not raise a TypeError in that mode.
if __doc__ is not None:
    __doc__ += generate_cfg_example('plugin_WCSAxes', package='ginga')
# END
| |
# Copyright 2004 by James Casbon. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Code to deal with COMPASS output, a program for profile/profile comparison.
Compass is described in:
Sadreyev R, Grishin N. COMPASS: a tool for comparison of multiple protein
alignments with assessment of statistical significance. J Mol Biol. 2003 Feb
7;326(1):317-36.
Tested with COMPASS 1.24.
"""
import re
def read(handle):
    """Read a COMPASS file containing one COMPASS record."""
    record = None
    # Header: five fixed lines (names, threshold, lengths, profile
    # widths, scores). Record() is created only after the first line is
    # successfully fetched, so an empty stream reports "No record".
    try:
        first = next(handle)
        record = Record()
        __read_names(record, first)
        __read_threshold(record, next(handle))
        __read_lengths(record, next(handle))
        __read_profilewidth(record, next(handle))
        __read_scores(record, next(handle))
    except StopIteration:
        if not record:
            raise ValueError("No record found in handle")
        raise ValueError("Unexpected end of stream.")
    # Alignment body: rows come in triples (query / positives / hit);
    # blank lines between triples are skipped.
    for line in handle:
        if not line.strip():
            continue
        __read_query_alignment(record, line)
        try:
            __read_positive_alignment(record, next(handle))
            __read_hit_alignment(record, next(handle))
        except StopIteration:
            raise ValueError("Unexpected end of stream.")
    return record
def parse(handle):
    """Iterates over records in a COMPASS file."""
    record = None
    # Prime the loop with the first line; an empty handle yields nothing.
    try:
        line = next(handle)
    except StopIteration:
        return
    while True:
        # Header: five fixed lines describing one hit.
        try:
            record = Record()
            __read_names(record, line)
            line = next(handle)
            __read_threshold(record, line)
            line = next(handle)
            __read_lengths(record, line)
            line = next(handle)
            __read_profilewidth(record, line)
            line = next(handle)
            __read_scores(record, line)
        except StopIteration:
            raise ValueError("Unexpected end of stream.")
        # Alignment rows come in triples (query / positives / hit).
        # An "Ali1:" line marks the start of the NEXT record: emit the
        # current one and break back to the while-loop with that line.
        for line in handle:
            if not line.strip():
                continue
            if "Ali1:" in line:
                yield record
                break
            __read_query_alignment(record, line)
            try:
                line = next(handle)
                __read_positive_alignment(record, line)
                line = next(handle)
                __read_hit_alignment(record, line)
            except StopIteration:
                raise ValueError("Unexpected end of stream.")
        else:
            # for-else: the stream ended without another "Ali1:" line,
            # so this was the last record; the break exits the while.
            yield record
            break
class Record(object):
    """Hold information from one compass hit.
    Ali1 is the query, Ali2 the hit.
    """
    def __init__(self):
        # Identification of the two aligned profiles.
        self.query = ''
        self.hit = ''
        self.gap_threshold = 0
        # Query (Ali1) profile statistics.
        self.query_length = 0
        self.query_filtered_length = 0
        self.query_nseqs = 0
        self.query_neffseqs = 0
        # Hit (Ali2) profile statistics.
        self.hit_length = 0
        self.hit_filtered_length = 0
        self.hit_nseqs = 0
        self.hit_neffseqs = 0
        # Scores; evalue == -1 means "not reported".
        self.sw_score = 0
        self.evalue = -1
        # Alignment strings and their start coordinates.
        self.query_start = -1
        self.hit_start = -1
        self.query_aln = ''
        self.hit_aln = ''
        self.positives = ''
    def query_coverage(self):
        """Return the length of the query covered in the alignment."""
        # '=' marks unaligned positions, so count everything else.
        return len(self.query_aln) - self.query_aln.count("=")
    def hit_coverage(self):
        """Return the length of the hit covered in the alignment."""
        return len(self.hit_aln) - self.hit_aln.count("=")
# Everything below is private
# Pre-compiled patterns for COMPASS's fixed-format output. Raw string
# literals are used so sequences like \s and \d are not (invalid)
# string escapes; the compiled patterns are unchanged.
__regex = {"names": re.compile(r"Ali1:\s+(\S+)\s+Ali2:\s+(\S+)\s+"),
           "threshold": re.compile(r"Threshold of effective gap content in columns: (\S+)"),
           "lengths": re.compile(r"length1=(\S+)\s+filtered_length1=(\S+)\s+length2=(\S+)\s+filtered_length2=(\S+)"),
           "profilewidth": re.compile(r"Nseqs1=(\S+)\s+Neff1=(\S+)\s+Nseqs2=(\S+)\s+Neff2=(\S+)"),
           "scores": re.compile(r"Smith-Waterman score = (\S+)\s+Evalue = (\S+)"),
           "start": re.compile(r"(\d+)"),
           # Alignment rows: 15-column label margin, then the sequence.
           "align": re.compile(r"^.{15}(\S+)"),
           "positive_alignment": re.compile(r"^.{15}(.+)"),
           }
def __read_names(record, line):
    """Parse the query and hit profile names from the 'Ali1:' line."""
    # Ali1: 60456.blo.gz.aln     Ali2: allscop//14984.blo.gz.aln
    #       ------query-----          -------hit-------------
    if "Ali1:" not in line:
        raise ValueError("Line does not contain 'Ali1:':\n%s" % line)
    match = __regex["names"].search(line)
    record.query, record.hit = match.group(1), match.group(2)
def __read_threshold(record, line):
    """Parse the effective-gap-content threshold line."""
    if not line.startswith("Threshold"):
        raise ValueError("Line does not start with 'Threshold':\n%s" % line)
    match = __regex["threshold"].search(line)
    record.gap_threshold = float(match.group(1))
def __read_lengths(record, line):
    """Parse raw and filtered profile lengths for query and hit."""
    if not line.startswith("length1="):
        raise ValueError("Line does not start with 'length1=':\n%s" % line)
    q_len, q_filt, h_len, h_filt = __regex["lengths"].search(line).groups()
    record.query_length = int(q_len)
    record.query_filtered_length = float(q_filt)
    record.hit_length = int(h_len)
    record.hit_filtered_length = float(h_filt)
def __read_profilewidth(record, line):
    """Parse sequence counts and effective sequence counts."""
    if "Nseqs1" not in line:
        raise ValueError("Line does not contain 'Nseqs1':\n%s" % line)
    nseq1, neff1, nseq2, neff2 = __regex["profilewidth"].search(line).groups()
    record.query_nseqs = int(nseq1)
    record.query_neffseqs = float(neff1)
    record.hit_nseqs = int(nseq2)
    record.hit_neffseqs = float(neff2)
def __read_scores(record, line):
    """Parse the Smith-Waterman score and E-value, when present.

    Some outputs omit the values on this line; the record then keeps
    the "no score" sentinels (0, -1.0).
    """
    if not line.startswith("Smith-Waterman"):
        raise ValueError("Line does not start with 'Smith-Waterman':\n%s" % line)
    match = __regex["scores"].search(line)
    if match is None:
        record.sw_score = 0
        record.evalue = -1.0
    else:
        record.sw_score = int(match.group(1))
        record.evalue = float(match.group(2))
def __read_query_alignment(record, line):
    """Append one query alignment row; record its start coordinate.

    The start is overwritten on every row that carries a number, so the
    last row parsed wins.
    """
    start = __regex["start"].search(line)
    if start:
        record.query_start = int(start.group(1))
    aligned = __regex["align"].match(line)
    assert aligned is not None, "invalid match"
    record.query_aln += aligned.group(1)
def __read_positive_alignment(record, line):
    """Append one row of the positives (middle) alignment line."""
    matched = __regex["positive_alignment"].match(line)
    assert matched is not None, "invalid match"
    record.positives += matched.group(1)
def __read_hit_alignment(record, line):
    """Append one hit alignment row; record its start coordinate.

    As with the query rows, the last numbered row parsed wins.
    """
    start = __regex["start"].search(line)
    if start:
        record.hit_start = int(start.group(1))
    aligned = __regex["align"].match(line)
    assert aligned is not None, "invalid match"
    record.hit_aln += aligned.group(1)
| |
"""
Data Objects for sending to/from user interface.
The objects are pickable so that they can be sent between processes.
Therefore they contain no logic except for static factory methods.
"""
import util
import messages
import entities
## Objects to be sent through interface
## Are *Pickable* for multiprocessing
## Contains minimal logic
class Complaint(object):
    """An object representing a single complaint.
    Attributes:
        coordinates: A 2-tuple of integers
        agent_id: The unique identifier of the agent complaining
    """
    @classmethod
    def create(c, agent, world, vote):
        """Factory method for creating direct Complaint objects.
        Parameters:
            agent: The agent.IdentifyingAgent who cast the vote
            world: A world.Map instance
            vote: A vote.Vote instance
        """
        complaint = c()
        structure = world.get_structure()
        complaint.coordinates = structure.get_cell_position(vote.cell)
        complaint.agent_id = agent.get_id()
        return complaint
class InterfaceConfig(object):
    """Configuration information for the simulation run that the interface
    needs to know about.
    Attributes:
        print_messages: Boolean
    """
    @classmethod
    def from_dictionary(c, d):
        """Build an InterfaceConfig from a raw configuration mapping."""
        cfg = c()
        cfg.print_messages = d["print messages"]
        return cfg
class Simulation(object):
    """Complete information about the simulation.
    Attributes:
        map: The world map
        fishermen: A list of fisherman agent objects
        aquaculture_agents: A list of aquaculture agent objects
        civilians: A list of civilians
        tourists: A list of tourists
        interface_config: An InterfaceConfig instance
        num_max_complaints: Integer
    """
    @classmethod
    def from_simulation_info(c, info, cfg):
        """Snapshot the running simulation into a pickable DO."""
        def agents_as(kind, factory):
            # Convert every directory agent of the given kind to its DO.
            return map(factory, info.directory.get_agents(type = kind))
        obj = c()
        obj.map = Map.from_world_map(info.map)
        obj.fishermen = agents_as(entities.Fisherman, Fisherman.from_object)
        obj.aquacultures = agents_as(entities.Aquaculture,
                                     Aquaculture.from_object)
        obj.civilians = agents_as(entities.Civilian, Civilian.from_object)
        obj.tourists = agents_as(entities.Tourist, Tourist.from_object)
        obj.interface_config = InterfaceConfig.from_dictionary(cfg["interface"])
        # Missing/zero config entries fall back to 1 so the product is
        # always at least 1.
        complaints_per_round = cfg["global"]["num max complaints"] or 1
        hearing_rounds = cfg["global"]["max hearing rounds"] or 1
        obj.num_max_complaints = complaints_per_round * hearing_rounds
        return obj
class WorkingAgent(object):
    """Working agents have names and capital.
    Attributes:
        id: Unique identifier string
        capital: Float representation of capital
    """
    @classmethod
    def from_object(c, object):
        """Snapshot an agent's identifier and capital into a DO.

        (The parameter name shadows the builtin but is kept for
        keyword-call compatibility.)
        """
        agent = c()
        agent.id = object.get_id()
        agent.capital = object.capital
        return agent
class Fisherman(WorkingAgent):
    """Fisherman data object; carries the WorkingAgent attributes."""
    pass
class Civilian(WorkingAgent):
    """Civilian data object; carries the WorkingAgent attributes."""
    pass
class Tourist(WorkingAgent):
    """Tourist data object; carries the WorkingAgent attributes."""
    pass
class Aquaculture(WorkingAgent):
    """Aquaculture data object; carries the WorkingAgent attributes."""
    pass
class Map(object):
    """A structured container for all the cells in the world.
    Attributes:
        grid: A two-dimensional list of cells
    """
    def __init__(self, grid):
        self.grid = grid
    @classmethod
    def from_world_map(c, world_map, cells=None):
        """Build a Map DO from a world map.

        Only slots listed in ``cells`` are converted (all of them when
        ``cells`` is None); unchanged slots are sent as None.
        """
        rows = []
        for row in world_map.get_structure().get_grid():
            converted = []
            for cell in row:
                if cells is None or cell in cells:
                    converted.append(Slot.from_world_slot(cell))
                else:
                    converted.append(None)
            rows.append(converted)
        return c(rows)
class Slot(object):
    """
    Attributes:
        spawning        Boolean
        aquaculture     Boolean
        fisherman       Boolean
        land            Boolean
        blocked         Boolean
        fishermen       List<Fisherman>
        num_fishermen   Integer
        quality         Float
    """
    def __str__(self):
        flags = {
            "Land": self.land,
            "Spawn": self.spawning,
            "Aqltr": self.aquaculture,
            "Fisher": self.fisherman,
            "Blocked": self.blocked
        }
        parts = ["%s: %s" % (name, "YES" if flags[name] else "NO")
                 for name in flags]
        return "CELL[%s]" % ", ".join(parts)
    @classmethod
    def from_world_slot(c, world_slot):
        """Snapshot a world slot into a plain, pickable Slot."""
        occupants = world_slot.get_occupants()
        obj = c()
        obj.spawning = world_slot.fish_spawning()
        obj.aquaculture = any(
            o.__class__ is entities.Aquaculture for o in occupants)
        obj.fisherman = any(
            o.__class__ is entities.Fisherman for o in occupants)
        obj.land = world_slot.is_land()
        obj.blocked = world_slot.is_blocked()
        obj.fishermen = [Fisherman.from_object(o) for o in occupants
                         if o.__class__ == entities.Fisherman]
        # num_fishermen counts *all* occupants whenever at least one
        # fisherman is present, otherwise zero.
        obj.num_fishermen = len(occupants) if obj.fisherman else 0
        obj.quality = world_slot.get_fish_quantity()
        return obj
class Message(object):
    """Direct object representation of Message
    Attributes:
        sender: String
        recipient: String or None
        recipients: List of Strings or None
        contents: String
        type: String: "broadcast" or "single"
    """
    def __str__(self):
        if self.type == "single":
            who = "Recipient: %s" % self.recipient
        else:
            # At most four recipients are shown, with an ellipsis when
            # the list was truncated.
            shown = util.smart_line_sep(self.recipients[:4],
                                        ", ", 70, "\n" + " "*12)
            suffix = "..." if len(self.recipients) > 4 else ""
            who = "Recipients:%s" % (shown + suffix)
        return "Message:\n\t" + '\n\t'.join([
            "Type: %s" % self.type,
            "Sender: %s" % self.sender,
            who,
            "Time: %d" % self.timestamp,
            "Contents: %s" % self.contents
        ])
    @classmethod
    def from_message(c, world_map, msg):
        """Convert a messages.Message into this pickable DO."""
        meta = msg.metainfo
        assert meta.type in ["broadcast", "single"], \
            "Unknown message type: %s" % meta.type
        message = c()
        message.sender = meta.source.get_id()
        message.type = meta.type
        if message.type == "broadcast":
            message.recipients = [a.get_id() for a in meta.targets]
            message.recipient = None
        else:
            message.recipient = meta.target.get_id()
            message.recipients = None
        # NOTE(review): reads ``metainfo.timestmap`` (sic) -- presumably
        # a shared typo for "timestamp" in the messages module; confirm
        # there before renaming either side.
        message.timestamp = meta.timestmap
        message.contents = msg.get_str_summary(world_map)
        return message
class PhaseReport(object):
    """
    Public members:
        phase       String
        messages    List<Message>
        map         Map
        new_round:  Boolean
        data:       A field where non-standard component data can be sent.
                    Used through the phases module.
        next_phase: String
        round:      Integer representing the number of the current round
        complaints: List<Complaint>
    """
    @classmethod
    def from_step_result(c, result, next):
        """Convert one phase step result into a pickable report.

        (``next`` shadows the builtin but is kept for keyword-call
        compatibility.)
        """
        report = c()
        report.phase = result.phase.name
        report.messages = [Message.from_message(result.world_map, m)
                           for m in result.messages]
        # Only changed cells are serialized; the rest become None.
        report.map = Map.from_world_map(result.world_map,
                                        cells=result.cells_changed)
        report.data = result.data
        report.next_phase = next
        complaints = []
        for agent in result.votes:
            for vote in result.votes[agent]:
                if vote.is_complaint():
                    complaints.append(
                        Complaint.create(agent, result.world_map, vote))
        report.complaints = complaints
        report.round = result.round_number
        return report
| |
# -*- coding: utf-8 -*-
"""
Created on Sat Feb 28 22:08:40 2015
@author: alek
"""
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot, matplotlib.cm, matplotlib.ticker, matplotlib.font_manager
import scipy.integrate, numpy, csv, math
import pandas as pd
from datetime import datetime, timedelta
import nrlmsise_00_header, nrlmsise_00
from utilities import GM, Arrow3D
import pytz
from astropy import coordinates as coord
from astropy import units as u
from astropy.time import Time
def readEGM96Coefficients():
    """ Read the EGM96 gravitational field model coefficients from the
    EGM96coefficients file (expected in the current working directory) and
    parse them to be used with the gravitational potential functions.

    Returns
    ----------
    2-tuple of the C and S coefficients of the EGM96 model. They are stored in
    dictionaries of lists. The keys are degrees of the potential expansion and
    the values of the list entries are the coefficients corresponding to the
    orders for the expansion to a given degree.

    Reference
    ----------
    EGM96 coefficients have been downloaded from:
        ftp://cddis.gsfc.nasa.gov/pub/egm96/general_info/readme.egm96
    """
    " Read the coefficients. "
    degrees = []
    orders = []
    CcoeffsTemp = []
    ScoeffsTemp = []
    with open("EGM96coefficients", "r") as egm96file:
        for line in egm96file:
            # str.split() with no argument collapses runs of whitespace, so the
            # variable-width separators in the file (" ", "  ", ...) are handled
            # robustly. The previous csv.reader with a single-space delimiter
            # produced empty fields, making fixed column indices unreliable.
            fields = line.split()
            if len(fields) < 4:
                continue  # Skip blank or malformed lines.
            degrees.append(int(fields[0]))
            orders.append(int(fields[1]))
            CcoeffsTemp.append(float(fields[2]))
            ScoeffsTemp.append(float(fields[3]))

    " Parse C and S coefficients to an easily usable format. "
    # One list of coefficients per degree, indexed by order. Degrees 0 and 1
    # are fixed for a spherical Earth: C00=1; C10, C11 and S11 are 0 because
    # the expansion origin is at the geocentre.
    Ccoeffs = {0: [1], 1: [0, 0]}
    Scoeffs = {0: [0], 1: [0, 0]}
    for degree in degrees:  # Initialise empty lists.
        Ccoeffs[degree] = []
        Scoeffs[degree] = []
    # The file is ordered by degree then order, so appending in file order
    # preserves the order index within each degree's list.
    for degree, c, s in zip(degrees, CcoeffsTemp, ScoeffsTemp):
        Ccoeffs[degree].append(c)
        Scoeffs[degree].append(s)
    return Ccoeffs, Scoeffs
#TODO RK4 causes some numerical oscillation and dissipation in progapation, even in two-body problem. Need something better.
def RungeKutta4(X, t, dt, rateOfChangeFunction):
    """ Propagate a system of differential equations by one step of the
    classic fourth-order Runge-Kutta method.

    Arguments
    ----------
    X - numpy.ndarray of shape (1,6) with three Cartesian positions and three
        velocities in an inertial reference frame in metres and metres per
        second, respectively.
    t - datetime, UTC epoch at which state X is defined.
    dt - float, epoch increment to which the state X is to be propagated,
        in seconds.
    rateOfChangeFunction - callable(X, t) returning a numpy.ndarray with the
        rates of change of all the components of state X.

    Returns
    ----------
    numpy.ndarray of the same shape as X with the state propagated to
    epoch t+dt.
    """
    halfStep = timedelta(seconds=dt / 2.)
    fullStep = timedelta(seconds=dt)
    # Four slope estimates: start, twice at the midpoint, and at the end.
    k1 = dt * rateOfChangeFunction(X, t)
    k2 = dt * rateOfChangeFunction(X + 0.5 * k1, t + halfStep)
    k3 = dt * rateOfChangeFunction(X + 0.5 * k2, t + halfStep)
    k4 = dt * rateOfChangeFunction(X + k3, t + fullStep)
    # Weighted average with the standard 1,2,2,1 weights.
    return X + (k1 + 2. * (k2 + k3) + k4) / 6.
"""
===============================================================================
PROPAGATION FUNCTIONS.
===============================================================================
"""
def calculateGeocentricLatLon(stateVec, epoch):
    """ Calculate the geocentric co-latitude (measured from the north pole, not
    the equator), longitude and radius corresponding to the state vector given
    in an inertial frame at a certain epoch.
    Arguments
    ----------
    stateVec - numpy.ndarray of shape (1,6) with three Cartesian positions and
        three velocities in an inertial reference frame in metres and metres
        per second, respectively.
    epoch - datetime with the UTC epoch corresponding to the stateVec.
    Returns
    ----------
    3-tuple of floats with geocentric co-latitude (rad), longitude (rad) and
    radius in the distance units of stateVec.
    References
    ----------
    Conversions taken from:
    http://agamenon.tsc.uah.es/Asignaturas/it/rd/apuntes/RxControl_Manual.pdf
    """
    # Get the state vector and epoch in astropy's formats.
    epochAstro = Time(epoch, scale='utc', format='datetime')
    stateVecAstro = coord.CartesianRepresentation(x=stateVec[0], y=stateVec[1],
                                                  z=stateVec[2], unit=u.m)
    # Convert from the inertial reference frame (assume GCRS, which is practically
    # the same as J2000) to Earth-fixed ITRS.
    stateVec_GCRS = coord.GCRS(stateVecAstro, obstime=epochAstro)
    stateVec_ITRS = stateVec_GCRS.transform_to(coord.ITRS(obstime=epochAstro))
    loc = coord.EarthLocation.from_geocentric(*stateVec_ITRS.cartesian.xyz, unit=u.m)
    # The radius is invariant under the frame rotation, so take it straight
    # from the inertial position components.
    r = numpy.linalg.norm(stateVec[:3])
    # Convert latitude (from the equator) to co-latitude (from the north pole).
    # NOTE(review): EarthLocation.lat is geodetic (ellipsoidal), not strictly
    # geocentric — confirm the difference is acceptable for the geopotential use.
    colat = math.pi/2.0 - loc.lat.to_value(u.rad)
    lon = loc.lon.to_value(u.rad)
    return colat, lon, r
def calculateDragAcceleration(stateVec, epoch, satMass):
    """ Calculate the acceleration due to atmospheric drag acting on the
    satellite at a given state (3 positions and 3 velocities) and epoch.
    Use the NRLMSISE-00 atmospheric model with globally defined solar activity
    proxies:
        F10_7A - 81-day average F10.7.
        F10_7 - daily F10.7 for the previous day.
        MagneticIndex - daily magnetic index AP.
        NRLMSISEaph - nrlmsise_00_header.ap_array with magnetic values.
    Arguments
    ----------
    stateVec - numpy.ndarray of shape (1,6) with three Cartesian positions and
        three velocities in an inertial reference frame in metres and metres
        per second, respectively.
    epoch - datetime corresponding to the UTC epoch at which the rate of change
        is to be computed.
    satMass - float, satellite mass in kg.
    Returns
    ----------
    numpy.ndarray of shape (1,3) with three Cartesian components of the
    acceleration in m/s2 given in an inertial reference frame.
    """
    " Prepare the atmospheric density model inputs. "
    #TODO - calculate the altitude, latitude, longitude in drag calculation
    altitude_km = numpy.linalg.norm(stateVec[:3])/1000.0  #TODO this isn't altitude in km, but radius in km. Is this OK?
    NRLMSISEinput = nrlmsise_00_header.nrlmsise_input(year=0, doy=0, sec=0.0,  #TODO should account for the actual epoch in drag calculation...
                                                      alt=altitude_km, g_lat=0.0, g_long=0.0,  #TODO should account for the geodetic latitude and longitude in the drag calculation...
                                                      lst=0.0, f107A=F10_7A, f107=F10_7,
                                                      ap=MagneticIndex, ap_a=NRLMSISEaph)
    nrlmsise_00_header.lstCalc(NRLMSISEinput)  # Calculate the local solar time.

    " Use the calculated atmospheric density to compute the drag force. "
    NRLMSISEoutpt = nrlmsise_00_header.nrlmsise_output()
    nrlmsise_00.gtd7(NRLMSISEinput, NRLMSISEflags, NRLMSISEoutpt)
    atmosphericDensity = NRLMSISEoutpt.d[5]/1000.0  # Change from gm/cm3 to kg/m3
    # BUGFIX: drag opposes the velocity vector: a = -0.5*rho*Cd*A/m * |v| * v.
    # The previous componentwise numpy.power(v, 2) squared away the sign of
    # each velocity component, so the force pointed the wrong way whenever a
    # velocity component was negative.
    # NOTE(review): ideally the velocity relative to the co-rotating atmosphere
    # should be used here, not the inertial velocity.
    speed = numpy.linalg.norm(stateVec[3:])
    dragForce = -0.5*atmosphericDensity*dragArea*Cd*speed*stateVec[3:]  # Drag force in Newtons.
    return dragForce/satMass
def calculateGravityAcceleration(stateVec, epoch, useGeoid):
    """ Calculate the acceleration due to gravity acting on the satellite at
    a given state (3 positions and 3 velocities). Ignore satellite's mass,
    i.e. use a restricted two-body problem.
    Arguments
    ----------
    stateVec - numpy.ndarray of shape (1,6) with three Cartesian positions and
        three velocities in an inertial reference frame in metres and metres
        per second, respectively.
    epoch - datetime corresponding to the UTC epoch at which the rate of change
        is to be computed.
    useGeoid - bool, whether to compute the gravity by using EGM geopotential
        expansion (True) or a restricted two-body problem (False).
    Returns
    ----------
    numpy.ndarray of shape (1,3) with three Cartesian components of the
    acceleration in m/s2 given in an inertial reference frame.
    """
    if useGeoid:
        # The module-level import only brings in scipy.integrate; make sure
        # scipy.special (used for the Legendre polynomials) is loaded too.
        import scipy.special

        " Compute geocentric co-latitude, longitude & radius. "
        colatitude, longitude, r = calculateGeocentricLatLon(stateVec, epoch)

        " Find the gravitational potential at the desired point. "
        # See Eq. 1 in Cunningham (1996) for the general form of the geopotential expansion.
        gravitationalPotential = 0.0  # Potential of the gravitational field at the stateVec location.
        for degree in range(0, MAX_DEGREE+1):  # Accumulate the geoid corrections to the sphere, degree by degree.
            temp = 0.  # Contribution to the potential from the current degree and all corresponding orders.
            # NOTE(review): scipy.special.legendre returns a poly1d; indexing it
            # yields polynomial coefficients, not the associated Legendre
            # function P_nm evaluated at cos(colatitude) that the geopotential
            # expansion calls for — confirm against Cunningham (1996).
            legendreCoeffs = scipy.special.legendre(degree)
            for order in range(degree+1):  # Go through all the orders corresponding to the currently evaluated degree.
                # BUGFIX: this used to read abs(colatitude-math.pi/2. <= 1E-16),
                # i.e. abs() of a boolean, which made the special-case branch
                # trigger for the entire northern hemisphere. Apply abs() to the
                # angular difference so only points essentially at the equator
                # (where cos(colatitude) is zero) take the special case.
                if (abs(colatitude - math.pi/2.) <= 1E-16) or (abs(colatitude - 3*math.pi/2.) <= 1E-16):
                    temp += legendreCoeffs[order] * 1.0 * (Ccoeffs[degree][order]*math.cos(order*longitude) + Scoeffs[degree][order]*math.sin(order*longitude))
                else:
                    temp += legendreCoeffs[order] * math.cos(colatitude) * (Ccoeffs[degree][order]*math.cos(order*longitude) + Scoeffs[degree][order]*math.sin(order*longitude))
            gravitationalPotential += math.pow(EarthRadius/r, degree) * temp  # Add the contribution from the current degree.
        gravitationalPotential *= GM/r  # Final correction (*GM for acceleration, /r to get r^(n+1) in the denominator).

        " Compute the acceleration due to the gravity potential at the given point. "
        # stateVec is defined w.r.t. Earth's centre of mass, so no need to account
        # for the geoid shape here.
        gravityAcceleration = gravitationalPotential/r * (-1.*stateVec[:3]/r)  # First divide by the radius to get the acceleration value, then get the direction (towards centre of the Earth).
    else:
        r = numpy.linalg.norm(stateVec[:3])  # Earth-centred radius.
        gravityAcceleration = GM/(r*r) * (-1.*stateVec[:3]/r)  # First compute the magnitude, then get the direction (towards centre of the Earth).
    return gravityAcceleration
def computeRateOfChangeOfState(stateVector, epoch):
    """ Compute the rate of change of the state vector.
    Arguments
    ----------
    stateVector - numpy.ndarray of shape (1,6) with three Cartesian positions
        and three velocities given in an inertial frame of reference.
    epoch - datetime corresponding to the UTC epoch at which the rate of change
        is to be computed.
    Returns
    ----------
    numpy.ndarray of shape (1,6) with the rates of change of position and
    velocity in the same inertial frame as the one in which stateVector was
    given.
    """
    # Gravity from the EGM96 model (or two-body, depending on USE_GEOID).
    accGravity = calculateGravityAcceleration(stateVector, epoch, USE_GEOID)
    # Drag from NRLMSISE-00, or zero when drag is disabled globally.
    if USE_DRAG:
        accDrag = calculateDragAcceleration(stateVector, epoch, satelliteMass)
    else:
        accDrag = [0., 0., 0.]
    rates = numpy.zeros(6)
    rates[:3] = stateVector[3:]   # Velocity is the rate of change of position.
    rates[3:] = accDrag + accGravity  # Total acceleration is the rate of change of velocity.
    return rates
def calculateCircularPeriod(stateVec):
    """ Calculate the orbital period of a circular, Keplerian orbit passing
    through the supplied state vector (3 positions and velocities).
    Arguments
    ----------
    stateVec - numpy.ndarray with at least three leading Cartesian position
        components, in metres.
    Returns
    ----------
    Orbital period, in seconds, of the circular orbit whose radius equals the
    magnitude of the supplied position.
    """
    orbitalRadius = numpy.linalg.norm(stateVec[:3])
    # Kepler's third law for a circular orbit: T = 2*pi*sqrt(r^3/GM).
    return 2 * math.pi * numpy.sqrt(orbitalRadius**3 / GM)
#%% FORCE MODEL SETTINGS.
" Gravity model settings. "
EarthRadius = 6378136.3  # Earth's equatorial radius from EGM96, m.
MAX_DEGREE = 2  # Maximum degree of the geopotential harmonic expansion to use. 0 equates to two-body problem.
USE_GEOID = True  # Whether to account for Earth's geoid (True) or assume two-body problem (False).
USE_DRAG = False  # Whether to account for drag acceleration (True), or ignore it (False).
Ccoeffs, Scoeffs = readEGM96Coefficients()  # Get the gravitational potential expansion coefficients.

" Atmospheric density model settings. "
NRLMSISEflags = nrlmsise_00_header.nrlmsise_flags()
NRLMSISEaph = nrlmsise_00_header.ap_array()  #TODO NRLMSISE header should contain the following:
# * Array containing the following magnetic values:
# *   0 : daily AP
# *   1 : 3 hr AP index for current time
# *   2 : 3 hr AP index for 3 hrs before current time
# *   3 : 3 hr AP index for 6 hrs before current time
# *   4 : 3 hr AP index for 9 hrs before current time
# *   5 : Average of eight 3 hr AP indicies from 12 to 33 hrs
# *       prior to current time
# *   6 : Average of eight 3 hr AP indicies from 36 to 57 hrs
# *       prior to current time
F10_7A = 70  # 81-day average F10.7.
F10_7 = 180  # Daily F10.7 for the previous day.
MagneticIndex = 40  # Daily magnetic index.

#%% PROPAGATE THE ORBIT NUMERICALLY.
" Initial properties of the satellite. "
satelliteMass = 1000.  # kg
Cd = 2.2  # Drag coefficient, dimensionless.
dragArea = 5.0  # Area exposed to atmospheric drag, m2.

" Initial state of the satellite. "
state_0 = numpy.array([EarthRadius+500.0e3, 0., 0., 0., 0., 0.])  # Initial state vector with Cartesian positions and velocities in m and m/s.
state_0[5] = numpy.sqrt(GM/numpy.linalg.norm(state_0[:3]))  # Simple initial condition for test purposes: a circular orbit with velocity pointing along the +Z direction.
epoch_0 = datetime(2017, 9, 27, 12, 22, 0, 200, tzinfo=pytz.UTC)
initialOrbitalPeriod = calculateCircularPeriod(state_0)  # Orbital period of the initial circular orbit.

# (Removed a stale commented-out prototype of the gravity-potential
# computation here; the live implementation is calculateGravityAcceleration.)

" Propagation time settings. "
INTEGRATION_TIME_STEP_S = 10.0  # Time step at which the trajectory will be propagated.
NO_ORBITS = 20  # For how many orbits to propagate.
# All the UTC epochs at which the state will be computed, one integration
# step apart, spanning NO_ORBITS initial orbital periods.
epochsOfInterest = pd.date_range(start=epoch_0,
                                 end=epoch_0+timedelta(seconds=NO_ORBITS*initialOrbitalPeriod),
                                 freq=pd.DateOffset(seconds=INTEGRATION_TIME_STEP_S)
                                 ).to_pydatetime().tolist()
propagatedStates = numpy.zeros((len(epochsOfInterest), 6))  # State vectors at the epochs of interest.
propagatedStates2Body = numpy.zeros((len(epochsOfInterest), 6))  # W/o geoid, i.e. two-body acceleration for comparison.

" Actual numerical propagation main loop. "
propagatedStates[0, :] = state_0  # Apply the initial condition.
for i in range(1, len(epochsOfInterest)):  # Propagate the state to all the desired epochs starting from state_0.
    propagatedStates[i, :] = RungeKutta4(propagatedStates[i-1], epochsOfInterest[i-1],
                                         INTEGRATION_TIME_STEP_S, computeRateOfChangeOfState)
    #TODO check if altitude isn't too low during propagation.

# Propagate with two-body for comparison.
# USE_GEOID is a module-level global read inside computeRateOfChangeOfState;
# flipping it here switches the force model for the second run.
USE_GEOID = False
propagatedStates2Body[0, :] = state_0  # Apply the initial condition.
for i in range(1, len(epochsOfInterest)):  # Propagate the state to all the desired epochs starting from state_0.
    propagatedStates2Body[i, :] = RungeKutta4(propagatedStates2Body[i-1], epochsOfInterest[i-1],
                                              INTEGRATION_TIME_STEP_S, computeRateOfChangeOfState)

" Compute quantities derived from the propagated state vectors. "
altitudes = numpy.linalg.norm(propagatedStates[:, :3], axis=1) - EarthRadius  # Altitudes above spherical Earth...
# NOTE(review): specific orbital energy is v^2/2 - GM/r per unit mass; this
# expression uses v^2 without the 1/2 and multiplies the potential term by
# satelliteMass — confirm the intended definition.
specificEnergies = [numpy.linalg.norm(x[3:])*numpy.linalg.norm(x[3:]) -
                    GM*satelliteMass/numpy.linalg.norm(x[:3]) for x in propagatedStates]  # ...and corresponding specific orbital energies.
altitudes2Body = numpy.linalg.norm(propagatedStates2Body[:, :3], axis=1) - EarthRadius
#%% PLOT FORMATTING.
ticksFontSize = 15
labelsFontSize = 30
titleFontSize = 34  # NOTE(review): defined but not used in the plots below.
matplotlib.rc('xtick', labelsize=ticksFontSize)
matplotlib.rc('ytick', labelsize=ticksFontSize)

#%% FIGURE THAT SHOWS THE EARTH AND SATELLITE TRAJECTORY.
fig = matplotlib.pyplot.figure(figsize=(12, 8))
ax = Axes3D(fig)
ax.set_aspect('auto')  #TODO change 3D axes aspect ratio to equal, which isn't supported now. Current workaround is set scale_xyz below.
ax.view_init(elev=45., azim=45.)
figRange = 1.5*EarthRadius  # Half-extent of the plotted cube, m.
ax.set_xlim([-figRange, figRange])
ax.set_ylim([-figRange, figRange])
ax.set_zlim([-figRange, figRange])
ax.auto_scale_xyz([-figRange, figRange], [-figRange, figRange], [-figRange, figRange])

" Plot a sphere that represents the Earth and the coordinate frame. "
N_POINTS = 20  # Number of latitudes and longitudes used to plot the geoid.
latitudes = numpy.linspace(0, math.pi, N_POINTS)  # Geocentric latitudes and longitudes where the geoid will be visualised.
longitudes = numpy.linspace(0, 2*math.pi, N_POINTS)
Xs = EarthRadius * numpy.outer(numpy.cos(latitudes), numpy.sin(longitudes))
Ys = EarthRadius * numpy.outer(numpy.sin(latitudes), numpy.sin(longitudes))
Zs = EarthRadius * numpy.outer(numpy.ones(latitudes.size), numpy.cos(longitudes))
earthSurface = ax.plot_surface(Xs, Ys, Zs, rstride=1, cstride=1, linewidth=0,
                               antialiased=False, shade=False, alpha=0.5)
# Inertial-frame basis arrows: X red, Y green, Z blue.
xArrow = Arrow3D([0, 1.5*EarthRadius], [0, 0], [0, 0], mutation_scale=20, lw=1, arrowstyle='-|>', color='r')
yArrow = Arrow3D([0, 0], [0, 1.5*EarthRadius], [0, 0], mutation_scale=20, lw=1, arrowstyle='-|>', color='g')
zArrow = Arrow3D([0, 0], [0, 0], [0, 1.5*EarthRadius], mutation_scale=20, lw=1, arrowstyle='-|>', color='b')
ax.add_artist(xArrow)
ax.add_artist(yArrow)
ax.add_artist(zArrow)

" Plot the trajectory. "
# Red solid line: geoid-perturbed trajectory; blue dashed: two-body reference.
ax.plot(propagatedStates[:, 0], propagatedStates[:, 1], propagatedStates[:, 2], c='r', lw=2, ls='-')
ax.plot(propagatedStates2Body[:, 0], propagatedStates2Body[:, 1], propagatedStates2Body[:, 2], c='b', lw=2, ls='--')
fig.show()

#%% FIGURE THAT SHOWS THE ALTITUDE AND ORBITAL ENERGY EVOLUTION.
fig2 = matplotlib.pyplot.figure(figsize=(12, 8))
ax2 = fig2.gca()
ax2_2 = ax2.twinx()  # Second y-axis for the specific orbital energy.
ax2.set_xlabel(r"$Time\ elapsed\ (s)$", fontsize=labelsFontSize)
ax2.set_ylabel(r"$Altitude\ above\ spherical\ Earth\ (m)$", fontsize=labelsFontSize)
ax2_2.set_ylabel(r"$Specific\ orbital\ energy\ (m^2 s^{-2})$", fontsize=labelsFontSize)
ax2.grid(True, which='both')
ax2.plot(epochsOfInterest, altitudes, c='r', lw=2, ls='-')
ax2.plot(epochsOfInterest, altitudes2Body, c='b', lw=2, ls='--')
ax2_2.plot(epochsOfInterest, specificEnergies, c='m', lw=2, ls='-')
fig2.show()

#%% FIGURE SHOWING EVOLUTION OF THE POSITION COMPONENTS OVER TIME.
fig3, axarr = matplotlib.pyplot.subplots(3, sharex=True, figsize=(12, 8))
axarr[0].grid(linewidth=2); axarr[1].grid(linewidth=2); axarr[2].grid(linewidth=2);
axarr[0].tick_params(axis='both', reset=False, which='both', length=5, width=1.5)
axarr[1].tick_params(axis='both', reset=False, which='both', length=5, width=1.5)
axarr[2].tick_params(axis='both', reset=False, which='both', length=5, width=1.5)
axarr[2].set_xlabel(r'$Time\ elapsed\ (s)$', fontsize=labelsFontSize)
axarr[0].set_ylabel(r'$X\ (m)$', fontsize=labelsFontSize)
axarr[1].set_ylabel(r'$Y\ (m)$', fontsize=labelsFontSize)
axarr[2].set_ylabel(r'$Z\ (m)$', fontsize=labelsFontSize)
# Red solid: geoid-perturbed; blue dashed: two-body reference.
axarr[0].plot(epochsOfInterest, propagatedStates[:, 0], c='r', lw=2, ls='-')
axarr[1].plot(epochsOfInterest, propagatedStates[:, 1], c='r', lw=2, ls='-')
axarr[2].plot(epochsOfInterest, propagatedStates[:, 2], c='r', lw=2, ls='-')
axarr[0].plot(epochsOfInterest, propagatedStates2Body[:, 0], c='b', lw=2, ls='--')
axarr[1].plot(epochsOfInterest, propagatedStates2Body[:, 1], c='b', lw=2, ls='--')
axarr[2].plot(epochsOfInterest, propagatedStates2Body[:, 2], c='b', lw=2, ls='--')
fig3.show()
| |
# Copyright (c) 2016 Alessandro Pietro Bardelli
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Module containing OrthoProj, a class which can create an Orthogonal Projection
of 3D data with full axes synchronisation.
"""
# pylint: disable=undefined-variable, invalid-name, eval-used
import types
import itertools
from textwrap import dedent
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d.art3d import Poly3DCollection
from matplotlib.collections import PolyCollection
from six.moves import input
# This generates all sync_A_with_B(self, axis) functions that are used to
# synchronise the axis of two plots
# This generates all sync_A_with_B(self, axis) functions that are used to
# synchronise the axis of two plots. Each (source, target) pair in
# {x, y, z} x {x, y, z} gets its own def, built from the template below,
# compiled in 'exec' mode and run through eval() so the functions are
# defined at module level.
for src, trg in itertools.product(['x', 'y', 'z'], repeat=2):
    # The direction check keeps an inverted (descending) axis inverted on the
    # synchronised plot; emit=False stops the callbacks from recursing forever.
    eval(compile(dedent("""
    def sync_{0}_with_{1}(self, axis):
        if((axis.get_{1}lim()[0] > axis.get_{1}lim()[1]) !=
           (self.get_{0}lim()[0] > self.get_{0}lim()[1])):
            self.set_{0}lim(axis.get_{1}lim()[::-1], emit=False)
        else:
            self.set_{0}lim(axis.get_{1}lim(), emit=False)
    """).format(src, trg), '<string>', 'exec'))
def _merge_dicts(*dict_args):
'''
Given any number of dicts, shallow copy and merge into a new dict,
precedence goes to key value pairs in latter dicts.
'''
result = {}
for dictionary in dict_args:
if dictionary is not None:
result.update(dictionary)
return result
class OrthoProj():
    """
    Orthogonal Projection object.

    One figure with four synchronised views of the same 3D data: three 2D
    orthogonal projections (XZ vertical, YZ lateral, XY horizontal) and a 3D
    view. Axis limits are kept consistent across all four subplots through
    matplotlib 'xlim_changed'/'ylim_changed'/'zlim_changed' callbacks.
    """
    # Figure and axes handles; populated in __init__.
    _fig = None
    _locked = None  # NOTE(review): never assigned after this default — possibly vestigial.
    _axisXZ = None
    _axisYZ = None
    _axisXY = None
    _axis3D = None

    def __init__(self, title=None):
        """
        Build an :class:`OrthoProj` object
        Args:
            title (string). The title string for the orthogonal projection figure.
                Default: None, i.e., default naming
        """
        fig = plt.figure(title)
        # 2x2 layout: XZ top-left, YZ top-right, XY bottom-left, 3D bottom-right.
        axisXZ = fig.add_subplot(221, title="Vertical Plane - XZ")
        axisYZ = fig.add_subplot(222, title="Lateral Plane - YZ")
        axisXY = fig.add_subplot(223, title="Horizontal Plane - XY")
        axis3D = fig.add_subplot(224, title="3D view - XYZ", projection="3d")
        # Bind the module-level sync_A_with_B functions (generated at import
        # time) to each axes object so they can serve as limit-change callbacks.
        for ax in [axisXZ, axisYZ, axisXY, axis3D]:
            ax.sync_x_with_x = types.MethodType(sync_x_with_x, ax)
            ax.sync_x_with_y = types.MethodType(sync_x_with_y, ax)
            ax.sync_x_with_z = types.MethodType(sync_x_with_z, ax)
            ax.sync_y_with_x = types.MethodType(sync_y_with_x, ax)
            ax.sync_y_with_y = types.MethodType(sync_y_with_y, ax)
            ax.sync_y_with_z = types.MethodType(sync_y_with_z, ax)
        # Only the 3D axes has a z dimension of its own to synchronise.
        axis3D.sync_z_with_x = types.MethodType(sync_z_with_x, axis3D)
        axis3D.sync_z_with_y = types.MethodType(sync_z_with_y, axis3D)
        axis3D.sync_z_with_z = types.MethodType(sync_z_with_z, axis3D)
        # Wire every pair of subplots that shares a world dimension. The sync
        # functions use emit=False, which prevents infinite callback loops.
        # Connect XY subplot
        axisXY.callbacks.connect('xlim_changed', axisXZ.sync_x_with_x)
        axisXY.callbacks.connect('xlim_changed', axis3D.sync_x_with_x)
        axisXY.callbacks.connect('ylim_changed', axisYZ.sync_x_with_y)
        axisXY.callbacks.connect('ylim_changed', axis3D.sync_y_with_y)
        # Connect XZ subplot
        axisXZ.callbacks.connect('xlim_changed', axisXY.sync_x_with_x)
        axisXZ.callbacks.connect('xlim_changed', axis3D.sync_x_with_x)
        axisXZ.callbacks.connect('ylim_changed', axisYZ.sync_y_with_y)
        axisXZ.callbacks.connect('ylim_changed', axis3D.sync_z_with_y)
        # Connect YZ subplot
        axisYZ.callbacks.connect('xlim_changed', axisXY.sync_y_with_x)
        axisYZ.callbacks.connect('xlim_changed', axis3D.sync_y_with_x)
        axisYZ.callbacks.connect('ylim_changed', axisXZ.sync_y_with_y)
        axisYZ.callbacks.connect('ylim_changed', axis3D.sync_z_with_y)
        # Connect 3D subplot
        axis3D.callbacks.connect('xlim_changed', axisXY.sync_x_with_x)
        axis3D.callbacks.connect('xlim_changed', axisXZ.sync_x_with_x)
        axis3D.callbacks.connect('ylim_changed', axisXY.sync_y_with_y)
        axis3D.callbacks.connect('ylim_changed', axisYZ.sync_x_with_y)
        axis3D.callbacks.connect('zlim_changed', axisXZ.sync_y_with_z)
        axis3D.callbacks.connect('zlim_changed', axisYZ.sync_y_with_z)
        # invert the x axis in the YZ subplot
        axisYZ.invert_xaxis()
        # set labels for 3D plot
        axis3D.set_xlabel('X axis')
        axis3D.set_ylabel('Y axis')
        axis3D.set_zlabel('Z axis')
        self._fig = fig
        self._axisXZ = axisXZ
        self._axisYZ = axisYZ
        self._axisXY = axisXY
        self._axis3D = axis3D

    def plot(self, x, y, z, kwargsXZ=None, kwargsYZ=None, kwargsXY=None,
             kwargs3D=None, kwargsShared=None):
        '''
        Plot a line plot.
        Args:
            x, y, z (1D array). Positions of data points.
            kwargsXZ, kwargsYZ, kwargsXY, kwargs3D (dictionary). Extra keyword
                arguments to be passed to the single plotting functions.
                Internally :func:`~mpl_toolkits.mplot3d.art3d.Axes3D.plot`
                is used for the 3D plot and standard
                :func:`~matplotlib.axes.Axes.plot` for 2D plots.
            kwargsShared (dictionary). Extra keyword arguments common to all plots.
                Arguments specified via specific kwargs will always have the
                precedence and won't be overwritten.
        '''
        kwargsXZ = _merge_dicts(kwargsShared, kwargsXZ)
        kwargsYZ = _merge_dicts(kwargsShared, kwargsYZ)
        kwargsXY = _merge_dicts(kwargsShared, kwargsXY)
        kwargs3D = _merge_dicts(kwargsShared, kwargs3D)
        self._plot2DGraphs(x, y, z, kwargsXZ, kwargsYZ, kwargsXY)
        # NOTE(review): _merge_dicts returns {} (never None), so this None
        # branch is dead after the merge above; harmless since **{} is a no-op.
        if kwargs3D is None:
            self._axis3D.plot(x, y, z)
        else:
            self._axis3D.plot(x, y, z, **kwargs3D)

    def scatter(self, x, y, z, kwargsXZ=None, kwargsYZ=None, kwargsXY=None,
                kwargs3D=None, kwargsShared=None):
        '''
        Plot a scatter.
        Args:
            x, y, z (1D array). Positions of data points.
            kwargsXZ, kwargsYZ, kwargsXY, kwargs3D (dictionary). Extra keyword
                arguments to be passed to the single plotting functions.
                Internally :func:`~mpl_toolkits.mplot3d.art3d.Axes3D.scatter`
                is used for the 3D plot and standard
                :func:`~matplotlib.axes.Axes.scatter` for 2D plots.
            kwargsShared (dictionary). Extra keyword arguments common to all plots.
                Arguments specified via specific kwargs will always have the
                precedence and won't be overwritten.
        '''
        kwargsXZ = _merge_dicts(kwargsShared, kwargsXZ)
        kwargsYZ = _merge_dicts(kwargsShared, kwargsYZ)
        kwargsXY = _merge_dicts(kwargsShared, kwargsXY)
        kwargs3D = _merge_dicts(kwargsShared, kwargs3D)
        self._scatter2DGraphs(x, y, z, kwargsXZ, kwargsYZ, kwargsXY)
        if kwargs3D is None:
            self._axis3D.scatter(x, y, z)
        else:
            self._axis3D.scatter(x, y, z, **kwargs3D)

    def plot_trisurf(self, x, y, z, kwargsXZ=None, kwargsYZ=None, kwargsXY=None,
                     kwargs3D=None, kwargsShared=None):
        '''
        Plot a trisurf. Not implemented; raises NotImplementedError.
        '''
        raise NotImplementedError("plot_trisurf: Not Implemented Yet")

    def plot_surface(self, X, Y, Z, kwargsXZ=None, kwargsYZ=None, kwargsXY=None,
                     kwargs3D=None, kwargsShared=None):
        '''
        Plot a surface.
        Args:
            X, Y, Z (2D array). Data values as 2D arrays.
            kwargsXZ, kwargsYZ, kwargsXY, kwargs3D (dictionary). Extra keyword
                arguments to be passed to the single plotting functions.
                Internally :func:`~mpl_toolkits.mplot3d.art3d.Axes3D.plot_surface`
                is used for the 3D plot and standard :func:`~matplotlib.axes.Axes.plot`
                for 2D plots.
            kwargsShared (dictionary). Extra keyword arguments common to all plots.
                Arguments specified via specific kwargs will always have the
                precedence and won't be overwritten.
        '''
        kwargsXZ = _merge_dicts(kwargsShared, kwargsXZ)
        kwargsYZ = _merge_dicts(kwargsShared, kwargsYZ)
        kwargsXY = _merge_dicts(kwargsShared, kwargsXY)
        kwargs3D = _merge_dicts(kwargsShared, kwargs3D)
        # Plot both grid directions in the 2D views: once along the rows and
        # once along the columns (transposed), like a wireframe outline.
        self._plot2DGraphs(X, Y, Z, kwargsXZ, kwargsYZ, kwargsXY)
        self._plot2DGraphs(X.T, Y.T, Z.T, kwargsXZ, kwargsYZ, kwargsXY)
        if kwargs3D is None:
            self._axis3D.plot_surface(X, Y, Z)
        else:
            self._axis3D.plot_surface(X, Y, Z, **kwargs3D)

    def plot_wireframe(self, X, Y, Z, kwargsXZ=None, kwargsYZ=None, kwargsXY=None,
                       kwargs3D=None, kwargsShared=None):
        '''
        Plot a wireframe.
        Args:
            X, Y, Z (2D array). Data values as 2D arrays.
            kwargsXZ, kwargsYZ, kwargsXY, kwargs3D (dictionary). Extra keyword
                arguments to be passed to the single plotting functions.
                Internally :func:`~mpl_toolkits.mplot3d.art3d.Axes3D.plot_wireframe`
                is used for the 3D plot and standard :func:`~matplotlib.axes.Axes.plot`
                for 2D plots.
            kwargsShared (dictionary). Extra keyword arguments common to all plots.
                Arguments specified via specific kwargs will always have the
                precedence and won't be overwritten.
        '''
        kwargsXZ = _merge_dicts(kwargsShared, kwargsXZ)
        kwargsYZ = _merge_dicts(kwargsShared, kwargsYZ)
        kwargsXY = _merge_dicts(kwargsShared, kwargsXY)
        kwargs3D = _merge_dicts(kwargsShared, kwargs3D)
        # Both grid directions, as in plot_surface.
        self._plot2DGraphs(X, Y, Z, kwargsXZ, kwargsYZ, kwargsXY)
        self._plot2DGraphs(X.T, Y.T, Z.T, kwargsXZ, kwargsYZ, kwargsXY)
        if kwargs3D is None:
            self._axis3D.plot_wireframe(X, Y, Z)
        else:
            self._axis3D.plot_wireframe(X, Y, Z, **kwargs3D)

    def plot_collection(self, x, y, z, kwargsXZ=None, kwargsYZ=None, kwargsXY=None,
                        kwargs3D=None, kwargsShared=None):
        '''
        Plot a collection.
        Args:
            x, y, z (1D array). Arrays containing the vertices of the collection object.
                Internally :func:`~mpl_toolkits.mplot3d.art3d.Poly3DCollection`
                and :func:`~matplotlib.collections.PolyCollection` are used
                to create the collections.
            kwargsXZ, kwargsYZ, kwargsXY, kwargs3D (dictionary).
                Extra keyword arguments to be passed to the single plotting
                functions. Internally :func:`add_collection3d` and
                :func:`add_collection` are called.
            kwargsShared (dictionary). Extra keyword arguments common to all plots.
                Arguments specified via specific kwargs will always have the
                precedence and won't be overwritten.
        '''
        kwargsXZ = _merge_dicts(kwargsShared, kwargsXZ)
        kwargsYZ = _merge_dicts(kwargsShared, kwargsYZ)
        kwargsXY = _merge_dicts(kwargsShared, kwargsXY)
        kwargs3D = _merge_dicts(kwargsShared, kwargs3D)
        self._collection2DGraphs(x, y, z, kwargsXZ, kwargsYZ, kwargsXY)
        # One polygon whose vertices are the supplied points, in order.
        verts = [list(zip(x, y, z))]
        if kwargs3D is None:
            self._axis3D.add_collection3d(Poly3DCollection(verts))
        else:
            self._axis3D.add_collection3d(Poly3DCollection(verts, **kwargs3D))

    def show(self, block=False):
        """
        Display the figure.
        Args:
            block (bool). If True the computation is blocked waiting for
                user's input. Default: False
        """
        self._fig.show()
        if block:
            input("Press any key to continue")

    # ###############
    # Private Methods
    # ###############
    def _plot2DGraphs(self, x, y, z, kwargsXZ=None, kwargsYZ=None, kwargsXY=None):
        """
        Plot the data on the three 2D axes (XZ, YZ, XY) as simple line plots.
        """
        if kwargsXZ is None:
            self._axisXZ.plot(x, z)
        else:
            self._axisXZ.plot(x, z, **kwargsXZ)
        if kwargsYZ is None:
            self._axisYZ.plot(y, z)
        else:
            self._axisYZ.plot(y, z, **kwargsYZ)
        if kwargsXY is None:
            self._axisXY.plot(x, y)
        else:
            self._axisXY.plot(x, y, **kwargsXY)

    def _scatter2DGraphs(self, x, y, z, kwargsXZ=None, kwargsYZ=None, kwargsXY=None):
        """
        Plot the data on the three 2D axes (XZ, YZ, XY) as scatter plots.
        """
        if kwargsXZ is None:
            self._axisXZ.scatter(x, z)
        else:
            self._axisXZ.scatter(x, z, **kwargsXZ)
        if kwargsYZ is None:
            self._axisYZ.scatter(y, z)
        else:
            self._axisYZ.scatter(y, z, **kwargsYZ)
        if kwargsXY is None:
            self._axisXY.scatter(x, y)
        else:
            self._axisXY.scatter(x, y, **kwargsXY)

    def _collection2DGraphs(self, x, y, z, kwargsXZ=None, kwargsYZ=None, kwargsXY=None):
        """
        Plot the data on the three 2D axes (XZ, YZ, XY) as polygon collections.
        """
        # Project the 3D polygon onto each coordinate plane.
        vertxy = [list(zip(x, y))]
        vertxz = [list(zip(x, z))]
        vertyz = [list(zip(y, z))]
        if kwargsXY is None:
            self._axisXY.add_collection(PolyCollection(vertxy))
        else:
            self._axisXY.add_collection(PolyCollection(vertxy, **kwargsXY))
        if kwargsXZ is None:
            self._axisXZ.add_collection(PolyCollection(vertxz))
        else:
            self._axisXZ.add_collection(PolyCollection(vertxz, **kwargsXZ))
        if kwargsYZ is None:
            self._axisYZ.add_collection(PolyCollection(vertyz))
        else:
            self._axisYZ.add_collection(PolyCollection(vertyz, **kwargsYZ))
| |
import os
import smtplib
from dockerfile_parse import DockerfileParser
from flexmock import flexmock
import pytest
import requests
import six
from atomic_reactor.plugin import PluginFailedException
from atomic_reactor.plugins.pre_check_and_set_rebuild import CheckAndSetRebuildPlugin
from atomic_reactor.plugins.exit_sendmail import SendMailPlugin
from atomic_reactor.source import GitSource
from atomic_reactor.util import ImageName
# Short aliases for the send-on states exposed by SendMailPlugin:
# manual success/fail, auto success/fail, auto canceled.
MS, MF = SendMailPlugin.MANUAL_SUCCESS, SendMailPlugin.MANUAL_FAIL
AS, AF = SendMailPlugin.AUTO_SUCCESS, SendMailPlugin.AUTO_FAIL
AC = SendMailPlugin.AUTO_CANCELED
class TestSendMailPlugin(object):
    """Tests for SendMailPlugin: send-on state filtering, mail rendering,
    PDC token/receiver lookup, SMTP interaction and the run() flow."""
    def test_fails_with_unknown_states(self):
        # An unrecognized send_on state must fail the whole plugin run.
        p = SendMailPlugin(None, None, send_on=['unknown_state', MS])
        with pytest.raises(PluginFailedException) as e:
            p.run()
        assert str(e.value) == 'Unknown state(s) "unknown_state" for sendmail plugin'
    @pytest.mark.parametrize('rebuild, success, canceled, send_on, expected', [
        # make sure that right combinations only succeed for the specific state
        (False, True, False, [MS], True),
        (False, True, False, [MF, AS, AF, AC], False),
        (False, False, False, [MF], True),
        (False, False, False, [MS, AS, AF, AC], False),
        (True, True, False, [AS], True),
        (True, True, False, [MS, MF, AF, AC], False),
        (True, False, False, [AF], True),
        (True, False, False, [MS, MF, AS, AC], False),
        (True, False, True, [AC], True),
        # auto_fail would also give us True in this case
        (True, False, True, [MS, MF, AS], False),
        # also make sure that a random combination of more plugins works ok
        (True, False, False, [AF, MS], True)
    ])
    def test_should_send(self, rebuild, success, canceled, send_on, expected):
        # _should_send maps (rebuild, success, canceled) onto the send_on states.
        p = SendMailPlugin(None, None, send_on=send_on)
        assert p._should_send(rebuild, success, canceled) == expected
    @pytest.mark.parametrize('autorebuild, submitter', [
        (True, 'John Smith <jsmith@foobar.com>'),
        (False, 'John Smith <jsmith@foobar.com>'),
        (True, None),
        (False, None),
    ])
    def test_render_mail(self, autorebuild, submitter):
        # just test a random combination of the method inputs and hope it's ok for other
        # combinations
        class WF(object):
            # minimal stand-in for the workflow object read by _render_mail
            image = ImageName.parse('foo/bar:baz')
            openshift_build_selflink = '/builds/blablabla'
        kwargs = {'url': 'https://something.com'}
        if submitter:
            kwargs['submitter'] = submitter
        p = SendMailPlugin(None, WF(), **kwargs)
        subject, body = p._render_mail(autorebuild, False, False)
        exp_subject = 'Image foo/bar:baz; Status failed; Submitted by '
        exp_body = [
            'Image: foo/bar:baz',
            'Status: failed',
            'Submitted by: ',
            'Logs: https://something.com/builds/blablabla/log'
        ]
        # "Submitted by" precedence: autorebuild marker > submitter > 'unknown'
        if autorebuild:
            exp_subject += '<autorebuild>'
            exp_body[2] += '<autorebuild>'
        elif submitter:
            exp_subject += submitter
            exp_body[2] += submitter
        else:
            exp_subject += 'unknown'
            exp_body[2] += 'unknown'
        assert subject == exp_subject
        assert body == '\n'.join(exp_body)
    def test_get_pdc_token(self, tmpdir):
        # The token is read verbatim from PDC_TOKEN_FILE under pdc_secret_path.
        tokenfile = os.path.join(str(tmpdir), SendMailPlugin.PDC_TOKEN_FILE)
        p = SendMailPlugin(None, None, pdc_secret_path=str(tmpdir))
        with open(tokenfile, 'w') as f:
            f.write('thisistoken')
        assert p._get_pdc_token() == 'thisistoken'
    @pytest.mark.parametrize('df_labels, pdc_component_df_label, expected', [
        ({}, 'Foo', None),
        ({'Foo': 'Bar'}, 'Foo', 'Bar'),
    ])
    def test_get_component_label(self, df_labels, pdc_component_df_label, expected):
        # Missing Dockerfile label (expected is None) must raise.
        class WF(object):
            class builder(object):
                df_path = '/foo/bar'
        p = SendMailPlugin(None, WF(), pdc_component_df_label=pdc_component_df_label)
        flexmock(DockerfileParser, labels=df_labels)
        if expected is None:
            with pytest.raises(PluginFailedException):
                p._get_component_label()
        else:
            assert p._get_component_label() == expected
    def test_get_receivers_list_raises_unless_GitSource(self):
        class WF(object):
            source = None
        p = SendMailPlugin(None, WF())
        flexmock(p).should_receive('_get_component_label').and_return('foo')
        with pytest.raises(PluginFailedException) as e:
            p._get_receivers_list()
        assert str(e.value) == 'Source is not of type "GitSource", panic!'
    @pytest.mark.parametrize('value', [
        True,
        False
    ])
    def test_get_receivers_list_passes_verify_cert(self, value):
        # pdc_verify_cert must be forwarded as requests.get(verify=...).
        class WF(object):
            source = GitSource('git', 'foo', provider_params={'git_commit': 'foo'})
        p = SendMailPlugin(None, WF(), pdc_verify_cert=value)
        flexmock(p).should_receive('_get_component_label').and_return('foo')
        flexmock(p).should_receive('_get_pdc_token').and_return('foo')
        flexmock(requests).should_receive('get').with_args(object, headers=object, params=object,
                                                           verify=value).and_raise(RuntimeError)
        with pytest.raises(RuntimeError):
            p._get_receivers_list()
    def test_get_receivers_list_passes_pdc_token(self):
        # The PDC token must go out as an Authorization header.
        class WF(object):
            source = GitSource('git', 'foo', provider_params={'git_commit': 'foo'})
        p = SendMailPlugin(None, WF())
        flexmock(p).should_receive('_get_component_label').and_return('foo')
        flexmock(p).should_receive('_get_pdc_token').and_return('thisistoken')
        headers = {'Authorization': 'Token thisistoken'}
        flexmock(requests).should_receive('get').with_args(object, headers=headers, params=object,
                                                           verify=True).and_raise(RuntimeError)
        with pytest.raises(RuntimeError):
            p._get_receivers_list()
    def test_get_receivers_list_request_exception(self):
        # A requests error is re-raised as RuntimeError with the same message.
        class WF(object):
            source = GitSource('git', 'foo', provider_params={'git_commit': 'foo'})
        p = SendMailPlugin(None, WF())
        flexmock(p).should_receive('_get_component_label').and_return('foo')
        flexmock(p).should_receive('_get_pdc_token').and_return('foo')
        flexmock(requests).should_receive('get').and_raise(requests.RequestException('foo'))
        with pytest.raises(RuntimeError) as e:
            p._get_receivers_list()
        assert str(e.value) == 'foo'
    def test_get_receivers_list_wrong_status_code(self):
        class WF(object):
            source = GitSource('git', 'foo', provider_params={'git_commit': 'foo'})
        p = SendMailPlugin(None, WF())
        flexmock(p).should_receive('_get_component_label').and_return('foo')
        flexmock(p).should_receive('_get_pdc_token').and_return('foo')
        class R(object):
            # fake requests.Response with a non-200 status
            status_code = 404
            text = 'bazinga!'
        flexmock(requests).should_receive('get').and_return(R())
        with pytest.raises(RuntimeError) as e:
            p._get_receivers_list()
        assert str(e.value) == 'PDC returned non-200 status code (404), see referenced build log'
    def test_get_receivers_passes_proper_params(self):
        # Component label, git branch and contact role become PDC query params.
        class WF(object):
            source = GitSource('git', 'foo', provider_params={'git_commit': 'branch'})
        p = SendMailPlugin(None, WF(), pdc_contact_role='role')
        flexmock(p).should_receive('_get_component_label').and_return('component')
        flexmock(p).should_receive('_get_pdc_token').and_return('foo')
        params = {'global_component': 'component', 'dist_git_branch': 'branch', 'role': 'role'}
        flexmock(requests).should_receive('get').with_args(object, headers=object, params=params,
                                                           verify=object).\
            and_raise(requests.RequestException())
        with pytest.raises(RuntimeError):
            p._get_receivers_list()
    @pytest.mark.parametrize('pdc_response, pdc_contact_role, expected', [
        ({'count': 0, 'results': []},
         SendMailPlugin.PDC_CONTACT_ROLE,
         'no {0} role for the component'.format(SendMailPlugin.PDC_CONTACT_ROLE)),
        ({'count': 1, 'results': [{'contact': {'email': 'foo@bar.com'}}]},
         SendMailPlugin.PDC_CONTACT_ROLE,
         ['foo@bar.com']),
        ({'count': 2,
          'results':
          [{'contact': {'email': 'foo@bar.com'}}, {'contact': {'email': 'spam@spam.com'}}]},
         SendMailPlugin.PDC_CONTACT_ROLE,
         ['foo@bar.com', 'spam@spam.com']),
    ])
    def test_get_receivers_pdc_actually_responds(self, pdc_response, pdc_contact_role, expected):
        # A str 'expected' means an error message; a list means resolved emails.
        class WF(object):
            source = GitSource('git', 'foo', provider_params={'git_commit': 'foo'})
        p = SendMailPlugin(None, WF(), pdc_contact_role=pdc_contact_role)
        flexmock(p).should_receive('_get_component_label').and_return('foo')
        flexmock(p).should_receive('_get_pdc_token').and_return('foo')
        class R(object):
            status_code = 200
            def json(self):
                return pdc_response
        flexmock(requests).should_receive('get').and_return(R())
        if isinstance(expected, str):
            with pytest.raises(RuntimeError) as e:
                p._get_receivers_list()
            assert str(e.value) == expected
        else:
            assert p._get_receivers_list() == expected
    def test_send_mail(self):
        # _send_mail must call SMTP.sendmail with from/to/message and then quit.
        p = SendMailPlugin(None, None, from_address='foo@bar.com', smtp_uri='smtp.spam.com')
        class SMTP(object):
            def sendmail(self, from_addr, to, msg):
                pass
            def quit(self):
                pass
        smtp_inst = SMTP()
        flexmock(smtplib).should_receive('SMTP').and_return(smtp_inst)
        flexmock(smtp_inst).should_receive('sendmail').\
            with_args('foo@bar.com', ['spam@spam.com'], str)
        flexmock(smtp_inst).should_receive('quit')
        p._send_mail(['spam@spam.com'], 'subject', 'body')
    def test_run_ok(self):
        class WF(object):
            build_failed = True
            autorebuild_canceled = False
            prebuild_results = {CheckAndSetRebuildPlugin.key: True}
            image = ImageName.parse('repo/name')
        receivers = ['foo@bar.com', 'x@y.com']
        p = SendMailPlugin(None, WF(), send_on=[AF])
        flexmock(p).should_receive('_should_send').with_args(True, False, False).and_return(True)
        flexmock(p).should_receive('_get_receivers_list').and_return(receivers)
        flexmock(p).should_receive('_send_mail').with_args(receivers, six.text_type, six.text_type)
        p.run()
    def test_run_fails_to_obtain_receivers(self):
        # When receiver lookup fails, mail goes to the error addresses instead.
        class WF(object):
            build_failed = True
            autorebuild_canceled = False
            prebuild_results = {CheckAndSetRebuildPlugin.key: True}
            image = ImageName.parse('repo/name')
        error_addresses = ['error@address.com']
        p = SendMailPlugin(None, WF(), send_on=[AF], error_addresses=error_addresses)
        flexmock(p).should_receive('_should_send').with_args(True, False, False).and_return(True)
        flexmock(p).should_receive('_get_receivers_list').and_raise(RuntimeError())
        flexmock(p).should_receive('_send_mail').with_args(error_addresses, six.text_type,
                                                           six.text_type)
        p.run()
    def test_run_does_nothing_if_conditions_not_met(self):
        # _should_send False => neither receiver lookup nor sending happens.
        class WF(object):
            build_failed = True
            autorebuild_canceled = False
            prebuild_results = {CheckAndSetRebuildPlugin.key: True}
            image = ImageName.parse('repo/name')
        p = SendMailPlugin(None, WF(), send_on=[MS])
        flexmock(p).should_receive('_should_send').with_args(True, False, False).and_return(False)
        flexmock(p).should_receive('_get_receivers_list').times(0)
        flexmock(p).should_receive('_send_mail').times(0)
        p.run()
| |
import pystan
import pickle as pickle
from numpy import *
from matplotlib import use
use("PDF")
import matplotlib.pyplot as plt
from astropy.io import fits
import argparse
from scipy.interpolate import interp1d
import time
import astropy.io.ascii as ascii
import subprocess
def radectoxyz(RAdeg, Decdeg):
    """Convert equatorial coordinates RA/Dec (degrees) to a unit 3-vector."""
    ra = RAdeg*pi/180.
    dec = Decdeg*pi/180.
    return array([cos(dec)*cos(ra),
                  cos(dec)*sin(ra),
                  sin(dec)], dtype=float64)
def get_dz(RAdeg, Decdeg):
    """Lowest-order redshift correction toward the CMB frame for a sky position.

    Returns the dz to add to z_helio; also prints dz and the equivalent
    velocity dv in km/s.
    """
    dzCMB = 371.e3/299792458.  # CMB dipole speed (371 km/s) as a fraction of c
    # Apex of the CMB dipole, in degrees.
    CMBcoordsRA = 168.01190437
    CMBcoordsDEC = -6.98296811
    cos_angle = dot(radectoxyz(CMBcoordsRA, CMBcoordsDEC),
                    radectoxyz(RAdeg, Decdeg))
    dz = dzCMB*cos_angle
    dv = dzCMB*cos_angle*299792.458
    print("Add this to z_helio to lowest order:")
    print(dz, dv)
    return dz
def get_zCMB(RAdeg, Decdeg, z_helio):
    """Convert a heliocentric redshift at (RA, Dec) to the CMB frame."""
    dz = -get_dz(RAdeg, Decdeg)
    # Special-relativistic Doppler factor for the peculiar velocity.
    one_plus_z_pec = sqrt((1. + dz)/(1. - dz))
    return (1 + z_helio)/one_plus_z_pec - 1.
def get_dot_CMB(RAdeg, Decdeg):
    """Cosine of the angle between (RA, Dec) and the CMB dipole apex."""
    # Apex of the CMB dipole, in degrees (same constants as get_dz).
    CMBcoordsRA = 168.01190437
    CMBcoordsDEC = -6.98296811
    return dot(radectoxyz(CMBcoordsRA, CMBcoordsDEC),
               radectoxyz(RAdeg, Decdeg))
def save_img(dat, imname):
    """Write array ``dat`` to FITS file ``imname``, removing any existing file first."""
    # Remove a pre-existing file; fits.writeto refuses to overwrite by default.
    subprocess.getoutput("rm -f " + imname)
    hdulist = fits.HDUList()
    primary = fits.PrimaryHDU()
    primary.data = dat
    hdulist.append(primary)
    hdulist.writeto(imname)
    hdulist.close()
def get_redshifts(redshifts):
    """Append a fixed redshift grid (0..2.5, step 0.1) and build sort helpers.

    Returns (original redshifts, sorted grid with midpoints interleaved,
    indices mapping each combined entry to its sorted position, and the
    number of appended grid points).
    """
    grid = arange(0., 2.51, 0.1)
    combined = concatenate((redshifts, grid))
    order = list(argsort(combined))
    # Position of each original (unsorted) entry within the sorted array.
    unsort_inds = [order.index(i) for i in range(len(combined))]
    combined = sort(combined)
    midpoints = 0.5*(combined[1:] + combined[:-1])
    redshifts_sort_fill = sort(concatenate((combined, midpoints)))
    return redshifts, redshifts_sort_fill, unsort_inds, len(grid)
def get_redshift_coeffs(zcmb, SNset, popmodel):
    """Design matrix for the redshift dependence of the x1/c population means.

    popmodel 0: columns [1, z, z - z^2]; 1: a single constant column;
    2: one indicator column per sample id in SNset; >2: (popmodel - 1)
    piecewise-linear nodes in redshift per sample, except sample 4 (HST),
    which keeps a single indicator column.
    NOTE(review): for popmodel > 3 the allocated width
    len(unique)*npersample - 1 exceeds the columns actually filled —
    presumably only popmodel == 3 is used; confirm before relying on it.
    """
    if popmodel == 0:
        redshift_coeffs = ones([len(zcmb), 3], dtype=float64)
        redshift_coeffs[:, 1] = zcmb
        redshift_coeffs[:, 2] = zcmb - zcmb**2.  # Slightly decorrelate
    if popmodel == 1:
        return ones([len(zcmb), 1], dtype=float64)
    if popmodel == 2:
        samples = unique(SNset)
        redshift_coeffs = zeros([len(zcmb), len(samples)], dtype=float64)
        for col, sample in enumerate(samples):
            redshift_coeffs[:, col] = (SNset == sample)
    if popmodel > 2:
        npersample = popmodel - 1
        samples = unique(SNset)
        redshift_coeffs = zeros([len(zcmb), len(samples)*npersample - 1], dtype=float64)
        assert sum(SNset == 4) < 30  # Just checking that SNset 4 is HST SNe
        the_pos = 0
        for sample in samples:
            in_sample = (SNset == sample)
            zs = zcmb[where(in_sample)]
            if sample < 4:
                # One triangular basis function per node, masked to this sample.
                nodes = linspace(zs.min() - 1e-8, zs.max() + 1e-8, npersample)
                for j in range(npersample):
                    yvals = zeros(npersample, dtype=float64)
                    yvals[j] = 1.
                    ifn = interp1d(nodes, yvals, kind = 'linear', fill_value = 0, bounds_error = False)
                    redshift_coeffs[:, the_pos] = in_sample*ifn(zcmb)
                    the_pos += 1
            else:
                redshift_coeffs[:, the_pos] = in_sample
                the_pos += 1
    return redshift_coeffs
def initfn():
    """Draw a randomized initial parameter dictionary for the Stan sampler.

    Cosmology parameters are seeded according to args.cosmomodel; all other
    parameters are perturbed around fiducial SALT2/JLA-like values. Reads
    module globals: args, redshift_coeffs, nsne, d_mBx1c_dsys.
    """
    if args.cosmomodel == 1:
        Ominit = 0.3 + random.random()*0.1
        OLinit = 0.7 + random.random()*0.1
        q0init = 0.
        j0init = 0.
    elif args.cosmomodel == 2:
        Ominit = 0.3 + random.random()*0.1
        OLinit = 0.01
        q0init = 0.01
        j0init = 0.01
    elif args.cosmomodel in (3, 5):
        Ominit = 0.01
        OLinit = 0.01
        q0init = -0.5 + random.random()*0.1
        j0init = random.normal()*0.1
    elif args.cosmomodel == 4:
        Ominit = 0.3 + random.random()*0.1
        OLinit = -1. + random.random()*0.1
        q0init = 0.01
        j0init = 0.01
    n_x1c = len(redshift_coeffs[0])
    return dict(alpha = 0.12 + random.normal()*0.01,
                beta = 3. + random.normal()*0.1,
                delta = random.normal()*0.01,
                Om = Ominit,
                OL = OLinit,
                q0 = q0init,
                j0 = j0init,
                q0m = -0.5 + random.random()*0.1,
                q0d = random.normal(),
                sigma_M0 = random.random()*0.01 + 0.1,
                sigma_x10 = random.random()*0.1 + 1.,
                sigma_c0 = random.random()*0.01 + 0.06,
                M0 = -19.1 + random.random()*0.1,
                x10 = random.normal(size = n_x1c)*0.1,
                c0 = random.normal(size = n_x1c)*0.01,
                true_x1 = random.normal(size = nsne)*0.1,
                true_c = random.normal(size = nsne)*0.01,
                calibs = random.normal(size = d_mBx1c_dsys.shape[2])*0.1)
# ---------------------------------------------------------------------------
# Command-line interface
# ---------------------------------------------------------------------------
parser = argparse.ArgumentParser()
parser.add_argument("--cosmomodel", type=int, help="1 = Om/OL, 2 = FlatLCDM, 3 = q0/j0, 4 = q0m/q0d/j0")
parser.add_argument("--popmodel", type=int, help="0 = z, z^2, 1 = const, 2 = const by sample, 3 = linear by sample")
parser.add_argument("--hostmass", type=int, help="host mass? 1 = yes, 0 = no")
parser.add_argument("--includepecvelcov", type=int, help="include peculiar velocity covariance matrix? 1 = yes, 0 = no")
parser.add_argument("--ztype", type=str, help="redshift type to use for comoving distance. zcmbpecvel, zcmb, or zhelio")
parser.add_argument("--nMCMCchains", type=int, help="number of chains to run")
parser.add_argument("--nMCMCsamples", type=int, help="number of samples per chain; first half is discarded")
parser.add_argument("--min_Om", type=float, help="minimum Omega_m", default = 0)
parser.add_argument("--saveperSN", type=int, help="Save per-SN parameters in pickle file?", default = 1)
parser.add_argument("--savestan", type=int, help="Save Stan data in pickle", default = 1)
args = parser.parse_args()
print("args ", args)
# ---------------------------------------------------------------------------
# Load the JLA light-curve parameters and the per-SN intrinsic dispersions;
# remove the coherent-scatter term from the mB uncertainty (it is modeled
# separately in the Stan fit).
# ---------------------------------------------------------------------------
lcparams = ascii.read("../covmat/jla_lcparams.txt")
sigmamu = ascii.read("../covmat/sigma_mu.txt", names = ["sigma_coh", "sigma_lens", "z"])
# Sanity check: both tables must be in the same (redshift) order.
assert all(abs(sigmamu["z"] - lcparams["zcmb"]) < 0.02)
dmb = sqrt(lcparams["dmb"]**2. - sigmamu["sigma_coh"]**2.)
plt.plot(lcparams["zcmb"], dmb, '.')
plt.savefig("dmb_vs_z.pdf")
plt.close()
# Systematic-uncertainty derivatives; reorder axes to (SN, param, calib).
f = fits.open("../covmat/d_mBx1c_dsys_pecvel=%i.fits" % args.includepecvelcov)
d_mBx1c_dsys = f[0].data
f.close()
d_mBx1c_dsys = transpose(d_mBx1c_dsys, axes = [1, 2, 0])
dot_CMB = array([get_dot_CMB(lcparams["ra"][i], lcparams["dec"][i]) for i in range(len(lcparams["ra"]))])
# The three available redshift conventions; --ztype picks one below.
all_z = dict(zcmbpecvel = lcparams["zcmb"],
             zcmb = array([get_zCMB(lcparams["ra"][i], lcparams["dec"][i], lcparams["zhel"][i]) for i in range(len(lcparams["ra"]))]),
             zhelio = lcparams["zhel"])
assert args.ztype in all_z, "available z keys: " + str(all_z.keys())
print(d_mBx1c_dsys.shape)
nsne = len(lcparams["zcmb"])
# Per-SN observed (mB, x1, c) vectors and their 3x3 covariance matrices.
obs_mBx1c = zeros([nsne, 3], dtype=float64)
obs_mBx1c_cov = zeros([nsne, 3,3], dtype=float64)
for i in range(nsne):
    obs_mBx1c[i] = [lcparams["mb"][i], lcparams["x1"][i], lcparams["color"][i]]
    obs_mBx1c_cov[i] = [[dmb[i]**2., lcparams["cov_m_s"][i], lcparams["cov_m_c"][i]],
                        [lcparams["cov_m_s"][i], lcparams["dx1"][i]**2., lcparams["cov_s_c"][i]],
                        [lcparams["cov_m_c"][i], lcparams["cov_s_c"][i], lcparams["dcolor"][i]**2.]]
save_img(obs_mBx1c_cov, "obs_mBx1c_cov.fits")
redshifts, redshifts_sort_fill, unsort_inds, nzadd = get_redshifts(all_z[args.ztype]) # CMB for this one, helio for the other one!
redshift_coeffs = get_redshift_coeffs(all_z[args.ztype], lcparams["set"], args.popmodel)
# Diagnostic plot of the population design matrix (jittered for visibility).
for i in range(len(redshift_coeffs[0])):
    plt.plot(lcparams["zcmb"], redshift_coeffs[:,i] + random.normal(size = nsne)*0.01, '.', label = str(i))
plt.ylim(-0.2, 1.2)
plt.legend(loc = 'best')
plt.xscale('log')
plt.savefig("redshift_coeffs_%i.pdf" % args.popmodel)
plt.close()
# Everything the Stan model needs, in one dict.
stan_data = dict(n_sne = nsne, n_calib = d_mBx1c_dsys.shape[2], nzadd = nzadd, n_x1c_star = len(redshift_coeffs[0]),
                 zhelio = lcparams["zhel"], zcmb = all_z[args.ztype], dot_CMB = dot_CMB, redshifts_sort_fill = redshifts_sort_fill, unsort_inds = unsort_inds,
                 redshift_coeffs = redshift_coeffs,
                 obs_mBx1c = obs_mBx1c, obs_mBx1c_cov = obs_mBx1c_cov,
                 d_mBx1c_d_calib = d_mBx1c_dsys,
                 obs_mass = lcparams["3rdvar"], obs_dmass = lcparams["d3rdvar"],
                 cosmomodel = args.cosmomodel, min_Om = args.min_Om, host_mass_relation = args.hostmass)
# Quick look at the host-mass distribution and its uncertainties.
plt.subplot(2,1,1)
plt.hist(lcparams["3rdvar"])
plt.subplot(2,1,2)
plt.hist(lcparams["d3rdvar"], bins = 20)
plt.savefig("mass.pdf")
plt.close()
# ---------------------------------------------------------------------------
# Run the sampler and persist the results.
# ---------------------------------------------------------------------------
print("Ready to sample", time.asctime())
fit = pystan.stan(file = "../stan_code.txt", data=stan_data,
                  iter=args.nMCMCsamples, chains=args.nMCMCchains, n_jobs = args.nMCMCchains, refresh = int(min(100, args.nMCMCsamples/20)), init = initfn)
print("Done with sampling", time.asctime())
print(fit)
print("Done with printing", time.asctime())
fit_params = fit.extract(permuted = True)
print("Done with extracting", time.asctime())
# Unless --saveperSN, drop the largest (per-SN) parameter arrays to keep the
# pickle small; the size threshold identifies per-SN-sized entries.
if args.saveperSN:
    pass
else:
    for key in fit_params:
        if fit_params[key].size > 100 * args.nMCMCsamples * args.nMCMCchains:
            print("Deleting ", key)
            fit_params[key] = array([], dtype=float64)
# Unless --savestan, omit the input data from the pickle as well.
if args.savestan:
    pass
else:
    stan_data = {}
pickle.dump((stan_data, fit_params), open("results.pickle", 'wb'))
print("Done!", time.asctime())
| |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Operations often used for initializing tensors.
All variable initializers returned by functions in this file should have the
following signature:
def _initializer(shape, dtype=dtypes.float32, partition_info=None):
Args:
shape: List of `int` representing the shape of the output `Tensor`. Some
initializers may also be able to accept a `Tensor`.
dtype: (Optional) Type of the output `Tensor`.
partition_info: (Optional) variable_scope._PartitionInfo object holding
additional information about how the variable is partitioned. May be
`None` if the variable is not partitioned.
Returns:
A `Tensor` of type `dtype` and `shape`.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import math_ops
class Initializer(object):
  """Initializer base class: all initializers inherit from this class.

  Subclasses must implement `__call__`; `get_config` and `from_config`
  round-trip an initializer through a JSON-serializable dict.
  """

  def __call__(self, shape, dtype=None, partition_info=None):
    raise NotImplementedError

  def get_config(self):
    """Returns the configuration of the initializer as a JSON-serializable dict.

    Returns:
      A JSON-serializable Python dict.
    """
    return {}

  @classmethod
  def from_config(cls, config):
    """Instantiates an initializer from a configuration dictionary.

    Example:

    ```
    initializer = RandomUniform(-1, 1)
    config = initializer.get_config()
    initializer = RandomUniform.from_config(config)
    ```

    Arguments:
      config: A Python dictionary.
        It will typically be the output of `get_config`.

    Returns:
      An Initializer instance.
    """
    return cls(**config)
class Zeros(Initializer):
  """Initializer that generates tensors initialized to 0."""

  def __init__(self, dtype=dtypes.float32):
    self.dtype = dtypes.as_dtype(dtype)

  def __call__(self, shape, dtype=None, partition_info=None):
    # Fall back to the constructor dtype when none is given at call time.
    return array_ops.zeros(shape, self.dtype if dtype is None else dtype)

  def get_config(self):
    return {"dtype": self.dtype.name}
class Ones(Initializer):
  """Initializer that generates tensors initialized to 1."""

  def __init__(self, dtype=dtypes.float32):
    self.dtype = dtypes.as_dtype(dtype)

  def __call__(self, shape, dtype=None, partition_info=None):
    # Fall back to the constructor dtype when none is given at call time.
    return array_ops.ones(shape, self.dtype if dtype is None else dtype)

  def get_config(self):
    return {"dtype": self.dtype.name}
class Constant(Initializer):
  """Initializer that generates tensors with constant values.

  The resulting tensor is populated with values of type `dtype`, as
  specified by the `value` argument, following the desired `shape` of the
  new tensor.

  `value` may be a Python scalar, a list of values, or an N-dimensional
  numpy array. When `value` is a list, its length must be less than or
  equal to the number of elements implied by `shape`: if it is shorter, the
  last element of `value` fills the remaining entries; if it is longer, a
  `ValueError` is raised (e.g. "Too many elements provided. Needed at most
  6, but received 8" for an 8-element list and shape `[2, 3]`).

  With `verify_shape=True` the shape of `value` must additionally match the
  requested tensor shape exactly, otherwise a `TypeError` is raised.

  Args:
    value: A Python scalar, list of values, or a N-dimensional numpy array.
      All elements of the initialized variable will be set to the
      corresponding value in the `value` argument.
    dtype: The data type.
    verify_shape: Boolean that enables verification of the shape of `value`.
      If `True`, the initializer will throw an error if the shape of `value`
      is not compatible with the shape of the initialized tensor.
  """

  def __init__(self, value=0, dtype=dtypes.float32, verify_shape=False):
    self.value = value
    self.dtype = dtypes.as_dtype(dtype)
    self._verify_shape = verify_shape

  def __call__(self, shape,
               dtype=None,
               partition_info=None,
               verify_shape=None):
    # Call-time arguments override the values stored at construction.
    effective_dtype = self.dtype if dtype is None else dtype
    effective_verify = self._verify_shape if verify_shape is None else verify_shape
    return constant_op.constant(self.value, dtype=effective_dtype, shape=shape,
                                verify_shape=effective_verify)

  def get_config(self):
    # We don't include `verify_shape` for compatibility with Keras.
    # `verify_shape` should be passed as an argument to `__call__` rather
    # than as a constructor argument: conceptually it isn't a property
    # of the initializer.
    return {"value": self.value,
            "dtype": self.dtype.name}
class RandomUniform(Initializer):
  """Initializer that generates tensors with a uniform distribution.

  Args:
    minval: A python scalar or a scalar tensor. Lower bound of the range
      of random values to generate.
    maxval: A python scalar or a scalar tensor. Upper bound of the range
      of random values to generate. Defaults to 1 for float types.
    seed: A Python integer. Used to create random seeds. See
      @{tf.set_random_seed}
      for behavior.
    dtype: The data type.
  """

  def __init__(self, minval=0, maxval=None, seed=None, dtype=dtypes.float32):
    self.minval = minval
    self.maxval = maxval
    self.seed = seed
    self.dtype = dtypes.as_dtype(dtype)

  def __call__(self, shape, dtype=None, partition_info=None):
    return random_ops.random_uniform(
        shape, self.minval, self.maxval,
        self.dtype if dtype is None else dtype, seed=self.seed)

  def get_config(self):
    return {"minval": self.minval,
            "maxval": self.maxval,
            "seed": self.seed,
            "dtype": self.dtype.name}
class RandomNormal(Initializer):
  """Initializer that generates tensors with a normal distribution.

  Args:
    mean: a python scalar or a scalar tensor. Mean of the random values
      to generate.
    stddev: a python scalar or a scalar tensor. Standard deviation of the
      random values to generate.
    seed: A Python integer. Used to create random seeds. See
      @{tf.set_random_seed}
      for behavior.
    dtype: The data type. Only floating point types are supported.
  """

  def __init__(self, mean=0.0, stddev=1.0, seed=None, dtype=dtypes.float32):
    self.mean = mean
    self.stddev = stddev
    self.seed = seed
    # Rejects non-float dtypes up front.
    self.dtype = _assert_float_dtype(dtypes.as_dtype(dtype))

  def __call__(self, shape, dtype=None, partition_info=None):
    return random_ops.random_normal(
        shape, self.mean, self.stddev,
        self.dtype if dtype is None else dtype, seed=self.seed)

  def get_config(self):
    return {"mean": self.mean,
            "stddev": self.stddev,
            "seed": self.seed,
            "dtype": self.dtype.name}
class TruncatedNormal(Initializer):
  """Initializer that generates a truncated normal distribution.

  These values are similar to values from a `random_normal_initializer`
  except that values more than two standard deviations from the mean
  are discarded and re-drawn. This is the recommended initializer for
  neural network weights and filters.

  Args:
    mean: a python scalar or a scalar tensor. Mean of the random values
      to generate.
    stddev: a python scalar or a scalar tensor. Standard deviation of the
      random values to generate.
    seed: A Python integer. Used to create random seeds. See
      @{tf.set_random_seed}
      for behavior.
    dtype: The data type. Only floating point types are supported.
  """

  def __init__(self, mean=0.0, stddev=1.0, seed=None, dtype=dtypes.float32):
    self.mean = mean
    self.stddev = stddev
    self.seed = seed
    # Rejects non-float dtypes up front.
    self.dtype = _assert_float_dtype(dtypes.as_dtype(dtype))

  def __call__(self, shape, dtype=None, partition_info=None):
    return random_ops.truncated_normal(
        shape, self.mean, self.stddev,
        self.dtype if dtype is None else dtype, seed=self.seed)

  def get_config(self):
    return {"mean": self.mean,
            "stddev": self.stddev,
            "seed": self.seed,
            "dtype": self.dtype.name}
class UniformUnitScaling(Initializer):
  """Initializer that generates tensors without scaling variance.

  When initializing a deep network, it is in principle advantageous to keep
  the scale of the input variance constant, so it does not explode or diminish
  by reaching the final layer. If the input is `x` and the operation `x * W`,
  and we want to initialize `W` uniformly at random, we need to pick `W` from

      [-sqrt(3) / sqrt(dim), sqrt(3) / sqrt(dim)]

  to keep the scale intact, where `dim = W.shape[0]` (the size of the input).
  A similar calculation for convolutional networks gives an analogous result
  with `dim` equal to the product of the first 3 dimensions. When
  nonlinearities are present, we need to multiply this by a constant `factor`.
  See [Sussillo et al., 2014](https://arxiv.org/abs/1412.6558)
  ([pdf](http://arxiv.org/pdf/1412.6558.pdf)) for deeper motivation, experiments
  and the calculation of constants. In section 2.3 there, the constants were
  numerically computed: for a linear layer it's 1.0, relu: ~1.43, tanh: ~1.15.

  Args:
    factor: Float. A multiplicative factor by which the values will be scaled.
    seed: A Python integer. Used to create random seeds. See
      @{tf.set_random_seed}
      for behavior.
    dtype: The data type. Only floating point types are supported.
  """

  def __init__(self, factor=1.0, seed=None, dtype=dtypes.float32):
    self.factor = factor
    self.seed = seed
    self.dtype = _assert_float_dtype(dtypes.as_dtype(dtype))

  def __call__(self, shape, dtype=None, partition_info=None):
    if dtype is None:
      dtype = self.dtype
    # For partitioned variables, scale by the full (unpartitioned) shape.
    full_shape = shape if partition_info is None else partition_info.full_shape
    # Estimating input size is not possible to do perfectly, but we try.
    # The product of all dimensions but the last one is the right thing
    # for matrix multiply and convolutions (see above).
    input_size = 1.0
    for dim in full_shape[:-1]:
      input_size *= float(dim)
    # Avoid errors when initializing zero-size tensors.
    input_size = max(input_size, 1.0)
    limit = math.sqrt(3 / input_size) * self.factor
    return random_ops.random_uniform(shape, -limit, limit,
                                     dtype, seed=self.seed)

  def get_config(self):
    return {"factor": self.factor,
            "seed": self.seed,
            "dtype": self.dtype.name}
class VarianceScaling(Initializer):
    """Initializer capable of adapting its scale to the shape of weights tensors.

    With `distribution="normal"`, samples are drawn from a truncated normal
    distribution centered on zero, with `stddev = sqrt(scale / n)` where n is:
      - number of input units in the weight tensor, if mode = "fan_in"
      - number of output units, if mode = "fan_out"
      - average of the numbers of input and output units, if mode = "fan_avg"

    With `distribution="uniform"`, samples are drawn from a uniform
    distribution within [-limit, limit], with `limit = sqrt(3 * scale / n)`.

    Arguments:
      scale: Scaling factor (positive float).
      mode: One of "fan_in", "fan_out", "fan_avg".
      distribution: Random distribution to use. One of "normal", "uniform".
      seed: A Python integer. Used to create random seeds. See
        @{tf.set_random_seed} for behavior.
      dtype: The data type. Only floating point types are supported.

    Raises:
      ValueError: In case of an invalid value for the "scale", "mode" or
        "distribution" arguments.
    """

    def __init__(self, scale=1.0,
                 mode="fan_in",
                 distribution="normal",
                 seed=None,
                 dtype=dtypes.float32):
        if scale <= 0.:
            raise ValueError("`scale` must be positive float.")
        if mode not in {"fan_in", "fan_out", "fan_avg"}:
            raise ValueError("Invalid `mode` argument:", mode)
        distribution = distribution.lower()
        if distribution not in {"normal", "uniform"}:
            raise ValueError("Invalid `distribution` argument:", distribution)
        self.scale = scale
        self.mode = mode
        self.distribution = distribution
        self.seed = seed
        self.dtype = _assert_float_dtype(dtypes.as_dtype(dtype))

    def __call__(self, shape, dtype=None, partition_info=None):
        if dtype is None:
            dtype = self.dtype
        # Scale by the unpartitioned shape when the variable is partitioned.
        full_shape = shape if partition_info is None else partition_info.full_shape
        fan_in, fan_out = _compute_fans(full_shape)
        # Normalize by the chosen fan; max() protects degenerate shapes
        # from a division by zero.
        if self.mode == "fan_in":
            n = fan_in
        elif self.mode == "fan_out":
            n = fan_out
        else:  # "fan_avg"
            n = (fan_in + fan_out) / 2.
        scale = self.scale / max(1., n)
        if self.distribution == "normal":
            return random_ops.truncated_normal(shape, 0.0, math.sqrt(scale),
                                               dtype, seed=self.seed)
        limit = math.sqrt(3.0 * scale)
        return random_ops.random_uniform(shape, -limit, limit,
                                         dtype, seed=self.seed)

    def get_config(self):
        return {
            "scale": self.scale,
            "mode": self.mode,
            "distribution": self.distribution,
            "seed": self.seed,
            "dtype": self.dtype.name,
        }
class Orthogonal(Initializer):
    """Initializer that generates an orthogonal matrix.

    If the shape of the tensor to initialize is two-dimensional, it is
    initialized with an orthogonal matrix obtained from the QR decomposition
    of a matrix of random normal numbers.

    If the shape of the tensor to initialize is more than two-dimensional,
    a matrix of shape `(shape[0] * ... * shape[n - 2], shape[n - 1])`
    is initialized, where `n` is the length of the shape vector.
    The matrix is subsequently reshaped to give a tensor of the desired shape.

    Args:
      gain: multiplicative factor to apply to the orthogonal matrix
      dtype: The type of the output.
      seed: A Python integer. Used to create random seeds. See
        @{tf.set_random_seed}
        for behavior.
    """
    def __init__(self, gain=1.0, seed=None, dtype=dtypes.float32):
        self.gain = gain
        self.dtype = _assert_float_dtype(dtypes.as_dtype(dtype))
        self.seed = seed
    def __call__(self, shape, dtype=None, partition_info=None):
        if dtype is None:
            dtype = self.dtype
        # Check the shape: at least a matrix is required for QR.
        if len(shape) < 2:
            raise ValueError("The tensor to initialize must be "
                             "at least two-dimensional")
        # Flatten the input shape with the last dimension remaining
        # its original shape so it works for conv2d.
        num_rows = 1
        for dim in shape[:-1]:
            num_rows *= dim
        num_cols = shape[-1]
        flat_shape = (num_rows, num_cols)
        # Generate a random matrix
        a = random_ops.random_normal(flat_shape, dtype=dtype, seed=self.seed)
        # Compute the qr factorization
        q, r = linalg_ops.qr(a, full_matrices=False)
        # Make Q uniform: multiply each column of Q by the sign of the
        # corresponding diagonal entry of R so the factorization is unique.
        square_len = math_ops.minimum(num_rows, num_cols)
        d = array_ops.diag_part(r[:square_len, :square_len])
        ph = d / math_ops.abs(d)
        q *= ph
        # Pad zeros to Q (if rows smaller than cols) to reach the requested
        # width; the extra columns carry no signal.
        if num_rows < num_cols:
            padding = array_ops.zeros([num_rows, num_cols - num_rows], dtype=dtype)
            q = array_ops.concat([q, padding], 1)
        return self.gain * array_ops.reshape(q, shape)
    def get_config(self):
        return {"gain": self.gain,
                "seed": self.seed,
                "dtype": self.dtype.name}
# Aliases.
# Lower-case, tf.*-style callable names for the initializer classes above.
# pylint: disable=invalid-name
zeros_initializer = Zeros
ones_initializer = Ones
constant_initializer = Constant
random_uniform_initializer = RandomUniform
random_normal_initializer = RandomNormal
truncated_normal_initializer = TruncatedNormal
uniform_unit_scaling_initializer = UniformUnitScaling
variance_scaling_initializer = VarianceScaling
orthogonal_initializer = Orthogonal
# pylint: enable=invalid-name
def glorot_uniform_initializer(seed=None, dtype=dtypes.float32):
    """The Glorot uniform initializer, also called Xavier uniform initializer.

    It draws samples from a uniform distribution within [-limit, limit]
    where `limit` is `sqrt(6 / (fan_in + fan_out))`, with `fan_in` the number
    of input units in the weight tensor and `fan_out` the number of output
    units in the weight tensor.

    Reference: http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf

    Arguments:
      seed: A Python integer. Used to create random seeds. See
        @{tf.set_random_seed} for behavior.
      dtype: The data type. Only floating point types are supported.

    Returns:
      An initializer.
    """
    # sqrt(6 / (fan_in + fan_out)) is sqrt(3 * scale / n) with scale=1 and
    # n the average of fan_in and fan_out.
    return variance_scaling_initializer(scale=1.0, mode="fan_avg",
                                        distribution="uniform",
                                        seed=seed, dtype=dtype)
def glorot_normal_initializer(seed=None, dtype=dtypes.float32):
    """The Glorot normal initializer, also called Xavier normal initializer.

    It draws samples from a truncated normal distribution centered on 0 with
    `stddev = sqrt(2 / (fan_in + fan_out))`, with `fan_in` the number of
    input units in the weight tensor and `fan_out` the number of output
    units in the weight tensor.

    Reference: http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf

    Arguments:
      seed: A Python integer. Used to create random seeds. See
        @{tf.set_random_seed} for behavior.
      dtype: The data type. Only floating point types are supported.

    Returns:
      An initializer.
    """
    # sqrt(2 / (fan_in + fan_out)) is sqrt(scale / n) with scale=1 and
    # n the average of fan_in and fan_out.
    return variance_scaling_initializer(scale=1.0, mode="fan_avg",
                                        distribution="normal",
                                        seed=seed, dtype=dtype)
# Utility functions.
def _compute_fans(shape):
"""Computes the number of input and output units for a weight shape.
Arguments:
shape: Integer shape tuple or TF tensor shape.
Returns:
A tuple of scalars (fan_in, fan_out).
"""
if len(shape) < 1: # Just to avoid errors for constants.
fan_in = fan_out = 1
elif len(shape) == 1:
fan_in = fan_out = shape[0]
elif len(shape) == 2:
fan_in = shape[0]
fan_out = shape[1]
else:
# Assuming convolution kernels (2D, 3D, or more).
# kernel shape: (..., input_depth, depth)
receptive_field_size = 1.
for dim in shape[:-2]:
receptive_field_size *= dim
fan_in = shape[-2] * receptive_field_size
fan_out = shape[-1] * receptive_field_size
return fan_in, fan_out
def _assert_float_dtype(dtype):
"""Validate and return floating point type based on `dtype`.
`dtype` must be a floating point type.
Args:
dtype: The data type to validate.
Returns:
Validated type.
Raises:
ValueError: if `dtype` is not a floating point type.
"""
if not dtype.is_floating:
raise ValueError("Expected floating point type, got %s." % dtype)
return dtype
| |
from django.conf import settings
from jinja2 import Markup
from jinja2 import nodes
from jinja2.exceptions import TemplateSyntaxError
from jinja2.ext import Extension
class StaticURLExtension(Extension):
    """``{% static_url path %}``: prefixes the literal path with
    ``settings.STATIC_URL`` and outputs the result.
    """
    tags = set(['static_url'])

    def parse(self, parser):
        token = parser.stream.next()
        # Collect every token up to the end of the tag and join their
        # values into a single constant asset path.
        pieces = []
        while parser.stream.current.type != 'block_end':
            pieces.append(parser.stream.next())
        asset = nodes.Const("".join([p.value for p in pieces]))
        call = self.call_method('_static_url', args=[asset])
        return nodes.Output([call]).set_lineno(token.lineno)

    def _static_url(self, asset):
        return ''.join([settings.STATIC_URL, asset])
class URLExtension(Extension):
    """Returns an absolute URL matching given view with its parameters.

    This is a way to define links that aren't tied to a particular URL
    configuration::

        {% url path.to.some_view arg1,arg2,name1=value1 %}

    Known differences to Django's url-Tag:

      - In Django, the view name may contain any non-space character.
        Since Jinja's lexer does not identify whitespace to us, only
        characters that make up valid identifiers, plus dots and hyphens,
        are allowed. Note that identifiers in Jinja 2 may not contain
        non-ascii characters.

        As an alternative, you may specify the view as a string, which
        bypasses all these restrictions. It further allows you to apply
        filters:

            {% url "model.some-view"|afilter %}
    """
    tags = set(['url'])

    def parse(self, parser):
        stream = parser.stream
        tag = stream.next()

        # Get the view name: either an arbitrary string expression...
        if stream.current.test('string'):
            viewname = parser.parse_primary()
        else:
            # ...or a dotted/hyphenated path, assembled token by token
            # (the lexer gives us no whitespace to delimit on).
            bits = []
            name_allowed = True
            while True:
                if stream.current.test_any('dot', 'sub'):
                    bits.append(stream.next())
                    name_allowed = True
                elif stream.current.test('name') and name_allowed:
                    bits.append(stream.next())
                    name_allowed = False
                else:
                    break
            viewname = nodes.Const("".join([b.value for b in bits]))
            if not bits:
                # Fix: TemplateSyntaxError was previously never imported,
                # so this line raised a NameError instead of the intended
                # syntax error; it is now imported at module level.
                raise TemplateSyntaxError("'%s' requires path to view" %
                                          tag.value, tag.lineno)

        # Collect comma-separated positional and name=value arguments.
        args = []
        kwargs = []
        while not stream.current.test_any('block_end', 'name:as'):
            if args or kwargs:
                stream.expect('comma')
            if stream.current.test('name') and stream.look().test('assign'):
                key = nodes.Const(stream.next().value)
                stream.skip()
                value = parser.parse_expression()
                kwargs.append(nodes.Pair(key, value, lineno=key.lineno))
            else:
                args.append(parser.parse_expression())

        def make_call_node(*kw):
            return self.call_method(
                '_reverse',
                args=[viewname, nodes.List(args), nodes.Dict(kwargs)],
                kwargs=kw)

        # If an as-clause is specified, write the result to the context
        # (silencing lookup failures)...
        if stream.next_if('name:as'):
            var = nodes.Name(stream.expect('name').value, 'store')
            call_node = make_call_node(nodes.Keyword('fail', nodes.Const(False)))
            return nodes.Assign(var, call_node)
        # ...otherwise print it out.
        else:
            return nodes.Output([make_call_node()]).set_lineno(tag.lineno)

    @classmethod
    def _reverse(cls, viewname, args, kwargs, fail=True):
        """Resolve ``viewname`` to a URL.

        Tries the name as given, then again prefixed with what we guess is
        the "main" app (the first segment of SETTINGS_MODULE).  With
        ``fail=False`` an unresolvable name yields '' instead of raising
        NoReverseMatch.
        """
        # NOTE(review): django.core.urlresolvers moved to django.urls in
        # Django 2.0 -- confirm the targeted Django version.
        from django.core.urlresolvers import reverse, NoReverseMatch

        url = ''
        try:
            url = reverse(viewname, args=args, kwargs=kwargs)
        except NoReverseMatch:
            projectname = settings.SETTINGS_MODULE.split('.')[0]
            try:
                url = reverse(projectname + '.' + viewname,
                              args=args, kwargs=kwargs)
            except NoReverseMatch:
                if fail:
                    raise
                else:
                    return ''
        return url
class WithExtension(Extension):
    """Adds a value to the context (inside this block) for caching and
    easy access, just like the Django-version does.

    For example::

        {% with person.some_sql_method as total %}
          {{ total }} object{{ total|pluralize }}
        {% endwith %}

    TODO: the Scope node introduced in Jinja2 2.2-dev would help here, but
    we don't want to rely on that yet.
    """
    tags = set(['with'])

    def parse(self, parser):
        lineno = parser.stream.next().lineno
        value = parser.parse_expression()
        parser.stream.expect('name:as')
        name = parser.stream.expect('name')
        body = parser.parse_statements(['name:endwith'], drop_needle=True)
        # Alias the expression through a local assignment rather than a
        # macro argument so "with" blocks can nest.
        alias = nodes.Assign(nodes.Name(name.value, 'store'), value)
        body.insert(0, alias)
        block = nodes.CallBlock(self.call_method('_render_block'),
                                [], [], body)
        return block.set_lineno(lineno)

    def _render_block(self, caller=None):
        return caller()
class CacheExtension(Extension):
    """Jinja2 port of Django's ``{% cache %}`` tag.

        {% cache gettimeout()*2 "foo"+options.cachename %}
            ...
        {% endcache %}

    General syntax::

        {% cache [expire_time] [fragment_name] [var1] [var2] .. %}
        .. some expensive processing ..
        {% endcache %}

    Unlike Django, every argument -- including the fragment name -- is a
    full Jinja2 expression, so a literal name must be quoted.  Partly based
    on the ``FragmentCacheExtension`` from the Jinja2 docs.
    """
    tags = set(['cache'])

    def parse(self, parser):
        lineno = parser.stream.next().lineno
        expire_time = parser.parse_expression()
        fragment_name = parser.parse_expression()
        # Every remaining expression up to the end of the tag varies the
        # cache key.
        vary_on = []
        while not parser.stream.current.test('block_end'):
            vary_on.append(parser.parse_expression())
        body = parser.parse_statements(['name:endcache'], drop_needle=True)
        return nodes.CallBlock(
            self.call_method('_cache_support',
                             [expire_time, fragment_name,
                              nodes.List(vary_on), nodes.Const(lineno)]),
            [], [], body).set_lineno(lineno)

    def _cache_support(self, expire_time, fragm_name, vary_on, lineno, caller):
        """Render the block body through Django's cache.

        Raises:
            TemplateSyntaxError: if ``expire_time`` is not an integer.
                (Fix: this previously raised a NameError because
                TemplateSyntaxError was never imported; it now is.)
        """
        from django.core.cache import cache  # delay depending in settings
        from django.utils.http import urlquote
        # NOTE(review): django.utils.hashcompat was removed in Django 1.5;
        # hashlib.md5 is the modern equivalent -- confirm target version.
        from django.utils.hashcompat import md5_constructor
        try:
            expire_time = int(expire_time)
        except (ValueError, TypeError):
            raise TemplateSyntaxError('"%s" tag got a non-integer '
                'timeout value: %r' % (list(self.tags)[0], expire_time), lineno)
        # Hash the vary-on values into the cache key so any change busts it.
        args_string = u':'.join([urlquote(v) for v in vary_on])
        args_md5 = md5_constructor(args_string)
        cache_key = 'template.cache.%s.%s' % (fragm_name, args_md5.hexdigest())
        value = cache.get(cache_key)
        if value is None:
            value = caller()
            cache.set(cache_key, value, expire_time)
        return value
class SpacelessExtension(Extension):
    """Removes whitespace between HTML tags, including tab and newline
    characters.  Works exactly like Django's own tag.
    """
    tags = set(['spaceless'])

    def parse(self, parser):
        lineno = parser.stream.next().lineno
        body = parser.parse_statements(['name:endspaceless'],
                                       drop_needle=True)
        node = nodes.CallBlock(
            self.call_method('_strip_spaces', [], [], None, None),
            [], [], body)
        return node.set_lineno(lineno)

    def _strip_spaces(self, caller=None):
        from django.utils.html import strip_spaces_between_tags
        return strip_spaces_between_tags(caller().strip())
class CsrfTokenExtension(Extension):
    """Jinja2-version of the ``csrf_token`` tag.

    Adapted from a snippet by Jason Green:
    http://www.djangosnippets.org/snippets/1847/

    This tag is a bit stricter than the Django tag in that it doesn't
    simply ignore any invalid arguments passed in.
    """
    tags = set(['csrf_token'])

    def parse(self, parser):
        lineno = parser.stream.next().lineno
        # Look up the ``csrf_token`` context variable at render time.
        return nodes.Output([
            self.call_method('_render', [nodes.Name('csrf_token', 'load')])
        ]).set_lineno(lineno)

    def _render(self, csrf_token):
        """Render the hidden CSRF input, marked safe via Markup.

        Fix: ``Markup`` was used here without ever being imported, so
        rendering raised a NameError; it is now imported at module level.
        """
        from django.template.defaulttags import CsrfTokenNode
        return Markup(CsrfTokenNode().render({'csrf_token': csrf_token}))
# nicer import names
# (lets templates configure e.g. "myapp.jinja_ext.url" instead of the
# full class name)
url = URLExtension
with_ = WithExtension
cache = CacheExtension
spaceless = SpacelessExtension
csrf_token = CsrfTokenExtension
static_url = StaticURLExtension
| |
import sys
import string
"""
This module provides general purpose routines for generating
lists of strings from patterns. Thus:
python Pattern.py 172.16.[72-74,77-82].[101-200]
produces the following sequence of 800 IPs:
172.16.72.101
172.16.72.102
...
172.16.82.199
172.16.82.200
Ranges can be decimal, hexadecimal and alphabetic, e.g.
% python Pattern.py [0-10]
0
1
2
3
4
5
6
7
8
9
10
%
% python Pattern.py foobar_[a-z]
foobar_a
foobar_b
foobar_c
...
foobar_x
foobar_y
foobar_z
%
% python Pattern.py [a-z][A-Z]
aA
aB
aC
...
zX
zY
zZ
%
% python Pattern.py [0x0-0xf]
0
1
2
...
d
e
f
%
For hexadecimal output the leading 0x is left out because it's trivial to add:
% python Pattern.py 0x[0xa-0xf]
0xa
0xb
0xc
0xd
0xe
0xf
%
To help formatting, zero padding on the start of the range
is added to all members of the range, e.g.
% python Pattern.py an-sm[1-8]-g[001-100]
an-sm1-g001
an-sm1-g002
an-sm1-g003
...
an-sm8-g098
an-sm8-g099
an-sm8-g100
%
In addition, Pattern extends IPPatterns notation by adding
the concept of a zip using juxtaposition. Thus the sequences
produced by successive arguments are computing in step and
are displayed together. Thus to produce a simple table of
the hex ascii codes for lowercase letters is simply:
% python Pattern.py [A-Z] 0x[0x41-0x5a]
A 0x41
B 0x42
C 0x43
...
X 0x58
Y 0x59
Z 0x5a
%
Similarly to produce a deck of 52 cards is simply:
% python Pattern.py [2-9,T,J,Q,K,A][C,D,H,S]
2C
2D
2H
2S
3C
3D
3H
3S
...
KC
KD
KH
KS
AC
AD
AH
AS
%
"""
def aFill( str, n ):
    """leftpad str with 'a' so it is at least n chars long"""
    pad = 'a' * (n - len(str))
    return pad + str
def zFill( str, n ):
    """leftpad str with '0' so it is at least n chars long"""
    # Delegates to str.zfill, which also handles a leading sign.
    return str.zfill( n )
def computeIntRange( start, finish, toInt=int, fromInt=str, fill=zFill, padLen=len ):
    """Computes a range list from a start value, finish value and optional
    int-to-string, string-to-int, pad functions and pad length.

    The pad width is taken from the start value, so e.g. '001'-'100'
    yields zero-padded three-character strings throughout.
    """
    width = padLen( start )
    lo, hi = toInt( start ), toInt( finish )
    return [ fill( fromInt( v ), width ) for v in range( lo, hi + 1 ) ]
def fromHex( h ):
    """Convert a hex string (with or without a 0x prefix) into an int."""
    base = 16
    return int( h, base )
def toHex( i ):
    """Convert an int into a hex string (without the leading 0x)"""
    # hex() keeps a lowercase result; slicing drops the '0x' prefix.
    return hex( i )[2:]
def isHexadecimalRange( start, finish ):
    """Tests for hexadecimal range (both endpoints carry a 0x prefix)."""
    endpoints = ( start, finish )
    return all( [ e.startswith( '0x' ) for e in endpoints ] )
def isNumericRange( start, finish ):
    """Tests for decimal range (both endpoints are all digits)."""
    return all( [ allNumeric( e ) for e in ( start, finish ) ] )
def allIn( as, members ):
    """Tests that all elements of as are in members"""
    # NOTE: `as` is used as an identifier, so this file requires
    # Python <= 2.5 (`as` became a reserved word in 2.6).
    for a in as:
        if a not in members:
            return False
    return True
def allLower( as ):
    """Tests that all characters in as are lowercase letters"""
    # string.lowercase is the Python 2 locale-dependent constant.
    return allIn( as, string.lowercase )
def allUpper( as ):
    """Tests that all characters in as are uppercase letters"""
    return allIn( as, string.uppercase )
def allNumeric( as ):
    """Tests that all characters in as are decimal digits."""
    return allIn( as, string.digits )
def sameLength( as, bs ):
    """Tests that as and bs are the same length"""
    return len( as ) == len( bs )
def lettersToInt( str ):
    """turn a string of letters into a base 26 number"""
    # Python 2 only: relies on the reduce builtin and string.lowercase.
    # 'a' maps to 0, so e.g. 'ba' -> 26.
    return reduce( lambda x, y: 26*x + y, map( string.lowercase.index, str ))
def intToLetters( i, str='' ):
    """convert a number into a string of lowercase letters"""
    # NOTE: intToLetters(0) returns '' (not 'a'); callers compensate by
    # left-padding with aFill.  Relies on Python 2 integer division (i/26).
    if i == 0:
        return str
    else:
        return intToLetters( i/26, string.lowercase[i%26] + str )
def isUpperLetterRange( start, finish ):
    """Tests start and finish are both uppercase letter ranges"""
    if not allUpper( start ):
        return False
    return allUpper( finish )
def isLowerLetterRange( start, finish ):
    """Tests start and finish are both lowercase letter ranges"""
    if not allLower( start ):
        return False
    return allLower( finish )
def computeRange( start, finish ):
    """Expand a start/finish pair into the full list of range members,
    dispatching on the endpoint syntax (hex, lower/upper letters, decimal)."""
    if isHexadecimalRange( start, finish ):
        # Pad length excludes the two-character '0x' prefix.
        return computeIntRange( start, finish, fromHex, toHex, zFill, lambda x: len( x )-2 )
    if isLowerLetterRange( start, finish ):
        return computeIntRange( start, finish, lettersToInt, intToLetters, aFill )
    if isUpperLetterRange( start, finish ):
        # Reuse the lowercase path and upcase the results.
        return [s.upper() for s in computeRange(start.lower(),finish.lower())]
    if isNumericRange( start, finish ):
        return computeIntRange( start, finish )
    else:
        # Python 2 raise syntax; no range style matched.
        raise SyntaxError, "invalid range syntax"
def splitAt( s, i, gap=0 ):
    """split s into two strings at index i, skipping gap chars between them"""
    head = s[:i]
    tail = s[i + gap:]
    return head, tail
def find( s, target ):
    """version of find that returns len( s ) when target is not found"""
    # NOTE(review): dead code -- shadowed by the 3-argument find() defined
    # later in this module; kept for byte-compatibility.
    result = s.find( target )
    if result == -1: result = len( s )
    return result
class BadOpException( Exception ):
    """Raised by doOp when an unknown set operator is supplied."""
    pass
def doOp( op, a, b ):
    """Apply set operator op: '++' union, '--' difference, '^^' intersection."""
    dispatch = {
        '++': setUnion,
        '--': setDifference,
        '^^': setIntersection,
    }
    if op not in dispatch:
        raise BadOpException
    return dispatch[op]( a, b )
"""Implementation of Sets based on lists
We don't use the python built-in sets because
a) they were added in a later version (2.3?)
b) we wanted an implementation that preserved
the ordering of the leftmost argument to any set
operation even if it's slower.
"""
def setEmpty():
    """return the empty set (represented as a fresh list)"""
    return []
def setCopy( set ):
    """(shallow) copy a set"""
    return set[:]
def member( x, set ):
    """test for set membership"""
    # list.__contains__ gives the same answer as the original
    # index()/ValueError probe.
    return x in set
def setToList( set ):
    """takes a set and returns a list (identity under this representation)"""
    return set
def setAdd( set, m ):
    """In-place add of m to set (ignoring duplicates); returns the set."""
    if member( m, set ):
        return set
    set.append( m )
    return set
def setFromList( list ):
    """takes a list and returns a set by ignoring duplicates"""
    result = setEmpty()
    for item in list:
        setAdd( result, item )
    return result
def setSubtract( set, m ):
    """in place removal of m from set (no-op when absent)"""
    if not member( m, set ):
        return
    set.remove( m )
def setUnion( as, bs ):
    """returns a new set that is the union of as and bs"""
    # Preserves the ordering of as, then appends unseen members of bs.
    set = setCopy( as )
    for b in bs:
        setAdd( set, b )
    return set
def setDifference( as, bs ):
    """returns a new set that is the difference of as and bs"""
    # Keeps the ordering of as.
    set = setEmpty()
    for a in as:
        if not member( a, bs ):
            set.append( a )
    return set
def setIntersection( as, bs ):
    """returns a new set that is the intersection of as and bs"""
    # Keeps the ordering of as.
    set = setEmpty()
    for a in as:
        if member( a, bs ):
            set.append( a )
    return set
def find( s, target, i=0 ):
    """Version of find which returns len( s ) if target is not found"""
    pos = s.find( target, i )
    if pos < 0:
        return len( s )
    return pos
def multifind( s, targets, i=0 ):
    """Find the earliest index in s which matches one of targets starting at i"""
    positions = [ find( s, t, i ) for t in targets ]
    return min( positions )
def fileExpr( expr, fileStr='@' ):
    """If expression contains @file@ read in contents of file and return as a list"""
    # Uses the Python 2 file() builtin; each line is stripped of its
    # trailing whitespace/newline.
    if expr.startswith( fileStr ) and expr.endswith( fileStr ):
        return [ line.rstrip() for line in file( expr[1:-1] ).readlines() ]
    else:
        return [ expr ]
def computeList( expr, openStr='[', closeStr=']', rangeStr='-', sepStr=',', fileStr='@' ):
    """Parse and compute the range/list expression in expr.

    Handles three shapes: a bracketed list of items and ranges
    ('[a,b,1-3]'), a file inclusion ('@name@'), or a plain literal.
    Returns a list of strings.
    """
    if expr[0] == openStr:
        result = []
        while expr[0] != closeStr:
            # Drop the opening bracket / separator and locate the next
            # delimiter.
            expr = expr[1:]
            i = multifind( expr, [closeStr,rangeStr,sepStr] )
            if expr[i] == sepStr:
                # Plain item (possibly a @file@ inclusion).
                item, expr = splitAt( expr, i )
                result = result + fileExpr( item )
            elif expr[i] == rangeStr:
                # 'start-finish' range; expand it.
                start, expr = splitAt( expr, i, 1 )
                finish, expr = splitAt( expr, multifind( expr, [closeStr,sepStr] ) )
                result = result + computeRange( start, finish )
            else:
                # Closing bracket: take the final item, if non-empty.
                # Fix: was expr[:-1], which is only correct when the
                # bracket is the last character; expr[:i] handles any
                # trailing text as well.
                if i > 0: result = result + fileExpr( expr[:i] )
                break
        return result
    elif expr[0] == fileStr:
        return fileExpr( expr[1:] )
    else:
        return [expr]
def splitOnBrackets( expr, openStr='[', closeStr=']' ):
    """Splits expr into a sequence of alternating non-bracketed and
    bracketed substrings; bracketed components keep their brackets.

    Fix: removed the local variable n, which was assigned (twice) but
    never read.
    """
    components = []
    while len( expr ) > 0:
        # Scan up to the next opening bracket, or -- when already inside a
        # bracketed component -- up to and including the closing bracket.
        target = openStr
        if expr[0] == target: target = closeStr
        i = multifind( expr, [target] )
        if target == closeStr: i += 1
        components.append( expr[:i] )
        expr = expr[i:]
    return components
def product( fields, i=0, result='' ):
    """Takes a list of lists of fields and produces a generator that
    permutes through every possible combination of fields in order."""
    if i == len( fields ):
        # All positions chosen: emit the accumulated string.
        yield result
        return
    for choice in fields[i]:
        for combo in product( fields, i + 1, result + choice ):
            yield combo
def pattern( p ):
    """Splits pattern p into its constituents and produces an iterator
    that generates every possible permutation."""
    fields = [ computeList( c ) for c in splitOnBrackets( p ) ]
    return product( fields )
def setExpression( expr ):
    """Handles IP expressions of the form:
    <exp> = <exp> [-- <exp> | ++ <exp> | ^^ <exp>]*"""
    # Expand the first pattern, then fold each following binary set
    # operation into the accumulator left to right.
    subexp, expr = splitAt( expr, multifind( expr, ['--', '++', '^^'] ) )
    accum = setFromList( pattern( subexp ) )
    while expr != '':
        # Two-character operator, then the next pattern operand.
        op, expr = splitAt( expr, 2 )
        subexp, expr = splitAt( expr, multifind( expr, ['--', '++', '^^'] ) )
        accum = doOp( op, accum, setFromList( pattern( subexp ) ) )
    return iter( setToList( accum ) )
def expression( ps, joinStr=' ' ):
    """Evaluate each pattern argument and zip the results into joined lines."""
    generators = [ setExpression( p ) for p in ps ]
    return zipGenerators( generators, joinStr )
def zipGenerators( ps, joinStr ):
    """Takes a list of string iterators and produces an iterator of strings joined by joinStr"""
    # Python 2 only: uses iterator.next() and stops when the shortest
    # iterator's StopIteration propagates out of the generator.
    while True:
        yield joinStr.join( [p.next() for p in ps] )
def Pattern( ps ):
    """Public entry point: alias for expression() with the default separator."""
    return expression( ps )
# Command-line usage: expand each argv pattern and print the zipped lines.
# Python 2 print statement.
if __name__ == '__main__':
    for result in expression( sys.argv[1:] ):
        print "%s" % result
| |
# -*- coding: utf-8 -*-
import unittest
import time
from ._mouse_event import MoveEvent, ButtonEvent, WheelEvent, LEFT, RIGHT, MIDDLE, X, X2, UP, DOWN, DOUBLE
from keyboard import mouse
class FakeOsMouse(object):
    """Stand-in for the OS mouse backend used by the test suite.

    Performed actions are reported by calling ``self.append`` (assigned by
    the tests, typically a list's append) and simulated OS events are fed
    through ``self.queue``.
    """

    def __init__(self):
        self.append = None
        self.position = (0, 0)
        self.queue = None
        self.init = lambda: None

    def listen(self, queue):
        self.listening = True
        self.queue = queue

    def press(self, button):
        self.append((DOWN, button))

    def release(self, button):
        self.append((UP, button))

    def get_position(self):
        return self.position

    def move_to(self, x, y):
        self.append(('move', (x, y)))
        self.position = (x, y)

    def wheel(self, delta):
        self.append(('wheel', delta))

    def move_relative(self, x, y):
        px, py = self.position
        self.position = (px + x, py + y)
class TestMouse(unittest.TestCase):
@staticmethod
def setUpClass():
    # Swap the real OS backend for the fake one for the whole suite and
    # make sure the listener wired itself to it.
    mouse._os_mouse= FakeOsMouse()
    mouse._listener.start_if_necessary()
    assert mouse._os_mouse.listening
def setUp(self):
    # Fresh capture list per test; clear any buttons left pressed.
    self.events = []
    mouse._pressed_events.clear()
    mouse._os_mouse.append = self.events.append
def tearDown(self):
    mouse.unhook_all()
    # Make sure there's no spill over between tests.
    self.wait_for_events_queue()
def wait_for_events_queue(self):
    """Block until the listener has processed all queued events."""
    mouse._listener.queue.join()
def flush_events(self):
    """Drain the listener queue, then return and clear captured events."""
    self.wait_for_events_queue()
    captured = list(self.events)
    # Ugly, but required to work in Python 2; Python 3 has list.clear.
    del self.events[:]
    return captured
def press(self, button=LEFT):
    # Simulate an OS-level button-down event and wait for it to be handled.
    mouse._os_mouse.queue.put(ButtonEvent(DOWN, button, time.time()))
    self.wait_for_events_queue()
def release(self, button=LEFT):
    # Simulate an OS-level button-up event and wait for it to be handled.
    mouse._os_mouse.queue.put(ButtonEvent(UP, button, time.time()))
    self.wait_for_events_queue()
def double_click(self, button=LEFT):
    # Simulate an OS-level double-click event.
    mouse._os_mouse.queue.put(ButtonEvent(DOUBLE, button, time.time()))
    self.wait_for_events_queue()
def click(self, button=LEFT):
    # A click is a press immediately followed by a release.
    self.press(button)
    self.release(button)
def wheel(self, delta=1):
    # Simulate an OS-level wheel event.
    mouse._os_mouse.queue.put(WheelEvent(delta, time.time()))
    self.wait_for_events_queue()
def move(self, x=0, y=0):
    # Simulate an OS-level cursor-move event.
    mouse._os_mouse.queue.put(MoveEvent(x, y, time.time()))
    self.wait_for_events_queue()
def test_hook(self):
    """A hook only sees events between hook() and unhook()."""
    events = []
    self.press()
    mouse.hook(events.append)
    self.press()
    mouse.unhook(events.append)
    self.press()
    # Only the middle press should have been captured.
    self.assertEqual(len(events), 1)
def test_is_pressed(self):
    """is_pressed tracks per-button state; default button is LEFT."""
    self.assertFalse(mouse.is_pressed())
    self.press()
    self.assertTrue(mouse.is_pressed())
    self.release()
    self.press(X2)
    self.assertFalse(mouse.is_pressed())
    self.assertTrue(mouse.is_pressed(X2))
    # Pressing again while already down must keep the pressed state.
    self.press(X2)
    self.assertTrue(mouse.is_pressed(X2))
    self.release(X2)
    # A duplicate release must not break the state tracking.
    self.release(X2)
    self.assertFalse(mouse.is_pressed(X2))
def test_buttons(self):
    """Each high-level action emits the expected OS-level event sequence."""
    mouse.press()
    self.assertEqual(self.flush_events(), [(DOWN, LEFT)])
    mouse.release()
    self.assertEqual(self.flush_events(), [(UP, LEFT)])
    mouse.click()
    self.assertEqual(self.flush_events(), [(DOWN, LEFT), (UP, LEFT)])
    mouse.double_click()
    self.assertEqual(self.flush_events(), [(DOWN, LEFT), (UP, LEFT), (DOWN, LEFT), (UP, LEFT)])
    mouse.right_click()
    self.assertEqual(self.flush_events(), [(DOWN, RIGHT), (UP, RIGHT)])
    mouse.click(RIGHT)
    self.assertEqual(self.flush_events(), [(DOWN, RIGHT), (UP, RIGHT)])
    mouse.press(X2)
    self.assertEqual(self.flush_events(), [(DOWN, X2)])
def test_position(self):
    # get_position must reflect the (fake) OS cursor position.
    self.assertEqual(mouse.get_position(), mouse._os_mouse.get_position())
def test_move(self):
    """move() supports absolute and relative moves, with and without a
    duration (animated moves must land exactly on the target)."""
    mouse.move(0, 0)
    self.assertEqual(mouse._os_mouse.get_position(), (0, 0))
    mouse.move(100, 500)
    self.assertEqual(mouse._os_mouse.get_position(), (100, 500))
    # absolute=False: offsets from the current position.
    mouse.move(1, 2, False)
    self.assertEqual(mouse._os_mouse.get_position(), (101, 502))
    mouse.move(0, 0)
    mouse.move(100, 499, True, duration=0.01)
    self.assertEqual(mouse._os_mouse.get_position(), (100, 499))
    mouse.move(100, 1, False, duration=0.01)
    self.assertEqual(mouse._os_mouse.get_position(), (200, 500))
    # A relative (0, 0) move must not change the position.
    mouse.move(0, 0, False, duration=0.01)
    self.assertEqual(mouse._os_mouse.get_position(), (200, 500))
def triggers(self, fn, events, **kwargs):
    """Register fn(callback, **kwargs), replay the given (type, arg)
    events, and report whether the callback fired."""
    self.triggered = False
    def callback():
        self.triggered = True
    handler = fn(callback, **kwargs)
    for event_type, arg in events:
        if event_type == DOWN:
            self.press(arg)
        elif event_type == UP:
            self.release(arg)
        elif event_type == DOUBLE:
            self.double_click(arg)
        elif event_type == 'WHEEL':
            self.wheel()
    # Always detach so handlers don't leak between invocations.
    mouse._listener.remove_handler(handler)
    return self.triggered
def test_on_button(self):
    """on_button filters by button set and event-type set."""
    self.assertTrue(self.triggers(mouse.on_button, [(DOWN, LEFT)]))
    self.assertTrue(self.triggers(mouse.on_button, [(DOWN, RIGHT)]))
    self.assertTrue(self.triggers(mouse.on_button, [(DOWN, X)]))
    # Wheel events are not button events.
    self.assertFalse(self.triggers(mouse.on_button, [('WHEEL', '')]))
    self.assertFalse(self.triggers(mouse.on_button, [(DOWN, X)], buttons=MIDDLE))
    self.assertTrue(self.triggers(mouse.on_button, [(DOWN, MIDDLE)], buttons=MIDDLE))
    self.assertTrue(self.triggers(mouse.on_button, [(DOWN, MIDDLE)], buttons=MIDDLE))
    self.assertFalse(self.triggers(mouse.on_button, [(DOWN, MIDDLE)], buttons=MIDDLE, types=UP))
    self.assertTrue(self.triggers(mouse.on_button, [(UP, MIDDLE)], buttons=MIDDLE, types=UP))
    # Lists of buttons/types are accepted as well.
    self.assertTrue(self.triggers(mouse.on_button, [(UP, MIDDLE)], buttons=[MIDDLE, LEFT], types=[UP, DOWN]))
    self.assertTrue(self.triggers(mouse.on_button, [(DOWN, LEFT)], buttons=[MIDDLE, LEFT], types=[UP, DOWN]))
    self.assertFalse(self.triggers(mouse.on_button, [(UP, X)], buttons=[MIDDLE, LEFT], types=[UP, DOWN]))
def test_ons(self):
    """The on_*_click shortcuts fire on the release of their button."""
    self.assertTrue(self.triggers(mouse.on_click, [(UP, LEFT)]))
    self.assertFalse(self.triggers(mouse.on_click, [(UP, RIGHT)]))
    self.assertFalse(self.triggers(mouse.on_click, [(DOWN, LEFT)]))
    self.assertFalse(self.triggers(mouse.on_click, [(DOWN, RIGHT)]))
    self.assertTrue(self.triggers(mouse.on_double_click, [(DOUBLE, LEFT)]))
    self.assertFalse(self.triggers(mouse.on_double_click, [(DOUBLE, RIGHT)]))
    self.assertFalse(self.triggers(mouse.on_double_click, [(DOWN, RIGHT)]))
    self.assertTrue(self.triggers(mouse.on_right_click, [(UP, RIGHT)]))
    self.assertTrue(self.triggers(mouse.on_middle_click, [(UP, MIDDLE)]))
def test_wait(self):
    # If this fails it blocks. Unfortunately, but I see no other way of testing.
    from threading import Thread, Lock
    lock = Lock()
    lock.acquire()
    def t():
        # wait() must return once a button event arrives; releasing the
        # lock lets the main thread's final acquire() succeed.
        mouse.wait()
        lock.release()
    Thread(target=t).start()
    self.press()
    lock.acquire()
def test_record_play(self):
    # record() captures events until the stop button is pressed; play()
    # replays them, honoring the include_* filters.
    from threading import Thread, Lock
    lock = Lock()
    lock.acquire()
    def record_in_background():
        self.recorded = mouse.record(RIGHT)
        lock.release()
    Thread(target=record_in_background).start()
    # Generate the events to be recorded.
    self.click()
    self.wheel(5)
    self.move(100, 50)
    self.press(RIGHT)  # stop button: terminates record()
    lock.acquire()  # wait for the recorder thread to hand back the events
    expected_recording = [
        ButtonEvent(DOWN, LEFT, None),
        ButtonEvent(UP, LEFT, None),
        WheelEvent(5, None),
        MoveEvent(100, 50, None),
        ButtonEvent(DOWN, RIGHT, None),
    ]
    self.assertEqual(len(self.recorded), len(expected_recording))
    for got, want in zip(self.recorded, expected_recording):
        # Timestamps are nondeterministic; blank them before comparing.
        self.assertEqual(got._replace(time=None), want)
    down_left = (DOWN, LEFT)
    up_left = (UP, LEFT)
    wheel = ('wheel', 5)
    move = ('move', (100, 50))
    down_right = (DOWN, RIGHT)
    # Replay with various option sets and check which events come through.
    replays = [
        (dict(speed_factor=0), [down_left, up_left, wheel, move, down_right]),
        (dict(), [down_left, up_left, wheel, move, down_right]),
        (dict(include_clicks=False), [wheel, move]),
        (dict(include_moves=False), [down_left, up_left, wheel, down_right]),
        (dict(include_wheel=False), [down_left, up_left, move, down_right]),
    ]
    for kwargs, expected_events in replays:
        mouse.play(self.recorded, **kwargs)
        self.assertEqual(self.flush_events(), expected_events)
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
| |
# Copyright 2016-2017 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
from c7n.executor import MainThreadExecutor
from c7n.resources.rdscluster import RDSCluster
from .common import BaseTest
class RDSClusterTest(BaseTest):
    """Flight-data replay tests for the ``rds-cluster`` resource.

    NOTE(review): these tests replay recorded AWS API interactions, so the
    exact sequence of client calls must not change; only documentation is
    added here, the code is left untouched.
    """

    def remove_augments(self):
        # This exists because we added tag augmentation after eight other tests
        # were created and I did not want to re-create the state to re-record
        # them with the extra API call. If those get re-recorded we can remove
        # this.
        self.patch(RDSCluster, "augment", lambda x, y: y)

    def test_rdscluster_security_group(self):
        """security-group filter matches the cluster in the 'default' SG."""
        self.remove_augments()
        session_factory = self.replay_flight_data("test_rdscluster_sg_filter")
        p = self.load_policy(
            {
                "name": "rdscluster-sg",
                "resource": "rds-cluster",
                "filters": [
                    {"type": "security-group", "key": "GroupName", "value": "default"}
                ],
            },
            session_factory=session_factory,
        )
        resources = p.run()
        self.assertEqual(len(resources), 1)
        self.assertEqual(resources[0]["DatabaseName"], "devtest")

    def test_rdscluster_subnet(self):
        """subnet filter resolves the cluster's subnet group attributes."""
        self.remove_augments()
        session_factory = self.replay_flight_data("test_rdscluster_subnet")
        p = self.load_policy(
            {
                "name": "rdscluster-sub",
                "resource": "rds-cluster",
                "filters": [
                    {"type": "subnet", "key": "MapPublicIpOnLaunch", "value": True}
                ],
            },
            session_factory=session_factory,
        )
        resources = p.run()
        self.assertEqual(len(resources), 1)
        self.assertEqual(resources[0]["DatabaseName"], "devtest")

    def test_rdscluster_simple(self):
        """With no filters, all recorded clusters are returned."""
        self.remove_augments()
        session_factory = self.replay_flight_data("test_rdscluster_simple")
        p = self.load_policy(
            {"name": "rdscluster-simple", "resource": "rds-cluster"},
            session_factory=session_factory,
        )
        resources = p.run()
        self.assertEqual(len(resources), 2)

    def test_rdscluster_simple_filter(self):
        """value filter on DBClusterIdentifier narrows to a single cluster."""
        self.remove_augments()
        session_factory = self.replay_flight_data("test_rdscluster_simple")
        p = self.load_policy(
            {
                "name": "rdscluster-simple-filter",
                "resource": "rds-cluster",
                "filters": [
                    {"type": "value", "key": "DBClusterIdentifier", "value": "bbb"}
                ],
            },
            session_factory=session_factory,
        )
        resources = p.run()
        self.assertEqual(len(resources), 1)

    def test_rdscluster_delete(self):
        """delete action without touching member instances."""
        self.remove_augments()
        session_factory = self.replay_flight_data("test_rdscluster_delete")
        p = self.load_policy(
            {
                "name": "rdscluster-delete",
                "resource": "rds-cluster",
                "filters": [
                    {"type": "value", "key": "DBClusterIdentifier", "value": "bbb"}
                ],
                "actions": [{"type": "delete", "delete-instances": False}],
            },
            session_factory=session_factory,
        )
        resources = p.run()
        self.assertEqual(len(resources), 1)

    def test_rdscluster_delete_with_instances(self):
        """delete action that also removes the cluster's member instances."""
        self.remove_augments()
        session_factory = self.replay_flight_data(
            "test_rdscluster_delete_with_instances"
        )
        p = self.load_policy(
            {
                "name": "rdscluster-delete",
                "resource": "rds-cluster",
                "filters": [
                    {"type": "value", "key": "DBClusterIdentifier", "value": "bbb"}
                ],
                "actions": [{"type": "delete", "delete-instances": True}],
            },
            session_factory=session_factory,
        )
        resources = p.run()
        self.assertEqual(len(resources), 1)

    def test_rdscluster_retention(self):
        """retention action sets the backup retention period (days)."""
        self.remove_augments()
        session_factory = self.replay_flight_data("test_rdscluster_retention")
        p = self.load_policy(
            {
                "name": "rdscluster-delete",
                "resource": "rds-cluster",
                "filters": [
                    {"type": "value", "key": "DBClusterIdentifier", "value": "bbb"}
                ],
                "actions": [{"type": "retention", "days": 21}],
            },
            session_factory=session_factory,
        )
        resources = p.run()
        self.assertEqual(len(resources), 1)

    def test_rdscluster_snapshot(self):
        """snapshot action creates a cluster snapshot."""
        self.remove_augments()
        session_factory = self.replay_flight_data("test_rdscluster_snapshot")
        p = self.load_policy(
            {
                "name": "rdscluster-snapshot",
                "resource": "rds-cluster",
                "filters": [
                    {"type": "value", "key": "DBClusterIdentifier", "value": "bbb"}
                ],
                "actions": [{"type": "snapshot"}],
            },
            session_factory=session_factory,
        )
        resources = p.run()
        self.assertEqual(len(resources), 1)

    def test_rdscluster_tag_augment(self):
        """tag: filters work via the tag augmentation API call."""
        session_factory = self.replay_flight_data("test_rdscluster_tag_augment")
        p = self.load_policy(
            {
                "name": "rdscluster-tag-augment",
                "resource": "rds-cluster",
                "filters": [{"tag:cfoo": "cbar"}],
            },
            session_factory=session_factory,
        )
        resources = p.run()
        self.assertEqual(len(resources), 1)

    def test_rdscluster_tag_and_remove(self):
        """tag action adds a tag; remove-tag removes it again."""
        self.patch(RDSCluster, "executor_factory", MainThreadExecutor)
        session_factory = self.replay_flight_data("test_rdscluster_tag_and_remove")
        client = session_factory().client("rds")
        p = self.load_policy(
            {
                "name": "rds-cluster-tag",
                "resource": "rds-cluster",
                "filters": [{"DBClusterIdentifier": "c7ntest"}],
                "actions": [{"type": "tag", "key": "xyz", "value": "hello world"}],
            },
            session_factory=session_factory,
        )
        resources = p.run()
        self.assertEqual(len(resources), 1)
        # Verify the tag landed on the cluster ARN.
        arn = p.resource_manager.generate_arn(resources[0]["DBClusterIdentifier"])
        tags = client.list_tags_for_resource(ResourceName=arn)
        tag_map = {t["Key"]: t["Value"] for t in tags["TagList"]}
        self.assertTrue("xyz" in tag_map)
        policy = self.load_policy(
            {
                "name": "rds-cluster-remove-tag",
                "resource": "rds-cluster",
                "filters": [{"tag:xyz": "not-null"}],
                "actions": [{"type": "remove-tag", "tags": ["xyz"]}],
            },
            session_factory=session_factory,
        )
        resources = policy.run()
        self.assertEqual(len(resources), 1)
        # Verify the tag is gone after remove-tag.
        tags = client.list_tags_for_resource(ResourceName=arn)
        tag_map = {t["Key"]: t["Value"] for t in tags["TagList"]}
        self.assertFalse("xyz" in tag_map)

    def test_rdscluster_mark_match_unmark(self):
        """mark-for-op tags, marked-for-op matches, unmark removes the tag."""
        session_factory = self.replay_flight_data("test_rdscluster_mark_and_match")
        client = session_factory().client("rds")
        # mark
        p = self.load_policy(
            {
                "name": "rds-mark",
                "resource": "rds-cluster",
                "filters": [{"DBClusterIdentifier": "c7ntest"}],
                "actions": [
                    {
                        "type": "mark-for-op",
                        "tag": "custodian_next",
                        "days": 1,
                        "op": "delete",
                    }
                ],
            },
            session_factory=session_factory,
        )
        resources = p.run()
        self.assertEqual(len(resources), 1)
        # assert marked
        arn = p.resource_manager.generate_arn(resources[0]["DBClusterIdentifier"])
        tags = client.list_tags_for_resource(ResourceName=arn)
        tag_map = {t["Key"]: t["Value"] for t in tags["TagList"]}
        self.assertTrue("custodian_next" in tag_map)
        # match marked
        policy = self.load_policy(
            {
                "name": "rds-mark-filter",
                "resource": "rds-cluster",
                "filters": [
                    {
                        "type": "marked-for-op",
                        "tag": "custodian_next",
                        "op": "delete",
                        "skew": 1,
                    }
                ],
            },
            session_factory=session_factory,
        )
        resources = policy.run()
        self.assertEqual(len(resources), 1)
        # unmark
        policy = self.load_policy(
            {
                "name": "rds-mark-filter",
                "resource": "rds-cluster",
                "filters": [
                    {
                        "type": "marked-for-op",
                        "tag": "custodian_next",
                        "op": "delete",
                        "skew": 1,
                    }
                ],
                "actions": [{"type": "unmark", "tags": ["custodian_next"]}],
            },
            session_factory=session_factory,
        )
        resources = policy.run()
        self.assertEqual(len(resources), 1)
        # assert unmarked
        tags = client.list_tags_for_resource(ResourceName=arn)
        tag_map = {t["Key"]: t["Value"] for t in tags["TagList"]}
        self.assertFalse("custodian_next" in tag_map)
class RDSClusterSnapshotTest(BaseTest):
    """Flight-data replay tests for the ``rds-cluster-snapshot`` resource."""

    def _run(self, flight, policy_data):
        # Load a policy against recorded AWS responses and execute it,
        # returning the matched resources.
        session_factory = self.replay_flight_data(flight)
        policy = self.load_policy(policy_data, session_factory=session_factory)
        return policy.run()

    def test_rdscluster_snapshot_simple(self):
        matched = self._run(
            "test_rdscluster_snapshot_simple",
            {"name": "rdscluster-snapshot-simple", "resource": "rds-cluster-snapshot"},
        )
        self.assertEqual(len(matched), 2)

    def test_rdscluster_snapshot_simple_filter(self):
        matched = self._run(
            "test_rdscluster_snapshot_simple",
            {
                "name": "rdscluster-snapshot-simple-filter",
                "resource": "rds-cluster-snapshot",
                "filters": [
                    {"type": "value", "key": "StorageEncrypted", "value": False}
                ],
            },
        )
        self.assertEqual(len(matched), 1)

    def test_rdscluster_snapshot_age_filter(self):
        matched = self._run(
            "test_rdscluster_snapshot_simple",
            {
                "name": "rdscluster-snapshot-age-filter",
                "resource": "rds-cluster-snapshot",
                "filters": [{"type": "age", "days": 7}],
            },
        )
        self.assertEqual(len(matched), 2)

    def test_rdscluster_snapshot_trim(self):
        matched = self._run(
            "test_rdscluster_snapshot_delete",
            {
                "name": "rdscluster-snapshot-trim",
                "resource": "rds-cluster-snapshot",
                "actions": ["delete"],
            },
        )
        self.assertEqual(len(matched), 2)
| |
import operator
from spec import Spec, eq_, ok_, raises, assert_raises
from invoke.collection import Collection
from invoke.tasks import task, Task
from invoke.vendor import six
from invoke.vendor.six.moves import reduce
from _utils import load, support_path
# Minimal invoke task used as a shared fixture throughout the suite.
# (Documented with a comment, not a docstring, because invoke derives
# task help text from docstrings.)
@task
def _mytask():
    six.print_("woo!")
# Bare no-op callable fixture, wrapped in explicit Task(...) objects below.
def _func():
    pass
class Collection_(Spec):
    # Spec-style test suite for invoke.collection.Collection.
    # NOTE(review): under the Spec runner, method/class docstrings become
    # test display names, so review annotations below are plain comments
    # to avoid altering test output; code is unchanged.
    class init:
        "__init__"
        def can_accept_task_varargs(self):
            "can accept tasks as *args"
            @task
            def task1():
                pass
            @task
            def task2():
                pass
            c = Collection(task1, task2)
            assert 'task1' in c
            assert 'task2' in c
        def can_accept_collections_as_varargs_too(self):
            sub = Collection('sub')
            ns = Collection(sub)
            eq_(ns.collections['sub'], sub)
        def kwargs_act_as_name_args_for_given_objects(self):
            sub = Collection()
            @task
            def task1():
                pass
            ns = Collection(loltask=task1, notsub=sub)
            eq_(ns['loltask'], task1)
            eq_(ns.collections['notsub'], sub)
        def initial_string_arg_acts_as_name(self):
            sub = Collection('sub')
            ns = Collection(sub)
            eq_(ns.collections['sub'], sub)
        def initial_string_arg_meshes_with_varargs_and_kwargs(self):
            @task
            def task1():
                pass
            @task
            def task2():
                pass
            sub = Collection('sub')
            ns = Collection('root', task1, sub, sometask=task2)
            for x, y in (
                (ns.name, 'root'),
                (ns['task1'], task1),
                (ns.collections['sub'], sub),
                (ns['sometask'], task2),
            ):
                eq_(x, y)
        def accepts_load_path_kwarg(self):
            eq_(Collection().loaded_from, None)
            eq_(Collection(loaded_from='a/path').loaded_from, 'a/path')
    class useful_special_methods:
        # _meh builds a fresh equivalent collection each call, so equality
        # below compares distinct-but-equal objects.
        def _meh(self):
            @task
            def task1():
                pass
            @task
            def task2():
                pass
            return Collection('meh', task1=task1, task2=task2)
        def setup(self):
            self.c = self._meh()
        def repr_(self):
            "__repr__"
            eq_(repr(self.c), "<Collection 'meh': task1, task2>")
        def equality_should_be_useful(self):
            eq_(self.c, self._meh())
    class from_module:
        def setup(self):
            self.c = Collection.from_module(load('integration'))
        class parameters:
            # Tests for from_module's name/config keyword parameters.
            def setup(self):
                self.mod = load('integration')
                self.fm = Collection.from_module
            def name_override(self):
                eq_(self.fm(self.mod).name, 'integration')
                eq_(
                    self.fm(self.mod, name='not-integration').name,
                    'not-integration'
                )
            def inline_configuration(self):
                # No configuration given, none gotten
                eq_(self.fm(self.mod).configuration(), {})
                # Config kwarg given is reflected when config obtained
                eq_(
                    self.fm(self.mod, config={'foo': 'bar'}).configuration(),
                    {'foo': 'bar'}
                )
            def name_and_config_simultaneously(self):
                # Test w/ posargs to enforce ordering, just for safety.
                c = self.fm(self.mod, 'the name', {'the': 'config'})
                eq_(c.name, 'the name')
                eq_(c.configuration(), {'the': 'config'})
        def adds_tasks(self):
            assert 'print_foo' in self.c
        def derives_collection_name_from_module_name(self):
            eq_(self.c.name, 'integration')
        def submodule_names_are_stripped_to_last_chunk(self):
            with support_path():
                from package import module
                c = Collection.from_module(module)
            eq_(module.__name__, 'package.module')
            eq_(c.name, 'module')
            assert 'mytask' in c  # Sanity
        def honors_explicit_collections(self):
            coll = Collection.from_module(load('explicit_root'))
            assert 'top_level' in coll.tasks
            assert 'sub' in coll.collections
            # The real key test
            assert 'sub_task' not in coll.tasks
        def allows_tasks_with_explicit_names_to_override_bound_name(self):
            coll = Collection.from_module(load('subcollection_task_name'))
            assert 'explicit_name' in coll.tasks  # not 'implicit_name'
        def returns_unique_Collection_objects_for_same_input_module(self):
            # Ignoring self.c for now, just in case it changes later.
            # First, a module with no root NS
            mod = load('integration')
            c1 = Collection.from_module(mod)
            c2 = Collection.from_module(mod)
            assert c1 is not c2
            # Now one *with* a root NS (which was previously buggy)
            mod2 = load('explicit_root')
            c3 = Collection.from_module(mod2)
            c4 = Collection.from_module(mod2)
            assert c3 is not c4
        class explicit_root_ns:
            # Modules exposing a root `ns` collection: check interaction of
            # the module's own name/config with from_module overrides.
            def setup(self):
                mod = load('explicit_root')
                mod.ns.configure({
                    'key': 'builtin',
                    'otherkey': 'yup',
                    'subconfig': {'mykey': 'myvalue'}
                })
                mod.ns.name = 'builtin_name'
                self.unchanged = Collection.from_module(mod)
                self.changed = Collection.from_module(
                    mod,
                    name='override_name',
                    config={
                        'key': 'override',
                        'subconfig': {'myotherkey': 'myothervalue'}
                    }
                )
            def inline_config_with_root_namespaces_overrides_builtin(self):
                eq_(self.unchanged.configuration()['key'], 'builtin')
                eq_(self.changed.configuration()['key'], 'override')
            def inline_config_overrides_via_merge_not_replacement(self):
                ok_('otherkey' in self.changed.configuration())
            def config_override_merges_recursively(self):
                eq_(
                    self.changed.configuration()['subconfig']['mykey'],
                    'myvalue'
                )
            def inline_name_overrides_root_namespace_object_name(self):
                eq_(self.unchanged.name, 'builtin_name')
                eq_(self.changed.name, 'override_name')
            def root_namespace_object_name_overrides_module_name(self):
                # Duplicates part of previous test for explicitness' sake.
                # I.e. proves that the name doesn't end up 'explicit_root'.
                eq_(self.unchanged.name, 'builtin_name')
    class add_task:
        def setup(self):
            self.c = Collection()
        def associates_given_callable_with_given_name(self):
            self.c.add_task(_mytask, 'foo')
            eq_(self.c['foo'], _mytask)
        def uses_function_name_as_implicit_name(self):
            self.c.add_task(_mytask)
            assert '_mytask' in self.c
        def prefers_name_kwarg_over_task_name_attr(self):
            self.c.add_task(Task(_func, name='notfunc'), name='yesfunc')
            assert 'yesfunc' in self.c
            assert 'notfunc' not in self.c
        def prefers_task_name_attr_over_function_name(self):
            self.c.add_task(Task(_func, name='notfunc'))
            assert 'notfunc' in self.c
            assert '_func' not in self.c
        @raises(ValueError)
        def raises_ValueError_if_no_name_found(self):
            # Can't use a lambda here as they are technically real functions.
            class Callable(object):
                def __call__(self):
                    pass
            self.c.add_task(Task(Callable()))
        @raises(ValueError)
        def raises_ValueError_on_multiple_defaults(self):
            t1 = Task(_func, default=True)
            t2 = Task(_func, default=True)
            self.c.add_task(t1, 'foo')
            self.c.add_task(t2, 'bar')
        @raises(ValueError)
        def raises_ValueError_if_task_added_mirrors_subcollection_name(self):
            self.c.add_collection(Collection('sub'))
            self.c.add_task(_mytask, 'sub')
        def allows_specifying_task_defaultness(self):
            self.c.add_task(_mytask, default=True)
            eq_(self.c.default, '_mytask')
        def specifying_default_False_overrides_task_setting(self):
            @task(default=True)
            def its_me():
                pass
            self.c.add_task(its_me, default=False)
            eq_(self.c.default, None)
    class add_collection:
        def setup(self):
            self.c = Collection()
        def adds_collection_as_subcollection_of_self(self):
            c2 = Collection('foo')
            self.c.add_collection(c2)
            assert 'foo' in self.c.collections
        def can_take_module_objects(self):
            self.c.add_collection(load('integration'))
            assert 'integration' in self.c.collections
        @raises(ValueError)
        def raises_ValueError_if_collection_without_name(self):
            # Aka non-root collections must either have an explicit name given
            # via kwarg, have a name attribute set, or be a module with
            # __name__ defined.
            root = Collection()
            sub = Collection()
            root.add_collection(sub)
        @raises(ValueError)
        def raises_ValueError_if_collection_named_same_as_task(self):
            self.c.add_task(_mytask, 'sub')
            self.c.add_collection(Collection('sub'))
    class getitem:
        "__getitem__"
        def setup(self):
            self.c = Collection()
        def finds_own_tasks_by_name(self):
            # TODO: duplicates an add_task test above, fix?
            self.c.add_task(_mytask, 'foo')
            eq_(self.c['foo'], _mytask)
        def finds_subcollection_tasks_by_dotted_name(self):
            sub = Collection('sub')
            sub.add_task(_mytask)
            self.c.add_collection(sub)
            eq_(self.c['sub._mytask'], _mytask)
        def honors_aliases_in_own_tasks(self):
            t = Task(_func, aliases=['bar'])
            self.c.add_task(t, 'foo')
            eq_(self.c['bar'], t)
        def honors_subcollection_task_aliases(self):
            self.c.add_collection(load('decorator'))
            assert 'decorator.bar' in self.c
        def honors_own_default_task_with_no_args(self):
            t = Task(_func, default=True)
            self.c.add_task(t)
            eq_(self.c[''], t)
        def honors_subcollection_default_tasks_on_subcollection_name(self):
            sub = Collection.from_module(load('decorator'))
            self.c.add_collection(sub)
            # Sanity
            assert self.c['decorator.biz'] is sub['biz']
            # Real test
            assert self.c['decorator'] is self.c['decorator.biz']
        @raises(ValueError)
        def raises_ValueError_for_no_name_and_no_default(self):
            self.c['']
        @raises(ValueError)
        def ValueError_for_empty_subcol_task_name_and_no_default(self):
            self.c.add_collection(Collection('whatever'))
            self.c['whatever']
    class to_contexts:
        def setup(self):
            @task
            def mytask(text, boolean=False, number=5):
                six.print_(text)
            @task(aliases=['mytask27'])
            def mytask2():
                pass
            @task(aliases=['othertask'], default=True)
            def subtask():
                pass
            sub = Collection('sub', subtask)
            self.c = Collection(mytask, mytask2, sub)
            self.contexts = self.c.to_contexts()
            alias_tups = [list(x.aliases) for x in self.contexts]
            self.aliases = reduce(operator.add, alias_tups, [])
            # Focus on 'mytask' as it has the more interesting sig
            self.context = [x for x in self.contexts if x.name == 'mytask'][0]
        def returns_iterable_of_Contexts_corresponding_to_tasks(self):
            eq_(self.context.name, 'mytask')
            eq_(len(self.contexts), 3)
        def allows_flaglike_access_via_flags(self):
            assert '--text' in self.context.flags
        def positional_arglist_preserves_order_given(self):
            @task(positional=('second', 'first'))
            def mytask(first, second, third):
                pass
            c = Collection()
            c.add_task(mytask)
            ctx = c.to_contexts()[0]
            eq_(ctx.positional_args, [ctx.args['second'], ctx.args['first']])
        def exposes_namespaced_task_names(self):
            assert 'sub.subtask' in [x.name for x in self.contexts]
        def exposes_namespaced_task_aliases(self):
            assert 'sub.othertask' in self.aliases
        def exposes_subcollection_default_tasks(self):
            assert 'sub' in self.aliases
        def exposes_aliases(self):
            assert 'mytask27' in self.aliases
    class task_names:
        def setup(self):
            self.c = Collection.from_module(load('explicit_root'))
        def returns_all_task_names_including_subtasks(self):
            eq_(
                set(self.c.task_names.keys()),
                set(['top_level', 'sub.sub_task'])
            )
        def includes_aliases_and_defaults_as_values(self):
            names = self.c.task_names
            eq_(names['top_level'], ['othertop'])
            eq_(names['sub.sub_task'], ['sub.othersub', 'sub'])
    class configuration:
        "Configuration methods"
        def setup(self):
            self.root = Collection()
            self.task = Task(_func, name='task')
        def basic_set_and_get(self):
            self.root.configure({'foo': 'bar'})
            eq_(self.root.configuration(), {'foo': 'bar'})
        def configure_performs_merging(self):
            self.root.configure({'foo': 'bar'})
            eq_(self.root.configuration()['foo'], 'bar')
            self.root.configure({'biz': 'baz'})
            eq_(set(self.root.configuration().keys()), set(['foo', 'biz']))
        def configure_merging_is_recursive_for_nested_dicts(self):
            self.root.configure({'foo': 'bar', 'biz': {'baz': 'boz'}})
            self.root.configure({'biz': {'otherbaz': 'otherboz'}})
            c = self.root.configuration()
            eq_(c['biz']['baz'], 'boz')
            eq_(c['biz']['otherbaz'], 'otherboz')
        def configure_allows_overwriting(self):
            self.root.configure({'foo': 'one'})
            eq_(self.root.configuration()['foo'], 'one')
            self.root.configure({'foo': 'two'})
            eq_(self.root.configuration()['foo'], 'two')
        def call_returns_dict(self):
            eq_(self.root.configuration(), {})
            self.root.configure({'foo': 'bar'})
            eq_(self.root.configuration(), {'foo': 'bar'})
        def access_merges_from_subcollections(self):
            inner = Collection('inner', self.task)
            inner.configure({'foo': 'bar'})
            self.root.configure({'biz': 'baz'})
            # With no inner collection
            eq_(set(self.root.configuration().keys()), set(['biz']))
            # With inner collection
            self.root.add_collection(inner)
            eq_(
                set(self.root.configuration('inner.task').keys()),
                set(['foo', 'biz'])
            )
        def parents_overwrite_children_in_path(self):
            inner = Collection('inner', self.task)
            inner.configure({'foo': 'inner'})
            self.root.add_collection(inner)
            # Before updating root collection's config, reflects inner
            eq_(self.root.configuration('inner.task')['foo'], 'inner')
            self.root.configure({'foo': 'outer'})
            # After, reflects outer (since that now overrides)
            eq_(self.root.configuration('inner.task')['foo'], 'outer')
        def sibling_subcollections_ignored(self):
            inner = Collection('inner', self.task)
            inner.configure({'foo': 'hi there'})
            inner2 = Collection('inner2', Task(_func, name='task2'))
            inner2.configure({'foo': 'nope'})
            root = Collection(inner, inner2)
            eq_(root.configuration('inner.task')['foo'], 'hi there')
            eq_(root.configuration('inner2.task2')['foo'], 'nope')
        def subcollection_paths_may_be_dotted(self):
            leaf = Collection('leaf', self.task)
            leaf.configure({'key': 'leaf-value'})
            middle = Collection('middle', leaf)
            root = Collection('root', middle)
            eq_(root.configuration('middle.leaf.task'), {'key': 'leaf-value'})
        def invalid_subcollection_paths_result_in_KeyError(self):
            # Straight up invalid
            assert_raises(KeyError,
                Collection('meh').configuration,
                'nope.task'
            )
            # Exists but wrong level (should be 'root.task', not just
            # 'task')
            inner = Collection('inner', self.task)
            assert_raises(KeyError,
                Collection('root', inner).configuration, 'task')
        def keys_dont_have_to_exist_in_full_path(self):
            # Kinda duplicates earlier stuff; meh
            # Key only stored on leaf
            leaf = Collection('leaf', self.task)
            leaf.configure({'key': 'leaf-value'})
            middle = Collection('middle', leaf)
            root = Collection('root', middle)
            eq_(root.configuration('middle.leaf.task'), {'key': 'leaf-value'})
            # Key stored on mid + leaf but not root
            middle.configure({'key': 'whoa'})
            eq_(root.configuration('middle.leaf.task'), {'key': 'whoa'})
| |
import functools
import logging
import os
from pip._vendor import six
from pip._vendor.packaging.utils import canonicalize_name
from pip._vendor.resolvelib import ResolutionImpossible
from pip._vendor.resolvelib import Resolver as RLResolver
from pip._internal.exceptions import InstallationError
from pip._internal.req.req_install import check_invalid_constraint_type
from pip._internal.req.req_set import RequirementSet
from pip._internal.resolution.base import BaseResolver
from pip._internal.resolution.resolvelib.provider import PipProvider
from pip._internal.resolution.resolvelib.reporter import (
PipDebuggingReporter,
PipReporter,
)
from pip._internal.utils.deprecation import deprecated
from pip._internal.utils.filetypes import is_archive_file
from pip._internal.utils.misc import dist_is_editable
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
from .base import Constraint
from .factory import Factory
if MYPY_CHECK_RUNNING:
from typing import Dict, List, Optional, Set, Tuple
from pip._vendor.resolvelib.resolvers import Result
from pip._vendor.resolvelib.structs import Graph
from pip._internal.cache import WheelCache
from pip._internal.index.package_finder import PackageFinder
from pip._internal.operations.prepare import RequirementPreparer
from pip._internal.req.req_install import InstallRequirement
from pip._internal.resolution.base import InstallRequirementProvider
logger = logging.getLogger(__name__)
class Resolver(BaseResolver):
    """pip's resolvelib-backed dependency resolver.

    NOTE(review): the reinstall-decision chain in ``resolve`` is
    order-sensitive, so only documentation is added here; the code is
    byte-for-byte the original.
    """
    # Valid values for the --upgrade-strategy option.
    _allowed_strategies = {"eager", "only-if-needed", "to-satisfy-only"}
    def __init__(
        self,
        preparer,  # type: RequirementPreparer
        finder,  # type: PackageFinder
        wheel_cache,  # type: Optional[WheelCache]
        make_install_req,  # type: InstallRequirementProvider
        use_user_site,  # type: bool
        ignore_dependencies,  # type: bool
        ignore_installed,  # type: bool
        ignore_requires_python,  # type: bool
        force_reinstall,  # type: bool
        upgrade_strategy,  # type: str
        py_version_info=None,  # type: Optional[Tuple[int, ...]]
    ):
        """Bundle the collaborators into a Factory and record options."""
        super(Resolver, self).__init__()
        assert upgrade_strategy in self._allowed_strategies
        self.factory = Factory(
            finder=finder,
            preparer=preparer,
            make_install_req=make_install_req,
            wheel_cache=wheel_cache,
            use_user_site=use_user_site,
            force_reinstall=force_reinstall,
            ignore_installed=ignore_installed,
            ignore_requires_python=ignore_requires_python,
            py_version_info=py_version_info,
        )
        self.ignore_dependencies = ignore_dependencies
        self.upgrade_strategy = upgrade_strategy
        # Populated by resolve(); consumed by get_installation_order().
        self._result = None  # type: Optional[Result]
    def resolve(self, root_reqs, check_supported_wheels):
        # type: (List[InstallRequirement], bool) -> RequirementSet
        """Resolve *root_reqs* into a RequirementSet ready for installation.

        Constraints are merged per canonical name; regular requirements are
        handed to resolvelib via PipProvider. Each resolved candidate is
        then examined to decide whether an existing installation must be
        uninstalled first (recorded as ``ireq.should_reinstall``).
        """
        constraints = {}  # type: Dict[str, Constraint]
        user_requested = set()  # type: Set[str]
        requirements = []
        for req in root_reqs:
            if req.constraint:
                # Ensure we only accept valid constraints
                problem = check_invalid_constraint_type(req)
                if problem:
                    raise InstallationError(problem)
                if not req.match_markers():
                    continue
                name = canonicalize_name(req.name)
                if name in constraints:
                    # Multiple constraints for one project: intersect them.
                    constraints[name] &= req
                else:
                    constraints[name] = Constraint.from_ireq(req)
            else:
                if req.user_supplied and req.name:
                    user_requested.add(canonicalize_name(req.name))
                r = self.factory.make_requirement_from_install_req(
                    req, requested_extras=(),
                )
                if r is not None:
                    requirements.append(r)
        provider = PipProvider(
            factory=self.factory,
            constraints=constraints,
            ignore_dependencies=self.ignore_dependencies,
            upgrade_strategy=self.upgrade_strategy,
            user_requested=user_requested,
        )
        # Opt-in verbose reporter for debugging resolution behavior.
        if "PIP_RESOLVER_DEBUG" in os.environ:
            reporter = PipDebuggingReporter()
        else:
            reporter = PipReporter()
        resolver = RLResolver(provider, reporter)
        try:
            # Round cap guards against pathological/never-ending resolutions.
            try_to_avoid_resolution_too_deep = 2000000
            self._result = resolver.resolve(
                requirements, max_rounds=try_to_avoid_resolution_too_deep,
            )
        except ResolutionImpossible as e:
            error = self.factory.get_installation_error(e)
            six.raise_from(error, e)
        req_set = RequirementSet(check_supported_wheels=check_supported_wheels)
        for candidate in self._result.mapping.values():
            ireq = candidate.get_install_requirement()
            if ireq is None:
                continue
            # Check if there is already an installation under the same name,
            # and set a flag for later stages to uninstall it, if needed.
            installed_dist = self.factory.get_dist_to_uninstall(candidate)
            if installed_dist is None:
                # There is no existing installation -- nothing to uninstall.
                ireq.should_reinstall = False
            elif self.factory.force_reinstall:
                # The --force-reinstall flag is set -- reinstall.
                ireq.should_reinstall = True
            elif installed_dist.parsed_version != candidate.version:
                # The installation is different in version -- reinstall.
                ireq.should_reinstall = True
            elif candidate.is_editable or dist_is_editable(installed_dist):
                # The incoming distribution is editable, or different in
                # editable-ness to installation -- reinstall.
                ireq.should_reinstall = True
            elif candidate.source_link.is_file:
                # The incoming distribution is under file://
                if candidate.source_link.is_wheel:
                    # is a local wheel -- do nothing.
                    logger.info(
                        "%s is already installed with the same version as the "
                        "provided wheel. Use --force-reinstall to force an "
                        "installation of the wheel.",
                        ireq.name,
                    )
                    continue
                looks_like_sdist = (
                    is_archive_file(candidate.source_link.file_path)
                    and candidate.source_link.ext != ".zip"
                )
                if looks_like_sdist:
                    # is a local sdist -- show a deprecation warning!
                    reason = (
                        "Source distribution is being reinstalled despite an "
                        "installed package having the same name and version as "
                        "the installed package."
                    )
                    replacement = "use --force-reinstall"
                    deprecated(
                        reason=reason,
                        replacement=replacement,
                        gone_in="21.1",
                        issue=8711,
                    )
                # is a local sdist or path -- reinstall
                ireq.should_reinstall = True
            else:
                continue
            link = candidate.source_link
            if link and link.is_yanked:
                # The reason can contain non-ASCII characters, Unicode
                # is required for Python 2.
                msg = (
                    u'The candidate selected for download or install is a '
                    u'yanked version: {name!r} candidate (version {version} '
                    u'at {link})\nReason for being yanked: {reason}'
                ).format(
                    name=candidate.name,
                    version=candidate.version,
                    link=link,
                    reason=link.yanked_reason or u'<none given>',
                )
                logger.warning(msg)
            req_set.add_named_requirement(ireq)
        reqs = req_set.all_requirements
        self.factory.preparer.prepare_linked_requirements_more(reqs)
        return req_set
    def get_installation_order(self, req_set):
        # type: (RequirementSet) -> List[InstallRequirement]
        """Get order for installation of requirements in RequirementSet.
        The returned list contains a requirement before another that depends on
        it. This helps ensure that the environment is kept consistent as they
        get installed one-by-one.
        The current implementation creates a topological ordering of the
        dependency graph, while breaking any cycles in the graph at arbitrary
        points. We make no guarantees about where the cycle would be broken,
        other than they would be broken.
        """
        assert self._result is not None, "must call resolve() first"
        graph = self._result.graph
        # +1 accounts for the synthetic root node (None) added by resolvelib.
        weights = get_topological_weights(
            graph,
            expected_node_count=len(self._result.mapping) + 1,
        )
        sorted_items = sorted(
            req_set.requirements.items(),
            key=functools.partial(_req_set_item_sorter, weights=weights),
            reverse=True,
        )
        return [ireq for _, ireq in sorted_items]
def get_topological_weights(graph, expected_node_count):
    # type: (Graph, int) -> Dict[Optional[str], int]
    """Assign each node a weight equal to its deepest distance from root.

    This implementation may change at any point in the future without prior
    notice.

    The weight of a node is the length of the longest root-to-node path that
    never repeats a node (cycles are simply not followed further). Computed
    by a depth-first walk that tracks the current path; revisiting a node
    already on the path means a cycle, and the walk backs off. When several
    paths reach a node, the longest one wins.
    """
    current_path = set()  # type: Set[Optional[str]]
    weight_of = {}  # type: Dict[Optional[str], int]

    def walk(vertex):
        # type: (Optional[str]) -> None
        if vertex in current_path:
            # Already on the path we took to get here: a cycle. Break it.
            return
        current_path.add(vertex)
        for successor in graph.iter_children(vertex):
            walk(successor)
        current_path.discard(vertex)
        # Depth of this visit == nodes above us on the path; keep the max.
        weight_of[vertex] = max(weight_of.get(vertex, 0), len(current_path))

    # `None` is guaranteed to be the root node by resolvelib.
    walk(None)
    # Sanity checks
    assert weight_of[None] == 0
    assert len(weight_of) == expected_node_count
    return weight_of
def _req_set_item_sorter(
    item,  # type: Tuple[str, InstallRequirement]
    weights,  # type: Dict[Optional[str], int]
):
    # type: (...) -> Tuple[int, str]
    """Key function used to sort install requirements for installation.

    Based on the "weight" mapping calculated in ``get_installation_order()``.
    The canonical package name is returned as the second member as a tie-
    breaker to ensure the result is predictable, which is useful in tests.
    """
    # The weights mapping is keyed by canonical name, so normalize first.
    name = canonicalize_name(item[0])
    return weights[name], name
| |
"""Demo views for `django-docusign`."""
from __future__ import unicode_literals
import os
from django.core.files.base import ContentFile
from django.utils.text import slugify
from django.utils.timezone import now
from django.views.generic import FormView, RedirectView, TemplateView
from django.views.generic.detail import SingleObjectMixin
from django_anysign import api as django_anysign
from django_docusign import api as django_docusign
from django_docusign_demo import forms, models
try:
from django.urls import reverse
except ImportError:
from django.core.urlresolvers import reverse
def docusign_settings(request):
    """Return the DocuSign credentials stored in the request's session.

    Only the expected keys that are actually present in
    ``request.session`` end up in the returned dict.

    NOTE(review): the original docstring mentioned a fallback to environ,
    but no such fallback is implemented here -- confirm intent.
    """
    wanted = ('root_url', 'username', 'password', 'integrator_key',
              'timeout')
    session = request.session
    return {key: session[key] for key in wanted if key in session}
class HomeView(TemplateView):
    """Landing page: shows configuration state and recent signatures."""
    template_name = 'home.html'
    def get_context_data(self, **kwargs):
        """Add ``has_settings`` flag and the 5 latest signatures to context."""
        data = super(HomeView, self).get_context_data(**kwargs)
        # BUG FIX: ``all(docusign_settings(...))`` iterated the dict's KEYS
        # (non-empty strings, always truthy) and ``all({})`` is True, so the
        # flag was always True. Require a non-empty settings dict whose
        # values are all set.
        settings = docusign_settings(self.request)
        data['has_settings'] = bool(settings) and all(settings.values())
        data['latest_signatures'] = models.Signature.objects \
            .all() \
            .order_by('-pk')[0:5]
        return data
class SettingsView(FormView):
    """Store DocuSign settings in session."""
    form_class = forms.SettingsForm
    template_name = 'settings.html'
    def form_valid(self, form):
        """Save configuration in session."""
        data = form.cleaned_data
        for (key, value) in data.items():
            # Store truthy values; booleans are stored even when False so an
            # explicit "off" choice survives in the session.
            if value or isinstance(value, bool):
                self.request.session[key] = value
        return super(SettingsView, self).form_valid(form)
    def get_success_url(self):
        """Redirect to the home page once settings are saved."""
        return reverse('home')
    def get_initial(self):
        """Pre-fill the form with settings already stored in session."""
        return docusign_settings(self.request)
    def get_context_data(self, **kwargs):
        """Expose a default-configured backend for display in the template."""
        data = super(SettingsView, self).get_context_data(**kwargs)
        data['default_backend'] = django_docusign.DocuSignBackend()
        return data
class CreateSignatureView(FormView):
    """Create DocuSign envelope."""
    form_class = forms.CreateSignatureForm
    template_name = 'create_signature.html'
    def get_success_url(self):
        """Return home URL."""
        return reverse('home')
    def form_valid(self, form):
        """Create envelope on DocuSign's side."""
        self.cleaned_data = form.cleaned_data
        # Prepare signature instance with uploaded document, Django side.
        (signature_type, created) = models.SignatureType.objects.get_or_create(
            signature_backend_code='docusign',
            docusign_template_id='')
        signature = models.Signature.objects.create(
            signature_type=signature_type,
            document=self.request.FILES['document'],
            document_title=self.cleaned_data['title'],
        )
        # Add signers.
        for position, signer_data in enumerate(self.cleaned_data['signers']):
            signature.signers.create(
                full_name=signer_data['name'],
                email=signer_data['email'],
                signing_order=position + 1,  # Position starts at 1.
            )
        # Create signature, backend side.
        self.create_signature(signature)
        return super(CreateSignatureView, self).form_valid(form)
    @property
    def signature_backend(self):
        """Signature backend instance, built lazily and cached per view."""
        try:
            return self._signature_backend
        except AttributeError:
            self._signature_backend = self.get_signature_backend()
            return self._signature_backend
    def get_signature_backend(self):
        """Return signature backend instance."""
        # Credentials come from the session (see docusign_settings()).
        backend_settings = docusign_settings(self.request)
        signature_backend = django_anysign.get_signature_backend(
            'docusign',
            **backend_settings
        )
        return signature_backend
    def create_signature(self, signature):
        """Create signature backend-side."""
        self.signature_backend.create_signature(
            signature,
            subject=signature.document_title,
        )
class CreateSignatureTemplateView(CreateSignatureView):
    """Create DocuSign envelope from a server-side DocuSign template."""
    form_class = forms.CreateSignatureTemplateForm
    template_name = 'create_signature_template.html'
    def get_initial(self):
        """template_id initial value."""
        initial = self.initial.copy()
        initial['template_id'] = os.environ.get('DOCUSIGN_TEST_TEMPLATE_ID')
        return initial
    def form_valid(self, form):
        """Create envelope on DocuSign's side."""
        self.cleaned_data = form.cleaned_data
        # Prepare signature instance with uploaded document, Django side.
        (signature_type, created) = models.SignatureType.objects.get_or_create(
            signature_backend_code='docusign',
            docusign_template_id=self.cleaned_data['template_id'])
        # No document here: with a template, DocuSign supplies the document.
        signature = models.Signature.objects.create(
            signature_type=signature_type,
            document_title=self.cleaned_data['title'],
        )
        # Add signers.
        for position, signer_data in enumerate(self.cleaned_data['signers']):
            signature.signers.create(
                full_name=signer_data['name'],
                email=signer_data['email'],
                signing_order=position + 1,  # Position starts at 1.
            )
        # Create signature, backend side.
        self.create_signature(signature)
        # NOTE(review): super(CreateSignatureView, ...) appears deliberate --
        # it skips CreateSignatureView.form_valid (which would create a
        # second envelope) and goes straight to FormView.form_valid. Confirm.
        return super(CreateSignatureView, self).form_valid(form)
class SignerView(SingleObjectMixin, RedirectView):
    """Embed DocuSign's recipient view."""
    model = models.Signer
    permanent = False

    def get_redirect_url(self, *args, **kwargs):
        """Return URL where signer is redirected once doc has been signed."""
        signer = self.get_object()
        backend = django_anysign.get_signature_backend(
            'docusign',
            **docusign_settings(self.request)
        )
        return_url = self.request.build_absolute_uri(
            backend.get_signer_return_url(signer))
        return backend.post_recipient_view(
            signer, signer_return_url=return_url)
class SignerReturnView(django_docusign.SignerReturnView):
    """Welcome the signer back from DocuSign."""
    def get_signature_backend(self):
        """Return signature backend instance."""
        backend_settings = docusign_settings(self.request)
        signature_backend = django_anysign.get_signature_backend(
            'docusign',
            **backend_settings
        )
        return signature_backend
    def get_signer_canceled_url(self, event, status):
        """Url redirect when signer canceled signature."""
        return reverse('anysign:signer_canceled',
                       args=[self.kwargs[self.pk_url_kwarg]])
    def get_signer_error_url(self, event, status):
        """Url redirect when failure."""
        return reverse('anysign:signer_error',
                       args=[self.kwargs[self.pk_url_kwarg]])
    def get_signer_declined_url(self, event, status):
        """Url redirect when signer declined signature."""
        return reverse('anysign:signer_declined',
                       args=[self.kwargs[self.pk_url_kwarg]])
    def get_signer_signed_url(self, event, status):
        """Url redirect when signer signed signature."""
        return reverse('anysign:signer_signed',
                       args=[self.kwargs[self.pk_url_kwarg]])
    def update_signer(self, status, message=''):
        """Update ``signer`` with ``status``.
        Additional ``status_datetime`` argument is the datetime mentioned by
        DocuSign.
        """
        signer = self.get_object()
        signer.status = status
        # NOTE(review): the docstring mentions a DocuSign-provided datetime,
        # but the local clock is recorded here -- confirm intent.
        signer.status_datetime = now()
        signer.status_details = message
        signer.save()
    def update_signature(self, status):
        # Persist the envelope-level status on the Signature record.
        self.signature.status = status
        self.signature.save()
    def replace_document(self, signed_document):
        # Replace old document by signed one.
        filename = self.signature.document.name
        if not filename:
            # Template-based signatures have no uploaded file: derive a
            # filename from the document title.
            filename = "%s.pdf" % slugify(
                self.signature.document_title)
        self.signature.document.delete(save=False)
        self.signature.document.save(filename,
                                     ContentFile(signed_document.read()),
                                     save=True)
class SignerCanceledView(TemplateView):
    """Static page shown when the signer canceled the signing session."""
    template_name = 'signer_canceled.html'
class SignerErrorView(TemplateView):
    """Static page shown when DocuSign reported a failure."""
    template_name = 'signer_error.html'
class SignerDeclinedView(TemplateView):
    """Static page shown when the signer declined to sign."""
    template_name = 'signer_declined.html'
class SignerSignedView(TemplateView):
    """Static page shown after a successful signature."""
    template_name = 'signer_signed.html'
| |
#!/usr/bin/env python3
"""
This is an example of how the pytradfri-library can be used async.
To run the script, do the following:
$ pip3 install pytradfri
$ Download this file (example_async.py)
$ python3 example_async.py <IP>
Where <IP> is the address to your IKEA gateway. The first time
running you will be asked to input the 'Security Code' found on
the back of your IKEA gateway.
"""
# Hack to allow relative import above top level package
import sys
import os
folder = os.path.dirname(os.path.abspath(__file__)) # noqa
sys.path.insert(0, os.path.normpath("%s/.." % folder)) # noqa
from pytradfri import Gateway
from pytradfri.api.aiocoap_api import APIFactory
from pytradfri.error import PytradfriError
from pytradfri.util import load_json, save_json
import asyncio
import uuid
import argparse
import json
import datetime
# In-memory mirrors of the gateway state. The lists are parallel arrays:
# the same position in each light* (resp. group*) list describes the same
# light (resp. group). They are filled by run() and kept current by the
# observe callbacks, then serialized by savelights()/savegroups().
lightArrayId = []
lightArraySts = []
lightArrayColor = []
lightArrayValue = []
groupArrayId = []
groupArraySts = []
groupArrayValue = []
groupArrayMood = []
def output(o, x):
    """Print a log line "<tag> <HH:MM:SS> <message>" and flush stdout."""
    timestamp = str(datetime.datetime.now().time())[:8]
    print("%s %s %s" % (o, timestamp, x))
    sys.stdout.flush()
# Mapping from the gateway's hex colour strings to the palette index used
# by the rest of the script. Unknown colours fall back to index 1 (same
# behaviour as the original if/elif chain).
_COLOR_TEMP_INDEX = {
    'dcf0f8': 4,
    'eaf6fb': 5,
    'f5faf6': 1,
    'f2eccf': 6,
    'f1e0b5': 2,
    'efd275': 3,
    'ebb63e': 7,
    'e78834': 8,
    'e57345': 9,
    'da5d41': 10,
    'dc4b31': 11,
    'e491af': 12,
    'e8bedd': 13,
    'd9337c': 14,
    'c984bb': 15,
    '8f2686': 16,
    '4a418a': 17,
    '6c83ba': 18,
    'a9d62b': 19,
    'd6e44b': 20,
}
def get_color_temp_idx(color):
    """Return the palette index for a hex colour string (1 if unknown)."""
    return _COLOR_TEMP_INDEX.get(color, 1)
# File where per-gateway identity/PSK pairs are persisted between runs.
CONFIG_FILE = 'tradfri_standalone_psk.conf'
parser = argparse.ArgumentParser()
parser.add_argument('host', metavar='IP', type=str,
                    help='IP Address of your Tradfri gateway')
parser.add_argument('-K', '--key', dest='key', required=False,
                    help='Key found on your Tradfri gateway')
args = parser.parse_args()
# First contact with this gateway and no key given on the command line:
# prompt for the 16-character security code printed on the gateway.
if args.host not in load_json(CONFIG_FILE) and args.key is None:
    print("Please provide the 'Security Code' on the back of your "
          "Tradfri gateway:", end=" ")
    key = input().strip()
    if len(key) != 16:
        raise PytradfriError("Invalid 'Security Code' provided.")
    else:
        args.key = key
try:
    # pylint: disable=ungrouped-imports
    from asyncio import ensure_future
except ImportError:
    # Python 3.4.3 and earlier name this function "async".
    # BUG FIX: "async" became a reserved keyword in Python 3.7, so the
    # former "from asyncio import async" fallback was a SyntaxError that
    # broke the whole module even when the fallback was not needed. The
    # legacy name can only be reached via getattr().
    import asyncio
    ensure_future = getattr(asyncio, "async")
def savelights():
    """Dump the light arrays to files/datafile.json.

    One "id,state,dimmer,coloridx," record per light is concatenated into
    a single line (comma-separated values, despite the .json extension).
    """
    records = zip(lightArrayId, lightArraySts, lightArrayValue,
                  lightArrayColor)
    lista = "".join("%s,%s,%s,%s," % record for record in records)
    # The context manager guarantees the handle is closed even on error
    # (the original left the file open if write() raised).
    with open("files/datafile.json", "w") as wfile:
        wfile.write(lista)
    output("LIGHYMONITOR", "file salvato")
def savegroups():
    """Dump the group arrays to files/datafilegroups.json.

    One "id,state,dimmer,mood," record per group is concatenated into a
    single line (comma-separated values, despite the .json extension).
    """
    records = zip(groupArrayId, groupArraySts, groupArrayValue,
                  groupArrayMood)
    lista = "".join("%s,%s,%s,%s," % record for record in records)
    # The context manager guarantees the handle is closed even on error
    # (the original left the file open if write() raised).
    with open("files/datafilegroups.json", "w") as wfile:
        wfile.write(lista)
    output("LIGHYMONITOR", "file salvato")
def get_index(id, list_):
    """Return the position of *id* in *list_* (comparing as strings), -1 if absent."""
    target = str(id)
    matches = (pos for pos, item in enumerate(list_) if str(item) == target)
    return next(matches, -1)
# Bootstrap a gateway API connection from the stored PSK configuration.
conf = load_json(CONFIG_FILE)
# NOTE(review): on the first run for a host, conf[args.host] raises
# KeyError HERE, before run() gets a chance to generate a PSK -- the same
# lookup inside run() is guarded by try/except KeyError, this module-level
# copy is not. Confirm and guard.
identity = conf[args.host].get('identity')
psk = conf[args.host].get('key')
api_factory = APIFactory(host=args.host, psk_id=identity, psk=psk)
api = api_factory.request
gateway = Gateway()
# Populated by run() once devices have been fetched from the gateway.
lights = []
groups = []
# NOTE: asyncio.coroutine / "yield from" style targets the old Pythons this
# script supports (see the asyncio "async" fallback above); the decorator
# was removed in Python 3.11.
@asyncio.coroutine
def run():
    """Connect to the gateway, snapshot lights and groups to disk, then
    observe both and keep the on-disk files updated on every change."""
    global conf
    global api_factory
    global lights
    global groups
    global api
    global gateway
    # Assign configuration variables.
    # The configuration check takes care they are present.
    try:
        identity = conf[args.host].get('identity')
        psk = conf[args.host].get('key')
        api_factory = APIFactory(host=args.host, psk_id=identity, psk=psk)
    except KeyError:
        # First run for this gateway: create an identity, exchange the
        # security code for a PSK, and persist both for next time.
        identity = uuid.uuid4().hex
        api_factory = APIFactory(host=args.host, psk_id=identity)
        try:
            psk = yield from api_factory.generate_psk(args.key)
            print('Generated PSK: ', psk)
            conf[args.host] = {'identity': identity,
                               'key': psk}
            save_json(CONFIG_FILE, conf)
        except AttributeError:
            raise PytradfriError("Please provide the 'Security Code' on the "
                                 "back of your Tradfri gateway using the "
                                 "-K flag.")
    # Fetch all devices, then all groups, from the gateway.
    devices_command = gateway.get_devices()
    devices_commands = yield from api(devices_command)
    devices = yield from api(devices_commands)
    groups_command = gateway.get_groups()
    groups_commands = yield from api(groups_command)
    groupc = yield from api(groups_commands)
    groups = [dev for dev in groupc]
    lights = [dev for dev in devices if dev.has_light_control]
    # Seed the parallel arrays with the current light state...
    for light in lights:
        lightArrayId.append(light.id)
        lightArraySts.append(light.light_control.lights[0].state)
        lightArrayValue.append(light.light_control.lights[0].dimmer)
        lightArrayColor.append(get_color_temp_idx(light.light_control.lights[0].hex_color))
    # ...and the current group state, then write both snapshots to disk.
    for group in groups:
        groupArrayId.append(str(group.path[1]))
        groupArraySts.append(group.state)
        groupArrayValue.append(group.dimmer)
        groupArrayMood.append(group.mood)
    savelights()
    savegroups()
    # Lights can be accessed by index, so lights[1] is the second light.
    if lights:
        light = lights[0]
    else:
        output("LIGHYMONITOR", "No lights found!")
        light = None

    def observe_callback(updated_device):
        # A light changed: refresh its cached entry and re-save the file.
        light = updated_device.light_control.lights[0]
        output("LIGHYMONITOR", "Received message for: %s" % light)
        light = updated_device
        x = get_index(light.id, lightArrayId)
        lightArraySts[x] = light.light_control.lights[0].state
        lightArrayValue[x] = light.light_control.lights[0].dimmer
        lightArrayColor[x] = get_color_temp_idx(light.light_control.lights[0].hex_color)
        savelights()

    def observe_callback_2(updated_device):
        # A group changed: only log it (state refresh is intentionally
        # disabled; the commented-out re-sync code was removed).
        output("LIGHYMONITOR", "Received message for: %s" % updated_device.path[1])

    def observe_err_callback(err):
        # BUG FIX: output() takes exactly two arguments; the previous
        # three-argument call raised TypeError whenever an observe error
        # was actually reported.
        output("LIGHYMONITOR", 'observe error: %s' % err)

    for light in lights:
        observe_command = light.observe(observe_callback, observe_err_callback,
                                        duration=120)
        # Start observation as a second task on the loop.
        ensure_future(api(observe_command))
        # Yield to allow observing to start.
        yield from asyncio.sleep(0)
    for group in groups:
        observe_command = group.observe(observe_callback_2, observe_err_callback,
                                        duration=120)
        # Start observation as a second task on the loop.
        ensure_future(api(observe_command))
        # Yield to allow observing to start.
        yield from asyncio.sleep(0)
    output("LIGHYMONITOR", "Waiting for observation to end (2 mins)")
    output("LIGHYMONITOR", "Try altering any light in the app, and watch the events!")
    while True:
        yield from asyncio.sleep(10)
# Run the (never-ending) monitoring coroutine on the default event loop.
asyncio.get_event_loop().run_until_complete(run())
| |
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2016 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import bitcoin
import keystore
from keystore import bip44_derivation,bip44_derivation_145
from wallet import Wallet, Imported_Wallet, Standard_Wallet, Multisig_Wallet, wallet_types
from i18n import _
from plugins import run_hook
class BaseWizard(object):
    """State machine driving the wallet creation/restore flows.

    GUI subclasses provide the various ``*_dialog`` methods and
    ``show_error``/``terminate``; this class keeps a navigation stack and
    dispatches named actions via ``run()``.
    NOTE: ``self.plugins`` is expected to be supplied by the subclass
    before any hardware-wallet action is used.
    """

    def __init__(self, config, storage):
        super(BaseWizard, self).__init__()
        self.config = config
        self.storage = storage
        self.wallet = None
        self.stack = []  # (action, args) history, consumed by go_back()
        self.plugin = None
        self.keystores = []
        self.is_kivy = config.get('gui') == 'kivy'
        self.seed_type = None

    def run(self, *args):
        """Push the action onto the stack, then dispatch it.

        ``action`` may be a method name on self or on the current plugin,
        or a ``(plugin, name)`` tuple. A falsy action is recorded but not
        dispatched (used as a flow terminator).
        """
        action = args[0]
        args = args[1:]
        self.stack.append((action, args))
        if not action:
            return
        if type(action) is tuple:
            self.plugin, action = action
        if self.plugin and hasattr(self.plugin, action):
            f = getattr(self.plugin, action)
            f(self, *args)
        elif hasattr(self, action):
            f = getattr(self, action)
            f(*args)
        else:
            raise BaseException("unknown action", action)

    def can_go_back(self):
        """True when there is a previous step to return to."""
        return len(self.stack) > 1

    def go_back(self):
        """Drop the current step and re-run the previous one."""
        if not self.can_go_back():
            return
        self.stack.pop()
        action, args = self.stack.pop()
        self.run(action, *args)

    def new(self):
        """First screen: choose the kind of wallet to create."""
        name = os.path.basename(self.storage.path)
        # NOTE(review): .decode('utf8') implies Python 2 byte strings; on
        # Python 3 this raises AttributeError -- confirm target version.
        title = _("Create") + ' ' + name.decode('utf8')
        message = '\n'.join([
            _("What kind of wallet do you want to create?")
        ])
        wallet_kinds = [
            ('standard', _("Standard wallet")),
            ('multisig', _("Multi-signature wallet")),
            ('imported', _("Watch Bitcoin addresses")),
        ]
        # Only offer kinds supported by the installed wallet classes.
        choices = [pair for pair in wallet_kinds if pair[0] in wallet_types]
        self.choice_dialog(title=title, message=message, choices=choices, run_next=self.on_wallet_type)

    def on_wallet_type(self, choice):
        """Route to the flow matching the selected wallet kind."""
        self.wallet_type = choice
        if choice == 'standard':
            action = 'choose_keystore'
        elif choice == 'multisig':
            action = 'choose_multisig'
        elif choice == 'imported':
            action = 'import_addresses'
        self.run(action)

    def choose_multisig(self):
        """Ask for the m-of-n scheme, then continue to keystore setup."""
        def on_multisig(m, n):
            self.multisig_type = "%dof%d" % (m, n)
            self.storage.put('wallet_type', self.multisig_type)
            self.n = n
            self.run('choose_keystore')
        self.multisig_dialog(run_next=on_multisig)

    def choose_keystore(self):
        """Ask how the next keystore (or cosigner) should be provided."""
        assert self.wallet_type in ['standard', 'multisig']
        i = len(self.keystores)
        title = _('Add cosigner') + ' (%d of %d)' % (i+1, self.n) if self.wallet_type == 'multisig' else _('Keystore')
        if self.wallet_type == 'standard' or i == 0:
            message = _('Do you want to create a new seed, or to restore a wallet using an existing seed?')
            choices = [
                ('create_seed', _('Create a new seed')),
                ('restore_from_seed', _('I already have a seed')),
                ('restore_from_key', _('Use public or private keys')),
            ]
            if not self.is_kivy:
                choices.append(('choose_hw_device', _('Use a hardware device')))
        else:
            message = _('Add a cosigner to your multi-sig wallet')
            choices = [
                ('restore_from_key', _('Enter cosigner key')),
                ('restore_from_seed', _('Enter cosigner seed')),
            ]
            if not self.is_kivy:
                choices.append(('choose_hw_device', _('Cosign with hardware device')))
        self.choice_dialog(title=title, message=message, choices=choices, run_next=self.run)

    def import_addresses(self):
        """Create a watching-only wallet from a list of addresses."""
        v = keystore.is_address_list
        title = _("Import Bitcoin Addresses")
        message = _("Enter a list of Bitcoin addresses. This will create a watching-only wallet.")
        self.add_xpub_dialog(title=title, message=message, run_next=self.on_import_addresses, is_valid=v)

    def on_import_addresses(self, text):
        """Build the Imported_Wallet from the validated address list."""
        assert keystore.is_address_list(text)
        self.wallet = Imported_Wallet(self.storage)
        for x in text.split():
            self.wallet.import_address(x)
        self.terminate()

    def restore_from_key(self):
        """Restore from an xpub/xprv (or list of private keys)."""
        if self.wallet_type == 'standard':
            v = keystore.is_any_key
            title = _("Create keystore from keys")
            message = ' '.join([
                _("To create a watching-only wallet, please enter your master public key (xpub)."),
                _("To create a spending wallet, please enter a master private key (xprv), or a list of Bitcoin private keys.")
            ])
            self.add_xpub_dialog(title=title, message=message, run_next=self.on_restore_from_key, is_valid=v)
        else:
            # Multisig cosigners must provide BIP32 keys.
            v = keystore.is_bip32_key
            i = len(self.keystores) + 1
            self.add_cosigner_dialog(index=i, run_next=self.on_restore_from_key, is_valid=v)

    def on_restore_from_key(self, text):
        k = keystore.from_keys(text)
        self.on_keystore(k)

    def choose_hw_device(self):
        """Scan for supported hardware wallets and let the user pick one."""
        title = _('Hardware Keystore')
        # check available plugins
        support = self.plugins.get_hardware_support()
        if not support:
            msg = '\n'.join([
                _('No hardware wallet support found on your system.'),
                _('Please install the relevant libraries (eg python-trezor for Trezor).'),
            ])
            self.confirm_dialog(title=title, message=msg, run_next=lambda x: self.choose_hw_device())
            return
        # scan devices
        devices = []
        devmgr = self.plugins.device_manager
        for name, description, plugin in support:
            try:
                # FIXME: side-effect: unpaired_device_info sets client.handler
                u = devmgr.unpaired_device_infos(None, plugin)
            except Exception:
                # Narrowed from a bare "except:" so KeyboardInterrupt and
                # SystemExit still propagate during the scan.
                devmgr.print_error("error", name)
                continue
            devices += map(lambda x: (name, x), u)
        if not devices:
            msg = ''.join([
                _('No hardware device detected.') + '\n',
                _('To trigger a rescan, press \'Next\'.') + '\n\n',
                _('If your device is not detected on Windows, go to "Settings", "Devices", "Connected devices", and do "Remove device". Then, plug your device again.') + ' ',
                _('On Linux, you might have to add a new permission to your udev rules.'),
            ])
            self.confirm_dialog(title=title, message=msg, run_next=lambda x: self.choose_hw_device())
            return
        # select device
        self.devices = devices
        choices = []
        for name, info in devices:
            state = _("initialized") if info.initialized else _("wiped")
            label = info.label or _("An unnamed %s") % name
            descr = "%s [%s, %s]" % (label, name, state)
            choices.append(((name, info), descr))
        msg = _('Select a device') + ':'
        self.choice_dialog(title=title, message=msg, choices=choices, run_next=self.on_device)

    def on_device(self, name, device_info):
        """Pair with the chosen device, then ask for a derivation path."""
        self.plugin = self.plugins.get_plugin(name)
        try:
            self.plugin.setup_device(device_info, self)
        except BaseException as e:
            self.show_error(str(e))
            self.choose_hw_device()
            return
        if self.wallet_type == 'multisig':
            # There is no general standard for HD multisig.
            # This is partially compatible with BIP45; assumes index=0
            self.on_hw_derivation(name, device_info, "m/45'/0")
        else:
            f = lambda x: self.run('on_hw_derivation', name, device_info, str(x))
            self.derivation_dialog_145(f)

    def derivation_dialog(self, f):
        """Ask for a derivation path, defaulting to BIP44 coin type 0."""
        default = bip44_derivation(0)
        message = '\n'.join([
            _('Enter your wallet derivation here.'),
            _('If you are not sure what this is, leave this field unchanged.'),
            _("If you want the wallet to use legacy Bitcoin addresses use m/44'/0'/0'"),
            _("If you want the wallet to use Bitcoin Cash addresses use m/44'/145'/0'")
        ])
        self.line_dialog(run_next=f, title=_('Derivation'), message=message, default=default, test=bitcoin.is_bip32_derivation)

    def derivation_dialog_145(self, f):
        """Ask for a derivation path, defaulting to BIP44 coin type 145."""
        default = bip44_derivation_145(0)
        message = '\n'.join([
            _('Enter your wallet derivation here.'),
            _('If you are not sure what this is, leave this field unchanged.'),
            _("If you want the wallet to use legacy Bitcoin addresses use m/44'/0'/0'"),
            # Typo fix: "adresses" -> "addresses" (matches derivation_dialog).
            _("If you want the wallet to use Bitcoin Cash addresses use m/44'/145'/0'")
        ])
        self.line_dialog(run_next=f, title=_('Derivation'), message=message, default=default, test=bitcoin.is_bip32_derivation)

    def on_hw_derivation(self, name, device_info, derivation):
        """Read the xpub at *derivation* from the device; build a keystore."""
        from keystore import hardware_keystore
        xpub = self.plugin.get_xpub(device_info.device.id_, derivation, self)
        if xpub is None:
            self.show_error('Cannot read xpub from device')
            return
        d = {
            'type': 'hardware',
            'hw_type': name,
            'derivation': derivation,
            'xpub': xpub,
            'label': device_info.label,
        }
        k = hardware_keystore(d)
        self.on_keystore(k)

    def passphrase_dialog(self, run_next):
        """Ask for an optional seed extension (BIP39 passphrase)."""
        title = _('Seed extension')
        message = '\n'.join([
            _('You may extend your seed with custom words.'),
            _('Your seed extension must be saved together with your seed.'),
        ])
        warning = '\n'.join([
            _('Note that this is NOT your encryption password.'),
            _('If you do not know what this is, leave this field empty.'),
        ])
        self.line_dialog(title=title, message=message, warning=warning, default='', test=lambda x: True, run_next=run_next)

    def restore_from_seed(self):
        """Ask for an existing seed (Electrum or BIP39 flavors)."""
        self.opt_bip39 = True
        self.opt_ext = True
        self.opt_bip39_145 = True
        test = bitcoin.is_seed if self.wallet_type == 'standard' else bitcoin.is_new_seed
        self.restore_seed_dialog(run_next=self.on_restore_seed, test=test)

    def on_restore_seed(self, seed, is_bip39, is_ext, is_bip39_145):
        """Dispatch on the detected seed type (bip39 / electrum / old)."""
        self.seed_type = 'bip39' if is_bip39 else bitcoin.seed_type(seed)
        if self.seed_type == 'bip39':
            if is_bip39_145 == True:
                f = lambda passphrase: self.on_restore_bip39_145(seed, passphrase)
            else:
                f = lambda passphrase: self.on_restore_bip39(seed, passphrase)
            self.passphrase_dialog(run_next=f) if is_ext else f('')
        elif self.seed_type in ['standard', 'segwit']:
            f = lambda passphrase: self.run('create_keystore', seed, passphrase)
            self.passphrase_dialog(run_next=f) if is_ext else f('')
        elif self.seed_type == 'old':
            self.run('create_keystore', seed, '')
        else:
            # BUG FIX: `seed_type` was an undefined local name here, so the
            # raise itself died with a NameError; report the attribute.
            raise BaseException('Unknown seed type', self.seed_type)

    def on_restore_bip39(self, seed, passphrase):
        f = lambda x: self.run('on_bip44', seed, passphrase, str(x))
        self.derivation_dialog(f)

    def on_restore_bip39_145(self, seed, passphrase):
        f = lambda x: self.run('on_bip44', seed, passphrase, str(x))
        self.derivation_dialog_145(f)

    def create_keystore(self, seed, passphrase):
        k = keystore.from_seed(seed, passphrase)
        self.on_keystore(k)

    def on_bip44(self, seed, passphrase, derivation):
        """Build a BIP32 keystore from a BIP39 seed plus derivation path."""
        k = keystore.BIP32_KeyStore({})
        bip32_seed = keystore.bip39_to_seed(seed, passphrase)
        k.add_xprv_from_seed(bip32_seed, 0, derivation)
        self.on_keystore(k)

    def on_keystore(self, k):
        """Collect a finished keystore; continue or wait for cosigners."""
        if self.wallet_type == 'standard':
            self.keystores.append(k)
            self.run('create_wallet')
        elif self.wallet_type == 'multisig':
            if k.xpub in map(lambda x: x.xpub, self.keystores):
                self.show_error(_('Error: duplicate master public key'))
                self.run('choose_keystore')
                return
            self.keystores.append(k)
            if len(self.keystores) == 1:
                xpub = k.get_master_public_key()
                # Reset navigation: cosigner setup must not step back into
                # the first keystore's seed creation.
                self.stack = []
                self.run('show_xpub_and_add_cosigners', xpub)
            elif len(self.keystores) < self.n:
                self.run('choose_keystore')
            else:
                self.run('create_wallet')

    def create_wallet(self):
        """Ask for a password when any keystore can be encrypted."""
        if any(k.may_have_password() for k in self.keystores):
            self.request_password(run_next=self.on_password)
        else:
            self.on_password(None, False)

    def on_password(self, password, encrypt):
        """Encrypt keystores with the chosen password and build the wallet."""
        self.storage.set_password(password, encrypt)
        for k in self.keystores:
            if k.may_have_password():
                k.update_password(None, password)
        if self.wallet_type == 'standard':
            self.storage.put('seed_type', self.seed_type)
            # A standard wallet has exactly one keystore, so `k` (left over
            # from the loop above) is it.
            self.storage.put('keystore', k.dump())
            self.wallet = Standard_Wallet(self.storage)
            self.run('create_addresses')
        elif self.wallet_type == 'multisig':
            for i, k in enumerate(self.keystores):
                self.storage.put('x%d/' % (i+1), k.dump())
            self.storage.write()
            self.wallet = Multisig_Wallet(self.storage)
            self.run('create_addresses')

    def show_xpub_and_add_cosigners(self, xpub):
        self.show_xpub_dialog(xpub=xpub, run_next=lambda x: self.run('choose_keystore'))

    def add_cosigners(self, password, i):
        self.add_cosigner_dialog(run_next=lambda x: self.on_cosigner(x, password, i), index=i, is_valid=keystore.is_xpub)

    def on_cosigner(self, text, password, i):
        k = keystore.from_keys(text, password)
        self.on_keystore(k)

    def create_seed(self):
        """Generate a fresh seed and show it to the user."""
        import mnemonic
        self.seed_type = 'segwit' if bitcoin.TESTNET and self.config.get('segwit') else 'standard'
        seed = mnemonic.Mnemonic('en').make_seed(self.seed_type)
        self.opt_bip39 = False
        f = lambda x: self.request_passphrase(seed, x)
        self.show_seed_dialog(run_next=f, seed_text=seed)

    def request_passphrase(self, seed, opt_passphrase):
        """Optionally ask for a seed extension before confirmation."""
        if opt_passphrase:
            f = lambda x: self.confirm_seed(seed, x)
            self.passphrase_dialog(run_next=f)
        else:
            self.run('confirm_seed', seed, '')

    def confirm_seed(self, seed, passphrase):
        """Make the user retype the seed before proceeding."""
        f = lambda x: self.confirm_passphrase(seed, passphrase)
        self.confirm_seed_dialog(run_next=f, test=lambda x: x == seed)

    def confirm_passphrase(self, seed, passphrase):
        """Make the user retype the seed extension (when one was given)."""
        f = lambda x: self.run('create_keystore', seed, x)
        if passphrase:
            title = _('Confirm Seed Extension')
            message = '\n'.join([
                _('Your seed extension must be saved together with your seed.'),
                _('Please type it here.'),
            ])
            self.line_dialog(run_next=f, title=title, message=message, default='', test=lambda x: x == passphrase)
        else:
            f('')

    def create_addresses(self):
        """Derive the wallet's initial addresses in a background task."""
        def task():
            self.wallet.synchronize()
            self.wallet.storage.write()
            self.terminate()
        msg = _("Electrum is generating your addresses, please wait.")
        self.waiting_dialog(task, msg)
| |
# testutils.py - utility module for psycopg2 testing.
#
# Copyright (C) 2010-2011 Daniele Varrazzo <daniele.varrazzo@gmail.com>
#
# psycopg2 is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# In addition, as a special exception, the copyright holders give
# permission to link this program with the OpenSSL library (or with
# modified versions of OpenSSL that use the same license as OpenSSL),
# and distribute linked combinations including the two.
#
# You must obey the GNU Lesser General Public License in all respects for
# all of the code used other than OpenSSL.
#
# psycopg2 is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
# License for more details.
# Use unittest2 if available. Otherwise mock a skip facility with warnings.
import re
import os
import sys
import select
import platform
from functools import wraps
from .testconfig import dsn, repl_dsn
try:
import unittest2
unittest = unittest2
except ImportError:
import unittest
unittest2 = None
# Provide skip()/skipIf() on Python versions whose unittest lacks them
# (pre-2.7 without unittest2): the fallback turns skipped tests into
# UserWarnings instead of real skip records.
if hasattr(unittest, 'skipIf'):
    skip = unittest.skip
    skipIf = unittest.skipIf
else:
    import warnings
    def skipIf(cond, msg):
        """Decorator: replace the test body with a warning when *cond* is true."""
        def skipIf_(f):
            @wraps(f)
            def skipIf__(self):
                if cond:
                    with warnings.catch_warnings():
                        warnings.simplefilter('always', UserWarning)
                        warnings.warn(msg)
                    return
                else:
                    return f(self)
            return skipIf__
        return skipIf_
    def skip(msg):
        """Unconditional variant of skipIf()."""
        return skipIf(True, msg)
    def skipTest(self, msg):
        # Instance-level replacement for TestCase.skipTest: warn and return.
        with warnings.catch_warnings():
            warnings.simplefilter('always', UserWarning)
            warnings.warn(msg)
        return
    unittest.TestCase.skipTest = skipTest
# Silence warnings caused by the stubbornness of the Python unittest
# maintainers
# http://bugs.python.org/issue9424
if (not hasattr(unittest.TestCase, 'assert_')
        or unittest.TestCase.assert_ is not unittest.TestCase.assertTrue):
    # mavaff...
    # BUG FIX: this block compared assertTrue with itself (always False)
    # and re-assigned attributes to themselves, so the deprecated aliases
    # were never actually (re)created. Restore the intended aliasing of
    # the old camelCase names onto the canonical assertions.
    unittest.TestCase.assert_ = unittest.TestCase.assertTrue
    unittest.TestCase.failUnless = unittest.TestCase.assertTrue
    unittest.TestCase.assertEquals = unittest.TestCase.assertEqual
    unittest.TestCase.failUnlessEqual = unittest.TestCase.assertEqual
def assertDsnEqual(self, dsn1, dsn2, msg=None):
    """Check that two conninfo string have the same content"""
    # Order of the space-separated options is irrelevant, so compare as sets.
    opts1 = set(dsn1.split())
    opts2 = set(dsn2.split())
    self.assertEqual(opts1, opts2, msg)
unittest.TestCase.assertDsnEqual = assertDsnEqual
class ConnectingTestCase(unittest.TestCase):
    """A test case providing connections for tests.

    A connection for the test is always available as `self.conn`. Others can be
    created with `self.connect()`. All are closed on tearDown.

    Subclasses needing to customize setUp and tearDown should remember to call
    the base class implementations.
    """
    def setUp(self):
        # Connections created by connect()/repl_connect(); closed in tearDown.
        self._conns = []

    def tearDown(self):
        # close the connections used in the test
        for conn in self._conns:
            if not conn.closed:
                conn.close()

    def assertQuotedEqual(self, first, second, msg=None):
        """Compare two quoted strings disregarding eventual E'' quotes"""
        def f(s):
            # Normalize PostgreSQL E'' escape-string quoting to plain ''.
            if isinstance(s, str):
                return re.sub(r"\bE'", "'", s)
            elif isinstance(s, bytes):
                # BUG FIX: this used to test isinstance(first, bytes), so
                # whether *s* got normalized depended on the type of the
                # OTHER argument.  Test the value being normalized itself.
                return re.sub(br"\bE'", b"'", s)
            else:
                return s
        return self.assertEqual(f(first), f(second), msg)

    def connect(self, **kwargs):
        """Open a new connection, registered for tearDown cleanup.

        The conninfo defaults to the test *dsn* from testconfig; override
        with a `dsn` keyword argument.  Raises a helpful AttributeError if
        setUp() was not called first.
        """
        try:
            self._conns
        except AttributeError as e:
            raise AttributeError(
                "%s (did you forget to call ConnectingTestCase.setUp()?)"
                % e)

        if 'dsn' in kwargs:
            conninfo = kwargs.pop('dsn')
        else:
            conninfo = dsn
        import psycopg2
        conn = psycopg2.connect(conninfo, **kwargs)
        self._conns.append(conn)
        return conn

    def repl_connect(self, **kwargs):
        """Return a connection set up for replication

        The connection is on "PSYCOPG2_TEST_REPL_DSN" unless overridden by
        a *dsn* kwarg.

        Should raise a skip test if not available, but guard for None on
        old Python versions.
        """
        if repl_dsn is None:
            return self.skipTest("replication tests disabled by default")

        if 'dsn' not in kwargs:
            kwargs['dsn'] = repl_dsn
        import psycopg2
        try:
            conn = self.connect(**kwargs)
            if conn.async_ == 1:
                self.wait(conn)
        except psycopg2.OperationalError as e:
            # If pgcode is not set it is a genuine connection error
            # Otherwise we tried to run some bad operation in the connection
            # (e.g. bug #482) and we'd rather know that.
            if e.pgcode is None:
                return self.skipTest("replication db not configured: %s" % e)
            else:
                raise
        return conn

    def _get_conn(self):
        # Lazily open the default connection on first access of self.conn.
        if not hasattr(self, '_the_conn'):
            self._the_conn = self.connect()
        return self._the_conn

    def _set_conn(self, conn):
        self._the_conn = conn

    conn = property(_get_conn, _set_conn)

    # for use with async connections only
    def wait(self, cur_or_conn):
        """Poll an async cursor or connection until the operation completes."""
        import psycopg2.extensions
        pollable = cur_or_conn
        if not hasattr(pollable, 'poll'):
            pollable = cur_or_conn.connection
        while True:
            state = pollable.poll()
            if state == psycopg2.extensions.POLL_OK:
                break
            elif state == psycopg2.extensions.POLL_READ:
                select.select([pollable], [], [], 10)
            elif state == psycopg2.extensions.POLL_WRITE:
                select.select([], [pollable], [], 10)
            else:
                raise Exception("Unexpected result from poll: %r", state)
def decorate_all_tests(cls, *decorators):
"""
Apply all the *decorators* to all the tests defined in the TestCase *cls*.
"""
for n in dir(cls):
if n.startswith('test'):
for d in decorators:
setattr(cls, n, d(getattr(cls, n)))
def skip_if_no_uuid(f):
    """Decorator to skip a test if uuid is not supported by Py/PG."""
    @wraps(f)
    def skip_if_no_uuid_(self):
        # First: is the uuid module available in this Python at all?
        try:
            import uuid  # noqa
        except ImportError:
            return self.skipTest("uuid not available in this Python version")

        # Second: does the server know the 'uuid' type?
        try:
            cur = self.conn.cursor()
            cur.execute("select typname from pg_type where typname = 'uuid'")
            has = cur.fetchone()
        finally:
            # Leave the test connection clean whether the probe succeeded
            # or not.
            self.conn.rollback()

        if has:
            return f(self)
        else:
            return self.skipTest("uuid type not available on the server")

    return skip_if_no_uuid_
def skip_if_tpc_disabled(f):
    """Skip a test if the server has tpc support disabled."""
    @wraps(f)
    def skip_if_tpc_disabled_(self):
        from psycopg2 import ProgrammingError
        # Use a throwaway connection so probing doesn't disturb self.conn.
        cnn = self.connect()
        cur = cnn.cursor()
        try:
            cur.execute("SHOW max_prepared_transactions;")
        except ProgrammingError:
            # Very old servers don't know the setting at all.
            return self.skipTest(
                "server too old: two phase transactions not supported.")
        else:
            mtp = int(cur.fetchone()[0])
        cnn.close()
        if not mtp:
            # Setting exists but is 0: tpc present but disabled.
            return self.skipTest(
                "server not configured for two phase transactions. "
                "set max_prepared_transactions to > 0 to run the test")
        return f(self)
    return skip_if_tpc_disabled_
def skip_if_no_namedtuple(f):
    """Skip the test when collections.namedtuple is unavailable."""
    @wraps(f)
    def wrapper(self):
        # Probe at call time, exactly like the test body would.
        try:
            from collections import namedtuple  # noqa
        except ImportError:
            return self.skipTest("collections.namedtuple not available")
        return f(self)
    return wrapper
def skip_if_no_iobase(f):
    """Skip a test if io.TextIOBase is not available."""
    @wraps(f)
    def wrapper(self):
        try:
            from io import TextIOBase  # noqa
        except ImportError:
            return self.skipTest("io.TextIOBase not found.")
        return f(self)
    return wrapper
def skip_before_postgres(*ver):
    """Skip a test on PostgreSQL before a certain version."""
    # Pad to (major, minor, patch) and precompute the numeric threshold,
    # e.g. (9, 4) -> 90400.
    ver = ver + (0,) * (3 - len(ver))
    threshold = int("%d%02d%02d" % ver)
    def decorate(f):
        @wraps(f)
        def wrapper(self):
            if self.conn.server_version < threshold:
                return self.skipTest("skipped because PostgreSQL %s"
                    % self.conn.server_version)
            return f(self)
        return wrapper
    return decorate
def skip_after_postgres(*ver):
    """Skip a test on PostgreSQL after (including) a certain version."""
    # Pad to 3 components and precompute the numeric version threshold.
    ver = ver + (0,) * (3 - len(ver))
    threshold = int("%d%02d%02d" % ver)
    def decorate(f):
        @wraps(f)
        def wrapper(self):
            if self.conn.server_version >= threshold:
                return self.skipTest("skipped because PostgreSQL %s"
                    % self.conn.server_version)
            return f(self)
        return wrapper
    return decorate
def libpq_version():
    """Return the libpq version psycopg2 is linked to, as an integer."""
    import psycopg2
    version = psycopg2.__libpq_version__
    if version >= 90100:
        # From libpq 9.1 the runtime (not just build-time) version is
        # available; prefer it.
        version = psycopg2.extensions.libpq_version()
    return version
def skip_before_libpq(*ver):
    """Skip a test if libpq we're linked to is older than a certain version."""
    ver = ver + (0,) * (3 - len(ver))
    threshold = int("%d%02d%02d" % ver)
    def decorate(f):
        @wraps(f)
        def wrapper(self):
            v = libpq_version()
            if v < threshold:
                return self.skipTest("skipped because libpq %d" % v)
            return f(self)
        return wrapper
    return decorate
def skip_after_libpq(*ver):
    """Skip a test if libpq we're linked to is newer than a certain version."""
    ver = ver + (0,) * (3 - len(ver))
    threshold = int("%d%02d%02d" % ver)
    def decorate(f):
        @wraps(f)
        def wrapper(self):
            v = libpq_version()
            if v >= threshold:
                return self.skipTest("skipped because libpq %s" % v)
            return f(self)
        return wrapper
    return decorate
def skip_before_python(*ver):
    """Skip a test on Python before a certain version."""
    def decorate(f):
        @wraps(f)
        def wrapper(self):
            # Compare only as many components as the caller specified.
            current = sys.version_info[:len(ver)]
            if current < ver:
                return self.skipTest("skipped because Python %s"
                    % ".".join(map(str, current)))
            return f(self)
        return wrapper
    return decorate
def skip_from_python(*ver):
    """Skip a test on Python after (including) a certain version."""
    def decorate(f):
        @wraps(f)
        def wrapper(self):
            # Compare only as many components as the caller specified.
            current = sys.version_info[:len(ver)]
            if current >= ver:
                return self.skipTest("skipped because Python %s"
                    % ".".join(map(str, current)))
            return f(self)
        return wrapper
    return decorate
def skip_if_no_superuser(f):
    """Skip a test if the database user running the test is not a superuser"""
    @wraps(f)
    def wrapper(self):
        from psycopg2 import ProgrammingError
        # Run the test optimistically and translate a privilege error into
        # a skip; any other ProgrammingError is a real failure.
        try:
            return f(self)
        except ProgrammingError as e:
            import psycopg2.errorcodes
            if e.pgcode != psycopg2.errorcodes.INSUFFICIENT_PRIVILEGE:
                raise
            self.skipTest("skipped because not superuser")
    return wrapper
def skip_if_green(reason):
    """Return a decorator skipping a test when running in green mode."""
    def decorate(f):
        @wraps(f)
        def wrapper(self):
            # Imported at call time so the flag reflects the live config.
            from .testconfig import green
            if not green:
                return f(self)
            return self.skipTest(reason)
        return wrapper
    return decorate


skip_copy_if_green = skip_if_green("copy in async mode currently not supported")
def skip_if_no_getrefcount(f):
    """Skip the test on Python builds without sys.getrefcount (e.g. PyPy)."""
    @wraps(f)
    def wrapper(self):
        if hasattr(sys, 'getrefcount'):
            return f(self)
        return self.skipTest('skipped, no sys.getrefcount()')
    return wrapper
def skip_if_windows(f):
    """Skip a test if run on windows"""
    @wraps(f)
    def wrapper(self):
        if platform.system() != 'Windows':
            return f(self)
        return self.skipTest("Not supported on Windows")
    return wrapper
def script_to_py3(script):
    """Convert a script to Python3 syntax if required.

    On Python 2 the script is returned unchanged; on Python 3 it is run
    through lib2to3 via a temporary file.  Raises Exception if the
    conversion fails.

    NOTE(review): lib2to3 was removed from the stdlib in Python 3.13 —
    confirm the supported Python range before relying on this helper.
    """
    if sys.version_info[0] < 3:
        return script

    import tempfile
    # delete=False: lib2to3 rewrites the file in place, so it must survive
    # the close() below.
    f = tempfile.NamedTemporaryFile(suffix=".py", delete=False)
    f.write(script.encode())
    f.flush()
    filename = f.name
    f.close()

    # 2to3 is way too chatty
    import logging
    logging.basicConfig(filename=os.devnull)

    from lib2to3.main import main
    if main("lib2to3.fixes", ['--no-diffs', '-w', '-n', filename]):
        raise Exception('py3 conversion failed')

    f2 = open(filename)
    try:
        return f2.read()
    finally:
        f2.close()
        # BUG FIX: os.remove() used to sit *after* this try/finally, i.e.
        # after the return, so it was unreachable and the temporary file
        # was never deleted.  Clean up in the finally block instead.
        os.remove(filename)
class py3_raises_typeerror(object):
    """Context manager: under Python 3, require and swallow a TypeError."""

    def __enter__(self):
        return None

    def __exit__(self, exc_type, exc_value, traceback):
        # On Python 2 nothing is asserted or suppressed.
        if sys.version_info[0] < 3:
            return None
        # On Python 3 the block must have raised exactly TypeError; the
        # True return value suppresses it.
        assert exc_type is TypeError
        return True
def slow(f):
    """Decorator to mark slow tests we may want to skip

    Note: in order to find slow tests you can run:

    make check 2>&1 | ts -i "%.s" | sort -n
    """
    @wraps(f)
    def slow_(self):
        # Checked at call time so the env var can change between runs.
        fast_mode = os.environ.get('PSYCOPG2_TEST_FAST')
        if fast_mode:
            return self.skipTest("slow test")
        return f(self)
    return slow_
| |
#!/usr/bin/python
#
# xikij.py
#
# this is a xikij shell, for being executed on other side of
# ssh connection, for keeping a connection open and run things
# there
#
import sys, json, os, uuid, subprocess, re
from types import GeneratorType
# True when running under Python 3.
PY3 = sys.version_info[0] >= 3


def exec_code(code, globals=None, locals=None):
    """Homogeneous interface for python2 and python3 to execute code

    :param code:
        code to execute

    :param globals:
        optional globals dictionary

    :param locals:
        optional locals dictionary
    """
    if not PY3:
        # On py2 this is the exec *statement* form.
        exec(code, globals, locals)
        return
    # On py3 go through builtins so py2's parser never sees exec as a
    # function call with three arguments.
    import builtins
    getattr(builtins, 'exec')(code, globals, locals)
from threading import Thread, Condition, Lock
# Registry of live request threads, keyed by request id.
THREADS = {}


def shellRequest(request):
    """Start a new request thread"""
    thread = ShellRequestThread(request)
    THREADS[request['req']] = thread
    thread.start()
def xikijRequest(request):
    """Start a thread serving *request* with the xikij shell API."""
    thread = XikijShellRequestThread(request)
    THREADS[request['req']] = thread
    thread.start()
class ProcessProvider:
    """Pump a subprocess's stdin/stdout/stderr through the JSON protocol.

    Returned by Shell.execute(); output() streams the process channels to
    the peer, start_input() feeds peer-supplied chunks into stdin.
    """
    def __init__(self, process, encoding):
        # process: a subprocess.Popen with PIPE stdio.
        self.process = process
        # encoding used to decode the process's byte output.
        self.encoding = encoding
        # Guard so stdin is only consumed by one thread.
        self.doing_input = False
    def start_input(self, reqthread):
        # Start (once) a daemon-less thread copying chunks yielded by
        # iterating *reqthread* into the process's stdin, closing it at EOF.
        if self.doing_input: return
        self.doing_input = True
        def consume_input():
            for input in reqthread:
                self.process.stdin.write(input)
            self.process.stdin.close()
        Thread(target=consume_input).start()
    def output(self, reqthread, request):
        # Stream stdout and stderr to the peer as 'cnk' messages tagged with
        # the originating channel, then report the exit status.  Blocks
        # until the process has finished.
        p = self.process
        encoding = self.encoding
        line_bundle = 50
        # we rather bundle some lines to a package, then doing the
        # protocol overhead for each single line
        def _output(**kargs):
            return output(res=request['req'], **kargs)
        def consume_output(stream, name):
            i = 0
            lines = ''
            for i, line in enumerate(iter(stream.readline,'')):
                if isinstance(line, bytes):
                    line = line.decode(encoding) # .replace('\r', '')
                # TODO maybe fire a funcion on timeout, which will provide data
                # earlier than 10 lines are collected, if it takes longer
                lines += line
                if i % line_bundle == 0:
                    _output(chl=name, cnk=lines)
                    lines = ''
                if p.poll() is not None:
                    break
            # Drain whatever is left after the process exited.
            data = stream.read()
            if isinstance(data, bytes):
                data = data.decode(encoding)
            if data:
                lines += data
            if lines:
                _output(chl=name, cnk=lines)
        #def
        # Tell the peer the pid first, then pump both channels in parallel.
        _output(process=p.pid)
        stdout_handler = Thread(target=consume_output, args=(p.stdout, 'stdout'))
        stderr_handler = Thread(target=consume_output, args=(p.stderr, 'stderr'))
        # stdin_handler = Thread(target=provide_input, args=(p.stdin, 'stdin'))
        stdout_handler.start()
        stderr_handler.start()
        stdout_handler.join()
        stderr_handler.join()
        _output(exit=p.wait())
# Serializes stdout writes so concurrent threads cannot interleave lines.
outputLock = Lock()

# Modules registered via Shell.registerModule, keyed by module name.
modules = {}


def output(*args, **kargs):
    """Write one JSON message as a line on stdout.

    Accepts either a single positional object or keyword arguments forming
    the message dict.  Thread-safe via outputLock.
    """
    if len(args):
        object = args[0]
    else:
        object = kargs
    # `with` releases the lock even if json.dumps raises — same guarantee
    # as the original try/finally, with less ceremony.
    with outputLock:
        sys.stdout.write(json.dumps(object) + "\n")
        sys.stdout.flush()
class XikijShell:
    """
    This is a shell to xikij methods. It provides xikij API to python
    menu files.
    """

    # Optional request context; ContextShell binds it per request.
    ctx = None

    # def __init__(self, ctx=None):
    #     self.ctx = ctx

    def __getattr__(self, name):
        # Any attribute access yields a proxy that sends `name` as a
        # command to the xikij server.
        def _request(*args, **kargs):
            """ctx is a request handle to a running request. it is used to this can be a normal request handle, which
            resolves to ctx"""
            ctx = kargs.get('ctx', self.ctx)
            # BUG FIX: the stdlib function is uuid.uuid4();
            # uuid.uuidv4 does not exist and raised AttributeError.
            message = {'req': str(uuid.uuid4()), 'cmd': name, 'args': args}
            if ctx is not None:
                message['ctx'] = ctx
            xikijRequest(message)
        return _request
class ContextShell(XikijShell):
    """Wrap a context, which can be passed to a module function
    """
    def __init__(self, ctx):
        # Bind the request context so XikijShell.__getattr__ proxies
        # include it in outgoing requests.
        self.ctx = ctx


# Module-level default shell with no bound context.
xikij = XikijShell()
class Shell:
    """
    Methods of this class will be called from Xikij to have a remote
    shell (with xikij API) e.g. on a foreign machine.
    """

    def readDir(self, path):
        """Return the entries of *path*; directory names get a "/" suffix."""
        entries = []
        for name in os.listdir(path):
            # BUG FIX: isdir() used to be called on the bare entry name,
            # which is resolved against the process cwd, not *path*.
            if os.path.isdir(os.path.join(path, name)):
                entries.append(name + "/")
            else:
                entries.append(name)
        return entries

    def exists(self, path):
        """Return True if *path* exists."""
        return os.path.exists(path)

    # create a stream
    def openFile(self, path):
        """Yield the file at *path* in 8 KiB chunks (generator = stream)."""
        # BUG FIX: using a context manager guarantees the file handle is
        # closed even when the consumer abandons the generator early; the
        # old explicit close() after the loop leaked in that case.
        with open(path, 'rb') as fh:
            while True:
                chunk = fh.read(8192)
                if not chunk:
                    break
                yield chunk

    #def writeFile(sel,f ):

    def isDirectory(self, path):
        """Return True if *path* is a directory."""
        return os.path.isdir(path)

    def respond(self, uuid, type, value):
        """Write a JSON response line for request *uuid* to stdout."""
        # NOTE(review): self.iolock is not defined in this class hierarchy;
        # presumably set externally — confirm before use.
        try:
            self.iolock.acquire()
            sys.stdout.write(json.dumps({'response': uuid, type: value})+"\n")
            sys.stdout.flush()
        finally:
            self.iolock.release()

    def registerModule(self, data, content):
        """Compile *content* as a module and register it under its name.

        Returns a copy of *data* extended with the module's callables.
        """
        import imp
        modName = data['moduleName'].replace('-', '_').replace("/", ".")
        m = imp.new_module(modName)
        m.__file__ = data['fileName']
        # Pre-seed commonly used modules into the menu module's namespace.
        m.os = os
        m.re = re
        m.sys = sys
        #m.xikij = Xikij
        code = compile(content, filename=data['fileName'], mode="exec")
        exec_code(code, m.__dict__)
        modules[data['moduleName']] = m
        result = data.copy()
        result.update({'callables': [], 'contexts': []})
        #import rpdb2 ; rpdb2.start_embedded_('foo')
        for entry in dir(m):
            if entry in ('__file__', 'os', 're', 'sys', 'xikij'): continue
            if callable(getattr(m, entry)):
                result['callables'].append(entry)
            # if entry describes a context, also add it to contexts
        return result

    def updateModule(self, data, content):
        """Re-register an already known module with new *content*."""
        self.registerModule(data, content)

    def moduleRun(self, moduleName, method, request):
        """Invoke *method* of a registered module.

        NOTE(review): `args` is only initialized when argcount == 0, so the
        argcount == 2 branch reads an undefined/empty `args` and there is no
        argcount == 1 case; also `self.request` is used instead of the
        `request` parameter.  The intended calling convention needs to be
        confirmed against the callers before this can be fixed safely.
        """
        func = getattr(modules[moduleName], method)
        argcount = func.__code__.co_argcount
        if argcount == 0:
            args = []
        if argcount == 2:
            args = (args[0], ContextShell(self.request['req']))
            #context, request = args
        return getattr(modules[moduleName], method)(*args)

    def shellExpand(self, string):
        """Expand $VAR style environment variables in *string*."""
        return os.path.expandvars(string)

    def dirExpand(self, string):
        """Expand a leading ~/~user in *string*."""
        # BUG FIX: os.exanduser does not exist (typo); the correct function
        # is os.path.expanduser.
        return os.path.expanduser(string)

    def execute(self, *args):
        """Spawn *args* as a subprocess; an optional trailing dict carries
        options (cwd, input, encoding).  Returns a ProcessProvider that
        streams the process's output."""
        opts = {}
        if isinstance(args[-1], dict):
            opts = args[-1]
            args = args[:-1]
        kwargs = {}
        kwargs['cwd'] = cwd = opts.get('cwd', None)
        if cwd is None:
            # NOTE(review): xikij.getCwd() goes through the __getattr__
            # proxy, which sends a request and returns None — confirm.
            kwargs['cwd'] = xikij.getCwd()
        stdin = None
        if 'input' in opts:
            if opts['input'] is not None:
                stdin = opts['input']
                kwargs['stdin'] = subprocess.PIPE
        encoding = 'utf-8'
        if 'encoding' in opts:
            encoding = opts['encoding']
        # BUG FIX: subprocess.mswindows is a Python-2-only attribute;
        # os.name == 'nt' is equivalent and works on Python 3 as well.
        if os.name == 'nt':
            # Hide the console window the child would otherwise open.
            su = subprocess.STARTUPINFO()
            su.dwFlags |= subprocess.STARTF_USESHOWWINDOW
            su.wShowWindow = subprocess.SW_HIDE
            kwargs['startupinfo'] = su
        p = subprocess.Popen(list(args),
            stdout = subprocess.PIPE,
            stderr = subprocess.PIPE,
            **kwargs
        )
        if stdin:
            import errno
            try:
                p.stdin.write(stdin.encode(encoding))
            except IOError as e:
                # A process that exits without reading stdin causes EPIPE;
                # that is not an error for us.
                if e.errno != errno.EPIPE:
                    raise
            p.stdin.close()
        return ProcessProvider(p, encoding)
class XikijRequestThread(Thread):
    """One thread per request, in either direction (to or from the server).

    NOTE(review): the class both sets `self.request` (a dict) in __init__
    and defines a `request` method, and the `input` method rebinds
    `self.input` to a data value — the instance attributes shadow the
    methods after first use.  `self.process` is also read in input()
    before run() ever assigns it.  This protocol is order-sensitive;
    verify against callers before restructuring.
    """
    def __init__(self, request, inputAvailable=None):
        """
        request is a simple object with keys:

        - xikij: which function to call
        - context: a context may be provided
        - args: args for function to call, may be an array or dict
        - request: a uuid for request

        if you are making a request, rather than handling one, you
        have to pass inputAvailable condition. As soon it is notified,
        you can get input from thread
        """
        Thread.__init__(self)
        self.request = request
        self.inputAvailable = inputAvailable
        self.inputDone = None
        if inputAvailable:
            self.awaitingInput = True
            self.inputConsumed = Condition()
    def request(self, function, *args):
        # Fire-and-forget request to the xikij server (no 'req' id, so
        # run() will allocate one).
        xikijRequest({'xikij': function, 'args': args})
    def __iter__(self):
        # Generator handing out input chunks as they arrive; blocks on the
        # inputAvailable condition and acknowledges via inputConsumed.
        if self.inputAvailable is None:
            self.inputAvailable = Condition()
            self.inputConsumed = Condition()
            self.awaitingInput = True
        while True:
            with self.inputAvailable:
                self.inputAvailable.wait()
                yield self.input
                self.inputConsumed.notify()
            if not self.awaitingInput: break
    def input(self, request):
        # Deliver one input message to the thread; an empty chunk or a
        # 'ret' key marks end of input.
        if self.inputAvailable is None:
            self.inputAvailable = Condition()
            self.inputConsumed = Condition()
        if self.process:
            self.process.start_input(self)
        if self.inputDone is None:
            self.inputDone = Condition()
        with self.inputAvailable:
            if 'cnk' in request:
                self.input = request['cnk']
            elif 'ret' in request:
                self.input = request['ret']
            elif 'input' in request:
                self.input = request['input']
            else:
                self.input = ''
            self.inputAvailable.notify()
            self.inputConsumed.wait()
        if 'ret' in request or not self.input:
            self.awaitingInput = False
            with self.inputDone:
                self.inputDone.notify()
    def output(self, object):
        # Delegate to the module-level serialized stdout writer.
        output(object)
    def registerMenu(self, path):
        # NOTE(review): createMenu is not defined anywhere in this file.
        createMenu(path)
    def runMenu(self, menu):
        # NOTE(review): looks up self.menus (never assigned) and discards
        # the result — likely incomplete.
        self.menus[menu]
    def run(self):
        try:
            # handle a request to xikij server
            if 'req' not in self.request:
                self.inputDone = Condition()
                self.request['req'] = str(uuid.uuid4())
                self.output(self.request)
                with self.inputDone:
                    self.inputDone.wait()
                return
            # handle a request from xikij server
            else:
                request = self.request
                attr = request['cmd']
                args = request.get('args', [])
                if isinstance(args, list):
                    result = getattr(self, attr)(*args)
                else:
                    result = getattr(self, attr)(**args)
                if isinstance(result, ProcessProvider):
                    result.output(self, request)
                    self.process = result
                elif isinstance(result, GeneratorType):
                    # NOTE(review): bytes objects have no .length attribute
                    # (JS-ism) — this branch would raise; confirm intent.
                    for part in result:
                        self.output({'res': request['req'], 'size': part.length, 'cnk': part})
                    self.output({'res': request['req']})
                else:
                    self.output({'res': request['req'], 'ret': result})
        except Exception as e:
            import traceback
            self.output({'res': request['req'], 'error': str(e), 'stack': traceback.format_exc()})
# Request thread exposing the plain Shell API (filesystem, subprocess).
class ShellRequestThread(XikijRequestThread, Shell): pass
# Request thread exposing the xikij proxy API.
class XikijShellRequestThread(XikijRequestThread, XikijShell): pass
def chat():
    """Dispatch incoming requests and responses.

    Reads JSON lines from stdin until EOF: responses are routed to their
    request thread, requests are spawned (or fed as stream input), and the
    'exit' command joins everything and terminates the process.
    """
    while True:
        # get next request from STDIN
        line = sys.stdin.readline()
        if not line: break

        # unpack json
        request = json.loads(line)

        if 'res' in request:
            # dispatch response
            # NOTE(review): only Shell mixins define respond(), and with a
            # different signature — confirm the response path is exercised.
            THREADS[request['res']].respond(request)
        else:
            # handle requests
            if request['cmd'] == 'exit':
                for req_id in list(THREADS):
                    THREADS[req_id].join()
                output({'res': request['req'], 'ret': 'exited'})
                sys.exit(0)
            # if request already in process, this is stream input
            elif request['req'] in THREADS:
                assert 'input' in request
                THREADS[request['req']].input(request['input'])
            # normal request
            else:
                shellRequest(request)

        # harvest loose threads
        # BUG FIX: deleting entries while iterating the live .keys() view
        # raises RuntimeError on Python 3 — snapshot the keys first.  Also
        # renamed the loop variable: `uuid` shadowed the uuid module, and
        # Thread.isAlive() was removed in Python 3.9 (is_alive() works on
        # Python 2.6+ too).
        for req_id in list(THREADS):
            if not THREADS[req_id].is_alive():
                del THREADS[req_id]

    # wait for threads to finish
    for req_id in list(THREADS):
        THREADS[req_id].join()


if __name__ == "__main__":
    chat()
| |
import datetime as dt
from django.contrib.auth.decorators import login_required
from django.core.paginator import Paginator, InvalidPage, EmptyPage
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect, Http404
from django.shortcuts import render_to_response, get_object_or_404
from django.template import RequestContext
from etcetera.extras.search import get_query
from etcetera.structure import forms as stforms
from etcetera.structure import models as structure
def count_checkouts(ou):
    """Recursively total the checkouts of *ou* and all its descendants."""
    return ou.checkouts.count() + sum(
        count_checkouts(child) for child in ou.children.all()
    )
def count_workorders(ou):
    """Recursively total the workorders of *ou* and all its descendants."""
    return ou.workorders.count() + sum(
        count_workorders(child) for child in ou.children.all()
    )
@login_required
def index(request, structure_kind='buildings'):
    """List buildings, departments or campuses, optionally filtered by a
    search query, paginated 20 per page.

    Raises Http404 for an unknown *structure_kind*.
    """
    # Map the URL kind to the model whose objects are listed; also serves
    # as the validity check (idiomatic `not in` instead of `not x in`).
    kind_to_model = {
        'buildings': structure.Building,
        'departments': structure.OrganizationalUnit,
        'campuses': structure.Campus,
    }
    if structure_kind not in kind_to_model:
        raise Http404
    model = kind_to_model[structure_kind]

    paged_objects = None
    q = None
    form = stforms.SearchForm()
    if request.GET and request.GET.get('q'):
        form = stforms.SearchForm(request.GET)
        if form.is_valid():
            data = form.cleaned_data
            if data['q'] and data['q'].strip():
                # This sends a query to search middleware, if such
                # query exists. It gets a Q object back which is used
                # in a Django filter to get our queryset.
                structure_query = get_query(
                    data['q'],
                    form.get_list(structure_kind)
                )
                paged_objects = model.objects.filter(structure_query)
                q = data['q']
    else:
        paged_objects = model.objects.all()

    # Repackage everything into paged_objects using Paginator.
    paginator = Paginator(paged_objects, 20)

    # Make sure the page request is an int -- if not, then deliver page 1.
    try:
        page = int(request.GET.get('page', '1'))
    except ValueError:
        page = 1

    # If the page request is out of range, deliver the last page of results.
    try:
        paged_objects = paginator.page(page)
    except (EmptyPage, InvalidPage):
        paged_objects = paginator.page(paginator.num_pages)

    # Bundle everything into the context and send it out.
    context = {
        'paged_objects': paged_objects,
        'object_list': paged_objects.object_list,
        'form': form,
        'view_type': structure_kind,
        'q': q,
    }
    return render_to_response(
        "structure/index.html",
        context,
        context_instance=RequestContext(request)
    )
def detail(request, slug=None, abbreviation=None, object_id=None, room=None):
    """Show the detail page for a building (by abbreviation), a department
    (by object_id) or a campus (by slug), optionally narrowed to a *room*.
    """
    # In the future: redo this with annotations.
    view_type = None
    stru_obj = None
    if abbreviation:
        # Get our building and let the context know it's a building
        stru_obj = get_object_or_404(
            structure.Building,
            abbreviation=abbreviation.upper()
        )
        view_type = 'buildings'
        # Building-specific information retrieval
        stru_obj.equipment_installed = stru_obj.equipments.filter(
            status='installed'
        )
    elif object_id:
        # Get our department and let the context know it's a department
        stru_obj = get_object_or_404(
            structure.OrganizationalUnit,
            pk=object_id
        )
        view_type = 'departments'
    elif slug:
        # Get our campus and let the context know it's a campus
        stru_obj = get_object_or_404(
            structure.Campus,
            slug=slug
        )
        view_type = 'campuses'
    if room:
        # NOTE(review): this branch assumes building-only attributes
        # (checkouts/workorders/equipment_installed); for a department or
        # campus with a room it would raise AttributeError on
        # equipment_installed at least — confirm URLs only pass a room for
        # buildings.
        stru_obj.room_checkouts = stru_obj.checkouts.filter(room=room)
        stru_obj.room_workorders = stru_obj.workorders.filter(room=room)
        stru_obj.checkouts.active = stru_obj.checkouts.active().filter(
            room=room
        )
        stru_obj.workorders.active = stru_obj.workorders.active().filter(
            room=room
        )
        stru_obj.equipment_installed = stru_obj.equipment_installed.filter(
            room=room
        )
    # Call a custom function that gives us back a graph URL in a string; soon.
    context = {
        'object': stru_obj,
        'view_type': view_type,
        'room': room,
    }
    return render_to_response(
        "structure/detail.html",
        context,
        context_instance=RequestContext(request)
    )
@login_required
def edit(request, slug=None, abbreviation=None, object_id=None):
    """Edit an existing building/department/campus; the keyword argument
    present selects the kind.  On a valid POST, save and redirect to the
    object's detail page; otherwise render the bound (or unbound) form.
    """
    view_type = None
    stru_obj = None
    form = None
    return_reverse = None
    if abbreviation:
        # Get our building
        stru_obj = get_object_or_404(
            structure.Building,
            abbreviation=abbreviation.upper()
        )
        form = stforms.BuildingModelForm(instance=stru_obj)
        view_type = 'buildings'
    elif object_id:
        stru_obj = get_object_or_404(
            structure.OrganizationalUnit,
            pk=object_id
        )
        form = stforms.OrganizationalUnitModelForm(instance=stru_obj)
        view_type = 'departments'
    elif slug:
        stru_obj = get_object_or_404(
            structure.Campus,
            slug=slug
        )
        form = stforms.CampusModelForm(instance=stru_obj)
        view_type = 'campuses'
    if request.method == 'POST':
        # Rebuild the form bound to the submitted data for the same kind.
        if abbreviation:
            form = stforms.BuildingModelForm(request.POST, instance=stru_obj)
        elif object_id:
            form = stforms.OrganizationalUnitModelForm(
                request.POST, instance=stru_obj
            )
        elif slug:
            form = stforms.CampusModelForm(
                request.POST, instance=stru_obj
            )
        if form.is_valid():
            stru_obj = form.save()
            # Redirect to the detail view matching the edited kind.
            if abbreviation:
                return_reverse = reverse(
                    'building-detail',
                    args=(stru_obj.abbreviation,),
                )
            elif object_id:
                return_reverse = reverse(
                    'organizationalunit-detail',
                    args=(stru_obj.id,),
                )
            elif slug:
                return_reverse = reverse(
                    'campus-detail',
                    args=(stru_obj.slug,),
                )
            return HttpResponseRedirect(return_reverse)
    context = {
        'object': stru_obj,
        'view_type': view_type,
        'form': form,
    }
    return render_to_response(
        "structure/edit.html",
        context,
        context_instance=RequestContext(request)
    )
@login_required
def new(request, structure_kind):
    """Create a new building, department or campus and redirect to its
    detail page on success; render the (possibly bound) form otherwise.
    """
    # One form class per structure kind; doubles as the validity check.
    kind_forms = {
        'buildings': stforms.BuildingModelForm,
        'departments': stforms.OrganizationalUnitModelForm,
        'campuses': stforms.CampusModelForm,
    }
    if structure_kind not in kind_forms:
        raise Http404
    form_class = kind_forms[structure_kind]
    form = form_class()
    if request.method == 'POST':
        form = form_class(request.POST)
        if form.is_valid():
            stru_obj = form.save()
            # Pick the detail route and URL argument for the created kind.
            if structure_kind == 'buildings':
                destination = reverse(
                    'building-detail',
                    args=(stru_obj.abbreviation,),
                )
            elif structure_kind == 'departments':
                destination = reverse(
                    'organizationalunit-detail',
                    args=(stru_obj.id,),
                )
            else:
                destination = reverse(
                    'campus-detail',
                    args=(stru_obj.slug,),
                )
            return HttpResponseRedirect(destination)
    context = {
        'form': form,
        'view_type': structure_kind,
    }
    return render_to_response(
        "structure/edit.html",
        context,
        context_instance=RequestContext(request)
    )
| |
# coding=utf-8
"""
Send metrics to a [graphite](http://graphite.wikidot.com/) using the default
interface.
Graphite is an enterprise-scale monitoring tool that runs well on cheap
hardware. It was originally designed and written by Chris Davis at Orbitz in
2006 as side project that ultimately grew to be a foundational monitoring tool.
In 2008, Orbitz allowed Graphite to be released under the open source Apache
2.0 license. Since then Chris has continued to work on Graphite and has
deployed it at other companies including Sears, where it serves as a pillar of
the e-commerce monitoring system. Today many
[large companies](http://graphite.readthedocs.org/en/latest/who-is-using.html)
use it.
"""
from Handler import Handler
import socket
class GraphiteHandler(Handler):
    """
    Implements the abstract Handler class, sending data to graphite
    """

    def __init__(self, config=None):
        """
        Create a new instance of the GraphiteHandler class
        """
        # Initialize Handler
        Handler.__init__(self, config)

        # Initialize Data
        self.socket = None

        # Initialize Options
        self.proto = self.config['proto'].lower().strip()
        self.host = self.config['host']
        self.port = int(self.config['port'])
        self.timeout = float(self.config['timeout'])
        self.keepalive = bool(self.config['keepalive'])
        self.keepaliveinterval = int(self.config['keepaliveinterval'])
        self.batch_size = int(self.config['batch'])
        self.max_backlog_multiplier = int(
            self.config['max_backlog_multiplier'])
        self.trim_backlog_multiplier = int(
            self.config['trim_backlog_multiplier'])
        self.flow_info = self.config['flow_info']
        self.scope_id = self.config['scope_id']
        # Buffered metric lines awaiting a batch send.
        self.metrics = []

        # Connect
        self._connect()

    def get_default_config_help(self):
        """
        Returns the help text for the configuration options for this handler
        """
        config = super(GraphiteHandler, self).get_default_config_help()

        config.update({
            'host': 'Hostname',
            'port': 'Port',
            'proto': 'udp, udp4, udp6, tcp, tcp4, or tcp6',
            'timeout': '',
            'batch': 'How many to store before sending to the graphite server',
            'max_backlog_multiplier': 'how many batches to store before trimming',  # NOQA
            'trim_backlog_multiplier': 'Trim down how many batches',
            'keepalive': 'Enable keepalives for tcp streams',
            'keepaliveinterval': 'How frequently to send keepalives',
            'flow_info': 'IPv6 Flow Info',
            'scope_id': 'IPv6 Scope ID',
        })

        return config

    def get_default_config(self):
        """
        Return the default config for the handler
        """
        config = super(GraphiteHandler, self).get_default_config()

        config.update({
            'host': 'localhost',
            'port': 2003,
            'proto': 'tcp',
            'timeout': 15,
            'batch': 1,
            'max_backlog_multiplier': 5,
            'trim_backlog_multiplier': 4,
            'keepalive': 0,
            'keepaliveinterval': 10,
            'flow_info': 0,
            'scope_id': 0,
        })

        return config

    def __del__(self):
        """
        Destroy instance of the GraphiteHandler class
        """
        self._close()

    def process(self, metric):
        """
        Process a metric by sending it to graphite
        """
        # Append the data to the array as a string
        self.metrics.append(str(metric))

        if len(self.metrics) >= self.batch_size:
            self._send()

    def flush(self):
        """Flush metrics in queue"""
        self._send()

    def _send_data(self, data):
        """
        Try to send all data in buffer.
        """
        try:
            self.socket.sendall(data)
            self._reset_errors()
        except Exception:
            # BUG FIX: was a bare `except:`, which also swallows
            # SystemExit/KeyboardInterrupt; catch Exception instead.
            self._close()
            self._throttle_error("GraphiteHandler: Socket error, "
                                 "trying reconnect.")
            self._connect()
            try:
                self.socket.sendall(data)
            except Exception:
                # Reconnect-and-retry failed too; give up on this batch.
                return
            self._reset_errors()

    def _send(self):
        """
        Send data to graphite. Data that can not be sent will be queued.
        """
        # Check to see if we have a valid socket. If not, try to connect.
        try:
            try:
                if self.socket is None:
                    self.log.debug("GraphiteHandler: Socket is not connected. "
                                   "Reconnecting.")
                    self._connect()
                if self.socket is None:
                    self.log.debug("GraphiteHandler: Reconnect failed.")
                else:
                    # Send data to socket
                    self._send_data(''.join(self.metrics))
                    self.metrics = []
            except Exception:
                self._close()
                self._throttle_error("GraphiteHandler: Error sending metrics.")
                raise
        finally:
            # Bound the backlog: keep only the newest batches when the
            # queue overruns max_backlog_multiplier batches.
            if len(self.metrics) >= (
                    self.batch_size * self.max_backlog_multiplier):
                trim_offset = (self.batch_size *
                               self.trim_backlog_multiplier * -1)
                self.log.warn('GraphiteHandler: Trimming backlog. Removing' +
                              ' oldest %d and keeping newest %d metrics',
                              len(self.metrics) - abs(trim_offset),
                              abs(trim_offset))
                self.metrics = self.metrics[trim_offset:]

    def _connect(self):
        """
        Connect to the graphite server
        """
        if (self.proto == 'udp'):
            stream = socket.SOCK_DGRAM
        else:
            stream = socket.SOCK_STREAM

        # An explicit 4/6 suffix pins the address family; otherwise let
        # getaddrinfo below decide.
        if (self.proto[-1] == '4'):
            family = socket.AF_INET
            connection_struct = (self.host, self.port)
        elif (self.proto[-1] == '6'):
            family = socket.AF_INET6
            connection_struct = (self.host, self.port,
                                 self.flow_info, self.scope_id)
        else:
            connection_struct = (self.host, self.port)
            try:
                addrinfo = socket.getaddrinfo(self.host, self.port, 0, stream)
            except socket.gaierror as ex:
                # BUG FIX: `except socket.gaierror, ex` is Python-2-only
                # syntax; `as` works on Python 2.6+ and Python 3.
                self.log.error("GraphiteHandler: Error looking up graphite host"
                               " '%s' - %s",
                               self.host, ex)
                return
            if (len(addrinfo) > 0):
                family = addrinfo[0][0]
                if (family == socket.AF_INET6):
                    connection_struct = (self.host, self.port,
                                         self.flow_info, self.scope_id)
            else:
                family = socket.AF_INET

        # Create socket
        self.socket = socket.socket(family, stream)
        if self.socket is None:
            # Log Error
            self.log.error("GraphiteHandler: Unable to create socket.")
            # Close Socket
            self._close()
            return

        # Enable keepalives?
        if self.proto != 'udp' and self.keepalive:
            # NOTE(review): logged at error level although it is not an
            # error — confirm intent before changing the level.
            self.log.error("GraphiteHandler: Setting socket keepalives...")
            self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
            self.socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE,
                                   self.keepaliveinterval)
            self.socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPINTVL,
                                   self.keepaliveinterval)
            self.socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPCNT, 3)

        # Set socket timeout
        self.socket.settimeout(self.timeout)

        # Connect to graphite server
        try:
            self.socket.connect(connection_struct)
            # Log
            self.log.debug("GraphiteHandler: Established connection to "
                           "graphite server %s:%d.",
                           self.host, self.port)
        except Exception as ex:
            # Log Error
            self._throttle_error("GraphiteHandler: Failed to connect to "
                                 "%s:%i. %s.", self.host, self.port, ex)
            # Close Socket
            self._close()
            return

    def _close(self):
        """
        Close the socket
        """
        if self.socket is not None:
            self.socket.close()
            self.socket = None
| |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import json
import unittest
from unittest import mock
import boto3
import pytest
from airflow.models import Connection
from airflow.providers.amazon.aws.hooks.base_aws import AwsBaseHook
try:
from moto import mock_dynamodb2, mock_emr, mock_iam, mock_sts
except ImportError:
mock_emr = None
mock_dynamodb2 = None
mock_sts = None
mock_iam = None
class TestAwsBaseHook(unittest.TestCase):
    """Tests for AwsBaseHook client/resource/session creation and the
    various credential-resolution paths (login, extras, role ARN,
    web-identity federation). AWS calls are mocked via moto where the
    relevant mock decorators are importable."""
    @unittest.skipIf(mock_emr is None, 'mock_emr package not present')
    @mock_emr
    def test_get_client_type_returns_a_boto3_client_of_the_requested_type(self):
        """get_client_type('emr') returns a working boto3 EMR client."""
        client = boto3.client('emr', region_name='us-east-1')
        # Sanity check that moto is intercepting: a fresh mock has no clusters.
        if client.list_clusters()['Clusters']:
            raise ValueError('AWS not properly mocked')
        hook = AwsBaseHook(aws_conn_id='aws_default', client_type='emr')
        client_from_hook = hook.get_client_type('emr')
        assert client_from_hook.list_clusters()['Clusters'] == []
    @unittest.skipIf(mock_dynamodb2 is None, 'mock_dynamo2 package not present')
    @mock_dynamodb2
    def test_get_resource_type_returns_a_boto3_resource_of_the_requested_type(self):
        """get_resource_type('dynamodb') returns a usable boto3 resource."""
        hook = AwsBaseHook(aws_conn_id='aws_default', resource_type='dynamodb')
        resource_from_hook = hook.get_resource_type('dynamodb')
        # this table needs to be created in production
        table = resource_from_hook.create_table(  # pylint: disable=no-member
            TableName='test_airflow',
            KeySchema=[
                {'AttributeName': 'id', 'KeyType': 'HASH'},
            ],
            AttributeDefinitions=[{'AttributeName': 'id', 'AttributeType': 'S'}],
            ProvisionedThroughput={'ReadCapacityUnits': 10, 'WriteCapacityUnits': 10},
        )
        table.meta.client.get_waiter('table_exists').wait(TableName='test_airflow')
        assert table.item_count == 0
    @unittest.skipIf(mock_dynamodb2 is None, 'mock_dynamo2 package not present')
    @mock_dynamodb2
    def test_get_session_returns_a_boto3_session(self):
        """get_session() returns a boto3 Session that can build resources."""
        hook = AwsBaseHook(aws_conn_id='aws_default', resource_type='dynamodb')
        session_from_hook = hook.get_session()
        resource_from_session = session_from_hook.resource('dynamodb')
        table = resource_from_session.create_table(  # pylint: disable=no-member
            TableName='test_airflow',
            KeySchema=[
                {'AttributeName': 'id', 'KeyType': 'HASH'},
            ],
            AttributeDefinitions=[{'AttributeName': 'id', 'AttributeType': 'S'}],
            ProvisionedThroughput={'ReadCapacityUnits': 10, 'WriteCapacityUnits': 10},
        )
        table.meta.client.get_waiter('table_exists').wait(TableName='test_airflow')
        assert table.item_count == 0
    @mock.patch.object(AwsBaseHook, 'get_connection')
    def test_get_credentials_from_login_with_token(self, mock_get_connection):
        """Credentials come from login/password; session token from extras."""
        mock_connection = Connection(
            login='aws_access_key_id',
            password='aws_secret_access_key',
            extra='{"aws_session_token": "test_token"}',
        )
        mock_get_connection.return_value = mock_connection
        hook = AwsBaseHook(aws_conn_id='aws_default', client_type='airflow_test')
        credentials_from_hook = hook.get_credentials()
        assert credentials_from_hook.access_key == 'aws_access_key_id'
        assert credentials_from_hook.secret_key == 'aws_secret_access_key'
        assert credentials_from_hook.token == 'test_token'
    @mock.patch.object(AwsBaseHook, 'get_connection')
    def test_get_credentials_from_login_without_token(self, mock_get_connection):
        """Without an extras token, the resolved token is None."""
        mock_connection = Connection(
            login='aws_access_key_id',
            password='aws_secret_access_key',
        )
        mock_get_connection.return_value = mock_connection
        hook = AwsBaseHook(aws_conn_id='aws_default', client_type='spam')
        credentials_from_hook = hook.get_credentials()
        assert credentials_from_hook.access_key == 'aws_access_key_id'
        assert credentials_from_hook.secret_key == 'aws_secret_access_key'
        assert credentials_from_hook.token is None
    @mock.patch.object(AwsBaseHook, 'get_connection')
    def test_get_credentials_from_extra_with_token(self, mock_get_connection):
        """All three credential parts can be supplied via the extras JSON."""
        mock_connection = Connection(
            extra='{"aws_access_key_id": "aws_access_key_id",'
            '"aws_secret_access_key": "aws_secret_access_key",'
            ' "aws_session_token": "session_token"}'
        )
        mock_get_connection.return_value = mock_connection
        hook = AwsBaseHook(aws_conn_id='aws_default', client_type='airflow_test')
        credentials_from_hook = hook.get_credentials()
        assert credentials_from_hook.access_key == 'aws_access_key_id'
        assert credentials_from_hook.secret_key == 'aws_secret_access_key'
        assert credentials_from_hook.token == 'session_token'
    @mock.patch.object(AwsBaseHook, 'get_connection')
    def test_get_credentials_from_extra_without_token(self, mock_get_connection):
        """Extras without a session token resolve with token=None."""
        mock_connection = Connection(
            extra='{"aws_access_key_id": "aws_access_key_id",'
            '"aws_secret_access_key": "aws_secret_access_key"}'
        )
        mock_get_connection.return_value = mock_connection
        hook = AwsBaseHook(aws_conn_id='aws_default', client_type='airflow_test')
        credentials_from_hook = hook.get_credentials()
        assert credentials_from_hook.access_key == 'aws_access_key_id'
        assert credentials_from_hook.secret_key == 'aws_secret_access_key'
        assert credentials_from_hook.token is None
    @mock.patch(
        'airflow.providers.amazon.aws.hooks.base_aws._parse_s3_config',
        return_value=('aws_access_key_id', 'aws_secret_access_key'),
    )
    @mock.patch.object(AwsBaseHook, 'get_connection')
    def test_get_credentials_from_extra_with_s3_config_and_profile(
        self, mock_get_connection, mock_parse_s3_config
    ):
        """An s3_config_file in extras routes through _parse_s3_config."""
        mock_connection = Connection(
            extra='{"s3_config_format": "aws", '
            '"profile": "test", '
            '"s3_config_file": "aws-credentials", '
            '"region_name": "us-east-1"}'
        )
        mock_get_connection.return_value = mock_connection
        hook = AwsBaseHook(aws_conn_id='aws_default', client_type='airflow_test')
        hook._get_credentials(region_name=None)
        mock_parse_s3_config.assert_called_once_with('aws-credentials', 'aws', 'test')
    @unittest.skipIf(mock_sts is None, 'mock_sts package not present')
    @mock.patch.object(AwsBaseHook, 'get_connection')
    @mock_sts
    def test_get_credentials_from_role_arn(self, mock_get_connection):
        """A role_arn in extras yields temporary STS credentials."""
        mock_connection = Connection(extra='{"role_arn":"arn:aws:iam::123456:role/role_arn"}')
        mock_get_connection.return_value = mock_connection
        hook = AwsBaseHook(aws_conn_id='aws_default', client_type='airflow_test')
        credentials_from_hook = hook.get_credentials()
        assert "ASIA" in credentials_from_hook.access_key
        # We assert the length instead of actual values as the values are random:
        # Details: https://github.com/spulec/moto/commit/ab0d23a0ba2506e6338ae20b3fde70da049f7b03
        assert 20 == len(credentials_from_hook.access_key)
        assert 40 == len(credentials_from_hook.secret_key)
        assert 356 == len(credentials_from_hook.token)
    def test_get_credentials_from_gcp_credentials(self):
        """assume_role_with_web_identity via Google federation: verifies the
        exact boto3/botocore call sequence using patched modules."""
        mock_connection = Connection(
            extra=json.dumps(
                {
                    "role_arn": "arn:aws:iam::123456:role/role_arn",
                    "assume_role_method": "assume_role_with_web_identity",
                    "assume_role_with_web_identity_federation": 'google',
                    "assume_role_with_web_identity_federation_audience": 'aws-federation.airflow.apache.org',
                }
            )
        )
        # Store original __import__
        orig_import = __import__
        mock_id_token_credentials = mock.Mock()
        # Intercept only the lazy import of the google id_token_credentials
        # helper; every other import goes through untouched.
        def import_mock(name, *args):
            if name == 'airflow.providers.google.common.utils.id_token_credentials':
                return mock_id_token_credentials
            return orig_import(name, *args)
        with mock.patch('builtins.__import__', side_effect=import_mock), mock.patch.dict(
            'os.environ', AIRFLOW_CONN_AWS_DEFAULT=mock_connection.get_uri()
        ), mock.patch('airflow.providers.amazon.aws.hooks.base_aws.boto3') as mock_boto3, mock.patch(
            'airflow.providers.amazon.aws.hooks.base_aws.botocore'
        ) as mock_botocore, mock.patch(
            'airflow.providers.amazon.aws.hooks.base_aws.botocore.session'
        ) as mock_session:
            hook = AwsBaseHook(aws_conn_id='aws_default', client_type='airflow_test')
            credentials_from_hook = hook.get_credentials()
            mock_get_credentials = mock_boto3.session.Session.return_value.get_credentials
            assert (
                mock_get_credentials.return_value.get_frozen_credentials.return_value == credentials_from_hook
            )
        mock_boto3.assert_has_calls(
            [
                mock.call.session.Session(
                    aws_access_key_id=None,
                    aws_secret_access_key=None,
                    aws_session_token=None,
                    region_name=None,
                ),
                mock.call.session.Session()._session.__bool__(),
                mock.call.session.Session(
                    botocore_session=mock_session.Session.return_value,
                    region_name=mock_boto3.session.Session.return_value.region_name,
                ),
                mock.call.session.Session().get_credentials(),
                mock.call.session.Session().get_credentials().get_frozen_credentials(),
            ]
        )
        mock_fetcher = mock_botocore.credentials.AssumeRoleWithWebIdentityCredentialFetcher
        mock_botocore.assert_has_calls(
            [
                mock.call.credentials.AssumeRoleWithWebIdentityCredentialFetcher(
                    client_creator=mock_boto3.session.Session.return_value._session.create_client,
                    extra_args={},
                    role_arn='arn:aws:iam::123456:role/role_arn',
                    web_identity_token_loader=mock.ANY,
                ),
                mock.call.credentials.DeferredRefreshableCredentials(
                    method='assume-role-with-web-identity',
                    refresh_using=mock_fetcher.return_value.fetch_credentials,
                    time_fetcher=mock.ANY,
                ),
            ]
        )
        mock_session.assert_has_calls([mock.call.Session()])
        mock_id_token_credentials.assert_has_calls(
            [mock.call.get_default_id_token_credentials(target_audience='aws-federation.airflow.apache.org')]
        )
    @unittest.skipIf(mock_iam is None, 'mock_iam package not present')
    @mock_iam
    def test_expand_role(self):
        """expand_role() turns a bare role name into its full ARN."""
        conn = boto3.client('iam', region_name='us-east-1')
        conn.create_role(RoleName='test-role', AssumeRolePolicyDocument='some policy')
        hook = AwsBaseHook(aws_conn_id='aws_default', client_type='airflow_test')
        arn = hook.expand_role('test-role')
        expect_arn = conn.get_role(RoleName='test-role').get('Role').get('Arn')
        assert arn == expect_arn
    def test_use_default_boto3_behaviour_without_conn_id(self):
        """No conn id (None or '') falls back to default boto3 behaviour."""
        for conn_id in (None, ''):
            hook = AwsBaseHook(aws_conn_id=conn_id, client_type='s3')
            # should cause no exception
            hook.get_client_type('s3')
class ThrowErrorUntilCount:
    """Callable that raises until it has been invoked `count` times.

    Subsequent calls return True. Used to exercise the retry decorator.
    """
    def __init__(self, count, quota_retry, **kwargs):
        self.counter = 0
        self.count = count
        self.retry_args = quota_retry
        self.kwargs = kwargs
        self.log = None
    def __call__(self):
        """Raise until the count threshold is crossed, then return True."""
        if self.counter >= self.count:
            return True
        self.counter += 1
        raise Exception()
def _always_true_predicate(e: Exception): # pylint: disable=unused-argument
return True
# Helper wrapped with AwsBaseHook.retry using an always-retry predicate;
# exercised by TestRetryDecorator.
@AwsBaseHook.retry(_always_true_predicate)
def _retryable_test(thing):
    # `thing` is a zero-argument callable (e.g. ThrowErrorUntilCount).
    return thing()
def _always_false_predicate(e: Exception): # pylint: disable=unused-argument
return False
# Helper wrapped with AwsBaseHook.retry using a never-retry predicate;
# exercised by TestRetryDecorator.
@AwsBaseHook.retry(_always_false_predicate)
def _non_retryable_test(thing):
    # `thing` is a zero-argument callable (e.g. ThrowErrorUntilCount).
    return thing()
class TestRetryDecorator(unittest.TestCase):  # pylint: disable=invalid-name
    """Tests for the AwsBaseHook.retry decorator helpers defined above."""
    def test_do_nothing_on_non_exception(self):
        """A callable that does not raise is passed through unchanged."""
        result = _retryable_test(lambda: 42)
        # BUGFIX: was `assert result, 42`, which only checked truthiness
        # (42 was the assertion *message*, not a comparison).
        assert result == 42
    def test_retry_on_exception(self):
        """With a retryable error, the call is retried until it succeeds."""
        quota_retry = {
            'stop_after_delay': 2,
            'multiplier': 1,
            'min': 1,
            'max': 10,
        }
        custom_fn = ThrowErrorUntilCount(
            count=2,
            quota_retry=quota_retry,
        )
        result = _retryable_test(custom_fn)
        assert custom_fn.counter == 2
        assert result
    def test_no_retry_on_exception(self):
        """With a never-retry predicate, the first error propagates."""
        quota_retry = {
            'stop_after_delay': 2,
            'multiplier': 1,
            'min': 1,
            'max': 10,
        }
        custom_fn = ThrowErrorUntilCount(
            count=2,
            quota_retry=quota_retry,
        )
        with pytest.raises(Exception):
            _non_retryable_test(custom_fn)
    def test_raise_exception_when_no_retry_args(self):
        """Missing retry args means the error is not absorbed."""
        custom_fn = ThrowErrorUntilCount(count=2, quota_retry=None)
        with pytest.raises(Exception):
            _retryable_test(custom_fn)
| |
#!/usr/bin/env python3
"""
Makes pictures for quality assurance of fmri data and pastes them
together into a html pages.
Usage:
cifti_vis_fmri snaps [options] <task_label> <subject>
cifti_vis_fmri subject [options] <task_label> <subject>
cifti_vis_fmri index [options]
Arguments:
<task_label> NameOffMRI argument given during ciftify_subject_fmri
<subject> Subject ID to process
Options:
--qcdir PATH Full path to location of QC directory
--ciftify-work-dir PATH The directory for HCP subjects (overrides
CIFTIFY_WORKDIR/ HCP_DATA enivironment variables)
--SmoothingFWHM FWHM SmoothingFWHM argument given during ciftify_subject_fmri
--smooth-conn FWHM Add smoothing with this FWHM [default: 4] to connectivity images
if no smoothing was during ciftify_subject_fmri
--hcp-data-dir PATH DEPRECATED, use --ciftify-work-dir instead
-v, --verbose Verbose logging
--debug Debug logging
--help Print help
DETAILS
Produces visualizations for quality assurance of volume to cortex mapping step
- as well as subcortical resampling. It also produces some
This produces:
++ views of the functional data that has been projected to the "cifti space"
++ overlays of the functional volume and the pial surface
++ seed connectivity from 3 seeds
++ this option requires that 2 more arguments are specified
++ '--NameOffMRI' and '--SmoothingFWHM' -
++ these should match what was input in the ciftify_subject_fmri command
The functional to surface QC plots are shown in unsmoothed space.
(i.e. referencing the <task_label>_Atlas_s0.dtseries.nii file)
Gross patterns of connetivity as more visible with some surface smoothing.
So connectivity are shown either on the smoothed dtseries files indicated by the
'--SmoothingFWHM' option, or they using temporary files smoothed with the kernel
indicated by the ('--smoothed-conn') option (default value 8mm).
Written by Erin W Dickie, Feb 2016
"""
import os
import sys
import logging
import logging.config
import nibabel
import numpy as np
from docopt import docopt
import ciftify
from ciftify.utils import VisSettings, run, get_stdout
from ciftify.qc_config import replace_path_references, replace_all_references
# Read logging.conf
config_path = os.path.join(os.path.dirname(__file__), "logging.conf")
logging.config.fileConfig(config_path, disable_existing_loggers=False)
logger = logging.getLogger(os.path.basename(__file__))
class UserSettings(VisSettings):
    """Settings for one cifti_vis_fmri run, derived from docopt arguments.

    Attributes:
        fmri_name: NameOffMRI given during ciftify_subject_fmri.
        subject: subject ID to process (per-subject modes only).
        snaps: True when running per-subject ('subject' or deprecated 'snaps').
        dtseries_s0: path to the unsmoothed <task>_Atlas_s0.dtseries.nii.
        fwhm: smoothing FWHM used for the connectivity views.
        surf_mesh: surface mesh suffix used to locate .surf.gii files.
    """
    def __init__(self, arguments):
        VisSettings.__init__(self, arguments, qc_mode='fmri')
        self.fmri_name = arguments['<task_label>']
        self.subject = arguments['<subject>']
        self.snaps = arguments['subject'] or arguments['snaps']
        self.dtseries_s0 = self.get_dtseries_s0()
        self.fwhm = self.get_fwhm(arguments)
        self.surf_mesh = '.32k_fs_LR'
    def get_dtseries_s0(self):
        """Return the unsmoothed dtseries path, exiting if it is missing."""
        dtseries_s0 = ''
        if self.snaps:
            dtseries_s0 = os.path.join(self.work_dir, self.subject,
                    'MNINonLinear', 'Results', self.fmri_name,
                    '{}_Atlas_s0.dtseries.nii'.format(self.fmri_name))
            if not os.path.exists(dtseries_s0):
                logger.error("Expected fmri file {} not found."
                        "".format(dtseries_s0))
                sys.exit(1)
        return dtseries_s0
    def get_fwhm(self, arguments):
        """Return the smoothing FWHM for the connectivity views.

        When --SmoothingFWHM is given, the matching pre-smoothed dtseries
        must already exist; otherwise the --smooth-conn value is used and a
        temporary smoothed file is generated later.
        """
        if arguments['--SmoothingFWHM']:
            fwhm = arguments['--SmoothingFWHM']
            dtseries_sm = os.path.join(self.work_dir, self.subject,
                    'MNINonLinear', 'Results', self.fmri_name,
                    '{}_Atlas_s{}.dtseries.nii'.format(self.fmri_name, fwhm))
            if not os.path.exists(dtseries_sm):
                # BUGFIX: the message previously pointed users at a
                # non-existent '--smooth-con' flag; the option is
                # '--smooth-conn' (see the usage docstring).
                logger.error("Expected smoothed fmri file {} not found."
                        "To generate temporary smoothed file for visulizations "
                        "use the --smooth-conn flag instead".format(dtseries_sm))
                sys.exit(1)
        else:
            fwhm = arguments['--smooth-conn']
        return fwhm
def main():
    """Entry point: parse arguments, configure logging, then either write a
    single subject's QC page or (re)build the QC index pages."""
    arguments = docopt(__doc__)
    # 'subject' (current) and 'snaps' (deprecated alias) both select
    # per-subject mode; no subcommand means "rebuild the index".
    snaps_only = arguments['subject'] or arguments['snaps']
    verbose = arguments['--verbose']
    debug = arguments['--debug']
    if arguments['snaps']:
        logger.warning("The 'snaps' argument has be deprecated. Please use 'subject' in the future.")
    # Raise verbosity on both this script's logger and the ciftify package.
    if verbose:
        logger.setLevel(logging.INFO)
        logging.getLogger('ciftify').setLevel(logging.INFO)
    if debug:
        logger.setLevel(logging.DEBUG)
        logging.getLogger('ciftify').setLevel(logging.DEBUG)
    ciftify.utils.log_arguments(arguments)
    user_settings = UserSettings(arguments)
    config = ciftify.qc_config.Config(user_settings.qc_mode)
    title_formatter = {'fwhm': user_settings.fwhm}
    if snaps_only:
        logger.info("Making snaps for subject {}".format(user_settings.subject))
        write_single_qc_page(user_settings, config, title_formatter)
        return
    logger.info("Writing index pages to {}".format(user_settings.qc_dir))
    # Double nested braces allows two stage formatting and get filled in after
    # single braces (i.e. qc mode gets inserted into the second set of braces)
    ciftify.html.write_index_pages(user_settings.qc_dir, config,
            user_settings.qc_mode, title="cifti_vis_fmri Index", title_formatter=title_formatter)
def write_single_qc_page(user_settings, config, title_formatter):
    """
    Generates a QC page for the subject specified by the user.

    Output goes to <qc_dir>/<subject>_<task>/qc.html; all intermediates
    (scene file, temporary niftis) live in one TempDir, which is passed to
    generate_qc_page as both its scene_dir and temp_dir arguments.
    """
    qc_dir = os.path.join(user_settings.qc_dir,
            '{}_{}'.format(user_settings.subject, user_settings.fmri_name))
    qc_html = os.path.join(qc_dir, 'qc.html')
    with ciftify.utils.TempDir() as temp_dir:
        generate_qc_page(user_settings, config, qc_dir, temp_dir, qc_html,
                temp_dir, title_formatter)
def generate_qc_page(user_settings, config, qc_dir, scene_dir, qc_html,
        temp_dir, title_formatter):
    """Build the intermediate images/scene for one subject and write qc.html.

    Args:
        user_settings: UserSettings for this run.
        config: ciftify.qc_config.Config for the 'fmri' QC mode.
        qc_dir: output directory for the QC page and its images.
        scene_dir: directory the personalized .scene file is written into.
        qc_html: path of the qc.html file to create.
        temp_dir: scratch directory for the temporary sbref/smoothed files.
        title_formatter: dict used to fill image titles (e.g. {'fwhm': ...}).
    """
    sbref_nii = change_sbref_palette(user_settings, temp_dir)
    dtseries_sm = get_smoothed_dtseries_file(user_settings, temp_dir)
    contents = config.get_template_contents()
    scene_file = personalize_template(contents, scene_dir, user_settings,
            sbref_nii, dtseries_sm)
    ciftify.utils.make_dir(qc_dir)
    with open(qc_html, 'w') as qc_page:
        ciftify.html.add_page_header(qc_page, config, user_settings.qc_mode,
                subject=user_settings.subject, path='..')
        # Keep wb_command quiet unless the user asked for --debug.
        wb_logging = 'INFO' if user_settings.debug_mode else 'WARNING'
        ciftify.html.add_images(qc_page, qc_dir, config.images,
                scene_file, wb_logging = wb_logging, add_titles = True,
                title_formatter = title_formatter)
def personalize_template(template_contents, output_dir, user_settings, sbref_nii, dtseries_sm):
    """Write a subject-specific .scene file into output_dir and return its path.

    The template text is customized via modify_template_contents and saved
    as qc<mode>_<subject>.scene.
    """
    scene_name = 'qc{}_{}.scene'.format(user_settings.qc_mode,
                                        user_settings.subject)
    scene_file = os.path.join(output_dir, scene_name)
    with open(scene_file, 'w') as scene_stream:
        personalized = modify_template_contents(template_contents,
                user_settings, scene_file, sbref_nii, dtseries_sm)
        scene_stream.write(personalized)
    return scene_file
def modify_template_contents(template_contents, user_settings, scene_file,
        sbref_nii, dtseries_sm):
    """
    Customizes a template file to a specific working directory, by
    replacing all relative path references and place holder paths
    with references to specific files.

    Returns the customized scene text (the caller writes it to disk).
    """
    # Standard locations inside the subject's MNINonLinear folder.
    surfs_dir = os.path.join(user_settings.work_dir, user_settings.subject,
            'MNINonLinear', 'fsaverage_LR32k')
    T1w_nii = os.path.join(user_settings.work_dir, user_settings.subject,
            'MNINonLinear', 'T1w.nii.gz')
    dtseries_sm_base = os.path.basename(dtseries_sm)
    dtseries_sm_base_noext = dtseries_sm_base.replace('.dtseries.nii','')
    # Substitute each scene-template placeholder with a real path/name.
    txt = template_contents.replace('SURFS_SUBJECT', user_settings.subject)
    txt = txt.replace('SURFS_MESHNAME', user_settings.surf_mesh)
    txt = replace_path_references(txt, 'SURFSDIR', surfs_dir, scene_file)
    txt = replace_all_references(txt, 'T1W', T1w_nii, scene_file)
    txt = replace_all_references(txt, 'SBREF', sbref_nii, scene_file)
    txt = replace_all_references(txt, 'S0DTSERIES', user_settings.dtseries_s0, scene_file)
    txt = replace_path_references(txt, 'SMDTSERIES', os.path.dirname(dtseries_sm), scene_file)
    txt = txt.replace('SMDTSERIES_BASENOEXT', dtseries_sm_base_noext)
    return txt
def change_sbref_palette(user_settings, temp_dir):
    '''create a temporary sbref file and returns its path

    The reference image is the temporal MEAN of the 4D functional volume,
    given a 'fidl' palette auto-scaled to the 5-99 positive percentile
    range with negative and zero values hidden.
    '''
    sbref_nii = os.path.join(temp_dir,
            '{}_SBRef.nii.gz'.format(user_settings.fmri_name))
    func4D_nii = os.path.join(user_settings.work_dir, user_settings.subject,
            'MNINonLinear', 'Results', user_settings.fmri_name,
            '{}.nii.gz'.format(user_settings.fmri_name))
    run(['wb_command', '-volume-reduce',
        func4D_nii, 'MEAN', sbref_nii])
    run(['wb_command', '-volume-palette',
        sbref_nii,
        'MODE_AUTO_SCALE_PERCENTAGE',
        '-disp-neg', 'false',
        '-disp-zero', 'false',
        '-pos-percent', '5', '99',
        '-palette-name','fidl'])
    return sbref_nii
def get_smoothed_dtseries_file(user_settings, temp_dir):
    '''
    create smoothed file if it does not exist,
    returns path to smoothed file

    Prefers a pre-existing <task>_Atlas_s<fwhm>.dtseries.nii from the
    ciftify working directory; otherwise smooths the unsmoothed (s0)
    dtseries into temp_dir using the subject's midthickness surfaces.
    '''
    pre_dtseries_sm = os.path.join(user_settings.work_dir, user_settings.subject,
            'MNINonLinear', 'Results', user_settings.fmri_name,
            '{}_Atlas_s{}.dtseries.nii'.format(user_settings.fmri_name,
            user_settings.fwhm))
    if os.path.exists(pre_dtseries_sm):
        return pre_dtseries_sm
    else:
        dtseries_sm = os.path.join(temp_dir,
                '{}_Atlas_s{}.dtseries.nii'.format(user_settings.fmri_name,
                user_settings.fwhm))
        # Convert the FWHM into the gaussian sigma wb_command expects.
        Sigma = ciftify.utils.FWHM2Sigma(user_settings.fwhm)
        surfs_dir = os.path.join(user_settings.work_dir, user_settings.subject,
                'MNINonLinear', 'fsaverage_LR32k')
        run(['wb_command', '-cifti-smoothing',
            user_settings.dtseries_s0,
            str(Sigma), str(Sigma), 'COLUMN',
            dtseries_sm,
            '-left-surface', os.path.join(surfs_dir,
                    '{}.L.midthickness{}.surf.gii'.format(user_settings.subject,
                    user_settings.surf_mesh)),
            '-right-surface', os.path.join(surfs_dir,
                    '{}.R.midthickness{}.surf.gii'.format(user_settings.subject,
                    user_settings.surf_mesh))])
        return dtseries_sm
if __name__ == '__main__':
main()
| |
#!/usr/bin/env
from cgum.utility import *
from cgum.program import Program
import codecs
import json
import tempfile
from subprocess import Popen
class Mappings(object):
    """Bidirectional node-number mapping between tree P (before) and P' (after)."""
    @staticmethod
    def from_json(jsn):
        """Build a Mappings from GumTree 'matches' JSON (src/dest pairs)."""
        forward = {}
        backward = {}
        for match in jsn:
            src, dest = int(match['src']), int(match['dest'])
            forward[src] = dest
            backward[dest] = src
        return Mappings(forward, backward)
    def __init__(self, before_to_after, after_to_before):
        self.before_to_after = before_to_after
        self.after_to_before = after_to_before
    def after(self, num):
        """Number of the node in P' matching node `num` in P, or None."""
        return self.before_to_after.get(num)
    def before(self, num):
        """Number of the node in P matching node `num` in P', or None."""
        return self.after_to_before.get(num)
class Action(object):
    """Base class for GumTree edit-script actions."""
    @staticmethod
    def from_json_with_mappings(jsn, mapping):
        # Dispatch on the JSON 'action' tag to the matching concrete
        # subclass (Insert/Update/Move/Delete, defined below).
        return ({
            'insert': Insert,
            'update': Update,
            'move': Move,
            'delete': Delete
        })[jsn['action']].from_json_with_mappings(jsn, mapping)
# Gives the ID of the node in the original tree that was deleted.
class Delete(Action):
    """A GumTree delete action: one node removed from the original tree."""
    @staticmethod
    def from_json_with_mappings(jsn, mapping):
        """Build a Delete from GumTree JSON; the mapping is not needed."""
        return Delete(jsn['tree'])
    def __init__(self, node_id):
        self.__node_id = node_id
        self.__node = None
    def annotate(self, before, after):
        """Cache the deleted node by looking its id up in the before AST."""
        self.__node = before.find(self.__node_id)
    def deleted(self):
        """Return the deleted node from the before AST."""
        return self.__node
    def __str__(self):
        return "DEL(%d)" % self.__node_id
# Position parameter is NOT to be trusted
class Move(Action):
    """A GumTree move action: a node in P relocated within P'.

    NOTE: the position parameter reported by GumTree is NOT to be trusted.
    """
    @staticmethod
    def from_json_with_mappings(jsn, mapping):
        """Build a Move, resolving the destination id via the P->P' mapping."""
        from_id = jsn['tree']
        to_id = mapping.after(from_id)
        return Move(from_id, to_id, jsn['parent'], jsn['at'])
    def __init__(self, from_id, to_id, parent_id, position):
        self.__from_id = from_id
        self.__to_id = to_id
        self.__parent_id = parent_id
        self.__position = position
        self.__from = None
        self.__to = None
    def annotate(self, before, after):
        """Cache the moved node as it appears in both ASTs."""
        self.__from = before.find(self.__from_id)
        self.__to = after.find(self.__to_id)
    def moved_from(self):
        """Return the node in the before AST."""
        if self.__from is None:
            raise Exception("moved_from: action hasn't been annotated")
        return self.__from
    def moved_to(self):
        """Return the node in the after AST."""
        if self.__to is None:
            raise Exception("moved_to: action hasn't been annotated")
        return self.__to
    def moved_from_id(self):
        """Return the ID of the moved node in the before AST.

        BUGFIX: this previously returned the after-AST id (and
        moved_to_id returned the before-AST id) -- the two return values
        were swapped relative to their documented contract.
        """
        return self.__from_id
    def moved_to_id(self):
        """Return the ID of the moved node in the after AST."""
        return self.__to_id
    def __str__(self):
        # Reproduces the original (incorrect) GumTree description.
        return "MOV(%d, %d, %d)" % \
            (self.__from_id, self.__parent_id, self.__position)
# Doesn't handle insert root?
class Insert(Action):
    """A GumTree insert action: a new node added to P'.

    NOTE: may not handle insertion of a new root node.
    """
    @staticmethod
    def from_json_with_mappings(jsn, mapping):
        """Build an Insert from GumTree JSON; the mapping is not needed."""
        return Insert(jsn['tree'], jsn['parent'], jsn['at'])
    def __init__(self, inserted_id, parent_id, position):
        self.__inserted_id = inserted_id
        self.__parent_id = parent_id
        self.__position = position
        self.__inserted = None
        self.__parent = None
    def annotate(self, before, after):
        """Cache the inserted node and its parent from the after AST."""
        self.__inserted = after.find(self.__inserted_id)
        self.__parent = after.find(self.__parent_id)
    def inserted(self):
        """Return the node which was inserted into the AST."""
        return self.__inserted
    def parent(self):
        """Return the parent of the inserted node."""
        return self.__parent
    def inserted_id(self):
        """Return the ID of the inserted node.

        BUGFIX: previously returned `self.__child_id`, an attribute that
        is never assigned, so every call raised AttributeError.
        """
        return self.__inserted_id
    def parent_id(self):
        """Return the ID of the inserted node's parent."""
        return self.__parent_id
    def position(self):
        """Insertion position within the parent's subtree, per GumTree
        output; known to be flawed."""
        return self.__position
    def __str__(self):
        return "INS(%d, %d, %d)" % \
            (self.__inserted_id, self.__parent_id, self.__position)
class Update(Action):
    """A GumTree update action: a node keeps its place but its label changes."""
    @staticmethod
    def from_json_with_mappings(jsn, mapping):
        """Build an Update, resolving the after-AST id via the mapping."""
        before_num = jsn['tree']
        return Update(before_num, mapping.after(before_num), jsn['label'])
    def __init__(self, before_id, after_id, label):
        self.__before_id = before_id
        self.__after_id = after_id
        self.__label = label
        self.__before = None
        self.__after = None
    def annotate(self, before, after):
        """Cache the before/after forms of the updated node."""
        self.__before = before.find(self.__before_id)
        self.__after = after.find(self.__after_id)
    def before(self):
        """Return the updated node as it appears in P."""
        return self.__before
    def after(self):
        """Return the updated node as it appears in P'."""
        return self.__after
    def before_id(self):
        """Return the ID of the node in P."""
        return self.__before_id
    def after_id(self):
        """Return the ID of the node in P'."""
        return self.__after_id
    def label(self):
        """Return the updated label for this node."""
        return self.__label
    def __str__(self):
        # Original GumTree encoding of the operation.
        return "UPD(%d, %s)" % (self.__before_id, self.__label)
class AnnotatedDiff(object):
    """A GumTree diff between two programs, with each action annotated
    against the parsed before (P) and after (P') ASTs."""
    @staticmethod
    def from_source_files(fn_from, fn_to):
        """Diff two source files by shelling out to GumTree."""
        tmp_f = tempfile.NamedTemporaryFile()
        AnnotatedDiff.parse_to_json_file(fn_from, fn_to, tmp_f)
        before = Program.from_source_file(fn_from)
        after = Program.from_source_file(fn_to)
        return AnnotatedDiff.from_file(tmp_f.name, before, after)
    @staticmethod
    def parse_to_json_file(fn_from, fn_to, jsn_fn):
        """Run `gumtree jsondiff` and write its JSON output to jsn_fn."""
        cmd = "gumtree jsondiff \"%s\" \"%s\"" % (fn_from, fn_to)
        # Explicit check instead of `assert`: asserts vanish under -O.
        status = Popen(cmd, shell=True, stdin=FNULL, stdout=jsn_fn).wait()
        if status != 0:
            raise RuntimeError("gumtree jsondiff failed (exit %d)" % status)
    @staticmethod
    def from_file(fn, before, after):
        """Load an AnnotatedDiff from a GumTree JSON file."""
        with codecs.open(fn, 'r', 'utf-8') as f:
            return AnnotatedDiff.from_json(json.load(f), before, after)
    @staticmethod
    def from_json(jsn, before, after):
        """Build an AnnotatedDiff from parsed GumTree JSON."""
        mappings = Mappings.from_json(jsn['matches'])
        actions = [Action.from_json_with_mappings(a, mappings)
                   for a in jsn['actions']]
        return AnnotatedDiff(actions, mappings, before, after)
    def __init__(self, actions, mappings, before, after):
        self.__actions = actions
        self.__mappings = mappings
        self.__before = before
        self.__after = after
        self.__insertions = []
        self.__deletions = []
        self.__updates = []
        self.__moves = []
        # Annotate each action and group it into the per-type list.
        for action in self.__actions:
            action.annotate(before, after)
            ({
                Insert: self.__insertions,
                Delete: self.__deletions,
                Update: self.__updates,
                Move: self.__moves
            })[action.__class__].append(action)
    def before(self):
        """Return the before program (P)."""
        return self.__before
    def after(self):
        """Return the after program (P')."""
        return self.__after
    def actions(self):
        return self.__actions
    def insertions(self):
        return self.__insertions
    def deletions(self):
        return self.__deletions
    def moves(self):
        return self.__moves
    def updates(self):
        return self.__updates
    def mappings(self):
        return self.__mappings
    def was_moved_to(self, to):
        """True if the given node in P' is the destination of some move.

        BUGFIX: the original evaluated `any([to == move.moved_to() in
        self.moves()])` -- a chained comparison over the undefined name
        `move` (NameError at runtime) instead of a loop over the moves.
        """
        return any(to == move.moved_to() for move in self.moves())
    def was_moved_from(self, frm):
        """True if the given node in P was moved elsewhere (same bugfix)."""
        return any(frm == move.moved_from() for move in self.moves())
    def was_is(self, node):
        """Given a node in P, return the matching node in P', or None."""
        assert node is not None, "was_is: provided node must not be null"
        is_num = self.__mappings.after(node.number())
        if is_num is None:
            return None
        return self.__after.find(is_num)
    def is_was(self, node):
        """Given a node in P', return the matching node in P, or None."""
        assert node is not None, "is_was: provided node must not be null"
        was_num = self.__mappings.before(node.number())
        if was_num is None:
            return None
        return self.__before.find(was_num)
    def __str__(self):
        return '\n'.join(map(str, self.__actions))
| |
"""
Support for the IKEA Tradfri platform.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/light.tradfri/
"""
import logging
from homeassistant.core import callback
from homeassistant.components.light import (
ATTR_BRIGHTNESS, ATTR_COLOR_TEMP, ATTR_HS_COLOR, ATTR_TRANSITION,
SUPPORT_BRIGHTNESS, SUPPORT_TRANSITION, SUPPORT_COLOR_TEMP,
SUPPORT_COLOR, Light)
from homeassistant.components.light import \
PLATFORM_SCHEMA as LIGHT_PLATFORM_SCHEMA
from homeassistant.components.tradfri import (
KEY_GATEWAY, KEY_API, DOMAIN as TRADFRI_DOMAIN)
from homeassistant.components.tradfri.const import (
CONF_IMPORT_GROUPS, CONF_GATEWAY_ID)
import homeassistant.util.color as color_util
_LOGGER = logging.getLogger(__name__)
ATTR_DIMMER = 'dimmer'
ATTR_HUE = 'hue'
ATTR_SAT = 'saturation'
ATTR_TRANSITION_TIME = 'transition_time'
DEPENDENCIES = ['tradfri']
PLATFORM_SCHEMA = LIGHT_PLATFORM_SCHEMA
IKEA = 'IKEA of Sweden'
TRADFRI_LIGHT_MANAGER = 'Tradfri Light Manager'
SUPPORTED_FEATURES = SUPPORT_TRANSITION
SUPPORTED_GROUP_FEATURES = SUPPORT_BRIGHTNESS | SUPPORT_TRANSITION
async def async_setup_entry(hass, config_entry, async_add_entities):
    """Load Tradfri lights based on a config entry."""
    gateway_id = config_entry.data[CONF_GATEWAY_ID]
    # Shared api callable and gateway object stored per config entry by the
    # tradfri component setup.
    api = hass.data[KEY_API][config_entry.entry_id]
    gateway = hass.data[KEY_GATEWAY][config_entry.entry_id]
    devices_commands = await api(gateway.get_devices())
    devices = await api(devices_commands)
    # Only devices with a light control are lights.
    lights = [dev for dev in devices if dev.has_light_control]
    if lights:
        async_add_entities(
            TradfriLight(light, api, gateway_id) for light in lights)
    # Groups are only imported when this config entry opted in.
    if config_entry.data[CONF_IMPORT_GROUPS]:
        groups_commands = await api(gateway.get_groups())
        groups = await api(groups_commands)
        if groups:
            async_add_entities(
                TradfriGroup(group, api, gateway_id) for group in groups)
class TradfriGroup(Light):
    """The platform class required by hass."""
    def __init__(self, group, api, gateway_id):
        """Initialize a Group."""
        self._api = api
        self._unique_id = "group-{}-{}".format(gateway_id, group.id)
        self._group = group
        self._name = group.name
        self._refresh(group)
    async def async_added_to_hass(self):
        """Start thread when added to hass."""
        self._async_start_observe()
    @property
    def unique_id(self):
        """Return unique ID for this group."""
        return self._unique_id
    @property
    def supported_features(self):
        """Flag supported features."""
        return SUPPORTED_GROUP_FEATURES
    @property
    def name(self):
        """Return the display name of this group."""
        return self._name
    @property
    def is_on(self):
        """Return true if group lights are on."""
        return self._group.state
    @property
    def brightness(self):
        """Return the brightness of the group lights."""
        return self._group.dimmer
    async def async_turn_off(self, **kwargs):
        """Instruct the group lights to turn off."""
        await self._api(self._group.set_state(0))
    async def async_turn_on(self, **kwargs):
        """Instruct the group lights to turn on, or dim."""
        keys = {}
        if ATTR_TRANSITION in kwargs:
            # NOTE(review): transition appears to be sent in tenths of a
            # second (seconds * 10) -- confirm against pytradfri docs.
            keys['transition_time'] = int(kwargs[ATTR_TRANSITION]) * 10
        if ATTR_BRIGHTNESS in kwargs:
            # HA's max brightness 255 is clamped to 254 -- presumably the
            # gateway's maximum dimmer value; confirm.
            if kwargs[ATTR_BRIGHTNESS] == 255:
                kwargs[ATTR_BRIGHTNESS] = 254
            await self._api(
                self._group.set_dimmer(kwargs[ATTR_BRIGHTNESS], **keys))
        else:
            await self._api(self._group.set_state(1))
    @callback
    def _async_start_observe(self, exc=None):
        """Start observation of light."""
        # pylint: disable=import-error
        from pytradfri.error import PytradfriError
        # This method doubles as its own error callback: a failed
        # observation logs the error and re-subscribes.
        if exc:
            _LOGGER.warning("Observation failed for %s", self._name,
                            exc_info=exc)
        try:
            cmd = self._group.observe(callback=self._observe_update,
                                      err_callback=self._async_start_observe,
                                      duration=0)
            self.hass.async_create_task(self._api(cmd))
        except PytradfriError as err:
            _LOGGER.warning("Observation failed, trying again", exc_info=err)
            self._async_start_observe()
    def _refresh(self, group):
        """Refresh the light data."""
        self._group = group
        self._name = group.name
    @callback
    def _observe_update(self, tradfri_device):
        """Receive new state data for this light."""
        self._refresh(tradfri_device)
        self.async_schedule_update_ha_state()
    async def async_update(self):
        """Fetch new state data for the group."""
        await self._api(self._group.update())
class TradfriLight(Light):
    """The platform class required by Home Assistant.

    Represents a single Tradfri bulb. State is pushed via a pytradfri
    observation (no polling); capabilities are re-derived on every refresh.
    """

    def __init__(self, light, api, gateway_id):
        """Initialize a Light."""
        self._api = api
        self._unique_id = "light-{}-{}".format(gateway_id, light.id)
        # All of the following are (re)populated by _refresh() below.
        self._light = None
        self._light_control = None
        self._light_data = None
        self._name = None
        self._hs_color = None
        self._features = SUPPORTED_FEATURES
        self._available = True
        self._gateway_id = gateway_id
        self._refresh(light)

    @property
    def unique_id(self):
        """Return unique ID for light."""
        return self._unique_id

    @property
    def device_info(self):
        """Return the device info."""
        info = self._light.device_info
        return {
            'identifiers': {
                (TRADFRI_DOMAIN, self._light.id)
            },
            'name': self._name,
            'manufacturer': info.manufacturer,
            'model': info.model_number,
            'sw_version': info.firmware_version,
            'via_hub': (TRADFRI_DOMAIN, self._gateway_id),
        }

    @property
    def min_mireds(self):
        """Return the coldest color_temp that this light supports."""
        return self._light_control.min_mireds

    @property
    def max_mireds(self):
        """Return the warmest color_temp that this light supports."""
        return self._light_control.max_mireds

    async def async_added_to_hass(self):
        """Start thread when added to hass."""
        self._async_start_observe()

    @property
    def available(self):
        """Return True if entity is available."""
        return self._available

    @property
    def should_poll(self):
        """No polling needed for tradfri light."""
        return False

    @property
    def supported_features(self):
        """Flag supported features."""
        return self._features

    @property
    def name(self):
        """Return the display name of this light."""
        return self._name

    @property
    def is_on(self):
        """Return true if light is on."""
        return self._light_data.state

    @property
    def brightness(self):
        """Return the brightness of the light."""
        return self._light_data.dimmer

    @property
    def color_temp(self):
        """Return the color temp value in mireds."""
        return self._light_data.color_temp

    @property
    def hs_color(self):
        """HS color of the light.

        Returns None implicitly for bulbs without color support.
        """
        if self._light_control.can_set_color:
            hsbxy = self._light_data.hsb_xy_color
            # Scale the gateway's hue/saturation ranges to HA's 0-360/0-100.
            hue = hsbxy[0] / (self._light_control.max_hue / 360)
            sat = hsbxy[1] / (self._light_control.max_saturation / 100)
            if hue is not None and sat is not None:
                return hue, sat

    async def async_turn_off(self, **kwargs):
        """Instruct the light to turn off."""
        # This allows transitioning to off, but resets the brightness
        # to 1 for the next set_state(True) command
        transition_time = None
        if ATTR_TRANSITION in kwargs:
            transition_time = int(kwargs[ATTR_TRANSITION]) * 10
            dimmer_data = {ATTR_DIMMER: 0, ATTR_TRANSITION_TIME:
                           transition_time}
            await self._api(self._light_control.set_dimmer(**dimmer_data))
        else:
            await self._api(self._light_control.set_state(False))

    async def async_turn_on(self, **kwargs):
        """Instruct the light to turn on.

        Builds up to three pytradfri commands (dimmer/state, color,
        color temp). The transition time is attached to at most one
        command: after first use the local is reset to None.
        """
        # HA supplies seconds; gateway value is seconds * 10 (presumably
        # tenths of a second — confirm against pytradfri).
        transition_time = None
        if ATTR_TRANSITION in kwargs:
            transition_time = int(kwargs[ATTR_TRANSITION]) * 10

        dimmer_command = None
        if ATTR_BRIGHTNESS in kwargs:
            brightness = kwargs[ATTR_BRIGHTNESS]
            # Clamp to the gateway's 0-254 dimmer range.
            if brightness > 254:
                brightness = 254
            elif brightness < 0:
                brightness = 0
            dimmer_data = {ATTR_DIMMER: brightness, ATTR_TRANSITION_TIME:
                           transition_time}
            dimmer_command = self._light_control.set_dimmer(**dimmer_data)
            transition_time = None
        else:
            dimmer_command = self._light_control.set_state(True)

        color_command = None
        if ATTR_HS_COLOR in kwargs and self._light_control.can_set_color:
            # Scale HA's 0-360/0-100 ranges back to the gateway's units.
            hue = int(kwargs[ATTR_HS_COLOR][0] *
                      (self._light_control.max_hue / 360))
            sat = int(kwargs[ATTR_HS_COLOR][1] *
                      (self._light_control.max_saturation / 100))
            color_data = {ATTR_HUE: hue, ATTR_SAT: sat, ATTR_TRANSITION_TIME:
                          transition_time}
            color_command = self._light_control.set_hsb(**color_data)
            transition_time = None

        temp_command = None
        if ATTR_COLOR_TEMP in kwargs and (self._light_control.can_set_temp or
                                          self._light_control.can_set_color):
            temp = kwargs[ATTR_COLOR_TEMP]
            # White Spectrum bulb
            if self._light_control.can_set_temp:
                # Clamp to the bulb's supported mired range.
                if temp > self.max_mireds:
                    temp = self.max_mireds
                elif temp < self.min_mireds:
                    temp = self.min_mireds
                temp_data = {ATTR_COLOR_TEMP: temp, ATTR_TRANSITION_TIME:
                             transition_time}
                temp_command = self._light_control.set_color_temp(**temp_data)
                transition_time = None
            # Color bulb (CWS)
            # color_temp needs to be set with hue/saturation
            elif self._light_control.can_set_color:
                temp_k = color_util.color_temperature_mired_to_kelvin(temp)
                hs_color = color_util.color_temperature_to_hs(temp_k)
                hue = int(hs_color[0] * (self._light_control.max_hue / 360))
                sat = int(hs_color[1] *
                          (self._light_control.max_saturation / 100))
                color_data = {ATTR_HUE: hue, ATTR_SAT: sat,
                              ATTR_TRANSITION_TIME: transition_time}
                color_command = self._light_control.set_hsb(**color_data)
                transition_time = None

        # HSB can always be set, but color temp + brightness is bulb dependant
        # NOTE(review): assumes pytradfri Command.__add__ tolerates a None
        # right-hand side — confirm against the pytradfri version in use.
        command = dimmer_command
        if command is not None:
            command += color_command
        else:
            command = color_command

        if self._light_control.can_combine_commands:
            await self._api(command + temp_command)
        else:
            if temp_command is not None:
                await self._api(temp_command)
            if command is not None:
                await self._api(command)

    @callback
    def _async_start_observe(self, exc=None):
        """Start observation of light."""
        # pylint: disable=import-error
        from pytradfri.error import PytradfriError
        if exc:
            # Flag the entity unavailable until an update arrives again.
            self._available = False
            self.async_schedule_update_ha_state()
            _LOGGER.warning("Observation failed for %s", self._name,
                            exc_info=exc)
        try:
            # duration=0 observes indefinitely; on failure this method is
            # re-invoked as err_callback, restarting the observation.
            cmd = self._light.observe(callback=self._observe_update,
                                      err_callback=self._async_start_observe,
                                      duration=0)
            self.hass.async_create_task(self._api(cmd))
        except PytradfriError as err:
            _LOGGER.warning("Observation failed, trying again", exc_info=err)
            self._async_start_observe()

    def _refresh(self, light):
        """Refresh the light data."""
        self._light = light
        # Caching of LightControl and light object
        self._available = light.reachable
        self._light_control = light.light_control
        self._light_data = light.light_control.lights[0]
        self._name = light.name
        # Re-derive supported features from the bulb's capabilities.
        self._features = SUPPORTED_FEATURES
        if light.light_control.can_set_dimmer:
            self._features |= SUPPORT_BRIGHTNESS
        if light.light_control.can_set_color:
            self._features |= SUPPORT_COLOR
        if light.light_control.can_set_temp:
            self._features |= SUPPORT_COLOR_TEMP

    @callback
    def _observe_update(self, tradfri_device):
        """Receive new state data for this light."""
        self._refresh(tradfri_device)
        self.async_schedule_update_ha_state()
| |
import smtplib
from collections import OrderedDict
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
import multiprocessing
from multiprocessing.managers import BaseManager
import platform
from time import sleep
import json
import signal
import pdb
import os
class Server(object):
    """Bulk mailer.

    Loads per-host settings from a JSON file, queues recipient addresses
    (minus opt-outs), renders one multipart message, and fans delivery out
    over multiple worker processes. Progress is periodically appended to
    ``progress/<list>-sent.txt``.
    """

    def load_file(self):
        """
        Load the settings in from a JSON string.

        Falls back to defaults (writing them out) when the per-host
        settings file does not exist yet.
        """
        filename = platform.node() + ".Waymoot"
        if not os.path.isfile(filename):
            self.settings = OrderedDict()
            self.makedefaults()
        else:
            # OrderedDict hooks preserve key order so the file round-trips.
            with open(filename, 'r') as filehandle:
                self.settings = json.loads(filehandle.read(),
                                           object_pairs_hook=OrderedDict,
                                           object_hook=OrderedDict)

    def save_file(self):
        """
        Save our settings to a JSON string.
        """
        filename = platform.node() + ".Waymoot"
        with open(filename, 'w') as filehandle:
            filehandle.write(json.dumps(self.settings, separators=(',', ':')))

    def __init__(self):
        """
        Initialize our main module, and create threads.
        """
        self.load_file()
        # Create a hopper for all the emails to reside in
        self.allemails = multiprocessing.Queue()
        self.optouts = []
        # Successfully-sent addresses, drained by saveprogress().
        self.sentmails = multiprocessing.Queue()
        self.procs = []

    def makedefaults(self):
        """
        Stick default settings in a file.
        """
        self.settings['sender'] = "noreply <noreply@example.com>"
        self.settings['smtpserver'] = 'smtp.example.com'
        self.settings['username'] = "user@example.com"
        self.settings['password'] = "password"
        self.settings['workers'] = multiprocessing.cpu_count() - 1
        self.settings['saveinterval'] = 1000
        self.settings['debug'] = True
        self.settings['SSL'] = False
        self.settings['port'] = 25
        self.save_file()

    def load(self, emails=None, exclude=None, htmltemplate=None,
             txttemplate=None, subject=None):
        """
        Load in the emails.

        Reads opt-outs and recipients from ``lists/``, the plain-text and
        HTML bodies from ``templates/``, then creates (but does not start)
        the saver and worker processes. Returns -1 on a missing argument.
        """
        if (subject is None or emails is None or htmltemplate is None
                or txttemplate is None):
            print("Error - Invalid call")
            return -1
        self.subject = subject
        self.sendlist = emails
        self.excludelist = exclude
        # Load Opt Outs into Memory.  Lines keep their trailing newline,
        # matching the raw lines read from the send list below.
        with open('lists/' + self.excludelist, 'r') as f:
            for email in f:
                self.optouts.append(email)
        # Load Emails into Memory
        # Don't add anyone who's opted out
        with open('lists/' + self.sendlist, 'r') as f:
            for email in f:
                if email not in self.optouts:
                    self.allemails.put(email)
        # Load in the email contents (plain-text and HTML versions).
        with open('templates/' + txttemplate, 'r') as inputFile:
            self.text = inputFile.read()
        with open('templates/' + htmltemplate, 'r') as inputFile:
            self.html = inputFile.read()
        # One process saves progress every X emails; the rest send mail.
        saver = multiprocessing.Process(target=self.saveprogress, args=())
        self.procs.append(saver)
        for proc in range(0, self.settings['workers']):
            newproc = multiprocessing.Process(target=self.sendmail, args=())
            self.procs.append(newproc)
            print(" Created Process - " + str(proc))

    def start(self):
        """Start every process created by load()."""
        for count, proc in enumerate(self.procs):
            proc.start()
            print(" Started " + str(count))

    def stop(self):
        """Terminate all workers and flush remaining progress to disk."""
        print("stopping")
        for count, proc in enumerate(self.procs):
            # multiprocessing.Process has no stop() method (the original
            # call raised AttributeError); terminate() sends SIGTERM and
            # join() reaps the child.
            proc.terminate()
            proc.join()
            print(" Stopped " + str(count))
        self.saveprogress(daemon=False)
        print("You are now free to turn off your computer.")

    def saveprogress(self, daemon=True):
        """
        Thread to save progress out to a file every X emails.

        With daemon=True this loops forever, flushing whenever more than
        ``saveinterval`` addresses are queued; with daemon=False it makes a
        single pass and then drains whatever is left.
        """
        loop = True
        while loop:
            if self.sentmails.qsize() > self.settings['saveinterval']:
                print('saving progress')
                with open('progress/' + self.sendlist + '-sent.txt',
                          'a') as filehandle:
                    while self.sentmails.qsize() > 0:
                        filehandle.write(self.sentmails.get())
            # Break the loop if needed. If not, wait.
            if not daemon:
                loop = False
            else:
                sleep(10)
        # Catch any that were < saveinterval
        with open('progress/' + self.sendlist + '-sent.txt',
                  'a') as filehandle:
            while self.sentmails.qsize() > 0:
                filehandle.write(self.sentmails.get())

    def sendmail(self):
        """
        Actually connect to the server, and push out the message.

        Worker loop: builds the multipart message once, reconnects to the
        SMTP server every 100 messages, and records each sent address.
        """
        msg = MIMEMultipart('alternative')
        msg['Subject'] = self.subject
        msg['From'] = self.settings['sender']
        msg['To'] = self.settings['sender']
        # Record the MIME types of both parts - text/plain and text/html.
        # Attach parts into message container in the Process, so it's local.
        msg.attach(MIMEText(self.text, 'plain'))
        msg.attach(MIMEText(self.html, 'html'))
        count = 0
        # conn starts as None so the final close below is safe even when
        # the queue was already empty (the original raised
        # UnboundLocalError in that case).
        conn = None
        # Grab some emails from the stack
        while self.allemails.qsize() > 0:
            curemail = self.allemails.get()
            # Re-establish the connection every 100 messages.
            if count % 100 == 0:
                if conn is not None:
                    conn.close()
                print("Establishing Connection to emailserver "
                      + self.settings['smtpserver'])
                if self.settings['SSL']:
                    conn = smtplib.SMTP_SSL(host=self.settings['smtpserver'],
                                            port=self.settings['port'])
                else:
                    conn = smtplib.SMTP(host=self.settings['smtpserver'],
                                        port=self.settings['port'])
                conn.set_debuglevel(False)
                conn.login(self.settings['username'],
                           self.settings['password'])
            msg.replace_header("To", curemail)
            try:
                conn.sendmail(self.settings['sender'], curemail,
                              msg.as_string())
                self.sentmails.put(curemail)
            finally:
                count += 1
                print("This thread has sent " + str(count))
        print("Thead is done. Thanks!")
        self.saveprogress(daemon=False)
        if conn is not None:
            conn.close()
# Module-level singleton: reading (or creating) the settings file happens
# as a side effect of importing this module.
server = Server()
| |
# Copyright (c) 2013 NTT DOCOMO, INC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from ironicclient import exc as ironic_exc
import mock
import six
from webob import exc
from nova.api.openstack.compute import baremetal_nodes \
as b_nodes_v21
from nova.api.openstack.compute.legacy_v2.contrib import baremetal_nodes \
as b_nodes_v2
from nova.api.openstack import extensions
from nova import context
from nova import test
from nova.tests.unit.virt.ironic import utils as ironic_utils
class FakeRequest(object):
    """Minimal request stub exposing only the nova request context."""

    def __init__(self, context):
        self.environ = {"nova.context": context}
def fake_node(**updates):
    """Return a fake bare-metal node dict; keyword args override defaults."""
    node = dict(
        id=1,
        service_host="host",
        cpus=8,
        memory_mb=8192,
        local_gb=128,
        pm_address="10.1.2.3",
        pm_user="pm_user",
        pm_password="pm_pass",
        terminal_port=8000,
        interfaces=[],
        instance_uuid='fake-instance-uuid',
    )
    node.update(updates)
    return node
def fake_node_ext_status(**updates):
    """Return a fake node dict extended with ext-status fields."""
    node = fake_node(uuid='fake-uuid',
                     task_state='fake-task-state',
                     updated_at='fake-updated-at',
                     pxe_config_path='fake-pxe-config-path')
    node.update(updates)
    return node
# Single shared fake ironic client, patched into the controllers below.
FAKE_IRONIC_CLIENT = ironic_utils.FakeClient()
@mock.patch.object(b_nodes_v21, '_get_ironic_client',
                   lambda *_: FAKE_IRONIC_CLIENT)
class BareMetalNodesTestV21(test.NoDBTestCase):
    """Tests for the v2.1 bare-metal-nodes API, proxied to Ironic."""

    # Module under test; overridden by the legacy-v2 subclass.
    mod = b_nodes_v21

    def setUp(self):
        super(BareMetalNodesTestV21, self).setUp()
        self._setup()
        self.context = context.get_admin_context()
        self.request = FakeRequest(self.context)

    def _setup(self):
        # Hook for subclasses that need a different controller constructor.
        self.controller = b_nodes_v21.BareMetalNodeController()

    @mock.patch.object(FAKE_IRONIC_CLIENT.node, 'list')
    def test_index_ironic(self, mock_list):
        """index() maps ironic node properties into the nova node view."""
        properties = {'cpus': 2, 'memory_mb': 1024, 'local_gb': 20}
        node = ironic_utils.get_test_node(properties=properties)
        mock_list.return_value = [node]
        res_dict = self.controller.index(self.request)
        expected_output = {'nodes':
                           [{'memory_mb': properties['memory_mb'],
                             'host': 'IRONIC MANAGED',
                             'disk_gb': properties['local_gb'],
                             'interfaces': [],
                             'task_state': None,
                             'id': node.uuid,
                             'cpus': properties['cpus']}]}
        self.assertEqual(expected_output, res_dict)
        mock_list.assert_called_once_with(detail=True)

    @mock.patch.object(FAKE_IRONIC_CLIENT.node, 'list')
    def test_index_ironic_missing_properties(self, mock_list):
        """Missing ironic properties default to 0 in the response."""
        properties = {'cpus': 2}
        node = ironic_utils.get_test_node(properties=properties)
        mock_list.return_value = [node]
        res_dict = self.controller.index(self.request)
        expected_output = {'nodes':
                           [{'memory_mb': 0,
                             'host': 'IRONIC MANAGED',
                             'disk_gb': 0,
                             'interfaces': [],
                             'task_state': None,
                             'id': node.uuid,
                             'cpus': properties['cpus']}]}
        self.assertEqual(expected_output, res_dict)
        mock_list.assert_called_once_with(detail=True)

    def test_index_ironic_not_implemented(self):
        # Without the ironicclient library the API returns 501.
        with mock.patch.object(self.mod, 'ironic_client', None):
            self.assertRaises(exc.HTTPNotImplemented,
                              self.controller.index,
                              self.request)

    @mock.patch.object(FAKE_IRONIC_CLIENT.node, 'list_ports')
    @mock.patch.object(FAKE_IRONIC_CLIENT.node, 'get')
    def test_show_ironic(self, mock_get, mock_list_ports):
        """show() returns node details plus its port addresses."""
        properties = {'cpus': 1, 'memory_mb': 512, 'local_gb': 10}
        node = ironic_utils.get_test_node(properties=properties)
        port = ironic_utils.get_test_port()
        mock_get.return_value = node
        mock_list_ports.return_value = [port]
        res_dict = self.controller.show(self.request, node.uuid)
        expected_output = {'node':
                           {'memory_mb': properties['memory_mb'],
                            'instance_uuid': None,
                            'host': 'IRONIC MANAGED',
                            'disk_gb': properties['local_gb'],
                            'interfaces': [{'address': port.address}],
                            'task_state': None,
                            'id': node.uuid,
                            'cpus': properties['cpus']}}
        self.assertEqual(expected_output, res_dict)
        mock_get.assert_called_once_with(node.uuid)
        mock_list_ports.assert_called_once_with(node.uuid)

    @mock.patch.object(FAKE_IRONIC_CLIENT.node, 'list_ports')
    @mock.patch.object(FAKE_IRONIC_CLIENT.node, 'get')
    def test_show_ironic_no_properties(self, mock_get, mock_list_ports):
        """show() defaults all numeric fields to 0 without properties."""
        properties = {}
        node = ironic_utils.get_test_node(properties=properties)
        port = ironic_utils.get_test_port()
        mock_get.return_value = node
        mock_list_ports.return_value = [port]
        res_dict = self.controller.show(self.request, node.uuid)
        expected_output = {'node':
                           {'memory_mb': 0,
                            'instance_uuid': None,
                            'host': 'IRONIC MANAGED',
                            'disk_gb': 0,
                            'interfaces': [{'address': port.address}],
                            'task_state': None,
                            'id': node.uuid,
                            'cpus': 0}}
        self.assertEqual(expected_output, res_dict)
        mock_get.assert_called_once_with(node.uuid)
        mock_list_ports.assert_called_once_with(node.uuid)

    @mock.patch.object(FAKE_IRONIC_CLIENT.node, 'list_ports')
    @mock.patch.object(FAKE_IRONIC_CLIENT.node, 'get')
    def test_show_ironic_no_interfaces(self, mock_get, mock_list_ports):
        """A node with no ports yields an empty interfaces list."""
        properties = {'cpus': 1, 'memory_mb': 512, 'local_gb': 10}
        node = ironic_utils.get_test_node(properties=properties)
        mock_get.return_value = node
        mock_list_ports.return_value = []
        res_dict = self.controller.show(self.request, node.uuid)
        self.assertEqual([], res_dict['node']['interfaces'])
        mock_get.assert_called_once_with(node.uuid)
        mock_list_ports.assert_called_once_with(node.uuid)

    @mock.patch.object(FAKE_IRONIC_CLIENT.node, 'get',
                       side_effect=ironic_exc.NotFound())
    def test_show_ironic_node_not_found(self, mock_get):
        # The 404 message must mention the missing node id.
        error = self.assertRaises(exc.HTTPNotFound, self.controller.show,
                                  self.request, 'fake-uuid')
        self.assertIn('fake-uuid', six.text_type(error))

    def test_show_ironic_not_implemented(self):
        with mock.patch.object(self.mod, 'ironic_client', None):
            properties = {'cpus': 1, 'memory_mb': 512, 'local_gb': 10}
            node = ironic_utils.get_test_node(properties=properties)
            self.assertRaises(exc.HTTPNotImplemented, self.controller.show,
                              self.request, node.uuid)

    # Mutating operations are proxied no further: all return 400.
    def test_create_ironic_not_supported(self):
        self.assertRaises(exc.HTTPBadRequest,
                          self.controller.create,
                          self.request, {'node': object()})

    def test_delete_ironic_not_supported(self):
        self.assertRaises(exc.HTTPBadRequest,
                          self.controller.delete,
                          self.request, 'fake-id')

    def test_add_interface_ironic_not_supported(self):
        self.assertRaises(exc.HTTPBadRequest,
                          self.controller._add_interface,
                          self.request, 'fake-id', 'fake-body')

    def test_remove_interface_ironic_not_supported(self):
        self.assertRaises(exc.HTTPBadRequest,
                          self.controller._remove_interface,
                          self.request, 'fake-id', 'fake-body')
@mock.patch.object(b_nodes_v2, '_get_ironic_client',
                   lambda *_: FAKE_IRONIC_CLIENT)
class BareMetalNodesTestV2(BareMetalNodesTestV21):
    """Re-run the v2.1 tests against the legacy v2 extension controller."""

    mod = b_nodes_v2

    def _setup(self):
        # The legacy controller takes an (empty) extension manager.
        self.ext_mgr = extensions.ExtensionManager()
        self.ext_mgr.extensions = {}
        self.controller = b_nodes_v2.BareMetalNodeController(self.ext_mgr)
| |
########################## LICENCE ###############################
# Copyright (c) 2005-2012, Michele Simionato
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
# Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# Redistributions in bytecode form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
# DAMAGE.
"""
Decorator module, see http://pypi.python.org/pypi/decorator
for the documentation.
NOTE: this is an IPython-patched version to work on IronPython. See
FIXED comment below.
"""
from __future__ import print_function
__version__ = '3.3.3'
__all__ = ["decorator", "FunctionMaker", "partial"]
import sys
import re
import inspect
# Use the stdlib functools.partial when available; otherwise fall back to
# a minimal pure-Python equivalent.
try:
    from functools import partial
except ImportError:  # for Python version < 2.5
    class partial(object):
        "A simple replacement of functools.partial"

        def __init__(self, func, *args, **kw):
            self.func = func
            self.args = args
            self.keywords = kw

        def __call__(self, *otherargs, **otherkw):
            # Call-time keywords win over stored ones, matching
            # functools.partial semantics.
            kw = self.keywords.copy()
            kw.update(otherkw)
            return self.func(*(self.args + otherargs), **kw)
# On Python 3 use the real inspect.getfullargspec; on Python 2 emulate it
# on top of inspect.getargspec.
if sys.version >= '3':
    from inspect import getfullargspec
else:
    class getfullargspec(object):
        "A quick and dirty replacement for getfullargspec for Python 2.X"

        def __init__(self, f):
            self.args, self.varargs, self.varkw, self.defaults = \
                inspect.getargspec(f)
            # Python 2 has no keyword-only arguments.
            self.kwonlyargs = []
            self.kwonlydefaults = None

        def __iter__(self):
            # Support tuple-unpacking like the old getargspec result.
            yield self.args
            yield self.varargs
            yield self.varkw
            yield self.defaults
# Matches a function-definition header and captures the function name.
# Raw string avoids invalid-escape-sequence warnings for \s/\w/\d on
# modern Python.
DEF = re.compile(r'\s*def\s*([_\w][_\w\d]*)\s*\(')
# basic functionality
class FunctionMaker(object):
    """
    An object with the ability to create functions with a given signature.
    It has attributes name, doc, module, signature, defaults, dict and
    methods update and make.
    """

    def __init__(self, func=None, name=None, signature=None,
                 defaults=None, doc=None, module=None, funcdict=None):
        self.shortsignature = signature
        if func:
            # func can be a class or a callable, but not an instance method
            self.name = func.__name__
            if self.name == '<lambda>':  # small hack for lambda functions
                self.name = '_lambda_'
            self.doc = func.__doc__
            self.module = func.__module__
            if inspect.isfunction(func):
                argspec = getfullargspec(func)
                self.annotations = getattr(func, '__annotations__', {})
                # Mirror the argspec fields as attributes of self so they
                # can be interpolated via vars(self) in make().
                for a in ('args', 'varargs', 'varkw', 'defaults', 'kwonlyargs',
                          'kwonlydefaults'):
                    setattr(self, a, getattr(argspec, a))
                for i, arg in enumerate(self.args):
                    setattr(self, 'arg%d' % i, arg)
                if sys.version < '3':  # easy way
                    self.shortsignature = self.signature = \
                        inspect.formatargspec(
                            formatvalue=lambda val: "", *argspec)[1:-1]
                else:  # Python 3 way
                    # signature is for the generated def; shortsignature is
                    # what gets passed through in the generated call.
                    self.signature = self.shortsignature = ', '.join(self.args)
                    if self.varargs:
                        self.signature += ', *' + self.varargs
                        self.shortsignature += ', *' + self.varargs
                    if self.kwonlyargs:
                        for a in self.kwonlyargs:
                            self.signature += ', %s=None' % a
                            self.shortsignature += ', %s=%s' % (a, a)
                    if self.varkw:
                        self.signature += ', **' + self.varkw
                        self.shortsignature += ', **' + self.varkw
                self.dict = func.__dict__.copy()
        # func=None happens when decorating a caller
        if name:
            self.name = name
        if signature is not None:
            self.signature = signature
        if defaults:
            self.defaults = defaults
        if doc:
            self.doc = doc
        if module:
            self.module = module
        if funcdict:
            self.dict = funcdict
        # check existence required attributes
        assert hasattr(self, 'name')
        if not hasattr(self, 'signature'):
            raise TypeError('You are decorating a non function: %s' % func)

    def update(self, func, **kw):
        "Update the signature of func with the data in self"
        func.__name__ = self.name
        func.__doc__ = getattr(self, 'doc', None)
        func.__dict__ = getattr(self, 'dict', {})
        func.__defaults__ = getattr(self, 'defaults', ())
        func.__kwdefaults__ = getattr(self, 'kwonlydefaults', None)
        func.__annotations__ = getattr(self, 'annotations', None)
        # FIXED: The following is try/excepted in IPython to work
        # with IronPython.
        try:
            # Default __module__ to the caller of the caller's caller.
            callermodule = sys._getframe(3).f_globals.get('__name__', '?')
        # IronPython _getframe only exists with FullFrames
        except AttributeError:
            callermodule = '?'
        func.__module__ = getattr(self, 'module', callermodule)
        func.__dict__.update(kw)

    def make(self, src_templ, evaldict=None, addsource=False, **attrs):
        "Make a new function from a given template and update the signature"
        src = src_templ % vars(self)  # expand name and signature
        evaldict = evaldict or {}
        mo = DEF.match(src)
        if mo is None:
            raise SyntaxError('not a valid function template\n%s' % src)
        name = mo.group(1)  # extract the function name
        names = set([name] + [arg.strip(' *') for arg in
                              self.shortsignature.split(',')])
        # Guard against the internal names used by decorator() leaking into
        # (and being shadowed by) the generated signature.
        for n in names:
            if n in ('_func_', '_call_'):
                raise NameError('%s is overridden in\n%s' % (n, src))
        if not src.endswith('\n'):  # add a newline just for safety
            src += '\n'  # this is needed in old versions of Python
        try:
            code = compile(src, '<string>', 'single')
            exec(code, evaldict)
        except:
            print('Error in generated code:', file=sys.stderr)
            print(src, file=sys.stderr)
            raise
        func = evaldict[name]
        if addsource:
            # Keep the generated source around for introspection/debugging.
            attrs['__source__'] = src
        self.update(func, **attrs)
        return func

    @classmethod
    def create(cls, obj, body, evaldict, defaults=None,
               doc=None, module=None, addsource=True, **attrs):
        """
        Create a function from the strings name, signature and body.
        evaldict is the evaluation dictionary. If addsource is true an attribute
        __source__ is added to the result. The attributes attrs are added,
        if any.
        """
        if isinstance(obj, str):  # "name(signature)"
            name, rest = obj.strip().split('(', 1)
            signature = rest[:-1]  # strip a right parens
            func = None
        else:  # a function
            name = None
            signature = None
            func = obj
        self = cls(func, name, signature, defaults, doc, module)
        # Indent the body one level under the generated def line.
        ibody = '\n'.join(' ' + line for line in body.splitlines())
        return self.make('def %(name)s(%(signature)s):\n' + ibody,
                         evaldict, addsource, **attrs)
def decorator(caller, func=None):
    """
    decorator(caller) converts a caller function into a decorator;
    decorator(caller, func) decorates a function using a caller.
    """
    if func is not None:  # returns a decorated function
        # Generate a wrapper with func's exact signature that forwards to
        # caller(func, ...).
        evaldict = func.__globals__.copy()
        evaldict['_call_'] = caller
        evaldict['_func_'] = func
        return FunctionMaker.create(
            func, "return _call_(_func_, %(shortsignature)s)",
            evaldict, undecorated=func, __wrapped__=func)
    else:  # returns a decorator
        if isinstance(caller, partial):
            return partial(decorator, caller)
        # otherwise assume caller is a function
        # NOTE(review): inspect.getargspec was removed in Python 3.11;
        # this legacy compat module predates that — confirm the target
        # runtime before relying on this branch.
        first = inspect.getargspec(caller)[0][0]  # first arg
        evaldict = caller.__globals__.copy()
        evaldict['_call_'] = caller
        evaldict['decorator'] = decorator
        return FunctionMaker.create(
            '%s(%s)' % (caller.__name__, first),
            'return decorator(_call_, %s)' % first,
            evaldict, undecorated=caller, __wrapped__=caller,
            doc=caller.__doc__, module=caller.__module__)
| |
# --------------------------------------------------------
# Written by Bharat Singh
# Modified version of py-R-FCN
# --------------------------------------------------------
"""Train a Fast R-CNN network."""
import caffe
from fast_rcnn.config import cfg
import roi_data_layer.roidb as rdl_roidb
from utils.timer import Timer
import numpy as np
import os
from caffe.proto import caffe_pb2
import google.protobuf as pb2
import google.protobuf.text_format
from multiprocessing import Process
class SolverWrapper(object):
"""A simple wrapper around Caffe's solver.
This wrapper gives us control over he snapshotting process, which we
use to unnormalize the learned bounding-box regression weights.
"""
def __init__(self, solver_prototxt, roidb, output_dir, gpu_id,
pretrained_model=None):
"""Initialize the SolverWrapper."""
self.output_dir = output_dir
self.gpu_id = gpu_id
if (cfg.TRAIN.HAS_RPN and cfg.TRAIN.BBOX_REG and
cfg.TRAIN.BBOX_NORMALIZE_TARGETS):
# RPN can only use precomputed normalization because there are no
# fixed statistics to compute a priori
assert cfg.TRAIN.BBOX_NORMALIZE_TARGETS_PRECOMPUTED
if cfg.TRAIN.BBOX_REG:
print 'Computing bounding-box regression targets...'
self.bbox_means, self.bbox_stds = \
rdl_roidb.add_bbox_regression_targets(roidb)
print 'done'
self.solver = caffe.SGDSolver(solver_prototxt)
if pretrained_model is not None:
print ('Loading pretrained model '
'weights from {:s}').format(pretrained_model)
self.solver.net.copy_from(pretrained_model)
self.solver_param = caffe_pb2.SolverParameter()
with open(solver_prototxt, 'rt') as f:
pb2.text_format.Merge(f.read(), self.solver_param)
self.solver.net.layers[0].set_roidb(roidb, gpu_id)
def snapshot(self):
"""Take a snapshot of the network after unnormalizing the learned
bounding-box regression weights. This enables easy use at test-time.
"""
net = self.solver.net
scale_bbox_params_faster_rcnn = (cfg.TRAIN.BBOX_REG and
cfg.TRAIN.BBOX_NORMALIZE_TARGETS and
net.params.has_key('bbox_pred'))
scale_bbox_params_rfcn = (cfg.TRAIN.BBOX_REG and
cfg.TRAIN.BBOX_NORMALIZE_TARGETS and
net.params.has_key('rfcn_bbox'))
scale_bbox_params_rpn = (cfg.TRAIN.RPN_NORMALIZE_TARGETS and
net.params.has_key('rpn_bbox_pred'))
if scale_bbox_params_faster_rcnn:
# save original values
orig_0 = net.params['bbox_pred'][0].data.copy()
orig_1 = net.params['bbox_pred'][1].data.copy()
# scale and shift with bbox reg unnormalization; then save snapshot
net.params['bbox_pred'][0].data[...] = \
(net.params['bbox_pred'][0].data *
self.bbox_stds[:, np.newaxis])
net.params['bbox_pred'][1].data[...] = \
(net.params['bbox_pred'][1].data *
self.bbox_stds + self.bbox_means)
if scale_bbox_params_rpn:
rpn_orig_0 = net.params['rpn_bbox_pred'][0].data.copy()
rpn_orig_1 = net.params['rpn_bbox_pred'][1].data.copy()
num_anchor = rpn_orig_0.shape[0] / 4
# scale and shift with bbox reg unnormalization; then save snapshot
self.rpn_means = np.tile(np.asarray(cfg.TRAIN.RPN_NORMALIZE_MEANS),
num_anchor)
self.rpn_stds = np.tile(np.asarray(cfg.TRAIN.RPN_NORMALIZE_STDS),
num_anchor)
net.params['rpn_bbox_pred'][0].data[...] = \
(net.params['rpn_bbox_pred'][0].data *
self.rpn_stds[:, np.newaxis, np.newaxis, np.newaxis])
net.params['rpn_bbox_pred'][1].data[...] = \
(net.params['rpn_bbox_pred'][1].data *
self.rpn_stds + self.rpn_means)
if scale_bbox_params_rfcn:
# save original values
orig_0 = net.params['rfcn_bbox'][0].data.copy()
orig_1 = net.params['rfcn_bbox'][1].data.copy()
repeat = orig_1.shape[0] / self.bbox_means.shape[0]
# scale and shift with bbox reg unnormalization; then save snapshot
net.params['rfcn_bbox'][0].data[...] = \
(net.params['rfcn_bbox'][0].data *
np.repeat(self.bbox_stds, repeat).reshape((orig_1.shape[0], 1, 1, 1)))
net.params['rfcn_bbox'][1].data[...] = \
(net.params['rfcn_bbox'][1].data *
np.repeat(self.bbox_stds, repeat) + np.repeat(self.bbox_means, repeat))
infix = ('_' + cfg.TRAIN.SNAPSHOT_INFIX
if cfg.TRAIN.SNAPSHOT_INFIX != '' else '')
filename = (self.solver_param.snapshot_prefix + infix +
'_iter_{:d}'.format(self.solver.iter) + '.caffemodel')
filename = os.path.join(self.output_dir, filename)
if self.gpu_id == 0:
net.save(str(filename))
print 'Wrote snapshot to: {:s}'.format(filename)
if scale_bbox_params_faster_rcnn:
# restore net to original state
net.params['bbox_pred'][0].data[...] = orig_0
net.params['bbox_pred'][1].data[...] = orig_1
if scale_bbox_params_rfcn:
# restore net to original state
net.params['rfcn_bbox'][0].data[...] = orig_0
net.params['rfcn_bbox'][1].data[...] = orig_1
if scale_bbox_params_rpn:
# restore net to original state
net.params['rpn_bbox_pred'][0].data[...] = rpn_orig_0
net.params['rpn_bbox_pred'][1].data[...] = rpn_orig_1
return filename
    def track_memory(self):
        """Print a per-blob and per-parameter memory breakdown of the net.

        Sizes are derived from each blob's ``data.nbytes`` and reported in
        GB, followed by Data / Params / Total summaries.
        """
        net = self.solver.net
        print 'Memory Usage:'
        total = 0.0
        data = 0.0
        params = 0.0
        # Activation ("data") blobs.
        for k,v in net.blobs.iteritems():
            gb = float(v.data.nbytes)/1024/1024/1024
            print '%s : %.3f GB %s' % (k,gb,v.data.shape)
            total += gb
            data += gb
        print 'Memory Usage: Data %.3f GB' % data
        # Learnable parameter blobs; each layer may hold several (e.g.
        # weights and biases), hence the inner enumerate loop.
        for k,v in net.params.iteritems():
            for i,p in enumerate(v):
                gb = float(p.data.nbytes)/1024/1024/1024
                total += gb
                params += gb
                print '%s[%d] : %.3f GB %s' % (k,i,gb,p.data.shape)
        print 'Memory Usage: Params %.3f GB' % params
        print 'Memory Usage: Total %.3f GB' % total
    def getSolver(self):
        """Return the underlying caffe solver held by this wrapper."""
        return self.solver
def solve(proto, roidb, pretrained_model, gpus, uid, rank, output_dir, max_iter):
    """Per-process training loop for one GPU (this process is *rank*).

    Configures caffe's GPU/solver-rank state for this process, builds a
    SolverWrapper, attaches NCCL callbacks for multi-GPU synchronization,
    and steps the solver until *max_iter*. Only rank 0 writes snapshots.
    """
    caffe.set_mode_gpu()
    caffe.set_device(gpus[rank])
    caffe.set_solver_count(len(gpus))
    caffe.set_solver_rank(rank)
    caffe.set_multiprocess(True)
    cfg.GPU_ID = gpus[rank]
    solverW = SolverWrapper(proto, roidb, output_dir,rank,pretrained_model)
    solver = solverW.getSolver()
    nccl = caffe.NCCL(solver, uid)
    # bcast: synchronize initial solver state across all processes
    # sharing this NCCL uid.
    nccl.bcast()
    solver.add_callback(nccl)
    if solver.param.layer_wise_reduce:
        solver.net.after_backward(nccl)
    count = 0
    # Step in SNAPSHOT_ITERS-sized chunks so rank 0 can snapshot in between.
    while count < max_iter:
        print 'Solver step'
        solver.step(cfg.TRAIN.SNAPSHOT_ITERS)
        if rank == 0:
            solverW.snapshot()
        #solverW.track_memory()
        count = count + cfg.TRAIN.SNAPSHOT_ITERS
def get_training_roidb(imdb):
    """Returns a roidb (Region of Interest database) for use in training.

    Optionally doubles the dataset with horizontally flipped copies
    (cfg.TRAIN.USE_FLIPPED) before preparing the roidb.
    """
    if cfg.TRAIN.USE_FLIPPED:
        print 'Appending horizontally-flipped training examples...'
        imdb.append_flipped_images()
        print 'done'
    print 'Preparing training data...'
    rdl_roidb.prepare_roidb(imdb)
    print 'done'
    return imdb.roidb
def filter_roidb(roidb):
"""Remove roidb entries that have no usable RoIs."""
def is_valid(entry):
# Valid images have:
# (1) At least one foreground RoI OR
# (2) At least one background RoI
overlaps = entry['max_overlaps']
# find boxes with sufficient overlap
fg_inds = np.where(overlaps >= cfg.TRAIN.FG_THRESH)[0]
# Select background RoIs as those within [BG_THRESH_LO, BG_THRESH_HI)
bg_inds = np.where((overlaps < cfg.TRAIN.BG_THRESH_HI) &
(overlaps >= cfg.TRAIN.BG_THRESH_LO))[0]
# image is only valid if such boxes exist
valid = len(fg_inds) > 0 or len(bg_inds) > 0
return valid
num = len(roidb)
filtered_roidb = [entry for entry in roidb if is_valid(entry)]
num_after = len(filtered_roidb)
print 'Filtered {} roidb entries: {} -> {}'.format(num - num_after,
num, num_after)
return filtered_roidb
def train_net_multi_gpu(solver_prototxt, roidb, output_dir, pretrained_model, max_iter, gpus):
    """Train a Fast R-CNN network.

    Spawns one worker process per GPU, all sharing a single NCCL uid so
    their solvers can synchronize, then waits for every worker to finish.
    """
    # One NCCL uid shared by every worker process.
    uid = caffe.NCCL.new_uid()
    caffe.init_log()
    caffe.log('Using devices %s' % str(gpus))
    procs = []
    for rank in range(len(gpus)):
        p = Process(target=solve,
                    args=(solver_prototxt, roidb, pretrained_model, gpus, uid, rank, output_dir, max_iter))
        # Non-daemonic so workers may outlive unexpected parent state and
        # are explicitly joined below.
        p.daemon = False
        p.start()
        procs.append(p)
    for p in procs:
        p.join()
| |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_utils import uuidutils
from neutron.agent.common import config as agent_config
from neutron.agent.l3 import router_info
from neutron.agent.linux import ip_lib
from neutron.common import constants as l3_constants
from neutron.common import exceptions as n_exc
from neutron.tests import base
_uuid = uuidutils.generate_uuid
class TestRouterInfo(base.BaseTestCase):
    """Tests for RouterInfo routing-table manipulation.

    IPWrapper is patched so route changes can be verified as `ip route`
    commands executed inside the router namespace.
    """
    def setUp(self):
        super(TestRouterInfo, self).setUp()
        conf = agent_config.setup_conf()
        conf.use_namespaces = True
        # Patch IPWrapper so namespace commands are recorded, not executed.
        self.ip_cls_p = mock.patch('neutron.agent.linux.ip_lib.IPWrapper')
        ip_cls = self.ip_cls_p.start()
        self.mock_ip = mock.MagicMock()
        ip_cls.return_value = self.mock_ip
        self.ri_kwargs = {'agent_conf': conf,
                          'interface_driver': mock.sentinel.interface_driver}
    def _check_agent_method_called(self, calls):
        # Assert each expected command was run via netns.execute.
        self.mock_ip.netns.execute.assert_has_calls(
            [mock.call(call, check_exit_code=False) for call in calls],
            any_order=True)
    def test_routing_table_update(self):
        # replace/delete of two routes should each issue the matching
        # `ip route` command.
        ri = router_info.RouterInfo(_uuid(), {}, **self.ri_kwargs)
        ri.router = {}
        fake_route1 = {'destination': '135.207.0.0/16',
                       'nexthop': '1.2.3.4'}
        fake_route2 = {'destination': '135.207.111.111/32',
                       'nexthop': '1.2.3.4'}
        ri.update_routing_table('replace', fake_route1)
        expected = [['ip', 'route', 'replace', 'to', '135.207.0.0/16',
                     'via', '1.2.3.4']]
        self._check_agent_method_called(expected)
        ri.update_routing_table('delete', fake_route1)
        expected = [['ip', 'route', 'delete', 'to', '135.207.0.0/16',
                     'via', '1.2.3.4']]
        self._check_agent_method_called(expected)
        ri.update_routing_table('replace', fake_route2)
        expected = [['ip', 'route', 'replace', 'to', '135.207.111.111/32',
                     'via', '1.2.3.4']]
        self._check_agent_method_called(expected)
        ri.update_routing_table('delete', fake_route2)
        expected = [['ip', 'route', 'delete', 'to', '135.207.111.111/32',
                     'via', '1.2.3.4']]
        self._check_agent_method_called(expected)
    def test_update_routing_table(self):
        # Just verify the correct namespace was used in the call
        uuid = _uuid()
        netns = 'qrouter-' + uuid
        fake_route1 = {'destination': '135.207.0.0/16',
                       'nexthop': '1.2.3.4'}
        ri = router_info.RouterInfo(uuid, {'id': uuid}, **self.ri_kwargs)
        ri._update_routing_table = mock.Mock()
        ri.update_routing_table('replace', fake_route1)
        ri._update_routing_table.assert_called_once_with('replace',
                                                         fake_route1,
                                                         netns)
    def test_routes_updated(self):
        # routes_updated should diff old vs new routes, replacing added
        # routes and deleting removed ones.
        ri = router_info.RouterInfo(_uuid(), {}, **self.ri_kwargs)
        ri.router = {}
        fake_old_routes = []
        fake_new_routes = [{'destination': "110.100.31.0/24",
                            'nexthop': "10.100.10.30"},
                           {'destination': "110.100.30.0/24",
                            'nexthop': "10.100.10.30"}]
        ri.routes = fake_old_routes
        ri.router['routes'] = fake_new_routes
        ri.routes_updated()
        expected = [['ip', 'route', 'replace', 'to', '110.100.30.0/24',
                     'via', '10.100.10.30'],
                    ['ip', 'route', 'replace', 'to', '110.100.31.0/24',
                     'via', '10.100.10.30']]
        self._check_agent_method_called(expected)
        # Dropping one route triggers a delete for it.
        fake_new_routes = [{'destination': "110.100.30.0/24",
                            'nexthop': "10.100.10.30"}]
        ri.router['routes'] = fake_new_routes
        ri.routes_updated()
        expected = [['ip', 'route', 'delete', 'to', '110.100.31.0/24',
                     'via', '10.100.10.30']]
        self._check_agent_method_called(expected)
        # Dropping the last route deletes it as well.
        fake_new_routes = []
        ri.router['routes'] = fake_new_routes
        ri.routes_updated()
        expected = [['ip', 'route', 'delete', 'to', '110.100.30.0/24',
                     'via', '10.100.10.30']]
        self._check_agent_method_called(expected)
class BasicRouterTestCaseFramework(base.BaseTestCase):
    """Shared helper for building RouterInfo objects backed by mocks."""

    def _create_router(self, router=None, **kwargs):
        """Return a RouterInfo wrapping *router* (a MagicMock by default)."""
        if not router:
            router = mock.MagicMock()
        conf = mock.Mock()
        # NOTE The use_namespaces config will soon be deprecated
        conf.use_namespaces = True
        self.agent_conf = conf
        self.router_id = _uuid()
        return router_info.RouterInfo(
            self.router_id,
            router,
            self.agent_conf,
            mock.sentinel.interface_driver,
            **kwargs)
class TestBasicRouterOperations(BasicRouterTestCaseFramework):
    """Floating-IP and NAT behavior of RouterInfo with mocked internals."""

    def test_get_floating_ips(self):
        router = mock.MagicMock()
        router.get.return_value = [mock.sentinel.floating_ip]
        ri = self._create_router(router)
        fips = ri.get_floating_ips()
        self.assertEqual([mock.sentinel.floating_ip], fips)

    def test_process_floating_ip_nat_rules(self):
        ri = self._create_router()
        fips = [{'fixed_ip_address': mock.sentinel.ip,
                 'floating_ip_address': mock.sentinel.fip}]
        ri.get_floating_ips = mock.Mock(return_value=fips)
        ri.iptables_manager = mock.MagicMock()
        ipv4_nat = ri.iptables_manager.ipv4['nat']
        ri.floating_forward_rules = mock.Mock(
            return_value=[(mock.sentinel.chain, mock.sentinel.rule)])
        ri.process_floating_ip_nat_rules()
        # Be sure that the rules are cleared first and apply is called last
        self.assertEqual(mock.call.clear_rules_by_tag('floating_ip'),
                         ipv4_nat.mock_calls[0])
        self.assertEqual(mock.call.apply(), ri.iptables_manager.mock_calls[-1])
        # Be sure that add_rule is called somewhere in the middle
        ipv4_nat.add_rule.assert_called_once_with(mock.sentinel.chain,
                                                  mock.sentinel.rule,
                                                  tag='floating_ip')

    def test_process_floating_ip_nat_rules_removed(self):
        ri = self._create_router()
        ri.get_floating_ips = mock.Mock(return_value=[])
        ri.iptables_manager = mock.MagicMock()
        ipv4_nat = ri.iptables_manager.ipv4['nat']
        ri.process_floating_ip_nat_rules()
        # Be sure that the rules are cleared first and apply is called last
        self.assertEqual(mock.call.clear_rules_by_tag('floating_ip'),
                         ipv4_nat.mock_calls[0])
        self.assertEqual(mock.call.apply(), ri.iptables_manager.mock_calls[-1])
        # With no floating IPs, no rules should be added in between.
        self.assertFalse(ipv4_nat.add_rule.called)

    def _test_add_fip_addr_to_device_error(self, device):
        # Helper: attempt to add a fip address to *device*, return outcome.
        ri = self._create_router()
        ip = '15.1.2.3'
        result = ri._add_fip_addr_to_device(
            {'id': mock.sentinel.id, 'floating_ip_address': ip}, device)
        device.addr.add.assert_called_with(ip + '/32')
        return result

    def test__add_fip_addr_to_device(self):
        result = self._test_add_fip_addr_to_device_error(mock.Mock())
        self.assertTrue(result)

    def test__add_fip_addr_to_device_error(self):
        device = mock.Mock()
        device.addr.add.side_effect = RuntimeError
        result = self._test_add_fip_addr_to_device_error(device)
        self.assertFalse(result)

    def test_process_snat_dnat_for_fip(self):
        ri = self._create_router()
        ri.process_floating_ip_nat_rules = mock.Mock(side_effect=Exception)
        self.assertRaises(n_exc.FloatingIpSetupException,
                          ri.process_snat_dnat_for_fip)
        ri.process_floating_ip_nat_rules.assert_called_once_with()

    def test_put_fips_in_error_state(self):
        ri = self._create_router()
        ri.router = mock.Mock()
        ri.router.get.return_value = [{'id': mock.sentinel.id1},
                                      {'id': mock.sentinel.id2}]
        statuses = ri.put_fips_in_error_state()
        # Every floating IP on the router must be reported as ERROR.
        # Fixed: the expected dict was previously wrapped in a list and
        # compared with assertNotEqual, which passed for any return value.
        expected = {mock.sentinel.id1: l3_constants.FLOATINGIP_STATUS_ERROR,
                    mock.sentinel.id2: l3_constants.FLOATINGIP_STATUS_ERROR}
        self.assertEqual(expected, statuses)

    def test_configure_fip_addresses(self):
        ri = self._create_router()
        ri.process_floating_ip_addresses = mock.Mock(
            side_effect=Exception)
        self.assertRaises(n_exc.FloatingIpSetupException,
                          ri.configure_fip_addresses,
                          mock.sentinel.interface_name)
        ri.process_floating_ip_addresses.assert_called_once_with(
            mock.sentinel.interface_name)

    def test_get_router_cidrs_returns_cidrs(self):
        ri = self._create_router()
        addresses = ['15.1.2.2/24', '15.1.2.3/32']
        device = mock.MagicMock()
        device.addr.list.return_value = [{'cidr': addresses[0]},
                                         {'cidr': addresses[1]}]
        self.assertEqual(set(addresses), ri.get_router_cidrs(device))
@mock.patch.object(ip_lib, 'IPDevice')
class TestFloatingIpWithMockDevice(BasicRouterTestCaseFramework):
    """Floating-IP address processing with ip_lib.IPDevice patched out.

    The class-level patch injects the IPDevice mock as the final
    positional argument of every test method.
    """
    def test_process_floating_ip_addresses_remap(self, IPDevice):
        # The fip address is already present on the device: no add/delete
        # should happen and the status is reported ACTIVE.
        fip_id = _uuid()
        fip = {
            'id': fip_id, 'port_id': _uuid(),
            'floating_ip_address': '15.1.2.3',
            'fixed_ip_address': '192.168.0.2'
        }
        IPDevice.return_value = device = mock.Mock()
        device.addr.list.return_value = [{'cidr': '15.1.2.3/32'}]
        ri = self._create_router()
        ri.get_floating_ips = mock.Mock(return_value=[fip])
        fip_statuses = ri.process_floating_ip_addresses(
            mock.sentinel.interface_name)
        self.assertEqual({fip_id: l3_constants.FLOATINGIP_STATUS_ACTIVE},
                         fip_statuses)
        self.assertFalse(device.addr.add.called)
        self.assertFalse(device.addr.delete.called)
    def test_process_router_with_disabled_floating_ip(self, IPDevice):
        # A fip no longer returned by get_floating_ips gets no status.
        fip_id = _uuid()
        fip = {
            'id': fip_id, 'port_id': _uuid(),
            'floating_ip_address': '15.1.2.3',
            'fixed_ip_address': '192.168.0.2'
        }
        ri = self._create_router()
        ri.floating_ips = [fip]
        ri.get_floating_ips = mock.Mock(return_value=[])
        fip_statuses = ri.process_floating_ip_addresses(
            mock.sentinel.interface_name)
        self.assertIsNone(fip_statuses.get(fip_id))
    def test_process_router_floating_ip_with_device_add_error(self, IPDevice):
        # add_floating_ip failing should surface as an ERROR status.
        IPDevice.return_value = device = mock.Mock(side_effect=RuntimeError)
        device.addr.list.return_value = []
        fip_id = _uuid()
        fip = {
            'id': fip_id, 'port_id': _uuid(),
            'floating_ip_address': '15.1.2.3',
            'fixed_ip_address': '192.168.0.2',
            'status': 'DOWN'
        }
        ri = self._create_router()
        ri.add_floating_ip = mock.Mock(
            return_value=l3_constants.FLOATINGIP_STATUS_ERROR)
        ri.get_floating_ips = mock.Mock(return_value=[fip])
        fip_statuses = ri.process_floating_ip_addresses(
            mock.sentinel.interface_name)
        self.assertEqual({fip_id: l3_constants.FLOATINGIP_STATUS_ERROR},
                         fip_statuses)
    # TODO(mrsmith): refactor for DVR cases
    def test_process_floating_ip_addresses_remove(self, IPDevice):
        # An address on the device with no matching router fip should be
        # removed.
        IPDevice.return_value = device = mock.Mock()
        device.addr.list.return_value = [{'cidr': '15.1.2.3/32'}]
        ri = self._create_router()
        ri.remove_floating_ip = mock.Mock()
        ri.router.get = mock.Mock(return_value=[])
        fip_statuses = ri.process_floating_ip_addresses(
            mock.sentinel.interface_name)
        self.assertEqual({}, fip_statuses)
        ri.remove_floating_ip.assert_called_once_with(device, '15.1.2.3/32')
| |
from __future__ import print_function
import Tkinter as tk
import ttk
import tkMessageBox
import tkFileDialog
import os.path
DEFAULT_COLUMNS = 3
class SectionLabel(object):
    """A static heading row; it holds no value and is always valid."""

    def __init__(self, text):
        self.text = text

    def body(self, master, row, columns=DEFAULT_COLUMNS, **kwargs):
        """Grid the label across all *columns*; occupies one row."""
        ttk.Label(master, text=self.text).grid(
            row=row, column=0, columnspan=columns, sticky="w")
        return 1

    def validate(self):
        # Nothing to check for a static label.
        return True

    def apply(self):
        # No configuration value to store.
        return None

    def enable(self):
        pass

    def disable(self):
        pass
class Checkbox(object):
    """A ttk.Checkbutton bound to the boolean config entry ``cfg[key]``."""

    def __init__(self, text, cfg, key):
        self.checkbox = None
        self.enabled = True
        self.value = tk.BooleanVar()
        self.text = text
        self.cfg = cfg
        self.key = key
        # Seed the variable from the config; anything missing or
        # non-boolean falls back to False.
        try:
            stored = self.cfg[self.key]
        except KeyError:
            stored = False  # default to False
        else:
            if stored not in (True, False):
                stored = False
        self.value.set(stored)

    def body(self, master, row, columns=DEFAULT_COLUMNS, **kwargs):
        """
        Place the required elements using the grid layout method.
        Returns the number of rows taken by this element.
        """
        self.checkbox = ttk.Checkbutton(master, text=self.text,
                                        variable=self.value)
        self.checkbox.grid(row=row, column=0, columnspan=columns, sticky="w")
        return 1

    def validate(self):
        # A checkbox is either on or off; always valid.
        return True

    def apply(self):
        # Disabled widgets write None so the key is explicitly unset.
        self.cfg[self.key] = self.value.get() if self.enabled else None

    def enable(self):
        self.enabled = True
        self.checkbox.state(["!disabled"])

    def disable(self):
        self.enabled = False
        self.checkbox.state(["disabled"])
class MyEntry(object):
    """
    Base class for labeled Entry fields.
    *text* is the Label/error box text.
    """

    def __init__(self, text, cfg, key, optional=False):
        self.entry = None
        self.enabled = True
        self.value = tk.StringVar()
        self.text = text
        self.cfg = cfg
        self.key = key
        self.optional = optional
        # Seed the variable from the config; a missing key or an explicit
        # None both become the empty string.
        try:
            stored = self.cfg[self.key]
        except KeyError:
            stored = None
        self.value.set("" if stored is None else stored)

    def body(self, master, row, columns=DEFAULT_COLUMNS, **kwargs):
        """
        Place the required elements using the grid layout method.
        Returns the number of rows taken by this element.
        """
        ttk.Label(master, text=self.text).grid(
            row=row, column=0, columnspan=1, sticky="e")
        self.entry = ttk.Entry(master, textvariable=self.value)
        self.entry.grid(row=row, column=1, columnspan=columns - 1, sticky="ew")
        return 1

    def validate(self):
        """
        Validates the input. Returns ``True`` unless the field is blank and
        *optional* is ``False``.
        """
        if not self.enabled:
            return True
        if self.optional or len(self.value.get()) > 0:
            return True
        tkMessageBox.showwarning("", "{} not specified.".format(self.text))
        return False

    def apply(self):
        # Store the text when enabled and non-empty; otherwise clear to None.
        text = self.value.get()
        self.cfg[self.key] = text if (self.enabled and len(text) > 0) else None

    def enable(self):
        self.enabled = True
        self.entry.state(["!disabled"])

    def disable(self):
        self.enabled = False
        self.entry.state(["disabled"])
class FileEntry(MyEntry):
    """
    Creates a labeled Entry field for a file or directory.
    *text* is the Label/error box text.
    *directory* is ``True`` if selecting a directory (instead of a file).
    *extensions* is a list of valid file endings
    """
    def __init__(
        self, text, cfg, key, optional=False, directory=False, extensions=None
    ):
        MyEntry.__init__(self, text, cfg, key, optional)
        self.choose = None
        self.clear = None
        self.directory = directory
        # Lower-case the extension list so validation is case-insensitive.
        if extensions is not None:
            self.extensions = [x.lower() for x in extensions]
        else:
            self.extensions = None
    def body(self, master, row, columns=DEFAULT_COLUMNS, **kwargs):
        """
        Place the required elements using the grid layout method.
        Returns the number of rows taken by this element.
        """
        label = ttk.Label(master, text=self.text)
        label.grid(row=row, column=0, columnspan=1, sticky="e")
        self.entry = ttk.Entry(master, textvariable=self.value)
        self.entry.grid(row=row, column=1, columnspan=columns - 1, sticky="ew")
        # "Choose..." opens a directory or file picker depending on the
        # *directory* flag; the selection is written into self.value.
        if self.directory:
            self.choose = ttk.Button(
                master,
                text="Choose...",
                command=lambda: self.value.set(tkFileDialog.askdirectory()),
            )
        else:
            self.choose = ttk.Button(
                master,
                text="Choose...",
                command=lambda: self.value.set(tkFileDialog.askopenfilename()),
            )
        self.choose.grid(row=row + 1, column=1, sticky="w")
        # Optional fields also get a "Clear" button to blank the entry.
        if self.optional:
            self.clear = ttk.Button(
                master, text="Clear", command=lambda: self.value.set("")
            )
            self.clear.grid(row=row + 1, column=2, sticky="e")
        return 2
    def validate(self):
        """
        Return ``True`` when the field is acceptable: blank-but-optional,
        or an existing path with an allowed extension (when restricted).
        Shows a warning dialog and returns ``False`` otherwise.
        """
        if not self.enabled:
            return True
        elif len(self.value.get()) == 0:
            # Blank is only acceptable for optional fields.
            if not self.optional:
                tkMessageBox.showwarning("", "{} not specified.".format(self.text))
                return False
            else:
                return True
        else:
            if os.path.exists(self.value.get()):
                if self.extensions is not None:
                    # Restricted: the (lower-cased) path must end with one
                    # of the allowed endings.
                    if any(
                        self.value.get().lower().endswith(x) for x in self.extensions
                    ):
                        return True
                    else:
                        tkMessageBox.showwarning(
                            "", "Invalid file extension " "for {}.".format(self.text)
                        )
                        return False
                else:  # no extension restriction
                    return True
            else:
                tkMessageBox.showwarning(
                    "", "{} file does not exist." "".format(self.text)
                )
                return False
    def enable(self):
        self.enabled = True
        self.entry.state(["!disabled"])
        self.choose.state(["!disabled"])
        if self.optional:
            self.clear.state(["!disabled"])
    def disable(self):
        self.enabled = False
        self.entry.state(["disabled"])
        self.choose.state(["disabled"])
        if self.optional:
            self.clear.state(["disabled"])
class StringEntry(MyEntry):
    """
    Creates a labeled Entry field for a string.
    *text* is the Label/error box text.

    MyEntry's constructor, layout, validation and apply already implement
    exactly what a plain string field needs, so this subclass only
    provides a descriptive name; the former ``__init__`` and ``body``
    overrides were byte-for-byte duplicates of MyEntry's.
    """
class IntegerEntry(MyEntry):
    """
    Creates a labeled Entry field for an integer.
    *text* is the Label/error box text.
    """
    def __init__(self, text, cfg, key, optional=False, minvalue=0):
        MyEntry.__init__(self, text, cfg, key, optional)
        # Smallest accepted value; checked in validate().
        self.minvalue = minvalue
    def body(self, master, row, columns=DEFAULT_COLUMNS, width=4, left=False, **kwargs):
        """
        Add the labeled entry to the Frame *master* using grid at *row*.
        *width* controls the width of the Entry.
        *left* is ``True`` if the Entry is to the left of the Label.
        *columns* is the number of columns in *master*.
        Returns the number of rows taken by this element.
        """
        # Compute mirrored layouts: entry-left-of-label or the default
        # label-left-of-entry.
        if left:
            entry_column = 0
            entry_sticky = "e"
            entry_width = 1
            label_column = 1
            label_sticky = "w"
            label_width = columns - 1
        else:
            entry_column = 1
            entry_sticky = "w"
            entry_width = columns - 1
            label_column = 0
            label_sticky = "e"
            label_width = 1
        label = ttk.Label(master, text=self.text)
        label.grid(
            row=row, column=label_column, columnspan=label_width, sticky=label_sticky
        )
        self.entry = ttk.Entry(master, textvariable=self.value, width=width)
        self.entry.grid(
            row=row, column=entry_column, columnspan=entry_width, sticky=entry_sticky
        )
        return 1
    def validate(self):
        """
        Returns ``True`` if the value entered validates; else ``False``.
        If *self.optional* is ``True``, the field can be empty.
        Checks the *self.minvalue* that was passed on creation.
        """
        if not self.enabled:
            return True
        else:
            try:
                intvalue = int(self.value.get())
            except ValueError:
                # int() failed: either the field is empty (fine when
                # optional) or it holds non-numeric text.
                if len(self.value.get()) == 0:
                    if not self.optional:
                        tkMessageBox.showwarning(
                            "", "{} not specified." "".format(self.text)
                        )
                        return False
                    else:
                        return True
                else:
                    tkMessageBox.showwarning(
                        "", "{} is not an integer." "".format(self.text)
                    )
                    return False
            else:
                # Parsed successfully: enforce the lower bound.
                if intvalue < self.minvalue:
                    tkMessageBox.showwarning(
                        "",
                        "{} lower than minimum value "
                        "({}).".format(self.text, self.minvalue),
                    )
                    return False
                else:
                    return True
    def apply(self):
        # Store the parsed integer when enabled and non-empty; else None.
        if self.enabled and len(self.value.get()) > 0:
            self.cfg[self.key] = int(self.value.get())
        else:
            self.cfg[self.key] = None
| |
# -*- coding: utf-8 -*-
"""Test support of the various forms of tabular data."""
from __future__ import print_function
from __future__ import unicode_literals
from tabulate import tabulate
from common import assert_equal, assert_in, assert_raises, SkipTest
def test_iterable_of_iterables():
    """Input: an iterable of iterables."""
    rows = iter([iter(x) for x in [range(5), range(5, 0, -1)]])
    expected = "\n".join(
        ['-  -  -  -  -',
         '0  1  2  3  4',
         '5  4  3  2  1',
         '-  -  -  -  -'])
    assert_equal(expected, tabulate(rows))
def test_iterable_of_iterables_headers():
    """Input: an iterable of iterables with headers."""
    rows = iter([iter(x) for x in [range(5), range(5, 0, -1)]])
    expected = "\n".join(
        ['  a    b    c    d    e',
         '---  ---  ---  ---  ---',
         '  0    1    2    3    4',
         '  5    4    3    2    1'])
    assert_equal(expected, tabulate(rows, "abcde"))
def test_iterable_of_iterables_firstrow():
    """Input: an iterable of iterables with the first row as headers."""
    rows = iter([iter(x) for x in ["abcde", range(5), range(5, 0, -1)]])
    expected = "\n".join(
        ['  a    b    c    d    e',
         '---  ---  ---  ---  ---',
         '  0    1    2    3    4',
         '  5    4    3    2    1'])
    assert_equal(expected, tabulate(rows, "firstrow"))
def test_list_of_lists():
    """Input: a list of lists with headers."""
    table = [["a", "one", 1], ["b", "two", None]]
    expected = "\n".join([
        '    string      number',
        '--  --------  --------',
        'a   one              1',
        'b   two'])
    assert_equal(expected, tabulate(table, headers=["string", "number"]))
def test_list_of_lists_firstrow():
    """Input: a list of lists with the first row as headers."""
    table = [["string", "number"], ["a", "one", 1], ["b", "two", None]]
    expected = "\n".join([
        '    string      number',
        '--  --------  --------',
        'a   one              1',
        'b   two'])
    assert_equal(expected, tabulate(table, headers="firstrow"))
def test_list_of_lists_keys():
    """Input: a list of lists with column indices as headers."""
    table = [["a", "one", 1], ["b", "two", None]]
    expected = "\n".join([
        '0    1      2',
        '---  ---  ---',
        'a    one    1',
        'b    two'])
    assert_equal(expected, tabulate(table, headers="keys"))
def test_dict_like():
    """Input: a dict of iterables with keys as headers."""
    # columns should be padded with None, keys should be used as headers
    table = {"a": range(3), "b": range(101, 105)}
    # keys' order (hence columns' order) is not deterministic in Python 3
    # => we have to consider both possible results as valid
    expected1 = "\n".join([
        '  a    b',
        '---  ---',
        '  0  101',
        '  1  102',
        '  2  103',
        '     104'])
    expected2 = "\n".join([
        '  b    a',
        '---  ---',
        '101    0',
        '102    1',
        '103    2',
        '104'])
    result = tabulate(table, "keys")
    print("Keys' order: %s" % table.keys())
    assert_in(result, [expected1, expected2])
def test_numpy_2d():
    "Input: a 2D NumPy array with headers."
    # numpy is an optional dependency; absence skips the test.
    try:
        import numpy
        na = (numpy.arange(1,10, dtype=numpy.float32).reshape((3,3))**3)*0.5
        expected = "\n".join([
            '    a      b      c',
            '-----  -----  -----',
            '  0.5    4     13.5',
            ' 32     62.5  108',
            '171.5  256    364.5'])
        result = tabulate(na, ["a", "b", "c"])
        assert_equal(expected, result)
    except ImportError:
        print("test_numpy_2d is skipped")
        raise SkipTest()   # this test is optional
def test_numpy_2d_firstrow():
    "Input: a 2D NumPy array with the first row as headers."
    # numpy is an optional dependency; absence skips the test.
    try:
        import numpy
        na = (numpy.arange(1,10, dtype=numpy.int32).reshape((3,3))**3)
        expected = "\n".join([
            '  1    8    27',
            '---  ---  ----',
            ' 64  125   216',
            '343  512   729'])
        result = tabulate(na, headers="firstrow")
        assert_equal(expected, result)
    except ImportError:
        print("test_numpy_2d_firstrow is skipped")
        raise SkipTest()   # this test is optional
def test_numpy_2d_keys():
    "Input: a 2D NumPy array with column indices as headers."
    # numpy is an optional dependency; absence skips the test.
    try:
        import numpy
        na = (numpy.arange(1,10, dtype=numpy.float32).reshape((3,3))**3)*0.5
        expected = "\n".join([
            '    0      1      2',
            '-----  -----  -----',
            '  0.5    4     13.5',
            ' 32     62.5  108',
            '171.5  256    364.5'])
        result = tabulate(na, headers="keys")
        assert_equal(expected, result)
    except ImportError:
        print("test_numpy_2d_keys is skipped")
        raise SkipTest()   # this test is optional
def test_numpy_record_array():
    "Input: a 2D NumPy record array without header."
    # numpy is an optional dependency; absence skips the test.
    try:
        import numpy
        na = numpy.asarray([("Alice", 23, 169.5),
                            ("Bob", 27, 175.0)],
                           dtype={"names":["name","age","height"],
                                  "formats":["a32","uint8","float32"]})
        expected = "\n".join([
            "-----  --  -----",
            "Alice  23  169.5",
            "Bob    27  175",
            "-----  --  -----" ])
        result = tabulate(na)
        assert_equal(expected, result)
    except ImportError:
        # Fixed skip message: it previously named test_numpy_2d_keys
        # (copy-paste error).
        print("test_numpy_record_array is skipped")
        raise SkipTest()   # this test is optional
def test_numpy_record_array_keys():
    "Input: a 2D NumPy record array with column names as headers."
    # numpy is an optional dependency; absence skips the test.
    try:
        import numpy
        na = numpy.asarray([("Alice", 23, 169.5),
                            ("Bob", 27, 175.0)],
                           dtype={"names":["name","age","height"],
                                  "formats":["a32","uint8","float32"]})
        expected = "\n".join([
            "name      age    height",
            "------  -----  --------",
            "Alice      23     169.5",
            "Bob        27     175" ])
        result = tabulate(na, headers="keys")
        assert_equal(expected, result)
    except ImportError:
        # Fixed skip message: it previously named test_numpy_2d_keys
        # (copy-paste error).
        print("test_numpy_record_array_keys is skipped")
        raise SkipTest()   # this test is optional
def test_numpy_record_array_headers():
    "Input: a 2D NumPy record array with user-supplied headers."
    # numpy is an optional dependency; absence skips the test.
    try:
        import numpy
        na = numpy.asarray([("Alice", 23, 169.5),
                            ("Bob", 27, 175.0)],
                           dtype={"names":["name","age","height"],
                                  "formats":["a32","uint8","float32"]})
        expected = "\n".join([
            "person      years     cm",
            "--------  -------  -----",
            "Alice          23  169.5",
            "Bob            27  175" ])
        result = tabulate(na, headers=["person", "years", "cm"])
        assert_equal(expected, result)
    except ImportError:
        # Fixed skip message: it previously named test_numpy_2d_keys
        # (copy-paste error).
        print("test_numpy_record_array_headers is skipped")
        raise SkipTest()   # this test is optional
def test_pandas():
    "Input: a Pandas DataFrame."
    # pandas is an optional dependency; absence skips the test.
    try:
        import pandas
        df = pandas.DataFrame([["one",1],["two",None]], index=["a","b"])
        expected = "\n".join([
            '    string      number',
            '--  --------  --------',
            'a   one              1',
            'b   two            nan'])
        result = tabulate(df, headers=["string", "number"])
        assert_equal(expected, result)
    except ImportError:
        print("test_pandas is skipped")
        raise SkipTest()   # this test is optional
def test_pandas_firstrow():
    "Input: a Pandas DataFrame with the first row as headers."
    # pandas is an optional dependency; absence skips the test.
    try:
        import pandas
        df = pandas.DataFrame([["one",1],["two",None]],
                              columns=["string","number"],
                              index=["a","b"])
        expected = "\n".join([
            'a    one      1.0',
            '---  -----  -----',
            'b    two      nan'])
        result = tabulate(df, headers="firstrow")
        assert_equal(expected, result)
    except ImportError:
        print("test_pandas_firstrow is skipped")
        raise SkipTest()   # this test is optional
def test_pandas_keys():
    "Input: a Pandas DataFrame with keys as headers."
    # pandas is an optional dependency; absence skips the test.
    try:
        import pandas
        df = pandas.DataFrame([["one",1],["two",None]],
                              columns=["string","number"],
                              index=["a","b"])
        expected = "\n".join(
            ['    string      number',
             '--  --------  --------',
             'a   one              1',
             'b   two            nan'])
        result = tabulate(df, headers="keys")
        assert_equal(expected, result)
    except ImportError:
        print("test_pandas_keys is skipped")
        raise SkipTest()   # this test is optional
def test_sqlite3():
    "Input: an sqlite3 cursor"
    # sqlite3 may be missing from minimal builds; absence skips the test.
    try:
        import sqlite3
        conn = sqlite3.connect(':memory:')
        cursor = conn.cursor()
        cursor.execute('CREATE TABLE people (name, age, height)')
        for values in [
            ("Alice", 23, 169.5),
            ("Bob", 27, 175.0)]:
            cursor.execute('INSERT INTO people VALUES (?, ?, ?)', values)
        cursor.execute('SELECT name, age, height FROM people ORDER BY name')
        # Headers are supplied explicitly; the cursor is consumed directly.
        result = tabulate(cursor, headers=["whom", "how old", "how tall"])
        expected = """\
whom      how old    how tall
------  ---------  ----------
Alice          23       169.5
Bob            27       175"""
        assert_equal(expected, result)
    except ImportError:
        print("test_sqlite3 is skipped")
        raise SkipTest()   # this test is optional
def test_sqlite3_keys():
    "Input: an sqlite3 cursor with keys as headers"
    # sqlite3 may be missing from minimal builds; absence skips the test.
    try:
        import sqlite3
        conn = sqlite3.connect(':memory:')
        cursor = conn.cursor()
        cursor.execute('CREATE TABLE people (name, age, height)')
        for values in [
            ("Alice", 23, 169.5),
            ("Bob", 27, 175.0)]:
            cursor.execute('INSERT INTO people VALUES (?, ?, ?)', values)
        # Column aliases in the SELECT become the "keys" headers.
        cursor.execute('SELECT name "whom", age "how old", height "how tall" FROM people ORDER BY name')
        result = tabulate(cursor, headers="keys")
        expected = """\
whom      how old    how tall
------  ---------  ----------
Alice          23       169.5
Bob            27       175"""
        assert_equal(expected, result)
    except ImportError:
        print("test_sqlite3_keys is skipped")
        raise SkipTest()   # this test is optional
def test_list_of_namedtuples():
    """Input: a list of named tuples with field names as headers."""
    from collections import namedtuple
    NT = namedtuple("NT", ['foo', 'bar'])
    rows = [NT(1, 2), NT(3, 4)]
    expected = "\n".join([
        '-  -',
        '1  2',
        '3  4',
        '-  -'])
    assert_equal(expected, tabulate(rows))
def test_list_of_namedtuples_keys():
    """Input: a list of named tuples with field names as headers."""
    from collections import namedtuple
    NT = namedtuple("NT", ['foo', 'bar'])
    rows = [NT(1, 2), NT(3, 4)]
    expected = "\n".join([
        '  foo    bar',
        '-----  -----',
        '    1      2',
        '    3      4'])
    assert_equal(expected, tabulate(rows, headers="keys"))
def test_list_of_dicts():
    """Input: a list of dictionaries (either column order is valid)."""
    rows = [{'foo': 1, 'bar': 2}, {'foo': 3, 'bar': 4}]
    expected1 = "\n".join([
        '-  -',
        '1  2',
        '3  4',
        '-  -'])
    expected2 = "\n".join([
        '-  -',
        '2  1',
        '4  3',
        '-  -'])
    assert_in(tabulate(rows), [expected1, expected2])
def test_list_of_dicts_keys():
    """Input: a list of dictionaries, with keys as headers."""
    rows = [{'foo': 1, 'bar': 2}, {'foo': 3, 'bar': 4}]
    expected1 = "\n".join([
        '  foo    bar',
        '-----  -----',
        '    1      2',
        '    3      4'])
    expected2 = "\n".join([
        '  bar    foo',
        '-----  -----',
        '    2      1',
        '    4      3'])
    assert_in(tabulate(rows, headers="keys"), [expected1, expected2])
def test_list_of_dicts_with_missing_keys():
    """Input: a list of dictionaries, with missing keys."""
    rows = [{"foo": 1}, {"bar": 2}, {"foo": 4, "baz": 3}]
    expected = "\n".join([
        '  foo    bar    baz',
        '-----  -----  -----',
        '    1',
        '           2',
        '    4             3'])
    assert_equal(expected, tabulate(rows, headers="keys"))
def test_list_of_dicts_firstrow():
    """Input: a list of dictionaries, with the first dict as headers."""
    rows = [{'foo': "FOO", 'bar': "BAR"}, {'foo': 3, 'bar': 4, 'baz': 5}]
    # if some key is missing in the first dict, use the key name instead
    expected1 = "\n".join([
        '  FOO    BAR    baz',
        '-----  -----  -----',
        '    3      4      5'])
    expected2 = "\n".join([
        '  BAR    FOO    baz',
        '-----  -----  -----',
        '    4      3      5'])
    assert_in(tabulate(rows, headers="firstrow"), [expected1, expected2])
def test_list_of_dicts_with_dict_of_headers():
    """Input: a dict of user headers for a list of dicts (issue #23)."""
    rows = [{"letters": "ABCDE", "digits": 12345}]
    header_map = {"digits": "DIGITS", "letters": "LETTERS"}
    expected1 = "\n".join([
        '  DIGITS  LETTERS',
        '--------  ---------',
        '   12345  ABCDE'])
    expected2 = "\n".join([
        'LETTERS      DIGITS',
        '---------  --------',
        'ABCDE         12345'])
    assert_in(tabulate(rows, headers=header_map), [expected1, expected2])
def test_list_of_dicts_with_list_of_headers():
    """Input: ValueError on a list of headers with a list of dicts (issue #23)."""
    rows = [{"letters": "ABCDE", "digits": 12345}]
    with assert_raises(ValueError):
        tabulate(rows, headers=["DIGITS", "LETTERS"])
def test_py27orlater_list_of_ordereddicts():
    """Input: a list of OrderedDicts (insertion order fixes columns)."""
    from collections import OrderedDict
    row = OrderedDict([('b', 1), ('a', 2)])
    expected = "\n".join([
        '  b    a',
        '---  ---',
        '  1    2',
        '  1    2'])
    assert_equal(expected, tabulate([row, row], headers="keys"))
| |
##########################################################################
#
# Copyright (c) 2012, John Haddon. All rights reserved.
# Copyright (c) 2013-2015, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import os
import shutil
import unittest
import imath
import random
import six
import IECore
import IECoreImage
import Gaffer
import GafferTest
import GafferImage
import GafferImageTest
class OpenImageIOReaderTest( GafferImageTest.ImageTestCase ) :

	"""Integration tests for the GafferImage.OpenImageIOReader node.

	Covers EXR/JPG/DPX reading, data/display window handling, hashing,
	file-sequence support, missing-frame modes and multipart EXR files.
	All fixture images live under $GAFFER_ROOT/python/GafferImageTest/images.
	"""

	# Fixture image paths used throughout the tests below.
	fileName = os.path.expandvars( "$GAFFER_ROOT/python/GafferImageTest/images/checker.exr" )
	offsetDataWindowFileName = os.path.expandvars( "$GAFFER_ROOT/python/GafferImageTest/images/rgb.100x100.exr" )
	negativeDataWindowFileName = os.path.expandvars( "$GAFFER_ROOT/python/GafferImageTest/images/checkerWithNegativeDataWindow.200x150.exr" )
	negativeDisplayWindowFileName = os.path.expandvars( "$GAFFER_ROOT/python/GafferImageTest/images/negativeDisplayWindow.exr" )
	circlesExrFileName = os.path.expandvars( "$GAFFER_ROOT/python/GafferImageTest/images/circles.exr" )
	circlesJpgFileName = os.path.expandvars( "$GAFFER_ROOT/python/GafferImageTest/images/circles.jpg" )
	alignmentTestSourceFileName = os.path.expandvars( "$GAFFER_ROOT/python/GafferImageTest/images/colorbars_half_max.exr" )
	multipartFileName = os.path.expandvars( "$GAFFER_ROOT/python/GafferImageTest/images/multipart.exr" )
	unsupportedMultipartFileName = os.path.expandvars( "$GAFFER_ROOT/python/GafferImageTest/images/unsupportedMultipart.exr" )

	def testInternalImageSpaceConversion( self ) :

		# The display/data windows read straight from the file (EXR space)
		# should round-trip through the reader's Gaffer-space format via
		# toEXRSpace().
		r = IECore.Reader.create( self.negativeDataWindowFileName )
		image = r.read()
		exrDisplayWindow = image.displayWindow
		exrDataWindow = image.dataWindow

		n = GafferImage.OpenImageIOReader()
		n["fileName"].setValue( self.negativeDataWindowFileName )
		gafferFormat = n["out"]["format"].getValue()

		self.assertEqual(
			gafferFormat.toEXRSpace( gafferFormat.getDisplayWindow() ),
			exrDisplayWindow,
		)

		self.assertEqual(
			gafferFormat.toEXRSpace( n["out"]["dataWindow"].getValue() ),
			exrDataWindow,
		)

	def test( self ) :

		# Basic read : windows, metadata, channel names, and pixel equality
		# against IECore's reader.
		n = GafferImage.OpenImageIOReader()
		n["fileName"].setValue( self.fileName )

		self.assertEqual( n["out"]["dataWindow"].getValue(), imath.Box2i( imath.V2i( 0 ), imath.V2i( 200, 150 ) ) )
		self.assertEqual( n["out"]["format"].getValue().getDisplayWindow(), imath.Box2i( imath.V2i( 0 ), imath.V2i( 200, 150 ) ) )

		expectedMetadata = IECore.CompoundData( {
			"oiio:ColorSpace" : IECore.StringData( 'Linear' ),
			"compression" : IECore.StringData( 'zips' ),
			"PixelAspectRatio" : IECore.FloatData( 1 ),
			"screenWindowCenter" : IECore.V2fData( imath.V2f( 0, 0 ) ),
			"screenWindowWidth" : IECore.FloatData( 1 ),
			"fileFormat" : IECore.StringData( "openexr" ),
			"dataType" : IECore.StringData( "float" ),
		} )
		self.assertEqual( n["out"]["metadata"].getValue(), expectedMetadata )

		channelNames = n["out"]["channelNames"].getValue()
		self.assertIsInstance( channelNames, IECore.StringVectorData )
		self.assertIn( "R", channelNames )
		self.assertIn( "G", channelNames )
		self.assertIn( "B", channelNames )
		self.assertIn( "A", channelNames )

		image = GafferImage.ImageAlgo.image( n["out"] )
		self.assertEqual( image.blindData(), IECore.CompoundData( dict(expectedMetadata) ) )

		# blind data (metadata) is cleared so only pixels are compared
		image2 = IECore.Reader.create( self.fileName ).read()
		image.blindData().clear()
		image2.blindData().clear()
		self.assertEqual( image, image2 )

	def testNegativeDisplayWindowRead( self ) :

		# Display windows extending into negative coordinates must be
		# preserved, and pixels must match IECore's reader.
		n = GafferImage.OpenImageIOReader()
		n["fileName"].setValue( self.negativeDisplayWindowFileName )
		f = n["out"]["format"].getValue()
		d = n["out"]["dataWindow"].getValue()
		self.assertEqual( f.getDisplayWindow(), imath.Box2i( imath.V2i( -5, -5 ), imath.V2i( 21, 21 ) ) )
		self.assertEqual( d, imath.Box2i( imath.V2i( 2, -14 ), imath.V2i( 36, 20 ) ) )

		expectedImage = IECore.Reader.create( self.negativeDisplayWindowFileName ).read()
		outImage = GafferImage.ImageAlgo.image( n["out"] )
		expectedImage.blindData().clear()
		outImage.blindData().clear()
		self.assertEqual( expectedImage, outImage )

	def testNegativeDataWindow( self ) :

		# Data windows extending into negative coordinates must be preserved.
		n = GafferImage.OpenImageIOReader()
		n["fileName"].setValue( self.negativeDataWindowFileName )
		self.assertEqual( n["out"]["dataWindow"].getValue(), imath.Box2i( imath.V2i( -25, -30 ), imath.V2i( 175, 120 ) ) )
		self.assertEqual( n["out"]["format"].getValue().getDisplayWindow(), imath.Box2i( imath.V2i( 0 ), imath.V2i( 200, 150 ) ) )

		channelNames = n["out"]["channelNames"].getValue()
		self.assertIsInstance( channelNames, IECore.StringVectorData )
		self.assertIn( "R", channelNames )
		self.assertIn( "G", channelNames )
		self.assertIn( "B", channelNames )

		# ImageDiffOp returns a truthy value when the images differ
		image = GafferImage.ImageAlgo.image( n["out"] )
		image2 = IECore.Reader.create( self.negativeDataWindowFileName ).read()

		op = IECoreImage.ImageDiffOp()
		res = op(
			imageA = image,
			imageB = image2
		)
		self.assertFalse( res.value )

	def testTileSize( self ) :

		# channelData() must always return exactly one tile's worth of pixels.
		n = GafferImage.OpenImageIOReader()
		n["fileName"].setValue( self.fileName )

		tile = n["out"].channelData( "R", imath.V2i( 0 ) )
		self.assertEqual( len( tile ), GafferImage.ImagePlug().tileSize() **2 )

	def testUnspecifiedFilename( self ) :

		# An empty fileName should not error; outputs fall back to defaults.
		n = GafferImage.OpenImageIOReader()
		n["out"]["channelNames"].getValue()
		n["out"].channelData( "R", imath.V2i( 0 ) )

	def testChannelDataHashes( self ) :
		# Test that two tiles within the same image have different hashes.
		n = GafferImage.OpenImageIOReader()
		n["fileName"].setValue( self.fileName )

		h1 = n["out"].channelData( "R", imath.V2i( 0 ) ).hash()
		h2 = n["out"].channelData( "R", imath.V2i( GafferImage.ImagePlug().tileSize() ) ).hash()

		self.assertNotEqual( h1, h2 )

	def testDisabledChannelDataHashes( self ) :
		# Test that two tiles within the same image have the same hash when disabled.
		n = GafferImage.OpenImageIOReader()
		n["fileName"].setValue( self.fileName )
		n["enabled"].setValue( False )

		h1 = n["out"].channelData( "R", imath.V2i( 0 ) ).hash()
		h2 = n["out"].channelData( "R", imath.V2i( GafferImage.ImagePlug().tileSize() ) ).hash()

		self.assertEqual( h1, h2 )

	def testOffsetDataWindowOrigin( self ) :

		# A data window whose origin is not (0,0) must read correctly.
		n = GafferImage.OpenImageIOReader()
		n["fileName"].setValue( self.offsetDataWindowFileName )

		image = GafferImage.ImageAlgo.image( n["out"] )
		image2 = IECore.Reader.create( self.offsetDataWindowFileName ).read()

		image.blindData().clear()
		image2.blindData().clear()

		self.assertEqual( image, image2 )

	def testJpgRead( self ) :

		# A JPG converted from sRGB to linear should match the reference EXR
		# to within a small tolerance.
		exrReader = GafferImage.OpenImageIOReader()
		exrReader["fileName"].setValue( self.circlesExrFileName )

		jpgReader = GafferImage.OpenImageIOReader()
		jpgReader["fileName"].setValue( self.circlesJpgFileName )
		jpgOCIO = GafferImage.ColorSpace()
		jpgOCIO["in"].setInput( jpgReader["out"] )
		jpgOCIO["inputSpace"].setValue( "sRGB" )
		jpgOCIO["outputSpace"].setValue( "linear" )

		self.assertImagesEqual( exrReader["out"], jpgOCIO["out"], ignoreMetadata = True, maxDifference = 0.001 )

	def testSupportedExtensions( self ) :

		# Spot-check the common formats advertised as supported.
		e = GafferImage.OpenImageIOReader.supportedExtensions()

		self.assertTrue( "exr" in e )
		self.assertTrue( "jpg" in e )
		self.assertTrue( "tif" in e )
		self.assertTrue( "png" in e )
		self.assertTrue( "cin" in e )
		self.assertTrue( "dpx" in e )

	def testFileRefresh( self ) :

		testFile = self.temporaryDirectory() + "/refresh.exr"
		shutil.copyfile( self.fileName, testFile )

		reader = GafferImage.OpenImageIOReader()
		reader["fileName"].setValue( testFile )
		image1 = GafferImage.ImageAlgo.image( reader["out"] )

		# even though we've change the image on disk, gaffer will
		# still have the old one in its cache.
		shutil.copyfile( self.offsetDataWindowFileName, testFile )
		self.assertEqual( GafferImage.ImageAlgo.image( reader["out"] ), image1 )

		# until we force a refresh
		reader["refreshCount"].setValue( reader["refreshCount"].getValue() + 1 )
		self.assertNotEqual( GafferImage.ImageAlgo.image( reader["out"] ), image1 )

	def testNonexistentFiles( self ) :

		# Every output plug should raise, mentioning the missing file's name.
		reader = GafferImage.OpenImageIOReader()
		reader["fileName"].setValue( "wellIDontExist.exr" )

		six.assertRaisesRegex( self, RuntimeError, ".*wellIDontExist.exr.*", reader["out"]["format"].getValue )
		six.assertRaisesRegex( self, RuntimeError, ".*wellIDontExist.exr.*", reader["out"]["dataWindow"].getValue )
		six.assertRaisesRegex( self, RuntimeError, ".*wellIDontExist.exr.*", reader["out"]["metadata"].getValue )
		six.assertRaisesRegex( self, RuntimeError, ".*wellIDontExist.exr.*", reader["out"]["channelNames"].getValue )
		six.assertRaisesRegex( self, RuntimeError, ".*wellIDontExist.exr.*", reader["out"].channelData, "R", imath.V2i( 0 ) )
		six.assertRaisesRegex( self, RuntimeError, ".*wellIDontExist.exr.*", GafferImage.ImageAlgo.image, reader["out"] )

	def testAvailableFrames( self ) :

		# availableFrames reflects the frames present on disk for a sequence.
		testSequence = IECore.FileSequence( self.temporaryDirectory() + "/incompleteSequence.####.exr" )
		shutil.copyfile( self.fileName, testSequence.fileNameForFrame( 1 ) )
		shutil.copyfile( self.offsetDataWindowFileName, testSequence.fileNameForFrame( 3 ) )

		reader = GafferImage.OpenImageIOReader()
		reader["fileName"].setValue( testSequence.fileName )

		self.assertEqual( reader["availableFrames"].getValue(), IECore.IntVectorData( [ 1, 3 ] ) )

		# it doesn't update until we refresh
		shutil.copyfile( self.offsetDataWindowFileName, testSequence.fileNameForFrame( 5 ) )
		self.assertEqual( reader["availableFrames"].getValue(), IECore.IntVectorData( [ 1, 3 ] ) )
		reader["refreshCount"].setValue( reader["refreshCount"].getValue() + 1 )
		self.assertEqual( reader["availableFrames"].getValue(), IECore.IntVectorData( [ 1, 3, 5 ] ) )

		# explicit file paths aren't considered a sequence
		reader["fileName"].setValue( self.fileName )
		self.assertEqual( reader["availableFrames"].getValue(), IECore.IntVectorData( [] ) )
		reader["fileName"].setValue( testSequence.fileNameForFrame( 1 ) )
		self.assertEqual( reader["availableFrames"].getValue(), IECore.IntVectorData( [] ) )

	def testMissingFrameMode( self ) :

		# Exercises Error / Hold / Black behaviour for frames missing from a
		# sequence, both inside and before the sequence's range.
		testSequence = IECore.FileSequence( self.temporaryDirectory() + "/incompleteSequence.####.exr" )
		shutil.copyfile( self.fileName, testSequence.fileNameForFrame( 1 ) )
		shutil.copyfile( self.offsetDataWindowFileName, testSequence.fileNameForFrame( 3 ) )

		reader = GafferImage.OpenImageIOReader()
		reader["fileName"].setValue( testSequence.fileName )

		context = Gaffer.Context()

		# get frame 1 data for comparison
		context.setFrame( 1 )
		with context :
			f1Image = GafferImage.ImageAlgo.image( reader["out"] )
			f1Format = reader["out"]["format"].getValue()
			f1DataWindow = reader["out"]["dataWindow"].getValue()
			f1Metadata = reader["out"]["metadata"].getValue()
			f1ChannelNames = reader["out"]["channelNames"].getValue()
			f1Tile = reader["out"].channelData( "R", imath.V2i( 0 ) )

		# make sure the tile we're comparing isn't black
		# so we can tell if MissingFrameMode::Black is working.
		blackTile = IECore.FloatVectorData( [ 0 ] * GafferImage.ImagePlug.tileSize() * GafferImage.ImagePlug.tileSize() )
		self.assertNotEqual( f1Tile, blackTile )

		# set to a missing frame
		context.setFrame( 2 )

		# everything throws
		reader["missingFrameMode"].setValue( GafferImage.OpenImageIOReader.MissingFrameMode.Error )
		with context :
			six.assertRaisesRegex( self, RuntimeError, ".*incompleteSequence.*.exr.*", GafferImage.ImageAlgo.image, reader["out"] )
			six.assertRaisesRegex( self, RuntimeError, ".*incompleteSequence.*.exr.*", reader["out"]["format"].getValue )
			six.assertRaisesRegex( self, RuntimeError, ".*incompleteSequence.*.exr.*", reader["out"]["dataWindow"].getValue )
			six.assertRaisesRegex( self, RuntimeError, ".*incompleteSequence.*.exr.*", reader["out"]["metadata"].getValue )
			six.assertRaisesRegex( self, RuntimeError, ".*incompleteSequence.*.exr.*", reader["out"]["channelNames"].getValue )
			six.assertRaisesRegex( self, RuntimeError, ".*incompleteSequence.*.exr.*", reader["out"].channelData, "R", imath.V2i( 0 ) )

		# everything matches frame 1
		reader["missingFrameMode"].setValue( GafferImage.OpenImageIOReader.MissingFrameMode.Hold )
		with context :
			self.assertEqual( GafferImage.ImageAlgo.image( reader["out"] ), f1Image )
			self.assertEqual( reader["out"]["format"].getValue(), f1Format )
			self.assertEqual( reader["out"]["dataWindow"].getValue(), f1DataWindow )
			self.assertEqual( reader["out"]["metadata"].getValue(), f1Metadata )
			self.assertEqual( reader["out"]["channelNames"].getValue(), f1ChannelNames )
			self.assertEqual( reader["out"].channelData( "R", imath.V2i( 0 ) ), f1Tile )

		# the windows match frame 1, but everything else is default
		reader["missingFrameMode"].setValue( GafferImage.OpenImageIOReader.MissingFrameMode.Black )
		with context :
			self.assertNotEqual( GafferImage.ImageAlgo.image( reader["out"] ), f1Image )
			self.assertEqual( reader["out"]["format"].getValue(), f1Format )
			self.assertEqual( reader["out"]["dataWindow"].getValue(), reader["out"]["dataWindow"].defaultValue() )
			self.assertEqual( reader["out"]["metadata"].getValue(), reader["out"]["metadata"].defaultValue() )
			self.assertEqual( reader["out"]["channelNames"].getValue(), reader["out"]["channelNames"].defaultValue() )
			self.assertEqual( reader["out"].channelData( "R", imath.V2i( 0 ) ), blackTile )

		# get frame 3 data for comparison
		context.setFrame( 3 )
		with context :
			f3Image = GafferImage.ImageAlgo.image( reader["out"] )
			f3Format = reader["out"]["format"].getValue()
			f3DataWindow = reader["out"]["dataWindow"].getValue()
			f3Metadata = reader["out"]["metadata"].getValue()
			f3ChannelNames = reader["out"]["channelNames"].getValue()
			f3Tile = reader["out"].channelData( "R", imath.V2i( 0 ) )

		# set to a different missing frame
		context.setFrame( 4 )

		# everything matches frame 3
		reader["missingFrameMode"].setValue( GafferImage.OpenImageIOReader.MissingFrameMode.Hold )
		with context :
			self.assertNotEqual( GafferImage.ImageAlgo.image( reader["out"] ), f1Image )
			self.assertNotEqual( reader["out"]["format"].getValue(), f1Format )
			self.assertNotEqual( reader["out"]["dataWindow"].getValue(), f1DataWindow )
			self.assertNotEqual( reader["out"]["metadata"].getValue(), f1Metadata )
			# same channel names is fine
			self.assertEqual( reader["out"]["channelNames"].getValue(), f1ChannelNames )
			self.assertNotEqual( reader["out"].channelData( "R", imath.V2i( 0 ) ), f1Tile )
			self.assertEqual( GafferImage.ImageAlgo.image( reader["out"] ), f3Image )
			self.assertEqual( reader["out"]["format"].getValue(), f3Format )
			self.assertEqual( reader["out"]["dataWindow"].getValue(), f3DataWindow )
			self.assertEqual( reader["out"]["metadata"].getValue(), f3Metadata )
			self.assertEqual( reader["out"]["channelNames"].getValue(), f3ChannelNames )
			self.assertEqual( reader["out"].channelData( "R", imath.V2i( 0 ) ), f3Tile )

		# the windows match frame 3, but everything else is default
		reader["missingFrameMode"].setValue( GafferImage.OpenImageIOReader.MissingFrameMode.Black )
		with context :
			self.assertNotEqual( reader["out"]["format"].getValue(), f1Format )
			self.assertEqual( reader["out"]["format"].getValue(), f3Format )
			self.assertEqual( reader["out"]["dataWindow"].getValue(), reader["out"]["dataWindow"].defaultValue() )
			self.assertEqual( reader["out"]["metadata"].getValue(), reader["out"]["metadata"].defaultValue() )
			self.assertEqual( reader["out"]["channelNames"].getValue(), reader["out"]["channelNames"].defaultValue() )
			self.assertEqual( reader["out"].channelData( "R", imath.V2i( 0 ) ), blackTile )

		# set to a missing frame before the start of the sequence
		context.setFrame( 0 )

		# everything matches frame 1
		reader["missingFrameMode"].setValue( GafferImage.OpenImageIOReader.MissingFrameMode.Hold )
		with context :
			self.assertEqual( GafferImage.ImageAlgo.image( reader["out"] ), f1Image )
			self.assertEqual( reader["out"]["format"].getValue(), f1Format )
			self.assertEqual( reader["out"]["dataWindow"].getValue(), f1DataWindow )
			self.assertEqual( reader["out"]["metadata"].getValue(), f1Metadata )
			self.assertEqual( reader["out"].channelData( "R", imath.V2i( 0 ) ), f1Tile )

		# the windows match frame 1, but everything else is default
		reader["missingFrameMode"].setValue( GafferImage.OpenImageIOReader.MissingFrameMode.Black )
		with context :
			self.assertEqual( reader["out"]["format"].getValue(), f1Format )
			self.assertEqual( reader["out"]["dataWindow"].getValue(), reader["out"]["dataWindow"].defaultValue() )
			self.assertEqual( reader["out"]["metadata"].getValue(), reader["out"]["metadata"].defaultValue() )
			self.assertEqual( reader["out"]["channelNames"].getValue(), reader["out"]["channelNames"].defaultValue() )
			self.assertEqual( reader["out"].channelData( "R", imath.V2i( 0 ) ), blackTile )

		# explicit fileNames do not support MissingFrameMode
		reader["fileName"].setValue( testSequence.fileNameForFrame( 0 ) )
		reader["missingFrameMode"].setValue( GafferImage.OpenImageIOReader.MissingFrameMode.Hold )
		with context :
			six.assertRaisesRegex( self, RuntimeError, ".*incompleteSequence.*.exr.*", GafferImage.ImageAlgo.image, reader["out"] )
			six.assertRaisesRegex( self, RuntimeError, ".*incompleteSequence.*.exr.*", reader["out"]["format"].getValue )
			six.assertRaisesRegex( self, RuntimeError, ".*incompleteSequence.*.exr.*", reader["out"]["dataWindow"].getValue )
			six.assertRaisesRegex( self, RuntimeError, ".*incompleteSequence.*.exr.*", reader["out"]["metadata"].getValue )
			six.assertRaisesRegex( self, RuntimeError, ".*incompleteSequence.*.exr.*", reader["out"]["channelNames"].getValue )
			six.assertRaisesRegex( self, RuntimeError, ".*incompleteSequence.*.exr.*", reader["out"].channelData, "R", imath.V2i( 0 ) )

		reader["missingFrameMode"].setValue( GafferImage.OpenImageIOReader.MissingFrameMode.Black )
		with context :
			six.assertRaisesRegex( self, RuntimeError, ".*incompleteSequence.*.exr.*", GafferImage.ImageAlgo.image, reader["out"] )
			six.assertRaisesRegex( self, RuntimeError, ".*incompleteSequence.*.exr.*", reader["out"]["format"].getValue )
			self.assertEqual( reader["out"]["dataWindow"].getValue(), reader["out"]["dataWindow"].defaultValue() )
			self.assertEqual( reader["out"]["metadata"].getValue(), reader["out"]["metadata"].defaultValue() )
			self.assertEqual( reader["out"]["channelNames"].getValue(), reader["out"]["channelNames"].defaultValue() )
			self.assertEqual( reader["out"].channelData( "R", imath.V2i( 0 ) ), blackTile )

	def testHashesFrame( self ) :

		# the fileName excludes FrameSubstitutions, but
		# the internal implementation can still rely on
		# frame, so we need to check that the output
		# still responds to frame changes.

		testSequence = IECore.FileSequence( self.temporaryDirectory() + "/incompleteSequence.####.exr" )
		shutil.copyfile( self.fileName, testSequence.fileNameForFrame( 0 ) )
		shutil.copyfile( self.offsetDataWindowFileName, testSequence.fileNameForFrame( 1 ) )

		reader = GafferImage.OpenImageIOReader()
		reader["fileName"].setValue( testSequence.fileName )

		context = Gaffer.Context()

		# get frame 0 data for comparison
		context.setFrame( 0 )
		with context :
			sequenceMetadataHash = reader["out"]["metadata"].hash()
			sequenceMetadataValue = reader["out"]["metadata"].getValue()

		context.setFrame( 1 )
		with context :
			self.assertNotEqual( reader["out"]["metadata"].hash(), sequenceMetadataHash )
			self.assertNotEqual( reader["out"]["metadata"].getValue(), sequenceMetadataValue )

		# but when we set an explicit fileName,
		# we no longer re-compute per frame.
		reader["fileName"].setValue( testSequence.fileNameForFrame( 0 ) )

		# get frame 0 data for comparison
		context.setFrame( 0 )
		with context :
			explicitMetadataHash = reader["out"]["metadata"].hash()
			self.assertNotEqual( explicitMetadataHash, sequenceMetadataHash )
			self.assertEqual( reader["out"]["metadata"].getValue(), sequenceMetadataValue )

		context.setFrame( 1 )
		with context :
			self.assertNotEqual( reader["out"]["metadata"].hash(), sequenceMetadataHash )
			self.assertEqual( reader["out"]["metadata"].hash(), explicitMetadataHash )
			self.assertEqual( reader["out"]["metadata"].getValue(), sequenceMetadataValue )

	def testFileFormatMetadata( self ) :

		# dataType / fileFormat metadata should reflect the file on disk.
		r = GafferImage.OpenImageIOReader()

		r["fileName"].setValue( self.circlesJpgFileName )
		self.assertEqual( r["out"]["metadata"].getValue()["dataType"].value, "uint8" )
		self.assertEqual( r["out"]["metadata"].getValue()["fileFormat"].value, "jpeg" )

		r["fileName"].setValue( "${GAFFER_ROOT}/python/GafferImageTest/images/rgb.100x100.dpx" )
		self.assertEqual( r["out"]["metadata"].getValue()["dataType"].value, "uint10" )
		self.assertEqual( r["out"]["metadata"].getValue()["fileFormat"].value, "dpx" )

	def testOffsetAlignment( self ) :
		# Test a bunch of different data window alignments on disk. This exercises code for reading
		# weirdly aligned scanlines and partial tiles

		tempFile = self.temporaryDirectory() + "/tempOffsetImage.exr"

		r = GafferImage.OpenImageIOReader()
		r["fileName"].setValue( self.alignmentTestSourceFileName )
		offsetOut = GafferImage.Offset()
		offsetOut["in"].setInput( r["out"] )
		w = GafferImage.ImageWriter()
		w["in"].setInput( offsetOut["out"] )
		w["fileName"].setValue( tempFile )
		rBack = GafferImage.OpenImageIOReader()
		rBack["fileName"].setValue( tempFile )
		offsetIn = GafferImage.Offset()
		offsetIn["in"].setInput( rBack["out"] )

		# fixed seed keeps the random offsets reproducible across runs
		random.seed( 42 )
		offsets = [ imath.V2i(x,y) for x in [-1,0,1] for y in [-1, 0, 1] ] + [
			imath.V2i( random.randint( -32, 32 ), random.randint( -32, 32 ) ) for i in range( 10 ) ]

		for mode in [ GafferImage.ImageWriter.Mode.Scanline, GafferImage.ImageWriter.Mode.Tile ]:
			w['openexr']['mode'].setValue( mode )
			for offset in offsets:
				# offsetting out then back in should round-trip exactly
				offsetOut['offset'].setValue( offset )
				offsetIn['offset'].setValue( -offset )

				w.execute()

				rBack['refreshCount'].setValue( rBack['refreshCount'].getValue() + 1 )

				self.assertImagesEqual( r["out"], offsetIn["out"], ignoreMetadata = True )

	def testMultipartRead( self ) :

		rgbReader = GafferImage.OpenImageIOReader()
		rgbReader["fileName"].setValue( self.offsetDataWindowFileName )

		compareDelete = GafferImage.DeleteChannels()
		compareDelete["in"].setInput( rgbReader["out"] )

		# This test multipart file contains a "rgb" subimage, an "rgba" subimage, and a "depth" subimage, with
		# one channel named "Z" ( copied from the green channel of our reference image.
		# It was created using this command:
		# > oiiotool rgb.100x100.exr --attrib "oiio:subimagename" rgb -ch "R,G,B" rgb.100x100.exr --attrib "oiio:subimagename" rgba rgb.100x100.exr --attrib "oiio:subimagename" depth --ch "G" --chnames "Z" --siappendall -o multipart.exr
		multipartReader = GafferImage.OpenImageIOReader()
		multipartReader["fileName"].setValue( self.multipartFileName )

		multipartShuffle = GafferImage.Shuffle()
		multipartShuffle["in"].setInput( multipartReader["out"] )

		multipartDelete = GafferImage.DeleteChannels()
		multipartDelete["in"].setInput( multipartShuffle["out"] )
		multipartDelete['channels'].setValue( "*.*" )

		# channels from each subimage are prefixed with the subimage name
		self.assertEqual( set( multipartReader["out"]["channelNames"].getValue() ),
			set([ "rgba.R", "rgba.G", "rgba.B", "rgba.A", "rgb.R", "rgb.G", "rgb.B", "depth.Z" ])
		)

		multipartShuffle["channels"].clearChildren()
		multipartShuffle["channels"].addChild( GafferImage.Shuffle.ChannelPlug( "R", "rgba.R" ) )
		multipartShuffle["channels"].addChild( GafferImage.Shuffle.ChannelPlug( "G", "rgba.G" ) )
		multipartShuffle["channels"].addChild( GafferImage.Shuffle.ChannelPlug( "B", "rgba.B" ) )
		multipartShuffle["channels"].addChild( GafferImage.Shuffle.ChannelPlug( "A", "rgba.A" ) )
		self.assertImagesEqual( compareDelete["out"], multipartDelete["out"], ignoreMetadata = True )

		multipartShuffle["channels"].clearChildren()
		multipartShuffle["channels"].addChild( GafferImage.Shuffle.ChannelPlug( "R", "rgb.R" ) )
		multipartShuffle["channels"].addChild( GafferImage.Shuffle.ChannelPlug( "G", "rgb.G" ) )
		multipartShuffle["channels"].addChild( GafferImage.Shuffle.ChannelPlug( "B", "rgb.B" ) )
		compareDelete['channels'].setValue( "A" )
		self.assertImagesEqual( compareDelete["out"], multipartDelete["out"], ignoreMetadata = True )

		multipartShuffle["channels"].clearChildren()
		multipartShuffle["channels"].addChild( GafferImage.Shuffle.ChannelPlug( "G", "depth.Z" ) )
		compareDelete['channels'].setValue( "R B A" )
		self.assertImagesEqual( compareDelete["out"], multipartDelete["out"], ignoreMetadata = True )

	def testUnsupportedMultipartRead( self ) :

		rgbReader = GafferImage.OpenImageIOReader()
		rgbReader["fileName"].setValue( self.offsetDataWindowFileName )

		compareShuffle = GafferImage.Shuffle()
		compareShuffle["in"].setInput( rgbReader["out"] )
		compareShuffle["channels"].clearChildren()
		compareShuffle["channels"].addChild( GafferImage.Shuffle.ChannelPlug( "rgba.R", "R" ) )
		compareShuffle["channels"].addChild( GafferImage.Shuffle.ChannelPlug( "rgba.G", "G" ) )
		compareShuffle["channels"].addChild( GafferImage.Shuffle.ChannelPlug( "rgba.B", "B" ) )
		compareShuffle["channels"].addChild( GafferImage.Shuffle.ChannelPlug( "rgba.A", "A" ) )

		compareDelete = GafferImage.DeleteChannels()
		compareDelete["in"].setInput( compareShuffle["out"] )
		compareDelete["channels"].setValue( "R G B A" )

		# This test multipart file contains a "rgba" subimage, and a second subimage with a
		# differing data window. The second part can currently not be loaded, because Gaffer images
		# have a single data window for the whole image.
		#
		# In the future, should we union the data windows? Are subimages with differing data windows common?
		# This would probably happen with stereo images, but we should probably put work into handling stereo
		# images differently - with a context variable to control which eye we get, rather than loading everything
		# as channels.
		#
		# It was created using this command:
		# > oiiotool rgb.100x100.exr --attrib "oiio:subimagename" rgba checkerboard.100x100.exr --attrib "oiio:subimagename" fullDataWindow --siappendall -o unsupportedMultipart.exr
		multipartReader = GafferImage.OpenImageIOReader()
		multipartReader["fileName"].setValue( self.unsupportedMultipartFileName )

		# When we compare to the single part comparison file, the image will come out the same, because
		# the second part is ignored - and we should get a message about it being ignored
		with IECore.CapturingMessageHandler() as mh :
			self.assertImagesEqual( compareDelete["out"], multipartReader["out"], ignoreMetadata = True )

		self.assertEqual( len( mh.messages ), 1 )
		self.assertTrue( mh.messages[0].message.startswith( "Ignoring subimage 1 of " ) )

	def testDefaultFormatHash( self ) :

		# With no fileName set, the output format tracks the context's
		# default format, so its hash must change with that default.
		r = GafferImage.OpenImageIOReader()

		with Gaffer.Context() as c :

			GafferImage.FormatPlug.setDefaultFormat( c, GafferImage.Format( 100, 200 ) )
			h1 = r["out"].formatHash()
			GafferImage.FormatPlug.setDefaultFormat( c, GafferImage.Format( 200, 300 ) )
			h2 = r["out"].formatHash()
			GafferImage.FormatPlug.setDefaultFormat( c, GafferImage.Format( 100, 300, 2.0 ) )
			h3 = r["out"].formatHash()
			GafferImage.FormatPlug.setDefaultFormat( c, GafferImage.Format( 100, 200 ) )
			h4 = r["out"].formatHash()

		self.assertNotEqual( h1, h2 )
		self.assertNotEqual( h1, h3 )
		self.assertNotEqual( h2, h3 )
		self.assertEqual( h1, h4 )

	def testOpenFilesLimit( self ) :

		# setOpenFilesLimit() round-trips; restore the original limit even
		# if the assertion fails.
		l = GafferImage.OpenImageIOReader.getOpenFilesLimit()
		try :
			GafferImage.OpenImageIOReader.setOpenFilesLimit( l + 1 )
			self.assertEqual( GafferImage.OpenImageIOReader.getOpenFilesLimit(), l + 1 )
		finally :
			GafferImage.OpenImageIOReader.setOpenFilesLimit( l )
if __name__ == "__main__":
	# Allow the test module to be run directly as a script.
	unittest.main()
| |
import datetime
from collections import defaultdict, Counter
from django.contrib.auth.decorators import user_passes_test
from django.db.models import Count, F
from django.contrib.auth.models import User
from django.shortcuts import render
from allauth.socialaccount.models import SocialAccount
from profiles.models import Subscription, Notification, UsageReport, Profile, KEY_TIERS
from utils.common import abbr_to_jid, sessions_with_bills, states
from utils.orgs import get_chambers_from_abbr
from dashboards.models import DataQualityReport
from openstates.data.models import LegislativeSession
def dqr_listing(request):
    """List data-quality dashboards for every state's most recent session.

    Builds a mapping of lower-cased state abbreviation to that state's latest
    session name plus its lower/upper chamber DataQualityReport (either of
    which may be None), then renders the listing template.
    """
    state_dqr_data = {}
    for state in states:
        try:
            session = sessions_with_bills(abbr_to_jid(state.abbr))[0]
        except (KeyError, IndexError):
            # No sessions with bills for this state yet; skip it.
            # (Indexing an empty sequence raises IndexError, which the
            # original `except KeyError` never caught.)
            continue

        dashboards = list(
            DataQualityReport.objects.filter(session=session).order_by("chamber")
        )
        session_name = session.name

        # Reset per iteration: previously these names leaked from one loop
        # iteration to the next, so a state with zero reports either raised
        # NameError (first state) or reused the previous state's dashboards.
        lower_dashboard = upper_dashboard = None

        # if there are two, lower is first (b/c of ordering above), otherwise figure it out
        if len(dashboards) == 2:
            lower_dashboard, upper_dashboard = dashboards
        elif len(dashboards) == 1:
            if dashboards[0].chamber == "lower":
                lower_dashboard = dashboards[0]
            else:
                upper_dashboard = dashboards[0]

        state_dqr_data[state.abbr.lower()] = {
            "state": state.name,
            "session_name": session_name,
            "lower_dashboard": lower_dashboard,
            "upper_dashboard": upper_dashboard,
        }

    return render(
        request, "dashboards/dqr_listing.html", {"state_dqr_data": state_dqr_data}
    )
def dq_overview(request, state):
    """Render the data-quality page for a state's most recent session."""
    jurisdiction_id = abbr_to_jid(state)
    available_sessions = sessions_with_bills(jurisdiction_id)

    # fall back to a placeholder string when no reports exist yet
    session = "Dashboards Not Generated Yet"
    reports = []
    if available_sessions:
        session = available_sessions[0]
        reports = DataQualityReport.objects.filter(session=session)

    context = {
        "state": state,
        "chambers": get_chambers_from_abbr(state),
        "session": session,
        "all_sessions": available_sessions,
        "dashboards": reports,
    }
    return render(request, "dashboards/dqr_page.html", context)
def dq_overview_session(request, state, session):
    """Render the data-quality page for one specific legislative session."""
    jurisdiction_id = abbr_to_jid(state)
    # resolve the session identifier from the URL to its model instance
    session_obj = LegislativeSession.objects.get(
        identifier=session, jurisdiction_id=jurisdiction_id
    )
    context = {
        "state": state,
        "chambers": get_chambers_from_abbr(state),
        "session": session_obj,
        "all_sessions": sessions_with_bills(jurisdiction_id),
        "dashboards": DataQualityReport.objects.filter(session=session_obj),
    }
    return render(request, "dashboards/dqr_page.html", context)
@user_passes_test(lambda u: u.is_superuser)
def user_overview(request):
    """Superuser-only dashboard summarising users, signups and subscriptions."""
    bill_subscriptions = Subscription.objects.filter(bill_id__isnull=False).count()
    query_subscriptions = Subscription.objects.exclude(query="").count()

    users_by_day = list(
        User.objects.extra(select={"day": "date(date_joined)"})
        .values("day")
        .annotate(Count("id"))
        .order_by("day")
        .filter(date_joined__gte="2020-01-01")
    )

    # get counts by each provider (ignore small % with multiple providers)
    providers = list(
        SocialAccount.objects.values(name=F("provider")).annotate(value=Count("id"))
    )
    # append the number of users that only have an OS-account
    openstates_only = User.objects.exclude(
        id__in=SocialAccount.objects.values("user_id")
    ).count()
    providers.append({"name": "openstates", "value": openstates_only})

    # users with at least one subscription
    subscribers = list(
        User.objects.annotate(sub_count=Count("subscriptions"))
        .values(
            "id",
            "profile__subscription_emails_html",
            "profile__subscription_frequency",
            "sub_count",
        )
        .filter(sub_count__gt=0)
    )

    # show what users prefer
    tally = {"w": 0, "d": 0}
    for subscriber in subscribers:
        tally[subscriber["profile__subscription_frequency"]] += 1
    email_frequencies = [
        {"name": "weekly", "value": tally["w"]},
        {"name": "daily", "value": tally["d"]},
    ]

    notifications_by_day = list(
        Notification.objects.extra(select={"day": "date(sent)"})
        .values("day")
        .annotate(Count("id"))
        .order_by("day")
    )

    context = {
        "user_count": User.objects.count(),
        "subscriber_count": len(subscribers),
        "bill_subscriptions": bill_subscriptions,
        "query_subscriptions": query_subscriptions,
        "users_by_day": users_by_day,
        "providers": providers,
        "notifications_by_day": notifications_by_day,
        "email_frequencies": email_frequencies,
    }
    return render(request, "dashboards/users.html", {"context": context})
def _counter_to_chartdata(counter):
"""restructure data from a format like "date -> value -> num"
to "{date: date, value1: num1, value2: num2}"
for use in charts
"""
ret_data = []
for date, subcounter in counter.items():
cur_item = {"date": date}
for k, v in subcounter.items():
cur_item[k] = v
ret_data.append(cur_item)
return sorted(ret_data, key=lambda x: x["date"])
@user_passes_test(lambda u: u.is_superuser)
def api_overview(request):
    """Superuser-only dashboard of API usage.

    Aggregates UsageReport rows over the last ``?days=N`` days (default 60)
    into per-day/per-endpoint and per-day/per-key call counts, plus
    per-key totals split out for the v2 (graphql) and v3 endpoints.
    """
    days = int(request.GET.get("days", 60))
    cutoff = datetime.datetime.today() - datetime.timedelta(days=days)

    endpoint_by_day = defaultdict(lambda: defaultdict(int))
    key_by_day = defaultdict(lambda: defaultdict(int))
    totals_by_key = Counter()
    v2_totals = Counter()
    v3_totals = Counter()
    seen_keys = set()

    usage_reports = UsageReport.objects.filter(
        date__gte=cutoff, calls__gt=0
    ).select_related("profile__user")
    for usage in usage_reports:
        day = str(usage.date)
        # label each key with the owner's email for readability
        key_label = f"{usage.profile.api_key} - {usage.profile.user.email}"
        calls = usage.calls
        endpoint_by_day[day][usage.endpoint] += calls
        key_by_day[day][key_label] += calls
        totals_by_key[key_label] += calls
        if usage.endpoint == "graphql":
            v2_totals[key_label] += calls
        elif usage.endpoint == "v3":
            v3_totals[key_label] += calls
        seen_keys.add(key_label)

    context = {
        "endpoint_usage": _counter_to_chartdata(endpoint_by_day),
        "key_usage": _counter_to_chartdata(key_by_day),
        "most_common": totals_by_key.most_common(),
        "v2_totals": v2_totals,
        "v3_totals": v3_totals,
        "key_tiers": list(KEY_TIERS.values()),
        "total_keys": Profile.objects.exclude(
            api_tier__in=("inactive", "suspended")
        ).count(),
        "active_keys": len(seen_keys),
        "days": days,
    }
    return render(request, "dashboards/api.html", {"context": context})
| |
from . import slicestyles as styles
from cadnano.virtualhelix import VirtualHelix
from cadnano.enum import Parity, StrandType
from cadnano.gui.controllers.itemcontrollers.virtualhelixitemcontroller import VirtualHelixItemController
import cadnano.util as util
from PyQt5.QtCore import QPointF, Qt, QRectF, QEvent
from PyQt5.QtGui import QBrush, QPen, QPainterPath, QColor, QPolygonF
from PyQt5.QtWidgets import QGraphicsItem, QGraphicsEllipseItem
from PyQt5.QtWidgets import QGraphicsSimpleTextItem, QGraphicsLineItem
class VirtualHelixItem(QGraphicsEllipseItem):
    """
    The VirtualHelixItem is an individual circle that gets drawn in the SliceView
    as a child of the PartItem. Taken as a group, many SliceHelix
    instances make up the crossection of the DNAPart. Clicking on a SliceHelix
    adds a VirtualHelix to the DNAPart. The SliceHelix then changes appearence
    and paints its corresponding VirtualHelix number.
    """
    # set up default, hover, and active drawing styles
    _USE_BRUSH = QBrush(styles.ORANGE_FILL)
    _USE_PEN = QPen(styles.ORANGE_STROKE, styles.SLICE_HELIX_STROKE_WIDTH)
    _RADIUS = styles.SLICE_HELIX_RADIUS
    _OUT_OF_SLICE_PEN = QPen(styles.LIGHT_ORANGE_STROKE,
                             styles.SLICE_HELIX_STROKE_WIDTH)
    _OUT_OF_SLICE_BRUSH = QBrush(styles.LIGHT_ORANGE_FILL)
    _RECT = QRectF(0, 0, 2 * _RADIUS, 2 * _RADIUS)
    _FONT = styles.SLICE_NUM_FONT
    _ZVALUE = styles.ZSLICEHELIX + 3

    def __init__(self, model_virtual_helix, empty_helix_item):
        """
        Args:
            model_virtual_helix: the VirtualHelix model object rendered by
                this item.
            empty_helix_item: an EmptyHelixItem that will act as the
                QGraphicsItem parent.
        """
        super(VirtualHelixItem, self).__init__(parent=empty_helix_item)
        self._virtual_helix = model_virtual_helix
        self._empty_helix_item = empty_helix_item
        self.hide()
        # drawing related
        self.isHovered = False
        self.setAcceptHoverEvents(True)
        # self.setFlag(QGraphicsItem.ItemIsSelectable)
        self.setZValue(self._ZVALUE)
        self.lastMousePressAddedBases = False

        self.setBrush(self._OUT_OF_SLICE_BRUSH)
        self.setPen(self._OUT_OF_SLICE_PEN)
        self.setRect(self._RECT)

        # handle the label specific stuff
        self._label = self.createLabel()
        self.setNumber()
        self._pen1, self._pen2 = (QPen(), QPen())
        self.createArrows()

        self._controller = VirtualHelixItemController(self, model_virtual_helix)
        self.show()
    # end def

    ### SIGNALS ###

    ### SLOTS ###
    def virtualHelixNumberChangedSlot(self, virtualHelix):
        """
        receives a signal containing a virtualHelix and the oldNumber
        as a safety check
        """
        self.setNumber()
    # end def

    def virtualHelixRemovedSlot(self, virtualHelix):
        """Tear down controller, model references, and scene items when the
        model VirtualHelix is removed."""
        self._controller.disconnectSignals()
        self._controller = None
        self._empty_helix_item.setNotHovered()
        self._virtual_helix = None
        self._empty_helix_item = None
        self.scene().removeItem(self._label)
        self._label = None
        self.scene().removeItem(self)
    # end def

    def strandAddedSlot(self, sender, strand):
        pass
    # end def

    def createLabel(self):
        """Create and return the child text item showing the helix number."""
        label = QGraphicsSimpleTextItem("%d" % self._virtual_helix.number())
        label.setFont(self._FONT)
        label.setZValue(self._ZVALUE)
        label.setParentItem(self)
        return label
    # end def

    def createArrows(self):
        """Create the (initially hidden) scaffold/staple direction arrows.

        arrow1 tracks the scaffold strand, arrow2 the staple strand; their
        orientation depends on the helix parity.
        """
        rad = self._RADIUS
        pen1 = self._pen1
        pen2 = self._pen2
        pen1.setWidth(3)
        pen2.setWidth(3)
        pen1.setBrush(Qt.gray)
        pen2.setBrush(Qt.lightGray)
        if self._virtual_helix.isEvenParity():
            arrow1 = QGraphicsLineItem(rad, rad, 2 * rad, rad, self)
            arrow2 = QGraphicsLineItem(0, rad, rad, rad, self)
        else:
            arrow1 = QGraphicsLineItem(0, rad, rad, rad, self)
            arrow2 = QGraphicsLineItem(rad, rad, 2 * rad, rad, self)
        # rotate arrows about the circle center
        arrow1.setTransformOriginPoint(rad, rad)
        arrow2.setTransformOriginPoint(rad, rad)
        arrow1.setZValue(400)
        arrow2.setZValue(400)
        arrow1.setPen(pen1)
        arrow2.setPen(pen2)
        self.arrow1 = arrow1
        self.arrow2 = arrow2
        self.arrow1.hide()
        self.arrow2.hide()
    # end def

    def _updateArrow(self, arrow, pen, strand, fallback_color, idx):
        """Recolor *arrow* from *strand*'s oligo (or *fallback_color* when no
        strand is present) and rotate it to point at base index *idx*.

        Alpha encodes state: 0.9 crossover, 0.3 plain strand, 0.1 no strand.
        """
        if strand:
            color = QColor(strand.oligo().color())
            alpha = 0.9 if strand.hasXoverAt(idx) else 0.3
        else:
            color = QColor(fallback_color)
            alpha = 0.1
        color.setAlphaF(alpha)
        pen.setBrush(color)
        arrow.setPen(pen)
        part = self.part()
        angle = idx * part._TWIST_PER_BASE
        # for some reason rotation is CW and not CCW with increasing angle
        arrow.setRotation(angle + part._TWIST_OFFSET)
    # end def

    def updateScafArrow(self, idx):
        """Point the scaffold arrow at base *idx*, colored by its strand."""
        self._updateArrow(
            self.arrow1, self._pen1, self._virtual_helix.scaf(idx), Qt.gray, idx
        )
    # end def

    def updateStapArrow(self, idx):
        """Point the staple arrow at base *idx*, colored by its strand."""
        self._updateArrow(
            self.arrow2, self._pen2, self._virtual_helix.stap(idx), Qt.lightGray, idx
        )
    # end def

    def setNumber(self):
        """Refresh the label text from the model and center it in the circle.

        Does nothing when the model has no number assigned.
        """
        num = self._virtual_helix.number()
        if num is None:
            return
        label = self._label
        label.setText("%d" % num)
        # center the label using its rendered bounding box
        radius = self._RADIUS
        b_rect = label.boundingRect()
        label.setPos(radius - b_rect.width() / 2, radius - b_rect.height() / 2)
    # end def

    def part(self):
        """Return the DNAPart this helix belongs to (via the parent item)."""
        return self._empty_helix_item.part()

    def virtualHelix(self):
        return self._virtual_helix
    # end def

    def number(self):
        return self.virtualHelix().number()

    def setActiveSliceView(self, idx, has_scaf, has_stap):
        """Restyle the circle and arrows for the active slice at base *idx*.

        has_scaf/has_stap indicate whether scaffold/staple strands exist at
        idx; the circle is drawn "in slice" only when a scaffold is present.
        """
        if has_scaf:
            self.setPen(self._USE_PEN)
            self.setBrush(self._USE_BRUSH)
            self.updateScafArrow(idx)
            self.arrow1.show()
        else:
            self.setPen(self._OUT_OF_SLICE_PEN)
            self.setBrush(self._OUT_OF_SLICE_BRUSH)
            self.arrow1.hide()
        if has_stap:
            self.updateStapArrow(idx)
            self.arrow2.show()
        else:
            self.arrow2.hide()
    # end def

    ############################ User Interaction ############################
    def sceneEvent(self, event):
        """Included for unit testing in order to grab events that are sent
        via QGraphicsScene.sendEvent()."""
        # if self._parent.sliceController.testRecorder:
        #     coord = (self._row, self._col)
        #     self._parent.sliceController.testRecorder.sliceSceneEvent(event, coord)
        if event.type() == QEvent.MouseButtonPress:
            self.mousePressEvent(event)
            return True
        elif event.type() == QEvent.MouseButtonRelease:
            self.mouseReleaseEvent(event)
            return True
        elif event.type() == QEvent.MouseMove:
            self.mouseMoveEvent(event)
            return True
        QGraphicsItem.sceneEvent(self, event)
        return False

    def hoverEnterEvent(self, event):
        """
        If the selection is configured to always select
        everything, we don't draw a focus ring around everything,
        instead we only draw a focus ring around the hovered obj.
        """
        # if self.selectAllBehavior():
        #     self.setSelected(True)
        # forward the event to the empty_helix_item as well
        self._empty_helix_item.hoverEnterEvent(event)
    # end def

    def hoverLeaveEvent(self, event):
        # if self.selectAllBehavior():
        #     self.setSelected(False)
        # forward the LEAVE event to the parent (previously this mistakenly
        # forwarded hoverEnterEvent, so the parent never saw the hover end)
        self._empty_helix_item.hoverLeaveEvent(event)
    # end def
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.