| repo_name (stringlengths 5-100) | ref (stringlengths 12-67) | path (stringlengths 4-244) | copies (stringlengths 1-8) | content (stringlengths 0-1.05M, ⌀ allowed) |
|---|---|---|---|---|
decvalts/iris | refs/heads/master | lib/iris/fileformats/cf.py | 2 |
# (C) British Crown Copyright 2010 - 2015, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""
Provides the capability to load netCDF files and interpret them
according to the 'NetCDF Climate and Forecast (CF) Metadata Conventions'.
References:
[CF] NetCDF Climate and Forecast (CF) Metadata conventions, Version 1.5, October, 2010.
[NUG] NetCDF User's Guide, http://www.unidata.ucar.edu/software/netcdf/docs/netcdf.html
"""
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
import six
from abc import ABCMeta, abstractmethod
from collections import Iterable, MutableMapping
import os
import re
import warnings
import netCDF4
import numpy as np
import numpy.ma as ma
import iris.util
#
# CF parse pattern common to both formula terms and measure CF variables.
#
_CF_PARSE = re.compile(r'''
\s*
(?P<lhs>[\w_]+)
\s*:\s*
(?P<rhs>[\w_]+)
\s*
''', re.VERBOSE)
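# For illustration, _CF_PARSE picks out 'term: variable' pairs, so a
# formula_terms attribute such as 'sigma: lev ps: PS ptop: PTOP' (the
# variable names are example identifiers only) yields the (lhs, rhs)
# matches ('sigma', 'lev'), ('ps', 'PS') and ('ptop', 'PTOP').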
# NetCDF variable attributes handled by the netCDF4 module and
# therefore automatically classed as "used" attributes.
_CF_ATTRS_IGNORE = set(['_FillValue', 'add_offset', 'missing_value', 'scale_factor', ])
#: Supported dimensionless vertical coordinate reference surface/phenomenon
#: formula terms. Ref: [CF] Appendix D.
reference_terms = dict(atmosphere_sigma_coordinate=['ps'],
atmosphere_hybrid_sigma_pressure_coordinate=['ps'],
atmosphere_hybrid_height_coordinate=['orog'],
atmosphere_sleve_coordinate=['zsurf1', 'zsurf2'],
ocean_sigma_coordinate=['eta', 'depth'],
ocean_s_coordinate=['eta', 'depth'],
ocean_sigma_z_coordinate=['eta', 'depth'],
ocean_s_coordinate_g1=['eta', 'depth'],
ocean_s_coordinate_g2=['eta', 'depth'])
################################################################################
class CFVariable(six.with_metaclass(ABCMeta, object)):
"""Abstract base class wrapper for a CF-netCDF variable."""
#: Name of the netCDF variable attribute that identifies this
#: CF-netCDF variable.
cf_identity = None
def __init__(self, name, data):
# Accessing the list of netCDF attributes is surprisingly slow.
# Since it's used repeatedly, caching the list makes things
# quite a bit faster.
self._nc_attrs = data.ncattrs()
#: NetCDF variable name.
self.cf_name = name
#: NetCDF4 Variable data instance.
self.cf_data = data
#: Collection of CF-netCDF variables associated with this variable.
self.cf_group = None
#: CF-netCDF formula terms that this variable participates in.
self.cf_terms_by_root = {}
self.cf_attrs_reset()
@staticmethod
def _identify_common(variables, ignore, target):
if ignore is None:
ignore = []
if target is None:
target = variables
elif isinstance(target, six.string_types):
if target not in variables:
raise ValueError('Cannot identify unknown target CF-netCDF variable %r' % target)
target = {target: variables[target]}
else:
raise TypeError('Expect a target CF-netCDF variable name')
return (ignore, target)
@abstractmethod
def identify(self, variables, ignore=None, target=None, warn=True):
"""
Identify all variables that match the criterion for this CF-netCDF variable class.
Args:
* variables:
Dictionary of netCDF4.Variable instances by variable name.
Kwargs:
* ignore:
List of variable names to ignore.
* target:
Name of a single variable to check.
* warn:
Issue a warning if a missing variable is referenced.
Returns:
Dictionary of CFVariable instances by variable name.
"""
pass
def spans(self, cf_variable):
"""
Determine whether the dimensionality of this variable
is a subset of the specified target variable.
Note that, by default, scalar variables always span the
dimensionality of the target variable.
Args:
* cf_variable:
Compare dimensionality with the :class:`CFVariable`.
Returns:
Boolean.
"""
result = set(self.dimensions).issubset(cf_variable.dimensions)
return result
def __eq__(self, other):
# CF variable names are unique.
return self.cf_name == other.cf_name
def __ne__(self, other):
# CF variable names are unique.
return self.cf_name != other.cf_name
def __hash__(self):
# CF variable names are unique.
return hash(self.cf_name)
def __getattr__(self, name):
# Accessing netCDF attributes is surprisingly slow. Since
# they're often read repeatedly, caching the values makes things
# quite a bit faster.
if name in self._nc_attrs:
self._cf_attrs.add(name)
value = getattr(self.cf_data, name)
setattr(self, name, value)
return value
def __getitem__(self, key):
return self.cf_data.__getitem__(key)
def __len__(self):
return self.cf_data.__len__()
def __repr__(self):
return '%s(%r, %r)' % (self.__class__.__name__, self.cf_name, self.cf_data)
def cf_attrs(self):
"""Return a list of all attribute name and value pairs of the CF-netCDF variable."""
return tuple((attr, self.getncattr(attr))
for attr in sorted(self._nc_attrs))
def cf_attrs_ignored(self):
"""Return a list of all ignored attribute name and value pairs of the CF-netCDF variable."""
return tuple((attr, self.getncattr(attr)) for attr in
sorted(set(self._nc_attrs) & _CF_ATTRS_IGNORE))
def cf_attrs_used(self):
"""Return a list of all accessed attribute name and value pairs of the CF-netCDF variable."""
return tuple((attr, self.getncattr(attr)) for attr in
sorted(self._cf_attrs))
def cf_attrs_unused(self):
"""Return a list of all non-accessed attribute name and value pairs of the CF-netCDF variable."""
return tuple((attr, self.getncattr(attr)) for attr in
sorted(set(self._nc_attrs) - self._cf_attrs))
def cf_attrs_reset(self):
"""Reset the history of accessed attribute names of the CF-netCDF variable."""
self._cf_attrs = set([item[0] for item in self.cf_attrs_ignored()])
def add_formula_term(self, root, term):
"""
Register the participation of this CF-netCDF variable in a CF-netCDF formula term.
Args:
* root (string):
The name of CF-netCDF variable that defines the CF-netCDF formula_terms attribute.
* term (string):
The associated term name of this variable in the formula_terms definition.
Returns:
None.
"""
self.cf_terms_by_root[root] = term
def has_formula_terms(self):
"""
Determine whether this CF-netCDF variable participates in a CF-netCDF formula term.
Returns:
Boolean.
"""
return bool(self.cf_terms_by_root)
class CFAncillaryDataVariable(CFVariable):
"""
A CF-netCDF ancillary data variable is a variable that provides metadata
about the individual values of another data variable.
Identified by the CF-netCDF variable attribute 'ancillary_variables'.
Ref: [CF] Section 3.4. Ancillary Data.
"""
cf_identity = 'ancillary_variables'
@classmethod
def identify(cls, variables, ignore=None, target=None, warn=True):
result = {}
ignore, target = cls._identify_common(variables, ignore, target)
# Identify all CF ancillary data variables.
for nc_var_name, nc_var in six.iteritems(target):
# Check for ancillary data variable references.
nc_var_att = getattr(nc_var, cls.cf_identity, None)
if nc_var_att is not None:
for name in nc_var_att.split():
if name not in ignore:
if name not in variables:
if warn:
message = 'Missing CF-netCDF ancillary data variable %r, referenced by netCDF variable %r'
warnings.warn(message % (name, nc_var_name))
else:
result[name] = CFAncillaryDataVariable(name, variables[name])
return result
class CFAuxiliaryCoordinateVariable(CFVariable):
"""
A CF-netCDF auxiliary coordinate variable is any netCDF variable that contains
coordinate data, but is not a CF-netCDF coordinate variable by definition.
There is no relationship between the name of a CF-netCDF auxiliary coordinate
variable and the name(s) of its dimension(s).
Identified by the CF-netCDF variable attribute 'coordinates'.
Also see :class:`iris.fileformats.cf.CFLabelVariable`.
Ref: [CF] Chapter 5. Coordinate Systems.
[CF] Section 6.2. Alternative Coordinates.
"""
cf_identity = 'coordinates'
@classmethod
def identify(cls, variables, ignore=None, target=None, warn=True):
result = {}
ignore, target = cls._identify_common(variables, ignore, target)
# Identify all CF auxiliary coordinate variables.
for nc_var_name, nc_var in six.iteritems(target):
# Check for auxiliary coordinate variable references.
nc_var_att = getattr(nc_var, cls.cf_identity, None)
if nc_var_att is not None:
for name in nc_var_att.split():
if name not in ignore:
if name not in variables:
if warn:
message = 'Missing CF-netCDF auxiliary coordinate variable %r, referenced by netCDF variable %r'
warnings.warn(message % (name, nc_var_name))
else:
# Restrict to non-string type i.e. not a CFLabelVariable.
if not np.issubdtype(variables[name].dtype, np.str):
result[name] = CFAuxiliaryCoordinateVariable(name, variables[name])
return result
class CFBoundaryVariable(CFVariable):
"""
A CF-netCDF boundary variable is associated with a CF-netCDF variable that contains
coordinate data. When a data value provides information about conditions in a cell
occupying a region of space/time or some other dimension, the boundary variable
provides a description of cell extent.
A CF-netCDF boundary variable will have one more dimension than its associated
CF-netCDF coordinate variable or CF-netCDF auxiliary coordinate variable.
Identified by the CF-netCDF variable attribute 'bounds'.
Ref: [CF] Section 7.1. Cell Boundaries.
"""
cf_identity = 'bounds'
@classmethod
def identify(cls, variables, ignore=None, target=None, warn=True):
result = {}
ignore, target = cls._identify_common(variables, ignore, target)
# Identify all CF boundary variables.
for nc_var_name, nc_var in six.iteritems(target):
# Check for a boundary variable reference.
nc_var_att = getattr(nc_var, cls.cf_identity, None)
if nc_var_att is not None:
name = nc_var_att.strip()
if name not in ignore:
if name not in variables:
if warn:
message = 'Missing CF-netCDF boundary variable %r, referenced by netCDF variable %r'
warnings.warn(message % (name, nc_var_name))
else:
result[name] = CFBoundaryVariable(name, variables[name])
return result
def spans(self, cf_variable):
"""
Determine whether the dimensionality of this variable
is a subset of the specified target variable.
Note that, by default, scalar variables always span the
dimensionality of the target variable.
Args:
* cf_variable:
Compare dimensionality with the :class:`CFVariable`.
Returns:
Boolean.
"""
# Scalar variables always span the target variable.
result = True
if self.dimensions:
source = self.dimensions
target = cf_variable.dimensions
# Ignore the bounds extent dimension.
result = set(source[:-1]).issubset(target) or \
set(source[1:]).issubset(target)
return result
class CFClimatologyVariable(CFVariable):
"""
A CF-netCDF climatology variable is associated with a CF-netCDF variable that contains
coordinate data. When a data value provides information about conditions in a cell
occupying a region of space/time or some other dimension, the climatology variable
provides a climatological description of cell extent.
A CF-netCDF climatology variable will have one more dimension than its associated
CF-netCDF coordinate variable.
Identified by the CF-netCDF variable attribute 'climatology'.
Ref: [CF] Section 7.4. Climatological Statistics
"""
cf_identity = 'climatology'
@classmethod
def identify(cls, variables, ignore=None, target=None, warn=True):
result = {}
ignore, target = cls._identify_common(variables, ignore, target)
# Identify all CF climatology variables.
for nc_var_name, nc_var in six.iteritems(target):
# Check for a climatology variable reference.
nc_var_att = getattr(nc_var, cls.cf_identity, None)
if nc_var_att is not None:
name = nc_var_att.strip()
if name not in ignore:
if name not in variables:
if warn:
message = 'Missing CF-netCDF climatology variable %r, referenced by netCDF variable %r'
warnings.warn(message % (name, nc_var_name))
else:
result[name] = CFClimatologyVariable(name, variables[name])
return result
def spans(self, cf_variable):
"""
Determine whether the dimensionality of this variable
is a subset of the specified target variable.
Note that, by default, scalar variables always span the
dimensionality of the target variable.
Args:
* cf_variable:
Compare dimensionality with the :class:`CFVariable`.
Returns:
Boolean.
"""
# Scalar variables always span the target variable.
result = True
if self.dimensions:
source = self.dimensions
target = cf_variable.dimensions
# Ignore the climatology extent dimension.
result = set(source[:-1]).issubset(target) or \
set(source[1:]).issubset(target)
return result
class CFCoordinateVariable(CFVariable):
"""
A CF-netCDF coordinate variable is a one-dimensional variable with the same name
as its dimension, and it is defined as a numeric data type with values that are
ordered monotonically. Missing values are not allowed in CF-netCDF coordinate
variables. Also see [NUG] Section 2.3.1.
Identified by the above criterion, there is no associated CF-netCDF variable
attribute.
Ref: [CF] 1.2. Terminology.
"""
@classmethod
def identify(cls, variables, ignore=None, target=None, warn=True, monotonic=False):
result = {}
ignore, target = cls._identify_common(variables, ignore, target)
# Identify all CF coordinate variables.
for nc_var_name, nc_var in six.iteritems(target):
if nc_var_name in ignore:
continue
# String variables can't be coordinates
if np.issubdtype(nc_var.dtype, np.str):
continue
# Restrict to one-dimensional with name as dimension OR zero-dimensional scalar
if not ((nc_var.ndim == 1 and nc_var_name in nc_var.dimensions) or (nc_var.ndim == 0)):
continue
# Restrict to monotonic?
if monotonic:
data = nc_var[:]
# Gracefully fill a masked coordinate.
if ma.isMaskedArray(data):
data = ma.filled(data)
if nc_var.shape == () or nc_var.shape == (1,) or iris.util.monotonic(data):
result[nc_var_name] = CFCoordinateVariable(nc_var_name, nc_var)
else:
result[nc_var_name] = CFCoordinateVariable(nc_var_name, nc_var)
return result
class CFDataVariable(CFVariable):
"""
A CF-netCDF variable containing the data payload that maps to an Iris :class:`iris.cube.Cube`.
"""
@classmethod
def identify(cls, variables, ignore=None, target=None, warn=True):
raise NotImplementedError
class _CFFormulaTermsVariable(CFVariable):
"""
A CF-netCDF formula terms variable corresponds to a term in a formula that
allows dimensional vertical coordinate values to be computed from dimensionless
vertical coordinate values and associated variables at specific grid points.
Identified by the CF-netCDF variable attribute 'formula_terms'.
Ref: [CF] Section 4.3.2. Dimensional Vertical Coordinate.
[CF] Appendix D. Dimensionless Vertical Coordinates.
"""
cf_identity = 'formula_terms'
def __init__(self, name, data, formula_root, formula_term):
CFVariable.__init__(self, name, data)
# Register the formula root and term relationship.
self.add_formula_term(formula_root, formula_term)
@classmethod
def identify(cls, variables, ignore=None, target=None, warn=True):
result = {}
ignore, target = cls._identify_common(variables, ignore, target)
# Identify all CF formula terms variables.
for nc_var_name, nc_var in six.iteritems(target):
# Check for formula terms variable references.
nc_var_att = getattr(nc_var, cls.cf_identity, None)
if nc_var_att is not None:
for match_item in _CF_PARSE.finditer(nc_var_att):
match_group = match_item.groupdict()
# Ensure that term name is lower case, as expected.
term_name = match_group['lhs'].lower()
variable_name = match_group['rhs']
if variable_name not in ignore:
if variable_name not in variables:
if warn:
message = 'Missing CF-netCDF formula term variable %r, referenced by netCDF variable %r'
warnings.warn(message % (variable_name, nc_var_name))
else:
if variable_name not in result:
result[variable_name] = _CFFormulaTermsVariable(variable_name,
variables[variable_name],
nc_var_name, term_name)
else:
result[variable_name].add_formula_term(nc_var_name, term_name)
return result
def __repr__(self):
return '%s(%r, %r, %r)' % (self.__class__.__name__,
self.cf_name, self.cf_data,
self.cf_terms_by_root)
class CFGridMappingVariable(CFVariable):
"""
A CF-netCDF grid mapping variable contains a list of specific attributes that
define a particular grid mapping. A CF-netCDF grid mapping variable must contain
the attribute 'grid_mapping_name'.
Based on the value of the 'grid_mapping_name' attribute, there are associated
standard names of CF-netCDF coordinate variables that contain the mapping's
independent variables.
Identified by the CF-netCDF variable attribute 'grid_mapping'.
Ref: [CF] Section 5.6. Horizontal Coordinate Reference Systems, Grid Mappings, and Projections.
[CF] Appendix F. Grid Mappings.
"""
cf_identity = 'grid_mapping'
@classmethod
def identify(cls, variables, ignore=None, target=None, warn=True):
result = {}
ignore, target = cls._identify_common(variables, ignore, target)
# Identify all grid mapping variables.
for nc_var_name, nc_var in six.iteritems(target):
# Check for a grid mapping variable reference.
nc_var_att = getattr(nc_var, cls.cf_identity, None)
if nc_var_att is not None:
name = nc_var_att.strip()
if name not in ignore:
if name not in variables:
if warn:
message = 'Missing CF-netCDF grid mapping variable %r, referenced by netCDF variable %r'
warnings.warn(message % (name, nc_var_name))
else:
result[name] = CFGridMappingVariable(name, variables[name])
return result
class CFLabelVariable(CFVariable):
"""
A CF-netCDF label variable is any netCDF variable that contains string
textual information, or labels.
Identified by the CF-netCDF variable attribute 'coordinates'.
Also see :class:`iris.fileformats.cf.CFAuxiliaryCoordinateVariable`.
Ref: [CF] Section 6.1. Labels.
"""
cf_identity = 'coordinates'
@classmethod
def identify(cls, variables, ignore=None, target=None, warn=True):
result = {}
ignore, target = cls._identify_common(variables, ignore, target)
# Identify all CF label variables.
for nc_var_name, nc_var in six.iteritems(target):
# Check for label variable references.
nc_var_att = getattr(nc_var, cls.cf_identity, None)
if nc_var_att is not None:
for name in nc_var_att.split():
if name not in ignore:
if name not in variables:
if warn:
message = 'Missing CF-netCDF label variable %r, referenced by netCDF variable %r'
warnings.warn(message % (name, nc_var_name))
else:
# Restrict to only string type.
if np.issubdtype(variables[name].dtype, np.str):
result[name] = CFLabelVariable(name, variables[name])
return result
def cf_label_data(self, cf_data_var):
"""
Return the associated CF-netCDF label variable strings.
Args:
* cf_data_var (:class:`iris.fileformats.cf.CFDataVariable`):
The CF-netCDF data variable which the CF-netCDF label variable describes.
Returns:
String labels.
"""
if not isinstance(cf_data_var, CFDataVariable):
raise TypeError('cf_data_var argument should be of type CFDataVariable. Got %r.' % type(cf_data_var))
# Determine the name of the label string (or length) dimension by
# finding the dimension name that doesn't exist within the data dimensions.
str_dim_name = list(set(self.dimensions) - set(cf_data_var.dimensions))
if len(str_dim_name) != 1:
raise ValueError('Invalid string dimensions for CF-netCDF label variable %r' % self.cf_name)
str_dim_name = str_dim_name[0]
label_data = self[:]
if isinstance(label_data, ma.MaskedArray):
label_data = label_data.filled()
# Determine whether we have a string-valued scalar label
# i.e. a character variable that only has one dimension (the length of the string).
if self.ndim == 1:
data = np.array([''.join(label_data).strip()])
else:
# Determine the index of the string dimension.
str_dim = self.dimensions.index(str_dim_name)
# Calculate new label data shape (without string dimension) and create payload array.
new_shape = tuple(dim_len for i, dim_len in enumerate(self.shape) if i != str_dim)
data = np.empty(new_shape, dtype='|S%d' % self.shape[str_dim])
for index in np.ndindex(new_shape):
# Create the slice for the label data.
if str_dim == 0:
label_index = (slice(None, None),) + index
else:
label_index = index + (slice(None, None),)
data[index] = ''.join(label_data[label_index]).strip()
return data
def cf_label_dimensions(self, cf_data_var):
"""
Return the name of the associated CF-netCDF label variable data dimensions.
Args:
* cf_data_var (:class:`iris.fileformats.cf.CFDataVariable`):
The CF-netCDF data variable which the CF-netCDF label variable describes.
Returns:
Tuple of label data dimension names.
"""
if not isinstance(cf_data_var, CFDataVariable):
raise TypeError('cf_data_var argument should be of type CFDataVariable. Got %r.' % type(cf_data_var))
return tuple([dim_name for dim_name in self.dimensions if dim_name in cf_data_var.dimensions])
def spans(self, cf_variable):
"""
Determine whether the dimensionality of this variable
is a subset of the specified target variable.
Note that, by default, scalar variables always span the
dimensionality of the target variable.
Args:
* cf_variable:
Compare dimensionality with the :class:`CFVariable`.
Returns:
Boolean.
"""
# Scalar variables always span the target variable.
result = True
if self.dimensions:
source = self.dimensions
target = cf_variable.dimensions
# Ignore label string length dimension.
result = set(source[:-1]).issubset(target) or \
set(source[1:]).issubset(target)
return result
class CFMeasureVariable(CFVariable):
"""
A CF-netCDF measure variable is a variable that contains cell areas or volumes.
Identified by the CF-netCDF variable attribute 'cell_measures'.
Ref: [CF] Section 7.2. Cell Measures.
"""
cf_identity = 'cell_measures'
def __init__(self, name, data, measure):
CFVariable.__init__(self, name, data)
#: Associated cell measure of the cell variable
self.cf_measure = measure
@classmethod
def identify(cls, variables, ignore=None, target=None, warn=True):
result = {}
ignore, target = cls._identify_common(variables, ignore, target)
# Identify all CF measure variables.
for nc_var_name, nc_var in six.iteritems(target):
# Check for measure variable references.
nc_var_att = getattr(nc_var, cls.cf_identity, None)
if nc_var_att is not None:
for match_item in _CF_PARSE.finditer(nc_var_att):
match_group = match_item.groupdict()
measure = match_group['lhs']
variable_name = match_group['rhs']
if variable_name not in ignore:
if variable_name not in variables:
if warn:
message = 'Missing CF-netCDF measure variable %r, referenced by netCDF variable %r'
warnings.warn(message % (variable_name, nc_var_name))
else:
result[variable_name] = CFMeasureVariable(variable_name, variables[variable_name], measure)
return result
################################################################################
class CFGroup(MutableMapping, object):
"""
Represents a collection of 'NetCDF Climate and Forecast (CF) Metadata
Conventions' variables and netCDF global attributes.
"""
def __init__(self):
#: Collection of CF-netCDF variables
self._cf_variables = {}
#: Collection of netCDF global attributes
self.global_attributes = {}
#: Collection of CF-netCDF variables promoted to a CFDataVariable.
self.promoted = {}
def _cf_getter(self, cls):
# Generate dictionary with dictionary comprehension.
return {cf_name: cf_var
for cf_name, cf_var in six.iteritems(self._cf_variables)
if isinstance(cf_var, cls)}
@property
def ancillary_variables(self):
"""Collection of CF-netCDF ancillary variables."""
return self._cf_getter(CFAncillaryDataVariable)
@property
def auxiliary_coordinates(self):
"""Collection of CF-netCDF auxiliary coordinate variables."""
return self._cf_getter(CFAuxiliaryCoordinateVariable)
@property
def bounds(self):
"""Collection of CF-netCDF boundary variables."""
return self._cf_getter(CFBoundaryVariable)
@property
def climatology(self):
"""Collection of CF-netCDF climatology variables."""
return self._cf_getter(CFClimatologyVariable)
@property
def coordinates(self):
"""Collection of CF-netCDF coordinate variables."""
return self._cf_getter(CFCoordinateVariable)
@property
def data_variables(self):
"""Collection of CF-netCDF data pay-load variables."""
return self._cf_getter(CFDataVariable)
@property
def formula_terms(self):
"""Collection of CF-netCDF variables that participate in a CF-netCDF formula term."""
return {cf_name: cf_var
for cf_name, cf_var in six.iteritems(self._cf_variables)
if cf_var.has_formula_terms()}
@property
def grid_mappings(self):
"""Collection of CF-netCDF grid mapping variables."""
return self._cf_getter(CFGridMappingVariable)
@property
def labels(self):
"""Collection of CF-netCDF label variables."""
return self._cf_getter(CFLabelVariable)
@property
def cell_measures(self):
"""Collection of CF-netCDF measure variables."""
return self._cf_getter(CFMeasureVariable)
def keys(self):
"""Return the names of all the CF-netCDF variables in the group."""
return self._cf_variables.keys()
def __len__(self):
return len(self._cf_variables)
def __iter__(self):
for item in self._cf_variables:
yield item
def __setitem__(self, name, variable):
if not isinstance(variable, CFVariable):
raise TypeError('Attempted to add an invalid CF-netCDF variable to the %s' % self.__class__.__name__)
if name != variable.cf_name:
raise ValueError('Mismatch between key name %r and CF-netCDF variable name %r' % (str(name), variable.cf_name))
self._cf_variables[name] = variable
def __getitem__(self, name):
if name not in self._cf_variables:
raise KeyError('Cannot get unknown CF-netCDF variable name %r' % str(name))
return self._cf_variables[name]
def __delitem__(self, name):
if name not in self._cf_variables:
raise KeyError('Cannot delete unknown CF-netcdf variable name %r' % str(name))
del self._cf_variables[name]
def __repr__(self):
result = []
result.append('variables:%d' % len(self._cf_variables))
result.append('global_attributes:%d' % len(self.global_attributes))
result.append('promoted:%d' % len(self.promoted))
return '<%s of %s>' % (self.__class__.__name__, ', '.join(result))
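# Illustrative note (names are placeholders): a CFGroup behaves like a
# mapping keyed by CF-netCDF variable name, e.g.
#
#     group = CFGroup()
#     group['ps'] = CFDataVariable('ps', dataset.variables['ps'])
#     'ps' in group    # True
#     len(group)       # 1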
################################################################################
class CFReader(object):
"""
This class allows the contents of a netCDF file to be interpreted according
to the 'NetCDF Climate and Forecast (CF) Metadata Conventions'.
"""
def __init__(self, filename, warn=False, monotonic=False):
self._filename = os.path.expanduser(filename)
# All CF variable types EXCEPT for the "special cases" of
# CFDataVariable, CFCoordinateVariable and _CFFormulaTermsVariable.
self._variable_types = (CFAncillaryDataVariable, CFAuxiliaryCoordinateVariable,
CFBoundaryVariable, CFClimatologyVariable,
CFGridMappingVariable, CFLabelVariable, CFMeasureVariable)
#: Collection of CF-netCDF variables associated with this netCDF file
self.cf_group = CFGroup()
self._dataset = netCDF4.Dataset(self._filename, mode='r')
# Issue load optimisation warning.
if warn and self._dataset.file_format in ['NETCDF3_CLASSIC', 'NETCDF3_64BIT']:
warnings.warn('Optimise CF-netCDF loading by converting data from NetCDF3 ' \
'to NetCDF4 file format using the "nccopy" command.')
self._check_monotonic = monotonic
self._translate()
self._build_cf_groups()
self._reset()
def __repr__(self):
return '%s(%r)' % (self.__class__.__name__, self._filename)
def _translate(self):
"""Classify the netCDF variables into CF-netCDF variables."""
netcdf_variable_names = list(self._dataset.variables.keys())
# Identify all CF coordinate variables first. This must be done
# first as, by CF convention, the definition of a CF auxiliary
# coordinate variable may include a scalar CF coordinate variable,
# whereas we want these two types of variables to be mutually exclusive.
coords = CFCoordinateVariable.identify(self._dataset.variables,
monotonic=self._check_monotonic)
self.cf_group.update(coords)
coordinate_names = list(self.cf_group.coordinates.keys())
# Identify all CF variables EXCEPT for the "special cases".
for variable_type in self._variable_types:
# Prevent grid mapping variables being mis-identified as CF coordinate variables.
ignore = None if issubclass(variable_type, CFGridMappingVariable) else coordinate_names
self.cf_group.update(variable_type.identify(self._dataset.variables, ignore=ignore))
# Identify global netCDF attributes.
attr_dict = {attr_name: _getncattr(self._dataset, attr_name, '') for
attr_name in self._dataset.ncattrs()}
self.cf_group.global_attributes.update(attr_dict)
# Identify and register all CF formula terms.
formula_terms = _CFFormulaTermsVariable.identify(self._dataset.variables)
for cf_var in six.itervalues(formula_terms):
for cf_root, cf_term in six.iteritems(cf_var.cf_terms_by_root):
# Ignore formula terms owned by a bounds variable.
if cf_root not in self.cf_group.bounds:
cf_name = cf_var.cf_name
if cf_var.cf_name not in self.cf_group:
self.cf_group[cf_name] = CFAuxiliaryCoordinateVariable(cf_name, cf_var.cf_data)
self.cf_group[cf_name].add_formula_term(cf_root, cf_term)
# Determine the CF data variables.
data_variable_names = set(netcdf_variable_names) - set(self.cf_group.ancillary_variables) - \
set(self.cf_group.auxiliary_coordinates) - set(self.cf_group.bounds) - \
set(self.cf_group.climatology) - set(self.cf_group.coordinates) - \
set(self.cf_group.grid_mappings) - set(self.cf_group.labels) - \
set(self.cf_group.cell_measures)
for name in data_variable_names:
self.cf_group[name] = CFDataVariable(name, self._dataset.variables[name])
def _build_cf_groups(self):
"""Build the first order relationships between CF-netCDF variables."""
def _build(cf_variable):
coordinate_names = list(self.cf_group.coordinates.keys())
cf_group = CFGroup()
# Build CF variable relationships.
for variable_type in self._variable_types:
# Prevent grid mapping variables being mis-identified as
# CF coordinate variables.
ignore = None if issubclass(variable_type, CFGridMappingVariable) else coordinate_names
match = variable_type.identify(self._dataset.variables, ignore=ignore,
target=cf_variable.cf_name, warn=False)
# Sanity check dimensionality coverage.
for cf_name, cf_var in six.iteritems(match):
if cf_var.spans(cf_variable):
cf_group[cf_name] = self.cf_group[cf_name]
else:
# Register the ignored variable.
# N.B. 'ignored' variable from enclosing scope.
ignored.add(cf_name)
msg = 'Ignoring variable {!r} referenced ' \
'by variable {!r}: Dimensions {!r} do not ' \
'span {!r}'.format(cf_name,
cf_variable.cf_name,
cf_var.dimensions,
cf_variable.dimensions)
warnings.warn(msg)
# Build CF data variable relationships.
if isinstance(cf_variable, CFDataVariable):
# Add global netCDF attributes.
cf_group.global_attributes.update(self.cf_group.global_attributes)
# Add appropriate "dimensioned" CF coordinate variables.
cf_group.update({cf_name: self.cf_group[cf_name] for cf_name
in cf_variable.dimensions if cf_name in
self.cf_group.coordinates})
# Add appropriate "dimensionless" CF coordinate variables.
coordinates_attr = getattr(cf_variable, 'coordinates', '')
cf_group.update({cf_name: self.cf_group[cf_name] for cf_name
in coordinates_attr.split() if cf_name in
self.cf_group.coordinates})
# Add appropriate formula terms.
for cf_var in six.itervalues(self.cf_group.formula_terms):
for cf_root in cf_var.cf_terms_by_root:
if cf_root in cf_group and cf_var.cf_name not in cf_group:
# Sanity check dimensionality.
if cf_var.spans(cf_variable):
cf_group[cf_var.cf_name] = cf_var
else:
# Register the ignored variable.
# N.B. 'ignored' variable from enclosing scope.
ignored.add(cf_var.cf_name)
msg = 'Ignoring formula terms variable {!r} ' \
'referenced by data variable {!r} via ' \
'variable {!r}: Dimensions {!r} do not ' \
'span {!r}'.format(cf_var.cf_name,
cf_variable.cf_name,
cf_root,
cf_var.dimensions,
cf_variable.dimensions)
warnings.warn(msg)
# Add the CF group to the variable.
cf_variable.cf_group = cf_group
# Ignored variables are those that cannot be attached to a
# data variable as the dimensionality of that variable is not
# a subset of the dimensionality of the data variable.
ignored = set()
for cf_variable in six.itervalues(self.cf_group):
_build(cf_variable)
# Determine whether there are any formula terms that
# may be promoted to a CFDataVariable.
if iris.FUTURE.netcdf_promote:
# Restrict promotion to only those formula terms
# that are reference surface/phenomenon.
for cf_var in six.itervalues(self.cf_group.formula_terms):
for cf_root, cf_term in six.iteritems(cf_var.cf_terms_by_root):
cf_root_var = self.cf_group[cf_root]
name = cf_root_var.standard_name or cf_root_var.long_name
terms = reference_terms.get(name, [])
if isinstance(terms, six.string_types) or \
not isinstance(terms, Iterable):
terms = [terms]
cf_var_name = cf_var.cf_name
if cf_term in terms and \
cf_var_name not in self.cf_group.promoted:
data_var = CFDataVariable(cf_var_name, cf_var.cf_data)
self.cf_group.promoted[cf_var_name] = data_var
_build(data_var)
break
# Promote any ignored variables.
promoted = set()
not_promoted = ignored.difference(promoted)
while not_promoted:
cf_name = not_promoted.pop()
if cf_name not in self.cf_group.data_variables and \
cf_name not in self.cf_group.promoted:
data_var = CFDataVariable(cf_name,
self.cf_group[cf_name].cf_data)
self.cf_group.promoted[cf_name] = data_var
_build(data_var)
# Determine whether there are still any ignored variables
# yet to be promoted.
promoted.add(cf_name)
not_promoted = ignored.difference(promoted)
def _reset(self):
"""Reset the attribute touch history of each variable."""
for nc_var_name in six.iterkeys(self._dataset.variables):
self.cf_group[nc_var_name].cf_attrs_reset()
def __del__(self):
# Explicitly close dataset to prevent file remaining open.
self._dataset.close()
def _getncattr(dataset, attr, default=None):
"""
Simple wrapper around `netCDF4.Dataset.getncattr` to make it behave
more like `getattr`.
"""
try:
value = dataset.getncattr(attr)
except AttributeError:
value = default
return value
|
diox/olympia | refs/heads/master | src/olympia/promoted/migrations/0007_auto_20200924_1328.py | 5 |
# Generated by Django 2.2.16 on 2020-09-24 13:28
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('promoted', '0006_auto_20200819_0951'),
]
operations = [
migrations.AddField(
model_name='promotedapproval',
name='application_id',
field=models.SmallIntegerField(choices=[(1, 'Firefox'), (61, 'Firefox for Android')], default=None, null=True, verbose_name='Application'),
),
migrations.AlterField(
model_name='promotedaddon',
name='group_id',
field=models.SmallIntegerField(choices=[(0, 'Not Promoted'), (1, 'Recommended'), (2, 'Sponsored'), (3, 'Verified'), (4, 'By Firefox'), (5, 'Spotlight'), (6, 'Strategic')], default=0, help_text='Can be set to Not Promoted to disable promotion without deleting it. Note: changing the group does *not* change approvals of versions.', verbose_name='Group'),
),
migrations.AlterField(
model_name='promotedapproval',
name='group_id',
field=models.SmallIntegerField(choices=[(1, 'Recommended'), (2, 'Sponsored'), (3, 'Verified'), (4, 'By Firefox'), (5, 'Spotlight')], null=True, verbose_name='Group'),
),
migrations.RemoveConstraint(
model_name='promotedapproval',
name='unique_promoted_version',
),
migrations.AddConstraint(
model_name='promotedapproval',
constraint=models.UniqueConstraint(fields=('group_id', 'version', 'application_id'), name='unique_promoted_version'),
),
]
|
nhicher/ansible | refs/heads/devel | lib/ansible/compat/selectors/_selectors2.py | 36 |
# This file is from the selectors2.py package. It backports the PSF Licensed
# selectors module from the Python-3.5 stdlib to older versions of Python.
# The author, Seth Michael Larson, dual licenses his modifications under the
# PSF License and MIT License:
# https://github.com/SethMichaelLarson/selectors2#license
#
# Copyright (c) 2016 Seth Michael Larson
#
# PSF License (see licenses/PSF-license.txt or https://opensource.org/licenses/Python-2.0)
# MIT License (see licenses/MIT-license.txt or https://opensource.org/licenses/MIT)
#
# Backport of selectors.py from Python 3.5+ to support Python < 3.4
# Also has the behavior specified in PEP 475 which is to retry syscalls
# in the case of an EINTR error. This module is required because selectors34
# does not follow this behavior and instead returns that no file descriptor
# events have occurred rather than retry the syscall. The decision to drop
# support for select.devpoll is made to maintain 100% test coverage.
import errno
import math
import select
import socket
import sys
import time
from collections import namedtuple
from ansible.module_utils.common._collections_compat import Mapping
try:
monotonic = time.monotonic
except (AttributeError, ImportError): # Python < 3.3
monotonic = time.time
__author__ = 'Seth Michael Larson'
__email__ = 'sethmichaellarson@protonmail.com'
__version__ = '1.1.0'
__license__ = 'MIT'
__all__ = [
'EVENT_READ',
'EVENT_WRITE',
'SelectorError',
'SelectorKey',
'DefaultSelector'
]
EVENT_READ = (1 << 0)
EVENT_WRITE = (1 << 1)
HAS_SELECT = True # Variable that shows whether the platform has a selector.
_SYSCALL_SENTINEL = object() # Sentinel in case a system call returns None.
class SelectorError(Exception):
def __init__(self, errcode):
super(SelectorError, self).__init__()
self.errno = errcode
def __repr__(self):
return "<SelectorError errno={0}>".format(self.errno)
def __str__(self):
return self.__repr__()
def _fileobj_to_fd(fileobj):
""" Return a file descriptor from a file object. If
given an integer will simply return that integer back. """
if isinstance(fileobj, int):
fd = fileobj
else:
try:
fd = int(fileobj.fileno())
except (AttributeError, TypeError, ValueError):
raise ValueError("Invalid file object: {0!r}".format(fileobj))
if fd < 0:
raise ValueError("Invalid file descriptor: {0}".format(fd))
return fd
# Python 3.5 uses a more direct route to wrap system calls to increase speed.
if sys.version_info >= (3, 5):
def _syscall_wrapper(func, _, *args, **kwargs):
""" This is the short-circuit version of the below logic
because in Python 3.5+ all selectors restart system calls. """
try:
return func(*args, **kwargs)
except (OSError, IOError, select.error) as e:
errcode = None
if hasattr(e, "errno"):
errcode = e.errno
elif hasattr(e, "args"):
errcode = e.args[0]
raise SelectorError(errcode)
else:
def _syscall_wrapper(func, recalc_timeout, *args, **kwargs):
""" Wrapper function for syscalls that could fail due to EINTR.
All functions should be retried if there is time left in the timeout
in accordance with PEP 475. """
timeout = kwargs.get("timeout", None)
if timeout is None:
expires = None
recalc_timeout = False
else:
timeout = float(timeout)
if timeout < 0.0: # Timeout less than 0 treated as no timeout.
expires = None
else:
expires = monotonic() + timeout
args = list(args)
if recalc_timeout and "timeout" not in kwargs:
raise ValueError(
"Timeout must be in args or kwargs to be recalculated")
result = _SYSCALL_SENTINEL
while result is _SYSCALL_SENTINEL:
try:
result = func(*args, **kwargs)
# OSError is thrown by select.select
# IOError is thrown by select.epoll.poll
# select.error is thrown by select.poll.poll
# Aren't we thankful for Python 3.x rework for exceptions?
except (OSError, IOError, select.error) as e:
# select.error wasn't a subclass of OSError in the past.
errcode = None
if hasattr(e, "errno"):
errcode = e.errno
elif hasattr(e, "args"):
errcode = e.args[0]
# Also test for the Windows equivalent of EINTR.
is_interrupt = (errcode == errno.EINTR or (hasattr(errno, "WSAEINTR") and
errcode == errno.WSAEINTR))
if is_interrupt:
if expires is not None:
current_time = monotonic()
if current_time > expires:
raise OSError(errno.ETIMEDOUT)
if recalc_timeout:
if "timeout" in kwargs:
kwargs["timeout"] = expires - current_time
continue
if errcode:
raise SelectorError(errcode)
else:
raise
return result
SelectorKey = namedtuple('SelectorKey', ['fileobj', 'fd', 'events', 'data'])
class _SelectorMapping(Mapping):
""" Mapping of file objects to selector keys """
def __init__(self, selector):
self._selector = selector
def __len__(self):
return len(self._selector._fd_to_key)
def __getitem__(self, fileobj):
try:
fd = self._selector._fileobj_lookup(fileobj)
return self._selector._fd_to_key[fd]
except KeyError:
raise KeyError("{0!r} is not registered.".format(fileobj))
def __iter__(self):
return iter(self._selector._fd_to_key)
class BaseSelector(object):
""" Abstract Selector class
A selector supports registering file objects to be monitored
for specific I/O events.
A file object is a file descriptor or any object with a
`fileno()` method. An arbitrary object can be attached to the
file object which can be used for example to store context info,
a callback, etc.
A selector can use various implementations (select(), poll(), epoll(),
and kqueue()) depending on the platform. The 'DefaultSelector' class uses
the most efficient implementation for the current platform.
"""
def __init__(self):
# Maps file descriptors to keys.
self._fd_to_key = {}
# Read-only mapping returned by get_map()
self._map = _SelectorMapping(self)
def _fileobj_lookup(self, fileobj):
""" Return a file descriptor from a file object.
This wraps _fileobj_to_fd() to do an exhaustive
search in case the object is invalid but we still
have it in our map. Used by unregister() so we can
unregister an object that was previously registered
even if it is closed. It is also used by _SelectorMapping.
"""
try:
return _fileobj_to_fd(fileobj)
except ValueError:
# Search through all our mapped keys.
for key in self._fd_to_key.values():
if key.fileobj is fileobj:
return key.fd
# Raise ValueError after all.
raise
def register(self, fileobj, events, data=None):
""" Register a file object for a set of events to monitor. """
if (not events) or (events & ~(EVENT_READ | EVENT_WRITE)):
raise ValueError("Invalid events: {0!r}".format(events))
key = SelectorKey(fileobj, self._fileobj_lookup(fileobj), events, data)
if key.fd in self._fd_to_key:
raise KeyError("{0!r} (FD {1}) is already registered"
.format(fileobj, key.fd))
self._fd_to_key[key.fd] = key
return key
def unregister(self, fileobj):
""" Unregister a file object from being monitored. """
try:
key = self._fd_to_key.pop(self._fileobj_lookup(fileobj))
except KeyError:
raise KeyError("{0!r} is not registered".format(fileobj))
# Getting the fileno of a closed socket on Windows errors with EBADF.
except socket.error as err:
if err.errno != errno.EBADF:
raise
else:
for key in self._fd_to_key.values():
if key.fileobj is fileobj:
self._fd_to_key.pop(key.fd)
break
else:
raise KeyError("{0!r} is not registered".format(fileobj))
return key
def modify(self, fileobj, events, data=None):
""" Change a registered file object monitored events and data. """
# NOTE: Some subclasses optimize this operation even further.
try:
key = self._fd_to_key[self._fileobj_lookup(fileobj)]
except KeyError:
raise KeyError("{0!r} is not registered".format(fileobj))
if events != key.events:
self.unregister(fileobj)
key = self.register(fileobj, events, data)
elif data != key.data:
# Use a shortcut to update the data.
key = key._replace(data=data)
self._fd_to_key[key.fd] = key
return key
def select(self, timeout=None):
""" Perform the actual selection until some monitored file objects
are ready or the timeout expires. """
raise NotImplementedError()
def close(self):
""" Close the selector. This must be called to ensure that all
underlying resources are freed. """
self._fd_to_key.clear()
self._map = None
def get_key(self, fileobj):
""" Return the key associated with a registered file object. """
mapping = self.get_map()
if mapping is None:
raise RuntimeError("Selector is closed")
try:
return mapping[fileobj]
except KeyError:
raise KeyError("{0!r} is not registered".format(fileobj))
def get_map(self):
""" Return a mapping of file objects to selector keys """
return self._map
def _key_from_fd(self, fd):
""" Return the key associated to a given file descriptor
Return None if it is not found. """
try:
return self._fd_to_key[fd]
except KeyError:
return None
def __enter__(self):
return self
def __exit__(self, *args):
self.close()
# Almost all platforms have select.select()
if hasattr(select, "select"):
class SelectSelector(BaseSelector):
""" Select-based selector. """
def __init__(self):
super(SelectSelector, self).__init__()
self._readers = set()
self._writers = set()
def register(self, fileobj, events, data=None):
key = super(SelectSelector, self).register(fileobj, events, data)
if events & EVENT_READ:
self._readers.add(key.fd)
if events & EVENT_WRITE:
self._writers.add(key.fd)
return key
def unregister(self, fileobj):
key = super(SelectSelector, self).unregister(fileobj)
self._readers.discard(key.fd)
self._writers.discard(key.fd)
return key
def _select(self, r, w, timeout=None):
""" Wrapper for select.select because timeout is a positional arg """
return select.select(r, w, [], timeout)
def select(self, timeout=None):
# Selecting on empty lists on Windows errors out.
if not len(self._readers) and not len(self._writers):
return []
timeout = None if timeout is None else max(timeout, 0.0)
ready = []
r, w, _ = _syscall_wrapper(self._select, True, self._readers,
self._writers, timeout=timeout)
r = set(r)
w = set(w)
for fd in r | w:
events = 0
if fd in r:
events |= EVENT_READ
if fd in w:
events |= EVENT_WRITE
key = self._key_from_fd(fd)
if key:
ready.append((key, events & key.events))
return ready
__all__.append('SelectSelector')
if hasattr(select, "poll"):
class PollSelector(BaseSelector):
""" Poll-based selector """
def __init__(self):
super(PollSelector, self).__init__()
self._poll = select.poll()
def register(self, fileobj, events, data=None):
key = super(PollSelector, self).register(fileobj, events, data)
event_mask = 0
if events & EVENT_READ:
event_mask |= select.POLLIN
if events & EVENT_WRITE:
event_mask |= select.POLLOUT
self._poll.register(key.fd, event_mask)
return key
def unregister(self, fileobj):
key = super(PollSelector, self).unregister(fileobj)
self._poll.unregister(key.fd)
return key
def _wrap_poll(self, timeout=None):
""" Wrapper function for select.poll.poll() so that
_syscall_wrapper can work with only seconds. """
if timeout is not None:
if timeout <= 0:
timeout = 0
else:
# select.poll.poll() has a resolution of 1 millisecond,
# round away from zero to wait *at least* timeout seconds.
timeout = math.ceil(timeout * 1e3)
result = self._poll.poll(timeout)
return result
def select(self, timeout=None):
ready = []
fd_events = _syscall_wrapper(self._wrap_poll, True, timeout=timeout)
for fd, event_mask in fd_events:
events = 0
if event_mask & ~select.POLLIN:
events |= EVENT_WRITE
if event_mask & ~select.POLLOUT:
events |= EVENT_READ
key = self._key_from_fd(fd)
if key:
ready.append((key, events & key.events))
return ready
__all__.append('PollSelector')
if hasattr(select, "epoll"):
class EpollSelector(BaseSelector):
""" Epoll-based selector """
def __init__(self):
super(EpollSelector, self).__init__()
self._epoll = select.epoll()
def fileno(self):
return self._epoll.fileno()
def register(self, fileobj, events, data=None):
key = super(EpollSelector, self).register(fileobj, events, data)
events_mask = 0
if events & EVENT_READ:
events_mask |= select.EPOLLIN
if events & EVENT_WRITE:
events_mask |= select.EPOLLOUT
_syscall_wrapper(self._epoll.register, False, key.fd, events_mask)
return key
def unregister(self, fileobj):
key = super(EpollSelector, self).unregister(fileobj)
try:
_syscall_wrapper(self._epoll.unregister, False, key.fd)
except SelectorError:
# This can occur when the fd was closed since it was registered.
pass
return key
def select(self, timeout=None):
if timeout is not None:
if timeout <= 0:
timeout = 0.0
else:
# select.epoll.poll() has a resolution of 1 millisecond
# but luckily takes seconds so we don't need a wrapper
# like PollSelector. Just for better rounding.
timeout = math.ceil(timeout * 1e3) * 1e-3
timeout = float(timeout)
else:
timeout = -1.0 # epoll.poll() must have a float.
# We always want at least 1 to ensure that select can be called
# with no file descriptors registered; otherwise it will fail.
max_events = max(len(self._fd_to_key), 1)
ready = []
fd_events = _syscall_wrapper(self._epoll.poll, True,
timeout=timeout,
maxevents=max_events)
for fd, event_mask in fd_events:
events = 0
if event_mask & ~select.EPOLLIN:
events |= EVENT_WRITE
if event_mask & ~select.EPOLLOUT:
events |= EVENT_READ
key = self._key_from_fd(fd)
if key:
ready.append((key, events & key.events))
return ready
def close(self):
self._epoll.close()
super(EpollSelector, self).close()
__all__.append('EpollSelector')
if hasattr(select, "devpoll"):
class DevpollSelector(BaseSelector):
"""Solaris /dev/poll selector."""
def __init__(self):
super(DevpollSelector, self).__init__()
self._devpoll = select.devpoll()
def fileno(self):
return self._devpoll.fileno()
def register(self, fileobj, events, data=None):
key = super(DevpollSelector, self).register(fileobj, events, data)
poll_events = 0
if events & EVENT_READ:
poll_events |= select.POLLIN
if events & EVENT_WRITE:
poll_events |= select.POLLOUT
self._devpoll.register(key.fd, poll_events)
return key
def unregister(self, fileobj):
key = super(DevpollSelector, self).unregister(fileobj)
self._devpoll.unregister(key.fd)
return key
def _wrap_poll(self, timeout=None):
""" Wrapper function for select.poll.poll() so that
_syscall_wrapper can work with only seconds. """
if timeout is not None:
if timeout <= 0:
timeout = 0
else:
# select.devpoll.poll() has a resolution of 1 millisecond,
# round away from zero to wait *at least* timeout seconds.
timeout = math.ceil(timeout * 1e3)
result = self._devpoll.poll(timeout)
return result
def select(self, timeout=None):
ready = []
fd_events = _syscall_wrapper(self._wrap_poll, True, timeout=timeout)
for fd, event_mask in fd_events:
events = 0
if event_mask & ~select.POLLIN:
events |= EVENT_WRITE
if event_mask & ~select.POLLOUT:
events |= EVENT_READ
key = self._key_from_fd(fd)
if key:
ready.append((key, events & key.events))
return ready
def close(self):
self._devpoll.close()
super(DevpollSelector, self).close()
__all__.append('DevpollSelector')
if hasattr(select, "kqueue"):
class KqueueSelector(BaseSelector):
""" Kqueue / Kevent-based selector """
def __init__(self):
super(KqueueSelector, self).__init__()
self._kqueue = select.kqueue()
def fileno(self):
return self._kqueue.fileno()
def register(self, fileobj, events, data=None):
key = super(KqueueSelector, self).register(fileobj, events, data)
if events & EVENT_READ:
kevent = select.kevent(key.fd,
select.KQ_FILTER_READ,
select.KQ_EV_ADD)
_syscall_wrapper(self._wrap_control, False, [kevent], 0, 0)
if events & EVENT_WRITE:
kevent = select.kevent(key.fd,
select.KQ_FILTER_WRITE,
select.KQ_EV_ADD)
_syscall_wrapper(self._wrap_control, False, [kevent], 0, 0)
return key
def unregister(self, fileobj):
key = super(KqueueSelector, self).unregister(fileobj)
if key.events & EVENT_READ:
kevent = select.kevent(key.fd,
select.KQ_FILTER_READ,
select.KQ_EV_DELETE)
try:
_syscall_wrapper(self._wrap_control, False, [kevent], 0, 0)
except SelectorError:
pass
if key.events & EVENT_WRITE:
kevent = select.kevent(key.fd,
select.KQ_FILTER_WRITE,
select.KQ_EV_DELETE)
try:
_syscall_wrapper(self._wrap_control, False, [kevent], 0, 0)
except SelectorError:
pass
return key
def select(self, timeout=None):
if timeout is not None:
timeout = max(timeout, 0)
max_events = len(self._fd_to_key) * 2
ready_fds = {}
kevent_list = _syscall_wrapper(self._wrap_control, True,
None, max_events, timeout=timeout)
for kevent in kevent_list:
fd = kevent.ident
event_mask = kevent.filter
events = 0
if event_mask == select.KQ_FILTER_READ:
events |= EVENT_READ
if event_mask == select.KQ_FILTER_WRITE:
events |= EVENT_WRITE
key = self._key_from_fd(fd)
if key:
if key.fd not in ready_fds:
ready_fds[key.fd] = (key, events & key.events)
else:
old_events = ready_fds[key.fd][1]
ready_fds[key.fd] = (key, (events | old_events) & key.events)
return list(ready_fds.values())
def close(self):
self._kqueue.close()
super(KqueueSelector, self).close()
def _wrap_control(self, changelist, max_events, timeout):
return self._kqueue.control(changelist, max_events, timeout)
__all__.append('KqueueSelector')
# Choose the best implementation, roughly:
# kqueue == epoll == devpoll > poll > select.
# select() also can't accept a FD > FD_SETSIZE (usually around 1024)
if 'KqueueSelector' in globals(): # Platform-specific: Mac OS and BSD
DefaultSelector = KqueueSelector
elif 'DevpollSelector' in globals():
DefaultSelector = DevpollSelector
elif 'EpollSelector' in globals(): # Platform-specific: Linux
DefaultSelector = EpollSelector
elif 'PollSelector' in globals(): # Platform-specific: Linux
DefaultSelector = PollSelector
elif 'SelectSelector' in globals(): # Platform-specific: Windows
DefaultSelector = SelectSelector
else: # Platform-specific: AppEngine
def no_selector(_):
raise ValueError("Platform does not have a selector")
DefaultSelector = no_selector
HAS_SELECT = False
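# A minimal usage sketch (illustrative only; `sock` is assumed to be an
# already-connected, readable socket):
#
#     sel = DefaultSelector()
#     sel.register(sock, EVENT_READ, data='example')
#     for key, events in sel.select(timeout=1.0):
#         if events & EVENT_READ:
#             key.fileobj.recv(4096)
#     sel.unregister(sock)
#     sel.close()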
|
aajtodd/zipline | refs/heads/master | tests/test_execution_styles.py | 34 |
#
# Copyright 2014 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import TestCase
from six.moves import range
from nose_parameterized import parameterized
from zipline.finance.execution import (
LimitOrder,
MarketOrder,
StopLimitOrder,
StopOrder,
)
from zipline.utils.test_utils import(
setup_logger,
teardown_logger,
)
from zipline.errors import(
BadOrderParameters
)
class ExecutionStyleTestCase(TestCase):
"""
Tests for zipline ExecutionStyle classes.
"""
epsilon = .000001
# Input, expected on limit buy/stop sell, expected on limit sell/stop buy.
EXPECTED_PRICE_ROUNDING = [
(0.00, 0.00, 0.00),
(0.0005, 0.00, 0.00),
(1.0005, 1.00, 1.00), # Lowest value to round down on sell.
(1.0005 + epsilon, 1.00, 1.01),
(1.0095 - epsilon, 1.0, 1.01),
(1.0095, 1.01, 1.01), # Highest value to round up on buy.
(0.01, 0.01, 0.01)
]
# Test that the same rounding behavior is maintained if we add between 1
# and 10 to all values, because floating point math is made of lies.
EXPECTED_PRICE_ROUNDING += [
(x + delta, y + delta, z + delta)
for (x, y, z) in EXPECTED_PRICE_ROUNDING
for delta in range(1, 10)
]
class ArbitraryObject():
def __str__(self):
return """This should yield a bad order error when
passed as a stop or limit price."""
INVALID_PRICES = [
(-1,),
(-1.0,),
(0 - epsilon,),
(float('nan'),),
(float('inf'),),
(ArbitraryObject(),),
]
def setUp(self):
setup_logger(self)
def tearDown(self):
teardown_logger(self)
@parameterized.expand(INVALID_PRICES)
def test_invalid_prices(self, price):
"""
Test that execution styles throw appropriate exceptions upon receipt
of an invalid price field.
"""
with self.assertRaises(BadOrderParameters):
LimitOrder(price)
with self.assertRaises(BadOrderParameters):
StopOrder(price)
for lmt, stp in [(price, 1), (1, price), (price, price)]:
with self.assertRaises(BadOrderParameters):
StopLimitOrder(lmt, stp)
def test_market_order_prices(self):
"""
Basic unit tests for the MarketOrder class.
"""
style = MarketOrder()
self.assertEqual(style.get_limit_price(True), None)
self.assertEqual(style.get_limit_price(False), None)
self.assertEqual(style.get_stop_price(True), None)
self.assertEqual(style.get_stop_price(False), None)
@parameterized.expand(EXPECTED_PRICE_ROUNDING)
def test_limit_order_prices(self,
price,
expected_limit_buy_or_stop_sell,
expected_limit_sell_or_stop_buy):
"""
Test price getters for the LimitOrder class.
"""
style = LimitOrder(price)
self.assertEqual(expected_limit_buy_or_stop_sell,
style.get_limit_price(True))
self.assertEqual(expected_limit_sell_or_stop_buy,
style.get_limit_price(False))
self.assertEqual(None, style.get_stop_price(True))
self.assertEqual(None, style.get_stop_price(False))
@parameterized.expand(EXPECTED_PRICE_ROUNDING)
def test_stop_order_prices(self,
price,
expected_limit_buy_or_stop_sell,
expected_limit_sell_or_stop_buy):
"""
Test price getters for StopOrder class. Note that the expected rounding
direction for stop prices is the reverse of that for limit prices.
"""
style = StopOrder(price)
self.assertEqual(None, style.get_limit_price(False))
self.assertEqual(None, style.get_limit_price(True))
self.assertEqual(expected_limit_buy_or_stop_sell,
style.get_stop_price(False))
self.assertEqual(expected_limit_sell_or_stop_buy,
style.get_stop_price(True))
@parameterized.expand(EXPECTED_PRICE_ROUNDING)
def test_stop_limit_order_prices(self,
price,
expected_limit_buy_or_stop_sell,
expected_limit_sell_or_stop_buy):
"""
Test price getters for StopLimitOrder class. Note that the expected
rounding direction for stop prices is the reverse of that for limit
prices.
"""
style = StopLimitOrder(price, price + 1)
self.assertEqual(expected_limit_buy_or_stop_sell,
style.get_limit_price(True))
self.assertEqual(expected_limit_sell_or_stop_buy,
style.get_limit_price(False))
self.assertEqual(expected_limit_buy_or_stop_sell + 1,
style.get_stop_price(False))
self.assertEqual(expected_limit_sell_or_stop_buy + 1,
style.get_stop_price(True))
|
wreckJ/intellij-community
|
refs/heads/master
|
python/testData/mover/multiLineSelection9.py
|
83
|
class Test(object):
<caret><selection> a = 1
b = 2
</selection> def q(self):
c = 3
|
anthonynguyen/UrTSB
|
refs/heads/master
|
urtsb_src/ui/rconpassdialog.py
|
3
|
#
# Copyright (C) 2010 Sorcerer
#
# This file is part of UrTSB.
#
# UrTSB is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# UrTSB is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with UrTSB. If not, see <http://www.gnu.org/licenses/>.
#
from rconwindow import RconWindow
from urtsb_src.filemanager import FileManager
import gtk
class RconPassDialog(gtk.Dialog):
"""
A dialog for entering the RCON password for a game server
"""
def __init__(self, server):
"""
Constructor
@param server - the server which needs a password
"""
gtk.Dialog.__init__(self, 'RCON Password needed!', None,\
gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT)
self.server = server
#buttons
okbutton = gtk.Button('OK')
cancelbutton = gtk.Button('Cancel')
okbutton.connect("clicked", self.on_ok_clicked)
cancelbutton.connect("clicked", self.on_cancel_clicked)
self.action_area.pack_start(cancelbutton, False, False)
self.action_area.pack_start(okbutton, False, False)
#content area
desc_label = gtk.Label('Please enter the RCON password for:')
namelabel = gtk.Label('Servername: ' + server.getName())
addresslabel = gtk.Label('Serveraddress: ' + server.getaddress())
self.passentry = gtk.Entry()
self.passentry.set_visibility(False)
self.passentry.set_text('')
self.remembercheckbutton = gtk.CheckButton('remember password')
self.remembercheckbutton.set_active(False)
self.vbox.pack_start(desc_label, False, False)
self.vbox.pack_start(namelabel, False, False)
self.vbox.pack_start(addresslabel, False, False)
self.vbox.pack_start(self.passentry, False, False)
self.vbox.pack_start(self.remembercheckbutton, False, False)
#check if there is a stored rcon pw for the server
fm = FileManager()
rconpws = fm.get_rcon_passwords()
if server.getaddress() in rconpws:
pw = rconpws[server.getaddress()]
self.passentry.set_text(pw)
self.remembercheckbutton.set_active(True)
self.show_all()
def on_ok_clicked(self, widget):
"""
Callback of the OK button
"""
#get the entered password
self.server.rconpass = self.passentry.get_text()
#store the password if checkbox is active
if self.remembercheckbutton.get_active():
fm = FileManager()
rconpws = fm.get_rcon_passwords()
rconpws[self.server.getaddress()] = self.server.rconpass
fm.save_rcon_passwords()
#display the rcon window
        rconwin = RconWindow(self.server)
rconwin.show_now()
self.destroy()
    def on_cancel_clicked(self, widget):
"""
Callback of the Cancel button
"""
#do nothing just close the dialog
self.destroy()
|
Kabele/ZCR-Shellcoder
|
refs/heads/master
|
lib/generator/freebsd_x64/write.py
|
80
|
#!/usr/bin/env python
'''
ZCR Shellcoder
ZeroDay Cyber Research
Z3r0D4y.Com
Ali Razmjoo
'''
def run(filename,content):
return 'N'
|
mjgrav2001/scikit-learn
|
refs/heads/master
|
sklearn/utils/tests/test_sparsefuncs.py
|
157
|
import numpy as np
import scipy.sparse as sp
from scipy import linalg
from numpy.testing import assert_array_almost_equal, assert_array_equal
from sklearn.datasets import make_classification
from sklearn.utils.sparsefuncs import (mean_variance_axis,
inplace_column_scale,
inplace_row_scale,
inplace_swap_row, inplace_swap_column,
min_max_axis,
count_nonzero, csc_median_axis_0)
from sklearn.utils.sparsefuncs_fast import assign_rows_csr
from sklearn.utils.testing import assert_raises
def test_mean_variance_axis0():
X, _ = make_classification(5, 4, random_state=0)
# Sparsify the array a little bit
X[0, 0] = 0
X[2, 1] = 0
X[4, 3] = 0
X_lil = sp.lil_matrix(X)
X_lil[1, 0] = 0
X[1, 0] = 0
X_csr = sp.csr_matrix(X_lil)
X_means, X_vars = mean_variance_axis(X_csr, axis=0)
assert_array_almost_equal(X_means, np.mean(X, axis=0))
assert_array_almost_equal(X_vars, np.var(X, axis=0))
X_csc = sp.csc_matrix(X_lil)
X_means, X_vars = mean_variance_axis(X_csc, axis=0)
assert_array_almost_equal(X_means, np.mean(X, axis=0))
assert_array_almost_equal(X_vars, np.var(X, axis=0))
assert_raises(TypeError, mean_variance_axis, X_lil, axis=0)
X = X.astype(np.float32)
X_csr = X_csr.astype(np.float32)
    X_csc = X_csc.astype(np.float32)
X_means, X_vars = mean_variance_axis(X_csr, axis=0)
assert_array_almost_equal(X_means, np.mean(X, axis=0))
assert_array_almost_equal(X_vars, np.var(X, axis=0))
X_means, X_vars = mean_variance_axis(X_csc, axis=0)
assert_array_almost_equal(X_means, np.mean(X, axis=0))
assert_array_almost_equal(X_vars, np.var(X, axis=0))
assert_raises(TypeError, mean_variance_axis, X_lil, axis=0)
def test_mean_variance_illegal_axis():
X, _ = make_classification(5, 4, random_state=0)
# Sparsify the array a little bit
X[0, 0] = 0
X[2, 1] = 0
X[4, 3] = 0
X_csr = sp.csr_matrix(X)
assert_raises(ValueError, mean_variance_axis, X_csr, axis=-3)
assert_raises(ValueError, mean_variance_axis, X_csr, axis=2)
assert_raises(ValueError, mean_variance_axis, X_csr, axis=-1)
def test_mean_variance_axis1():
X, _ = make_classification(5, 4, random_state=0)
# Sparsify the array a little bit
X[0, 0] = 0
X[2, 1] = 0
X[4, 3] = 0
X_lil = sp.lil_matrix(X)
X_lil[1, 0] = 0
X[1, 0] = 0
X_csr = sp.csr_matrix(X_lil)
X_means, X_vars = mean_variance_axis(X_csr, axis=1)
assert_array_almost_equal(X_means, np.mean(X, axis=1))
assert_array_almost_equal(X_vars, np.var(X, axis=1))
X_csc = sp.csc_matrix(X_lil)
X_means, X_vars = mean_variance_axis(X_csc, axis=1)
assert_array_almost_equal(X_means, np.mean(X, axis=1))
assert_array_almost_equal(X_vars, np.var(X, axis=1))
assert_raises(TypeError, mean_variance_axis, X_lil, axis=1)
X = X.astype(np.float32)
X_csr = X_csr.astype(np.float32)
    X_csc = X_csc.astype(np.float32)
X_means, X_vars = mean_variance_axis(X_csr, axis=1)
assert_array_almost_equal(X_means, np.mean(X, axis=1))
assert_array_almost_equal(X_vars, np.var(X, axis=1))
X_means, X_vars = mean_variance_axis(X_csc, axis=1)
assert_array_almost_equal(X_means, np.mean(X, axis=1))
assert_array_almost_equal(X_vars, np.var(X, axis=1))
assert_raises(TypeError, mean_variance_axis, X_lil, axis=1)
def test_densify_rows():
X = sp.csr_matrix([[0, 3, 0],
[2, 4, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_rows = np.array([0, 2, 3], dtype=np.intp)
out = np.ones((6, X.shape[1]), dtype=np.float64)
out_rows = np.array([1, 3, 4], dtype=np.intp)
expect = np.ones_like(out)
expect[out_rows] = X[X_rows, :].toarray()
assign_rows_csr(X, X_rows, out_rows, out)
assert_array_equal(out, expect)
def test_inplace_column_scale():
rng = np.random.RandomState(0)
X = sp.rand(100, 200, 0.05)
Xr = X.tocsr()
Xc = X.tocsc()
XA = X.toarray()
scale = rng.rand(200)
XA *= scale
inplace_column_scale(Xc, scale)
inplace_column_scale(Xr, scale)
assert_array_almost_equal(Xr.toarray(), Xc.toarray())
assert_array_almost_equal(XA, Xc.toarray())
assert_array_almost_equal(XA, Xr.toarray())
assert_raises(TypeError, inplace_column_scale, X.tolil(), scale)
X = X.astype(np.float32)
scale = scale.astype(np.float32)
Xr = X.tocsr()
Xc = X.tocsc()
XA = X.toarray()
XA *= scale
inplace_column_scale(Xc, scale)
inplace_column_scale(Xr, scale)
assert_array_almost_equal(Xr.toarray(), Xc.toarray())
assert_array_almost_equal(XA, Xc.toarray())
assert_array_almost_equal(XA, Xr.toarray())
assert_raises(TypeError, inplace_column_scale, X.tolil(), scale)
def test_inplace_row_scale():
rng = np.random.RandomState(0)
X = sp.rand(100, 200, 0.05)
Xr = X.tocsr()
Xc = X.tocsc()
XA = X.toarray()
scale = rng.rand(100)
XA *= scale.reshape(-1, 1)
inplace_row_scale(Xc, scale)
inplace_row_scale(Xr, scale)
assert_array_almost_equal(Xr.toarray(), Xc.toarray())
assert_array_almost_equal(XA, Xc.toarray())
assert_array_almost_equal(XA, Xr.toarray())
assert_raises(TypeError, inplace_column_scale, X.tolil(), scale)
X = X.astype(np.float32)
scale = scale.astype(np.float32)
Xr = X.tocsr()
Xc = X.tocsc()
XA = X.toarray()
XA *= scale.reshape(-1, 1)
inplace_row_scale(Xc, scale)
inplace_row_scale(Xr, scale)
assert_array_almost_equal(Xr.toarray(), Xc.toarray())
assert_array_almost_equal(XA, Xc.toarray())
assert_array_almost_equal(XA, Xr.toarray())
assert_raises(TypeError, inplace_column_scale, X.tolil(), scale)
def test_inplace_swap_row():
X = np.array([[0, 3, 0],
[2, 4, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
swap = linalg.get_blas_funcs(('swap',), (X,))
swap = swap[0]
X[0], X[-1] = swap(X[0], X[-1])
inplace_swap_row(X_csr, 0, -1)
inplace_swap_row(X_csc, 0, -1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
X[2], X[3] = swap(X[2], X[3])
inplace_swap_row(X_csr, 2, 3)
inplace_swap_row(X_csc, 2, 3)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
assert_raises(TypeError, inplace_swap_row, X_csr.tolil())
X = np.array([[0, 3, 0],
[2, 4, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float32)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
swap = linalg.get_blas_funcs(('swap',), (X,))
swap = swap[0]
X[0], X[-1] = swap(X[0], X[-1])
inplace_swap_row(X_csr, 0, -1)
inplace_swap_row(X_csc, 0, -1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
X[2], X[3] = swap(X[2], X[3])
inplace_swap_row(X_csr, 2, 3)
inplace_swap_row(X_csc, 2, 3)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
assert_raises(TypeError, inplace_swap_row, X_csr.tolil())
def test_inplace_swap_column():
X = np.array([[0, 3, 0],
[2, 4, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
swap = linalg.get_blas_funcs(('swap',), (X,))
swap = swap[0]
X[:, 0], X[:, -1] = swap(X[:, 0], X[:, -1])
inplace_swap_column(X_csr, 0, -1)
inplace_swap_column(X_csc, 0, -1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
X[:, 0], X[:, 1] = swap(X[:, 0], X[:, 1])
inplace_swap_column(X_csr, 0, 1)
inplace_swap_column(X_csc, 0, 1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
assert_raises(TypeError, inplace_swap_column, X_csr.tolil())
X = np.array([[0, 3, 0],
[2, 4, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float32)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
swap = linalg.get_blas_funcs(('swap',), (X,))
swap = swap[0]
X[:, 0], X[:, -1] = swap(X[:, 0], X[:, -1])
inplace_swap_column(X_csr, 0, -1)
inplace_swap_column(X_csc, 0, -1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
X[:, 0], X[:, 1] = swap(X[:, 0], X[:, 1])
inplace_swap_column(X_csr, 0, 1)
inplace_swap_column(X_csc, 0, 1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
assert_raises(TypeError, inplace_swap_column, X_csr.tolil())
def test_min_max_axis0():
X = np.array([[0, 3, 0],
[2, -1, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
mins_csr, maxs_csr = min_max_axis(X_csr, axis=0)
assert_array_equal(mins_csr, X.min(axis=0))
assert_array_equal(maxs_csr, X.max(axis=0))
mins_csc, maxs_csc = min_max_axis(X_csc, axis=0)
assert_array_equal(mins_csc, X.min(axis=0))
assert_array_equal(maxs_csc, X.max(axis=0))
X = X.astype(np.float32)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
mins_csr, maxs_csr = min_max_axis(X_csr, axis=0)
assert_array_equal(mins_csr, X.min(axis=0))
assert_array_equal(maxs_csr, X.max(axis=0))
mins_csc, maxs_csc = min_max_axis(X_csc, axis=0)
assert_array_equal(mins_csc, X.min(axis=0))
assert_array_equal(maxs_csc, X.max(axis=0))
def test_min_max_axis1():
X = np.array([[0, 3, 0],
[2, -1, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
mins_csr, maxs_csr = min_max_axis(X_csr, axis=1)
assert_array_equal(mins_csr, X.min(axis=1))
assert_array_equal(maxs_csr, X.max(axis=1))
mins_csc, maxs_csc = min_max_axis(X_csc, axis=1)
assert_array_equal(mins_csc, X.min(axis=1))
assert_array_equal(maxs_csc, X.max(axis=1))
X = X.astype(np.float32)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
mins_csr, maxs_csr = min_max_axis(X_csr, axis=1)
assert_array_equal(mins_csr, X.min(axis=1))
assert_array_equal(maxs_csr, X.max(axis=1))
mins_csc, maxs_csc = min_max_axis(X_csc, axis=1)
assert_array_equal(mins_csc, X.min(axis=1))
assert_array_equal(maxs_csc, X.max(axis=1))
def test_min_max_axis_errors():
X = np.array([[0, 3, 0],
[2, -1, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
assert_raises(TypeError, min_max_axis, X_csr.tolil(), axis=0)
assert_raises(ValueError, min_max_axis, X_csr, axis=2)
assert_raises(ValueError, min_max_axis, X_csc, axis=-3)
def test_count_nonzero():
X = np.array([[0, 3, 0],
[2, -1, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
X_nonzero = X != 0
sample_weight = [.5, .2, .3, .1, .1]
X_nonzero_weighted = X_nonzero * np.array(sample_weight)[:, None]
for axis in [0, 1, -1, -2, None]:
assert_array_almost_equal(count_nonzero(X_csr, axis=axis),
X_nonzero.sum(axis=axis))
assert_array_almost_equal(count_nonzero(X_csr, axis=axis,
sample_weight=sample_weight),
X_nonzero_weighted.sum(axis=axis))
assert_raises(TypeError, count_nonzero, X_csc)
assert_raises(ValueError, count_nonzero, X_csr, axis=2)
def test_csc_row_median():
# Test csc_row_median actually calculates the median.
# Test that it gives the same output when X is dense.
rng = np.random.RandomState(0)
X = rng.rand(100, 50)
dense_median = np.median(X, axis=0)
csc = sp.csc_matrix(X)
sparse_median = csc_median_axis_0(csc)
assert_array_equal(sparse_median, dense_median)
# Test that it gives the same output when X is sparse
X = rng.rand(51, 100)
X[X < 0.7] = 0.0
ind = rng.randint(0, 50, 10)
X[ind] = -X[ind]
csc = sp.csc_matrix(X)
dense_median = np.median(X, axis=0)
sparse_median = csc_median_axis_0(csc)
assert_array_equal(sparse_median, dense_median)
# Test for toy data.
X = [[0, -2], [-1, -1], [1, 0], [2, 1]]
csc = sp.csc_matrix(X)
assert_array_equal(csc_median_axis_0(csc), np.array([0.5, -0.5]))
X = [[0, -2], [-1, -5], [1, -3]]
csc = sp.csc_matrix(X)
assert_array_equal(csc_median_axis_0(csc), np.array([0., -3]))
# Test that it raises an Error for non-csc matrices.
assert_raises(TypeError, csc_median_axis_0, sp.csr_matrix(X))
|
nagyistoce/devide.johannes
|
refs/heads/master
|
install_packages/ip_scipy.py
|
5
|
# Copyright (c) Charl P. Botha, TU Delft.
# All rights reserved.
# See COPYRIGHT for details.
# this was just an experiment to see how far we could get with
# gohlke's MKL built scipy binaries on Windows. Not far enough
# (and there are licensing issues in any case). Ask me (cpbotha)
# about the details if you're interested, they're in my simplenotes.
import config
from install_package import InstallPackage
import os
import shutil
import sys
import utils
SCIPY_VER = "0.10.1"
SCIPY_BASENAME = "scipy-" + SCIPY_VER
SCIPY_DIRBASE = SCIPY_BASENAME
if os.name == 'posix':
SCIPY_ARCHIVE = "%s.tar.gz" % (SCIPY_BASENAME,)
SCIPY_URL = "http://sourceforge.net/projects/scipy/files/SciPy/%s/%s/download" % (SCIPY_VER, SCIPY_ARCHIVE)
elif os.name == 'nt':
SCIPY_URL_BASE = "http://graphics.tudelft.nl/~cpbotha/files/devide/johannes_support/gohlke/%s"
if config.WINARCH == '32bit':
SCIPY_ARCHIVE = "scipy-%s.win32-py2.7.exe" % (SCIPY_VER,)
else:
SCIPY_ARCHIVE = "scipy-%s.win-amd64-py2.7.exe" % (SCIPY_VER,)
# now construct the full URL
SCIPY_URL = SCIPY_URL_BASE % (SCIPY_ARCHIVE,)
dependencies = []
class SciPy(InstallPackage):
def __init__(self):
self.tbfilename = os.path.join(config.archive_dir, SCIPY_ARCHIVE)
self.build_dir = os.path.join(config.build_dir, SCIPY_DIRBASE)
self.inst_dir = os.path.join(config.inst_dir, 'scipy')
def get(self):
if os.path.exists(self.tbfilename):
utils.output("%s already present, not downloading." %
(SCIPY_ARCHIVE,))
else:
utils.goto_archive()
utils.urlget(SCIPY_URL, SCIPY_ARCHIVE)
def unpack(self):
if os.path.isdir(self.build_dir):
utils.output("SCIPY source already unpacked, not redoing.")
return
utils.output("Unpacking SCIPY source.")
if os.name == 'posix':
utils.unpack_build(self.tbfilename)
else:
os.mkdir(self.build_dir)
os.chdir(self.build_dir)
utils.unpack(self.tbfilename)
def configure(self):
pass
def build(self):
if os.name == 'nt':
utils.output("Nothing to build (Windows).")
else:
# for posix, we have to build the whole shebang.
os.chdir(self.build_dir)
# weak test... there are .so files deeper, but they're in platform
# specific directories
if os.path.exists('build'):
utils.output('scipy already built. Skipping step.')
else:
# the build_ext -lg2c is needed on the VLE Centos3 system, else
# we get lapack related (symbol not found) errors at import scipy
#ret = os.system('%s setup.py build build_ext -lg2c' % (sys.executable,))
ret = os.system('%s setup.py build build_ext' % (sys.executable,))
if ret != 0:
utils.error('scipy build failed. Please fix and try again.')
def install(self):
# to test for install, just do python -c "import scipy"
# and test the result (we could just import directly, but that would
# only work once our invoking python has been stopped and started
# again)
os.chdir(config.working_dir) # we need to be elsewhere!
ret = os.system('%s -c "import scipy"' % (sys.executable,))
if ret == 0:
utils.output('scipy already installed. Skipping step.')
else:
utils.output('ImportError test shows that scipy is not '
'installed. Installing...')
if os.name == 'posix':
os.chdir(self.build_dir)
ret = os.system('%s setup.py install' % (sys.executable,))
if ret != 0:
utils.error('scipy install failed. Please fix and try again.')
elif os.name == 'nt':
# unpack relevant ZIP into Python site-packages dir.
from distutils import sysconfig
spd = sysconfig.get_python_lib()
# copy self.build_dir/PLATLIB/* to python/libs/site-packages/
# we're not copying SCRIPTS/f2py.py
pl_dir = os.path.join(self.build_dir, 'PLATLIB')
utils.copy_glob(os.path.join(pl_dir, '*'), spd)
def clean_build(self):
utils.output("Removing build and install directories.")
if os.path.exists(self.build_dir):
shutil.rmtree(self.build_dir)
from distutils import sysconfig
scipy_instdir = os.path.join(sysconfig.get_python_lib(), 'scipy')
if os.path.exists(scipy_instdir):
shutil.rmtree(scipy_instdir)
def get_installed_version(self):
import scipy
return scipy.__version__
|
xwolf12/scikit-learn
|
refs/heads/master
|
sklearn/ensemble/tests/test_forest.py
|
57
|
"""
Testing for the forest module (sklearn.ensemble.forest).
"""
# Authors: Gilles Louppe,
# Brian Holt,
# Andreas Mueller,
# Arnaud Joly
# License: BSD 3 clause
import pickle
from collections import defaultdict
from itertools import product
import numpy as np
from scipy.sparse import csr_matrix, csc_matrix, coo_matrix
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_false, assert_true
from sklearn.utils.testing import assert_less, assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn import datasets
from sklearn.decomposition import TruncatedSVD
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import RandomTreesEmbedding
from sklearn.grid_search import GridSearchCV
from sklearn.svm import LinearSVC
from sklearn.utils.validation import check_random_state
from sklearn.tree.tree import SPARSE_SPLITTERS
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
rng = check_random_state(0)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# also load the boston dataset
# and randomly permute it
boston = datasets.load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
FOREST_CLASSIFIERS = {
"ExtraTreesClassifier": ExtraTreesClassifier,
"RandomForestClassifier": RandomForestClassifier,
}
FOREST_REGRESSORS = {
"ExtraTreesRegressor": ExtraTreesRegressor,
"RandomForestRegressor": RandomForestRegressor,
}
FOREST_TRANSFORMERS = {
"RandomTreesEmbedding": RandomTreesEmbedding,
}
FOREST_ESTIMATORS = dict()
FOREST_ESTIMATORS.update(FOREST_CLASSIFIERS)
FOREST_ESTIMATORS.update(FOREST_REGRESSORS)
FOREST_ESTIMATORS.update(FOREST_TRANSFORMERS)
def check_classification_toy(name):
"""Check classification on a toy dataset."""
ForestClassifier = FOREST_CLASSIFIERS[name]
clf = ForestClassifier(n_estimators=10, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(10, len(clf))
clf = ForestClassifier(n_estimators=10, max_features=1, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(10, len(clf))
# also test apply
leaf_indices = clf.apply(X)
assert_equal(leaf_indices.shape, (len(X), clf.n_estimators))
def test_classification_toy():
for name in FOREST_CLASSIFIERS:
yield check_classification_toy, name
def check_iris_criterion(name, criterion):
# Check consistency on dataset iris.
ForestClassifier = FOREST_CLASSIFIERS[name]
clf = ForestClassifier(n_estimators=10, criterion=criterion,
random_state=1)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
assert_greater(score, 0.9, "Failed with criterion %s and score = %f"
% (criterion, score))
clf = ForestClassifier(n_estimators=10, criterion=criterion,
max_features=2, random_state=1)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
assert_greater(score, 0.5, "Failed with criterion %s and score = %f"
% (criterion, score))
def test_iris():
for name, criterion in product(FOREST_CLASSIFIERS, ("gini", "entropy")):
yield check_iris_criterion, name, criterion
def check_boston_criterion(name, criterion):
# Check consistency on dataset boston house prices.
ForestRegressor = FOREST_REGRESSORS[name]
clf = ForestRegressor(n_estimators=5, criterion=criterion, random_state=1)
clf.fit(boston.data, boston.target)
score = clf.score(boston.data, boston.target)
assert_greater(score, 0.95, "Failed with max_features=None, criterion %s "
"and score = %f" % (criterion, score))
clf = ForestRegressor(n_estimators=5, criterion=criterion,
max_features=6, random_state=1)
clf.fit(boston.data, boston.target)
score = clf.score(boston.data, boston.target)
assert_greater(score, 0.95, "Failed with max_features=6, criterion %s "
"and score = %f" % (criterion, score))
def test_boston():
for name, criterion in product(FOREST_REGRESSORS, ("mse", )):
yield check_boston_criterion, name, criterion
def check_regressor_attributes(name):
# Regression models should not have a classes_ attribute.
r = FOREST_REGRESSORS[name](random_state=0)
assert_false(hasattr(r, "classes_"))
assert_false(hasattr(r, "n_classes_"))
r.fit([[1, 2, 3], [4, 5, 6]], [1, 2])
assert_false(hasattr(r, "classes_"))
assert_false(hasattr(r, "n_classes_"))
def test_regressor_attributes():
for name in FOREST_REGRESSORS:
yield check_regressor_attributes, name
def check_probability(name):
# Predict probabilities.
ForestClassifier = FOREST_CLASSIFIERS[name]
with np.errstate(divide="ignore"):
clf = ForestClassifier(n_estimators=10, random_state=1, max_features=1,
max_depth=1)
clf.fit(iris.data, iris.target)
assert_array_almost_equal(np.sum(clf.predict_proba(iris.data), axis=1),
np.ones(iris.data.shape[0]))
assert_array_almost_equal(clf.predict_proba(iris.data),
np.exp(clf.predict_log_proba(iris.data)))
def test_probability():
for name in FOREST_CLASSIFIERS:
yield check_probability, name
def check_importances(name, X, y):
# Check variable importances.
ForestClassifier = FOREST_CLASSIFIERS[name]
for n_jobs in [1, 2]:
clf = ForestClassifier(n_estimators=10, n_jobs=n_jobs)
clf.fit(X, y)
importances = clf.feature_importances_
n_important = np.sum(importances > 0.1)
assert_equal(importances.shape[0], 10)
assert_equal(n_important, 3)
X_new = clf.transform(X, threshold="mean")
        assert_greater(X_new.shape[1], 0)
        assert_less(X_new.shape[1], X.shape[1])
# Check with sample weights
sample_weight = np.ones(y.shape)
sample_weight[y == 1] *= 100
clf = ForestClassifier(n_estimators=50, n_jobs=n_jobs, random_state=0)
clf.fit(X, y, sample_weight=sample_weight)
importances = clf.feature_importances_
assert_true(np.all(importances >= 0.0))
clf = ForestClassifier(n_estimators=50, n_jobs=n_jobs, random_state=0)
clf.fit(X, y, sample_weight=3 * sample_weight)
importances_bis = clf.feature_importances_
assert_almost_equal(importances, importances_bis)
def test_importances():
X, y = datasets.make_classification(n_samples=1000, n_features=10,
n_informative=3, n_redundant=0,
n_repeated=0, shuffle=False,
random_state=0)
for name in FOREST_CLASSIFIERS:
yield check_importances, name, X, y
def check_unfitted_feature_importances(name):
assert_raises(ValueError, getattr, FOREST_ESTIMATORS[name](random_state=0),
"feature_importances_")
def test_unfitted_feature_importances():
for name in FOREST_ESTIMATORS:
yield check_unfitted_feature_importances, name
def check_oob_score(name, X, y, n_estimators=20):
# Check that oob prediction is a good estimation of the generalization
# error.
# Proper behavior
est = FOREST_ESTIMATORS[name](oob_score=True, random_state=0,
n_estimators=n_estimators, bootstrap=True)
n_samples = X.shape[0]
est.fit(X[:n_samples // 2, :], y[:n_samples // 2])
test_score = est.score(X[n_samples // 2:, :], y[n_samples // 2:])
if name in FOREST_CLASSIFIERS:
assert_less(abs(test_score - est.oob_score_), 0.1)
else:
assert_greater(test_score, est.oob_score_)
assert_greater(est.oob_score_, .8)
# Check warning if not enough estimators
with np.errstate(divide="ignore", invalid="ignore"):
est = FOREST_ESTIMATORS[name](oob_score=True, random_state=0,
n_estimators=1, bootstrap=True)
assert_warns(UserWarning, est.fit, X, y)
def test_oob_score():
for name in FOREST_CLASSIFIERS:
yield check_oob_score, name, iris.data, iris.target
# csc matrix
yield check_oob_score, name, csc_matrix(iris.data), iris.target
# non-contiguous targets in classification
yield check_oob_score, name, iris.data, iris.target * 2 + 1
for name in FOREST_REGRESSORS:
yield check_oob_score, name, boston.data, boston.target, 50
# csc matrix
yield check_oob_score, name, csc_matrix(boston.data), boston.target, 50
def check_oob_score_raise_error(name):
ForestEstimator = FOREST_ESTIMATORS[name]
if name in FOREST_TRANSFORMERS:
for oob_score in [True, False]:
assert_raises(TypeError, ForestEstimator, oob_score=oob_score)
assert_raises(NotImplementedError, ForestEstimator()._set_oob_score,
X, y)
else:
# Unfitted / no bootstrap / no oob_score
for oob_score, bootstrap in [(True, False), (False, True),
(False, False)]:
est = ForestEstimator(oob_score=oob_score, bootstrap=bootstrap,
random_state=0)
assert_false(hasattr(est, "oob_score_"))
# No bootstrap
assert_raises(ValueError, ForestEstimator(oob_score=True,
bootstrap=False).fit, X, y)
def test_oob_score_raise_error():
for name in FOREST_ESTIMATORS:
yield check_oob_score_raise_error, name
def check_gridsearch(name):
forest = FOREST_CLASSIFIERS[name]()
clf = GridSearchCV(forest, {'n_estimators': (1, 2), 'max_depth': (1, 2)})
clf.fit(iris.data, iris.target)
def test_gridsearch():
# Check that base trees can be grid-searched.
for name in FOREST_CLASSIFIERS:
yield check_gridsearch, name
def check_parallel(name, X, y):
"""Check parallel computations in classification"""
ForestEstimator = FOREST_ESTIMATORS[name]
forest = ForestEstimator(n_estimators=10, n_jobs=3, random_state=0)
forest.fit(X, y)
assert_equal(len(forest), 10)
forest.set_params(n_jobs=1)
y1 = forest.predict(X)
forest.set_params(n_jobs=2)
y2 = forest.predict(X)
assert_array_almost_equal(y1, y2, 3)
def test_parallel():
for name in FOREST_CLASSIFIERS:
yield check_parallel, name, iris.data, iris.target
for name in FOREST_REGRESSORS:
yield check_parallel, name, boston.data, boston.target
def check_pickle(name, X, y):
# Check pickability.
ForestEstimator = FOREST_ESTIMATORS[name]
obj = ForestEstimator(random_state=0)
obj.fit(X, y)
score = obj.score(X, y)
pickle_object = pickle.dumps(obj)
obj2 = pickle.loads(pickle_object)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(X, y)
assert_equal(score, score2)
def test_pickle():
for name in FOREST_CLASSIFIERS:
yield check_pickle, name, iris.data[::2], iris.target[::2]
for name in FOREST_REGRESSORS:
yield check_pickle, name, boston.data[::2], boston.target[::2]
def check_multioutput(name):
# Check estimators on multi-output problems.
X_train = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1], [-2, 1],
[-1, 1], [-1, 2], [2, -1], [1, -1], [1, -2]]
y_train = [[-1, 0], [-1, 0], [-1, 0], [1, 1], [1, 1], [1, 1], [-1, 2],
[-1, 2], [-1, 2], [1, 3], [1, 3], [1, 3]]
X_test = [[-1, -1], [1, 1], [-1, 1], [1, -1]]
y_test = [[-1, 0], [1, 1], [-1, 2], [1, 3]]
est = FOREST_ESTIMATORS[name](random_state=0, bootstrap=False)
y_pred = est.fit(X_train, y_train).predict(X_test)
assert_array_almost_equal(y_pred, y_test)
if name in FOREST_CLASSIFIERS:
with np.errstate(divide="ignore"):
proba = est.predict_proba(X_test)
assert_equal(len(proba), 2)
assert_equal(proba[0].shape, (4, 2))
assert_equal(proba[1].shape, (4, 4))
log_proba = est.predict_log_proba(X_test)
assert_equal(len(log_proba), 2)
assert_equal(log_proba[0].shape, (4, 2))
assert_equal(log_proba[1].shape, (4, 4))
def test_multioutput():
for name in FOREST_CLASSIFIERS:
yield check_multioutput, name
for name in FOREST_REGRESSORS:
yield check_multioutput, name
def check_classes_shape(name):
# Test that n_classes_ and classes_ have proper shape.
ForestClassifier = FOREST_CLASSIFIERS[name]
# Classification, single output
clf = ForestClassifier(random_state=0).fit(X, y)
assert_equal(clf.n_classes_, 2)
assert_array_equal(clf.classes_, [-1, 1])
# Classification, multi-output
_y = np.vstack((y, np.array(y) * 2)).T
clf = ForestClassifier(random_state=0).fit(X, _y)
assert_array_equal(clf.n_classes_, [2, 2])
assert_array_equal(clf.classes_, [[-1, 1], [-2, 2]])
def test_classes_shape():
for name in FOREST_CLASSIFIERS:
yield check_classes_shape, name
def test_random_trees_dense_type():
# Test that the `sparse_output` parameter of RandomTreesEmbedding
# works by returning a dense array.
# Create the RTE with sparse=False
hasher = RandomTreesEmbedding(n_estimators=10, sparse_output=False)
X, y = datasets.make_circles(factor=0.5)
X_transformed = hasher.fit_transform(X)
# Assert that type is ndarray, not scipy.sparse.csr.csr_matrix
assert_equal(type(X_transformed), np.ndarray)
def test_random_trees_dense_equal():
# Test that the `sparse_output` parameter of RandomTreesEmbedding
# works by returning the same array for both argument values.
# Create the RTEs
hasher_dense = RandomTreesEmbedding(n_estimators=10, sparse_output=False,
random_state=0)
hasher_sparse = RandomTreesEmbedding(n_estimators=10, sparse_output=True,
random_state=0)
X, y = datasets.make_circles(factor=0.5)
X_transformed_dense = hasher_dense.fit_transform(X)
X_transformed_sparse = hasher_sparse.fit_transform(X)
# Assert that dense and sparse hashers have same array.
assert_array_equal(X_transformed_sparse.toarray(), X_transformed_dense)
def test_random_hasher():
# test random forest hashing on circles dataset
# make sure that it is linearly separable.
# even after projected to two SVD dimensions
# Note: Not all random_states produce perfect results.
hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
X, y = datasets.make_circles(factor=0.5)
X_transformed = hasher.fit_transform(X)
# test fit and transform:
hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
assert_array_equal(hasher.fit(X).transform(X).toarray(),
X_transformed.toarray())
# one leaf active per data point per forest
assert_equal(X_transformed.shape[0], X.shape[0])
assert_array_equal(X_transformed.sum(axis=1), hasher.n_estimators)
svd = TruncatedSVD(n_components=2)
X_reduced = svd.fit_transform(X_transformed)
linear_clf = LinearSVC()
linear_clf.fit(X_reduced, y)
assert_equal(linear_clf.score(X_reduced, y), 1.)
def test_random_hasher_sparse_data():
X, y = datasets.make_multilabel_classification(random_state=0)
hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
X_transformed = hasher.fit_transform(X)
X_transformed_sparse = hasher.fit_transform(csc_matrix(X))
assert_array_equal(X_transformed_sparse.toarray(), X_transformed.toarray())
def test_parallel_train():
rng = check_random_state(12321)
n_samples, n_features = 80, 30
X_train = rng.randn(n_samples, n_features)
y_train = rng.randint(0, 2, n_samples)
clfs = [
RandomForestClassifier(n_estimators=20, n_jobs=n_jobs,
random_state=12345).fit(X_train, y_train)
for n_jobs in [1, 2, 3, 8, 16, 32]
]
X_test = rng.randn(n_samples, n_features)
probas = [clf.predict_proba(X_test) for clf in clfs]
for proba1, proba2 in zip(probas, probas[1:]):
assert_array_almost_equal(proba1, proba2)
def test_distribution():
rng = check_random_state(12321)
# Single variable with 4 values
X = rng.randint(0, 4, size=(1000, 1))
y = rng.rand(1000)
n_trees = 500
clf = ExtraTreesRegressor(n_estimators=n_trees, random_state=42).fit(X, y)
uniques = defaultdict(int)
for tree in clf.estimators_:
tree = "".join(("%d,%d/" % (f, int(t)) if f >= 0 else "-")
for f, t in zip(tree.tree_.feature,
tree.tree_.threshold))
uniques[tree] += 1
uniques = sorted([(1. * count / n_trees, tree)
for tree, count in uniques.items()])
# On a single variable problem where X_0 has 4 equiprobable values, there
# are 5 ways to build a random tree. The more compact (0,1/0,0/--0,2/--) of
# them has probability 1/3 while the 4 others have probability 1/6.
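    # (A sketch of that count, added for clarity rather than taken from the
    # original source: the first random threshold falls in one of the three
    # gaps 0|123, 01|23 or 012|3 with probability 1/3 each. The 01|23 split
    # determines the rest of the tree, so the compact shape has probability
    # 1/3; each of the other two first splits leaves a 3-value side whose next
    # threshold picks one of two gaps, giving 2 * 2 = 4 remaining shapes with
    # probability 1/3 * 1/2 = 1/6 each.)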
assert_equal(len(uniques), 5)
assert_greater(0.20, uniques[0][0]) # Rough approximation of 1/6.
assert_greater(0.20, uniques[1][0])
assert_greater(0.20, uniques[2][0])
assert_greater(0.20, uniques[3][0])
assert_greater(uniques[4][0], 0.3)
assert_equal(uniques[4][1], "0,1/0,0/--0,2/--")
# Two variables, one with 2 values, one with 3 values
X = np.empty((1000, 2))
X[:, 0] = np.random.randint(0, 2, 1000)
X[:, 1] = np.random.randint(0, 3, 1000)
y = rng.rand(1000)
clf = ExtraTreesRegressor(n_estimators=100, max_features=1,
random_state=1).fit(X, y)
uniques = defaultdict(int)
for tree in clf.estimators_:
tree = "".join(("%d,%d/" % (f, int(t)) if f >= 0 else "-")
for f, t in zip(tree.tree_.feature,
tree.tree_.threshold))
uniques[tree] += 1
uniques = [(count, tree) for tree, count in uniques.items()]
assert_equal(len(uniques), 8)
def check_max_leaf_nodes_max_depth(name, X, y):
# Test precedence of max_leaf_nodes over max_depth.
ForestEstimator = FOREST_ESTIMATORS[name]
est = ForestEstimator(max_depth=1, max_leaf_nodes=4,
n_estimators=1).fit(X, y)
assert_greater(est.estimators_[0].tree_.max_depth, 1)
est = ForestEstimator(max_depth=1, n_estimators=1).fit(X, y)
assert_equal(est.estimators_[0].tree_.max_depth, 1)
def test_max_leaf_nodes_max_depth():
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for name in FOREST_ESTIMATORS:
yield check_max_leaf_nodes_max_depth, name, X, y
def check_min_samples_leaf(name, X, y):
# Test if leaves contain more than leaf_count training examples
ForestEstimator = FOREST_ESTIMATORS[name]
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes in (None, 1000):
est = ForestEstimator(min_samples_leaf=5,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y)
out = est.estimators_[0].tree_.apply(X)
node_counts = np.bincount(out)
# drop inner nodes
leaf_count = node_counts[node_counts != 0]
assert_greater(np.min(leaf_count), 4,
"Failed with {0}".format(name))
def test_min_samples_leaf():
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
X = X.astype(np.float32)
for name in FOREST_ESTIMATORS:
yield check_min_samples_leaf, name, X, y
def check_min_weight_fraction_leaf(name, X, y):
# Test if leaves contain at least min_weight_fraction_leaf of the
# training set
ForestEstimator = FOREST_ESTIMATORS[name]
rng = np.random.RandomState(0)
weights = rng.rand(X.shape[0])
total_weight = np.sum(weights)
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes in (None, 1000):
for frac in np.linspace(0, 0.5, 6):
est = ForestEstimator(min_weight_fraction_leaf=frac,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
if isinstance(est, (RandomForestClassifier,
RandomForestRegressor)):
est.bootstrap = False
est.fit(X, y, sample_weight=weights)
out = est.estimators_[0].tree_.apply(X)
node_weights = np.bincount(out, weights=weights)
# drop inner nodes
leaf_weights = node_weights[node_weights != 0]
assert_greater_equal(
np.min(leaf_weights),
total_weight * est.min_weight_fraction_leaf,
"Failed with {0} "
"min_weight_fraction_leaf={1}".format(
name, est.min_weight_fraction_leaf))
def test_min_weight_fraction_leaf():
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
X = X.astype(np.float32)
for name in FOREST_ESTIMATORS:
yield check_min_weight_fraction_leaf, name, X, y
def check_sparse_input(name, X, X_sparse, y):
ForestEstimator = FOREST_ESTIMATORS[name]
dense = ForestEstimator(random_state=0, max_depth=2).fit(X, y)
sparse = ForestEstimator(random_state=0, max_depth=2).fit(X_sparse, y)
assert_array_almost_equal(sparse.apply(X), dense.apply(X))
if name in FOREST_CLASSIFIERS or name in FOREST_REGRESSORS:
assert_array_almost_equal(sparse.predict(X), dense.predict(X))
assert_array_almost_equal(sparse.feature_importances_,
dense.feature_importances_)
if name in FOREST_CLASSIFIERS:
assert_array_almost_equal(sparse.predict_proba(X),
dense.predict_proba(X))
assert_array_almost_equal(sparse.predict_log_proba(X),
dense.predict_log_proba(X))
if name in FOREST_TRANSFORMERS:
assert_array_almost_equal(sparse.transform(X).toarray(),
dense.transform(X).toarray())
assert_array_almost_equal(sparse.fit_transform(X).toarray(),
dense.fit_transform(X).toarray())
def test_sparse_input():
X, y = datasets.make_multilabel_classification(random_state=0,
n_samples=40)
for name, sparse_matrix in product(FOREST_ESTIMATORS,
(csr_matrix, csc_matrix, coo_matrix)):
yield check_sparse_input, name, X, sparse_matrix(X), y
def check_memory_layout(name, dtype):
# Check that it works no matter the memory layout
est = FOREST_ESTIMATORS[name](random_state=0, bootstrap=False)
# Nothing
X = np.asarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# C-order
X = np.asarray(iris.data, order="C", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# F-order
X = np.asarray(iris.data, order="F", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Contiguous
X = np.ascontiguousarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
if est.base_estimator.splitter in SPARSE_SPLITTERS:
# csr matrix
X = csr_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# csc_matrix
X = csc_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# coo_matrix
X = coo_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Strided
X = np.asarray(iris.data[::3], dtype=dtype)
y = iris.target[::3]
assert_array_equal(est.fit(X, y).predict(X), y)
def test_memory_layout():
for name, dtype in product(FOREST_CLASSIFIERS, [np.float64, np.float32]):
yield check_memory_layout, name, dtype
for name, dtype in product(FOREST_REGRESSORS, [np.float64, np.float32]):
yield check_memory_layout, name, dtype
def check_1d_input(name, X, X_2d, y):
ForestEstimator = FOREST_ESTIMATORS[name]
assert_raises(ValueError, ForestEstimator(random_state=0).fit, X, y)
est = ForestEstimator(random_state=0)
est.fit(X_2d, y)
if name in FOREST_CLASSIFIERS or name in FOREST_REGRESSORS:
assert_raises(ValueError, est.predict, X)
def test_1d_input():
X = iris.data[:, 0].ravel()
X_2d = iris.data[:, 0].reshape((-1, 1))
y = iris.target
for name in FOREST_ESTIMATORS:
yield check_1d_input, name, X, X_2d, y
def check_class_weights(name):
# Check class_weights resemble sample_weights behavior.
ForestClassifier = FOREST_CLASSIFIERS[name]
# Iris is balanced, so no effect expected for using 'balanced' weights
clf1 = ForestClassifier(random_state=0)
clf1.fit(iris.data, iris.target)
clf2 = ForestClassifier(class_weight='balanced', random_state=0)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Make a multi-output problem with three copies of Iris
iris_multi = np.vstack((iris.target, iris.target, iris.target)).T
# Create user-defined weights that should balance over the outputs
clf3 = ForestClassifier(class_weight=[{0: 2., 1: 2., 2: 1.},
{0: 2., 1: 1., 2: 2.},
{0: 1., 1: 2., 2: 2.}],
random_state=0)
clf3.fit(iris.data, iris_multi)
assert_almost_equal(clf2.feature_importances_, clf3.feature_importances_)
# Check against multi-output "balanced" which should also have no effect
clf4 = ForestClassifier(class_weight='balanced', random_state=0)
clf4.fit(iris.data, iris_multi)
assert_almost_equal(clf3.feature_importances_, clf4.feature_importances_)
# Inflate importance of class 1, check against user-defined weights
sample_weight = np.ones(iris.target.shape)
sample_weight[iris.target == 1] *= 100
class_weight = {0: 1., 1: 100., 2: 1.}
clf1 = ForestClassifier(random_state=0)
clf1.fit(iris.data, iris.target, sample_weight)
clf2 = ForestClassifier(class_weight=class_weight, random_state=0)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Check that sample_weight and class_weight are multiplicative
clf1 = ForestClassifier(random_state=0)
clf1.fit(iris.data, iris.target, sample_weight ** 2)
clf2 = ForestClassifier(class_weight=class_weight, random_state=0)
clf2.fit(iris.data, iris.target, sample_weight)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
def test_class_weights():
for name in FOREST_CLASSIFIERS:
yield check_class_weights, name
def check_class_weight_balanced_and_bootstrap_multi_output(name):
    # Test class_weight works for multi-output
ForestClassifier = FOREST_CLASSIFIERS[name]
_y = np.vstack((y, np.array(y) * 2)).T
clf = ForestClassifier(class_weight='balanced', random_state=0)
clf.fit(X, _y)
clf = ForestClassifier(class_weight=[{-1: 0.5, 1: 1.}, {-2: 1., 2: 1.}],
random_state=0)
clf.fit(X, _y)
# smoke test for subsample and balanced subsample
clf = ForestClassifier(class_weight='balanced_subsample', random_state=0)
clf.fit(X, _y)
clf = ForestClassifier(class_weight='subsample', random_state=0)
ignore_warnings(clf.fit)(X, _y)
def test_class_weight_balanced_and_bootstrap_multi_output():
for name in FOREST_CLASSIFIERS:
yield check_class_weight_balanced_and_bootstrap_multi_output, name
def check_class_weight_errors(name):
# Test if class_weight raises errors and warnings when expected.
ForestClassifier = FOREST_CLASSIFIERS[name]
_y = np.vstack((y, np.array(y) * 2)).T
# Invalid preset string
clf = ForestClassifier(class_weight='the larch', random_state=0)
assert_raises(ValueError, clf.fit, X, y)
assert_raises(ValueError, clf.fit, X, _y)
# Warning warm_start with preset
clf = ForestClassifier(class_weight='auto', warm_start=True,
random_state=0)
assert_warns(UserWarning, clf.fit, X, y)
assert_warns(UserWarning, clf.fit, X, _y)
# Not a list or preset for multi-output
clf = ForestClassifier(class_weight=1, random_state=0)
assert_raises(ValueError, clf.fit, X, _y)
# Incorrect length list for multi-output
clf = ForestClassifier(class_weight=[{-1: 0.5, 1: 1.}], random_state=0)
assert_raises(ValueError, clf.fit, X, _y)
def test_class_weight_errors():
for name in FOREST_CLASSIFIERS:
yield check_class_weight_errors, name
def check_warm_start(name, random_state=42):
# Test if fitting incrementally with warm start gives a forest of the
# right size and the same results as a normal fit.
X, y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
ForestEstimator = FOREST_ESTIMATORS[name]
clf_ws = None
for n_estimators in [5, 10]:
if clf_ws is None:
clf_ws = ForestEstimator(n_estimators=n_estimators,
random_state=random_state,
warm_start=True)
else:
clf_ws.set_params(n_estimators=n_estimators)
clf_ws.fit(X, y)
assert_equal(len(clf_ws), n_estimators)
clf_no_ws = ForestEstimator(n_estimators=10, random_state=random_state,
warm_start=False)
clf_no_ws.fit(X, y)
assert_equal(set([tree.random_state for tree in clf_ws]),
set([tree.random_state for tree in clf_no_ws]))
assert_array_equal(clf_ws.apply(X), clf_no_ws.apply(X),
err_msg="Failed with {0}".format(name))
def test_warm_start():
for name in FOREST_ESTIMATORS:
yield check_warm_start, name
def check_warm_start_clear(name):
# Test if fit clears state and grows a new forest when warm_start==False.
X, y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
ForestEstimator = FOREST_ESTIMATORS[name]
clf = ForestEstimator(n_estimators=5, max_depth=1, warm_start=False,
random_state=1)
clf.fit(X, y)
clf_2 = ForestEstimator(n_estimators=5, max_depth=1, warm_start=True,
random_state=2)
clf_2.fit(X, y) # inits state
clf_2.set_params(warm_start=False, random_state=1)
clf_2.fit(X, y) # clears old state and equals clf
assert_array_almost_equal(clf_2.apply(X), clf.apply(X))
def test_warm_start_clear():
for name in FOREST_ESTIMATORS:
yield check_warm_start_clear, name
def check_warm_start_smaller_n_estimators(name):
# Test if warm start second fit with smaller n_estimators raises error.
X, y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
ForestEstimator = FOREST_ESTIMATORS[name]
clf = ForestEstimator(n_estimators=5, max_depth=1, warm_start=True)
clf.fit(X, y)
clf.set_params(n_estimators=4)
assert_raises(ValueError, clf.fit, X, y)
def test_warm_start_smaller_n_estimators():
for name in FOREST_ESTIMATORS:
yield check_warm_start_smaller_n_estimators, name
def check_warm_start_equal_n_estimators(name):
# Test if warm start with equal n_estimators does nothing and returns the
# same forest and raises a warning.
X, y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
ForestEstimator = FOREST_ESTIMATORS[name]
clf = ForestEstimator(n_estimators=5, max_depth=3, warm_start=True,
random_state=1)
clf.fit(X, y)
clf_2 = ForestEstimator(n_estimators=5, max_depth=3, warm_start=True,
random_state=1)
clf_2.fit(X, y)
# Now clf_2 equals clf.
clf_2.set_params(random_state=2)
assert_warns(UserWarning, clf_2.fit, X, y)
# If we had fit the trees again we would have got a different forest as we
# changed the random state.
assert_array_equal(clf.apply(X), clf_2.apply(X))
def test_warm_start_equal_n_estimators():
for name in FOREST_ESTIMATORS:
yield check_warm_start_equal_n_estimators, name
def check_warm_start_oob(name):
# Test that the warm start computes oob score when asked.
X, y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
ForestEstimator = FOREST_ESTIMATORS[name]
# Use 15 estimators to avoid 'some inputs do not have OOB scores' warning.
clf = ForestEstimator(n_estimators=15, max_depth=3, warm_start=False,
random_state=1, bootstrap=True, oob_score=True)
clf.fit(X, y)
clf_2 = ForestEstimator(n_estimators=5, max_depth=3, warm_start=False,
random_state=1, bootstrap=True, oob_score=False)
clf_2.fit(X, y)
clf_2.set_params(warm_start=True, oob_score=True, n_estimators=15)
clf_2.fit(X, y)
assert_true(hasattr(clf_2, 'oob_score_'))
assert_equal(clf.oob_score_, clf_2.oob_score_)
# Test that oob_score is computed even if we don't need to train
# additional trees.
clf_3 = ForestEstimator(n_estimators=15, max_depth=3, warm_start=True,
random_state=1, bootstrap=True, oob_score=False)
clf_3.fit(X, y)
assert_true(not(hasattr(clf_3, 'oob_score_')))
clf_3.set_params(oob_score=True)
ignore_warnings(clf_3.fit)(X, y)
assert_equal(clf.oob_score_, clf_3.oob_score_)
def test_warm_start_oob():
for name in FOREST_CLASSIFIERS:
yield check_warm_start_oob, name
for name in FOREST_REGRESSORS:
yield check_warm_start_oob, name
def test_dtype_convert():
classifier = RandomForestClassifier()
CLASSES = 15
X = np.eye(CLASSES)
y = [ch for ch in 'ABCDEFGHIJKLMNOPQRSTU'[:CLASSES]]
result = classifier.fit(X, y).predict(X)
assert_array_equal(result, y)
|
yasoob/PythonRSSReader
|
refs/heads/master
|
venv/lib/python2.7/dist-packages/oauthlib/oauth1/rfc5849/request_validator.py
|
9
|
# -*- coding: utf-8 -*-
"""
oauthlib.oauth1.rfc5849
~~~~~~~~~~~~~~~~~~~~~~~
This module is an implementation of various logic needed
for signing and checking OAuth 1.0 RFC 5849 requests.
"""
from __future__ import absolute_import, unicode_literals
from . import SIGNATURE_METHODS, utils
class RequestValidator(object):
"""A validator/datastore interaction base class for OAuth 1 providers.
OAuth providers should inherit from RequestValidator and implement the
methods and properties outlined below. Further details are provided in the
documentation for each method and property.
Methods used to check the format of input parameters. Common tests include
length, character set, membership, range or pattern. These tests are
    referred to as `whitelisting or blacklisting`_. Whitelisting is better,
    but blacklisting can be useful to spot malicious activity.
    The following methods have a default implementation:
- check_client_key
- check_request_token
- check_access_token
- check_nonce
- check_verifier
- check_realms
    The methods above default to whitelisting input parameters, checking that
    they are alphanumeric and between a minimum and maximum length. Rather
    than overloading the methods, a few properties can be used to configure
    them.
* @safe_characters -> (character set)
* @client_key_length -> (min, max)
* @request_token_length -> (min, max)
* @access_token_length -> (min, max)
* @nonce_length -> (min, max)
* @verifier_length -> (min, max)
* @realms -> [list, of, realms]
Methods used to validate input parameters. These checks usually hit either
persistent or temporary storage such as databases or the filesystem. See
    each method's documentation for detailed usage.
The following methods must be implemented:
- validate_client_key
- validate_request_token
- validate_access_token
- validate_timestamp_and_nonce
- validate_redirect_uri
- validate_requested_realms
- validate_realms
- validate_verifier
Methods used to retrieve sensitive information from storage.
The following methods must be implemented:
- get_client_secret
- get_request_token_secret
- get_access_token_secret
- get_rsa_key
Methods used to save credentials.
The following methods must be implemented:
- save_request_token
- save_verifier
- save_access_token
    To prevent timing attacks it is necessary not to exit early even if the
    client key or resource owner key is invalid. Instead, dummy values should
    be used during the remaining verification process. It is very important
    that the dummy client and token are valid input parameters to the methods
    get_client_secret, get_rsa_key and get_(access/request)_token_secret and
    that the running time of those methods when given a dummy value remains
    equivalent to the running time when given a valid client/resource owner.
The following properties must be implemented:
* @dummy_client
* @dummy_request_token
* @dummy_access_token
    Example implementations have been provided; note that the database used is
    a simple dictionary and serves only an illustrative purpose. Use whichever
    database suits your project; how to access it is entirely up to you.
    The methods are introduced in an order intended to make their use easier
    to understand, so it may be worth reading what follows in that order.
.. _`whitelisting or blacklisting`: http://www.schneier.com/blog/archives/2011/01/whitelisting_vs.html
"""
def __init__(self):
pass
@property
def allowed_signature_methods(self):
return SIGNATURE_METHODS
@property
def safe_characters(self):
return set(utils.UNICODE_ASCII_CHARACTER_SET)
@property
def client_key_length(self):
return 20, 30
@property
def request_token_length(self):
return 20, 30
@property
def access_token_length(self):
return 20, 30
@property
def timestamp_lifetime(self):
return 600
@property
def nonce_length(self):
return 20, 30
@property
def verifier_length(self):
return 20, 30
@property
def realms(self):
return []
@property
def enforce_ssl(self):
return True
def check_client_key(self, client_key):
"""Check that the client key only contains safe characters
and is no shorter than lower and no longer than upper.
"""
lower, upper = self.client_key_length
return (set(client_key) <= self.safe_characters and
lower <= len(client_key) <= upper)
def check_request_token(self, request_token):
"""Checks that the request token contains only safe characters
and is no shorter than lower and no longer than upper.
"""
lower, upper = self.request_token_length
return (set(request_token) <= self.safe_characters and
lower <= len(request_token) <= upper)
def check_access_token(self, request_token):
"""Checks that the token contains only safe characters
and is no shorter than lower and no longer than upper.
"""
lower, upper = self.access_token_length
return (set(request_token) <= self.safe_characters and
lower <= len(request_token) <= upper)
def check_nonce(self, nonce):
"""Checks that the nonce only contains only safe characters
and is no shorter than lower and no longer than upper.
"""
lower, upper = self.nonce_length
return (set(nonce) <= self.safe_characters and
lower <= len(nonce) <= upper)
def check_verifier(self, verifier):
"""Checks that the verifier contains only safe characters
and is no shorter than lower and no longer than upper.
"""
lower, upper = self.verifier_length
return (set(verifier) <= self.safe_characters and
lower <= len(verifier) <= upper)
def check_realms(self, realms):
"""Check that the realm is one of a set allowed realms."""
return all((r in self.realms for r in realms))
@property
def dummy_client(self):
"""Dummy client used when an invalid client key is supplied.
:returns: The dummy client key string.
The dummy client should be associated with either a client secret,
        an RSA key, or both, depending on which signature methods are supported.
Providers should make sure that
get_client_secret(dummy_client)
get_rsa_key(dummy_client)
return a valid secret or key for the dummy client.
This method is used by
* AccessTokenEndpoint
* RequestTokenEndpoint
* ResourceEndpoint
"""
raise NotImplementedError("Subclasses must implement this function.")
@property
def dummy_request_token(self):
"""Dummy request token used when an invalid token was supplied.
:returns: The dummy request token string.
The dummy request token should be associated with a request token
secret such that get_request_token_secret(.., dummy_request_token)
returns a valid secret.
This method is used by
* AccessTokenEndpoint
"""
raise NotImplementedError("Subclasses must implement this function.")
@property
def dummy_access_token(self):
"""Dummy access token used when an invalid token was supplied.
:returns: The dummy access token string.
The dummy access token should be associated with an access token
secret such that get_access_token_secret(.., dummy_access_token)
returns a valid secret.
This method is used by
* ResourceEndpoint
"""
raise NotImplementedError("Subclasses must implement this function.")
def get_client_secret(self, client_key, request):
"""Retrieves the client secret associated with the client key.
:param client_key: The client/consumer key.
:param request: An oauthlib.common.Request object.
:returns: The client secret as a string.
This method must allow the use of a dummy client_key value.
Fetching the secret using the dummy key must take the same amount of
time as fetching a secret for a valid client::
# Unlikely to be near constant time as it uses two database
# lookups for a valid client, and only one for an invalid.
from your_datastore import ClientSecret
if ClientSecret.has(client_key):
return ClientSecret.get(client_key)
else:
return 'dummy'
# Aim to mimic number of latency inducing operations no matter
# whether the client is valid or not.
from your_datastore import ClientSecret
return ClientSecret.get(client_key, 'dummy')
Note that the returned secret must be in plaintext.
This method is used by
* AccessTokenEndpoint
* RequestTokenEndpoint
* ResourceEndpoint
"""
raise NotImplementedError("Subclasses must implement this function.")
def get_request_token_secret(self, client_key, token, request):
"""Retrieves the shared secret associated with the request token.
:param client_key: The client/consumer key.
:param token: The request token string.
:param request: An oauthlib.common.Request object.
:returns: The token secret as a string.
This method must allow the use of dummy values and the running time
must be roughly equivalent to that for valid values::
# Unlikely to be near constant time as it uses two database
# lookups for a valid client, and only one for an invalid.
from your_datastore import RequestTokenSecret
if RequestTokenSecret.has(client_key):
return RequestTokenSecret.get((client_key, token))
else:
return 'dummy'
# Aim to mimic number of latency inducing operations no matter
# whether the client is valid or not.
from your_datastore import RequestTokenSecret
return RequestTokenSecret.get((client_key, token), 'dummy')
Note that the returned secret must be in plaintext.
This method is used by
* AccessTokenEndpoint
"""
raise NotImplementedError("Subclasses must implement this function.")
def get_access_token_secret(self, client_key, token, request):
"""Retrieves the shared secret associated with the access token.
:param client_key: The client/consumer key.
:param token: The access token string.
:param request: An oauthlib.common.Request object.
:returns: The token secret as a string.
This method must allow the use of dummy values and the running time
must be roughly equivalent to that for valid values::
# Unlikely to be near constant time as it uses two database
# lookups for a valid client, and only one for an invalid.
from your_datastore import AccessTokenSecret
if AccessTokenSecret.has(client_key):
return AccessTokenSecret.get((client_key, token))
else:
return 'dummy'
# Aim to mimic number of latency inducing operations no matter
# whether the client is valid or not.
from your_datastore import AccessTokenSecret
return AccessTokenSecret.get((client_key, token), 'dummy')
Note that the returned secret must be in plaintext.
This method is used by
* ResourceEndpoint
"""
raise NotImplementedError("Subclasses must implement this function.")
def get_default_realms(self, client_key, request):
"""Get the default realms for a client.
:param client_key: The client/consumer key.
:param request: An oauthlib.common.Request object.
:returns: The list of default realms associated with the client.
The list of default realms will be set during client registration and
is outside the scope of OAuthLib.
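A minimal sketch, assuming a hypothetical ``your_datastore`` client model
that records the realms chosen at registration::
    from your_datastore import Client
    return Client.get(client_key).default_realms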
This method is used by
* RequestTokenEndpoint
"""
raise NotImplementedError("Subclasses must implement this function.")
def get_realms(self, token, request):
"""Get realms associated with a request token.
:param token: The request token string.
:param request: An oauthlib.common.Request object.
:returns: The list of realms associated with the request token.
This method is used by
* AuthorizationEndpoint
"""
raise NotImplementedError("Subclasses must implement this function.")
def get_redirect_uri(self, token, request):
"""Get the redirect URI associated with a request token.
:param token: The request token string.
:param request: An oauthlib.common.Request object.
:returns: The redirect URI associated with the request token.
It may be desirable to return a custom URI if the redirect is set to "oob".
In this case, the user will be redirected to the returned URI and at that
endpoint the verifier can be displayed.
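One possible sketch, assuming a hypothetical ``your_datastore`` request
token model and a provider hosted page for displaying the verifier::
    from your_datastore import RequestToken
    redirect_uri = RequestToken.get(token).redirect_uri
    if redirect_uri == 'oob':
        return 'https://provider.example/oauth/show_verifier'
    return redirect_uri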
This method is used by
* AuthorizationEndpoint
"""
raise NotImplementedError("Subclasses must implement this function.")
def get_rsa_key(self, client_key, request):
"""Retrieves a previously stored client provided RSA key.
:param client_key: The client/consumer key.
:param request: An oauthlib.common.Request object.
:returns: The rsa public key as a string.
This method must allow the use of a dummy client_key value. Fetching
the rsa key using the dummy key must take the same amount of time
as fetching a key for a valid client. The dummy key must also be of
the same bit length as client keys.
Note that the key must be returned in plaintext.
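A sketch in the same spirit as ``get_client_secret``, assuming a
hypothetical ``your_datastore`` backend and a pre-generated dummy key of
the same bit length as real client keys (``DUMMY_RSA_PUBLIC_KEY`` is an
illustrative constant)::
    from your_datastore import ClientRSAKey
    # A single lookup with a fallback keeps the timing roughly constant.
    return ClientRSAKey.get(client_key, DUMMY_RSA_PUBLIC_KEY)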
This method is used by
* AccessTokenEndpoint
* RequestTokenEndpoint
* ResourceEndpoint
"""
raise NotImplementedError("Subclasses must implement this function.")
def invalidate_request_token(self, client_key, request_token, request):
"""Invalidates a used request token.
:param client_key: The client/consumer key.
:param request_token: The request token string.
:param request: An oauthlib.common.Request object.
:returns: None
This method is used by
* AccessTokenEndpoint
"""
raise NotImplementedError("Subclasses must implement this function.")
def validate_client_key(self, client_key, request):
"""Validates that supplied client key is a registered and valid client.
:param client_key: The client/consumer key.
:param request: An oauthlib.common.Request object.
:returns: True or False
Note that if the dummy client is supplied it should validate in the same
or nearly the same amount of time as a valid one.
Ensure latency inducing tasks are mimicked even for dummy clients.
For example, use::
from your_datastore import Client
try:
return Client.exists(client_key)
except DoesNotExist:
return False
Rather than::
from your_datastore import Client
if client_key == self.dummy_client:
return False
else:
return Client.exists(client_key)
This method is used by
* AccessTokenEndpoint
* RequestTokenEndpoint
* ResourceEndpoint
"""
raise NotImplementedError("Subclasses must implement this function.")
def validate_request_token(self, client_key, token, request):
"""Validates that supplied request token is registered and valid.
:param client_key: The client/consumer key.
:param token: The request token string.
:param request: An oauthlib.common.Request object.
:returns: True or False
Note that if the dummy request_token is supplied it should validate in
the same or nearly the same amount of time as a valid one.
Ensure latency inducing tasks are mimicked even for dummy tokens.
For example, use::
from your_datastore import RequestToken
try:
return RequestToken.exists(client_key, token)
except DoesNotExist:
return False
Rather than::
from your_datastore import RequestToken
if token == self.dummy_request_token:
return False
else:
return RequestToken.exists(client_key, token)
This method is used by
* AccessTokenEndpoint
"""
raise NotImplementedError("Subclasses must implement this function.")
def validate_access_token(self, client_key, token, request):
"""Validates that supplied access token is registered and valid.
:param client_key: The client/consumer key.
:param token: The access token string.
:param request: An oauthlib.common.Request object.
:returns: True or False
Note that if the dummy access token is supplied it should validate in
the same or nearly the same amount of time as a valid one.
Ensure latency inducing tasks are mimicked even for dummy tokens.
For example, use::
from your_datastore import AccessToken
try:
return AccessToken.exists(client_key, token)
except DoesNotExist:
return False
Rather than::
from your_datastore import AccessToken
if token == self.dummy_access_token:
return False
else:
return AccessToken.exists(client_key, token)
This method is used by
* ResourceEndpoint
"""
raise NotImplementedError("Subclasses must implement this function.")
def validate_timestamp_and_nonce(self, client_key, timestamp, nonce,
request, request_token=None, access_token=None):
"""Validates that the nonce has not been used before.
:param client_key: The client/consumer key.
:param timestamp: The ``oauth_timestamp`` parameter.
:param nonce: The ``oauth_nonce`` parameter.
:param request_token: Request token string, if any.
:param access_token: Access token string, if any.
:param request: An oauthlib.common.Request object.
:returns: True or False
Per `Section 3.3`_ of the spec.
"A nonce is a random string, uniquely generated by the client to allow
the server to verify that a request has never been made before and
helps prevent replay attacks when requests are made over a non-secure
channel. The nonce value MUST be unique across all requests with the
same timestamp, client credentials, and token combinations."
.. _`Section 3.3`: http://tools.ietf.org/html/rfc5849#section-3.3
One of the first validation checks that will be made is for the validity
of the nonce and timestamp, which are associated with a client key and
possibly a token. If invalid then immediately fail the request
by returning False. If the nonce/timestamp pair has been used before, you
may have detected a replay attack. It is therefore an essential part of
OAuth security that you do not allow nonce/timestamp reuse.
Note that this validation check is done before checking the validity of
the client and token::
nonces_and_timestamps_database = [
(u'foo', 1234567890, u'rannoMstrInghere', u'bar')
]
def validate_timestamp_and_nonce(self, client_key, timestamp, nonce,
request, request_token=None, access_token=None):
return ((client_key, timestamp, nonce, request_token or access_token)
not in self.nonces_and_timestamps_database)
This method is used by
* AccessTokenEndpoint
* RequestTokenEndpoint
* ResourceEndpoint
"""
raise NotImplementedError("Subclasses must implement this function.")
def validate_redirect_uri(self, client_key, redirect_uri, request):
"""Validates the client supplied redirection URI.
:param client_key: The client/consumer key.
:param redirect_uri: The URI the client wishes to redirect back to after
authorization is successful.
:param request: An oauthlib.common.Request object.
:returns: True or False
It is highly recommended that OAuth providers require their clients
to register all redirection URIs prior to using them in requests and
register them as absolute URIs. See `CWE-601`_ for more information
about open redirection attacks.
By requiring registration of all redirection URIs it should be
straightforward for the provider to verify whether the supplied
redirect_uri is valid or not.
Alternatively per `Section 2.1`_ of the spec:
"If the client is unable to receive callbacks or a callback URI has
been established via other means, the parameter value MUST be set to
"oob" (case sensitive), to indicate an out-of-band configuration."
.. _`CWE-601`: http://cwe.mitre.org/top25/index.html#CWE-601
.. _`Section 2.1`: https://tools.ietf.org/html/rfc5849#section-2.1
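A sketch covering both cases, assuming a hypothetical ``your_datastore``
client model that stores the registered absolute URIs::
    from your_datastore import Client
    if redirect_uri == 'oob':
        return True
    return redirect_uri in Client.get(client_key).registered_redirect_uris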
This method is used by
* RequestTokenEndpoint
"""
raise NotImplementedError("Subclasses must implement this function.")
def validate_requested_realms(self, client_key, realms, request):
"""Validates that the client may request access to the realm.
:param client_key: The client/consumer key.
:param realms: The list of realms the client is requesting access to.
:param request: An oauthlib.common.Request object.
:returns: True or False
This method is invoked when obtaining a request token and should
tie a realm to the request token and after user authorization
this realm restriction should transfer to the access token.
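A sketch, assuming the realms a client may request were stored at
registration time in a hypothetical ``your_datastore`` backend::
    from your_datastore import Client
    allowed = Client.get(client_key).allowed_realms
    return all(r in allowed for r in realms)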
This method is used by
* RequestTokenEndpoint
"""
raise NotImplementedError("Subclasses must implement this function.")
def validate_realms(self, client_key, token, request, uri=None,
realms=None):
"""Validates access to the request realm.
:param client_key: The client/consumer key.
:param token: A request token string.
:param request: An oauthlib.common.Request object.
:param uri: The URI the realms are protecting.
:param realms: A list of realms that must have been granted to
the access token.
:returns: True or False
How providers choose to use the realm parameter is outside the OAuth
specification but it is commonly used to restrict access to a subset
of protected resources such as "photos".
realms is a convenience parameter which can be used to provide
a per-view, pre-defined list of allowed realms.
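A sketch, assuming a hypothetical ``your_datastore`` access token model
that records the realms granted during authorization::
    from your_datastore import AccessToken
    granted = AccessToken.get(client_key, token).realms
    return all(r in granted for r in (realms or []))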
This method is used by
* ResourceEndpoint
"""
raise NotImplementedError("Subclasses must implement this function.")
def validate_verifier(self, client_key, token, verifier, request):
"""Validates a verification code.
:param client_key: The client/consumer key.
:param token: A request token string.
:param verifier: The authorization verifier string.
:param request: An oauthlib.common.Request object.
:returns: True or False
OAuth providers issue a verification code to clients after the
resource owner authorizes access. This code is used by the client to
obtain token credentials and the provider must verify that the
verifier is valid and associated with the client as well as the
resource owner.
Verifier validation should be done in near constant time
(to avoid verifier enumeration). To achieve this we need a
constant time string comparison which is provided by OAuthLib
in ``oauthlib.common.safe_string_equals``::
from your_datastore import Verifier
correct_verifier = Verifier.get(client_key, request_token)
from oauthlib.common import safe_string_equals
return safe_string_equals(verifier, correct_verifier)
This method is used by
* AccessTokenEndpoint
"""
raise NotImplementedError("Subclasses must implement this function.")
def verify_request_token(self, token, request):
"""Verify that the given OAuth1 request token is valid.
:param token: A request token string.
:param request: An oauthlib.common.Request object.
:returns: True or False
This method is used by
* AuthorizationEndpoint
"""
raise NotImplementedError("Subclasses must implement this function.")
def verify_realms(self, token, realms, request):
"""Verify authorized realms to see if they match those given to token.
:param token: An access token string.
:param realms: A list of realms the client attempts to access.
:param request: An oauthlib.common.Request object.
:returns: True or False
This prevents the list of authorized realms sent by the client during
the authorization step from being altered to include realms beyond those
bound to the request token.
Can be as simple as::
valid_realms = self.get_realms(token)
return all((r in valid_realms for r in realms))
This method is used by
* AuthorizationEndpoint
"""
raise NotImplementedError("Subclasses must implement this function.")
def save_access_token(self, token, request):
"""Save an OAuth1 access token.
:param token: A dict with token credentials.
:param request: An oauthlib.common.Request object.
The token dictionary will at minimum include
* ``oauth_token`` the access token string.
* ``oauth_token_secret`` the token specific secret used in signing.
* ``oauth_authorized_realms`` a space separated list of realms.
Client key can be obtained from ``request.client_key``.
The list of realms (not joined string) can be obtained from
``request.realm``.
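A minimal sketch, assuming a hypothetical ``your_datastore`` access token
model::
    from your_datastore import AccessToken
    AccessToken.create(client_key=request.client_key,
                       token=token['oauth_token'],
                       secret=token['oauth_token_secret'],
                       realms=request.realm)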
This method is used by
* AccessTokenEndpoint
"""
raise NotImplementedError("Subclasses must implement this function.")
def save_request_token(self, token, request):
"""Save an OAuth1 request token.
:param token: A dict with token credentials.
:param request: An oauthlib.common.Request object.
The token dictionary will at minimum include
* ``oauth_token`` the request token string.
* ``oauth_token_secret`` the token specific secret used in signing.
* ``oauth_callback_confirmed`` the string ``true``.
Client key can be obtained from ``request.client_key``.
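A minimal sketch, assuming a hypothetical ``your_datastore`` request token
model::
    from your_datastore import RequestToken
    RequestToken.create(client_key=request.client_key,
                        token=token['oauth_token'],
                        secret=token['oauth_token_secret'])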
This method is used by
* RequestTokenEndpoint
"""
raise NotImplementedError("Subclasses must implement this function.")
def save_verifier(self, token, verifier, request):
"""Associate an authorization verifier with a request token.
:param token: A request token string.
:param verifier: A dictionary containing the ``oauth_verifier`` and
``oauth_token``.
:param request: An oauthlib.common.Request object.
We need to associate verifiers with tokens for validation during the
access token request.
Note that unlike the save_*_token methods, ``token`` here is the ``oauth_token``
string from the previously saved request token.
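A minimal sketch, assuming a hypothetical ``your_datastore`` request token
model::
    from your_datastore import RequestToken
    RequestToken.get(token).set_verifier(verifier['oauth_verifier'])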
This method is used by
* AuthorizationEndpoint
"""
raise NotImplementedError("Subclasses must implement this function.")
|
wistoch/meego-app-browser
|
refs/heads/master
|
tools/valgrind/common.py
|
3
|
# Copyright (c) 2009 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import os
import signal
import subprocess
import sys
import time
class NotImplementedError(Exception):
pass
class TimeoutError(Exception):
pass
def _print_line(line, flush=True):
# Printing to a text file (including stdout) on Windows always winds up
# using \r\n automatically. On buildbot, this winds up being read by a master
# running on Linux, so we manually convert crlf to '\n'
print line.rstrip() + '\n',
if flush:
sys.stdout.flush()
def RunSubprocessInBackground(proc):
"""Runs a subprocess in the background. Returns a handle to the process."""
logging.info("running %s in the background" % " ".join(proc))
return subprocess.Popen(proc)
def RunSubprocess(proc, timeout=0, detach=False, background=False):
""" Runs a subprocess, until it finishes or |timeout| is exceeded and the
process is killed with taskkill. A |timeout| <= 0 means no timeout.
Args:
proc: list of process components (exe + args)
timeout: how long to wait before killing, <= 0 means wait forever
detach: Whether to pass the DETACHED_PROCESS argument to CreateProcess
on Windows. This is used by Purify subprocesses on buildbot which
seem to get confused by the parent console that buildbot sets up.
"""
logging.info("running %s, timeout %d sec" % (" ".join(proc), timeout))
if detach:
# see MSDN docs for "Process Creation Flags"
DETACHED_PROCESS = 0x8
p = subprocess.Popen(proc, creationflags=DETACHED_PROCESS)
else:
# For non-detached processes, manually read and print out stdout and stderr.
# By default, the subprocess is supposed to inherit these from its parent,
# however when run under buildbot, it seems unable to read data from a
# grandchild process, so we have to read the child and print the data as if
# it came from us for buildbot to read it. We're not sure why this is
# necessary.
# TODO(erikkay): should we buffer stderr and stdout separately?
p = subprocess.Popen(proc, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
logging.info("started subprocess")
# How long to wait (in seconds) before printing progress log messages.
progress_delay = 300
progress_delay_time = time.time() + progress_delay
did_timeout = False
if timeout > 0:
wait_until = time.time() + timeout
while p.poll() is None and not did_timeout:
if not detach:
line = p.stdout.readline()
while line and not did_timeout:
_print_line(line)
line = p.stdout.readline()
if timeout > 0:
did_timeout = time.time() > wait_until
else:
# When we detach, blocking on reading stdout doesn't work, so we sleep
# a short time and poll.
time.sleep(0.5)
if time.time() >= progress_delay_time:
# Force output on a periodic basis to avoid getting killed off by the
# buildbot.
# TODO(erikkay): I'd prefer a less obtrusive 'print ".",' with a flush
# but because of how we're doing subprocesses, this doesn't appear to
# work reliably.
logging.info("%s still running..." % os.path.basename(proc[0]))
progress_delay_time = time.time() + progress_delay
if timeout > 0:
did_timeout = time.time() > wait_until
if did_timeout:
logging.info("process timed out")
else:
logging.info("process ended, did not time out")
if did_timeout:
if IsWindows():
subprocess.call(["taskkill", "/T", "/F", "/PID", str(p.pid)])
else:
# Does this kill all children, too?
os.kill(p.pid, signal.SIGINT)
logging.error("KILLED %d" % p.pid)
# Give the process a chance to actually die before continuing
# so that cleanup can happen safely.
time.sleep(1.0)
logging.error("TIMEOUT waiting for %s" % proc[0])
raise TimeoutError(proc[0])
elif not detach:
for line in p.stdout.readlines():
_print_line(line, False)
if not IsMac(): # stdout flush fails on Mac
logging.info("flushing stdout")
p.stdout.flush()
logging.info("collecting result code")
result = p.poll()
if result:
logging.error("%s exited with non-zero result code %d" % (proc[0], result))
return result
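# A usage sketch (illustrative only; the command, filter and timeout below
# are hypothetical and not part of this module):
#
#   result = RunSubprocess(['out/Debug/base_unittests', '--gtest_filter=Foo.*'],
#                          timeout=600)
#   if result:
#     sys.exit(result)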
def IsLinux():
return sys.platform.startswith('linux')
def IsMac():
return sys.platform.startswith('darwin')
def IsWindows():
return sys.platform == 'cygwin' or sys.platform.startswith('win')
def PlatformNames():
"""Return an array of string to be used in paths for the platform
(e.g. suppressions, gtest filters, ignore files etc.)
The first element of the array describes the 'main' platform
"""
if IsLinux():
return ['linux']
if IsMac():
return ['mac']
if IsWindows():
return ['win32']
raise NotImplementedError('Unknown platform "%s".' % sys.platform)
|
bennylope/elasticstack
|
refs/heads/master
|
tests/test_fields.py
|
1
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_fields
------------
Tests for `elasticstack` fields module.
"""
from django.test import TestCase
from elasticstack.fields import ConfigurableFieldMixin
class TestFields(TestCase):
def test_missing_analyzer(self):
"""No specified analyzer should result in an error"""
self.assertRaises(ValueError, ConfigurableFieldMixin)
|
palerdot/calibre
|
refs/heads/master
|
src/calibre/devices/bambook/libbambookcore.py
|
21
|
# -*- coding: utf-8 -*-
__license__ = 'GPL v3'
__copyright__ = '2010, Li Fanxi <lifanxi at freemindworld.com>'
__docformat__ = 'restructuredtext en'
'''
Sanda library wrapper
'''
import ctypes, hashlib, os, sys
from threading import Event, Lock
from calibre.constants import iswindows
from calibre import load_library
try:
_lib_name = 'libBambookCore'
cdll = ctypes.cdll
if iswindows:
_lib_name = 'BambookCore'
if hasattr(sys, 'frozen') and iswindows:
lp = os.path.join(os.path.dirname(sys.executable), 'DLLs', 'BambookCore.dll')
lib_handle = cdll.LoadLibrary(lp)
elif hasattr(sys, 'frozen_path'):
lp = os.path.join(sys.frozen_path, 'lib', 'libBambookCore.so')
lib_handle = cdll.LoadLibrary(lp)
else:
lib_handle = load_library(_lib_name, cdll)
except:
lib_handle = None
text_encoding = 'utf-8'
if iswindows:
text_encoding = 'mbcs'
def is_bambook_lib_ready():
return lib_handle != None
# Constant
DEFAULT_BAMBOOK_IP = '192.168.250.2'
BAMBOOK_SDK_VERSION = 0x00090000
BR_SUCC = 0 # 操作成功
BR_FAIL = 1001 # 操作失败
BR_NOT_IMPL = 1002 # 该功能还未实现
BR_DISCONNECTED = 1003 # 与设备的连接已断开
BR_PARAM_ERROR = 1004 # 调用函数传入的参数错误
BR_TIMEOUT = 1005 # 操作或通讯超时
BR_INVALID_HANDLE = 1006 # 传入的句柄无效
BR_INVALID_FILE = 1007 # 传入的文件不存在或格式无效
BR_INVALID_DIR = 1008 # 传入的目录不存在
BR_BUSY = 1010 # 设备忙,另一个操作还未完成
BR_EOF = 1011 # 文件或操作已结束
BR_IO_ERROR = 1012 # 文件读写失败
BR_FILE_NOT_INSIDE = 1013 # 指定的文件不在包里
# 当前连接状态
CONN_CONNECTED = 0 # 已连接
CONN_DISCONNECTED = 1 # 未连接或连接已断开
CONN_CONNECTING = 2 # 正在连接
CONN_WAIT_FOR_AUTH = 3 # 已连接,正在等待身份验证(暂未实现)
#传输状态
TRANS_STATUS_TRANS = 0 #正在传输
TRANS_STATUS_DONE = 1 #传输完成
TRANS_STATUS_ERR = 2 #传输出错
# Key Enums
BBKeyNum0 = 0
BBKeyNum1 = 1
BBKeyNum2 = 2
BBKeyNum3 = 3
BBKeyNum4 = 4
BBKeyNum5 = 5
BBKeyNum6 = 6
BBKeyNum7 = 7
BBKeyNum8 = 8
BBKeyNum9 = 9
BBKeyStar = 10
BBKeyCross = 11
BBKeyUp = 12
BBKeyDown = 13
BBKeyLeft = 14
BBKeyRight = 15
BBKeyPageUp = 16
BBKeyPageDown = 17
BBKeyOK = 18
BBKeyESC = 19
BBKeyBookshelf = 20
BBKeyStore = 21
BBKeyTTS = 22
BBKeyMenu = 23
BBKeyInteract = 24
class DeviceInfo(ctypes.Structure):
_fields_ = [ ("cbSize", ctypes.c_int),
("sn", ctypes.c_char * 20),
("firmwareVersion", ctypes.c_char * 20),
("deviceVolume", ctypes.c_int),
("spareVolume", ctypes.c_int),
]
def __init__(self):
self.cbSize = ctypes.sizeof(self)
class PrivBookInfo(ctypes.Structure):
_fields_ = [ ("cbSize", ctypes.c_int),
("bookGuid", ctypes.c_char * 20),
("bookName", ctypes.c_char * 80),
("bookAuthor", ctypes.c_char * 40),
("bookAbstract", ctypes.c_char * 256),
]
def Clone(self):
bookInfo = PrivBookInfo()
bookInfo.cbSize = self.cbSize
bookInfo.bookGuid = self.bookGuid
bookInfo.bookName = self.bookName
bookInfo.bookAuthor = self.bookAuthor
bookInfo.bookAbstract = self.bookAbstract
return bookInfo
def __init__(self):
self.cbSize = ctypes.sizeof(self)
# extern "C"_declspec(dllexport) BB_RESULT BambookConnect(const char* lpszIP, int timeOut, BB_HANDLE* hConn);
def BambookConnect(ip = DEFAULT_BAMBOOK_IP, timeout = 0):
if isinstance(ip, unicode):
ip = ip.encode('ascii')
handle = ctypes.c_void_p(0)
if lib_handle == None:
raise Exception(_('Bambook SDK has not been installed.'))
ret = lib_handle.BambookConnect(ip, timeout, ctypes.byref(handle))
if ret == BR_SUCC:
return handle
else:
return None
# extern "C" _declspec(dllexport) BB_RESULT BambookGetConnectStatus(BB_HANDLE hConn, int* status);
def BambookGetConnectStatus(handle):
status = ctypes.c_int(0)
ret = lib_handle.BambookGetConnectStatus(handle, ctypes.byref(status))
if ret == BR_SUCC:
return status.value
else:
return None
# extern "C" _declspec(dllexport) BB_RESULT BambookDisconnect(BB_HANDLE hConn);
def BambookDisconnect(handle):
ret = lib_handle.BambookDisconnect(handle)
if ret == BR_SUCC:
return True
else:
return False
# extern "C" const char * BambookGetErrorString(BB_RESULT nCode)
def BambookGetErrorString(code):
func = lib_handle.BambookGetErrorString
func.restype = ctypes.c_char_p
return func(code)
# extern "C" BB_RESULT BambookGetSDKVersion(uint32_t * version);
def BambookGetSDKVersion():
version = ctypes.c_int(0)
lib_handle.BambookGetSDKVersion(ctypes.byref(version))
return version.value
# extern "C" BB_RESULT BambookGetDeviceInfo(BB_HANDLE hConn, DeviceInfo* pInfo);
def BambookGetDeviceInfo(handle):
deviceInfo = DeviceInfo()
ret = lib_handle.BambookGetDeviceInfo(handle, ctypes.byref(deviceInfo))
if ret == BR_SUCC:
return deviceInfo
else:
return None
# extern "C" BB_RESULT BambookKeyPress(BB_HANDLE hConn, BambookKey key);
def BambookKeyPress(handle, key):
ret = lib_handle.BambookKeyPress(handle, key)
if ret == BR_SUCC:
return True
else:
return False
# extern "C" BB_RESULT BambookGetFirstPrivBookInfo(BB_HANDLE hConn, PrivBookInfo * pInfo);
def BambookGetFirstPrivBookInfo(handle, bookInfo):
bookInfo.contents.cbSize = ctypes.sizeof(bookInfo.contents)
ret = lib_handle.BambookGetFirstPrivBookInfo(handle, bookInfo)
if ret == BR_SUCC:
return True
else:
return False
# extern "C" BB_RESULT BambookGetNextPrivBookInfo(BB_HANDLE hConn, PrivBookInfo * pInfo);
def BambookGetNextPrivBookInfo(handle, bookInfo):
bookInfo.contents.cbSize = ctypes.sizeof(bookInfo.contents)
ret = lib_handle.BambookGetNextPrivBookInfo(handle, bookInfo)
if ret == BR_SUCC:
return True
elif ret == BR_EOF:
return False
else:
return False
# extern "C" BB_RESULT BambookDeletePrivBook(BB_HANDLE hConn, const char * lpszBookID);
def BambookDeletePrivBook(handle, guid):
if isinstance(guid, unicode):
guid = guid.encode('ascii')
ret = lib_handle.BambookDeletePrivBook(handle, guid)
if ret == BR_SUCC:
return True
else:
return False
class JobQueue:
jobs = {}
maxID = 0
lock = Lock()
def __init__(self):
self.maxID = 0
def NewJob(self):
self.lock.acquire()
self.maxID = self.maxID + 1
maxid = self.maxID
self.lock.release()
event = Event()
self.jobs[maxid] = (event, TRANS_STATUS_TRANS)
return maxid
def FinishJob(self, jobID, status):
self.jobs[jobID] = (self.jobs[jobID][0], status)
self.jobs[jobID][0].set()
def WaitJob(self, jobID):
self.jobs[jobID][0].wait()
return (self.jobs[jobID][1] == TRANS_STATUS_DONE)
def DeleteJob(self, jobID):
del self.jobs[jobID]
job = JobQueue()
def BambookTransferCallback(status, progress, userData):
if status == TRANS_STATUS_DONE and progress == 100:
job.FinishJob(userData, status)
elif status == TRANS_STATUS_ERR:
job.FinishJob(userData, status)
TransCallback = ctypes.CFUNCTYPE(None, ctypes.c_int, ctypes.c_int, ctypes.c_int)
bambookTransferCallback = TransCallback(BambookTransferCallback)
# extern "C" BB_RESULT BambookAddPrivBook(BB_HANDLE hConn, const char * pszSnbFile,
# TransCallback pCallbackFunc, intptr_t userData);
def BambookAddPrivBook(handle, filename, callback, userData):
if isinstance(filename, unicode):
filename = filename.encode('ascii')
ret = lib_handle.BambookAddPrivBook(handle, filename, callback, userData)
if ret == BR_SUCC:
return True
else:
return False
# extern "C" BB_RESULT BambookReplacePrivBook(BB_HANDLE hConn, const char *
# pszSnbFile, const char * lpszBookID, TransCallback pCallbackFunc, intptr_t userData);
def BambookReplacePrivBook(handle, filename, bookID, callback, userData):
if isinstance(filename, unicode):
filename = filename.encode('ascii')
if isinstance(bookID, unicode):
bookID = bookID.encode('ascii')
ret = lib_handle.BambookReplacePrivBook(handle, filename, bookID, callback, userData)
if ret == BR_SUCC:
return True
else:
return False
# extern "C" BB_RESULT BambookFetchPrivBook(BB_HANDLE hConn, const char *
# lpszBookID, const char * lpszFilePath, TransCallback pCallbackFunc, intptr_t userData);
def BambookFetchPrivBook(handle, bookID, filename, callback, userData):
if isinstance(filename, unicode):
filename = filename.encode('ascii')
if isinstance(bookID, unicode):
bookID = bookID.encode('ascii')
ret = lib_handle.BambookFetchPrivBook(handle, bookID, filename, bambookTransferCallback, userData)
if ret == BR_SUCC:
return True
else:
return False
# extern "C" BB_RESULT BambookVerifySnbFile(const char * snbName)
def BambookVerifySnbFile(filename):
if isinstance(filename, unicode):
filename = filename.encode('ascii')
if lib_handle.BambookVerifySnbFile(filename) == BR_SUCC:
return True
else:
return False
# BB_RESULT BambookPackSnbFromDir ( const char * snbName, const char * rootDir );
def BambookPackSnbFromDir(snbFileName, rootDir):
if isinstance(snbFileName, unicode):
snbFileName = snbFileName.encode('ascii')
if isinstance(rootDir, unicode):
rootDir = rootDir.encode('ascii')
ret = lib_handle.BambookPackSnbFromDir(snbFileName, rootDir)
if ret == BR_SUCC:
return True
else:
return False
# BB_RESULT BambookUnpackFileFromSnb ( const char * snbName, const char * relativePath, const char * outfname );
def BambookUnpackFileFromSnb(snbFileName, relPath, outFileName):
if isinstance(snbFileName, unicode):
snbFileName = snbFileName.encode('ascii')
if isinstance(relPath, unicode):
relPath = relPath.encode('ascii')
if isinstance(outFileName, unicode):
outFileName = outFileName.encode('ascii')
ret = lib_handle.BambookUnpackFileFromSnb(snbFileName, relPath, outFileName)
if ret == BR_SUCC:
return True
else:
return False
class Bambook:
def __init__(self):
self.handle = None
def Connect(self, ip = DEFAULT_BAMBOOK_IP, timeout = 10000):
if ip == None or ip == '':
ip = DEFAULT_BAMBOOK_IP
self.handle = BambookConnect(ip, timeout)
if self.handle and self.handle != 0:
return True
else:
return False
def Disconnect(self):
if self.handle:
return BambookDisconnect(self.handle)
return False
def GetState(self):
if self.handle:
return BambookGetConnectStatus(self.handle)
return CONN_DISCONNECTED
def GetDeviceInfo(self):
if self.handle:
return BambookGetDeviceInfo(self.handle)
return None
def SendFile(self, fileName, guid = None):
import uuid
if self.handle:
taskID = job.NewJob()
if guid:
if BambookReplacePrivBook(self.handle, fileName, guid,
bambookTransferCallback, taskID):
if job.WaitJob(taskID):
job.DeleteJob(taskID)
return guid
else:
job.DeleteJob(taskID)
return None
else:
job.DeleteJob(taskID)
return None
else:
guid = hashlib.md5(str(uuid.uuid4())).hexdigest()[0:15] + ".snb"
if BambookReplacePrivBook(self.handle, fileName, guid,
bambookTransferCallback, taskID):
if job.WaitJob(taskID):
job.DeleteJob(taskID)
return guid
else:
job.DeleteJob(taskID)
return None
else:
job.DeleteJob(taskID)
return None
return False
def GetFile(self, guid, fileName):
if self.handle:
taskID = job.NewJob()
ret = BambookFetchPrivBook(self.handle, guid, fileName, bambookTransferCallback, taskID)
if ret:
ret = job.WaitJob(taskID)
job.DeleteJob(taskID)
return ret
else:
job.DeleteJob(taskID)
return False
return False
def DeleteFile(self, guid):
if self.handle:
ret = BambookDeletePrivBook(self.handle, guid)
return ret
return False
def GetBookList(self):
if self.handle:
books = []
bookInfo = PrivBookInfo()
bi = ctypes.pointer(bookInfo)
ret = BambookGetFirstPrivBookInfo(self.handle, bi)
while ret:
books.append(bi.contents.Clone())
ret = BambookGetNextPrivBookInfo(self.handle, bi)
return books
@staticmethod
def GetSDKVersion():
return BambookGetSDKVersion()
@staticmethod
def VerifySNB(fileName):
return BambookVerifySnbFile(fileName)
@staticmethod
def ExtractSNBContent(fileName, relPath, path):
return BambookUnpackFileFromSnb(fileName, relPath, path)
@staticmethod
def ExtractSNB(fileName, path):
ret = BambookUnpackFileFromSnb(fileName, 'snbf/book.snbf', path + '/snbf/book.snbf')
if not ret:
return False
ret = BambookUnpackFileFromSnb(fileName, 'snbf/toc.snbf', path + '/snbf/toc.snbf')
if not ret:
return False
return True
@staticmethod
def PackageSNB(fileName, path):
return BambookPackSnbFromDir(fileName, path)
def passed():
print "> Pass"
def failed():
print "> Failed"
if __name__ == "__main__":
print "Bambook SDK Unit Test"
bb = Bambook()
print "Disconnect State"
if bb.GetState() == CONN_DISCONNECTED:
passed()
else:
failed()
print "Get SDK Version"
if bb.GetSDKVersion() == BAMBOOK_SDK_VERSION:
passed()
else:
failed()
print "Verify good SNB File"
if bb.VerifySNB(u'/tmp/f8268e6c1f4e78c.snb'):
passed()
else:
failed()
print "Verify bad SNB File"
if not bb.VerifySNB('./libwrapper.py'):
passed()
else:
failed()
print "Extract SNB File"
if bb.ExtractSNB('./test.snb', '/tmp/test'):
passed()
else:
failed()
print "Packet SNB File"
if bb.PackageSNB('/tmp/tmp.snb', '/tmp/test') and bb.VerifySNB('/tmp/tmp.snb'):
passed()
else:
failed()
print "Connect to Bambook"
if bb.Connect('192.168.250.2', 10000) and bb.GetState() == CONN_CONNECTED:
passed()
else:
failed()
print "Get Bambook Info"
devInfo = bb.GetDeviceInfo()
if devInfo:
# print "Info Size: ", devInfo.cbSize
# print "SN: ", devInfo.sn
# print "Firmware: ", devInfo.firmwareVersion
# print "Capacity: ", devInfo.deviceVolume
# print "Free: ", devInfo.spareVolume
if devInfo.cbSize == 52 and devInfo.deviceVolume == 1714232:
passed()
else:
failed()
print "Send file"
if bb.SendFile('/tmp/tmp.snb'):
passed()
else:
failed()
print "Get book list"
books = bb.GetBookList()
if len(books) > 10:
passed()
else:
failed()
print "Get book"
if bb.GetFile('f8268e6c1f4e78c.snb', '/tmp') and bb.VerifySNB('/tmp/f8268e6c1f4e78c.snb'):
passed()
else:
failed()
print "Disconnect"
if bb.Disconnect():
passed()
else:
failed()
|
deKupini/erp
|
refs/heads/master
|
addons/event_sale/models/event.py
|
5
|
# -*- coding: utf-8 -*-
from openerp import models, fields, api, _
import openerp.addons.decimal_precision as dp
from openerp.exceptions import UserError
from openerp.osv import fields as old_fields
class event_event(models.Model):
_inherit = 'event.event'
event_ticket_ids = fields.One2many(
'event.event.ticket', 'event_id', string='Event Ticket',
default=lambda rec: rec._default_tickets(), copy=True)
badge_back = fields.Html('Badge Back', translate=True, states={'done': [('readonly', True)]})
badge_innerleft = fields.Html('Badge Inner Left', translate=True, states={'done': [('readonly', True)]})
badge_innerright = fields.Html('Badge Inner Right', translate=True, states={'done': [('readonly', True)]})
@api.model
def _default_tickets(self):
try:
product = self.env.ref('event_sale.product_product_event')
return [{
'name': _('Subscription'),
'product_id': product.id,
'price': 0,
}]
except ValueError:
return self.env['event.event.ticket']
class event_ticket(models.Model):
_name = 'event.event.ticket'
_description = 'Event Ticket'
name = fields.Char('Name', required=True, translate=True)
event_id = fields.Many2one('event.event', "Event", required=True, ondelete='cascade')
product_id = fields.Many2one(
'product.product', 'Product',
required=True, domain=[("event_type_id", "!=", False)],
default=lambda self: self._default_product_id())
registration_ids = fields.One2many('event.registration', 'event_ticket_id', 'Registrations')
price = fields.Float('Price', digits=dp.get_precision('Product Price'))
deadline = fields.Date("Sales End")
is_expired = fields.Boolean('Is Expired', compute='_is_expired')
@api.model
def _default_product_id(self):
try:
product = self.env['ir.model.data'].get_object('event_sale', 'product_product_event')
return product.id
except ValueError:
return False
@api.one
@api.depends('deadline')
def _is_expired(self):
if self.deadline:
current_date = fields.Date.context_today(self.with_context({'tz': self.event_id.date_tz}))
self.is_expired = self.deadline < current_date
else:
self.is_expired = False
# FIXME non-stored fields won't end up in _columns (and thus _all_columns), which forbids them
# from being used in qweb views. Pending a fix, we create an old-style function field directly.
"""
price_reduce = fields.Float("Price Reduce", compute="_get_price_reduce", store=False,
digits=dp.get_precision('Product Price'))
@api.one
@api.depends('price', 'product_id.lst_price', 'product_id.price')
def _get_price_reduce(self):
product = self.product_id
discount = product.lst_price and (product.lst_price - product.price) / product.lst_price or 0.0
self.price_reduce = (1.0 - discount) * self.price
"""
def _get_price_reduce(self, cr, uid, ids, field_name, arg, context=None):
res = dict.fromkeys(ids, 0.0)
for ticket in self.browse(cr, uid, ids, context=context):
product = ticket.product_id
discount = product.lst_price and (product.lst_price - product.price) / product.lst_price or 0.0
res[ticket.id] = (1.0 - discount) * ticket.price
return res
_columns = {
'price_reduce': old_fields.function(_get_price_reduce, type='float', string='Price Reduce',
digits_compute=dp.get_precision('Product Price')),
}
# seats fields
seats_availability = fields.Selection(
[('limited', 'Limited'), ('unlimited', 'Unlimited')],
'Available Seat', required=True, store=True, compute='_compute_seats', default="limited")
seats_max = fields.Integer('Maximum Available Seats',
help="Define the number of available tickets. If you have too much registrations you will"
"not BE able to sell tickets anymore. Set 0 to ignore this rule set as unlimited.")
seats_reserved = fields.Integer(string='Reserved Seats', compute='_compute_seats', store=True)
seats_available = fields.Integer(string='Available Seats', compute='_compute_seats', store=True)
seats_unconfirmed = fields.Integer(string='Unconfirmed Seat Reservations', compute='_compute_seats', store=True)
seats_used = fields.Integer(compute='_compute_seats', store=True)
@api.multi
@api.depends('seats_max', 'registration_ids.state')
def _compute_seats(self):
""" Determine reserved, available, reserved but unconfirmed and used seats. """
# initialize fields to 0 + compute seats availability
for ticket in self:
ticket.seats_availability = 'unlimited' if ticket.seats_max == 0 else 'limited'
ticket.seats_unconfirmed = ticket.seats_reserved = ticket.seats_used = ticket.seats_available = 0
# aggregate registrations by ticket and by state
if self.ids:
state_field = {
'draft': 'seats_unconfirmed',
'open': 'seats_reserved',
'done': 'seats_used',
}
query = """ SELECT event_ticket_id, state, count(event_id)
FROM event_registration
WHERE event_ticket_id IN %s AND state IN ('draft', 'open', 'done')
GROUP BY event_ticket_id, state
"""
self._cr.execute(query, (tuple(self.ids),))
for event_ticket_id, state, num in self._cr.fetchall():
ticket = self.browse(event_ticket_id)
ticket[state_field[state]] += num
# compute seats_available
for ticket in self:
if ticket.seats_max > 0:
ticket.seats_available = ticket.seats_max - (ticket.seats_reserved + ticket.seats_used)
@api.one
@api.constrains('registration_ids', 'seats_max')
def _check_seats_limit(self):
if self.seats_max and self.seats_available < 0:
raise UserError(_('No more available seats for the ticket'))
@api.onchange('product_id')
def onchange_product_id(self):
price = self.product_id.list_price if self.product_id else 0
return {'value': {'price': price}}
class event_registration(models.Model):
_inherit = 'event.registration'
event_ticket_id = fields.Many2one('event.event.ticket', 'Event Ticket')
# in addition to origin generic fields, add real relational fields to correctly
# handle attendees linked to sale orders and their lines
# TDE FIXME: maybe add an onchange on sale_order_id + origin
sale_order_id = fields.Many2one('sale.order', 'Source Sale Order', ondelete='cascade')
sale_order_line_id = fields.Many2one('sale.order.line', 'Sale Order Line', ondelete='cascade')
@api.one
@api.constrains('event_ticket_id', 'state')
def _check_ticket_seats_limit(self):
if self.event_ticket_id.seats_max and self.event_ticket_id.seats_available < 0:
raise UserError(_('No more available seats for this ticket'))
@api.multi
def _check_auto_confirmation(self):
res = super(event_registration, self)._check_auto_confirmation()
if res:
orders = self.env['sale.order'].search([('state', '=', 'draft'), ('id', 'in', self.mapped('sale_order_id').ids)], limit=1)
if orders:
res = False
return res
@api.model
def create(self, vals):
res = super(event_registration, self).create(vals)
if res.origin or res.sale_order_id:
message = _("The registration has been created for event %(event_name)s%(ticket)s from sale order %(order)s") % ({
'event_name': '<i>%s</i>' % res.event_id.name,
'ticket': res.event_ticket_id and _(' with ticket %s') % (('<i>%s</i>') % res.event_ticket_id.name) or '',
'order': res.origin or res.sale_order_id.name})
res.message_post(body=message)
return res
@api.model
def _prepare_attendee_values(self, registration):
""" Override to add sale related stuff """
line_id = registration.get('sale_order_line_id')
if line_id:
registration.setdefault('partner_id', line_id.order_id.partner_id)
att_data = super(event_registration, self)._prepare_attendee_values(registration)
if line_id:
att_data.update({
'event_id': line_id.event_id.id,
'event_ticket_id': line_id.event_ticket_id.id,
'origin': line_id.order_id.name,
'sale_order_id': line_id.order_id.id,
'sale_order_line_id': line_id.id,
})
return att_data
|
AFMD/smallProjects
|
refs/heads/master
|
DC_sourcemeter/run_me.py
|
1
|
#!/usr/bin/env python3
# written by grey@christoforo.net
# on 20 Feb 2019
# Example usage:
# ./run_me.py --sm-address ASRL/dev/ttyUSB0::INSTR --duration 10 --set-point 0.001 --compliance 2 --nplc 10 > data.csv
# for serial communication with a sourcemeter attached to /dev/ttyUSB0
# sourcing 1mA
# for 10 seconds
# with a voltage compliance setting of 2V
# with NPLC=10 integration
# and storing the data generated in a file called data.csv
# data generated here can be read in nicely with pandas:
# pandas.read_csv('data.csv')
from k2400 import k2400
import argparse
parser = argparse.ArgumentParser(description='Make DC sourcemeter measurements under constant current or voltage and dump csv to stdout: time, current, voltage, status')
parser.add_argument('-a', '--sm-address', type=str, required=True, help='Sourcemeter connection address, eg. "ASRL0::INSTR"')
parser.add_argument('-v', '--source-voltage', default=False, action='store_true', help="Source voltage (if this argument is absent current will be sourced)")
parser.add_argument('-s', '--set-point', type=float, required=True, help="Source value in amps or volts")
parser.add_argument('-t', '--duration', type=float, required=True, help="Number of seconds to measure for")
parser.add_argument('-c', '--compliance', type=float, required=True, help="Compliance value in amps or volts")
parser.add_argument('-n', '--nplc', type=float, default=10.0, help='NPLC value')
parser.add_argument('-f', '--front', default=False, action='store_true', help='Use the front terminals')
parser.add_argument('-w', '--four-wire', default=False, action='store_true', help='Use four wire mode')
args = parser.parse_args()
baud = 57600
terminator = bytearray.fromhex('0A').decode()
sm = k2400(visa_lib='@py', addressString=args.sm_address, terminator=terminator, serialBaud=baud, front=args.front, twoWire=not args.four_wire, quiet=True)
sm.setNPLC(args.nplc)
sm.setupDC(sourceVoltage=args.source_voltage, compliance=args.compliance, setPoint=args.set_point, senseRange='a')
sm.write(':arm:source immediate')
def measurement_callback(measurement):
m = measurement
# time,current,voltage,status
print("{:0.3f},{:0.6f},{:0.6f},{:d}".format(m[2],m[1],m[0],int(m[3])),flush=True)
# print header
print('#Time [s],Current [A],Voltage [V],Status Bits',flush=True)
sm.measureUntil(t_dwell=args.duration, cb=measurement_callback)
sm.outOn(False)
|
ar4s/django
|
refs/heads/master
|
tests/logging_tests/logconfig.py
|
609
|
import logging
from django.conf import settings
from django.core.mail.backends.base import BaseEmailBackend
class MyHandler(logging.Handler):
def __init__(self):
logging.Handler.__init__(self)
self.config = settings.LOGGING
class MyEmailBackend(BaseEmailBackend):
def send_messages(self, email_messages):
pass
|
hustlzp/zulip
|
refs/heads/master
|
confirmation/util.py
|
126
|
# -*- coding: utf-8 -*-
# Copyright: (c) 2008, Jarek Zgoda <jarek.zgoda@gmail.com>
__revision__ = '$Id: util.py 3 2008-11-18 07:33:52Z jarek.zgoda $'
from django.conf import settings
def get_status_field(app_label, model_name):
model = '%s.%s' % (app_label, model_name)
mapping = getattr(settings, 'STATUS_FIELDS', {})
return mapping.get(model, 'status')
|
petermalcolm/osf.io
|
refs/heads/develop
|
scripts/retract_registrations.py
|
21
|
"""Script for retracting pending retractions that are more than 48 hours old."""
import datetime
import logging
import sys
from modularodm import Q
from framework.auth import Auth
from framework.transactions.context import TokuTransaction
from website import models, settings
from website.app import init_app
from website.project.model import NodeLog
from scripts import utils as scripts_utils
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
def main(dry_run=True):
pending_retractions = models.Retraction.find(Q('state', 'eq', models.Retraction.UNAPPROVED))
for retraction in pending_retractions:
if should_be_retracted(retraction):
if dry_run:
logger.warn('Dry run mode')
try:
parent_registration = models.Node.find_one(Q('retraction', 'eq', retraction))
except Exception as err:
logger.error('Could not find registration associated with retraction {}'.format(retraction))
logger.error('Skipping...')
continue
logger.warn(
'Retraction {0} approved. Retracting registration {1}'
.format(retraction._id, parent_registration._id)
)
if not dry_run:
with TokuTransaction():
retraction.state = models.Retraction.APPROVED
try:
parent_registration.registered_from.add_log(
action=NodeLog.RETRACTION_APPROVED,
params={
'node': parent_registration._id,
'retraction_id': parent_registration.retraction._id,
},
auth=Auth(parent_registration.retraction.initiated_by),
)
retraction.save()
parent_registration.update_search()
for node in parent_registration.get_descendants_recursive():
node.update_search()
except Exception as err:
logger.error(
'Unexpected error raised when retracting '
'registration {}. Continuing...'.format(parent_registration))
logger.exception(err)
def should_be_retracted(retraction):
"""Returns true if retraction was initiated more than 48 hours prior"""
return (datetime.datetime.utcnow() - retraction.initiation_date) >= settings.RETRACTION_PENDING_TIME
if __name__ == '__main__':
dry_run = 'dry' in sys.argv
init_app(routes=False)
if not dry_run:
scripts_utils.add_file_logger(logger, __file__)
main(dry_run=dry_run)
|
aforalee/keystone
|
refs/heads/master
|
keystone/tests/unit/test_exception.py
|
3
|
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from oslo_config import cfg
from oslo_config import fixture as config_fixture
from oslo_serialization import jsonutils
import six
from keystone.common import wsgi
from keystone import exception
from keystone.tests import unit
class ExceptionTestCase(unit.BaseTestCase):
def assertValidJsonRendering(self, e):
resp = wsgi.render_exception(e)
self.assertEqual(e.code, resp.status_int)
self.assertEqual('%s %s' % (e.code, e.title), resp.status)
j = jsonutils.loads(resp.body)
self.assertIsNotNone(j.get('error'))
self.assertIsNotNone(j['error'].get('code'))
self.assertIsNotNone(j['error'].get('title'))
self.assertIsNotNone(j['error'].get('message'))
self.assertNotIn('\n', j['error']['message'])
self.assertNotIn(' ', j['error']['message'])
self.assertTrue(type(j['error']['code']) is int)
def test_all_json_renderings(self):
"""Everything callable in the exception module should be renderable.
... except for the base error class (exception.Error), which is not
user-facing.
This test provides a custom message to bypass docstring parsing, which
should be tested separately.
"""
for cls in [x for x in exception.__dict__.values() if callable(x)]:
if cls is not exception.Error and isinstance(cls, exception.Error):
self.assertValidJsonRendering(cls(message='Overridden.'))
def test_validation_error(self):
target = uuid.uuid4().hex
attribute = uuid.uuid4().hex
e = exception.ValidationError(target=target, attribute=attribute)
self.assertValidJsonRendering(e)
self.assertIn(target, six.text_type(e))
self.assertIn(attribute, six.text_type(e))
def test_not_found(self):
target = uuid.uuid4().hex
e = exception.NotFound(target=target)
self.assertValidJsonRendering(e)
self.assertIn(target, six.text_type(e))
def test_forbidden_title(self):
e = exception.Forbidden()
resp = wsgi.render_exception(e)
j = jsonutils.loads(resp.body)
self.assertEqual('Forbidden', e.title)
self.assertEqual('Forbidden', j['error'].get('title'))
def test_unicode_message(self):
message = u'Comment \xe7a va'
e = exception.Error(message)
try:
self.assertEqual(message, six.text_type(e))
except UnicodeEncodeError:
self.fail("unicode error message not supported")
def test_unicode_string(self):
e = exception.ValidationError(attribute='xx',
target='Long \xe2\x80\x93 Dash')
if six.PY2:
self.assertIn(u'\u2013', six.text_type(e))
else:
self.assertIn('Long \xe2\x80\x93 Dash', six.text_type(e))
def test_invalid_unicode_string(self):
# NOTE(jamielennox): This is a complete failure case so what is
# returned in the exception message is not that important so long
# as there is an error with a message
e = exception.ValidationError(attribute='xx',
target='\xe7a va')
if six.PY2:
self.assertIn('%(attribute)', six.text_type(e))
else:
# There's no UnicodeDecodeError on python 3.
self.assertIn('\xe7a va', six.text_type(e))
class UnexpectedExceptionTestCase(ExceptionTestCase):
"""Tests if internal info is exposed to the API user on UnexpectedError."""
class SubClassExc(exception.UnexpectedError):
debug_message_format = 'Debug Message: %(debug_info)s'
def setUp(self):
super(UnexpectedExceptionTestCase, self).setUp()
self.exc_str = uuid.uuid4().hex
self.config_fixture = self.useFixture(config_fixture.Config(cfg.CONF))
def test_unexpected_error_no_debug(self):
self.config_fixture.config(debug=False)
e = exception.UnexpectedError(exception=self.exc_str)
self.assertNotIn(self.exc_str, six.text_type(e))
def test_unexpected_error_debug(self):
self.config_fixture.config(debug=True)
e = exception.UnexpectedError(exception=self.exc_str)
self.assertIn(self.exc_str, six.text_type(e))
def test_unexpected_error_subclass_no_debug(self):
self.config_fixture.config(debug=False)
e = UnexpectedExceptionTestCase.SubClassExc(
debug_info=self.exc_str)
self.assertEqual(exception.UnexpectedError._message_format,
six.text_type(e))
def test_unexpected_error_subclass_debug(self):
self.config_fixture.config(debug=True)
subclass = self.SubClassExc
e = subclass(debug_info=self.exc_str)
expected = subclass.debug_message_format % {'debug_info': self.exc_str}
translated_amendment = six.text_type(exception.SecurityError.amendment)
self.assertEqual(
expected + six.text_type(' ') + translated_amendment,
six.text_type(e))
def test_unexpected_error_custom_message_no_debug(self):
self.config_fixture.config(debug=False)
e = exception.UnexpectedError(self.exc_str)
self.assertEqual(exception.UnexpectedError._message_format,
six.text_type(e))
def test_unexpected_error_custom_message_debug(self):
self.config_fixture.config(debug=True)
e = exception.UnexpectedError(self.exc_str)
translated_amendment = six.text_type(exception.SecurityError.amendment)
self.assertEqual(
self.exc_str + six.text_type(' ') + translated_amendment,
six.text_type(e))
class SecurityErrorTestCase(ExceptionTestCase):
"""Tests whether security-related info is exposed to the API user."""
def setUp(self):
super(SecurityErrorTestCase, self).setUp()
self.config_fixture = self.useFixture(config_fixture.Config(cfg.CONF))
def test_unauthorized_exposure(self):
self.config_fixture.config(debug=False)
risky_info = uuid.uuid4().hex
e = exception.Unauthorized(message=risky_info)
self.assertValidJsonRendering(e)
self.assertNotIn(risky_info, six.text_type(e))
def test_unauthorized_exposure_in_debug(self):
self.config_fixture.config(debug=True)
risky_info = uuid.uuid4().hex
e = exception.Unauthorized(message=risky_info)
self.assertValidJsonRendering(e)
self.assertIn(risky_info, six.text_type(e))
def test_forbidden_exposure(self):
self.config_fixture.config(debug=False)
risky_info = uuid.uuid4().hex
e = exception.Forbidden(message=risky_info)
self.assertValidJsonRendering(e)
self.assertNotIn(risky_info, six.text_type(e))
def test_forbidden_exposure_in_debug(self):
self.config_fixture.config(debug=True)
risky_info = uuid.uuid4().hex
e = exception.Forbidden(message=risky_info)
self.assertValidJsonRendering(e)
self.assertIn(risky_info, six.text_type(e))
def test_forbidden_action_exposure(self):
self.config_fixture.config(debug=False)
risky_info = uuid.uuid4().hex
action = uuid.uuid4().hex
e = exception.ForbiddenAction(message=risky_info, action=action)
self.assertValidJsonRendering(e)
self.assertNotIn(risky_info, six.text_type(e))
self.assertIn(action, six.text_type(e))
e = exception.ForbiddenAction(action=risky_info)
self.assertValidJsonRendering(e)
self.assertIn(risky_info, six.text_type(e))
def test_forbidden_action_exposure_in_debug(self):
self.config_fixture.config(debug=True)
risky_info = uuid.uuid4().hex
e = exception.ForbiddenAction(message=risky_info)
self.assertValidJsonRendering(e)
self.assertIn(risky_info, six.text_type(e))
e = exception.ForbiddenAction(action=risky_info)
self.assertValidJsonRendering(e)
self.assertIn(risky_info, six.text_type(e))
def test_unicode_argument_message(self):
self.config_fixture.config(debug=False)
risky_info = u'\u7ee7\u7eed\u884c\u7f29\u8fdb\u6216'
e = exception.Forbidden(message=risky_info)
self.assertValidJsonRendering(e)
self.assertNotIn(risky_info, six.text_type(e))
|
plotly/plotly.py
|
refs/heads/master
|
packages/python/plotly/_plotly_utils/tests/validators/test_number_validator.py
|
2
|
import pytest
from pytest import approx
from _plotly_utils.basevalidators import NumberValidator
import numpy as np
import pandas as pd
# Fixtures
# --------
@pytest.fixture
def validator(request):
return NumberValidator("prop", "parent")
@pytest.fixture
def validator_min_max(request):
return NumberValidator("prop", "parent", min=-1.0, max=2.0)
@pytest.fixture
def validator_min(request):
return NumberValidator("prop", "parent", min=-1.0)
@pytest.fixture
def validator_max(request):
return NumberValidator("prop", "parent", max=2.0)
@pytest.fixture
def validator_aok():
return NumberValidator("prop", "parent", min=-1, max=1.5, array_ok=True)
# Array not ok
# ------------
# ### Acceptance ###
@pytest.mark.parametrize(
"val", [1.0, 0.0, 1, -1234.5678, 54321, np.pi, np.nan, np.inf, -np.inf]
)
def test_acceptance(val, validator):
assert validator.validate_coerce(val) == approx(val, nan_ok=True)
# ### Rejection by value ###
@pytest.mark.parametrize("val", ["hello", (), [], [1, 2, 3], set(), "34"])
def test_rejection_by_value(val, validator):
with pytest.raises(ValueError) as validation_failure:
validator.validate_coerce(val)
assert "Invalid value" in str(validation_failure.value)
# ### With min/max ###
@pytest.mark.parametrize("val", [0, 0.0, -0.5, 1, 1.0, 2, 2.0, np.pi / 2.0])
def test_acceptance_min_max(val, validator_min_max):
assert validator_min_max.validate_coerce(val) == approx(val)
@pytest.mark.parametrize("val", [-1.01, -10, 2.1, 234, -np.inf, np.nan, np.inf])
def test_rejection_min_max(val, validator_min_max):
with pytest.raises(ValueError) as validation_failure:
validator_min_max.validate_coerce(val)
assert "in the interval [-1.0, 2.0]" in str(validation_failure.value)
# ### With min only ###
@pytest.mark.parametrize("val", [0, 0.0, -0.5, 99999, np.inf])
def test_acceptance_min(val, validator_min):
assert validator_min.validate_coerce(val) == approx(val)
@pytest.mark.parametrize("val", [-1.01, -np.inf, np.nan])
def test_rejection_min(val, validator_min):
with pytest.raises(ValueError) as validation_failure:
validator_min.validate_coerce(val)
assert "in the interval [-1.0, inf]" in str(validation_failure.value)
# ### With max only ###
@pytest.mark.parametrize("val", [0, 0.0, -np.inf, -123456, np.pi / 2])
def test_acceptance_max(val, validator_max):
assert validator_max.validate_coerce(val) == approx(val)
@pytest.mark.parametrize("val", [2.01, np.inf, np.nan])
def test_rejection_max(val, validator_max):
with pytest.raises(ValueError) as validation_failure:
validator_max.validate_coerce(val)
assert "in the interval [-inf, 2.0]" in str(validation_failure.value)
# Array ok
# --------
# ### Acceptance ###
@pytest.mark.parametrize("val", [1.0, 0.0, 1, 0.4])
def test_acceptance_aok_scalars(val, validator_aok):
assert validator_aok.validate_coerce(val) == val
@pytest.mark.parametrize("val", [[1.0, 0.0], [1], [-0.1234, 0.41, -1.0]])
def test_acceptance_aok_list(val, validator_aok):
assert np.array_equal(
validator_aok.validate_coerce(val), np.array(val, dtype="float")
)
# ### Coerce ###
# Coerced to general consistent numeric type
@pytest.mark.parametrize(
"val,expected",
[
([1.0, 0], (1.0, 0)),
(np.array([1, -1]), np.array([1, -1])),
(pd.Series([1, -1]), np.array([1, -1])),
(pd.Index([1, -1]), np.array([1, -1])),
((-0.1234, 0, -1), (-0.1234, 0.0, -1.0)),
],
)
def test_coercion_aok_list(val, expected, validator_aok):
v = validator_aok.validate_coerce(val)
if isinstance(val, (np.ndarray, pd.Series, pd.Index)):
assert np.array_equal(v, expected)
else:
assert isinstance(v, list)
assert validator_aok.present(v) == tuple(val)
# ### Rejection ###
#
@pytest.mark.parametrize("val", [["a", 4]])
def test_rejection_aok(val, validator_aok):
with pytest.raises(ValueError) as validation_failure:
validator_aok.validate_coerce(val)
assert "Invalid element(s)" in str(validation_failure.value)
# ### Rejection by element ###
@pytest.mark.parametrize(
"val",
[[-1.6, 0.0], [1, 1.5, 2], [-0.1234, 0.41, np.nan], [0, np.inf], [0, -np.inf]],
)
def test_rejection_aok_min_max(val, validator_aok):
with pytest.raises(ValueError) as validation_failure:
validator_aok.validate_coerce(val)
assert "Invalid element(s)" in str(validation_failure.value)
assert "in the interval [-1, 1.5]" in str(validation_failure.value)
|
emory-libraries/readux
|
refs/heads/master
|
readux/annotations/management/commands/import_annotations.py
|
1
|
from django.core.management.base import BaseCommand, CommandError
from django.contrib.auth import get_user_model
import json
from readux.annotations.models import Annotation
class Command(BaseCommand):
'''Import a JSON file of annotation data in the format provided
by the annotator store API (i.e., search results) and create
    corresponding local annotations.
'''
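    # Illustrative usage (the JSON file name below is only an example):
    #
    #   python manage.py import_annotations annotator_export.json
    #
    # The file is expected to hold annotator-store search results, i.e. a dict
    # with a "rows" list of annotation objects, as read in handle() below.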
def add_arguments(self, parser):
parser.add_argument('file',
help='JSON file with annotation data')
def handle(self, *args, **options):
print options['file']
with open(options['file']) as datafile:
data = json.loads(datafile.read())
for annotation in data['rows']:
self.import_annotation(annotation)
def import_annotation(self, data):
'''Create and save a new annotation, setting fields based on a
dictionary of data passed in. Raises an error if an annotation
author is not found as a user in the database.'''
note = Annotation()
# NOTE: because we are using uuid for annotation id field,
# importing an annotation twice does not error, but simply
# replaces the old copy. Might want to add checks for this...
# required fields that should always be present
# (not normally set by user)
for field in ['updated', 'created', 'id']:
setattr(note, field, data[field])
del data[field]
# user is special: annotation data only includes username,
# but we need a user object
# NOTE: this could result in making one person's annotations
# available to someone else, if someone is using a different
# username in another instance
if 'user' in data:
try:
note.user = get_user_model().objects.get(username=data['user'])
del data['user']
except get_user_model().DoesNotExist:
raise CommandError('Cannot import annotations for user %s (does not exist)' % data['user'])
for field in Annotation.common_fields:
if field in data:
setattr(note, field, data[field])
del data[field]
# put any other data that is left in extra data json field
if data:
note.extra_data.update(data)
note.save()
|
siosio/intellij-community
|
refs/heads/master
|
python/testData/copyPaste/singleLine/Indent32.after.py
|
747
|
class C:
def foo(self):
x = 1
y = 2
|
OldFrostDragon/GcovParserLibrary
|
refs/heads/master
|
tests/googletest/googletest/scripts/pump.py
|
2471
|
#!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""pump v0.2.0 - Pretty Useful for Meta Programming.
A tool for preprocessor meta programming. Useful for generating
repetitive boilerplate code. Especially useful for writing C++
classes, functions, macros, and templates that need to work with
various number of arguments.
USAGE:
pump.py SOURCE_FILE
EXAMPLES:
pump.py foo.cc.pump
Converts foo.cc.pump to foo.cc.
GRAMMAR:
CODE ::= ATOMIC_CODE*
ATOMIC_CODE ::= $var ID = EXPRESSION
| $var ID = [[ CODE ]]
| $range ID EXPRESSION..EXPRESSION
| $for ID SEPARATOR [[ CODE ]]
| $($)
| $ID
| $(EXPRESSION)
| $if EXPRESSION [[ CODE ]] ELSE_BRANCH
| [[ CODE ]]
| RAW_CODE
SEPARATOR ::= RAW_CODE | EMPTY
ELSE_BRANCH ::= $else [[ CODE ]]
| $elif EXPRESSION [[ CODE ]] ELSE_BRANCH
| EMPTY
EXPRESSION has Python syntax.
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
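# Illustrative example (not part of the original script), following the grammar
# documented above.  A hypothetical input containing
#
#   $range i 1..3
#   $for i [[
#   void Foo$i();
#   ]]
#
# would expand to roughly:
#
#   void Foo1();
#   void Foo2();
#   void Foo3();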
import os
import re
import sys
TOKEN_TABLE = [
(re.compile(r'\$var\s+'), '$var'),
(re.compile(r'\$elif\s+'), '$elif'),
(re.compile(r'\$else\s+'), '$else'),
(re.compile(r'\$for\s+'), '$for'),
(re.compile(r'\$if\s+'), '$if'),
(re.compile(r'\$range\s+'), '$range'),
(re.compile(r'\$[_A-Za-z]\w*'), '$id'),
(re.compile(r'\$\(\$\)'), '$($)'),
(re.compile(r'\$'), '$'),
(re.compile(r'\[\[\n?'), '[['),
(re.compile(r'\]\]\n?'), ']]'),
]
class Cursor:
"""Represents a position (line and column) in a text file."""
def __init__(self, line=-1, column=-1):
self.line = line
self.column = column
def __eq__(self, rhs):
return self.line == rhs.line and self.column == rhs.column
def __ne__(self, rhs):
return not self == rhs
def __lt__(self, rhs):
return self.line < rhs.line or (
self.line == rhs.line and self.column < rhs.column)
def __le__(self, rhs):
return self < rhs or self == rhs
def __gt__(self, rhs):
return rhs < self
def __ge__(self, rhs):
return rhs <= self
def __str__(self):
if self == Eof():
return 'EOF'
else:
return '%s(%s)' % (self.line + 1, self.column)
def __add__(self, offset):
return Cursor(self.line, self.column + offset)
def __sub__(self, offset):
return Cursor(self.line, self.column - offset)
def Clone(self):
"""Returns a copy of self."""
return Cursor(self.line, self.column)
# Special cursor to indicate the end-of-file.
def Eof():
"""Returns the special cursor to denote the end-of-file."""
return Cursor(-1, -1)
class Token:
"""Represents a token in a Pump source file."""
def __init__(self, start=None, end=None, value=None, token_type=None):
if start is None:
self.start = Eof()
else:
self.start = start
if end is None:
self.end = Eof()
else:
self.end = end
self.value = value
self.token_type = token_type
def __str__(self):
return 'Token @%s: \'%s\' type=%s' % (
self.start, self.value, self.token_type)
def Clone(self):
"""Returns a copy of self."""
return Token(self.start.Clone(), self.end.Clone(), self.value,
self.token_type)
def StartsWith(lines, pos, string):
"""Returns True iff the given position in lines starts with 'string'."""
return lines[pos.line][pos.column:].startswith(string)
def FindFirstInLine(line, token_table):
best_match_start = -1
for (regex, token_type) in token_table:
m = regex.search(line)
if m:
      # We found a match for this regex in the line
if best_match_start < 0 or m.start() < best_match_start:
best_match_start = m.start()
best_match_length = m.end() - m.start()
best_match_token_type = token_type
if best_match_start < 0:
return None
return (best_match_start, best_match_length, best_match_token_type)
def FindFirst(lines, token_table, cursor):
"""Finds the first occurrence of any string in strings in lines."""
start = cursor.Clone()
cur_line_number = cursor.line
for line in lines[start.line:]:
if cur_line_number == start.line:
line = line[start.column:]
m = FindFirstInLine(line, token_table)
if m:
# We found a regex in line.
(start_column, length, token_type) = m
if cur_line_number == start.line:
start_column += start.column
found_start = Cursor(cur_line_number, start_column)
found_end = found_start + length
return MakeToken(lines, found_start, found_end, token_type)
cur_line_number += 1
  # We did not find any token in the remaining lines
return None
def SubString(lines, start, end):
"""Returns a substring in lines."""
if end == Eof():
end = Cursor(len(lines) - 1, len(lines[-1]))
if start >= end:
return ''
if start.line == end.line:
return lines[start.line][start.column:end.column]
result_lines = ([lines[start.line][start.column:]] +
lines[start.line + 1:end.line] +
[lines[end.line][:end.column]])
return ''.join(result_lines)
def StripMetaComments(str):
"""Strip meta comments from each line in the given string."""
# First, completely remove lines containing nothing but a meta
# comment, including the trailing \n.
str = re.sub(r'^\s*\$\$.*\n', '', str)
# Then, remove meta comments from contentful lines.
return re.sub(r'\s*\$\$.*', '', str)
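# Illustrative example (not from the original sources): for the single line
#   'int x = 1;  $$ implementation note'
# the second substitution above strips the trailing '$$ implementation note'
# (and the spaces before it), leaving just 'int x = 1;'.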
def MakeToken(lines, start, end, token_type):
"""Creates a new instance of Token."""
return Token(start, end, SubString(lines, start, end), token_type)
def ParseToken(lines, pos, regex, token_type):
line = lines[pos.line][pos.column:]
m = regex.search(line)
if m and not m.start():
return MakeToken(lines, pos, pos + m.end(), token_type)
else:
print 'ERROR: %s expected at %s.' % (token_type, pos)
sys.exit(1)
ID_REGEX = re.compile(r'[_A-Za-z]\w*')
EQ_REGEX = re.compile(r'=')
REST_OF_LINE_REGEX = re.compile(r'.*?(?=$|\$\$)')
OPTIONAL_WHITE_SPACES_REGEX = re.compile(r'\s*')
WHITE_SPACE_REGEX = re.compile(r'\s')
DOT_DOT_REGEX = re.compile(r'\.\.')
def Skip(lines, pos, regex):
line = lines[pos.line][pos.column:]
m = re.search(regex, line)
if m and not m.start():
return pos + m.end()
else:
return pos
def SkipUntil(lines, pos, regex, token_type):
line = lines[pos.line][pos.column:]
m = re.search(regex, line)
if m:
return pos + m.start()
else:
print ('ERROR: %s expected on line %s after column %s.' %
(token_type, pos.line + 1, pos.column))
sys.exit(1)
def ParseExpTokenInParens(lines, pos):
def ParseInParens(pos):
pos = Skip(lines, pos, OPTIONAL_WHITE_SPACES_REGEX)
pos = Skip(lines, pos, r'\(')
pos = Parse(pos)
pos = Skip(lines, pos, r'\)')
return pos
def Parse(pos):
pos = SkipUntil(lines, pos, r'\(|\)', ')')
if SubString(lines, pos, pos + 1) == '(':
pos = Parse(pos + 1)
pos = Skip(lines, pos, r'\)')
return Parse(pos)
else:
return pos
start = pos.Clone()
pos = ParseInParens(pos)
return MakeToken(lines, start, pos, 'exp')
def RStripNewLineFromToken(token):
if token.value.endswith('\n'):
return Token(token.start, token.end, token.value[:-1], token.token_type)
else:
return token
def TokenizeLines(lines, pos):
while True:
found = FindFirst(lines, TOKEN_TABLE, pos)
if not found:
yield MakeToken(lines, pos, Eof(), 'code')
return
if found.start == pos:
prev_token = None
prev_token_rstripped = None
else:
prev_token = MakeToken(lines, pos, found.start, 'code')
prev_token_rstripped = RStripNewLineFromToken(prev_token)
if found.token_type == '$var':
if prev_token_rstripped:
yield prev_token_rstripped
yield found
id_token = ParseToken(lines, found.end, ID_REGEX, 'id')
yield id_token
pos = Skip(lines, id_token.end, OPTIONAL_WHITE_SPACES_REGEX)
eq_token = ParseToken(lines, pos, EQ_REGEX, '=')
yield eq_token
pos = Skip(lines, eq_token.end, r'\s*')
if SubString(lines, pos, pos + 2) != '[[':
exp_token = ParseToken(lines, pos, REST_OF_LINE_REGEX, 'exp')
yield exp_token
pos = Cursor(exp_token.end.line + 1, 0)
elif found.token_type == '$for':
if prev_token_rstripped:
yield prev_token_rstripped
yield found
id_token = ParseToken(lines, found.end, ID_REGEX, 'id')
yield id_token
pos = Skip(lines, id_token.end, WHITE_SPACE_REGEX)
elif found.token_type == '$range':
if prev_token_rstripped:
yield prev_token_rstripped
yield found
id_token = ParseToken(lines, found.end, ID_REGEX, 'id')
yield id_token
pos = Skip(lines, id_token.end, OPTIONAL_WHITE_SPACES_REGEX)
dots_pos = SkipUntil(lines, pos, DOT_DOT_REGEX, '..')
yield MakeToken(lines, pos, dots_pos, 'exp')
yield MakeToken(lines, dots_pos, dots_pos + 2, '..')
pos = dots_pos + 2
new_pos = Cursor(pos.line + 1, 0)
yield MakeToken(lines, pos, new_pos, 'exp')
pos = new_pos
elif found.token_type == '$':
if prev_token:
yield prev_token
yield found
exp_token = ParseExpTokenInParens(lines, found.end)
yield exp_token
pos = exp_token.end
elif (found.token_type == ']]' or found.token_type == '$if' or
found.token_type == '$elif' or found.token_type == '$else'):
if prev_token_rstripped:
yield prev_token_rstripped
yield found
pos = found.end
else:
if prev_token:
yield prev_token
yield found
pos = found.end
def Tokenize(s):
"""A generator that yields the tokens in the given string."""
if s != '':
lines = s.splitlines(True)
for token in TokenizeLines(lines, Cursor(0, 0)):
yield token
class CodeNode:
def __init__(self, atomic_code_list=None):
self.atomic_code = atomic_code_list
class VarNode:
def __init__(self, identifier=None, atomic_code=None):
self.identifier = identifier
self.atomic_code = atomic_code
class RangeNode:
def __init__(self, identifier=None, exp1=None, exp2=None):
self.identifier = identifier
self.exp1 = exp1
self.exp2 = exp2
class ForNode:
def __init__(self, identifier=None, sep=None, code=None):
self.identifier = identifier
self.sep = sep
self.code = code
class ElseNode:
def __init__(self, else_branch=None):
self.else_branch = else_branch
class IfNode:
def __init__(self, exp=None, then_branch=None, else_branch=None):
self.exp = exp
self.then_branch = then_branch
self.else_branch = else_branch
class RawCodeNode:
def __init__(self, token=None):
self.raw_code = token
class LiteralDollarNode:
def __init__(self, token):
self.token = token
class ExpNode:
def __init__(self, token, python_exp):
self.token = token
self.python_exp = python_exp
def PopFront(a_list):
head = a_list[0]
a_list[:1] = []
return head
def PushFront(a_list, elem):
a_list[:0] = [elem]
def PopToken(a_list, token_type=None):
token = PopFront(a_list)
if token_type is not None and token.token_type != token_type:
print 'ERROR: %s expected at %s' % (token_type, token.start)
print 'ERROR: %s found instead' % (token,)
sys.exit(1)
return token
def PeekToken(a_list):
if not a_list:
return None
return a_list[0]
def ParseExpNode(token):
python_exp = re.sub(r'([_A-Za-z]\w*)', r'self.GetValue("\1")', token.value)
return ExpNode(token, python_exp)
def ParseElseNode(tokens):
def Pop(token_type=None):
return PopToken(tokens, token_type)
next = PeekToken(tokens)
if not next:
return None
if next.token_type == '$else':
Pop('$else')
Pop('[[')
code_node = ParseCodeNode(tokens)
Pop(']]')
return code_node
elif next.token_type == '$elif':
Pop('$elif')
exp = Pop('code')
Pop('[[')
code_node = ParseCodeNode(tokens)
Pop(']]')
inner_else_node = ParseElseNode(tokens)
return CodeNode([IfNode(ParseExpNode(exp), code_node, inner_else_node)])
elif not next.value.strip():
Pop('code')
return ParseElseNode(tokens)
else:
return None
def ParseAtomicCodeNode(tokens):
def Pop(token_type=None):
return PopToken(tokens, token_type)
head = PopFront(tokens)
t = head.token_type
if t == 'code':
return RawCodeNode(head)
elif t == '$var':
id_token = Pop('id')
Pop('=')
next = PeekToken(tokens)
if next.token_type == 'exp':
exp_token = Pop()
return VarNode(id_token, ParseExpNode(exp_token))
Pop('[[')
code_node = ParseCodeNode(tokens)
Pop(']]')
return VarNode(id_token, code_node)
elif t == '$for':
id_token = Pop('id')
next_token = PeekToken(tokens)
if next_token.token_type == 'code':
sep_token = next_token
Pop('code')
else:
sep_token = None
Pop('[[')
code_node = ParseCodeNode(tokens)
Pop(']]')
return ForNode(id_token, sep_token, code_node)
elif t == '$if':
exp_token = Pop('code')
Pop('[[')
code_node = ParseCodeNode(tokens)
Pop(']]')
else_node = ParseElseNode(tokens)
return IfNode(ParseExpNode(exp_token), code_node, else_node)
elif t == '$range':
id_token = Pop('id')
exp1_token = Pop('exp')
Pop('..')
exp2_token = Pop('exp')
return RangeNode(id_token, ParseExpNode(exp1_token),
ParseExpNode(exp2_token))
elif t == '$id':
return ParseExpNode(Token(head.start + 1, head.end, head.value[1:], 'id'))
elif t == '$($)':
return LiteralDollarNode(head)
elif t == '$':
exp_token = Pop('exp')
return ParseExpNode(exp_token)
elif t == '[[':
code_node = ParseCodeNode(tokens)
Pop(']]')
return code_node
else:
PushFront(tokens, head)
return None
def ParseCodeNode(tokens):
atomic_code_list = []
while True:
if not tokens:
break
atomic_code_node = ParseAtomicCodeNode(tokens)
if atomic_code_node:
atomic_code_list.append(atomic_code_node)
else:
break
return CodeNode(atomic_code_list)
def ParseToAST(pump_src_text):
"""Convert the given Pump source text into an AST."""
tokens = list(Tokenize(pump_src_text))
code_node = ParseCodeNode(tokens)
return code_node
class Env:
def __init__(self):
self.variables = []
self.ranges = []
def Clone(self):
clone = Env()
clone.variables = self.variables[:]
clone.ranges = self.ranges[:]
return clone
def PushVariable(self, var, value):
# If value looks like an int, store it as an int.
try:
int_value = int(value)
if ('%s' % int_value) == value:
value = int_value
except Exception:
pass
self.variables[:0] = [(var, value)]
def PopVariable(self):
self.variables[:1] = []
def PushRange(self, var, lower, upper):
self.ranges[:0] = [(var, lower, upper)]
def PopRange(self):
self.ranges[:1] = []
def GetValue(self, identifier):
for (var, value) in self.variables:
if identifier == var:
return value
print 'ERROR: meta variable %s is undefined.' % (identifier,)
sys.exit(1)
def EvalExp(self, exp):
try:
result = eval(exp.python_exp)
except Exception, e:
print 'ERROR: caught exception %s: %s' % (e.__class__.__name__, e)
print ('ERROR: failed to evaluate meta expression %s at %s' %
(exp.python_exp, exp.token.start))
sys.exit(1)
return result
def GetRange(self, identifier):
for (var, lower, upper) in self.ranges:
if identifier == var:
return (lower, upper)
print 'ERROR: range %s is undefined.' % (identifier,)
sys.exit(1)
class Output:
def __init__(self):
self.string = ''
def GetLastLine(self):
index = self.string.rfind('\n')
if index < 0:
return ''
return self.string[index + 1:]
def Append(self, s):
self.string += s
def RunAtomicCode(env, node, output):
if isinstance(node, VarNode):
identifier = node.identifier.value.strip()
result = Output()
RunAtomicCode(env.Clone(), node.atomic_code, result)
value = result.string
env.PushVariable(identifier, value)
elif isinstance(node, RangeNode):
identifier = node.identifier.value.strip()
lower = int(env.EvalExp(node.exp1))
upper = int(env.EvalExp(node.exp2))
env.PushRange(identifier, lower, upper)
elif isinstance(node, ForNode):
identifier = node.identifier.value.strip()
if node.sep is None:
sep = ''
else:
sep = node.sep.value
(lower, upper) = env.GetRange(identifier)
for i in range(lower, upper + 1):
new_env = env.Clone()
new_env.PushVariable(identifier, i)
RunCode(new_env, node.code, output)
if i != upper:
output.Append(sep)
elif isinstance(node, RawCodeNode):
output.Append(node.raw_code.value)
elif isinstance(node, IfNode):
cond = env.EvalExp(node.exp)
if cond:
RunCode(env.Clone(), node.then_branch, output)
elif node.else_branch is not None:
RunCode(env.Clone(), node.else_branch, output)
elif isinstance(node, ExpNode):
value = env.EvalExp(node)
output.Append('%s' % (value,))
elif isinstance(node, LiteralDollarNode):
output.Append('$')
elif isinstance(node, CodeNode):
RunCode(env.Clone(), node, output)
else:
print 'BAD'
print node
sys.exit(1)
def RunCode(env, code_node, output):
for atomic_code in code_node.atomic_code:
RunAtomicCode(env, atomic_code, output)
def IsSingleLineComment(cur_line):
return '//' in cur_line
def IsInPreprocessorDirective(prev_lines, cur_line):
if cur_line.lstrip().startswith('#'):
return True
return prev_lines and prev_lines[-1].endswith('\\')
def WrapComment(line, output):
loc = line.find('//')
before_comment = line[:loc].rstrip()
if before_comment == '':
indent = loc
else:
output.append(before_comment)
indent = len(before_comment) - len(before_comment.lstrip())
prefix = indent*' ' + '// '
max_len = 80 - len(prefix)
comment = line[loc + 2:].strip()
segs = [seg for seg in re.split(r'(\w+\W*)', comment) if seg != '']
cur_line = ''
for seg in segs:
if len((cur_line + seg).rstrip()) < max_len:
cur_line += seg
else:
if cur_line.strip() != '':
output.append(prefix + cur_line.rstrip())
cur_line = seg.lstrip()
if cur_line.strip() != '':
output.append(prefix + cur_line.strip())
def WrapCode(line, line_concat, output):
indent = len(line) - len(line.lstrip())
prefix = indent*' ' # Prefix of the current line
max_len = 80 - indent - len(line_concat) # Maximum length of the current line
new_prefix = prefix + 4*' ' # Prefix of a continuation line
new_max_len = max_len - 4 # Maximum length of a continuation line
# Prefers to wrap a line after a ',' or ';'.
segs = [seg for seg in re.split(r'([^,;]+[,;]?)', line.strip()) if seg != '']
cur_line = '' # The current line without leading spaces.
for seg in segs:
# If the line is still too long, wrap at a space.
while cur_line == '' and len(seg.strip()) > max_len:
seg = seg.lstrip()
split_at = seg.rfind(' ', 0, max_len)
output.append(prefix + seg[:split_at].strip() + line_concat)
seg = seg[split_at + 1:]
prefix = new_prefix
max_len = new_max_len
if len((cur_line + seg).rstrip()) < max_len:
cur_line = (cur_line + seg).lstrip()
else:
output.append(prefix + cur_line.rstrip() + line_concat)
prefix = new_prefix
max_len = new_max_len
cur_line = seg.lstrip()
if cur_line.strip() != '':
output.append(prefix + cur_line.strip())
def WrapPreprocessorDirective(line, output):
WrapCode(line, ' \\', output)
def WrapPlainCode(line, output):
WrapCode(line, '', output)
def IsMultiLineIWYUPragma(line):
return re.search(r'/\* IWYU pragma: ', line)
def IsHeaderGuardIncludeOrOneLineIWYUPragma(line):
return (re.match(r'^#(ifndef|define|endif\s*//)\s*[\w_]+\s*$', line) or
re.match(r'^#include\s', line) or
# Don't break IWYU pragmas, either; that causes iwyu.py problems.
re.search(r'// IWYU pragma: ', line))
def WrapLongLine(line, output):
line = line.rstrip()
if len(line) <= 80:
output.append(line)
elif IsSingleLineComment(line):
if IsHeaderGuardIncludeOrOneLineIWYUPragma(line):
# The style guide made an exception to allow long header guard lines,
# includes and IWYU pragmas.
output.append(line)
else:
WrapComment(line, output)
elif IsInPreprocessorDirective(output, line):
if IsHeaderGuardIncludeOrOneLineIWYUPragma(line):
# The style guide made an exception to allow long header guard lines,
# includes and IWYU pragmas.
output.append(line)
else:
WrapPreprocessorDirective(line, output)
elif IsMultiLineIWYUPragma(line):
output.append(line)
else:
WrapPlainCode(line, output)
def BeautifyCode(string):
lines = string.splitlines()
output = []
for line in lines:
WrapLongLine(line, output)
output2 = [line.rstrip() for line in output]
return '\n'.join(output2) + '\n'
def ConvertFromPumpSource(src_text):
"""Return the text generated from the given Pump source text."""
ast = ParseToAST(StripMetaComments(src_text))
output = Output()
RunCode(Env(), ast, output)
return BeautifyCode(output.string)
def main(argv):
if len(argv) == 1:
print __doc__
sys.exit(1)
file_path = argv[-1]
output_str = ConvertFromPumpSource(file(file_path, 'r').read())
if file_path.endswith('.pump'):
output_file_path = file_path[:-5]
else:
output_file_path = '-'
if output_file_path == '-':
print output_str,
else:
output_file = file(output_file_path, 'w')
output_file.write('// This file was GENERATED by command:\n')
output_file.write('// %s %s\n' %
(os.path.basename(__file__), os.path.basename(file_path)))
output_file.write('// DO NOT EDIT BY HAND!!!\n\n')
output_file.write(output_str)
output_file.close()
if __name__ == '__main__':
main(sys.argv)
|
blaggacao/OpenUpgrade
|
refs/heads/8.0
|
addons/web_api/__openerp__.py
|
384
|
{
'name': 'OpenERP Web API',
'category': 'Hidden',
'description': """
OpenERP Web API.
================
""",
'version': '2.0',
'depends': ['web'],
'installable': True,
'auto_install': False,
}
|
dimpase/offlineimap
|
refs/heads/master
|
offlineimap/ui/debuglock.py
|
17
|
# Locking debugging code -- temporary
# Copyright (C) 2003 John Goerzen
# <jgoerzen@complete.org>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
from threading import Lock, currentThread
import traceback
logfile = open("/tmp/logfile", "wt")
loglock = Lock()
class DebuggingLock:
def __init__(self, name):
self.lock = Lock()
self.name = name
def acquire(self, blocking = 1):
self.print_tb("Acquire lock")
self.lock.acquire(blocking)
self.logmsg("===== %s: Thread %s acquired lock\n" % (self.name, currentThread().getName()))
def release(self):
self.print_tb("Release lock")
self.lock.release()
def logmsg(self, msg):
loglock.acquire()
logfile.write(msg + "\n")
logfile.flush()
loglock.release()
def print_tb(self, msg):
self.logmsg(".... %s: Thread %s attempting to %s\n" % \
(self.name, currentThread().getName(), msg) + \
"\n".join(traceback.format_list(traceback.extract_stack())))
|
gsmartway/odoo
|
refs/heads/8.0
|
addons/hr_recruitment/report/hr_recruitment_report.py
|
325
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import tools
from openerp.osv import fields, osv
from .. import hr_recruitment
from openerp.addons.decimal_precision import decimal_precision as dp
class hr_recruitment_report(osv.Model):
_name = "hr.recruitment.report"
_description = "Recruitments Statistics"
_auto = False
_rec_name = 'date_create'
_order = 'date_create desc'
_columns = {
'user_id': fields.many2one('res.users', 'User', readonly=True),
'company_id': fields.many2one('res.company', 'Company', readonly=True),
'date_create': fields.datetime('Create Date', readonly=True),
'date_last_stage_update': fields.datetime('Last Stage Update', readonly=True),
'date_closed': fields.date('Closed', readonly=True),
'job_id': fields.many2one('hr.job', 'Applied Job',readonly=True),
'stage_id': fields.many2one ('hr.recruitment.stage', 'Stage'),
'type_id': fields.many2one('hr.recruitment.degree', 'Degree'),
'department_id': fields.many2one('hr.department','Department',readonly=True),
'priority': fields.selection(hr_recruitment.AVAILABLE_PRIORITIES, 'Appreciation'),
'salary_prop' : fields.float("Salary Proposed", digits_compute=dp.get_precision('Account')),
'salary_prop_avg' : fields.float("Avg. Proposed Salary", group_operator="avg", digits_compute=dp.get_precision('Account')),
'salary_exp' : fields.float("Salary Expected", digits_compute=dp.get_precision('Account')),
'salary_exp_avg' : fields.float("Avg. Expected Salary", group_operator="avg", digits_compute=dp.get_precision('Account')),
'partner_id': fields.many2one('res.partner', 'Partner',readonly=True),
'available': fields.float("Availability"),
'delay_close': fields.float('Avg. Delay to Close', digits=(16,2), readonly=True, group_operator="avg",
help="Number of Days to close the project issue"),
'last_stage_id': fields.many2one ('hr.recruitment.stage', 'Last Stage'),
}
def init(self, cr):
tools.drop_view_if_exists(cr, 'hr_recruitment_report')
cr.execute("""
create or replace view hr_recruitment_report as (
select
min(s.id) as id,
s.create_date as date_create,
date(s.date_closed) as date_closed,
s.date_last_stage_update as date_last_stage_update,
s.partner_id,
s.company_id,
s.user_id,
s.job_id,
s.type_id,
sum(s.availability) as available,
s.department_id,
s.priority,
s.stage_id,
s.last_stage_id,
sum(salary_proposed) as salary_prop,
(sum(salary_proposed)/count(*)) as salary_prop_avg,
sum(salary_expected) as salary_exp,
(sum(salary_expected)/count(*)) as salary_exp_avg,
extract('epoch' from (s.write_date-s.create_date))/(3600*24) as delay_close,
count(*) as nbr
from hr_applicant s
group by
s.date_open,
s.create_date,
s.write_date,
s.date_closed,
s.date_last_stage_update,
s.partner_id,
s.company_id,
s.user_id,
s.stage_id,
s.last_stage_id,
s.type_id,
s.priority,
s.job_id,
s.department_id
)
""")
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
pombredanne/or-tools
|
refs/heads/master
|
examples/tests/issue117.py
|
9
|
from collections import namedtuple
from ortools.constraint_solver import pywrapcp
VEHICLE_COUNT= 30
VEHICLE_CAPACITY = 200
Customer = namedtuple("Customer", ['index', 'demand', 'x', 'y'])
print (' ----------------------------------------------- Init ----------------------------------------------')
customers = list()
customers.append(Customer(0,0,0,0))
customers.append(Customer(1,1,1.0,1.0))
customers.append(Customer(2,1,2.0,2.0))
customer_count = len(customers)
routing = pywrapcp.RoutingModel(3, VEHICLE_COUNT)
routing.SetDepot(0)
print (' ----------------------------------------------- Demand Constraint ---------------------------------------')
demands=[]
for i in range(0, customer_count):
demands.append(customers[i][1])
routing.AddVectorDimension(demands, VEHICLE_CAPACITY, True,"Demand")
print (' ----------------------------------------------- Adding Costs ---------------------------------------')
def length(customer1, customer2):
    # Constant distance, just for the sake of the example
return 1
routing.SetCost(length)
routing.CloseModel()
assignment=routing.Solve(None)
# Inspect solution and extract routes
routes=[]
for i in range(0, routing.vehicles()):
route_number = i
routes.append([])
node = routing.Start(route_number)
route=[]
route.append(0)
if routing.IsVehicleUsed(assignment, i):
while True:
node = assignment.Value(routing.NextVar(node))
if not routing.IsEnd(node):
route.append(int(node))
else :
break
route.append(0)
routes[route_number].append(route)
# These are the routes as a list of lists
routes = [el[0] for el in routes]
# Now trying to read the routes into a new assignment object fails
assignment2 = routing.ReadAssignmentFromRoutes(routes,True)
|
mith1979/ansible_automation
|
refs/heads/master
|
applied_python/applied_python/lib/python2.7/site-packages/ansible/modules/extras/system/alternatives.py
|
4
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Ansible module to manage symbolic link alternatives.
(c) 2014, Gabe Mulley <gabe.mulley@gmail.com>
(c) 2015, David Wittman <dwittman@gmail.com>
This file is part of Ansible
Ansible is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Ansible is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Ansible. If not, see <http://www.gnu.org/licenses/>.
"""
DOCUMENTATION = '''
---
module: alternatives
short_description: Manages alternative programs for common commands
description:
- Manages symbolic links using the 'update-alternatives' tool
- Useful when multiple programs are installed but provide similar functionality (e.g. different editors).
version_added: "1.6"
options:
name:
description:
- The generic name of the link.
required: true
path:
description:
- The path to the real executable that the link should point to.
required: true
link:
description:
- The path to the symbolic link that should point to the real executable.
- This option is required on RHEL-based distributions
required: false
requirements: [ update-alternatives ]
'''
EXAMPLES = '''
- name: correct java version selected
alternatives: name=java path=/usr/lib/jvm/java-7-openjdk-amd64/jre/bin/java
- name: alternatives link created
alternatives: name=hadoop-conf link=/etc/hadoop/conf path=/etc/hadoop/conf.ansible
'''
DEFAULT_LINK_PRIORITY = 50
import re
import subprocess
def main():
module = AnsibleModule(
argument_spec = dict(
name = dict(required=True),
path = dict(required=True),
link = dict(required=False),
),
supports_check_mode=True,
)
params = module.params
name = params['name']
path = params['path']
link = params['link']
UPDATE_ALTERNATIVES = module.get_bin_path('update-alternatives',True)
current_path = None
all_alternatives = []
# Run `update-alternatives --display <name>` to find existing alternatives
(rc, display_output, _) = module.run_command(
['env', 'LC_ALL=C', UPDATE_ALTERNATIVES, '--display', name]
)
if rc == 0:
# Alternatives already exist for this link group
# Parse the output to determine the current path of the symlink and
# available alternatives
current_path_regex = re.compile(r'^\s*link currently points to (.*)$',
re.MULTILINE)
alternative_regex = re.compile(r'^(\/.*)\s-\spriority', re.MULTILINE)
current_path = current_path_regex.search(display_output).group(1)
all_alternatives = alternative_regex.findall(display_output)
if not link:
# Read the current symlink target from `update-alternatives --query`
# in case we need to install the new alternative before setting it.
#
# This is only compatible on Debian-based systems, as the other
# alternatives don't have --query available
rc, query_output, _ = module.run_command(
['env', 'LC_ALL=C', UPDATE_ALTERNATIVES, '--query', name]
)
if rc == 0:
for line in query_output.splitlines():
if line.startswith('Link:'):
link = line.split()[1]
break
if current_path != path:
if module.check_mode:
module.exit_json(changed=True, current_path=current_path)
try:
# install the requested path if necessary
if path not in all_alternatives:
if not link:
module.fail_json(msg="Needed to install the alternative, but unable to do so as we are missing the link")
module.run_command(
[UPDATE_ALTERNATIVES, '--install', link, name, path, str(DEFAULT_LINK_PRIORITY)],
check_rc=True
)
# select the requested path
module.run_command(
[UPDATE_ALTERNATIVES, '--set', name, path],
check_rc=True
)
module.exit_json(changed=True)
except subprocess.CalledProcessError, cpe:
module.fail_json(msg=str(dir(cpe)))
else:
module.exit_json(changed=False)
# import module snippets
from ansible.module_utils.basic import *
main()
|
saurabh6790/alert-med-app
|
refs/heads/master
|
stock/doctype/purchase_receipt/purchase_receipt.py
|
28
|
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import webnotes
from webnotes.utils import cstr, flt, cint
from webnotes.model.bean import getlist
from webnotes.model.code import get_obj
from webnotes import msgprint, _
import webnotes.defaults
from stock.utils import update_bin
from controllers.buying_controller import BuyingController
class DocType(BuyingController):
def __init__(self, doc, doclist=[]):
self.doc = doc
self.doclist = doclist
self.tname = 'Purchase Receipt Item'
self.fname = 'purchase_receipt_details'
self.count = 0
self.status_updater = [{
'source_dt': 'Purchase Receipt Item',
'target_dt': 'Purchase Order Item',
'join_field': 'prevdoc_detail_docname',
'target_field': 'received_qty',
'target_parent_dt': 'Purchase Order',
'target_parent_field': 'per_received',
'target_ref_field': 'qty',
'source_field': 'qty',
'percent_join_field': 'prevdoc_docname',
}]
def onload(self):
billed_qty = webnotes.conn.sql("""select sum(ifnull(qty, 0)) from `tabPurchase Invoice Item`
where purchase_receipt=%s""", self.doc.name)
if billed_qty:
total_qty = sum((item.qty for item in self.doclist.get({"parentfield": "purchase_receipt_details"})))
self.doc.fields["__billing_complete"] = billed_qty[0][0] == total_qty
def validate(self):
super(DocType, self).validate()
self.po_required()
if not self.doc.status:
self.doc.status = "Draft"
import utilities
utilities.validate_status(self.doc.status, ["Draft", "Submitted", "Cancelled"])
self.validate_with_previous_doc()
self.validate_rejected_warehouse()
self.validate_accepted_rejected_qty()
self.validate_inspection()
self.validate_uom_is_integer("uom", ["qty", "received_qty"])
self.validate_uom_is_integer("stock_uom", "stock_qty")
self.validate_challan_no()
pc_obj = get_obj(dt='Purchase Common')
pc_obj.validate_for_items(self)
self.check_for_stopped_status(pc_obj)
# sub-contracting
self.validate_for_subcontracting()
self.update_raw_materials_supplied("pr_raw_material_details")
self.update_valuation_rate("purchase_receipt_details")
def validate_rejected_warehouse(self):
for d in self.doclist.get({"parentfield": "purchase_receipt_details"}):
if flt(d.rejected_qty) and not d.rejected_warehouse:
d.rejected_warehouse = self.doc.rejected_warehouse
if not d.rejected_warehouse:
webnotes.throw(_("Rejected Warehouse is mandatory against regected item"))
# validate accepted and rejected qty
def validate_accepted_rejected_qty(self):
for d in getlist(self.doclist, "purchase_receipt_details"):
if not flt(d.received_qty) and flt(d.qty):
d.received_qty = flt(d.qty) - flt(d.rejected_qty)
elif not flt(d.qty) and flt(d.rejected_qty):
d.qty = flt(d.received_qty) - flt(d.rejected_qty)
elif not flt(d.rejected_qty):
d.rejected_qty = flt(d.received_qty) - flt(d.qty)
# Check Received Qty = Accepted Qty + Rejected Qty
if ((flt(d.qty) + flt(d.rejected_qty)) != flt(d.received_qty)):
msgprint("Sum of Accepted Qty and Rejected Qty must be equal to Received quantity. Error for Item: " + cstr(d.item_code))
raise Exception
def validate_challan_no(self):
"Validate if same challan no exists for same supplier in a submitted purchase receipt"
if self.doc.challan_no:
exists = webnotes.conn.sql("""
SELECT name FROM `tabPurchase Receipt`
WHERE name!=%s AND supplier=%s AND challan_no=%s
AND docstatus=1""", (self.doc.name, self.doc.supplier, self.doc.challan_no))
if exists:
webnotes.msgprint("Another Purchase Receipt using the same Challan No. already exists.\
Please enter a valid Challan No.", raise_exception=1)
def validate_with_previous_doc(self):
super(DocType, self).validate_with_previous_doc(self.tname, {
"Purchase Order": {
"ref_dn_field": "prevdoc_docname",
"compare_fields": [["supplier", "="], ["company", "="], ["currency", "="]],
},
"Purchase Order Item": {
"ref_dn_field": "prevdoc_detail_docname",
"compare_fields": [["project_name", "="], ["uom", "="], ["item_code", "="]],
"is_child_table": True
}
})
if cint(webnotes.defaults.get_global_default('maintain_same_rate')):
super(DocType, self).validate_with_previous_doc(self.tname, {
"Purchase Order Item": {
"ref_dn_field": "prevdoc_detail_docname",
"compare_fields": [["import_rate", "="]],
"is_child_table": True
}
})
def po_required(self):
if webnotes.conn.get_value("Buying Settings", None, "po_required") == 'Yes':
for d in getlist(self.doclist,'purchase_receipt_details'):
if not d.prevdoc_docname:
msgprint("Purchse Order No. required against item %s"%d.item_code)
raise Exception
def update_stock(self):
sl_entries = []
stock_items = self.get_stock_items()
for d in getlist(self.doclist, 'purchase_receipt_details'):
if d.item_code in stock_items and d.warehouse:
pr_qty = flt(d.qty) * flt(d.conversion_factor)
if pr_qty:
sl_entries.append(self.get_sl_entries(d, {
"actual_qty": flt(pr_qty),
"serial_no": cstr(d.serial_no).strip(),
"incoming_rate": d.valuation_rate
}))
if flt(d.rejected_qty) > 0:
sl_entries.append(self.get_sl_entries(d, {
"warehouse": d.rejected_warehouse,
"actual_qty": flt(d.rejected_qty) * flt(d.conversion_factor),
"serial_no": cstr(d.rejected_serial_no).strip(),
"incoming_rate": d.valuation_rate
}))
self.bk_flush_supp_wh(sl_entries)
self.make_sl_entries(sl_entries)
def update_ordered_qty(self):
stock_items = self.get_stock_items()
for d in self.doclist.get({"parentfield": "purchase_receipt_details"}):
if d.item_code in stock_items and d.warehouse \
and cstr(d.prevdoc_doctype) == 'Purchase Order':
already_received_qty = self.get_already_received_qty(d.prevdoc_docname,
d.prevdoc_detail_docname)
po_qty, ordered_warehouse = self.get_po_qty_and_warehouse(d.prevdoc_detail_docname)
if not ordered_warehouse:
webnotes.throw(_("Warehouse is missing in Purchase Order"))
if already_received_qty + d.qty > po_qty:
ordered_qty = - (po_qty - already_received_qty) * flt(d.conversion_factor)
else:
ordered_qty = - flt(d.qty) * flt(d.conversion_factor)
update_bin({
"item_code": d.item_code,
"warehouse": ordered_warehouse,
"posting_date": self.doc.posting_date,
"ordered_qty": flt(ordered_qty) if self.doc.docstatus==1 else -flt(ordered_qty)
})
def get_already_received_qty(self, po, po_detail):
qty = webnotes.conn.sql("""select sum(qty) from `tabPurchase Receipt Item`
where prevdoc_detail_docname = %s and docstatus = 1
and prevdoc_doctype='Purchase Order' and prevdoc_docname=%s
and parent != %s""", (po_detail, po, self.doc.name))
return qty and flt(qty[0][0]) or 0.0
def get_po_qty_and_warehouse(self, po_detail):
po_qty, po_warehouse = webnotes.conn.get_value("Purchase Order Item", po_detail,
["qty", "warehouse"])
return po_qty, po_warehouse
def bk_flush_supp_wh(self, sl_entries):
for d in getlist(self.doclist, 'pr_raw_material_details'):
			# A negative quantity is passed because the raw material qty has to be
			# decreased when the PR is submitted and increased when the PR is cancelled
sl_entries.append(self.get_sl_entries(d, {
"item_code": d.rm_item_code,
"warehouse": self.doc.supplier_warehouse,
"actual_qty": -1*flt(d.consumed_qty),
"incoming_rate": 0
}))
def validate_inspection(self):
for d in getlist(self.doclist, 'purchase_receipt_details'): #Enter inspection date for all items that require inspection
ins_reqd = webnotes.conn.sql("select inspection_required from `tabItem` where name = %s",
(d.item_code,), as_dict = 1)
ins_reqd = ins_reqd and ins_reqd[0]['inspection_required'] or 'No'
if ins_reqd == 'Yes' and not d.qa_no:
msgprint("Item: " + d.item_code + " requires QA Inspection. Please enter QA No or report to authorized person to create Quality Inspection")
# Check for Stopped status
def check_for_stopped_status(self, pc_obj):
check_list =[]
for d in getlist(self.doclist, 'purchase_receipt_details'):
if d.fields.has_key('prevdoc_docname') and d.prevdoc_docname and d.prevdoc_docname not in check_list:
check_list.append(d.prevdoc_docname)
pc_obj.check_for_stopped_status( d.prevdoc_doctype, d.prevdoc_docname)
# on submit
def on_submit(self):
purchase_controller = webnotes.get_obj("Purchase Common")
# Check for Approving Authority
get_obj('Authorization Control').validate_approving_authority(self.doc.doctype, self.doc.company, self.doc.grand_total)
# Set status as Submitted
webnotes.conn.set(self.doc, 'status', 'Submitted')
self.update_prevdoc_status()
self.update_ordered_qty()
self.update_stock()
from stock.doctype.serial_no.serial_no import update_serial_nos_after_submit
update_serial_nos_after_submit(self, "purchase_receipt_details")
purchase_controller.update_last_purchase_rate(self, 1)
self.make_gl_entries()
def check_next_docstatus(self):
submit_rv = webnotes.conn.sql("select t1.name from `tabPurchase Invoice` t1,`tabPurchase Invoice Item` t2 where t1.name = t2.parent and t2.purchase_receipt = '%s' and t1.docstatus = 1" % (self.doc.name))
if submit_rv:
msgprint("Purchase Invoice : " + cstr(self.submit_rv[0][0]) + " has already been submitted !")
raise Exception , "Validation Error."
def on_cancel(self):
pc_obj = get_obj('Purchase Common')
self.check_for_stopped_status(pc_obj)
# Check if Purchase Invoice has been submitted against current Purchase Order
# pc_obj.check_docstatus(check = 'Next', doctype = 'Purchase Invoice', docname = self.doc.name, detail_doctype = 'Purchase Invoice Item')
submitted = webnotes.conn.sql("select t1.name from `tabPurchase Invoice` t1,`tabPurchase Invoice Item` t2 where t1.name = t2.parent and t2.purchase_receipt = '%s' and t1.docstatus = 1" % self.doc.name)
if submitted:
msgprint("Purchase Invoice : " + cstr(submitted[0][0]) + " has already been submitted !")
raise Exception
webnotes.conn.set(self.doc,'status','Cancelled')
self.update_ordered_qty()
self.update_stock()
self.update_prevdoc_status()
pc_obj.update_last_purchase_rate(self, 0)
self.make_cancel_gl_entries()
def get_current_stock(self):
for d in getlist(self.doclist, 'pr_raw_material_details'):
if self.doc.supplier_warehouse:
bin = webnotes.conn.sql("select actual_qty from `tabBin` where item_code = %s and warehouse = %s", (d.rm_item_code, self.doc.supplier_warehouse), as_dict = 1)
d.current_stock = bin and flt(bin[0]['actual_qty']) or 0
def get_rate(self,arg):
return get_obj('Purchase Common').get_rate(arg,self)
def get_gl_entries(self, warehouse_account=None):
against_stock_account = self.get_company_default("stock_received_but_not_billed")
gl_entries = super(DocType, self).get_gl_entries(warehouse_account, against_stock_account)
return gl_entries
@webnotes.whitelist()
def make_purchase_invoice(source_name, target_doclist=None):
from webnotes.model.mapper import get_mapped_doclist
def set_missing_values(source, target):
bean = webnotes.bean(target)
bean.run_method("set_missing_values")
bean.run_method("set_supplier_defaults")
doclist = get_mapped_doclist("Purchase Receipt", source_name, {
"Purchase Receipt": {
"doctype": "Purchase Invoice",
"validation": {
"docstatus": ["=", 1],
}
},
"Purchase Receipt Item": {
"doctype": "Purchase Invoice Item",
"field_map": {
"name": "pr_detail",
"parent": "purchase_receipt",
"prevdoc_detail_docname": "po_detail",
"prevdoc_docname": "purchase_order",
"purchase_rate": "rate"
},
},
"Purchase Taxes and Charges": {
"doctype": "Purchase Taxes and Charges",
"add_if_empty": True
}
}, target_doclist, set_missing_values)
return [d.fields for d in doclist]
|
engdan77/edoAutoHomeMobile
|
refs/heads/master
|
twisted/conch/ttymodes.py
|
82
|
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
#
import tty
# this module was autogenerated.
VINTR = 1
VQUIT = 2
VERASE = 3
VKILL = 4
VEOF = 5
VEOL = 6
VEOL2 = 7
VSTART = 8
VSTOP = 9
VSUSP = 10
VDSUSP = 11
VREPRINT = 12
VWERASE = 13
VLNEXT = 14
VFLUSH = 15
VSWTCH = 16
VSTATUS = 17
VDISCARD = 18
IGNPAR = 30
PARMRK = 31
INPCK = 32
ISTRIP = 33
INLCR = 34
IGNCR = 35
ICRNL = 36
IUCLC = 37
IXON = 38
IXANY = 39
IXOFF = 40
IMAXBEL = 41
ISIG = 50
ICANON = 51
XCASE = 52
ECHO = 53
ECHOE = 54
ECHOK = 55
ECHONL = 56
NOFLSH = 57
TOSTOP = 58
IEXTEN = 59
ECHOCTL = 60
ECHOKE = 61
PENDIN = 62
OPOST = 70
OLCUC = 71
ONLCR = 72
OCRNL = 73
ONOCR = 74
ONLRET = 75
CS7 = 90
CS8 = 91
PARENB = 92
PARODD = 93
TTY_OP_ISPEED = 128
TTY_OP_OSPEED = 129
TTYMODES = {
1 : 'VINTR',
2 : 'VQUIT',
3 : 'VERASE',
4 : 'VKILL',
5 : 'VEOF',
6 : 'VEOL',
7 : 'VEOL2',
8 : 'VSTART',
9 : 'VSTOP',
10 : 'VSUSP',
11 : 'VDSUSP',
12 : 'VREPRINT',
13 : 'VWERASE',
14 : 'VLNEXT',
15 : 'VFLUSH',
16 : 'VSWTCH',
17 : 'VSTATUS',
18 : 'VDISCARD',
30 : (tty.IFLAG, 'IGNPAR'),
31 : (tty.IFLAG, 'PARMRK'),
32 : (tty.IFLAG, 'INPCK'),
33 : (tty.IFLAG, 'ISTRIP'),
34 : (tty.IFLAG, 'INLCR'),
35 : (tty.IFLAG, 'IGNCR'),
36 : (tty.IFLAG, 'ICRNL'),
37 : (tty.IFLAG, 'IUCLC'),
38 : (tty.IFLAG, 'IXON'),
39 : (tty.IFLAG, 'IXANY'),
40 : (tty.IFLAG, 'IXOFF'),
41 : (tty.IFLAG, 'IMAXBEL'),
50 : (tty.LFLAG, 'ISIG'),
51 : (tty.LFLAG, 'ICANON'),
52 : (tty.LFLAG, 'XCASE'),
53 : (tty.LFLAG, 'ECHO'),
54 : (tty.LFLAG, 'ECHOE'),
55 : (tty.LFLAG, 'ECHOK'),
56 : (tty.LFLAG, 'ECHONL'),
57 : (tty.LFLAG, 'NOFLSH'),
58 : (tty.LFLAG, 'TOSTOP'),
59 : (tty.LFLAG, 'IEXTEN'),
60 : (tty.LFLAG, 'ECHOCTL'),
61 : (tty.LFLAG, 'ECHOKE'),
62 : (tty.LFLAG, 'PENDIN'),
70 : (tty.OFLAG, 'OPOST'),
71 : (tty.OFLAG, 'OLCUC'),
72 : (tty.OFLAG, 'ONLCR'),
73 : (tty.OFLAG, 'OCRNL'),
74 : (tty.OFLAG, 'ONOCR'),
75 : (tty.OFLAG, 'ONLRET'),
# 90 : (tty.CFLAG, 'CS7'),
# 91 : (tty.CFLAG, 'CS8'),
92 : (tty.CFLAG, 'PARENB'),
93 : (tty.CFLAG, 'PARODD'),
128 : 'ISPEED',
129 : 'OSPEED'
}
|
awslabs/py-flask-signup-docker
|
refs/heads/master
|
application.py
|
14
|
# Copyright 2015. Amazon Web Services, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import json
import flask
from flask import request, Response
from boto import dynamodb2
from boto.dynamodb2.table import Table
from boto.dynamodb2.items import Item
from boto.dynamodb2.fields import HashKey
from boto.dynamodb2.exceptions import ConditionalCheckFailedException
from boto.exception import JSONResponseError
# Default config vals
THEME = 'default' if os.environ.get('THEME') is None else os.environ.get('THEME')
FLASK_DEBUG = 'false' if os.environ.get('FLASK_DEBUG') is None else os.environ.get('FLASK_DEBUG')
# Create the Flask app
application = flask.Flask(__name__)
# Load config values specified above
application.config.from_object(__name__)
# Load configuration vals from a file
application.config.from_pyfile('application.config', silent=True)
# Only enable Flask debugging if an env var is set to true
application.debug = application.config['FLASK_DEBUG'] in ['true', 'True']
# Connect to DynamoDB and get ref to Table
ddb_conn = dynamodb2.connect_to_region(application.config['AWS_REGION'])
ddb_table = Table(table_name=application.config['STARTUP_SIGNUP_TABLE'],
connection=ddb_conn)
@application.route('/')
def welcome():
theme = application.config['THEME']
return flask.render_template('index.html', theme=theme, flask_debug=application.debug)
@application.route('/signup', methods=['POST'])
def signup():
signup_data = dict()
for item in request.form:
signup_data[item] = request.form[item]
try:
store_in_dynamo(signup_data)
except ConditionalCheckFailedException:
return Response("", status=409, mimetype='application/json')
return Response(json.dumps(signup_data), status=201, mimetype='application/json')
def store_in_dynamo(signup_data):
signup_item = Item(ddb_table, data=signup_data)
signup_item.save()
def create_table():
signups = Table.create(application.config['STARTUP_SIGNUP_TABLE'],
schema=[
HashKey('email') # defaults to STRING data_type
],
throughput={
'read': 1,
'write': 1,
},
connection=ddb_conn
)
def init_db():
try:
ddb_table.describe()
except JSONResponseError:
print "DynamoDB table doesn't exist, creating..."
create_table()
if __name__ == '__main__':
init_db()
application.run(host='0.0.0.0')
|
notriddle/servo
|
refs/heads/master
|
tests/wpt/web-platform-tests/tools/lint/tests/test_lint.py
|
12
|
from __future__ import unicode_literals
import os
import sys
import mock
import six
from ...localpaths import repo_root
from .. import lint as lint_mod
from ..lint import filter_whitelist_errors, parse_whitelist, lint, create_parser
_dummy_repo = os.path.join(os.path.dirname(__file__), "dummy")
def _mock_lint(name, **kwargs):
wrapped = getattr(lint_mod, name)
return mock.patch(lint_mod.__name__ + "." + name, wraps=wrapped, **kwargs)
def test_filter_whitelist_errors():
whitelist = {
'CONSOLE': {
'svg/*': {12}
},
'INDENT TABS': {
'svg/*': {None}
}
}
# parse_whitelist normalises the case/path of the match string so need to do the same
whitelist = {e: {os.path.normcase(k): v for k, v in p.items()}
for e, p in whitelist.items()}
# paths passed into filter_whitelist_errors are always Unix style
filteredfile = 'svg/test.html'
unfilteredfile = 'html/test.html'
# Tests for passing no errors
filtered = filter_whitelist_errors(whitelist, [])
assert filtered == []
filtered = filter_whitelist_errors(whitelist, [])
assert filtered == []
# Tests for filtering on file and line number
filtered = filter_whitelist_errors(whitelist, [['CONSOLE', '', filteredfile, 12]])
assert filtered == []
filtered = filter_whitelist_errors(whitelist, [['CONSOLE', '', unfilteredfile, 12]])
assert filtered == [['CONSOLE', '', unfilteredfile, 12]]
filtered = filter_whitelist_errors(whitelist, [['CONSOLE', '', filteredfile, 11]])
assert filtered == [['CONSOLE', '', filteredfile, 11]]
# Tests for filtering on just file
filtered = filter_whitelist_errors(whitelist, [['INDENT TABS', '', filteredfile, 12]])
assert filtered == []
filtered = filter_whitelist_errors(whitelist, [['INDENT TABS', '', filteredfile, 11]])
assert filtered == []
filtered = filter_whitelist_errors(whitelist, [['INDENT TABS', '', unfilteredfile, 11]])
assert filtered == [['INDENT TABS', '', unfilteredfile, 11]]
def test_parse_whitelist():
input_buffer = six.StringIO("""
# Comment
CR AT EOL: svg/import/*
CR AT EOL: streams/resources/test-utils.js
INDENT TABS: .gitmodules
INDENT TABS: app-uri/*
INDENT TABS: svg/*
TRAILING WHITESPACE: app-uri/*
CONSOLE:streams/resources/test-utils.js: 12
CR AT EOL, INDENT TABS: html/test.js
CR AT EOL, INDENT TABS: html/test2.js: 42
*:*.pdf
*:resources/*
*, CR AT EOL: *.png
""")
expected_data = {
'INDENT TABS': {
'.gitmodules': {None},
'app-uri/*': {None},
'svg/*': {None},
'html/test.js': {None},
'html/test2.js': {42},
},
'TRAILING WHITESPACE': {
'app-uri/*': {None},
},
'CONSOLE': {
'streams/resources/test-utils.js': {12},
},
'CR AT EOL': {
'streams/resources/test-utils.js': {None},
'svg/import/*': {None},
'html/test.js': {None},
'html/test2.js': {42},
}
}
expected_data = {e: {os.path.normcase(k): v for k, v in p.items()}
for e, p in expected_data.items()}
expected_ignored = {os.path.normcase(x) for x in {"*.pdf", "resources/*", "*.png"}}
data, ignored = parse_whitelist(input_buffer)
assert data == expected_data
assert ignored == expected_ignored
def test_lint_no_files(caplog):
rv = lint(_dummy_repo, [], "normal")
assert rv == 0
assert caplog.text == ""
def test_lint_ignored_file(caplog):
with _mock_lint("check_path") as mocked_check_path:
with _mock_lint("check_file_contents") as mocked_check_file_contents:
rv = lint(_dummy_repo, ["broken_ignored.html"], "normal")
assert rv == 0
assert not mocked_check_path.called
assert not mocked_check_file_contents.called
assert caplog.text == ""
def test_lint_not_existing_file(caplog):
with _mock_lint("check_path") as mocked_check_path:
with _mock_lint("check_file_contents") as mocked_check_file_contents:
# really long path-linted filename
name = "a" * 256 + ".html"
rv = lint(_dummy_repo, [name], "normal")
assert rv == 0
assert not mocked_check_path.called
assert not mocked_check_file_contents.called
assert caplog.text == ""
def test_lint_passing(caplog):
with _mock_lint("check_path") as mocked_check_path:
with _mock_lint("check_file_contents") as mocked_check_file_contents:
rv = lint(_dummy_repo, ["okay.html"], "normal")
assert rv == 0
assert mocked_check_path.call_count == 1
assert mocked_check_file_contents.call_count == 1
assert caplog.text == ""
def test_lint_failing(caplog):
with _mock_lint("check_path") as mocked_check_path:
with _mock_lint("check_file_contents") as mocked_check_file_contents:
rv = lint(_dummy_repo, ["broken.html"], "normal")
assert rv == 1
assert mocked_check_path.call_count == 1
assert mocked_check_file_contents.call_count == 1
assert "TRAILING WHITESPACE" in caplog.text
assert "broken.html:1" in caplog.text
def test_ref_existent_relative(caplog):
with _mock_lint("check_path") as mocked_check_path:
with _mock_lint("check_file_contents") as mocked_check_file_contents:
rv = lint(_dummy_repo, ["ref/existent_relative.html"], "normal")
assert rv == 0
assert mocked_check_path.call_count == 1
assert mocked_check_file_contents.call_count == 1
assert caplog.text == ""
def test_ref_existent_root_relative(caplog):
with _mock_lint("check_path") as mocked_check_path:
with _mock_lint("check_file_contents") as mocked_check_file_contents:
rv = lint(_dummy_repo, ["ref/existent_root_relative.html"], "normal")
assert rv == 0
assert mocked_check_path.call_count == 1
assert mocked_check_file_contents.call_count == 1
assert caplog.text == ""
def test_ref_non_existent_relative(caplog):
with _mock_lint("check_path") as mocked_check_path:
with _mock_lint("check_file_contents") as mocked_check_file_contents:
rv = lint(_dummy_repo, ["ref/non_existent_relative.html"], "normal")
assert rv == 1
assert mocked_check_path.call_count == 1
assert mocked_check_file_contents.call_count == 1
assert "NON-EXISTENT-REF" in caplog.text
assert "ref/non_existent_relative.html" in caplog.text
assert "non_existent_file.html" in caplog.text
def test_ref_non_existent_root_relative(caplog):
with _mock_lint("check_path") as mocked_check_path:
with _mock_lint("check_file_contents") as mocked_check_file_contents:
rv = lint(_dummy_repo, ["ref/non_existent_root_relative.html"], "normal")
assert rv == 1
assert mocked_check_path.call_count == 1
assert mocked_check_file_contents.call_count == 1
assert "NON-EXISTENT-REF" in caplog.text
assert "ref/non_existent_root_relative.html" in caplog.text
assert "/non_existent_file.html" in caplog.text
def test_ref_absolute_url(caplog):
with _mock_lint("check_path") as mocked_check_path:
with _mock_lint("check_file_contents") as mocked_check_file_contents:
rv = lint(_dummy_repo, ["ref/absolute.html"], "normal")
assert rv == 1
assert mocked_check_path.call_count == 1
assert mocked_check_file_contents.call_count == 1
assert "ABSOLUTE-URL-REF" in caplog.text
assert "http://example.com/reference.html" in caplog.text
assert "ref/absolute.html" in caplog.text
def test_about_blank_as_ref(caplog):
with _mock_lint("check_path"):
with _mock_lint("check_file_contents") as mocked_check_file_contents:
rv = lint(_dummy_repo, ["about_blank.html"], "normal")
assert rv == 0
assert mocked_check_file_contents.call_count == 1
assert caplog.text == ""
def test_ref_same_file_empty(caplog):
with _mock_lint("check_path") as mocked_check_path:
with _mock_lint("check_file_contents") as mocked_check_file_contents:
rv = lint(_dummy_repo, ["ref/same_file_empty.html"], "normal")
assert rv == 1
assert mocked_check_path.call_count == 1
assert mocked_check_file_contents.call_count == 1
assert "SAME-FILE-REF" in caplog.text
assert "same_file_empty.html" in caplog.text
def test_ref_same_file_path(caplog):
with _mock_lint("check_path") as mocked_check_path:
with _mock_lint("check_file_contents") as mocked_check_file_contents:
rv = lint(_dummy_repo, ["ref/same_file_path.html"], "normal")
assert rv == 1
assert mocked_check_path.call_count == 1
assert mocked_check_file_contents.call_count == 1
assert "SAME-FILE-REF" in caplog.text
assert "same_file_path.html" in caplog.text
def test_manual_path_testharness(caplog):
rv = lint(_dummy_repo, ["tests/relative-testharness-manual.html"], "normal")
assert rv == 2
assert "TESTHARNESS-PATH" in caplog.text
assert "TESTHARNESSREPORT-PATH" in caplog.text
def test_css_visual_path_testharness(caplog):
rv = lint(_dummy_repo, ["css/css-unique/relative-testharness.html"], "normal")
assert rv == 3
assert "CONTENT-VISUAL" in caplog.text
assert "TESTHARNESS-PATH" in caplog.text
assert "TESTHARNESSREPORT-PATH" in caplog.text
def test_css_manual_path_testharness(caplog):
rv = lint(_dummy_repo, ["css/css-unique/relative-testharness-interact.html"], "normal")
assert rv == 3
assert "CONTENT-MANUAL" in caplog.text
assert "TESTHARNESS-PATH" in caplog.text
assert "TESTHARNESSREPORT-PATH" in caplog.text
def test_lint_passing_and_failing(caplog):
with _mock_lint("check_path") as mocked_check_path:
with _mock_lint("check_file_contents") as mocked_check_file_contents:
rv = lint(_dummy_repo, ["broken.html", "okay.html"], "normal")
assert rv == 1
assert mocked_check_path.call_count == 2
assert mocked_check_file_contents.call_count == 2
assert "TRAILING WHITESPACE" in caplog.text
assert "broken.html:1" in caplog.text
assert "okay.html" not in caplog.text
def test_check_css_globally_unique_identical_test(caplog):
with _mock_lint("check_path") as mocked_check_path:
with _mock_lint("check_file_contents") as mocked_check_file_contents:
rv = lint(_dummy_repo, ["css/css-unique/match/a.html", "css/css-unique/a.html"], "normal")
assert rv == 0
assert mocked_check_path.call_count == 2
assert mocked_check_file_contents.call_count == 2
assert caplog.text == ""
def test_check_css_globally_unique_different_test(caplog):
with _mock_lint("check_path") as mocked_check_path:
with _mock_lint("check_file_contents") as mocked_check_file_contents:
rv = lint(_dummy_repo, ["css/css-unique/not-match/a.html", "css/css-unique/a.html"], "normal")
assert rv == 2
assert mocked_check_path.call_count == 2
assert mocked_check_file_contents.call_count == 2
assert "CSS-COLLIDING-TEST-NAME" in caplog.text
def test_check_css_globally_unique_different_spec_test(caplog):
with _mock_lint("check_path") as mocked_check_path:
with _mock_lint("check_file_contents") as mocked_check_file_contents:
rv = lint(_dummy_repo, ["css/css-unique/selectors/a.html", "css/css-unique/a.html"], "normal")
assert rv == 0
assert mocked_check_path.call_count == 2
assert mocked_check_file_contents.call_count == 2
assert caplog.text == ""
def test_check_css_globally_unique_support_ignored(caplog):
with _mock_lint("check_path") as mocked_check_path:
with _mock_lint("check_file_contents") as mocked_check_file_contents:
rv = lint(_dummy_repo, ["css/css-unique/support/a.html", "css/css-unique/support/tools/a.html"], "normal")
assert rv == 0
assert mocked_check_path.call_count == 2
assert mocked_check_file_contents.call_count == 2
assert caplog.text == ""
def test_check_css_globally_unique_support_identical(caplog):
with _mock_lint("check_path") as mocked_check_path:
with _mock_lint("check_file_contents") as mocked_check_file_contents:
rv = lint(_dummy_repo, ["css/css-unique/support/a.html", "css/css-unique/match/support/a.html"], "normal")
assert rv == 0
assert mocked_check_path.call_count == 2
assert mocked_check_file_contents.call_count == 2
assert caplog.text == ""
def test_check_css_globally_unique_support_different(caplog):
with _mock_lint("check_path") as mocked_check_path:
with _mock_lint("check_file_contents") as mocked_check_file_contents:
rv = lint(_dummy_repo, ["css/css-unique/not-match/support/a.html", "css/css-unique/support/a.html"], "normal")
assert rv == 2
assert mocked_check_path.call_count == 2
assert mocked_check_file_contents.call_count == 2
assert "CSS-COLLIDING-SUPPORT-NAME" in caplog.text
def test_check_css_globally_unique_test_support(caplog):
with _mock_lint("check_path") as mocked_check_path:
with _mock_lint("check_file_contents") as mocked_check_file_contents:
rv = lint(_dummy_repo, ["css/css-unique/support/a.html", "css/css-unique/a.html"], "normal")
assert rv == 0
assert mocked_check_path.call_count == 2
assert mocked_check_file_contents.call_count == 2
assert caplog.text == ""
def test_check_css_globally_unique_ref_identical(caplog):
with _mock_lint("check_path") as mocked_check_path:
with _mock_lint("check_file_contents") as mocked_check_file_contents:
rv = lint(_dummy_repo, ["css/css-unique/a-ref.html", "css/css-unique/match/a-ref.html"], "normal")
assert rv == 0
assert mocked_check_path.call_count == 2
assert mocked_check_file_contents.call_count == 2
assert caplog.text == ""
def test_check_css_globally_unique_ref_different(caplog):
with _mock_lint("check_path") as mocked_check_path:
with _mock_lint("check_file_contents") as mocked_check_file_contents:
rv = lint(_dummy_repo, ["css/css-unique/not-match/a-ref.html", "css/css-unique/a-ref.html"], "normal")
assert rv == 2
assert mocked_check_path.call_count == 2
assert mocked_check_file_contents.call_count == 2
assert "CSS-COLLIDING-REF-NAME" in caplog.text
def test_check_css_globally_unique_test_ref(caplog):
with _mock_lint("check_path") as mocked_check_path:
with _mock_lint("check_file_contents") as mocked_check_file_contents:
rv = lint(_dummy_repo, ["css/css-unique/a-ref.html", "css/css-unique/a.html"], "normal")
assert rv == 0
assert mocked_check_path.call_count == 2
assert mocked_check_file_contents.call_count == 2
assert caplog.text == ""
def test_check_css_globally_unique_ignored(caplog):
with _mock_lint("check_path") as mocked_check_path:
with _mock_lint("check_file_contents") as mocked_check_file_contents:
rv = lint(_dummy_repo, ["css/css-unique/tools/a.html", "css/css-unique/not-match/tools/a.html"], "normal")
assert rv == 0
assert mocked_check_path.call_count == 2
assert mocked_check_file_contents.call_count == 2
assert caplog.text == ""
def test_check_css_globally_unique_ignored_dir(caplog):
with _mock_lint("check_path") as mocked_check_path:
with _mock_lint("check_file_contents") as mocked_check_file_contents:
rv = lint(_dummy_repo, ["css/css-unique/support/a.html"], "normal")
assert rv == 0
assert mocked_check_path.call_count == 1
assert mocked_check_file_contents.call_count == 1
assert caplog.text == ""
def test_all_filesystem_paths():
with mock.patch(
'tools.lint.lint.walk',
return_value=[('',
[('dir_a', None), ('dir_b', None)],
[('file_a', None), ('file_b', None)]),
('dir_a',
[],
[('file_c', None), ('file_d', None)])]
):
got = list(lint_mod.all_filesystem_paths('.'))
assert got == ['file_a',
'file_b',
os.path.join('dir_a', 'file_c'),
os.path.join('dir_a', 'file_d')]
def test_filesystem_paths_subdir():
with mock.patch(
'tools.lint.lint.walk',
return_value=[('',
[('dir_a', None), ('dir_b', None)],
[('file_a', None), ('file_b', None)]),
('dir_a',
[],
[('file_c', None), ('file_d', None)])]
):
got = list(lint_mod.all_filesystem_paths('.', 'dir'))
assert got == [os.path.join('dir', 'file_a'),
os.path.join('dir', 'file_b'),
os.path.join('dir', 'dir_a', 'file_c'),
os.path.join('dir', 'dir_a', 'file_d')]
def test_main_with_args():
orig_argv = sys.argv
try:
sys.argv = ['./lint', 'a', 'b', 'c']
with mock.patch(lint_mod.__name__ + ".os.path.isfile") as mock_isfile:
mock_isfile.return_value = True
with _mock_lint('lint', return_value=True) as m:
lint_mod.main(**vars(create_parser().parse_args()))
m.assert_called_once_with(repo_root,
[os.path.relpath(os.path.join(os.getcwd(), x), repo_root)
for x in ['a', 'b', 'c']],
"normal")
finally:
sys.argv = orig_argv
def test_main_no_args():
orig_argv = sys.argv
try:
sys.argv = ['./lint']
with _mock_lint('lint', return_value=True) as m:
with _mock_lint('changed_files', return_value=['foo', 'bar']):
lint_mod.main(**vars(create_parser().parse_args()))
m.assert_called_once_with(repo_root, ['foo', 'bar'], "normal")
finally:
sys.argv = orig_argv
def test_main_all():
orig_argv = sys.argv
try:
sys.argv = ['./lint', '--all']
with _mock_lint('lint', return_value=True) as m:
with _mock_lint('all_filesystem_paths', return_value=['foo', 'bar']):
lint_mod.main(**vars(create_parser().parse_args()))
m.assert_called_once_with(repo_root, ['foo', 'bar'], "normal")
finally:
sys.argv = orig_argv
|
mkmelin/bedrock
|
refs/heads/master
|
bedrock/base/tests/test_accepted_locales.py
|
11
|
import os
import shutil
from django.conf import settings
from django.test import TestCase
from bedrock.settings.base import get_dev_languages, path
class AcceptedLocalesTest(TestCase):
"""Test lazy evaluation of locale related settings.
Verify that some localization-related settings are lazily evaluated based
on the current value of the DEV variable. Depending on the value,
DEV_LANGUAGES or PROD_LANGUAGES should be used.
"""
locale = path('locale')
locale_bkp = path('locale_bkp')
@classmethod
def setup_class(cls):
"""Create a directory structure for locale/.
Back up the existing project/locale/ directory and create the following
hierarchy in its place:
- project/locale/en-US/LC_MESSAGES
- project/locale/fr/LC_MESSAGES
- project/locale/templates/LC_MESSAGES
- project/locale/empty_file
Also, set PROD_LANGUAGES to ('en-US',).
"""
if os.path.exists(cls.locale_bkp):
raise Exception('A backup of locale/ exists at %s which might '
'mean that previous tests didn\'t end cleanly. '
'Skipping the test suite.' % cls.locale_bkp)
cls.DEV = settings.DEV
cls.PROD_LANGUAGES = settings.PROD_LANGUAGES
cls.DEV_LANGUAGES = settings.DEV_LANGUAGES
settings.PROD_LANGUAGES = ('en-US',)
if os.path.exists(cls.locale):
shutil.move(cls.locale, cls.locale_bkp)
else:
cls.locale_bkp = None
for loc in ('en-US', 'fr', 'templates'):
os.makedirs(os.path.join(cls.locale, loc, 'LC_MESSAGES'))
open(os.path.join(cls.locale, 'empty_file'), 'w').close()
@classmethod
def teardown_class(cls):
"""Remove the testing locale/ dir and bring back the backup."""
settings.DEV = cls.DEV
settings.PROD_LANGUAGES = cls.PROD_LANGUAGES
settings.DEV_LANGUAGES = cls.DEV_LANGUAGES
shutil.rmtree(cls.locale)
if cls.locale_bkp:
shutil.move(cls.locale_bkp, cls.locale)
def test_build_dev_languages(self):
"""Test that the list of dev locales is built properly.
On dev instances, the list of accepted locales should correspond to
the per-locale directories in locale/.
"""
settings.DEV = True
langs = get_dev_languages()
assert langs == ['en-US', 'fr'] or langs == ['fr', 'en-US'], (
'DEV_LANGUAGES do not correspond to the contents of locale/.')
def test_dev_languages(self):
"""Test the accepted locales on dev instances.
On dev instances, allow locales defined in DEV_LANGUAGES.
"""
settings.DEV = True
# simulate the successful result of the DEV_LANGUAGES list
# comprehension defined in settings.
settings.DEV_LANGUAGES = ['en-US', 'fr']
assert settings.LANGUAGE_URL_MAP == {'en-us': 'en-US', 'fr': 'fr'}, \
('DEV is True, but DEV_LANGUAGES are not used to define the '
'allowed locales.')
def test_prod_languages(self):
"""Test the accepted locales on prod instances.
On stage/prod instances, allow locales defined in PROD_LANGUAGES.
"""
settings.DEV = False
assert settings.LANGUAGE_URL_MAP == {'en-us': 'en-US'}, (
'DEV is False, but PROD_LANGUAGES are not used to define the '
'allowed locales.')
|
lcpt/xc
|
refs/heads/master
|
python_modules/actions/weight/CTE_pesos_especificos_materiales_construccion.py
|
1
|
# -*- coding: utf-8 -*-
'''Construction materials specific weights in N/m3 according to table C.1
of CTE.'''
#Masonry stone
pesoEspCTEArenisca=27.0e3
pesoEspCTEBasalto=31.0e3
pesoEspCTECalizaCompacta=28.0e3
pesoEspCTEMarmol=28.0e3
pesoEspCTEDiorita=30.0e3
pesoEspCTEGneis=30.0e3
pesoEspCTEGranito=30.0e3
pesoEspCTESienita=28.0e3
pesoEspCTEPorfido=28.0e3
pesoEspCTETerracotaCompacta=27.0e3
#Timber
pesoEspCTEMaderaAserradaTiposC14aC40=5.0e3
pesoEspCTEMaderaLaminadaEncolada=4.4e3
pesoEspCTETableroMaderaContrachapada=5.0e3
pesoEspCTETableroCartonGris=8.0e3
pesoEspCTEAglomeradoConCemento=12.0e3
pesoEspCTETableroDeFibras=10.0e3
pesoEspCTETableroLigero=4.0e3
#Masonry units (blocks and bricks)
pesoEspCTEBloqueHuecoDeCemento=16.0e3
pesoEspCTEBloqueHuecoDeYeso=10.0e3
pesoEspCTELadrilloCeramicoMacizo=18.0e3
pesoEspCTELadrilloCeramicoPerforado=15.0e3
pesoEspCTELadrilloCeramicoHueco=12.0e3
pesoEspCTELadrilloSilicocalcareo=20.0e3
#Rubble masonry with mortar
pesoEspCTEMamposteriaDeArenisca=24.0e3
pesoEspCTEMamposteriaDeBasalto=27.0e3
pesoEspCTEMamposteriaDeCaliza_compacta=26.0e3
pesoEspCTEMamposteriaDeGranito=26.0e3
#Ashlar masonry
pesoEspCTESilleriaDeArenisca=26.0e3
pesoEspCTESilleriaDeAreniscaPorosa=24.0e3
pesoEspCTESilleriaDeCalizaPorosas=24.0e3
pesoEspCTESilleriaDeCalizaCompacta=28.0e3
pesoEspCTESilleriaDeBasalto=30.0e3
pesoEspCTESilleriaDeCalizaCompacta_o_marmol=28.0e3
pesoEspCTESilleriaDeGranito=28.0e3
#Concrete and mortar
pesoEspCTEHormigonLigeroMasa=20.0e3
pesoEspCTEHormigonLigeroArmado=20.0e3
pesoEspCTEHormigonNormalMasa=24.0e3
pesoEspCTEHormigonNormalArmado=25.0e3
pesoEspCTEMorteroDeCemento=23.0e3
pesoEspCTEMorteroDeYeso=28.0e3
pesoEspCTEMorteroDeCementoyCal=20.0e3
pesoEspCTEMorteroDeCal=18.0e3
#Metals
pesoEspCTEAcero=78.5e3
pesoEspCTEAluminio=27.0e3
pesoEspCTEBronce=85.0e3
pesoEspCTECobre=89.0e3
pesoEspCTEEstanno=74.0e3
pesoEspCTEHierro_colado=72.5e3
pesoEspCTEHierro_forjado=76.0e3
pesoEspCTELaton=85.0e3
pesoEspCTEPlomo=114.0e3
pesoEspCTEZinc=72.0e3
#Plastics and organics
pesoEspCTECaucho_en_plancha=17.0e3
pesoEspCTELamina_acrilica=12.0e3
pesoEspCTELinoleo_en_plancha=12.0e3
pesoEspCTEMastico_en_plancha=21.0e3
pesoEspCTEPoliestireno_expandido=0.3e3
#Miscellaneous
pesoEspCTEAdobe=16.0e3
pesoEspCTEAsfalto=24.0e3
pesoEspCTEBaldosa_ceramica=18.0e3
pesoEspCTEBaldosa_de_gres=19.0e3
pesoEspCTEPapel=11.0e3
pesoEspCTEPizarra=29.0e3
pesoEspCTEVidrio=25.0e3
|
JustinSGray/Hyperloop
|
refs/heads/master
|
src/hyperloop/tube_wall_temp.py
|
8
|
"""
tubeModel.py -
Determines the steady state temperature of the hyperloop tube.
Calculates Q released/absorbed by hyperloop tube due to:
Internal Convection, Tube Conduction, Ambient Natural Convection, Solar Flux In, Radiation Out
-original calculations from Jeff Berton, ported and extended by Jeff Chin
Compatible with OpenMDAO v0.8.1
"""
from math import log, pi, sqrt, e
from openmdao.main.api import Assembly, Component
from openmdao.lib.drivers.api import BroydenSolver
from openmdao.lib.datatypes.api import Float, Bool
from openmdao.main.api import convert_units as cu
from pycycle.api import FlowStationVar
class TubeWallTemp(Component):
""" Calculates Q released/absorbed by the hyperloop tube """
#--Inputs--
#Hyperloop Parameters/Design Variables
diameter_outer_tube = Float(2.23, units = 'm', iotype='in', desc='tube outer diameter') #7.3ft
length_tube = Float(482803, units = 'm', iotype='in', desc='Length of entire Hyperloop') #300 miles, 1584000ft
num_pods = Float(34, iotype='in', desc='Number of Pods in the Tube at a given time') #
temp_boundary = Float(322.0, units = 'K', iotype='in', desc='Average Temperature of the tube wall') #
temp_outside_ambient = Float(305.6, units = 'K', iotype='in', desc='Average Temperature of the outside air') #
nozzle_air = FlowStationVar(iotype="in", desc="air exiting the pod nozzle", copy=None)
bearing_air = FlowStationVar(iotype="in", desc="air exiting the air bearings", copy=None)
#constants
solar_insolation = Float(1000., iotype="in", units = 'W/m**2', desc='solar irradiation at sea level on a clear day') #
nn_incidence_factor = Float(0.7, iotype="in", desc='Non-normal incidence factor') #
surface_reflectance = Float(0.5, desc='Solar Reflectance Index') #
q_per_area_solar = Float(350., units = 'W/m**2', desc='Solar Heat Rate Absorbed per Area') #
q_total_solar = Float(375989751., iotype="in", units = 'W', desc='Solar Heat Absorbed by Tube') #
emissivity_tube = Float(0.5, iotype="in", desc='Emissivity of the Tube') #
sb_constant = Float(0.00000005670373, iotype="in", units = 'W/((m**2)*(K**4))', desc='Stefan-Boltzmann Constant') #
#--Outputs--
area_rad = Float(337486.1, units = 'm**2', iotype='out', desc='Tube Radiating Area') #
#Required for Natural Convection Calcs
GrDelTL3 = Float(1946216.7, units = '1/((ft**3)*F)', iotype='out', desc='Grashof number divided by (delta T * L**3)') #
Pr = Float(0.707, iotype='out', desc='Prandtl') #
Gr = Float(12730351223., iotype='out', desc='Grashof #') #
Ra = Float(8996312085., iotype='out', desc='Rayleigh #') #
Nu = Float(232.4543713, iotype='out', desc='Nusselt #') #
k = Float(0.02655, units = 'W/(m*K)', iotype='out', desc='Thermal conductivity') #
h = Float(0.845464094, units = 'W/((m**2)*K)', iotype='out', desc='Natural convection heat transfer coefficient') #
area_convection = Float(3374876.115, units = 'm**2', iotype='out', desc='Convection Area') #
#Natural Convection
q_per_area_nat_conv = Float(7.9, units = 'W/(m**2)', iotype='out', desc='Heat Radiated per Area to the outside') #
total_q_nat_conv = Float(286900419., units = 'W', iotype='out', desc='Total Heat Radiated to the outside via Natural Convection') #
#Exhausted from Pods
heat_rate_pod = Float(519763, units = 'W', iotype='out', desc='Heating Due to a Single Pod') #
total_heat_rate_pods = Float(17671942., units = 'W', iotype='out', desc='Heating Due to All Pods') #
#Radiated Out
q_rad_per_area = Float(31.6, units = 'W/(m**2)', iotype='out', desc='Heat Radiated to the outside') #
q_rad_tot = Float(106761066.5, units = 'W', iotype='out', desc='Heat Radiated to the outside') #
#Radiated In
viewing_angle = Float(1074256, units = 'm**2', iotype='out', desc='Effective Area hit by Sun') #
#Total Heating
q_total_out = Float(286900419., units = 'W', iotype='out', desc='Total Heat Released via Radiation and Natural Convection') #
q_total_in = Float(286900419., units = 'W', iotype='out', desc='Total Heat Absorbed/Added via Pods and Solar Absorption') #
#Residual (for solver)
ss_temp_residual = Float(units = 'K', iotype='out', desc='Residual of T_released - T_absorbed')
def execute(self):
"""Calculate Various Paramters"""
bearing_q = cu(self.bearing_air.W,'lbm/s','kg/s') * cu(self.bearing_air.Cp,'Btu/(lbm*degR)','J/(kg*K)') * (cu(self.bearing_air.Tt,'degR','degK') - self.temp_boundary)
nozzle_q = cu(self.nozzle_air.W,'lbm/s','kg/s') * cu(self.nozzle_air.Cp,'Btu/(lbm*degR)','J/(kg*K)') * (cu(self.nozzle_air.Tt,'degR','degK') - self.temp_boundary)
#Q = mdot * cp * deltaT
self.heat_rate_pod = nozzle_q +bearing_q
#Total Q = Q * (number of pods)
self.total_heat_rate_pods = self.heat_rate_pod*self.num_pods
#Determine thermal resistance of outside via Natural Convection or forced convection
if(self.temp_outside_ambient < 400):
self.GrDelTL3 = 41780000000000000000*((self.temp_outside_ambient)**(-4.639)) #SI units (https://mdao.grc.nasa.gov/publications/Berton-Thesis.pdf pg51)
else:
self.GrDelTL3 = 4985000000000000000*((self.temp_outside_ambient)**(-4.284)) #SI units (https://mdao.grc.nasa.gov/publications/Berton-Thesis.pdf pg51)
#Prandtl Number
#Pr = viscous diffusion rate/ thermal diffusion rate = Cp * dynamic viscosity / thermal conductivity
#Pr << 1 means thermal diffusivity dominates
#Pr >> 1 means momentum diffusivity dominates
if (self.temp_outside_ambient < 400):
self.Pr = 1.23*(self.temp_outside_ambient**(-0.09685)) #SI units (https://mdao.grc.nasa.gov/publications/Berton-Thesis.pdf pg51)
else:
self.Pr = 0.59*(self.temp_outside_ambient**(0.0239))
#Grashof Number
#Relationship between buoyancy and viscosity
#Laminar = Gr < 10^8
#Turbulent = Gr > 10^9
self.Gr = self.GrDelTL3*(self.temp_boundary-self.temp_outside_ambient)*(self.diameter_outer_tube**3)
#Rayleigh Number
#Buoyancy driven flow (natural convection)
self.Ra = self.Pr * self.Gr
#Nusselt Number
#Nu = convective heat transfer / conductive heat transfer
if (self.Ra<=10**12): #valid in specific flow regime
self.Nu = (0.6 + 0.387*self.Ra**(1./6.)/(1 + (0.559/self.Pr)**(9./16.))**(8./27.))**2 #3rd Ed. of Introduction to Heat Transfer by Incropera and DeWitt, equations (9.33) and (9.34) on page 465
if(self.temp_outside_ambient < 400):
self.k = 0.0001423*(self.temp_outside_ambient**(0.9138)) #SI units (https://mdao.grc.nasa.gov/publications/Berton-Thesis.pdf pg51)
else:
self.k = 0.0002494*(self.temp_outside_ambient**(0.8152))
#h = k*Nu/Characteristic Length
self.h = (self.k * self.Nu)/ self.diameter_outer_tube
#Convection Area = Surface Area
self.area_convection = pi * self.length_tube * self.diameter_outer_tube
#Determine heat radiated per square meter (Q)
self.q_per_area_nat_conv = self.h*(self.temp_boundary-self.temp_outside_ambient)
#Determine total heat radiated over entire tube (Qtotal)
self.total_q_nat_conv = self.q_per_area_nat_conv * self.area_convection
#Determine heat incoming via Sun radiation (Incidence Flux)
#Sun hits an effective rectangular cross section
self.viewing_angle = self.length_tube * self.diameter_outer_tube
self.q_per_area_solar = (1-self.surface_reflectance)* self.nn_incidence_factor * self.solar_insolation
self.q_total_solar = self.q_per_area_solar * self.viewing_angle
#Determine heat released via radiation
#Radiative area = surface area
self.area_rad = self.area_convection
#P/A = SB*emissivity*(T^4 - To^4)
self.q_rad_per_area = self.sb_constant*self.emissivity_tube*((self.temp_boundary**4) - (self.temp_outside_ambient**4))
#P = A * (P/A)
self.q_rad_tot = self.area_rad * self.q_rad_per_area
#------------
#Sum Up
self.q_total_out = self.q_rad_tot + self.total_q_nat_conv
self.q_total_in = self.q_total_solar + self.total_heat_rate_pods
self.ss_temp_residual = (self.q_total_out - self.q_total_in)/1e6
#run stand-alone component
if __name__ == "__main__":
from openmdao.main.api import set_as_top
class TubeHeatBalance(Assembly):
def configure(self):
tm = self.add('tm', TubeWallTemp())
#tm.bearing_air.setTotalTP()
driver = self.add('driver',BroydenSolver())
driver.add_parameter('tm.temp_boundary',low=0.,high=10000.)
driver.add_constraint('tm.ss_temp_residual=0')
driver.workflow.add(['tm'])
test = TubeHeatBalance()
set_as_top(test)
#set input values
test.tm.nozzle_air.setTotalTP(1710, 0.304434211)
test.tm.nozzle_air.W = 1.08
test.tm.bearing_air.W = 0.
test.tm.diameter_outer_tube = 2.22504#, units = 'm', iotype='in', desc='Tube out diameter') #7.3ft
test.tm.length_tube = 482803.#, units = 'm', iotype='in', desc='Length of entire Hyperloop') #300 miles, 1584000ft
test.tm.num_pods = 34.#, units = 'K', iotype='in', desc='Number of Pods in the Tube at a given time') #
test.tm.temp_boundary = 340#, units = 'K', iotype='in', desc='Average Temperature of the tube') #
test.tm.temp_outside_ambient = 305.6#, units = 'K', iotype='in', desc='Average Temperature of the outside air') #
test.run()
print "-----Completed Tube Heat Flux Model Calculations---"
print ""
print "CompressQ-{} SolarQ-{} RadQ-{} ConvecQ-{}".format(test.tm.total_heat_rate_pods, test.tm.q_total_solar, test.tm.q_rad_tot, test.tm.total_q_nat_conv )
print "Equilibrium Wall Temperature: {} K or {} F".format(test.tm.temp_boundary, cu(test.tm.temp_boundary,'degK','degF'))
print "Ambient Temperature: {} K or {} F".format(test.tm.temp_outside_ambient, cu(test.tm.temp_outside_ambient,'degK','degF'))
print "Q Out = {} W ==> Q In = {} W ==> Error: {}%".format(test.tm.q_total_out,test.tm.q_total_in,((test.tm.q_total_out-test.tm.q_total_in)/test.tm.q_total_out)*100)
|
revmischa/boto
|
refs/heads/develop
|
tests/integration/gs/test_versioning.py
|
135
|
# -*- coding: utf-8 -*-
# Copyright (c) 2012, Google, Inc.
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""Integration tests for GS versioning support."""
from xml import sax
from boto import handler
from boto.gs import acl
from tests.integration.gs.testcase import GSTestCase
class GSVersioningTest(GSTestCase):
def testVersioningToggle(self):
b = self._MakeBucket()
self.assertFalse(b.get_versioning_status())
b.configure_versioning(True)
self.assertTrue(b.get_versioning_status())
b.configure_versioning(False)
self.assertFalse(b.get_versioning_status())
def testDeleteVersionedKey(self):
b = self._MakeVersionedBucket()
k = b.new_key("foo")
s1 = "test1"
k.set_contents_from_string(s1)
k = b.get_key("foo")
g1 = k.generation
s2 = "test2"
k.set_contents_from_string(s2)
k = b.get_key("foo")
g2 = k.generation
versions = list(b.list_versions())
self.assertEqual(len(versions), 2)
self.assertEqual(versions[0].name, "foo")
self.assertEqual(versions[1].name, "foo")
generations = [k.generation for k in versions]
self.assertIn(g1, generations)
self.assertIn(g2, generations)
# Delete "current" version and make sure that version is no longer
# visible from a basic GET call.
b.delete_key("foo", generation=None)
self.assertIsNone(b.get_key("foo"))
# Both old versions should still be there when listed using the versions
# query parameter.
versions = list(b.list_versions())
self.assertEqual(len(versions), 2)
self.assertEqual(versions[0].name, "foo")
self.assertEqual(versions[1].name, "foo")
generations = [k.generation for k in versions]
self.assertIn(g1, generations)
self.assertIn(g2, generations)
# Delete generation 2 and make sure it's gone.
b.delete_key("foo", generation=g2)
versions = list(b.list_versions())
self.assertEqual(len(versions), 1)
self.assertEqual(versions[0].name, "foo")
self.assertEqual(versions[0].generation, g1)
# Delete generation 1 and make sure it's gone.
b.delete_key("foo", generation=g1)
versions = list(b.list_versions())
self.assertEqual(len(versions), 0)
def testGetVersionedKey(self):
b = self._MakeVersionedBucket()
k = b.new_key("foo")
s1 = "test1"
k.set_contents_from_string(s1)
k = b.get_key("foo")
g1 = k.generation
o1 = k.get_contents_as_string()
self.assertEqual(o1, s1)
s2 = "test2"
k.set_contents_from_string(s2)
k = b.get_key("foo")
g2 = k.generation
self.assertNotEqual(g2, g1)
o2 = k.get_contents_as_string()
self.assertEqual(o2, s2)
k = b.get_key("foo", generation=g1)
self.assertEqual(k.get_contents_as_string(), s1)
k = b.get_key("foo", generation=g2)
self.assertEqual(k.get_contents_as_string(), s2)
def testVersionedBucketCannedAcl(self):
b = self._MakeVersionedBucket()
k = b.new_key("foo")
s1 = "test1"
k.set_contents_from_string(s1)
k = b.get_key("foo")
g1 = k.generation
s2 = "test2"
k.set_contents_from_string(s2)
k = b.get_key("foo")
g2 = k.generation
acl1g1 = b.get_acl("foo", generation=g1)
acl1g2 = b.get_acl("foo", generation=g2)
owner1g1 = acl1g1.owner.id
owner1g2 = acl1g2.owner.id
self.assertEqual(owner1g1, owner1g2)
entries1g1 = acl1g1.entries.entry_list
entries1g2 = acl1g2.entries.entry_list
self.assertEqual(len(entries1g1), len(entries1g2))
b.set_acl("public-read", key_name="foo", generation=g1)
acl2g1 = b.get_acl("foo", generation=g1)
acl2g2 = b.get_acl("foo", generation=g2)
entries2g1 = acl2g1.entries.entry_list
entries2g2 = acl2g2.entries.entry_list
self.assertEqual(len(entries2g2), len(entries1g2))
public_read_entries1 = [e for e in entries2g1 if e.permission == "READ"
and e.scope.type == acl.ALL_USERS]
public_read_entries2 = [e for e in entries2g2 if e.permission == "READ"
and e.scope.type == acl.ALL_USERS]
self.assertEqual(len(public_read_entries1), 1)
self.assertEqual(len(public_read_entries2), 0)
def testVersionedBucketXmlAcl(self):
b = self._MakeVersionedBucket()
k = b.new_key("foo")
s1 = "test1"
k.set_contents_from_string(s1)
k = b.get_key("foo")
g1 = k.generation
s2 = "test2"
k.set_contents_from_string(s2)
k = b.get_key("foo")
g2 = k.generation
acl1g1 = b.get_acl("foo", generation=g1)
acl1g2 = b.get_acl("foo", generation=g2)
owner1g1 = acl1g1.owner.id
owner1g2 = acl1g2.owner.id
self.assertEqual(owner1g1, owner1g2)
entries1g1 = acl1g1.entries.entry_list
entries1g2 = acl1g2.entries.entry_list
self.assertEqual(len(entries1g1), len(entries1g2))
acl_xml = (
'<ACCESSControlList><EntrIes><Entry>' +
'<Scope type="AllUsers"></Scope><Permission>READ</Permission>' +
'</Entry></EntrIes></ACCESSControlList>')
aclo = acl.ACL()
h = handler.XmlHandler(aclo, b)
sax.parseString(acl_xml, h)
b.set_acl(aclo, key_name="foo", generation=g1)
acl2g1 = b.get_acl("foo", generation=g1)
acl2g2 = b.get_acl("foo", generation=g2)
entries2g1 = acl2g1.entries.entry_list
entries2g2 = acl2g2.entries.entry_list
self.assertEqual(len(entries2g2), len(entries1g2))
public_read_entries1 = [e for e in entries2g1 if e.permission == "READ"
and e.scope.type == acl.ALL_USERS]
public_read_entries2 = [e for e in entries2g2 if e.permission == "READ"
and e.scope.type == acl.ALL_USERS]
self.assertEqual(len(public_read_entries1), 1)
self.assertEqual(len(public_read_entries2), 0)
def testVersionedObjectCannedAcl(self):
b = self._MakeVersionedBucket()
k = b.new_key("foo")
s1 = "test1"
k.set_contents_from_string(s1)
k = b.get_key("foo")
g1 = k.generation
s2 = "test2"
k.set_contents_from_string(s2)
k = b.get_key("foo")
g2 = k.generation
acl1g1 = b.get_acl("foo", generation=g1)
acl1g2 = b.get_acl("foo", generation=g2)
owner1g1 = acl1g1.owner.id
owner1g2 = acl1g2.owner.id
self.assertEqual(owner1g1, owner1g2)
entries1g1 = acl1g1.entries.entry_list
entries1g2 = acl1g2.entries.entry_list
self.assertEqual(len(entries1g1), len(entries1g2))
b.set_acl("public-read", key_name="foo", generation=g1)
acl2g1 = b.get_acl("foo", generation=g1)
acl2g2 = b.get_acl("foo", generation=g2)
entries2g1 = acl2g1.entries.entry_list
entries2g2 = acl2g2.entries.entry_list
self.assertEqual(len(entries2g2), len(entries1g2))
public_read_entries1 = [e for e in entries2g1 if e.permission == "READ"
and e.scope.type == acl.ALL_USERS]
public_read_entries2 = [e for e in entries2g2 if e.permission == "READ"
and e.scope.type == acl.ALL_USERS]
self.assertEqual(len(public_read_entries1), 1)
self.assertEqual(len(public_read_entries2), 0)
def testCopyVersionedKey(self):
b = self._MakeVersionedBucket()
k = b.new_key("foo")
s1 = "test1"
k.set_contents_from_string(s1)
k = b.get_key("foo")
g1 = k.generation
s2 = "test2"
k.set_contents_from_string(s2)
b2 = self._MakeVersionedBucket()
b2.copy_key("foo2", b.name, "foo", src_generation=g1)
k2 = b2.get_key("foo2")
s3 = k2.get_contents_as_string()
self.assertEqual(s3, s1)
def testKeyGenerationUpdatesOnSet(self):
b = self._MakeVersionedBucket()
k = b.new_key("foo")
self.assertIsNone(k.generation)
k.set_contents_from_string("test1")
g1 = k.generation
self.assertRegexpMatches(g1, r'[0-9]+')
self.assertEqual(k.metageneration, '1')
k.set_contents_from_string("test2")
g2 = k.generation
self.assertNotEqual(g1, g2)
self.assertRegexpMatches(g2, r'[0-9]+')
self.assertGreater(int(g2), int(g1))
self.assertEqual(k.metageneration, '1')
|
jk1/intellij-community
|
refs/heads/master
|
python/testData/refactoring/pullup/simple.py
|
166
|
class Foo:
def foo(self):
print("a")
class Boo(Foo):
def boo(self):
print "rrrrr"
|
SoLoHiC/mezzanine
|
refs/heads/master
|
mezzanine/pages/context_processors.py
|
30
|
def page(request):
"""
Adds the current page to the template context and runs its
``set_helpers`` method. This was previously part of
``PageMiddleware``, but moved to a context processor so that
we could assign these template context variables without
the middleware depending on Django's ``TemplateResponse``.
"""
context = {}
page = getattr(request, "page", None)
if page:
# set_helpers has always expected the current template context,
# but here we're just passing in our context dict with enough
# variables to satisfy it.
context = {"request": request, "page": page, "_current_page": page}
page.set_helpers(context)
return context
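# Illustrative only -- not part of the original module. This context processor is
# activated through Django settings; the exact setting name varies by Django
# version, so treat the snippet below as a sketch rather than the canonical form:
#
#   TEMPLATE_CONTEXT_PROCESSORS = (
#       # ... the Django/Mezzanine defaults ...
#       "mezzanine.pages.context_processors.page",
#   )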
|
joebowen/LogMyRocket_API
|
refs/heads/master
|
LogMyRocket/libraries/sys_packages/docutils/writers/odf_odt/pygmentsformatter.py
|
244
|
# $Id: pygmentsformatter.py 5853 2009-01-19 21:02:02Z dkuhlman $
# Author: Dave Kuhlman <dkuhlman@rexx.com>
# Copyright: This module has been placed in the public domain.
"""
Additional support for Pygments formatter.
"""
import pygments
import pygments.formatter
class OdtPygmentsFormatter(pygments.formatter.Formatter):
def __init__(self, rststyle_function, escape_function):
pygments.formatter.Formatter.__init__(self)
self.rststyle_function = rststyle_function
self.escape_function = escape_function
def rststyle(self, name, parameters=()):
return self.rststyle_function(name, parameters)
class OdtPygmentsProgFormatter(OdtPygmentsFormatter):
def format(self, tokensource, outfile):
tokenclass = pygments.token.Token
for ttype, value in tokensource:
value = self.escape_function(value)
if ttype == tokenclass.Keyword:
s2 = self.rststyle('codeblock-keyword')
s1 = '<text:span text:style-name="%s">%s</text:span>' % \
(s2, value, )
elif ttype == tokenclass.Literal.String:
s2 = self.rststyle('codeblock-string')
s1 = '<text:span text:style-name="%s">%s</text:span>' % \
(s2, value, )
elif ttype in (
tokenclass.Literal.Number.Integer,
tokenclass.Literal.Number.Integer.Long,
tokenclass.Literal.Number.Float,
tokenclass.Literal.Number.Hex,
tokenclass.Literal.Number.Oct,
tokenclass.Literal.Number,
):
s2 = self.rststyle('codeblock-number')
s1 = '<text:span text:style-name="%s">%s</text:span>' % \
(s2, value, )
elif ttype == tokenclass.Operator:
s2 = self.rststyle('codeblock-operator')
s1 = '<text:span text:style-name="%s">%s</text:span>' % \
(s2, value, )
elif ttype == tokenclass.Comment:
s2 = self.rststyle('codeblock-comment')
s1 = '<text:span text:style-name="%s">%s</text:span>' % \
(s2, value, )
elif ttype == tokenclass.Name.Class:
s2 = self.rststyle('codeblock-classname')
s1 = '<text:span text:style-name="%s">%s</text:span>' % \
(s2, value, )
elif ttype == tokenclass.Name.Function:
s2 = self.rststyle('codeblock-functionname')
s1 = '<text:span text:style-name="%s">%s</text:span>' % \
(s2, value, )
elif ttype == tokenclass.Name:
s2 = self.rststyle('codeblock-name')
s1 = '<text:span text:style-name="%s">%s</text:span>' % \
(s2, value, )
else:
s1 = value
outfile.write(s1)
class OdtPygmentsLaTeXFormatter(OdtPygmentsFormatter):
def format(self, tokensource, outfile):
tokenclass = pygments.token.Token
for ttype, value in tokensource:
value = self.escape_function(value)
if ttype == tokenclass.Keyword:
s2 = self.rststyle('codeblock-keyword')
s1 = '<text:span text:style-name="%s">%s</text:span>' % \
(s2, value, )
elif ttype in (tokenclass.Literal.String,
tokenclass.Literal.String.Backtick,
):
s2 = self.rststyle('codeblock-string')
s1 = '<text:span text:style-name="%s">%s</text:span>' % \
(s2, value, )
elif ttype == tokenclass.Name.Attribute:
s2 = self.rststyle('codeblock-operator')
s1 = '<text:span text:style-name="%s">%s</text:span>' % \
(s2, value, )
elif ttype == tokenclass.Comment:
if value[-1] == '\n':
s2 = self.rststyle('codeblock-comment')
s1 = '<text:span text:style-name="%s">%s</text:span>\n' % \
(s2, value[:-1], )
else:
s2 = self.rststyle('codeblock-comment')
s1 = '<text:span text:style-name="%s">%s</text:span>' % \
(s2, value, )
elif ttype == tokenclass.Name.Builtin:
s2 = self.rststyle('codeblock-name')
s1 = '<text:span text:style-name="%s">%s</text:span>' % \
(s2, value, )
else:
s1 = value
outfile.write(s1)
|
codewarrior0/Shiboken
|
refs/heads/master
|
tests/samplebinding/oddbool_test.py
|
3
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# This file is part of the Shiboken Python Bindings Generator project.
#
# Copyright (C) 2009-2011 Nokia Corporation and/or its subsidiary(-ies).
#
# Contact: PySide team <contact@pyside.org>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public License
# version 2.1 as published by the Free Software Foundation. Please
# review the following information to ensure the GNU Lesser General
# Public License version 2.1 requirements will be met:
# http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
'''Test cases for OddBool user's primitive type conversion.'''
import unittest
from sample import OddBoolUser
class DerivedOddBoolUser (OddBoolUser):
def returnMyselfVirtual(self):
return OddBoolUser()
pass
class OddBoolTest(unittest.TestCase):
def testOddBoolUser(self):
obuTrue = OddBoolUser()
obuFalse = OddBoolUser()
obuTrue.setOddBool(True)
self.assertEqual(obuFalse.oddBool(), False)
self.assertEqual(obuTrue.oddBool(), True)
self.assertEqual(obuTrue.callInvertedOddBool(), False)
self.assertEqual(obuTrue.oddBool() == True, True)
self.assertEqual(False == obuFalse.oddBool(), True)
self.assertEqual(obuTrue.oddBool() == obuFalse.oddBool(), False)
self.assertEqual(obuFalse.oddBool() != True, True)
self.assertEqual(True != obuFalse.oddBool(), True)
self.assertEqual(obuTrue.oddBool() != obuFalse.oddBool(), True)
def testVirtuals(self):
dobu = DerivedOddBoolUser()
self.assertEqual(dobu.invertedOddBool(), True)
def testImplicitConversionWithUsersPrimitiveType(self):
obu = OddBoolUser(True)
self.assertTrue(obu.oddBool())
obu = OddBoolUser(False)
self.assertFalse(obu.oddBool())
cpx = complex(1.0, 0.0)
obu = OddBoolUser(cpx)
self.assertTrue(obu.oddBool())
cpx = complex(0.0, 0.0)
obu = OddBoolUser(cpx)
self.assertFalse(obu.oddBool())
if __name__ == '__main__':
unittest.main()
|
buuck/root
|
refs/heads/master
|
interpreter/llvm/src/utils/lit/lit/ShCommands.py
|
87
|
class Command:
def __init__(self, args, redirects):
self.args = list(args)
self.redirects = list(redirects)
def __repr__(self):
return 'Command(%r, %r)' % (self.args, self.redirects)
def __eq__(self, other):
if not isinstance(other, Command):
return False
return ((self.args, self.redirects) ==
(other.args, other.redirects))
def toShell(self, file):
for arg in self.args:
if "'" not in arg:
quoted = "'%s'" % arg
elif '"' not in arg and '$' not in arg:
quoted = '"%s"' % arg
else:
raise NotImplementedError('Unable to quote %r' % arg)
file.write(quoted)
# For debugging / validation.
import ShUtil
dequoted = list(ShUtil.ShLexer(quoted).lex())
if dequoted != [arg]:
raise NotImplementedError('Unable to quote %r' % arg)
for r in self.redirects:
if len(r[0]) == 1:
file.write("%s '%s'" % (r[0][0], r[1]))
else:
file.write("%s%s '%s'" % (r[0][1], r[0][0], r[1]))
class Pipeline:
def __init__(self, commands, negate=False, pipe_err=False):
self.commands = commands
self.negate = negate
self.pipe_err = pipe_err
def __repr__(self):
return 'Pipeline(%r, %r, %r)' % (self.commands, self.negate,
self.pipe_err)
def __eq__(self, other):
if not isinstance(other, Pipeline):
return False
return ((self.commands, self.negate, self.pipe_err) ==
(other.commands, other.negate, other.pipe_err))
def toShell(self, file, pipefail=False):
if pipefail != self.pipe_err:
raise ValueError('Inconsistent "pipefail" attribute!')
if self.negate:
file.write('! ')
for cmd in self.commands:
cmd.toShell(file)
if cmd is not self.commands[-1]:
file.write('|\n ')
class Seq:
def __init__(self, lhs, op, rhs):
assert op in (';', '&', '||', '&&')
self.op = op
self.lhs = lhs
self.rhs = rhs
def __repr__(self):
return 'Seq(%r, %r, %r)' % (self.lhs, self.op, self.rhs)
def __eq__(self, other):
if not isinstance(other, Seq):
return False
return ((self.lhs, self.op, self.rhs) ==
(other.lhs, other.op, other.rhs))
def toShell(self, file, pipefail=False):
self.lhs.toShell(file, pipefail)
file.write(' %s\n' % self.op)
self.rhs.toShell(file, pipefail)
|
Lyleo/OmniMarkupPreviewer
|
refs/heads/master
|
OmniMarkupLib/Renderers/libs/python2/textile/tools/imagesize.py
|
2
|
def getimagesize(url):
"""
Attempts to determine an image's width and height, and returns a string
suitable for use in an <img> tag, or an empty string in case of failure.
Requires that PIL is installed.
>>> getimagesize("http://www.google.com/intl/en_ALL/images/logo.gif")
... #doctest: +ELLIPSIS
'width="..." height="..."'
>>> getimagesize("http://bad.domain/")
''
"""
try:
from PIL import ImageFile
import urllib2
except ImportError:
return ''
try:
p = ImageFile.Parser()
f = urllib2.urlopen(url)
while True:
s = f.read(1024)
if not s:
break
p.feed(s)
if p.image:
return 'width="%i" height="%i"' % p.image.size
except (IOError, ValueError):
return ''
def setup_module(module):
from nose.plugins.skip import SkipTest
try:
from PIL import ImageFile
except ImportError:
raise SkipTest()
|
dhalperi/incubator-beam
|
refs/heads/master
|
sdks/python/apache_beam/internal/gcp/auth.py
|
9
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Dataflow credentials and authentication."""
import datetime
import json
import logging
import os
import urllib2
from oauth2client.client import GoogleCredentials
from oauth2client.client import OAuth2Credentials
from apache_beam.utils import retry
# When we are running in GCE, we can authenticate with VM credentials.
is_running_in_gce = False
# When we are running in GCE, this value is set based on worker startup
# information.
executing_project = None
def set_running_in_gce(worker_executing_project):
"""For internal use only; no backwards-compatibility guarantees.
Informs the authentication library that we are running in GCE.
When we are running in GCE, we have the option of using the VM metadata
credentials for authentication to Google services.
Args:
worker_executing_project: The project running the workflow. This information
comes from worker startup information.
"""
global is_running_in_gce
global executing_project
is_running_in_gce = True
executing_project = worker_executing_project
class AuthenticationException(retry.PermanentException):
pass
class _GCEMetadataCredentials(OAuth2Credentials):
"""For internal use only; no backwards-compatibility guarantees.
Credential object initialized using access token from GCE VM metadata."""
def __init__(self, user_agent=None):
"""Create an instance of GCEMetadataCredentials.
These credentials are generated by contacting the metadata server on a GCE
VM instance.
Args:
user_agent: string, The HTTP User-Agent to provide for this application.
"""
super(_GCEMetadataCredentials, self).__init__(
None, # access_token
None, # client_id
None, # client_secret
None, # refresh_token
datetime.datetime(2010, 1, 1), # token_expiry, set to time in past.
None, # token_uri
user_agent)
@retry.with_exponential_backoff(
retry_filter=retry.retry_on_server_errors_and_timeout_filter)
def _refresh(self, http_request):
refresh_time = datetime.datetime.now()
metadata_root = os.environ.get(
'GCE_METADATA_ROOT', 'metadata.google.internal')
token_url = ('http://{}/computeMetadata/v1/instance/service-accounts/'
'default/token').format(metadata_root)
req = urllib2.Request(token_url, headers={'Metadata-Flavor': 'Google'})
token_data = json.loads(urllib2.urlopen(req).read())
self.access_token = token_data['access_token']
self.token_expiry = (refresh_time +
datetime.timedelta(seconds=token_data['expires_in']))
def get_service_credentials():
"""For internal use only; no backwards-compatibility guarantees.
Get credentials to access Google services."""
user_agent = 'beam-python-sdk/1.0'
if is_running_in_gce:
# We are currently running as a GCE taskrunner worker.
#
# TODO(ccy): It's not entirely clear if these credentials are thread-safe.
# If so, we can cache these credentials to save the overhead of creating
# them again.
return _GCEMetadataCredentials(user_agent=user_agent)
else:
client_scopes = [
'https://www.googleapis.com/auth/bigquery',
'https://www.googleapis.com/auth/cloud-platform',
'https://www.googleapis.com/auth/devstorage.full_control',
'https://www.googleapis.com/auth/userinfo.email',
'https://www.googleapis.com/auth/datastore'
]
try:
credentials = GoogleCredentials.get_application_default()
credentials = credentials.create_scoped(client_scopes)
logging.debug('Connecting using Google Application Default '
'Credentials.')
return credentials
except Exception:
logging.warning('Unable to find default credentials to use.')
raise
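# Illustrative only -- not part of the original module. A minimal sketch of using
# the credentials returned above; it assumes httplib2 is available (oauth2client
# credentials expose authorize(), which wraps an Http object so that requests
# carry the access token):
#
#   import httplib2
#   credentials = get_service_credentials()
#   authed_http = credentials.authorize(httplib2.Http())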
|
kreatorkodi/repository.torrentbr
|
refs/heads/master
|
script.module.urlresolver/lib/urlresolver/plugins/putload.py
|
2
|
'''
urlresolver XBMC Addon
Copyright (C) 2016 Gujal
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
from lib import helpers
from urlresolver.resolver import UrlResolver, ResolverError
class PutLoadResolver(UrlResolver):
name = "putload.tv"
domains = ["putload.tv", "youlolx.site", "shitmovie.com"]
pattern = '(?://|\.)(?:putload\.tv|youlolx\.site|shitmovie\.com)/(?:embed-)?([0-9a-zA-Z]+)'
def get_media_url(self, host, media_id):
return helpers.get_media_url(self.get_url(host, media_id))
def get_url(self, host, media_id):
return self._default_get_url(host, media_id, 'http://{host}/embed-{media_id}.html')
|
GitAngel/django
|
refs/heads/master
|
tests/extra_regress/models.py
|
166
|
from __future__ import unicode_literals
import copy
import datetime
from django.contrib.auth.models import User
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class RevisionableModel(models.Model):
base = models.ForeignKey('self', null=True)
title = models.CharField(blank=True, max_length=255)
when = models.DateTimeField(default=datetime.datetime.now)
def __str__(self):
return "%s (%s, %s)" % (self.title, self.id, self.base.id)
def save(self, *args, **kwargs):
super(RevisionableModel, self).save(*args, **kwargs)
if not self.base:
self.base = self
kwargs.pop('force_insert', None)
kwargs.pop('force_update', None)
super(RevisionableModel, self).save(*args, **kwargs)
def new_revision(self):
new_revision = copy.copy(self)
new_revision.pk = None
return new_revision
class Order(models.Model):
created_by = models.ForeignKey(User)
text = models.TextField()
@python_2_unicode_compatible
class TestObject(models.Model):
first = models.CharField(max_length=20)
second = models.CharField(max_length=20)
third = models.CharField(max_length=20)
def __str__(self):
return 'TestObject: %s,%s,%s' % (self.first, self.second, self.third)
|
tntnatbry/tensorflow
|
refs/heads/master
|
tensorflow/python/saved_model/main_op_impl.py
|
25
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""SavedModel main op implementation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops as tf_data_flow_ops
from tensorflow.python.ops import variables
def main_op():
"""Returns a main op to init variables and tables.
Returns the main op including the group of ops that initializes all
variables, initializes local variables and initializes all tables.
Returns:
The set of ops to be run as part of the main op upon the load operation.
"""
init = variables.global_variables_initializer()
init_local = variables.local_variables_initializer()
init_tables = tf_data_flow_ops.tables_initializer()
return control_flow_ops.group(init, init_local, init_tables)
def main_op_with_restore(restore_op_name):
"""Returns a main op to init variables, tables and restore the graph.
Returns the main op including the group of ops that initializes all
variables, initializes local variables, initializes all tables and runs the
restore op given by `restore_op_name`.
Args:
restore_op_name: Name of the op to use to restore the graph.
Returns:
The set of ops to be run as part of the main op upon the load operation.
"""
with ops.control_dependencies([main_op()]):
main_op_with_restore = control_flow_ops.group(restore_op_name)
return main_op_with_restore
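# Illustrative only -- not part of the original module. A minimal sketch of how
# main_op() is typically used: run it once in a session so that variables, local
# variables and tables are initialized (it can also be supplied when exporting a
# SavedModel, though that wiring is an assumption here, not shown in this file):
#
#   from tensorflow.python.client import session
#   with session.Session() as sess:
#       sess.run(main_op())  # variables, local variables and tables are now initialized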
|
steppicrew/useful-scripts
|
refs/heads/master
|
KindleBooks/lib/cmbtc_v2.2.py
|
1
|
#! /usr/bin/python2
"""
Comprehensive Mazama Book DRM with Topaz Cryptography V2.2
-----BEGIN PUBLIC KEY-----
MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDdBHJ4CNc6DNFCw4MRCw4SWAK6
M8hYfnNEI0yQmn5Ti+W8biT7EatpauE/5jgQMPBmdNrDr1hbHyHBSP7xeC2qlRWC
B62UCxeu/fpfnvNHDN/wPWWH4jynZ2M6cdcnE5LQ+FfeKqZn7gnG2No1U9h7oOHx
y2/pHuYme7U1TsgSjwIDAQAB
-----END PUBLIC KEY-----
"""
from __future__ import with_statement
import csv
import sys
import os
import getopt
import zlib
from struct import pack
from struct import unpack
from ctypes import windll, c_char_p, c_wchar_p, c_uint, POINTER, byref, \
create_unicode_buffer, create_string_buffer, CFUNCTYPE, addressof, \
string_at, Structure, c_void_p, cast
import _winreg as winreg
import Tkinter
import Tkconstants
import tkMessageBox
import traceback
import hashlib
MAX_PATH = 255
kernel32 = windll.kernel32
advapi32 = windll.advapi32
crypt32 = windll.crypt32
global kindleDatabase
global bookFile
global bookPayloadOffset
global bookHeaderRecords
global bookMetadata
global bookKey
global command
#
# Various character maps used to decrypt books. Probably supposed to act as obfuscation
#
charMap1 = "n5Pr6St7Uv8Wx9YzAb0Cd1Ef2Gh3Jk4M"
charMap2 = "AaZzB0bYyCc1XxDdW2wEeVv3FfUuG4g-TtHh5SsIiR6rJjQq7KkPpL8lOoMm9Nn_"
charMap3 = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
charMap4 = "ABCDEFGHIJKLMNPQRSTUVWXYZ123456789"
#
# Exceptions for all the problems that might happen during the script
#
class CMBDTCError(Exception):
pass
class CMBDTCFatal(Exception):
pass
#
# Stolen stuff
#
class DataBlob(Structure):
_fields_ = [('cbData', c_uint),
('pbData', c_void_p)]
DataBlob_p = POINTER(DataBlob)
def GetSystemDirectory():
GetSystemDirectoryW = kernel32.GetSystemDirectoryW
GetSystemDirectoryW.argtypes = [c_wchar_p, c_uint]
GetSystemDirectoryW.restype = c_uint
def GetSystemDirectory():
buffer = create_unicode_buffer(MAX_PATH + 1)
GetSystemDirectoryW(buffer, len(buffer))
return buffer.value
return GetSystemDirectory
GetSystemDirectory = GetSystemDirectory()
def GetVolumeSerialNumber():
GetVolumeInformationW = kernel32.GetVolumeInformationW
GetVolumeInformationW.argtypes = [c_wchar_p, c_wchar_p, c_uint,
POINTER(c_uint), POINTER(c_uint),
POINTER(c_uint), c_wchar_p, c_uint]
GetVolumeInformationW.restype = c_uint
def GetVolumeSerialNumber(path):
vsn = c_uint(0)
GetVolumeInformationW(path, None, 0, byref(vsn), None, None, None, 0)
return vsn.value
return GetVolumeSerialNumber
GetVolumeSerialNumber = GetVolumeSerialNumber()
def GetUserName():
GetUserNameW = advapi32.GetUserNameW
GetUserNameW.argtypes = [c_wchar_p, POINTER(c_uint)]
GetUserNameW.restype = c_uint
def GetUserName():
buffer = create_unicode_buffer(32)
size = c_uint(len(buffer))
while not GetUserNameW(buffer, byref(size)):
buffer = create_unicode_buffer(len(buffer) * 2)
size.value = len(buffer)
return buffer.value.encode('utf-16-le')[::2]
return GetUserName
GetUserName = GetUserName()
def CryptUnprotectData():
_CryptUnprotectData = crypt32.CryptUnprotectData
_CryptUnprotectData.argtypes = [DataBlob_p, c_wchar_p, DataBlob_p,
c_void_p, c_void_p, c_uint, DataBlob_p]
_CryptUnprotectData.restype = c_uint
def CryptUnprotectData(indata, entropy):
indatab = create_string_buffer(indata)
indata = DataBlob(len(indata), cast(indatab, c_void_p))
entropyb = create_string_buffer(entropy)
entropy = DataBlob(len(entropy), cast(entropyb, c_void_p))
outdata = DataBlob()
if not _CryptUnprotectData(byref(indata), None, byref(entropy),
None, None, 0, byref(outdata)):
raise CMBDTCFatal("Failed to Unprotect Data")
return string_at(outdata.pbData, outdata.cbData)
return CryptUnprotectData
CryptUnprotectData = CryptUnprotectData()
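# Note (added): the wrapper above is a thin ctypes binding for the Windows DPAPI
# function crypt32.CryptUnprotectData, which decrypts data that was protected
# under the current user's Windows credentials.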
#
# Returns the MD5 digest of "message"
#
def MD5(message):
ctx = hashlib.md5()
ctx.update(message)
return ctx.digest()
#
# Returns the SHA-1 digest of "message"
#
def SHA1(message):
ctx = hashlib.sha1()
ctx.update(message)
return ctx.digest()
#
# Open the book file at path
#
def openBook(path):
try:
return open(path,'rb')
except:
raise CMBDTCFatal("Could not open book file: " + path)
#
# Encode the bytes in data with the characters in map
#
def encode(data, map):
result = ""
for char in data:
value = ord(char)
Q = (value ^ 0x80) // len(map)
R = value % len(map)
result += map[Q]
result += map[R]
return result
#
# Hash the bytes in data and then encode the digest with the characters in map
#
def encodeHash(data,map):
return encode(MD5(data),map)
#
# Decode the string in data with the characters in map. Returns the decoded bytes
#
def decode(data,map):
result = ""
for i in range (0,len(data),2):
high = map.find(data[i])
low = map.find(data[i+1])
value = (((high * 0x40) ^ 0x80) & 0xFF) + low
result += pack("B",value)
return result
#
# Locate and open the Kindle.info file (Hopefully in the way it is done in the Kindle application)
#
def openKindleInfo():
regkey = winreg.OpenKey(winreg.HKEY_CURRENT_USER, "Software\\Microsoft\\Windows\\CurrentVersion\\Explorer\\Shell Folders\\")
path = winreg.QueryValueEx(regkey, 'Local AppData')[0]
return open(path+'\\Amazon\\Kindle For PC\\{AMAwzsaPaaZAzmZzZQzgZCAkZ3AjA_AY}\\kindle.info','r')
#
# Parse the Kindle.info file and return the records as a list of key-values
#
def parseKindleInfo():
DB = {}
infoReader = openKindleInfo()
infoReader.read(1)
data = infoReader.read()
items = data.split('{')
for item in items:
splito = item.split(':')
DB[splito[0]] =splito[1]
return DB
#
# Find if the original string for a hashed/encoded string is known. If so, return the original string; otherwise return an empty string. (Totally not optimal)
#
def findNameForHash(hash):
names = ["kindle.account.tokens","kindle.cookie.item","eulaVersionAccepted","login_date","kindle.token.item","login","kindle.key.item","kindle.name.info","kindle.device.info", "MazamaRandomNumber"]
result = ""
for name in names:
if hash == encodeHash(name, charMap2):
result = name
break
    return result
#
# Print all the records from the kindle.info file (option -i)
#
def printKindleInfo():
for record in kindleDatabase:
name = findNameForHash(record)
if name != "" :
print (name)
print ("--------------------------\n")
else :
print ("Unknown Record")
print getKindleInfoValueForHash(record)
print "\n"
#
# Get a record from the Kindle.info file for the key "hashedKey" (already hashed and encoded). Return the decoded and decrypted record
#
def getKindleInfoValueForHash(hashedKey):
global kindleDatabase
encryptedValue = decode(kindleDatabase[hashedKey],charMap2)
return CryptUnprotectData(encryptedValue,"")
#
# Get a record from the Kindle.info file for the string in "key" (plaintext). Return the decoded and decrypted record
#
def getKindleInfoValueForKey(key):
return getKindleInfoValueForHash(encodeHash(key,charMap2))
#
# Get a 7 bit encoded number from the book file
#
def bookReadEncodedNumber():
flag = False
data = ord(bookFile.read(1))
if data == 0xFF:
flag = True
data = ord(bookFile.read(1))
if data >= 0x80:
datax = (data & 0x7F)
while data >= 0x80 :
data = ord(bookFile.read(1))
datax = (datax <<7) + (data & 0x7F)
data = datax
if flag:
data = -data
return data
#
# Encode a number in 7 bit format
#
def encodeNumber(number):
result = ""
negative = False
flag = 0
if number < 0 :
number = -number + 1
negative = True
while True:
byte = number & 0x7F
number = number >> 7
byte += flag
result += chr(byte)
flag = 0x80
if number == 0 :
if (byte == 0xFF and negative == False) :
result += chr(0x80)
break
if negative:
result += chr(0xFF)
return result[::-1]
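# Worked example (added): the encoding is a big-endian base-128 varint whose
# leading bytes have the high bit set, e.g.
#   encodeNumber(300) == "\x82\x2c"   # (300 >> 7) | 0x80 = 0x82, 300 & 0x7F = 0x2c
# and bookReadEncodedNumber() reading "\x82\x2c" yields 300 again
# (datax = (0x02 << 7) + 0x2c).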
#
# Get a length prefixed string from the file
#
def bookReadString():
stringLength = bookReadEncodedNumber()
return unpack(str(stringLength)+"s",bookFile.read(stringLength))[0]
#
# Returns a length prefixed string
#
def lengthPrefixString(data):
return encodeNumber(len(data))+data
#
# Read and return the data of one header record at the current book file position [[offset,compressedLength,decompressedLength],...]
#
def bookReadHeaderRecordData():
nbValues = bookReadEncodedNumber()
values = []
for i in range (0,nbValues):
values.append([bookReadEncodedNumber(),bookReadEncodedNumber(),bookReadEncodedNumber()])
return values
#
# Read and parse one header record at the current book file position and return the associated data [[offset,compressedLength,decompressedLength],...]
#
def parseTopazHeaderRecord():
if ord(bookFile.read(1)) != 0x63:
raise CMBDTCFatal("Parse Error : Invalid Header")
tag = bookReadString()
record = bookReadHeaderRecordData()
return [tag,record]
#
# Parse the header of a Topaz file, get all the header records and the offset for the payload
#
def parseTopazHeader():
global bookHeaderRecords
global bookPayloadOffset
magic = unpack("4s",bookFile.read(4))[0]
if magic != 'TPZ0':
raise CMBDTCFatal("Parse Error : Invalid Header, not a Topaz file")
nbRecords = bookReadEncodedNumber()
bookHeaderRecords = {}
for i in range (0,nbRecords):
result = parseTopazHeaderRecord()
bookHeaderRecords[result[0]] = result[1]
if ord(bookFile.read(1)) != 0x64 :
raise CMBDTCFatal("Parse Error : Invalid Header")
bookPayloadOffset = bookFile.tell()
#
# Get a record in the book payload, given its name and index. If necessary the record is decrypted. The record is not decompressed
#
def getBookPayloadRecord(name, index):
encrypted = False
try:
recordOffset = bookHeaderRecords[name][index][0]
except:
raise CMBDTCFatal("Parse Error : Invalid Record, record not found")
bookFile.seek(bookPayloadOffset + recordOffset)
tag = bookReadString()
if tag != name :
raise CMBDTCFatal("Parse Error : Invalid Record, record name doesn't match")
recordIndex = bookReadEncodedNumber()
if recordIndex < 0 :
encrypted = True
recordIndex = -recordIndex -1
if recordIndex != index :
raise CMBDTCFatal("Parse Error : Invalid Record, index doesn't match")
if bookHeaderRecords[name][index][2] != 0 :
record = bookFile.read(bookHeaderRecords[name][index][2])
else:
record = bookFile.read(bookHeaderRecords[name][index][1])
if encrypted:
ctx = topazCryptoInit(bookKey)
record = topazCryptoDecrypt(record,ctx)
return record
#
# Extract, decrypt and decompress a book record indicated by name and index and print it or save it in "filename"
#
def extractBookPayloadRecord(name, index, filename):
compressed = False
try:
compressed = bookHeaderRecords[name][index][2] != 0
record = getBookPayloadRecord(name,index)
    except:
        print("Could not find record")
        return
if compressed:
try:
record = zlib.decompress(record)
except:
raise CMBDTCFatal("Could not decompress record")
if filename != "":
try:
file = open(filename,"wb")
file.write(record)
file.close()
except:
raise CMBDTCFatal("Could not write to destination file")
else:
print(record)
#
# return next record [key,value] from the book metadata from the current book position
#
def readMetadataRecord():
return [bookReadString(),bookReadString()]
#
# Parse the metadata record from the book payload and return a list of [key,values]
#
def parseMetadata():
global bookHeaderRecords
    global bookPayloadOffset
global bookMetadata
bookMetadata = {}
bookFile.seek(bookPayloadOffset + bookHeaderRecords["metadata"][0][0])
tag = bookReadString()
if tag != "metadata" :
raise CMBDTCFatal("Parse Error : Record Names Don't Match")
flags = ord(bookFile.read(1))
nbRecords = ord(bookFile.read(1))
for i in range (0,nbRecords) :
record =readMetadataRecord()
bookMetadata[record[0]] = record[1]
#
# Returns two bit at offset from a bit field
#
def getTwoBitsFromBitField(bitField,offset):
byteNumber = offset // 4
bitPosition = 6 - 2*(offset % 4)
return ord(bitField[byteNumber]) >> bitPosition & 3
#
# Returns the six bits at offset from a bit field
#
def getSixBitsFromBitField(bitField,offset):
offset *= 3
value = (getTwoBitsFromBitField(bitField,offset) <<4) + (getTwoBitsFromBitField(bitField,offset+1) << 2) +getTwoBitsFromBitField(bitField,offset+2)
return value
#
# 8 bits to six bits encoding from hash to generate PID string
#
def encodePID(hash):
global charMap3
PID = ""
for position in range (0,8):
PID += charMap3[getSixBitsFromBitField(hash,position)]
return PID
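# Note (added, hedged): charMap3 is the standard base64 alphabet and the bit-field
# helpers read consecutive 6-bit groups MSB-first, so encodePID(hash) should be
# equivalent to base64.b64encode(hash[:6]) - eight characters covering the first
# 48 bits of the digest.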
#
# Context initialisation for the Topaz Crypto
#
def topazCryptoInit(key):
ctx1 = 0x0CAFFE19E
for keyChar in key:
keyByte = ord(keyChar)
ctx2 = ctx1
ctx1 = ((((ctx1 >>2) * (ctx1 >>7))&0xFFFFFFFF) ^ (keyByte * keyByte * 0x0F902007)& 0xFFFFFFFF )
return [ctx1,ctx2]
#
# decrypt data with the context prepared by topazCryptoInit()
#
def topazCryptoDecrypt(data, ctx):
ctx1 = ctx[0]
ctx2 = ctx[1]
plainText = ""
for dataChar in data:
dataByte = ord(dataChar)
m = (dataByte ^ ((ctx1 >> 3) &0xFF) ^ ((ctx2<<3) & 0xFF)) &0xFF
ctx2 = ctx1
ctx1 = (((ctx1 >> 2) * (ctx1 >> 7)) &0xFFFFFFFF) ^((m * m * 0x0F902007) &0xFFFFFFFF)
plainText += chr(m)
return plainText
#
# Decrypt a payload record with the PID
#
def decryptRecord(data,PID):
ctx = topazCryptoInit(PID)
return topazCryptoDecrypt(data, ctx)
#
# Try to decrypt a dkey record (contains the book PID)
#
def decryptDkeyRecord(data,PID):
record = decryptRecord(data,PID)
fields = unpack("3sB8sB8s3s",record)
if fields[0] != "PID" or fields[5] != "pid" :
raise CMBDTCError("Didn't find PID magic numbers in record")
elif fields[1] != 8 or fields[3] != 8 :
raise CMBDTCError("Record didn't contain correct length fields")
elif fields[2] != PID :
raise CMBDTCError("Record didn't contain PID")
return fields[4]
#
# Decrypt all the book's dkey records (contain the book PID)
#
def decryptDkeyRecords(data,PID):
nbKeyRecords = ord(data[0])
records = []
data = data[1:]
for i in range (0,nbKeyRecords):
length = ord(data[0])
try:
key = decryptDkeyRecord(data[1:length+1],PID)
records.append(key)
except CMBDTCError:
pass
data = data[1+length:]
return records
#
# Encryption table used to generate the device PID
#
def generatePidEncryptionTable() :
table = []
for counter1 in range (0,0x100):
value = counter1
for counter2 in range (0,8):
if (value & 1 == 0) :
value = value >> 1
else :
value = value >> 1
value = value ^ 0xEDB88320
table.append(value)
return table
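# Note (added): the table above is the standard reflected CRC-32 lookup table
# (polynomial 0xEDB88320). generatePidSeed() below performs a CRC-32-style update
# over the first four DSN bytes, but starts from 0 and omits the usual
# initial/final 0xFFFFFFFF XOR, so it is not interchangeable with binascii.crc32().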
#
# Seed value used to generate the device PID
#
def generatePidSeed(table,dsn) :
value = 0
for counter in range (0,4) :
index = (ord(dsn[counter]) ^ value) &0xFF
value = (value >> 8) ^ table[index]
return value
#
# Generate the device PID
#
def generateDevicePID(table,dsn,nbRoll):
seed = generatePidSeed(table,dsn)
pidAscii = ""
pid = [(seed >>24) &0xFF,(seed >> 16) &0xff,(seed >> 8) &0xFF,(seed) & 0xFF,(seed>>24) & 0xFF,(seed >> 16) &0xff,(seed >> 8) &0xFF,(seed) & 0xFF]
index = 0
for counter in range (0,nbRoll):
pid[index] = pid[index] ^ ord(dsn[counter])
index = (index+1) %8
for counter in range (0,8):
index = ((((pid[counter] >>5) & 3) ^ pid[counter]) & 0x1f) + (pid[counter] >> 7)
pidAscii += charMap4[index]
return pidAscii
#
# Create decrypted book payload
#
def createDecryptedPayload(payload):
# store data to be able to create the header later
headerData= []
currentOffset = 0
# Add social DRM to decrypted files
try:
data = getKindleInfoValueForKey("kindle.name.info")+":"+ getKindleInfoValueForKey("login")
if payload!= None:
payload.write(lengthPrefixString("sdrm"))
payload.write(encodeNumber(0))
payload.write(data)
else:
currentOffset += len(lengthPrefixString("sdrm"))
currentOffset += len(encodeNumber(0))
currentOffset += len(data)
except:
pass
for headerRecord in bookHeaderRecords:
name = headerRecord
newRecord = []
if name != "dkey" :
for index in range (0,len(bookHeaderRecords[name])) :
offset = currentOffset
if payload != None:
# write tag
payload.write(lengthPrefixString(name))
# write data
payload.write(encodeNumber(index))
payload.write(getBookPayloadRecord(name, index))
else :
currentOffset += len(lengthPrefixString(name))
currentOffset += len(encodeNumber(index))
currentOffset += len(getBookPayloadRecord(name, index))
newRecord.append([offset,bookHeaderRecords[name][index][1],bookHeaderRecords[name][index][2]])
headerData.append([name,newRecord])
return headerData
#
# Create decrypted book
#
def createDecryptedBook(outputFile):
outputFile = open(outputFile,"wb")
    # First pass: compute the header records (offsets and lengths) without writing the payload
headerData = createDecryptedPayload(None)
outputFile.write("TPZ0")
outputFile.write(encodeNumber(len(headerData)))
for header in headerData :
outputFile.write(chr(0x63))
outputFile.write(lengthPrefixString(header[0]))
outputFile.write(encodeNumber(len(header[1])))
for numbers in header[1] :
outputFile.write(encodeNumber(numbers[0]))
outputFile.write(encodeNumber(numbers[1]))
outputFile.write(encodeNumber(numbers[2]))
outputFile.write(chr(0x64))
createDecryptedPayload(outputFile)
outputFile.close()
#
# Set the command for the program to execute, according to the command-line parameters
#
def setCommand(name) :
global command
if command != "" :
raise CMBDTCFatal("Invalid command line parameters")
else :
command = name
#
# Program usage
#
def usage():
print("\nUsage:")
print("\nCMBDTC.py [options] bookFileName\n")
print("-p Adds a PID to the list of PIDs that are tried to decrypt the book key (can be used several times)")
print("-d Saves a decrypted copy of the book")
print("-r Prints or writes to disk a record indicated in the form name:index (e.g \"img:0\")")
print("-o Output file name to write records and decrypted books")
print("-v Verbose (can be used several times)")
print("-i Prints kindle.info database")
#
# Main
#
def main(argv=sys.argv):
global kindleDatabase
global bookMetadata
global bookKey
global bookFile
global command
progname = os.path.basename(argv[0])
verbose = 0
recordName = ""
recordIndex = 0
outputFile = ""
PIDs = []
kindleDatabase = None
command = ""
try:
opts, args = getopt.getopt(sys.argv[1:], "vdir:o:p:")
except getopt.GetoptError, err:
# print help information and exit:
print str(err) # will print something like "option -a not recognized"
usage()
sys.exit(2)
if len(opts) == 0 and len(args) == 0 :
usage()
sys.exit(2)
for o, a in opts:
if o == "-v":
verbose+=1
if o == "-i":
setCommand("printInfo")
if o =="-o":
if a == None :
raise CMBDTCFatal("Invalid parameter for -o")
outputFile = a
if o =="-r":
setCommand("printRecord")
try:
recordName,recordIndex = a.split(':')
except:
raise CMBDTCFatal("Invalid parameter for -r")
if o =="-p":
PIDs.append(a)
if o =="-d":
setCommand("doit")
if command == "" :
raise CMBDTCFatal("No action supplied on command line")
#
# Read the encrypted database
#
try:
kindleDatabase = parseKindleInfo()
except Exception, message:
if verbose>0:
print(message)
if kindleDatabase != None :
if command == "printInfo" :
printKindleInfo()
#
# Compute the DSN
#
# Get the Mazama Random number
MazamaRandomNumber = getKindleInfoValueForKey("MazamaRandomNumber")
# Get the HDD serial
encodedSystemVolumeSerialNumber = encodeHash(str(GetVolumeSerialNumber(GetSystemDirectory().split('\\')[0] + '\\')),charMap1)
# Get the current user name
encodedUsername = encodeHash(GetUserName(),charMap1)
# concat, hash and encode
DSN = encode(SHA1(MazamaRandomNumber+encodedSystemVolumeSerialNumber+encodedUsername),charMap1)
if verbose >1:
print("DSN: " + DSN)
#
# Compute the device PID
#
table = generatePidEncryptionTable()
devicePID = generateDevicePID(table,DSN,4)
PIDs.append(devicePID)
if verbose > 0:
print("Device PID: " + devicePID)
#
# Open book and parse metadata
#
if len(args) == 1:
bookFile = openBook(args[0])
parseTopazHeader()
parseMetadata()
#
# Compute book PID
#
# Get the account token
if kindleDatabase != None:
kindleAccountToken = getKindleInfoValueForKey("kindle.account.tokens")
if verbose >1:
print("Account Token: " + kindleAccountToken)
keysRecord = bookMetadata["keys"]
keysRecordRecord = bookMetadata[keysRecord]
pidHash = SHA1(DSN+kindleAccountToken+keysRecord+keysRecordRecord)
bookPID = encodePID(pidHash)
PIDs.append(bookPID)
if verbose > 0:
print ("Book PID: " + bookPID )
#
# Decrypt book key
#
dkey = getBookPayloadRecord('dkey', 0)
bookKeys = []
for PID in PIDs :
bookKeys+=decryptDkeyRecords(dkey,PID)
if len(bookKeys) == 0 :
if verbose > 0 :
print ("Book key could not be found. Maybe this book is not registered with this device.")
else :
bookKey = bookKeys[0]
if verbose > 0:
print("Book key: " + bookKey.encode('hex'))
if command == "printRecord" :
extractBookPayloadRecord(recordName,int(recordIndex),outputFile)
if outputFile != "" and verbose>0 :
print("Wrote record to file: "+outputFile)
elif command == "doit" :
if outputFile!="" :
createDecryptedBook(outputFile)
if verbose >0 :
print ("Decrypted book saved. Don't pirate!")
elif verbose > 0:
print("Output file name was not supplied.")
return 0
if __name__ == '__main__':
sys.exit(main())
|
zonk1024/moto
|
refs/heads/master
|
tests/test_cloudformation/fixtures/rds_mysql_with_read_replica.py
|
19
|
from __future__ import unicode_literals
template = {
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "AWS CloudFormation Sample Template RDS_MySQL_With_Read_Replica: Sample template showing how to create a highly-available, RDS DBInstance with a read replica. **WARNING** This template creates an Amazon Relational Database Service database instance and Amazon CloudWatch alarms. You will be billed for the AWS resources used if you create a stack from this template.",
"Parameters": {
"DBName": {
"Default": "MyDatabase",
"Description" : "The database name",
"Type": "String",
"MinLength": "1",
"MaxLength": "64",
"AllowedPattern" : "[a-zA-Z][a-zA-Z0-9]*",
"ConstraintDescription" : "must begin with a letter and contain only alphanumeric characters."
},
"DBInstanceIdentifier": {
"Type": "String"
},
"DBUser": {
"NoEcho": "true",
"Description" : "The database admin account username",
"Type": "String",
"MinLength": "1",
"MaxLength": "16",
"AllowedPattern" : "[a-zA-Z][a-zA-Z0-9]*",
"ConstraintDescription" : "must begin with a letter and contain only alphanumeric characters."
},
"DBPassword": {
"NoEcho": "true",
"Description" : "The database admin account password",
"Type": "String",
"MinLength": "1",
"MaxLength": "41",
"AllowedPattern" : "[a-zA-Z0-9]+",
"ConstraintDescription" : "must contain only alphanumeric characters."
},
"DBAllocatedStorage": {
"Default": "5",
"Description" : "The size of the database (Gb)",
"Type": "Number",
"MinValue": "5",
"MaxValue": "1024",
"ConstraintDescription" : "must be between 5 and 1024Gb."
},
"DBInstanceClass": {
"Description" : "The database instance type",
"Type": "String",
"Default": "db.m1.small",
"AllowedValues" : [ "db.t1.micro", "db.m1.small", "db.m1.medium", "db.m1.large", "db.m1.xlarge", "db.m2.xlarge", "db.m2.2xlarge", "db.m2.4xlarge", "db.m3.medium", "db.m3.large", "db.m3.xlarge", "db.m3.2xlarge", "db.r3.large", "db.r3.xlarge", "db.r3.2xlarge", "db.r3.4xlarge", "db.r3.8xlarge", "db.m2.xlarge", "db.m2.2xlarge", "db.m2.4xlarge", "db.cr1.8xlarge"]
,
"ConstraintDescription" : "must select a valid database instance type."
},
"EC2SecurityGroup": {
"Description" : "The EC2 security group that contains instances that need access to the database",
"Default": "default",
"Type": "String",
"AllowedPattern" : "[a-zA-Z0-9\\-]+",
"ConstraintDescription" : "must be a valid security group name."
},
"MultiAZ" : {
"Description" : "Multi-AZ master database",
"Type" : "String",
"Default" : "false",
"AllowedValues" : [ "true", "false" ],
"ConstraintDescription" : "must be true or false."
}
},
"Conditions" : {
"Is-EC2-VPC" : { "Fn::Or" : [ {"Fn::Equals" : [{"Ref" : "AWS::Region"}, "eu-central-1" ]},
{"Fn::Equals" : [{"Ref" : "AWS::Region"}, "cn-north-1" ]}]},
"Is-EC2-Classic" : { "Fn::Not" : [{ "Condition" : "Is-EC2-VPC"}]}
},
"Resources" : {
"DBEC2SecurityGroup": {
"Type": "AWS::EC2::SecurityGroup",
"Condition" : "Is-EC2-VPC",
"Properties" : {
"GroupDescription": "Open database for access",
"SecurityGroupIngress" : [{
"IpProtocol" : "tcp",
"FromPort" : "3306",
"ToPort" : "3306",
"SourceSecurityGroupName" : { "Ref" : "EC2SecurityGroup" }
}]
}
},
"DBSecurityGroup": {
"Type": "AWS::RDS::DBSecurityGroup",
"Condition" : "Is-EC2-Classic",
"Properties": {
"DBSecurityGroupIngress": {
"EC2SecurityGroupName": { "Ref": "EC2SecurityGroup" }
},
"GroupDescription": "database access"
}
},
"my_vpc": {
"Type" : "AWS::EC2::VPC",
"Properties" : {
"CidrBlock" : "10.0.0.0/16",
}
},
"EC2Subnet": {
"Type" : "AWS::EC2::Subnet",
"Condition" : "Is-EC2-VPC",
"Properties" : {
"AvailabilityZone" : "eu-central-1a",
"CidrBlock" : "10.0.1.0/24",
"VpcId" : { "Ref" : "my_vpc" }
}
},
"DBSubnet": {
"Type": "AWS::RDS::DBSubnetGroup",
"Condition" : "Is-EC2-VPC",
"Properties": {
"DBSubnetGroupDescription": "my db subnet group",
"SubnetIds" : [ { "Ref": "EC2Subnet" } ],
}
},
"MasterDB" : {
"Type" : "AWS::RDS::DBInstance",
"Properties" : {
"DBInstanceIdentifier": { "Ref": "DBInstanceIdentifier" },
"DBName" : { "Ref" : "DBName" },
"AllocatedStorage" : { "Ref" : "DBAllocatedStorage" },
"DBInstanceClass" : { "Ref" : "DBInstanceClass" },
"Engine" : "MySQL",
"DBSubnetGroupName": {"Fn::If": ["Is-EC2-VPC", { "Ref": "DBSubnet" }, { "Ref": "AWS::NoValue" }]},
"MasterUsername" : { "Ref" : "DBUser" },
"MasterUserPassword" : { "Ref" : "DBPassword" },
"MultiAZ" : { "Ref" : "MultiAZ" },
"Tags" : [{ "Key" : "Name", "Value" : "Master Database" }],
"VPCSecurityGroups": { "Fn::If" : [ "Is-EC2-VPC", [ { "Fn::GetAtt": [ "DBEC2SecurityGroup", "GroupId" ] } ], { "Ref" : "AWS::NoValue"}]},
"DBSecurityGroups": { "Fn::If" : [ "Is-EC2-Classic", [ { "Ref": "DBSecurityGroup" } ], { "Ref" : "AWS::NoValue"}]}
},
"DeletionPolicy" : "Snapshot"
},
"ReplicaDB" : {
"Type" : "AWS::RDS::DBInstance",
"Properties" : {
"SourceDBInstanceIdentifier" : { "Ref" : "MasterDB" },
"DBInstanceClass" : { "Ref" : "DBInstanceClass" },
"Tags" : [{ "Key" : "Name", "Value" : "Read Replica Database" }]
}
}
},
"Outputs" : {
"EC2Platform" : {
"Description" : "Platform in which this stack is deployed",
"Value" : { "Fn::If" : [ "Is-EC2-VPC", "EC2-VPC", "EC2-Classic" ]}
},
"MasterJDBCConnectionString": {
"Description" : "JDBC connection string for the master database",
"Value" : { "Fn::Join": [ "", [ "jdbc:mysql://",
{ "Fn::GetAtt": [ "MasterDB", "Endpoint.Address" ] },
":",
{ "Fn::GetAtt": [ "MasterDB", "Endpoint.Port" ] },
"/",
{ "Ref": "DBName" }]]}
},
"ReplicaJDBCConnectionString": {
"Description" : "JDBC connection string for the replica database",
"Value" : { "Fn::Join": [ "", [ "jdbc:mysql://",
{ "Fn::GetAtt": [ "ReplicaDB", "Endpoint.Address" ] },
":",
{ "Fn::GetAtt": [ "ReplicaDB", "Endpoint.Port" ] },
"/",
{ "Ref": "DBName" }]]}
}
}
}
|
htwenhe/DJOA
|
refs/heads/master
|
env/Lib/site-packages/libpasteurize/fixes/fix_unpacking.py
|
60
|
u"""
Fixer for:
(a,)* *b (,c)* [,] = s
for (a,)* *b (,c)* [,] in d: ...
"""
from lib2to3 import fixer_base
from itertools import count
from lib2to3.fixer_util import (Assign, Comma, Call, Newline, Name,
Number, token, syms, Node, Leaf)
from libfuturize.fixer_util import indentation, suitify, commatize
# from libfuturize.fixer_util import Assign, Comma, Call, Newline, Name, Number, indentation, suitify, commatize, token, syms, Node, Leaf
def assignment_source(num_pre, num_post, LISTNAME, ITERNAME):
u"""
Accepts num_pre and num_post, which are counts of values
before and after the starg (not including the starg)
Returns a source fit for Assign() from fixer_util
"""
children = []
pre = unicode(num_pre)
post = unicode(num_post)
# This code builds the assignment source from lib2to3 tree primitives.
# It's not very readable, but it seems like the most correct way to do it.
if num_pre > 0:
pre_part = Node(syms.power, [Name(LISTNAME), Node(syms.trailer, [Leaf(token.LSQB, u"["), Node(syms.subscript, [Leaf(token.COLON, u":"), Number(pre)]), Leaf(token.RSQB, u"]")])])
children.append(pre_part)
children.append(Leaf(token.PLUS, u"+", prefix=u" "))
main_part = Node(syms.power, [Leaf(token.LSQB, u"[", prefix=u" "), Name(LISTNAME), Node(syms.trailer, [Leaf(token.LSQB, u"["), Node(syms.subscript, [Number(pre) if num_pre > 0 else Leaf(1, u""), Leaf(token.COLON, u":"), Node(syms.factor, [Leaf(token.MINUS, u"-"), Number(post)]) if num_post > 0 else Leaf(1, u"")]), Leaf(token.RSQB, u"]"), Leaf(token.RSQB, u"]")])])
children.append(main_part)
if num_post > 0:
children.append(Leaf(token.PLUS, u"+", prefix=u" "))
post_part = Node(syms.power, [Name(LISTNAME, prefix=u" "), Node(syms.trailer, [Leaf(token.LSQB, u"["), Node(syms.subscript, [Node(syms.factor, [Leaf(token.MINUS, u"-"), Number(post)]), Leaf(token.COLON, u":")]), Leaf(token.RSQB, u"]")])])
children.append(post_part)
source = Node(syms.arith_expr, children)
return source
class FixUnpacking(fixer_base.BaseFix):
PATTERN = u"""
expl=expr_stmt< testlist_star_expr<
pre=(any ',')*
star_expr< '*' name=NAME >
post=(',' any)* [','] > '=' source=any > |
impl=for_stmt< 'for' lst=exprlist<
pre=(any ',')*
star_expr< '*' name=NAME >
post=(',' any)* [','] > 'in' it=any ':' suite=any>"""
def fix_explicit_context(self, node, results):
pre, name, post, source = (results.get(n) for n in (u"pre", u"name", u"post", u"source"))
pre = [n.clone() for n in pre if n.type == token.NAME]
name.prefix = u" "
post = [n.clone() for n in post if n.type == token.NAME]
target = [n.clone() for n in commatize(pre + [name.clone()] + post)]
# to make the special-case fix for "*z, = ..." correct with the least
# amount of modification, make the left-side into a guaranteed tuple
target.append(Comma())
source.prefix = u""
setup_line = Assign(Name(self.LISTNAME), Call(Name(u"list"), [source.clone()]))
power_line = Assign(target, assignment_source(len(pre), len(post), self.LISTNAME, self.ITERNAME))
return setup_line, power_line
def fix_implicit_context(self, node, results):
u"""
Only example of the implicit context is
a for loop, so only fix that.
"""
pre, name, post, it = (results.get(n) for n in (u"pre", u"name", u"post", u"it"))
pre = [n.clone() for n in pre if n.type == token.NAME]
name.prefix = u" "
post = [n.clone() for n in post if n.type == token.NAME]
target = [n.clone() for n in commatize(pre + [name.clone()] + post)]
# to make the special-case fix for "*z, = ..." correct with the least
# amount of modification, make the left-side into a guaranteed tuple
target.append(Comma())
source = it.clone()
source.prefix = u""
setup_line = Assign(Name(self.LISTNAME), Call(Name(u"list"), [Name(self.ITERNAME)]))
power_line = Assign(target, assignment_source(len(pre), len(post), self.LISTNAME, self.ITERNAME))
return setup_line, power_line
def transform(self, node, results):
u"""
a,b,c,d,e,f,*g,h,i = range(100) changes to
_3to2list = list(range(100))
a,b,c,d,e,f,g,h,i, = _3to2list[:6] + [_3to2list[6:-2]] + _3to2list[-2:]
and
for a,b,*c,d,e in iter_of_iters: do_stuff changes to
for _3to2iter in iter_of_iters:
_3to2list = list(_3to2iter)
a,b,c,d,e, = _3to2list[:2] + [_3to2list[2:-2]] + _3to2list[-2:]
do_stuff
"""
self.LISTNAME = self.new_name(u"_3to2list")
self.ITERNAME = self.new_name(u"_3to2iter")
expl, impl = results.get(u"expl"), results.get(u"impl")
if expl is not None:
setup_line, power_line = self.fix_explicit_context(node, results)
setup_line.prefix = expl.prefix
power_line.prefix = indentation(expl.parent)
setup_line.append_child(Newline())
parent = node.parent
i = node.remove()
parent.insert_child(i, power_line)
parent.insert_child(i, setup_line)
elif impl is not None:
setup_line, power_line = self.fix_implicit_context(node, results)
suitify(node)
suite = [k for k in node.children if k.type == syms.suite][0]
setup_line.prefix = u""
power_line.prefix = suite.children[1].value
suite.children[2].prefix = indentation(suite.children[2])
suite.insert_child(2, Newline())
suite.insert_child(2, power_line)
suite.insert_child(2, Newline())
suite.insert_child(2, setup_line)
results.get(u"lst").replace(Name(self.ITERNAME, prefix=u" "))
|
kyokyos/bioinform
|
refs/heads/master
|
Diabetes_prediction.py
|
1
|
# DIY diabetes risk predictor; please follow up with a qualified medical professional
#algorithm:
#total_points=gender+BMI+waist+age+race+family_gene+blood_pressure+psycho
#(total_points: 0-6 low; 7-15 increased; 16-24 moderate; 25-47 high)
#BMI=weight(kg)/height(m)**2
#value
gender="male"
height=1.65
weight=62
hypertension="True"
race="Chinese"
family_history="True"
age=65
waist=100
psycho="True"
#points
total_points=0
gender_point=0
hypertension_point=0
race_point=0
family_history_point=0
age_point=0
waist_point=0
BMI_point=0
BMI_value=0
psycho_point=0
diabetes_probability=0
def gender_analysis(gender):
if gender=="male":
return 1
else:
return 0
gender_point=gender_analysis(gender)
#print "gender_point:",gender_point
def blood_pressure_analysis(hypertension):
if hypertension=="True":
return 5
    if hypertension=="False":
return 0
hypertension_point=blood_pressure_analysis(hypertension)
#print "hypertension_point:",hypertension_point
def race_analysis(race):
if race=="white":
return 0
else:
return 6
race_point=race_analysis(race)
#print "race_point:",race_point
def family_history_analysis(family_history):
if family_history=="True":
return 5
if family_history=="False":
return 0
family_history_point=family_history_analysis(family_history)
#print "family_history_point:",family_history_point
def age_analysis(age):
if age<=50:
return 0
elif 50<age<=60:
return 5
elif 60<age<=70:
return 9
else:
return 13
age_point=age_analysis(age)
#print "age_point:",age_point
def waist_analysis(waist):
if waist<=90:
return 0
elif 90<waist<=100:
return 4
elif 100<waist<=110:
return 6
else:
return 9
waist_point=waist_analysis(waist)
#print "waist_point:",waist_point
def BMI(height,weight):
return weight/height**2
BMI_value=BMI(height,weight)
#print "BMI_value:",BMI_value
def BMI_analysis(BMI_value):
if BMI_value<18.5:
return 3
elif 18.5<=BMI_value<25:
return 0
elif 25<=BMI_value<30:
return 3
elif 30<=BMI_value<32:
return 5
else:
return 9
BMI_point=BMI_analysis(BMI_value)
#print"BMI_point:",BMI_point
def psycho_analysis(psycho):
if psycho=="True":
return 5
if psycho=="False":
return 0
psycho_point=psycho_analysis(psycho)
#print "psycho_point:",psycho_point
#total_point
total_points=gender_point+hypertension_point+race_point+family_history_point+age_point+\
waist_point+BMI_point+psycho_point
#print "total_points:",total_points
def diabetes_prediction(total_points):
if 0<=total_points<=6:
return "low"
elif 6<total_points<=15:
return "increased"
elif 15<total_points<=24:
return "moderate"
else:
return "high"
diabetes_probability=diabetes_prediction(total_points)
print "Your diabetes prediction:",diabetes_probability
|
mglukhikh/intellij-community
|
refs/heads/master
|
python/testData/quickFixes/PyAddSpecifierToFormatQuickFixTest/unicodeEscapes.py
|
29
|
s = <warning descr="Format specifier character missing">u"\N{LATIN SMALL LETTER B}%s\N{NUMBER SIGN}\
%\N{LATIN SMALL <caret>LETTER B}"</warning> % ("a", "b")
|
albertz/music-player
|
refs/heads/master
|
mac/pyobjc-framework-Cocoa/PyObjCTest/test_nsdistributednotificationcenter.py
|
3
|
from PyObjCTools.TestSupport import *
from Foundation import *
try:
unicode
except NameError:
unicode = str
class TestNSDistributedNotificationCenter (TestCase):
def testConstants(self):
self.assertIsInstance(NSLocalNotificationCenterType, unicode)
self.assertEqual(NSNotificationSuspensionBehaviorDrop, 1)
self.assertEqual(NSNotificationSuspensionBehaviorCoalesce, 2)
self.assertEqual(NSNotificationSuspensionBehaviorHold, 3)
self.assertEqual(NSNotificationSuspensionBehaviorDeliverImmediately, 4)
self.assertEqual(NSNotificationDeliverImmediately, 1)
self.assertEqual(NSNotificationPostToAllSessions, 2)
def testMethods(self):
self.assertArgIsSEL(NSDistributedNotificationCenter.addObserver_selector_name_object_suspensionBehavior_, 1, b'v@:@')
self.assertArgIsSEL(NSDistributedNotificationCenter.addObserver_selector_name_object_, 1, b'v@:@')
self.assertArgIsBOOL(NSDistributedNotificationCenter.postNotificationName_object_userInfo_deliverImmediately_, 3)
self.assertArgIsBOOL(NSDistributedNotificationCenter.setSuspended_, 0)
self.assertResultIsBOOL(NSDistributedNotificationCenter.suspended)
if __name__ == "__main__":
main()
|
kvar/ansible
|
refs/heads/seas_master_2.9.5
|
lib/ansible/modules/network/slxos/slxos_interface.py
|
91
|
#!/usr/bin/python
#
# (c) 2018 Extreme Networks Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: slxos_interface
version_added: "2.6"
author: "Lindsay Hill (@LindsayHill)"
short_description: Manage Interfaces on Extreme SLX-OS network devices
description:
- This module provides declarative management of Interfaces
on Extreme SLX-OS network devices.
notes:
- Tested against SLX-OS 17s.1.02
options:
name:
description:
- Name of the Interface.
required: true
description:
description:
- Description of Interface.
enabled:
description:
- Interface link status.
default: True
type: bool
speed:
description:
- Interface link speed.
mtu:
description:
- Maximum size of transmit packet.
tx_rate:
description:
- Transmit rate in bits per second (bps).
rx_rate:
description:
- Receiver rate in bits per second (bps).
neighbors:
description:
- Check the operational state of given interface C(name) for LLDP neighbor.
- The following suboptions are available.
suboptions:
host:
description:
- "LLDP neighbor host for given interface C(name)."
port:
description:
- "LLDP neighbor port to which given interface C(name) is connected."
aggregate:
description: List of Interfaces definitions.
delay:
description:
- Time in seconds to wait before checking for the operational state on remote
device. This wait is applicable for operational state argument which are
I(state) with values C(up)/C(down), I(tx_rate) and I(rx_rate).
default: 10
state:
description:
- State of the Interface configuration, C(up) means present and
operationally up and C(down) means present and operationally C(down)
default: present
choices: ['present', 'absent', 'up', 'down']
"""
EXAMPLES = """
- name: configure interface
slxos_interface:
name: Ethernet 0/2
description: test-interface
speed: 1000
mtu: 9216
- name: remove interface
slxos_interface:
name: Loopback 9
state: absent
- name: make interface up
slxos_interface:
name: Ethernet 0/2
enabled: True
- name: make interface down
slxos_interface:
name: Ethernet 0/2
enabled: False
- name: Check intent arguments
slxos_interface:
name: Ethernet 0/2
state: up
tx_rate: ge(0)
rx_rate: le(0)
- name: Check neighbors intent arguments
slxos_interface:
name: Ethernet 0/41
neighbors:
- port: Ethernet 0/41
host: SLX
- name: Config + intent
slxos_interface:
name: Ethernet 0/2
enabled: False
state: down
- name: Add interface using aggregate
slxos_interface:
aggregate:
- { name: Ethernet 0/1, mtu: 1548, description: test-interface-1 }
- { name: Ethernet 0/2, mtu: 1548, description: test-interface-2 }
speed: 10000
state: present
- name: Delete interface using aggregate
slxos_interface:
aggregate:
- name: Loopback 9
- name: Loopback 10
state: absent
"""
RETURN = """
commands:
description: The list of configuration mode commands to send to the device.
returned: always, except for the platforms that use Netconf transport to manage the device.
type: list
sample:
- interface Ethernet 0/2
- description test-interface
- mtu 1548
"""
import re
from copy import deepcopy
from time import sleep
from ansible.module_utils._text import to_text
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import exec_command
from ansible.module_utils.network.slxos.slxos import get_config, load_config
from ansible.module_utils.network.common.config import NetworkConfig
from ansible.module_utils.network.common.utils import conditional, remove_default_spec
def validate_mtu(value, module):
if value and not 1548 <= int(value) <= 9216:
module.fail_json(msg='mtu must be between 1548 and 9216')
def validate_param_values(module, obj, param=None):
if param is None:
param = module.params
for key in obj:
# validate the param value (if validator func exists)
validator = globals().get('validate_%s' % key)
if callable(validator):
validator(param.get(key), module)
def parse_shutdown(configobj, name):
cfg = configobj['interface %s' % name]
cfg = '\n'.join(cfg.children)
match = re.search(r'^shutdown', cfg, re.M)
if match:
return True
else:
return False
def parse_config_argument(configobj, name, arg=None):
cfg = configobj['interface %s' % name]
cfg = '\n'.join(cfg.children)
match = re.search(r'%s (.+)$' % arg, cfg, re.M)
if match:
return match.group(1)
def search_obj_in_list(name, lst):
for o in lst:
if o['name'] == name:
return o
return None
def add_command_to_interface(interface, cmd, commands):
if interface not in commands:
commands.append(interface)
commands.append(cmd)
def map_config_to_obj(module):
config = get_config(module)
configobj = NetworkConfig(indent=1, contents=config)
match = re.findall(r'^interface (\S+ \S+)', config, re.M)
if not match:
return list()
instances = list()
for item in set(match):
obj = {
'name': item,
'description': parse_config_argument(configobj, item, 'description'),
'speed': parse_config_argument(configobj, item, 'speed'),
'mtu': parse_config_argument(configobj, item, 'mtu'),
'disable': True if parse_shutdown(configobj, item) else False,
'state': 'present'
}
instances.append(obj)
return instances
def map_params_to_obj(module):
obj = []
aggregate = module.params.get('aggregate')
if aggregate:
for item in aggregate:
for key in item:
if item.get(key) is None:
item[key] = module.params[key]
validate_param_values(module, item, item)
d = item.copy()
if d['enabled']:
d['disable'] = False
else:
d['disable'] = True
obj.append(d)
else:
params = {
'name': module.params['name'],
'description': module.params['description'],
'speed': module.params['speed'],
'mtu': module.params['mtu'],
'state': module.params['state'],
'delay': module.params['delay'],
'tx_rate': module.params['tx_rate'],
'rx_rate': module.params['rx_rate'],
'neighbors': module.params['neighbors']
}
validate_param_values(module, params)
if module.params['enabled']:
params.update({'disable': False})
else:
params.update({'disable': True})
obj.append(params)
return obj
def map_obj_to_commands(updates):
commands = list()
want, have = updates
args = ('speed', 'description', 'mtu')
for w in want:
name = w['name']
disable = w['disable']
state = w['state']
obj_in_have = search_obj_in_list(name, have)
interface = 'interface ' + name
if state == 'absent' and obj_in_have:
commands.append('no ' + interface)
elif state in ('present', 'up', 'down'):
if obj_in_have:
for item in args:
candidate = w.get(item)
running = obj_in_have.get(item)
if candidate != running:
if candidate:
cmd = item + ' ' + str(candidate)
add_command_to_interface(interface, cmd, commands)
if disable and not obj_in_have.get('disable', False):
add_command_to_interface(interface, 'shutdown', commands)
elif not disable and obj_in_have.get('disable', False):
add_command_to_interface(interface, 'no shutdown', commands)
else:
commands.append(interface)
for item in args:
value = w.get(item)
if value:
commands.append(item + ' ' + str(value))
if disable:
commands.append('no shutdown')
return commands
def check_declarative_intent_params(module, want, result):
failed_conditions = []
have_neighbors = None
for w in want:
want_state = w.get('state')
want_tx_rate = w.get('tx_rate')
want_rx_rate = w.get('rx_rate')
want_neighbors = w.get('neighbors')
if want_state not in ('up', 'down') and not want_tx_rate and not want_rx_rate and not want_neighbors:
continue
if result['changed']:
sleep(w['delay'])
command = 'show interface %s' % w['name']
rc, out, err = exec_command(module, command)
if rc != 0:
module.fail_json(msg=to_text(err, errors='surrogate_then_replace'), command=command, rc=rc)
if want_state in ('up', 'down'):
match = re.search(r'%s (\w+)' % 'line protocol is', out, re.M)
have_state = None
if match:
have_state = match.group(1)
if have_state is None or not conditional(want_state, have_state.strip()):
failed_conditions.append('state ' + 'eq(%s)' % want_state)
if want_tx_rate:
match = re.search(r'%s (\d+)' % 'Output', out, re.M)
have_tx_rate = None
if match:
have_tx_rate = match.group(1)
if have_tx_rate is None or not conditional(want_tx_rate, have_tx_rate.strip(), cast=int):
failed_conditions.append('tx_rate ' + want_tx_rate)
if want_rx_rate:
match = re.search(r'%s (\d+)' % 'Input', out, re.M)
have_rx_rate = None
if match:
have_rx_rate = match.group(1)
if have_rx_rate is None or not conditional(want_rx_rate, have_rx_rate.strip(), cast=int):
failed_conditions.append('rx_rate ' + want_rx_rate)
if want_neighbors:
have_host = []
have_port = []
if have_neighbors is None:
rc, have_neighbors, err = exec_command(module, 'show lldp neighbors detail')
if rc != 0:
module.fail_json(msg=to_text(err, errors='surrogate_then_replace'), command=command, rc=rc)
if have_neighbors:
lines = have_neighbors.strip().split('Local Interface: ')
short_name = w['name'].replace('Ethernet', 'Eth')
for line in lines:
field = line.split('\n')
if field[0].split('(')[0].strip() == short_name:
for item in field:
if item.startswith('System Name:'):
have_host.append(item.split(':')[1].strip())
if item.startswith('Remote Interface:'):
have_port.append(item.split(':')[1].split('(')[0].strip())
for item in want_neighbors:
host = item.get('host')
port = item.get('port')
if host and host not in have_host:
failed_conditions.append('host ' + host)
if port and port not in have_port:
failed_conditions.append('port ' + port)
return failed_conditions
def main():
""" main entry point for module execution
"""
neighbors_spec = dict(
host=dict(),
port=dict()
)
element_spec = dict(
name=dict(),
description=dict(),
speed=dict(),
mtu=dict(),
enabled=dict(default=True, type='bool'),
tx_rate=dict(),
rx_rate=dict(),
neighbors=dict(type='list', elements='dict', options=neighbors_spec),
delay=dict(default=10, type='int'),
state=dict(default='present',
choices=['present', 'absent', 'up', 'down'])
)
aggregate_spec = deepcopy(element_spec)
aggregate_spec['name'] = dict(required=True)
# remove default in aggregate spec, to handle common arguments
remove_default_spec(aggregate_spec)
argument_spec = dict(
aggregate=dict(type='list', elements='dict', options=aggregate_spec),
)
argument_spec.update(element_spec)
required_one_of = [['name', 'aggregate']]
mutually_exclusive = [['name', 'aggregate']]
module = AnsibleModule(argument_spec=argument_spec,
required_one_of=required_one_of,
mutually_exclusive=mutually_exclusive,
supports_check_mode=True)
warnings = list()
result = {'changed': False}
if warnings:
result['warnings'] = warnings
want = map_params_to_obj(module)
have = map_config_to_obj(module)
commands = map_obj_to_commands((want, have))
result['commands'] = commands
if commands:
if not module.check_mode:
load_config(module, commands)
result['changed'] = True
failed_conditions = check_declarative_intent_params(module, want, result)
if failed_conditions:
msg = 'One or more conditional statements have not been satisfied'
module.fail_json(msg=msg, failed_conditions=failed_conditions, changed=result['changed'])
module.exit_json(**result)
if __name__ == '__main__':
main()
|
Cisco-Talos/pyrebox
|
refs/heads/master
|
volatility/volatility/plugins/gui/vtypes/win10.py
|
3
|
# Volatility
# Copyright (C) 2007-2017 Volatility Foundation
# Copyright (C) 2017 Michael Hale Ligh <michael.ligh@mnin.org>
#
# This file is part of Volatility.
#
# Volatility is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Volatility is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Volatility. If not, see <http://www.gnu.org/licenses/>.
#
import volatility.obj as obj
class Win10x86_Gui(obj.ProfileModification):
before = ["XP2003x86BaseVTypes", "Win32Kx86VTypes", "AtomTablex86Overlay", "Win32KCoreClasses", "Win8x86Gui"]
conditions = {'os': lambda x: x == 'windows',
'memory_model': lambda x: x == '32bit',
'major': lambda x: x == 6,
'minor': lambda x: x == 4}
def modification(self, profile):
build = profile.metadata.get('build', 0)
if build >= 15063:
profile.merge_overlay({
'tagDESKTOP': [None, {
'rpdeskNext': [0x10, ['pointer', ['tagDESKTOP']]],
'rpwinstaParent': [0x14, ['pointer', ['tagWINDOWSTATION']]],
'pheapDesktop': [0x40, ['pointer', ['tagWIN32HEAP']]],
'PtiList': [0x5c, ['_LIST_ENTRY']],
}],
'tagTHREADINFO': [None, {
'ppi': [0xe0, ['pointer', ['tagPROCESSINFO']]],
'PtiLink': [0x188, ['_LIST_ENTRY']],
}],
'tagWND': [None, {
'spwndNext': [0x34, ['pointer', ['tagWND']]],
'spwndPrev': [0x38, ['pointer', ['tagWND']]],
'spwndParent': [0x3c, ['pointer', ['tagWND']]],
'spwndChild': [0x40, ['pointer', ['tagWND']]],
'lpfnWndProc': [0x68, ['pointer', ['void']]], #?
'pcls': [0x6c, ['pointer', ['tagCLS']]], #?
'strName': [0x8c, ['_LARGE_UNICODE_STRING']], #?
}],
})
class Win10x64_Gui(obj.ProfileModification):
before = ["Win32KCoreClasses", "Win8x64Gui"]
conditions = {'os': lambda x: x == 'windows',
'memory_model': lambda x: x == '64bit',
'major': lambda x: x == 6,
'minor': lambda x: x == 4}
def modification(self, profile):
build = profile.metadata.get('build', 0)
if build >= 15063:
profile.merge_overlay({
'tagDESKTOP': [None, {
'rpdeskNext': [0x20, ['pointer64', ['tagDESKTOP']]],
'rpwinstaParent': [0x28, ['pointer64', ['tagWINDOWSTATION']]],
'pheapDesktop': [0x80, ['pointer', ['tagWIN32HEAP']]],
'PtiList': [0xA8, ['_LIST_ENTRY']],
}],
'tagTHREADINFO': [None, {
'ppi': [0x190, ['pointer', ['tagPROCESSINFO']]],
# zzzSetDesktop
# mov [rbx+1B8h], rax
# lea rax, [rbx+2C8h]
# lea rcx, [rdi+0A8h] => int 29h
'PtiLink': [0x2c8, ['_LIST_ENTRY']],
}],
'tagWND': [None, {
'spwndNext': [0x58, ['pointer64', ['tagWND']]],
'spwndPrev': [0x60, ['pointer64', ['tagWND']]],
'spwndParent': [0x68, ['pointer64', ['tagWND']]],
'spwndChild': [0x70, ['pointer64', ['tagWND']]],
'lpfnWndProc': [0xa0, ['pointer64', ['void']]],
'pcls': [0xa8, ['pointer64', ['tagCLS']]],
'strName': [0xe8, ['_LARGE_UNICODE_STRING']],
}],
})
|
2014c2g14/2014c2
|
refs/heads/master
|
exts/w2/static/Brython2.0.0-20140209-164925/Lib/xml/etree/ElementInclude.py
|
784
|
#
# ElementTree
# $Id: ElementInclude.py 3375 2008-02-13 08:05:08Z fredrik $
#
# limited xinclude support for element trees
#
# history:
# 2003-08-15 fl created
# 2003-11-14 fl fixed default loader
#
# Copyright (c) 2003-2004 by Fredrik Lundh. All rights reserved.
#
# fredrik@pythonware.com
# http://www.pythonware.com
#
# --------------------------------------------------------------------
# The ElementTree toolkit is
#
# Copyright (c) 1999-2008 by Fredrik Lundh
#
# By obtaining, using, and/or copying this software and/or its
# associated documentation, you agree that you have read, understood,
# and will comply with the following terms and conditions:
#
# Permission to use, copy, modify, and distribute this software and
# its associated documentation for any purpose and without fee is
# hereby granted, provided that the above copyright notice appears in
# all copies, and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# Secret Labs AB or the author not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD
# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT-
# ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR
# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THIS SOFTWARE.
# --------------------------------------------------------------------
# Licensed to PSF under a Contributor Agreement.
# See http://www.python.org/psf/license for licensing details.
##
# Limited XInclude support for the ElementTree package.
##
import copy
from . import ElementTree
XINCLUDE = "{http://www.w3.org/2001/XInclude}"
XINCLUDE_INCLUDE = XINCLUDE + "include"
XINCLUDE_FALLBACK = XINCLUDE + "fallback"
##
# Fatal include error.
class FatalIncludeError(SyntaxError):
pass
##
# Default loader. This loader reads an included resource from disk.
#
# @param href Resource reference.
# @param parse Parse mode. Either "xml" or "text".
# @param encoding Optional text encoding (UTF-8 by default for "text").
# @return The expanded resource. If the parse mode is "xml", this
# is an ElementTree instance. If the parse mode is "text", this
# is a Unicode string. If the loader fails, it can return None
# or raise an IOError exception.
# @throws IOError If the loader fails to load the resource.
def default_loader(href, parse, encoding=None):
if parse == "xml":
file = open(href, 'rb')
data = ElementTree.parse(file).getroot()
else:
if not encoding:
encoding = 'UTF-8'
file = open(href, 'r', encoding=encoding)
data = file.read()
file.close()
return data
##
# Expand XInclude directives.
#
# @param elem Root element.
# @param loader Optional resource loader. If omitted, it defaults
# to {@link default_loader}. If given, it should be a callable
# that implements the same interface as <b>default_loader</b>.
# @throws FatalIncludeError If the function fails to include a given
# resource, or if the tree contains malformed XInclude elements.
# @throws IOError If the function fails to load a given resource.
def include(elem, loader=None):
if loader is None:
loader = default_loader
# look for xinclude elements
i = 0
while i < len(elem):
e = elem[i]
if e.tag == XINCLUDE_INCLUDE:
# process xinclude directive
href = e.get("href")
parse = e.get("parse", "xml")
if parse == "xml":
node = loader(href, parse)
if node is None:
raise FatalIncludeError(
"cannot load %r as %r" % (href, parse)
)
node = copy.copy(node)
if e.tail:
node.tail = (node.tail or "") + e.tail
elem[i] = node
elif parse == "text":
text = loader(href, parse, e.get("encoding"))
if text is None:
raise FatalIncludeError(
"cannot load %r as %r" % (href, parse)
)
if i:
node = elem[i-1]
node.tail = (node.tail or "") + text + (e.tail or "")
else:
elem.text = (elem.text or "") + text + (e.tail or "")
del elem[i]
continue
else:
raise FatalIncludeError(
"unknown parse type in xi:include tag (%r)" % parse
)
elif e.tag == XINCLUDE_FALLBACK:
raise FatalIncludeError(
"xi:fallback tag must be child of xi:include (%r)" % e.tag
)
else:
include(e, loader)
i = i + 1
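##
# Usage sketch (added, hedged). A minimal example of expanding xi:include
# directives in a parsed tree; "document.xml" is a hypothetical file name.
#
#   from xml.etree import ElementTree, ElementInclude
#   tree = ElementTree.parse("document.xml")
#   ElementInclude.include(tree.getroot())   # expands xi:include elements in place
#   tree.write("expanded.xml")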
|
jonyroda97/redbot-amigosprovaveis
|
refs/heads/develop
|
lib/pip/_vendor/chardet/utf8prober.py
|
290
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .charsetprober import CharSetProber
from .enums import ProbingState, MachineState
from .codingstatemachine import CodingStateMachine
from .mbcssm import UTF8_SM_MODEL
class UTF8Prober(CharSetProber):
ONE_CHAR_PROB = 0.5
def __init__(self):
super(UTF8Prober, self).__init__()
self.coding_sm = CodingStateMachine(UTF8_SM_MODEL)
self._num_mb_chars = None
self.reset()
def reset(self):
super(UTF8Prober, self).reset()
self.coding_sm.reset()
self._num_mb_chars = 0
@property
def charset_name(self):
return "utf-8"
@property
def language(self):
return ""
def feed(self, byte_str):
for c in byte_str:
coding_state = self.coding_sm.next_state(c)
if coding_state == MachineState.ERROR:
self._state = ProbingState.NOT_ME
break
elif coding_state == MachineState.ITS_ME:
self._state = ProbingState.FOUND_IT
break
elif coding_state == MachineState.START:
if self.coding_sm.get_current_charlen() >= 2:
self._num_mb_chars += 1
if self.state == ProbingState.DETECTING:
if self.get_confidence() > self.SHORTCUT_THRESHOLD:
self._state = ProbingState.FOUND_IT
return self.state
def get_confidence(self):
unlike = 0.99
if self._num_mb_chars < 6:
unlike *= self.ONE_CHAR_PROB ** self._num_mb_chars
return 1.0 - unlike
else:
return unlike
|
laslabs/odoo-connector-carepoint
|
refs/heads/release/10.0
|
connector_carepoint/__init__.py
|
1
|
# -*- coding: utf-8 -*-
# Copyright 2015-2016 LasLabs Inc.
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from . import models
from . import backend
from . import connector
from . import consumer
from . import related_action
|
shubhamdhama/zulip
|
refs/heads/master
|
zerver/webhooks/buildbot/view.py
|
4
|
from typing import Any, Dict
from django.http import HttpRequest, HttpResponse
from zerver.decorator import api_key_only_webhook_view
from zerver.lib.request import REQ, has_request_variables
from zerver.lib.response import json_success
from zerver.lib.webhooks.common import check_send_webhook_message
from zerver.models import UserProfile
@api_key_only_webhook_view('Buildbot')
@has_request_variables
def api_buildbot_webhook(request: HttpRequest, user_profile: UserProfile,
payload: Dict[str, Any]=REQ(argument_type='body')) -> HttpResponse:
topic = payload["project"]
if not topic:
topic = "general"
body = get_message(payload)
check_send_webhook_message(request, user_profile, topic, body)
return json_success()
def get_message(payload: Dict[str, Any]) -> str:
if "results" in payload:
# See http://docs.buildbot.net/latest/developer/results.html
results = ("success", "warnings", "failure", "skipped",
"exception", "retry", "cancelled")
status = results[payload["results"]]
if payload["event"] == "new":
body = "Build [#{id}]({url}) for **{name}** started.".format(
id=payload["buildid"],
name=payload["buildername"],
url=payload["url"],
)
elif payload["event"] == "finished":
body = "Build [#{id}]({url}) (result: {status}) for **{name}** finished.".format(
id=payload["buildid"],
name=payload["buildername"],
url=payload["url"],
status=status,
)
return body
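# Illustrative only (not part of the webhook handler above, and it assumes the
# module's Django imports are available): a minimal payload carrying just the
# fields get_message() reads. The values are made up for the example; a real
# Buildbot HTTP reporter payload contains many more keys.
if __name__ == '__main__':
    sample_payload = {
        "project": "demo-project",
        "event": "finished",
        "results": 0,  # index into the results tuple above, i.e. "success"
        "buildid": 42,
        "buildername": "linux-builder",
        "url": "https://buildbot.example.com/#builders/1/builds/42",
    }
    print(get_message(sample_payload))
    # Build [#42](https://buildbot.example.com/#builders/1/builds/42) (result: success) for **linux-builder** finished.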
|
manisandro/QGIS
|
refs/heads/master
|
python/plugins/processing/algs/saga/versioncheck.py
|
22
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
versioncheck.py
---------------------
Date : December 2014
Copyright : (C) 2014 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'December 2014'
__copyright__ = '(C) 2014, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
import subprocess
def getAlgParams(f):
params = []
booleanparams = []
numparams = []
with open(f) as lines:
line = lines.readline().strip('\n').strip()
name = line
if '|' in name:
tokens = name.split('|')
cmdname = tokens[1]
else:
cmdname = name
line = lines.readline().strip('\n').strip()
group = line
line = lines.readline().strip('\n').strip()
while line != '':
if line.startswith('Hardcoded'):
pass
elif line.startswith('AllowUnmatching'):
pass
elif line.startswith('Extent'):
extentParamNames = line[6:].strip().split(' ')
params.extend(["-" + p for p in extentParamNames])
else:
tokens = line.split("|")
if tokens[0] == "ParameterBoolean":
booleanparams.append("-" + tokens[1].strip())
elif tokens[0] == "ParameterNumber":
numparams.append("-" + tokens[1].strip())
else:
params.append("-" + tokens[1])
line = lines.readline().strip('\n').strip()
return cmdname, group, params, booleanparams, numparams
def testDescriptionFile(f):
usage = ""
cmdname, group, params, booleanparams, numparams = getAlgParams(f)
command = [r'd:\saga2.1.2\saga_cmd.exe', group, cmdname]
for p in params:
command.append(p)
command.append("dummy")
for p in numparams:
command.append(p)
command.append("0")
command.extend(booleanparams)
proc = subprocess.Popen(
command,
shell=True,
stdout=subprocess.PIPE,
stdin=subprocess.DEVNULL,
stderr=subprocess.STDOUT,
universal_newlines=True,
).stdout
lines = []
for line in iter(proc.readline, ''):
lines.append(line)
if "Usage" in line:
usage = line
if usage and not lines[0].startswith("_"):
# fix_print_with_import
print("-" * 50)
# fix_print_with_import
print(f + " [ERROR]")
# fix_print_with_import
print(lines)
# fix_print_with_import
print(usage)
# fix_print_with_import
print("Name in description:" + cmdname)
# fix_print_with_import
print("Parameters in description:" + str(params))
# fix_print_with_import
print("-" * 50)
print()
if __name__ == '__main__':
folder = os.path.join(os.path.dirname(__file__), "description")
for descriptionFile in os.listdir(folder):
if descriptionFile.endswith('txt'):
testDescriptionFile(os.path.join(folder, descriptionFile))
|
botswana-harvard/edc-dispatch
|
refs/heads/develop
|
edc_dispatch/urls.py
|
1
|
from django.conf.urls import patterns, url
from .views import return_items
urlpatterns = patterns('',
url(r'^return/', return_items),
url(r'^return/(?P<identifier>\w+)/', 'return_households', name='return_household'),
)
|
harshita-gupta/Harvard-FRSEM-Catalog-2016-17
|
refs/heads/master
|
flask/lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/constants.py
|
3007
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
_debug = 0
eDetecting = 0
eFoundIt = 1
eNotMe = 2
eStart = 0
eError = 1
eItsMe = 2
SHORTCUT_THRESHOLD = 0.95
|
Tithen-Firion/youtube-dl
|
refs/heads/master
|
youtube_dl/extractor/theplatform.py
|
11
|
# coding: utf-8
from __future__ import unicode_literals
import re
import time
import hmac
import binascii
import hashlib
from .once import OnceIE
from .adobepass import AdobePassIE
from ..compat import (
compat_parse_qs,
compat_urllib_parse_urlparse,
)
from ..utils import (
determine_ext,
ExtractorError,
float_or_none,
int_or_none,
sanitized_Request,
unsmuggle_url,
update_url_query,
xpath_with_ns,
mimetype2ext,
find_xpath_attr,
)
default_ns = 'http://www.w3.org/2005/SMIL21/Language'
_x = lambda p: xpath_with_ns(p, {'smil': default_ns})
class ThePlatformBaseIE(OnceIE):
def _extract_theplatform_smil(self, smil_url, video_id, note='Downloading SMIL data'):
meta = self._download_xml(
smil_url, video_id, note=note, query={'format': 'SMIL'},
headers=self.geo_verification_headers())
error_element = find_xpath_attr(meta, _x('.//smil:ref'), 'src')
if error_element is not None and error_element.attrib['src'].startswith(
'http://link.theplatform.com/s/errorFiles/Unavailable.'):
raise ExtractorError(error_element.attrib['abstract'], expected=True)
smil_formats = self._parse_smil_formats(
meta, smil_url, video_id, namespace=default_ns,
# the parameters are from syfy.com, other sites may use others,
# they also work for nbc.com
f4m_params={'g': 'UXWGVKRWHFSP', 'hdcore': '3.0.3'},
transform_rtmp_url=lambda streamer, src: (streamer, 'mp4:' + src))
formats = []
for _format in smil_formats:
if OnceIE.suitable(_format['url']):
formats.extend(self._extract_once_formats(_format['url']))
else:
media_url = _format['url']
if determine_ext(media_url) == 'm3u8':
hdnea2 = self._get_cookies(media_url).get('hdnea2')
if hdnea2:
_format['url'] = update_url_query(media_url, {'hdnea3': hdnea2.value})
formats.append(_format)
subtitles = self._parse_smil_subtitles(meta, default_ns)
return formats, subtitles
def _download_theplatform_metadata(self, path, video_id):
info_url = 'http://link.theplatform.com/s/%s?format=preview' % path
return self._download_json(info_url, video_id)
def _parse_theplatform_metadata(self, info):
subtitles = {}
captions = info.get('captions')
if isinstance(captions, list):
for caption in captions:
lang, src, mime = caption.get('lang', 'en'), caption.get('src'), caption.get('type')
subtitles.setdefault(lang, []).append({
'ext': mimetype2ext(mime),
'url': src,
})
return {
'title': info['title'],
'subtitles': subtitles,
'description': info['description'],
'thumbnail': info['defaultThumbnailUrl'],
'duration': int_or_none(info.get('duration'), 1000),
'timestamp': int_or_none(info.get('pubDate'), 1000) or None,
'uploader': info.get('billingCode'),
}
def _extract_theplatform_metadata(self, path, video_id):
info = self._download_theplatform_metadata(path, video_id)
return self._parse_theplatform_metadata(info)
class ThePlatformIE(ThePlatformBaseIE, AdobePassIE):
_VALID_URL = r'''(?x)
(?:https?://(?:link|player)\.theplatform\.com/[sp]/(?P<provider_id>[^/]+)/
(?:(?:(?:[^/]+/)+select/)?(?P<media>media/(?:guid/\d+/)?)?|(?P<config>(?:[^/\?]+/(?:swf|config)|onsite)/select/))?
|theplatform:)(?P<id>[^/\?&]+)'''
_TESTS = [{
# from http://www.metacafe.com/watch/cb-e9I_cZgTgIPd/blackberrys_big_bold_z30/
'url': 'http://link.theplatform.com/s/dJ5BDC/e9I_cZgTgIPd/meta.smil?format=smil&Tracking=true&mbr=true',
'info_dict': {
'id': 'e9I_cZgTgIPd',
'ext': 'flv',
'title': 'Blackberry\'s big, bold Z30',
'description': 'The Z30 is Blackberry\'s biggest, baddest mobile messaging device yet.',
'duration': 247,
'timestamp': 1383239700,
'upload_date': '20131031',
'uploader': 'CBSI-NEW',
},
'params': {
# rtmp download
'skip_download': True,
},
'skip': '404 Not Found',
}, {
# from http://www.cnet.com/videos/tesla-model-s-a-second-step-towards-a-cleaner-motoring-future/
'url': 'http://link.theplatform.com/s/kYEXFC/22d_qsQ6MIRT',
'info_dict': {
'id': '22d_qsQ6MIRT',
'ext': 'flv',
'description': 'md5:ac330c9258c04f9d7512cf26b9595409',
'title': 'Tesla Model S: A second step towards a cleaner motoring future',
'timestamp': 1426176191,
'upload_date': '20150312',
'uploader': 'CBSI-NEW',
},
'params': {
# rtmp download
'skip_download': True,
}
}, {
'url': 'https://player.theplatform.com/p/D6x-PC/pulse_preview/embed/select/media/yMBg9E8KFxZD',
'info_dict': {
'id': 'yMBg9E8KFxZD',
'ext': 'mp4',
'description': 'md5:644ad9188d655b742f942bf2e06b002d',
'title': 'HIGHLIGHTS: USA bag first ever series Cup win',
'uploader': 'EGSM',
}
}, {
'url': 'http://player.theplatform.com/p/NnzsPC/widget/select/media/4Y0TlYUr_ZT7',
'only_matching': True,
}, {
'url': 'http://player.theplatform.com/p/2E2eJC/nbcNewsOffsite?guid=tdy_or_siri_150701',
'md5': 'fb96bb3d85118930a5b055783a3bd992',
'info_dict': {
'id': 'tdy_or_siri_150701',
'ext': 'mp4',
'title': 'iPhone Siri’s sassy response to a math question has people talking',
'description': 'md5:a565d1deadd5086f3331d57298ec6333',
'duration': 83.0,
'thumbnail': r're:^https?://.*\.jpg$',
'timestamp': 1435752600,
'upload_date': '20150701',
'uploader': 'NBCU-NEWS',
},
}, {
# From http://www.nbc.com/the-blacklist/video/sir-crispin-crandall/2928790?onid=137781#vc137781=1
# geo-restricted (US), HLS encrypted with AES-128
'url': 'http://player.theplatform.com/p/NnzsPC/onsite_universal/select/media/guid/2410887629/2928790?fwsitesection=nbc_the_blacklist_video_library&autoPlay=true&carouselID=137781',
'only_matching': True,
}]
@classmethod
def _extract_urls(cls, webpage):
m = re.search(
r'''(?x)
<meta\s+
property=(["'])(?:og:video(?::(?:secure_)?url)?|twitter:player)\1\s+
content=(["'])(?P<url>https?://player\.theplatform\.com/p/.+?)\2
''', webpage)
if m:
return [m.group('url')]
# Are whitespaces ignored in URLs?
# https://github.com/rg3/youtube-dl/issues/12044
matches = re.findall(
r'(?s)<(?:iframe|script)[^>]+src=(["\'])((?:https?:)?//player\.theplatform\.com/p/.+?)\1', webpage)
if matches:
return [re.sub(r'\s', '', list(zip(*matches))[1][0])]
@staticmethod
def _sign_url(url, sig_key, sig_secret, life=600, include_qs=False):
flags = '10' if include_qs else '00'
expiration_date = '%x' % (int(time.time()) + life)
def str_to_hex(str):
return binascii.b2a_hex(str.encode('ascii')).decode('ascii')
def hex_to_bytes(hex):
return binascii.a2b_hex(hex.encode('ascii'))
relative_path = re.match(r'https?://link.theplatform.com/s/([^?]+)', url).group(1)
clear_text = hex_to_bytes(flags + expiration_date + str_to_hex(relative_path))
checksum = hmac.new(sig_key.encode('ascii'), clear_text, hashlib.sha1).hexdigest()
sig = flags + expiration_date + checksum + str_to_hex(sig_secret)
return '%s&sig=%s' % (url, sig)
def _real_extract(self, url):
url, smuggled_data = unsmuggle_url(url, {})
mobj = re.match(self._VALID_URL, url)
provider_id = mobj.group('provider_id')
video_id = mobj.group('id')
if not provider_id:
provider_id = 'dJ5BDC'
path = provider_id + '/'
if mobj.group('media'):
path += mobj.group('media')
path += video_id
qs_dict = compat_parse_qs(compat_urllib_parse_urlparse(url).query)
if 'guid' in qs_dict:
webpage = self._download_webpage(url, video_id)
scripts = re.findall(r'<script[^>]+src="([^"]+)"', webpage)
feed_id = None
# The feed id is usually located in the last script.
# There seems to be no obvious pattern for the relevant script filename,
# so try them one by one.
for script in reversed(scripts):
feed_script = self._download_webpage(
self._proto_relative_url(script, 'http:'),
video_id, 'Downloading feed script')
feed_id = self._search_regex(
r'defaultFeedId\s*:\s*"([^"]+)"', feed_script,
'default feed id', default=None)
if feed_id is not None:
break
if feed_id is None:
raise ExtractorError('Unable to find feed id')
return self.url_result('http://feed.theplatform.com/f/%s/%s?byGuid=%s' % (
provider_id, feed_id, qs_dict['guid'][0]))
if smuggled_data.get('force_smil_url', False):
smil_url = url
# Explicitly specified SMIL (see https://github.com/rg3/youtube-dl/issues/7385)
elif '/guid/' in url:
headers = {}
source_url = smuggled_data.get('source_url')
if source_url:
headers['Referer'] = source_url
request = sanitized_Request(url, headers=headers)
webpage = self._download_webpage(request, video_id)
smil_url = self._search_regex(
r'<link[^>]+href=(["\'])(?P<url>.+?)\1[^>]+type=["\']application/smil\+xml',
webpage, 'smil url', group='url')
path = self._search_regex(
r'link\.theplatform\.com/s/((?:[^/?#&]+/)+[^/?#&]+)', smil_url, 'path')
smil_url += ('?' if '?' not in smil_url else '&') + 'formats=m3u,mpeg4'
elif mobj.group('config'):
config_url = url + '&form=json'
config_url = config_url.replace('swf/', 'config/')
config_url = config_url.replace('onsite/', 'onsite/config/')
config = self._download_json(config_url, video_id, 'Downloading config')
if 'releaseUrl' in config:
release_url = config['releaseUrl']
else:
release_url = 'http://link.theplatform.com/s/%s?mbr=true' % path
smil_url = release_url + '&formats=MPEG4&manifest=f4m'
else:
smil_url = 'http://link.theplatform.com/s/%s?mbr=true' % path
sig = smuggled_data.get('sig')
if sig:
smil_url = self._sign_url(smil_url, sig['key'], sig['secret'])
formats, subtitles = self._extract_theplatform_smil(smil_url, video_id)
self._sort_formats(formats)
ret = self._extract_theplatform_metadata(path, video_id)
combined_subtitles = self._merge_subtitles(ret.get('subtitles', {}), subtitles)
ret.update({
'id': video_id,
'formats': formats,
'subtitles': combined_subtitles,
})
return ret
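# Illustrative only (dummy credentials, not a working signature, and the module's
# relative imports must be resolvable): _sign_url() above appends a "sig" parameter
# built from the signature flags, a hex expiry timestamp, an HMAC-SHA1 checksum over
# those plus the URL's relative path, and the hex-encoded secret.
if __name__ == '__main__':
    demo_url = 'http://link.theplatform.com/s/provider/media/demo?mbr=true'
    print(ThePlatformIE._sign_url(demo_url, sig_key='0123456789abcdef', sig_secret='not-a-real-secret'))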
class ThePlatformFeedIE(ThePlatformBaseIE):
_URL_TEMPLATE = '%s//feed.theplatform.com/f/%s/%s?form=json&%s'
_VALID_URL = r'https?://feed\.theplatform\.com/f/(?P<provider_id>[^/]+)/(?P<feed_id>[^?/]+)\?(?:[^&]+&)*(?P<filter>by(?:Gui|I)d=(?P<id>[\w-]+))'
_TESTS = [{
# From http://player.theplatform.com/p/7wvmTC/MSNBCEmbeddedOffSite?guid=n_hardball_5biden_140207
'url': 'http://feed.theplatform.com/f/7wvmTC/msnbc_video-p-test?form=json&pretty=true&range=-40&byGuid=n_hardball_5biden_140207',
'md5': '6e32495b5073ab414471b615c5ded394',
'info_dict': {
'id': 'n_hardball_5biden_140207',
'ext': 'mp4',
'title': 'The Biden factor: will Joe run in 2016?',
'description': 'Could Vice President Joe Biden be preparing a 2016 campaign? Mark Halperin and Sam Stein weigh in.',
'thumbnail': r're:^https?://.*\.jpg$',
'upload_date': '20140208',
'timestamp': 1391824260,
'duration': 467.0,
'categories': ['MSNBC/Issues/Democrats', 'MSNBC/Issues/Elections/Election 2016'],
'uploader': 'NBCU-NEWS',
},
}]
def _extract_feed_info(self, provider_id, feed_id, filter_query, video_id, custom_fields=None, asset_types_query={}, account_id=None):
real_url = self._URL_TEMPLATE % (self.http_scheme(), provider_id, feed_id, filter_query)
entry = self._download_json(real_url, video_id)['entries'][0]
main_smil_url = 'http://link.theplatform.com/s/%s/media/guid/%d/%s' % (provider_id, account_id, entry['guid']) if account_id else None
formats = []
subtitles = {}
first_video_id = None
duration = None
asset_types = []
for item in entry['media$content']:
smil_url = item['plfile$url']
cur_video_id = ThePlatformIE._match_id(smil_url)
if first_video_id is None:
first_video_id = cur_video_id
duration = float_or_none(item.get('plfile$duration'))
for asset_type in item['plfile$assetTypes']:
if asset_type in asset_types:
continue
asset_types.append(asset_type)
query = {
'mbr': 'true',
'formats': item['plfile$format'],
'assetTypes': asset_type,
}
if asset_type in asset_types_query:
query.update(asset_types_query[asset_type])
cur_formats, cur_subtitles = self._extract_theplatform_smil(update_url_query(
main_smil_url or smil_url, query), video_id, 'Downloading SMIL data for %s' % asset_type)
formats.extend(cur_formats)
subtitles = self._merge_subtitles(subtitles, cur_subtitles)
self._sort_formats(formats)
thumbnails = [{
'url': thumbnail['plfile$url'],
'width': int_or_none(thumbnail.get('plfile$width')),
'height': int_or_none(thumbnail.get('plfile$height')),
} for thumbnail in entry.get('media$thumbnails', [])]
timestamp = int_or_none(entry.get('media$availableDate'), scale=1000)
categories = [item['media$name'] for item in entry.get('media$categories', [])]
ret = self._extract_theplatform_metadata('%s/%s' % (provider_id, first_video_id), video_id)
subtitles = self._merge_subtitles(subtitles, ret['subtitles'])
ret.update({
'id': video_id,
'formats': formats,
'subtitles': subtitles,
'thumbnails': thumbnails,
'duration': duration,
'timestamp': timestamp,
'categories': categories,
})
if custom_fields:
ret.update(custom_fields(entry))
return ret
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
provider_id = mobj.group('provider_id')
feed_id = mobj.group('feed_id')
filter_query = mobj.group('filter')
return self._extract_feed_info(provider_id, feed_id, filter_query, video_id)
|
neopenx/Dragon
|
refs/heads/master
|
Dragon/python/dragon/vm/theano/configdefaults.py
|
1
|
# --------------------------------------------------------
# Theano @ Dragon
# Copyright(c) 2017 SeetaTech
# Written by Ting Pan
# --------------------------------------------------------
class TheanoConfig(object):
floatX = 'float32'
config = TheanoConfig()
|
opensourcechipspark/platform_external_chromium_org
|
refs/heads/master
|
chrome/test/functional/media/media_basic_playback.py
|
65
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Basic playback test. Checks playback, seek, and replay based on events.
This test uses the bear videos from the test matrix in h264, vp8, and theora
formats.
"""
import logging
import os
import pyauto_media
import pyauto
# HTML test path; relative to src/chrome/test/data.
_TEST_HTML_PATH = os.path.join('media', 'html', 'media_basic_playback.html')
# Test videos to play. TODO(dalecurtis): Convert to text matrix parser when we
# have more test videos in the matrix. Code already written, see patch here:
# https://chromiumcodereview.appspot.com/9290008/#ps12
_TEST_VIDEOS = [
pyauto.PyUITest.GetFileURLForContentDataPath('media', name)
for name in ['bear.mp4', 'bear.ogv', 'bear.webm', 'bear_silent.mp4',
'bear_silent.ogv', 'bear_silent.webm']]
# Expected events for the first iteration and every iteration thereafter.
_EXPECTED_EVENTS_0 = [('ended', 2), ('playing', 2), ('seeked', 1),
('suspend', 1)]
_EXPECTED_EVENTS_n = [('abort', 1), ('emptied', 1)] + _EXPECTED_EVENTS_0
class MediaBasicPlaybackTest(pyauto.PyUITest):
"""PyAuto test container. See file doc string for more information."""
def testBasicPlaybackMatrix(self):
"""Launches HTML test which plays each video until end, seeks, and replays.
Specifically ensures that after the above sequence of events, the following
are true:
1. The first video has only 2x playing, 2x ended, and 1x seeked events.
2. Each subsequent video additionally has 1x abort and 1x emptied due to
switching of the src attribute.
3. video.currentTime == video.duration for each video.
See the HTML file at _TEST_HTML_PATH for more information.
"""
self.NavigateToURL(self.GetFileURLForDataPath(_TEST_HTML_PATH))
for i, media in enumerate(_TEST_VIDEOS):
logging.debug('Running basic playback test for %s', media)
# Block until the test finishes and notifies us. Upon return the value of
# video.currentTime == video.duration is provided.
try:
self.assertTrue(self.ExecuteJavascript("startTest('%s');" % media))
# PyAuto has trouble with arrays, so convert to string prior to request.
events = self.GetDOMValue("events.join(',')").split(',')
counts = [(item, events.count(item)) for item in sorted(set(events))]
# The first loop will not have the abort and emptied events triggered by
# changing the video src.
if (i == 0):
self.assertEqual(counts, _EXPECTED_EVENTS_0)
else:
self.assertEqual(counts, _EXPECTED_EVENTS_n)
except:
logging.debug(
'Test failed with events: %s', self.GetDOMValue("events.join(',')"))
raise
if __name__ == '__main__':
pyauto_media.Main()
|
bluestar-solutions/account-invoicing
|
refs/heads/9.0
|
account_invoice_uom/__openerp__.py
|
5
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2014-15 Agile Business Group sagl (<http://www.agilebg.com>)
# Author: Lorenzo Battistini <lorenzo.battistini@agilebg.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
"name": "Unit of measure for invoices",
"version": "8.0.1.0.0",
'author': "Agile Business Group, Odoo Community Association (OCA)",
"website": "http://www.agilebg.com",
'license': 'AGPL-3',
"category": "Account",
"depends": [
'sale_stock',
'stock_picking_invoice_link',
],
"demo": [],
"data": [
'account_invoice_line_view.xml',
],
'test': [
'test/account_invoice_uom.yml',
],
'installable': False,
}
|
Sofcom/treeio
|
refs/heads/2.0
|
treeio/core/rss.py
|
3
|
# encoding: utf-8
# Copyright 2011 Tree.io Limited
# This file is part of Treeio.
# License www.tree.io/license
"""
Global RSS Framework
"""
from django.contrib.syndication.views import Feed
from django.contrib.sites.models import RequestSite
from treeio.core.models import Object, UpdateRecord, User
import hashlib
import random
class ObjectFeed(Feed):
"Generic RSS class"
def __init__(self, title, link, description, objects, *args, **kwargs):
self.title = title
self.link = link
self.description = description
self.key = ''
self.objects = objects
super(ObjectFeed, self).__init__(*args, **kwargs)
def __call__(self, request, *args, **kwargs):
"Generates response"
self.site_url = 'http://' + RequestSite(request).domain
self.link = self.site_url + self.link
response = super(ObjectFeed, self).__call__(request, *args, **kwargs)
# Dirty hack for "example.com" - I hate it too, but it works (in contrast to all other solutions)
# TODO: proper workaround for "example.com" in URLs
# P.S. worship Cthulhu before you attempt this
response.content = response.content.replace(
'http://example.com', self.site_url)
return response
def get_object(self, request, *args, **kwargs):
"Returns feed objects"
return self.objects[:50]
def items(self, obj):
"Returns a single object"
return obj
def item_title(self, obj):
"Returns object title"
if isinstance(obj, Object):
return obj.creator
elif isinstance(obj, UpdateRecord):
return obj.author
def item_pubdate(self, obj):
"Returns object's date_created"
return obj.date_created
def item_description(self, obj):
"Returns object's body, details or full message"
if isinstance(obj, Object):
if obj.body:
return obj.body
else:
return obj.details
elif isinstance(obj, UpdateRecord):
body = ''
for object in obj.about.all():
body += '<a href="' + self.site_url + \
object.get_absolute_url(
) + '">' + unicode(object) + ' (' + object.get_human_type() + ')</a><br />'
body += obj.get_full_message()
return body
def item_link(self, obj):
"Returns object's full url"
if isinstance(obj, Object):
return self.site_url + obj.get_absolute_url()
elif isinstance(obj, UpdateRecord):
# link must be unique
return self.link + '?' + str(random.random())
def verify_secret_key(request):
"Verifies secret key for a request"
if request.user.username:
# always allow authenticated users
return True
else:
key = request.GET['secret']
user_id, secret = key.split('.', 1)
try:
profile = User.objects.get(pk=user_id)
except:
return False
if key == get_secret_key(request, profile):
request.user = profile.user
return True
return False
def get_secret_key(request, profile=None):
"Generates secret key for a request in RSS format"
if not profile:
if request.user.username:
profile = request.user.profile
if profile:
params = request.GET.copy()
if 'secret' in params:
del params['secret']
hash = hashlib.sha224()
hash.update(unicode(params))
hash.update(unicode(profile.id))
hash.update(unicode(profile.user.date_joined))
key = unicode(profile.id) + '.' + hash.hexdigest()
return key
return ''
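# Standalone sketch (illustrative values only) of the key format produced by
# get_secret_key() above: "<profile id>.<sha224 hex digest>", where the digest
# covers the request parameters, the profile id and the account creation date.
if __name__ == '__main__':
    demo_params, demo_profile_id, demo_date_joined = '<QueryDict: {}>', 7, '2011-06-01 12:00:00'
    digest = hashlib.sha224()
    for part in (demo_params, str(demo_profile_id), demo_date_joined):
        digest.update(part.encode('utf-8'))
    print('{0}.{1}'.format(demo_profile_id, digest.hexdigest()))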
|
attakei/readthedocs-oauth
|
refs/heads/master
|
readthedocs/rtd_tests/tests/test_project.py
|
29
|
import json
from django.test import TestCase
from readthedocs.builds.constants import LATEST
from readthedocs.projects.models import Project
from rest_framework.reverse import reverse
from django_dynamic_fixture import get
from readthedocs.restapi.serializers import ProjectSerializer
from readthedocs.rtd_tests.mocks.paths import fake_paths_by_regex
class TestProject(TestCase):
fixtures = ["eric", "test_data"]
def setUp(self):
self.client.login(username='eric', password='test')
self.pip = Project.objects.get(slug='pip')
def test_valid_versions(self):
r = self.client.get('/api/v2/project/6/valid_versions/', {})
resp = json.loads(r.content)
self.assertEqual(r.status_code, 200)
self.assertEqual(resp['flat'][0], '0.8')
self.assertEqual(resp['flat'][1], '0.8.1')
def test_subprojects(self):
r = self.client.get('/api/v2/project/6/subprojects/', {})
resp = json.loads(r.content)
self.assertEqual(r.status_code, 200)
self.assertEqual(resp['subprojects'][0]['id'], 23)
def test_translations(self):
main_project = get(Project)
# Create translation of ``main_project``.
get(Project, main_language_project=main_project)
url = reverse('project-translations', [main_project.id])
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
translation_ids_from_api = [
t['id'] for t in response.data['translations']]
translation_ids_from_orm = [
t[0] for t in main_project.translations.values_list('id')]
self.assertEqual(
set(translation_ids_from_api),
set(translation_ids_from_orm)
)
def test_token(self):
r = self.client.get('/api/v2/project/6/token/', {})
resp = json.loads(r.content)
self.assertEqual(r.status_code, 200)
self.assertEqual(resp['token'], None)
def test_has_pdf(self):
# The project has a pdf if the PDF file exists on disk.
with fake_paths_by_regex('\.pdf$'):
self.assertTrue(self.pip.has_pdf(LATEST))
# The project has no pdf if there is no file on disk.
with fake_paths_by_regex('\.pdf$', exists=False):
self.assertFalse(self.pip.has_pdf(LATEST))
def test_has_pdf_with_pdf_build_disabled(self):
# The project has NO pdf if pdf builds are disabled
self.pip.enable_pdf_build = False
with fake_paths_by_regex('\.pdf$'):
self.assertFalse(self.pip.has_pdf(LATEST))
def test_has_epub(self):
# The project has an epub if the ePub file exists on disk.
with fake_paths_by_regex('\.epub$'):
self.assertTrue(self.pip.has_epub(LATEST))
# The project has no epub if there is no file on disk.
with fake_paths_by_regex('\.epub$', exists=False):
self.assertFalse(self.pip.has_epub(LATEST))
def test_has_epub_with_epub_build_disabled(self):
# The project has NO epub if epub builds are disabled
self.pip.enable_epub_build = False
with fake_paths_by_regex('\.epub$'):
self.assertFalse(self.pip.has_epub(LATEST))
|
dymkowsk/mantid
|
refs/heads/master
|
Testing/SystemTests/tests/analysis/SANS2DMultiPeriodAddFiles.py
|
3
|
#pylint: disable=no-init
from __future__ import (absolute_import, division, print_function)
import stresstesting
from mantid.simpleapi import *
from mantid import config
from ISISCommandInterface import *
class SANS2DMultiPeriodAddFiles(stresstesting.MantidStressTest):
def requiredMemoryMB(self):
"""Requires 2.5Gb"""
return 2500
def runTest(self):
SANS2D()
Set1D()
Detector("rear-detector")
MaskFile('MASKSANS2Doptions.091A')
Gravity(True)
add_runs( ('5512', '5512') ,'SANS2D', 'nxs', lowMem=True)
#one period of a multi-period Nexus file
AssignSample('5512-add.nxs', period=7)
WavRangeReduction(2, 4, DefaultTrans)
paths = [os.path.join(config['defaultsave.directory'],'SANS2D00005512-add.nxs'),
os.path.join(config['defaultsave.directory'],'SANS2D00005512.log')]
for path in paths:
if os.path.exists(path):
os.remove(path)
def validate(self):
# Need to disable checking of the Spectra-Detector map because it isn't
# fully saved out to the nexus file (it's limited to the spectra that
# are actually present in the saved workspace).
self.disableChecking.append('SpectraMap')
self.disableChecking.append('Instrument')
self.disableChecking.append('Axes')
return '5512p7rear_1D_2.0_4.0Phi-45.0_45.0','SANS2DMultiPeriodAddFiles.nxs'
class LARMORMultiPeriodAddEventFiles(stresstesting.MantidStressTest):
def requiredMemoryMB(self):
"""Requires 2.5Gb"""
return 2500
def runTest(self):
LARMOR()
Set1D()
Detector("DetectorBench")
MaskFile('USER_LARMOR_151B_LarmorTeam_80tubes_BenchRot1p4_M4_r3699.txt')
Gravity(True)
add_runs( ('13065', '13065') ,'LARMOR', 'nxs', lowMem=True)
AssignSample('13065-add.nxs')
WavRangeReduction(2, 4, DefaultTrans)
# Clean up
to_clean = ["13065_sans_nxs",
"13065p1rear_1D_2.0_4.0_incident_monitor",
"13065p2rear_1D_2.0_4.0_incident_monitor",
"13065p3rear_1D_2.0_4.0_incident_monitor",
"13065p4rear_1D_2.0_4.0_incident_monitor",
"80tubeCalibration_1-05-2015_r3157-3160"]
for workspace in to_clean:
DeleteWorkspace(workspace)
paths = [os.path.join(config['defaultsave.directory'],'LARMOR00013065-add.nxs'),
os.path.join(config['defaultsave.directory'],'SANS2D00013065.log')] # noqa
for path in paths:
if os.path.exists(path):
os.remove(path)
def validate(self):
# Need to disable checking of the Spectra-Detector map because it isn't
# fully saved out to the nexus file (it's limited to the spectra that
# are actually present in the saved workspace).
self.disableChecking.append('SpectraMap')
self.disableChecking.append('Instrument')
self.disableChecking.append('Axes')
return "13065p1rear_1D_2.0_4.0" , "LARMORMultiPeriodAddEventFiles.nxs"
|
justhalf/weak-semi-crf-naacl2016
|
refs/heads/master
|
data/sms_corpus/students/18/fix.py
|
1
|
# -*- coding: utf-8 -*-
"""
Fix annotations for 1000451
"""
# Import statements
def main():
with open('sms_corpus.txt') as infile:
text = infile.read()
with open('sms_corpus.ann_') as infile:
anns = [ann.strip() for ann in infile.readlines()]
with open('fixed.ann', 'w') as outfile:
for ann in anns:
ident, np_start_end, string = ann.split('\t')
np, start, end = np_start_end.split(' ')
start = int(start)
end = int(end)
start_idx = start
end_idx = end
if end == 2857 or end == 26260:
end -= 1
end_idx -= 1
# if start == 20304:
# start += 1
# end += 1
# start_idx += 1
# end_idx += 1
# elif start > 20304:
# start += 2
# start_idx += 2
# end += 2
# end_idx += 2
# if start >= 21195:
# start_idx += 2
# end_idx += 2
outfile.write('{}\t{} {} {}\t{}\n'.format(ident, np, start, end, text[start_idx:end_idx]))
if __name__ == '__main__':
main()
|
mxOBS/deb-pkg_trusty_chromium-browser
|
refs/heads/master
|
tools/auto_bisect/bisect_results.py
|
9
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import math
import os
import bisect_utils
import math_utils
import source_control
import ttest
from bisect_state import RevisionState
class BisectResults(object):
"""Contains results of the completed bisect.
Properties:
error: Error message if the bisect failed.
If the error is None, the following properties are present:
warnings: List of warnings from the bisect run.
state: BisectState object from which these results were generated.
first_working_revision: First good revision.
last_broken_revision: Last bad revision.
If both of above revisions are not None, the follow properties are present:
culprit_revisions: A list of revisions, which contain the bad change
introducing the failure.
other_regressions: A list of tuples representing other regressions, which
may have occurred.
regression_size: For performance bisects, this is a relative change of
the mean metric value. For other bisects this field always contains
'zero-to-nonzero'.
regression_std_err: For performance bisects, it is a pooled standard error
for groups of good and bad runs. Not used for other bisects.
confidence: For performance bisects, it is a confidence that the good and
bad runs are distinct groups. Not used for non-performance bisects.
"""
def __init__(self, bisect_state=None, depot_registry=None, opts=None,
runtime_warnings=None, error=None, abort_reason=None):
"""Computes final bisect results after a bisect run is complete.
This constructor should be called in one of the following ways:
BisectResults(state, depot_registry, opts, runtime_warnings)
BisectResults(error=error)
The first option creates an object representing successful bisect results,
while the second creates an error result.
Args:
bisect_state: BisectState object representing latest bisect state.
depot_registry: DepotDirectoryRegistry object with information on each
repository in the bisect_state.
opts: Options passed to the bisect run.
runtime_warnings: A list of warnings from the bisect run.
error: Error message. When error is not None, other arguments are ignored.
"""
self.error = error
self.abort_reason = abort_reason
if error is not None or abort_reason is not None:
return
assert (bisect_state is not None and depot_registry is not None and
opts is not None and runtime_warnings is not None), (
'Incorrect use of the BisectResults constructor. When error is '
'None, all other arguments are required')
self.state = bisect_state
rev_states = bisect_state.GetRevisionStates()
first_working_rev, last_broken_rev = self.FindBreakingRevRange(rev_states)
self.first_working_revision = first_working_rev
self.last_broken_revision = last_broken_rev
self.warnings = runtime_warnings
self.retest_results_tot = None
self.retest_results_reverted = None
if first_working_rev is not None and last_broken_rev is not None:
statistics = self._ComputeRegressionStatistics(
rev_states, first_working_rev, last_broken_rev)
self.regression_size = statistics['regression_size']
self.regression_std_err = statistics['regression_std_err']
self.confidence = statistics['confidence']
self.culprit_revisions = self._FindCulpritRevisions(
rev_states, depot_registry, first_working_rev, last_broken_rev)
self.other_regressions = self._FindOtherRegressions(
rev_states, statistics['bad_greater_than_good'])
self.warnings += self._GetResultBasedWarnings(
self.culprit_revisions, opts, self.confidence)
elif first_working_rev is not None:
# Setting these attributes so that bisect printer does not break when the
# regression cannot be reproduced (no broken revision was found)
self.regression_size = 0
self.regression_std_err = 0
self.confidence = 0
self.culprit_revisions = []
self.other_regressions = []
def AddRetestResults(self, results_tot, results_reverted):
if not results_tot or not results_reverted:
self.warnings.append(
'Failed to re-test reverted culprit CL against ToT.')
return
confidence_params = (results_reverted[0]['values'],
results_tot[0]['values'])
confidence = BisectResults.ConfidenceScore(*confidence_params)
self.retest_results_tot = RevisionState('ToT', 'n/a', 0)
self.retest_results_tot.value = results_tot[0]
self.retest_results_reverted = RevisionState('Reverted', 'n/a', 0)
self.retest_results_reverted.value = results_reverted[0]
if confidence <= bisect_utils.HIGH_CONFIDENCE:
self.warnings.append(
'Confidence of re-test with reverted CL is not high.'
' Check that the regression hasn\'t already recovered. '
' There\'s still a chance this is a regression, as performance of'
' local builds may not match official builds.' )
@staticmethod
def _GetResultBasedWarnings(culprit_revisions, opts, confidence):
warnings = []
if len(culprit_revisions) > 1:
warnings.append('Due to build errors, regression range could '
'not be narrowed down to a single commit.')
if opts.repeat_test_count == 1:
warnings.append('Tests were only set to run once. This may '
'be insufficient to get meaningful results.')
if 0 < confidence < bisect_utils.HIGH_CONFIDENCE:
warnings.append('Confidence is not high. Try bisecting again '
'with increased repeat_count, larger range, or '
'on another metric.')
if not confidence:
warnings.append('Confidence score is 0%. Try bisecting again on '
'another platform or another metric.')
return warnings
@staticmethod
def ConfidenceScore(sample1, sample2,
accept_single_bad_or_good=False):
"""Calculates a confidence score.
This score is a percentage which represents our degree of confidence in the
proposition that the good results and bad results are distinct groups, and
their differences aren't due to chance alone.
Args:
sample1: A flat list of "good" result numbers.
sample2: A flat list of "bad" result numbers.
accept_single_bad_or_good: If True, computes confidence even if there is
just one bad or good revision, otherwise single good or bad revision
always returns 0.0 confidence. This flag will probably go away once we
implement expanding the bisect range by one more revision for such
cases.
Returns:
A number in the range [0, 100].
"""
# If there's only one item in either list, this means only one revision was
# classified good or bad; this isn't good enough evidence to make a
# decision. If an empty list was passed, that also implies zero confidence.
if not accept_single_bad_or_good:
if len(sample1) <= 1 or len(sample2) <= 1:
return 0.0
# If either of the lists is empty (this is unexpected and normally
# shouldn't happen), then we also want to return 0.
if not sample1 or not sample2:
return 0.0
# The p-value is approximately the probability of obtaining the given set
# of good and bad values just by chance.
_, _, p_value = ttest.WelchsTTest(sample1, sample2)
return 100.0 * (1.0 - p_value)
@classmethod
def _FindOtherRegressions(cls, revision_states, bad_greater_than_good):
"""Compiles a list of other possible regressions from the revision data.
Args:
revision_states: Sorted list of RevisionState objects.
bad_greater_than_good: Whether the result value at the "bad" revision is
numerically greater than the result value at the "good" revision.
Returns:
A list of [current_rev, previous_rev, confidence] for other places where
there may have been a regression.
"""
other_regressions = []
previous_values = []
prev_state = None
for revision_state in revision_states:
if revision_state.value:
current_values = revision_state.value['values']
if previous_values:
confidence_params = (sum(previous_values, []),
sum([current_values], []))
confidence = cls.ConfidenceScore(*confidence_params,
accept_single_bad_or_good=True)
mean_of_prev_runs = math_utils.Mean(sum(previous_values, []))
mean_of_current_runs = math_utils.Mean(current_values)
# Check that the potential regression is in the same direction as
# the overall regression. If the mean of the previous runs < the
# mean of the current runs, this local regression is in same
# direction.
prev_greater_than_current = mean_of_prev_runs > mean_of_current_runs
is_same_direction = (prev_greater_than_current if
bad_greater_than_good else not prev_greater_than_current)
# Only report potential regressions with high confidence.
if is_same_direction and confidence > 50:
other_regressions.append([revision_state, prev_state, confidence])
previous_values.append(current_values)
prev_state = revision_state
return other_regressions
@staticmethod
def FindBreakingRevRange(revision_states):
first_working_revision = None
last_broken_revision = None
for revision_state in revision_states:
if revision_state.passed == 1 and not first_working_revision:
first_working_revision = revision_state
if not revision_state.passed:
last_broken_revision = revision_state
return first_working_revision, last_broken_revision
@staticmethod
def _FindCulpritRevisions(revision_states, depot_registry, first_working_rev,
last_broken_rev):
cwd = os.getcwd()
culprit_revisions = []
for i in xrange(last_broken_rev.index, first_working_rev.index):
depot_registry.ChangeToDepotDir(revision_states[i].depot)
info = source_control.QueryRevisionInfo(revision_states[i].revision)
culprit_revisions.append((revision_states[i].revision, info,
revision_states[i].depot))
os.chdir(cwd)
return culprit_revisions
@classmethod
def _ComputeRegressionStatistics(cls, rev_states, first_working_rev,
last_broken_rev):
# TODO(sergiyb): We assume that value has "values" key, which may not be
# the case for failure-bisects, where there is a single value only.
broken_means = [state.value['values']
for state in rev_states[:last_broken_rev.index+1]
if state.value]
working_means = [state.value['values']
for state in rev_states[first_working_rev.index:]
if state.value]
# Flatten the lists to calculate mean of all values.
working_mean = sum(working_means, [])
broken_mean = sum(broken_means, [])
# Calculate the approximate size of the regression
mean_of_bad_runs = math_utils.Mean(broken_mean)
mean_of_good_runs = math_utils.Mean(working_mean)
regression_size = 100 * math_utils.RelativeChange(mean_of_good_runs,
mean_of_bad_runs)
if math.isnan(regression_size):
regression_size = 'zero-to-nonzero'
regression_std_err = math.fabs(math_utils.PooledStandardError(
[working_mean, broken_mean]) /
max(0.0001, min(mean_of_good_runs, mean_of_bad_runs))) * 100.0
# Give a "confidence" in the bisect. At the moment we use how distinct the
# values are before and after the last broken revision, and how noisy the
# overall graph is.
confidence_params = (sum(working_means, []), sum(broken_means, []))
confidence = cls.ConfidenceScore(*confidence_params)
bad_greater_than_good = mean_of_bad_runs > mean_of_good_runs
return {'regression_size': regression_size,
'regression_std_err': regression_std_err,
'confidence': confidence,
'bad_greater_than_good': bad_greater_than_good}
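# Worked example with made-up numbers (not part of the bisect tooling): the
# regression size reported above is the relative change between the mean of the
# good runs and the mean of the bad runs (via math_utils.RelativeChange),
# expressed as a percentage. A hand computation of that idea:
if __name__ == '__main__':
    good_runs = [100.0, 101.0, 99.5, 100.5]   # e.g. a page-load metric, in ms
    bad_runs = [112.0, 110.5, 111.5, 113.0]
    mean_good = sum(good_runs) / len(good_runs)
    mean_bad = sum(bad_runs) / len(bad_runs)
    print('regression size: %.1f%%' % (100.0 * (mean_bad - mean_good) / mean_good))  # roughly 11.5%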
|
apyrgio/snf-ganeti
|
refs/heads/stable-2.10-bpo2
|
lib/watcher/state.py
|
1
|
#
#
# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Module keeping state for Ganeti watcher.
"""
import os
import time
import logging
from ganeti import utils
from ganeti import serializer
from ganeti import errors
# Delete any record that is older than 8 hours; this value is based on
# the fact that the current retry counter is 5, and watcher runs every
# 5 minutes, so it takes around half an hour to exceed the retry
# counter, so 8 hours (16*1/2h) seems like a reasonable reset time
RETRY_EXPIRATION = 8 * 3600
KEY_RESTART_COUNT = "restart_count"
KEY_RESTART_WHEN = "restart_when"
KEY_BOOT_ID = "bootid"
def OpenStateFile(path):
"""Opens the state file and acquires a lock on it.
@type path: string
@param path: Path to state file
"""
# The two-step dance below is necessary to allow both opening an existing
# file read/write and creating it if not existing. Vanilla open will either
# truncate an existing file -or- allow creating one if not existing.
statefile_fd = os.open(path, os.O_RDWR | os.O_CREAT)
# Try to acquire lock on state file. If this fails, another watcher instance
# might already be running or another program is temporarily blocking the
# watcher from running.
try:
utils.LockFile(statefile_fd)
except errors.LockError, err:
logging.error("Can't acquire lock on state file %s: %s", path, err)
return None
return os.fdopen(statefile_fd, "w+")
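# Minimal standalone sketch of the same pattern (not Ganeti code): open the file
# read/write without truncating, creating it if missing, then take an exclusive
# lock with fcntl instead of ganeti.utils.LockFile.
def _open_and_lock_demo(path):
    import fcntl
    fd = os.open(path, os.O_RDWR | os.O_CREAT)  # never truncates existing content
    try:
        fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)  # fail fast if another process holds it
    except IOError:
        os.close(fd)
        return None
    return os.fdopen(fd, "w+")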
class WatcherState(object):
"""Interface to a state file recording restart attempts.
"""
def __init__(self, statefile):
"""Open, lock, read and parse the file.
@type statefile: file
@param statefile: State file object
"""
self.statefile = statefile
try:
state_data = self.statefile.read()
if not state_data:
self._data = {}
else:
self._data = serializer.Load(state_data)
except Exception, msg: # pylint: disable=W0703
# Ignore errors while loading the file and treat it as empty
self._data = {}
logging.warning(("Invalid state file. Using defaults."
" Error message: %s"), msg)
if "instance" not in self._data:
self._data["instance"] = {}
if "node" not in self._data:
self._data["node"] = {}
self._orig_data = serializer.Dump(self._data)
def Save(self, filename):
"""Save state to file, then unlock and close it.
"""
assert self.statefile
serialized_form = serializer.Dump(self._data)
if self._orig_data == serialized_form:
logging.debug("Data didn't change, just touching status file")
os.utime(filename, None)
return
# We need to make sure the file is locked before renaming it, otherwise
# starting ganeti-watcher again at the same time will create a conflict.
fd = utils.WriteFile(filename,
data=serialized_form,
prewrite=utils.LockFile, close=False)
self.statefile = os.fdopen(fd, "w+")
def Close(self):
"""Unlock configuration file and close it.
"""
assert self.statefile
# Files are automatically unlocked when closing them
self.statefile.close()
self.statefile = None
def GetNodeBootID(self, name):
"""Returns the last boot ID of a node or None.
"""
ndata = self._data["node"]
if name in ndata and KEY_BOOT_ID in ndata[name]:
return ndata[name][KEY_BOOT_ID]
return None
def SetNodeBootID(self, name, bootid):
"""Sets the boot ID of a node.
"""
assert bootid
ndata = self._data["node"]
ndata.setdefault(name, {})[KEY_BOOT_ID] = bootid
def NumberOfRestartAttempts(self, instance_name):
"""Returns number of previous restart attempts.
@type instance_name: string
@param instance_name: the name of the instance to look up
"""
idata = self._data["instance"]
if instance_name in idata:
return idata[instance_name][KEY_RESTART_COUNT]
return 0
def MaintainInstanceList(self, instances):
"""Perform maintenance on the recorded instances.
@type instances: list of string
@param instances: the list of currently existing instances
"""
idict = self._data["instance"]
# First, delete obsolete instances
obsolete_instances = set(idict).difference(instances)
for inst in obsolete_instances:
logging.debug("Forgetting obsolete instance %s", inst)
idict.pop(inst, None)
# Second, delete expired records
earliest = time.time() - RETRY_EXPIRATION
expired_instances = [i for i in idict
if idict[i][KEY_RESTART_WHEN] < earliest]
for inst in expired_instances:
logging.debug("Expiring record for instance %s", inst)
idict.pop(inst, None)
def RecordRestartAttempt(self, instance_name):
"""Record a restart attempt.
@type instance_name: string
@param instance_name: the name of the instance being restarted
"""
idata = self._data["instance"]
inst = idata.setdefault(instance_name, {})
inst[KEY_RESTART_WHEN] = time.time()
inst[KEY_RESTART_COUNT] = inst.get(KEY_RESTART_COUNT, 0) + 1
def RemoveInstance(self, instance_name):
"""Update state to reflect that a machine is running.
This method removes the record for a named instance (as we only
track instances that are down).
@type instance_name: string
@param instance_name: the name of the instance to remove from books
"""
idata = self._data["instance"]
idata.pop(instance_name, None)
|
terentjew-alexey/market-analysis-system
|
refs/heads/master
|
data/to_windowed.py
|
1
|
import numpy as np
from mas_tools.data import create_timeseries_matrix
from files import FILES, PERIODS, CSV
print('Warning! Process may be very long.')
# lpath = 'E:/Projects/market-analysis-system/data/transformed/'
lpath = 'E:/Projects/market-analysis-system/data/normalized/'
spath = 'E:/Projects/market-analysis-system/data/windowed/'
window = 20 # warning! the output file size is multiplied by the window size
for symbol in FILES:
for tf in PERIODS:
## Optimize skip
if tf == '1' or tf == '5' or tf == '15':
continue
## Read
data = np.genfromtxt(lpath+symbol+tf+CSV, delimiter=';')
## To windowed
data, _ = create_timeseries_matrix(data, look_back=window)
## Save
np.savetxt(spath+'norm_w_'+str(window)+symbol+tf+CSV, data, fmt='%0.6f', delimiter=';')
data = None
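# Illustrative only: create_timeseries_matrix() itself is not shown here, but a
# look_back window over a (rows, features) array typically concatenates each row
# with its window-1 predecessors, which is why the output files grow roughly by
# a factor of the window size. A minimal numpy sketch of that idea:
def _windowed_demo(array, look_back):
    rows = []
    for i in range(look_back - 1, len(array)):
        rows.append(array[i - look_back + 1:i + 1].reshape(-1))  # flatten the window
    return np.array(rows)
# e.g. a (6, 2) input with look_back=3 becomes a (4, 6) output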
|
uarka/binutils
|
refs/heads/master
|
gdb/testsuite/gdb.perf/lib/perftest/__init__.py
|
41
|
# Copyright (C) 2013-2014 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
GDB performance testing framework.
"""
|
makingspace/umeboshi
|
refs/heads/master
|
umeboshi/serializer.py
|
1
|
import pickle
from importlib import import_module
from django.core.exceptions import ImproperlyConfigured
from django.conf import settings
class DefaultSerializer(object):
def dumps(self, value):
return pickle.dumps(value)
def loads(self, value):
return pickle.loads(value)
def load_class(path):
"""
Loads class from path.
"""
mod_name, klass_name = path.rsplit('.', 1)
try:
mod = import_module(mod_name)
except (ImportError, AttributeError) as e:
raise ImproperlyConfigured('Error importing {0}: "{1}"'.format(mod_name, e))
try:
klass = getattr(mod, klass_name)
except AttributeError:
raise ImproperlyConfigured('Module "{0}" does not define a "{1}" class'.format(mod_name, klass_name))
if not hasattr(klass, "loads"):
raise ImproperlyConfigured('Class "{0}" does not define "loads" method'.format(klass_name))
if not hasattr(klass, "dumps"):
raise ImproperlyConfigured('Class "{0}" does not define "dumps" method'.format(klass_name))
return klass
def load_serializer(settings):
if hasattr(settings, 'UMEBOSHI_SERIALIZER'):
return load_class(settings.UMEBOSHI_SERIALIZER)()
else:
return DefaultSerializer()
serializer = load_serializer(settings)
|
mateon1/servo
|
refs/heads/master
|
components/script/dom/bindings/codegen/parser/tests/test_interface_identifier_conflicts_across_members.py
|
276
|
def WebIDLTest(parser, harness):
threw = False
try:
parser.parse("""
interface IdentifierConflictAcrossMembers1 {
const byte thing1 = 1;
readonly attribute long thing1;
};
""")
results = parser.finish()
except:
threw = True
harness.ok(threw, "Should have thrown.")
threw = False
try:
parser.parse("""
interface IdentifierConflictAcrossMembers2 {
readonly attribute long thing1;
const byte thing1 = 1;
};
""")
results = parser.finish()
except:
threw = True
harness.ok(threw, "Should have thrown.")
threw = False
try:
parser.parse("""
interface IdentifierConflictAcrossMembers3 {
getter boolean thing1(DOMString name);
readonly attribute long thing1;
};
""")
results = parser.finish()
except:
threw = True
harness.ok(threw, "Should have thrown.")
threw = False
try:
parser.parse("""
interface IdentifierConflictAcrossMembers4 {
const byte thing1 = 1;
long thing1();
};
""")
results = parser.finish()
except:
threw = True
harness.ok(threw, "Should have thrown.")
|
comandrei/django-haystack
|
refs/heads/master
|
test_haystack/test_utils.py
|
13
|
# encoding: utf-8
from __future__ import absolute_import, division, print_function, unicode_literals
from django.test import TestCase
from django.test.utils import override_settings
from test_haystack.core.models import MockModel
from haystack.utils import _lookup_identifier_method, get_facet_field_name, get_identifier, Highlighter, log
class GetFacetFieldNameTestCase(TestCase):
def test_get_facet_field_name(self):
self.assertEqual(get_facet_field_name('id'), 'id')
self.assertEqual(get_facet_field_name('django_id'), 'django_id')
self.assertEqual(get_facet_field_name('django_ct'), 'django_ct')
self.assertEqual(get_facet_field_name('author'), 'author_exact')
self.assertEqual(get_facet_field_name('author_exact'), 'author_exact_exact')
class GetIdentifierTestCase(TestCase):
def test_get_identifier(self):
self.assertEqual(get_identifier('core.mockmodel.1'), 'core.mockmodel.1')
# Valid object.
mock = MockModel.objects.get(pk=1)
self.assertEqual(get_identifier(mock), 'core.mockmodel.1')
@override_settings(HAYSTACK_IDENTIFIER_METHOD='test_haystack.core.custom_identifier.get_identifier_method')
def test_haystack_identifier_method(self):
get_identifier = _lookup_identifier_method()
self.assertEqual(get_identifier('a.b.c'), 'a.b.c')
@override_settings(HAYSTACK_IDENTIFIER_METHOD='test_haystack.core.custom_identifier.not_there')
def test_haystack_identifier_method_bad_path(self):
self.assertRaises(AttributeError, _lookup_identifier_method)
@override_settings(HAYSTACK_IDENTIFIER_METHOD='core.not_there.not_there')
def test_haystack_identifier_method_bad_module(self):
self.assertRaises(ImportError, _lookup_identifier_method)
class HighlighterTestCase(TestCase):
def setUp(self):
super(HighlighterTestCase, self).setUp()
self.document_1 = "This is a test of the highlightable words detection. This is only a test. Were this an actual emergency, your text would have exploded in mid-air."
self.document_2 = "The content of words in no particular order causes nothing to occur."
self.document_3 = "%s %s" % (self.document_1, self.document_2)
def test_find_highlightable_words(self):
highlighter = Highlighter('this test')
highlighter.text_block = self.document_1
self.assertEqual(highlighter.find_highlightable_words(), {'this': [0, 53, 79], 'test': [10, 68]})
# We don't stem for now.
highlighter = Highlighter('highlight tests')
highlighter.text_block = self.document_1
self.assertEqual(highlighter.find_highlightable_words(), {'highlight': [22], 'tests': []})
# Ignore negated bits.
highlighter = Highlighter('highlight -test')
highlighter.text_block = self.document_1
self.assertEqual(highlighter.find_highlightable_words(), {'highlight': [22]})
def test_find_window(self):
# The query doesn't matter for this method, so ignore it.
highlighter = Highlighter('')
highlighter.text_block = self.document_1
# No query.
self.assertEqual(highlighter.find_window({}), (0, 200))
# Nothing found.
self.assertEqual(highlighter.find_window({'highlight': [], 'tests': []}), (0, 200))
# Simple cases.
self.assertEqual(highlighter.find_window({'highlight': [0], 'tests': [100]}), (0, 200))
self.assertEqual(highlighter.find_window({'highlight': [99], 'tests': [199]}), (99, 299))
self.assertEqual(highlighter.find_window({'highlight': [0], 'tests': [201]}), (0, 200))
self.assertEqual(highlighter.find_window({'highlight': [203], 'tests': [120]}), (120, 320))
self.assertEqual(highlighter.find_window({'highlight': [], 'tests': [100]}), (100, 300))
self.assertEqual(highlighter.find_window({'highlight': [0], 'tests': [80], 'moof': [120]}), (0, 200))
# Simple cases, with an outlier far outside the window.
self.assertEqual(highlighter.find_window({'highlight': [0], 'tests': [100, 450]}), (0, 200))
self.assertEqual(highlighter.find_window({'highlight': [100], 'tests': [220, 450]}), (100, 300))
self.assertEqual(highlighter.find_window({'highlight': [100], 'tests': [350, 450]}), (350, 550))
self.assertEqual(highlighter.find_window({'highlight': [100], 'tests': [220], 'moof': [450]}), (100, 300))
# Density checks.
self.assertEqual(highlighter.find_window({'highlight': [0], 'tests': [100, 180, 450]}), (0, 200))
self.assertEqual(highlighter.find_window({'highlight': [0, 40], 'tests': [100, 200, 220, 450]}), (40, 240))
self.assertEqual(highlighter.find_window({'highlight': [0, 40], 'tests': [100, 200, 220], 'moof': [450]}), (40, 240))
self.assertEqual(highlighter.find_window({'highlight': [0, 40], 'tests': [100, 200, 220], 'moof': [294, 299, 450]}), (100, 300))
def test_render_html(self):
highlighter = Highlighter('this test')
highlighter.text_block = self.document_1
self.assertEqual(highlighter.render_html({'this': [0, 53, 79], 'test': [10, 68]}, 0, 200), '<span class="highlighted">This</span> is a <span class="highlighted">test</span> of the highlightable words detection. <span class="highlighted">This</span> is only a <span class="highlighted">test</span>. Were <span class="highlighted">this</span> an actual emergency, your text would have exploded in mid-air.')
highlighter.text_block = self.document_2
self.assertEqual(highlighter.render_html({'this': [0, 53, 79], 'test': [10, 68]}, 0, 200), 'The content of words in no particular order causes nothing to occur.')
highlighter.text_block = self.document_3
self.assertEqual(highlighter.render_html({'this': [0, 53, 79], 'test': [10, 68]}, 0, 200), '<span class="highlighted">This</span> is a <span class="highlighted">test</span> of the highlightable words detection. <span class="highlighted">This</span> is only a <span class="highlighted">test</span>. Were <span class="highlighted">this</span> an actual emergency, your text would have exploded in mid-air. The content of words in no particular order causes no...')
highlighter = Highlighter('content detection')
highlighter.text_block = self.document_3
self.assertEqual(highlighter.render_html({'content': [151], 'detection': [42]}, 42, 242), '...<span class="highlighted">detection</span>. This is only a test. Were this an actual emergency, your text would have exploded in mid-air. The <span class="highlighted">content</span> of words in no particular order causes nothing to occur.')
self.assertEqual(highlighter.render_html({'content': [151], 'detection': [42]}, 42, 200), '...<span class="highlighted">detection</span>. This is only a test. Were this an actual emergency, your text would have exploded in mid-air. The <span class="highlighted">content</span> of words in no particular order causes no...')
# One term found within another term.
highlighter = Highlighter('this is')
highlighter.text_block = self.document_1
self.assertEqual(highlighter.render_html({'this': [0, 53, 79], 'is': [2, 5, 55, 58, 81]}, 0, 200), '<span class="highlighted">This</span> <span class="highlighted">is</span> a test of the highlightable words detection. <span class="highlighted">This</span> <span class="highlighted">is</span> only a test. Were <span class="highlighted">this</span> an actual emergency, your text would have exploded in mid-air.')
# Regression for repetition in the regular expression.
highlighter = Highlighter('i++')
highlighter.text_block = 'Foo is i++ in most cases.'
self.assertEqual(highlighter.render_html({'i++': [7]}, 0, 200), 'Foo is <span class="highlighted">i++</span> in most cases.')
highlighter = Highlighter('i**')
highlighter.text_block = 'Foo is i** in most cases.'
self.assertEqual(highlighter.render_html({'i**': [7]}, 0, 200), 'Foo is <span class="highlighted">i**</span> in most cases.')
highlighter = Highlighter('i..')
highlighter.text_block = 'Foo is i.. in most cases.'
self.assertEqual(highlighter.render_html({'i..': [7]}, 0, 200), 'Foo is <span class="highlighted">i..</span> in most cases.')
highlighter = Highlighter('i??')
highlighter.text_block = 'Foo is i?? in most cases.'
self.assertEqual(highlighter.render_html({'i??': [7]}, 0, 200), 'Foo is <span class="highlighted">i??</span> in most cases.')
# Regression for highlighting already highlighted HTML terms.
highlighter = Highlighter('span')
highlighter.text_block = 'A span in spam makes html in a can.'
self.assertEqual(highlighter.render_html({'span': [2]}, 0, 200), 'A <span class="highlighted">span</span> in spam makes html in a can.')
highlighter = Highlighter('highlight')
highlighter.text_block = 'A span in spam makes highlighted html in a can.'
self.assertEqual(highlighter.render_html({'highlight': [21]}, 0, 200), 'A span in spam makes <span class="highlighted">highlight</span>ed html in a can.')
def test_highlight(self):
highlighter = Highlighter('this test')
self.assertEqual(highlighter.highlight(self.document_1), u'<span class="highlighted">This</span> is a <span class="highlighted">test</span> of the highlightable words detection. <span class="highlighted">This</span> is only a <span class="highlighted">test</span>. Were <span class="highlighted">this</span> an actual emergency, your text would have exploded in mid-air.')
self.assertEqual(highlighter.highlight(self.document_2), u'The content of words in no particular order causes nothing to occur.')
self.assertEqual(highlighter.highlight(self.document_3), u'<span class="highlighted">This</span> is a <span class="highlighted">test</span> of the highlightable words detection. <span class="highlighted">This</span> is only a <span class="highlighted">test</span>. Were <span class="highlighted">this</span> an actual emergency, your text would have exploded in mid-air. The content of words in no particular order causes no...')
highlighter = Highlighter('this test', html_tag='div', css_class=None)
self.assertEqual(highlighter.highlight(self.document_1), u'<div>This</div> is a <div>test</div> of the highlightable words detection. <div>This</div> is only a <div>test</div>. Were <div>this</div> an actual emergency, your text would have exploded in mid-air.')
self.assertEqual(highlighter.highlight(self.document_2), u'The content of words in no particular order causes nothing to occur.')
self.assertEqual(highlighter.highlight(self.document_3), u'<div>This</div> is a <div>test</div> of the highlightable words detection. <div>This</div> is only a <div>test</div>. Were <div>this</div> an actual emergency, your text would have exploded in mid-air. The content of words in no particular order causes no...')
highlighter = Highlighter('content detection')
self.assertEqual(highlighter.highlight(self.document_1), u'...<span class="highlighted">detection</span>. This is only a test. Were this an actual emergency, your text would have exploded in mid-air.')
self.assertEqual(highlighter.highlight(self.document_2), u'...<span class="highlighted">content</span> of words in no particular order causes nothing to occur.')
self.assertEqual(highlighter.highlight(self.document_3), u'...<span class="highlighted">detection</span>. This is only a test. Were this an actual emergency, your text would have exploded in mid-air. The <span class="highlighted">content</span> of words in no particular order causes nothing to occur.')
highlighter = Highlighter('content detection', max_length=100)
self.assertEqual(highlighter.highlight(self.document_1), u'...<span class="highlighted">detection</span>. This is only a test. Were this an actual emergency, your text would have exploded in mid-...')
self.assertEqual(highlighter.highlight(self.document_2), u'...<span class="highlighted">content</span> of words in no particular order causes nothing to occur.')
self.assertEqual(highlighter.highlight(self.document_3), u'This is a test of the highlightable words <span class="highlighted">detection</span>. This is only a test. Were this an actual emerge...')
class LoggingFacadeTestCase(TestCase):
def test_everything_noops_if_settings_are_off(self):
with self.settings(HAYSTACK_LOGGING=False):
l = log.LoggingFacade(None)
l.error()
def test_uses_provided_logger_if_logging_is_on(self):
with self.settings(HAYSTACK_LOGGING=True):
l = log.LoggingFacade(None)
try:
l.error()
except AttributeError:
pass
def test_uses_provided_logger_by_default(self):
class Logger(object):
def __init__(self):
self.was_called = False
def error(self):
self.was_called = True
l = log.LoggingFacade(Logger())
self.assertFalse(l.was_called, msg='sanity check')
l.error()
self.assertTrue(l.was_called)
|
rgom/Pydev
|
refs/heads/development
|
plugins/org.python.pydev.jython/Lib/modjy/modjy_input.py
|
29
|
###
#
# Copyright Alan Kennedy.
#
# You may contact the copyright holder at this uri:
#
# http://www.xhaus.com/contact/modjy
#
# The licence under which this code is released is the Apache License v2.0.
#
# The terms and conditions of this license are listed in a file contained
# in the distribution that also contained this file, under the name
# LICENSE.txt.
#
# You may also read a copy of the license at the following web address.
#
# http://modjy.xhaus.com/LICENSE.txt
#
###
#
# This code adapted from the socket._fileobject class
#
import jarray
class modjy_input_object(object):
def __init__(self, servlet_inputstream, bufsize=8192):
self.istream = servlet_inputstream
self.buffer_size = bufsize
self.buffer = ""
def istream_read(self, n):
data = jarray.zeros(n, 'b')
m = self.istream.read(data)
if m == -1: # indicates EOF has been reached, so we just return the empty string
return ""
elif m <= 0:
return ""
if m < n:
data = data[:m]
return data.tostring()
def read(self, size=-1):
data = self.buffer
if size < 0:
# Read until EOF
buffers = []
if data:
buffers.append(data)
self.buffer = ""
recv_size = self.buffer_size
while True:
data = self.istream_read(recv_size)
if not data:
break
buffers.append(data)
return "".join(buffers)
else:
# Read until size bytes or EOF seen, whichever comes first
buf_len = len(data)
if buf_len >= size:
self.buffer = data[size:]
return data[:size]
buffers = []
if data:
buffers.append(data)
self.buffer = ""
while True:
left = size - buf_len
recv_size = max(self.buffer_size, left)
data = self.istream_read(recv_size)
if not data:
break
buffers.append(data)
n = len(data)
if n >= left:
self.buffer = data[left:]
buffers[-1] = data[:left]
break
buf_len += n
return "".join(buffers)
def readline(self, size=-1):
data = self.buffer
if size < 0:
# Read until \n or EOF, whichever comes first
nl = data.find('\n')
if nl >= 0:
nl += 1
self.buffer = data[nl:]
return data[:nl]
buffers = []
if data:
buffers.append(data)
self.buffer = ""
while True:
data = self.istream_read(self.buffer_size)
if not data:
break
buffers.append(data)
nl = data.find('\n')
if nl >= 0:
nl += 1
self.buffer = data[nl:]
buffers[-1] = data[:nl]
break
return "".join(buffers)
else:
# Read until size bytes or \n or EOF seen, whichever comes first
nl = data.find('\n', 0, size)
if nl >= 0:
nl += 1
self.buffer = data[nl:]
return data[:nl]
buf_len = len(data)
if buf_len >= size:
self.buffer = data[size:]
return data[:size]
buffers = []
if data:
buffers.append(data)
self.buffer = ""
while True:
data = self.istream_read(self.buffer_size)
if not data:
break
buffers.append(data)
left = size - buf_len
nl = data.find('\n', 0, left)
if nl >= 0:
nl += 1
self.buffer = data[nl:]
buffers[-1] = data[:nl]
break
n = len(data)
if n >= left:
self.buffer = data[left:]
buffers[-1] = data[:left]
break
buf_len += n
return "".join(buffers)
def readlines(self, sizehint=0):
total = 0
lines = []
while True:
line = self.readline()
if not line:
break
lines.append(line)
total += len(line)
if sizehint and total >= sizehint:
break
return lines
# Iterator protocols
def __iter__(self):
return self
def next(self):
line = self.readline()
if not line:
raise StopIteration
return line
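# Illustrative usage sketch (added for exposition; not part of the original
# module).  Within modjy this class wraps the request's javax.servlet
# ServletInputStream so that WSGI code can treat the request body as an
# ordinary file-like object, e.g.:
#
#     body = modjy_input_object(request.getInputStream())
#     first_line = body.readline()   # up to and including the first '\n'
#     rest = body.read()             # everything remaining, fetched in
#                                    # self.buffer_size chunks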
|
revoer/keystone-8.0.0
|
refs/heads/master
|
swift/common/header_key_dict.py
|
5
|
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import six
class HeaderKeyDict(dict):
"""
A dict that title-cases all keys on the way in, so as to be
case-insensitive.
"""
def __init__(self, base_headers=None, **kwargs):
if base_headers:
self.update(base_headers)
self.update(kwargs)
def update(self, other):
if hasattr(other, 'keys'):
for key in other.keys():
self[key.title()] = other[key]
else:
for key, value in other:
self[key.title()] = value
def __getitem__(self, key):
return dict.get(self, key.title())
def __setitem__(self, key, value):
if value is None:
self.pop(key.title(), None)
elif isinstance(value, six.text_type):
return dict.__setitem__(self, key.title(), value.encode('utf-8'))
else:
return dict.__setitem__(self, key.title(), str(value))
def __contains__(self, key):
return dict.__contains__(self, key.title())
def __delitem__(self, key):
return dict.__delitem__(self, key.title())
def get(self, key, default=None):
return dict.get(self, key.title(), default)
def setdefault(self, key, value=None):
if key not in self:
self[key] = value
return self[key]
def pop(self, key, default=None):
return dict.pop(self, key.title(), default)
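# Illustrative usage sketch (added for exposition; not part of the original
# Swift module).  It exercises the case-insensitive behaviour described in the
# class docstring; the header names used here are arbitrary examples.
if __name__ == '__main__':
    headers = HeaderKeyDict({'content-length': 42})
    assert headers['Content-Length'] == '42'      # values are stringified
    assert headers.get('CONTENT-LENGTH') == '42'  # any casing finds the key
    headers['Content-Length'] = None              # assigning None deletes
    assert 'content-length' not in headers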
|
michaelpacer/python-future
|
refs/heads/master
|
src/future/backports/email/mime/application.py
|
83
|
# Copyright (C) 2001-2006 Python Software Foundation
# Author: Keith Dart
# Contact: email-sig@python.org
"""Class representing application/* type MIME documents."""
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from future.backports.email import encoders
from future.backports.email.mime.nonmultipart import MIMENonMultipart
__all__ = ["MIMEApplication"]
class MIMEApplication(MIMENonMultipart):
"""Class for generating application/* MIME documents."""
def __init__(self, _data, _subtype='octet-stream',
_encoder=encoders.encode_base64, **_params):
"""Create an application/* type MIME document.
_data is a string containing the raw application data.
_subtype is the MIME content type subtype, defaulting to
'octet-stream'.
_encoder is a function which will perform the actual encoding for
transport of the application data, defaulting to base64 encoding.
Any additional keyword arguments are passed to the base class
constructor, which turns them into parameters on the Content-Type
header.
"""
if _subtype is None:
raise TypeError('Invalid application MIME subtype')
MIMENonMultipart.__init__(self, 'application', _subtype, **_params)
self.set_payload(_data)
_encoder(self)
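# Illustrative usage sketch (added for exposition; not part of the original
# backport).  A minimal example of the constructor documented above: the raw
# bytes are base64-encoded for transport and the extra keyword argument ends
# up as a Content-Type parameter.  Expected header values are shown as
# comments, not asserted.
if __name__ == '__main__':
    part = MIMEApplication(b'\x00\x01\x02\x03', name='blob.bin')
    print(part['Content-Type'])                # application/octet-stream; name="blob.bin"
    print(part['Content-Transfer-Encoding'])   # base64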
|
tealover/nova
|
refs/heads/master
|
nova/tests/unit/api/openstack/compute/test_v21_extensions.py
|
37
|
# Copyright 2013 IBM Corp.
# Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_config import cfg
import stevedore
import webob.exc
from nova.api import openstack
from nova.api.openstack import compute
from nova.api.openstack.compute import plugins
from nova.api.openstack import extensions
from nova import exception
from nova import test
CONF = cfg.CONF
class fake_bad_extension(object):
name = "fake_bad_extension"
alias = "fake-bad"
class fake_stevedore_enabled_extensions(object):
def __init__(self, namespace, check_func, invoke_on_load=False,
invoke_args=(), invoke_kwds=None):
self.extensions = []
def map(self, func, *args, **kwds):
pass
def __iter__(self):
return iter(self.extensions)
class fake_loaded_extension_info(object):
def __init__(self):
self.extensions = {}
def register_extension(self, ext):
self.extensions[ext] = ext
return True
def get_extensions(self):
return {'core1': None, 'core2': None, 'noncore1': None}
class ExtensionLoadingTestCase(test.NoDBTestCase):
def _set_v21_core(self, core_extensions):
openstack.API_V3_CORE_EXTENSIONS = core_extensions
def test_extensions_loaded(self):
app = compute.APIRouterV21()
self.assertIn('servers', app._loaded_extension_info.extensions)
def test_check_bad_extension(self):
extension_info = plugins.LoadedExtensionInfo()
self.assertFalse(extension_info._check_extension(fake_bad_extension))
def test_extensions_blacklist(self):
app = compute.APIRouterV21()
self.assertIn('os-hosts', app._loaded_extension_info.extensions)
CONF.set_override('extensions_blacklist', ['os-hosts'], 'osapi_v3')
app = compute.APIRouterV21()
self.assertNotIn('os-hosts', app._loaded_extension_info.extensions)
@mock.patch('nova.api.openstack.APIRouterV21._register_resources_list')
def test_extensions_inherit(self, mock_register):
app = compute.APIRouterV21()
self.assertIn('servers', app._loaded_extension_info.extensions)
self.assertIn('os-volumes', app._loaded_extension_info.extensions)
mock_register.assert_called_with(mock.ANY, mock.ANY)
ext_no_inherits = mock_register.call_args_list[0][0][0]
ext_has_inherits = mock_register.call_args_list[1][0][0]
# os-volumes inherits from servers
name_list = [ext.obj.alias for ext in ext_has_inherits]
self.assertIn('os-volumes', name_list)
name_list = [ext.obj.alias for ext in ext_no_inherits]
self.assertIn('servers', name_list)
def test_extensions_whitelist_accept(self):
# NOTE(maurosr): just to avoid getting an exception raised for not
# loading all of the core API.
v21_core = openstack.API_V3_CORE_EXTENSIONS
openstack.API_V3_CORE_EXTENSIONS = set(['servers'])
self.addCleanup(self._set_v21_core, v21_core)
app = compute.APIRouterV21()
self.assertIn('os-hosts', app._loaded_extension_info.extensions)
CONF.set_override('extensions_whitelist', ['servers', 'os-hosts'],
'osapi_v3')
app = compute.APIRouterV21()
self.assertIn('os-hosts', app._loaded_extension_info.extensions)
def test_extensions_whitelist_block(self):
# NOTE(maurosr): just to avoid getting an exception raised for not
# loading all of the core API.
v21_core = openstack.API_V3_CORE_EXTENSIONS
openstack.API_V3_CORE_EXTENSIONS = set(['servers'])
self.addCleanup(self._set_v21_core, v21_core)
app = compute.APIRouterV21()
self.assertIn('os-hosts', app._loaded_extension_info.extensions)
CONF.set_override('extensions_whitelist', ['servers'], 'osapi_v3')
app = compute.APIRouterV21()
self.assertNotIn('os-hosts', app._loaded_extension_info.extensions)
def test_blacklist_overrides_whitelist(self):
# NOTE(maurosr): just to avoid getting an exception raised for not
# loading all of the core API.
v21_core = openstack.API_V3_CORE_EXTENSIONS
openstack.API_V3_CORE_EXTENSIONS = set(['servers'])
self.addCleanup(self._set_v21_core, v21_core)
app = compute.APIRouterV21()
self.assertIn('os-hosts', app._loaded_extension_info.extensions)
CONF.set_override('extensions_whitelist', ['servers', 'os-hosts'],
'osapi_v3')
CONF.set_override('extensions_blacklist', ['os-hosts'], 'osapi_v3')
app = compute.APIRouterV21()
self.assertNotIn('os-hosts', app._loaded_extension_info.extensions)
self.assertIn('servers', app._loaded_extension_info.extensions)
self.assertEqual(1, len(app._loaded_extension_info.extensions))
def test_get_missing_core_extensions(self):
v21_core = openstack.API_V3_CORE_EXTENSIONS
openstack.API_V3_CORE_EXTENSIONS = set(['core1', 'core2'])
self.addCleanup(self._set_v21_core, v21_core)
self.assertEqual(0, len(
compute.APIRouterV21.get_missing_core_extensions(
['core1', 'core2', 'noncore1'])))
missing_core = compute.APIRouterV21.get_missing_core_extensions(
['core1'])
self.assertEqual(1, len(missing_core))
self.assertIn('core2', missing_core)
missing_core = compute.APIRouterV21.get_missing_core_extensions([])
self.assertEqual(2, len(missing_core))
self.assertIn('core1', missing_core)
self.assertIn('core2', missing_core)
missing_core = compute.APIRouterV21.get_missing_core_extensions(
['noncore1'])
self.assertEqual(2, len(missing_core))
self.assertIn('core1', missing_core)
self.assertIn('core2', missing_core)
def test_core_extensions_present(self):
self.stubs.Set(stevedore.enabled, 'EnabledExtensionManager',
fake_stevedore_enabled_extensions)
self.stubs.Set(plugins, 'LoadedExtensionInfo',
fake_loaded_extension_info)
v21_core = openstack.API_V3_CORE_EXTENSIONS
openstack.API_V3_CORE_EXTENSIONS = set(['core1', 'core2'])
self.addCleanup(self._set_v21_core, v21_core)
# if no core API extensions are missing then an exception will
# not be raised when creating an instance of compute.APIRouterV21
compute.APIRouterV21()
def test_core_extensions_missing(self):
self.stubs.Set(stevedore.enabled, 'EnabledExtensionManager',
fake_stevedore_enabled_extensions)
self.stubs.Set(plugins, 'LoadedExtensionInfo',
fake_loaded_extension_info)
self.assertRaises(exception.CoreAPIMissing, compute.APIRouterV21)
def test_extensions_expected_error(self):
@extensions.expected_errors(404)
def fake_func():
raise webob.exc.HTTPNotFound()
self.assertRaises(webob.exc.HTTPNotFound, fake_func)
def test_extensions_expected_error_from_list(self):
@extensions.expected_errors((404, 403))
def fake_func():
raise webob.exc.HTTPNotFound()
self.assertRaises(webob.exc.HTTPNotFound, fake_func)
def test_extensions_unexpected_error(self):
@extensions.expected_errors(404)
def fake_func():
raise webob.exc.HTTPConflict()
self.assertRaises(webob.exc.HTTPInternalServerError, fake_func)
def test_extensions_unexpected_error_from_list(self):
@extensions.expected_errors((404, 413))
def fake_func():
raise webob.exc.HTTPConflict()
self.assertRaises(webob.exc.HTTPInternalServerError, fake_func)
def test_extensions_unexpected_policy_not_authorized_error(self):
@extensions.expected_errors(404)
def fake_func():
raise exception.PolicyNotAuthorized(action="foo")
self.assertRaises(exception.PolicyNotAuthorized, fake_func)
|
mydongistiny/external_chromium_org
|
refs/heads/benzo
|
third_party/cython/src/Cython/Debugger/libpython.py
|
101
|
#!/usr/bin/python
# NOTE: this file is taken from the Python source distribution
# It can be found under Tools/gdb/libpython.py. It is shipped with Cython
# because it's not installed as a python module, and because changes are only
# merged into new python versions (v3.2+).
'''
From gdb 7 onwards, gdb's build can be configured --with-python, allowing gdb
to be extended with Python code e.g. for library-specific data visualizations,
such as for the C++ STL types. Documentation on this API can be seen at:
http://sourceware.org/gdb/current/onlinedocs/gdb/Python-API.html
This python module deals with the case when the process being debugged (the
"inferior process" in gdb parlance) is itself python, or more specifically,
linked against libpython. In this situation, almost every item of data is a
(PyObject*), and having the debugger merely print their addresses is not very
enlightening.
This module embeds knowledge about the implementation details of libpython so
that we can emit useful visualizations e.g. a string, a list, a dict, a frame
giving file/line information and the state of local variables
In particular, given a gdb.Value corresponding to a PyObject* in the inferior
process, we can generate a "proxy value" within the gdb process. For example,
given a PyObject* in the inferior process that is in fact a PyListObject*
holding three PyObject* that turn out to be PyStringObject* instances, we can
generate a proxy value within the gdb process that is a list of strings:
["foo", "bar", "baz"]
Doing so can be expensive for complicated graphs of objects, and could take
some time, so we also have a "write_repr" method that writes a representation
of the data to a file-like object. This allows us to stop the traversal by
having the file-like object raise an exception if it gets too much data.
With both "proxyval" and "write_repr" we keep track of the set of all addresses
visited so far in the traversal, to avoid infinite recursion due to cycles in
the graph of object references.
We try to defer gdb.lookup_type() invocations for python types until as late as
possible: for a dynamically linked python binary, when the process starts in
the debugger, the libpython.so hasn't been dynamically loaded yet, so none of
the type names are known to the debugger
The module also extends gdb with some python-specific commands.
'''
from __future__ import with_statement
import os
import re
import sys
import struct
import locale
import atexit
import warnings
import tempfile
import textwrap
import itertools
import gdb
if sys.version_info[0] < 3:
# I think this is the only way to fix this bug :'(
# http://sourceware.org/bugzilla/show_bug.cgi?id=12285
out, err = sys.stdout, sys.stderr
reload(sys).setdefaultencoding('UTF-8')
sys.stdout = out
sys.stderr = err
# Look up the gdb.Type for some standard types:
_type_char_ptr = gdb.lookup_type('char').pointer() # char*
_type_unsigned_char_ptr = gdb.lookup_type('unsigned char').pointer()
_type_void_ptr = gdb.lookup_type('void').pointer() # void*
SIZEOF_VOID_P = _type_void_ptr.sizeof
Py_TPFLAGS_HEAPTYPE = (1L << 9)
Py_TPFLAGS_INT_SUBCLASS = (1L << 23)
Py_TPFLAGS_LONG_SUBCLASS = (1L << 24)
Py_TPFLAGS_LIST_SUBCLASS = (1L << 25)
Py_TPFLAGS_TUPLE_SUBCLASS = (1L << 26)
Py_TPFLAGS_STRING_SUBCLASS = (1L << 27)
Py_TPFLAGS_BYTES_SUBCLASS = (1L << 27)
Py_TPFLAGS_UNICODE_SUBCLASS = (1L << 28)
Py_TPFLAGS_DICT_SUBCLASS = (1L << 29)
Py_TPFLAGS_BASE_EXC_SUBCLASS = (1L << 30)
Py_TPFLAGS_TYPE_SUBCLASS = (1L << 31)
MAX_OUTPUT_LEN = 1024
hexdigits = "0123456789abcdef"
ENCODING = locale.getpreferredencoding()
class NullPyObjectPtr(RuntimeError):
pass
def safety_limit(val):
# Given an integer value from the process being debugged, limit it to some
# safety threshold so that arbitrary breakage within said process doesn't
# break the gdb process too much (e.g. sizes of iterations, sizes of lists)
return min(val, 1000)
def safe_range(val):
# As per range, but don't trust the value too much: cap it to a safety
# threshold in case the data was corrupted
return xrange(safety_limit(val))
def write_unicode(file, text):
# Write a byte or unicode string to file. Unicode strings are encoded to
# ENCODING encoding with 'backslashreplace' error handler to avoid
# UnicodeEncodeError.
if isinstance(text, unicode):
text = text.encode(ENCODING, 'backslashreplace')
file.write(text)
def os_fsencode(filename):
if not isinstance(filename, unicode):
return filename
encoding = sys.getfilesystemencoding()
if encoding == 'mbcs':
# mbcs doesn't support surrogateescape
return filename.encode(encoding)
encoded = []
for char in filename:
# surrogateescape error handler
if 0xDC80 <= ord(char) <= 0xDCFF:
byte = chr(ord(char) - 0xDC00)
else:
byte = char.encode(encoding)
encoded.append(byte)
return ''.join(encoded)
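# Illustrative sketch (added comment, not in the upstream file): os_fsencode
# above undoes PEP 383 "surrogateescape" escapes.  For example, a lone
# surrogate u'\udcff' in a filename is turned back into the raw byte '\xff'
# (chr(0xDCFF - 0xDC00)), while ordinary characters are simply encoded with
# the filesystem encoding.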
class StringTruncated(RuntimeError):
pass
class TruncatedStringIO(object):
'''Similar to cStringIO, but can truncate the output by raising a
StringTruncated exception'''
def __init__(self, maxlen=None):
self._val = ''
self.maxlen = maxlen
def write(self, data):
if self.maxlen:
if len(data) + len(self._val) > self.maxlen:
# Truncation:
self._val += data[0:self.maxlen - len(self._val)]
raise StringTruncated()
self._val += data
def getvalue(self):
return self._val
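# Illustrative sketch (added comment, not in the upstream file) of how
# TruncatedStringIO above behaves; get_truncated_repr() further down relies on
# exactly this to cap the size of generated reprs:
#
#     out = TruncatedStringIO(maxlen=8)
#     try:
#         out.write('0123456789')   # raises StringTruncated after 8 chars
#     except StringTruncated:
#         pass
#     out.getvalue()                # -> '01234567'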
# pretty printer lookup
all_pretty_typenames = set()
class PrettyPrinterTrackerMeta(type):
def __init__(self, name, bases, dict):
super(PrettyPrinterTrackerMeta, self).__init__(name, bases, dict)
all_pretty_typenames.add(self._typename)
class PyObjectPtr(object):
"""
Class wrapping a gdb.Value that's either a (PyObject*) within the
inferior process, or some subclass pointer e.g. (PyStringObject*)
There will be a subclass for every refined PyObject type that we care
about.
Note that at every stage the underlying pointer could be NULL, point
to corrupt data, etc; this is the debugger, after all.
"""
__metaclass__ = PrettyPrinterTrackerMeta
_typename = 'PyObject'
def __init__(self, gdbval, cast_to=None):
if cast_to:
self._gdbval = gdbval.cast(cast_to)
else:
self._gdbval = gdbval
def field(self, name):
'''
Get the gdb.Value for the given field within the PyObject, coping with
some python 2 versus python 3 differences.
Various libpython types are defined using the "PyObject_HEAD" and
"PyObject_VAR_HEAD" macros.
In Python 2, these are defined so that "ob_type" and (for a var
object) "ob_size" are fields of the type in question.
In Python 3, this is defined as an embedded PyVarObject type thus:
PyVarObject ob_base;
so that the "ob_size" field is located inside the "ob_base" field, and
the "ob_type" is most easily accessed by casting back to a (PyObject*).
'''
if self.is_null():
raise NullPyObjectPtr(self)
if name == 'ob_type':
pyo_ptr = self._gdbval.cast(PyObjectPtr.get_gdb_type())
return pyo_ptr.dereference()[name]
if name == 'ob_size':
pyo_ptr = self._gdbval.cast(PyVarObjectPtr.get_gdb_type())
return pyo_ptr.dereference()[name]
# General case: look it up inside the object:
return self._gdbval.dereference()[name]
def pyop_field(self, name):
'''
Get a PyObjectPtr for the given PyObject* field within this PyObject,
coping with some python 2 versus python 3 differences.
'''
return PyObjectPtr.from_pyobject_ptr(self.field(name))
def write_field_repr(self, name, out, visited):
'''
Extract the PyObject* field named "name", and write its representation
to file-like object "out"
'''
field_obj = self.pyop_field(name)
field_obj.write_repr(out, visited)
def get_truncated_repr(self, maxlen):
'''
Get a repr-like string for the data, but truncate it at "maxlen" bytes
(ending the object graph traversal as soon as you do)
'''
out = TruncatedStringIO(maxlen)
try:
self.write_repr(out, set())
except StringTruncated:
# Truncation occurred:
return out.getvalue() + '...(truncated)'
# No truncation occurred:
return out.getvalue()
def type(self):
return PyTypeObjectPtr(self.field('ob_type'))
def is_null(self):
return 0 == long(self._gdbval)
def is_optimized_out(self):
'''
Is the value of the underlying PyObject* visible to the debugger?
This can vary with the precise version of the compiler used to build
Python, and the precise version of gdb.
See e.g. https://bugzilla.redhat.com/show_bug.cgi?id=556975 with
PyEval_EvalFrameEx's "f"
'''
return self._gdbval.is_optimized_out
def safe_tp_name(self):
try:
return self.type().field('tp_name').string()
except NullPyObjectPtr:
# NULL tp_name?
return 'unknown'
except RuntimeError:
# Can't even read the object at all?
return 'unknown'
def proxyval(self, visited):
'''
Scrape a value from the inferior process, and try to represent it
within the gdb process, whilst (hopefully) avoiding crashes when
the remote data is corrupt.
Derived classes will override this.
For example, a PyIntObject* with ob_ival 42 in the inferior process
should result in an int(42) in this process.
visited: a set of all gdb.Value pyobject pointers already visited
whilst generating this value (to guard against infinite recursion when
visiting object graphs with loops). Analogous to Py_ReprEnter and
Py_ReprLeave
'''
class FakeRepr(object):
"""
Class representing a non-descript PyObject* value in the inferior
process for when we don't have a custom scraper, intended to have
a sane repr().
"""
def __init__(self, tp_name, address):
self.tp_name = tp_name
self.address = address
def __repr__(self):
# For the NULL pointer, we have no way of knowing a type, so
# special-case it as per
# http://bugs.python.org/issue8032#msg100882
if self.address == 0:
return '0x0'
return '<%s at remote 0x%x>' % (self.tp_name, self.address)
return FakeRepr(self.safe_tp_name(),
long(self._gdbval))
def write_repr(self, out, visited):
'''
Write a string representation of the value scraped from the inferior
process to "out", a file-like object.
'''
# Default implementation: generate a proxy value and write its repr
# However, this could involve a lot of work for complicated objects,
# so for derived classes we specialize this
return out.write(repr(self.proxyval(visited)))
@classmethod
def subclass_from_type(cls, t):
'''
Given a PyTypeObjectPtr instance wrapping a gdb.Value that's a
(PyTypeObject*), determine the corresponding subclass of PyObjectPtr
to use
Ideally, we would look up the symbols for the global types, but that
isn't working yet:
(gdb) python print gdb.lookup_symbol('PyList_Type')[0].value
Traceback (most recent call last):
File "<string>", line 1, in <module>
NotImplementedError: Symbol type not yet supported in Python scripts.
Error while executing Python code.
For now, we use tp_flags, after doing some string comparisons on the
tp_name for some special-cases that don't seem to be visible through
flags
'''
try:
tp_name = t.field('tp_name').string()
tp_flags = int(t.field('tp_flags'))
except RuntimeError:
# Handle any kind of error e.g. NULL ptrs by simply using the base
# class
return cls
#print 'tp_flags = 0x%08x' % tp_flags
#print 'tp_name = %r' % tp_name
name_map = {'bool': PyBoolObjectPtr,
'classobj': PyClassObjectPtr,
'instance': PyInstanceObjectPtr,
'NoneType': PyNoneStructPtr,
'frame': PyFrameObjectPtr,
'set' : PySetObjectPtr,
'frozenset' : PySetObjectPtr,
'builtin_function_or_method' : PyCFunctionObjectPtr,
}
if tp_name in name_map:
return name_map[tp_name]
if tp_flags & (Py_TPFLAGS_HEAPTYPE|Py_TPFLAGS_TYPE_SUBCLASS):
return PyTypeObjectPtr
if tp_flags & Py_TPFLAGS_INT_SUBCLASS:
return PyIntObjectPtr
if tp_flags & Py_TPFLAGS_LONG_SUBCLASS:
return PyLongObjectPtr
if tp_flags & Py_TPFLAGS_LIST_SUBCLASS:
return PyListObjectPtr
if tp_flags & Py_TPFLAGS_TUPLE_SUBCLASS:
return PyTupleObjectPtr
if tp_flags & Py_TPFLAGS_STRING_SUBCLASS:
try:
gdb.lookup_type('PyBytesObject')
return PyBytesObjectPtr
except RuntimeError:
return PyStringObjectPtr
if tp_flags & Py_TPFLAGS_UNICODE_SUBCLASS:
return PyUnicodeObjectPtr
if tp_flags & Py_TPFLAGS_DICT_SUBCLASS:
return PyDictObjectPtr
if tp_flags & Py_TPFLAGS_BASE_EXC_SUBCLASS:
return PyBaseExceptionObjectPtr
# Use the base class:
return cls
@classmethod
def from_pyobject_ptr(cls, gdbval):
'''
Try to locate the appropriate derived class dynamically, and cast
the pointer accordingly.
'''
try:
p = PyObjectPtr(gdbval)
cls = cls.subclass_from_type(p.type())
return cls(gdbval, cast_to=cls.get_gdb_type())
except RuntimeError, exc:
# Handle any kind of error e.g. NULL ptrs by simply using the base
# class
pass
return cls(gdbval)
@classmethod
def get_gdb_type(cls):
return gdb.lookup_type(cls._typename).pointer()
def as_address(self):
return long(self._gdbval)
class PyVarObjectPtr(PyObjectPtr):
_typename = 'PyVarObject'
class ProxyAlreadyVisited(object):
'''
Placeholder proxy to use when protecting against infinite recursion due to
loops in the object graph.
Analogous to the values emitted by the users of Py_ReprEnter and Py_ReprLeave
'''
def __init__(self, rep):
self._rep = rep
def __repr__(self):
return self._rep
def _write_instance_repr(out, visited, name, pyop_attrdict, address):
'''Shared code for use by old-style and new-style classes:
write a representation to file-like object "out"'''
out.write('<')
out.write(name)
# Write dictionary of instance attributes:
if isinstance(pyop_attrdict, PyDictObjectPtr):
out.write('(')
first = True
for pyop_arg, pyop_val in pyop_attrdict.iteritems():
if not first:
out.write(', ')
first = False
out.write(pyop_arg.proxyval(visited))
out.write('=')
pyop_val.write_repr(out, visited)
out.write(')')
out.write(' at remote 0x%x>' % address)
class InstanceProxy(object):
def __init__(self, cl_name, attrdict, address):
self.cl_name = cl_name
self.attrdict = attrdict
self.address = address
def __repr__(self):
if isinstance(self.attrdict, dict):
kwargs = ', '.join(["%s=%r" % (arg, val)
for arg, val in self.attrdict.iteritems()])
return '<%s(%s) at remote 0x%x>' % (self.cl_name,
kwargs, self.address)
else:
return '<%s at remote 0x%x>' % (self.cl_name,
self.address)
def _PyObject_VAR_SIZE(typeobj, nitems):
return ( ( typeobj.field('tp_basicsize') +
nitems * typeobj.field('tp_itemsize') +
(SIZEOF_VOID_P - 1)
) & ~(SIZEOF_VOID_P - 1)
).cast(gdb.lookup_type('size_t'))
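# Illustrative sketch (added comment, not in the upstream file):
# _PyObject_VAR_SIZE above is the usual round-up-to-pointer-alignment trick.
# On a 64-bit build (SIZEOF_VOID_P == 8), tp_basicsize == 25, tp_itemsize == 2
# and nitems == 3 give 25 + 3*2 == 31 raw bytes, and (31 + 7) & ~7 == 32,
# i.e. the size is padded up to the next multiple of sizeof(void*).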
class PyTypeObjectPtr(PyObjectPtr):
_typename = 'PyTypeObject'
def get_attr_dict(self):
'''
Get the PyDictObject ptr representing the attribute dictionary
(or None if there's a problem)
'''
try:
typeobj = self.type()
dictoffset = int_from_int(typeobj.field('tp_dictoffset'))
if dictoffset != 0:
if dictoffset < 0:
type_PyVarObject_ptr = gdb.lookup_type('PyVarObject').pointer()
tsize = int_from_int(self._gdbval.cast(type_PyVarObject_ptr)['ob_size'])
if tsize < 0:
tsize = -tsize
size = _PyObject_VAR_SIZE(typeobj, tsize)
dictoffset += size
assert dictoffset > 0
assert dictoffset % SIZEOF_VOID_P == 0
dictptr = self._gdbval.cast(_type_char_ptr) + dictoffset
PyObjectPtrPtr = PyObjectPtr.get_gdb_type().pointer()
dictptr = dictptr.cast(PyObjectPtrPtr)
return PyObjectPtr.from_pyobject_ptr(dictptr.dereference())
except RuntimeError:
# Corrupt data somewhere; fail safe
pass
# Not found, or some kind of error:
return None
def proxyval(self, visited):
'''
Support for new-style classes.
Currently we just locate the dictionary using a transliteration to
python of _PyObject_GetDictPtr, ignoring descriptors
'''
# Guard against infinite loops:
if self.as_address() in visited:
return ProxyAlreadyVisited('<...>')
visited.add(self.as_address())
pyop_attr_dict = self.get_attr_dict()
if pyop_attr_dict:
attr_dict = pyop_attr_dict.proxyval(visited)
else:
attr_dict = {}
tp_name = self.safe_tp_name()
# New-style class:
return InstanceProxy(tp_name, attr_dict, long(self._gdbval))
def write_repr(self, out, visited):
# Guard against infinite loops:
if self.as_address() in visited:
out.write('<...>')
return
visited.add(self.as_address())
try:
tp_name = self.field('tp_name').string()
except RuntimeError:
tp_name = 'unknown'
out.write('<type %s at remote 0x%x>' % (tp_name,
self.as_address()))
# pyop_attrdict = self.get_attr_dict()
# _write_instance_repr(out, visited,
# self.safe_tp_name(), pyop_attrdict, self.as_address())
class ProxyException(Exception):
def __init__(self, tp_name, args):
self.tp_name = tp_name
self.args = args
def __repr__(self):
return '%s%r' % (self.tp_name, self.args)
class PyBaseExceptionObjectPtr(PyObjectPtr):
"""
Class wrapping a gdb.Value that's a PyBaseExceptionObject* i.e. an exception
within the process being debugged.
"""
_typename = 'PyBaseExceptionObject'
def proxyval(self, visited):
# Guard against infinite loops:
if self.as_address() in visited:
return ProxyAlreadyVisited('(...)')
visited.add(self.as_address())
arg_proxy = self.pyop_field('args').proxyval(visited)
return ProxyException(self.safe_tp_name(),
arg_proxy)
def write_repr(self, out, visited):
# Guard against infinite loops:
if self.as_address() in visited:
out.write('(...)')
return
visited.add(self.as_address())
out.write(self.safe_tp_name())
self.write_field_repr('args', out, visited)
class PyClassObjectPtr(PyObjectPtr):
"""
Class wrapping a gdb.Value that's a PyClassObject* i.e. a <classobj>
instance within the process being debugged.
"""
_typename = 'PyClassObject'
class BuiltInFunctionProxy(object):
def __init__(self, ml_name):
self.ml_name = ml_name
def __repr__(self):
return "<built-in function %s>" % self.ml_name
class BuiltInMethodProxy(object):
def __init__(self, ml_name, pyop_m_self):
self.ml_name = ml_name
self.pyop_m_self = pyop_m_self
def __repr__(self):
return ('<built-in method %s of %s object at remote 0x%x>'
% (self.ml_name,
self.pyop_m_self.safe_tp_name(),
self.pyop_m_self.as_address())
)
class PyCFunctionObjectPtr(PyObjectPtr):
"""
Class wrapping a gdb.Value that's a PyCFunctionObject*
(see Include/methodobject.h and Objects/methodobject.c)
"""
_typename = 'PyCFunctionObject'
def proxyval(self, visited):
m_ml = self.field('m_ml') # m_ml is a (PyMethodDef*)
ml_name = m_ml['ml_name'].string()
pyop_m_self = self.pyop_field('m_self')
if pyop_m_self.is_null():
return BuiltInFunctionProxy(ml_name)
else:
return BuiltInMethodProxy(ml_name, pyop_m_self)
class PyCodeObjectPtr(PyObjectPtr):
"""
Class wrapping a gdb.Value that's a PyCodeObject* i.e. a <code> instance
within the process being debugged.
"""
_typename = 'PyCodeObject'
def addr2line(self, addrq):
'''
Get the line number for a given bytecode offset
Analogous to PyCode_Addr2Line; translated from pseudocode in
Objects/lnotab_notes.txt
'''
co_lnotab = self.pyop_field('co_lnotab').proxyval(set())
# Initialize lineno to co_firstlineno as per PyCode_Addr2Line
# not 0, as lnotab_notes.txt has it:
lineno = int_from_int(self.field('co_firstlineno'))
addr = 0
for addr_incr, line_incr in zip(co_lnotab[::2], co_lnotab[1::2]):
addr += ord(addr_incr)
if addr > addrq:
return lineno
lineno += ord(line_incr)
return lineno
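# Illustrative sketch (added comment, not in the upstream file): co_lnotab is
# a string of (addr_increment, line_increment) byte pairs.  With
# co_firstlineno == 10 and co_lnotab == '\x06\x01\x08\x02', the loop in
# addr2line above maps
#     bytecode offsets 0..5   -> line 10
#     bytecode offsets 6..13  -> line 11
#     bytecode offsets 14..   -> line 13
# so addr2line(7) returns 11.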
class PyDictObjectPtr(PyObjectPtr):
"""
Class wrapping a gdb.Value that's a PyDictObject* i.e. a dict instance
within the process being debugged.
"""
_typename = 'PyDictObject'
def iteritems(self):
'''
Yields a sequence of (PyObjectPtr key, PyObjectPtr value) pairs,
analogous to dict.iteritems()
'''
for i in safe_range(self.field('ma_mask') + 1):
ep = self.field('ma_table') + i
pyop_value = PyObjectPtr.from_pyobject_ptr(ep['me_value'])
if not pyop_value.is_null():
pyop_key = PyObjectPtr.from_pyobject_ptr(ep['me_key'])
yield (pyop_key, pyop_value)
def proxyval(self, visited):
# Guard against infinite loops:
if self.as_address() in visited:
return ProxyAlreadyVisited('{...}')
visited.add(self.as_address())
result = {}
for pyop_key, pyop_value in self.iteritems():
proxy_key = pyop_key.proxyval(visited)
proxy_value = pyop_value.proxyval(visited)
result[proxy_key] = proxy_value
return result
def write_repr(self, out, visited):
# Guard against infinite loops:
if self.as_address() in visited:
out.write('{...}')
return
visited.add(self.as_address())
out.write('{')
first = True
for pyop_key, pyop_value in self.iteritems():
if not first:
out.write(', ')
first = False
pyop_key.write_repr(out, visited)
out.write(': ')
pyop_value.write_repr(out, visited)
out.write('}')
class PyInstanceObjectPtr(PyObjectPtr):
_typename = 'PyInstanceObject'
def proxyval(self, visited):
# Guard against infinite loops:
if self.as_address() in visited:
return ProxyAlreadyVisited('<...>')
visited.add(self.as_address())
# Get name of class:
in_class = self.pyop_field('in_class')
cl_name = in_class.pyop_field('cl_name').proxyval(visited)
# Get dictionary of instance attributes:
in_dict = self.pyop_field('in_dict').proxyval(visited)
# Old-style class:
return InstanceProxy(cl_name, in_dict, long(self._gdbval))
def write_repr(self, out, visited):
# Guard against infinite loops:
if self.as_address() in visited:
out.write('<...>')
return
visited.add(self.as_address())
# Old-style class:
# Get name of class:
in_class = self.pyop_field('in_class')
cl_name = in_class.pyop_field('cl_name').proxyval(visited)
# Get dictionary of instance attributes:
pyop_in_dict = self.pyop_field('in_dict')
_write_instance_repr(out, visited,
cl_name, pyop_in_dict, self.as_address())
class PyIntObjectPtr(PyObjectPtr):
_typename = 'PyIntObject'
def proxyval(self, visited):
result = int_from_int(self.field('ob_ival'))
return result
class PyListObjectPtr(PyObjectPtr):
_typename = 'PyListObject'
def __getitem__(self, i):
# Get the gdb.Value for the (PyObject*) with the given index:
field_ob_item = self.field('ob_item')
return field_ob_item[i]
def proxyval(self, visited):
# Guard against infinite loops:
if self.as_address() in visited:
return ProxyAlreadyVisited('[...]')
visited.add(self.as_address())
result = [PyObjectPtr.from_pyobject_ptr(self[i]).proxyval(visited)
for i in safe_range(int_from_int(self.field('ob_size')))]
return result
def write_repr(self, out, visited):
# Guard against infinite loops:
if self.as_address() in visited:
out.write('[...]')
return
visited.add(self.as_address())
out.write('[')
for i in safe_range(int_from_int(self.field('ob_size'))):
if i > 0:
out.write(', ')
element = PyObjectPtr.from_pyobject_ptr(self[i])
element.write_repr(out, visited)
out.write(']')
class PyLongObjectPtr(PyObjectPtr):
_typename = 'PyLongObject'
def proxyval(self, visited):
'''
Python's Include/longintrepr.h has this declaration:
struct _longobject {
PyObject_VAR_HEAD
digit ob_digit[1];
};
with this description:
The absolute value of a number is equal to
SUM(for i=0 through abs(ob_size)-1) ob_digit[i] * 2**(SHIFT*i)
Negative numbers are represented with ob_size < 0;
zero is represented by ob_size == 0.
where SHIFT can be either:
#define PyLong_SHIFT 30
#define PyLong_SHIFT 15
'''
ob_size = long(self.field('ob_size'))
if ob_size == 0:
return 0L
ob_digit = self.field('ob_digit')
if gdb.lookup_type('digit').sizeof == 2:
SHIFT = 15L
else:
SHIFT = 30L
digits = [long(ob_digit[i]) * 2**(SHIFT*i)
for i in safe_range(abs(ob_size))]
result = sum(digits)
if ob_size < 0:
result = -result
return result
def write_repr(self, out, visited):
# Write this out as a Python 3 int literal, i.e. without the "L" suffix
proxy = self.proxyval(visited)
out.write("%s" % proxy)
class PyBoolObjectPtr(PyLongObjectPtr):
"""
Class wrapping a gdb.Value that's a PyBoolObject* i.e. one of the two
<bool> instances (Py_True/Py_False) within the process being debugged.
"""
_typename = 'PyBoolObject'
def proxyval(self, visited):
castto = gdb.lookup_type('PyLongObject').pointer()
self._gdbval = self._gdbval.cast(castto)
return bool(PyLongObjectPtr(self._gdbval).proxyval(visited))
class PyNoneStructPtr(PyObjectPtr):
"""
Class wrapping a gdb.Value that's a PyObject* pointing to the
singleton (we hope) _Py_NoneStruct with ob_type PyNone_Type
"""
_typename = 'PyObject'
def proxyval(self, visited):
return None
class PyFrameObjectPtr(PyObjectPtr):
_typename = 'PyFrameObject'
def __init__(self, gdbval, cast_to=None):
PyObjectPtr.__init__(self, gdbval, cast_to)
if not self.is_optimized_out():
self.co = PyCodeObjectPtr.from_pyobject_ptr(self.field('f_code'))
self.co_name = self.co.pyop_field('co_name')
self.co_filename = self.co.pyop_field('co_filename')
self.f_lineno = int_from_int(self.field('f_lineno'))
self.f_lasti = int_from_int(self.field('f_lasti'))
self.co_nlocals = int_from_int(self.co.field('co_nlocals'))
self.co_varnames = PyTupleObjectPtr.from_pyobject_ptr(self.co.field('co_varnames'))
def iter_locals(self):
'''
Yield a sequence of (name,value) pairs of PyObjectPtr instances, for
the local variables of this frame
'''
if self.is_optimized_out():
return
f_localsplus = self.field('f_localsplus')
for i in safe_range(self.co_nlocals):
pyop_value = PyObjectPtr.from_pyobject_ptr(f_localsplus[i])
if not pyop_value.is_null():
pyop_name = PyObjectPtr.from_pyobject_ptr(self.co_varnames[i])
yield (pyop_name, pyop_value)
def iter_globals(self):
'''
Yield a sequence of (name,value) pairs of PyObjectPtr instances, for
the global variables of this frame
'''
if self.is_optimized_out():
return
pyop_globals = self.pyop_field('f_globals')
return pyop_globals.iteritems()
def iter_builtins(self):
'''
Yield a sequence of (name,value) pairs of PyObjectPtr instances, for
the builtin variables
'''
if self.is_optimized_out():
return
pyop_builtins = self.pyop_field('f_builtins')
return pyop_builtins.iteritems()
def get_var_by_name(self, name):
'''
Look for the named local variable, returning a (PyObjectPtr, scope) pair
where scope is a string 'local', 'global', 'builtin'
If not found, return (None, None)
'''
for pyop_name, pyop_value in self.iter_locals():
if name == pyop_name.proxyval(set()):
return pyop_value, 'local'
for pyop_name, pyop_value in self.iter_globals():
if name == pyop_name.proxyval(set()):
return pyop_value, 'global'
for pyop_name, pyop_value in self.iter_builtins():
if name == pyop_name.proxyval(set()):
return pyop_value, 'builtin'
return None, None
def filename(self):
'''Get the path of the current Python source file, as a string'''
if self.is_optimized_out():
return '(frame information optimized out)'
return self.co_filename.proxyval(set())
def current_line_num(self):
'''Get current line number as an integer (1-based)
Translated from PyFrame_GetLineNumber and PyCode_Addr2Line
See Objects/lnotab_notes.txt
'''
if self.is_optimized_out():
return None
f_trace = self.field('f_trace')
if long(f_trace) != 0:
# we have a non-NULL f_trace:
return self.f_lineno
else:
#try:
return self.co.addr2line(self.f_lasti)
#except ValueError:
# return self.f_lineno
def current_line(self):
'''Get the text of the current source line as a string, with a trailing
newline character'''
if self.is_optimized_out():
return '(frame information optimized out)'
filename = self.filename()
with open(os_fsencode(filename), 'r') as f:
all_lines = f.readlines()
# Convert from 1-based current_line_num to 0-based list offset:
return all_lines[self.current_line_num()-1]
def write_repr(self, out, visited):
if self.is_optimized_out():
out.write('(frame information optimized out)')
return
out.write('Frame 0x%x, for file %s, line %i, in %s ('
% (self.as_address(),
self.co_filename.proxyval(visited),
self.current_line_num(),
self.co_name.proxyval(visited)))
first = True
for pyop_name, pyop_value in self.iter_locals():
if not first:
out.write(', ')
first = False
out.write(pyop_name.proxyval(visited))
out.write('=')
pyop_value.write_repr(out, visited)
out.write(')')
class PySetObjectPtr(PyObjectPtr):
_typename = 'PySetObject'
def proxyval(self, visited):
# Guard against infinite loops:
if self.as_address() in visited:
return ProxyAlreadyVisited('%s(...)' % self.safe_tp_name())
visited.add(self.as_address())
members = []
table = self.field('table')
for i in safe_range(self.field('mask')+1):
setentry = table[i]
key = setentry['key']
if key != 0:
key_proxy = PyObjectPtr.from_pyobject_ptr(key).proxyval(visited)
if key_proxy != '<dummy key>':
members.append(key_proxy)
if self.safe_tp_name() == 'frozenset':
return frozenset(members)
else:
return set(members)
def write_repr(self, out, visited):
# Emulate Python 3's set_repr
tp_name = self.safe_tp_name()
# Guard against infinite loops:
if self.as_address() in visited:
out.write('(...)')
return
visited.add(self.as_address())
# Python 3's set_repr special-cases the empty set:
if not self.field('used'):
out.write(tp_name)
out.write('()')
return
# Python 3 uses {} for set literals:
if tp_name != 'set':
out.write(tp_name)
out.write('(')
out.write('{')
first = True
table = self.field('table')
for i in safe_range(self.field('mask')+1):
setentry = table[i]
key = setentry['key']
if key != 0:
pyop_key = PyObjectPtr.from_pyobject_ptr(key)
key_proxy = pyop_key.proxyval(visited) # FIXME!
if key_proxy != '<dummy key>':
if not first:
out.write(', ')
first = False
pyop_key.write_repr(out, visited)
out.write('}')
if tp_name != 'set':
out.write(')')
class PyBytesObjectPtr(PyObjectPtr):
_typename = 'PyBytesObject'
def __str__(self):
field_ob_size = self.field('ob_size')
field_ob_sval = self.field('ob_sval')
return ''.join(struct.pack('b', field_ob_sval[i])
for i in safe_range(field_ob_size))
def proxyval(self, visited):
return str(self)
def write_repr(self, out, visited, py3=True):
# Write this out as a Python 3 bytes literal, i.e. with a "b" prefix
# Get a PyStringObject* within the Python 2 gdb process:
proxy = self.proxyval(visited)
# Transliteration of Python 3's Objects/bytesobject.c:PyBytes_Repr
# to Python 2 code:
quote = "'"
if "'" in proxy and not '"' in proxy:
quote = '"'
if py3:
out.write('b')
out.write(quote)
for byte in proxy:
if byte == quote or byte == '\\':
out.write('\\')
out.write(byte)
elif byte == '\t':
out.write('\\t')
elif byte == '\n':
out.write('\\n')
elif byte == '\r':
out.write('\\r')
elif byte < ' ' or ord(byte) >= 0x7f:
out.write('\\x')
out.write(hexdigits[(ord(byte) & 0xf0) >> 4])
out.write(hexdigits[ord(byte) & 0xf])
else:
out.write(byte)
out.write(quote)
class PyStringObjectPtr(PyBytesObjectPtr):
_typename = 'PyStringObject'
def write_repr(self, out, visited):
return super(PyStringObjectPtr, self).write_repr(out, visited, py3=False)
class PyTupleObjectPtr(PyObjectPtr):
_typename = 'PyTupleObject'
def __getitem__(self, i):
# Get the gdb.Value for the (PyObject*) with the given index:
field_ob_item = self.field('ob_item')
return field_ob_item[i]
def proxyval(self, visited):
# Guard against infinite loops:
if self.as_address() in visited:
return ProxyAlreadyVisited('(...)')
visited.add(self.as_address())
result = tuple([PyObjectPtr.from_pyobject_ptr(self[i]).proxyval(visited)
for i in safe_range(int_from_int(self.field('ob_size')))])
return result
def write_repr(self, out, visited):
# Guard against infinite loops:
if self.as_address() in visited:
out.write('(...)')
return
visited.add(self.as_address())
out.write('(')
for i in safe_range(int_from_int(self.field('ob_size'))):
if i > 0:
out.write(', ')
element = PyObjectPtr.from_pyobject_ptr(self[i])
element.write_repr(out, visited)
if self.field('ob_size') == 1:
out.write(',)')
else:
out.write(')')
def _unichr_is_printable(char):
# Logic adapted from Python 3's Tools/unicode/makeunicodedata.py
if char == u" ":
return True
import unicodedata
return unicodedata.category(char) not in ("C", "Z")
if sys.maxunicode >= 0x10000:
_unichr = unichr
else:
# Needed for proper surrogate support if sizeof(Py_UNICODE) is 2 in gdb
def _unichr(x):
if x < 0x10000:
return unichr(x)
x -= 0x10000
ch1 = 0xD800 | (x >> 10)
ch2 = 0xDC00 | (x & 0x3FF)
return unichr(ch1) + unichr(ch2)
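# Illustrative sketch (added comment, not in the upstream file): on a narrow
# build, _unichr above turns U+1F600 into the surrogate pair
#     0xD800 | ((0x1F600 - 0x10000) >> 10)   == 0xD83D
#     0xDC00 | ((0x1F600 - 0x10000) & 0x3FF) == 0xDE00
# i.e. u'\ud83d\ude00', which decodes back to the single code point U+1F600.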
class PyUnicodeObjectPtr(PyObjectPtr):
_typename = 'PyUnicodeObject'
def char_width(self):
_type_Py_UNICODE = gdb.lookup_type('Py_UNICODE')
return _type_Py_UNICODE.sizeof
def proxyval(self, visited):
# From unicodeobject.h:
# Py_ssize_t length; /* Length of raw Unicode data in buffer */
# Py_UNICODE *str; /* Raw Unicode buffer */
field_length = long(self.field('length'))
field_str = self.field('str')
# Gather a list of ints from the Py_UNICODE array; these are either
# UCS-2 or UCS-4 code points:
if self.char_width() > 2:
Py_UNICODEs = [int(field_str[i]) for i in safe_range(field_length)]
else:
# A more elaborate routine if sizeof(Py_UNICODE) is 2 in the
# inferior process: we must join surrogate pairs.
Py_UNICODEs = []
i = 0
limit = safety_limit(field_length)
while i < limit:
ucs = int(field_str[i])
i += 1
if ucs < 0xD800 or ucs >= 0xDC00 or i == field_length:
Py_UNICODEs.append(ucs)
continue
# This could be a surrogate pair.
ucs2 = int(field_str[i])
if ucs2 < 0xDC00 or ucs2 > 0xDFFF:
continue
code = (ucs & 0x03FF) << 10
code |= ucs2 & 0x03FF
code += 0x00010000
Py_UNICODEs.append(code)
i += 1
# Convert the int code points to unicode characters, and generate a
# local unicode instance.
# This splits surrogate pairs if sizeof(Py_UNICODE) is 2 here (in gdb).
result = u''.join([_unichr(ucs) for ucs in Py_UNICODEs])
return result
def write_repr(self, out, visited):
# Get a PyUnicodeObject* within the Python 2 gdb process:
proxy = self.proxyval(visited)
# Transliteration of Python 3's Objects/unicodeobject.c:unicode_repr
# to Python 2:
try:
gdb.parse_and_eval('PyString_Type')
except RuntimeError:
# Python 3, don't write 'u' as prefix
pass
else:
# Python 2, write the 'u'
out.write('u')
if "'" in proxy and '"' not in proxy:
quote = '"'
else:
quote = "'"
out.write(quote)
i = 0
while i < len(proxy):
ch = proxy[i]
i += 1
# Escape quotes and backslashes
if ch == quote or ch == '\\':
out.write('\\')
out.write(ch)
# Map special whitespace to '\t', \n', '\r'
elif ch == '\t':
out.write('\\t')
elif ch == '\n':
out.write('\\n')
elif ch == '\r':
out.write('\\r')
# Map non-printable US ASCII to '\xhh'
            elif ch < ' ' or ch == chr(0x7F):
out.write('\\x')
out.write(hexdigits[(ord(ch) >> 4) & 0x000F])
out.write(hexdigits[ord(ch) & 0x000F])
# Copy ASCII characters as-is
elif ord(ch) < 0x7F:
out.write(ch)
# Non-ASCII characters
else:
ucs = ch
ch2 = None
if sys.maxunicode < 0x10000:
# If sizeof(Py_UNICODE) is 2 here (in gdb), join
# surrogate pairs before calling _unichr_is_printable.
if (i < len(proxy)
and 0xD800 <= ord(ch) < 0xDC00 \
and 0xDC00 <= ord(proxy[i]) <= 0xDFFF):
ch2 = proxy[i]
ucs = ch + ch2
i += 1
                    # Unfortunately, Python 2's unicode type doesn't seem
# to expose the "isprintable" method
printable = _unichr_is_printable(ucs)
if printable:
try:
ucs.encode(ENCODING)
except UnicodeEncodeError:
printable = False
# Map Unicode whitespace and control characters
# (categories Z* and C* except ASCII space)
if not printable:
if ch2 is not None:
# Match Python 3's representation of non-printable
# wide characters.
code = (ord(ch) & 0x03FF) << 10
code |= ord(ch2) & 0x03FF
code += 0x00010000
else:
code = ord(ucs)
# Map 8-bit characters to '\\xhh'
if code <= 0xff:
out.write('\\x')
out.write(hexdigits[(code >> 4) & 0x000F])
out.write(hexdigits[code & 0x000F])
# Map 21-bit characters to '\U00xxxxxx'
elif code >= 0x10000:
out.write('\\U')
out.write(hexdigits[(code >> 28) & 0x0000000F])
out.write(hexdigits[(code >> 24) & 0x0000000F])
out.write(hexdigits[(code >> 20) & 0x0000000F])
out.write(hexdigits[(code >> 16) & 0x0000000F])
out.write(hexdigits[(code >> 12) & 0x0000000F])
out.write(hexdigits[(code >> 8) & 0x0000000F])
out.write(hexdigits[(code >> 4) & 0x0000000F])
out.write(hexdigits[code & 0x0000000F])
# Map 16-bit characters to '\uxxxx'
else:
out.write('\\u')
out.write(hexdigits[(code >> 12) & 0x000F])
out.write(hexdigits[(code >> 8) & 0x000F])
out.write(hexdigits[(code >> 4) & 0x000F])
out.write(hexdigits[code & 0x000F])
else:
# Copy characters as-is
out.write(ch)
if ch2 is not None:
out.write(ch2)
out.write(quote)
def __unicode__(self):
return self.proxyval(set())
def __str__(self):
# In Python 3, everything is unicode (including attributes of e.g.
# code objects, such as function names). The Python 2 debugger code
        # uses PyUnicodeObjectPtr objects to format strings etc, whereas with a
# Python 2 debuggee we'd get PyStringObjectPtr instances with __str__.
# Be compatible with that.
return unicode(self).encode('UTF-8')
def int_from_int(gdbval):
return int(str(gdbval))
def stringify(val):
# TODO: repr() puts everything on one line; pformat can be nicer, but
    # can lead to very long results; this function isolates the choice
if True:
return repr(val)
else:
from pprint import pformat
return pformat(val)
class PyObjectPtrPrinter:
"Prints a (PyObject*)"
def __init__ (self, gdbval):
self.gdbval = gdbval
def to_string (self):
pyop = PyObjectPtr.from_pyobject_ptr(self.gdbval)
if True:
return pyop.get_truncated_repr(MAX_OUTPUT_LEN)
else:
# Generate full proxy value then stringify it.
# Doing so could be expensive
proxyval = pyop.proxyval(set())
return stringify(proxyval)
def pretty_printer_lookup(gdbval):
type = gdbval.type.unqualified()
if type.code == gdb.TYPE_CODE_PTR:
type = type.target().unqualified()
if str(type) in all_pretty_typenames:
return PyObjectPtrPrinter(gdbval)
"""
During development, I've been manually invoking the code in this way:
(gdb) python
import sys
sys.path.append('/home/david/coding/python-gdb')
import libpython
end
then reloading it after each edit like this:
(gdb) python reload(libpython)
The following code should ensure that the prettyprinter is registered
if the code is autoloaded by gdb when visiting libpython.so, provided
that this python file is installed to the same path as the library (or its
.debug file) plus a "-gdb.py" suffix, e.g:
/usr/lib/libpython2.6.so.1.0-gdb.py
/usr/lib/debug/usr/lib/libpython2.6.so.1.0.debug-gdb.py
"""
def register (obj):
if obj == None:
obj = gdb
# Wire up the pretty-printer
obj.pretty_printers.append(pretty_printer_lookup)
register (gdb.current_objfile ())
# Unfortunately, the exact API exposed by the gdb module varies somewhat
# from build to build
# See http://bugs.python.org/issue8279?#msg102276
class Frame(object):
'''
Wrapper for gdb.Frame, adding various methods
'''
def __init__(self, gdbframe):
self._gdbframe = gdbframe
def older(self):
older = self._gdbframe.older()
if older:
return Frame(older)
else:
return None
def newer(self):
newer = self._gdbframe.newer()
if newer:
return Frame(newer)
else:
return None
def select(self):
'''If supported, select this frame and return True; return False if unsupported
Not all builds have a gdb.Frame.select method; seems to be present on Fedora 12
onwards, but absent on Ubuntu buildbot'''
if not hasattr(self._gdbframe, 'select'):
print ('Unable to select frame: '
'this build of gdb does not expose a gdb.Frame.select method')
return False
self._gdbframe.select()
return True
def get_index(self):
'''Calculate index of frame, starting at 0 for the newest frame within
this thread'''
index = 0
# Go down until you reach the newest frame:
iter_frame = self
while iter_frame.newer():
index += 1
iter_frame = iter_frame.newer()
return index
def is_evalframeex(self):
'''Is this a PyEval_EvalFrameEx frame?'''
if self._gdbframe.name() == 'PyEval_EvalFrameEx':
'''
I believe we also need to filter on the inline
struct frame_id.inline_depth, only regarding frames with
an inline depth of 0 as actually being this function
So we reject those with type gdb.INLINE_FRAME
'''
if self._gdbframe.type() == gdb.NORMAL_FRAME:
# We have a PyEval_EvalFrameEx frame:
return True
return False
def read_var(self, varname):
"""
read_var with respect to code blocks (gdbframe.read_var works with
respect to the most recent block)
        Apparently this function doesn't work reliably, though: it sometimes
        seems to read variables from other frames as well.
"""
block = self._gdbframe.block()
var = None
while block and var is None:
try:
var = self._gdbframe.read_var(varname, block)
except ValueError:
pass
block = block.superblock
return var
def get_pyop(self):
try:
# self.read_var does not always work properly, so select our frame
# and restore the previously selected frame
selected_frame = gdb.selected_frame()
self._gdbframe.select()
f = gdb.parse_and_eval('f')
selected_frame.select()
except RuntimeError:
return None
else:
return PyFrameObjectPtr.from_pyobject_ptr(f)
@classmethod
def get_selected_frame(cls):
_gdbframe = gdb.selected_frame()
if _gdbframe:
return Frame(_gdbframe)
return None
@classmethod
def get_selected_python_frame(cls):
'''Try to obtain the Frame for the python code in the selected frame,
or None'''
frame = cls.get_selected_frame()
while frame:
if frame.is_evalframeex():
return frame
frame = frame.older()
# Not found:
return None
def print_summary(self):
if self.is_evalframeex():
pyop = self.get_pyop()
if pyop:
line = pyop.get_truncated_repr(MAX_OUTPUT_LEN)
write_unicode(sys.stdout, '#%i %s\n' % (self.get_index(), line))
sys.stdout.write(pyop.current_line())
else:
sys.stdout.write('#%i (unable to read python frame information)\n' % self.get_index())
else:
sys.stdout.write('#%i\n' % self.get_index())
class PyList(gdb.Command):
'''List the current Python source code, if any
Use
py-list START
to list at a different line number within the python source.
Use
py-list START, END
to list a specific range of lines within the python source.
'''
def __init__(self):
gdb.Command.__init__ (self,
"py-list",
gdb.COMMAND_FILES,
gdb.COMPLETE_NONE)
def invoke(self, args, from_tty):
import re
start = None
end = None
m = re.match(r'\s*(\d+)\s*', args)
if m:
start = int(m.group(0))
end = start + 10
m = re.match(r'\s*(\d+)\s*,\s*(\d+)\s*', args)
if m:
start, end = map(int, m.groups())
frame = Frame.get_selected_python_frame()
if not frame:
print 'Unable to locate python frame'
return
pyop = frame.get_pyop()
if not pyop:
print 'Unable to read information on python frame'
return
filename = pyop.filename()
lineno = pyop.current_line_num()
if start is None:
start = lineno - 5
end = lineno + 5
if start<1:
start = 1
with open(os_fsencode(filename), 'r') as f:
all_lines = f.readlines()
# start and end are 1-based, all_lines is 0-based;
# so [start-1:end] as a python slice gives us [start, end] as a
# closed interval
for i, line in enumerate(all_lines[start-1:end]):
linestr = str(i+start)
# Highlight current line:
if i + start == lineno:
linestr = '>' + linestr
sys.stdout.write('%4s %s' % (linestr, line))
# ...and register the command:
PyList()
def move_in_stack(move_up):
'''Move up or down the stack (for the py-up/py-down command)'''
frame = Frame.get_selected_python_frame()
while frame:
if move_up:
iter_frame = frame.older()
else:
iter_frame = frame.newer()
if not iter_frame:
break
if iter_frame.is_evalframeex():
# Result:
if iter_frame.select():
iter_frame.print_summary()
return
frame = iter_frame
if move_up:
print 'Unable to find an older python frame'
else:
print 'Unable to find a newer python frame'
class PyUp(gdb.Command):
'Select and print the python stack frame that called this one (if any)'
def __init__(self):
gdb.Command.__init__ (self,
"py-up",
gdb.COMMAND_STACK,
gdb.COMPLETE_NONE)
def invoke(self, args, from_tty):
move_in_stack(move_up=True)
class PyDown(gdb.Command):
'Select and print the python stack frame called by this one (if any)'
def __init__(self):
gdb.Command.__init__ (self,
"py-down",
gdb.COMMAND_STACK,
gdb.COMPLETE_NONE)
def invoke(self, args, from_tty):
move_in_stack(move_up=False)
# Not all builds of gdb have gdb.Frame.select
if hasattr(gdb.Frame, 'select'):
PyUp()
PyDown()
class PyBacktrace(gdb.Command):
'Display the current python frame and all the frames within its call stack (if any)'
def __init__(self):
gdb.Command.__init__ (self,
"py-bt",
gdb.COMMAND_STACK,
gdb.COMPLETE_NONE)
def invoke(self, args, from_tty):
frame = Frame.get_selected_python_frame()
while frame:
if frame.is_evalframeex():
frame.print_summary()
frame = frame.older()
PyBacktrace()
class PyPrint(gdb.Command):
'Look up the given python variable name, and print it'
def __init__(self):
gdb.Command.__init__ (self,
"py-print",
gdb.COMMAND_DATA,
gdb.COMPLETE_NONE)
def invoke(self, args, from_tty):
name = str(args)
frame = Frame.get_selected_python_frame()
if not frame:
print 'Unable to locate python frame'
return
pyop_frame = frame.get_pyop()
if not pyop_frame:
print 'Unable to read information on python frame'
return
pyop_var, scope = pyop_frame.get_var_by_name(name)
if pyop_var:
print ('%s %r = %s'
% (scope,
name,
pyop_var.get_truncated_repr(MAX_OUTPUT_LEN)))
else:
print '%r not found' % name
PyPrint()
class PyLocals(gdb.Command):
    'Print the local variables of the currently selected Python frame'
def invoke(self, args, from_tty):
name = str(args)
frame = Frame.get_selected_python_frame()
if not frame:
print 'Unable to locate python frame'
return
pyop_frame = frame.get_pyop()
if not pyop_frame:
print 'Unable to read information on python frame'
return
namespace = self.get_namespace(pyop_frame)
namespace = [(name.proxyval(set()), val) for name, val in namespace]
if namespace:
name, val = max(namespace, key=lambda (name, val): len(name))
max_name_length = len(name)
for name, pyop_value in namespace:
value = pyop_value.get_truncated_repr(MAX_OUTPUT_LEN)
print ('%-*s = %s' % (max_name_length, name, value))
def get_namespace(self, pyop_frame):
return pyop_frame.iter_locals()
class PyGlobals(PyLocals):
    'List all the globals in the currently selected Python frame'
def get_namespace(self, pyop_frame):
return pyop_frame.iter_globals()
PyLocals("py-locals", gdb.COMMAND_DATA, gdb.COMPLETE_NONE)
PyGlobals("py-globals", gdb.COMMAND_DATA, gdb.COMPLETE_NONE)
class PyNameEquals(gdb.Function):
def _get_pycurframe_attr(self, attr):
frame = Frame(gdb.selected_frame())
if frame.is_evalframeex():
pyframe = frame.get_pyop()
if pyframe is None:
warnings.warn("Use a Python debug build, Python breakpoints "
"won't work otherwise.")
return None
return getattr(pyframe, attr).proxyval(set())
return None
def invoke(self, funcname):
attr = self._get_pycurframe_attr('co_name')
return attr is not None and attr == funcname.string()
PyNameEquals("pyname_equals")
class PyModEquals(PyNameEquals):
def invoke(self, modname):
attr = self._get_pycurframe_attr('co_filename')
if attr is not None:
filename, ext = os.path.splitext(os.path.basename(attr))
return filename == modname.string()
return False
PyModEquals("pymod_equals")
class PyBreak(gdb.Command):
"""
Set a Python breakpoint. Examples:
Break on any function or method named 'func' in module 'modname'
py-break modname.func
Break on any function or method named 'func'
py-break func
"""
def invoke(self, funcname, from_tty):
if '.' in funcname:
modname, dot, funcname = funcname.rpartition('.')
cond = '$pyname_equals("%s") && $pymod_equals("%s")' % (funcname,
modname)
else:
cond = '$pyname_equals("%s")' % funcname
gdb.execute('break PyEval_EvalFrameEx if ' + cond)
PyBreak("py-break", gdb.COMMAND_RUNNING, gdb.COMPLETE_NONE)
class _LoggingState(object):
"""
State that helps to provide a reentrant gdb.execute() function.
"""
def __init__(self):
self.fd, self.filename = tempfile.mkstemp()
self.file = os.fdopen(self.fd, 'r+')
_execute("set logging file %s" % self.filename)
self.file_position_stack = []
atexit.register(os.close, self.fd)
atexit.register(os.remove, self.filename)
def __enter__(self):
if not self.file_position_stack:
_execute("set logging redirect on")
_execute("set logging on")
_execute("set pagination off")
self.file_position_stack.append(os.fstat(self.fd).st_size)
return self
def getoutput(self):
gdb.flush()
self.file.seek(self.file_position_stack[-1])
result = self.file.read()
return result
def __exit__(self, exc_type, exc_val, tb):
startpos = self.file_position_stack.pop()
self.file.seek(startpos)
self.file.truncate()
if not self.file_position_stack:
_execute("set logging off")
_execute("set logging redirect off")
_execute("set pagination on")
def execute(command, from_tty=False, to_string=False):
"""
Replace gdb.execute() with this function and have it accept a 'to_string'
argument (new in 7.2). Have it properly capture stderr also. Ensure
reentrancy.
"""
if to_string:
with _logging_state as state:
_execute(command, from_tty)
return state.getoutput()
else:
_execute(command, from_tty)
_execute = gdb.execute
gdb.execute = execute
_logging_state = _LoggingState()
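# Illustrative sketch (not part of the original module): with gdb.execute
# replaced above, the output of any gdb command can be captured as a string,
# even from code that itself runs under a captured gdb.execute call. The
# command used here is only an example.
def _example_capture_breakpoints():
    "Return the current breakpoint listing as a string (sketch)."
    return gdb.execute('info breakpoints', to_string=True)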
def get_selected_inferior():
"""
Return the selected inferior in gdb.
"""
# Woooh, another bug in gdb! Is there an end in sight?
# http://sourceware.org/bugzilla/show_bug.cgi?id=12212
return gdb.inferiors()[0]
selected_thread = gdb.selected_thread()
for inferior in gdb.inferiors():
for thread in inferior.threads():
if thread == selected_thread:
return inferior
def source_gdb_script(script_contents, to_string=False):
"""
Source a gdb script with script_contents passed as a string. This is useful
to provide defines for py-step and py-next to make them repeatable (this is
not possible with gdb.execute()). See
http://sourceware.org/bugzilla/show_bug.cgi?id=12216
"""
fd, filename = tempfile.mkstemp()
f = os.fdopen(fd, 'w')
f.write(script_contents)
f.close()
gdb.execute("source %s" % filename, to_string=to_string)
os.remove(filename)
def register_defines():
source_gdb_script(textwrap.dedent("""\
define py-step
-py-step
end
define py-next
-py-next
end
document py-step
%s
end
document py-next
%s
end
""") % (PyStep.__doc__, PyNext.__doc__))
def stackdepth(frame):
"Tells the stackdepth of a gdb frame."
depth = 0
while frame:
frame = frame.older()
depth += 1
return depth
class ExecutionControlCommandBase(gdb.Command):
"""
Superclass for language specific execution control. Language specific
features should be implemented by lang_info using the LanguageInfo
interface. 'name' is the name of the command.
"""
def __init__(self, name, lang_info):
super(ExecutionControlCommandBase, self).__init__(
name, gdb.COMMAND_RUNNING, gdb.COMPLETE_NONE)
self.lang_info = lang_info
def install_breakpoints(self):
all_locations = itertools.chain(
self.lang_info.static_break_functions(),
self.lang_info.runtime_break_functions())
for location in all_locations:
result = gdb.execute('break %s' % location, to_string=True)
yield re.search(r'Breakpoint (\d+)', result).group(1)
def delete_breakpoints(self, breakpoint_list):
for bp in breakpoint_list:
gdb.execute("delete %s" % bp)
def filter_output(self, result):
reflags = re.MULTILINE
output_on_halt = [
(r'^Program received signal .*', reflags|re.DOTALL),
(r'.*[Ww]arning.*', 0),
(r'^Program exited .*', reflags),
]
output_always = [
# output when halting on a watchpoint
(r'^(Old|New) value = .*', reflags),
# output from the 'display' command
(r'^\d+: \w+ = .*', reflags),
]
def filter_output(regexes):
output = []
for regex, flags in regexes:
for match in re.finditer(regex, result, flags):
output.append(match.group(0))
return '\n'.join(output)
# Filter the return value output of the 'finish' command
match_finish = re.search(r'^Value returned is \$\d+ = (.*)', result,
re.MULTILINE)
if match_finish:
finish_output = 'Value returned: %s\n' % match_finish.group(1)
else:
finish_output = ''
return (filter_output(output_on_halt),
finish_output + filter_output(output_always))
def stopped(self):
return get_selected_inferior().pid == 0
def finish_executing(self, result):
"""
After doing some kind of code running in the inferior, print the line
of source code or the result of the last executed gdb command (passed
in as the `result` argument).
"""
output_on_halt, output_always = self.filter_output(result)
if self.stopped():
print output_always
print output_on_halt
else:
frame = gdb.selected_frame()
source_line = self.lang_info.get_source_line(frame)
if self.lang_info.is_relevant_function(frame):
raised_exception = self.lang_info.exc_info(frame)
if raised_exception:
print raised_exception
if source_line:
if output_always.rstrip():
print output_always.rstrip()
print source_line
else:
print result
def _finish(self):
"""
Execute until the function returns (or until something else makes it
stop)
"""
if gdb.selected_frame().older() is not None:
return gdb.execute('finish', to_string=True)
else:
# outermost frame, continue
return gdb.execute('cont', to_string=True)
def _finish_frame(self):
"""
Execute until the function returns to a relevant caller.
"""
while True:
result = self._finish()
try:
frame = gdb.selected_frame()
except RuntimeError:
break
hitbp = re.search(r'Breakpoint (\d+)', result)
is_relevant = self.lang_info.is_relevant_function(frame)
if hitbp or is_relevant or self.stopped():
break
return result
def finish(self, *args):
"Implements the finish command."
result = self._finish_frame()
self.finish_executing(result)
def step(self, stepinto, stepover_command='next'):
"""
Do a single step or step-over. Returns the result of the last gdb
command that made execution stop.
This implementation, for stepping, sets (conditional) breakpoints for
all functions that are deemed relevant. It then does a step over until
either something halts execution, or until the next line is reached.
If, however, stepover_command is given, it should be a string gdb
command that continues execution in some way. The idea is that the
caller has set a (conditional) breakpoint or watchpoint that can work
more efficiently than the step-over loop. For Python this means setting
a watchpoint for f->f_lasti, which means we can then subsequently
"finish" frames.
We want f->f_lasti instead of f->f_lineno, because the latter only
works properly with local trace functions, see
PyFrameObjectPtr.current_line_num and PyFrameObjectPtr.addr2line.
"""
if stepinto:
breakpoint_list = list(self.install_breakpoints())
beginframe = gdb.selected_frame()
if self.lang_info.is_relevant_function(beginframe):
# If we start in a relevant frame, initialize stuff properly. If
# we don't start in a relevant frame, the loop will halt
# immediately. So don't call self.lang_info.lineno() as it may
# raise for irrelevant frames.
beginline = self.lang_info.lineno(beginframe)
if not stepinto:
depth = stackdepth(beginframe)
newframe = beginframe
while True:
if self.lang_info.is_relevant_function(newframe):
result = gdb.execute(stepover_command, to_string=True)
else:
result = self._finish_frame()
if self.stopped():
break
newframe = gdb.selected_frame()
is_relevant_function = self.lang_info.is_relevant_function(newframe)
try:
framename = newframe.name()
except RuntimeError:
framename = None
m = re.search(r'Breakpoint (\d+)', result)
if m:
if is_relevant_function and m.group(1) in breakpoint_list:
# although we hit a breakpoint, we still need to check
# that the function, in case hit by a runtime breakpoint,
# is in the right context
break
if newframe != beginframe:
# new function
if not stepinto:
# see if we returned to the caller
newdepth = stackdepth(newframe)
is_relevant_function = (newdepth < depth and
is_relevant_function)
if is_relevant_function:
break
else:
# newframe equals beginframe, check for a difference in the
# line number
lineno = self.lang_info.lineno(newframe)
if lineno and lineno != beginline:
break
if stepinto:
self.delete_breakpoints(breakpoint_list)
self.finish_executing(result)
def run(self, args, from_tty):
self.finish_executing(gdb.execute('run ' + args, to_string=True))
def cont(self, *args):
self.finish_executing(gdb.execute('cont', to_string=True))
class LanguageInfo(object):
"""
This class defines the interface that ExecutionControlCommandBase needs to
provide language-specific execution control.
Classes that implement this interface should implement:
lineno(frame)
Tells the current line number (only called for a relevant frame).
If lineno is a false value it is not checked for a difference.
is_relevant_function(frame)
tells whether we care about frame 'frame'
get_source_line(frame)
get the line of source code for the current line (only called for a
relevant frame). If the source code cannot be retrieved this
function should return None
exc_info(frame) -- optional
tells whether an exception was raised, if so, it should return a
string representation of the exception value, None otherwise.
static_break_functions()
returns an iterable of function names that are considered relevant
and should halt step-into execution. This is needed to provide a
            performant step-into.
runtime_break_functions() -- optional
list of functions that we should break into depending on the
context
"""
def exc_info(self, frame):
"See this class' docstring."
def runtime_break_functions(self):
"""
Implement this if the list of step-into functions depends on the
context.
"""
return ()
class PythonInfo(LanguageInfo):
def pyframe(self, frame):
pyframe = Frame(frame).get_pyop()
if pyframe:
return pyframe
else:
raise gdb.RuntimeError(
"Unable to find the Python frame, run your code with a debug "
"build (configure with --with-pydebug or compile with -g).")
def lineno(self, frame):
return self.pyframe(frame).current_line_num()
def is_relevant_function(self, frame):
return Frame(frame).is_evalframeex()
def get_source_line(self, frame):
try:
pyframe = self.pyframe(frame)
return '%4d %s' % (pyframe.current_line_num(),
pyframe.current_line().rstrip())
except IOError, e:
return None
def exc_info(self, frame):
try:
tstate = frame.read_var('tstate').dereference()
if gdb.parse_and_eval('tstate->frame == f'):
# tstate local variable initialized, check for an exception
inf_type = tstate['curexc_type']
inf_value = tstate['curexc_value']
if inf_type:
return 'An exception was raised: %s' % (inf_value,)
except (ValueError, RuntimeError), e:
            # Could not read the variable tstate or its memory; that's OK.
pass
def static_break_functions(self):
yield 'PyEval_EvalFrameEx'
class PythonStepperMixin(object):
"""
Make this a mixin so CyStep can also inherit from this and use a
CythonCodeStepper at the same time.
"""
def python_step(self, stepinto):
"""
Set a watchpoint on the Python bytecode instruction pointer and try
to finish the frame
"""
output = gdb.execute('watch f->f_lasti', to_string=True)
watchpoint = int(re.search(r'[Ww]atchpoint (\d+):', output).group(1))
self.step(stepinto=stepinto, stepover_command='finish')
gdb.execute('delete %s' % watchpoint)
class PyStep(ExecutionControlCommandBase, PythonStepperMixin):
"Step through Python code."
stepinto = True
def invoke(self, args, from_tty):
self.python_step(stepinto=self.stepinto)
class PyNext(PyStep):
"Step-over Python code."
stepinto = False
class PyFinish(ExecutionControlCommandBase):
"Execute until function returns to a caller."
invoke = ExecutionControlCommandBase.finish
class PyRun(ExecutionControlCommandBase):
"Run the program."
invoke = ExecutionControlCommandBase.run
class PyCont(ExecutionControlCommandBase):
invoke = ExecutionControlCommandBase.cont
def _pointervalue(gdbval):
"""
    Return the value of the pointer as a Python int.
gdbval.type must be a pointer type
"""
# don't convert with int() as it will raise a RuntimeError
if gdbval.address is not None:
return long(gdbval.address)
else:
# the address attribute is None sometimes, in which case we can
# still convert the pointer to an int
return long(gdbval)
def pointervalue(gdbval):
pointer = _pointervalue(gdbval)
try:
if pointer < 0:
raise gdb.GdbError("Negative pointer value, presumably a bug "
"in gdb, aborting.")
except RuntimeError:
# work around yet another bug in gdb where you get random behaviour
# and tracebacks
pass
return pointer
def get_inferior_unicode_postfix():
try:
gdb.parse_and_eval('PyUnicode_FromEncodedObject')
except RuntimeError:
try:
gdb.parse_and_eval('PyUnicodeUCS2_FromEncodedObject')
except RuntimeError:
return 'UCS4'
else:
return 'UCS2'
else:
return ''
class PythonCodeExecutor(object):
Py_single_input = 256
Py_file_input = 257
Py_eval_input = 258
def malloc(self, size):
chunk = (gdb.parse_and_eval("(void *) malloc((size_t) %d)" % size))
pointer = pointervalue(chunk)
if pointer == 0:
raise gdb.GdbError("No memory could be allocated in the inferior.")
return pointer
def alloc_string(self, string):
pointer = self.malloc(len(string))
get_selected_inferior().write_memory(pointer, string)
return pointer
def alloc_pystring(self, string):
stringp = self.alloc_string(string)
PyString_FromStringAndSize = 'PyString_FromStringAndSize'
try:
gdb.parse_and_eval(PyString_FromStringAndSize)
except RuntimeError:
# Python 3
PyString_FromStringAndSize = ('PyUnicode%s_FromStringAndSize' %
(get_inferior_unicode_postfix(),))
try:
result = gdb.parse_and_eval(
'(PyObject *) %s((char *) %d, (size_t) %d)' % (
PyString_FromStringAndSize, stringp, len(string)))
finally:
self.free(stringp)
pointer = pointervalue(result)
if pointer == 0:
raise gdb.GdbError("Unable to allocate Python string in "
"the inferior.")
return pointer
def free(self, pointer):
gdb.parse_and_eval("free((void *) %d)" % pointer)
def incref(self, pointer):
"Increment the reference count of a Python object in the inferior."
gdb.parse_and_eval('Py_IncRef((PyObject *) %d)' % pointer)
def xdecref(self, pointer):
"Decrement the reference count of a Python object in the inferior."
# Py_DecRef is like Py_XDECREF, but a function. So we don't have
# to check for NULL. This should also decref all our allocated
# Python strings.
gdb.parse_and_eval('Py_DecRef((PyObject *) %d)' % pointer)
def evalcode(self, code, input_type, global_dict=None, local_dict=None):
"""
Evaluate python code `code` given as a string in the inferior and
return the result as a gdb.Value. Returns a new reference in the
inferior.
Of course, executing any code in the inferior may be dangerous and may
        leave the debuggee in an unsafe state or terminate it altogether.
"""
if '\0' in code:
raise gdb.GdbError("String contains NUL byte.")
code += '\0'
pointer = self.alloc_string(code)
globalsp = pointervalue(global_dict)
localsp = pointervalue(local_dict)
if globalsp == 0 or localsp == 0:
raise gdb.GdbError("Unable to obtain or create locals or globals.")
code = """
PyRun_String(
(char *) %(code)d,
(int) %(start)d,
(PyObject *) %(globals)s,
(PyObject *) %(locals)d)
""" % dict(code=pointer, start=input_type,
globals=globalsp, locals=localsp)
with FetchAndRestoreError():
try:
pyobject_return_value = gdb.parse_and_eval(code)
finally:
self.free(pointer)
return pyobject_return_value
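# Illustrative usage sketch (not part of the original module): evaluating a
# short expression in the inferior with the executor above. The helper name is
# an assumption; the globals/locals pointers come from the inferior's current
# Python frame, exactly as _evalcode_python() below obtains them.
def _example_eval_expression(source):
    "Evaluate `source` in the inferior's current Python frame (sketch)."
    executor = PythonCodeExecutor()
    globals_p = gdb.parse_and_eval('PyEval_GetGlobals()')
    locals_p = gdb.parse_and_eval('PyEval_GetLocals()')
    result = executor.evalcode(source, PythonCodeExecutor.Py_eval_input,
                               globals_p, locals_p)
    # evalcode() returns a new reference in the inferior; drop it when done.
    executor.xdecref(pointervalue(result))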
class FetchAndRestoreError(PythonCodeExecutor):
"""
Context manager that fetches the error indicator in the inferior and
restores it on exit.
"""
def __init__(self):
self.sizeof_PyObjectPtr = gdb.lookup_type('PyObject').pointer().sizeof
self.pointer = self.malloc(self.sizeof_PyObjectPtr * 3)
type = self.pointer
value = self.pointer + self.sizeof_PyObjectPtr
traceback = self.pointer + self.sizeof_PyObjectPtr * 2
self.errstate = type, value, traceback
def __enter__(self):
gdb.parse_and_eval("PyErr_Fetch(%d, %d, %d)" % self.errstate)
def __exit__(self, *args):
if gdb.parse_and_eval("(int) PyErr_Occurred()"):
gdb.parse_and_eval("PyErr_Print()")
pyerr_restore = ("PyErr_Restore("
"(PyObject *) *%d,"
"(PyObject *) *%d,"
"(PyObject *) *%d)")
try:
gdb.parse_and_eval(pyerr_restore % self.errstate)
finally:
self.free(self.pointer)
class FixGdbCommand(gdb.Command):
def __init__(self, command, actual_command):
super(FixGdbCommand, self).__init__(command, gdb.COMMAND_DATA,
gdb.COMPLETE_NONE)
self.actual_command = actual_command
def fix_gdb(self):
"""
        It seems that invoking either 'cy exec' or 'py-exec' works perfectly
        fine, but afterwards gdb's python API is entirely broken.
Maybe some uncleared exception value is still set?
sys.exc_clear() didn't help. A demonstration:
(gdb) cy exec 'hello'
'hello'
(gdb) python gdb.execute('cont')
RuntimeError: Cannot convert value to int.
Error while executing Python code.
(gdb) python gdb.execute('cont')
[15148 refs]
Program exited normally.
"""
warnings.filterwarnings('ignore', r'.*', RuntimeWarning,
re.escape(__name__))
try:
long(gdb.parse_and_eval("(void *) 0")) == 0
except RuntimeError:
pass
# warnings.resetwarnings()
def invoke(self, args, from_tty):
self.fix_gdb()
try:
gdb.execute('%s %s' % (self.actual_command, args))
except RuntimeError, e:
raise gdb.GdbError(str(e))
self.fix_gdb()
def _evalcode_python(executor, code, input_type):
"""
Execute Python code in the most recent stack frame.
"""
global_dict = gdb.parse_and_eval('PyEval_GetGlobals()')
local_dict = gdb.parse_and_eval('PyEval_GetLocals()')
if (pointervalue(global_dict) == 0 or pointervalue(local_dict) == 0):
raise gdb.GdbError("Unable to find the locals or globals of the "
"most recent Python function (relative to the "
"selected frame).")
return executor.evalcode(code, input_type, global_dict, local_dict)
class PyExec(gdb.Command):
def readcode(self, expr):
if expr:
return expr, PythonCodeExecutor.Py_single_input
else:
lines = []
while True:
try:
line = raw_input('>')
except EOFError:
break
else:
if line.rstrip() == 'end':
break
lines.append(line)
return '\n'.join(lines), PythonCodeExecutor.Py_file_input
def invoke(self, expr, from_tty):
expr, input_type = self.readcode(expr)
executor = PythonCodeExecutor()
        executor.xdecref(_evalcode_python(executor, expr, input_type))
gdb.execute('set breakpoint pending on')
if hasattr(gdb, 'GdbError'):
# Wrap py-step and py-next in gdb defines to make them repeatable.
py_step = PyStep('-py-step', PythonInfo())
py_next = PyNext('-py-next', PythonInfo())
register_defines()
py_finish = PyFinish('py-finish', PythonInfo())
py_run = PyRun('py-run', PythonInfo())
py_cont = PyCont('py-cont', PythonInfo())
py_exec = FixGdbCommand('py-exec', '-py-exec')
_py_exec = PyExec("-py-exec", gdb.COMMAND_DATA, gdb.COMPLETE_NONE)
else:
warnings.warn("Use gdb 7.2 or higher to use the py-exec command.")
|
gymnasium/edx-platform
|
refs/heads/open-release/hawthorn.master
|
lms/djangoapps/courseware/tests/test_course_tools.py
|
13
|
"""
Unit tests for course tools.
"""
import crum
import datetime
from mock import patch
from nose.plugins.attrib import attr
import pytz
from django.test import RequestFactory
from course_modes.models import CourseMode
from course_modes.tests.factories import CourseModeFactory
from courseware.course_tools import VerifiedUpgradeTool
from courseware.models import DynamicUpgradeDeadlineConfiguration
from openedx.core.djangoapps.content.course_overviews.models import CourseOverview
from openedx.core.djangoapps.schedules.config import CREATE_SCHEDULE_WAFFLE_FLAG
from openedx.core.djangoapps.site_configuration.tests.factories import SiteFactory
from openedx.core.djangoapps.waffle_utils.testutils import override_waffle_flag
from student.tests.factories import CourseEnrollmentFactory, UserFactory
from xmodule.modulestore.tests.django_utils import SharedModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
@attr(shard=3)
class VerifiedUpgradeToolTest(SharedModuleStoreTestCase):
@classmethod
def setUpClass(cls):
super(VerifiedUpgradeToolTest, cls).setUpClass()
cls.now = datetime.datetime.now(pytz.UTC)
cls.course = CourseFactory.create(
org='edX',
number='test',
display_name='Test Course',
self_paced=True,
start=cls.now - datetime.timedelta(days=30),
)
cls.course_overview = CourseOverview.get_from_id(cls.course.id)
@override_waffle_flag(CREATE_SCHEDULE_WAFFLE_FLAG, True)
def setUp(self):
super(VerifiedUpgradeToolTest, self).setUp()
self.course_verified_mode = CourseModeFactory(
course_id=self.course.id,
mode_slug=CourseMode.VERIFIED,
expiration_datetime=self.now + datetime.timedelta(days=30),
)
patcher = patch('openedx.core.djangoapps.schedules.signals.get_current_site')
mock_get_current_site = patcher.start()
self.addCleanup(patcher.stop)
mock_get_current_site.return_value = SiteFactory.create()
DynamicUpgradeDeadlineConfiguration.objects.create(enabled=True)
self.enrollment = CourseEnrollmentFactory(
course_id=self.course.id,
mode=CourseMode.AUDIT,
course=self.course_overview,
)
self.request = RequestFactory().request()
self.request.user = self.enrollment.user
crum.set_current_request(self.request)
def test_tool_visible(self):
self.assertTrue(VerifiedUpgradeTool().is_enabled(self.request, self.course.id))
def test_not_visible_when_no_enrollment_exists(self):
self.enrollment.delete()
request = RequestFactory().request()
request.user = UserFactory()
        self.assertFalse(VerifiedUpgradeTool().is_enabled(request, self.course.id))
def test_not_visible_when_using_deadline_from_course_mode(self):
DynamicUpgradeDeadlineConfiguration.objects.create(enabled=False)
self.assertFalse(VerifiedUpgradeTool().is_enabled(self.request, self.course.id))
def test_not_visible_when_enrollment_is_inactive(self):
self.enrollment.is_active = False
self.enrollment.save()
self.assertFalse(VerifiedUpgradeTool().is_enabled(self.request, self.course.id))
def test_not_visible_when_already_verified(self):
self.enrollment.mode = CourseMode.VERIFIED
self.enrollment.save()
self.assertFalse(VerifiedUpgradeTool().is_enabled(self.request, self.course.id))
def test_not_visible_when_no_verified_track(self):
self.course_verified_mode.delete()
self.assertFalse(VerifiedUpgradeTool().is_enabled(self.request, self.course.id))
def test_not_visible_when_course_deadline_has_passed(self):
self.course_verified_mode.expiration_datetime = self.now - datetime.timedelta(days=1)
self.course_verified_mode.save()
self.assertFalse(VerifiedUpgradeTool().is_enabled(self.request, self.course.id))
def test_not_visible_when_course_mode_has_no_deadline(self):
self.course_verified_mode.expiration_datetime = None
self.course_verified_mode.save()
self.assertFalse(VerifiedUpgradeTool().is_enabled(self.request, self.course.id))
|
rlowrance/python_lib
|
refs/heads/master
|
applied_data_science3/dirutility.py
|
1
|
'''utilities for managing directories
Copyright 2017 Roy E. Lowrance
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import os
import pdb
import string
import unittest
def assure_exists(dir_path):
if not os.path.exists(dir_path):
os.makedirs(dir_path) # make all intermediate directories
return dir_path
def _make_invalid_characters():
'return translation table suitable for s.translate()'
bad_linux = '/\x00'
# bad_macos = ':/'
bad_ntfs = '"*:<>?\|' # plus 0x00 - 0x1F plus 0x7F
bad_os = bad_ntfs + bad_linux # bad_macos is in these two
low_codes = '\x01\x02\x03\x04\x05\x06\x07\x08\x09\x10\x11\x12\x13\x14\x15'
assert len(low_codes) == 15, low_codes
high_code = '\x7f'
return bad_os + low_codes + high_code
_invalid_characters = _make_invalid_characters()
def always_valid_filename(s, replacement_char='-'):
    '''return a new string that is acceptable to Linux, MacOS, and Windows (NTFS)
ref: https://en.wikipedia.org/wiki/Filename
'''
table = string.maketrans(_invalid_characters, replacement_char * len(_invalid_characters))
return s.translate(table)
class Test_always_valid_filename(unittest.TestCase):
def test1(self):
s = 'abc\x03/:"*<>?\\|'
result = always_valid_filename(s)
self.assertEqual('abc----------', result)
if __name__ == '__main__':
if False:
pdb
unittest.main()
|
andersbll/deeppy-website
|
refs/heads/gh-pages
|
_downloads/mlp_mnist.py
|
5
|
#!/usr/bin/env python
"""
Digit classification
====================
"""
import numpy as np
import matplotlib.pyplot as plt
import deeppy as dp
# Fetch MNIST data
dataset = dp.dataset.MNIST()
x_train, y_train, x_test, y_test = dataset.data(flat=True, dp_dtypes=True)
# Normalize pixel intensities
scaler = dp.StandardScaler()
x_train = scaler.fit_transform(x_train)
x_test = scaler.transform(x_test)
# Prepare network inputs
batch_size = 128
train_input = dp.SupervisedInput(x_train, y_train, batch_size=batch_size)
test_input = dp.Input(x_test)
# Setup network
weight_gain = 2.0
weight_decay = 0.0005
net = dp.NeuralNetwork(
layers=[
dp.FullyConnected(
n_out=1024,
weights=dp.Parameter(dp.AutoFiller(weight_gain),
weight_decay=weight_decay),
),
dp.Activation('relu'),
dp.FullyConnected(
n_out=1024,
weights=dp.Parameter(dp.AutoFiller(weight_gain),
weight_decay=weight_decay),
),
dp.Activation('relu'),
dp.FullyConnected(
n_out=dataset.n_classes,
weights=dp.Parameter(dp.AutoFiller()),
),
],
loss=dp.SoftmaxCrossEntropy(),
)
# Train network
n_epochs = [50, 15]
learn_rate = 0.05
for i, epochs in enumerate(n_epochs):
trainer = dp.StochasticGradientDescent(
max_epochs=epochs,
learn_rule=dp.Momentum(learn_rate=learn_rate/10**i, momentum=0.94),
)
trainer.train(net, train_input)
# Evaluate on test data
error = np.mean(net.predict(test_input) != y_test)
print('Test error rate: %.4f' % error)
# Plot dataset examples
def plot_img(img, title):
plt.figure()
plt.imshow(img, cmap='gray', interpolation='nearest')
plt.axis('off')
plt.title(title)
plt.tight_layout()
imgs = np.reshape(x_train[:63, ...], (-1, 28, 28))
plot_img(dp.misc.img_tile(dp.misc.img_stretch(imgs)),
'Dataset examples')
# Plot learned features in first layer
w = np.array(net.layers[0].weights.array)
w = np.reshape(w.T, (-1,) + dataset.img_shape)
w = w[np.argsort(np.std(w, axis=(1, 2)))[-64:]]
plot_img(dp.misc.img_tile(dp.misc.img_stretch(w)),
'Examples of features learned')
|
nsh87/medicare-claims-query-api
|
refs/heads/master
|
db/data_loader.py
|
1
|
"""Load the CMS 2008-2010 Medicare Beneficiary Summary tables into Postgres.
See https://www.cms.gov/Research-Statistics-Data-and-Systems/Downloadable-Public-Use-Files/SynPUFs/DE_Syn_PUF.html
for more info on the data.
This file is intended to be run during Vagrant or AWS provisioning.
See https://github.com/nsh87/medicare-claims-query-api for more info on setting
this up in your own environment.
Table "public.beneficiary_sample_2010"
Column | Type | Modifiers
----------------------------------------+----------------------+-----------
id | character(16) |
dob | date |
dod | date |
sex | sex |
race | race |
end_stage_renal_disease | boolean |
state | character varying(4) |
county_code | integer |
part_a_coverage_months | integer |
part_b_coverage_months | integer |
hmo_coverage_months | integer |
part_d_coverage_months | integer |
alzheimers_related_senile | boolean |
heart_failure | boolean |
chronic_kidney | boolean |
cancer | boolean |
chronic_obstructive_pulmonary | boolean |
depression | boolean |
diabetes | boolean |
ischemic_heart | boolean |
osteoporosis | boolean |
rheumatoid_osteo_arthritis | boolean |
stroke_ischemic_attack | boolean |
inpatient_reimbursement | integer |
inpatient_beneficiary_responsibility | integer |
inpatient_primary_payer_reimbursement | integer |
outpatient_reimbursement | integer |
outpatient_beneficiary_responsibility | integer |
outpatient_primary_payer_reimbursement | integer |
carrier_reimbursement | integer |
beneficiary_responsibility | integer |
primary_payer_reimbursement | integer |
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import csv
import glob
import io
import os
import sys
import urlparse
import zipfile
import psycopg2
import requests
# Need to append parent dir to path so you can import files in sister dirs
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..'))
from db import config as dbconfig
from core.utilities import cursor_connect
TABLE_NAME = dbconfig.db_tablename
# Parse arguments
argparser = argparse.ArgumentParser(
description="Load synthetic CMS 2010 summary beneficiary data into "
"Postgres.",
epilog="example: python data_loader.py --host localhost --dbname Nikhil "
"--user Nikhil")
argparser.add_argument("--host", required=True, help="location of database")
argparser.add_argument("--dbname", required=True, help="name of database")
argparser.add_argument("--user", required=True, help="user to access database")
argparser.add_argument("--password", required=False, help="password to connect")
args = argparser.parse_args()
# Declare URLs of CSV files to download
base_url = (
"https://www.cms.gov/Research-Statistics-Data-and-Systems/Downloadable"
"-Public-Use-Files/SynPUFs/Downloads/some_file.zip"
)
# Prep base filename, with 'XX' to be replaced by a two-digit number indicating
# which file to download.
base_filename = "DE1_0_2010_Beneficiary_Summary_File_Sample_XX.zip"
DATA_FILES = [
urlparse.urljoin(base_url, base_filename.replace('XX', '{0}').format(i))
for i in range(1, 21)]
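# Illustrative sketch (not part of the original loader): one way to query the
# table documented in the module docstring once the data has been loaded. The
# helper name and the GROUP BY column are assumptions chosen for the example;
# cursor_connect() and TABLE_NAME are taken from this module.
def example_count_by_state(dsn):
    """Return (state, row_count) tuples from the loaded beneficiary table."""
    con, cur = cursor_connect(dsn)
    try:
        cur.execute("SELECT state, COUNT(*) FROM {0} "
                    "GROUP BY state ORDER BY COUNT(*) DESC;".format(TABLE_NAME))
        return cur.fetchall()
    finally:
        cur.close()
        con.close()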
def download_zip(uri):
"""
    Download a zipped data file and return the unzipped file.
Parameters
----------
uri : str, unicode
The URI for the .zip file.
Returns
-------
zipfile.ZipExtFile
A file-like object holding the file contents. This should be read like
any other file, with one of `read()`, `readline()`, or `readlines()`
methods::
for line in f.readlines():
print line
"""
r = requests.get(uri)
if r.status_code == requests.codes.ok:
z = zipfile.ZipFile(io.BytesIO(r.content))
csv_file = z.namelist()[0]
f = z.open(csv_file)
else:
raise ValueError(
"Failed to get {0}. "
"Returned status code {1}.".format(uri, r.status_code))
return f
def drop_table():
"""
Drop the table specified by TABLE_NAME.
"""
con, cur = cursor_connect(db_dsn)
try:
sql = "DROP TABLE IF EXISTS {0};".format(TABLE_NAME)
cur.execute(sql)
except psycopg2.Error:
raise
else:
con.commit()
cur.close()
con.close()
def create_table():
"""
Create the table given by TABLE_NAME.
"""
con, cur = cursor_connect(db_dsn)
# Create new column types, like factors in R, to hold sex and race.
new_types = [
("CREATE TYPE sex AS ENUM ('male', 'female');",),
("CREATE TYPE race as ENUM ('white', 'black', 'others', 'hispanic');",),
]
for i, val in enumerate(new_types):
cmd = val[0]
try:
cur.execute(cmd)
except psycopg2.ProgrammingError as e:
# If the types already exist just continue on
if "already exists" in e.message:
con, cur = cursor_connect(db_dsn) # Re-create the connection
else:
cur.close()
con.close()
raise
try:
sql = ("CREATE TABLE {0} ("
"id CHAR(16) UNIQUE, "
"dob CHAR(8), " # These are converted to DATE later
"dod CHAR(8), " # These are converted to DATE later
"sex sex, "
"race race, "
"end_stage_renal_disease BOOLEAN, "
"state VARCHAR(4), "
"county_code INT, "
"part_a_coverage_months INT, "
"part_b_coverage_months INT, "
"hmo_coverage_months INT, "
"part_d_coverage_months INT, "
"alzheimers_related_senile BOOLEAN, "
"heart_failure BOOLEAN, "
"chronic_kidney BOOLEAN, "
"cancer BOOLEAN, "
"chronic_obstructive_pulmonary BOOLEAN, "
"depression BOOLEAN, "
"diabetes BOOLEAN, "
"ischemic_heart BOOLEAN, "
"osteoporosis BOOLEAN, "
"rheumatoid_osteo_arthritis BOOLEAN, "
"stroke_ischemic_attack BOOLEAN, "
"inpatient_reimbursement INT, "
"inpatient_beneficiary_responsibility INT, "
"inpatient_primary_payer_reimbursement INT, "
"outpatient_reimbursement INT, "
"outpatient_beneficiary_responsibility INT, "
"outpatient_primary_payer_reimbursement INT, "
"carrier_reimbursement INT, "
"beneficiary_responsibility INT, "
"primary_payer_reimbursement INT"
");".format(TABLE_NAME))
cur.execute(sql)
except psycopg2.Error:
raise
else:
con.commit()
cur.close()
con.close()
def load_csv(csv_file):
"""
Load data from a CSV file or file-like object into the database.
Parameters
----------
csv_file : str, unicode
        Path to a CSV file on disk, such as the file returned from prep_csv().
"""
con, cur = cursor_connect(db_dsn)
try:
with open(csv_file, 'r') as f:
cur.copy_from(f, TABLE_NAME, sep=',', null='')
except psycopg2.Error:
raise
else:
con.commit()
cur.close()
con.close()
def prep_csv(csv_file):
"""
Modifies the CMS Medicare data to get it ready to load in the DB.
Important modifications are transforming character columns to 0 and 1 for
import into BOOLEAN Postgres columns.
Parameters
----------
csv_file : zipfile.ZipExtFile
A CSV-like object returned from download_zip().
Returns
-------
str
Path to a prepared CSV file on disk.
"""
states = ('AL', 'AK', 'AZ', 'AR', 'CA', 'CO', 'CT', 'DE', 'DC',
'FL', 'GA', 'HI', 'ID', 'IL', 'IN', 'IA', 'KS', 'KY',
'LA', 'ME', 'MD', 'MA', 'MI', 'MN', 'MS', 'MO', 'MT',
'NE', 'NV', 'NH', 'NJ', 'NM', 'NY', 'NC', 'ND', 'OH',
'OK', 'OR', 'PA', '__', 'RI', 'SC', 'SD', 'TN', 'TX',
'UT', 'VT', '__', 'VA', 'WA', 'WV', 'WI', 'WY', 'Othr')
states_map = {}
for i, val in enumerate(states):
states_map[i + 1] = val
prepped_filename = 'prepped_medicare.csv'
reader = csv.reader(csv_file)
with open(prepped_filename, 'a') as f:
writer = csv.writer(f)
for row in reader:
# Transform state
row[6] = states_map[int(row[6])]
# Transform 'Y' for 'yes' into 1, for boolean
if row[5] == 'Y':
row[5] = '1'.encode('ascii')
# Transform sex into factors
sex = {'1': 'male'.encode('ascii'), '2': 'female'.encode('ascii')}
row[3] = sex[row[3]]
# Transform race into factors (note: there is no '4' value...)
race = {
'1': 'white'.encode('ascii'),
'2': 'black'.encode('ascii'),
'3': 'others'.encode('ascii'),
'5': 'hispanic'.encode('ascii')
}
row[4] = race[row[4]]
# Transform 'boolean' 1 and 2 into 0 and 1, for columns 12 - 22
boolean_transform = {
'1': '1'.encode('ascii'),
'2': '0'.encode('ascii')
}
for i in range(12, 23):
row[i] = boolean_transform[row[i]]
# Transform strings to floats to ints
for i in range(23, 32):
row[i] = str(int(float(row[i]))).encode('ascii')
writer.writerow(row)
return prepped_filename
def alter_col_types():
"""
Alter column types of the table to better suit the data.
For example, convert the character-represented-dates to type DATE.
"""
con, cur = cursor_connect(db_dsn)
try:
        # Get column names so you can index the 2nd and 3rd columns
sql = "SELECT * FROM {0} LIMIT 0;".format(TABLE_NAME)
cur.execute(sql)
colnames = [desc[0] for desc in cur.description]
cols = (colnames[1], colnames[2]) # DO-Birth and DO-Death
for col in cols:
sql = """
ALTER TABLE {0} ALTER COLUMN {1} TYPE DATE
USING to_date({1}, 'YYYYMMDD');
""".format(TABLE_NAME, col)
cur.execute(sql)
except psycopg2.Error:
raise
else:
con.commit()
cur.close()
con.close()
def verify_data_load():
"""
Verify that all the data was loaded into the DB.
"""
con, cur = cursor_connect(db_dsn)
try:
sql = "SELECT COUNT(*) FROM {0}".format(TABLE_NAME)
cur.execute(sql)
result = cur.fetchone()
num_rows = result[0]
except psycopg2.Error:
raise
else:
cur.close()
con.close()
expected_row_count = 2255098
if num_rows != expected_row_count:
raise AssertionError("{0} rows in DB. Should be {1}".format(
num_rows, expected_row_count))
print("Data load complete.")
if __name__ == '__main__':
    # Create the database's DSN to connect with using psycopg2
db_dsn = "host={0} dbname={1} user={2} password={3}".format(
args.host, args.dbname, args.user, args.password
)
# Delete any orphaned data file that might exist
try:
csv_files = glob.glob('*.csv')
for f in csv_files:
os.remove(f)
except:
pass
# Delete the table and recreate it if it exists
print("Dropping table.")
drop_table()
print("Creating table.")
create_table()
# Download the data and load it into the DB
try:
for uri in DATA_FILES:
print("Downloading {0}".format(uri.split('/')[-1]))
medicare_csv = download_zip(uri)
headers = medicare_csv.readline().replace('"', "").split(",")
print("Downloaded CSV contains {0} headers.".format(len(headers)))
prepped_csv = prep_csv(medicare_csv)
print("Loading data into database '{0}' at '{1}'.".format(
args.dbname, args.host))
load_csv(prepped_csv)
print("Altering columns.")
alter_col_types()
print("Verifying data load.")
verify_data_load()
except:
raise
finally:
try:
print("Deleting temporary data file.")
os.remove(prepped_csv)
except:
pass
|
nuobit/odoo-addons
|
refs/heads/11.0
|
stock_picking_partner_ref/__manifest__.py
|
1
|
# Copyright NuoBiT Solutions, S.L. (<https://www.nuobit.com>)
# Eric Antones <eantones@nuobit.com>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl)
{
'name': "Stock picking partner reference",
'summary': "Adds a partner reference on pickingg",
'author': 'NuoBiT Solutions, S.L., Eric Antones',
'category': 'Warehouse',
'version': '11.0.0.1.0',
'license': 'AGPL-3',
'website': 'https://github.com/nuobit',
'depends': [
'stock',
],
'data': [
'views/stock_picking_views.xml',
],
'installable': True,
}
|
enthought/etsproxy
|
refs/heads/master
|
enthought/envisage/ui/action/action_set_manager.py
|
1
|
# proxy module
from __future__ import absolute_import
from envisage.ui.action.action_set_manager import *
|
zhuwei136295/project-config
|
refs/heads/master
|
nodepool/scripts/common.py
|
27
|
#!/usr/bin/env python
# Copyright (C) 2011-2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import subprocess
def run_local(cmd, status=False, cwd='.', env={}):
print "Running:", cmd
newenv = os.environ
newenv.update(env)
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, cwd=cwd,
stderr=subprocess.STDOUT, env=newenv)
(out, nothing) = p.communicate()
if status:
return (p.returncode, out.strip())
return out.strip()
|
sanjeevtripurari/hue
|
refs/heads/master
|
desktop/core/ext-py/Django-1.6.10/tests/utils_tests/test_itercompat.py
|
569
|
from django.test import TestCase
from .models import Category, Thing
class TestIsIterator(TestCase):
def test_regression(self):
"""This failed on Django 1.5/Py2.6 because category has a next method."""
category = Category.objects.create(name='category')
Thing.objects.create(category=category)
Thing.objects.filter(category=category)
|
smkr/pyclipse
|
refs/heads/master
|
plugins/com.python.pydev.fastparser/tests/pysrc/filetoparse1.py
|
5
|
def GlobalMethod(param1):
'''GlobalMethod docs'''
|
friedrich420/N3-AEL-Kernel-NF1-v5-
|
refs/heads/master
|
tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py
|
12527
|
# Util.py - Python extension for perf script, miscellaneous utility code
#
# Copyright (C) 2010 by Tom Zanussi <tzanussi@gmail.com>
#
# This software may be distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
import errno, os
FUTEX_WAIT = 0
FUTEX_WAKE = 1
FUTEX_PRIVATE_FLAG = 128
FUTEX_CLOCK_REALTIME = 256
FUTEX_CMD_MASK = ~(FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME)
NSECS_PER_SEC = 1000000000
def avg(total, n):
return total / n
def nsecs(secs, nsecs):
return secs * NSECS_PER_SEC + nsecs
def nsecs_secs(nsecs):
return nsecs / NSECS_PER_SEC
def nsecs_nsecs(nsecs):
return nsecs % NSECS_PER_SEC
def nsecs_str(nsecs):
str = "%5u.%09u" % (nsecs_secs(nsecs), nsecs_nsecs(nsecs)),
return str
def add_stats(dict, key, value):
if not dict.has_key(key):
dict[key] = (value, value, value, 1)
else:
min, max, avg, count = dict[key]
if value < min:
min = value
if value > max:
max = value
avg = (avg + value) / 2
dict[key] = (min, max, avg, count + 1)
def clear_term():
print("\x1b[H\x1b[2J")
audit_package_warned = False
try:
import audit
machine_to_id = {
'x86_64': audit.MACH_86_64,
'alpha' : audit.MACH_ALPHA,
'ia64' : audit.MACH_IA64,
'ppc' : audit.MACH_PPC,
'ppc64' : audit.MACH_PPC64,
's390' : audit.MACH_S390,
's390x' : audit.MACH_S390X,
'i386' : audit.MACH_X86,
'i586' : audit.MACH_X86,
'i686' : audit.MACH_X86,
}
try:
machine_to_id['armeb'] = audit.MACH_ARMEB
except:
pass
machine_id = machine_to_id[os.uname()[4]]
except:
if not audit_package_warned:
audit_package_warned = True
print "Install the audit-libs-python package to get syscall names"
def syscall_name(id):
try:
return audit.audit_syscall_to_name(id, machine_id)
except:
return str(id)
def strerror(nr):
try:
return errno.errorcode[abs(nr)]
except:
return "Unknown %d errno" % nr
|
JonathanStein/odoo
|
refs/heads/8.0
|
addons/account_bank_statement_extensions/res_partner_bank.py
|
381
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
#
# Copyright (c) 2011 Noviat nv/sa (www.noviat.be). All rights reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv
class res_partner_bank(osv.osv):
_inherit = 'res.partner.bank'
def name_search(self, cr, user, name, args=None, operator='ilike', context=None, limit=100):
if not args:
args = []
ids = []
if name:
ids = self.search(cr, user, [('acc_number', operator, name)] + args, limit=limit)
else:
ids = self.search(cr, user, args, context=context, limit=limit)
return self.name_get(cr, user, ids, context=context)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
js0701/chromium-crosswalk
|
refs/heads/master
|
tools/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-regr4/script.py
|
23
|
from pkg.core.listener import Listener as listen
|
wkrzemien/DIRAC
|
refs/heads/integration
|
FrameworkSystem/Service/UserProfileManagerHandler.py
|
2
|
########################################################################
# $HeadURL$
########################################################################
""" ProfileManager manages web user profiles
in the DISET framework
"""
from __future__ import print_function
__RCSID__ = "$Id$"
import types
from DIRAC.Core.DISET.RequestHandler import RequestHandler
from DIRAC import S_OK, S_ERROR
from DIRAC.FrameworkSystem.DB.UserProfileDB import UserProfileDB
from DIRAC.Core.Security import Properties
gUPDB = False
def initializeUserProfileManagerHandler( serviceInfo ):
global gUPDB
try:
gUPDB = UserProfileDB()
except Exception as e:
return S_ERROR( "Can't initialize UserProfileDB: %s" % str( e ) )
return S_OK()
class UserProfileManagerHandler( RequestHandler ):
types_retrieveProfileVar = [ types.StringTypes, types.StringTypes ]
def export_retrieveProfileVar( self, profileName, varName ):
""" Get profile data for web
"""
credDict = self.getRemoteCredentials()
userName = credDict[ 'username' ]
userGroup = credDict[ 'group' ]
return gUPDB.retrieveVar( userName, userGroup,
userName, userGroup,
profileName, varName )
types_retrieveProfileVarFromUser = [ types.StringTypes, types.StringTypes, types.StringTypes, types.StringTypes ]
def export_retrieveProfileVarFromUser( self, ownerName, ownerGroup, profileName, varName ):
""" Get profile data for web for any user according to perms
"""
credDict = self.getRemoteCredentials()
userName = credDict[ 'username' ]
userGroup = credDict[ 'group' ]
return gUPDB.retrieveVar( userName, userGroup,
ownerName, ownerGroup,
profileName, varName )
types_retrieveProfileAllVars = [ types.StringTypes ]
def export_retrieveProfileAllVars( self, profileName ):
""" Get profile data for web
"""
credDict = self.getRemoteCredentials()
userName = credDict[ 'username' ]
userGroup = credDict[ 'group' ]
return gUPDB.retrieveAllUserVars( userName, userGroup, profileName )
types_storeProfileVar = [ types.StringTypes, types.StringTypes, types.StringTypes, types.DictType ]
def export_storeProfileVar( self, profileName, varName, data, perms ):
""" Set profile data for web
"""
credDict = self.getRemoteCredentials()
userName = credDict[ 'username' ]
userGroup = credDict[ 'group' ]
return gUPDB.storeVar( userName, userGroup, profileName, varName, data, perms )
types_deleteProfileVar = [ types.StringTypes, types.StringTypes ]
def export_deleteProfileVar( self, profileName, varName ):
""" Set profile data for web
"""
credDict = self.getRemoteCredentials()
userName = credDict[ 'username' ]
userGroup = credDict[ 'group' ]
return gUPDB.deleteVar( userName, userGroup, profileName, varName )
types_listAvailableProfileVars = [ types.StringTypes ]
def export_listAvailableProfileVars( self, profileName, filterDict = None ):
""" List available profile variables for web
"""
credDict = self.getRemoteCredentials()
userName = credDict[ 'username' ]
userGroup = credDict[ 'group' ]
return gUPDB.listVars( userName, userGroup, profileName, filterDict or {} )
types_getUserProfiles = []
def export_getUserProfiles( self ):
""" Get all profiles for a user
"""
credDict = self.getRemoteCredentials()
userName = credDict[ 'username' ]
userGroup = credDict[ 'group' ]
return gUPDB.retrieveUserProfiles( userName, userGroup )
types_setProfileVarPermissions = [ types.StringTypes, types.StringTypes, types.DictType ]
def export_setProfileVarPermissions( self, profileName, varName, perms ):
""" Set profile data for web
"""
credDict = self.getRemoteCredentials()
userName = credDict[ 'username' ]
userGroup = credDict[ 'group' ]
return gUPDB.setUserVarPerms( userName, userGroup, profileName, varName, perms )
types_getProfileVarPermissions = [ types.StringTypes, types.StringTypes ]
def export_getProfileVarPermissions( self, profileName, varName ):
""" Set profile data for web
"""
credDict = self.getRemoteCredentials()
userName = credDict[ 'username' ]
userGroup = credDict[ 'group' ]
return gUPDB.retrieveVarPerms( userName, userGroup,
userName, userGroup,
profileName, varName )
types_storeHashTag = [ types.StringTypes ]
def export_storeHashTag( self, tagName ):
""" Set hash tag
"""
credDict = self.getRemoteCredentials()
userName = credDict[ 'username' ]
userGroup = credDict[ 'group' ]
return gUPDB.storeHashTag( userName, userGroup, tagName )
types_retrieveHashTag = [ types.StringTypes ]
def export_retrieveHashTag( self, hashTag ):
""" Get hash tag
"""
credDict = self.getRemoteCredentials()
userName = credDict[ 'username' ]
userGroup = credDict[ 'group' ]
return gUPDB.retrieveHashTag( userName, userGroup, hashTag )
types_retrieveAllHashTags = []
def export_retrieveAllHashTags( self ):
""" Get all hash tags
"""
credDict = self.getRemoteCredentials()
userName = credDict[ 'username' ]
userGroup = credDict[ 'group' ]
return gUPDB.retrieveAllHashTags( userName, userGroup )
types_deleteProfiles = [ types.ListType ]
def export_deleteProfiles( self, userList ):
"""
Delete profiles for a list of users
"""
credDict = self.getRemoteCredentials()
requesterUserName = credDict[ 'username' ]
if Properties.SERVICE_ADMINISTRATOR in credDict[ 'properties' ]:
admin = True
else:
admin = False
for entry in userList:
userName = entry
if admin or userName == requesterUserName:
result = gUPDB.deleteUserProfile( userName )
if not result[ 'OK' ]:
return result
return S_OK()
types_getUserProfileNames = [types.DictType]
def export_getUserProfileNames( self, permission ):
"""
Return the available profile names, based on the given permission dict (ReadAccess and PublishAccess)
"""
return gUPDB.getUserProfileNames( permission )
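# Client-side sketch (assumption: the service is deployed under the usual
# "Framework/UserProfileManager" path; adjust to the actual installation):
#   from DIRAC.Core.DISET.RPCClient import RPCClient
#   upClient = RPCClient( "Framework/UserProfileManager" )
#   result = upClient.retrieveProfileVar( "WebApp", "layout" )
#   if not result[ 'OK' ]:
#     print result[ 'Message' ]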
|
Beauhurst/django
|
refs/heads/master
|
tests/forms_tests/views.py
|
452
|
from django import forms
from django.views.generic.edit import UpdateView
from .models import Article
class ArticleForm(forms.ModelForm):
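# strip=False keeps any leading/trailing whitespace the user submits in the textarea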
content = forms.CharField(strip=False, widget=forms.Textarea)
class Meta:
model = Article
fields = '__all__'
class ArticleFormView(UpdateView):
model = Article
success_url = '/'
form_class = ArticleForm
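# Hypothetical URLconf wiring for the view above (not part of this module):
#   from django.urls import path
#   urlpatterns = [path('article/<int:pk>/edit/', ArticleFormView.as_view())]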
|
bdoner/SickRage
|
refs/heads/master
|
lib/unrar2/unix.py
|
13
|
# Copyright (c) 2003-2005 Jimmy Retzlaff, 2008 Konstantin Yegupov
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Unix version uses unrar command line executable
import subprocess
import gc
import os, os.path
import time, re
from rar_exceptions import *
class UnpackerNotInstalled(Exception): pass
rar_executable_cached = None
rar_executable_version = None
def call_unrar(params):
"Calls rar/unrar command line executable, returns stdout pipe"
global rar_executable_cached
if rar_executable_cached is None:
for command in ('unrar', 'rar'):
try:
subprocess.Popen([command], stdout=subprocess.PIPE)
rar_executable_cached = command
break
except OSError:
pass
if rar_executable_cached is None:
raise UnpackerNotInstalled("No suitable RAR unpacker installed")
assert type(params) == list, "params must be list"
args = [rar_executable_cached] + params
try:
gc.disable() # See http://bugs.python.org/issue1336
return subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
finally:
gc.enable()
class RarFileImplementation(object):
def init(self, password=None):
global rar_executable_version
self.password = password
proc = self.call('v', [])
stdoutdata, stderrdata = proc.communicate()
# Use unrar return code if available
self._check_returncode(proc.returncode)
for line in stderrdata.splitlines():
if line.strip().startswith("Cannot open"):
raise FileOpenError
if line.find("CRC failed")>=0:
raise IncorrectRARPassword
accum = []
source = iter(stdoutdata.splitlines())
line = ''
while (line.find('RAR ') == -1):
line = source.next()
signature = line
# The code below is mighty flaky
# and will probably crash on localized versions of RAR
# but I see no safe way to rewrite it using a CLI tool
if signature.find("RAR 4") > -1:
rar_executable_version = 4
while not (line.startswith('Comment:') or line.startswith('Pathname/Comment')):
if line.strip().endswith('is not RAR archive'):
raise InvalidRARArchive
line = source.next()
while not line.startswith('Pathname/Comment'):
accum.append(line.rstrip('\n'))
line = source.next()
if len(accum):
accum[0] = accum[0][9:] # strip out "Comment:" part
self.comment = '\n'.join(accum[:-1])
else:
self.comment = None
elif signature.find("RAR 5") > -1:
rar_executable_version = 5
line = source.next()
while not line.startswith('Archive:'):
if line.strip().endswith('is not RAR archive'):
raise InvalidRARArchive
accum.append(line.rstrip('\n'))
line = source.next()
if len(accum):
self.comment = '\n'.join(accum[:-1]).strip()
else:
self.comment = None
else:
raise UnpackerNotInstalled("Unsupported RAR version, expected 4.x or 5.x, found: "
+ signature.split(" ")[1])
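# unrar interprets "-p-" as "no password", so a missing password maps to '-'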
def escaped_password(self):
return '-' if self.password is None else self.password
def call(self, cmd, options=[], files=[]):
options2 = options + ['p'+self.escaped_password()]
soptions = ['-'+x for x in options2]
return call_unrar([cmd]+soptions+['--',self.archiveName]+files)
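# unrar 4.x ("v") and 5.x ("l") print different listing layouts, so the
# parsing below branches on the version detected in init()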
def infoiter(self):
command = "v" if rar_executable_version == 4 else "l"
proc = self.call(command, ['c-'])
stdoutdata, stderrdata = proc.communicate()
# Use unrar return code if available
self._check_returncode(proc.returncode)
for line in stderrdata.splitlines():
if line.strip().startswith("Cannot open"):
raise FileOpenError
accum = []
source = iter(stdoutdata.splitlines())
line = ''
while not line.startswith('-----------'):
if line.strip().endswith('is not RAR archive'):
raise InvalidRARArchive
if line.startswith("CRC failed") or line.startswith("Checksum error") or line.startswith("checksum error"):
raise IncorrectRARPassword
line = source.next()
line = source.next()
i = 0
re_spaces = re.compile(r"\s+")
if rar_executable_version == 4:
while not line.startswith('-----------'):
accum.append(line)
if len(accum)==2:
data = {}
data['index'] = i
# asterisks mark password-encrypted files
data['filename'] = accum[0].strip().lstrip("*")
fields = re_spaces.split(accum[1].strip())
data['size'] = int(fields[0])
attr = fields[5]
data['isdir'] = 'd' in attr.lower()
data['datetime'] = time.strptime(fields[3]+" "+fields[4], '%d-%m-%y %H:%M')
data['comment'] = None
data['volume'] = None
yield data
accum = []
i += 1
line = source.next()
elif rar_executable_version == 5:
while not line.startswith('-----------'):
fields = line.strip().lstrip("*").split()
data = {}
data['index'] = i
data['filename'] = " ".join(fields[4:])
data['size'] = int(fields[1])
attr = fields[0]
data['isdir'] = 'd' in attr.lower()
data['datetime'] = time.strptime(fields[2]+" "+fields[3], '%d-%m-%y %H:%M')
data['comment'] = None
data['volume'] = None
yield data
i += 1
line = source.next()
def read_files(self, checker):
res = []
for info in self.infoiter():
checkres = checker(info)
if checkres==True and not info.isdir:
pipe = self.call('p', ['inul'], [info.filename]).stdout
res.append((info, pipe.read()))
return res
def extract(self, checker, path, withSubpath, overwrite):
res = []
command = 'x'
if not withSubpath:
command = 'e'
options = []
if overwrite:
options.append('o+')
else:
options.append('o-')
if not path.endswith(os.sep):
path += os.sep
names = []
for info in self.infoiter():
checkres = checker(info)
if type(checkres) in [str, unicode]:
raise NotImplementedError("Condition callbacks returning strings are deprecated and only supported in Windows")
if checkres==True and not info.isdir:
names.append(info.filename)
res.append(info)
names.append(path)
proc = self.call(command, options, names)
stdoutdata, stderrdata = proc.communicate()
# Use unrar return code if available
self._check_returncode(proc.returncode)
if stderrdata.find("CRC failed")>=0 or stderrdata.find("Checksum error")>=0 or stderrdata.find("checksum error")>=0:
raise CRCRARError
if stderrdata.find("No files to extract")>=0:
raise NoFileToExtract
if stderrdata.find("Bad archive")>=0:
raise FatalRARError
return res
def _check_returncode(self, returncode):
# RAR exit code from unrarsrc-5.2.1.tar.gz/errhnd.hpp
RARX_SUCCESS = 0
RARX_WARNING = 1
RARX_FATAL = 2
RARX_CRC = 3
RARX_LOCK = 4
RARX_WRITE = 5
RARX_OPEN = 6
RARX_USERERROR = 7
RARX_MEMORY = 8
RARX_CREATE = 9
RARX_NOFILES = 10
RARX_BADPWD = 11
RARX_USERBREAK = 255
if returncode != RARX_SUCCESS:
if returncode == RARX_FATAL:
raise FatalRARError
elif returncode == RARX_CRC:
raise CRCRARError
elif returncode == RARX_BADPWD:
raise IncorrectRARPassword
elif returncode == RARX_NOFILES:
raise NoFileToExtract
else:
raise GenericRARError
def destruct(self):
pass
def get_volume(self):
command = "v" if rar_executable_version == 4 else "l"
stdoutdata, stderrdata = self.call(command, ['c-']).communicate()
for line in stderrdata.splitlines():
if line.strip().startswith("Cannot open"):
raise FileOpenError
source = iter(stdoutdata.splitlines())
line = ''
while not line.startswith('-----------'):
if line.strip().endswith('is not RAR archive'):
raise InvalidRARArchive
if line.startswith("CRC failed") or line.startswith("Checksum error"):
raise IncorrectRARPassword
line = source.next()
line = source.next()
if rar_executable_version == 4:
while not line.startswith('-----------'):
line = source.next()
line = source.next()
items = line.strip().split()
if len(items)>4 and items[4]=="volume":
return int(items[5]) - 1
else:
return None
elif rar_executable_version == 5:
while not line.startswith('-----------'):
line = source.next()
line = source.next()
items = line.strip().split()
if items[1]=="volume":
return int(items[2]) - 1
else:
return None
|
sumeetchhetri/FrameworkBenchmarks
|
refs/heads/master
|
frameworks/Python/cherrypy/satool.py
|
79
|
import cherrypy
__all__ = ['SATool']
class SATool(cherrypy.Tool):
def __init__(self):
"""
The SA tool is responsible for associating a SA session
to the SA engine and attaching it to the current request.
Since we are running in a multithreaded application,
we use the scoped_session that will create a session
on a per-thread basis so that you don't have to worry about
concurrency on the session object itself.
This tool binds a session to the engine each time
a request starts and commits/rolls back whenever
the request terminates.
"""
cherrypy.Tool.__init__(self, 'on_start_resource',
self.bind_session,
priority=20)
def _setup(self):
cherrypy.Tool._setup(self)
cherrypy.request.hooks.attach('on_end_resource',
self.commit_transaction,
priority=80)
def bind_session(self):
"""
Attaches a session to the request's scope by requesting
the SA plugin to bind a session to the SA engine.
"""
session = cherrypy.engine.publish('bind-session').pop()
cherrypy.request.db = session
def commit_transaction(self):
"""
Commits the current transaction or rolls back
if an error occurs. Removes the session handle
from the request's scope.
"""
if not hasattr(cherrypy.request, 'db'):
return
cherrypy.request.db = None
cherrypy.engine.publish('commit-session')
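# Minimal registration sketch (assumption: an engine plugin that answers the
# 'bind-session'/'commit-session' channels is already subscribed):
#   cherrypy.tools.db = SATool()
#   cherrypy.quickstart(Root(), '/', {'/': {'tools.db.on': True}})
# Handlers can then use cherrypy.request.db as a per-request SQLAlchemy session.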
|
castroflavio/ryu
|
refs/heads/master
|
ryu/tests/unit/ofproto/test_parser_compat.py
|
22
|
# Copyright (C) 2013 Nippon Telegraph and Telephone Corporation.
# Copyright (C) 2013 YAMAMOTO Takashi <yamamoto at valinux co jp>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import unittest
from nose.tools import eq_
from nose.tools import ok_
from ryu.ofproto import ofproto_v1_2
from ryu.ofproto import ofproto_v1_3
from ryu.ofproto import ofproto_v1_2_parser
from ryu.ofproto import ofproto_v1_3_parser
from ryu.lib import addrconv
from struct import unpack
class Test_Parser_Compat(unittest.TestCase):
def __init__(self, methodName):
print 'init', methodName
super(Test_Parser_Compat, self).__init__(methodName)
def setUp(self):
pass
def tearDown(self):
pass
def _test(self, name, ofpp):
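# map the parser module under test to its matching protocol constants module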
ofp = {
ofproto_v1_2_parser: ofproto_v1_2,
ofproto_v1_3_parser: ofproto_v1_3,
}[ofpp]
in_port = 987654321
eth_src = 'aa:bb:cc:dd:ee:ff'
ipv4_src = '192.0.2.9'
ipv6_src = 'fe80::f00b:a4ff:feef:5d8f'
old_in_port = in_port
old_eth_src = addrconv.mac.text_to_bin(eth_src)
old_ipv4_src = unpack('!I', addrconv.ipv4.text_to_bin(ipv4_src))[0]
old_ipv6_src = list(unpack('!8H',
addrconv.ipv6.text_to_bin(ipv6_src)))
def check(o):
check_old(o)
check_new(o)
def check_old(o):
# old api
def get_field(m, t):
for f in m.fields:
if isinstance(f, t):
return f
get_value = lambda m, t: get_field(m, t).value
eq_(get_value(o, ofpp.MTInPort), old_in_port)
eq_(get_value(o, ofpp.MTEthSrc), old_eth_src)
eq_(get_value(o, ofpp.MTIPV4Src), old_ipv4_src)
eq_(get_value(o, ofpp.MTIPv6Src), old_ipv6_src)
def check_new(o):
# new api
eq_(o['in_port'], in_port)
eq_(o['eth_src'], eth_src)
eq_(o['ipv4_src'], ipv4_src)
eq_(o['ipv6_src'], ipv6_src)
# ensure that old and new api produces the same thing
# old api
old = ofpp.OFPMatch()
old.set_in_port(old_in_port)
old.set_dl_src(old_eth_src)
old.set_ipv4_src(old_ipv4_src)
old.set_ipv6_src(old_ipv6_src)
old_buf = bytearray()
old.serialize(old_buf, 0)
# note: you can't inspect an object composed with the old set_XXX api
# before serialize().
check_old(old)
# another variant of old api; originally it was intended to be
# internal but actually used in the field. eg. LINC l2_switch_v1_3.py
old2 = ofpp.OFPMatch()
old2.append_field(ofp.OXM_OF_IN_PORT, old_in_port)
old2.append_field(ofp.OXM_OF_ETH_SRC, old_eth_src)
old2.append_field(ofp.OXM_OF_IPV4_SRC, old_ipv4_src)
old2.append_field(ofp.OXM_OF_IPV6_SRC, old_ipv6_src)
check_old(old2)
old2_buf = bytearray()
old2.serialize(old2_buf, 0)
# new api
new = ofpp.OFPMatch(in_port=in_port, eth_src=eth_src,
ipv4_src=ipv4_src, ipv6_src=ipv6_src)
check_new(new)
new_buf = bytearray()
new.serialize(new_buf, 0)
eq_(new_buf, old_buf)
eq_(new_buf, old2_buf)
old_jsondict = old.to_jsondict()
old2_jsondict = old2.to_jsondict()
new_jsondict = new.to_jsondict()
eq_(new_jsondict, old_jsondict)
eq_(new_jsondict, old2_jsondict)
eq_(str(new), str(old))
eq_(str(new), str(old2))
# a parsed object can be inspected by old and new api
check(ofpp.OFPMatch.parser(buffer(new_buf), 0))
check(ofpp.OFPMatch.from_jsondict(new_jsondict.values()[0]))
def _add_tests():
import new
import functools
import itertools
ofpps = [ofproto_v1_2_parser, ofproto_v1_3_parser]
for ofpp in ofpps:
mod = ofpp.__name__.split('.')[-1]
method_name = 'test_' + mod + '_ofpmatch_compat'
def _run(self, name, ofpp):
print ('processing %s ...' % name)
self._test(name, ofpp)
print ('adding %s ...' % method_name)
f = functools.partial(_run, name=method_name,
ofpp=ofpp)
f.func_name = method_name
f.__name__ = method_name
cls = Test_Parser_Compat
im = new.instancemethod(f, None, cls)
setattr(cls, method_name, im)
_add_tests()
|
mapleoin/braintree_python
|
refs/heads/master
|
tests/integration/test_http.py
|
2
|
from tests.test_helper import *
from distutils.version import LooseVersion
import platform
import braintree
import requests
class TestHttp(unittest.TestCase):
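# requests >= 1.0 moved SSLError from requests.models to requests.exceptions;
# pick whichever location matches the installed version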
if LooseVersion(requests.__version__) >= LooseVersion('1.0.0'):
SSLError = requests.exceptions.SSLError
else:
SSLError = requests.models.SSLError
def get_http(self, environment):
config = Configuration(environment, "merchant_id", public_key="public_key", private_key="private_key")
return config.http()
def test_successful_connection_sandbox(self):
http = self.get_http(Environment.Sandbox)
try:
http.get("/")
except AuthenticationError:
pass
else:
self.assertTrue(False)
def test_successful_connection_production(self):
http = self.get_http(Environment.Production)
try:
http.get("/")
except AuthenticationError:
pass
else:
self.assertTrue(False)
def test_wrapping_http_exceptions(self):
config = Configuration(
Environment("test", "localhost", "1", False, None, Environment.Production.ssl_certificate),
"integration_merchant_id",
public_key="integration_public_key",
private_key="integration_private_key",
wrap_http_exceptions=True
)
gateway = braintree.braintree_gateway.BraintreeGateway(config)
try:
gateway.transaction.find("my_id")
except braintree.exceptions.unexpected_error.UnexpectedError:
correct_exception = True
except Exception as e:
correct_exception = False
self.assertTrue(correct_exception)
def test_unsuccessful_connection_to_good_ssl_server_with_wrong_cert(self):
if platform.system() == "Darwin":
return
environment = Environment("test", "www.google.com", "443", "http://auth.venmo.dev:9292", True, Environment.Production.ssl_certificate)
http = self.get_http(environment)
try:
http.get("/")
except self.SSLError as e:
self.assertTrue("certificate verify failed" in str(e))
except AuthenticationError:
self.fail("Expected to receive an SSL error but received an Authentication Error instead, check your local openssl installation")
else:
self.fail("Expected to receive an SSL error but no exception was raised")
def test_unsuccessful_connection_to_ssl_server_with_wrong_domain(self):
# IP address of api.braintreegateway.com
environment = Environment("test", "204.109.13.121", "443", "http://auth.venmo.dev:9292", True, Environment.Production.ssl_certificate)
http = self.get_http(environment)
try:
http.get("/")
except self.SSLError as e:
pass
else:
self.fail("Expected to receive an SSL error but no exception was raised")
def test_timeouts(self):
config = Configuration(
Environment.Development,
"integration_merchant_id",
public_key="integration_public_key",
private_key="integration_private_key",
wrap_http_exceptions=True,
timeout=0.001
)
gateway = braintree.braintree_gateway.BraintreeGateway(config)
try:
gateway.transaction.find("my_id")
except braintree.exceptions.http.timeout_error.TimeoutError:
correct_exception = True
except Exception as e:
correct_exception = False
self.assertTrue(correct_exception)
|