repo_name stringlengths 5 100 | path stringlengths 4 294 | copies stringclasses 990 values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15 values |
|---|---|---|---|---|---|
bbcf/bsPlugins | tests/test_QuantifyTable.py | 1 | 1450 | from unittest2 import TestCase, skip
from bbcflib.track import track
from bsPlugins.QuantifyTable import QuantifyTablePlugin
import os
path = 'testing_files/'
class Test_QuantifyTablePlugin(TestCase):
    """Integration tests for QuantifyTablePlugin sql and txt outputs."""

    def setUp(self):
        self.plugin = QuantifyTablePlugin()

    def test_quantify_table_sql(self):
        # Run the plugin asking for an sql-format track as output.
        self.plugin(**{'input_type': 'Signal',
                       'SigMulti': {'signals': [path + 'KO50.bedGraph',
                                                path + 'WT50.bedGraph']},
                       'features': path + 'features.bed', 'feature_type': 3,
                       'assembly': 'mm9', 'format': 'sql'})
        with track(self.plugin.output_files[0][0]) as t:
            s = t.read()
            content = list(s)
            self.assertListEqual(
                s.fields, ["chr", "start", "end", "name", "score0", "score1"])
            self.assertEqual(len(content), 9)

    def test_quantify_table_text(self):
        # Same run with a text output; fields must be supplied explicitly
        # because the txt track carries no schema of its own.
        self.plugin(**{'input_type': 'Signal',
                       'SigMulti': {'signals': [path + 'KO50.bedGraph',
                                                path + 'WT50.bedGraph']},
                       'features': path + 'features.bed', 'feature_type': 3,
                       'assembly': 'mm9', 'format': 'txt'})
        with track(self.plugin.output_files[0][0],
                   fields=["chr", "start", "end", "name", "score0", "score1"]) as t:
            s = t.read()
            content = list(s)
            self.assertEqual(len(content), 9)

    def tearDown(self):
        # Remove temporary plugin outputs.  Previously this shelled out to
        # ``rm -rf`` via os.system, which is non-portable and unsafe for odd
        # file names; use shutil/os primitives instead.
        import shutil
        for f in os.listdir('.'):
            if f.startswith('tmp'):
                if os.path.isdir(f):
                    shutil.rmtree(f, ignore_errors=True)
                else:
                    try:
                        os.remove(f)
                    except OSError:
                        pass
# nosetests --logging-filter=-tw2 test_QuantifyTable.py
| gpl-3.0 |
zding5/Microblog-Flask | flask/lib/python2.7/site-packages/flask/signals.py | 783 | 2140 | # -*- coding: utf-8 -*-
"""
flask.signals
~~~~~~~~~~~~~
Implements signals based on blinker if available, otherwise
falls silently back to a noop
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
signals_available = False

try:
    from blinker import Namespace
except ImportError:

    class Namespace(object):
        """Stand-in for ``blinker.Namespace`` when blinker is missing."""

        def signal(self, name, doc=None):
            # Hand back a dummy signal instead of a real blinker signal.
            return _FakeSignal(name, doc)

    class _FakeSignal(object):
        """If blinker is unavailable, create a fake class with the same
        interface that allows sending of signals but will fail with an
        error on anything else.  Instead of doing anything on send, it
        will just ignore the arguments and do nothing instead.
        """

        def __init__(self, name, doc=None):
            self.name = name
            self.__doc__ = doc

        def send(self, *args, **kwargs):
            # Sending is deliberately a no-op: without blinker there can
            # be no receivers to deliver to.
            pass

        def _fail(self, *args, **kwargs):
            raise RuntimeError('signalling support is unavailable '
                               'because the blinker library is '
                               'not installed.')

        connect = disconnect = has_receivers_for = receivers_for = \
            temporarily_connected_to = connected_to = _fail
        del _fail
else:
    signals_available = True
# the namespace for code signals. If you are not flask code, do
# not put signals in here. Create your own namespace instead.
_signals = Namespace()


# core signals. For usage examples grep the sourcecode or consult
# the API documentation in docs/api.rst as well as docs/signals.rst

# Each signal's string name is the dash-separated form of its variable name.
template_rendered = _signals.signal('template-rendered')
request_started = _signals.signal('request-started')
request_finished = _signals.signal('request-finished')
request_tearing_down = _signals.signal('request-tearing-down')
got_request_exception = _signals.signal('got-request-exception')
appcontext_tearing_down = _signals.signal('appcontext-tearing-down')
appcontext_pushed = _signals.signal('appcontext-pushed')
appcontext_popped = _signals.signal('appcontext-popped')
message_flashed = _signals.signal('message-flashed')
| mit |
esrille/ibus-replace-with-kanji | dic_tools/intersection-yomi.py | 1 | 1034 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017 Esrille Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import codecs
import re
import sys
from signal import signal, SIGPIPE, SIG_DFL
import dic
if __name__ == '__main__':
    # Restore default SIGPIPE handling so piping the output into e.g.
    # ``head`` does not raise a noisy BrokenPipeError traceback.
    signal(SIGPIPE, SIG_DFL)

    # Optional positional arguments: the two dictionary files to intersect.
    first = 'restrained.dic'
    if 2 <= len(sys.argv):
        first = sys.argv[1]
    second = 'katakana.dic'
    if 3 <= len(sys.argv):
        second = sys.argv[2]

    # Renamed local from ``dict`` so it no longer shadows the builtin.
    intersected = dic.intersection_yomi(dic.load(first), dic.load(second))
    print(";", first, "∩", second)
    dic.output(intersected)
| apache-2.0 |
amandersillinois/landlab | landlab/grid/raster_gradients.py | 3 | 67150 | #! /usr/bin/env python
"""Calculate gradients on a raster grid.
Gradient calculators for raster grids
+++++++++++++++++++++++++++++++++++++
.. autosummary::
~landlab.grid.raster_gradients.calc_grad_at_link
~landlab.grid.raster_gradients.calc_grad_across_cell_faces
~landlab.grid.raster_gradients.calc_grad_across_cell_corners
"""
from collections import deque
import numpy as np
from landlab.core.utils import make_optional_arg_into_id_array, radians_to_degrees
from landlab.grid import gradients
from landlab.utils.decorators import use_field_name_or_array
@use_field_name_or_array("node")
def calc_diff_at_d8(grid, node_values, out=None):
    """Calculate differences of node values over links and diagonals.

    Calculates the difference in quantity *node_values* at each link in the
    grid.

    Parameters
    ----------
    grid : ModelGrid
        A ModelGrid.
    node_values : ndarray or field name
        Values at grid nodes.
    out : ndarray, optional
        Buffer to hold the result.

    Returns
    -------
    ndarray
        Differences across links.

    Examples
    --------
    >>> import numpy as np
    >>> from landlab import RasterModelGrid
    >>> grid = RasterModelGrid((3, 4), xy_spacing=(4, 3))
    >>> z = [
    ...     [60.0, 60.0, 60.0, 60.0],
    ...     [60.0, 60.0, 0.0, 0.0],
    ...     [60.0, 0.0, 0.0, 0.0],
    ... ]
    >>> grid.calc_diff_at_d8(z)
    array([  0.,   0.,   0.,   0.,   0., -60., -60.,   0., -60.,   0.,   0.,
           -60.,   0.,   0., -60.,   0.,   0.,   0.,   0., -60.,   0., -60.,
           -60., -60.,   0., -60.,   0.,   0.,   0.])

    LLCATS: LINF GRAD
    """
    values = np.asarray(node_values)
    if out is None:
        out = np.empty(grid.number_of_d8)
    # Difference is head value minus tail value for every d8 element.
    head_nodes = grid.nodes_at_d8[:, 1]
    tail_nodes = grid.nodes_at_d8[:, 0]
    return np.subtract(values[head_nodes], values[tail_nodes], out=out)
@use_field_name_or_array("node")
def calc_diff_at_diagonal(grid, node_values, out=None):
    """Calculate differences of node values over diagonals.

    Calculates the difference in quantity *node_values* at each link in the
    grid.

    Parameters
    ----------
    grid : ModelGrid
        A ModelGrid.
    node_values : ndarray or field name
        Values at grid nodes.
    out : ndarray, optional
        Buffer to hold the result.

    Returns
    -------
    ndarray
        Differences across links.

    Examples
    --------
    >>> import numpy as np
    >>> from landlab import RasterModelGrid
    >>> grid = RasterModelGrid((3, 4), xy_spacing=(4, 3))
    >>> z = [
    ...     [5.0, 5.0, 5.0, 5.0],
    ...     [5.0, 5.0, 0.0, 0.0],
    ...     [5.0, 0.0, 0.0, 0.0],
    ... ]
    >>> grid.calc_diff_at_diagonal(z)
    array([ 0.,  0., -5.,  0., -5., -5., -5.,  0., -5.,  0.,  0.,  0.])

    LLCATS: LINF GRAD
    """
    values = np.asarray(node_values)
    if out is None:
        out = np.empty(grid.number_of_diagonals)
    # Difference is head value minus tail value for every diagonal.
    head_nodes = grid.nodes_at_diagonal[:, 1]
    tail_nodes = grid.nodes_at_diagonal[:, 0]
    return np.subtract(values[head_nodes], values[tail_nodes], out=out)
def calc_grad_at_d8(grid, node_values, out=None):
    """Calculate gradients over all diagonals and links.

    Parameters
    ----------
    grid : RasterModelGrid
        A grid.
    node_values : array_like or field name
        Values at nodes.
    out : ndarray, optional
        Buffer to hold result. If `None`, create a new array.

    Examples
    --------
    >>> from landlab import RasterModelGrid
    >>> import numpy as np
    >>> grid = RasterModelGrid((3, 4), xy_spacing=(4, 3))
    >>> z = [
    ...     [60.0, 60.0, 60.0, 60.0],
    ...     [60.0, 60.0, 0.0, 0.0],
    ...     [60.0, 0.0, 0.0, 0.0],
    ... ]
    >>> grid.calc_grad_at_d8(z)
    array([  0.,   0.,   0.,   0.,   0., -20., -20.,   0., -15.,   0.,   0.,
           -20.,   0.,   0., -15.,   0.,   0.,   0.,   0., -12.,   0., -12.,
           -12., -12.,   0., -12.,   0.,   0.,   0.])

    LLCATS: LINF GRAD
    """
    # Gradient = value difference divided by the d8 element's length.
    d8_lengths = grid.length_of_d8[: grid.number_of_d8]
    slopes = calc_diff_at_d8(grid, node_values, out=out)
    slopes /= d8_lengths
    return slopes
def calc_grad_at_diagonal(grid, node_values, out=None):
    """Calculate gradients over all diagonals.

    Parameters
    ----------
    grid : RasterModelGrid
        A grid.
    node_values : array_like or field name
        Values at nodes.
    out : ndarray, optional
        Buffer to hold result. If `None`, create a new array.

    Examples
    --------
    >>> from landlab import RasterModelGrid
    >>> import numpy as np
    >>> grid = RasterModelGrid((3, 4), xy_spacing=(4, 3))
    >>> z = [
    ...     [5.0, 5.0, 5.0, 5.0],
    ...     [5.0, 5.0, 0.0, 0.0],
    ...     [5.0, 0.0, 0.0, 0.0],
    ... ]
    >>> grid.calc_grad_at_diagonal(z)
    array([ 0.,  0., -1.,  0., -1., -1., -1.,  0., -1.,  0.,  0.,  0.])

    LLCATS: LINF GRAD
    """
    # Gradient = value difference divided by the diagonal's length.
    diagonal_lengths = grid.length_of_diagonal[: grid.number_of_diagonals]
    slopes = calc_diff_at_diagonal(grid, node_values, out=out)
    slopes /= diagonal_lengths
    return slopes
@use_field_name_or_array("node")
def calc_grad_at_link(grid, node_values, out=None):
    """Calculate gradients in node_values at links.

    Parameters
    ----------
    grid : RasterModelGrid
        A grid.
    node_values : array_like or field name
        Values at nodes.
    out : ndarray, optional
        Buffer to hold result. If `None`, create a new array.

    Returns
    -------
    ndarray
        Gradients of the nodes values for each link.

    Examples
    --------
    >>> from landlab import RasterModelGrid
    >>> grid = RasterModelGrid((3, 3))
    >>> node_values = [0., 0., 0.,
    ...                1., 3., 1.,
    ...                2., 2., 2.]
    >>> grid.calc_grad_at_link(node_values)
    array([ 0.,  0.,  1.,  3.,  1.,  2., -2.,  1., -1.,  1.,  0.,  0.])

    >>> out = np.empty(grid.number_of_links, dtype=float)
    >>> rtn = grid.calc_grad_at_link(node_values, out=out)
    >>> rtn is out
    True
    >>> out
    array([ 0.,  0.,  1.,  3.,  1.,  2., -2.,  1., -1.,  1.,  0.,  0.])

    >>> grid = RasterModelGrid((3, 3), xy_spacing=(2, 1))
    >>> grid.calc_grad_at_link(node_values)
    array([ 0.,  0.,  1.,  3.,  1.,  1., -1.,  1., -1.,  1.,  0.,  0.])
    >>> _ = grid.add_field("elevation", node_values, at="node")
    >>> grid.calc_grad_at_link('elevation')
    array([ 0.,  0.,  1.,  3.,  1.,  1., -1.,  1., -1.,  1.,  0.,  0.])

    LLCATS: LINF GRAD
    """
    # Gradient = value difference divided by the link's length.
    link_lengths = grid.length_of_link[: grid.number_of_links]
    slopes = gradients.calc_diff_at_link(grid, node_values, out=out)
    slopes /= link_lengths
    return slopes
@use_field_name_or_array("node")
def calc_grad_across_cell_faces(grid, node_values, *args, **kwds):
    """calc_grad_across_cell_faces(grid, node_values, [cell_ids], out=None)

    Get gradients across the faces of a cell.

    Calculate gradient of the value field provided by *node_values* across
    each of the faces of the cells of a grid. The returned gradients are
    ordered as right, top, left, and bottom.

    Note that the returned gradients are masked to exclude neighbor nodes which
    are closed. Beneath the mask is the value -1.

    Parameters
    ----------
    grid : RasterModelGrid
        Source grid.
    node_values : array_like or field name
        Quantity to take the gradient of defined at each node.
    cell_ids : array_like, optional
        If provided, cell ids to measure gradients. Otherwise, find gradients
        for all cells.
    out : array_like, optional
        Alternative output array in which to place the result. Must
        be of the same shape and buffer length as the expected output.

    Returns
    -------
    (N, 4) Masked ndarray
        Gradients for each face of the cell.

    Examples
    --------
    Create a grid with two cells.

    >>> from landlab import RasterModelGrid
    >>> grid = RasterModelGrid((3, 4))
    >>> x = np.array([0., 0., 0., 0.,
    ...               0., 0., 1., 1.,
    ...               3., 3., 3., 3.])

    A decrease in quantity across a face is a negative gradient.

    >>> grid.calc_grad_across_cell_faces(x) # doctest: +NORMALIZE_WHITESPACE
    masked_array(data =
     [[ 1.  3.  0.  0.]
      [ 0.  2. -1. -1.]],
     mask =
     False,
     fill_value = 1e+20)

    >>> grid = RasterModelGrid((3, 4), xy_spacing=(1, 2))
    >>> grid.calc_grad_across_cell_faces(x) # doctest: +NORMALIZE_WHITESPACE
    masked_array(data =
     [[ 1.   1.5  0.   0. ]
      [ 0.   1.  -1.  -0.5]],
     mask =
     False,
     fill_value = 1e+20)

    LLCATS: FINF GRAD
    """
    # Pad the value array with one extra sentinel slot so that a neighbor id
    # of -1 indexes this last element instead of a real node's value.
    padded_node_values = np.empty(node_values.size + 1, dtype=float)
    padded_node_values[-1] = grid.BAD_INDEX
    padded_node_values[:-1] = node_values

    cell_ids = make_optional_arg_into_id_array(grid.number_of_cells, *args)
    node_ids = grid.node_at_cell[cell_ids]
    # Orthogonal neighbors ordered right, top, left, bottom.
    neighbors = grid.active_adjacent_nodes_at_node[node_ids]
    # Remap BAD_INDEX neighbor ids to -1 so they hit the sentinel slot above.
    if grid.BAD_INDEX != -1:
        neighbors = np.where(neighbors == grid.BAD_INDEX, -1, neighbors)
    values_at_neighbors = padded_node_values[neighbors]
    # NOTE(review): when BAD_INDEX != -1, the remap above removes every
    # BAD_INDEX entry from ``neighbors``, so this mask is all-False in that
    # case -- presumably the remapped -1 entries were meant to be masked;
    # confirm against upstream before changing.
    masked_neighbor_values = np.ma.array(
        values_at_neighbors, mask=neighbors == grid.BAD_INDEX
    )
    values_at_nodes = node_values[node_ids].reshape(len(node_ids), 1)

    out = np.subtract(masked_neighbor_values, values_at_nodes, **kwds)

    # Right/left faces are a distance dx apart; top/bottom faces dy.
    out[:, (0, 2)] /= grid.dx
    out[:, (1, 3)] /= grid.dy

    return out
@use_field_name_or_array("node")
def calc_grad_across_cell_corners(grid, node_values, *args, **kwds):
    """calc_grad_across_cell_corners(grid, node_values, [cell_ids], out=None)

    Get gradients to diagonally opposite nodes.

    Calculate gradient of the value field provided by *node_values* to
    the values at diagonally opposite nodes. The returned gradients are
    ordered as upper-right, upper-left, lower-left and lower-right.

    Parameters
    ----------
    grid : RasterModelGrid
        Source grid.
    node_values : array_like or field name
        Quantity to take the gradient of defined at each node.
    cell_ids : array_like, optional
        If provided, cell ids to measure gradients. Otherwise, find gradients
        for all cells.
    out : array_like, optional
        Alternative output array in which to place the result. Must
        be of the same shape and buffer length as the expected output.

    Returns
    -------
    (N, 4) ndarray
        Gradients to each diagonal node.

    Examples
    --------
    Create a grid with two cells.

    >>> from landlab import RasterModelGrid
    >>> grid = RasterModelGrid((3, 4))
    >>> x = np.array([1., 0., 0., 1.,
    ...               0., 0., 1., 1.,
    ...               3., 3., 3., 3.])

    A decrease in quantity to a diagonal node is a negative gradient.

    >>> from math import sqrt
    >>> grid.calc_grad_across_cell_corners(x) * sqrt(2.)
    array([[ 3.,  3.,  1.,  0.],
           [ 2.,  2., -1.,  0.]])

    >>> grid = RasterModelGrid((3, 4), xy_spacing=(4, 3))
    >>> grid.calc_grad_across_cell_corners(x)
    array([[ 0.6,  0.6,  0.2,  0. ],
           [ 0.4,  0.4, -0.2,  0. ]])

    LLCATS: CNINF GRAD
    """
    cell_ids = make_optional_arg_into_id_array(grid.number_of_cells, *args)
    center_nodes = grid.node_at_cell[cell_ids]

    # Values at the four diagonal neighbors and at the cell's own node.
    diagonal_values = node_values[grid.diagonal_adjacent_nodes_at_node[center_nodes]]
    center_values = node_values[center_nodes].reshape(len(center_nodes), 1)

    # Gradient is the rise over the (constant) diagonal distance.
    result = np.subtract(diagonal_values, center_values, **kwds)
    result /= np.sqrt(grid.dy ** 2.0 + grid.dx ** 2.0)
    return result
@use_field_name_or_array("node")
def calc_grad_along_node_links(grid, node_values, *args, **kwds):
    """calc_grad_along_node_links(grid, node_values, [cell_ids], out=None)

    Get gradients along links touching a node.

    Calculate gradient of the value field provided by *node_values* across
    each of the faces of the nodes of a grid. The returned gradients are
    ordered as right, top, left, and bottom. All returned values follow our
    standard sign convention, where a link pointing N or E and increasing in
    value is positive, a link pointing S or W and increasing in value is
    negative.

    Note that the returned gradients are masked to exclude neighbor nodes which
    are closed. Beneath the mask is the value -1.

    Parameters
    ----------
    grid : RasterModelGrid
        Source grid.
    node_values : array_like or field name
        Quantity to take the gradient of defined at each node.
    node_ids : array_like, optional
        If provided, node ids to measure gradients. Otherwise, find gradients
        for all nodes.
    out : array_like, optional
        Alternative output array in which to place the result. Must
        be of the same shape and buffer length as the expected output.

    Returns
    -------
    (N, 4) Masked ndarray
        Gradients for each link of the node. Ordering is E,N,W,S.

    Examples
    --------
    Create a grid with nine nodes.

    >>> from landlab import RasterModelGrid
    >>> grid = RasterModelGrid((3, 3))
    >>> x = np.array([0., 0., 0.,
    ...               0., 1., 2.,
    ...               2., 2., 2.])

    A decrease in quantity across a face is a negative gradient.

    >>> grid.calc_grad_along_node_links(x) # doctest: +NORMALIZE_WHITESPACE
    masked_array(data =
     [[-- -- -- --]
      [-- 1.0 -- --]
      [-- -- -- --]
      [1.0 -- -- --]
      [1.0 1.0 1.0 1.0]
      [-- -- 1.0 --]
      [-- -- -- --]
      [-- -- -- 1.0]
      [-- -- -- --]],
     mask =
     [[ True  True  True  True]
      [ True False  True  True]
      [ True  True  True  True]
      [False  True  True  True]
      [False False False False]
      [ True  True False  True]
      [ True  True  True  True]
      [ True  True  True False]
      [ True  True  True  True]],
     fill_value = 1e+20)

    >>> grid = RasterModelGrid((3, 3), xy_spacing=(4, 2))
    >>> grid.calc_grad_along_node_links(x) # doctest: +NORMALIZE_WHITESPACE
    masked_array(data =
     [[-- -- -- --]
      [-- 0.5 -- --]
      [-- -- -- --]
      [0.25 -- -- --]
      [0.25 0.5 0.25 0.5]
      [-- -- 0.25 --]
      [-- -- -- --]
      [-- -- -- 0.5]
      [-- -- -- --]],
     mask =
     [[ True  True  True  True]
      [ True False  True  True]
      [ True  True  True  True]
      [False  True  True  True]
      [False False False False]
      [ True  True False  True]
      [ True  True  True  True]
      [ True  True  True False]
      [ True  True  True  True]],
     fill_value = 1e+20)

    LLCATS: NINF LINF GRAD
    """
    # Pad the value array with one extra sentinel slot holding BAD_INDEX so
    # that a neighbor id of -1 reads the sentinel value.
    padded_node_values = np.empty(node_values.size + 1, dtype=float)
    padded_node_values[-1] = grid.BAD_INDEX
    padded_node_values[:-1] = node_values

    node_ids = make_optional_arg_into_id_array(grid.number_of_nodes, *args)
    # Active orthogonal neighbors ordered E, N, W, S.
    neighbors = grid.active_adjacent_nodes_at_node[node_ids]
    values_at_neighbors = padded_node_values[neighbors]
    # Mask entries whose *value* equals BAD_INDEX -- i.e. missing neighbors
    # that picked up the sentinel.  NOTE(review): a real node value that
    # happens to equal BAD_INDEX would also be masked; confirm acceptable.
    masked_neighbor_values = np.ma.array(
        values_at_neighbors, mask=values_at_neighbors == grid.BAD_INDEX
    )
    values_at_nodes = node_values[node_ids].reshape(len(node_ids), 1)

    out = np.ma.empty_like(masked_neighbor_values, dtype=float)
    # E and N links (columns 0-1): neighbor minus node; W and S links
    # (columns 2-3): node minus neighbor, per the sign convention above.
    np.subtract(masked_neighbor_values[:, :2], values_at_nodes, out=out[:, :2], **kwds)
    np.subtract(values_at_nodes, masked_neighbor_values[:, 2:], out=out[:, 2:], **kwds)
    # E/W links span dx; N/S links span dy.
    out[:, (0, 2)] /= grid.dx
    out[:, (1, 3)] /= grid.dy

    return out
def calc_unit_normals_at_cell_subtriangles(grid, elevs="topographic__elevation"):
    """Calculate unit normals on a cell.

    Calculate the eight unit normal vectors <a, b, c> to the eight
    subtriangles of a four-cornered (raster) cell.

    Parameters
    ----------
    grid : RasterModelGrid
        A grid.
    elevs : str or ndarray, optional
        Field name or array of node values.

    Returns
    -------
    (n_ENE, n_NNE, n_NNW, n_WNW, n_WSW, n_SSW, n_SSE, n_ESE) :
        each a num-cells x length-3 array
        Len-8 tuple of the eight unit normal vectors <a, b, c> for the eight
        subtriangles in the cell. Order is from north of east, counter
        clockwise to south of east (East North East, North North East, North
        North West, West North West, West South West, South South West, South
        South East, East South East).

    Examples
    --------
    >>> import numpy as np
    >>> from landlab import RasterModelGrid
    >>> mg = RasterModelGrid((3, 3))
    >>> z = mg.node_x ** 2
    >>> eight_tris = mg.calc_unit_normals_at_cell_subtriangles(z)
    >>> type(eight_tris) is tuple
    True
    >>> len(eight_tris)
    8
    >>> eight_tris[0].shape == (mg.number_of_cells, 3)
    True
    >>> eight_tris # doctest: +NORMALIZE_WHITESPACE
    (array([[-0.9486833 ,  0.        ,  0.31622777]]),
     array([[-0.9486833 ,  0.        ,  0.31622777]]),
     array([[-0.70710678,  0.        ,  0.70710678]]),
     array([[-0.70710678,  0.        ,  0.70710678]]),
     array([[-0.70710678,  0.        ,  0.70710678]]),
     array([[-0.70710678,  0.        ,  0.70710678]]),
     array([[-0.9486833 ,  0.        ,  0.31622777]]),
     array([[-0.9486833 ,  0.        ,  0.31622777]]))

    LLCATS: CINF GRAD
    """
    # Unit normals are computed for every node (including the degenerate
    # part-cells on the perimeter) and then sliced down to nodes with cells.
    normals_at_node = _calc_subtriangle_unit_normals_at_node(grid, elevs=elevs)
    cell_rows = grid.node_at_cell
    return tuple(normal[cell_rows, :] for normal in normals_at_node)
def _calc_subtriangle_unit_normals_at_node(grid, elevs="topographic__elevation"):
"""Private Function: Calculate unit normals on subtriangles at all nodes.
Calculate the eight unit normal vectors <a, b, c> to the eight
subtriangles of a four-cornered (raster) cell. Unlike
calc_unit_normals_at_node_subtriangles, this function also
calculated unit normals at the degenerate part-cells around the
boundary.
On the grid boundaries where the cell is not fully defined, the unit normal
is given as <nan, nan, nan>.
Parameters
----------
grid : RasterModelGrid
A grid.
elevs : str or ndarray, optional
Field name or array of node values.
Returns
-------
(n_ENE, n_NNE, n_NNW, n_WNW, n_WSW, n_SSW, n_SSE, n_ESE) :
each a num-nodes x length-3 array
Len-8 tuple of the eight unit normal vectors <a, b, c> for the eight
subtriangles in the cell. Order is from north of east, counter
clockwise to south of east (East North East, North North East, North
North West, West North West, West South West, South South West, South
South East, East South East).
Examples
--------
>>> import numpy as np
>>> from landlab import RasterModelGrid
>>> from landlab.grid.raster_gradients import(
... _calc_subtriangle_unit_normals_at_node
... )
>>> mg = RasterModelGrid((3, 3))
>>> z = mg.node_x ** 2
>>> eight_tris = _calc_subtriangle_unit_normals_at_node(mg, z)
>>> type(eight_tris) is tuple
True
>>> len(eight_tris)
8
>>> eight_tris[0].shape == (mg.number_of_nodes, 3)
True
>>> eight_tris[0] # doctest: +NORMALIZE_WHITESPACE
array([[-0.70710678, 0. , 0.70710678],
[-0.9486833 , 0. , 0.31622777],
[ nan, nan, nan],
[-0.70710678, 0. , 0.70710678],
[-0.9486833 , 0. , 0.31622777],
[ nan, nan, nan],
[ nan, nan, nan],
[ nan, nan, nan],
[ nan, nan, nan]])
LLCATS: CINF GRAD
"""
try:
z = grid.at_node[elevs]
except TypeError:
z = elevs
# cell has center node I
# orthogonal neighbors P, R, T, V, counter clockwise from East
# diagonal neihbors Q, S, U, W, counter clocwise from North East
# There are 8 subtriangles that can be defined with the following corners
# (starting from the central node, and progressing counter-clockwise).
# ENE: IPQ
# NNE: IQR
# NNW: IRS
# WNW: IST
# WSW: ITU
# SSW: IUV
# SSE: IVW
# ESE: IWP
# There are thus 8 vectors, IP, IQ, IR, IS, IT, IU, IV, IW
# initialized difference matricies for cross product
diff_xyz_IP = np.empty((grid.number_of_nodes, 3)) # East
# ^this is the vector (xP-xI, yP-yI, zP-yI)
diff_xyz_IQ = np.empty((grid.number_of_nodes, 3)) # Northeast
diff_xyz_IR = np.empty((grid.number_of_nodes, 3)) # North
diff_xyz_IS = np.empty((grid.number_of_nodes, 3)) # Northwest
diff_xyz_IT = np.empty((grid.number_of_nodes, 3)) # West
diff_xyz_IU = np.empty((grid.number_of_nodes, 3)) # Southwest
diff_xyz_IV = np.empty((grid.number_of_nodes, 3)) # South
diff_xyz_IW = np.empty((grid.number_of_nodes, 3)) # Southeast
# identify the grid neigbors at each location
node_at_cell = np.arange(grid.number_of_nodes)
P = grid.adjacent_nodes_at_node[node_at_cell, 0]
Q = grid.diagonal_adjacent_nodes_at_node[node_at_cell, 0]
R = grid.adjacent_nodes_at_node[node_at_cell, 1]
S = grid.diagonal_adjacent_nodes_at_node[node_at_cell, 1]
T = grid.adjacent_nodes_at_node[node_at_cell, 2]
U = grid.diagonal_adjacent_nodes_at_node[node_at_cell, 2]
V = grid.adjacent_nodes_at_node[node_at_cell, 3]
W = grid.diagonal_adjacent_nodes_at_node[node_at_cell, 3]
# get x, y, z coordinates for each location
x_I = grid.node_x[node_at_cell]
y_I = grid.node_y[node_at_cell]
z_I = z[node_at_cell]
x_P = grid.node_x[P]
y_P = grid.node_y[P]
z_P = z[P]
x_Q = grid.node_x[Q]
y_Q = grid.node_y[Q]
z_Q = z[Q]
x_R = grid.node_x[R]
y_R = grid.node_y[R]
z_R = z[R]
x_S = grid.node_x[S]
y_S = grid.node_y[S]
z_S = z[S]
x_T = grid.node_x[T]
y_T = grid.node_y[T]
z_T = z[T]
x_U = grid.node_x[U]
y_U = grid.node_y[U]
z_U = z[U]
x_V = grid.node_x[V]
y_V = grid.node_y[V]
z_V = z[V]
x_W = grid.node_x[W]
y_W = grid.node_y[W]
z_W = z[W]
# calculate vectors by differencing
diff_xyz_IP[:, 0] = x_P - x_I
diff_xyz_IP[:, 1] = y_P - y_I
diff_xyz_IP[:, 2] = z_P - z_I
diff_xyz_IQ[:, 0] = x_Q - x_I
diff_xyz_IQ[:, 1] = y_Q - y_I
diff_xyz_IQ[:, 2] = z_Q - z_I
diff_xyz_IR[:, 0] = x_R - x_I
diff_xyz_IR[:, 1] = y_R - y_I
diff_xyz_IR[:, 2] = z_R - z_I
diff_xyz_IS[:, 0] = x_S - x_I
diff_xyz_IS[:, 1] = y_S - y_I
diff_xyz_IS[:, 2] = z_S - z_I
diff_xyz_IT[:, 0] = x_T - x_I
diff_xyz_IT[:, 1] = y_T - y_I
diff_xyz_IT[:, 2] = z_T - z_I
diff_xyz_IU[:, 0] = x_U - x_I
diff_xyz_IU[:, 1] = y_U - y_I
diff_xyz_IU[:, 2] = z_U - z_I
diff_xyz_IV[:, 0] = x_V - x_I
diff_xyz_IV[:, 1] = y_V - y_I
diff_xyz_IV[:, 2] = z_V - z_I
diff_xyz_IW[:, 0] = x_W - x_I
diff_xyz_IW[:, 1] = y_W - y_I
diff_xyz_IW[:, 2] = z_W - z_I
# calculate cross product to get unit normal
# cross product is orthogonal to both vectors, and is the normal
# n = <a, b, c>, where plane is ax + by + cz = d
nhat_ENE = np.cross(diff_xyz_IP, diff_xyz_IQ) # <a, b, c>
nhat_NNE = np.cross(diff_xyz_IQ, diff_xyz_IR)
nhat_NNW = np.cross(diff_xyz_IR, diff_xyz_IS)
nhat_WNW = np.cross(diff_xyz_IS, diff_xyz_IT)
nhat_WSW = np.cross(diff_xyz_IT, diff_xyz_IU)
nhat_SSW = np.cross(diff_xyz_IU, diff_xyz_IV)
nhat_SSE = np.cross(diff_xyz_IV, diff_xyz_IW)
nhat_ESE = np.cross(diff_xyz_IW, diff_xyz_IP)
# now remove the bad subtriangles based on parts of the grid
# make the bad subtriangle of length greater than one.
bad = np.nan
# first, corners:
(northeast, northwest, southwest, southeast) = grid.nodes_at_corners_of_grid
# lower left corner only has NNE and ENE
for array in (nhat_NNW, nhat_WNW, nhat_WSW, nhat_SSW, nhat_SSE, nhat_ESE):
array[southwest] = bad
# lower right corner only has NNW and WNW
for array in (nhat_ENE, nhat_NNE, nhat_WSW, nhat_SSW, nhat_SSE, nhat_ESE):
array[southeast] = bad
# upper left corner only has ESE and SSE
for array in (nhat_ENE, nhat_NNE, nhat_NNW, nhat_WNW, nhat_WSW, nhat_SSW):
array[northwest] = bad
# upper right corner only has WSW and SSW
for array in (nhat_ENE, nhat_NNE, nhat_NNW, nhat_WNW, nhat_SSE, nhat_ESE):
array[northeast] = bad
# next, sizes:
# bottom row only has Norths
bottom = grid.nodes_at_bottom_edge
for array in (nhat_WSW, nhat_SSW, nhat_SSE, nhat_ESE):
array[bottom] = bad
# left side only has Easts
left = grid.nodes_at_left_edge
for array in (nhat_NNW, nhat_WNW, nhat_WSW, nhat_SSW):
array[left] = bad
# top row only has Souths
top = grid.nodes_at_top_edge
for array in (nhat_ENE, nhat_NNE, nhat_NNW, nhat_WNW):
array[top] = bad
# right side only has Wests
right = grid.nodes_at_right_edge
for array in (nhat_ENE, nhat_NNE, nhat_SSE, nhat_ESE):
array[right] = bad
# calculate magnitude of cross product so that the result is a unit normal
nmag_ENE = np.sqrt(np.square(nhat_ENE).sum(axis=1))
nmag_NNE = np.sqrt(np.square(nhat_NNE).sum(axis=1))
nmag_NNW = np.sqrt(np.square(nhat_NNW).sum(axis=1))
nmag_WNW = np.sqrt(np.square(nhat_WNW).sum(axis=1))
nmag_WSW = np.sqrt(np.square(nhat_WSW).sum(axis=1))
nmag_SSW = np.sqrt(np.square(nhat_SSW).sum(axis=1))
nmag_SSE = np.sqrt(np.square(nhat_SSE).sum(axis=1))
nmag_ESE = np.sqrt(np.square(nhat_ESE).sum(axis=1))
# normalize the cross product with its magnitude so it is a unit normal
# instead of a variable length normal.
n_ENE = nhat_ENE / nmag_ENE.reshape(grid.number_of_nodes, 1)
n_NNE = nhat_NNE / nmag_NNE.reshape(grid.number_of_nodes, 1)
n_NNW = nhat_NNW / nmag_NNW.reshape(grid.number_of_nodes, 1)
n_WNW = nhat_WNW / nmag_WNW.reshape(grid.number_of_nodes, 1)
n_WSW = nhat_WSW / nmag_WSW.reshape(grid.number_of_nodes, 1)
n_SSW = nhat_SSW / nmag_SSW.reshape(grid.number_of_nodes, 1)
n_SSE = nhat_SSE / nmag_SSE.reshape(grid.number_of_nodes, 1)
n_ESE = nhat_ESE / nmag_ESE.reshape(grid.number_of_nodes, 1)
return (n_ENE, n_NNE, n_NNW, n_WNW, n_WSW, n_SSW, n_SSE, n_ESE)
def calc_slope_at_cell_subtriangles(
    grid, elevs="topographic__elevation", subtriangle_unit_normals=None
):
    """Calculate the slope (positive magnitude of gradient) at each of the
    eight cell subtriangles.

    Parameters
    ----------
    grid : RasterModelGrid
        A grid.
    elevs : str or ndarray, optional
        Field name or array of node values.
    subtriangle_unit_normals : tuple of 8 (ncells, 3) arrays (optional)
        The unit normal vectors for the eight subtriangles of each cell,
        if already known. Order is from north of east, counter
        clockwise to south of east (East North East, North North East, North
        North West, West North West, West South West, South South West, South
        South East, East South East).

    Returns
    -------
    (s_ENE, s_NNE, s_NNW, s_WNW, s_WSW, s_SSW, s_SSE, s_ESE) :
        each a length num-cells array
        Len-8 tuple of the slopes (positive gradient magnitude) of each of the
        eight cell subtriangles, in radians. Order is from north of east,
        counter clockwise to south of east (East North East, North North East,
        North North West, West North West, West South West, South South West,
        South South East, East South East).

    Examples
    --------
    >>> import numpy as np
    >>> from landlab import RasterModelGrid
    >>> mg = RasterModelGrid((3, 3))
    >>> z = np.array([np.sqrt(3.), 0., 4./3.,
    ...               0., 0., 0.,
    ...               1., 0., 1./np.sqrt(3.)])
    >>> eight_tris = mg.calc_unit_normals_at_cell_subtriangles(z)
    >>> S = mg.calc_slope_at_cell_subtriangles(z, eight_tris)
    >>> S0 = mg.calc_slope_at_cell_subtriangles(z)
    >>> np.allclose(S, S0)
    True
    >>> type(S) is tuple
    True
    >>> len(S)
    8
    >>> len(S[0]) == mg.number_of_cells
    True
    >>> np.allclose(S[0], S[1])
    True
    >>> np.allclose(S[2], S[3])
    True
    >>> np.allclose(S[4], S[5])
    True
    >>> np.allclose(S[6], S[7])
    True
    >>> np.allclose(np.rad2deg(S[0])[0], 30.)
    True
    >>> np.allclose(np.rad2deg(S[2])[0], 45.)
    True
    >>> np.allclose(np.rad2deg(S[4])[0], 60.)
    True
    >>> np.allclose(np.cos(S[6])[0], 3./5.)
    True

    LLCATS: CINF GRAD
    """
    # Slopes are computed for every node, then reduced to cells.
    slopes = _calc_subtriangle_slopes_at_node(
        grid, elevs=elevs, subtriangle_unit_normals=subtriangle_unit_normals
    )
    # The helper returns node-sized arrays unless per-cell unit normals were
    # supplied, in which case the arrays are already cell-sized.
    if slopes[0].shape[0] == grid.number_of_nodes:
        rows = grid.node_at_cell
    else:
        rows = np.arange(grid.number_of_cells)
    return tuple(s[rows] for s in slopes)
def _calc_subtriangle_slopes_at_node(
    grid, elevs="topographic__elevation", subtriangle_unit_normals=None
):
    """Private Function: Calculate subtriangle slopes at all nodes.
    Calculate the slope (positive magnitude of gradient) at each of the
    eight subtriangles, including those at not-full cells along the
    boundary.
    Those subtriangles that don't exist because they are on the edge
    of the grid have slopes of NAN.
    Parameters
    ----------
    grid : RasterModelGrid
        A grid.
    elevs : str or ndarray, optional
        Field name or array of node values.
    subtriangle_unit_normals : tuple of 8 (ncells, 3) or (nnodes, 3) arrays
        (optional)
        The unit normal vectors for the eight subtriangles of each cell or
        node, if already known. Order is from north of east, counter
        clockwise to south of east (East North East, North North East, North
        North West, West North West, West South West, South South West, South
        South East, East South East).
    Returns
    -------
    (s_ENE, s_NNE, s_NNW, s_WNW, s_WSW, s_SSW, s_SSE, s_ESE) :
        each a length num-cells array
        Len-8 tuple of the slopes (positive gradient magnitude) of each of the
        eight cell subtriangles, in radians. Order is from north of east,
        counter clockwise to south of east (East North East, North North East,
        North North West, West North West, West South West, South South West,
        South South East, East South East).
    Examples
    --------
    >>> import numpy as np
    >>> from landlab import RasterModelGrid
    >>> from landlab.grid.raster_gradients import(
    ...     _calc_subtriangle_unit_normals_at_node,
    ...     _calc_subtriangle_slopes_at_node)
    >>> mg = RasterModelGrid((3, 3))
    >>> z = np.array([np.sqrt(3.), 0., 4./3.,
    ...               0., 0., 0.,
    ...               1., 0., 1./np.sqrt(3.)])
    >>> eight_tris = _calc_subtriangle_unit_normals_at_node(mg, z)
    >>> S = _calc_subtriangle_slopes_at_node(mg, z, eight_tris)
    >>> S0 = _calc_subtriangle_slopes_at_node(mg, z)
    >>> np.allclose(S, S0, equal_nan=True)
    True
    >>> type(S) is tuple
    True
    >>> len(S)
    8
    >>> len(S[0]) == mg.number_of_nodes
    True
    >>> np.allclose(S[0][mg.core_nodes], S[1][mg.core_nodes])
    True
    >>> np.allclose(S[2][mg.core_nodes], S[3][mg.core_nodes])
    True
    >>> np.allclose(S[4][mg.core_nodes], S[5][mg.core_nodes])
    True
    >>> np.allclose(S[6][mg.core_nodes], S[7][mg.core_nodes])
    True
    >>> np.allclose(np.rad2deg(S[0][mg.core_nodes]), 30.)
    True
    >>> np.allclose(np.rad2deg(S[2][mg.core_nodes]), 45.)
    True
    >>> np.allclose(np.rad2deg(S[4])[mg.core_nodes], 60.)
    True
    >>> np.allclose(np.cos(S[6])[mg.core_nodes], 3./5.)
    True
    LLCATS: CINF GRAD
    """
    # verify that subtriangle_unit_normals is of the correct form.
    if subtriangle_unit_normals is not None:
        assert len(subtriangle_unit_normals) == 8
        for unit_normal in subtriangle_unit_normals:
            # each entry must be an (n, 3) array of <a, b, c> unit vectors
            assert unit_normal.shape[1] == 3
        (
            n_ENE,
            n_NNE,
            n_NNW,
            n_WNW,
            n_WSW,
            n_SSW,
            n_SSE,
            n_ESE,
        ) = subtriangle_unit_normals
        n_entries = subtriangle_unit_normals[7].shape[0]
        if n_entries == grid.number_of_nodes:
            reshape_size = grid.number_of_nodes
        elif n_entries == grid.number_of_cells:
            reshape_size = grid.number_of_cells
        else:
            # BUG FIX: previously the ValueError was constructed but never
            # raised, so execution fell through with reshape_size undefined
            # and produced a confusing NameError below.  (Also fixed the
            # "lenght" typo in the message.)
            raise ValueError("Subtriangles must be of length nnodes or ncells")
    else:
        # derive the unit normals from the node elevations
        (
            n_ENE,
            n_NNE,
            n_NNW,
            n_WNW,
            n_WSW,
            n_SSW,
            n_SSE,
            n_ESE,
        ) = _calc_subtriangle_unit_normals_at_node(grid, elevs)
        reshape_size = grid.number_of_nodes
    # combine the z-direction components of all eight normals so that the
    # arccosine portion only takes one (vectorized) function call.
    normals = (n_ENE, n_NNE, n_NNW, n_WNW, n_WSW, n_SSW, n_SSE, n_ESE)
    dotprod = np.empty((reshape_size, 8))
    for col, unit_normal in enumerate(normals):
        # z component of a unit normal == cos(slope angle), by definition
        dotprod[:, col] = unit_normal[:, 2]
    # take the inverse cosine of the z component to get the slope angle
    slopes_at_subtriangles = np.arccos(dotprod)
    # split the array back into one 1-D array per subtriangle
    return tuple(slopes_at_subtriangles[:, col] for col in range(8))
def calc_aspect_at_cell_subtriangles(
    grid, elevs="topographic__elevation", subtriangle_unit_normals=None, unit="degrees"
):
    """Get tuple of arrays of aspect of each of the eight cell subtriangles.
    Aspect is returned as radians clockwise of north, unless input parameter
    units is set to 'degrees'.
    If subtriangle_unit_normals is provided the aspect will be calculated from
    these data.
    If it is not, it will be derived from elevation data at the nodes,
    which can either be a string referring to a grid field (default:
    'topographic__elevation'), or an nnodes-long numpy array of the
    values themselves.
    Parameters
    ----------
    grid : ModelGrid
        A ModelGrid.
    elevs : str or array (optional)
        Node field name or node array of elevations.
        If *subtriangle_unit_normals* is not provided, must be set, but unused
        otherwise.
    subtriangle_unit_normals : tuple of 8 (ncells, 3) arrays (optional)
        The unit normal vectors for the eight subtriangles of each cell,
        if already known. Order is from north of east, counter
        clockwise to south of east (East North East, North North East, North
        North West, West North West, West South West, South South West, South
        South East, East South East).
    unit : {'degrees', 'radians'}
        Controls the unit that the aspect is returned as.
    Returns
    -------
    (a_ENE, a_NNE, a_NNW, a_WNW, a_WSW, a_SSW, a_SSE, a_ESE) :
        each a length num-cells array
        Len-8 tuple of the aspect of each of the eight cell subtriangles.
        Aspect is returned as angle clockwise of north. Units are given as
        radians unless input parameter units is set to 'degrees'.
        Order is from north of east, counter clockwise to south of east (East
        North East, North North East, North North West, West North West, West
        South West, South South West, South South East, East South East).
    Examples
    --------
    >>> import numpy as np
    >>> from landlab import RasterModelGrid
    >>> mg = RasterModelGrid((3, 3))
    >>> z = np.array([1., 0., 1., 0., 0., 0., 1., 0., 1.])
    >>> eight_tris = mg.calc_unit_normals_at_cell_subtriangles(z)
    >>> A = mg.calc_aspect_at_cell_subtriangles(z, eight_tris)
    >>> A0 = mg.calc_aspect_at_cell_subtriangles(z)
    >>> np.allclose(A, A0)
    True
    >>> type(A) is tuple
    True
    >>> len(A)
    8
    >>> len(A[0]) == mg.number_of_cells
    True
    >>> A0 # doctest: +NORMALIZE_WHITESPACE
    (array([ 180.]), array([ 270.]), array([ 90.]), array([ 180.]),
     array([ 0.]), array([ 90.]), array([ 270.]), array([ 0.]))
    LLCATS: CINF SURF
    """
    # compute aspects for every node-based subtriangle first; the helper
    # also validates ``unit`` and raises TypeError for anything unrecognised
    node_aspects = _calc_subtriangle_aspect_at_node(
        grid, elevs=elevs, subtriangle_unit_normals=subtriangle_unit_normals, unit=unit
    )
    # work out how to trim the node-based arrays down to cells only
    if node_aspects[7].shape[0] == grid.number_of_nodes:
        cell_index = grid.node_at_cell
    else:
        cell_index = np.arange(grid.number_of_cells)
    # guard retained for parity with the original interface
    if unit not in ("degrees", "radians"):
        raise TypeError("unit must be 'degrees' or 'radians'")
    # keep the eight-way ENE..ESE ordering of the helper's return value
    return tuple(aspect[cell_index] for aspect in node_aspects)
def _calc_subtriangle_aspect_at_node(
    grid, elevs="topographic__elevation", subtriangle_unit_normals=None, unit="degrees"
):
    """Private Function: Aspect of subtriangles at node.
    This function calculates the aspect of all subtriangles, including those
    that are at nodes without cells (on the boundaries).
    Aspect is returned as radians clockwise of north, unless input parameter
    units is set to 'degrees'.
    If subtriangle_unit_normals is provided the aspect will be calculated from
    these data.
    If it is not, it will be derived from elevation data at the nodes,
    which can either be a string referring to a grid field (default:
    'topographic__elevation'), or an nnodes-long numpy array of the
    values themselves.
    Parameters
    ----------
    grid : ModelGrid
        A ModelGrid.
    elevs : str or array (optional)
        Node field name or node array of elevations.
        If *subtriangle_unit_normals* is not provided, must be set, but unused
        otherwise.
    subtriangle_unit_normals : tuple of 8 (ncells, 3) or (nnodes, 3) arrays
        (optional)
        The unit normal vectors for the eight subtriangles of each cell or
        node, if already known. Order is from north of east, counter
        clockwise to south of east (East North East, North North East, North
        North West, West North West, West South West, South South West, South
        South East, East South East).
    unit : {'degrees', 'radians'}
        Controls the unit that the aspect is returned as.
    Returns
    -------
    (a_ENE, a_NNE, a_NNW, a_WNW, a_WSW, a_SSW, a_SSE, a_ESE) :
        each a length num-cells array
        Len-8 tuple of the aspect of each of the eight cell subtriangles.
        Aspect is returned as angle clockwise of north. Units are given as
        radians unless input parameter units is set to 'degrees'.
        Order is from north of east, counter clockwise to south of east (East
        North East, North North East, North North West, West North West, West
        South West, South South West, South South East, East South East).
    Examples
    --------
    >>> import numpy as np
    >>> from landlab import RasterModelGrid
    >>> from landlab.grid.raster_gradients import (
    ...     _calc_subtriangle_unit_normals_at_node,
    ...     _calc_subtriangle_aspect_at_node)
    >>> mg = RasterModelGrid((3, 3))
    >>> z = np.array([1., 0., 1., 0., 0., 0., 1., 0., 1.])
    >>> eight_tris = _calc_subtriangle_unit_normals_at_node(mg, z)
    >>> A = _calc_subtriangle_aspect_at_node(mg, z, eight_tris)
    >>> A0 = _calc_subtriangle_aspect_at_node(mg, z)
    >>> np.allclose(A, A0, equal_nan=True)
    True
    >>> type(A) is tuple
    True
    >>> len(A)
    8
    >>> len(A[0]) == mg.number_of_nodes
    True
    >>> A0 # doctest: +NORMALIZE_WHITESPACE
    (array([ 90., 315., nan, 90., 180., nan, nan, nan, nan]),
     array([ 0., 90., nan, 135., 270., nan, nan, nan, nan]),
     array([ nan, 90., 0., nan, 90., 225., nan, nan, nan]),
     array([ nan, 45., 270., nan, 180., 90., nan, nan, nan]),
     array([ nan, nan, nan, nan, 0., 90., nan, 135., 270.]),
     array([ nan, nan, nan, nan, 90., 315., nan, 90., 180.]),
     array([ nan, nan, nan, 45., 270., nan, 180., 90., nan]),
     array([ nan, nan, nan, 270., 0., nan, 90., 225., nan]))
    LLCATS: CINF SURF
    """
    # verify that subtriangle_unit_normals is of the correct form.
    if subtriangle_unit_normals is not None:
        assert len(subtriangle_unit_normals) == 8
        for unit_normal in subtriangle_unit_normals:
            # each entry must be an (n, 3) array of <a, b, c> unit vectors
            assert unit_normal.shape[1] == 3
        n_entries = subtriangle_unit_normals[7].shape[0]
        if n_entries == grid.number_of_nodes:
            reshape_size = grid.number_of_nodes
        elif n_entries == grid.number_of_cells:
            reshape_size = grid.number_of_cells
        else:
            # BUG FIX: previously the ValueError was constructed but never
            # raised, so execution continued with reshape_size undefined.
            # (Also fixed the "lenght" typo in the message.)
            raise ValueError("Subtriangles must be of length nnodes or ncells")
        normals = subtriangle_unit_normals
    # otherwise create it.
    else:
        normals = _calc_subtriangle_unit_normals_at_node(grid, elevs)
        reshape_size = grid.number_of_nodes
    # calculate the aspect of each subtriangle as an angle ccw from the
    # x axis (math angle); order follows ``normals`` (ENE ... ESE)
    math_angles = tuple(
        np.reshape(np.arctan2(n[:, 1], n[:, 0]), reshape_size) for n in normals
    )
    # convert reference from math angle to angles clockwise from north
    # and return as either radians or degrees depending on unit.
    if unit == "degrees":
        return tuple(radians_to_degrees(angle) for angle in math_angles)
    elif unit == "radians":
        # 5*pi/2 - angle maps ccw-from-east onto cw-from-north; the modulo
        # folds the result into [0, 2*pi)
        return tuple(
            (5.0 * np.pi / 2.0 - angle) % (2.0 * np.pi) for angle in math_angles
        )
    else:
        raise TypeError("unit must be 'degrees' or 'radians'")
def calc_unit_normals_at_patch_subtriangles(grid, elevs="topographic__elevation"):
    """Calculate unit normals on a patch.
    Calculate the four unit normal vectors <a, b, c> to the four possible
    subtriangles of a four-cornered (raster) patch.
    Parameters
    ----------
    grid : RasterModelGrid
        A grid.
    elevs : str or ndarray, optional
        Field name or array of node values.
    Returns
    -------
    (n_TR, n_TL, n_BL, n_BR) : each a num-patches x length-3 array
        Len-4 tuple of the four unit normal vectors <a, b, c> for the four
        possible subtriangles in the patch. Order is (topright, topleft,
        bottomleft, bottomright).
    Examples
    --------
    >>> import numpy as np
    >>> from landlab import RasterModelGrid
    >>> mg = RasterModelGrid((4, 5))
    >>> z = mg.node_x ** 2
    >>> four_tris = mg.calc_unit_normals_at_patch_subtriangles(z)
    >>> type(four_tris) is tuple
    True
    >>> len(four_tris)
    4
    >>> np.allclose(four_tris[0], four_tris[1])
    True
    >>> np.allclose(four_tris[2], four_tris[3])
    True
    >>> np.allclose(four_tris[0], four_tris[2])
    True
    >>> np.allclose(np.square(four_tris[0]).sum(axis=1), 1.)
    True
    >>> four_tris[0]
    array([[-0.70710678, 0. , 0.70710678],
           [-0.9486833 , 0. , 0.31622777],
           [-0.98058068, 0. , 0.19611614],
           [-0.98994949, 0. , 0.14142136],
           [-0.70710678, 0. , 0.70710678],
           [-0.9486833 , 0. , 0.31622777],
           [-0.98058068, 0. , 0.19611614],
           [-0.98994949, 0. , 0.14142136],
           [-0.70710678, 0. , 0.70710678],
           [-0.9486833 , 0. , 0.31622777],
           [-0.98058068, 0. , 0.19611614],
           [-0.98994949, 0. , 0.14142136]])
    LLCATS: PINF GRAD
    """
    # accept either a field name or a raw array of node elevations
    try:
        node_z = grid.at_node[elevs]
    except TypeError:
        node_z = elevs
    # gather the (x, y, z) coordinates of each patch corner; corners are
    # PQRS, counter-clockwise from the NE corner of the patch
    corner_nodes = grid.nodes_at_patch
    corner_xyz = np.empty((grid.number_of_patches, 4, 3))
    corner_xyz[:, :, 0] = grid.node_x[corner_nodes]
    corner_xyz[:, :, 1] = grid.node_y[corner_nodes]
    corner_xyz[:, :, 2] = node_z[corner_nodes]
    P = corner_xyz[:, 0, :]
    Q = corner_xyz[:, 1, :]
    R = corner_xyz[:, 2, :]
    S = corner_xyz[:, 3, :]
    # edge vectors of each patch, conceptualized as TWO sets of 3 nodes
    vec_PQ = Q - P  # TOP
    vec_PS = S - P  # RIGHT
    vec_RS = S - R  # BOTTOM
    vec_QR = R - Q  # LEFT

    def _normalize_rows(vectors):
        # scale each row vector of ``vectors`` to unit length
        magnitude = np.sqrt(np.square(vectors).sum(axis=1))
        return vectors / magnitude.reshape(grid.number_of_patches, 1)

    # the cross product of two edge vectors is orthogonal to both and hence
    # normal to the subtriangle's plane: n = <a, b, c> in ax + by + cz = d
    n_TR = _normalize_rows(np.cross(vec_PQ, vec_PS))
    n_TL = _normalize_rows(np.cross(vec_PQ, vec_QR))
    n_BL = _normalize_rows(np.cross(vec_QR, vec_RS))
    n_BR = _normalize_rows(np.cross(vec_PS, vec_RS))
    return (n_TR, n_TL, n_BL, n_BR)
def calc_slope_at_patch(
    grid,
    elevs="topographic__elevation",
    ignore_closed_nodes=True,
    subtriangle_unit_normals=None,
):
    """Calculate the slope (positive magnitude of gradient) at raster patches.
    Returns the mean of the slopes of the four possible patch subtriangles.
    If ignore_closed_nodes is True, closed nodes do not affect slope
    calculations. If more than one closed node is present in a patch, the
    patch slope is set to zero.
    Parameters
    ----------
    grid : RasterModelGrid
        A grid.
    elevs : str or ndarray, optional
        Field name or array of node values.
    ignore_closed_nodes : bool
        If True, do not incorporate values at closed nodes into the calc.
    subtriangle_unit_normals : tuple of 4 (npatches, 3) arrays (optional)
        The unit normal vectors for the four subtriangles of each patch,
        if already known. Order is TR, TL, BL, BR.
    Returns
    -------
    slopes_at_patch : n_patches-long array
        The slope (positive gradient magnitude) of each patch, in radians.
    Examples
    --------
    >>> import numpy as np
    >>> from landlab import RasterModelGrid
    >>> mg = RasterModelGrid((4, 5))
    >>> z = mg.node_x
    >>> S = mg.calc_slope_at_patch(elevs=z)
    >>> S.size == mg.number_of_patches
    True
    >>> np.allclose(S, np.pi/4.)
    True
    >>> z = mg.node_y**2
    >>> mg.calc_slope_at_patch(elevs=z).reshape((3, 4))
    array([[ 0.78539816, 0.78539816, 0.78539816, 0.78539816],
           [ 1.24904577, 1.24904577, 1.24904577, 1.24904577],
           [ 1.37340077, 1.37340077, 1.37340077, 1.37340077]])
    >>> z = mg.node_x.copy()
    >>> mg.set_closed_boundaries_at_grid_edges(True, True, True, True)
    >>> mg.status_at_node[11] = mg.BC_NODE_IS_CLOSED
    >>> mg.status_at_node[9] = mg.BC_NODE_IS_FIXED_VALUE
    >>> z[11] = 100. # this should get ignored now
    >>> z[9] = 2. # this should be felt by patch 7 only
    >>> mg.calc_slope_at_patch(elevs=z, ignore_closed_nodes=True).reshape(
    ...     (3, 4)) * 4./np.pi
    array([[ 0., 0., 0., 0.],
           [ 0., 1., 1., 1.],
           [ 0., 0., 0., 0.]])
    LLCATS: PINF GRAD
    """
    if subtriangle_unit_normals is not None:
        # caller supplied precomputed normals; sanity-check the tuple shape
        assert len(subtriangle_unit_normals) == 4
        assert subtriangle_unit_normals[0].shape[1] == 3
        assert subtriangle_unit_normals[1].shape[1] == 3
        assert subtriangle_unit_normals[2].shape[1] == 3
        assert subtriangle_unit_normals[3].shape[1] == 3
        n_TR, n_TL, n_BL, n_BR = subtriangle_unit_normals
    else:
        # derive the four subtriangle unit normals from the elevations
        n_TR, n_TL, n_BL, n_BR = grid.calc_unit_normals_at_patch_subtriangles(elevs)
    # z component of a unit normal equals cos(slope angle)
    dotprod_TL = n_TL[:, 2]  # by definition
    dotprod_BR = n_BR[:, 2]
    dotprod_TR = n_TR[:, 2]
    dotprod_BL = n_BL[:, 2]
    slopes_at_patch_TL = np.arccos(dotprod_TL)  # 1 node order
    slopes_at_patch_BR = np.arccos(dotprod_BR)  # 3
    slopes_at_patch_TR = np.arccos(dotprod_TR)  # 0
    slopes_at_patch_BL = np.arccos(dotprod_BL)  # 2
    if ignore_closed_nodes:
        # badnodes is (npatches, 4): True where a patch corner node is closed
        badnodes = grid.status_at_node[grid.nodes_at_patch] == grid.BC_NODE_IS_CLOSED
        tot_bad = badnodes.sum(axis=1)
        tot_tris = 4.0 - 3.0 * (tot_bad > 0)  # 4 where all good, 1 where not
        # now shut down the bad tris. Remember, one bad node => 3 bad tris.
        # anywhere where badnodes > 1 will have zero from summing, so div by 1
        # assert np.all(np.logical_or(np.isclose(tot_tris, 4.),
        # np.isclose(tot_tris, 1.)))
        corners_rot = deque(
            [
                slopes_at_patch_BR,
                slopes_at_patch_TR,
                slopes_at_patch_TL,
                slopes_at_patch_BL,
            ]
        )
        # note initial offset so we are centered around TR on first slice
        # NOTE(review): at each step i, the first three deque entries are
        # taken to be the three subtriangles touching patch corner i; their
        # slopes are zeroed in place when that corner node is closed, then
        # the deque rotates to realign for the next corner — confirm
        # against the nodes_at_patch corner ordering before restructuring.
        for i in range(4):
            for j in range(3):
                (corners_rot[j])[badnodes[:, i]] = 0.0
            corners_rot.rotate(-1)
    else:
        # every subtriangle contributes at every patch
        tot_tris = 4.0
    # mean over the surviving subtriangles (zeroed tris add nothing to the
    # sum, and tot_tris already accounts for how many remain)
    mean_slope_at_patch = (
        slopes_at_patch_TR
        + slopes_at_patch_TL
        + slopes_at_patch_BL
        + slopes_at_patch_BR
    ) / tot_tris
    return mean_slope_at_patch
def calc_grad_at_patch(
    grid,
    elevs="topographic__elevation",
    ignore_closed_nodes=True,
    subtriangle_unit_normals=None,
    slope_magnitude=None,
):
    """Calculate the components of the gradient of each raster patch.
    Returns the mean gradient of the four possible patch subtriangles,
    in radians.
    If ignore_closed_nodes is True, closed nodes do not affect gradient
    calculations. If more than one closed node is present in a patch, the
    patch gradients in both x and y directions are set to zero.
    Parameters
    ----------
    grid : RasterModelGrid
        A grid.
    elevs : str or ndarray, optional
        Field name or array of node values.
    ignore_closed_nodes : bool
        If True, do not incorporate values at closed nodes into the calc.
    subtriangle_unit_normals : tuple of 4 (npatches, 3) arrays (optional)
        The unit normal vectors for the four subtriangles of each patch,
        if already known. Order is TR, TL, BL, BR.
    slope_magnitude : array with size num_patches (optional)
        The mean slope of each patch, if already known. Units must be the
        same as provided here!
    Returns
    -------
    gradient_tuple : (x_component_at_patch, y_component_at_patch)
        Len-2 tuple of arrays giving components of gradient in the x and y
        directions, in the units of *radians*.
    Examples
    --------
    >>> import numpy as np
    >>> from landlab import RasterModelGrid
    >>> mg = RasterModelGrid((4, 5))
    >>> z = mg.node_y
    >>> (x_grad, y_grad) = mg.calc_grad_at_patch(elevs=z)
    >>> np.allclose(y_grad, np.pi/4.)
    True
    >>> np.allclose(x_grad, 0.)
    True
    >>> z = mg.node_x.copy()
    >>> mg.set_closed_boundaries_at_grid_edges(True, True, True, True)
    >>> mg.status_at_node[11] = mg.BC_NODE_IS_CLOSED
    >>> mg.status_at_node[[9, 2]] = mg.BC_NODE_IS_FIXED_VALUE
    >>> z[11] = 100. # this should get ignored now
    >>> z[9] = 2. # this should be felt by patch 7 only
    >>> z[2] = 1. # should be felt by patches 1 and 2
    >>> xgrad, ygrad = mg.calc_grad_at_patch(
    ...     elevs=z, ignore_closed_nodes=True)
    >>> (xgrad.reshape((3, 4)) * 4./np.pi)[1, 1:]
    array([ 1., 1., -1.])
    >>> np.allclose(ygrad[1:3], xgrad[1:3])
    True
    LLCATS: PINF GRAD
    """
    if subtriangle_unit_normals is not None:
        # caller supplied precomputed normals; sanity-check the tuple shape
        assert len(subtriangle_unit_normals) == 4
        assert subtriangle_unit_normals[0].shape[1] == 3
        assert subtriangle_unit_normals[1].shape[1] == 3
        assert subtriangle_unit_normals[2].shape[1] == 3
        assert subtriangle_unit_normals[3].shape[1] == 3
        n_TR, n_TL, n_BL, n_BR = subtriangle_unit_normals
    else:
        n_TR, n_TL, n_BL, n_BR = grid.calc_unit_normals_at_patch_subtriangles(elevs)
    if slope_magnitude is not None:
        # precomputed patch slopes supplied; must be one per patch
        assert slope_magnitude.size == grid.number_of_patches
        slopes_at_patch = slope_magnitude
    else:
        slopes_at_patch = grid.calc_slope_at_patch(
            elevs=elevs,
            ignore_closed_nodes=ignore_closed_nodes,
            subtriangle_unit_normals=(n_TR, n_TL, n_BL, n_BR),
        )
    if ignore_closed_nodes:
        # zero out (in place) the normals of the three subtriangles that
        # touch each closed corner node, mirroring calc_slope_at_patch's
        # deque-rotation scheme
        badnodes = grid.status_at_node[grid.nodes_at_patch] == grid.BC_NODE_IS_CLOSED
        corners_rot = deque([n_BR, n_TR, n_TL, n_BL])
        # note initial offset so we are centered around TR on first slice
        for i in range(4):
            for j in range(3):
                (corners_rot[j])[badnodes[:, i], :] = 0.0
            corners_rot.rotate(-1)
    # resultant horizontal component of the (possibly zeroed) normals
    n_sum_x = n_TR[:, 0] + n_TL[:, 0] + n_BL[:, 0] + n_BR[:, 0]
    n_sum_y = n_TR[:, 1] + n_TL[:, 1] + n_BL[:, 1] + n_BR[:, 1]
    # the horizontal projection of an upward unit normal opposes the
    # gradient, so negate both components to recover the upslope direction
    theta_sum = np.arctan2(-n_sum_y, -n_sum_x)
    # project the patch slope magnitude onto the x and y axes
    x_slope_patches = np.cos(theta_sum) * slopes_at_patch
    y_slope_patches = np.sin(theta_sum) * slopes_at_patch
    return (x_slope_patches, y_slope_patches)
def calc_slope_at_node(
    grid,
    elevs="topographic__elevation",
    method="patch_mean",
    ignore_closed_nodes=True,
    return_components=False,
):
    """Array of slopes at nodes, averaged over neighboring patches.
    Produces a value for node slope (i.e., mean gradient magnitude)
    at each node in a manner analogous to a GIS-style slope map.
    If method=='patch_mean', it averages the gradient on each of the
    patches surrounding the node; if method=='Horn', it returns the
    resolved slope direction. Directional information can still be
    returned through use of the return_components keyword.
    All values are returned in radians, including the components;
    take the tan to recover the rise/run.
    Note that under these definitions, it is not always true that::
        mag, cmp = mg.calc_slope_at_node(z)
        mag**2 == cmp[0]**2 + cmp[1]**2 # only if method=='Horn'
    If ignore_closed_nodes is False, all proximal elevation values will be used
    in the calculation. If True, only unclosed nodes are used.
    This is a verion of this code specialized for a raster. It subdivides
    the four square patches around each node into subtriangles,
    in order to ensure more correct solutions that incorporate equally
    weighted information from all surrounding nodes on rough surfaces.
    Parameters
    ----------
    elevs : str or ndarray, optional
        Field name or array of node values.
    method : {'patch_mean', 'Horn'}
        Controls the slope algorithm. Current options are 'patch_mean',
        which takes the mean slope of each pf the four neighboring
        square patches, and 'Horn', which is the standard ArcGIS slope
        algorithm. These produce very similar solutions; the Horn method
        gives a vector mean and the patch_mean gives a scalar mean.
    ignore_closed_nodes : bool
        If True, do not incorporate values at closed nodes into the calc.
    return_components : bool
        If True, return a tuple, (array_of_magnitude,
        (array_of_slope_x_radians, array_of_slope_y_radians)).
        If false, return an array of floats of the slope magnitude.
    Returns
    -------
    float array or length-2 tuple of float arrays
        If return_components, returns (array_of_magnitude,
        (array_of_slope_x_radians, array_of_slope_y_radians)).
        If not return_components, returns an array of slope magnitudes.
    Examples
    --------
    >>> import numpy as np
    >>> from landlab import RadialModelGrid, RasterModelGrid
    >>> mg = RasterModelGrid((5, 5))
    >>> z = mg.node_x
    >>> slopes = mg.calc_slope_at_node(elevs=z)
    >>> np.allclose(slopes, np.pi / 4.)
    True
    >>> mg = RasterModelGrid((4, 5), xy_spacing=2.)
    >>> z = - mg.node_y
    >>> slope_mag, cmp = mg.calc_slope_at_node(elevs=z,
    ...                                        return_components=True)
    >>> np.allclose(slope_mag, np.pi / 4.)
    True
    >>> np.allclose(cmp[0], 0.)
    True
    >>> np.allclose(cmp[1], - np.pi / 4.)
    True
    >>> mg = RasterModelGrid((4, 4))
    >>> z = mg.node_x ** 2 + mg.node_y ** 2
    >>> slopes, cmp = mg.calc_slope_at_node(z, return_components=True)
    >>> slopes
    array([ 0.95531662, 1.10991779, 1.32082849, 1.37713803, 1.10991779,
            1.20591837, 1.3454815 , 1.38904403, 1.32082849, 1.3454815 ,
            1.39288142, 1.41562833, 1.37713803, 1.38904403, 1.41562833,
            1.43030663])
    >>> np.allclose(cmp[0].reshape((4, 4))[:, 0],
    ...             cmp[1].reshape((4, 4))[0, :]) # test radial symmetry
    True
    LLCATS: NINF GRAD SURF
    """
    if method not in ("patch_mean", "Horn"):
        raise ValueError("method name not understood")
    try:
        # old-style grids exposed patches_at_node as a callable
        patches_at_node = grid.patches_at_node()
    except TypeError:  # was a property, not a fn (=> new style)
        if not ignore_closed_nodes:
            # mask only the absent patches (-1 padding entries)
            patches_at_node = np.ma.masked_where(
                grid.patches_at_node == -1, grid.patches_at_node, copy=False
            )
        else:
            # mask patches flagged as not present at the node
            patches_at_node = np.ma.masked_where(
                np.logical_not(grid.patches_present_at_node),
                grid.patches_at_node,
                copy=False,
            )
    # now, we also want to mask any "closed" patches (any node closed)
    closed_patches = (
        grid.status_at_node[grid.nodes_at_patch] == grid.BC_NODE_IS_CLOSED
    ).sum(axis=1) > 0
    # per-(node, patch-slot) mask: absent patch OR patch with a closed node
    closed_patch_mask = np.logical_or(
        patches_at_node.mask, closed_patches[patches_at_node.data]
    )
    if method == "patch_mean":
        # mean slope of each patch's four subtriangles...
        n_TR, n_TL, n_BL, n_BR = grid.calc_unit_normals_at_patch_subtriangles(elevs)
        mean_slope_at_patches = grid.calc_slope_at_patch(
            elevs=elevs,
            ignore_closed_nodes=ignore_closed_nodes,
            subtriangle_unit_normals=(n_TR, n_TL, n_BL, n_BR),
        )
        # now CAREFUL - patches_at_node is MASKED
        slopes_at_node_unmasked = mean_slope_at_patches[patches_at_node]
        slopes_at_node_masked = np.ma.array(
            slopes_at_node_unmasked, mask=closed_patch_mask
        )
        # ...then averaged over only the valid patches at each node
        slope_mag = np.mean(slopes_at_node_masked, axis=1).data
        if return_components:
            # reuse the normals and patch slopes to avoid recomputation
            (x_slope_patches, y_slope_patches) = grid.calc_grad_at_patch(
                elevs=elevs,
                ignore_closed_nodes=ignore_closed_nodes,
                subtriangle_unit_normals=(n_TR, n_TL, n_BL, n_BR),
                slope_magnitude=mean_slope_at_patches,
            )
            x_slope_unmasked = x_slope_patches[patches_at_node]
            x_slope_masked = np.ma.array(x_slope_unmasked, mask=closed_patch_mask)
            x_slope = np.mean(x_slope_masked, axis=1).data
            y_slope_unmasked = y_slope_patches[patches_at_node]
            y_slope_masked = np.ma.array(y_slope_unmasked, mask=closed_patch_mask)
            y_slope = np.mean(y_slope_masked, axis=1).data
            mean_grad_x = x_slope
            mean_grad_y = y_slope
    elif method == "Horn":
        # the extra trailing slot holds 0., so neighbors mapped to -1
        # (BAD_INDEX) index harmlessly into it below
        z = np.empty(grid.number_of_nodes + 1, dtype=float)
        mean_grad_x = grid.empty(at="node", dtype=float)
        mean_grad_y = grid.empty(at="node", dtype=float)
        z[-1] = 0.0
        try:
            z[:-1] = grid.at_node[elevs]
        except TypeError:
            # elevs was an array of values, not a field name
            z[:-1] = elevs
        # proof code for bad indexing:
        diags = grid.diagonal_neighbors_at_node.copy()  # LL order
        orthos = grid.adjacent_nodes_at_node.copy()
        # these have closed node neighbors...
        for dirs in (diags, orthos):
            dirs[dirs == grid.BAD_INDEX] = -1  # indexing to work
        # now make an array like patches_at_node to store the interim calcs
        patch_slopes_x = np.ma.zeros(patches_at_node.shape, dtype=float)
        patch_slopes_y = np.ma.zeros(patches_at_node.shape, dtype=float)
        # elevation differences along the four orthogonal links
        # NOTE(review): assumes orthos columns run 0=E, 1=N, 2=W, 3=S
        # (Landlab CCW-from-east neighbor ordering) — confirm before edit
        diff_E = z[orthos[:, 0]] - z[:-1]
        diff_W = z[:-1] - z[orthos[:, 2]]
        diff_N = z[orthos[:, 1]] - z[:-1]
        diff_S = z[:-1] - z[orthos[:, 3]]
        # per-quadrant Horn sums: each combines a diagonal-to-ortho
        # difference with the shared orthogonal difference
        patch_slopes_x[:, 0] = z[diags[:, 0]] - z[orthos[:, 1]] + diff_E
        patch_slopes_x[:, 1] = z[orthos[:, 1]] - z[diags[:, 1]] + diff_W
        patch_slopes_x[:, 2] = z[orthos[:, 3]] - z[diags[:, 2]] + diff_W
        patch_slopes_x[:, 3] = z[diags[:, 3]] - z[orthos[:, 3]] + diff_E
        patch_slopes_y[:, 0] = z[diags[:, 0]] - z[orthos[:, 0]] + diff_N
        patch_slopes_y[:, 1] = z[diags[:, 1]] - z[orthos[:, 2]] + diff_N
        patch_slopes_y[:, 2] = z[orthos[:, 2]] - z[diags[:, 2]] + diff_S
        patch_slopes_y[:, 3] = z[orthos[:, 0]] - z[diags[:, 3]] + diff_S
        # each sum above spans two grid spacings
        patch_slopes_x /= 2.0 * grid.dx
        patch_slopes_y /= 2.0 * grid.dy
        # drop absent/closed patches from the averages
        patch_slopes_x.mask = closed_patch_mask
        patch_slopes_y.mask = closed_patch_mask
        mean_grad_x = patch_slopes_x.mean(axis=1).data
        mean_grad_y = patch_slopes_y.mean(axis=1).data
        # resolve the vector-mean rise/run into a slope angle
        slope_mag = np.arctan(np.sqrt(np.square(mean_grad_x) + np.square(mean_grad_y)))
        if return_components:
            # convert rise/run components to angles for consistency
            mean_grad_x = np.arctan(mean_grad_x)
            mean_grad_y = np.arctan(mean_grad_y)
    if return_components:
        return slope_mag, (mean_grad_x, mean_grad_y)
    else:
        return slope_mag
| mit |
SkeltonThatcher/rancher-buildeng-catalog | integration/core/test_catalog.py | 1 | 3164 | import pytest
import cattle
import subprocess
import sys
import os
import re
# import yaml
def _base():
return os.path.dirname(__file__)
def _file(f):
    # Resolve *f* relative to the repository root (two levels up from here).
    relative = '../../{}'.format(f)
    return os.path.join(_base(), relative)
class CatalogService(object):
    """Thin wrapper for spawning the rancher-catalog-service binary."""

    def __init__(self, catalog_bin):
        # Path to the catalog service executable.
        self.catalog_bin = catalog_bin

    def assert_retcode(self, ret_code, *args):
        """Run the binary with *args* and assert it exits with *ret_code*."""
        proc = self.call(*args)
        assert proc.wait() == ret_code

    def call(self, *args, **kw):
        """Spawn the binary; keyword args override the default Popen options."""
        popen_kw = {
            'stdin': subprocess.PIPE,
            'stdout': sys.stdout,
            'stderr': sys.stderr,
            'cwd': _base(),
        }
        popen_kw.update(kw)
        return subprocess.Popen([self.catalog_bin] + list(args), **popen_kw)
@pytest.fixture(scope='session')
def catalog_bin():
    """Absolute path of the installed catalog-service binary."""
    binary = '/usr/bin/rancher-catalog-service'
    assert os.path.exists(binary)
    return binary
@pytest.fixture(scope='session')
def catalog_service(catalog_bin):
    """A CatalogService wrapper shared across the whole test session."""
    wrapper = CatalogService(catalog_bin)
    return wrapper
@pytest.fixture(scope='session')
def client():
    """cattle API client bound to the locally running catalog endpoint."""
    schemas_url = 'http://localhost:8088/v1-catalog/schemas'
    return cattle.from_env(url=schemas_url)
@pytest.fixture(scope='session')
def templates(client):
    """Every template exposed by the catalog; there must be at least one."""
    available = client.list_template()
    assert len(available) > 0
    return available
@pytest.fixture(scope='session')
def requests():
    """Session-scoped HTTP session.

    The fixture name shadows the ``requests`` package at module scope (and
    the package was never imported at the top of the file), so the original
    body resolved ``requests`` to this very function and raised
    AttributeError when used.  Import the package locally under an alias
    instead.
    """
    import requests as requests_lib
    return requests_lib.Session()
@pytest.fixture(scope='session')
def template_details(client, templates):
    """Templates augmented with per-version detail payloads.

    Fetches every entry of ``versionLinks`` and stores the response under
    ``versionDetails[version]``.
    """
    for template in templates:
        template.versionDetails = {}
        # dict.items() works on both Python 2 and 3; iteritems() was
        # removed in Python 3 and raises AttributeError there.
        for version, link in template.versionLinks.items():
            template.versionDetails[version] = client._get(link)
    return templates
def test_validate_exits_normal(catalog_service):
    """Running the service with -validate against this catalog must exit 0."""
    args = ['-catalogUrl', _file('./'), '-validate', '-port', '18088']
    catalog_service.assert_retcode(0, *args)
def test_stack_name(templates):
    """Every template id must end in a valid hostname label."""
    label_re = re.compile(r'^[a-zA-Z0-9\-]{1,63}$')
    for template in templates:
        # stack_name must be a valid hostname label
        stack_name = template.id.split(':')[-1].split('*')[-1]
        assert label_re.match(stack_name)
def test_maintainers(templates):
    """Non-empty maintainer fields must look like 'Name Words <email>'."""
    # Maintainer will soon be a requirement:
    # assert template.maintainer
    maintainer_re = re.compile(r'^([\S]+ ){2,5}<[^@]+@[^@]+\.[^@]+>$')
    for template in templates:
        if not template.maintainer:
            continue
        assert maintainer_re.match(template.maintainer)
def test_versions(templates):
    """Each template declares a default version that resolves to a link."""
    for template in templates:
        default = template.defaultVersion
        # default version must be defined
        assert default
        # template with default version must be defined
        assert template.versionLinks[default]
def test_template_questions(template_details):
    """Every template version must ship a rancher-compose.yml file."""
    for template in template_details:
        # items() is Python 2/3 compatible; iteritems() no longer exists on
        # Python 3.  Also bind to a new name instead of rebinding and
        # shadowing the outer loop variable ``template``.
        for _, detail in template.versionDetails.items():
            # there must exist a rancher-compose.yml file
            assert detail.files['rancher-compose.yml']
            # rancherConfig = yaml.load(detail.files['rancher-compose.yml'])
            # there must exist at least one question
            # assert len(rancherConfig['.catalog']['questions']) > 0
| mit |
jhoos/django | tests/proxy_models/tests.py | 260 | 16941 | from __future__ import unicode_literals
import datetime
from django.apps import apps
from django.contrib import admin
from django.contrib.auth.models import User as AuthUser
from django.contrib.contenttypes.models import ContentType
from django.core import checks, exceptions, management
from django.core.urlresolvers import reverse
from django.db import DEFAULT_DB_ALIAS, models
from django.db.models import signals
from django.test import TestCase, override_settings
from .admin import admin as force_admin_model_registration # NOQA
from .models import (
Abstract, BaseUser, Bug, Country, Improvement, Issue, LowerStatusPerson,
MyPerson, MyPersonProxy, OtherPerson, Person, ProxyBug, ProxyImprovement,
ProxyProxyBug, ProxyTrackerUser, State, StateProxy, StatusPerson,
TrackerUser, User, UserProxy, UserProxyProxy,
)
class ProxyModelTests(TestCase):
    """
    Core behaviour of proxy models: shared SQL and managers, proxying rules,
    save signals, permissions, content types, deletion and related-field
    queries through proxies.
    """
    def test_same_manager_queries(self):
        """
        The MyPerson model should be generating the same database queries as
        the Person model (when the same manager is used in each case).
        """
        my_person_sql = MyPerson.other.all().query.get_compiler(
            DEFAULT_DB_ALIAS).as_sql()
        person_sql = Person.objects.order_by("name").query.get_compiler(
            DEFAULT_DB_ALIAS).as_sql()
        self.assertEqual(my_person_sql, person_sql)
    def test_inheretance_new_table(self):
        """
        The StatusPerson models should have its own table (it's using ORM-level
        inheritance).
        """
        sp_sql = StatusPerson.objects.all().query.get_compiler(
            DEFAULT_DB_ALIAS).as_sql()
        p_sql = Person.objects.all().query.get_compiler(
            DEFAULT_DB_ALIAS).as_sql()
        self.assertNotEqual(sp_sql, p_sql)
    def test_basic_proxy(self):
        """
        Creating a Person makes them accessible through the MyPerson proxy.
        """
        person = Person.objects.create(name="Foo McBar")
        self.assertEqual(len(Person.objects.all()), 1)
        self.assertEqual(len(MyPerson.objects.all()), 1)
        self.assertEqual(MyPerson.objects.get(name="Foo McBar").id, person.id)
        self.assertFalse(MyPerson.objects.get(id=person.id).has_special_name())
    def test_no_proxy(self):
        """
        Person is not proxied by StatusPerson subclass.
        """
        Person.objects.create(name="Foo McBar")
        self.assertEqual(list(StatusPerson.objects.all()), [])
    def test_basic_proxy_reverse(self):
        """
        A new MyPerson also shows up as a standard Person.
        """
        MyPerson.objects.create(name="Bazza del Frob")
        self.assertEqual(len(MyPerson.objects.all()), 1)
        self.assertEqual(len(Person.objects.all()), 1)
        LowerStatusPerson.objects.create(status="low", name="homer")
        lsps = [lsp.name for lsp in LowerStatusPerson.objects.all()]
        self.assertEqual(lsps, ["homer"])
    def test_correct_type_proxy_of_proxy(self):
        """
        Correct type when querying a proxy of proxy
        """
        Person.objects.create(name="Foo McBar")
        MyPerson.objects.create(name="Bazza del Frob")
        LowerStatusPerson.objects.create(status="low", name="homer")
        pp = sorted(mpp.name for mpp in MyPersonProxy.objects.all())
        self.assertEqual(pp, ['Bazza del Frob', 'Foo McBar', 'homer'])
    def test_proxy_included_in_ancestors(self):
        """
        Proxy models are included in the ancestors for a model's DoesNotExist
        and MultipleObjectsReturned
        """
        Person.objects.create(name="Foo McBar")
        MyPerson.objects.create(name="Bazza del Frob")
        LowerStatusPerson.objects.create(status="low", name="homer")
        max_id = Person.objects.aggregate(max_id=models.Max('id'))['max_id']
        self.assertRaises(
            Person.DoesNotExist,
            MyPersonProxy.objects.get,
            name='Zathras'
        )
        self.assertRaises(
            Person.MultipleObjectsReturned,
            MyPersonProxy.objects.get,
            id__lt=max_id + 1
        )
        self.assertRaises(
            Person.DoesNotExist,
            StatusPerson.objects.get,
            name='Zathras'
        )
        StatusPerson.objects.create(name='Bazza Jr.')
        StatusPerson.objects.create(name='Foo Jr.')
        max_id = Person.objects.aggregate(max_id=models.Max('id'))['max_id']
        self.assertRaises(
            Person.MultipleObjectsReturned,
            StatusPerson.objects.get,
            id__lt=max_id + 1
        )
    def test_abc(self):
        """
        All base classes must be non-abstract
        """
        def build_abc():
            class NoAbstract(Abstract):
                class Meta:
                    proxy = True
        self.assertRaises(TypeError, build_abc)
    def test_no_cbc(self):
        """
        The proxy must actually have one concrete base class
        """
        def build_no_cbc():
            class TooManyBases(Person, Abstract):
                class Meta:
                    proxy = True
        self.assertRaises(TypeError, build_no_cbc)
    def test_no_base_classes(self):
        # A proxy model must inherit from at least one other model.
        def build_no_base_classes():
            class NoBaseClasses(models.Model):
                class Meta:
                    proxy = True
        self.assertRaises(TypeError, build_no_base_classes)
    def test_new_fields(self):
        # Proxy models may not declare new model fields; check() must report
        # error models.E017.
        class NoNewFields(Person):
            newfield = models.BooleanField()
            class Meta:
                proxy = True
                # don't register this model in the app_cache for the current app,
                # otherwise the check fails when other tests are being run.
                app_label = 'no_such_app'
        errors = NoNewFields.check()
        expected = [
            checks.Error(
                "Proxy model 'NoNewFields' contains model fields.",
                hint=None,
                obj=None,
                id='models.E017',
            )
        ]
        self.assertEqual(errors, expected)
    @override_settings(TEST_SWAPPABLE_MODEL='proxy_models.AlternateModel')
    def test_swappable(self):
        # A model that has been swapped out cannot be proxied.
        # The models need to be removed after the test in order to prevent bad
        # interactions with the flush operation in other tests.
        _old_models = apps.app_configs['proxy_models'].models.copy()
        try:
            class SwappableModel(models.Model):
                class Meta:
                    swappable = 'TEST_SWAPPABLE_MODEL'
            class AlternateModel(models.Model):
                pass
            # You can't proxy a swapped model
            with self.assertRaises(TypeError):
                class ProxyModel(SwappableModel):
                    class Meta:
                        proxy = True
        finally:
            apps.app_configs['proxy_models'].models = _old_models
            apps.all_models['proxy_models'] = _old_models
            apps.clear_cache()
    def test_myperson_manager(self):
        # MyPerson's default manager filters the base queryset: 'wilma' is
        # excluded from the results below.
        Person.objects.create(name="fred")
        Person.objects.create(name="wilma")
        Person.objects.create(name="barney")
        resp = [p.name for p in MyPerson.objects.all()]
        self.assertEqual(resp, ['barney', 'fred'])
        resp = [p.name for p in MyPerson._default_manager.all()]
        self.assertEqual(resp, ['barney', 'fred'])
    def test_otherperson_manager(self):
        # OtherPerson exposes two differently-filtered managers; 'objects'
        # is also the default manager (it excludes 'fred', while 'excluder'
        # excludes 'wilma').
        Person.objects.create(name="fred")
        Person.objects.create(name="wilma")
        Person.objects.create(name="barney")
        resp = [p.name for p in OtherPerson.objects.all()]
        self.assertEqual(resp, ['barney', 'wilma'])
        resp = [p.name for p in OtherPerson.excluder.all()]
        self.assertEqual(resp, ['barney', 'fred'])
        resp = [p.name for p in OtherPerson._default_manager.all()]
        self.assertEqual(resp, ['barney', 'wilma'])
    def test_permissions_created(self):
        # The custom permission must exist after migrations have run —
        # presumably declared on one of this app's (proxy) models; verify
        # against models.py.
        from django.contrib.auth.models import Permission
        try:
            Permission.objects.get(name="May display users information")
        except Permission.DoesNotExist:
            self.fail("The permission 'May display users information' has not been created")
    def test_proxy_model_signals(self):
        """
        Test save signals for proxy models
        """
        output = []
        def make_handler(model, event):
            def _handler(*args, **kwargs):
                output.append('%s %s save' % (model, event))
            return _handler
        h1 = make_handler('MyPerson', 'pre')
        h2 = make_handler('MyPerson', 'post')
        h3 = make_handler('Person', 'pre')
        h4 = make_handler('Person', 'post')
        signals.pre_save.connect(h1, sender=MyPerson)
        signals.post_save.connect(h2, sender=MyPerson)
        signals.pre_save.connect(h3, sender=Person)
        signals.post_save.connect(h4, sender=Person)
        MyPerson.objects.create(name="dino")
        self.assertEqual(output, [
            'MyPerson pre save',
            'MyPerson post save'
        ])
        output = []
        h5 = make_handler('MyPersonProxy', 'pre')
        h6 = make_handler('MyPersonProxy', 'post')
        signals.pre_save.connect(h5, sender=MyPersonProxy)
        signals.post_save.connect(h6, sender=MyPersonProxy)
        MyPersonProxy.objects.create(name="pebbles")
        self.assertEqual(output, [
            'MyPersonProxy pre save',
            'MyPersonProxy post save'
        ])
        signals.pre_save.disconnect(h1, sender=MyPerson)
        signals.post_save.disconnect(h2, sender=MyPerson)
        signals.pre_save.disconnect(h3, sender=Person)
        signals.post_save.disconnect(h4, sender=Person)
        signals.pre_save.disconnect(h5, sender=MyPersonProxy)
        signals.post_save.disconnect(h6, sender=MyPersonProxy)
    def test_content_type(self):
        # A proxy shares the ContentType of its concrete model.
        ctype = ContentType.objects.get_for_model
        self.assertIs(ctype(Person), ctype(OtherPerson))
    def test_user_userproxy_userproxyproxy(self):
        # A row is visible through the model, its proxy, and a proxy of that
        # proxy.
        User.objects.create(name='Bruce')
        resp = [u.name for u in User.objects.all()]
        self.assertEqual(resp, ['Bruce'])
        resp = [u.name for u in UserProxy.objects.all()]
        self.assertEqual(resp, ['Bruce'])
        resp = [u.name for u in UserProxyProxy.objects.all()]
        self.assertEqual(resp, ['Bruce'])
    def test_proxy_for_model(self):
        # _meta.proxy_for_model is the immediately proxied model.
        self.assertEqual(UserProxy, UserProxyProxy._meta.proxy_for_model)
    def test_concrete_model(self):
        # _meta.concrete_model resolves the whole proxy chain.
        self.assertEqual(User, UserProxyProxy._meta.concrete_model)
    def test_proxy_delete(self):
        """
        Proxy objects can be deleted
        """
        User.objects.create(name='Bruce')
        u2 = UserProxy.objects.create(name='George')
        resp = [u.name for u in UserProxy.objects.all()]
        self.assertEqual(resp, ['Bruce', 'George'])
        u2.delete()
        resp = [u.name for u in UserProxy.objects.all()]
        self.assertEqual(resp, ['Bruce'])
    def test_select_related(self):
        """
        We can still use `select_related()` to include related models in our
        querysets.
        """
        country = Country.objects.create(name='Australia')
        State.objects.create(name='New South Wales', country=country)
        resp = [s.name for s in State.objects.select_related()]
        self.assertEqual(resp, ['New South Wales'])
        resp = [s.name for s in StateProxy.objects.select_related()]
        self.assertEqual(resp, ['New South Wales'])
        self.assertEqual(StateProxy.objects.get(name='New South Wales').name,
                         'New South Wales')
        resp = StateProxy.objects.select_related().get(name='New South Wales')
        self.assertEqual(resp.name, 'New South Wales')
    def test_filter_proxy_relation_reverse(self):
        # The reverse relation works through the proxy but raises FieldError
        # on the concrete model.
        tu = TrackerUser.objects.create(
            name='Contributor', status='contrib')
        with self.assertRaises(exceptions.FieldError):
            TrackerUser.objects.filter(issue=None),
            # NOTE(review): the trailing comma above makes the expression a
            # throwaway 1-tuple; the filter() call itself is what must raise.
        self.assertQuerysetEqual(
            ProxyTrackerUser.objects.filter(issue=None),
            [tu], lambda x: x
        )
    def test_proxy_bug(self):
        # Exercises filtering and select_related() across foreign keys that
        # point at proxy models (and proxies of proxies).
        contributor = ProxyTrackerUser.objects.create(name='Contributor',
                                                      status='contrib')
        someone = BaseUser.objects.create(name='Someone')
        Bug.objects.create(summary='fix this', version='1.1beta',
                           assignee=contributor, reporter=someone)
        pcontributor = ProxyTrackerUser.objects.create(name='OtherContributor',
                                                       status='proxy')
        Improvement.objects.create(summary='improve that', version='1.1beta',
                                   assignee=contributor, reporter=pcontributor,
                                   associated_bug=ProxyProxyBug.objects.all()[0])
        # Related field filter on proxy
        resp = ProxyBug.objects.get(version__icontains='beta')
        self.assertEqual(repr(resp), '<ProxyBug: ProxyBug:fix this>')
        # Select related + filter on proxy
        resp = ProxyBug.objects.select_related().get(version__icontains='beta')
        self.assertEqual(repr(resp), '<ProxyBug: ProxyBug:fix this>')
        # Proxy of proxy, select_related + filter
        resp = ProxyProxyBug.objects.select_related().get(
            version__icontains='beta'
        )
        self.assertEqual(repr(resp), '<ProxyProxyBug: ProxyProxyBug:fix this>')
        # Select related + filter on a related proxy field
        resp = ProxyImprovement.objects.select_related().get(
            reporter__name__icontains='butor'
        )
        self.assertEqual(
            repr(resp),
            '<ProxyImprovement: ProxyImprovement:improve that>'
        )
        # Select related + filter on a related proxy of proxy field
        resp = ProxyImprovement.objects.select_related().get(
            associated_bug__summary__icontains='fix'
        )
        self.assertEqual(
            repr(resp),
            '<ProxyImprovement: ProxyImprovement:improve that>'
        )
    def test_proxy_load_from_fixture(self):
        # Fixture data referencing the proxy loads into the concrete table.
        management.call_command('loaddata', 'mypeople.json', verbosity=0)
        p = MyPerson.objects.get(pk=100)
        self.assertEqual(p.name, 'Elvis Presley')
    def test_eq(self):
        # A proxy instance and a concrete instance with the same pk compare
        # equal.
        self.assertEqual(MyPerson(id=100), Person(id=100))
@override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
                   ROOT_URLCONF='proxy_models.urls',)
class ProxyModelAdminTests(TestCase):
    """
    Admin integration for proxy models: cascade-delete collection and the
    string representations shown on the delete-confirmation page.
    """
    @classmethod
    def setUpTestData(cls):
        # A superuser for admin requests, plus a tracker user with an issue
        # assigned to it so a cascade delete has something to collect.
        cls.u1 = AuthUser.objects.create(
            password='sha1$995a3$6011485ea3834267d719b4c801409b8b1ddd0158',
            last_login=datetime.datetime(2007, 5, 30, 13, 20, 10), is_superuser=True, username='super',
            first_name='Super', last_name='User', email='super@example.com', is_staff=True, is_active=True,
            date_joined=datetime.datetime(2007, 5, 30, 13, 20, 10)
        )
        cls.tu1 = ProxyTrackerUser.objects.create(name='Django Pony', status='emperor')
        cls.i1 = Issue.objects.create(summary="Pony's Issue", assignee=cls.tu1)
    def test_cascade_delete_proxy_model_admin_warning(self):
        """
        Test if admin gives warning about cascade deleting models referenced
        to concrete model by deleting proxy object.
        """
        tracker_user = TrackerUser.objects.all()[0]
        base_user = BaseUser.objects.all()[0]
        issue = Issue.objects.all()[0]
        with self.assertNumQueries(7):
            collector = admin.utils.NestedObjects('default')
            collector.collect(ProxyTrackerUser.objects.all())
        self.assertIn(tracker_user, collector.edges.get(None, ()))
        self.assertIn(base_user, collector.edges.get(None, ()))
        self.assertIn(issue, collector.edges.get(tracker_user, ()))
    def test_delete_str_in_model_admin(self):
        """
        Test if the admin delete page shows the correct string representation
        for a proxy model.
        """
        user = TrackerUser.objects.get(name='Django Pony')
        proxy = ProxyTrackerUser.objects.get(name='Django Pony')
        user_str = 'Tracker user: <a href="%s">%s</a>' % (
            reverse('admin_proxy:proxy_models_trackeruser_change', args=(user.pk,)), user
        )
        proxy_str = 'Proxy tracker user: <a href="%s">%s</a>' % (
            reverse('admin_proxy:proxy_models_proxytrackeruser_change', args=(proxy.pk,)), proxy
        )
        self.client.login(username='super', password='secret')
        response = self.client.get(reverse('admin_proxy:proxy_models_trackeruser_delete', args=(user.pk,)))
        delete_str = response.context['deleted_objects'][0]
        self.assertEqual(delete_str, user_str)
        response = self.client.get(reverse('admin_proxy:proxy_models_proxytrackeruser_delete', args=(proxy.pk,)))
        delete_str = response.context['deleted_objects'][0]
        self.assertEqual(delete_str, proxy_str)
        self.client.logout()
| bsd-3-clause |
FatalTouch/BlogExample | handlers/signup.py | 1 | 2233 | import entities
from handlers import ViewHandler
from utility import validate
# Handler for our signup page
class SignupPage(ViewHandler):
    """Handles GET (form display) and POST (account creation) for signup."""

    def get(self):
        # Logged-in users have no business on the signup form; send them home.
        if self.user:
            self.redirect('/')
        else:
            self.render("signup.html")

    def post(self):
        username = self.request.get("username")
        password = self.request.get("password")
        verify = self.request.get("verify")
        email = self.request.get("email")
        # Echo the submitted username/email back into the form on error.
        params = {"username": username, "email": email}
        has_error = False
        # Run every validator; the last failing check determines the single
        # "error" message shown to the user.
        messages = [
            validate.is_valid_username(username),
            validate.is_valid_password(password, verify),
            validate.is_valid_email(email),
        ]
        for message in messages:
            if message:
                params["error"] = message
                has_error = True
        if has_error:
            # Re-render the form with the error for the user to correct.
            self.render("signup.html", **params)
            return
        # Input is valid: create the account in the datastore.
        user = entities.User.create_user(username, password, email)
        if user:
            # Log the new user in and send them to the welcome page.
            self.login(user, False)
            self.redirect('/welcome')
        else:
            # Datastore failure: show the form again with a generic error.
            params["error"] = "Unable to create user due to unknown error"
            self.render("signup.html", **params)
| apache-2.0 |
crossroadchurch/paul | openlp/core/ui/themestab.py | 1 | 11371 | # -*- coding: utf-8 -*-
# vim: autoindent shiftwidth=4 expandtab textwidth=120 tabstop=4 softtabstop=4
###############################################################################
# OpenLP - Open Source Lyrics Projection #
# --------------------------------------------------------------------------- #
# Copyright (c) 2008-2015 OpenLP Developers #
# --------------------------------------------------------------------------- #
# This program is free software; you can redistribute it and/or modify it #
# under the terms of the GNU General Public License as published by the Free #
# Software Foundation; version 2 of the License. #
# #
# This program is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for #
# more details. #
# #
# You should have received a copy of the GNU General Public License along #
# with this program; if not, write to the Free Software Foundation, Inc., 59 #
# Temple Place, Suite 330, Boston, MA 02111-1307 USA #
###############################################################################
"""
The Themes configuration tab
"""
from PyQt4 import QtCore, QtGui
from openlp.core.common import Registry, Settings, ThemeLevel, UiStrings, translate
from openlp.core.lib import SettingsTab
from openlp.core.lib.ui import find_and_set_in_combo_box
class ThemesTab(SettingsTab):
    """
    ThemesTab is the theme settings tab in the settings dialog.
    """
    def __init__(self, parent):
        """
        Constructor

        :param parent: the parent widget passed through to SettingsTab
        """
        self.icon_path = ':/themes/theme_new.png'
        theme_translated = translate('OpenLP.ThemesTab', 'Themes')
        super(ThemesTab, self).__init__(parent, 'Themes', theme_translated)
    def setupUi(self):
        """
        Set up the UI
        """
        self.setObjectName('ThemesTab')
        super(ThemesTab, self).setupUi()
        self.global_group_box = QtGui.QGroupBox(self.left_column)
        self.global_group_box.setObjectName('global_group_box')
        self.global_group_box_layout = QtGui.QVBoxLayout(self.global_group_box)
        self.global_group_box_layout.setObjectName('global_group_box_layout')
        self.default_combo_box = QtGui.QComboBox(self.global_group_box)
        self.default_combo_box.setSizeAdjustPolicy(QtGui.QComboBox.AdjustToMinimumContentsLength)
        self.default_combo_box.setSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Fixed)
        self.default_combo_box.setObjectName('default_combo_box')
        self.global_group_box_layout.addWidget(self.default_combo_box)
        self.default_list_view = QtGui.QLabel(self.global_group_box)
        self.default_list_view.setObjectName('default_list_view')
        self.global_group_box_layout.addWidget(self.default_list_view)
        self.left_layout.addWidget(self.global_group_box)
        self.universal_group_box = QtGui.QGroupBox(self.left_column)
        self.universal_group_box.setObjectName('universal_group_box')
        self.universal_group_box_layout = QtGui.QVBoxLayout(self.universal_group_box)
        self.universal_group_box_layout.setObjectName('universal_group_box_layout')
        self.wrap_footer_check_box = QtGui.QCheckBox(self.universal_group_box)
        self.wrap_footer_check_box.setObjectName('wrap_footer_check_box')
        self.universal_group_box_layout.addWidget(self.wrap_footer_check_box)
        self.left_layout.addWidget(self.universal_group_box)
        self.left_layout.addStretch()
        self.level_group_box = QtGui.QGroupBox(self.right_column)
        self.level_group_box.setObjectName('level_group_box')
        self.level_layout = QtGui.QFormLayout(self.level_group_box)
        self.level_layout.setLabelAlignment(QtCore.Qt.AlignLeft | QtCore.Qt.AlignTop)
        self.level_layout.setFormAlignment(QtCore.Qt.AlignLeft | QtCore.Qt.AlignTop)
        self.level_layout.setObjectName('level_layout')
        self.song_level_radio_button = QtGui.QRadioButton(self.level_group_box)
        self.song_level_radio_button.setObjectName('song_level_radio_button')
        self.song_level_label = QtGui.QLabel(self.level_group_box)
        self.song_level_label.setObjectName('song_level_label')
        self.level_layout.addRow(self.song_level_radio_button, self.song_level_label)
        self.service_level_radio_button = QtGui.QRadioButton(self.level_group_box)
        self.service_level_radio_button.setObjectName('service_level_radio_button')
        self.service_level_label = QtGui.QLabel(self.level_group_box)
        self.service_level_label.setObjectName('service_level_label')
        self.level_layout.addRow(self.service_level_radio_button, self.service_level_label)
        self.global_level_radio_button = QtGui.QRadioButton(self.level_group_box)
        self.global_level_radio_button.setObjectName('global_level_radio_button')
        self.global_level_label = QtGui.QLabel(self.level_group_box)
        self.global_level_label.setObjectName('global_level_label')
        self.level_layout.addRow(self.global_level_radio_button, self.global_level_label)
        # Vertically align each description label with its radio button.
        label_top_margin = (self.song_level_radio_button.sizeHint().height() -
                            self.song_level_label.sizeHint().height()) // 2
        for label in [self.song_level_label, self.service_level_label, self.global_level_label]:
            rect = label.rect()
            rect.setTop(rect.top() + label_top_margin)
            label.setFrameRect(rect)
            label.setWordWrap(True)
        self.right_layout.addWidget(self.level_group_box)
        self.right_layout.addStretch()
        self.song_level_radio_button.clicked.connect(self.on_song_level_button_clicked)
        self.service_level_radio_button.clicked.connect(self.on_service_level_button_clicked)
        self.global_level_radio_button.clicked.connect(self.on_global_level_button_clicked)
        self.default_combo_box.activated.connect(self.on_default_combo_box_changed)
        Registry().register_function('theme_update_list', self.update_theme_list)
    def retranslateUi(self):
        """
        Translate the UI on the fly
        """
        self.tab_title_visible = UiStrings().Themes
        self.global_group_box.setTitle(translate('OpenLP.ThemesTab', 'Global Theme'))
        self.universal_group_box.setTitle(translate('OpenLP.ThemesTab', 'Universal Settings'))
        self.wrap_footer_check_box.setText(translate('OpenLP.ThemesTab', '&Wrap footer text'))
        self.level_group_box.setTitle(translate('OpenLP.ThemesTab', 'Theme Level'))
        self.song_level_radio_button.setText(translate('OpenLP.ThemesTab', 'S&ong Level'))
        self.song_level_label.setText(
            translate('OpenLP.ThemesTab', 'Use the theme from each song in the database. If a song doesn\'t have a '
                                          'theme associated with it, then use the service\'s theme. If the service '
                                          'doesn\'t have a theme, then use the global theme.'))
        self.service_level_radio_button.setText(translate('OpenLP.ThemesTab', '&Service Level'))
        self.service_level_label.setText(
            translate('OpenLP.ThemesTab', 'Use the theme from the service, overriding any of the individual '
                                          'songs\' themes. If the service doesn\'t have a theme, then use the global '
                                          'theme.'))
        self.global_level_radio_button.setText(translate('OpenLP.ThemesTab', '&Global Level'))
        self.global_level_label.setText(translate('OpenLP.ThemesTab', 'Use the global theme, overriding any themes '
                                                                      'associated with either the service or the '
                                                                      'songs.'))
    def load(self):
        """
        Load the theme settings into the tab
        """
        settings = Settings()
        settings.beginGroup(self.settings_section)
        self.theme_level = settings.value('theme level')
        self.global_theme = settings.value('global theme')
        self.wrap_footer_check_box.setChecked(settings.value('wrap footer'))
        settings.endGroup()
        if self.theme_level == ThemeLevel.Global:
            self.global_level_radio_button.setChecked(True)
        elif self.theme_level == ThemeLevel.Service:
            self.service_level_radio_button.setChecked(True)
        else:
            self.song_level_radio_button.setChecked(True)
    def save(self):
        """
        Save the settings
        """
        settings = Settings()
        settings.beginGroup(self.settings_section)
        settings.setValue('theme level', self.theme_level)
        settings.setValue('global theme', self.global_theme)
        settings.setValue('wrap footer', self.wrap_footer_check_box.isChecked())
        settings.endGroup()
        self.renderer.set_theme_level(self.theme_level)
        if self.tab_visited:
            self.settings_form.register_post_process('theme_update_global')
        self.tab_visited = False
    def on_song_level_button_clicked(self):
        """
        Set the theme level
        """
        self.theme_level = ThemeLevel.Song
    def on_service_level_button_clicked(self):
        """
        Set the theme level
        """
        self.theme_level = ThemeLevel.Service
    def on_global_level_button_clicked(self):
        """
        Set the theme level
        """
        self.theme_level = ThemeLevel.Global
    def on_default_combo_box_changed(self, value):
        """
        Set the global default theme

        ``value`` is the index supplied by the activated signal; the theme
        name is read straight from the combo box text instead.
        """
        self.global_theme = self.default_combo_box.currentText()
        self.renderer.set_global_theme()
        self._preview_global_theme()
    def update_theme_list(self, theme_list):
        """
        Called from ThemeManager when the Themes have changed.
        :param theme_list: The list of available themes::
            ['Bible Theme', 'Song Theme']
        """
        # Reload as may have been triggered by the ThemeManager.
        self.global_theme = Settings().value(self.settings_section + '/global theme')
        self.default_combo_box.clear()
        self.default_combo_box.addItems(theme_list)
        find_and_set_in_combo_box(self.default_combo_box, self.global_theme)
        self.renderer.set_global_theme()
        self.renderer.set_theme_level(self.theme_level)
        # Bug fix: the original used "is not ''", an identity comparison
        # against a str literal, which is implementation-dependent and a
        # SyntaxWarning on modern CPython; equality is what is meant here.
        if self.global_theme != '':
            self._preview_global_theme()
    def _preview_global_theme(self):
        """
        Utility method to update the global theme preview image.
        """
        image = self.theme_manager.get_preview_image(self.global_theme)
        preview = QtGui.QPixmap(str(image))
        if not preview.isNull():
            preview = preview.scaled(300, 255, QtCore.Qt.KeepAspectRatio, QtCore.Qt.SmoothTransformation)
        self.default_list_view.setPixmap(preview)
| gpl-2.0 |
garbled1/ansible | lib/ansible/modules/cloud/ovirt/ovirt_networks_facts.py | 73 | 3447 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ovirt_networks_facts
short_description: Retrieve facts about one or more oVirt/RHV networks
author: "Ondra Machacek (@machacekondra)"
version_added: "2.3"
description:
- "Retrieve facts about one or more oVirt/RHV networks."
notes:
- "This module creates a new top-level C(ovirt_networks) fact, which
contains a list of networks."
options:
pattern:
description:
- "Search term which is accepted by oVirt/RHV search backend."
- "For example to search network starting with string vlan1 use: name=vlan1*"
extends_documentation_fragment: ovirt_facts
'''
EXAMPLES = '''
# Examples don't contain auth parameter for simplicity,
# look at ovirt_auth module to see how to reuse authentication:
# Gather facts about all networks which names start with C(vlan1):
- ovirt_networks_facts:
pattern: name=vlan1*
- debug:
var: ovirt_networks
'''
RETURN = '''
ovirt_networks:
description: "List of dictionaries describing the networks. Network attribues are mapped to dictionary keys,
all networks attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/network."
returned: On success.
type: list
'''
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ovirt import (
check_sdk,
create_connection,
get_dict_of_struct,
ovirt_facts_full_argument_spec,
)
def main():
    """Module entry point: query oVirt/RHV networks and exit with them as facts."""
    argument_spec = ovirt_facts_full_argument_spec(
        pattern=dict(default='', required=False),
    )
    module = AnsibleModule(argument_spec)
    check_sdk(module)
    # Initialise before the try block so the finally clause is safe even if
    # pulling the auth parameter or opening the connection fails; the
    # original referenced unbound names there and masked the real error
    # with a NameError.
    auth = None
    connection = None
    try:
        auth = module.params.pop('auth')
        connection = create_connection(auth)
        networks_service = connection.system_service().networks_service()
        networks = networks_service.list(search=module.params['pattern'])
        module.exit_json(
            changed=False,
            ansible_facts=dict(
                ovirt_networks=[
                    get_dict_of_struct(
                        struct=c,
                        connection=connection,
                        fetch_nested=module.params.get('fetch_nested'),
                        attributes=module.params.get('nested_attributes'),
                    ) for c in networks
                ],
            ),
        )
    except Exception as e:
        # Report any SDK/API failure back to Ansible with a traceback.
        module.fail_json(msg=str(e), exception=traceback.format_exc())
    finally:
        # Only close a connection that was actually established; log out
        # unless a pre-existing auth token was supplied.
        if connection is not None:
            connection.close(logout=auth.get('token') is None)
# Run the module when Ansible executes this file directly.
if __name__ == '__main__':
    main()
| gpl-3.0 |
yashtrivedi96/coala-bears | docs/conf.py | 34 | 9336 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# coala documentation build configuration file, created by
# sphinx-quickstart on Wed Feb 3 16:49:01 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# The parent directory is added so autodoc can import the bears package itself.
sys.path.insert(0, os.path.abspath('../'))
# Import for version information
from bears.Constants import VERSION
# VERSION supplies both the short "version" and full "release" strings below.
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'coala-bears'
copyright = '2016, The coala Developers'
author = 'The coala Developers'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = VERSION
# The full version, including alpha/beta/rc tags.
release = VERSION
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = []
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'h', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'r', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'coala-bearsdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'coala-bears.tex', 'coala-bears Documentation',
     'The coala-bears Developers', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'coala-bears', 'coala-bears Documentation',
     [author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'coala-bears', 'coala-bears Documentation',
     author, 'coala-bears', 'One line description of project.',
     'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| agpl-3.0 |
nistormihai/superdesk-core | apps/publish/enqueue/enqueue_killed.py | 3 | 2619 | # -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014, 2015 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
import logging
from eve.utils import config
from superdesk import get_resource_service
from superdesk.metadata.item import CONTENT_STATE
from apps.publish.enqueue.enqueue_service import EnqueueService
logger = logging.getLogger(__name__)
class EnqueueKilledService(EnqueueService):
    """Enqueue service for 'kill' publishing.

    A kill is routed only to subscribers that previously received the item
    (as published or corrected), never to the full subscriber base.
    """

    publish_type = 'kill'
    published_state = 'killed'

    def get_subscribers(self, doc, target_media_type):
        """Get the subscribers for this document based on the target_media_type for kill.

        Kill is sent to all subscribers that have received the item previously
        (published or corrected).

        :param doc: Document to kill
        :param target_media_type: Valid values are - Wire, Digital.
        :return: (list, dict, dict) List of filtered subscribers, product codes per
            subscriber, associations per subscriber
        """
        # Restrict to entries where this exact item went out as published or
        # corrected; only those subscribers must see the kill.
        previously_sent = {'publishing_action': {'$in': [CONTENT_STATE.PUBLISHED, CONTENT_STATE.CORRECTED]}}
        query = {'$and': [{'item_id': doc['item_id']}, previously_sent]}
        return self._get_subscribers_for_previously_sent_items(query)

    def enqueue_archived_kill_item(self, item, transmission_details):
        """Enqueue items that are killed from dusty archive.

        :param dict item: item from the archived collection.
        :param list transmission_details: list of legal publish queue entries
        """
        # Collect every subscriber that ever received the item, remembering
        # which of them got it via a content-api destination.
        subscriber_ids = []
        api_subscribers = set()
        for transmission_record in transmission_details:
            subscriber_id = transmission_record['_subscriber_id']
            subscriber_ids.append(subscriber_id)
            if transmission_record.get('destination', {}).get('delivery_type') == 'content_api':
                api_subscribers.add(subscriber_id)
        query = {'$and': [{config.ID_FIELD: {'$in': subscriber_ids}}]}
        subscribers = list(get_resource_service('subscribers').get(req=None, lookup=query))
        # Flag each fetched subscriber so downstream publishing can tell the
        # content-api recipients apart.
        for subscriber in subscribers:
            subscriber['api_enabled'] = subscriber.get(config.ID_FIELD) in api_subscribers
        self.queue_transmission(item, subscribers)
        logger.info('Queued Transmission for article: {}'.format(item[config.ID_FIELD]))
        api_enabled_subscribers = [subscriber for subscriber in subscribers if subscriber['api_enabled']]
        self.publish_content_api(item, api_enabled_subscribers)
| agpl-3.0 |
tykayn/card-deck | node_old/gulp-sass/node_modules/node-sass/node_modules/node-gyp/gyp/pylib/gyp/xcodeproj_file.py | 426 | 120645 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Xcode project file generator.
This module is both an Xcode project file generator and a documentation of the
Xcode project file format. Knowledge of the project file format was gained
based on extensive experience with Xcode, and by making changes to projects in
Xcode.app and observing the resultant changes in the associated project files.
XCODE PROJECT FILES
The generator targets the file format as written by Xcode 3.2 (specifically,
3.2.6), but past experience has taught that the format has not changed
significantly in the past several years, and future versions of Xcode are able
to read older project files.
Xcode project files are "bundled": the project "file" from an end-user's
perspective is actually a directory with an ".xcodeproj" extension. The
project file from this module's perspective is actually a file inside this
directory, always named "project.pbxproj". This file contains a complete
description of the project and is all that is needed to use the xcodeproj.
Other files contained in the xcodeproj directory are simply used to store
per-user settings, such as the state of various UI elements in the Xcode
application.
The project.pbxproj file is a property list, stored in a format almost
identical to the NeXTstep property list format. The file is able to carry
Unicode data, and is encoded in UTF-8. The root element in the property list
is a dictionary that contains several properties of minimal interest, and two
properties of immense interest. The most important property is a dictionary
named "objects". The entire structure of the project is represented by the
children of this property. The objects dictionary is keyed by unique 96-bit
values represented by 24 uppercase hexadecimal characters. Each value in the
objects dictionary is itself a dictionary, describing an individual object.
Each object in the dictionary is a member of a class, which is identified by
the "isa" property of each object. A variety of classes are represented in a
project file. Objects can refer to other objects by ID, using the 24-character
hexadecimal object key. A project's objects form a tree, with a root object
of class PBXProject at the root. As an example, the PBXProject object serves
as parent to an XCConfigurationList object defining the build configurations
used in the project, a PBXGroup object serving as a container for all files
referenced in the project, and a list of target objects, each of which defines
a target in the project. There are several different types of target object,
such as PBXNativeTarget and PBXAggregateTarget. In this module, this
relationship is expressed by having each target type derive from an abstract
base named XCTarget.
The project.pbxproj file's root dictionary also contains a property, sibling to
the "objects" dictionary, named "rootObject". The value of rootObject is a
24-character object key referring to the root PBXProject object in the
objects dictionary.
In Xcode, every file used as input to a target or produced as a final product
of a target must appear somewhere in the hierarchy rooted at the PBXGroup
object referenced by the PBXProject's mainGroup property. A PBXGroup is
generally represented as a folder in the Xcode application. PBXGroups can
contain other PBXGroups as well as PBXFileReferences, which are pointers to
actual files.
Each XCTarget contains a list of build phases, represented in this module by
the abstract base XCBuildPhase. Examples of concrete XCBuildPhase derivations
are PBXSourcesBuildPhase and PBXFrameworksBuildPhase, which correspond to the
"Compile Sources" and "Link Binary With Libraries" phases displayed in the
Xcode application. Files used as input to these phases (for example, source
files in the former case and libraries and frameworks in the latter) are
represented by PBXBuildFile objects, referenced by elements of "files" lists
in XCTarget objects. Each PBXBuildFile object refers to a PBXBuildFile
object as a "weak" reference: it does not "own" the PBXBuildFile, which is
owned by the root object's mainGroup or a descendant group. In most cases, the
layer of indirection between an XCBuildPhase and a PBXFileReference via a
PBXBuildFile appears extraneous, but there's actually one reason for this:
file-specific compiler flags are added to the PBXBuildFile object so as to
allow a single file to be a member of multiple targets while having distinct
compiler flags for each. These flags can be modified in the Xcode application
in the "Build" tab of a File Info window.
When a project is open in the Xcode application, Xcode will rewrite it. As
such, this module is careful to adhere to the formatting used by Xcode, to
avoid insignificant changes appearing in the file when it is used in the
Xcode application. This will keep version control repositories happy, and
makes it possible to compare a project file used in Xcode to one generated by
this module to determine if any significant changes were made in the
application.
Xcode has its own way of assigning 24-character identifiers to each object,
which is not duplicated here. Because the identifier is only generated
once, when an object is created, and is then left unchanged, there is no need
to attempt to duplicate Xcode's behavior in this area. The generator is free
to select any identifier, even at random, to refer to the objects it creates,
and Xcode will retain those identifiers and use them when subsequently
rewriting the project file. However, the generator would choose new random
identifiers each time the project files are generated, leading to difficulties
comparing "used" project files to "pristine" ones produced by this module,
and causing the appearance of changes as every object identifier is changed
when updated projects are checked in to a version control repository. To
mitigate this problem, this module chooses identifiers in a more deterministic
way, by hashing a description of each object as well as its parent and ancestor
objects. This strategy should result in minimal "shift" in IDs as successive
generations of project files are produced.
THIS MODULE
This module introduces several classes, all derived from the XCObject class.
Nearly all of the "brains" are built into the XCObject class, which understands
how to create and modify objects, maintain the proper tree structure, compute
identifiers, and print objects. For the most part, classes derived from
XCObject need only provide a _schema class object, a dictionary that
expresses what properties objects of the class may contain.
Given this structure, it's possible to build a minimal project file by creating
objects of the appropriate types and making the proper connections:
config_list = XCConfigurationList()
group = PBXGroup()
project = PBXProject({'buildConfigurationList': config_list,
'mainGroup': group})
With the project object set up, it can be added to an XCProjectFile object.
XCProjectFile is a pseudo-class in the sense that it is a concrete XCObject
subclass that does not actually correspond to a class type found in a project
file. Rather, it is used to represent the project file's root dictionary.
Printing an XCProjectFile will print the entire project file, including the
full "objects" dictionary.
project_file = XCProjectFile({'rootObject': project})
project_file.ComputeIDs()
project_file.Print()
Xcode project files are always encoded in UTF-8. This module will accept
strings of either the str class or the unicode class. Strings of class str
are assumed to already be encoded in UTF-8. Obviously, if you're just using
ASCII, you won't encounter difficulties because ASCII is a UTF-8 subset.
Strings of class unicode are handled properly and encoded in UTF-8 when
a project file is output.
"""
import gyp.common
import posixpath
import re
import struct
import sys
# hashlib is supplied as of Python 2.5 as the replacement interface for sha
# and other secure hashes. In 2.6, sha is deprecated. Import hashlib if
# available, avoiding a deprecation warning under 2.6. Import sha otherwise,
# preserving 2.4 compatibility.
try:
  import hashlib
  _new_sha1 = hashlib.sha1
except ImportError:
  # Pre-2.5 fallback: the deprecated sha module exposes the same interface.
  import sha
  _new_sha1 = sha.new
# See XCObject._EncodeString. This pattern is used to determine when a string
# can be printed unquoted. Strings that match this pattern may be printed
# unquoted. Strings that do not match must be quoted and may be further
# transformed to be properly encoded. Note that this expression matches the
# characters listed with "+", for 1 or more occurrences: if a string is empty,
# it must not match this pattern, because it needs to be encoded as "".
_unquoted = re.compile('^[A-Za-z0-9$./_]+$')
# Strings that match this pattern are quoted regardless of what _unquoted says.
# Oddly, Xcode will quote any string with a run of three or more underscores.
_quoted = re.compile('___')
# This pattern should match any character that needs to be escaped by
# XCObject._EncodeString. See that function.
_escaped = re.compile('[\\\\"]|[\x00-\x1f]')
# Used by SourceTreeAndPathFromPath
_path_leading_variable = re.compile(r'^\$\((.*?)\)(/(.*))?$')
def SourceTreeAndPathFromPath(input_path):
  """Split input_path into a (source_tree, output_path) tuple.

  Examples:
    input_path         (source_tree, output_path)
    '$(VAR)/path'      ('VAR', 'path')
    '$(VAR)'           ('VAR', None)
    'path'             (None, 'path')
  """
  match = _path_leading_variable.match(input_path)
  if not match:
    # No leading $(VAR): the whole input is a plain path.
    return (None, input_path)
  # group(3) is the part after the slash; it is None for a bare '$(VAR)'.
  return (match.group(1), match.group(3))
def ConvertVariablesToShellSyntax(input_string):
  """Rewrite every Xcode-style $(VAR) reference as shell-style ${VAR}."""
  expand = lambda match: '${' + match.group(1) + '}'
  return re.sub(r'\$\((.*?)\)', expand, input_string)
class XCObject(object):
"""The abstract base of all class types used in Xcode project files.
Class variables:
_schema: A dictionary defining the properties of this class. The keys to
_schema are string property keys as used in project files. Values
are a list of four or five elements:
[ is_list, property_type, is_strong, is_required, default ]
is_list: True if the property described is a list, as opposed
to a single element.
property_type: The type to use as the value of the property,
or if is_list is True, the type to use for each
element of the value's list. property_type must
be an XCObject subclass, or one of the built-in
types str, int, or dict.
is_strong: If property_type is an XCObject subclass, is_strong
is True to assert that this class "owns," or serves
as parent, to the property value (or, if is_list is
True, values). is_strong must be False if
property_type is not an XCObject subclass.
is_required: True if the property is required for the class.
Note that is_required being True does not preclude
an empty string ("", in the case of property_type
str) or list ([], in the case of is_list True) from
being set for the property.
          default: Optional. If is_required is True, default may be set
to provide a default value for objects that do not supply
their own value. If is_required is True and default
is not provided, users of the class must supply their own
value for the property.
Note that although the values of the array are expressed in
boolean terms, subclasses provide values as integers to conserve
horizontal space.
_should_print_single_line: False in XCObject. Subclasses whose objects
should be written to the project file in the
alternate single-line format, such as
PBXFileReference and PBXBuildFile, should
set this to True.
_encode_transforms: Used by _EncodeString to encode unprintable characters.
The index into this list is the ordinal of the
character to transform; each value is a string
used to represent the character in the output. XCObject
provides an _encode_transforms list suitable for most
XCObject subclasses.
_alternate_encode_transforms: Provided for subclasses that wish to use
the alternate encoding rules. Xcode seems
to use these rules when printing objects in
single-line format. Subclasses that desire
this behavior should set _encode_transforms
to _alternate_encode_transforms.
_hashables: A list of XCObject subclasses that can be hashed by ComputeIDs
to construct this object's ID. Most classes that need custom
hashing behavior should do it by overriding Hashables,
but in some cases an object's parent may wish to push a
hashable value into its child, and it can do so by appending
to _hashables.
Attributes:
id: The object's identifier, a 24-character uppercase hexadecimal string.
Usually, objects being created should not set id until the entire
project file structure is built. At that point, UpdateIDs() should
be called on the root object to assign deterministic values for id to
each object in the tree.
parent: The object's parent. This is set by a parent XCObject when a child
object is added to it.
_properties: The object's property dictionary. An object's properties are
described by its class' _schema variable.
"""
_schema = {}
_should_print_single_line = False
# See _EncodeString.
_encode_transforms = []
i = 0
while i < ord(' '):
_encode_transforms.append('\\U%04x' % i)
i = i + 1
_encode_transforms[7] = '\\a'
_encode_transforms[8] = '\\b'
_encode_transforms[9] = '\\t'
_encode_transforms[10] = '\\n'
_encode_transforms[11] = '\\v'
_encode_transforms[12] = '\\f'
_encode_transforms[13] = '\\n'
_alternate_encode_transforms = list(_encode_transforms)
_alternate_encode_transforms[9] = chr(9)
_alternate_encode_transforms[10] = chr(10)
_alternate_encode_transforms[11] = chr(11)
  def __init__(self, properties=None, id=None, parent=None):
    """Construct an XCObject.

    properties: optional dict of property values, applied on top of the
        schema defaults (via UpdateProperties, defined later in this class).
    id: 24-character identifier; normally left None here and assigned later
        by ComputeIDs once the whole project tree has been built.
    parent: the owning XCObject, if any.
    """
    self.id = id
    self.parent = parent
    self._properties = {}
    # Extra values a parent may push in to influence this object's ID hash.
    self._hashables = []
    self._SetDefaultsFromSchema()
    self.UpdateProperties(properties)
def __repr__(self):
try:
name = self.Name()
except NotImplementedError:
return '<%s at 0x%x>' % (self.__class__.__name__, id(self))
return '<%s %r at 0x%x>' % (self.__class__.__name__, name, id(self))
  def Copy(self):
    """Make a copy of this object.

    The new object will have its own copy of lists and dicts. Any XCObject
    objects owned by this object (marked "strong") will be copied in the
    new object, even those found in lists. If this object has any weak
    references to other XCObjects, the same references are added to the new
    object without making a copy.
    """
    that = self.__class__(id=self.id, parent=self.parent)
    # NOTE: Python 2 idioms below (iteritems, unicode) — this module targets
    # Python 2.
    for key, value in self._properties.iteritems():
      is_strong = self._schema[key][2]
      if isinstance(value, XCObject):
        if is_strong:
          new_value = value.Copy()
          new_value.parent = that
          that._properties[key] = new_value
        else:
          that._properties[key] = value
      elif isinstance(value, str) or isinstance(value, unicode) or \
           isinstance(value, int):
        # Immutable scalars can be shared between the copies.
        that._properties[key] = value
      elif isinstance(value, list):
        if is_strong:
          # If is_strong is True, each element is an XCObject, so it's safe to
          # call Copy.
          that._properties[key] = []
          for item in value:
            new_item = item.Copy()
            new_item.parent = that
            that._properties[key].append(new_item)
        else:
          # Weak list: shallow-copy the list itself, share the elements.
          that._properties[key] = value[:]
      elif isinstance(value, dict):
        # dicts are never strong.
        if is_strong:
          raise TypeError('Strong dict for key ' + key + ' in ' + \
                          self.__class__.__name__)
        else:
          that._properties[key] = value.copy()
      else:
        raise TypeError('Unexpected type ' + value.__class__.__name__ + \
                        ' for key ' + key + ' in ' + self.__class__.__name__)
    return that
  def Name(self):
    """Return the name corresponding to an object.

    Not all objects necessarily need to be nameable, and not all that do have
    a "name" property. Override as needed.

    Raises NotImplementedError when the object has no name at all.
    """
    # If the schema indicates that "name" is required, try to access the
    # property even if it doesn't exist. This will result in a KeyError
    # being raised for the property that should be present, which seems more
    # appropriate than NotImplementedError in this case.
    if 'name' in self._properties or \
       ('name' in self._schema and self._schema['name'][3]):
      return self._properties['name']
    raise NotImplementedError(self.__class__.__name__ + ' must implement Name')
def Comment(self):
"""Return a comment string for the object.
Most objects just use their name as the comment, but PBXProject uses
different values.
The returned comment is not escaped and does not have any comment marker
strings applied to it.
"""
return self.Name()
def Hashables(self):
hashables = [self.__class__.__name__]
name = self.Name()
if name != None:
hashables.append(name)
hashables.extend(self._hashables)
return hashables
  def HashablesForChild(self):
    """Return a separate list of hashables to seed child objects' ID
    computation, or None (the default) to let children build on this object's
    own hash state. See ComputeIDs for how the value is used.
    """
    return None
  def ComputeIDs(self, recursive=True, overwrite=True, seed_hash=None):
    """Set "id" properties deterministically.

    An object's "id" property is set based on a hash of its class type and
    name, as well as the class type and name of all ancestor objects. As
    such, it is only advisable to call ComputeIDs once an entire project file
    tree is built.

    If recursive is True, recurse into all descendant objects and update their
    hashes.

    If overwrite is True, any existing value set in the "id" property will be
    replaced.
    """
    def _HashUpdate(hash, data):
      """Update hash with data's length and contents.

      If the hash were updated only with the value of data, it would be
      possible for clowns to induce collisions by manipulating the names of
      their objects. By adding the length, it's exceedingly less likely that
      ID collisions will be encountered, intentionally or not.
      """
      # '>i' = big-endian 4-byte length prefix before the payload.
      hash.update(struct.pack('>i', len(data)))
      hash.update(data)
    if seed_hash is None:
      seed_hash = _new_sha1()
    hash = seed_hash.copy()
    hashables = self.Hashables()
    assert len(hashables) > 0
    for hashable in hashables:
      _HashUpdate(hash, hashable)
    if recursive:
      # Children either continue from this object's hash state or from a
      # fresh seed built from HashablesForChild (when it returns non-None).
      hashables_for_child = self.HashablesForChild()
      if hashables_for_child is None:
        child_hash = hash
      else:
        assert len(hashables_for_child) > 0
        child_hash = seed_hash.copy()
        for hashable in hashables_for_child:
          _HashUpdate(child_hash, hashable)
      for child in self.Children():
        child.ComputeIDs(recursive, overwrite, child_hash)
    if overwrite or self.id is None:
      # Xcode IDs are only 96 bits (24 hex characters), but a SHA-1 digest
      # is 160 bits. Instead of throwing out 64 bits of the digest, xor them
      # into the portion that gets used.
      assert hash.digest_size % 4 == 0
      # NOTE: Python 2 integer division; under Python 3 this would need //.
      digest_int_count = hash.digest_size / 4
      digest_ints = struct.unpack('>' + 'I' * digest_int_count, hash.digest())
      id_ints = [0, 0, 0]
      for index in xrange(0, digest_int_count):
        id_ints[index % 3] ^= digest_ints[index]
      self.id = '%08X%08X%08X' % tuple(id_ints)
  def EnsureNoIDCollisions(self):
    """Verifies that no two objects have the same ID. Checks all descendants.

    Raises KeyError on the first duplicate found.
    """
    # NOTE(review): the error message reads self._properties['rootObject'],
    # so this is presumably meant to be called on the file's root object
    # (which has a rootObject property) — confirm against callers.
    ids = {}
    descendants = self.Descendants()
    for descendant in descendants:
      if descendant.id in ids:
        other = ids[descendant.id]
        raise KeyError(
            'Duplicate ID %s, objects "%s" and "%s" in "%s"' % \
            (descendant.id, str(descendant._properties),
             str(other._properties), self._properties['rootObject'].Name()))
      ids[descendant.id] = descendant
def Children(self):
"""Returns a list of all of this object's owned (strong) children."""
children = []
for property, attributes in self._schema.iteritems():
(is_list, property_type, is_strong) = attributes[0:3]
if is_strong and property in self._properties:
if not is_list:
children.append(self._properties[property])
else:
children.extend(self._properties[property])
return children
def Descendants(self):
"""Returns a list of all of this object's descendants, including this
object.
"""
children = self.Children()
descendants = [self]
for child in children:
descendants.extend(child.Descendants())
return descendants
def PBXProjectAncestor(self):
# The base case for recursion is defined at PBXProject.PBXProjectAncestor.
if self.parent:
return self.parent.PBXProjectAncestor()
return None
def _EncodeComment(self, comment):
"""Encodes a comment to be placed in the project file output, mimicing
Xcode behavior.
"""
# This mimics Xcode behavior by wrapping the comment in "/*" and "*/". If
# the string already contains a "*/", it is turned into "(*)/". This keeps
# the file writer from outputting something that would be treated as the
# end of a comment in the middle of something intended to be entirely a
# comment.
return '/* ' + comment.replace('*/', '(*)/') + ' */'
  def _EncodeTransform(self, match):
    """Return the escaped replacement for one regex-matched character.

    Used as the replacement callable for re.sub inside _EncodeString.
    """
    # This function works closely with _EncodeString. It will only be called
    # by re.sub with match.group(0) containing a character matched by
    # the _escaped expression.
    char = match.group(0)
    # Backslashes (\) and quotation marks (") are always replaced with a
    # backslash-escaped version of the same. Everything else gets its
    # replacement from the class' _encode_transforms array.
    if char == '\\':
      return '\\\\'
    if char == '"':
      return '\\"'
    return self._encode_transforms[ord(char)]
  def _EncodeString(self, value):
    """Encodes a string to be placed in the project file output, mimicking
    Xcode behavior.
    """
    # Use quotation marks when any character outside of the range A-Z, a-z, 0-9,
    # $ (dollar sign), . (period), and _ (underscore) is present. Also use
    # quotation marks to represent empty strings.
    #
    # Escape " (double-quote) and \ (backslash) by preceding them with a
    # backslash.
    #
    # Some characters below the printable ASCII range are encoded specially:
    # 7 ^G BEL is encoded as "\a"
    # 8 ^H BS is encoded as "\b"
    # 11 ^K VT is encoded as "\v"
    # 12 ^L NP is encoded as "\f"
    # 127 ^? DEL is passed through as-is without escaping
    # - In PBXFileReference and PBXBuildFile objects:
    # 9 ^I HT is passed through as-is without escaping
    # 10 ^J NL is passed through as-is without escaping
    # 13 ^M CR is passed through as-is without escaping
    # - In other objects:
    # 9 ^I HT is encoded as "\t"
    # 10 ^J NL is encoded as "\n"
    # 13 ^M CR is encoded as "\n" rendering it indistinguishable from
    # 10 ^J NL
    # All other characters within the ASCII control character range (0 through
    # 31 inclusive) are encoded as "\U001f" referring to the Unicode code point
    # in hexadecimal. For example, character 14 (^N SO) is encoded as "\U000e".
    # Characters above the ASCII range are passed through to the output encoded
    # as UTF-8 without any escaping. These mappings are contained in the
    # class' _encode_transforms list.
    if _unquoted.search(value) and not _quoted.search(value):
      # Safe to emit bare: only [A-Za-z0-9$./_] and no '___' run.
      return value
    return '"' + _escaped.sub(self._EncodeTransform, value) + '"'
def _XCPrint(self, file, tabs, line):
file.write('\t' * tabs + line)
def _XCPrintableValue(self, tabs, value, flatten_list=False):
  """Returns a representation of value that may be printed in a project file,
  mimicing Xcode's behavior.

  _XCPrintableValue can handle str and int values, XCObjects (which are
  made printable by returning their id property), and list and dict objects
  composed of any of the above types.  When printing a list or dict, and
  _should_print_single_line is False, the tabs parameter is used to determine
  how much to indent the lines corresponding to the items in the list or
  dict.

  If flatten_list is True, single-element lists will be transformed into
  strings.

  Raises:
    TypeError: value (or a nested item) is not one of the supported types.
  """
  printable = ''
  comment = None

  # Single-line mode separates elements with spaces and uses no tab
  # indentation; multi-line mode indents one level deeper than tabs.
  if self._should_print_single_line:
    sep = ' '
    element_tabs = ''
    end_tabs = ''
  else:
    sep = '\n'
    element_tabs = '\t' * (tabs + 1)
    end_tabs = '\t' * tabs

  if isinstance(value, XCObject):
    # XCObjects are printed as their id, with their comment appended below.
    printable += value.id
    comment = value.Comment()
  elif isinstance(value, str):
    printable += self._EncodeString(value)
  elif isinstance(value, unicode):
    # unicode values are re-encoded to UTF-8 str before encoding for output.
    printable += self._EncodeString(value.encode('utf-8'))
  elif isinstance(value, int):
    printable += str(value)
  elif isinstance(value, list):
    if flatten_list and len(value) <= 1:
      # Empty and single-element lists collapse to a (possibly empty) string.
      if len(value) == 0:
        printable += self._EncodeString('')
      else:
        printable += self._EncodeString(value[0])
    else:
      # Recurse into each item at one deeper indentation level.
      printable = '(' + sep
      for item in value:
        printable += element_tabs + \
                     self._XCPrintableValue(tabs + 1, item, flatten_list) + \
                     ',' + sep
      printable += end_tabs + ')'
  elif isinstance(value, dict):
    # Dict entries are emitted in sorted key order for stable output.
    printable = '{' + sep
    for item_key, item_value in sorted(value.iteritems()):
      printable += element_tabs + \
          self._XCPrintableValue(tabs + 1, item_key, flatten_list) + ' = ' + \
          self._XCPrintableValue(tabs + 1, item_value, flatten_list) + ';' + \
          sep
    printable += end_tabs + '}'
  else:
    raise TypeError("Can't make " + value.__class__.__name__ + ' printable')

  if comment != None:
    printable += ' ' + self._EncodeComment(comment)

  return printable
def _XCKVPrint(self, file, tabs, key, value):
  """Prints a key and value, members of an XCObject's _properties dictionary,
  to file.

  tabs is an int identifying the indentation level.  If the class'
  _should_print_single_line variable is True, tabs is ignored and the
  key-value pair will be followed by a space instead of a newline.

  Raises:
    TypeError: re-raised from _XCPrintableValue, annotated with the key
        being printed.
  """
  if self._should_print_single_line:
    printable = ''
    after_kv = ' '
  else:
    printable = '\t' * tabs
    after_kv = '\n'

  # Xcode usually prints remoteGlobalIDString values in PBXContainerItemProxy
  # objects without comments.  Sometimes it prints them with comments, but
  # the majority of the time, it doesn't.  To avoid unnecessary changes to
  # the project file after Xcode opens it, don't write comments for
  # remoteGlobalIDString.  This is a sucky hack and it would certainly be
  # cleaner to extend the schema to indicate whether or not a comment should
  # be printed, but since this is the only case where the problem occurs and
  # Xcode itself can't seem to make up its mind, the hack will suffice.
  #
  # Also see PBXContainerItemProxy._schema['remoteGlobalIDString'].
  if key == 'remoteGlobalIDString' and isinstance(self,
                                                  PBXContainerItemProxy):
    # Printing the bare id (not the XCObject) suppresses the comment.
    value_to_print = value.id
  else:
    value_to_print = value

  # PBXBuildFile's settings property is represented in the output as a dict,
  # but a hack here has it represented as a string. Arrange to strip off the
  # quotes so that it shows up in the output as expected.
  if key == 'settings' and isinstance(self, PBXBuildFile):
    strip_value_quotes = True
  else:
    strip_value_quotes = False

  # In another one-off, let's set flatten_list on buildSettings properties
  # of XCBuildConfiguration objects, because that's how Xcode treats them.
  if key == 'buildSettings' and isinstance(self, XCBuildConfiguration):
    flatten_list = True
  else:
    flatten_list = False

  try:
    printable_key = self._XCPrintableValue(tabs, key, flatten_list)
    printable_value = self._XCPrintableValue(tabs, value_to_print,
                                             flatten_list)
    # Strip surrounding quotes only when both ends actually carry them.
    if strip_value_quotes and len(printable_value) > 1 and \
       printable_value[0] == '"' and printable_value[-1] == '"':
      printable_value = printable_value[1:-1]
    printable += printable_key + ' = ' + printable_value + ';' + after_kv
  except TypeError, e:
    # Annotate the error with the offending key before propagating it.
    gyp.common.ExceptionAppend(e,
                               'while printing key "%s"' % key)
    raise

  self._XCPrint(file, 0, printable)
def Print(self, file=sys.stdout):
  """Prints a reprentation of this object to file, adhering to Xcode output
  formatting.

  Verifies required properties before printing; raises KeyError via
  VerifyHasRequiredProperties if any are missing.
  """
  self.VerifyHasRequiredProperties()

  if self._should_print_single_line:
    # When printing an object in a single line, Xcode doesn't put any space
    # between the beginning of a dictionary (or presumably a list) and the
    # first contained item, so you wind up with snippets like
    #   ...CDEF = {isa = PBXFileReference; fileRef = 0123...
    # If it were me, I would have put a space in there after the opening
    # curly, but I guess this is just another one of those inconsistencies
    # between how Xcode prints PBXFileReference and PBXBuildFile objects as
    # compared to other objects.  Mimic Xcode's behavior here by using an
    # empty string for sep.
    sep = ''
    end_tabs = 0
  else:
    sep = '\n'
    end_tabs = 2

  # Start the object.  For example, '\t\tPBXProject = {\n'.
  self._XCPrint(file, 2, self._XCPrintableValue(2, self) + ' = {' + sep)

  # "isa" isn't in the _properties dictionary, it's an intrinsic property
  # of the class which the object belongs to.  Xcode always outputs "isa"
  # as the first element of an object dictionary.
  self._XCKVPrint(file, 3, 'isa', self.__class__.__name__)

  # The remaining elements of an object dictionary are sorted alphabetically.
  for property, value in sorted(self._properties.iteritems()):
    self._XCKVPrint(file, 3, property, value)

  # End the object.
  self._XCPrint(file, end_tabs, '};\n')
def UpdateProperties(self, properties, do_copy=False):
  """Merge the supplied properties into the _properties dictionary.

  The input properties must adhere to the class schema or a KeyError or
  TypeError exception will be raised.  If adding an object of an XCObject
  subclass and the schema indicates a strong relationship, the object's
  parent will be set to this object.

  If do_copy is True, then lists, dicts, strong-owned XCObjects, and
  strong-owned XCObjects in lists will be copied instead of having their
  references added.

  Raises:
    KeyError: a supplied property is not present in the class schema.
    TypeError: a supplied value does not match the schema's declared type.
  """
  if properties is None:
    return

  for property, value in properties.iteritems():
    # Make sure the property is in the schema.
    if not property in self._schema:
      raise KeyError(property + ' not in ' + self.__class__.__name__)

    # Make sure the property conforms to the schema.
    (is_list, property_type, is_strong) = self._schema[property][0:3]
    if is_list:
      if value.__class__ != list:
        raise TypeError(
              property + ' of ' + self.__class__.__name__ + \
              ' must be list, not ' + value.__class__.__name__)
      for item in value:
        if not isinstance(item, property_type) and \
           not (item.__class__ == unicode and property_type == str):
          # Accept unicode where str is specified.  str is treated as
          # UTF-8-encoded.
          raise TypeError(
                'item of ' + property + ' of ' + self.__class__.__name__ + \
                ' must be ' + property_type.__name__ + ', not ' + \
                item.__class__.__name__)
    elif not isinstance(value, property_type) and \
         not (value.__class__ == unicode and property_type == str):
      # Accept unicode where str is specified.  str is treated as
      # UTF-8-encoded.
      raise TypeError(
            property + ' of ' + self.__class__.__name__ + ' must be ' + \
            property_type.__name__ + ', not ' + value.__class__.__name__)

    # Checks passed, perform the assignment.
    if do_copy:
      if isinstance(value, XCObject):
        # Strong references get a deep Copy; weak ones share the object.
        if is_strong:
          self._properties[property] = value.Copy()
        else:
          self._properties[property] = value
      elif isinstance(value, str) or isinstance(value, unicode) or \
           isinstance(value, int):
        # Immutable scalars are safe to share.
        self._properties[property] = value
      elif isinstance(value, list):
        if is_strong:
          # If is_strong is True, each element is an XCObject, so it's safe
          # to call Copy.
          self._properties[property] = []
          for item in value:
            self._properties[property].append(item.Copy())
        else:
          # Shallow-copy the list so the caller's list isn't aliased.
          self._properties[property] = value[:]
      elif isinstance(value, dict):
        self._properties[property] = value.copy()
      else:
        raise TypeError("Don't know how to copy a " + \
                        value.__class__.__name__ + ' object for ' + \
                        property + ' in ' + self.__class__.__name__)
    else:
      self._properties[property] = value

    # Set up the child's back-reference to this object.  Don't use |value|
    # any more because it may not be right if do_copy is true.
    if is_strong:
      if not is_list:
        self._properties[property].parent = self
      else:
        for item in self._properties[property]:
          item.parent = self
def HasProperty(self, key):
  """Returns True if the property named key has been set on this object."""
  return key in self._properties
def GetProperty(self, key):
  """Returns the value of the property named key.

  Raises:
    KeyError: the property has not been set on this object.
  """
  return self._properties[key]
def SetProperty(self, key, value):
  """Sets the single property key to value, with full schema validation.

  Delegates to UpdateProperties so the same KeyError/TypeError checks and
  strong-reference parenting apply.
  """
  self.UpdateProperties({key: value})
def DelProperty(self, key):
  """Removes the property named key from this object, if it is present.

  Missing keys are ignored silently.
  """
  self._properties.pop(key, None)
def AppendProperty(self, key, value):
  """Appends value to the list-typed property named key.

  The key must identify a list property in the class schema, and value must
  conform to the schema's item type, or a KeyError/TypeError is raised.  If
  the schema marks the relationship as strong, value's parent is set to this
  object.

  Raises:
    KeyError: key is not present in the class schema.
    TypeError: key is not a list property, or value has the wrong type.
  """
  # TODO(mark): Support ExtendProperty too (and make this call that)?

  # Schema validation.
  if key not in self._schema:
    raise KeyError(key + ' not in ' + self.__class__.__name__)

  (is_list, property_type, is_strong) = self._schema[key][0:3]
  if not is_list:
    raise TypeError(key + ' of ' + self.__class__.__name__ + ' must be list')
  if not isinstance(value, property_type):
    raise TypeError('item of ' + key + ' of ' + self.__class__.__name__ + \
                    ' must be ' + property_type.__name__ + ', not ' + \
                    value.__class__.__name__)

  # Create the receiving list on first use.
  self._properties.setdefault(key, [])

  # Set up the ownership link.
  if is_strong:
    value.parent = self

  # Store the item.
  self._properties[key].append(value)
def VerifyHasRequiredProperties(self):
  """Ensure that all properties identified as required by the schema are
  set.

  Raises:
    KeyError: a schema entry flagged as required has no value set.
  """

  # TODO(mark): A stronger verification mechanism is needed.  Some
  # subclasses need to perform validation beyond what the schema can enforce.
  for property, attributes in self._schema.iteritems():
    # Schema tuple layout: (is_list, property_type, is_strong, is_required).
    (is_list, property_type, is_strong, is_required) = attributes[0:4]
    if is_required and not property in self._properties:
      raise KeyError(self.__class__.__name__ + ' requires ' + property)
def _SetDefaultsFromSchema(self):
  """Assign object default values according to the schema.  This will not
  overwrite properties that have already been set."""

  defaults = {}
  for property, attributes in self._schema.iteritems():
    (is_list, property_type, is_strong, is_required) = attributes[0:4]
    # Only required properties carry a default, and only schema entries of
    # length >= 5 actually supply one (as the fifth element).
    if is_required and len(attributes) >= 5 and \
       not property in self._properties:
      default = attributes[4]

      defaults[property] = default

  if len(defaults) > 0:
    # Use do_copy=True so that each new object gets its own copy of strong
    # objects, lists, and dicts.
    self.UpdateProperties(defaults, do_copy=True)
class XCHierarchicalElement(XCObject):
  """Abstract base for PBXGroup and PBXFileReference.  Not represented in a
  project file."""

  # TODO(mark): Do name and path belong here?  Probably so.
  # If path is set and name is not, name may have a default value.  Name will
  # be set to the basename of path, if the basename of path is different from
  # the full value of path.  If path is already just a leaf name, name will
  # not be set.
  _schema = XCObject._schema.copy()
  _schema.update({
    'comments':       [0, str, 0, 0],
    'fileEncoding':   [0, str, 0, 0],
    'includeInIndex': [0, int, 0, 0],
    'indentWidth':    [0, int, 0, 0],
    'lineEnding':     [0, int, 0, 0],
    'sourceTree':     [0, str, 0, 1, '<group>'],
    'tabWidth':       [0, int, 0, 0],
    'usesTabs':       [0, int, 0, 0],
    'wrapsLines':     [0, int, 0, 0],
  })

  def __init__(self, properties=None, id=None, parent=None):
    # super
    XCObject.__init__(self, properties, id, parent)

    # Derive a default name from path when no explicit name was given and
    # path has a distinct basename.
    if 'path' in self._properties and not 'name' in self._properties:
      path = self._properties['path']
      name = posixpath.basename(path)
      if name != '' and path != name:
        self.SetProperty('name', name)

    if 'path' in self._properties and \
       (not 'sourceTree' in self._properties or \
        self._properties['sourceTree'] == '<group>'):
      # If the pathname begins with an Xcode variable like "$(SDKROOT)/", take
      # the variable out and make the path be relative to that variable by
      # assigning the variable name as the sourceTree.
      (source_tree, path) = SourceTreeAndPathFromPath(self._properties['path'])
      if source_tree != None:
        self._properties['sourceTree'] = source_tree
      if path != None:
        self._properties['path'] = path
      if source_tree != None and path is None and \
         not 'name' in self._properties:
        # The path was of the form "$(SDKROOT)" with no path following it.
        # This object is now relative to that variable, so it has no path
        # attribute of its own.  It does, however, keep a name.
        del self._properties['path']
        self._properties['name'] = source_tree

  def Name(self):
    """Returns the display name: the name property if set, falling back to
    path, or None when neither is set (the root PBXGroup)."""
    if 'name' in self._properties:
      return self._properties['name']
    elif 'path' in self._properties:
      return self._properties['path']
    else:
      # This happens in the case of the root PBXGroup.
      return None

  def Hashables(self):
    """Custom hashables for XCHierarchicalElements.

    XCHierarchicalElements are special.  Generally, their hashes shouldn't
    change if the paths don't change.  The normal XCObject implementation of
    Hashables adds a hashable for each object, which means that if
    the hierarchical structure changes (possibly due to changes caused when
    TakeOverOnlyChild runs and encounters slight changes in the hierarchy),
    the hashes will change.  For example, if a project file initially contains
    a/b/f1 and a/b becomes collapsed into a/b, f1 will have a single parent
    a/b.  If someone later adds a/f2 to the project file, a/b can no longer be
    collapsed, and f1 winds up with parent b and grandparent a.  That would
    be sufficient to change f1's hash.

    To counteract this problem, hashables for all XCHierarchicalElements
    except for the main group (which has neither a name nor a path) are
    taken to be just the set of path components.  Because hashables are
    inherited from parents, this provides assurance that a/b/f1 has the same
    set of hashables whether its parent is b or a/b.

    The main group is a special case.  As it is permitted to have no name or
    path, it is permitted to use the standard XCObject hash mechanism.  This
    is not considered a problem because there can be only one main group.
    """
    if self == self.PBXProjectAncestor()._properties['mainGroup']:
      # super
      return XCObject.Hashables(self)

    hashables = []

    # Put the name in first, ensuring that if TakeOverOnlyChild collapses
    # children into a top-level group like "Source", the name always goes
    # into the list of hashables without interfering with path components.
    if 'name' in self._properties:
      # Make it less likely for people to manipulate hashes by following the
      # pattern of always pushing an object type value onto the list first.
      hashables.append(self.__class__.__name__ + '.name')
      hashables.append(self._properties['name'])

    # NOTE: This still has the problem that if an absolute path is encountered,
    # including paths with a sourceTree, they'll still inherit their parents'
    # hashables, even though the paths aren't relative to their parents.  This
    # is not expected to be much of a problem in practice.
    path = self.PathFromSourceTreeAndPath()
    if path != None:
      components = path.split(posixpath.sep)
      for component in components:
        hashables.append(self.__class__.__name__ + '.path')
        hashables.append(component)

    hashables.extend(self._hashables)

    return hashables

  def Compare(self, other):
    """Comparator sorting groups before files; same-rank objects by name.

    Raises:
      KeyError: self or other is not a PBXFileReference, PBXGroup, or
          PBXVariantGroup.
    """
    # Allow comparison of these types.  PBXGroup has the highest sort rank;
    # PBXVariantGroup is treated as equal to PBXFileReference.
    valid_class_types = {
      PBXFileReference: 'file',
      PBXGroup:         'group',
      PBXVariantGroup:  'file',
    }
    self_type = valid_class_types[self.__class__]
    other_type = valid_class_types[other.__class__]

    if self_type == other_type:
      # If the two objects are of the same sort rank, compare their names.
      return cmp(self.Name(), other.Name())

    # Otherwise, sort groups before everything else.
    if self_type == 'group':
      return -1
    return 1

  def CompareRootGroup(self, other):
    """Comparator for direct children of the PBXProject mainGroup.

    Groups whose names appear in the well-known list below sort in that
    listed order ahead of everything else; all other pairs fall back to
    Compare.
    """
    # This function should be used only to compare direct children of the
    # containing PBXProject's mainGroup.  These groups should appear in the
    # listed order.
    # TODO(mark): "Build" is used by gyp.generator.xcode, perhaps the
    # generator should have a way of influencing this list rather than having
    # to hardcode for the generator here.
    order = ['Source', 'Intermediates', 'Projects', 'Frameworks', 'Products',
             'Build']

    # If the groups aren't in the listed order, do a name comparison.
    # Otherwise, groups in the listed order should come before those that
    # aren't.
    self_name = self.Name()
    other_name = other.Name()
    self_in = isinstance(self, PBXGroup) and self_name in order
    # Bug fix: this previously tested isinstance(self, PBXGroup), so
    # other_in reflected self's type rather than other's.
    other_in = isinstance(other, PBXGroup) and other_name in order
    if not self_in and not other_in:
      return self.Compare(other)
    if self_name in order and not other_name in order:
      return -1
    if other_name in order and not self_name in order:
      return 1

    # If both groups are in the listed order, go by the defined order.
    self_index = order.index(self_name)
    other_index = order.index(other_name)
    if self_index < other_index:
      return -1
    if self_index > other_index:
      return 1
    return 0

  def PathFromSourceTreeAndPath(self):
    """Returns sourceTree and path flattened into one string, or None.

    A sourceTree other than "<group>" is rendered as "$(sourceTree)" so the
    result is comparable to ordinary path strings.
    """
    components = []
    if self._properties['sourceTree'] != '<group>':
      components.append('$(' + self._properties['sourceTree'] + ')')
    if 'path' in self._properties:
      components.append(self._properties['path'])

    if len(components) > 0:
      return posixpath.join(*components)

    return None

  def FullPath(self):
    """Returns a full path to self relative to the project file, or relative
    to some other source tree.

    Starts with self and walks up the chain of parents prepending their
    paths, if any, until no more parents are available (project-relative
    path) or until a path relative to some source tree ("$(...)" or an
    absolute path) is found.
    """
    xche = self
    path = None
    while isinstance(xche, XCHierarchicalElement) and \
          (path is None or \
           (not path.startswith('/') and not path.startswith('$'))):
      this_path = xche.PathFromSourceTreeAndPath()
      if this_path != None and path != None:
        path = posixpath.join(this_path, path)
      elif this_path != None:
        path = this_path
      xche = xche.parent

    return path
class PBXGroup(XCHierarchicalElement):
  """
  Attributes:
    _children_by_path: Maps pathnames of children of this PBXGroup to the
      actual child XCHierarchicalElement objects.
    _variant_children_by_name_and_path: Maps (name, path) tuples of
      PBXVariantGroup children to the actual child PBXVariantGroup objects.
  """

  _schema = XCHierarchicalElement._schema.copy()
  _schema.update({
    'children': [1, XCHierarchicalElement, 1, 1, []],
    'name':     [0, str, 0, 0],
    'path':     [0, str, 0, 0],
  })

  def __init__(self, properties=None, id=None, parent=None):
    # super
    XCHierarchicalElement.__init__(self, properties, id, parent)
    # Populate the lookup dicts from any children supplied via properties.
    self._children_by_path = {}
    self._variant_children_by_name_and_path = {}
    for child in self._properties.get('children', []):
      self._AddChildToDicts(child)

  def Hashables(self):
    # super
    hashables = XCHierarchicalElement.Hashables(self)

    # It is not sufficient to just rely on name and parent to build a unique
    # hashable : a node could have two child PBXGroup sharing a common name.
    # To add entropy the hashable is enhanced with the names of all its
    # children.
    for child in self._properties.get('children', []):
      child_name = child.Name()
      if child_name != None:
        hashables.append(child_name)
    return hashables

  def HashablesForChild(self):
    # To avoid a circular reference the hashables used to compute a child id do
    # not include the child names.
    return XCHierarchicalElement.Hashables(self)

  def _AddChildToDicts(self, child):
    # Sets up this PBXGroup object's dicts to reference the child properly.
    child_path = child.PathFromSourceTreeAndPath()
    if child_path:
      if child_path in self._children_by_path:
        raise ValueError('Found multiple children with path ' + child_path)
      self._children_by_path[child_path] = child

    if isinstance(child, PBXVariantGroup):
      child_name = child._properties.get('name', None)
      key = (child_name, child_path)
      if key in self._variant_children_by_name_and_path:
        raise ValueError('Found multiple PBXVariantGroup children with ' + \
                         'name ' + str(child_name) + ' and path ' + \
                         str(child_path))
      self._variant_children_by_name_and_path[key] = child

  def AppendChild(self, child):
    # Callers should use this instead of calling
    # AppendProperty('children', child) directly because this function
    # maintains the group's dicts.
    self.AppendProperty('children', child)
    self._AddChildToDicts(child)

  def GetChildByName(self, name):
    # This is not currently optimized with a dict as GetChildByPath is because
    # it has few callers.  Most callers probably want GetChildByPath.  This
    # function is only useful to get children that have names but no paths,
    # which is rare.  The children of the main group ("Source", "Products",
    # etc.) is pretty much the only case where this likely to come up.
    #
    # TODO(mark): Maybe this should raise an error if more than one child is
    # present with the same name.
    if not 'children' in self._properties:
      return None

    for child in self._properties['children']:
      if child.Name() == name:
        return child

    return None

  def GetChildByPath(self, path):
    # Returns the child whose flattened sourceTree/path equals path, or None.
    if not path:
      return None

    if path in self._children_by_path:
      return self._children_by_path[path]

    return None

  def GetChildByRemoteObject(self, remote_object):
    # This method is a little bit esoteric.  Given a remote_object, which
    # should be a PBXFileReference in another project file, this method will
    # return this group's PBXReferenceProxy object serving as a local proxy
    # for the remote PBXFileReference.
    #
    # This function might benefit from a dict optimization as GetChildByPath
    # for some workloads, but profiling shows that it's not currently a
    # problem.
    if not 'children' in self._properties:
      return None

    for child in self._properties['children']:
      if not isinstance(child, PBXReferenceProxy):
        continue

      container_proxy = child._properties['remoteRef']
      if container_proxy._properties['remoteGlobalIDString'] == remote_object:
        return child

    return None

  def AddOrGetFileByPath(self, path, hierarchical):
    """Returns an existing or new file reference corresponding to path.

    If hierarchical is True, this method will create or use the necessary
    hierarchical group structure corresponding to path.  Otherwise, it will
    look in and create an item in the current group only.

    If an existing matching reference is found, it is returned, otherwise, a
    new one will be created, added to the correct group, and returned.

    If path identifies a directory by virtue of carrying a trailing slash,
    this method returns a PBXFileReference of "folder" type.  If path
    identifies a variant, by virtue of it identifying a file inside a
    directory with an ".lproj" extension, this method returns a
    PBXVariantGroup containing the variant named by path, and possibly other
    variants.  For all other paths, a "normal" PBXFileReference will be
    returned.
    """

    # Adding or getting a directory?  Directories end with a trailing slash.
    is_dir = False
    if path.endswith('/'):
      is_dir = True
    # normpath strips the trailing slash; restore it for directories.
    path = posixpath.normpath(path)
    if is_dir:
      path = path + '/'

    # Adding or getting a variant?  Variants are files inside directories
    # with an ".lproj" extension.  Xcode uses variants for localization.  For
    # a variant path/to/Language.lproj/MainMenu.nib, put a variant group named
    # MainMenu.nib inside path/to, and give it a variant named Language.  In
    # this example, grandparent would be set to path/to and parent_root would
    # be set to Language.
    variant_name = None
    parent = posixpath.dirname(path)
    grandparent = posixpath.dirname(parent)
    parent_basename = posixpath.basename(parent)
    (parent_root, parent_ext) = posixpath.splitext(parent_basename)
    if parent_ext == '.lproj':
      variant_name = parent_root
    if grandparent == '':
      grandparent = None

    # Putting a directory inside a variant group is not currently supported.
    assert not is_dir or variant_name is None

    path_split = path.split(posixpath.sep)
    if len(path_split) == 1 or \
       ((is_dir or variant_name != None) and len(path_split) == 2) or \
       not hierarchical:
      # The PBXFileReference or PBXVariantGroup will be added to or gotten
      # from this PBXGroup, no recursion necessary.
      if variant_name is None:
        # Add or get a PBXFileReference.
        file_ref = self.GetChildByPath(path)
        if file_ref != None:
          assert file_ref.__class__ == PBXFileReference
        else:
          file_ref = PBXFileReference({'path': path})
          self.AppendChild(file_ref)
      else:
        # Add or get a PBXVariantGroup.  The variant group name is the same
        # as the basename (MainMenu.nib in the example above).  grandparent
        # specifies the path to the variant group itself, and path_split[-2:]
        # is the path of the specific variant relative to its group.
        variant_group_name = posixpath.basename(path)
        variant_group_ref = self.AddOrGetVariantGroupByNameAndPath(
            variant_group_name, grandparent)
        variant_path = posixpath.sep.join(path_split[-2:])
        variant_ref = variant_group_ref.GetChildByPath(variant_path)
        if variant_ref != None:
          assert variant_ref.__class__ == PBXFileReference
        else:
          variant_ref = PBXFileReference({'name': variant_name,
                                          'path': variant_path})
          variant_group_ref.AppendChild(variant_ref)
        # The caller is interested in the variant group, not the specific
        # variant file.
        file_ref = variant_group_ref
      return file_ref
    else:
      # Hierarchical recursion.  Add or get a PBXGroup corresponding to the
      # outermost path component, and then recurse into it, chopping off that
      # path component.
      next_dir = path_split[0]
      group_ref = self.GetChildByPath(next_dir)
      if group_ref != None:
        assert group_ref.__class__ == PBXGroup
      else:
        group_ref = PBXGroup({'path': next_dir})
        self.AppendChild(group_ref)
      return group_ref.AddOrGetFileByPath(posixpath.sep.join(path_split[1:]),
                                          hierarchical)

  def AddOrGetVariantGroupByNameAndPath(self, name, path):
    """Returns an existing or new PBXVariantGroup for name and path.

    If a PBXVariantGroup identified by the name and path arguments is
    already present as a child of this object, it is returned.  Otherwise, a
    new PBXVariantGroup with the correct properties is created, added as a
    child, and returned.

    This method will generally be called by AddOrGetFileByPath, which knows
    when to create a variant group based on the structure of the pathnames
    passed to it.
    """
    key = (name, path)
    if key in self._variant_children_by_name_and_path:
      variant_group_ref = self._variant_children_by_name_and_path[key]
      assert variant_group_ref.__class__ == PBXVariantGroup
      return variant_group_ref

    variant_group_properties = {'name': name}
    if path != None:
      variant_group_properties['path'] = path
    variant_group_ref = PBXVariantGroup(variant_group_properties)
    self.AppendChild(variant_group_ref)

    return variant_group_ref

  def TakeOverOnlyChild(self, recurse=False):
    """If this PBXGroup has only one child and it's also a PBXGroup, take
    it over by making all of its children this object's children.

    This function will continue to take over only children when those
    children are groups.  If there are three PBXGroups representing a, b,
    and c, with c inside b and b inside a, and a and b have no other
    children, this will result in a taking over both b and c, forming a
    PBXGroup for a/b/c.

    If recurse is True, this function will recurse into children and ask
    them to collapse themselves by taking over only children as well.
    Assuming an example hierarchy with files at a/b/c/d1, a/b/c/d2, and
    a/b/c/d3/e/f (d1, d2, and f are files, the rest are groups), recursion
    will result in a group for a/b/c containing a group for d3/e.
    """
    # At this stage, check that child class types are PBXGroup exactly,
    # instead of using isinstance.  The only subclass of PBXGroup,
    # PBXVariantGroup, should not participate in reparenting in the same way:
    # reparenting by merging different object types would be wrong.
    while len(self._properties['children']) == 1 and \
          self._properties['children'][0].__class__ == PBXGroup:
      # Loop to take over the innermost only-child group possible.

      child = self._properties['children'][0]

      # Assume the child's properties, including its children.  Save a copy
      # of this object's old properties, because they'll still be needed.
      # This object retains its existing id and parent attributes.
      old_properties = self._properties
      self._properties = child._properties
      self._children_by_path = child._children_by_path

      if not 'sourceTree' in self._properties or \
         self._properties['sourceTree'] == '<group>':
        # The child was relative to its parent.  Fix up the path.  Note that
        # children with a sourceTree other than "<group>" are not relative to
        # their parents, so no path fix-up is needed in that case.
        if 'path' in old_properties:
          if 'path' in self._properties:
            # Both the original parent and child have paths set.
            self._properties['path'] = posixpath.join(old_properties['path'],
                                                      self._properties['path'])
          else:
            # Only the original parent has a path, use it.
            self._properties['path'] = old_properties['path']
        if 'sourceTree' in old_properties:
          # The original parent had a sourceTree set, use it.
          self._properties['sourceTree'] = old_properties['sourceTree']

      # If the original parent had a name set, keep using it.  If the
      # original parent didn't have a name but the child did, let the child's
      # name live on.  If the name attribute seems unnecessary now, get rid
      # of it.
      if 'name' in old_properties and old_properties['name'] != None and \
         old_properties['name'] != self.Name():
        self._properties['name'] = old_properties['name']
      if 'name' in self._properties and 'path' in self._properties and \
         self._properties['name'] == self._properties['path']:
        del self._properties['name']

      # Notify all children of their new parent.
      for child in self._properties['children']:
        child.parent = self

    # If asked to recurse, recurse.
    if recurse:
      for child in self._properties['children']:
        if child.__class__ == PBXGroup:
          child.TakeOverOnlyChild(recurse)

  def SortGroup(self):
    # Sorts this group's children in place (groups first, then by name),
    # then recurses into child groups.
    self._properties['children'] = \
        sorted(self._properties['children'], cmp=lambda x,y: x.Compare(y))

    # Recurse.
    for child in self._properties['children']:
      if isinstance(child, PBXGroup):
        child.SortGroup()
class XCFileLikeElement(XCHierarchicalElement):
  # Abstract base for objects that can be used as the fileRef property of
  # PBXBuildFile.

  def PathHashables(self):
    """Returns hashables identifying this object by its full path.

    A PBXBuildFile that refers to this object calls this method to obtain
    additional hashables specific to this XCFileLikeElement.  The object's
    own hashables are not specific and unique enough on their own (without
    access to the parent hashables), so the path-derived hashables of every
    ancestor XCHierarchicalElement are included as well, ordered from the
    root of the hierarchy down to this object.
    """
    hashables = []
    xche = self
    while xche != None and isinstance(xche, XCHierarchicalElement):
      # Prepend this level's hashables so that ancestors end up before
      # descendants in the final list.
      hashables[0:0] = xche.Hashables()
      xche = xche.parent
    return hashables
class XCContainerPortal(XCObject):
  # Abstract base for objects that can be used as the containerPortal property
  # of PBXContainerItemProxy.  Marker class only; it adds no behavior of its
  # own.
  pass
class XCRemoteObject(XCObject):
  # Abstract base for objects that can be used as the remoteGlobalIDString
  # property of PBXContainerItemProxy.  Marker class only; it adds no behavior
  # of its own.
  pass
class PBXFileReference(XCFileLikeElement, XCContainerPortal, XCRemoteObject):
  """A reference to a file or directory on disk.

  Mixes in XCFileLikeElement so it can serve as a PBXBuildFile's fileRef,
  XCContainerPortal so it can identify another project file in a
  PBXContainerItemProxy, and XCRemoteObject so it can be used as a
  remoteGlobalIDString.
  """
  _schema = XCFileLikeElement._schema.copy()
  _schema.update({
    'explicitFileType': [0, str, 0, 0],
    'lastKnownFileType': [0, str, 0, 0],
    'name': [0, str, 0, 0],
    'path': [0, str, 0, 1],
  })

  # Weird output rules for PBXFileReference.
  _should_print_single_line = True
  # super
  _encode_transforms = XCFileLikeElement._alternate_encode_transforms

  def __init__(self, properties=None, id=None, parent=None):
    """Initializes the reference.

    A trailing slash on "path" marks a directory: the slash is stripped and
    the type is recorded as 'folder'.  When neither explicitFileType nor
    lastKnownFileType was supplied, a type is inferred from the path's
    (lowercased) extension via extension_map below.
    """
    # super
    XCFileLikeElement.__init__(self, properties, id, parent)
    if 'path' in self._properties and self._properties['path'].endswith('/'):
      self._properties['path'] = self._properties['path'][:-1]
      is_dir = True
    else:
      is_dir = False

    if 'path' in self._properties and \
       not 'lastKnownFileType' in self._properties and \
       not 'explicitFileType' in self._properties:
      # TODO(mark): This is the replacement for a replacement for a quick hack.
      # It is no longer incredibly sucky, but this list needs to be extended.
      extension_map = {
        'a': 'archive.ar',
        'app': 'wrapper.application',
        'bdic': 'file',
        'bundle': 'wrapper.cfbundle',
        'c': 'sourcecode.c.c',
        'cc': 'sourcecode.cpp.cpp',
        'cpp': 'sourcecode.cpp.cpp',
        'css': 'text.css',
        'cxx': 'sourcecode.cpp.cpp',
        'dart': 'sourcecode',
        'dylib': 'compiled.mach-o.dylib',
        'framework': 'wrapper.framework',
        'gyp': 'sourcecode',
        'gypi': 'sourcecode',
        'h': 'sourcecode.c.h',
        'hxx': 'sourcecode.cpp.h',
        'icns': 'image.icns',
        'java': 'sourcecode.java',
        'js': 'sourcecode.javascript',
        'm': 'sourcecode.c.objc',
        'mm': 'sourcecode.cpp.objcpp',
        'nib': 'wrapper.nib',
        'o': 'compiled.mach-o.objfile',
        'pdf': 'image.pdf',
        'pl': 'text.script.perl',
        'plist': 'text.plist.xml',
        'pm': 'text.script.perl',
        'png': 'image.png',
        'py': 'text.script.python',
        'r': 'sourcecode.rez',
        'rez': 'sourcecode.rez',
        's': 'sourcecode.asm',
        'storyboard': 'file.storyboard',
        'strings': 'text.plist.strings',
        'swift': 'sourcecode.swift',
        'ttf': 'file',
        'xcassets': 'folder.assetcatalog',
        'xcconfig': 'text.xcconfig',
        'xcdatamodel': 'wrapper.xcdatamodel',
        'xcdatamodeld':'wrapper.xcdatamodeld',
        'xib': 'file.xib',
        'y': 'sourcecode.yacc',
      }

      # Extensions that set explicitFileType instead of lastKnownFileType.
      prop_map = {
        'dart': 'explicitFileType',
        'gyp': 'explicitFileType',
        'gypi': 'explicitFileType',
      }

      if is_dir:
        file_type = 'folder'
        prop_name = 'lastKnownFileType'
      else:
        basename = posixpath.basename(self._properties['path'])
        (root, ext) = posixpath.splitext(basename)
        # Check the map using a lowercase extension.
        # TODO(mark): Maybe it should try with the original case first and fall
        # back to lowercase, in case there are any instances where case
        # matters.  There currently aren't.
        if ext != '':
          ext = ext[1:].lower()

        # TODO(mark): "text" is the default value, but "file" is appropriate
        # for unrecognized files not containing text.  Xcode seems to choose
        # based on content.
        file_type = extension_map.get(ext, 'text')
        prop_name = prop_map.get(ext, 'lastKnownFileType')

      self._properties[prop_name] = file_type
class PBXVariantGroup(PBXGroup, XCFileLikeElement):
  """PBXVariantGroup is used by Xcode to represent localizations."""
  # No additions to the schema relative to PBXGroup.  XCFileLikeElement is
  # mixed in so that a variant group can serve as the fileRef property of a
  # PBXBuildFile.
  pass
# PBXReferenceProxy is also an XCFileLikeElement subclass. It is defined below
# because it uses PBXContainerItemProxy, defined below.
class XCBuildConfiguration(XCObject):
  """A single named build configuration: a dict of build settings plus an
  optional base .xcconfig file reference.
  """
  _schema = XCObject._schema.copy()
  _schema.update({
    'baseConfigurationReference': [0, PBXFileReference, 0, 0],
    'buildSettings': [0, dict, 0, 1, {}],
    'name': [0, str, 0, 1],
  })

  def HasBuildSetting(self, key):
    """Returns whether key is present in this configuration's settings."""
    return key in self._properties['buildSettings']

  def GetBuildSetting(self, key):
    """Returns the value of setting key; raises KeyError if it is unset."""
    return self._properties['buildSettings'][key]

  def SetBuildSetting(self, key, value):
    """Sets setting key to value, replacing any existing value."""
    # TODO(mark): If a list, copy?
    self._properties['buildSettings'][key] = value

  def AppendBuildSetting(self, key, value):
    """Appends value to the list-valued setting key, creating the list when
    the key is not yet present.
    """
    self._properties['buildSettings'].setdefault(key, []).append(value)

  def DelBuildSetting(self, key):
    """Removes setting key; a no-op when the key is absent."""
    self._properties['buildSettings'].pop(key, None)

  def SetBaseConfiguration(self, value):
    """Sets the base .xcconfig file reference for this configuration."""
    self._properties['baseConfigurationReference'] = value
class XCConfigurationList(XCObject):
  """An ordered list of XCBuildConfiguration objects with a named default,
  used as the buildConfigurationList of projects and targets.
  """
  # _configs is the default list of configurations.
  _configs = [ XCBuildConfiguration({'name': 'Debug'}),
               XCBuildConfiguration({'name': 'Release'}) ]

  _schema = XCObject._schema.copy()
  _schema.update({
    'buildConfigurations': [1, XCBuildConfiguration, 1, 1, _configs],
    'defaultConfigurationIsVisible': [0, int, 0, 1, 1],
    'defaultConfigurationName': [0, str, 0, 1, 'Release'],
  })

  def Name(self):
    return 'Build configuration list for %s "%s"' % \
           (self.parent.__class__.__name__, self.parent.Name())

  def ConfigurationNamed(self, name):
    """Convenience accessor to obtain an XCBuildConfiguration by name."""
    for config in self._properties['buildConfigurations']:
      if config._properties['name'] == name:
        return config
    raise KeyError(name)

  def DefaultConfiguration(self):
    """Convenience accessor to obtain the default XCBuildConfiguration."""
    return self.ConfigurationNamed(self._properties['defaultConfigurationName'])

  def HasBuildSetting(self, key):
    """Determines the state of a build setting across all child
    XCBuildConfiguration objects.

    Returns 1 when every child has key set to one common value, 0 when no
    child has key, and -1 when presence or values differ among children.
    """
    configs = self._properties['buildConfigurations']
    presence = [config.HasBuildSetting(key) for config in configs]
    if not presence:
      # No configurations at all: treat the setting as absent everywhere.
      return 0

    first_present = presence[0]
    for present in presence[1:]:
      if present != first_present:
        return -1
    if not first_present:
      return 0

    # The key is present in every child; the values must agree too.
    # Compare pairwise (not via a set) because values may be lists.
    reference = configs[0].GetBuildSetting(key)
    for config in configs[1:]:
      if config.GetBuildSetting(key) != reference:
        return -1
    return 1

  def GetBuildSetting(self, key):
    """Gets the build setting for key.

    All child XCConfiguration objects must have the same value set for the
    setting, or a ValueError will be raised.
    """
    # TODO(mark): This is wrong for build settings that are lists.  The list
    # contents should be compared (and a list copy returned?)
    result = None
    for config in self._properties['buildConfigurations']:
      candidate = config.GetBuildSetting(key)
      if result is None:
        result = candidate
      elif result != candidate:
        raise ValueError('Variant values for ' + key)
    return result

  def SetBuildSetting(self, key, value):
    """Sets the build setting for key to value in every child
    XCBuildConfiguration object.
    """
    for config in self._properties['buildConfigurations']:
      config.SetBuildSetting(key, value)

  def AppendBuildSetting(self, key, value):
    """Appends value to the list-valued build setting for key in every child
    XCBuildConfiguration object.
    """
    for config in self._properties['buildConfigurations']:
      config.AppendBuildSetting(key, value)

  def DelBuildSetting(self, key):
    """Deletes the build setting key from every child XCBuildConfiguration
    object.
    """
    for config in self._properties['buildConfigurations']:
      config.DelBuildSetting(key)

  def SetBaseConfiguration(self, value):
    """Sets the base configuration reference in every child
    XCBuildConfiguration object.
    """
    for config in self._properties['buildConfigurations']:
      config.SetBaseConfiguration(value)
class PBXBuildFile(XCObject):
  """Attaches an XCFileLikeElement, with optional per-file settings, to a
  build phase.
  """
  _schema = XCObject._schema.copy()
  _schema.update({
    'fileRef': [0, XCFileLikeElement, 0, 1],
    'settings': [0, str, 0, 0],  # hack, it's a dict
  })

  # Weird output rules for PBXBuildFile.
  _should_print_single_line = True
  _encode_transforms = XCObject._alternate_encode_transforms

  def Name(self):
    # Example: "main.cc in Sources"
    return '%s in %s' % (self._properties['fileRef'].Name(),
                         self.parent.Name())

  def Hashables(self):
    hashables = XCObject.Hashables(self)  # super

    # Name() alone is not unique enough: it is not a complete pathname.
    # PathHashables returns hashables unique enough that no two PBXBuildFiles
    # should wind up with the same set, unless someone adds the same file
    # multiple times to the same target, which would be invalid anyway.
    hashables += self._properties['fileRef'].PathHashables()
    return hashables
class XCBuildPhase(XCObject):
  """Abstract base for build phase classes.  Not represented in a project
  file.

  Attributes:
    _files_by_path: A dict mapping each path of a child in the files list by
      path (keys) to the corresponding PBXBuildFile children (values).
    _files_by_xcfilelikeelement: A dict mapping each XCFileLikeElement (keys)
      to the corresponding PBXBuildFile children (values).
  """

  # TODO(mark): Some build phase types, like PBXShellScriptBuildPhase, don't
  # actually have a "files" list.  XCBuildPhase should not have "files" but
  # another abstract subclass of it should provide this, and concrete build
  # phase types that do have "files" lists should be derived from that new
  # abstract subclass.  XCBuildPhase should only provide buildActionMask and
  # runOnlyForDeploymentPostprocessing, and not files or the various
  # file-related methods and attributes.

  _schema = XCObject._schema.copy()
  _schema.update({
    'buildActionMask': [0, int, 0, 1, 0x7fffffff],
    'files': [1, PBXBuildFile, 1, 1, []],
    'runOnlyForDeploymentPostprocessing': [0, int, 0, 1, 0],
  })

  def __init__(self, properties=None, id=None, parent=None):
    """Initializes the phase and indexes any pre-supplied "files" children
    into the two lookup dicts described in the class docstring.
    """
    # super
    XCObject.__init__(self, properties, id, parent)

    self._files_by_path = {}
    self._files_by_xcfilelikeelement = {}
    for pbxbuildfile in self._properties.get('files', []):
      self._AddBuildFileToDicts(pbxbuildfile)

  def FileGroup(self, path):
    # Subclasses must override this by returning a two-element tuple.  The
    # first item in the tuple should be the PBXGroup to which "path" should be
    # added, either as a child or deeper descendant.  The second item should
    # be a boolean indicating whether files should be added into hierarchical
    # groups or one single flat group.
    raise NotImplementedError(
          self.__class__.__name__ + ' must implement FileGroup')

  def _AddPathToDict(self, pbxbuildfile, path):
    """Adds path to the dict tracking paths belonging to this build phase.

    If the path is already a member of this build phase, raises an exception.
    """
    if path in self._files_by_path:
      raise ValueError('Found multiple build files with path ' + path)
    self._files_by_path[path] = pbxbuildfile

  def _AddBuildFileToDicts(self, pbxbuildfile, path=None):
    """Maintains the _files_by_path and _files_by_xcfilelikeelement dicts.

    If path is specified, then it is the path that is being added to the
    phase, and pbxbuildfile must contain either a PBXFileReference directly
    referencing that path, or it must contain a PBXVariantGroup that itself
    contains a PBXFileReference referencing the path.

    If path is not specified, either the PBXFileReference's path or the paths
    of all children of the PBXVariantGroup are taken as being added to the
    phase.

    If the path is already present in the phase, raises an exception.

    If the PBXFileReference or PBXVariantGroup referenced by pbxbuildfile
    are already present in the phase, referenced by a different PBXBuildFile
    object, raises an exception.  This does not raise an exception when
    a PBXFileReference or PBXVariantGroup reappear and are referenced by the
    same PBXBuildFile that has already introduced them, because in the case
    of PBXVariantGroup objects, they may correspond to multiple paths that are
    not all added simultaneously.  When this situation occurs, the path needs
    to be added to _files_by_path, but nothing needs to change in
    _files_by_xcfilelikeelement, and the caller should have avoided adding
    the PBXBuildFile if it is already present in the list of children.
    """
    xcfilelikeelement = pbxbuildfile._properties['fileRef']

    paths = []
    if path != None:
      # It's best when the caller provides the path.
      if isinstance(xcfilelikeelement, PBXVariantGroup):
        paths.append(path)
    else:
      # If the caller didn't provide a path, there can be either multiple
      # paths (PBXVariantGroup) or one.
      if isinstance(xcfilelikeelement, PBXVariantGroup):
        for variant in xcfilelikeelement._properties['children']:
          paths.append(variant.FullPath())
      else:
        paths.append(xcfilelikeelement.FullPath())

    # Add the paths first, because if something's going to raise, the
    # messages provided by _AddPathToDict are more useful owing to its
    # having access to a real pathname and not just an object's Name().
    for a_path in paths:
      self._AddPathToDict(pbxbuildfile, a_path)

    # If another PBXBuildFile references this XCFileLikeElement, there's a
    # problem.
    if xcfilelikeelement in self._files_by_xcfilelikeelement and \
       self._files_by_xcfilelikeelement[xcfilelikeelement] != pbxbuildfile:
      raise ValueError('Found multiple build files for ' + \
                       xcfilelikeelement.Name())
    self._files_by_xcfilelikeelement[xcfilelikeelement] = pbxbuildfile

  def AppendBuildFile(self, pbxbuildfile, path=None):
    """Adds pbxbuildfile to this phase's "files" list and lookup dicts.

    Callers should use this instead of calling
    AppendProperty('files', pbxbuildfile) directly because this function
    maintains the object's dicts.  Better yet, callers can just call AddFile
    with a pathname and not worry about building their own PBXBuildFile
    objects.
    """
    self.AppendProperty('files', pbxbuildfile)
    self._AddBuildFileToDicts(pbxbuildfile, path)

  def AddFile(self, path, settings=None):
    """Adds the file at path to this phase, creating a new PBXBuildFile (with
    optional per-file settings) or reusing one that already covers the path's
    PBXVariantGroup.
    """
    (file_group, hierarchical) = self.FileGroup(path)
    file_ref = file_group.AddOrGetFileByPath(path, hierarchical)

    if file_ref in self._files_by_xcfilelikeelement and \
       isinstance(file_ref, PBXVariantGroup):
      # There's already a PBXBuildFile in this phase corresponding to the
      # PBXVariantGroup.  path just provides a new variant that belongs to
      # the group.  Add the path to the dict.
      pbxbuildfile = self._files_by_xcfilelikeelement[file_ref]
      self._AddBuildFileToDicts(pbxbuildfile, path)
    else:
      # Add a new PBXBuildFile to get file_ref into the phase.
      if settings is None:
        pbxbuildfile = PBXBuildFile({'fileRef': file_ref})
      else:
        pbxbuildfile = PBXBuildFile({'fileRef': file_ref, 'settings': settings})
      self.AppendBuildFile(pbxbuildfile, path)
class PBXHeadersBuildPhase(XCBuildPhase):
  """The "Headers" build phase.  No schema additions beyond XCBuildPhase."""

  def Name(self):
    return 'Headers'

  def FileGroup(self, path):
    project = self.PBXProjectAncestor()
    return project.RootGroupForPath(path)
class PBXResourcesBuildPhase(XCBuildPhase):
  """The "Resources" build phase.  No schema additions beyond XCBuildPhase."""

  def Name(self):
    return 'Resources'

  def FileGroup(self, path):
    project = self.PBXProjectAncestor()
    return project.RootGroupForPath(path)
class PBXSourcesBuildPhase(XCBuildPhase):
  """The "Sources" build phase.  No schema additions beyond XCBuildPhase."""

  def Name(self):
    return 'Sources'

  def FileGroup(self, path):
    project = self.PBXProjectAncestor()
    return project.RootGroupForPath(path)
class PBXFrameworksBuildPhase(XCBuildPhase):
  """The "Frameworks" build phase.  No schema additions beyond XCBuildPhase."""

  def Name(self):
    return 'Frameworks'

  def FileGroup(self, path):
    extension = posixpath.splitext(path)[1]
    if extension != '':
      extension = extension[1:].lower()
    if extension == 'o':
      # .o files are added to Xcode Frameworks phases, but conceptually aren't
      # frameworks; they're more like sources or intermediates.  Redirect them
      # to show up in one of those other groups.
      return self.PBXProjectAncestor().RootGroupForPath(path)
    return (self.PBXProjectAncestor().FrameworksGroup(), False)
class PBXShellScriptBuildPhase(XCBuildPhase):
  """A build phase that runs a shell script."""
  _schema = XCBuildPhase._schema.copy()
  _schema.update({
    'inputPaths': [1, str, 0, 1, []],
    'name': [0, str, 0, 0],
    'outputPaths': [1, str, 0, 1, []],
    'shellPath': [0, str, 0, 1, '/bin/sh'],
    'shellScript': [0, str, 0, 1],
    'showEnvVarsInLog': [0, int, 0, 0],
  })

  def Name(self):
    # A custom name, when supplied, takes precedence over the generic one.
    return self._properties.get('name', 'ShellScript')
class PBXCopyFilesBuildPhase(XCBuildPhase):
  """A build phase that copies files to a destination within the product."""
  _schema = XCBuildPhase._schema.copy()
  _schema.update({
    'dstPath': [0, str, 0, 1],
    'dstSubfolderSpec': [0, int, 0, 1],
    'name': [0, str, 0, 0],
  })

  # path_tree_re matches "$(DIR)/path" or just "$(DIR)".  Match group 1 is
  # "DIR", match group 3 is "path" or None.
  path_tree_re = re.compile('^\\$\\((.*)\\)(/(.*)|)$')

  # path_tree_to_subfolder maps names of Xcode variables to the associated
  # dstSubfolderSpec property value used in a PBXCopyFilesBuildPhase object.
  path_tree_to_subfolder = {
    'BUILT_PRODUCTS_DIR': 16,  # Products Directory
    # Other types that can be chosen via the Xcode UI.
    # TODO(mark): Map Xcode variable names to these.
    # : 1,  # Wrapper
    # : 6,  # Executables: 6
    # : 7,  # Resources
    # : 15,  # Java Resources
    # : 10,  # Frameworks
    # : 11,  # Shared Frameworks
    # : 12,  # Shared Support
    # : 13,  # PlugIns
  }

  def Name(self):
    # A custom name, when supplied, takes precedence over the generic one.
    return self._properties.get('name', 'CopyFiles')

  def FileGroup(self, path):
    return self.PBXProjectAncestor().RootGroupForPath(path)

  def SetDestination(self, path):
    """Sets the dstSubfolderSpec and dstPath properties from path.

    path may be specified in the same notation used for
    XCHierarchicalElements, specifically, "$(DIR)/path".
    """
    match = self.path_tree_re.search(path)
    if match:
      # Everything else needs to be relative to an Xcode variable.
      variable = match.group(1)
      remainder = match.group(3)
      if variable in self.path_tree_to_subfolder:
        subfolder = self.path_tree_to_subfolder[variable]
        if remainder is None:
          remainder = ''
      else:
        # The path starts with an unrecognized Xcode variable name like
        # $(SRCROOT).  Xcode will still handle this as an "absolute path"
        # that starts with the variable.
        subfolder = 0
        remainder = path
    elif path.startswith('/'):
      # Special case.  Absolute paths are in dstSubfolderSpec 0.
      subfolder = 0
      remainder = path[1:]
    else:
      raise ValueError('Can\'t use path %s in a %s' % \
                       (path, self.__class__.__name__))

    self._properties['dstPath'] = remainder
    self._properties['dstSubfolderSpec'] = subfolder
class PBXBuildRule(XCObject):
  """A custom rule describing how Xcode should build files of a given type."""
  _schema = XCObject._schema.copy()
  _schema.update({
    'compilerSpec': [0, str, 0, 1],
    'filePatterns': [0, str, 0, 0],
    'fileType': [0, str, 0, 1],
    'isEditable': [0, int, 0, 1, 1],
    'outputFiles': [1, str, 0, 1, []],
    'script': [0, str, 0, 0],
  })

  def Name(self):
    # Not very inspired, but it's what Xcode uses.
    return self.__class__.__name__

  def Hashables(self):
    hashables = XCObject.Hashables(self)  # super

    # Mix in the properties that distinguish one rule from another.
    hashables.append(self._properties['fileType'])
    if 'filePatterns' in self._properties:
      hashables.append(self._properties['filePatterns'])
    return hashables
class PBXContainerItemProxy(XCObject):
  """A proxy standing in for an object in this or another project file.

  When referencing an item in this project file, containerPortal is the
  PBXProject root object of this project file.  When referencing an item in
  another project file, containerPortal is a PBXFileReference identifying
  the other project file.

  When serving as a proxy to an XCTarget (in this project file or another),
  proxyType is 1.  When serving as a proxy to a PBXFileReference (in another
  project file), proxyType is 2.  Type 2 is used for references to the
  products of the other project file's targets.

  Xcode is weird about remoteGlobalIDString.  Usually, it's printed without
  a comment, indicating that it's tracked internally simply as a string, but
  sometimes it's printed with a comment (usually when the object is initially
  created), indicating that it's tracked as a project file object at least
  sometimes.  This module always tracks it as an object, but contains a hack
  to prevent it from printing the comment in the project file output.  See
  _XCKVPrint.
  """
  _schema = XCObject._schema.copy()
  _schema.update({
    'containerPortal': [0, XCContainerPortal, 0, 1],
    'proxyType': [0, int, 0, 1],
    'remoteGlobalIDString': [0, XCRemoteObject, 0, 1],
    'remoteInfo': [0, str, 0, 1],
  })

  def __repr__(self):
    portal_name = self._properties['containerPortal'].Name()
    name = '%s.gyp:%s' % (portal_name, self._properties['remoteInfo'])
    return '<%s %r at 0x%x>' % (self.__class__.__name__, name, id(self))

  def Name(self):
    # Admittedly not the best name, but it's what Xcode uses.
    return self.__class__.__name__

  def Hashables(self):
    hashables = XCObject.Hashables(self)  # super

    # Mix in the hashables of the weakly-referenced objects.
    hashables += self._properties['containerPortal'].Hashables()
    hashables += self._properties['remoteGlobalIDString'].Hashables()
    return hashables
class PBXTargetDependency(XCObject):
  """Expresses that one target depends on another.

  The "target" property accepts an XCTarget object, and obviously not
  NoneType.  But XCTarget is defined below, so it can't be put into the
  schema yet.  The definition of PBXTargetDependency can't be moved below
  XCTarget because XCTarget's own schema references PBXTargetDependency.
  Python doesn't deal well with this circular relationship and doesn't have
  a real way to do forward declarations, so the type of the "target"
  property is patched below, after XCTarget is defined.

  At least one of "name" and "target" is required.
  """
  _schema = XCObject._schema.copy()
  _schema.update({
    'name': [0, str, 0, 0],
    'target': [0, None.__class__, 0, 0],
    'targetProxy': [0, PBXContainerItemProxy, 1, 1],
  })

  def __repr__(self):
    name = self._properties.get('name')
    if not name:
      name = self._properties['target'].Name()
    return '<%s %r at 0x%x>' % (self.__class__.__name__, name, id(self))

  def Name(self):
    # Admittedly not the best name, but it's what Xcode uses.
    return self.__class__.__name__

  def Hashables(self):
    hashables = XCObject.Hashables(self)  # super

    # Mix in the hashables of the weakly-referenced proxy object.
    hashables += self._properties['targetProxy'].Hashables()
    return hashables
class PBXReferenceProxy(XCFileLikeElement):
  """A file-like element standing in for an item in another project file,
  reached through the PBXContainerItemProxy in remoteRef.
  """
  _schema = XCFileLikeElement._schema.copy()
  _schema.update({
    'fileType': [0, str, 0, 1],
    'path': [0, str, 0, 1],
    'remoteRef': [0, PBXContainerItemProxy, 1, 1],
  })
class XCTarget(XCRemoteObject):
  """Abstract base for buildable targets.

  An XCTarget is really just an XCObject; the XCRemoteObject base exists
  only to allow PBXProject to be used in the remoteGlobalIDString property
  of PBXContainerItemProxy.

  Setting a "name" property at instantiation may also affect "productName",
  which may in turn affect the "PRODUCT_NAME" build setting in children of
  "buildConfigurationList".  See __init__ below.
  """
  _schema = XCRemoteObject._schema.copy()
  _schema.update({
    'buildConfigurationList': [0, XCConfigurationList, 1, 1,
                               XCConfigurationList()],
    'buildPhases': [1, XCBuildPhase, 1, 1, []],
    'dependencies': [1, PBXTargetDependency, 1, 1, []],
    'name': [0, str, 0, 1],
    'productName': [0, str, 0, 1],
  })

  def __init__(self, properties=None, id=None, parent=None,
               force_outdir=None, force_prefix=None, force_extension=None):
    """Initializes the target, defaulting "productName" from "name" and
    seeding the PRODUCT_NAME build setting when no configuration sets it.

    The force_* parameters are unused here; they are part of the signature
    for compatibility with subclass constructors (see PBXNativeTarget).
    """
    # super
    XCRemoteObject.__init__(self, properties, id, parent)

    # Set up additional defaults not expressed in the schema.  If a "name"
    # property was supplied, set "productName" if it is not present.  Also set
    # the "PRODUCT_NAME" build setting in each configuration, but only if
    # the setting is not present in any build configuration.
    if 'name' in self._properties:
      if not 'productName' in self._properties:
        self.SetProperty('productName', self._properties['name'])

    if 'productName' in self._properties:
      if 'buildConfigurationList' in self._properties:
        configs = self._properties['buildConfigurationList']
        # HasBuildSetting returning 0 means no configuration sets it.
        if configs.HasBuildSetting('PRODUCT_NAME') == 0:
          configs.SetBuildSetting('PRODUCT_NAME',
                                  self._properties['productName'])

  def AddDependency(self, other):
    """Records a dependency of this target on XCTarget "other", which may
    live in this project file or in a different one.
    """
    pbxproject = self.PBXProjectAncestor()
    other_pbxproject = other.PBXProjectAncestor()
    if pbxproject == other_pbxproject:
      # Add a dependency to another target in the same project file.
      container = PBXContainerItemProxy({'containerPortal': pbxproject,
                                         'proxyType': 1,
                                         'remoteGlobalIDString': other,
                                         'remoteInfo': other.Name()})
      dependency = PBXTargetDependency({'target': other,
                                        'targetProxy': container})
      self.AppendProperty('dependencies', dependency)
    else:
      # Add a dependency to a target in a different project file.
      other_project_ref = \
          pbxproject.AddOrGetProjectReference(other_pbxproject)[1]
      container = PBXContainerItemProxy({
            'containerPortal': other_project_ref,
            'proxyType': 1,
            'remoteGlobalIDString': other,
            'remoteInfo': other.Name(),
          })
      dependency = PBXTargetDependency({'name': other.Name(),
                                        'targetProxy': container})
      self.AppendProperty('dependencies', dependency)

  # Proxy all of these through to the build configuration list.
  def ConfigurationNamed(self, name):
    return self._properties['buildConfigurationList'].ConfigurationNamed(name)

  def DefaultConfiguration(self):
    return self._properties['buildConfigurationList'].DefaultConfiguration()

  def HasBuildSetting(self, key):
    return self._properties['buildConfigurationList'].HasBuildSetting(key)

  def GetBuildSetting(self, key):
    return self._properties['buildConfigurationList'].GetBuildSetting(key)

  def SetBuildSetting(self, key, value):
    return self._properties['buildConfigurationList'].SetBuildSetting(key, \
                                                                      value)

  def AppendBuildSetting(self, key, value):
    return self._properties['buildConfigurationList'].AppendBuildSetting(key, \
                                                                         value)

  def DelBuildSetting(self, key):
    return self._properties['buildConfigurationList'].DelBuildSetting(key)
# Redefine the type of the "target" property.  See PBXTargetDependency._schema
# above.  XCTarget is now defined, so the None.__class__ placeholder used to
# work around the forward reference can be replaced with the real type.
PBXTargetDependency._schema['target'][1] = XCTarget
class PBXNativeTarget(XCTarget):
  """A target that produces a product file (application, library, bundle...).

  buildPhases is overridden in the schema to be able to set defaults.

  NOTE: Contrary to most objects, it is advisable to set parent when
  constructing PBXNativeTarget.  A parent of an XCTarget must be a PBXProject
  object.  A parent reference is required for a PBXNativeTarget during
  construction to be able to set up the target defaults for productReference,
  because a PBXBuildFile object must be created for the target and it must
  be added to the PBXProject's mainGroup hierarchy.
  """
  _schema = XCTarget._schema.copy()
  _schema.update({
    'buildPhases': [1, XCBuildPhase, 1, 1,
                    [PBXSourcesBuildPhase(), PBXFrameworksBuildPhase()]],
    'buildRules': [1, PBXBuildRule, 1, 1, []],
    'productReference': [0, PBXFileReference, 0, 1],
    'productType': [0, str, 0, 1],
  })

  # Mapping from Xcode product-types to settings.  The settings are:
  #  filetype : used for explicitFileType in the project file
  #  prefix : the prefix for the file name
  #  suffix : the suffix for the file name
  _product_filetypes = {
    'com.apple.product-type.application': ['wrapper.application',
                                           '', '.app'],
    'com.apple.product-type.application.watchapp': ['wrapper.application',
                                                    '', '.app'],
    'com.apple.product-type.watchkit-extension': ['wrapper.app-extension',
                                                  '', '.appex'],
    'com.apple.product-type.app-extension': ['wrapper.app-extension',
                                             '', '.appex'],
    'com.apple.product-type.bundle': ['wrapper.cfbundle',
                                      '', '.bundle'],
    'com.apple.product-type.framework': ['wrapper.framework',
                                         '', '.framework'],
    'com.apple.product-type.library.dynamic': ['compiled.mach-o.dylib',
                                               'lib', '.dylib'],
    'com.apple.product-type.library.static': ['archive.ar',
                                              'lib', '.a'],
    'com.apple.product-type.tool': ['compiled.mach-o.executable',
                                    '', ''],
    'com.apple.product-type.bundle.unit-test': ['wrapper.cfbundle',
                                                '', '.xctest'],
    'com.googlecode.gyp.xcode.bundle': ['compiled.mach-o.dylib',
                                        '', '.so'],
  }
def __init__(self, properties=None, id=None, parent=None,
force_outdir=None, force_prefix=None, force_extension=None):
# super
XCTarget.__init__(self, properties, id, parent)
if 'productName' in self._properties and \
'productType' in self._properties and \
not 'productReference' in self._properties and \
self._properties['productType'] in self._product_filetypes:
products_group = None
pbxproject = self.PBXProjectAncestor()
if pbxproject != None:
products_group = pbxproject.ProductsGroup()
if products_group != None:
(filetype, prefix, suffix) = \
self._product_filetypes[self._properties['productType']]
# Xcode does not have a distinct type for loadable modules that are
# pure BSD targets (not in a bundle wrapper). GYP allows such modules
# to be specified by setting a target type to loadable_module without
# having mac_bundle set. These are mapped to the pseudo-product type
# com.googlecode.gyp.xcode.bundle.
#
# By picking up this special type and converting it to a dynamic
# library (com.apple.product-type.library.dynamic) with fix-ups,
# single-file loadable modules can be produced.
#
# MACH_O_TYPE is changed to mh_bundle to produce the proper file type
# (as opposed to mh_dylib). In order for linking to succeed,
# DYLIB_CURRENT_VERSION and DYLIB_COMPATIBILITY_VERSION must be
# cleared. They are meaningless for type mh_bundle.
#
# Finally, the .so extension is forcibly applied over the default
# (.dylib), unless another forced extension is already selected.
# .dylib is plainly wrong, and .bundle is used by loadable_modules in
# bundle wrappers (com.apple.product-type.bundle). .so seems an odd
# choice because it's used as the extension on many other systems that
# don't distinguish between linkable shared libraries and non-linkable
# loadable modules, but there's precedent: Python loadable modules on
# Mac OS X use an .so extension.
if self._properties['productType'] == 'com.googlecode.gyp.xcode.bundle':
self._properties['productType'] = \
'com.apple.product-type.library.dynamic'
self.SetBuildSetting('MACH_O_TYPE', 'mh_bundle')
self.SetBuildSetting('DYLIB_CURRENT_VERSION', '')
self.SetBuildSetting('DYLIB_COMPATIBILITY_VERSION', '')
if force_extension is None:
force_extension = suffix[1:]
if self._properties['productType'] == \
'com.apple.product-type-bundle.unit.test':
if force_extension is None:
force_extension = suffix[1:]
if force_extension is not None:
# If it's a wrapper (bundle), set WRAPPER_EXTENSION.
# Extension override.
suffix = '.' + force_extension
if filetype.startswith('wrapper.'):
self.SetBuildSetting('WRAPPER_EXTENSION', force_extension)
else:
self.SetBuildSetting('EXECUTABLE_EXTENSION', force_extension)
if filetype.startswith('compiled.mach-o.executable'):
product_name = self._properties['productName']
product_name += suffix
suffix = ''
self.SetProperty('productName', product_name)
self.SetBuildSetting('PRODUCT_NAME', product_name)
# Xcode handles most prefixes based on the target type, however there
# are exceptions. If a "BSD Dynamic Library" target is added in the
# Xcode UI, Xcode sets EXECUTABLE_PREFIX. This check duplicates that
# behavior.
if force_prefix is not None:
prefix = force_prefix
if filetype.startswith('wrapper.'):
self.SetBuildSetting('WRAPPER_PREFIX', prefix)
else:
self.SetBuildSetting('EXECUTABLE_PREFIX', prefix)
if force_outdir is not None:
self.SetBuildSetting('TARGET_BUILD_DIR', force_outdir)
# TODO(tvl): Remove the below hack.
# http://code.google.com/p/gyp/issues/detail?id=122
# Some targets include the prefix in the target_name. These targets
# really should just add a product_name setting that doesn't include
# the prefix. For example:
# target_name = 'libevent', product_name = 'event'
# This check cleans up for them.
product_name = self._properties['productName']
prefix_len = len(prefix)
if prefix_len and (product_name[:prefix_len] == prefix):
product_name = product_name[prefix_len:]
self.SetProperty('productName', product_name)
self.SetBuildSetting('PRODUCT_NAME', product_name)
ref_props = {
'explicitFileType': filetype,
'includeInIndex': 0,
'path': prefix + product_name + suffix,
'sourceTree': 'BUILT_PRODUCTS_DIR',
}
file_ref = PBXFileReference(ref_props)
products_group.AppendChild(file_ref)
self.SetProperty('productReference', file_ref)
def GetBuildPhaseByType(self, type):
if not 'buildPhases' in self._properties:
return None
the_phase = None
for phase in self._properties['buildPhases']:
if isinstance(phase, type):
# Some phases may be present in multiples in a well-formed project file,
# but phases like PBXSourcesBuildPhase may only be present singly, and
# this function is intended as an aid to GetBuildPhaseByType. Loop
# over the entire list of phases and assert if more than one of the
# desired type is found.
assert the_phase is None
the_phase = phase
return the_phase
def HeadersPhase(self):
headers_phase = self.GetBuildPhaseByType(PBXHeadersBuildPhase)
if headers_phase is None:
headers_phase = PBXHeadersBuildPhase()
# The headers phase should come before the resources, sources, and
# frameworks phases, if any.
insert_at = len(self._properties['buildPhases'])
for index in xrange(0, len(self._properties['buildPhases'])):
phase = self._properties['buildPhases'][index]
if isinstance(phase, PBXResourcesBuildPhase) or \
isinstance(phase, PBXSourcesBuildPhase) or \
isinstance(phase, PBXFrameworksBuildPhase):
insert_at = index
break
self._properties['buildPhases'].insert(insert_at, headers_phase)
headers_phase.parent = self
return headers_phase
def ResourcesPhase(self):
  """Return this target's PBXResourcesBuildPhase, creating one if needed.

  A newly-created resources phase is inserted ahead of any sources or
  frameworks phases already present.
  """
  phase = self.GetBuildPhaseByType(PBXResourcesBuildPhase)
  if phase is not None:
    return phase

  phase = PBXResourcesBuildPhase()
  # Default to appending, but slide in front of the first phase class that
  # must come after resources.
  position = len(self._properties['buildPhases'])
  for slot, existing in enumerate(self._properties['buildPhases']):
    if isinstance(existing, (PBXSourcesBuildPhase, PBXFrameworksBuildPhase)):
      position = slot
      break
  self._properties['buildPhases'].insert(position, phase)
  phase.parent = self
  return phase
def SourcesPhase(self):
  """Return this target's PBXSourcesBuildPhase, creating and appending one
  if none exists yet."""
  phase = self.GetBuildPhaseByType(PBXSourcesBuildPhase)
  if phase is None:
    phase = PBXSourcesBuildPhase()
    self.AppendProperty('buildPhases', phase)
  return phase
def FrameworksPhase(self):
  """Return this target's PBXFrameworksBuildPhase, creating and appending
  one if none exists yet."""
  phase = self.GetBuildPhaseByType(PBXFrameworksBuildPhase)
  if phase is None:
    phase = PBXFrameworksBuildPhase()
    self.AppendProperty('buildPhases', phase)
  return phase
def AddDependency(self, other):
  """Registers |other| as a dependency and, when appropriate, also links
  its product file into this target's frameworks phase.
  """
  # super
  XCTarget.AddDependency(self, other)

  static_library_type = 'com.apple.product-type.library.static'
  shared_library_type = 'com.apple.product-type.library.dynamic'
  framework_type = 'com.apple.product-type.framework'
  # Link against other's product only when: other is a native target, this
  # target has a productType that is not a static library, and other
  # produces either a static library, or a dynamic library/framework whose
  # MACH_O_TYPE is not mh_bundle.
  if isinstance(other, PBXNativeTarget) and \
     'productType' in self._properties and \
     self._properties['productType'] != static_library_type and \
     'productType' in other._properties and \
     (other._properties['productType'] == static_library_type or \
      ((other._properties['productType'] == shared_library_type or \
        other._properties['productType'] == framework_type) and \
       ((not other.HasBuildSetting('MACH_O_TYPE')) or
        other.GetBuildSetting('MACH_O_TYPE') != 'mh_bundle'))):

    file_ref = other.GetProperty('productReference')

    pbxproject = self.PBXProjectAncestor()
    other_pbxproject = other.PBXProjectAncestor()
    if pbxproject != other_pbxproject:
      # Cross-project dependency: link against the proxy for other's
      # product in this project's reference group rather than the foreign
      # PBXFileReference itself.
      other_project_product_group = \
          pbxproject.AddOrGetProjectReference(other_pbxproject)[0]
      file_ref = other_project_product_group.GetChildByRemoteObject(file_ref)

    self.FrameworksPhase().AppendProperty('files',
                                          PBXBuildFile({'fileRef': file_ref}))
class PBXAggregateTarget(XCTarget):
  """An XCTarget subclass with no additional schema or behavior; aggregate
  targets exist to group dependencies."""
  pass
class PBXProject(XCContainerPortal):
  # A PBXProject is really just an XCObject, the XCContainerPortal thing is
  # just to allow PBXProject to be used in the containerPortal property of
  # PBXContainerItemProxy.
  """
  Attributes:
    path: "sample.xcodeproj". TODO(mark) Document me!
    _other_pbxprojects: A dictionary, keyed by other PBXProject objects. Each
                        value is a reference to the dict in the
                        projectReferences list associated with the keyed
                        PBXProject.
  """

  # Each schema row appears to be [is_list, property_type, is_strong,
  # is_required, default] -- TODO confirm against XCObject._schema.
  _schema = XCContainerPortal._schema.copy()
  _schema.update({
    'attributes':             [0, dict, 0, 0],
    'buildConfigurationList': [0, XCConfigurationList, 1, 1,
                               XCConfigurationList()],
    'compatibilityVersion':   [0, str, 0, 1, 'Xcode 3.2'],
    'hasScannedForEncodings': [0, int, 0, 1, 1],
    'mainGroup':              [0, PBXGroup, 1, 1, PBXGroup()],
    'projectDirPath':         [0, str, 0, 1, ''],
    'projectReferences':      [1, dict, 0, 0],
    'projectRoot':            [0, str, 0, 1, ''],
    'targets':                [1, XCTarget, 1, 1, []],
  })
def __init__(self, properties=None, id=None, parent=None, path=None):
  # Record the project file's path and the cross-project reference map
  # before delegating to the common XCObject initialization.
  self.path = path
  self._other_pbxprojects = {}
  # super
  return XCContainerPortal.__init__(self, properties, id, parent)
def Name(self):
  """Return the project's name: the basename of its path without any
  .xcodeproj suffix."""
  suffix = '.xcodeproj'
  name = self.path
  if name.endswith(suffix):
    name = name[:-len(suffix)]
  return posixpath.basename(name)
def Path(self):
  """Returns the path this project file was created with."""
  return self.path

def Comment(self):
  # Annotation string for this object; presumably emitted next to its ID
  # when the project file is printed -- see the base class's printing code.
  return 'Project object'
def Children(self):
  """Return strongly-owned children, including the ProductGroup objects
  held inside projectReferences dicts.

  The schema machinery cannot see into the dicts in the projectReferences
  list, so those groups are appended by hand here.
  """
  # super
  result = XCContainerPortal.Children(self)

  for reference in self._properties.get('projectReferences', []):
    result.append(reference['ProductGroup'])

  return result
def PBXProjectAncestor(self):
  # A PBXProject is its own project ancestor; this terminates the upward
  # search performed by other XCObjects.
  return self
def _GroupByName(self, name):
  """Fetch the direct child group of mainGroup called |name|, creating the
  mainGroup and/or the named group on demand."""
  main_group = self._properties.get('mainGroup')
  if main_group is None:
    main_group = PBXGroup()
    self.SetProperty('mainGroup', main_group)

  existing = main_group.GetChildByName(name)
  if existing is not None:
    return existing

  created = PBXGroup({'name': name})
  main_group.AppendChild(created)
  return created
# Xcode's own project templates create "Source" and "Products" groups by
# default; these accessors mirror that layout.
def SourceGroup(self):
  """Top-level group holding checked-in source files."""
  return self._GroupByName('Source')

def ProductsGroup(self):
  """Top-level group holding the products built by this project."""
  return self._GroupByName('Products')
# IntermediatesGroup is used to collect source-like files that are generated
# by rules or script phases and are placed in intermediate directories such
# as DerivedSources.
def IntermediatesGroup(self):
  """Returns the 'Intermediates' child group of mainGroup, creating it on
  first use."""
  return self._GroupByName('Intermediates')
# FrameworksGroup and ProjectsGroup are top-level groups used to collect
# frameworks and projects.
def FrameworksGroup(self):
  """Returns the 'Frameworks' child group of mainGroup."""
  return self._GroupByName('Frameworks')

def ProjectsGroup(self):
  """Returns the 'Projects' child group of mainGroup."""
  return self._GroupByName('Projects')
def RootGroupForPath(self, path):
  """Choose the top-level group that should receive |path|.

  Paths rooted in a derived/intermediate build directory go to the
  Intermediates group; everything else goes to the Source group.

  Returns:
    A (group, hierarchical) tuple: the PBXGroup to add to, and whether
    entries under it should be organized hierarchically (True) or as a
    single flat list (False).
  """
  # TODO(mark): make this a class variable and bind to self on call?
  # Also, this list is nowhere near exhaustive.
  # INTERMEDIATE_DIR and SHARED_INTERMEDIATE_DIR are used by
  # gyp.generator.xcode.  There should probably be some way for that module
  # to push the names in, rather than having to hard-code them here.
  intermediates_trees = {
      'DERIVED_FILE_DIR':         (self.IntermediatesGroup, True),
      'INTERMEDIATE_DIR':         (self.IntermediatesGroup, True),
      'PROJECT_DERIVED_FILE_DIR': (self.IntermediatesGroup, True),
      'SHARED_INTERMEDIATE_DIR':  (self.IntermediatesGroup, True),
  }

  (source_tree, path) = SourceTreeAndPathFromPath(path)
  choice = intermediates_trees.get(source_tree)
  if choice is not None:
    (group_func, hierarchical) = choice
    return (group_func(), hierarchical)

  # TODO(mark): make additional choices based on file extension.
  return (self.SourceGroup(), True)
def AddOrGetFileInRootGroup(self, path):
  """Return the PBXFileReference for |path|, creating it if absent.

  The file lands in whichever root group RootGroupForPath's heuristics
  select, using that group's hierarchical/flat preference.
  """
  group, hierarchical = self.RootGroupForPath(path)
  return group.AddOrGetFileByPath(path, hierarchical)
def RootGroupsTakeOverOnlyChildren(self, recurse=False):
  """Invoke TakeOverOnlyChild on every PBXGroup directly under mainGroup."""
  for child in self._properties['mainGroup']._properties['children']:
    if isinstance(child, PBXGroup):
      child.TakeOverOnlyChild(recurse)
def SortGroups(self):
  """Sorts mainGroup's children into canonical order.

  Top-level groups are ordered by CompareRootGroup; the contents of each
  group are then sorted recursively (groups before files, alphabetical),
  except Products, which mirrors the order of the targets that produce
  the products.
  """
  # Sort the children of the mainGroup (like "Source" and "Products")
  # according to their defined order.
  # NOTE: the cmp= keyword is Python 2 only.
  self._properties['mainGroup']._properties['children'] = \
      sorted(self._properties['mainGroup']._properties['children'],
             cmp=lambda x,y: x.CompareRootGroup(y))

  # Sort everything else by putting group before files, and going
  # alphabetically by name within sections of groups and files.  SortGroup
  # is recursive.
  for group in self._properties['mainGroup']._properties['children']:
    if not isinstance(group, PBXGroup):
      continue

    if group.Name() == 'Products':
      # The Products group is a special case.  Instead of sorting
      # alphabetically, sort things in the order of the targets that
      # produce the products.  To do this, just build up a new list of
      # products based on the targets.
      products = []
      for target in self._properties['targets']:
        if not isinstance(target, PBXNativeTarget):
          continue
        product = target._properties['productReference']

        # Make sure that the product is already in the products group.
        assert product in group._properties['children']
        products.append(product)

      # Make sure that this process doesn't miss anything that was already
      # in the products group.
      assert len(products) == len(group._properties['children'])
      group._properties['children'] = products
    else:
      group.SortGroup()
def AddOrGetProjectReference(self, other_pbxproject):
  """Add a reference to another project file (via PBXProject object) to this
  one.

  Returns [ProductGroup, ProjectRef].  ProductGroup is a PBXGroup object in
  this project file that contains a PBXReferenceProxy object for each
  product of each PBXNativeTarget in the other project file.  ProjectRef is
  a PBXFileReference to the other project file.

  If this project file already references the other project file, the
  existing ProductGroup and ProjectRef are returned.  The ProductGroup will
  still be updated if necessary.
  """
  if not 'projectReferences' in self._properties:
    self._properties['projectReferences'] = []

  product_group = None
  project_ref = None

  if not other_pbxproject in self._other_pbxprojects:
    # This project file isn't yet linked to the other one.  Establish the
    # link.
    product_group = PBXGroup({'name': 'Products'})

    # ProductGroup is strong.
    product_group.parent = self

    # There's nothing unique about this PBXGroup, and if left alone, it will
    # wind up with the same set of hashables as all other PBXGroup objects
    # owned by the projectReferences list.  Add the hashables of the
    # remote PBXProject that it's related to.
    product_group._hashables.extend(other_pbxproject.Hashables())

    # The other project reports its path as relative to the same directory
    # that this project's path is relative to.  The other project's path
    # is not necessarily already relative to this project.  Figure out the
    # pathname that this project needs to use to refer to the other one.
    this_path = posixpath.dirname(self.Path())
    projectDirPath = self.GetProperty('projectDirPath')
    if projectDirPath:
      if posixpath.isabs(projectDirPath[0]):
        this_path = projectDirPath
      else:
        this_path = posixpath.join(this_path, projectDirPath)
    other_path = gyp.common.RelativePath(other_pbxproject.Path(), this_path)

    # ProjectRef is weak (it's owned by the mainGroup hierarchy).
    project_ref = PBXFileReference({
          'lastKnownFileType': 'wrapper.pb-project',
          'path':              other_path,
          'sourceTree':        'SOURCE_ROOT',
        })
    self.ProjectsGroup().AppendChild(project_ref)

    ref_dict = {'ProductGroup': product_group, 'ProjectRef': project_ref}
    self._other_pbxprojects[other_pbxproject] = ref_dict
    self.AppendProperty('projectReferences', ref_dict)

    # Xcode seems to sort this list case-insensitively
    # NOTE: the cmp= keyword is Python 2 only.
    self._properties['projectReferences'] = \
        sorted(self._properties['projectReferences'], cmp=lambda x,y:
               cmp(x['ProjectRef'].Name().lower(),
                   y['ProjectRef'].Name().lower()))
  else:
    # The link already exists.  Pull out the relevant data.
    project_ref_dict = self._other_pbxprojects[other_pbxproject]
    product_group = project_ref_dict['ProductGroup']
    project_ref = project_ref_dict['ProjectRef']

  self._SetUpProductReferences(other_pbxproject, product_group, project_ref)

  # When every remote target has a unique SYMROOT, fold the reference path
  # into the product group's hashables -- presumably to keep generated IDs
  # distinct per build directory; TODO confirm intent.
  inherit_unique_symroot = self._AllSymrootsUnique(other_pbxproject, False)
  targets = other_pbxproject.GetProperty('targets')
  if all(self._AllSymrootsUnique(t, inherit_unique_symroot) for t in targets):
    dir_path = project_ref._properties['path']
    product_group._hashables.extend(dir_path)

  return [product_group, project_ref]
def _AllSymrootsUnique(self, target, inherit_unique_symroot):
# Returns True if all configurations have a unique 'SYMROOT' attribute.
# The value of inherit_unique_symroot decides, if a configuration is assumed
# to inherit a unique 'SYMROOT' attribute from its parent, if it doesn't
# define an explicit value for 'SYMROOT'.
symroots = self._DefinedSymroots(target)
for s in self._DefinedSymroots(target):
if (s is not None and not self._IsUniqueSymrootForTarget(s) or
s is None and not inherit_unique_symroot):
return False
return True if symroots else inherit_unique_symroot
def _DefinedSymroots(self, target):
# Returns all values for the 'SYMROOT' attribute defined in all
# configurations for this target. If any configuration doesn't define the
# 'SYMROOT' attribute, None is added to the returned set. If all
# configurations don't define the 'SYMROOT' attribute, an empty set is
# returned.
config_list = target.GetProperty('buildConfigurationList')
symroots = set()
for config in config_list.GetProperty('buildConfigurations'):
setting = config.GetProperty('buildSettings')
if 'SYMROOT' in setting:
symroots.add(setting['SYMROOT'])
else:
symroots.add(None)
if len(symroots) == 1 and None in symroots:
return set()
return symroots
def _IsUniqueSymrootForTarget(self, symroot):
# This method returns True if all configurations in target contain a
# 'SYMROOT' attribute that is unique for the given target. A value is
# unique, if the Xcode macro '$SRCROOT' appears in it in any form.
uniquifier = ['$SRCROOT', '$(SRCROOT)']
if any(x in symroot for x in uniquifier):
return True
return False
def _SetUpProductReferences(self, other_pbxproject, product_group,
                            project_ref):
  """Ensures product_group contains a PBXReferenceProxy for the product of
  each PBXNativeTarget in other_pbxproject."""
  # TODO(mark): This only adds references to products in other_pbxproject
  # when they don't exist in this pbxproject.  Perhaps it should also
  # remove references from this pbxproject that are no longer present in
  # other_pbxproject.  Perhaps it should update various properties if they
  # change.
  for target in other_pbxproject._properties['targets']:
    if not isinstance(target, PBXNativeTarget):
      continue

    other_fileref = target._properties['productReference']
    if product_group.GetChildByRemoteObject(other_fileref) is None:
      # Xcode sets remoteInfo to the name of the target and not the name
      # of its product, despite this proxy being a reference to the product.
      container_item = PBXContainerItemProxy({
            'containerPortal':      project_ref,
            'proxyType':            2,
            'remoteGlobalIDString': other_fileref,
            'remoteInfo':           target.Name()
          })
      # TODO(mark): Does sourceTree get copied straight over from the other
      # project?  Can the other project ever have lastKnownFileType here
      # instead of explicitFileType?  (Use it if so?)  Can path ever be
      # unset?  (I don't think so.)  Can other_fileref have name set, and
      # does it impact the PBXReferenceProxy if so?  These are the questions
      # that perhaps will be answered one day.
      reference_proxy = PBXReferenceProxy({
            'fileType':   other_fileref._properties['explicitFileType'],
            'path':       other_fileref._properties['path'],
            'sourceTree': other_fileref._properties['sourceTree'],
            'remoteRef':  container_item,
          })

      product_group.AppendChild(reference_proxy)
def SortRemoteProductReferences(self):
  """Sorts each remote ProductGroup to match the remote project's target
  order -- the order Xcode itself uses."""
  # For each remote project file, sort the associated ProductGroup in the
  # same order that the targets are sorted in the remote project file.
  def CompareProducts(x, y, remote_products):
    # x and y are PBXReferenceProxy objects.  Go through their associated
    # PBXContainerItem to get the remote PBXFileReference, which will be
    # present in the remote_products list.
    x_remote = x._properties['remoteRef']._properties['remoteGlobalIDString']
    y_remote = y._properties['remoteRef']._properties['remoteGlobalIDString']
    x_index = remote_products.index(x_remote)
    y_index = remote_products.index(y_remote)

    # Use the order of each remote PBXFileReference in remote_products to
    # determine the sort order.
    return cmp(x_index, y_index)

  # NOTE: iteritems() and the cmp= keyword are Python 2 constructs.
  for other_pbxproject, ref_dict in self._other_pbxprojects.iteritems():
    # Build up a list of products in the remote project file, ordered the
    # same as the targets that produce them.
    remote_products = []
    for target in other_pbxproject._properties['targets']:
      if not isinstance(target, PBXNativeTarget):
        continue
      remote_products.append(target._properties['productReference'])

    # Sort the PBXReferenceProxy children according to the list of remote
    # products.
    product_group = ref_dict['ProductGroup']
    product_group._properties['children'] = sorted(
        product_group._properties['children'],
        cmp=lambda x, y, rp=remote_products: CompareProducts(x, y, rp))
class XCProjectFile(XCObject):
  """Root wrapper for a .pbxproj file: archive/object version numbers plus
  the rootObject PBXProject."""

  _schema = XCObject._schema.copy()
  _schema.update({
    'archiveVersion': [0, int, 0, 1, 1],
    'classes':        [0, dict, 0, 1, {}],
    'objectVersion':  [0, int, 0, 1, 46],
    'rootObject':     [0, PBXProject, 1, 1],
  })
def ComputeIDs(self, recursive=True, overwrite=True, hash=None):
  """Delegates ID computation to the root PBXProject."""
  # Although XCProjectFile is implemented here as an XCObject, it's not a
  # proper object in the Xcode sense, and it certainly doesn't have its own
  # ID.  Pass through an attempt to update IDs to the real root object.
  if recursive:
    self._properties['rootObject'].ComputeIDs(recursive, overwrite, hash)
def Print(self, file=sys.stdout):
  """Writes the entire project file, beginning with the UTF-8 marker
  comment, to |file|."""
  self.VerifyHasRequiredProperties()

  # Add the special "objects" property, which will be caught and handled
  # separately during printing.  This structure allows a fairly standard
  # loop do the normal printing.
  self._properties['objects'] = {}
  self._XCPrint(file, 0, '// !$*UTF8*$!\n')
  if self._should_print_single_line:
    self._XCPrint(file, 0, '{ ')
  else:
    self._XCPrint(file, 0, '{\n')
  # NOTE: iteritems() and the cmp= keyword are Python 2 constructs.
  for property, value in sorted(self._properties.iteritems(),
                                cmp=lambda x, y: cmp(x, y)):
    if property == 'objects':
      self._PrintObjects(file)
    else:
      self._XCKVPrint(file, 1, property, value)
  self._XCPrint(file, 0, '}\n')
  # Remove the synthetic property again so state is unchanged afterwards.
  del self._properties['objects']
def _PrintObjects(self, file):
  """Prints the flattened objects section: descendants grouped by class
  name and ordered by object ID within each section."""
  if self._should_print_single_line:
    self._XCPrint(file, 0, 'objects = {')
  else:
    self._XCPrint(file, 1, 'objects = {\n')

  # Bucket every descendant (excluding self) by its class name.
  objects_by_class = {}
  for object in self.Descendants():
    if object == self:
      continue
    class_name = object.__class__.__name__
    if not class_name in objects_by_class:
      objects_by_class[class_name] = []
    objects_by_class[class_name].append(object)

  for class_name in sorted(objects_by_class):
    self._XCPrint(file, 0, '\n')
    self._XCPrint(file, 0, '/* Begin ' + class_name + ' section */\n')
    # NOTE: the cmp= keyword is Python 2 only.
    for object in sorted(objects_by_class[class_name],
                         cmp=lambda x, y: cmp(x.id, y.id)):
      object.Print(file)
    self._XCPrint(file, 0, '/* End ' + class_name + ' section */\n')

  if self._should_print_single_line:
    self._XCPrint(file, 0, '}; ')
  else:
    self._XCPrint(file, 1, '};\n')
| apache-2.0 |
isrohutamahopetechnik/MissionPlanner | Lib/encodings/koi8_u.py | 93 | 14325 | """ Python Character Mapping Codec koi8_u generated from 'python-mappings/KOI8-U.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs

class Codec(codecs.Codec):
    """Stateless KOI8-U codec built on the module-level charmap tables."""

    def encode(self, input, errors='strict'):
        return codecs.charmap_encode(input, errors, encoding_table)

    def decode(self, input, errors='strict'):
        return codecs.charmap_decode(input, errors, decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    """Incremental encoder; charmap encoding carries no state across calls."""

    def encode(self, input, final=False):
        return codecs.charmap_encode(input, self.errors, encoding_table)[0]

class IncrementalDecoder(codecs.IncrementalDecoder):
    """Incremental decoder; charmap decoding carries no state across calls."""

    def decode(self, input, final=False):
        return codecs.charmap_decode(input, self.errors, decoding_table)[0]
class StreamWriter(Codec, codecs.StreamWriter):
    # Stream support comes entirely from Codec plus the codecs base class.
    pass

class StreamReader(Codec, codecs.StreamReader):
    # Stream support comes entirely from Codec plus the codecs base class.
    pass
### encodings module API

def getregentry():
    """Return the CodecInfo record registered by the encodings package."""
    codec = Codec()
    return codecs.CodecInfo(
        name='koi8-u',
        encode=codec.encode,
        decode=codec.decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Table

# The low half of KOI8-U is plain ASCII: byte 0x00-0x7F decodes to the
# code point with the same value.
try:
    _chr = unichr          # Python 2
except NameError:
    _chr = chr             # Python 3

# Unicode code points for bytes 0x80-0xFF, sixteen bytes per pair of rows:
# box-drawing characters, shades, and Ukrainian/Russian Cyrillic letters,
# per the KOI8-U mapping.
_HIGH_HALF = (
    0x2500, 0x2502, 0x250c, 0x2510, 0x2514, 0x2518, 0x251c, 0x2524,
    0x252c, 0x2534, 0x253c, 0x2580, 0x2584, 0x2588, 0x258c, 0x2590,
    0x2591, 0x2592, 0x2593, 0x2320, 0x25a0, 0x2219, 0x221a, 0x2248,
    0x2264, 0x2265, 0x00a0, 0x2321, 0x00b0, 0x00b2, 0x00b7, 0x00f7,
    0x2550, 0x2551, 0x2552, 0x0451, 0x0454, 0x2554, 0x0456, 0x0457,
    0x2557, 0x2558, 0x2559, 0x255a, 0x255b, 0x0491, 0x255d, 0x255e,
    0x255f, 0x2560, 0x2561, 0x0401, 0x0404, 0x2563, 0x0406, 0x0407,
    0x2566, 0x2567, 0x2568, 0x2569, 0x256a, 0x0490, 0x256c, 0x00a9,
    0x044e, 0x0430, 0x0431, 0x0446, 0x0434, 0x0435, 0x0444, 0x0433,
    0x0445, 0x0438, 0x0439, 0x043a, 0x043b, 0x043c, 0x043d, 0x043e,
    0x043f, 0x044f, 0x0440, 0x0441, 0x0442, 0x0443, 0x0436, 0x0432,
    0x044c, 0x044b, 0x0437, 0x0448, 0x044d, 0x0449, 0x0447, 0x044a,
    0x042e, 0x0410, 0x0411, 0x0426, 0x0414, 0x0415, 0x0424, 0x0413,
    0x0425, 0x0418, 0x0419, 0x041a, 0x041b, 0x041c, 0x041d, 0x041e,
    0x041f, 0x042f, 0x0420, 0x0421, 0x0422, 0x0423, 0x0416, 0x0412,
    0x042c, 0x042b, 0x0417, 0x0428, 0x042d, 0x0429, 0x0427, 0x042a,
)

# 256-character string indexed by byte value, as expected by
# codecs.charmap_decode.
decoding_table = u''.join(
    [_chr(_i) for _i in range(0x80)] +
    [_chr(_cp) for _cp in _HIGH_HALF])
### Encoding table
# Inverse mapping (character -> byte position), derived from decoding_table.
encoding_table = codecs.charmap_build(decoding_table)
| gpl-3.0 |
jehoffmann/l4linux | tools/perf/scripts/python/futex-contention.py | 11261 | 1486 | # futex contention
# (c) 2010, Arnaldo Carvalho de Melo <acme@redhat.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Translation of:
#
# http://sourceware.org/systemtap/wiki/WSFutexContention
#
# to perf python scripting.
#
# Measures futex contention
import os, sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from Util import *
# Short-lived per-thread state for a FUTEX_WAIT currently in flight.
thread_thislock = {}   # tid -> futex address being waited on
thread_blocktime = {}  # tid -> timestamp (ns) when the wait began

# Long-lived aggregates.
lock_waits = {}        # (tid, lock) -> stats on blockage elapsed time
# process_names was previously assigned twice; one definition suffices.
process_names = {}     # long-lived tid-to-execname mapping for the report
def syscalls__sys_enter_futex(event, ctxt, cpu, s, ns, tid, comm,
        nr, uaddr, op, val, utime, uaddr2, val3):
    """sys_enter_futex tracepoint handler: note the start of a FUTEX_WAIT.

    Records which futex this thread is about to block on and when, so the
    matching sys_exit_futex handler can compute the blocked interval.
    """
    cmd = op & FUTEX_CMD_MASK
    if cmd != FUTEX_WAIT:
        return # we don't care about originators of WAKE events

    process_names[tid] = comm
    thread_thislock[tid] = uaddr
    thread_blocktime[tid] = nsecs(s, ns)
def syscalls__sys_exit_futex(event, ctxt, cpu, s, ns, tid, comm,
        nr, ret):
    """sys_exit_futex tracepoint handler.

    If this thread was recorded as blocking in FUTEX_WAIT, account the
    elapsed block time against the (tid, lock) pair and clear the
    per-thread in-flight state.
    """
    # dict.has_key() is removed in Python 3; 'in' is equivalent and works
    # on both Python 2 and 3.
    if tid in thread_blocktime:
        elapsed = nsecs(s, ns) - thread_blocktime[tid]
        add_stats(lock_waits, (tid, thread_thislock[tid]), elapsed)
        del thread_blocktime[tid]
        del thread_thislock[tid]
def trace_begin():
    # perf-script hook: runs once before any events are processed.
    # NOTE: print statement -- this script targets Python 2.
    print "Press control+C to stop and show the summary"
def trace_end():
    # perf-script hook: runs once at the end; dump per-(thread, lock)
    # contention statistics accumulated in lock_waits.
    for (tid, lock) in lock_waits:
        # NOTE: min/max here shadow the builtins; only avg and count are used.
        min, max, avg, count = lock_waits[tid, lock]
        print "%s[%d] lock %x contended %d times, %d avg ns" % \
              (process_names[tid], tid, lock, count, avg)
mrGeen/eden | modules/s3/pyvttbl/data_restructurer.py | 11 | 3415 | from __future__ import print_function
# Copyright (c) 2011, Roger Lew [see LICENSE.txt]
# This software is funded in part by NIH Grant P20 RR016454.
# Python 2 to 3 workarounds
import sys
if sys.version_info[0] == 2:
_xrange = xrange
elif sys.version_info[0] == 3:
_xrange = range
import csv
from pyvttbl import DataFrame, _xunique_combinations
def long2wide(in_fname, id, dvs, between=[], within=[],
              covariates=[], out_fname=None, nested=True):
    """Reshape a long-format table into wide format (one row per subject).

    Parameters:
        in_fname: path of the long-format input, read via DataFrame.read_tbl.
        id: name of the subject-identifier column (shadows builtin id()).
        dvs: dependent-variable column names; each is pivoted by the
            within-subject factors.
        between: between-subjects factor columns, copied through unchanged.
        within: within-subjects factor columns.
        covariates: covariate columns, copied through unchanged.
        out_fname: output csv path; defaults to 'wide_data.csv'.
        nested: if True, emit columns for every non-empty combination of
            within factors; if False, only the full len(within)-way crossing.

    NOTE(review): the [] defaults are mutable but only read here, so they
    are harmless in practice.
    NOTE(review): the output is opened 'wb' for csv.writer, which is
    Python-2-only behavior; under Python 3 csv.writer needs a text-mode
    file -- confirm the intended interpreter.
    """
    # load in_fname into a PyvtTbl object
    print('reading "%s"...'%in_fname)
    cls = DataFrame()
    cls.read_tbl(in_fname)
    # First output column: the sorted unique subject ids; every pivot below
    # uses cols=[id], so its columns line up with this ordering.
    d = [sorted(set(cls[id]))]
    header = [id] + covariates + between
    # Covariates/between factors: one value per subject ('arbitrary' picks
    # any, assuming the value is constant within subject).
    for col in covariates+between:
        z = cls.pivot(col, cols=[id], aggregate='arbitrary')
        d.extend(list(z))
    # start controls whether nested factors are examined
    if nested : start = 1
    else      : start = len(within)
    for i, dv in enumerate(dvs):
        print('\ncollaborating %s'%dv)
        # One pivot per combination of within factors, averaging the DV.
        for j in _xrange(start, len(within)+1):
            for factors in _xunique_combinations(within, j):
                print('  pivoting', factors, '...')
                z = cls.pivot(dv, rows=factors, cols=[id],
                              aggregate='avg')
                d.extend(list(z))
                # process headers: e.g. 'dv__factor.level_factor.level'
                for names in z.rnames:
                    h = '_'.join(('%s.%s'%(f, str(c)) for (f,c) in names))
                    header.append('%s__%s'%(dv, h))
    # Now we can write the data
    if out_fname == None:
        out_fname = 'wide_data.csv'
    with open(out_fname,'wb') as f:
        wtr = csv.writer(f)
        wtr.writerow([n.upper() for n in header])
        wtr.writerows(zip(*d)) # transpose and write
##long2wide(in_fname='long_test_data.csv',
## id='participant',
## dvs=['dv1','dv2'],
## between=['bfactor1'],
## within=['wfactor1','wfactor2','wfactor3'],
## covariates=['cov1','cov2'],
## out_fname='formatted.csv',
## nested=False)
##import time
##
##t0=time.time()
##print('need to format data for spss... (this may take a few minutes)')
##
##fname='collaborated.csv'
##
##covariates='age,gender,dicho_correct,dicho_misses,dicho_FA,SAAT_noncomp_correct,'\
## 'SAAT_noncomp_incorrect,SAAT_comp_correct,SAAT_comp_incorrect'.split(',')
##
##within='speed,target_dir,agreement'.split(',')
##
##dvs='correct_decision_raw,decision_at_safe_distance_raw,decision_distance_raw,'\
## 'decision_latency_raw,decision_proportion_raw,decision_ttc_proportion_raw,'\
## 'decision_ttc_raw,detection_distance_raw,detection_latency_raw,'\
## 'detection_proportion_raw,detection_ttc_proportion_raw,detection_ttc_raw,'\
## 'position_distance_raw,position_latency_raw,risk_level_raw,trial_raw'.split(',')
##
####long2wide(fname, 'participant',dvs=dvs,within=within,covariates=covariates,nested=False)
##
##long2wide(in_fname=fname,
## id='participant',
## dvs=dvs,
## between=[],
## within=within,
## covariates=covariates,
## out_fname='formatted.csv',
## nested=True)
##
##print('\ndone.')
##print(time.time()-t0)
| mit |
camilonova/django | setup.py | 37 | 3287 | import os
import sys
from distutils.sysconfig import get_python_lib
from setuptools import find_packages, setup
# Warn if we are installing over top of an existing installation. This can
# cause issues where files that were deleted from a more recent Django are
# still present in site-packages. See #18115.
overlay_warning = False
if "install" in sys.argv:
    site_dirs = [get_python_lib()]
    if site_dirs[0].startswith("/usr/lib/"):
        # Debian routes local installs to a custom site-packages under
        # /usr/local, so probe that prefix explicitly as well.
        site_dirs.append(get_python_lib(prefix="/usr/local"))
    for site_dir in site_dirs:
        existing_path = os.path.abspath(os.path.join(site_dir, "django"))
        if os.path.exists(existing_path):
            # We note the need for the warning here, but present it after
            # the command is run, so it's more likely to be seen.
            overlay_warning = True
            break

# Template skeletons and the bin directory are shipped as data, not as
# importable packages.
EXCLUDE_FROM_PACKAGES = [
    'django.conf.project_template',
    'django.conf.app_template',
    'django.bin',
]
# Dynamically calculate the version based on django.VERSION.
# (Imports the in-tree package, so this works before installation.)
version = __import__('django').get_version()
# Package metadata. The version is computed above from django.VERSION;
# packages are discovered automatically except for template skeletons.
setup(
    name='Django',
    version=version,
    url='https://www.djangoproject.com/',
    author='Django Software Foundation',
    author_email='foundation@djangoproject.com',
    description=('A high-level Python Web framework that encourages '
                 'rapid development and clean, pragmatic design.'),
    license='BSD',
    packages=find_packages(exclude=EXCLUDE_FROM_PACKAGES),
    include_package_data=True,
    # Legacy script kept alongside the console_scripts entry point.
    scripts=['django/bin/django-admin.py'],
    entry_points={'console_scripts': [
        'django-admin = django.core.management:execute_from_command_line',
    ]},
    install_requires=['pytz'],
    # Optional password hashers; installed via e.g. pip install Django[bcrypt]
    extras_require={
        "bcrypt": ["bcrypt"],
        "argon2": ["argon2-cffi >= 16.1.0"],
    },
    zip_safe=False,
    classifiers=[
        'Development Status :: 2 - Pre-Alpha',
        'Environment :: Web Environment',
        'Framework :: Django',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Topic :: Internet :: WWW/HTTP',
        'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
        'Topic :: Internet :: WWW/HTTP :: WSGI',
        'Topic :: Software Development :: Libraries :: Application Frameworks',
        'Topic :: Software Development :: Libraries :: Python Modules',
    ],
)
# Emit the overlay warning last so it isn't scrolled away by the install
# output. existing_path was set by the detection loop near the top of the
# file (it is always bound when overlay_warning is True).
if overlay_warning:
    sys.stderr.write("""

========
WARNING!
========

You have just installed Django over top of an existing
installation, without removing it first. Because of this,
your install may now include extraneous files from a
previous version that have since been removed from
Django. This is known to cause a variety of problems. You
should manually remove the

%(existing_path)s

directory and re-install Django.

""" % {"existing_path": existing_path})
| bsd-3-clause |
spaceof7/QGIS | python/plugins/processing/algs/qgis/ZonalStatistics.py | 8 | 6214 | # -*- coding: utf-8 -*-
"""
***************************************************************************
ZonalStatistics.py
---------------------
Date : September 2016
Copyright : (C) 2016 by Alexander Bruy
Email : alexander dot bruy at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Alexander Bruy'
__date__ = 'September 2016'
__copyright__ = '(C) 2016, Alexander Bruy'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
from collections import OrderedDict
from qgis.PyQt.QtGui import QIcon
from qgis.analysis import QgsZonalStatistics
from qgis.core import (QgsProcessing,
QgsProcessingParameterVectorLayer,
QgsProcessingParameterRasterLayer,
QgsProcessingParameterString,
QgsProcessingParameterBand,
QgsProcessingParameterEnum,
QgsProcessingOutputVectorLayer)
from processing.algs.qgis.QgisAlgorithm import QgisAlgorithm
pluginPath = os.path.split(os.path.split(os.path.dirname(__file__))[0])[0]
class ZonalStatistics(QgisAlgorithm):
    """Processing algorithm that computes per-polygon raster statistics.

    The selected statistics of one raster band are appended as new
    attributes to the zone polygons. Note that the input vector layer is
    modified in place: it is also declared as the algorithm's output.
    """

    # Parameter/output identifiers used by the processing framework.
    INPUT_RASTER = 'INPUT_RASTER'
    RASTER_BAND = 'RASTER_BAND'
    INPUT_VECTOR = 'INPUT_VECTOR'
    COLUMN_PREFIX = 'COLUMN_PREFIX'
    STATISTICS = 'STATS'

    def icon(self):
        # Toolbox icon.
        return QIcon(os.path.join(pluginPath, 'images', 'zonalstats.png'))

    def group(self):
        # User-visible (translated) group name.
        return self.tr('Raster analysis')

    def groupId(self):
        return 'rasteranalysis'

    def __init__(self):
        super().__init__()

    def initAlgorithm(self, config=None):
        # Ordered mapping of display label -> QgsZonalStatistics flag; the
        # order here defines the order shown in the multi-select widget,
        # and enum indices below depend on it.
        self.STATS = OrderedDict([(self.tr('Count'), QgsZonalStatistics.Count),
                                  (self.tr('Sum'), QgsZonalStatistics.Sum),
                                  (self.tr('Mean'), QgsZonalStatistics.Mean),
                                  (self.tr('Median'), QgsZonalStatistics.Median),
                                  (self.tr('Std. dev.'), QgsZonalStatistics.StDev),
                                  (self.tr('Min'), QgsZonalStatistics.Min),
                                  (self.tr('Max'), QgsZonalStatistics.Max),
                                  (self.tr('Range'), QgsZonalStatistics.Range),
                                  (self.tr('Minority'), QgsZonalStatistics.Minority),
                                  (self.tr('Majority (mode)'), QgsZonalStatistics.Majority),
                                  (self.tr('Variety'), QgsZonalStatistics.Variety),
                                  (self.tr('Variance'), QgsZonalStatistics.Variance),
                                  (self.tr('All'), QgsZonalStatistics.All)])

        self.addParameter(QgsProcessingParameterRasterLayer(self.INPUT_RASTER,
                                                            self.tr('Raster layer')))
        self.addParameter(QgsProcessingParameterBand(self.RASTER_BAND,
                                                     self.tr('Raster band'),
                                                     1,
                                                     self.INPUT_RASTER))
        self.addParameter(QgsProcessingParameterVectorLayer(self.INPUT_VECTOR,
                                                            self.tr('Vector layer containing zones'),
                                                            [QgsProcessing.TypeVectorPolygon]))
        self.addParameter(QgsProcessingParameterString(self.COLUMN_PREFIX,
                                                       self.tr('Output column prefix'), '_'))
        keys = list(self.STATS.keys())
        # Default selection: Count, Sum, Mean (indices into keys above).
        self.addParameter(QgsProcessingParameterEnum(self.STATISTICS,
                                                     self.tr('Statistics to calculate'),
                                                     keys,
                                                     allowMultiple=True, defaultValue=[0, 1, 2]))
        # The input vector doubles as the output (modified in place).
        self.addOutput(QgsProcessingOutputVectorLayer(self.INPUT_VECTOR,
                                                      self.tr('Zonal statistics'),
                                                      QgsProcessing.TypeVectorPolygon))

        # Resolved in prepareAlgorithm(); used in processAlgorithm().
        self.bandNumber = None
        self.columnPrefix = None
        self.selectedStats = None
        self.vectorLayer = None
        self.rasterLayer = None

    def name(self):
        return 'zonalstatistics'

    def displayName(self):
        return self.tr('Zonal statistics')

    def prepareAlgorithm(self, parameters, context, feedback):
        """Resolve parameter values (main thread) before processing."""
        self.bandNumber = self.parameterAsInt(parameters, self.RASTER_BAND, context)
        self.columnPrefix = self.parameterAsString(parameters, self.COLUMN_PREFIX, context)
        st = self.parameterAsEnums(parameters, self.STATISTICS, context)

        keys = list(self.STATS.keys())
        # OR together the flag of every selected statistic.
        self.selectedStats = 0
        for i in st:
            self.selectedStats |= self.STATS[keys[i]]

        self.vectorLayer = self.parameterAsVectorLayer(parameters, self.INPUT_VECTOR, context)
        self.rasterLayer = self.parameterAsRasterLayer(parameters, self.INPUT_RASTER, context)
        return True

    def processAlgorithm(self, parameters, context, feedback):
        """Run the zonal statistics calculation and return the vector layer."""
        zs = QgsZonalStatistics(self.vectorLayer,
                                self.rasterLayer,
                                self.columnPrefix,
                                self.bandNumber,
                                QgsZonalStatistics.Statistics(self.selectedStats))
        zs.calculateStatistics(feedback)
        return {self.INPUT_VECTOR: self.vectorLayer}
| gpl-2.0 |
bmazin/ARCONS-pipeline | QEcal/QEfile.py | 1 | 3896 | '''
Author: Alex Walter
Date: 5-13-2013
Helper class for QECalibration.py
We do the actual QE calculation here
'''
from os.path import isfile
import numpy as np
import math
class QEfile():
    """Holds one quantum-efficiency (QE) measurement run loaded from a
    text file, plus the geometry constants needed to compute QE.

    self.data holds the file columns plus six appended timing columns
    (t1, t2, f1, f2, t3, t4) filled in by determineTiming().

    NOTE(review): the default fn=None fails inside isfile() with a
    TypeError before the intended fileNameError can be raised -- confirm
    callers always pass a path.
    """
    def __init__(self, fn=None, ang=0):
        # fn: path of the QE data text file; ang: measurement angle (deg).
        if not isfile(fn):
            raise fileNameError(fn)
        self.filename=fn
        self.angle=ang
        self.loadData()
        self.areaArcons = (222.0*10**-6)**2 #[m^2] effective area of arcons
        #area of inductor * magnification of microlense = area of pixel (approximately)
        self.areaDect = 1.0*10.0**-4. #[m^2] area of optical detector
        self.areaIRDect = 1.0*math.pi*(1.5*10**-3)**2 #[m^2] area of IR detector (wavelength >= 1100
        self.magnification = 1.2 # magnification with Palamar optics

    def findQE(self, nArcons, wavelength):
        """Return the quantum efficiency: photon flux density seen by
        ARCONS divided by flux density seen by the reference detector."""
        #nArcons: average number of photons detected at arcons per second
        #nDect: average number of photons detected at power meter per second
        #wavelength in nanometers
        # Look up the reference-detector count for this wavelength; column
        # -7 is the last original data column before the six timing columns.
        nDect = self.data[np.where(self.data[:,0]==wavelength)[0][0],-7]
        nDect*=10.0**7 #[photons/s]
        # print 'Detected Count Rate: ' + str(nDect)
        # print 'Arcons Count Rate: ' + str(nArcons)
        # The IR detector takes over at 1100 nm and has a different area.
        if wavelength >= 1099.999:
            dectArea=self.areaIRDect
        else:
            dectArea=self.areaDect
        # print 'Detector Area: ' + str(dectArea)
        return 1.0*nArcons/(self.magnification**2*self.areaArcons)/(nDect/(dectArea))

    def loadData(self):
        """Load the data file into self.data, leaving six extra columns
        for the timing parameters computed by determineTiming()."""
        print 'loading data from '+self.filename
        d=np.loadtxt(str(self.filename))
        self.data=np.zeros((len(d),len(d[0])+6))
        self.data[:,:-6]=d
        self.determineTiming()

    def saveParam(self):
        """Write the six timing columns to a sibling '...param' file."""
        # e.g. 'run.txt' -> 'run.param'-style companion file.
        fileparam = 'param'.join(str(self.filename).rsplit('txt',1))
        np.savetxt(fileparam, self.data[:,-6:],fmt='%i', delimiter='\t')
        print 'saved parameters: ' + str(fileparam)

    def determineTiming(self):
        """Fill the six timing columns, loading a cached param file when
        available, otherwise guessing from hard-coded pulse timing."""
        #Can make a more sophisticated guess later
        #Works well for 20130503POL2.txt, obs_20130503-234952.h5
        #load data from param file
        fileparam = 'param'.join(str(self.filename).rsplit('txt',1))
        if isfile(fileparam):
            param = np.loadtxt(str(fileparam))
            self.data[:,-6:] = param
            print 'loaded parameters: ' + str(fileparam)
        else:
            # Hard-coded timing model: flat-top pulses of widthTime s
            # separated by troughTime s, with a second train starting at
            # start2Time for rows >= start2Num.
            startTime=125
            widthTime=15
            troughTime=70
            start2Num=15
            start2Time=1465
            for i in range(len(self.data)):
                f1=startTime
                j=i
                if i>=(start2Num-1):
                    f1=start2Time
                    j=i-(start2Num-1)
                f1+=j*(widthTime+troughTime)
                f2=f1+widthTime
                # t1..t4 bracket the pulse symmetrically for background
                # estimation on either side of [f1, f2].
                t1=(3*f1-f2-troughTime)/2.0
                t2=(f2+f1-troughTime)/2.0
                t3=(f1+f2+troughTime)/2.0
                t4=(3*f2-f1+troughTime)/2.0
                self.data[i,-6]=round(t1)
                self.data[i,-5]=round(t2)
                self.data[i,-4]=round(f1)
                self.data[i,-3]=round(f2)
                self.data[i,-2]=round(t3)
                self.data[i,-1]=round(t4)
            self.saveParam()
# def getData(self):
# return self.data
# def setData(self, x=-1, y=-1, value=0):
# print 'old value: ' +str(self.data[x,y])
# print 'new value: '+str(value)
# self.data[x,y] = value
# def getLength(self):
# return len(self.data)
# def getAngle(self):
# return self.angle
class fileNameError(Exception):
    """Raised when a supplied path does not name an existing file."""

    def __init__(self, value):
        # The offending path; kept as an attribute for callers to inspect.
        self.value = value

    def __str__(self):
        return '%r is not a file' % (self.value,)
| gpl-2.0 |
Amitgb14/sos | sos/plugins/insights.py | 6 | 1569 | # This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
from sos.plugins import Plugin, RedHatPlugin
class RedHatAccessInsights(Plugin, RedHatPlugin):
    '''Collect config and log for Red Hat Access Insights
    '''
    plugin_name = 'insights'
    packages = ['redhat-access-insights']
    profiles = ('system', 'sysmgmt')
    conf_file = '/etc/redhat-access-insights/redhat-access-insights.conf'

    def setup(self):
        # Grab the client config plus its logs, capped by the log_size option.
        limit = self.get_option('log_size')
        self.add_copy_spec(self.conf_file)
        self.add_copy_spec_limit('/var/log/redhat-access-insights/*.log',
                                 sizelimit=limit)

    def postproc(self):
        # Scrub credentials from the collected config: the password value
        # and the userinfo portion of any proxy URL.
        for pattern, repl in (
                (r'(password[\t\ ]*=[\t\ ]*)(.+)', r'\1********'),
                (r'(proxy[\t\ ]*=.*)(:)(.*)(@.*)', r'\1\2********\4')):
            self.do_file_sub(self.conf_file, pattern, repl)
| gpl-2.0 |
finlandhuang/stardict-3 | tools/src/hanzim2dict.py | 44 | 3116 | #!/usr/bin/env python
#
# hanzim2dict
#
# Original version written by Michael Robinson (robinson@netrinsics.com)
# Version 0.0.2
# Copyright 2004
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Usage: Run hanzim2dict in a directory containing the "zidianf.gb",
# "cidianf.gb", and "sanzidianf.gb" files from the Hanzi Master distribution
# (available at http://zakros.ucsd.edu/~arobert/hanzim.html). The output
# will be a StarDict dictionary in 2.4.2 format: hanzim.dict, hanzim.idx,
# and hanzim.ifo
#
# The dictionary and index files may be compressed as follows:
# $ gzip -9 hanzim.idx
# $ dictzip hanzim.dict
#
from string import split
from codecs import getdecoder, getencoder
from struct import pack
class Word:
    """A dictionary headword plus the definitions accumulated for it."""

    def __init__(self, code, definition):
        # code: the headword (UTF-8 string); definition: its first entry.
        self.code = code
        self.definition = [definition]

    def add(self, definition):
        """Record an additional definition for this headword."""
        self.definition.append(definition)
# headword (UTF-8) -> Word with its accumulated definitions
wordmap = {}

# Codec helpers: the Hanzi Master files are GB2312; StarDict wants UTF-8.
fromGB = getdecoder("GB2312")
toUTF = getencoder("utf_8")

# Single-character entries: columns are char, level, pinyin, definition.
file = open("zidianf.gb", "r")
lines = map(lambda x: split(x[:-1], '\t'), file.readlines())
for line in lines:
    code = toUTF(fromGB(line[0])[0])[0]
    pinyin = line[2]
    definition = '<'+pinyin+'> '+line[3]+' ['+line[1]+']'
    if wordmap.has_key(code):
        wordmap[code].add(definition)
    else:
        wordmap[code] = Word(code, definition)

# Multi-character entries: the headword column ends in a 2-char level
# suffix (stripped with [:-2]); its last char is kept as the level tag.
for filename in ("cidianf.gb", "sanzicidianf.gb"):
    file = open(filename, "r")
    lines = map(lambda x: split(x[:-1], '\t'), file.readlines())
    for line in lines:
        if len(line) < 2:
            # Malformed line: report its field count and skip it.
            print len(line)
            continue
        code = toUTF(fromGB(line[0][:-2])[0])[0]
        definition = line[1]+' ['+line[0][-1:]+']'
        if wordmap.has_key(code):
            wordmap[code].add(definition)
        else:
            wordmap[code] = Word(code, definition)

# Emit the StarDict 2.4.2 triple: .dict (definitions), .idx (sorted
# word -> offset/length index), .ifo (metadata).
dict = open("hanzim.dict", "wb")
idx = open("hanzim.idx", "wb")
ifo = open("hanzim.ifo", "wb")

offset = 0
count = 0
keylen = 0   # total bytes of all keys; used for idxfilesize below
keys = list(wordmap.keys())
keys.sort()
for key in keys:
    word = wordmap[key]
    # Join multiple definitions with newlines (sametypesequence=m).
    deftext = ""
    multi = False
    for d in word.definition:
        if multi:
            deftext += '\n'
        deftext += d
        multi = True
    dict.write(deftext)
    # Index record: NUL-terminated key, then big-endian 32-bit offset
    # and length of the definition within the .dict file.
    idx.write(key+'\0')
    idx.write(pack("!I", offset))
    idx.write(pack("!I", len(deftext)))
    offset += len(deftext)
    count += 1
    keylen += len(key)
dict.close()
idx.close()

# idxfilesize = key bytes + per-entry overhead (1 NUL + two 4-byte ints).
ifo.write("StarDict's dict ifo file\n")
ifo.write("version=2.4.2\n")
ifo.write("bookname=Hanzi Master 1.3\n")
ifo.write("wordcount="+str(count)+"\n")
ifo.write("idxfilesize="+str(keylen+(count*9))+"\n")
ifo.write("author=Adrian Robert\n")
ifo.write("email=arobert@cogsci.ucsd.edu\n")
ifo.write("website=http://zakros.ucsd.edu/~arobert/hanzim.html\n")
ifo.write("sametypesequence=m\n")
ifo.close()
| gpl-3.0 |
drwyrm/Flexget | flexget/plugins/output/ftp_download.py | 7 | 7352 | from __future__ import unicode_literals, division, absolute_import
from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
from future.moves.urllib.parse import urlparse, unquote
import logging
import os
import ftplib
from flexget import plugin
from flexget.event import event
log = logging.getLogger('ftp')
class OutputFtp(object):
    """
    Ftp Download plugin

    input-url: ftp://<user>:<password>@<host>:<port>/<path to file>
    Example: ftp://anonymous:anon@my-ftp-server.com:21/torrent-files-dir

    config:
        ftp_download:
            use-ssl: <True/False>
            ftp_tmp_path: <path>
            delete_origin: <True/False>
            download_empty_dirs: <True/False>

    TODO:
      - Resume downloads
      - create banlists files
      - validate connection parameters
    """

    schema = {
        'type': 'object',
        'properties': {
            'use-ssl': {'type': 'boolean', 'default': False},
            'ftp_tmp_path': {'type': 'string', 'format': 'path'},
            'delete_origin': {'type': 'boolean', 'default': False},
            'download_empty_dirs': {'type': 'boolean', 'default': False}
        },
        'additionalProperties': False
    }

    def prepare_config(self, config, task):
        """Fill in defaults for any options missing from the task config."""
        config.setdefault('use-ssl', False)
        config.setdefault('delete_origin', False)
        config.setdefault('ftp_tmp_path', os.path.join(task.manager.config_base, 'temp'))
        config.setdefault('download_empty_dirs', False)
        return config

    def ftp_connect(self, config, ftp_url, current_path):
        """Open an FTP(S) connection, log in, and cwd into current_path."""
        if config['use-ssl']:
            ftp = ftplib.FTP_TLS()
        else:
            ftp = ftplib.FTP()

        # ftp.set_debuglevel(2)
        log.debug("Connecting to " + ftp_url.hostname)
        ftp.connect(ftp_url.hostname, ftp_url.port)
        ftp.login(ftp_url.username, ftp_url.password)
        if config['use-ssl']:
            # Encrypt the data channel as well, not just the control channel.
            ftp.prot_p()
        ftp.sendcmd('TYPE I')  # binary transfer mode
        ftp.set_pasv(True)
        log.debug("Changing directory to: " + current_path)
        ftp.cwd(current_path)
        return ftp

    def check_connection(self, ftp, config, ftp_url, current_path):
        """Return a live connection, reconnecting if the old one dropped."""
        try:
            ftp.voidcmd("NOOP")
        except (IOError, ftplib.Error):
            ftp = self.ftp_connect(config, ftp_url, current_path)
        return ftp

    def on_task_download(self, task, config):
        """Download each accepted entry's URL (file or whole directory)."""
        config = self.prepare_config(config, task)
        for entry in task.accepted:
            ftp_url = urlparse(entry.get('url'))
            ftp_url = ftp_url._replace(path=unquote(ftp_url.path))
            current_path = os.path.dirname(ftp_url.path)
            try:
                ftp = self.ftp_connect(config, ftp_url, current_path)
            except ftplib.all_errors as e:
                entry.fail("Unable to connect to server : %s" % (e))
                # NOTE(review): this aborts the remaining accepted entries
                # too, not just the failing one -- confirm that is intended.
                break

            if not os.path.isdir(config['ftp_tmp_path']):
                log.debug('creating base path: %s' % config['ftp_tmp_path'])
                os.mkdir(config['ftp_tmp_path'])

            file_name = os.path.basename(ftp_url.path)

            try:
                # Directory: cwd succeeds, so walk it recursively.
                ftp = self.check_connection(ftp, config, ftp_url, current_path)
                ftp.cwd(file_name)
                self.ftp_walk(ftp, os.path.join(config['ftp_tmp_path'], file_name), config, ftp_url, ftp_url.path)
                ftp = self.check_connection(ftp, config, ftp_url, current_path)
                ftp.cwd('..')
                if config['delete_origin']:
                    ftp.rmd(file_name)
            except ftplib.error_perm:
                # File: cwd into it fails with a permanent error.
                self.ftp_down(ftp, file_name, config['ftp_tmp_path'], config, ftp_url, current_path)

            ftp.close()

    def on_task_output(self, task, config):
        """Count this as an output plugin."""

    def ftp_walk(self, ftp, tmp_path, config, ftp_url, current_path):
        """Recursively mirror the remote working directory into tmp_path."""
        log.debug("DIR->" + ftp.pwd())
        log.debug("FTP tmp_path : " + tmp_path)
        try:
            ftp = self.check_connection(ftp, config, ftp_url, current_path)
            dirs = ftp.nlst(ftp.pwd())
        except ftplib.error_perm as ex:
            log.info("Error %s" % ex)
            return ftp

        if not dirs:
            if config['download_empty_dirs']:
                os.mkdir(tmp_path)
            else:
                log.debug("Empty directory, skipping.")
            return ftp

        for file_name in (path for path in dirs if path not in ('.', '..')):
            file_name = os.path.basename(file_name)
            try:
                # Subdirectory: descend, mirror, then come back up.
                ftp = self.check_connection(ftp, config, ftp_url, current_path)
                ftp.cwd(file_name)
                if not os.path.isdir(tmp_path):
                    os.mkdir(tmp_path)
                    log.debug("Directory %s created" % tmp_path)
                ftp = self.ftp_walk(ftp,
                                    os.path.join(tmp_path, os.path.basename(file_name)),
                                    config,
                                    ftp_url,
                                    os.path.join(current_path, os.path.basename(file_name)))
                ftp = self.check_connection(ftp, config, ftp_url, current_path)
                ftp.cwd('..')
                if config['delete_origin']:
                    ftp.rmd(os.path.basename(file_name))
            except ftplib.error_perm:
                # Regular file (cwd refused): download it.
                ftp = self.ftp_down(ftp, os.path.basename(file_name), tmp_path, config, ftp_url, current_path)
                ftp = self.check_connection(ftp, config, ftp_url, current_path)
        return ftp

    def ftp_down(self, ftp, file_name, tmp_path, config, ftp_url, current_path):
        """Download one remote file into tmp_path, resuming (REST) after
        dropped connections, with a bounded number of retries."""
        log.debug("Downloading %s into %s" % (file_name, tmp_path))
        if not os.path.exists(tmp_path):
            os.makedirs(tmp_path)

        # 'a+b' keeps any partial content so the transfer can be resumed.
        local_file = open(os.path.join(tmp_path, file_name), 'a+b')

        ftp = self.check_connection(ftp, config, ftp_url, current_path)
        try:
            ftp.sendcmd("TYPE I")
            file_size = ftp.size(file_name)
        except Exception:
            # SIZE may be unsupported or denied; use a dummy size so the
            # loop below still runs at least once.
            file_size = 1

        max_attempts = 5
        log.info("Starting download of %s into %s" % (file_name, tmp_path))
        while file_size > local_file.tell():
            try:
                if local_file.tell() != 0:
                    # Resume from the current local offset.
                    ftp = self.check_connection(ftp, config, ftp_url, current_path)
                    ftp.retrbinary('RETR %s' % file_name, local_file.write, local_file.tell())
                else:
                    ftp = self.check_connection(ftp, config, ftp_url, current_path)
                    ftp.retrbinary('RETR %s' % file_name, local_file.write)
            except Exception as error:
                # BUG FIX: max_attempts was never decremented, so a
                # persistently failing file made this loop spin forever.
                max_attempts -= 1
                if max_attempts != 0:
                    log.debug("Retrying download after error %s" % error)
                else:
                    log.error("Too many errors downloading %s. Aborting." % file_name)
                    break

        local_file.close()

        if config['delete_origin']:
            ftp = self.check_connection(ftp, config, ftp_url, current_path)
            ftp.delete(file_name)

        return ftp
@event('plugin.register')
def register_plugin():
    # Register under the 'ftp_download' config keyword; api_ver=2 selects
    # the config-passing plugin API (on_task_* receive the config dict).
    plugin.register(OutputFtp, 'ftp_download', api_ver=2)
| mit |
hrishioa/Aviato | flask/Lib/site-packages/requests/packages/chardet/eucjpprober.py | 2919 | 3678 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import sys
from . import constants
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import EUCJPDistributionAnalysis
from .jpcntx import EUCJPContextAnalysis
from .mbcssm import EUCJPSMModel
class EUCJPProber(MultiByteCharSetProber):
    """Charset prober for EUC-JP.

    Combines the shared multi-byte machinery (coding state machine +
    character distribution analysis) with a Japanese-specific context
    analyzer; the final confidence is the max of the two analyzers.
    """

    def __init__(self):
        MultiByteCharSetProber.__init__(self)
        self._mCodingSM = CodingStateMachine(EUCJPSMModel)
        self._mDistributionAnalyzer = EUCJPDistributionAnalysis()
        self._mContextAnalyzer = EUCJPContextAnalysis()
        self.reset()

    def reset(self):
        # Base reset clears the distribution analyzer; the context
        # analyzer is specific to this subclass and reset here.
        MultiByteCharSetProber.reset(self)
        self._mContextAnalyzer.reset()

    def get_charset_name(self):
        return "EUC-JP"

    def feed(self, aBuf):
        """Feed bytes through the state machine; returns the prober state."""
        aLen = len(aBuf)
        for i in range(0, aLen):
            # PY3K: aBuf is a byte array, so aBuf[i] is an int, not a byte
            codingState = self._mCodingSM.next_state(aBuf[i])
            if codingState == constants.eError:
                # Byte sequence is illegal for EUC-JP: rule this charset out.
                if constants._debug:
                    sys.stderr.write(self.get_charset_name()
                                     + ' prober hit error at byte ' + str(i)
                                     + '\n')
                self._mState = constants.eNotMe
                break
            elif codingState == constants.eItsMe:
                self._mState = constants.eFoundIt
                break
            elif codingState == constants.eStart:
                # A complete character was recognized; feed the last two
                # bytes (the char may straddle the previous buffer) to the
                # analyzers.
                charLen = self._mCodingSM.get_current_charlen()
                if i == 0:
                    self._mLastChar[1] = aBuf[0]
                    self._mContextAnalyzer.feed(self._mLastChar, charLen)
                    self._mDistributionAnalyzer.feed(self._mLastChar, charLen)
                else:
                    self._mContextAnalyzer.feed(aBuf[i - 1:i + 1], charLen)
                    self._mDistributionAnalyzer.feed(aBuf[i - 1:i + 1],
                                                     charLen)

        # Remember the final byte for cross-buffer character handling.
        self._mLastChar[0] = aBuf[aLen - 1]

        if self.get_state() == constants.eDetecting:
            # Short-circuit once the context analyzer is confident enough.
            if (self._mContextAnalyzer.got_enough_data() and
                    (self.get_confidence() > constants.SHORTCUT_THRESHOLD)):
                self._mState = constants.eFoundIt

        return self.get_state()

    def get_confidence(self):
        # Trust whichever analyzer is more confident.
        contxtCf = self._mContextAnalyzer.get_confidence()
        distribCf = self._mDistributionAnalyzer.get_confidence()
        return max(contxtCf, distribCf)
| gpl-2.0 |
rubenvereecken/pokemongo-api | POGOProtos/Inventory/EggIncubator_pb2.py | 9 | 5234 | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: POGOProtos/Inventory/EggIncubator.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from POGOProtos.Inventory.Item import ItemId_pb2 as POGOProtos_dot_Inventory_dot_Item_dot_ItemId__pb2
from POGOProtos.Inventory import EggIncubatorType_pb2 as POGOProtos_dot_Inventory_dot_EggIncubatorType__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='POGOProtos/Inventory/EggIncubator.proto',
package='POGOProtos.Inventory',
syntax='proto3',
serialized_pb=_b('\n\'POGOProtos/Inventory/EggIncubator.proto\x12\x14POGOProtos.Inventory\x1a&POGOProtos/Inventory/Item/ItemId.proto\x1a+POGOProtos/Inventory/EggIncubatorType.proto\"\xed\x01\n\x0c\x45ggIncubator\x12\n\n\x02id\x18\x01 \x01(\t\x12\x32\n\x07item_id\x18\x02 \x01(\x0e\x32!.POGOProtos.Inventory.Item.ItemId\x12>\n\x0eincubator_type\x18\x03 \x01(\x0e\x32&.POGOProtos.Inventory.EggIncubatorType\x12\x16\n\x0euses_remaining\x18\x04 \x01(\x05\x12\x12\n\npokemon_id\x18\x05 \x01(\x04\x12\x17\n\x0fstart_km_walked\x18\x06 \x01(\x01\x12\x18\n\x10target_km_walked\x18\x07 \x01(\x01\x62\x06proto3')
,
dependencies=[POGOProtos_dot_Inventory_dot_Item_dot_ItemId__pb2.DESCRIPTOR,POGOProtos_dot_Inventory_dot_EggIncubatorType__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_EGGINCUBATOR = _descriptor.Descriptor(
name='EggIncubator',
full_name='POGOProtos.Inventory.EggIncubator',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='POGOProtos.Inventory.EggIncubator.id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='item_id', full_name='POGOProtos.Inventory.EggIncubator.item_id', index=1,
number=2, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='incubator_type', full_name='POGOProtos.Inventory.EggIncubator.incubator_type', index=2,
number=3, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='uses_remaining', full_name='POGOProtos.Inventory.EggIncubator.uses_remaining', index=3,
number=4, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='pokemon_id', full_name='POGOProtos.Inventory.EggIncubator.pokemon_id', index=4,
number=5, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='start_km_walked', full_name='POGOProtos.Inventory.EggIncubator.start_km_walked', index=5,
number=6, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='target_km_walked', full_name='POGOProtos.Inventory.EggIncubator.target_km_walked', index=6,
number=7, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=151,
serialized_end=388,
)
_EGGINCUBATOR.fields_by_name['item_id'].enum_type = POGOProtos_dot_Inventory_dot_Item_dot_ItemId__pb2._ITEMID
_EGGINCUBATOR.fields_by_name['incubator_type'].enum_type = POGOProtos_dot_Inventory_dot_EggIncubatorType__pb2._EGGINCUBATORTYPE
DESCRIPTOR.message_types_by_name['EggIncubator'] = _EGGINCUBATOR
EggIncubator = _reflection.GeneratedProtocolMessageType('EggIncubator', (_message.Message,), dict(
DESCRIPTOR = _EGGINCUBATOR,
__module__ = 'POGOProtos.Inventory.EggIncubator_pb2'
# @@protoc_insertion_point(class_scope:POGOProtos.Inventory.EggIncubator)
))
_sym_db.RegisterMessage(EggIncubator)
# @@protoc_insertion_point(module_scope)
| mit |
Tehsmash/ironic | ironic/cmd/dbsync.py | 7 | 2897 | # -*- encoding: utf-8 -*-
#
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Run storage database migration.
"""
import sys
from oslo.config import cfg
from ironic.common import service
from ironic.db import migration
CONF = cfg.CONF
class DBCommand(object):
    """Handlers for the ironic-dbsync sub-commands.

    Each method maps 1:1 to a CLI sub-command and delegates to
    ironic.db.migration; sub-command arguments are read from CONF.command
    (populated by oslo.config's SubCommandOpt parsing).
    """

    def upgrade(self):
        migration.upgrade(CONF.command.revision)

    def downgrade(self):
        migration.downgrade(CONF.command.revision)

    def revision(self):
        # Generate a new alembic revision script.
        migration.revision(CONF.command.message, CONF.command.autogenerate)

    def stamp(self):
        # Mark the database as being at the given revision without migrating.
        migration.stamp(CONF.command.revision)

    def version(self):
        print(migration.version())

    def create_schema(self):
        migration.create_schema()
def add_command_parsers(subparsers):
    """Register one sub-parser per DBCommand method on ``subparsers``."""
    command_object = DBCommand()

    # (sub-command name, handler, [(option flags, add_argument kwargs)])
    command_specs = [
        ('upgrade', command_object.upgrade,
         [(('--revision',), {'nargs': '?'})]),
        ('downgrade', command_object.downgrade,
         [(('--revision',), {'nargs': '?'})]),
        ('stamp', command_object.stamp,
         [(('--revision',), {'nargs': '?'})]),
        ('revision', command_object.revision,
         [(('-m', '--message'), {}),
          (('--autogenerate',), {'action': 'store_true'})]),
        ('version', command_object.version, []),
        ('create_schema', command_object.create_schema, []),
    ]

    for name, handler, arguments in command_specs:
        parser = subparsers.add_parser(name)
        parser.set_defaults(func=handler)
        for flags, kwargs in arguments:
            parser.add_argument(*flags, **kwargs)
# Expose the migration sub-commands (upgrade, downgrade, ...) as a single
# CLI sub-command option; add_command_parsers wires up the parsers.
command_opt = cfg.SubCommandOpt('command',
                                title='Command',
                                help='Available commands',
                                handler=add_command_parsers)
CONF.register_cli_opt(command_opt)
def main():
    """CLI entry point: parse argv and run the selected migration action."""
    # this is hack to work with previous usage of ironic-dbsync
    # pls change it to ironic-dbsync upgrade
    valid_commands = set([
        'upgrade', 'downgrade', 'revision',
        'version', 'stamp', 'create_schema',
    ])
    # Default to 'upgrade' when no known sub-command appears anywhere in
    # argv, preserving the historical ironic-dbsync behaviour.
    # NOTE(review): this scans *all* of argv, so an option value that
    # happens to equal a command name would also suppress the default --
    # confirm that is acceptable.
    if not set(sys.argv) & valid_commands:
        sys.argv.append('upgrade')
    service.prepare_service(sys.argv)
    CONF.command.func()
| apache-2.0 |
yslzsl/linux | tools/perf/scripts/python/failed-syscalls-by-pid.py | 1996 | 2233 | # failed system call counts, by pid
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide failed system call totals, broken down by pid.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
# Usage text printed when too many arguments are given.  Fixed to name this
# script (was copy-pasted from syscall-counts-by-pid.py).
usage = "perf script -s failed-syscalls-by-pid.py [comm|pid]\n"

# Optional filter: a single argument restricts output to one command name
# or, if it parses as an integer, one pid.
for_comm = None
for_pid = None

if len(sys.argv) > 2:
	sys.exit(usage)

if len(sys.argv) > 1:
	try:
		for_pid = int(sys.argv[1])
	except ValueError:
		# Not numeric: treat the argument as a command name.
		for_comm = sys.argv[1]

# syscalls[comm][pid][syscall id][errno] -> failure count (autovivifying
# dict provided by perf's Util.autodict).
syscalls = autodict()
def trace_begin():
	# Called once by perf before event processing starts.
	print "Press control+C to stop and show the summary"
def trace_end():
	# Called once by perf after the last event; emit the final report.
	print_error_totals()
def raw_syscalls__sys_exit(event_name, context, common_cpu,
	common_secs, common_nsecs, common_pid, common_comm,
	common_callchain, id, ret):
	# Skip events that do not match the optional comm/pid filter.
	if (for_comm and common_comm != for_comm) or \
	   (for_pid and common_pid != for_pid ):
		return

	# Only failed syscalls (negative return value) are counted.
	if ret < 0:
		try:
			syscalls[common_comm][common_pid][id][ret] += 1
		except TypeError:
			# First failure for this key: the autovivified leaf is a
			# dict, not an int, so += raises TypeError; seed the count.
			syscalls[common_comm][common_pid][id][ret] = 1
def syscalls__sys_exit(event_name, context, common_cpu,
	common_secs, common_nsecs, common_pid, common_comm,
	id, ret):
	# Legacy handler signature without common_callchain.  Forwarding
	# **locals() alone would omit raw_syscalls__sys_exit()'s required
	# common_callchain parameter and raise TypeError the first time perf
	# dispatched this handler, so pass an empty callchain explicitly.
	raw_syscalls__sys_exit(event_name, context, common_cpu,
		common_secs, common_nsecs, common_pid, common_comm,
		[], id, ret)
def print_error_totals():
	# Render the accumulated failure counts: one section per (comm, pid),
	# one line per errno, most frequent errors first.
	if for_comm is not None:
		print "\nsyscall errors for %s:\n\n" % (for_comm),
	else:
		print "\nsyscall errors:\n\n",
	print "%-30s %10s\n" % ("comm [pid]", "count"),
	print "%-30s %10s\n" % ("------------------------------", \
	    "----------"),
	comm_keys = syscalls.keys()
	for comm in comm_keys:
		pid_keys = syscalls[comm].keys()
		for pid in pid_keys:
			print "\n%s [%d]\n" % (comm, pid),
			id_keys = syscalls[comm][pid].keys()
			for id in id_keys:
				print " syscall: %-16s\n" % syscall_name(id),
				ret_keys = syscalls[comm][pid][id].keys()
				# Sort by count (descending), then errno.
				for ret, val in sorted(syscalls[comm][pid][id].iteritems(), key = lambda(k, v): (v, k), reverse = True):
					print " err = %-20s %10d\n" % (strerror(ret), val),
| gpl-2.0 |
ubirch/aws-tools | virtual-env/lib/python2.7/site-packages/boto/ec2/autoscale/tag.py | 173 | 3379 | # Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
class Tag(object):
    """
    A name/value tag on an AutoScalingGroup resource.

    :ivar key: The key of the tag.
    :ivar value: The value of the tag.
    :ivar propagate_at_launch: Boolean value which specifies whether the
        new tag will be applied to instances launched after the tag is created.
    :ivar resource_id: The name of the autoscaling group.
    :ivar resource_type: The only supported resource type at this time
        is "auto-scaling-group".
    """

    # SAX element name -> attribute set verbatim by endElement(); the
    # boolean PropagateAtLaunch element is handled separately.
    _SIMPLE_ELEMENTS = {
        'Key': 'key',
        'Value': 'value',
        'ResourceId': 'resource_id',
        'ResourceType': 'resource_type',
    }

    def __init__(self, connection=None, key=None, value=None,
                 propagate_at_launch=False, resource_id=None,
                 resource_type='auto-scaling-group'):
        self.connection = connection
        self.key = key
        self.value = value
        self.propagate_at_launch = propagate_at_launch
        self.resource_id = resource_id
        self.resource_type = resource_type

    def __repr__(self):
        return 'Tag(%s=%s)' % (self.key, self.value)

    def startElement(self, name, attrs, connection):
        # Tags contain no nested XML structure; nothing to do.
        pass

    def endElement(self, name, value, connection):
        # Store the text content of a closing XML element on this object;
        # unknown elements are silently ignored.
        if name == 'PropagateAtLaunch':
            self.propagate_at_launch = value.lower() == 'true'
        else:
            attr = self._SIMPLE_ELEMENTS.get(name)
            if attr is not None:
                setattr(self, attr, value)

    def build_params(self, params, i):
        """
        Populates a dictionary with the name/value pairs necessary
        to identify this Tag in a request.
        """
        prefix = 'Tags.member.%d.' % i
        params[prefix + 'ResourceId'] = self.resource_id
        params[prefix + 'ResourceType'] = self.resource_type
        params[prefix + 'Key'] = self.key
        params[prefix + 'Value'] = self.value
        params[prefix + 'PropagateAtLaunch'] = \
            'true' if self.propagate_at_launch else 'false'

    def delete(self):
        # Ask the owning autoscale connection to remove this tag.
        return self.connection.delete_tags([self])
| apache-2.0 |
emilio/servo | tests/wpt/web-platform-tests/tools/wptrunner/wptrunner/tests/test_update.py | 3 | 29387 | import json
import mock
import os
import pytest
import sys
from io import BytesIO
from .. import metadata, manifestupdate
from ..update import WPTUpdate
from ..update.base import StepRunner, Step
from mozlog import structuredlog, handlers, formatters
sys.path.insert(0, os.path.join(os.path.dirname(__file__), os.pardir, os.pardir, os.pardir))
from manifest import manifest, item as manifest_item
def rel_path_to_test_url(rel_path):
    """Convert an OS-native relative file path to a wpt URL path."""
    assert not os.path.isabs(rel_path)
    return "/".join(rel_path.split(os.sep))
def SourceFileWithTest(path, hash, cls, *args):
    # Build a mock manifest SourceFile exposing a single test item of type
    # `cls` rooted at the synthetic /foobar tests path; used by
    # create_test_manifest() to assemble in-memory manifests.
    s = mock.Mock(rel_path=path, hash=hash)
    test = cls("/foobar", path, "/", rel_path_to_test_url(path), *args)
    s.manifest_items = mock.Mock(return_value=(cls.item_type, [test]))
    return s
# Map manifest item-type names (as used in the `tests` tuples below) to the
# manifest item classes they instantiate.
item_classes = {"testharness": manifest_item.TestharnessTest,
                "reftest": manifest_item.RefTest,
                "reftest_node": manifest_item.RefTestNode,
                "manual": manifest_item.ManualTest,
                "stub": manifest_item.Stub,
                "wdspec": manifest_item.WebDriverSpecTest,
                "conformancechecker": manifest_item.ConformanceCheckerTest,
                "visual": manifest_item.VisualTest,
                "support": manifest_item.SupportFile}
def update(tests, *logs):
    """Feed each log to a fresh updater for `tests` and return the list of
    (test path, expected manifest) updates from metadata.update_results."""
    id_test_map, updater = create_updater(tests)
    for log in logs:
        log = create_log(log)
        updater.update_from_log(log)

    return list(metadata.update_results(id_test_map,
                                        ["debug", "os", "version", "processor", "bits"],
                                        ["debug"],
                                        False))
def create_updater(tests, url_base="/", **kwargs):
    """Build (id_test_map, ExpectedUpdater) for the given synthetic tests.

    Monkey-patches metadata.load_expected so that expected metadata is
    served from the in-memory manifests compiled here rather than from
    disk.  The lambda closes over `expected_data`, which is populated
    after the patch is installed; that is safe because load_expected is
    only invoked later, while the updater is processing logs.
    """
    m = create_test_manifest(tests, url_base)

    expected_data = {}
    metadata.load_expected = lambda _, __, test_path, *args: expected_data[test_path]

    # (The previous version initialized id_test_map to {} first; that
    # assignment was dead and has been removed.)
    id_test_map = metadata.create_test_tree(None, m)

    for test_path, test_ids, test_type, manifest_str in tests:
        expected_data[test_path] = manifestupdate.compile(BytesIO(manifest_str),
                                                          test_path,
                                                          url_base)

    return id_test_map, metadata.ExpectedUpdater(id_test_map, **kwargs)
def create_log(entries):
    """Serialize log `entries` into a rewound BytesIO for update_from_log().

    `entries` is either a list of (action, kwargs) pairs, replayed through
    a real mozlog StructuredLogger (newline-delimited JSON), or a dict,
    dumped directly as a wptreport-style JSON blob.
    """
    data = BytesIO()
    if isinstance(entries, list):
        logger = structuredlog.StructuredLogger("expected_test")
        handler = handlers.StreamHandler(data, formatters.JSONFormatter())
        logger.add_handler(handler)

        for item in entries:
            action, kwargs = item
            getattr(logger, action)(**kwargs)

        # Detach the handler so the shared logger name can be reused.
        logger.remove_handler(handler)
    else:
        json.dump(entries, data)
    # Rewind so callers can read from the start.
    data.seek(0)
    return data
def suite_log(entries, run_info=None):
    """Wrap `entries` in matching suite_start/suite_end log events."""
    log = [("suite_start", {"tests": [], "run_info": run_info or {}})]
    log.extend(entries)
    log.append(("suite_end", {}))
    return log
def create_test_manifest(tests, url_base="/"):
    # Build a real manifest.Manifest from (path, ids, test_type, metadata)
    # tuples.  Entries whose test_type is falsy (e.g. __dir__ metadata
    # placeholders) contribute no source file.  The hash is a synthetic
    # 40-char string derived from the tuple's index.
    source_files = []
    for i, (test, _, test_type, _) in enumerate(tests):
        if test_type:
            source_files.append((SourceFileWithTest(test, str(i) * 40, item_classes[test_type]), True))
    m = manifest.Manifest()
    m.update(source_files)
    return m
@pytest.mark.xfail(sys.version[0] == "3",
reason="metadata doesn't support py3")
def test_update_0():
tests = [("path/to/test.htm", ["/path/to/test.htm"], "testharness",
"""[test.htm]
[test1]
expected: FAIL""")]
log = suite_log([("test_start", {"test": "/path/to/test.htm"}),
("test_status", {"test": "/path/to/test.htm",
"subtest": "test1",
"status": "PASS",
"expected": "FAIL"}),
("test_end", {"test": "/path/to/test.htm",
"status": "OK"})])
updated = update(tests, log)
assert len(updated) == 1
assert updated[0][1].is_empty
@pytest.mark.xfail(sys.version[0] == "3",
reason="metadata doesn't support py3")
def test_update_1():
test_id = "/path/to/test.htm"
tests = [("path/to/test.htm", [test_id], "testharness",
"""[test.htm]
[test1]
expected: ERROR""")]
log = suite_log([("test_start", {"test": test_id}),
("test_status", {"test": test_id,
"subtest": "test1",
"status": "FAIL",
"expected": "ERROR"}),
("test_end", {"test": test_id,
"status": "OK"})])
updated = update(tests, log)
new_manifest = updated[0][1]
assert not new_manifest.is_empty
assert new_manifest.get_test(test_id).children[0].get("expected") == "FAIL"
@pytest.mark.xfail(sys.version[0] == "3",
reason="metadata doesn't support py3")
def test_skip_0():
test_id = "/path/to/test.htm"
tests = [("path/to/test.htm", [test_id], "testharness",
"""[test.htm]
[test1]
expected: FAIL""")]
log = suite_log([("test_start", {"test": test_id}),
("test_status", {"test": test_id,
"subtest": "test1",
"status": "FAIL",
"expected": "FAIL"}),
("test_end", {"test": test_id,
"status": "OK"})])
updated = update(tests, log)
assert not updated
@pytest.mark.xfail(sys.version[0] == "3",
reason="metadata doesn't support py3")
def test_new_subtest():
test_id = "/path/to/test.htm"
tests = [("path/to/test.htm", [test_id], "testharness", """[test.htm]
[test1]
expected: FAIL""")]
log = suite_log([("test_start", {"test": test_id}),
("test_status", {"test": test_id,
"subtest": "test1",
"status": "FAIL",
"expected": "FAIL"}),
("test_status", {"test": test_id,
"subtest": "test2",
"status": "FAIL",
"expected": "PASS"}),
("test_end", {"test": test_id,
"status": "OK"})])
updated = update(tests, log)
new_manifest = updated[0][1]
assert not new_manifest.is_empty
assert new_manifest.get_test(test_id).children[0].get("expected") == "FAIL"
assert new_manifest.get_test(test_id).children[1].get("expected") == "FAIL"
@pytest.mark.xfail(sys.version[0] == "3",
reason="metadata doesn't support py3")
def test_update_multiple_0():
test_id = "/path/to/test.htm"
tests = [("path/to/test.htm", [test_id], "testharness", """[test.htm]
[test1]
expected: FAIL""")]
log_0 = suite_log([("test_start", {"test": test_id}),
("test_status", {"test": test_id,
"subtest": "test1",
"status": "FAIL",
"expected": "FAIL"}),
("test_end", {"test": test_id,
"status": "OK"})],
run_info={"debug": False, "os": "osx"})
log_1 = suite_log([("test_start", {"test": test_id}),
("test_status", {"test": test_id,
"subtest": "test1",
"status": "TIMEOUT",
"expected": "FAIL"}),
("test_end", {"test": test_id,
"status": "OK"})],
run_info={"debug": False, "os": "linux"})
updated = update(tests, log_0, log_1)
new_manifest = updated[0][1]
assert not new_manifest.is_empty
assert new_manifest.get_test(test_id).children[0].get(
"expected", {"debug": False, "os": "osx"}) == "FAIL"
assert new_manifest.get_test(test_id).children[0].get(
"expected", {"debug": False, "os": "linux"}) == "TIMEOUT"
@pytest.mark.xfail(sys.version[0] == "3",
reason="metadata doesn't support py3")
def test_update_multiple_1():
test_id = "/path/to/test.htm"
tests = [("path/to/test.htm", [test_id], "testharness", """[test.htm]
[test1]
expected: FAIL""")]
log_0 = suite_log([("test_start", {"test": test_id}),
("test_status", {"test": test_id,
"subtest": "test1",
"status": "FAIL",
"expected": "FAIL"}),
("test_end", {"test": test_id,
"status": "OK"})],
run_info={"debug": False, "os": "osx"})
log_1 = suite_log([("test_start", {"test": test_id}),
("test_status", {"test": test_id,
"subtest": "test1",
"status": "TIMEOUT",
"expected": "FAIL"}),
("test_end", {"test": test_id,
"status": "OK"})],
run_info={"debug": False, "os": "linux"})
updated = update(tests, log_0, log_1)
new_manifest = updated[0][1]
assert not new_manifest.is_empty
assert new_manifest.get_test(test_id).children[0].get(
"expected", {"debug": False, "os": "osx"}) == "FAIL"
assert new_manifest.get_test(test_id).children[0].get(
"expected", {"debug": False, "os": "linux"}) == "TIMEOUT"
assert new_manifest.get_test(test_id).children[0].get(
"expected", {"debug": False, "os": "windows"}) == "FAIL"
@pytest.mark.xfail(sys.version[0] == "3",
reason="metadata doesn't support py3")
def test_update_multiple_2():
test_id = "/path/to/test.htm"
tests = [("path/to/test.htm", [test_id], "testharness", """[test.htm]
[test1]
expected: FAIL""")]
log_0 = suite_log([("test_start", {"test": test_id}),
("test_status", {"test": test_id,
"subtest": "test1",
"status": "FAIL",
"expected": "FAIL"}),
("test_end", {"test": test_id,
"status": "OK"})],
run_info={"debug": False, "os": "osx"})
log_1 = suite_log([("test_start", {"test": test_id}),
("test_status", {"test": test_id,
"subtest": "test1",
"status": "TIMEOUT",
"expected": "FAIL"}),
("test_end", {"test": test_id,
"status": "OK"})],
run_info={"debug": True, "os": "osx"})
updated = update(tests, log_0, log_1)
new_manifest = updated[0][1]
assert not new_manifest.is_empty
assert new_manifest.get_test(test_id).children[0].get(
"expected", {"debug": False, "os": "osx"}) == "FAIL"
assert new_manifest.get_test(test_id).children[0].get(
"expected", {"debug": True, "os": "osx"}) == "TIMEOUT"
@pytest.mark.xfail(sys.version[0] == "3",
reason="metadata doesn't support py3")
def test_update_multiple_3():
test_id = "/path/to/test.htm"
tests = [("path/to/test.htm", [test_id], "testharness", """[test.htm]
[test1]
expected:
if debug: FAIL
if not debug and os == "osx": TIMEOUT""")]
log_0 = suite_log([("test_start", {"test": test_id}),
("test_status", {"test": test_id,
"subtest": "test1",
"status": "FAIL",
"expected": "FAIL"}),
("test_end", {"test": test_id,
"status": "OK"})],
run_info={"debug": False, "os": "osx"})
log_1 = suite_log([("test_start", {"test": test_id}),
("test_status", {"test": test_id,
"subtest": "test1",
"status": "TIMEOUT",
"expected": "FAIL"}),
("test_end", {"test": test_id,
"status": "OK"})],
run_info={"debug": True, "os": "osx"})
updated = update(tests, log_0, log_1)
new_manifest = updated[0][1]
assert not new_manifest.is_empty
assert new_manifest.get_test(test_id).children[0].get(
"expected", {"debug": False, "os": "osx"}) == "FAIL"
assert new_manifest.get_test(test_id).children[0].get(
"expected", {"debug": True, "os": "osx"}) == "TIMEOUT"
@pytest.mark.xfail(sys.version[0] == "3",
reason="metadata doesn't support py3")
def test_update_ignore_existing():
test_id = "/path/to/test.htm"
tests = [("path/to/test.htm", [test_id], "testharness", """[test.htm]
[test1]
expected:
if debug: TIMEOUT
if not debug and os == "osx": NOTRUN""")]
log_0 = suite_log([("test_start", {"test": test_id}),
("test_status", {"test": test_id,
"subtest": "test1",
"status": "FAIL",
"expected": "PASS"}),
("test_end", {"test": test_id,
"status": "OK"})],
run_info={"debug": False, "os": "linux"})
log_1 = suite_log([("test_start", {"test": test_id}),
("test_status", {"test": test_id,
"subtest": "test1",
"status": "FAIL",
"expected": "PASS"}),
("test_end", {"test": test_id,
"status": "OK"})],
run_info={"debug": True, "os": "windows"})
updated = update(tests, log_0, log_1)
new_manifest = updated[0][1]
assert not new_manifest.is_empty
assert new_manifest.get_test(test_id).children[0].get(
"expected", {"debug": True, "os": "osx"}) == "FAIL"
assert new_manifest.get_test(test_id).children[0].get(
"expected", {"debug": False, "os": "osx"}) == "NOTRUN"
@pytest.mark.xfail(sys.version[0] == "3",
reason="metadata doesn't support py3")
def test_update_assertion_count_0():
test_id = "/path/to/test.htm"
tests = [("path/to/test.htm", [test_id], "testharness", """[test.htm]
max-asserts: 4
min-asserts: 2
""")]
log_0 = suite_log([("test_start", {"test": test_id}),
("assertion_count", {"test": test_id,
"count": 6,
"min_expected": 2,
"max_expected": 4}),
("test_end", {"test": test_id,
"status": "OK"})])
updated = update(tests, log_0)
new_manifest = updated[0][1]
assert not new_manifest.is_empty
assert new_manifest.get_test(test_id).get("max-asserts") == 7
assert new_manifest.get_test(test_id).get("min-asserts") == 2
@pytest.mark.xfail(sys.version[0] == "3",
reason="metadata doesn't support py3")
def test_update_assertion_count_1():
test_id = "/path/to/test.htm"
tests = [("path/to/test.htm", [test_id], "testharness", """[test.htm]
max-asserts: 4
min-asserts: 2
""")]
log_0 = suite_log([("test_start", {"test": test_id}),
("assertion_count", {"test": test_id,
"count": 1,
"min_expected": 2,
"max_expected": 4}),
("test_end", {"test": test_id,
"status": "OK"})])
updated = update(tests, log_0)
new_manifest = updated[0][1]
assert not new_manifest.is_empty
assert new_manifest.get_test(test_id).get("max-asserts") == 4
assert new_manifest.get_test(test_id).has_key("min-asserts") is False
@pytest.mark.xfail(sys.version[0] == "3",
reason="metadata doesn't support py3")
def test_update_assertion_count_2():
test_id = "/path/to/test.htm"
tests = [("path/to/test.htm", [test_id], "testharness", """[test.htm]
max-asserts: 4
min-asserts: 2
""")]
log_0 = suite_log([("test_start", {"test": test_id}),
("assertion_count", {"test": test_id,
"count": 3,
"min_expected": 2,
"max_expected": 4}),
("test_end", {"test": test_id,
"status": "OK"})])
updated = update(tests, log_0)
assert not updated
@pytest.mark.xfail(sys.version[0] == "3",
reason="metadata doesn't support py3")
def test_update_assertion_count_3():
test_id = "/path/to/test.htm"
tests = [("path/to/test.htm", [test_id], "testharness", """[test.htm]
max-asserts: 4
min-asserts: 2
""")]
log_0 = suite_log([("test_start", {"test": test_id}),
("assertion_count", {"test": test_id,
"count": 6,
"min_expected": 2,
"max_expected": 4}),
("test_end", {"test": test_id,
"status": "OK"})],
run_info={"os": "windows"})
log_1 = suite_log([("test_start", {"test": test_id}),
("assertion_count", {"test": test_id,
"count": 7,
"min_expected": 2,
"max_expected": 4}),
("test_end", {"test": test_id,
"status": "OK"})],
run_info={"os": "linux"})
updated = update(tests, log_0, log_1)
new_manifest = updated[0][1]
assert not new_manifest.is_empty
assert new_manifest.get_test(test_id).get("max-asserts") == 8
assert new_manifest.get_test(test_id).get("min-asserts") == 2
@pytest.mark.xfail(sys.version[0] == "3",
reason="metadata doesn't support py3")
def test_update_assertion_count_4():
test_id = "/path/to/test.htm"
tests = [("path/to/test.htm", [test_id], "testharness", """[test.htm]""")]
log_0 = suite_log([("test_start", {"test": test_id}),
("assertion_count", {"test": test_id,
"count": 6,
"min_expected": 0,
"max_expected": 0}),
("test_end", {"test": test_id,
"status": "OK"})],
run_info={"os": "windows"})
log_1 = suite_log([("test_start", {"test": test_id}),
("assertion_count", {"test": test_id,
"count": 7,
"min_expected": 0,
"max_expected": 0}),
("test_end", {"test": test_id,
"status": "OK"})],
run_info={"os": "linux"})
updated = update(tests, log_0, log_1)
new_manifest = updated[0][1]
assert not new_manifest.is_empty
assert new_manifest.get_test(test_id).get("max-asserts") == "8"
assert new_manifest.get_test(test_id).has_key("min-asserts") is False
@pytest.mark.xfail(sys.version[0] == "3",
reason="metadata doesn't support py3")
def test_update_lsan_0():
test_id = "/path/to/test.htm"
dir_id = "path/to/__dir__"
tests = [("path/to/test.htm", [test_id], "testharness", ""),
("path/to/__dir__", [dir_id], None, "")]
log_0 = suite_log([("lsan_leak", {"scope": "path/to/",
"frames": ["foo", "bar"]})])
updated = update(tests, log_0)
new_manifest = updated[0][1]
assert not new_manifest.is_empty
assert new_manifest.get("lsan-allowed") == ["foo"]
@pytest.mark.xfail(sys.version[0] == "3",
reason="metadata doesn't support py3")
def test_update_lsan_1():
test_id = "/path/to/test.htm"
dir_id = "path/to/__dir__"
tests = [("path/to/test.htm", [test_id], "testharness", ""),
("path/to/__dir__", [dir_id], None, """
lsan-allowed: [foo]""")]
log_0 = suite_log([("lsan_leak", {"scope": "path/to/",
"frames": ["foo", "bar"]}),
("lsan_leak", {"scope": "path/to/",
"frames": ["baz", "foobar"]})])
updated = update(tests, log_0)
new_manifest = updated[0][1]
assert not new_manifest.is_empty
assert new_manifest.get("lsan-allowed") == ["baz", "foo"]
@pytest.mark.xfail(sys.version[0] == "3",
reason="metadata doesn't support py3")
def test_update_lsan_2():
test_id = "/path/to/test.htm"
dir_id = "path/to/__dir__"
tests = [("path/to/test.htm", [test_id], "testharness", ""),
("path/__dir__", ["path/__dir__"], None, """
lsan-allowed: [foo]"""),
("path/to/__dir__", [dir_id], None, "")]
log_0 = suite_log([("lsan_leak", {"scope": "path/to/",
"frames": ["foo", "bar"],
"allowed_match": ["foo"]}),
("lsan_leak", {"scope": "path/to/",
"frames": ["baz", "foobar"]})])
updated = update(tests, log_0)
new_manifest = updated[0][1]
assert not new_manifest.is_empty
assert new_manifest.get("lsan-allowed") == ["baz"]
@pytest.mark.xfail(sys.version[0] == "3",
reason="metadata doesn't support py3")
def test_update_lsan_3():
test_id = "/path/to/test.htm"
dir_id = "path/to/__dir__"
tests = [("path/to/test.htm", [test_id], "testharness", ""),
("path/to/__dir__", [dir_id], None, "")]
log_0 = suite_log([("lsan_leak", {"scope": "path/to/",
"frames": ["foo", "bar"]})],
run_info={"os": "win"})
log_1 = suite_log([("lsan_leak", {"scope": "path/to/",
"frames": ["baz", "foobar"]})],
run_info={"os": "linux"})
updated = update(tests, log_0, log_1)
new_manifest = updated[0][1]
assert not new_manifest.is_empty
assert new_manifest.get("lsan-allowed") == ["baz", "foo"]
@pytest.mark.xfail(sys.version[0] == "3",
reason="metadata doesn't support py3")
def test_update_wptreport_0():
tests = [("path/to/test.htm", ["/path/to/test.htm"], "testharness",
"""[test.htm]
[test1]
expected: FAIL""")]
log = {"run_info": {},
"results": [
{"test": "/path/to/test.htm",
"subtests": [{"name": "test1",
"status": "PASS",
"expected": "FAIL"}],
"status": "OK"}]}
updated = update(tests, log)
assert len(updated) == 1
assert updated[0][1].is_empty
@pytest.mark.xfail(sys.version[0] == "3",
reason="metadata doesn't support py3")
def test_update_wptreport_1():
tests = [("path/to/test.htm", ["/path/to/test.htm"], "testharness", ""),
("path/to/__dir__", ["path/to/__dir__"], None, "")]
log = {"run_info": {},
"results": [],
"lsan_leaks": [{"scope": "path/to/",
"frames": ["baz", "foobar"]}]}
updated = update(tests, log)
assert len(updated) == 1
assert updated[0][1].get("lsan-allowed") == ["baz"]
@pytest.mark.xfail(sys.version[0] == "3",
reason="metadata doesn't support py3")
def test_update_leak_total_0():
test_id = "/path/to/test.htm"
dir_id = "path/to/__dir__"
tests = [("path/to/test.htm", [test_id], "testharness", ""),
("path/to/__dir__", [dir_id], None, "")]
log_0 = suite_log([("mozleak_total", {"scope": "path/to/",
"process": "default",
"bytes": 100,
"threshold": 0,
"objects": []})])
updated = update(tests, log_0)
new_manifest = updated[0][1]
assert not new_manifest.is_empty
assert new_manifest.get("leak-threshold") == ['default:51200']
@pytest.mark.xfail(sys.version[0] == "3",
reason="metadata doesn't support py3")
def test_update_leak_total_1():
test_id = "/path/to/test.htm"
dir_id = "path/to/__dir__"
tests = [("path/to/test.htm", [test_id], "testharness", ""),
("path/to/__dir__", [dir_id], None, "")]
log_0 = suite_log([("mozleak_total", {"scope": "path/to/",
"process": "default",
"bytes": 100,
"threshold": 1000,
"objects": []})])
updated = update(tests, log_0)
assert not updated
@pytest.mark.xfail(sys.version[0] == "3",
reason="metadata doesn't support py3")
def test_update_leak_total_2():
test_id = "/path/to/test.htm"
dir_id = "path/to/__dir__"
tests = [("path/to/test.htm", [test_id], "testharness", ""),
("path/to/__dir__", [dir_id], None, """
leak-total: 110""")]
log_0 = suite_log([("mozleak_total", {"scope": "path/to/",
"process": "default",
"bytes": 100,
"threshold": 110,
"objects": []})])
updated = update(tests, log_0)
assert not updated
@pytest.mark.xfail(sys.version[0] == "3",
reason="metadata doesn't support py3")
def test_update_leak_total_3():
test_id = "/path/to/test.htm"
dir_id = "path/to/__dir__"
tests = [("path/to/test.htm", [test_id], "testharness", ""),
("path/to/__dir__", [dir_id], None, """
leak-total: 100""")]
log_0 = suite_log([("mozleak_total", {"scope": "path/to/",
"process": "default",
"bytes": 1000,
"threshold": 100,
"objects": []})])
updated = update(tests, log_0)
new_manifest = updated[0][1]
assert not new_manifest.is_empty
assert new_manifest.get("leak-threshold") == ['default:51200']
@pytest.mark.xfail(sys.version[0] == "3",
reason="metadata doesn't support py3")
def test_update_leak_total_4():
test_id = "/path/to/test.htm"
dir_id = "path/to/__dir__"
tests = [("path/to/test.htm", [test_id], "testharness", ""),
("path/to/__dir__", [dir_id], None, """
leak-total: 110""")]
log_0 = suite_log([
("lsan_leak", {"scope": "path/to/",
"frames": ["foo", "bar"]}),
("mozleak_total", {"scope": "path/to/",
"process": "default",
"bytes": 100,
"threshold": 110,
"objects": []})])
updated = update(tests, log_0)
new_manifest = updated[0][1]
assert not new_manifest.is_empty
assert new_manifest.has_key("leak-threshold") is False
class TestStep(Step):
    """Minimal update Step: stores a synthetic manifest on the state so the
    pickling round-trip in test_update_pickle has something to carry."""
    def create(self, state):
        test_id = "/path/to/test.htm"
        tests = [("path/to/test.htm", [test_id], "testharness", "")]
        state.foo = create_test_manifest(tests)
class UpdateRunner(StepRunner):
    # Runner exercising only the single TestStep above.
    steps = [TestStep]
@pytest.mark.xfail(sys.version[0] == "3",
                   reason="update.state doesn't support py3")
def test_update_pickle():
    # Regression test: WPTUpdate state must survive the pickling performed
    # between runner invocations (the abort=True construction exercises the
    # state-discarding path before the real run).
    logger = structuredlog.StructuredLogger("expected_test")
    args = {
        "test_paths": {
            "/": {"tests_path": ""},
        },
        "abort": False,
        "continue": False,
        "sync": False,
    }
    args2 = args.copy()
    args2["abort"] = True
    wptupdate = WPTUpdate(logger, **args2)
    wptupdate = WPTUpdate(logger, runner_cls=UpdateRunner, **args)
    wptupdate.run()
| mpl-2.0 |
bigzz/linux-stable | scripts/checkkconfigsymbols.py | 88 | 15783 | #!/usr/bin/env python2
"""Find Kconfig symbols that are referenced but not defined."""
# (c) 2014-2015 Valentin Rothberg <valentinrothberg@gmail.com>
# (c) 2014 Stefan Hengelein <stefan.hengelein@fau.de>
#
# Licensed under the terms of the GNU GPL License version 2
import difflib
import os
import re
import signal
import sys
from multiprocessing import Pool, cpu_count
from optparse import OptionParser
from subprocess import Popen, PIPE, STDOUT
# regex expressions
OPERATORS = r"&|\(|\)|\||\!"
# A Kconfig symbol name: word characters containing upper-case letters or
# digits, at least two characters long.
FEATURE = r"(?:\w*[A-Z0-9]\w*){2,}"
# A (menu)config definition line, capturing the defined symbol.
DEF = r"^\s*(?:menu){,1}config\s+(" + FEATURE + r")\s*"
# A boolean Kconfig expression built from symbols and operators.
EXPR = r"(?:" + OPERATORS + r"|\s|" + FEATURE + r")+"
DEFAULT = r"default\s+.*?(?:if\s.+){,1}"
# Any Kconfig statement that may reference symbols.
STMT = r"^\s*(?:if|select|depends\s+on|(?:" + DEFAULT + r"))\s+" + EXPR
# A CONFIG_<symbol> reference in source code (optionally DCONFIG_ from
# compiler defines), capturing the symbol.
SOURCE_FEATURE = r"(?:\W|\b)+[D]{,1}CONFIG_(" + FEATURE + r")"

# regex objects
REGEX_FILE_KCONFIG = re.compile(r".*Kconfig[\.\w+\-]*$")
REGEX_FEATURE = re.compile(r'(?!\B)' + FEATURE + r'(?!\B)')
REGEX_SOURCE_FEATURE = re.compile(SOURCE_FEATURE)
REGEX_KCONFIG_DEF = re.compile(DEF)
REGEX_KCONFIG_EXPR = re.compile(EXPR)
REGEX_KCONFIG_STMT = re.compile(STMT)
REGEX_KCONFIG_HELP = re.compile(r"^\s+(help|---help---)\s*$")
REGEX_FILTER_FEATURES = re.compile(r"[A-Za-z0-9]$")
REGEX_NUMERIC = re.compile(r"0[xX][0-9a-fA-F]+|[0-9]+")
REGEX_QUOTES = re.compile("(\"(.*?)\")")
def parse_options():
    """The user interface of this module.

    Builds the option parser, validates the option combination and returns
    the parsed options object.  Exits with a message on invalid input.
    """
    usage = "%prog [options]\n\n" \
            "Run this tool to detect Kconfig symbols that are referenced but " \
            "not defined in\nKconfig. The output of this tool has the " \
            "format \'Undefined symbol\\tFile list\'\n\n" \
            "If no option is specified, %prog will default to check your\n" \
            "current tree. Please note that specifying commits will " \
            "\'git reset --hard\'\nyour current tree! You may save " \
            "uncommitted changes to avoid losing data."

    parser = OptionParser(usage=usage)

    parser.add_option('-c', '--commit', dest='commit', action='store',
                      default="",
                      help="Check if the specified commit (hash) introduces "
                           "undefined Kconfig symbols.")

    parser.add_option('-d', '--diff', dest='diff', action='store',
                      default="",
                      help="Diff undefined symbols between two commits. The "
                           "input format bases on Git log's "
                           "\'commit1..commit2\'.")

    parser.add_option('-f', '--find', dest='find', action='store_true',
                      default=False,
                      help="Find and show commits that may cause symbols to be "
                           "missing. Required to run with --diff.")

    parser.add_option('-i', '--ignore', dest='ignore', action='store',
                      default="",
                      help="Ignore files matching this pattern. Note that "
                           "the pattern needs to be a Python regex. To "
                           "ignore defconfigs, specify -i '.*defconfig'.")

    parser.add_option('-s', '--sim', dest='sim', action='store', default="",
                      help="Print a list of maximum 10 string-similar symbols.")

    parser.add_option('', '--force', dest='force', action='store_true',
                      default=False,
                      help="Reset current Git tree even when it's dirty.")

    (opts, _) = parser.parse_args()

    if opts.commit and opts.diff:
        sys.exit("Please specify only one option at once.")

    # The diff range must look like 'commit1..commit2'.
    # (Typo "commmit1..commit2" fixed in the message below and in the
    # --diff help text above.)
    if opts.diff and not re.match(r"^[\w\-\.]+\.\.[\w\-\.]+$", opts.diff):
        sys.exit("Please specify valid input in the following format: "
                 "\'commit1..commit2\'")

    if opts.commit or opts.diff:
        if not opts.force and tree_is_dirty():
            sys.exit("The current Git tree is dirty (see 'git status'). "
                     "Running this script may\ndelete important data since it "
                     "calls 'git reset --hard' for some performance\nreasons. "
                     " Please run this script in a clean Git tree or pass "
                     "'--force' if you\nwant to ignore this warning and "
                     "continue.")

    if opts.commit:
        # --find only makes sense for a commit range.
        opts.find = False

    if opts.ignore:
        # Validate the user-supplied pattern early.  Catch only regex
        # compilation errors (re.error) instead of a bare except that
        # would mask unrelated failures.
        try:
            re.match(opts.ignore, "this/is/just/a/test.c")
        except re.error:
            sys.exit("Please specify a valid Python regex.")

    return opts
def main():
    """Main function of this module.

    Depending on the options, reports undefined Kconfig symbols for the
    whole tree, a single commit, or a commit range, and optionally prints
    string-similar symbols and the commits that changed each symbol.
    NOTE: the --commit/--diff paths run 'git reset --hard' on the tree.
    """
    opts = parse_options()
    # --sim alone: just print similar symbols and exit.
    if opts.sim and not opts.commit and not opts.diff:
        sims = find_sims(opts.sim, opts.ignore)
        if sims:
            print "%s: %s" % (yel("Similar symbols"), ', '.join(sims))
        else:
            print "%s: no similar symbols found" % yel("Similar symbols")
        sys.exit(0)
    # dictionary of (un)defined symbols
    defined = {}
    undefined = {}
    if opts.commit or opts.diff:
        # remember HEAD so the tree can be restored afterwards
        head = get_head()
        # get commit range
        commit_a = None
        commit_b = None
        if opts.commit:
            commit_a = opts.commit + "~"
            commit_b = opts.commit
        elif opts.diff:
            split = opts.diff.split("..")
            commit_a = split[0]
            commit_b = split[1]
        undefined_a = {}
        undefined_b = {}
        # get undefined items before the commit
        execute("git reset --hard %s" % commit_a)
        undefined_a, _ = check_symbols(opts.ignore)
        # get undefined items for the commit
        execute("git reset --hard %s" % commit_b)
        undefined_b, defined = check_symbols(opts.ignore)
        # report cases that are present for the commit but not before
        for feature in sorted(undefined_b):
            # feature has not been undefined before
            if not feature in undefined_a:
                files = sorted(undefined_b.get(feature))
                undefined[feature] = files
            # check if there are new files that reference the undefined feature
            else:
                files = sorted(undefined_b.get(feature) -
                               undefined_a.get(feature))
                if files:
                    undefined[feature] = files
        # reset to head
        execute("git reset --hard %s" % head)
    # default to check the entire tree
    else:
        undefined, defined = check_symbols(opts.ignore)
    # now print the output
    for feature in sorted(undefined):
        print red(feature)
        files = sorted(undefined.get(feature))
        print "%s: %s" % (yel("Referencing files"), ", ".join(files))
        sims = find_sims(feature, opts.ignore, defined)
        sims_out = yel("Similar symbols")
        if sims:
            print "%s: %s" % (sims_out, ', '.join(sims))
        else:
            print "%s: %s" % (sims_out, "no similar symbols found")
        if opts.find:
            print "%s:" % yel("Commits changing symbol")
            commits = find_commits(feature, opts.diff)
            if commits:
                for commit in commits:
                    # split "hash subject" into its two parts
                    commit = commit.split(" ", 1)
                    print "\t- %s (\"%s\")" % (yel(commit[0]), commit[1])
            else:
                print "\t- no commit found"
        print # new line
def yel(string):
    """Wrap @string in ANSI escape codes so terminals render it yellow."""
    return "\033[33m{0}\033[0m".format(string)
def red(string):
    """Wrap @string in ANSI escape codes so terminals render it red."""
    return "\033[31m{0}\033[0m".format(string)
def execute(cmd):
    """Execute %cmd and return stdout.  Exit in case of error.

    stderr is merged into stdout so error output is captured as well.
    NOTE(review): shell=True passes @cmd through the shell; all callers
    build the command from internal data, but keep it that way.
    """
    pop = Popen(cmd, stdout=PIPE, stderr=STDOUT, shell=True)
    (stdout, _) = pop.communicate()  # wait until finished
    if pop.returncode != 0:
        sys.exit(stdout)
    return stdout
def find_commits(symbol, diff):
    """Find commits changing %symbol in the given range of %diff."""
    log = execute("git log --pretty=oneline --abbrev-commit -G %s %s"
                  % (symbol, diff))
    # drop empty lines (e.g. the trailing newline)
    return [entry for entry in log.split("\n") if entry]
def tree_is_dirty():
    """Return true if the current working tree is dirty (i.e., if any file has
    been added, deleted, modified, renamed or copied but not committed).

    Fix: the previous code iterated the stdout *string*, yielding single
    characters, so any occurrence of U/R/M/A/D/C anywhere in the output
    (including file names) flagged the tree as dirty.  Iterate lines and
    inspect only the two-column XY status code at the start of each line.
    """
    stdout = execute("git status --porcelain")
    for line in stdout.splitlines():
        # the first two characters of porcelain output are the status code
        if re.findall(r"[URMADC]{1}", line[:2]):
            return True
    return False
def get_head():
    """Return commit hash of current HEAD."""
    # strip the trailing newline from the git output
    return execute("git rev-parse HEAD").strip('\n')
def partition(lst, size):
    """Split @lst into @size interleaved (striped) sub-lists: sub-list i
    holds lst[i], lst[i + size], lst[i + 2*size], ...

    Note: this yields @size parts, NOT parts of length @size; callers use
    it to hand one stripe to each worker process."""
    return [lst[i::size] for i in xrange(size)]
def init_worker():
    """Set signal handler to ignore SIGINT.

    Used as the Pool initializer so Ctrl-C is handled only by the parent
    process, which then terminates the workers cleanly."""
    signal.signal(signal.SIGINT, signal.SIG_IGN)
def find_sims(symbol, ignore, defined=None):
    """Return a list of max. ten Kconfig symbols that are string-similar to
    @symbol.

    If @defined (a collection of known symbol definitions) is non-empty it
    is used directly; otherwise all Kconfig files are parsed in parallel to
    collect the definitions first.
    """
    if defined:
        return sorted(difflib.get_close_matches(symbol, set(defined), 10))
    # Fix: the previous signature used a mutable default argument
    # (defined=[]), which silently accumulated results across calls and
    # would raise AttributeError on .extend() if a caller passed an empty
    # set.  Use a fresh local list instead.
    defined = []
    pool = Pool(cpu_count(), init_worker)
    kfiles = []
    for gitfile in get_files():
        if REGEX_FILE_KCONFIG.match(gitfile):
            kfiles.append(gitfile)
    arglist = []
    for part in partition(kfiles, cpu_count()):
        # each worker gets (files, ignore-pattern)
        arglist.append((part, ignore))
    for res in pool.map(parse_kconfig_files, arglist):
        defined.extend(res[0])
    return sorted(difflib.get_close_matches(symbol, set(defined), 10))
def get_files():
    """Return a list of all files in the current git directory."""
    # use 'git ls-files' to get the worklist
    stdout = execute("git ls-files")
    if stdout.endswith("\n"):
        stdout = stdout[:-1]
    files = []
    for gitfile in stdout.rsplit("\n"):
        # skip git metadata, logs, directories and the tools/ tree
        unwanted = (".git" in gitfile or "ChangeLog" in gitfile or
                    ".log" in gitfile or os.path.isdir(gitfile) or
                    gitfile.startswith("tools/"))
        if not unwanted:
            files.append(gitfile)
    return files
def check_symbols(ignore):
    """Find undefined Kconfig symbols and return a dict with the symbol as key
    and a list of referencing files as value.  Files matching %ignore are not
    checked for undefined symbols."""
    # Worker pool; init_worker makes workers ignore SIGINT so the parent
    # can terminate them on Ctrl-C below.
    pool = Pool(cpu_count(), init_worker)
    try:
        return check_symbols_helper(pool, ignore)
    except KeyboardInterrupt:
        pool.terminate()
        pool.join()
        sys.exit(1)
def check_symbols_helper(pool, ignore):
    """Helper method for check_symbols(). Used to catch keyboard interrupts in
    check_symbols() in order to properly terminate running worker processes."""
    source_files = []
    kconfig_files = []
    defined_features = []
    referenced_features = dict()  # {file: [features]}
    for gitfile in get_files():
        if REGEX_FILE_KCONFIG.match(gitfile):
            kconfig_files.append(gitfile)
        else:
            # Fix: the condition was inverted ("not re.match"), which
            # skipped every file that did NOT match the ignore pattern --
            # the opposite of the documented -i semantics and of the
            # handling in parse_kconfig_files().
            if ignore and re.match(ignore, gitfile):
                continue
            # add source files that do not match the ignore pattern
            source_files.append(gitfile)
    # parse source files in parallel, one stripe per CPU
    arglist = partition(source_files, cpu_count())
    for res in pool.map(parse_source_files, arglist):
        referenced_features.update(res)
    # parse kconfig files
    arglist = []
    for part in partition(kconfig_files, cpu_count()):
        arglist.append((part, ignore))
    for res in pool.map(parse_kconfig_files, arglist):
        defined_features.extend(res[0])
        referenced_features.update(res[1])
    defined_features = set(defined_features)
    # inverse mapping of referenced_features to dict(feature: [files])
    inv_map = dict()
    for _file, features in referenced_features.iteritems():
        for feature in features:
            inv_map[feature] = inv_map.get(feature, set())
            inv_map[feature].add(_file)
    referenced_features = inv_map
    undefined = {}  # {feature: [files]}
    for feature in sorted(referenced_features):
        # filter some false positives
        if feature == "FOO" or feature == "BAR" or \
                feature == "FOO_BAR" or feature == "XXX":
            continue
        if feature not in defined_features:
            if feature.endswith("_MODULE"):
                # avoid false positives for kernel modules
                if feature[:-len("_MODULE")] in defined_features:
                    continue
            undefined[feature] = referenced_features.get(feature)
    return undefined, defined_features
def parse_source_files(source_files):
    """Parse each source file in @source_files and return a dictionary with
    source files as keys and lists of referenced Kconfig symbols as values."""
    return dict((sfile, parse_source_file(sfile))
                for sfile in source_files)
def parse_source_file(sfile):
    """Parse @sfile and return a list of referenced Kconfig features."""
    references = []
    if not os.path.exists(sfile):
        return references
    with open(sfile, "r") as stream:
        lines = stream.readlines()
    for line in lines:
        # cheap pre-filter before running the regexes
        if "CONFIG_" not in line:
            continue
        for feature in REGEX_SOURCE_FEATURE.findall(line):
            if REGEX_FILTER_FEATURES.search(feature):
                references.append(feature)
    return references
def get_features_in_line(line):
    """Return mentioned Kconfig features in @line (all REGEX_FEATURE hits)."""
    return REGEX_FEATURE.findall(line)
def parse_kconfig_files(args):
    """Parse kconfig files and return a tuple of defined and referenced
    Kconfig symbols.  Note, @args is a tuple of a list of files and the
    @ignore pattern (packed this way for pool.map)."""
    kfiles, ignore = args
    defined_features = []
    referenced_features = dict()
    for kfile in kfiles:
        file_defined, file_references = parse_kconfig_file(kfile)
        # definitions are always collected, even for ignored files
        defined_features.extend(file_defined)
        if ignore and re.match(ignore, kfile):
            # do not collect references for files that match the ignore pattern
            continue
        referenced_features[kfile] = file_references
    return (defined_features, referenced_features)
def parse_kconfig_file(kfile):
    """Parse @kfile and return (defined, references): the feature names
    defined in the file and the feature names referenced by its statements."""
    lines = []
    defined = []
    references = []
    # True while inside a 'help' text block, whose content is skipped
    skip = False
    if not os.path.exists(kfile):
        return defined, references
    with open(kfile, "r") as stream:
        lines = stream.readlines()
    for i in range(len(lines)):
        line = lines[i]
        line = line.strip('\n')
        line = line.split("#")[0] # ignore comments
        if REGEX_KCONFIG_DEF.match(line):
            feature_def = REGEX_KCONFIG_DEF.findall(line)
            defined.append(feature_def[0])
            skip = False
        elif REGEX_KCONFIG_HELP.match(line):
            skip = True
        elif skip:
            # ignore content of help messages
            pass
        elif REGEX_KCONFIG_STMT.match(line):
            line = REGEX_QUOTES.sub("", line)
            features = get_features_in_line(line)
            # multi-line statements
            # NOTE(review): 'i += 1' here does not advance the outer
            # for-loop, so continuation lines are visited again by the
            # outer loop afterwards; confirm whether the resulting
            # re-processing / duplicate references are acceptable.
            while line.endswith("\\"):
                i += 1
                line = lines[i]
                line = line.strip('\n')
                features.extend(get_features_in_line(line))
            for feature in set(features):
                if REGEX_NUMERIC.match(feature):
                    # ignore numeric values
                    continue
                references.append(feature)
    return defined, references
# Script entry point.
if __name__ == "__main__":
    main()
| gpl-2.0 |
jiwanlimbu/aura | keystone/auth/plugins/core.py | 1 | 7241 | # Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
from oslo_log import log
import six
from keystone.common import dependency
import keystone.conf
from keystone import exception
CONF = keystone.conf.CONF
LOG = log.getLogger(__name__)
def construct_method_map_from_config():
    """Determine authentication method types for deployment.

    Each configured method gets a distinct power-of-two index so that any
    combination of methods can be encoded as a bit-sum.

    :returns: a dictionary containing the methods and their indexes
    """
    return dict((1 << position, method)
                for position, method in enumerate(CONF.auth.methods))
def convert_method_list_to_integer(methods):
    """Convert the method type(s) to an integer.

    Sums the power-of-two index of every configured method whose name
    appears in @methods; unknown names contribute nothing.

    :param methods: a list of method names
    :returns: an integer representing the methods
    """
    method_map = construct_method_map_from_config()
    return sum(index
               for method in methods
               for index, name in method_map.items()
               if name == method)
def convert_integer_to_method_list(method_int):
    """Convert an integer to a list of methods.

    :param method_int: an integer representing methods
    :returns: a corresponding list of methods
    """
    # If the method_int is 0 then no methods were used so return an empty
    # method list
    if method_int == 0:
        return []
    method_map = construct_method_map_from_config()
    # walk the power-of-two keys from largest to smallest
    method_ints = sorted(method_map, reverse=True)
    confirmed_methods = []
    for m_int in method_ints:
        # (lbragstad): By dividing the method_int by each key in the
        # method_map, we know if the division results in an integer of 1, that
        # key was used in the construction of the total sum of the method_int.
        # In that case, we should confirm the key value and store it so we can
        # look it up later. Then we should take the remainder of what is
        # confirmed and the method_int and continue the process. In the end, we
        # should have a list of integers that correspond to indexes in our
        # method_map and we can reinflate the methods that the original
        # method_int represents.
        # Fix: use floor division.  Under Python 3 the original true
        # division ("/") produces floats (e.g. 3 / 2 == 1.5), so the == 1
        # test failed and multi-method integers decoded incorrectly.
        if (method_int // m_int) == 1:
            confirmed_methods.append(m_int)
            method_int = method_int - m_int
    return [method_map[method] for method in confirmed_methods]
@dependency.requires('identity_api', 'resource_api')
class BaseUserInfo(object):
    """Validate and normalize the 'user' portion of an auth payload.

    Resolves the referenced user and domain via the injected identity and
    resource APIs, verifies both are enabled, and exposes the result as
    ``user_id``, ``user_ref`` and ``domain_id`` attributes.
    """

    @classmethod
    def create(cls, auth_payload, method_name):
        """Build and validate a user-info object from an auth payload."""
        user_auth_info = cls()
        user_auth_info._validate_and_normalize_auth_data(auth_payload)
        user_auth_info.METHOD_NAME = method_name
        return user_auth_info

    def __init__(self):
        self.user_id = None
        self.user_ref = None
        self.METHOD_NAME = None

    def _assert_domain_is_enabled(self, domain_ref):
        """Raise Unauthorized (preserving the traceback) if the domain is
        disabled."""
        try:
            self.resource_api.assert_domain_enabled(
                domain_id=domain_ref['id'],
                domain=domain_ref)
        except AssertionError as e:
            LOG.warning(six.text_type(e))
            six.reraise(exception.Unauthorized, exception.Unauthorized(e),
                        sys.exc_info()[2])

    def _assert_user_is_enabled(self, user_ref):
        """Raise Unauthorized (preserving the traceback) if the user is
        disabled."""
        try:
            self.identity_api.assert_user_enabled(
                user_id=user_ref['id'],
                user=user_ref)
        except AssertionError as e:
            LOG.warning(six.text_type(e))
            six.reraise(exception.Unauthorized, exception.Unauthorized(e),
                        sys.exc_info()[2])

    def _lookup_domain(self, domain_info):
        """Resolve a domain by id or name; return the enabled domain ref.

        Unknown domains surface as Unauthorized rather than NotFound so the
        response does not leak which domains exist.
        """
        domain_id = domain_info.get('id')
        domain_name = domain_info.get('name')
        if not domain_id and not domain_name:
            raise exception.ValidationError(attribute='id or name',
                                            target='domain')
        try:
            if domain_name:
                domain_ref = self.resource_api.get_domain_by_name(
                    domain_name)
            else:
                domain_ref = self.resource_api.get_domain(domain_id)
        except exception.DomainNotFound as e:
            LOG.warning(six.text_type(e))
            raise exception.Unauthorized(e)
        self._assert_domain_is_enabled(domain_ref)
        return domain_ref

    def _validate_and_normalize_auth_data(self, auth_payload):
        """Validate the 'user' element and populate user/domain attributes.

        Lookup by name requires an explicit domain; lookup by id derives the
        domain from the user record.  Unknown users surface as Unauthorized.
        """
        if 'user' not in auth_payload:
            raise exception.ValidationError(attribute='user',
                                            target=self.METHOD_NAME)
        user_info = auth_payload['user']
        user_id = user_info.get('id')
        user_name = user_info.get('name')
        if not user_id and not user_name:
            raise exception.ValidationError(attribute='id or name',
                                            target='user')
        try:
            if user_name:
                if 'domain' not in user_info:
                    raise exception.ValidationError(attribute='domain',
                                                    target='user')
                domain_ref = self._lookup_domain(user_info['domain'])
                user_ref = self.identity_api.get_user_by_name(
                    user_name, domain_ref['id'])
            else:
                user_ref = self.identity_api.get_user(user_id)
                domain_ref = self.resource_api.get_domain(
                    user_ref['domain_id'])
            self._assert_domain_is_enabled(domain_ref)
        except exception.UserNotFound as e:
            LOG.warning(six.text_type(e))
            raise exception.Unauthorized(e)
        self._assert_user_is_enabled(user_ref)
        self.user_ref = user_ref
        self.user_id = user_ref['id']
        self.domain_id = domain_ref['id']
class UserAuthInfo(BaseUserInfo):
    """User info for password-based authentication payloads."""

    def __init__(self):
        super(UserAuthInfo, self).__init__()
        # plaintext password from the payload; None until validated
        self.password = None

    def _validate_and_normalize_auth_data(self, auth_payload):
        """Extend base validation by capturing the user's password."""
        super(UserAuthInfo, self)._validate_and_normalize_auth_data(
            auth_payload)
        user_info = auth_payload['user']
        self.password = user_info.get('password')
class TOTPUserInfo(BaseUserInfo):
    """User info for TOTP (time-based one-time passcode) auth payloads."""

    def __init__(self):
        super(TOTPUserInfo, self).__init__()
        # one-time passcode from the payload; None until validated
        self.passcode = None

    def _validate_and_normalize_auth_data(self, auth_payload):
        """Extend base validation by capturing the user's passcode."""
        super(TOTPUserInfo, self)._validate_and_normalize_auth_data(
            auth_payload)
        user_info = auth_payload['user']
        self.passcode = user_info.get('passcode')
| apache-2.0 |
lumig242/Hue-Integration-with-CDAP | apps/useradmin/src/useradmin/migrations/0005_auto__add_field_userprofile_last_activity.py | 17 | 6321 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        # Adding field 'UserProfile.last_activity'
        # The default (1969-12-31) is a sentinel meaning "never active".
        if db.backend_name.lower() == 'oracle':
            # Oracle requires strict TO_DATE format to be specified
            db.execute("ALTER TABLE \"USERADMIN_USERPROFILE\" ADD \"LAST_ACTIVITY\" TIMESTAMP DEFAULT TO_DATE('1969-12-31 00:00:00', 'YYYY-MM-DD HH24:MI:SS') NOT NULL")
        else:
            db.add_column(u'useradmin_userprofile', 'last_activity',
                          self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime(1969, 12, 31, 0, 0)),
                          keep_default=False)
    def backwards(self, orm):
        # Deleting field 'UserProfile.last_activity'
        # Reverses forwards(); works for both backends since Oracle and the
        # generic path add the same column.
        db.delete_column(u'useradmin_userprofile', 'last_activity')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'useradmin.grouppermission': {
'Meta': {'object_name': 'GroupPermission'},
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.Group']"}),
'hue_permission': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['useradmin.HuePermission']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'useradmin.huepermission': {
'Meta': {'object_name': 'HuePermission'},
'action': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'app': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'through': u"orm['useradmin.GroupPermission']", 'symmetrical': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'useradmin.ldapgroup': {
'Meta': {'object_name': 'LdapGroup'},
'group': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'group'", 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'useradmin.userprofile': {
'Meta': {'object_name': 'UserProfile'},
'creation_method': ('django.db.models.fields.CharField', [], {'default': "'HUE'", 'max_length': '64'}),
'first_login': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'home_directory': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_activity': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(1969, 12, 31, 0, 0)'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'unique': 'True'})
}
}
complete_apps = ['useradmin'] | apache-2.0 |
wilmerhenao/Tomotherapy-Without-Pulse | SinogramComparisons.py | 1 | 5247 | __author__ = 'wilmer'
# This one corresponds to the AverageOpeningTime.pdf document (first model)
# Optional Intel MKL acceleration: fall back silently when unavailable.
try:
    import mkl
    have_mkl = True
    print("Running with MKL Acceleration")
except ImportError:
    have_mkl = False
    print("Running with normal backends")
import time
import socket
import numpy as np
import matplotlib.pyplot as plt
from pylab import Line2D, gca
from scipy.stats import describe
from gurobipy import *
import math
from itertools import product
import pylab as pl
from matplotlib import collections as mc
import itertools
def plotSinogramIndependent(t, L, nameChunk, outputDirectory):
    """Plot the sinogram of one plan: one horizontal segment per leaf
    opening interval, and save it as a PNG.

    :param t: per-leaf list of (open, close) time intervals
    :param L: number of leaves
    :param nameChunk: base name used in the output file name
    :param outputDirectory: directory the PNG is written to

    Fix: removed the dead 'plt.figure()' and 'ax = gca()' lines -- they
    created an extra, leaked figure/axes that were immediately replaced by
    'pl.subplots()' below.
    """
    lines = []
    for l in range(L):
        for aperture in range(len(t[l])):
            a, b = t[l][aperture]
            lines.append([(a, l), (b, l)])
    lc = mc.LineCollection(lines, linewidths=3, colors='blue')
    fig, ax = pl.subplots()
    ax.add_collection(lc)
    ax.autoscale()
    plt.title('Sinogram')
    plt.xlabel('time in seconds')
    plt.ylabel('leaves')
    plt.savefig(outputDirectory + 'SinogramIndependent' + nameChunk + '.png')
# Output directory and the two result pickles to compare.
nameoutputdirectory = 'outputMultiProj/'
#nameChunk1 = 'pickleresults-ProstatefullModel-MinLOT-0.03-minAvgLot-0.17-vxls-8340-ntnsty-700'
#nameChunk1 = 'pickleresults-ProstatefullModel-MinLOT-0.03-minAvgLot-0.17-vxls-8340-ntnsty-700'
#nameChunk2 = 'pickleresults-ProstatepairModel-MinLOT-0.03-minAvgLot-0.17-vxls-16677-ntnsty-700'
#nameChunk1 = 'pickleresults-Prostate-51-pairModel-MinLOT-0.02-minAvgLot-0.17-vxls-16677-ntnsty-700'
#nameChunk2 = 'pickleresults-Prostate-51-fullModel-MinLOT-0.02-minAvgLot-0.17-vxls-16677-ntnsty-700'
nameChunk1 = 'pickleresults-Prostate-51-fullModel-MinLOT-0.02-minAvgLot-0.17-vxls-1385-ntnsty-700'
nameChunk2 = 'pickleresults-Prostate-51-fullModel-MinLOT-0.02-minAvgLot-0.17-vxls-16677-ntnsty-700'
picklefile1 = nameoutputdirectory + nameChunk1 + '.pkl'
picklefile2 = nameoutputdirectory + nameChunk2 + '.pkl'
# NOTE(review): 'input' shadows the builtin and the handles are never
# closed; a 'with open(...)' block would be safer.
input = open(picklefile1, 'rb')
sData1 = pickle.load(input)
input = open(picklefile2, 'rb')
sData2 = pickle.load(input)
t1 = sData1['t']  # t[leaf] = list of (open, close) time intervals
t2 = sData2['t']
L = 64  # number of MLC leaves
#plotSinogramIndependent(t1, L, nameChunk1, nameoutputdirectory)
#plotSinogramIndependent(t2, L, nameChunk2, nameoutputdirectory)
#bothOn = [[max(first[0], second[0]), min(first[1], second[1])] for first in t1 for second in t2 if max(first[0], second[0]) <= min(first[1], second[1])]
myeps = 0.001  # tolerance below which an interval remainder is discarded
def range_diff(r1, r2, eps=0.001):
    """Return the parts of interval @r1 not covered by interval @r2.

    :param r1: (start, end) interval to subtract from
    :param r2: (start, end) interval to subtract
    :param eps: remainders shorter than this are dropped; the default
        equals the module-level ``myeps`` tolerance, so existing two-
        argument calls behave exactly as before
    :returns: list of 0, 1 or 2 (start, end) tuples
    """
    s1, e1 = r1
    s2, e2 = r2
    endpoints = sorted((s1, s2, e1, e2))
    result = []
    # left remainder: r1 starts before r2 does
    if endpoints[0] == s1 and (endpoints[1] - endpoints[0]) > eps:
        result.append((endpoints[0], endpoints[1]))
    # right remainder: r1 ends after r2 does
    if endpoints[3] == e1 and (endpoints[3] - endpoints[2]) > eps:
        result.append((endpoints[2], endpoints[3]))
    return result
def multirange_diff(r1_list, r2_list):
    """Subtract every interval in @r2_list from the intervals in @r1_list
    and return the remaining (start, end) pieces."""
    remaining = r1_list
    for subtrahend in r2_list:
        pieces = []
        for interval in remaining:
            pieces.extend(range_diff(interval, subtrahend))
        remaining = pieces
    return remaining
# NOTE(review): leftover self-test of multirange_diff; consider removing
# or guarding it behind a debug flag.
r1_list = [(1, 1001), (1100, 1201)]
r2_list = [(30, 51), (60, 201), (1150, 1301)]
print(multirange_diff(r1_list, r2_list))
# Per-leaf interval sets: exclusive to sinogram 1, exclusive to sinogram 2,
# and the pairwise overlaps present in both.
firstOnly = []
secondOnly = []
bothOn = []
for l in range(L):
    firstOnly.append(multirange_diff(t1[l], t2[l]))
    secondOnly.append(multirange_diff(t2[l], t1[l]))
    if len(t1[l]) > 0 and len(t2[l]) > 0:
        bothOn.append([[max(first[0], second[0]), min(first[1], second[1])] for first in t1[l] for second in t2[l] if max(first[0], second[0]) <= min(first[1], second[1])])
    else:
        bothOn.append([])
def plotSinogramIndependentMixed(firstOnly, secondOnly, middleOnly, L, nameChunk1, nameChunk2, outputDirectory):
    """Plot a three-color comparison sinogram (red: first model only,
    blue: second model only, purple: both) and save it as a PDF.

    Fix: removed the dead 'plt.figure()' and 'ax = gca()' lines -- they
    created an extra, leaked figure/axes that were immediately replaced by
    'pl.subplots()' below.
    """
    linesFirst = []
    linesSecond = []
    linesMiddle = []
    for l in range(L):
        for aperture in range(len(firstOnly[l])):
            a, b = firstOnly[l][aperture]
            linesFirst.append([(a, l), (b, l)])
        for aperture in range(len(secondOnly[l])):
            a, b = secondOnly[l][aperture]
            linesSecond.append([(a, l), (b, l)])
        for aperture in range(len(middleOnly[l])):
            a, b = middleOnly[l][aperture]
            linesMiddle.append([(a, l), (b, l)])
    lc = mc.LineCollection(linesFirst, linewidths=3, colors='red')
    rc = mc.LineCollection(linesSecond, linewidths=3, colors='blue')
    middlec = mc.LineCollection(linesMiddle, linewidths=3, colors='purple')
    fig, ax = pl.subplots()
    ax.add_collection(lc)
    ax.add_collection(rc)
    ax.add_collection(middlec)
    ax.autoscale()
    #plt.title('Sinogram Comparison of Odd-Even Model vs. Detailed Model')
    plt.title('Sinograms of Low Resolution Model (red) vs. Full Resolution Model (blue)')
    plt.xlabel('time in seconds')
    plt.ylabel('leaves')
    plt.tick_params(
        axis='y',          # changes apply to the x-axis
        which='both',      # both major and minor ticks are affected
        left=False,        # ticks along the bottom edge are off
        top=False,         # ticks along the top edge are off
        labelbottom=True)
    ax.set_yticklabels([])
    plt.savefig(outputDirectory + 'Sinogram-Comparison-FullModelvspairModel.pdf', format='pdf')
plotSinogramIndependentMixed(firstOnly, secondOnly, bothOn, L, nameChunk1, nameChunk2, nameoutputdirectory) | mit |
barbarubra/Don-t-know-What-i-m-doing. | python/src/Lib/ctypes/test/test_struct_fields.py | 68 | 1507 | import unittest
from ctypes import *
class StructFieldsTestCase(unittest.TestCase):
    # Structure/Union classes must get 'finalized' sooner or
    # later, when one of these things happen:
    #
    # 1. _fields_ is set.
    # 2. An instance is created.
    # 3. The type is used as field of another Structure/Union.
    # 4. The type is subclassed
    #
    # When they are finalized, assigning _fields_ is no longer allowed.

    def test_1_A(self):
        """Setting _fields_ finalizes the type; re-assigning then fails."""
        class X(Structure):
            pass
        # Fix: failUnlessEqual is a deprecated unittest alias that was
        # removed in Python 3.12 -- use assertEqual.
        self.assertEqual(sizeof(X), 0)  # not finalized
        X._fields_ = []  # finalized
        self.assertRaises(AttributeError, setattr, X, "_fields_", [])

    def test_1_B(self):
        """A class-body _fields_ assignment also finalizes the type."""
        class X(Structure):
            _fields_ = []  # finalized
        self.assertRaises(AttributeError, setattr, X, "_fields_", [])

    def test_2(self):
        """Creating an instance finalizes the type."""
        class X(Structure):
            pass
        X()
        self.assertRaises(AttributeError, setattr, X, "_fields_", [])

    def test_3(self):
        """Using the type as a field of another Structure finalizes it."""
        class X(Structure):
            pass
        class Y(Structure):
            _fields_ = [("x", X)]  # finalizes X
        self.assertRaises(AttributeError, setattr, X, "_fields_", [])

    def test_4(self):
        """Subclassing finalizes the base type (but not the subclass)."""
        class X(Structure):
            pass
        class Y(X):
            pass
        self.assertRaises(AttributeError, setattr, X, "_fields_", [])
        Y._fields_ = []
        self.assertRaises(AttributeError, setattr, X, "_fields_", [])
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
| apache-2.0 |
ray-zhong/github_trend_spider | ENV/Lib/site-packages/pymongo/topology_description.py | 19 | 18509 | # Copyright 2014-2016 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you
# may not use this file except in compliance with the License. You
# may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
"""Represent a deployment of MongoDB servers."""
from collections import namedtuple
from pymongo import common
from pymongo.errors import ConfigurationError
from pymongo.read_preferences import ReadPreference
from pymongo.server_description import ServerDescription
from pymongo.server_selectors import Selection
from pymongo.server_type import SERVER_TYPE
# Enum-like namedtuple instance: TOPOLOGY_TYPE.Single == 0 through
# TOPOLOGY_TYPE.Unknown == 4; TOPOLOGY_TYPE._fields maps values to names.
TOPOLOGY_TYPE = namedtuple('TopologyType', ['Single', 'ReplicaSetNoPrimary',
                                            'ReplicaSetWithPrimary', 'Sharded',
                                            'Unknown'])(*range(5))
class TopologyDescription(object):
    def __init__(self,
                 topology_type,
                 server_descriptions,
                 replica_set_name,
                 max_set_version,
                 max_election_id,
                 topology_settings):
        """Representation of a deployment of MongoDB servers.

        :Parameters:
          - `topology_type`: initial type
          - `server_descriptions`: dict of (address, ServerDescription) for
            all seeds
          - `replica_set_name`: replica set name or None
          - `max_set_version`: greatest setVersion seen from a primary, or None
          - `max_election_id`: greatest electionId seen from a primary, or None
          - `topology_settings`: a TopologySettings
        """
        self._topology_type = topology_type
        self._replica_set_name = replica_set_name
        self._server_descriptions = server_descriptions
        self._max_set_version = max_set_version
        self._max_election_id = max_election_id
        # The heartbeat_frequency is used in staleness estimates.
        self._topology_settings = topology_settings
        # Is PyMongo compatible with all servers' wire protocols?
        self._incompatible_err = None
        for s in self._server_descriptions.values():
            # s.min/max_wire_version is the server's wire protocol.
            # MIN/MAX_SUPPORTED_WIRE_VERSION is what PyMongo supports.
            server_too_new = (
                # Server too new.
                s.min_wire_version is not None
                and s.min_wire_version > common.MAX_SUPPORTED_WIRE_VERSION)
            server_too_old = (
                # Server too old.
                s.max_wire_version is not None
                and s.max_wire_version < common.MIN_SUPPORTED_WIRE_VERSION)
            if server_too_new or server_too_old:
                self._incompatible_err = (
                    "Server at %s:%d "
                    "uses wire protocol versions %d through %d, "
                    "but PyMongo only supports %d through %d"
                    % (s.address[0], s.address[1],
                       s.min_wire_version, s.max_wire_version,
                       common.MIN_SUPPORTED_WIRE_VERSION,
                       common.MAX_SUPPORTED_WIRE_VERSION))
                # one incompatible server is enough; remember only the first
                break
def check_compatible(self):
"""Raise ConfigurationError if any server is incompatible.
A server is incompatible if its wire protocol version range does not
overlap with PyMongo's.
"""
if self._incompatible_err:
raise ConfigurationError(self._incompatible_err)
    def has_server(self, address):
        """Return True if an (host, port) address is one of the known servers."""
        return address in self._server_descriptions
    def reset_server(self, address):
        """A copy of this description, with one server marked Unknown."""
        # A bare ServerDescription(address) has the default Unknown type.
        return updated_topology_description(self, ServerDescription(address))
def reset(self):
"""A copy of this description, with all servers marked Unknown."""
if self._topology_type == TOPOLOGY_TYPE.ReplicaSetWithPrimary:
topology_type = TOPOLOGY_TYPE.ReplicaSetNoPrimary
else:
topology_type = self._topology_type
# The default ServerDescription's type is Unknown.
sds = dict((address, ServerDescription(address))
for address in self._server_descriptions)
return TopologyDescription(
topology_type,
sds,
self._replica_set_name,
self._max_set_version,
self._max_election_id,
self._topology_settings)
def server_descriptions(self):
"""Dict of (address,
:class:`~pymongo.server_description.ServerDescription`)."""
return self._server_descriptions.copy()
    @property
    def topology_type(self):
        """The type of this topology (a TOPOLOGY_TYPE value)."""
        return self._topology_type
    @property
    def topology_type_name(self):
        """The topology type as a human readable string.

        Looks the numeric type up in TOPOLOGY_TYPE's field names.

        .. versionadded:: 3.4
        """
        return TOPOLOGY_TYPE._fields[self._topology_type]
    @property
    def replica_set_name(self):
        """The replica set name, or None when not a replica set."""
        return self._replica_set_name
    @property
    def max_set_version(self):
        """Greatest setVersion seen from a primary, or None."""
        return self._max_set_version
    @property
    def max_election_id(self):
        """Greatest electionId seen from a primary, or None."""
        return self._max_election_id
@property
def known_servers(self):
"""List of Servers of types besides Unknown."""
return [s for s in self._server_descriptions.values()
if s.is_server_type_known]
@property
def common_wire_version(self):
"""Minimum of all servers' max wire versions, or None."""
servers = self.known_servers
if servers:
return min(s.max_wire_version for s in self.known_servers)
return None
    @property
    def heartbeat_frequency(self):
        """The heartbeat frequency, forwarded from the TopologySettings."""
        return self._topology_settings.heartbeat_frequency
    def apply_selector(self, selector, address):
        """Return server descriptions matching @selector (or @address).

        Selection rules, in order: Single topologies return all known
        servers regardless of the selector; an explicit @address pins the
        result to that one server; Sharded topologies apply only the
        localThresholdMS latency window; otherwise the selector filters
        the servers and the latency window is applied afterwards.
        """
        def apply_local_threshold(selection):
            # Keep only servers within local_threshold_ms of the fastest one.
            if not selection:
                return []
            settings = self._topology_settings
            # Round trip time in seconds.
            fastest = min(
                s.round_trip_time for s in selection.server_descriptions)
            threshold = settings.local_threshold_ms / 1000.0
            return [s for s in selection.server_descriptions
                    if (s.round_trip_time - fastest) <= threshold]
        if getattr(selector, 'min_wire_version', 0):
            # Selector demands a protocol feature; fail fast if the
            # topology's common wire version is too low.
            common_wv = self.common_wire_version
            if common_wv and common_wv < selector.min_wire_version:
                raise ConfigurationError(
                    "%s requires min wire version %d, but topology's min"
                    " wire version is %d" % (selector,
                                             selector.min_wire_version,
                                             common_wv))
        if self.topology_type == TOPOLOGY_TYPE.Single:
            # Ignore the selector.
            return self.known_servers
        elif address:
            description = self.server_descriptions().get(address)
            return [description] if description else []
        elif self.topology_type == TOPOLOGY_TYPE.Sharded:
            # Ignore the read preference, but apply localThresholdMS.
            return apply_local_threshold(
                Selection.from_topology_description(self))
        else:
            return apply_local_threshold(
                selector(Selection.from_topology_description(self)))
    def has_readable_server(self, read_preference=ReadPreference.PRIMARY):
        """Does this topology have any readable servers available matching the
        given read preference?

        :Parameters:
          - `read_preference`: an instance of a read preference from
            :mod:`~pymongo.read_preferences`. Defaults to
            :attr:`~pymongo.read_preferences.ReadPreference.PRIMARY`.

        .. note:: When connected directly to a single server this method
            always returns ``True``.

        .. versionadded:: 3.4
        """
        common.validate_read_preference("read_preference", read_preference)
        # apply_selector returns a (possibly empty) list of matching
        # servers; any() maps "non-empty" to True.
        return any(self.apply_selector(read_preference, None))
    def has_writable_server(self):
        """Does this topology have a writable server available?

        .. note:: When connected directly to a single server this method
            always returns ``True``.

        .. versionadded:: 3.4
        """
        # A server selectable under the PRIMARY read preference is the
        # writable server.
        return self.has_readable_server(ReadPreference.PRIMARY)
# If topology type is Unknown and we receive an ismaster response, what should
# the new topology type be?
_SERVER_TYPE_TO_TOPOLOGY_TYPE = {
    SERVER_TYPE.Mongos: TOPOLOGY_TYPE.Sharded,
    SERVER_TYPE.RSPrimary: TOPOLOGY_TYPE.ReplicaSetWithPrimary,
    SERVER_TYPE.RSSecondary: TOPOLOGY_TYPE.ReplicaSetNoPrimary,
    SERVER_TYPE.RSArbiter: TOPOLOGY_TYPE.ReplicaSetNoPrimary,
    SERVER_TYPE.RSOther: TOPOLOGY_TYPE.ReplicaSetNoPrimary,
    # Standalone and RSGhost are intentionally absent: both are handled
    # explicitly in updated_topology_description below.
}
def updated_topology_description(topology_description, server_description):
    """Return an updated copy of a TopologyDescription.

    :Parameters:
      - `topology_description`: the current TopologyDescription
      - `server_description`: a new ServerDescription that resulted from
        an ismaster call

    Called after attempting (successfully or not) to call ismaster on the
    server at server_description.address. Does not modify topology_description.
    """
    address = server_description.address

    # These values will be updated, if necessary, to form the new
    # TopologyDescription.
    topology_type = topology_description.topology_type
    set_name = topology_description.replica_set_name
    max_set_version = topology_description.max_set_version
    max_election_id = topology_description.max_election_id
    server_type = server_description.server_type

    # Don't mutate the original dict of server descriptions; copy it.
    sds = topology_description.server_descriptions()

    # Replace this server's description with the new one.
    sds[address] = server_description

    if topology_type == TOPOLOGY_TYPE.Single:
        # Single type never changes.
        return TopologyDescription(
            TOPOLOGY_TYPE.Single,
            sds,
            set_name,
            max_set_version,
            max_election_id,
            topology_description._topology_settings)

    if topology_type == TOPOLOGY_TYPE.Unknown:
        # A standalone seen from an Unknown topology is dropped from
        # monitoring; a typed server fixes the topology type.
        if server_type == SERVER_TYPE.Standalone:
            sds.pop(address)

        elif server_type not in (SERVER_TYPE.Unknown, SERVER_TYPE.RSGhost):
            topology_type = _SERVER_TYPE_TO_TOPOLOGY_TYPE[server_type]

    if topology_type == TOPOLOGY_TYPE.Sharded:
        # Only mongos (or still-Unknown) servers belong in a sharded topology.
        if server_type not in (SERVER_TYPE.Mongos, SERVER_TYPE.Unknown):
            sds.pop(address)

    elif topology_type == TOPOLOGY_TYPE.ReplicaSetNoPrimary:
        if server_type in (SERVER_TYPE.Standalone, SERVER_TYPE.Mongos):
            sds.pop(address)

        elif server_type == SERVER_TYPE.RSPrimary:
            (topology_type,
             set_name,
             max_set_version,
             max_election_id) = _update_rs_from_primary(sds,
                                                        set_name,
                                                        server_description,
                                                        max_set_version,
                                                        max_election_id)

        elif server_type in (
                SERVER_TYPE.RSSecondary,
                SERVER_TYPE.RSArbiter,
                SERVER_TYPE.RSOther):
            topology_type, set_name = _update_rs_no_primary_from_member(
                sds, set_name, server_description)

    elif topology_type == TOPOLOGY_TYPE.ReplicaSetWithPrimary:
        if server_type in (SERVER_TYPE.Standalone, SERVER_TYPE.Mongos):
            sds.pop(address)
            topology_type = _check_has_primary(sds)

        elif server_type == SERVER_TYPE.RSPrimary:
            (topology_type,
             set_name,
             max_set_version,
             max_election_id) = _update_rs_from_primary(sds,
                                                        set_name,
                                                        server_description,
                                                        max_set_version,
                                                        max_election_id)

        elif server_type in (
                SERVER_TYPE.RSSecondary,
                SERVER_TYPE.RSArbiter,
                SERVER_TYPE.RSOther):
            topology_type = _update_rs_with_primary_from_member(
                sds, set_name, server_description)

        else:
            # Server type is Unknown or RSGhost: did we just lose the primary?
            topology_type = _check_has_primary(sds)

    # Return updated copy.
    return TopologyDescription(topology_type,
                               sds,
                               set_name,
                               max_set_version,
                               max_election_id,
                               topology_description._topology_settings)
def _update_rs_from_primary(
        sds,
        replica_set_name,
        server_description,
        max_set_version,
        max_election_id):
    """Update topology description from a primary's ismaster response.

    Pass in a dict of ServerDescriptions, current replica set name, the
    ServerDescription we are processing, and the TopologyDescription's
    max_set_version and max_election_id if any.

    Returns (new topology type, new replica_set_name, new max_set_version,
    new max_election_id).

    May mutate *sds* in place (adding, removing, or resetting entries).
    """
    if replica_set_name is None:
        replica_set_name = server_description.replica_set_name

    elif replica_set_name != server_description.replica_set_name:
        # We found a primary but it doesn't have the replica_set_name
        # provided by the user.
        sds.pop(server_description.address)
        return (_check_has_primary(sds),
                replica_set_name,
                max_set_version,
                max_election_id)

    # Compare (setVersion, electionId) pairs to detect a stale primary;
    # the comparison only applies when both pairs are fully populated.
    max_election_tuple = max_set_version, max_election_id
    if None not in server_description.election_tuple:
        if (None not in max_election_tuple and
                max_election_tuple > server_description.election_tuple):

            # Stale primary, set to type Unknown.
            address = server_description.address
            sds[address] = ServerDescription(address)
            return (_check_has_primary(sds),
                    replica_set_name,
                    max_set_version,
                    max_election_id)

        max_election_id = server_description.election_id

    if (server_description.set_version is not None and
        (max_set_version is None or
            server_description.set_version > max_set_version)):
        max_set_version = server_description.set_version

    # We've heard from the primary. Is it the same primary as before?
    for server in sds.values():
        if (server.server_type is SERVER_TYPE.RSPrimary
                and server.address != server_description.address):

            # Reset old primary's type to Unknown.
            sds[server.address] = ServerDescription(server.address)

            # There can be only one prior primary.
            break

    # Discover new hosts from this primary's response.
    for new_address in server_description.all_hosts:
        if new_address not in sds:
            sds[new_address] = ServerDescription(new_address)

    # Remove hosts not in the response.
    for addr in set(sds) - server_description.all_hosts:
        sds.pop(addr)

    # If the host list differs from the seed list, we may not have a primary
    # after all.
    return (_check_has_primary(sds),
            replica_set_name,
            max_set_version,
            max_election_id)
def _update_rs_with_primary_from_member(
        sds,
        replica_set_name,
        server_description):
    """RS with a known primary: process a response from a non-primary member.

    *sds* is a dict of ServerDescriptions (mutated in place),
    *replica_set_name* the current set name, and *server_description* the
    member response being processed.

    Returns the new topology type.
    """
    assert replica_set_name is not None

    address = server_description.address
    # Drop the member if it claims a different set name, or if it reports
    # a different address for itself ("me") than the one we contacted.
    wrong_set_name = replica_set_name != server_description.replica_set_name
    wrong_me = bool(server_description.me) and address != server_description.me
    if wrong_set_name or wrong_me:
        sds.pop(address)

    # The member we just removed or updated may have been the primary.
    return _check_has_primary(sds)
def _update_rs_no_primary_from_member(
        sds,
        replica_set_name,
        server_description):
    """RS without known primary. Update from a non-primary's response.

    Pass in a dict of ServerDescriptions (mutated in place), current replica
    set name, and the ServerDescription we are processing.

    Returns (new topology type, new replica_set_name).
    """
    topology_type = TOPOLOGY_TYPE.ReplicaSetNoPrimary
    if replica_set_name is None:
        replica_set_name = server_description.replica_set_name

    elif replica_set_name != server_description.replica_set_name:
        # Member belongs to a different set than the one we expect.
        sds.pop(server_description.address)
        return topology_type, replica_set_name

    # This isn't the primary's response, so don't remove any servers
    # it doesn't report. Only add new servers.
    for address in server_description.all_hosts:
        if address not in sds:
            sds[address] = ServerDescription(address)

    # A member reporting a different "me" address than the one we contacted
    # is removed from monitoring.
    if (server_description.me and
            server_description.address != server_description.me):
        sds.pop(server_description.address)

    return topology_type, replica_set_name
def _check_has_primary(sds):
    """Current topology type is ReplicaSetWithPrimary. Is a primary still
    known?

    Pass in a dict of ServerDescriptions.

    Returns the new topology type: ReplicaSetWithPrimary if any server is
    still an RSPrimary, otherwise ReplicaSetNoPrimary.
    """
    primary_known = any(
        s.server_type == SERVER_TYPE.RSPrimary for s in sds.values())
    if primary_known:
        return TOPOLOGY_TYPE.ReplicaSetWithPrimary
    return TOPOLOGY_TYPE.ReplicaSetNoPrimary
| mit |
kantel/processingpy | mpmathtest/mpmath/tests/test_division.py | 15 | 5340 | from mpmath.libmp import *
from mpmath import mpf, mp
from random import randint, choice, seed
all_modes = [round_floor, round_ceiling, round_down, round_up, round_nearest]
fb = from_bstr
fi = from_int
ff = from_float
def test_div_1_3():
    """Check 1/3 and -1/3 at 7-bit precision under every rounding mode."""
    a = fi(1)
    b = fi(3)
    c = fi(-1)

    # floor rounds down, ceiling rounds up
    assert mpf_div(a, b, 7, round_floor) == fb('0.01010101')
    assert mpf_div(a, b, 7, round_ceiling) == fb('0.01010110')
    assert mpf_div(a, b, 7, round_down) == fb('0.01010101')
    assert mpf_div(a, b, 7, round_up) == fb('0.01010110')
    assert mpf_div(a, b, 7, round_nearest) == fb('0.01010101')

    # For the negative quotient the directed modes swap:
    # floor rounds up (in magnitude), ceiling rounds down
    assert mpf_div(c, b, 7, round_floor) == fb('-0.01010110')
    assert mpf_div(c, b, 7, round_ceiling) == fb('-0.01010101')
    assert mpf_div(c, b, 7, round_down) == fb('-0.01010101')
    assert mpf_div(c, b, 7, round_up) == fb('-0.01010110')
    assert mpf_div(c, b, 7, round_nearest) == fb('-0.01010101')
def test_mpf_divi_1_3():
    """Same rounding checks as test_div_1_3, but via mpf_rdiv_int,
    which takes a raw Python int as the numerator."""
    a = 1
    b = fi(3)
    c = -1
    assert mpf_rdiv_int(a, b, 7, round_floor) == fb('0.01010101')
    assert mpf_rdiv_int(a, b, 7, round_ceiling) == fb('0.01010110')
    assert mpf_rdiv_int(a, b, 7, round_down) == fb('0.01010101')
    assert mpf_rdiv_int(a, b, 7, round_up) == fb('0.01010110')
    assert mpf_rdiv_int(a, b, 7, round_nearest) == fb('0.01010101')
    assert mpf_rdiv_int(c, b, 7, round_floor) == fb('-0.01010110')
    assert mpf_rdiv_int(c, b, 7, round_ceiling) == fb('-0.01010101')
    assert mpf_rdiv_int(c, b, 7, round_down) == fb('-0.01010101')
    assert mpf_rdiv_int(c, b, 7, round_up) == fb('-0.01010110')
    assert mpf_rdiv_int(c, b, 7, round_nearest) == fb('-0.01010101')
def test_div_300():
    """Check rounding of quotients near a half-integer, including the
    round-half-to-even behavior of round_nearest."""
    q = fi(1000000)
    a = fi(300499999)   # a/q is a little less than a half-integer
    b = fi(300500000)   # b/q exactly a half-integer
    c = fi(300500001)   # c/q is a little more than a half-integer

    # Check nearest integer rounding (prec=9 as 2**8 < 300 < 2**9)
    assert mpf_div(a, q, 9, round_down) == fi(300)
    assert mpf_div(b, q, 9, round_down) == fi(300)
    assert mpf_div(c, q, 9, round_down) == fi(300)
    assert mpf_div(a, q, 9, round_up) == fi(301)
    assert mpf_div(b, q, 9, round_up) == fi(301)
    assert mpf_div(c, q, 9, round_up) == fi(301)

    # Nearest even integer is down
    assert mpf_div(a, q, 9, round_nearest) == fi(300)
    assert mpf_div(b, q, 9, round_nearest) == fi(300)
    assert mpf_div(c, q, 9, round_nearest) == fi(301)

    # Nearest even integer is up
    a = fi(301499999)
    b = fi(301500000)
    c = fi(301500001)
    assert mpf_div(a, q, 9, round_nearest) == fi(301)
    assert mpf_div(b, q, 9, round_nearest) == fi(302)
    assert mpf_div(c, q, 9, round_nearest) == fi(302)
def test_tight_integer_division():
    # Test that integer division at tightest possible precision is exact:
    # (a*b)/a must recover b exactly in every rounding mode when the
    # precision equals the number of significant bits of b.
    N = 100
    seed(1)  # fixed seed for reproducibility
    for i in range(N):
        a = choice([1, -1]) * randint(1, 1<<randint(10, 100))
        b = choice([1, -1]) * randint(1, 1<<randint(10, 100))
        p = a * b
        # Significant bits of b: total bits minus trailing zero bits.
        width = bitcount(abs(b)) - trailing(b)
        a = fi(a); b = fi(b); p = fi(p)
        for mode in all_modes:
            assert mpf_div(p, a, width, mode) == b
def test_epsilon_rounding():
    # Verify that mpf_div uses infinite precision; this result will
    # appear to be exactly 0.101 to a near-sighted algorithm
    # (the true quotient has a 1 bit more than 200 places down).

    a = fb('0.101' + ('0'*200) + '1')
    b = fb('1.10101')
    c = mpf_mul(a, b, 250, round_floor) # exact
    assert mpf_div(c, b, bitcount(a[1]), round_floor) == a # exact

    assert mpf_div(c, b, 2, round_down) == fb('0.10')
    assert mpf_div(c, b, 3, round_down) == fb('0.101')
    assert mpf_div(c, b, 2, round_up) == fb('0.11')
    assert mpf_div(c, b, 3, round_up) == fb('0.110')
    assert mpf_div(c, b, 2, round_floor) == fb('0.10')
    assert mpf_div(c, b, 3, round_floor) == fb('0.101')
    assert mpf_div(c, b, 2, round_ceiling) == fb('0.11')
    assert mpf_div(c, b, 3, round_ceiling) == fb('0.110')

    # The same for negative numbers
    a = fb('-0.101' + ('0'*200) + '1')
    b = fb('1.10101')
    c = mpf_mul(a, b, 250, round_floor)
    assert mpf_div(c, b, bitcount(a[1]), round_floor) == a

    assert mpf_div(c, b, 2, round_down) == fb('-0.10')
    assert mpf_div(c, b, 3, round_up) == fb('-0.110')

    # Floor goes up, ceiling goes down
    assert mpf_div(c, b, 2, round_floor) == fb('-0.11')
    assert mpf_div(c, b, 3, round_floor) == fb('-0.110')
    assert mpf_div(c, b, 2, round_ceiling) == fb('-0.10')
    assert mpf_div(c, b, 3, round_ceiling) == fb('-0.101')
def test_mod():
    """Check mpf % semantics: the result takes the sign of the divisor,
    matching Python's built-in modulo."""
    mp.dps = 15  # NOTE: mutates global mpmath precision state
    assert mpf(234) % 1 == 0
    assert mpf(-3) % 256 == 253
    assert mpf(0.25) % 23490.5 == 0.25
    assert mpf(0.25) % -23490.5 == -23490.25
    assert mpf(-0.25) % 23490.5 == 23490.25
    assert mpf(-0.25) % -23490.5 == -0.25
    # Check that these cases are handled efficiently
    assert mpf('1e10000000000') % 1 == 0
    assert mpf('1.23e-1000000000') % 1 == mpf('1.23e-1000000000')
    # test __rmod__
    assert 3 % mpf('1.75') == 1.25
def test_div_negative_rnd_bug():
    """Regression test: rounding of division with a negative operand."""
    mp.dps = 15  # NOTE: mutates global mpmath precision state
    assert (-3) / mpf('0.1531879017645047') == mpf('-19.583791966887116')
    assert mpf('-2.6342475750861301') / mpf('0.35126216427941814') == mpf('-7.4993775104985909')
| mit |
imsut/commons | src/python/twitter/common/log/__init__.py | 2 | 2176 | # ==================================================================================================
# Copyright 2011 Twitter, Inc.
# --------------------------------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this work except in compliance with the License.
# You may obtain a copy of the License in the LICENSE file, or at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==================================================================================================
__author__ = 'Brian Wickman'
import logging
from twitter.common.log.initialize import (
init,
teardown_disk_logging,
teardown_stderr_logging)
try:
    # Optional integration with the twitter.common.app framework: when it is
    # importable, register a module so logging is initialized at app startup.
    from twitter.common import app
    from twitter.common.log.options import LogOptions

    class LoggingSubsystem(app.Module):
        """app.Module that initializes the logging subsystem on startup."""

        def __init__(self):
            app.Module.__init__(self, __name__, description="Logging subsystem.")

        def setup_function(self):
            # Pass the application name (which init uses for disk log files)
            # only when the configured options require logging to disk.
            if not LogOptions._is_disk_logging_required():
                init()
            else:
                init(app.name())

    app.register_module(LoggingSubsystem())
except ImportError:
    # Do not require twitter.common.app
    pass
# Convenience aliases so callers can use twitter.common.log exactly like
# the standard logging module (e.g. log.info(...)).
debug = logging.debug
info = logging.info
warning = logging.warning
warn = logging.warning
error = logging.error
fatal = logging.fatal
log = logging.log
logger = logging.getLogger

# Re-exported standard logging level constants.
DEBUG = logging.DEBUG
INFO = logging.INFO
WARNING = logging.WARNING
WARN = logging.WARN
ERROR = logging.ERROR
FATAL = logging.FATAL

__all__ = [
    # directives
    'debug',
    'info',
    'warning',
    'warn', # alias
    'error',
    'fatal',
    'log',
    'logger',

    # levels
    'DEBUG',
    'INFO',
    'WARNING',
    'WARN',
    'ERROR',
    'FATAL',

    # only if you're not using app directly.
    'init',
    'teardown_stderr_logging',
    'teardown_disk_logging',

    # ditto
    # NOTE(review): 'formatters' is not defined in this module; presumably it
    # refers to the twitter.common.log.formatters submodule -- confirm it is
    # imported before `from twitter.common.log import *` is used.
    'formatters'
]
| apache-2.0 |
radhika-raghavendran/mbed-os5.1-onsemi | tools/misc/docs_gen.py | 16 | 1751 | """An api for generating documentation from the codebase
"""
from os.path import dirname, join
from os import sep
from re import compile
import subprocess
def generate_documentation(dirs, output_dir):
    """Use doxygen to generate the documentation

    Positional arguments:
    dirs - the directories that doxygen should scan for documentation
    output_dir - location of the documentation after the return of this function
    """
    # NOTE(review): Python 2 print statement; stray debug output of the
    # scanned directory list.
    print dirs
    with open(join(dirname(__file__), "Doxyfile")) as doxyfile:
        # Feed doxygen its configuration on stdin ("doxygen -"): the stock
        # Doxyfile first, then overrides for output directory and inputs.
        proc = subprocess.Popen(["doxygen", "-"], stdin=subprocess.PIPE)
        proc.stdin.write(doxyfile.read())
        proc.stdin.write("OUTPUT_DIRECTORY={}\n".format(output_dir))
        proc.stdin.write("INPUT={}".format(" ".join(dirs)))
        # Closing stdin signals end of configuration; then wait for doxygen
        # to finish writing the docs.
        proc.stdin.close()
        proc.wait()
# Path fragments whose sources are left out of the generated documentation.
EXCLUDES = ["targets", "features/FEATURE", "features/mbedtls",
            "features/nanostack", "features/storage"]

def is_not_excluded(src):
    """Return True when *src* contains none of the excluded path fragments."""
    for fragment in EXCLUDES:
        if fragment in src:
            return False
    return True
if __name__ == "__main__":
    # Script entry point: build docs for a scan of the whole tree using the
    # GCC_ARM toolchain against the catch-all "Super_Target".
    import sys
    from os.path import abspath, dirname, join

    # Be sure that the tools directory is in the search path
    ROOT = abspath(join(dirname(__file__), "..", ".."))
    sys.path.insert(0, ROOT)

    from tools.toolchains.gcc import GCC_ARM
    from tools.targets import TARGET_MAP

    toolchain = GCC_ARM(TARGET_MAP["Super_Target"])
    resources = toolchain.scan_resources(".")

    # Document every non-excluded header: feature headers plus top-level ones.
    generate_documentation(filter(is_not_excluded,
                                  sum(map(lambda x:x.headers,
                                          resources.features.values()),
                                      resources.headers)),
                           join(dirname(dirname(__file__)), "mbed-docs"))
| apache-2.0 |
kirberich/osmosis | osmosis/migrations/0006_preserve_relationships.py | 2 | 3556 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
class Migration(DataMigration):
    def forwards(self, orm):
        """Denormalize each shard's task relation into the new task_pk and
        task_model_path columns."""
        # Note: Don't use "from appname.models import ModelName".
        # Use orm.ModelName to refer to models in this application,
        # and orm['appname.ModelName'] for models in other applications.

        # Keep the relation between existing Shards and Tasks
        shards = orm['osmosis.ImportShard'].objects.all()
        for shard in shards:
            task = shard.task
            shard.task_pk = task.pk
            shard.task_model_path = task.model_path
            shard.save()
    def backwards(self, orm):
        """No-op reverse migration."""
        # Nothing to undo here: the denormalized columns are presumably
        # removed by the accompanying schema migration -- confirm.
models = {
u'osmosis.importshard': {
'Meta': {'object_name': 'ImportShard'},
'complete': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'error_csv_filename': ('django.db.models.fields.CharField', [], {'max_length': '1023'}),
'error_csv_written': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_row_processed': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'source_data_json': ('django.db.models.fields.TextField', [], {}),
'start_line_number': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'task': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['osmosis.ImportTask']"}),
'task_model_path': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'task_pk': ('django.db.models.fields.PositiveIntegerField', [], {}),
'total_rows': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
u'osmosis.importsharderror': {
'Meta': {'object_name': 'ImportShardError'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'line': ('django.db.models.fields.TextField', [], {}),
'shard': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['osmosis.ImportShard']"})
},
u'osmosis.importtask': {
'Meta': {'object_name': 'ImportTask'},
'error_csv': ('django.db.models.fields.files.FileField', [], {'max_length': '1023', 'null': 'True'}),
'error_csv_filename': ('django.db.models.fields.CharField', [], {'max_length': '1023'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model_path': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'row_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'shard_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'shards_error_csv_written': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'shards_processed': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'source_data': ('django.db.models.fields.files.FileField', [], {'max_length': '1023'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'pending'", 'max_length': '32'})
}
}
complete_apps = ['osmosis']
symmetrical = True
| mit |
jnordling/cabin | onadata/apps/logger/tests/test_parsing.py | 5 | 6995 | # vim: ai ts=4 sts=4 et sw=4 fileencoding=utf-8
import os
import re
from xml.dom import minidom
from onadata.apps.main.tests.test_base import TestBase
from onadata.apps.logger.xform_instance_parser import XFormInstanceParser,\
xpath_from_xml_node
from onadata.apps.logger.xform_instance_parser import get_uuid_from_xml,\
get_meta_from_xml, get_deprecated_uuid_from_xml
from onadata.libs.utils.common_tags import XFORM_ID_STRING
from onadata.apps.logger.models.xform import XForm
XML = u"xml"
DICT = u"dict"
FLAT_DICT = u"flat_dict"
ID = XFORM_ID_STRING
class TestXFormInstanceParser(TestBase):
    """Tests for XFormInstanceParser and the uuid/meta XML helper functions."""

    def _publish_and_submit_new_repeats(self):
        """Publish the new_repeats fixture form, submit one instance, and
        keep the raw submission XML on self.xml for the tests to parse."""
        self._create_user_and_login()
        # publish our form which contains some repeats
        xls_file_path = os.path.join(
            os.path.dirname(os.path.abspath(__file__)),
            "../fixtures/new_repeats/new_repeats.xls"
        )
        count = XForm.objects.count()
        self._publish_xls_file_and_set_xform(xls_file_path)
        self.assertEqual(count + 1, XForm.objects.count())

        # submit an instance
        xml_submission_file_path = os.path.join(
            os.path.dirname(os.path.abspath(__file__)),
            "../fixtures/new_repeats/instances/"
            "new_repeats_2012-07-05-14-33-53.xml"
        )
        self._make_submission(xml_submission_file_path)
        self.assertEqual(self.response.status_code, 201)

        # load xml file to parse and compare
        xml_file = open(xml_submission_file_path)
        self.xml = xml_file.read()
        xml_file.close()

    def test_parse_xform_nested_repeats(self):
        """The parser's nested-dict and flat-dict views of a submission
        with repeat groups match the fixture's contents."""
        self._publish_and_submit_new_repeats()
        parser = XFormInstanceParser(self.xml, self.xform.data_dictionary())
        # NOTE(review): local name shadows the builtin `dict`.
        dict = parser.to_dict()
        expected_dict = {
            u'new_repeats': {
                u'info':
                {
                    u'age': u'80',
                    u'name': u'Adam'
                },
                u'kids':
                {
                    u'kids_details':
                    [
                        {
                            u'kids_age': u'50',
                            u'kids_name': u'Abel'
                        },
                    ],
                    u'has_kids': u'1'
                },
                u'web_browsers': u'chrome ie',
                u'gps': u'-1.2627557 36.7926442 0.0 30.0'
            }
        }
        self.assertEqual(dict, expected_dict)

        # Flat view: slash-joined xpaths, repeats as lists of dicts.
        flat_dict = parser.to_flat_dict()
        expected_flat_dict = {
            u'gps': u'-1.2627557 36.7926442 0.0 30.0',
            u'kids/kids_details':
            [
                {
                    u'kids/kids_details/kids_name': u'Abel',
                    u'kids/kids_details/kids_age': u'50'
                }
            ],
            u'kids/has_kids': u'1',
            u'info/age': u'80',
            u'web_browsers': u'chrome ie',
            u'info/name': u'Adam'
        }
        self.assertEqual(flat_dict, expected_flat_dict)

    def test_xpath_from_xml_node(self):
        """xpath_from_xml_node builds a slash-joined path from a DOM node,
        excluding the root element."""
        xml_str = '<?xml version=\'1.0\' ?><test_item_name_matches_repeat ' \
                  'id="repeat_child_name_matches_repeat">' \
                  '<formhub><uuid>c911d71ce1ac48478e5f8bac99addc4e</uuid>' \
                  '</formhub><gps><gps>-1.2625149 36.7924478 0.0 30.0</gps>' \
                  '<info>Yo</info></gps><gps>' \
                  '<gps>-1.2625072 36.7924328 0.0 30.0</gps>' \
                  '<info>What</info></gps></test_item_name_matches_repeat>'
        # Strip inter-element whitespace so DOM navigation below is stable.
        clean_xml_str = xml_str.strip()
        clean_xml_str = re.sub(ur">\s+<", u"><", clean_xml_str)
        root_node = minidom.parseString(clean_xml_str).documentElement
        # get the first top-level gps element
        gps_node = root_node.firstChild.nextSibling
        self.assertEqual(gps_node.nodeName, u'gps')
        # get the info element within the gps element
        info_node = gps_node.getElementsByTagName(u'info')[0]
        # create an xpath that should look like gps/info
        xpath = xpath_from_xml_node(info_node)
        self.assertEqual(xpath, u'gps/info')

    def test_get_meta_from_xml(self):
        """get_meta_from_xml extracts instanceID and deprecatedID from a
        submission that carries both."""
        with open(
            os.path.join(
                os.path.dirname(__file__), "..", "fixtures", "tutorial",
                "instances", "tutorial_2012-06-27_11-27-53_w_uuid_edited.xml"),
                "r") as xml_file:
            xml_str = xml_file.read()
        instanceID = get_meta_from_xml(xml_str, "instanceID")
        self.assertEqual(instanceID,
                         "uuid:2d8c59eb-94e9-485d-a679-b28ffe2e9b98")
        deprecatedID = get_meta_from_xml(xml_str, "deprecatedID")
        self.assertEqual(deprecatedID, "uuid:729f173c688e482486a48661700455ff")

    def test_get_meta_from_xml_without_uuid_returns_none(self):
        """A submission without a meta instanceID yields None."""
        with open(
            os.path.join(
                os.path.dirname(__file__), "..", "fixtures", "tutorial",
                "instances", "tutorial_2012-06-27_11-27-53.xml"),
                "r") as xml_file:
            xml_str = xml_file.read()
        instanceID = get_meta_from_xml(xml_str, "instanceID")
        self.assertIsNone(instanceID)

    def test_get_uuid_from_xml(self):
        """get_uuid_from_xml returns the bare uuid (no "uuid:" prefix)."""
        with open(
            os.path.join(
                os.path.dirname(__file__), "..", "fixtures", "tutorial",
                "instances", "tutorial_2012-06-27_11-27-53_w_uuid.xml"),
                "r") as xml_file:
            xml_str = xml_file.read()
        instanceID = get_uuid_from_xml(xml_str)
        self.assertEqual(instanceID, "729f173c688e482486a48661700455ff")

    def test_get_deprecated_uuid_from_xml(self):
        """get_deprecated_uuid_from_xml returns the bare deprecated uuid."""
        with open(
            os.path.join(
                os.path.dirname(__file__), "..", "fixtures", "tutorial",
                "instances", "tutorial_2012-06-27_11-27-53_w_uuid_edited.xml"),
                "r") as xml_file:
            xml_str = xml_file.read()
        deprecatedID = get_deprecated_uuid_from_xml(xml_str)
        self.assertEqual(deprecatedID, "729f173c688e482486a48661700455ff")

    def test_parse_xform_nested_repeats_multiple_nodes(self):
        """A submission with duplicate sibling nodes is rejected with a 400
        mentioning the multiple-nodes error."""
        self._create_user_and_login()
        # publish our form which contains some repeats
        xls_file_path = os.path.join(
            os.path.dirname(os.path.abspath(__file__)),
            "../fixtures/new_repeats/new_repeats.xls"
        )
        count = XForm.objects.count()
        self._publish_xls_file_and_set_xform(xls_file_path)
        self.assertEqual(count + 1, XForm.objects.count())

        # submit an instance
        xml_submission_file_path = os.path.join(
            os.path.dirname(os.path.abspath(__file__)),
            "../fixtures/new_repeats/instances/"
            "multiple_nodes_error.xml"
        )
        self._make_submission(xml_submission_file_path)
        self.assertContains(self.response,
                            "Multiple nodes with the same name",
                            status_code=400)
| bsd-2-clause |
chenss/ChatRoom | 14.5 已经能运行(虽然有很多Warning)的Django-nonrel框架/django/core/management/commands/inspectdb.py | 203 | 7614 | import keyword
from optparse import make_option
from django.core.management.base import NoArgsCommand, CommandError
from django.db import connections, DEFAULT_DB_ALIAS
class Command(NoArgsCommand):
    """inspectdb: introspect an existing database and emit Django models."""

    help = "Introspects the database tables in the given database and outputs a Django model module."

    option_list = NoArgsCommand.option_list + (
        make_option('--database', action='store', dest='database',
            default=DEFAULT_DB_ALIAS, help='Nominates a database to '
                'introspect. Defaults to using the "default" database.'),
    )

    # Introspection needs no validated models to run.
    requires_model_validation = False

    db_module = 'django.db'

    def handle_noargs(self, **options):
        # Stream generated source lines to stdout; translate the backend's
        # NotImplementedError into a user-facing CommandError.
        try:
            for line in self.handle_inspection(options):
                self.stdout.write("%s\n" % line)
        except NotImplementedError:
            raise CommandError("Database inspection isn't supported for the currently selected database backend.")

    def handle_inspection(self, options):
        """Yield the lines of a models.py generated from the selected
        database's schema, one table per model class."""
        connection = connections[options.get('database', DEFAULT_DB_ALIAS)]

        # CamelCase the table name into a model class name.
        table2model = lambda table_name: table_name.title().replace('_', '').replace(' ', '').replace('-', '')

        cursor = connection.cursor()
        yield "# This is an auto-generated Django model module."
        yield "# You'll have to do the following manually to clean this up:"
        yield "# * Rearrange models' order"
        yield "# * Make sure each model has one field with primary_key=True"
        yield "# Feel free to rename the models, but don't rename db_table values or field names."
        yield "#"
        yield "# Also note: You'll have to insert the output of 'django-admin.py sqlcustom [appname]'"
        yield "# into your database."
        yield ''
        yield 'from %s import models' % self.db_module
        yield ''
        for table_name in connection.introspection.get_table_list(cursor):
            yield 'class %s(models.Model):' % table2model(table_name)
            # Relations/indexes are optional backend features; fall back to
            # empty when the backend can't provide them.
            try:
                relations = connection.introspection.get_relations(cursor, table_name)
            except NotImplementedError:
                relations = {}
            try:
                indexes = connection.introspection.get_indexes(cursor, table_name)
            except NotImplementedError:
                indexes = {}
            for i, row in enumerate(connection.introspection.get_table_description(cursor, table_name)):
                column_name = row[0]
                att_name = column_name.lower()
                comment_notes = [] # Holds Field notes, to be displayed in a Python comment.
                extra_params = {} # Holds Field parameters such as 'db_column'.

                # If the column name can't be used verbatim as a Python
                # attribute, set the "db_column" for this Field.
                if ' ' in att_name or '-' in att_name or keyword.iskeyword(att_name) or column_name != att_name:
                    extra_params['db_column'] = column_name

                # Modify the field name to make it Python-compatible.
                if ' ' in att_name:
                    att_name = att_name.replace(' ', '_')
                    comment_notes.append('Field renamed to remove spaces.')
                if '-' in att_name:
                    att_name = att_name.replace('-', '_')
                    comment_notes.append('Field renamed to remove dashes.')
                if column_name != att_name:
                    comment_notes.append('Field name made lowercase.')

                if i in relations:
                    # Foreign key: point at the related model ('self' for a
                    # self-referential relation).
                    rel_to = relations[i][1] == table_name and "'self'" or table2model(relations[i][1])
                    field_type = 'ForeignKey(%s' % rel_to
                    if att_name.endswith('_id'):
                        att_name = att_name[:-3]
                    else:
                        extra_params['db_column'] = column_name
                else:
                    # Calling `get_field_type` to get the field type string and any
                    # additional parameters and notes.
                    field_type, field_params, field_notes = self.get_field_type(connection, table_name, row)
                    extra_params.update(field_params)
                    comment_notes.extend(field_notes)

                    # Add primary_key and unique, if necessary.
                    if column_name in indexes:
                        if indexes[column_name]['primary_key']:
                            extra_params['primary_key'] = True
                        elif indexes[column_name]['unique']:
                            extra_params['unique'] = True

                    field_type += '('

                if keyword.iskeyword(att_name):
                    att_name += '_field'
                    comment_notes.append('Field renamed because it was a Python reserved word.')

                # Don't output 'id = meta.AutoField(primary_key=True)', because
                # that's assumed if it doesn't exist.
                if att_name == 'id' and field_type == 'AutoField(' and extra_params == {'primary_key': True}:
                    continue

                # Add 'null' and 'blank', if the 'null_ok' flag was present in the
                # table description.
                if row[6]: # If it's NULL...
                    extra_params['blank'] = True
                    if not field_type in ('TextField(', 'CharField('):
                        extra_params['null'] = True

                field_desc = '%s = models.%s' % (att_name, field_type)
                if extra_params:
                    if not field_desc.endswith('('):
                        field_desc += ', '
                    field_desc += ', '.join(['%s=%r' % (k, v) for k, v in extra_params.items()])
                field_desc += ')'
                if comment_notes:
                    field_desc += ' # ' + ' '.join(comment_notes)
                yield ' %s' % field_desc
            for meta_line in self.get_meta(table_name):
                yield meta_line

    def get_field_type(self, connection, table_name, row):
        """
        Given the database connection, the table name, and the cursor row
        description, this routine will return the given field type name, as
        well as any additional keyword parameters and notes for the field.
        """
        field_params = {}
        field_notes = []

        try:
            field_type = connection.introspection.get_field_type(row[1], row)
        except KeyError:
            # Unknown backend type: fall back to TextField and flag it.
            field_type = 'TextField'
            field_notes.append('This field type is a guess.')

        # This is a hook for DATA_TYPES_REVERSE to return a tuple of
        # (field_type, field_params_dict).
        if type(field_type) is tuple:
            field_type, new_params = field_type
            field_params.update(new_params)

        # Add max_length for all CharFields.
        if field_type == 'CharField' and row[3]:
            field_params['max_length'] = row[3]

        if field_type == 'DecimalField':
            field_params['max_digits'] = row[4]
            field_params['decimal_places'] = row[5]

        return field_type, field_params, field_notes

    def get_meta(self, table_name):
        """
        Return a sequence comprising the lines of code necessary
        to construct the inner Meta class for the model corresponding
        to the given database table name.
        """
        return [' class Meta:',
                ' db_table = %r' % table_name,
                '']
| gpl-2.0 |
AutorestCI/azure-sdk-for-python | azure-mgmt-network/azure/mgmt/network/v2017_11_01/models/bgp_community.py | 5 | 2429 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
# NOTE: AutoRest-generated model (see the file header) -- manual edits to the
# code will be lost on regeneration, so only documentation is touched here.
class BGPCommunity(Model):
    """Contains bgp community information offered in Service Community resources.
    :param service_supported_region: The region which the service support.
     e.g. For O365, region is Global.
    :type service_supported_region: str
    :param community_name: The name of the bgp community. e.g. Skype.
    :type community_name: str
    :param community_value: The value of the bgp community. For more
     information:
     https://docs.microsoft.com/en-us/azure/expressroute/expressroute-routing.
    :type community_value: str
    :param community_prefixes: The prefixes that the bgp community contains.
    :type community_prefixes: list[str]
    :param is_authorized_to_use: Customer is authorized to use bgp community
     or not.
    :type is_authorized_to_use: bool
    :param service_group: The service group of the bgp community contains.
    :type service_group: str
    """
    # Maps each Python attribute to its wire name and msrest type string;
    # msrest drives (de)serialization from this table, so keys/order matter.
    _attribute_map = {
        'service_supported_region': {'key': 'serviceSupportedRegion', 'type': 'str'},
        'community_name': {'key': 'communityName', 'type': 'str'},
        'community_value': {'key': 'communityValue', 'type': 'str'},
        'community_prefixes': {'key': 'communityPrefixes', 'type': '[str]'},
        'is_authorized_to_use': {'key': 'isAuthorizedToUse', 'type': 'bool'},
        'service_group': {'key': 'serviceGroup', 'type': 'str'},
    }
    def __init__(self, service_supported_region=None, community_name=None, community_value=None, community_prefixes=None, is_authorized_to_use=None, service_group=None):
        # All parameters are optional and default to None; Model.__init__
        # performs msrest base-class setup before the fields are assigned.
        super(BGPCommunity, self).__init__()
        self.service_supported_region = service_supported_region
        self.community_name = community_name
        self.community_value = community_value
        self.community_prefixes = community_prefixes
        self.is_authorized_to_use = is_authorized_to_use
        self.service_group = service_group
| mit |
jffernandez/kivy | kivy/uix/camera.py | 52 | 3461 | '''
Camera
======
The :class:`Camera` widget is used to capture and display video from a camera.
Once the widget is created, the texture inside the widget will be automatically
updated. Our :class:`~kivy.core.camera.CameraBase` implementation is used under
the hood::
cam = Camera()
By default, the first camera found on your system is used. To use a different
camera, set the index property::
cam = Camera(index=1)
You can also select the camera resolution::
cam = Camera(resolution=(320, 240))
.. warning::
The camera texture is not updated as soon as you have created the object.
The camera initialization is asynchronous, so there may be a delay before
the requested texture is created.
'''
__all__ = ('Camera', )
from kivy.uix.image import Image
from kivy.core.camera import Camera as CoreCamera
from kivy.properties import NumericProperty, ListProperty, \
BooleanProperty
class Camera(Image):
    '''Camera class. See module documentation for more information.
    '''

    play = BooleanProperty(True)
    '''Boolean indicating whether the camera is playing or not.
    You can start/stop the camera by setting this property::
        # start the camera playing at creation (default)
        cam = Camera(play=True)
        # create the camera, and start later
        cam = Camera(play=False)
        # and later
        cam.play = True
    :attr:`play` is a :class:`~kivy.properties.BooleanProperty` and defaults to
    True.
    '''

    index = NumericProperty(-1)
    '''Index of the used camera, starting from 0.
    :attr:`index` is a :class:`~kivy.properties.NumericProperty` and defaults
    to -1 to allow auto selection.
    '''

    resolution = ListProperty([-1, -1])
    '''Preferred resolution to use when invoking the camera. If you are using
    [-1, -1], the resolution will be the default one::
        # create a camera object with the best image available
        cam = Camera()
        # create a camera object with an image of 320x240 if possible
        cam = Camera(resolution=(320, 240))
    .. warning::
        Depending on the implementation, the camera may not respect this
        property.
    :attr:`resolution` is a :class:`~kivy.properties.ListProperty` and defaults
    to [-1, -1].
    '''

    def __init__(self, **kwargs):
        self._camera = None
        super(Camera, self).__init__(**kwargs)
        if self.index == -1:
            self.index = 0
        # Rebuild the core camera whenever index or resolution changes.
        on_index = self._on_index
        fbind = self.fbind
        fbind('index', on_index)
        fbind('resolution', on_index)
        on_index()

    def on_tex(self, *l):
        # A new frame arrived: repaint the widget.
        self.canvas.ask_update()

    def _on_index(self, *largs):
        # BUG FIX: stop the previous core camera before replacing it;
        # otherwise a stale capture keeps running (and holds the device)
        # after an index/resolution change.
        if self._camera is not None:
            self._camera.stop()
        self._camera = None
        if self.index < 0:
            return
        if self.resolution[0] < 0 or self.resolution[1] < 0:
            return
        # Create the camera stopped; it is started below only if play is set.
        self._camera = CoreCamera(index=self.index,
                                  resolution=self.resolution, stopped=True)
        # The texture only exists once the provider has loaded the device.
        self._camera.bind(on_load=self._camera_loaded)
        if self.play:
            self._camera.start()
            self._camera.bind(on_texture=self.on_tex)

    def _camera_loaded(self, *largs):
        self.texture = self._camera.texture
        self.texture_size = list(self.texture.size)

    def on_play(self, instance, value):
        # Property callback: start/stop the underlying core camera.
        if not self._camera:
            return
        if value:
            self._camera.start()
        else:
            self._camera.stop()
| mit |
AOSPU/external_chromium_org | tools/site_compare/utils/browser_iterate.py | 189 | 5836 | # Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Utility to use a browser to visit multiple URLs.
Prerequisites:
1. The command_line package from tools/site_compare
2. Either the IE BHO or Firefox extension (or both)
Installation:
1. Build the IE BHO, or call regsvr32 on a prebuilt binary
2. Add a file called "measurepageloadtimeextension@google.com" to
the default Firefox profile directory under extensions, containing
the path to the Firefox extension root
Invoke with the command line arguments as documented within
the command line.
"""
import command_line
import scrapers
import socket
import time
from drivers import windowing
# Constants
MAX_URL = 1024
PORT = 42492
def SetupIterationCommandLine(cmd):
  """Adds the necessary flags for iteration to a command.

  Args:
    cmd: an object created by cmdline.AddCommand
  """
  # Browser selection: which browser to drive, optional version, optional
  # explicit binary path.
  cmd.AddArgument(
      ["-b", "--browser"], "Browser to use (ie, firefox, chrome)",
      type="string", required=True)
  cmd.AddArgument(
      ["-b1v", "--browserver"], "Version of browser", metaname="VERSION")
  cmd.AddArgument(
      ["-p", "--browserpath"], "Path to browser.",
      type="string", required=False)
  # URL source: either a single --url or a --list file. The two are mutually
  # exclusive, and AddRequiredGroup below demands at least one of them.
  cmd.AddArgument(
      ["-u", "--url"], "URL to visit")
  cmd.AddArgument(
      ["-l", "--list"], "File containing list of URLs to visit", type="readfile")
  cmd.AddMutualExclusion(["--url", "--list"])
  # Slicing of the URL list: a 1-based start line plus either an exclusive
  # end line or a count (but not both).
  cmd.AddArgument(
      ["-s", "--startline"], "First line of URL list", type="int")
  cmd.AddArgument(
      ["-e", "--endline"], "Last line of URL list (exclusive)", type="int")
  cmd.AddArgument(
      ["-c", "--count"], "Number of lines of URL file to use", type="int")
  # NOTE(review): the constraint declarations below are interleaved rather
  # than grouped; the command_line framework presumably records them lazily,
  # so registration order should not matter -- confirm before reordering.
  cmd.AddDependency("--startline", "--list")
  cmd.AddRequiredGroup(["--url", "--list"])
  cmd.AddDependency("--endline", "--list")
  cmd.AddDependency("--count", "--list")
  cmd.AddMutualExclusion(["--count", "--endline"])
  cmd.AddDependency("--count", "--startline")
  # Session knobs: page-load timeout and browser window geometry.
  cmd.AddArgument(
      ["-t", "--timeout"], "Amount of time (seconds) to wait for browser to "
      "finish loading",
      type="int", default=300)
  cmd.AddArgument(
      ["-sz", "--size"], "Browser window size", default=(800, 600), type="coords")
def Iterate(command, iteration_func):
  """Iterates over a list of URLs, calling a function on each.

  Args:
    command: the command line containing the iteration flags
    iteration_func: called for each URL as (url, proc, wnd, response)
      (the original docstring listed the wrong argument order)
  """
  # Retrieve the browser scraper to use to invoke the browser
  scraper = scrapers.GetScraper((command["--browser"], command["--browserver"]))

  def AttachToBrowser(path, timeout):
    """Invoke the browser process and connect to the socket."""
    (proc, frame, wnd) = scraper.GetBrowser(path)

    if not wnd:
      raise ValueError("Could not invoke browser.")

    # Try to connect the socket. If it fails, wait and try
    # again. Do this for ten seconds
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_TCP)

    for attempt in xrange(10):
      try:
        s.connect(("localhost", PORT))
      except socket.error:
        time.sleep(1)
        continue
      break

    try:
      s.getpeername()
    except socket.error:
      raise ValueError("Could not connect to browser")

    if command["--size"]:
      # Resize and reposition the frame
      windowing.MoveAndSizeWindow(frame, (0, 0), command["--size"], wnd)

    s.settimeout(timeout)

    # State is stashed on the function object so the nested helpers and the
    # main loop share one browser/socket between iterations.
    Iterate.proc = proc
    Iterate.wnd = wnd
    Iterate.s = s

  def DetachFromBrowser():
    """Close the socket and kill the process if necessary."""
    if Iterate.s:
      Iterate.s.close()
      Iterate.s = None

    if Iterate.proc:
      if not windowing.WaitForProcessExit(Iterate.proc, 0):
        try:
          windowing.EndProcess(Iterate.proc)
          windowing.WaitForProcessExit(Iterate.proc, 0)
        except Exception:
          # BUG FIX: this previously caught pywintypes.error, but pywintypes
          # is never imported in this module, so reaching the handler raised
          # a NameError instead of swallowing the expected failure.
          # An exception here most likely means the process died on its own.
          pass
      Iterate.proc = None

  if command["--browserpath"]:
    browser = command["--browserpath"]
  else:
    browser = None

  # Read the URLs from the file
  if command["--url"]:
    url_list = [command["--url"]]
  else:
    startline = command["--startline"]
    if command["--count"]:
      endline = startline + command["--count"]
    else:
      endline = command["--endline"]

    url_list = []
    # BUG FIX: the handle was stored in a variable shadowing the 'file'
    # builtin and was never closed; close it deterministically instead.
    url_file = open(command["--list"], "r")
    try:
      for _ in xrange(startline - 1):
        url_file.readline()
      for _ in xrange(endline - startline):
        url_list.append(url_file.readline().strip())
    finally:
      url_file.close()

  timeout = command["--timeout"]

  # Loop through the URLs and send them through the socket
  Iterate.s = None
  Iterate.proc = None
  Iterate.wnd = None

  for url in url_list:
    # Invoke the browser if necessary
    if not Iterate.proc:
      AttachToBrowser(browser, timeout)
    # Send the URL and wait for a response
    Iterate.s.send(url + "\n")

    response = ""
    while (response.find("\n") < 0):
      try:
        recv = Iterate.s.recv(MAX_URL)
        response = response + recv

        # Workaround for an oddity: when Firefox closes
        # gracefully, somehow Python doesn't detect it.
        # (Telnet does)
        if not recv:
          raise socket.error

      except socket.timeout:
        response = url + ",hang\n"
        DetachFromBrowser()

      except socket.error:
        # If there was a socket error, it's probably a crash
        response = url + ",crash\n"
        DetachFromBrowser()

    # If we received a timeout response, restart the browser
    if response[-9:] == ",timeout\n":
      DetachFromBrowser()

    # Invoke the iteration function
    iteration_func(url, Iterate.proc, Iterate.wnd, response)

  # We're done
  DetachFromBrowser()
| bsd-3-clause |
redsolution/django-trusted-html | example/tests.py | 1 | 2963 | import unittest
from django.contrib.auth.models import User
from django.test import Client
from django.test.testcases import TestCase
from example.models import MyModel, ExternalModel
class ViewsTest(unittest.TestCase):
    """Smoke tests: each example URL must answer with its expected status."""

    def test_views(self):
        client = Client()
        # (url, expected HTTP status) pairs, checked in the original order.
        status_expectations = (
            ('/response', 200),
            ('/notfound', 404),
            ('/error', 500),
            ('/redirect_response', 302),
            ('/redirect_notfound', 302),
            ('/redirect_redirect_response', 302),
            ('/redirect_cicle', 302),
            ('/permanent_redirect_response', 301),
            ('/http404', 404),
        )
        for url, expected_status in status_expectations:
            self.assertEqual(client.get(url).status_code, expected_status)
        # /http500 propagates the exception instead of rendering a page.
        self.assertRaises(Exception, client.get, '/http500')
        self.assertEqual(client.get('/request_true_response').content, 'True')
        self.assertEqual(client.get('/request_false_response').content, 'False')
        self.assertEqual(client.get('/doesnotexists').status_code, 404)
        self.assertEqual(client.get('/').status_code, 404)
class AdminTest(TestCase):
    """Verify that XSS markup posted through the admin is sanitized, except
    on fields that are deliberately left untrusted."""

    XSS_TEXT = r'''<IMG SRC="javascript:alert('XSS');">%s test'''
    TRUSTED_TEXT = '<p>%s test</p>'

    def test_direct_changes(self):
        # Saving straight through the ORM bypasses admin-form cleaning,
        # so the markup is stored verbatim.
        MyModel.objects.create(html=self.XSS_TEXT)
        self.assertEqual(MyModel.objects.get().html, self.XSS_TEXT)

    def login(self):
        User.objects.create_superuser(
            username='admin', email='admin@example.com', password='admin')
        self.assertTrue(self.client.login(username='admin', password='admin'))

    def _post_add(self, url, field_names):
        """POST an admin 'add' form where every listed field gets XSS markup."""
        payload = dict((name, self.XSS_TEXT % name) for name in field_names)
        response = self.client.post(url, data=payload)
        self.assertEqual(response.status_code, 302)

    def test_trusted_text_field(self):
        self.login()
        self._post_add('/admin/example/mymodel/add/', ('html', 'short'))
        saved = MyModel.objects.get()
        self.assertEqual(saved.html, self.TRUSTED_TEXT % 'html')
        self.assertEqual(saved.short, self.TRUSTED_TEXT % 'short')

    def test_trusted_models(self):
        self.login()
        self._post_add('/admin/example/externalmodel/add/',
                       ('name', 'description', 'not_trusted'))
        saved = ExternalModel.objects.get()
        self.assertEqual(saved.name, self.TRUSTED_TEXT % 'name')
        self.assertEqual(saved.description, self.TRUSTED_TEXT % 'description')
        self.assertEqual(saved.not_trusted, self.XSS_TEXT % 'not_trusted')
| gpl-3.0 |
hmen89/odoo | addons/website_project/controllers/main.py | 373 | 1513 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013-Today OpenERP SA (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.addons.web import http
from openerp.addons.web.http import request
class website_project(http.Controller):
    """Public website controller for project pages."""

    @http.route(['/project/<model("project.project"):project>'], type='http', auth="public", website=True)
    def project(self, project=None, **post):
        """Render the public page for a single project.

        The project record is resolved by the route converter; extra POST
        data in **post is accepted but unused.
        """
        # Removed dead locals (cr, uid, context were assigned, never used).
        render_values = {
            'project': project,
            'main_object': project,
        }
        return request.website.render("website_project.index", render_values)
| agpl-3.0 |
medspx/QGIS | python/plugins/processing/core/parameters.py | 1 | 10020 | # -*- coding: utf-8 -*-
"""
***************************************************************************
Parameters.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import sys
from qgis.core import (QgsRasterLayer,
QgsVectorLayer,
QgsMapLayer,
QgsCoordinateReferenceSystem,
QgsExpression,
QgsProject,
QgsRectangle,
QgsVectorFileWriter,
QgsProcessing,
QgsProcessingUtils,
QgsProcessingParameters,
QgsProcessingParameterDefinition,
QgsProcessingParameterRasterLayer,
QgsProcessingParameterVectorLayer,
QgsProcessingParameterBoolean,
QgsProcessingParameterCrs,
QgsProcessingParameterRange,
QgsProcessingParameterPoint,
QgsProcessingParameterEnum,
QgsProcessingParameterExtent,
QgsProcessingParameterMatrix,
QgsProcessingParameterFile,
QgsProcessingParameterField,
QgsProcessingParameterVectorDestination,
QgsProcessingParameterFileDestination,
QgsProcessingParameterFolderDestination,
QgsProcessingParameterRasterDestination,
QgsProcessingParameterString,
QgsProcessingParameterMultipleLayers,
QgsProcessingParameterFeatureSource,
QgsProcessingParameterNumber)
def getParameterFromString(s):
    """Build a QgsProcessingParameter* instance from its serialized form.

    Two formats are understood:

    * the pipe-separated definitions used in description files, e.g.
      ``QgsProcessingParameterNumber|NAME|DESC|...`` -- a leading ``*``
      marks the parameter as advanced, and the literal token ``None``
      means "value not provided";
    * the script-style syntax, delegated to
      ``QgsProcessingParameters.parameterFromScriptCode``.

    Returns the parameter instance, or None when the string cannot be parsed.
    """

    def _bool(token):
        # Description files serialize booleans as the strings 'true'/'false'.
        return token.lower() == 'true'

    # Try the parameter definitions used in description files
    if '|' in s and (s.startswith("QgsProcessingParameter") or s.startswith("*QgsProcessingParameter") or s.startswith('Parameter') or s.startswith('*Parameter')):
        isAdvanced = False
        if s.startswith("*"):
            s = s[1:]
            isAdvanced = True
        tokens = s.split("|")
        params = [t if str(t) != str(None) else None for t in tokens[1:]]
        # Removed a vestigial 'if True:' wrapper (with an unreachable
        # 'else: return None') that added an indentation level for nothing.
        clazz = getattr(sys.modules[__name__], tokens[0])
        # Convert the positional string tokens to the types each parameter
        # class constructor expects; indices mirror the constructor args.
        if clazz == QgsProcessingParameterRasterLayer:
            if len(params) > 3:
                params[3] = _bool(params[3])
        elif clazz == QgsProcessingParameterVectorLayer:
            if len(params) > 2:
                params[2] = [int(p) for p in params[2].split(';')]
            if len(params) > 4:
                params[4] = _bool(params[4])
        elif clazz == QgsProcessingParameterBoolean:
            if len(params) > 2:
                params[2] = _bool(params[2])
            if len(params) > 3:
                params[3] = _bool(params[3])
        elif clazz == QgsProcessingParameterPoint:
            if len(params) > 3:
                params[3] = _bool(params[3])
        elif clazz == QgsProcessingParameterCrs:
            if len(params) > 3:
                params[3] = _bool(params[3])
        elif clazz == QgsProcessingParameterRange:
            if len(params) > 2:
                params[2] = QgsProcessingParameterNumber.Integer if params[2].lower().endswith('integer') else QgsProcessingParameterNumber.Double
            if len(params) > 4:
                params[4] = _bool(params[4])
        elif clazz == QgsProcessingParameterExtent:
            if len(params) > 3:
                params[3] = _bool(params[3])
        elif clazz == QgsProcessingParameterEnum:
            if len(params) > 2:
                params[2] = params[2].split(';')
            if len(params) > 3:
                params[3] = _bool(params[3])
            if len(params) > 4:
                # For multiple values the default is a comma-separated list
                # of ints; otherwise a single int index.
                if params[3]:
                    params[4] = [int(v) for v in params[4].split(',')]
                else:
                    params[4] = int(params[4])
            if len(params) > 5:
                params[5] = _bool(params[5])
        elif clazz == QgsProcessingParameterFeatureSource:
            if len(params) > 2:
                params[2] = [int(p) for p in params[2].split(';')]
            if len(params) > 4:
                params[4] = _bool(params[4])
        elif clazz == QgsProcessingParameterMultipleLayers:
            if len(params) > 2:
                params[2] = int(params[2])
            if len(params) > 4:
                params[4] = _bool(params[4])
        elif clazz == QgsProcessingParameterMatrix:
            if len(params) > 2:
                params[2] = int(params[2])
            if len(params) > 3:
                params[3] = _bool(params[3])
            if len(params) > 4:
                params[4] = params[4].split(';')
        elif clazz == QgsProcessingParameterField:
            if len(params) > 4:
                params[4] = int(params[4])
            if len(params) > 5:
                params[5] = _bool(params[5])
            if len(params) > 6:
                params[6] = _bool(params[6])
        elif clazz == QgsProcessingParameterFile:
            if len(params) > 2:
                # Historical encoding: 'false' selects File, anything else Folder.
                params[2] = QgsProcessingParameterFile.File if params[2].lower() == 'false' else QgsProcessingParameterFile.Folder
            if len(params) > 5:
                params[5] = _bool(params[5])
        elif clazz == QgsProcessingParameterNumber:
            if len(params) > 2:
                params[2] = QgsProcessingParameterNumber.Integer if params[2].lower().endswith('integer') else QgsProcessingParameterNumber.Double
            if len(params) > 3:
                params[3] = float(params[3].strip()) if params[3] is not None else None
            if len(params) > 4:
                params[4] = _bool(params[4])
            if len(params) > 5:
                params[5] = float(params[5].strip()) if params[5] is not None else -sys.float_info.max + 1
            if len(params) > 6:
                params[6] = float(params[6].strip()) if params[6] is not None else sys.float_info.max - 1
        elif clazz == QgsProcessingParameterString:
            if len(params) > 3:
                params[3] = _bool(params[3])
            if len(params) > 4:
                params[4] = _bool(params[4])
        elif clazz == QgsProcessingParameterFileDestination:
            if len(params) > 4:
                params[4] = _bool(params[4])
        elif clazz == QgsProcessingParameterFolderDestination:
            if len(params) > 3:
                params[3] = _bool(params[3])
        elif clazz == QgsProcessingParameterRasterDestination:
            if len(params) > 3:
                params[3] = _bool(params[3])
        elif clazz == QgsProcessingParameterVectorDestination:
            if len(params) > 2:
                if params[2].lower().endswith('point'):
                    params[2] = QgsProcessing.TypeVectorPoint
                elif params[2].lower().endswith('line'):
                    params[2] = QgsProcessing.TypeVectorLine
                elif params[2].lower().endswith('polygon'):
                    params[2] = QgsProcessing.TypeVectorPolygon
                elif params[2].lower().endswith('geometry'):
                    params[2] = QgsProcessing.TypeVectorAnyGeometry
                elif params[2].lower().endswith('vector'):
                    params[2] = QgsProcessing.TypeVector
            if len(params) > 4:
                params[4] = _bool(params[4])
        param = clazz(*params)
        if isAdvanced:
            param.setFlags(param.flags() | QgsProcessingParameterDefinition.FlagAdvanced)
        return param
    else:  # try script syntax
        # try native method
        param = QgsProcessingParameters.parameterFromScriptCode(s)
        if param:
            return param
    # Fall through: implicit None when nothing matched (original behaviour).
| gpl-2.0 |
taohungyang/cloud-custodian | tools/c7n_index/setup.py | 6 | 1163 | # Copyright 2015-2017 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import setup, find_packages
# Package metadata for the Cloud Custodian metrics/resource indexer tool.
INSTALL_REQUIRES = ["c7n", "click", "influxdb", "elasticsearch"]

setup(
    name="c7n_indexer",
    version='0.0.2',
    description="Cloud Custodian - Metrics/Resource Indexer",
    url="https://github.com/capitalone/cloud-custodian",
    license="Apache-2.0",
    classifiers=[
        "Topic :: System :: Systems Administration",
        "Topic :: System :: Distributed Computing",
    ],
    packages=find_packages(),
    # Expose the indexer CLI as `c7n-indexer`.
    entry_points={
        'console_scripts': [
            'c7n-indexer = c7n_index.metrics:cli',
        ],
    },
    install_requires=INSTALL_REQUIRES,
)
| apache-2.0 |
StuartLittlefair/astropy | examples/coordinates/rv-to-gsr.py | 8 | 4334 | # -*- coding: utf-8 -*-
"""
================================================================
Convert a radial velocity to the Galactic Standard of Rest (GSR)
================================================================
Radial or line-of-sight velocities of sources are often reported in a
Heliocentric or Solar-system barycentric reference frame. A common
transformation incorporates the projection of the Sun's motion along the
line-of-sight to the target, hence transforming it to a Galactic rest frame
instead (sometimes referred to as the Galactic Standard of Rest, GSR). This
transformation depends on the assumptions about the orientation of the Galactic
frame relative to the bary- or Heliocentric frame. It also depends on the
assumed solar velocity vector. Here we'll demonstrate how to perform this
transformation using a sky position and barycentric radial-velocity.
*By: Adrian Price-Whelan*
*License: BSD*
"""
################################################################################
# Import the required Astropy packages:
import astropy.units as u
import astropy.coordinates as coord
################################################################################
# Use the latest convention for the Galactocentric coordinates
coord.galactocentric_frame_defaults.set('latest')
################################################################################
# For this example, let's work with the coordinates and barycentric radial
# velocity of the star HD 155967, as obtained from
# `Simbad <http://simbad.harvard.edu/simbad/>`_:
icrs = coord.SkyCoord(ra=258.58356362*u.deg, dec=14.55255619*u.deg,
radial_velocity=-16.1*u.km/u.s, frame='icrs')
################################################################################
# We next need to decide on the velocity of the Sun in the assumed GSR frame.
# We'll use the same velocity vector as used in the
# `~astropy.coordinates.Galactocentric` frame, and convert it to a
# `~astropy.coordinates.CartesianRepresentation` object using the
# ``.to_cartesian()`` method of the
# `~astropy.coordinates.CartesianDifferential` object ``galcen_v_sun``:
v_sun = coord.Galactocentric().galcen_v_sun.to_cartesian()
################################################################################
# We now need to get a unit vector in the assumed Galactic frame from the sky
# position in the ICRS frame above. We'll use this unit vector to project the
# solar velocity onto the line-of-sight:
gal = icrs.transform_to(coord.Galactic)
cart_data = gal.data.to_cartesian()
unit_vector = cart_data / cart_data.norm()
################################################################################
# Now we project the solar velocity using this unit vector:
v_proj = v_sun.dot(unit_vector)
################################################################################
# Finally, we add the projection of the solar velocity to the radial velocity
# to get a GSR radial velocity:
rv_gsr = icrs.radial_velocity + v_proj
print(rv_gsr)
################################################################################
# We could wrap this in a function so we can control the solar velocity and
# re-use the above code:
def rv_to_gsr(c, v_sun=None):
    """Transform a barycentric radial velocity to the Galactic Standard of Rest
    (GSR).

    The input coordinate must carry a radial velocity (e.g. a coordinate
    created with ``radial_velocity=...``).

    Parameters
    ----------
    c : `~astropy.coordinates.BaseCoordinateFrame` subclass instance
        The radial velocity, associated with a sky coordinates, to be
        transformed.
    v_sun : `~astropy.units.Quantity`, optional
        The 3D velocity of the solar system barycenter in the GSR frame.
        Defaults to the same solar motion as in the
        `~astropy.coordinates.Galactocentric` frame.

    Returns
    -------
    v_gsr : `~astropy.units.Quantity`
        The input radial velocity transformed to a GSR frame.
    """
    if v_sun is None:
        v_sun = coord.Galactocentric().galcen_v_sun.to_cartesian()

    # Unit vector toward the target, expressed in the Galactic frame.
    galactic = c.transform_to(coord.Galactic)
    cartesian = galactic.data.to_cartesian()
    los_unit = cartesian / cartesian.norm()

    # Project the solar motion onto the line of sight and add it to the
    # barycentric radial velocity.
    return c.radial_velocity + v_sun.dot(los_unit)


rv_gsr = rv_to_gsr(icrs)
print(rv_gsr)
| bsd-3-clause |
mrklees/CYPY | cyautomation/excel-updater/win32_wrapper.py | 2 | 10550 | # -*- coding: utf-8 -*-
"""Excep Updater Wrapper Functions
This modules is intended to build a layer of abstraction on top of the win32com
api. The goal is for the user to be able to import this module into a script where they
acutally implament the updating of specific workbooks.
"""
# Python Core packages
import os
from time import sleep, time
#from datetime import datetime
import sys
import logging
# win32 API
import win32com.client as win32
import win32process
import win32gui
import win32api
import win32con
def open_excel():
    """Launch (or attach to) an Excel COM instance.

    Returns:
        win32 Excel Application driver with alerts suppressed and the window
        visible, or None when Excel could not be dispatched.
    """
    try:
        excel = win32.gencache.EnsureDispatch("Excel.Application")
        # BUG FIX: the success log previously said "Excel Instance Closed".
        logging.debug(":".join([str(time()), "Excel instance launched"]))
    except Exception:
        # Narrowed from a bare 'except:'; COM failures vary by host, but we
        # must not swallow KeyboardInterrupt/SystemExit.
        logging.critical(":".join([str(time()), "Failed to launch Excel, quit"]))
        return None
    excel.Visible = True
    excel.DisplayAlerts = False  # suppress modal dialogs during automation
    return excel
def open_workbook(fp, excel):
    """Open a workbook through an existing Excel driver.

    Args:
        fp (str): Filepath of workbook to open
        excel: Active win32 Excel Application driver

    Returns:
        Excel Workbook Socket: the open socket to the workbook we opened.
        (The docstring previously claimed the driver was also returned.)

    Exits the process on failure, preserving the original fail-fast behaviour.
    """
    try:
        wb = excel.Workbooks.Open(fp)
        logging.debug(":".join([str(time()), "Opened " + fp]))
    except Exception:
        # Narrowed from a bare 'except:'.
        logging.critical(":".join([str(time()), "Failed to open workbook"]))
        sys.exit(1)
    return wb
def refresh_save_quit(wb, protection, timer):
    """Refreshes, saves, and closes an open Excel workbook.

    Protection and hiding are lifted first so the refresh can write to the
    sheets, then reapplied before saving.

    Args:
        wb (Excel Workbook Socket): The open socket to workbook to refresh
        protection (Dict): parameters for protection { protect : [sheetnames],
                                                       veryhide : [sheetnames] }
        timer (int): seconds to wait for RefreshAll to finish
            (RefreshAll is asynchronous, so this simply sleeps).
    """
    remove_sheet_protection(wb, protection)
    very_unhidden(wb, protection)
    wb.RefreshAll()
    sleep(timer)  # give the asynchronous refresh time to complete
    apply_sheet_protection(wb, protection)
    very_hidden(wb, protection)
    wb.Save()
    logging.debug("{}:refreshed and saved workbook successfully".format(time()))
    wb.Close(False)  # False: do not prompt/save again on close
def search_tree(parent_dir):
    """Walk the designated parent dir and all subfolders, collecting the
    (dirpath, dirnames, filenames) triple for each directory.

    Args:
        parent_dir (str): The filepath of the parent directory

    Returns:
        list: 3-tuples of (dirpath, dirnames, filenames), one per directory.
    """
    # os.walk already yields exactly the triples we need; just materialize it.
    return list(os.walk(parent_dir))
def get_all_filepaths(file_tree):
    """Construct the list of .xlsm workbooks as absolute Windows paths.

    Args:
        file_tree (list): the list of (dirpath, dirnames, filenames) tuples
            from search_tree

    Returns:
        list: backslash-joined paths of every file ending in '.xlsm'
    """
    workbook_paths = []
    for dirpath, _child_dirs, filenames in file_tree:
        for filename in filenames:
            if filename.endswith('.xlsm'):
                workbook_paths.append('\\'.join([dirpath, filename]))
    return workbook_paths
def should_be_updated(file):
    """Return True when *file* looks like a workbook that should be refreshed.

    Excludes Office lock/temp files (names starting with '~') and anything
    that is not an .xlsm/.xlsx workbook.
    """
    if file.startswith('~'):
        return False
    # BUG FIX: the original compared with a missing dot ('xlsm'/'xlsx'), so
    # names like 'reportxlsm' matched, and combined the checks with the
    # bitwise '|' operator instead of a boolean test.
    return file.endswith(('.xlsm', '.xlsx'))
def apply_sheet_protection(wb, protection):
    """Password-protect every sheet listed under protection['protect'].

    Args:
        wb (Workbook socket): the workbook
        protection (Dict): parameters for protection { protect : [sheetnames],
                                                       veryhide : [sheetnames] }
    """
    for sheet_name in protection.get('protect', []):
        wb.Sheets(sheet_name).Protect(
            Password="Lighthouse", DrawingObjects=True,
            Contents=True, Scenarios=True)
def remove_sheet_protection(wb, protection):
    """Unprotect every sheet listed under protection['protect'].

    Args:
        wb (Workbook socket): the workbook
        protection (Dict): parameters for protection { protect : [sheetnames],
                                                       veryhide : [sheetnames] }
    """
    for sheet_name in protection.get('protect', []):
        wb.Sheets(sheet_name).Unprotect(Password="Lighthouse")
def very_hidden(wb, protection):
    """Set every sheet listed under protection['veryhide'] to 'very hidden'.

    Visible = 2 is Excel's xlSheetVeryHidden: the sheet can only be unhidden
    programmatically, not from the UI.

    Args:
        wb (Workbook socket): the workbook
        protection (Dict): parameters for protection { protect : [sheetnames],
                                                       veryhide : [sheetnames] }
    """
    for sheet_name in protection.get('veryhide', []):
        wb.Sheets(sheet_name).Visible = 2
def very_unhidden(wb, protection):
    """Make every sheet listed under protection['veryhide'] visible again.

    Visible = -1 is Excel's xlSheetVisible.

    Args:
        wb (Workbook socket): the workbook
        protection (Dict): parameters for protection { protect : [sheetnames],
                                                       veryhide : [sheetnames] }
    """
    for sheet_name in protection.get('veryhide', []):
        wb.Sheets(sheet_name).Visible = -1
def update_single_workbook(file, path, excel, protection=None, timer=8):
    """Refresh one workbook's queries, then save and close it.

    Args:
        file (str): The name of the file to update
        path (str): The path to the folder containing the file
        excel (Excel Connector): The open win32 Excel Application driver
        protection (Dict): parameters for protection { protect : [sheetnames],
                                                       veryhide : [sheetnames] }
        timer (int): the amount of time workbooks will need to update all queries

    Returns:
        None in all cases; the work happens as Excel side effects.
    """
    # A shared mutable default ({}) is a classic Python pitfall; normalize
    # None to a fresh dict instead. Passing {} explicitly still works.
    if protection is None:
        protection = {}
    if is_file_checked_out(file):
        # Someone has the file open/checked out on SharePoint: skip it.
        return None
    wb = open_workbook('\\'.join([path, file]), excel)
    sleep(3)  # give Excel a moment to finish opening the workbook
    if is_pq_available(excel):
        refresh_save_quit(wb, protection, timer=timer)
    else:
        logging.critical(":".join([str(time()), "PowerQuery has been closed. Please force close and open Excel."]))
        return None
    sleep(3)  # let Excel settle before the next workbook is opened
    return None
def update_all(parent_dir, protection=None, timer=8, excel=''):
    """Refresh every workbook found under *parent_dir* (recursively).

    Args:
        parent_dir (str): Filepath of the top level directory to update. Will recurse through children.
        protection (Dict): parameters for protection { protect : [sheetnames],
                                                       veryhide : [sheetnames] }
        timer (Int): The amount of time to pause after trigger the query update. Increase for larger workbooks.
        excel (Excel Connector): an already-open driver, or '' to have one
            opened (and force-closed) by this function.
    """
    if protection is None:
        # Avoid the shared mutable default ({}) pitfall.
        protection = {}
    all_files = search_tree(parent_dir)
    # Remember whether we opened Excel ourselves so we can close it again.
    opened_here = excel == ''
    if opened_here:
        excel = open_excel()
    for path, child_dirs, files in all_files:
        os.chdir(path)
        for file in files:
            print(file)
            update_single_workbook(file, path, excel, protection, timer=timer)
    # BUG FIX: the old code re-tested `excel == ''` here, but `excel` had
    # already been rebound to the live application object, so the cleanup
    # never ran and self-opened Excel instances were leaked.
    if opened_here:
        close_excel_by_force(excel)
def is_pq_available(excel):
    """Report whether the PowerQuery add-in is still connected to Excel.

    Args:
        excel (Excel Connector): Open Excel Connector

    Returns:
        The add-in's Connect flag (truthy when PowerQuery is connected).
    """
    addin = excel.COMAddIns("Microsoft.Mashup.Client.Excel")
    return addin.Connect
def close_excel_by_force(excel):
    """Force-close the Excel application behind *excel*.

    Excel frequently fails to close automatically, even when using the
    appropriate excel.quit() method, so this asks the window to close and
    then terminates the process if it is still alive.

    Args:
        excel (Excel Connector): Open Excel Connector
    """
    # Get the window's process id (thread id is not needed).
    hwnd = excel.Hwnd
    _, pid = win32process.GetWindowThreadProcessId(hwnd)
    # Ask window nicely to close
    win32gui.PostMessage(hwnd, win32con.WM_CLOSE, 0, 0)
    # Allow some time for app to close
    sleep(3)
    # If the application didn't close, force close
    try:
        handle = win32api.OpenProcess(win32con.PROCESS_TERMINATE, 0, pid)
        if handle:
            win32api.TerminateProcess(handle, 0)
            win32api.CloseHandle(handle)
    except Exception:
        # Best-effort cleanup: the process may already be gone. Narrowed
        # from a bare `except:` so KeyboardInterrupt/SystemExit still
        # propagate.
        pass
def configure_log(log_folder):
    """Configure root logging for an update cycle.

    Creates a file named update_<timestamp>.log inside *log_folder*/log
    (the 'log' subdirectory must already exist) and routes DEBUG-and-above
    messages there.

    Args:
        log_folder (str): directory containing the 'log' subdirectory.
    """
    stamp = str(time()).split(".")[0]  # whole-second unix timestamp
    # os.path.join instead of string concatenation: handles trailing
    # separators in log_folder and uses the platform separator.
    log_path = os.path.join(log_folder, 'log', 'update_{}.log'.format(stamp))
    logging.basicConfig(filename=log_path, level=logging.DEBUG)
def is_file_checked_out(filename):
    """Checks if file is checked out on SharePoint

    One of the biggest challenges in this script is catching the "File
    already open" error. When that happens the file opens, but then a small
    window pops up informing you of the problem, which kills the updater
    (the popup is hard to interact with programmatically). Renaming the
    file back and forth confirms whether it is already open and avoids the
    error entirely.

    Args:
        filename (str): the name of the file to check
    Returns:
        bool: True if the rename operation fails, implying that it's checked out
    """
    just_name = filename.split('.')[0]
    # e.g. 'book.xlsx' -> temporary name 'bookbook.xlsx'; the exact name is
    # irrelevant, the rename only probes whether the file is locked.
    temp_name = just_name + filename
    try:
        os.rename(filename, temp_name)
    except OSError:
        logging.critical("{}:".format(time()) + filename + ' is still open. Has not been refreshed.')
        return True
    sleep(3)
    try:
        os.rename(temp_name, filename)
        return False
    except OSError:
        logging.critical("{}:".format(time()) + filename + ' got locked out. Will wait and try again.')
        sleep(3)
        try:
            os.rename(temp_name, filename)
            logging.debug("{}:".format(time()) + filename + ' unlocked. Successfully reverted name.')
            return False
        except OSError:
            # Narrowed from a bare `except:` so Ctrl-C is not swallowed
            # while a file is stuck under its temporary name.
            logging.critical("{}:".format(time()) + filename + ' is still locked out. Will need a manual rename')
            return True
#if __name__ == '__main__':
# fp = 'P:\\Update Zone\\Refresh Zone\\FL Workbooks'
# fp = 'C:\\Users\\aperusse\\Desktop\\FiveFiles'
# configure_log()
# update_all(fp, {'protect':['Dashboard']}) | gpl-3.0 |
jhjguxin/blogserver | lib/python2.7/site-packages/django/contrib/gis/geos/prototypes/predicates.py | 623 | 1777 | """
This module houses the GEOS ctypes prototype functions for the
unary and binary predicate operations on geometries.
"""
from ctypes import c_char, c_char_p, c_double
from django.contrib.gis.geos.libgeos import GEOM_PTR
from django.contrib.gis.geos.prototypes.errcheck import check_predicate
from django.contrib.gis.geos.prototypes.threadsafe import GEOSFunc
## Binary & unary predicate functions ##
def binary_predicate(func, *args):
    "For GEOS binary predicate functions."
    # Two geometry pointers, plus any extra trailing argument types
    # (e.g. a tolerance double for GEOSEqualsExact).
    func.argtypes = [GEOM_PTR, GEOM_PTR] + list(args)
    func.restype = c_char
    func.errcheck = check_predicate
    return func
def unary_predicate(func):
    "For GEOS unary predicate functions."
    # A single geometry pointer in, a checked boolean out.
    func.restype = c_char
    func.errcheck = check_predicate
    func.argtypes = [GEOM_PTR]
    return func
## Unary Predicates ##
# Each binding wraps the raw GEOS C function so it takes GEOSGeometry
# pointers and returns a checked boolean (via check_predicate).
geos_hasz = unary_predicate(GEOSFunc('GEOSHasZ'))
geos_isempty = unary_predicate(GEOSFunc('GEOSisEmpty'))
geos_isring = unary_predicate(GEOSFunc('GEOSisRing'))
geos_issimple = unary_predicate(GEOSFunc('GEOSisSimple'))
geos_isvalid = unary_predicate(GEOSFunc('GEOSisValid'))
## Binary Predicates ##
# A few binary predicates take an extra trailing argument: a tolerance
# (c_double) for EqualsExact, a DE-9IM pattern string (c_char_p) for
# RelatePattern.
geos_contains = binary_predicate(GEOSFunc('GEOSContains'))
geos_crosses = binary_predicate(GEOSFunc('GEOSCrosses'))
geos_disjoint = binary_predicate(GEOSFunc('GEOSDisjoint'))
geos_equals = binary_predicate(GEOSFunc('GEOSEquals'))
geos_equalsexact = binary_predicate(GEOSFunc('GEOSEqualsExact'), c_double)
geos_intersects = binary_predicate(GEOSFunc('GEOSIntersects'))
geos_overlaps = binary_predicate(GEOSFunc('GEOSOverlaps'))
geos_relatepattern = binary_predicate(GEOSFunc('GEOSRelatePattern'), c_char_p)
geos_touches = binary_predicate(GEOSFunc('GEOSTouches'))
geos_within = binary_predicate(GEOSFunc('GEOSWithin'))
| mit |
facebook/sparts | sparts/thrift/compiler.py | 3 | 8167 | # Copyright (c) 2014, Facebook, Inc. All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
#
"""Tools for dynamically compiling and loading thrift code."""
import distutils.spawn
import imp
import os.path
import tempfile
from six import iterkeys, iteritems
from sparts import ctx
from sparts.compat import OrderedDict, check_output
from sparts.fileutils import NamedTemporaryDirectory
def compile(path, root='.', debug=False, **kwargs):
    """Return a compiled thrift file module from `path`

    Additional kwargs may be passed to indicate options to the thrift compiler:

    - new_style [default:True]: Use new-style classes
    - twisted [default:False]: Generated twisted-friendly bindings
    - tornado [default:False]: Generate tornado-friendly bindings
    - utf8strings [default:False]: Use unicode strings instead of native
    - slots [default:True]: Use __slots__ in generated structs
    """
    # NOTE: the public name intentionally shadows the builtin `compile`;
    # callers import it as sparts.thrift.compiler.compile.
    context = CompileContext(root=root, debug=debug)
    return context.importThrift(path, **kwargs)
def get_executable():
    """Returns the thrift compiler path if found in the PATH, else None"""
    # Prefer the fbthrift-style 'thrift1' binary, then fall back to the
    # Apache 'thrift' name.
    for candidate in ('thrift1', 'thrift'):
        found = distutils.spawn.find_executable(candidate)
        if found is not None:
            return found
    return None
def require_executable():
    """Assert that the thrift compiler is in the PATH. Returns the path"""
    path = get_executable()
    # Raise explicitly instead of using the `assert` statement, which is
    # stripped when Python runs with -O. AssertionError is kept so existing
    # callers that catch it keep working.
    if path is None:
        raise AssertionError('Unable to find thrift compiler in PATH')
    return path
class CompileContext(object):
    """Drives the external `thrift` compiler and imports its generated
    Python bindings at runtime without touching sys.modules.

    NOTE(review): `execfile`, `imp.new_module` and the `six.iter*` helpers
    mean this class targets Python 2; it will not run unmodified on
    Python 3.
    """
    def __init__(self, root='.', debug=False):
        # `root` is always registered as the first include directory.
        self.root = root
        self.thrift_bin = require_executable()
        # OrderedDict used as an ordered set: preserves -I ordering.
        self.include_dirs = OrderedDict()
        self.dep_files = {}      # basename -> absolute path (symlinked in)
        self.dep_contents = {}   # filename -> literal file contents
        # When debug is True, temporary dirs/files are kept for inspection.
        self.debug = debug
        self.addIncludeDir(self.root)
    def makeTemporaryIncludeDir(self):
        """Materialize all registered dependencies into a temp include dir."""
        d = NamedTemporaryDirectory(prefix='tsrc_')
        if self.debug:
            d.keep()
        for k, v in iteritems(self.dep_contents):
            d.writefile(k, v)
        for k, v in iteritems(self.dep_files):
            d.symlink(k, v)
        return d
    def makeIncludeArgs(self, temp_include_dir=None):
        """Build the ['-I', dir, ...] argument list for the compiler."""
        result = []
        for k in iterkeys(self.include_dirs):
            result += ['-I', k]
        if temp_include_dir is not None:
            result += ['-I', temp_include_dir.name]
        return result
    def getThriftOptions(self, new_style=True, twisted=False, tornado=False,
                         utf8strings=False, slots=True, dynamic=False,
                         dynbase=None, dynexc=None, dynimport=None):
        """Render the `--gen py[:opt,...]` generator parameter string.

        `twisted` and `tornado` are mutually exclusive; the dyn* options
        are accepted but not implemented yet (see TODO below).
        """
        param = 'py'
        options = []
        if new_style:
            options.append('new_style')
        if twisted:
            options.append('twisted')
            assert not tornado
        if tornado:
            options.append('tornado')
        if utf8strings:
            options.append('utf8strings')
        if slots:
            options.append('slots')
        # TODO: Dynamic import jonx
        if len(options):
            param += ':' + ','.join(options)
        return param
    def addIncludeDir(self, path):
        # Directory must exist; stored absolute so cwd changes are safe.
        assert os.path.exists(path) and os.path.isdir(path)
        self.include_dirs[os.path.abspath(path)] = True
    def addDependentFilePath(self, path):
        """Register an on-disk .thrift dependency (symlinked at compile time)."""
        assert os.path.exists(path)
        self.dep_files[os.path.basename(path)] = os.path.abspath(path)
        path = os.path.dirname(path) or '.'
        self.addIncludeDir(path)
    def addDependentFileContents(self, name, contents):
        """Register an in-memory .thrift dependency by literal contents."""
        self.dep_contents[name] = contents
    def importThriftStr(self, payload, **kwargs):
        """Compiles a thrift file from string `payload`"""
        with tempfile.NamedTemporaryFile(suffix='.thrift', mode='w') as f:
            if self.debug:
                f.delete = False
            f.write(payload)
            f.flush()
            return self.importThrift(f.name, **kwargs)
    def importThrift(self, path, **kwargs):
        """Compiles a .thrift file, importing its contents into its return value"""
        path = os.path.abspath(path)
        assert os.path.exists(path)
        assert os.path.isfile(path)
        srcdir = self.makeTemporaryIncludeDir()
        pathbase = os.path.basename(path)
        srcdir.symlink(pathbase, path)
        # Two compiler runs: non-recursive output is walked/imported below;
        # the recursive run only feeds sys.path so includes resolve.
        outdir = NamedTemporaryDirectory(prefix='to1_')
        outdir_recurse = NamedTemporaryDirectory(prefix='tor_')
        if self.debug:
            outdir.keep()
            outdir_recurse.keep()
        args = [self.thrift_bin] + self.makeIncludeArgs(srcdir) + \
            ["--gen", self.getThriftOptions(**kwargs), '-v',
             "-o", outdir.name, srcdir.join(pathbase)]
        check_output(args)
        args = [self.thrift_bin] + self.makeIncludeArgs(srcdir) + \
            ["--gen", self.getThriftOptions(**kwargs), '-v', '-r',
             "-o", outdir_recurse.name, srcdir.join(pathbase)]
        check_output(args)
        # Prepend output directory to the path
        with ctx.add_path(outdir_recurse.join('gen-py'), 0):
            thriftname = os.path.splitext(pathbase)[0]
            for dirpath, dirnames, filenames in os.walk(outdir.join('gen-py')):
                # Emulate relative imports badly
                dirpath = os.path.abspath(outdir.join('gen-py', dirpath))
                with ctx.add_path(dirpath):
                    # Add types to module first
                    if 'ttypes.py' in filenames:
                        ttypes = self.importPython(dirpath + '/ttypes.py')
                        result = ttypes
                        filenames.remove('ttypes.py')
                    # Then constants
                    if 'constants.py' in filenames:
                        result = self.mergeModules(
                            self.importPython(dirpath + '/constants.py'),
                            result)
                        filenames.remove('constants.py')
                    for filename in filenames:
                        # Skip pyremotes
                        if not filename.endswith('.py') or \
                           filename == '__init__.py':
                            continue
                        # Attach services as attributes on the module.
                        svcpath = dirpath + '/' + filename
                        svcname = os.path.splitext(filename)[0]
                        svcmod = self.importPython(svcpath)
                        svcmod.__file__ = os.path.abspath(svcpath)
                        svcmod.__name__ = '%s.%s (generated)' % \
                            (thriftname, svcname)
                        setattr(result, svcname, svcmod)
        # NOTE(review): `result` is only bound if ttypes.py/constants.py was
        # generated; otherwise this line raises NameError rather than the
        # intended AssertionError — confirm against the thrift compiler's
        # output guarantees.
        assert result is not None, "No files generated by %s" % (path, )
        # Set the __file__ attribute to the .thrift file instead
        # of the dynamically generated jonx
        result.__file__ = os.path.abspath(path)
        result.__name__ = thriftname + " (generated)"
        return result
    def mergeModules(self, module1, module2):
        """Copy module2's attributes onto module1 (module2 wins conflicts);
        tolerates either side being None."""
        if module1 is None:
            return module2
        if module2 is None:
            return module1
        for k in dir(module2):
            setattr(module1, k, getattr(module2, k))
        return module1
    def importPython(self, path):
        """Create a new module from code at `path`.

        Does not pollute python's module cache"""
        assert os.path.exists(path)
        # Any special variables we want to include in execution context
        orig_locals = {}
        exec_locals = orig_locals.copy()
        # Keep a copy of the module cache prior to execution
        with ctx.module_snapshot():
            execfile(path, exec_locals, exec_locals)
        # Generate a new module object, and assign the modified locals
        # as attributes on it.
        result = imp.new_module(path)
        for k, v in iteritems(exec_locals):
            setattr(result, k, v)
        return result
| bsd-3-clause |
LiveZenLK/CeygateERP | addons/crm/wizard/crm_lead_to_opportunity.py | 18 | 12831 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from openerp.osv import fields, osv
from openerp.tools.translate import _
import re
from openerp.exceptions import UserError
class crm_lead2opportunity_partner(osv.osv_memory):
    # Transient wizard (old Odoo `cr, uid` API) that converts one lead into
    # an opportunity, optionally merging it with duplicate leads and
    # linking/creating the customer partner.
    _name = 'crm.lead2opportunity.partner'
    _description = 'Lead To Opportunity Partner'
    _inherit = 'crm.partner.binding'
    _columns = {
        'name': fields.selection([
                ('convert', 'Convert to opportunity'),
                ('merge', 'Merge with existing opportunities')
            ], 'Conversion Action', required=True),
        'opportunity_ids': fields.many2many('crm.lead', string='Opportunities'),
        'user_id': fields.many2one('res.users', 'Salesperson', select=True),
        'team_id': fields.many2one('crm.team', 'Sales Team', oldname='section_id', select=True),
    }
    def onchange_action(self, cr, uid, ids, action, context=None):
        # Only the 'exist' action keeps a matched partner; any other action
        # clears the partner_id field in the wizard form.
        return {'value': {'partner_id': False if action != 'exist' else self._find_matching_partner(cr, uid, context=context)}}
    def _get_duplicated_leads(self, cr, uid, partner_id, email, include_lost=False, context=None):
        """
        Search for opportunities that have the same partner and that arent done or cancelled
        """
        return self.pool.get('crm.lead')._get_duplicated_leads_by_emails(cr, uid, partner_id, email, include_lost=include_lost, context=context)
    def default_get(self, cr, uid, fields, context=None):
        """
        Default get for name, opportunity_ids.
        If there is an exisitng partner link to the lead, find all existing
        opportunities links with this partner to merge all information together
        """
        # NOTE(review): assumes `context` is a dict (uses context.get
        # directly); a None context would raise — confirm callers.
        lead_obj = self.pool.get('crm.lead')
        res = super(crm_lead2opportunity_partner, self).default_get(cr, uid, fields, context=context)
        if context.get('active_id'):
            tomerge = [int(context['active_id'])]
            partner_id = res.get('partner_id')
            lead = lead_obj.browse(cr, uid, int(context['active_id']), context=context)
            email = lead.partner_id and lead.partner_id.email or lead.email_from
            # Merge candidates: the active lead plus its duplicates
            # (including lost ones), de-duplicated.
            tomerge.extend(self._get_duplicated_leads(cr, uid, partner_id, email, include_lost=True, context=context))
            tomerge = list(set(tomerge))
            if 'action' in fields and not res.get('action'):
                res.update({'action' : partner_id and 'exist' or 'create'})
            if 'partner_id' in fields:
                res.update({'partner_id' : partner_id})
            if 'name' in fields:
                # Two or more candidates -> propose a merge by default.
                res.update({'name' : len(tomerge) >= 2 and 'merge' or 'convert'})
            if 'opportunity_ids' in fields and len(tomerge) >= 2:
                res.update({'opportunity_ids': tomerge})
            if lead.user_id:
                res.update({'user_id': lead.user_id.id})
            if lead.team_id:
                res.update({'team_id': lead.team_id.id})
            if not partner_id and not lead.contact_name:
                res.update({'action': 'nothing'})
        return res
    def on_change_user(self, cr, uid, ids, user_id, team_id, context=None):
        """ When changing the user, also set a team_id or restrict team id
            to the ones user_id is member of. """
        if user_id:
            if team_id:
                # Is the chosen user the leader or a member of the team?
                user_in_team = self.pool.get('crm.team').search(cr, uid, [('id', '=', team_id), '|', ('user_id', '=', user_id), ('member_ids', '=', user_id)], context=context, count=True)
            else:
                user_in_team = False
            if not user_in_team:
                # Fall back to the team suggested by the lead model.
                result = self.pool['crm.lead'].on_change_user(cr, uid, ids, user_id, context=context)
                team_id = result.get('value') and result['value'].get('team_id') and result['value']['team_id'] or False
        return {'value': {'team_id': team_id}}
    def view_init(self, cr, uid, fields, context=None):
        """
        Check some preconditions before the wizard executes.
        """
        if context is None:
            context = {}
        lead_obj = self.pool.get('crm.lead')
        for lead in lead_obj.browse(cr, uid, context.get('active_ids', []), context=context):
            if lead.probability == 100:
                # probability == 100 marks a closed (won/dead) lead.
                raise UserError(_("Closed/Dead leads cannot be converted into opportunities."))
        return False
    def _convert_opportunity(self, cr, uid, ids, vals, context=None):
        # Convert each lead in vals['lead_ids'] into an opportunity, creating
        # or reusing the partner per the wizard's action, then allocate
        # salesmen. Returns the last convert_opportunity() result.
        if context is None:
            context = {}
        lead = self.pool.get('crm.lead')
        res = False
        lead_ids = vals.get('lead_ids', [])
        team_id = vals.get('team_id', False)
        partner_id = vals.get('partner_id')
        data = self.browse(cr, uid, ids, context=context)[0]
        leads = lead.browse(cr, uid, lead_ids, context=context)
        for lead_id in leads:
            partner_id = self._create_partner(cr, uid, lead_id.id, data.action, partner_id or lead_id.partner_id.id, context=context)
            res = lead.convert_opportunity(cr, uid, [lead_id.id], partner_id, [], False, context=context)
        user_ids = vals.get('user_ids', False)
        if context.get('no_force_assignation'):
            # Only assign a salesman to leads that do not have one yet.
            leads_to_allocate = [lead_id.id for lead_id in leads if not lead_id.user_id]
        else:
            leads_to_allocate = lead_ids
        if user_ids:
            lead.allocate_salesman(cr, uid, leads_to_allocate, user_ids, team_id=team_id, context=context)
        return res
    def action_apply(self, cr, uid, ids, context=None):
        """
        Convert lead to opportunity or merge lead and opportunity and open
        the freshly created opportunity view.
        """
        if context is None:
            context = {}
        lead_obj = self.pool['crm.lead']
        w = self.browse(cr, uid, ids, context=context)[0]
        opp_ids = [o.id for o in w.opportunity_ids]
        vals = {
            'team_id': w.team_id.id,
        }
        if w.partner_id:
            vals['partner_id'] = w.partner_id.id
        if w.name == 'merge':
            # Merge first; the surviving record may still be a lead, in
            # which case it is converted afterwards.
            lead_id = lead_obj.merge_opportunity(cr, uid, opp_ids, context=context)
            lead_ids = [lead_id]
            lead = lead_obj.read(cr, uid, lead_id, ['type', 'user_id'], context=context)
            if lead['type'] == "lead":
                context = dict(context, active_ids=lead_ids)
                vals.update({'lead_ids': lead_ids, 'user_ids': [w.user_id.id]})
                self._convert_opportunity(cr, uid, ids, vals, context=context)
            elif not context.get('no_force_assignation') or not lead['user_id']:
                vals.update({'user_id': w.user_id.id})
                lead_obj.write(cr, uid, lead_id, vals, context=context)
        else:
            lead_ids = context.get('active_ids', [])
            vals.update({'lead_ids': lead_ids, 'user_ids': [w.user_id.id]})
            self._convert_opportunity(cr, uid, ids, vals, context=context)
        return self.pool.get('crm.lead').redirect_opportunity_view(cr, uid, lead_ids[0], context=context)
    def _create_partner(self, cr, uid, lead_id, action, partner_id, context=None):
        """
        Create partner based on action.
        :return dict: dictionary organized as followed: {lead_id: partner_assigned_id}
        """
        #TODO this method in only called by crm_lead2opportunity_partner
        #wizard and would probably diserve to be refactored or at least
        #moved to a better place
        if context is None:
            context = {}
        lead = self.pool.get('crm.lead')
        if action == 'each_exist_or_create':
            # Per-lead matching: look up an existing partner for this lead,
            # falling back to creating one.
            ctx = dict(context)
            ctx['active_id'] = lead_id
            partner_id = self._find_matching_partner(cr, uid, context=ctx)
            action = 'create'
        res = lead.handle_partner_assignation(cr, uid, [lead_id], action, partner_id, context=context)
        return res.get(lead_id)
class crm_lead2opportunity_mass_convert(osv.osv_memory):
    # Bulk variant of the conversion wizard: operates on all leads in
    # context['active_ids'], with optional per-partner deduplication.
    _name = 'crm.lead2opportunity.partner.mass'
    _description = 'Mass Lead To Opportunity Partner'
    _inherit = 'crm.lead2opportunity.partner'
    _columns = {
        'user_ids': fields.many2many('res.users', string='Salesmen'),
        'team_id': fields.many2one('crm.team', 'Sales Team', select=True, oldname='section_id'),
        'deduplicate': fields.boolean('Apply deduplication', help='Merge with existing leads/opportunities of each partner'),
        'action': fields.selection([
                ('each_exist_or_create', 'Use existing partner or create'),
                ('nothing', 'Do not link to a customer')
            ], 'Related Customer', required=True),
        'force_assignation': fields.boolean('Force assignation', help='If unchecked, this will leave the salesman of duplicated opportunities'),
    }
    _defaults = {
        'deduplicate': True,
    }
    def default_get(self, cr, uid, fields, context=None):
        # Override the single-lead defaults: a mass conversion never
        # pre-selects a partner or a merge action.
        res = super(crm_lead2opportunity_mass_convert, self).default_get(cr, uid, fields, context)
        if 'partner_id' in fields:
            # avoid forcing the partner of the first lead as default
            res['partner_id'] = False
        if 'action' in fields:
            res['action'] = 'each_exist_or_create'
        if 'name' in fields:
            res['name'] = 'convert'
        if 'opportunity_ids' in fields:
            res['opportunity_ids'] = False
        return res
    def on_change_action(self, cr, uid, ids, action, context=None):
        # Clear the partner field unless the user explicitly keeps an
        # existing partner.
        vals = {}
        if action != 'exist':
            vals = {'value': {'partner_id': False}}
        return vals
    def on_change_deduplicate(self, cr, uid, ids, deduplicate, context=None):
        # Recompute opportunity_ids: the subset of selected leads that have
        # at least one duplicate sharing the same (partner, email) pair.
        # NOTE(review): reads context['active_ids'] without a default —
        # assumes the wizard is always opened from a list selection.
        if context is None:
            context = {}
        active_leads = self.pool['crm.lead'].browse(cr, uid, context['active_ids'], context=context)
        partner_ids = [(lead.partner_id.id, lead.partner_id and lead.partner_id.email or lead.email_from) for lead in active_leads]
        partners_duplicated_leads = {}
        for partner_id, email in partner_ids:
            duplicated_leads = self._get_duplicated_leads(cr, uid, partner_id, email)
            if len(duplicated_leads) > 1:
                partners_duplicated_leads.setdefault((partner_id, email), []).extend(duplicated_leads)
        leads_with_duplicates = []
        for lead in active_leads:
            lead_tuple = (lead.partner_id.id, lead.partner_id.email if lead.partner_id else lead.email_from)
            if len(partners_duplicated_leads.get(lead_tuple, [])) > 1:
                leads_with_duplicates.append(lead.id)
        return {'value': {'opportunity_ids': leads_with_duplicates}}
    def _convert_opportunity(self, cr, uid, ids, vals, context=None):
        """
        When "massively" (more than one at a time) converting leads to
        opportunities, check the salesteam_id and salesmen_ids and update
        the values before calling super.
        """
        if context is None:
            context = {}
        data = self.browse(cr, uid, ids, context=context)[0]
        salesteam_id = data.team_id and data.team_id.id or False
        salesmen_ids = []
        if data.user_ids:
            salesmen_ids = [x.id for x in data.user_ids]
        vals.update({'user_ids': salesmen_ids, 'team_id': salesteam_id})
        return super(crm_lead2opportunity_mass_convert, self)._convert_opportunity(cr, uid, ids, vals, context=context)
    def mass_convert(self, cr, uid, ids, context=None):
        # Optionally merge each lead with its duplicates first, then run the
        # normal conversion on the surviving set of leads.
        # NOTE(review): `dict(context)` assumes context is a dict, not None.
        data = self.browse(cr, uid, ids, context=context)[0]
        ctx = dict(context)
        if data.name == 'convert' and data.deduplicate:
            merged_lead_ids = []
            remaining_lead_ids = []
            lead_selected = context.get('active_ids', [])
            for lead_id in lead_selected:
                if lead_id not in merged_lead_ids:
                    lead = self.pool['crm.lead'].browse(cr, uid, lead_id, context=context)
                    duplicated_lead_ids = self._get_duplicated_leads(cr, uid, lead.partner_id.id, lead.partner_id and lead.partner_id.email or lead.email_from)
                    if len(duplicated_lead_ids) > 1:
                        # Merge the duplicates; only the surviving record
                        # stays in the working set.
                        lead_id = self.pool.get('crm.lead').merge_opportunity(cr, uid, duplicated_lead_ids, False, False, context=context)
                        merged_lead_ids.extend(duplicated_lead_ids)
                        remaining_lead_ids.append(lead_id)
            active_ids = set(context.get('active_ids', []))
            active_ids = active_ids.difference(merged_lead_ids)
            active_ids = active_ids.union(remaining_lead_ids)
            ctx['active_ids'] = list(active_ids)
        ctx['no_force_assignation'] = context.get('no_force_assignation', not data.force_assignation)
        return self.action_apply(cr, uid, ids, context=ctx)
| gpl-3.0 |
yongshengwang/hue | build/env/lib/python2.7/site-packages/pycrypto-2.6.1-py2.7-linux-x86_64.egg/Crypto/SelfTest/Protocol/test_chaffing.py | 120 | 2972 | #
# Test script for Crypto.Protocol.Chaffing
#
# Part of the Python Cryptography Toolkit
#
# Written by Andrew Kuchling and others
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
__revision__ = "$Id$"
import unittest
from Crypto.Protocol import Chaffing
# Sample plaintext payload used by the chaffing/winnowing test below.
text = """\
When in the Course of human events, it becomes necessary for one people to
dissolve the political bands which have connected them with another, and to
assume among the powers of the earth, the separate and equal station to which
the Laws of Nature and of Nature's God entitle them, a decent respect to the
opinions of mankind requires that they should declare the causes which impel
them to the separation.
We hold these truths to be self-evident, that all men are created equal, that
they are endowed by their Creator with certain unalienable Rights, that among
these are Life, Liberty, and the pursuit of Happiness. That to secure these
rights, Governments are instituted among Men, deriving their just powers from
the consent of the governed. That whenever any Form of Government becomes
destructive of these ends, it is the Right of the People to alter or to
abolish it, and to institute new Government, laying its foundation on such
principles and organizing its powers in such form, as to them shall seem most
likely to effect their Safety and Happiness.
"""
class ChaffingTest(unittest.TestCase):
    def runTest(self):
        "Simple tests of chaffing and winnowing"
        # Default and explicit constructor arguments must be accepted...
        Chaffing.Chaff()
        Chaffing.Chaff(0.5, 1)
        # ...while out-of-range parameters must be rejected.
        self.assertRaises(ValueError, Chaffing.Chaff, factor=-1)
        self.assertRaises(ValueError, Chaffing.Chaff, blocksper=-1)
        blocks = [(1, 'data1', 'data1'), (2, 'data2', 'data2')]
        # factor=1.0 adds one chaff block per real block: 2 -> 4 entries.
        chaffer = Chaffing.Chaff(1.0, 1)
        chaffer.chaff(blocks)
        self.assertEqual(len(chaffer.chaff(blocks)), 4)
        # factor=0.0 adds no chaff: the 2 real blocks pass through.
        self.assertEqual(len(Chaffing.Chaff(0.0, 1).chaff(blocks)), 2)
def get_tests(config={}):
    # `config` is part of the PyCrypto self-test harness signature; it is
    # unused here (and the mutable default is never mutated).
    return [ChaffingTest()]
# Allow running this self-test module directly from the command line.
if __name__ == "__main__":
    unittest.main()
| apache-2.0 |
addyosmani/aura-calendar-component | node_modules/grunt/node_modules/js-yaml/support/pyyaml-src/events.py | 985 | 2445 |
# Abstract classes.
class Event(object):
    """Base class for all YAML parser/emitter events."""

    def __init__(self, start_mark=None, end_mark=None):
        self.start_mark = start_mark
        self.end_mark = end_mark

    def __repr__(self):
        # Only show the payload attributes the concrete subclass defines,
        # in a fixed, readable order.
        shown = ('anchor', 'tag', 'implicit', 'value')
        parts = []
        for name in shown:
            if hasattr(self, name):
                parts.append('%s=%r' % (name, getattr(self, name)))
        return '%s(%s)' % (self.__class__.__name__, ', '.join(parts))
class NodeEvent(Event):
    """Event associated with a node; adds an anchor to the base marks."""

    def __init__(self, anchor, start_mark=None, end_mark=None):
        # Delegate mark bookkeeping to Event; only the anchor is new here.
        super(NodeEvent, self).__init__(start_mark, end_mark)
        self.anchor = anchor
class CollectionStartEvent(NodeEvent):
    """Start of a sequence or mapping; carries tag/implicit/flow-style."""

    def __init__(self, anchor, tag, implicit, start_mark=None, end_mark=None,
                 flow_style=None):
        super(CollectionStartEvent, self).__init__(anchor, start_mark,
                                                   end_mark)
        self.tag = tag
        self.implicit = implicit
        self.flow_style = flow_style
class CollectionEndEvent(Event):
    # Marker event closing a sequence or mapping; carries only marks.
    pass
# Implementations.
class StreamStartEvent(Event):
    """First event of a YAML stream; records the stream's encoding."""

    def __init__(self, start_mark=None, end_mark=None, encoding=None):
        super(StreamStartEvent, self).__init__(start_mark, end_mark)
        self.encoding = encoding
class StreamEndEvent(Event):
    # Marker event closing the whole YAML stream.
    pass
class DocumentStartEvent(Event):
    """Start of a document; tracks the '---' marker, %YAML version and tags."""

    def __init__(self, start_mark=None, end_mark=None,
                 explicit=None, version=None, tags=None):
        super(DocumentStartEvent, self).__init__(start_mark, end_mark)
        self.explicit = explicit
        self.version = version
        self.tags = tags
class DocumentEndEvent(Event):
    """End of a document; `explicit` records whether '...' was present."""

    def __init__(self, start_mark=None, end_mark=None,
                 explicit=None):
        super(DocumentEndEvent, self).__init__(start_mark, end_mark)
        self.explicit = explicit
class AliasEvent(NodeEvent):
    # Reference to a previously anchored node; the anchor alone suffices.
    pass
class ScalarEvent(NodeEvent):
    """A scalar node: tag, implicit-resolution flags, value and quoting style."""

    def __init__(self, anchor, tag, implicit, value,
                 start_mark=None, end_mark=None, style=None):
        super(ScalarEvent, self).__init__(anchor, start_mark, end_mark)
        self.tag = tag
        self.implicit = implicit
        self.value = value
        self.style = style
class SequenceStartEvent(CollectionStartEvent):
    # Start of a YAML sequence; inherits all collection-start fields.
    pass
class SequenceEndEvent(CollectionEndEvent):
    # End of a YAML sequence.
    pass
class MappingStartEvent(CollectionStartEvent):
    # Start of a YAML mapping; inherits all collection-start fields.
    pass
class MappingEndEvent(CollectionEndEvent):
    # End of a YAML mapping.
    pass
| mit |
darktears/chromium-crosswalk | third_party/closure_linter/closure_linter/statetracker_test.py | 109 | 3384 | #!/usr/bin/env python
#
# Copyright 2012 The Closure Linter Authors. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for the statetracker module."""
# Allow non-Google copyright
# pylint: disable=g-bad-file-header
__author__ = ('nnaze@google.com (Nathan Naze)')
import unittest as googletest
from closure_linter import javascripttokens
from closure_linter import statetracker
from closure_linter import testutil
class _FakeDocFlag(object):
def __repr__(self):
return '@%s %s' % (self.flag_type, self.name)
class IdentifierTest(googletest.TestCase):
  """Smoke test: StateTracker must accept a bare identifier token."""

  def testJustIdentifier(self):
    # HandleToken must not raise when handed a lone identifier token with
    # no preceding token (None as last_non_space_token).
    a = javascripttokens.JavaScriptToken(
        'abc', javascripttokens.JavaScriptTokenType.IDENTIFIER, 'abc', 1)
    st = statetracker.StateTracker()
    st.HandleToken(a, None)
class DocCommentTest(googletest.TestCase):
  """Unit tests for statetracker.DocComment flag handling and parsing."""

  @staticmethod
  def _MakeDocFlagFake(flag_type, name=None):
    # Build a _FakeDocFlag with just the two attributes DocComment reads.
    flag = _FakeDocFlag()
    flag.flag_type = flag_type
    flag.name = name
    return flag

  def testDocFlags(self):
    # ordered_params keeps only non-empty @param names, in insertion order;
    # GetDocFlags returns every flag including the unnamed one.
    comment = statetracker.DocComment(None)
    a = self._MakeDocFlagFake('param', 'foo')
    comment.AddFlag(a)
    b = self._MakeDocFlagFake('param', '')
    comment.AddFlag(b)
    c = self._MakeDocFlagFake('param', 'bar')
    comment.AddFlag(c)
    self.assertEquals(
        ['foo', 'bar'],
        comment.ordered_params)
    self.assertEquals(
        [a, b, c],
        comment.GetDocFlags())

  def testInvalidate(self):
    # Invalidate() must flip both the attribute and the accessor.
    comment = statetracker.DocComment(None)
    self.assertFalse(comment.invalidated)
    self.assertFalse(comment.IsInvalidated())
    comment.Invalidate()
    self.assertTrue(comment.invalidated)
    self.assertTrue(comment.IsInvalidated())

  def testSuppressionOnly(self):
    # True only while every flag on the comment is a @suppress flag.
    comment = statetracker.DocComment(None)
    self.assertFalse(comment.SuppressionOnly())
    comment.AddFlag(self._MakeDocFlagFake('suppress'))
    self.assertTrue(comment.SuppressionOnly())
    comment.AddFlag(self._MakeDocFlagFake('foo'))
    self.assertFalse(comment.SuppressionOnly())

  def testRepr(self):
    comment = statetracker.DocComment(None)
    comment.AddFlag(self._MakeDocFlagFake('param', 'foo'))
    comment.AddFlag(self._MakeDocFlagFake('param', 'bar'))
    self.assertEquals(
        '<DocComment: [\'foo\', \'bar\'], [@param foo, @param bar]>',
        repr(comment))

  def testDocFlagParam(self):
    # '[name]' (optional-parameter syntax) must be preserved verbatim.
    comment = self._ParseComment("""
    /**
     * @param {string} [name] Name of customer.
     */""")
    flag = comment.GetFlag('param')
    self.assertEquals('string', flag.type)
    self.assertEquals('[name]', flag.name)

  def _ParseComment(self, script):
    """Parse a script that contains one comment and return it."""
    _, comments = testutil.ParseFunctionsAndComments(script)
    self.assertEquals(1, len(comments))
    return comments[0]
# Allow running the test module directly (googletest is unittest aliased).
if __name__ == '__main__':
  googletest.main()
| bsd-3-clause |
Diti24/python-ivi | ivi/agilent/agilent8340A.py | 1 | 1485 | """
Python Interchangeable Virtual Instrument Library
Copyright (c) 2014-2016 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from .agilentBase8340 import *
class agilent8340A(agilentBase8340):
    "Agilent 8340A IVI RF sweep generator driver"

    def __init__(self, *args, **kwargs):
        # Record the instrument id before delegating to the base class so the
        # base initialisation sees it; setdefault preserves any value a
        # subclass may already have set.
        self.__dict__.setdefault('_instrument_id', 'HP8340A')

        super(agilent8340A, self).__init__(*args, **kwargs)

        # Output frequency range of the 8340A: 10 MHz .. 26.5 GHz.
        self._frequency_low = 10e6
        self._frequency_high = 26.5e9
| mit |
TheTypoMaster/asuswrt | release/src/router/dnsmasq/contrib/dbus-test/dbus-test.py | 79 | 1667 | #!/usr/bin/python
import dbus
# Connect to the D-Bus system bus, where dnsmasq registers its object.
bus = dbus.SystemBus()
p = bus.get_object("uk.org.thekelleys.dnsmasq", "/uk/org/thekelleys/dnsmasq")
l = dbus.Interface(p, dbus_interface="uk.org.thekelleys.dnsmasq")

# The new more flexible SetServersEx method.
# Each entry is a list: the server address (optionally qualified with #port
# and/or %interface) followed by any domains it should serve.
array = dbus.Array()
array.append(["1.2.3.5"])
array.append(["1.2.3.4#664", "foobar.com"])
array.append(["1003:1234:abcd::1%eth0", "eng.mycorp.com", "lab.mycorp.com"])
print l.SetServersEx(array)

# Must create a new object for dnsmasq as the introspection gives the wrong
# signature for SetServers (av) while the code only expects a bunch of arguments
# instead of an array of variants
p = bus.get_object("uk.org.thekelleys.dnsmasq", "/uk/org/thekelleys/dnsmasq", introspect=False)
l = dbus.Interface(p, dbus_interface="uk.org.thekelleys.dnsmasq")

# The previous method; all addresses in machine byte order
print l.SetServers(dbus.UInt32(16909060), # 1.2.3.5
                   dbus.UInt32(16909061), # 1.2.3.4
                   "foobar.com",
                   dbus.Byte(0x10), # 1003:1234:abcd::1
                   dbus.Byte(0x03),
                   dbus.Byte(0x12),
                   dbus.Byte(0x34),
                   dbus.Byte(0xab),
                   dbus.Byte(0xcd),
                   dbus.Byte(0x00),
                   dbus.Byte(0x00),
                   dbus.Byte(0x00),
                   dbus.Byte(0x00),
                   dbus.Byte(0x00),
                   dbus.Byte(0x00),
                   dbus.Byte(0x00),
                   dbus.Byte(0x00),
                   dbus.Byte(0x00),
                   dbus.Byte(0x01),
                   "eng.mycorp.com",
                   "lab.mycorp.com")
| gpl-2.0 |
heyavery/lopenr | venv/lib/python2.7/site-packages/wheel/test/test_install.py | 455 | 1866 | # Test wheel.
# The file has the following contents:
# hello.pyd
# hello/hello.py
# hello/__init__.py
# test-1.0.data/data/hello.dat
# test-1.0.data/headers/hello.dat
# test-1.0.data/scripts/hello.sh
# test-1.0.dist-info/WHEEL
# test-1.0.dist-info/METADATA
# test-1.0.dist-info/RECORD
# The root is PLATLIB
# So, some in PLATLIB, and one in each of DATA, HEADERS and SCRIPTS.
import wheel.tool
import wheel.pep425tags
from wheel.install import WheelFile
from tempfile import mkdtemp
import shutil
import os
# Directory of this test module and the wheel fixture used by the tests below.
THISDIR = os.path.dirname(__file__)
TESTWHEEL = os.path.join(THISDIR, 'test-1.0-py2.py3-none-win32.whl')
def check(*path):
    """Return True when the filesystem entry built from the segments exists."""
    candidate = os.path.join(*path)
    return os.path.exists(candidate)
def test_install():
    """Install the fixture wheel into override locations and verify layout."""
    tempdir = mkdtemp()

    def get_supported():
        # Accept the fixture wheel's win32 tag on any platform.
        return list(wheel.pep425tags.get_supported()) + [('py3', 'none', 'win32')]

    whl = WheelFile(TESTWHEEL, context=get_supported)
    assert whl.supports_current_python(get_supported)
    try:
        # One scratch directory per install scheme key.
        locs = {}
        for key in ('purelib', 'platlib', 'scripts', 'headers', 'data'):
            locs[key] = os.path.join(tempdir, key)
            os.mkdir(locs[key])
        whl.install(overrides=locs)

        # Root of the archive is PLATLIB, so purelib must stay empty.
        assert not os.listdir(locs['purelib'])
        expected = [
            (locs['platlib'], 'hello.pyd'),
            (locs['platlib'], 'hello', 'hello.py'),
            (locs['platlib'], 'hello', '__init__.py'),
            (locs['data'], 'hello.dat'),
            (locs['headers'], 'hello.dat'),
            (locs['scripts'], 'hello.sh'),
            (locs['platlib'], 'test-1.0.dist-info', 'RECORD'),
        ]
        for parts in expected:
            assert check(*parts)
    finally:
        shutil.rmtree(tempdir)
def test_install_tool():
    """Slightly improve coverage of wheel.install.

    Exercises the command-line install entry point; dry_run=True means no
    files are actually written.
    """
    wheel.tool.install([TESTWHEEL], force=True, dry_run=True)
| mit |
JRock007/boxxy | dist/Boxxy server.app/Contents/Resources/lib/python2.7/numpy/core/setup_common.py | 36 | 12847 | from __future__ import division, absolute_import, print_function
# Code common to build tools
import sys
from os.path import join
import warnings
import copy
import binascii
from distutils.ccompiler import CompileError
#-------------------
# Versioning support
#-------------------
# How to change C_API_VERSION ?
# - increase C_API_VERSION value
# - record the hash for the new C API with the script cversions.py
# and add the hash to cversions.txt
# The hash values are used to remind developers when the C API number was not
# updated - generates a MismatchCAPIWarning warning which is turned into an
# exception for released version.
# Binary compatibility version number. This number is increased whenever the
# C-API is changed such that binary compatibility is broken, i.e. whenever a
# recompile of extension modules is needed.
C_ABI_VERSION = 0x01000009
# Minor API version. This number is increased whenever a change is made to the
# C-API -- whether it breaks binary compatibility or not. Some changes, such
# as adding a function pointer to the end of the function table, can be made
# without breaking binary compatibility. In this case, only the C_API_VERSION
# (*not* C_ABI_VERSION) would be increased. Whenever binary compatibility is
# broken, both C_API_VERSION and C_ABI_VERSION should be increased.
#
# 0x00000008 - 1.7.x
# 0x00000009 - 1.8.x
# 0x00000009 - 1.9.x
C_API_VERSION = 0x00000009
class MismatchCAPIWarning(Warning):
    """Warns that the current C API hash does not match the recorded hash
    for C_API_VERSION (see check_api_version below)."""
    pass
def is_released(config):
    """Return True if a released version of numpy is detected."""
    from distutils.version import LooseVersion

    v = config.get_version('../version.py')
    if v is None:
        raise ValueError("Could not get version")
    pv = LooseVersion(vstring=v).version
    # Development version strings parse into extra components (e.g.
    # '1.9.0.dev-...'), so more than three fields means "not released".
    if len(pv) > 3:
        return False
    return True
def get_api_versions(apiversion, codegen_dir):
    """Return the current C API checksum and the recorded checksum for the
    given version of the C API.

    Parameters
    ----------
    apiversion : int
        C API version number to look up in the recorded hashes.
    codegen_dir : str
        Directory containing the ``genapi`` and ``numpy_api`` modules.

    Returns
    -------
    (curapi_hash, recorded_hash) : tuple of str
        Checksum of the C API as currently defined by the code generators,
        and the checksum recorded (cversions.txt) for ``apiversion``.
    """
    # Fix: the previous version built an unused `api_files` list here; it was
    # dead code and has been removed.
    # Compute the hash of the current API as defined in the .txt files in
    # code_generators; those modules live in codegen_dir, so make it
    # importable for the duration of the lookup.
    sys.path.insert(0, codegen_dir)
    try:
        m = __import__('genapi')
        numpy_api = __import__('numpy_api')
        curapi_hash = m.fullapi_hash(numpy_api.full_api)
        apis_hash = m.get_versions_hash()
    finally:
        del sys.path[0]

    return curapi_hash, apis_hash[apiversion]
def check_api_version(apiversion, codegen_dir):
    """Emit a MismatchCAPIWarning if the C API version needs updating."""
    curapi_hash, api_hash = get_api_versions(apiversion, codegen_dir)

    # A differing hash means the api .txt files in codegen_dir were updated
    # without bumping the API version.  Any modification of those files must
    # be reflected in the api (and eventually abi) versions; the checksum of
    # the current API is computed by code_generators/cversions.py.
    if curapi_hash == api_hash:
        return
    msg = "API mismatch detected, the C API version " \
          "numbers have to be updated. Current C api version is %d, " \
          "with checksum %s, but recorded checksum for C API version %d in " \
          "codegen_dir/cversions.txt is %s. If functions were added in the " \
          "C API, you have to update C_API_VERSION in %s."
    warnings.warn(msg % (apiversion, curapi_hash, apiversion, api_hash,
                         __file__),
                  MismatchCAPIWarning)
# Mandatory functions: if not found, fail the build
MANDATORY_FUNCS = ["sin", "cos", "tan", "sinh", "cosh", "tanh", "fabs",
"floor", "ceil", "sqrt", "log10", "log", "exp", "asin",
"acos", "atan", "fmod", 'modf', 'frexp', 'ldexp']
# Standard functions which may not be available and for which we have a
# replacement implementation. Note that some of these are C99 functions.
OPTIONAL_STDFUNCS = ["expm1", "log1p", "acosh", "asinh", "atanh",
"rint", "trunc", "exp2", "log2", "hypot", "atan2", "pow",
"copysign", "nextafter", "ftello", "fseeko",
"strtoll", "strtoull"]
OPTIONAL_HEADERS = [
# sse headers only enabled automatically on amd64/x32 builds
"xmmintrin.h", # SSE
"emmintrin.h", # SSE2
]
# optional gcc compiler builtins and their call arguments and optional a
# required header
# call arguments are required as the compiler will do strict signature checking
OPTIONAL_INTRINSICS = [("__builtin_isnan", '5.'),
("__builtin_isinf", '5.'),
("__builtin_isfinite", '5.'),
("__builtin_bswap32", '5u'),
("__builtin_bswap64", '5u'),
("__builtin_expect", '5, 0'),
("_mm_load_ps", '(float*)0', "xmmintrin.h"), # SSE
("_mm_load_pd", '(double*)0', "emmintrin.h"), # SSE2
]
# function attributes
# tested via "int %s %s(void *);" % (attribute, name)
# function name will be converted to HAVE_<upper-case-name> preprocessor macro
OPTIONAL_FUNCTION_ATTRIBUTES = [('__attribute__((optimize("unroll-loops")))',
'attribute_optimize_unroll_loops'),
('__attribute__((optimize("O3")))',
'attribute_optimize_opt_3'),
('__attribute__((nonnull (1)))',
'attribute_nonnull'),
]
# variable attributes tested via "int %s a" % attribute
OPTIONAL_VARIABLE_ATTRIBUTES = ["__thread", "__declspec(thread)"]
# Subset of OPTIONAL_STDFUNCS which may alreay have HAVE_* defined by Python.h
OPTIONAL_STDFUNCS_MAYBE = ["expm1", "log1p", "acosh", "atanh", "asinh", "hypot",
"copysign", "ftello", "fseeko"]
# C99 functions: float and long double versions
C99_FUNCS = ["sin", "cos", "tan", "sinh", "cosh", "tanh", "fabs", "floor",
"ceil", "rint", "trunc", "sqrt", "log10", "log", "log1p", "exp",
"expm1", "asin", "acos", "atan", "asinh", "acosh", "atanh",
"hypot", "atan2", "pow", "fmod", "modf", 'frexp', 'ldexp',
"exp2", "log2", "copysign", "nextafter"]
C99_FUNCS_SINGLE = [f + 'f' for f in C99_FUNCS]
C99_FUNCS_EXTENDED = [f + 'l' for f in C99_FUNCS]
C99_COMPLEX_TYPES = ['complex double', 'complex float', 'complex long double']
C99_COMPLEX_FUNCS = ['creal', 'cimag', 'cabs', 'carg', 'cexp', 'csqrt', 'clog',
'ccos', 'csin', 'cpow']
def fname2def(name):
    """Map a C function name to its HAVE_* config-header macro name."""
    return "HAVE_{0}".format(name.upper())
def sym2def(symbol):
    """Macro-ify a C symbol: drop spaces, then upper-case."""
    return symbol.replace(' ', '').upper()
def type2def(symbol):
    """Macro-ify a C type name: spaces become underscores, upper-cased."""
    return symbol.replace(' ', '_').upper()
# Code to detect long double representation taken from MPFR m4 macro
def check_long_double_representation(cmd):
    """Compile a probe object file and deduce the long double binary format."""
    cmd._check_compiler()
    body = LONG_DOUBLE_REPRESENTATION_SRC % {'type': 'long double'}

    # _compile (rather than a higher-level helper) is used because the object
    # file name is needed so its raw bytes can be inspected afterwards.
    src, obj = cmd._compile(body, None, None, 'c')
    try:
        return long_double_representation(pyod(obj))
    finally:
        cmd._clean()
LONG_DOUBLE_REPRESENTATION_SRC = r"""
/* "before" is 16 bytes to ensure there's no padding between it and "x".
* We're not expecting any "long double" bigger than 16 bytes or with
* alignment requirements stricter than 16 bytes. */
typedef %(type)s test_type;
struct {
char before[16];
test_type x;
char after[8];
} foo = {
{ '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0',
'\001', '\043', '\105', '\147', '\211', '\253', '\315', '\357' },
-123456789.0,
{ '\376', '\334', '\272', '\230', '\166', '\124', '\062', '\020' }
};
"""
def pyod(filename):
    """Python implementation of the od UNIX utility (od -b, more exactly).

    Parameters
    ----------
    filename : str
        name of the file to get the dump from.

    Returns
    -------
    out : seq
        list of lines of od output

    Note
    ----
    Only enough of od is implemented to support detection of the long double
    representation; this is not a general replacement for od.
    """
    def _dump_py2():
        lines = []
        fid = open(filename, 'rb')
        try:
            # Each byte rendered as a (decimal-typed) octal number.
            octals = [int(oct(int(binascii.b2a_hex(o), 16)))
                      for o in fid.read()]
            for offset in range(0, len(octals), 16):
                row = ['%07d' % int(oct(offset))]
                row.extend(['%03d' % c for c in octals[offset:offset + 16]])
                lines.append(" ".join(row))
            return lines
        finally:
            fid.close()

    def _dump_py3():
        lines = []
        fid = open(filename, 'rb')
        try:
            # oct() on py3 prefixes '0o'; strip it to get the bare digits.
            octals = [oct(o)[2:] for o in fid.read()]
            for offset in range(0, len(octals), 16):
                row = ['%07d' % int(oct(offset)[2:])]
                row.extend(['%03d' % int(c)
                            for c in octals[offset:offset + 16]])
                lines.append(" ".join(row))
            return lines
        finally:
            fid.close()

    if sys.version_info[0] < 3:
        return _dump_py2()
    return _dump_py3()
_BEFORE_SEQ = ['000', '000', '000', '000', '000', '000', '000', '000',
'001', '043', '105', '147', '211', '253', '315', '357']
_AFTER_SEQ = ['376', '334', '272', '230', '166', '124', '062', '020']
_IEEE_DOUBLE_BE = ['301', '235', '157', '064', '124', '000', '000', '000']
_IEEE_DOUBLE_LE = _IEEE_DOUBLE_BE[::-1]
_INTEL_EXTENDED_12B = ['000', '000', '000', '000', '240', '242', '171', '353',
'031', '300', '000', '000']
_INTEL_EXTENDED_16B = ['000', '000', '000', '000', '240', '242', '171', '353',
'031', '300', '000', '000', '000', '000', '000', '000']
_MOTOROLA_EXTENDED_12B = ['300', '031', '000', '000', '353', '171',
'242', '240', '000', '000', '000', '000']
_IEEE_QUAD_PREC_BE = ['300', '031', '326', '363', '105', '100', '000', '000',
'000', '000', '000', '000', '000', '000', '000', '000']
_IEEE_QUAD_PREC_LE = _IEEE_QUAD_PREC_BE[::-1]
_DOUBLE_DOUBLE_BE = ['301', '235', '157', '064', '124', '000', '000', '000'] + \
['000'] * 8
_DOUBLE_DOUBLE_LE = ['000', '000', '000', '124', '064', '157', '235', '301'] + \
['000'] * 8
def long_double_representation(lines):
    """Given a binary dump as given by GNU od -b, look for long double
    representation.

    Returns one of the representation name strings (e.g. 'IEEE_DOUBLE_LE',
    'INTEL_EXTENDED_16_BYTES_LE') or raises ValueError when no known
    format is recognized.
    """
    # Read contains a list of 32 items, each item is a byte (in octal
    # representation, as a string). We 'slide' over the output until read is
    # of the form before_seq + content + after_sequence, where content is the
    # long double representation:
    #  - content is 12 bytes: 80 bits Intel representation
    #  - content is 16 bytes: 80 bits Intel representation (64 bits) or quad
    #    precision
    #  - content is 8 bytes: same as double (not implemented yet)
    read = [''] * 32
    saw = None
    for line in lines:
        # we skip the first word, as od -b outputs an index at the beginning
        # of each line
        for w in line.split()[1:]:
            read.pop(0)
            read.append(w)

            # If the end of read is equal to the after_sequence, read
            # contains the long double
            if read[-8:] == _AFTER_SEQ:
                saw = copy.copy(read)
                # The number of padding bytes before the payload tells us the
                # payload width: 12, 16 or 8 bytes respectively.
                if read[:12] == _BEFORE_SEQ[4:]:
                    if read[12:-8] == _INTEL_EXTENDED_12B:
                        return 'INTEL_EXTENDED_12_BYTES_LE'
                    if read[12:-8] == _MOTOROLA_EXTENDED_12B:
                        return 'MOTOROLA_EXTENDED_12_BYTES_BE'
                elif read[:8] == _BEFORE_SEQ[8:]:
                    if read[8:-8] == _INTEL_EXTENDED_16B:
                        return 'INTEL_EXTENDED_16_BYTES_LE'
                    elif read[8:-8] == _IEEE_QUAD_PREC_BE:
                        return 'IEEE_QUAD_BE'
                    elif read[8:-8] == _IEEE_QUAD_PREC_LE:
                        return 'IEEE_QUAD_LE'
                    elif read[8:-8] == _DOUBLE_DOUBLE_BE:
                        return 'DOUBLE_DOUBLE_BE'
                    elif read[8:-8] == _DOUBLE_DOUBLE_LE:
                        return 'DOUBLE_DOUBLE_LE'
                elif read[:16] == _BEFORE_SEQ:
                    if read[16:-8] == _IEEE_DOUBLE_LE:
                        return 'IEEE_DOUBLE_LE'
                    elif read[16:-8] == _IEEE_DOUBLE_BE:
                        return 'IEEE_DOUBLE_BE'

    if saw is not None:
        raise ValueError("Unrecognized format (%s)" % saw)
    else:
        # We never detected the after_sequence
        raise ValueError("Could not lock sequences (%s)" % saw)
| mit |
inonit/wagtail | wagtail/wagtailsearch/backends/__init__.py | 9 | 2764 | # Backend loading
# Based on the Django cache framework
# https://github.com/django/django/blob/5d263dee304fdaf95e18d2f0619d6925984a7f02/django/core/cache/__init__.py
import sys
from importlib import import_module
from django.utils import six
from django.utils.module_loading import import_string
from django.core.exceptions import ImproperlyConfigured
from django.conf import settings
class InvalidSearchBackendError(ImproperlyConfigured):
    """Raised when a search backend name or dotted path cannot be resolved."""
    pass
def import_backend(dotted_path):
    """Resolve a search backend class from a dotted path.

    Two path formats are accepted:

      old: wagtail.wagtailsearch.backends.elasticsearch.ElasticSearch
           (dotted path to the backend class itself)
      new: wagtail.wagtailsearch.backends.elasticsearch
           (dotted path to a module exposing a ``SearchBackend`` attribute)

    The new style is tried first; on ImportError the path is treated as an
    old-style class path.
    """
    try:
        # New style: module with a SearchBackend attribute.
        backend_module = import_module(dotted_path)
        return backend_module.SearchBackend
    except ImportError as new_style_error:
        try:
            # Old style: dotted path directly to the class.
            return import_string(dotted_path)
        except ImportError:
            # Surface the original (new-style) failure with its traceback.
            six.reraise(ImportError, new_style_error, sys.exc_info()[2])
def get_search_backend(backend='default', **kwargs):
    """Instantiate the search backend named |backend|.

    ``backend`` is either a key of the WAGTAILSEARCH_BACKENDS setting or a
    dotted path to a backend; extra keyword arguments override configured
    parameters.  Raises InvalidSearchBackendError when resolution fails.
    """
    # Fall back to the database backend when no setting is present.
    backends_conf = getattr(settings, 'WAGTAILSEARCH_BACKENDS', {
        'default': {
            'BACKEND': 'wagtail.wagtailsearch.backends.db',
        },
    })

    if backend in backends_conf:
        # Named configuration entry: start from its params, let kwargs win.
        params = backends_conf[backend].copy()
        params.update(kwargs)
        backend = params.pop('BACKEND')
    else:
        # Not a configured name; it must itself be an importable dotted path.
        try:
            import_backend(backend)
        except ImportError as e:
            raise InvalidSearchBackendError("Could not find backend '%s': %s" % (
                backend, e))
        params = kwargs

    try:
        backend_cls = import_backend(backend)
    except ImportError as e:
        raise InvalidSearchBackendError("Could not find backend '%s': %s" % (
            backend, e))

    return backend_cls(params)
def get_search_backends(with_auto_update=False):
    """Yield an instance of every configured backend.

    When |with_auto_update| is True, backends explicitly configured with
    AUTO_UPDATE=False are skipped (AUTO_UPDATE defaults to True).
    """
    if not hasattr(settings, 'WAGTAILSEARCH_BACKENDS'):
        yield get_search_backend('default')
        return
    for name, params in settings.WAGTAILSEARCH_BACKENDS.items():
        if with_auto_update and params.get('AUTO_UPDATE', True) is False:
            continue
        yield get_search_backend(name)
| bsd-3-clause |
justinmchase/node-gyp | gyp/pylib/gyp/generator/analyzer.py | 294 | 21436 | # Copyright (c) 2014 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
This script is intended for use as a GYP_GENERATOR. It takes as input (by way of
the generator flag config_path) the path of a json file that dictates the files
and targets to search for. The following keys are supported:
files: list of paths (relative) of the files to search for.
targets: list of targets to search for. The target names are unqualified.
The following is output:
error: only supplied if there is an error.
targets: the set of targets passed in via targets that either directly or
indirectly depend upon the set of paths supplied in files.
build_targets: minimal set of targets that directly depend on the changed
files and need to be built. The expectation is this set of targets is passed
into a build step.
status: outputs one of three values: none of the supplied files were found,
one of the include files changed so that it should be assumed everything
changed (in this case targets and build_targets are not output) or at
least one file was found.
invalid_targets: list of supplied targets thare were not found.
If the generator flag analyzer_output_path is specified, output is written
there. Otherwise output is written to stdout.
"""
import gyp.common
import gyp.ninja_syntax as ninja_syntax
import json
import os
import posixpath
import sys
debug = False
found_dependency_string = 'Found dependency'
no_dependency_string = 'No dependencies'
# Status when it should be assumed that everything has changed.
all_changed_string = 'Found dependency (all)'
# MatchStatus is used indicate if and how a target depends upon the supplied
# sources.
# The target's sources contain one of the supplied paths.
MATCH_STATUS_MATCHES = 1
# The target has a dependency on another target that contains one of the
# supplied paths.
MATCH_STATUS_MATCHES_BY_DEPENDENCY = 2
# The target's sources weren't in the supplied paths and none of the target's
# dependencies depend upon a target that matched.
MATCH_STATUS_DOESNT_MATCH = 3
# The target doesn't contain the source, but the dependent targets have not yet
# been visited to determine a more specific status yet.
MATCH_STATUS_TBD = 4
generator_supports_multiple_toolsets = gyp.common.CrossCompileRequested()
generator_wants_static_library_dependencies_adjusted = False
generator_default_variables = {
}
for dirname in ['INTERMEDIATE_DIR', 'SHARED_INTERMEDIATE_DIR', 'PRODUCT_DIR',
'LIB_DIR', 'SHARED_LIB_DIR']:
generator_default_variables[dirname] = '!!!'
for unused in ['RULE_INPUT_PATH', 'RULE_INPUT_ROOT', 'RULE_INPUT_NAME',
'RULE_INPUT_DIRNAME', 'RULE_INPUT_EXT',
'EXECUTABLE_PREFIX', 'EXECUTABLE_SUFFIX',
'STATIC_LIB_PREFIX', 'STATIC_LIB_SUFFIX',
'SHARED_LIB_PREFIX', 'SHARED_LIB_SUFFIX',
'CONFIGURATION_NAME']:
generator_default_variables[unused] = ''
def _ToGypPath(path):
"""Converts a path to the format used by gyp."""
if os.sep == '\\' and os.altsep == '/':
return path.replace('\\', '/')
return path
def _ResolveParent(path, base_path_components):
"""Resolves |path|, which starts with at least one '../'. Returns an empty
string if the path shouldn't be considered. See _AddSources() for a
description of |base_path_components|."""
depth = 0
while path.startswith('../'):
depth += 1
path = path[3:]
# Relative includes may go outside the source tree. For example, an action may
# have inputs in /usr/include, which are not in the source tree.
if depth > len(base_path_components):
return ''
if depth == len(base_path_components):
return path
return '/'.join(base_path_components[0:len(base_path_components) - depth]) + \
'/' + path
def _AddSources(sources, base_path, base_path_components, result):
  """Extracts valid sources from |sources| and adds them to |result|. Each
  source file is relative to |base_path|, but may contain '..'. To make
  resolving '..' easier |base_path_components| contains each of the
  directories in |base_path|. Additionally each source may contain variables.
  Such sources are ignored as it is assumed dependencies on them are expressed
  and tracked in some other means."""
  # NOTE: gyp paths are always posix style.
  for source in sources:
    # Skip empty entries and entries containing unexpanded variables; '!!!'
    # is the placeholder this generator sets for path-like variables.
    if not len(source) or source.startswith('!!!') or source.startswith('$'):
      continue
    # variable expansion may lead to //.
    org_source = source
    source = source[0] + source[1:].replace('//', '/')
    if source.startswith('../'):
      source = _ResolveParent(source, base_path_components)
      # _ResolveParent returns '' for paths that escape the source tree.
      if len(source):
        result.append(source)
      continue
    result.append(base_path + source)
    if debug:
      print 'AddSource', org_source, result[len(result) - 1]
def _ExtractSourcesFromAction(action, base_path, base_path_components,
                              results):
  """Adds the input files of |action| (if any) to |results|."""
  inputs = action.get('inputs', None)
  if inputs is not None:
    _AddSources(inputs, base_path, base_path_components, results)
def _ToLocalPath(toplevel_dir, path):
"""Converts |path| to a path relative to |toplevel_dir|."""
if path == toplevel_dir:
return ''
if path.startswith(toplevel_dir + '/'):
return path[len(toplevel_dir) + len('/'):]
return path
def _ExtractSources(target, target_dict, toplevel_dir):
  """Returns the list of source paths referenced by |target|, including the
  inputs of its actions and rules, as gyp-style paths relative to
  |toplevel_dir|."""
  # |target| is either absolute or relative and in the format of the OS. Gyp
  # source paths are always posix. Convert |target| to a posix path relative to
  # |toplevel_dir_|. This is done to make it easy to build source paths.
  base_path = posixpath.dirname(_ToLocalPath(toplevel_dir, _ToGypPath(target)))
  base_path_components = base_path.split('/')

  # Add a trailing '/' so that _AddSources() can easily build paths.
  if len(base_path):
    base_path += '/'

  if debug:
    print 'ExtractSources', target, base_path

  results = []
  if 'sources' in target_dict:
    _AddSources(target_dict['sources'], base_path, base_path_components,
                results)
  # Include the inputs from any actions. Any changes to these affect the
  # resulting output.
  if 'actions' in target_dict:
    for action in target_dict['actions']:
      _ExtractSourcesFromAction(action, base_path, base_path_components,
                                results)
  if 'rules' in target_dict:
    for rule in target_dict['rules']:
      _ExtractSourcesFromAction(rule, base_path, base_path_components, results)

  return results
class Target(object):
  """Holds information about a particular target:
  deps: set of Targets this Target depends upon. This is not recursive, only the
    direct dependent Targets.
  match_status: one of the MatchStatus values.
  back_deps: set of Targets that have a dependency on this Target.
  visited: used during iteration to indicate whether we've visited this target.
    This is used for two iterations, once in building the set of Targets and
    again in _GetBuildTargets().
  name: fully qualified name of the target.
  requires_build: True if the target type is such that it needs to be built.
    See _DoesTargetTypeRequireBuild for details.
  added_to_compile_targets: used when determining if the target was added to the
    set of targets that needs to be built.
  in_roots: true if this target is a descendant of one of the root nodes.
  is_executable: true if the type of target is executable."""

  def __init__(self, name):
    self.deps = set()
    # Classification is unknown until the dependency walk inspects sources.
    self.match_status = MATCH_STATUS_TBD
    self.back_deps = set()
    self.name = name
    # TODO(sky): I don't like hanging this off Target. This state is specific
    # to certain functions and should be isolated there.
    self.visited = False
    self.requires_build = False
    self.added_to_compile_targets = False
    self.in_roots = False
    self.is_executable = False
class Config(object):
  """What the analyzer is looking for.

  files: list of file paths to search for.
  targets: set of unqualified target names; see the file docstring.
  """

  def __init__(self):
    self.files = []
    self.targets = set()

  def Init(self, params):
    """Initializes Config from the generator flags.

    Kept separate from __init__ because it raises an exception on a parse
    error."""
    config_path = params.get('generator_flags', {}).get('config_path', None)
    if not config_path:
      return
    try:
      f = open(config_path, 'r')
      config = json.load(f)
      f.close()
    except IOError:
      raise Exception('Unable to open file ' + config_path)
    except ValueError as e:
      raise Exception('Unable to parse config file ' + config_path + str(e))
    if not isinstance(config, dict):
      raise Exception('config_path must be a JSON file containing a dictionary')
    self.files = config.get('files', [])
    self.targets = set(config.get('targets', []))
def _WasBuildFileModified(build_file, data, files, toplevel_dir):
  """Returns true if the build file |build_file| is either in |files| or
  one of the files included by |build_file| is in |files|. |toplevel_dir| is
  the root of the source tree."""
  if _ToLocalPath(toplevel_dir, _ToGypPath(build_file)) in files:
    if debug:
      print 'gyp file modified', build_file
    return True

  # First element of included_files is the file itself.
  if len(data[build_file]['included_files']) <= 1:
    return False

  for include_file in data[build_file]['included_files'][1:]:
    # |included_files| are relative to the directory of the |build_file|.
    rel_include_file = \
        _ToGypPath(gyp.common.UnrelativePath(include_file, build_file))
    if _ToLocalPath(toplevel_dir, rel_include_file) in files:
      if debug:
        print 'included gyp file modified, gyp_file=', build_file, \
            'included file=', rel_include_file
      return True
  return False
def _GetOrCreateTargetByName(targets, target_name):
  """Returns (created, Target) for |target_name|, creating and registering
  a new Target in |targets| when it does not exist yet."""
  existing = targets.get(target_name)
  if existing is not None:
    return False, existing
  new_target = Target(target_name)
  targets[target_name] = new_target
  return True, new_target
def _DoesTargetTypeRequireBuild(target_dict):
"""Returns true if the target type is such that it needs to be built."""
# If a 'none' target has rules or actions we assume it requires a build.
return target_dict['type'] != 'none' or \
target_dict.get('actions') or target_dict.get('rules')
def _GenerateTargets(data, target_list, target_dicts, toplevel_dir, files,
                     build_files):
  """Returns a tuple of the following:
  . A dictionary mapping from fully qualified name to Target.
  . A list of the targets that have a source file in |files|.
  . Set of root Targets reachable from the the files |build_files|.

  This sets the |match_status| of the targets that contain any of the source
  files in |files| to MATCH_STATUS_MATCHES.
  |toplevel_dir| is the root of the source tree."""
  # Maps from target name to Target.
  targets = {}

  # Targets that matched.
  matching_targets = []

  # Queue of targets to visit.
  targets_to_visit = target_list[:]

  # Maps from build file to a boolean indicating whether the build file is in
  # |files|.  Cached because several targets share a build file.
  build_file_in_files = {}

  # Root targets across all files.
  roots = set()

  # Set of Targets in |build_files|.
  build_file_targets = set()

  while len(targets_to_visit) > 0:
    target_name = targets_to_visit.pop()
    created_target, target = _GetOrCreateTargetByName(targets, target_name)
    if created_target:
      # Assume it's a root until we discover an incoming dependency below.
      roots.add(target)
    elif target.visited:
      continue

    target.visited = True
    target.requires_build = _DoesTargetTypeRequireBuild(
        target_dicts[target_name])
    target.is_executable = target_dicts[target_name]['type'] == 'executable'

    build_file = gyp.common.ParseQualifiedTarget(target_name)[0]
    if not build_file in build_file_in_files:
      build_file_in_files[build_file] = \
          _WasBuildFileModified(build_file, data, files, toplevel_dir)

    if build_file in build_files:
      build_file_targets.add(target)

    # If a build file (or any of its included files) is modified we assume all
    # targets in the file are modified.
    if build_file_in_files[build_file]:
      print 'matching target from modified build file', target_name
      target.match_status = MATCH_STATUS_MATCHES
      matching_targets.append(target)
    else:
      sources = _ExtractSources(target_name, target_dicts[target_name],
                                toplevel_dir)
      for source in sources:
        if source in files:
          print 'target', target_name, 'matches', source
          target.match_status = MATCH_STATUS_MATCHES
          matching_targets.append(target)
          break

    # Add dependencies to visit as well as updating back pointers for deps.
    for dep in target_dicts[target_name].get('dependencies', []):
      targets_to_visit.append(dep)

      created_dep_target, dep_target = _GetOrCreateTargetByName(targets, dep)
      if not created_dep_target:
        # The dep has an incoming edge, so it cannot be a root.
        roots.discard(dep_target)

      target.deps.add(dep_target)
      dep_target.back_deps.add(target)

  return targets, matching_targets, roots & build_file_targets
def _GetUnqualifiedToTargetMapping(all_targets, to_find):
  """Returns a dictionary mapping each unqualified name in |to_find| to its
  Target in |all_targets| (names not present are simply absent)."""
  result = {}
  if not to_find:
    return result
  remaining = set(to_find)
  for target_name in all_targets.keys():
    extracted = gyp.common.ParseQualifiedTarget(target_name)
    if len(extracted) > 1 and extracted[1] in remaining:
      remaining.remove(extracted[1])
      result[extracted[1]] = all_targets[target_name]
      # Stop scanning once every requested name has been resolved.
      if not remaining:
        return result
  return result
def _DoesTargetDependOn(target):
  """Returns true if |target| or any of its dependencies matches the supplied
  set of paths, memoizing the answer in |match_status| as it recurses."""
  status = target.match_status
  if status == MATCH_STATUS_DOESNT_MATCH:
    return False
  if status in (MATCH_STATUS_MATCHES, MATCH_STATUS_MATCHES_BY_DEPENDENCY):
    return True
  for dep in target.deps:
    if _DoesTargetDependOn(dep):
      target.match_status = MATCH_STATUS_MATCHES_BY_DEPENDENCY
      return True
  target.match_status = MATCH_STATUS_DOESNT_MATCH
  return False
def _GetTargetsDependingOn(possible_targets):
  """Returns the Targets in |possible_targets| that depend (directly or
  indirectly) on the matched targets."""
  return [target for target in possible_targets
          if _DoesTargetDependOn(target)]
def _AddBuildTargets(target, roots, add_if_no_ancestor, result):
"""Recurses through all targets that depend on |target|, adding all targets
that need to be built (and are in |roots|) to |result|.
roots: set of root targets.
add_if_no_ancestor: If true and there are no ancestors of |target| then add
|target| to |result|. |target| must still be in |roots|.
result: targets that need to be built are added here."""
if target.visited:
return
target.visited = True
target.in_roots = not target.back_deps and target in roots
for back_dep_target in target.back_deps:
_AddBuildTargets(back_dep_target, roots, False, result)
target.added_to_compile_targets |= back_dep_target.added_to_compile_targets
target.in_roots |= back_dep_target.in_roots
# Always add 'executable' targets. Even though they may be built by other
# targets that depend upon them it makes detection of what is going to be
# built easier.
if target.in_roots and \
(target.is_executable or
(not target.added_to_compile_targets and
(add_if_no_ancestor or target.requires_build))):
result.add(target)
target.added_to_compile_targets = True
def _GetBuildTargets(matching_targets, roots):
  """Returns the set of Targets that require a build.

  matching_targets: targets that changed and need to be built.
  roots: set of root targets in the build files to search from.
  """
  build_targets = set()
  for changed_target in matching_targets:
    _AddBuildTargets(changed_target, roots, True, build_targets)
  return build_targets
def _WriteOutput(params, **values):
  """Writes the results to stdout, or to a file when the generator flag
  'analyzer_output_path' is set.

  params: gyp parameter dict; only 'generator_flags' is consulted.
  values: result keys ('error', 'status', 'targets', 'invalid_targets',
          'build_targets'); everything is also serialized as JSON."""
  # Human-readable summary first (Python 2 print statements).
  if 'error' in values:
    print 'Error:', values['error']
  if 'status' in values:
    print values['status']
  if 'targets' in values:
    values['targets'].sort()
    print 'Supplied targets that depend on changed files:'
    for target in values['targets']:
      print '\t', target
  if 'invalid_targets' in values:
    values['invalid_targets'].sort()
    print 'The following targets were not found:'
    for target in values['invalid_targets']:
      print '\t', target
  if 'build_targets' in values:
    values['build_targets'].sort()
    print 'Targets that require a build:'
    for target in values['build_targets']:
      print '\t', target
  output_path = params.get('generator_flags', {}).get(
      'analyzer_output_path', None)
  if not output_path:
    # No output file configured: machine-readable JSON goes to stdout.
    print json.dumps(values)
    return
  try:
    f = open(output_path, 'w')
    f.write(json.dumps(values) + '\n')
    f.close()
  except IOError as e:
    # Best effort: report the failure but do not abort generation.
    print 'Error writing to output file', output_path, str(e)
def _WasGypIncludeFileModified(params, files):
  """Returns true if one of the files in |files| is in the set of included
  files (gyp files pulled in via the --include command-line option).

  A modified include can affect every target, so the caller treats this as
  'everything changed'."""
  if params['options'].includes:
    for include in params['options'].includes:
      if _ToGypPath(include) in files:
        print 'Include file modified, assuming all changed', include
        return True
  return False
def _NamesNotIn(names, mapping):
"""Returns a list of the values in |names| that are not in |mapping|."""
return [name for name in names if name not in mapping]
def _LookupTargets(names, mapping):
"""Returns a list of the mapping[name] for each value in |names| that is in
|mapping|."""
return [mapping[name] for name in names if name in mapping]
def CalculateVariables(default_variables, params):
  """Calculate additional variables for use in the build (called by gyp).

  Sets a default 'OS' variable from the detected flavor; on Windows it also
  pulls shared generator configuration from the VS generator."""
  flavor = gyp.common.GetFlavor(params)
  if flavor == 'mac':
    default_variables.setdefault('OS', 'mac')
  elif flavor == 'win':
    default_variables.setdefault('OS', 'win')
    # Copy additional generator configuration data from VS, which is shared
    # by the Windows Ninja generator.
    import gyp.generator.msvs as msvs_generator
    # NOTE(review): these two assignments create locals that are never read;
    # presumably they were meant to update module-level globals (other gyp
    # generators declare them with 'global') — verify against upstream.
    generator_additional_non_configuration_keys = getattr(msvs_generator,
        'generator_additional_non_configuration_keys', [])
    generator_additional_path_sections = getattr(msvs_generator,
        'generator_additional_path_sections', [])
    gyp.msvs_emulation.CalculateCommonVariables(default_variables, params)
  else:
    operating_system = flavor
    if flavor == 'android':
      operating_system = 'linux'  # Keep this legacy behavior for now.
    default_variables.setdefault('OS', operating_system)
def GenerateOutput(target_list, target_dicts, data, params):
  """Called by gyp as the final stage. Outputs results.

  Reads the analyzer config, determines which supplied targets depend on the
  changed files and which targets must be built, then writes the result via
  _WriteOutput. Any exception is reported as an 'error' result rather than
  propagated."""
  config = Config()
  try:
    config.Init(params)
    if not config.files:
      raise Exception('Must specify files to analyze via config_path generator '
                      'flag')
    toplevel_dir = _ToGypPath(os.path.abspath(params['options'].toplevel_dir))
    if debug:
      print 'toplevel_dir', toplevel_dir
    # A modified --include file can affect anything; short-circuit with the
    # 'all changed' status.
    if _WasGypIncludeFileModified(params, config.files):
      result_dict = { 'status': all_changed_string,
                      'targets': list(config.targets) }
      _WriteOutput(params, **result_dict)
      return
    all_targets, matching_targets, roots = _GenerateTargets(
      data, target_list, target_dicts, toplevel_dir, frozenset(config.files),
      params['build_files'])
    # Map unqualified names from the config to Target objects; anything we
    # cannot resolve is reported back as invalid.
    unqualified_mapping = _GetUnqualifiedToTargetMapping(all_targets,
                                                        config.targets)
    invalid_targets = None
    if len(unqualified_mapping) != len(config.targets):
      invalid_targets = _NamesNotIn(config.targets, unqualified_mapping)
    if matching_targets:
      search_targets = _LookupTargets(config.targets, unqualified_mapping)
      matched_search_targets = _GetTargetsDependingOn(search_targets)
      # Reset the visited status for _GetBuildTargets.
      for target in all_targets.itervalues():
        target.visited = False
      build_targets = _GetBuildTargets(matching_targets, roots)
      # Strip the qualification, returning only the bare target names.
      matched_search_targets = [gyp.common.ParseQualifiedTarget(target.name)[1]
                                for target in matched_search_targets]
      build_targets = [gyp.common.ParseQualifiedTarget(target.name)[1]
                       for target in build_targets]
    else:
      matched_search_targets = []
      build_targets = []
    result_dict = { 'targets': matched_search_targets,
                    'status': found_dependency_string if matching_targets else
                              no_dependency_string,
                    'build_targets': build_targets}
    if invalid_targets:
      result_dict['invalid_targets'] = invalid_targets
    _WriteOutput(params, **result_dict)
  except Exception as e:
    # Report the failure through the normal output channel so callers always
    # receive well-formed results.
    _WriteOutput(params, error=str(e))
| mit |
DelvarWorld/three.js | utils/exporters/maya/plug-ins/threeJsFileTranslator.py | 51 | 23623 | __author__ = 'Sean Griffin'
__version__ = '1.0.0'
__email__ = 'sean@thoughtbot.com'
import sys
import os.path
import json
import shutil
from pymel.core import *
from maya.OpenMaya import *
from maya.OpenMayaMPx import *
# Translator name shown in Maya's export-type dropdown.
kPluginTranslatorTypeName = 'Three.js'
# MEL script that builds the export options UI for this translator.
kOptionScript = 'ThreeJsExportScript'
# Options string used when the user supplies none.
kDefaultOptionsString = '0'
# Decimal places kept when rounding exported floats (positions, normals, quats).
FLOAT_PRECISION = 8
class ThreeJsWriter(object):
    """Exports the current Maya scene to the three.js JSON model format
    (format version 3.1), optionally including materials, texture maps,
    bones, skin weights and baked or keyframed animations."""
    def __init__(self):
        # Option names recognised in the translator option string; each maps
        # to a boolean flag in self.options (see _parseOptions).
        self.componentKeys = ['vertices', 'normals', 'colors', 'uvs', 'faces',
            'materials', 'colorMaps', 'specularMaps', 'bumpMaps', 'copyTextures',
            'bones', 'skeletalAnim', 'bakeAnimations', 'prettyOutput']
    def write(self, path, optionString, accessMode):
        """Main entry point: export the scene (or selection) to *path*.

        path: output .json filename.
        optionString: Maya translator options string.
        accessMode: MPxFileTranslator access mode; kExportActiveAccessMode
            limits export to the selection and skips bones/animations."""
        self.path = path
        self.accessMode = accessMode
        self._parseOptions(optionString)
        # Running offsets so per-mesh indices can be merged into one big
        # vertex/uv/normal list.
        self.verticeOffset = 0
        self.uvOffset = 0
        self.normalOffset = 0
        self.vertices = []
        self.materials = []
        self.faces = []
        self.normals = []
        self.uvs = []
        self.morphTargets = []
        self.bones = []
        self.animations = []
        self.skinIndices = []
        self.skinWeights = []
        print("exporting meshes")
        self._exportMeshes()
        if self.options["materials"]:
            print("exporting materials")
            self._exportMaterials()
        # Bones and animations are only exported for whole-scene exports.
        if not self.accessMode == MPxFileTranslator.kExportActiveAccessMode :
            if self.options["bakeAnimations"]:
                print("exporting animations")
                self._exportAnimations()
                self._goToFrame(self.options["startFrame"])
            if self.options["bones"]:
                print("exporting bones")
                select(map(lambda m: m.getParent(), ls(type='mesh')))
                runtime.GoToBindPose()
                self._exportBones()
                print("exporting skins")
                self._exportSkins()
            if self.options["skeletalAnim"]:
                print("exporting keyframe animations")
                self._exportKeyframeAnimations()
        print("writing file")
        output = {
            'metadata': {
                'formatVersion': 3.1,
                'generatedBy': 'Maya Exporter'
            },
            'vertices': self.vertices,
            'uvs': [self.uvs],
            'faces': self.faces,
            'normals': self.normals,
            'materials': self.materials,
        }
        if not self.accessMode == MPxFileTranslator.kExportActiveAccessMode :
            if self.options['bakeAnimations']:
                output['morphTargets'] = self.morphTargets
            if self.options['bones']:
                output['bones'] = self.bones
                output['skinIndices'] = self.skinIndices
                output['skinWeights'] = self.skinWeights
                output['influencesPerVertex'] = self.options["influencesPerVertex"]
            if self.options['skeletalAnim']:
                output['animations'] = self.animations
        # Python 2 'file' builtin; compact output unless prettyOutput is set.
        with file(path, 'w') as f:
            if self.options['prettyOutput']:
                f.write(json.dumps(output, indent=4, separators=(", ", ": ")))
            else:
                f.write(json.dumps(output, separators=(",",":")))
    def _allMeshes(self):
        """Return the meshes to export: all connected meshes for a full
        export, or the current selection for a selection export."""
        if not self.accessMode == MPxFileTranslator.kExportActiveAccessMode :
            print("*** Exporting ALL (NEW) ***")
            self.__allMeshes = filter(lambda m: len(m.listConnections()) > 0, ls(type='mesh'))
        else :
            print("### Exporting SELECTED ###")
            self.__allMeshes = ls(selection=True)
        return self.__allMeshes
    def _parseOptions(self, optionsString):
        """Populate self.options from the translator options string.

        NOTE: flags are detected by substring membership, so option names
        must not be substrings of one another."""
        self.options = dict([(x, False) for x in self.componentKeys])
        for key in self.componentKeys:
            self.options[key] = key in optionsString
        # Numeric sub-options follow their flag name, space separated.
        if self.options["bones"]:
            boneOptionsString = optionsString[optionsString.find("bones"):]
            boneOptions = boneOptionsString.split(' ')
            self.options["influencesPerVertex"] = int(boneOptions[1])
        if self.options["bakeAnimations"]:
            bakeAnimOptionsString = optionsString[optionsString.find("bakeAnimations"):]
            bakeAnimOptions = bakeAnimOptionsString.split(' ')
            self.options["startFrame"] = int(bakeAnimOptions[1])
            self.options["endFrame"] = int(bakeAnimOptions[2])
            self.options["stepFrame"] = int(bakeAnimOptions[3])
    def _exportMeshes(self):
        """Export vertex data once, then the per-mesh components."""
        if self.options['vertices']:
            self._exportVertices()
        for mesh in self._allMeshes():
            self._exportMesh(mesh)
    def _exportMesh(self, mesh):
        """Export faces, normals and UVs for one mesh, advancing the shared
        index offsets."""
        print("Exporting " + mesh.name())
        if self.options['faces']:
            print("Exporting faces")
            self._exportFaces(mesh)
            self.verticeOffset += len(mesh.getPoints())
            self.uvOffset += mesh.numUVs()
            self.normalOffset += mesh.numNormals()
        if self.options['normals']:
            print("Exporting normals")
            self._exportNormals(mesh)
        if self.options['uvs']:
            print("Exporting UVs")
            self._exportUVs(mesh)
    def _getMaterialIndex(self, face, mesh):
        """Return the index into self.materials for the material assigned to
        *face* (via its shading engine), or -1 if none is found."""
        if not hasattr(self, '_materialIndices'):
            # Lazy cache: material name -> index in self.materials.
            self._materialIndices = dict([(mat['DbgName'], i) for i, mat in enumerate(self.materials)])
        if self.options['materials']:
            for engine in mesh.listConnections(type='shadingEngine'):
                if sets(engine, isMember=face) or sets(engine, isMember=mesh):
                    for material in engine.listConnections(type='lambert'):
                        if self._materialIndices.has_key(material.name()):
                            return self._materialIndices[material.name()]
        return -1
    def _exportVertices(self):
        # World-space positions of every exported mesh, flattened.
        self.vertices += self._getVertices()
    def _exportAnimations(self):
        """Bake the configured frame range into morph targets."""
        for frame in self._framesToExport():
            self._exportAnimationForFrame(frame)
    def _framesToExport(self):
        # Frame range is configured by the bakeAnimations sub-options.
        return range(self.options["startFrame"], self.options["endFrame"], self.options["stepFrame"])
    def _exportAnimationForFrame(self, frame):
        """Capture one frame's vertex positions as a morph target."""
        print("exporting frame " + str(frame))
        self._goToFrame(frame)
        self.morphTargets.append({
            'name': "frame_" + str(frame),
            'vertices': self._getVertices()
        })
    def _getVertices(self):
        # Flat [x, y, z, x, y, z, ...] list in world space, rounded.
        return [coord for mesh in self._allMeshes() for point in mesh.getPoints(space='world') for coord in [round(point.x, FLOAT_PRECISION), round(point.y, FLOAT_PRECISION), round(point.z, FLOAT_PRECISION)]]
    def _goToFrame(self, frame):
        # Moves the Maya timeline; subsequent queries read that frame's state.
        currentTime(frame)
    def _exportFaces(self, mesh):
        """Append each face as a three.js face record: bitmask, vertex
        indices, then optional material / uv / normal indices."""
        typeBitmask = self._getTypeBitmask()
        for face in mesh.faces:
            materialIndex = self._getMaterialIndex(face, mesh)
            hasMaterial = materialIndex != -1
            self._exportFaceBitmask(face, typeBitmask, hasMaterial=hasMaterial)
            self.faces += map(lambda x: x + self.verticeOffset, face.getVertices())
            if self.options['materials']:
                if hasMaterial:
                    self.faces.append(materialIndex)
            if self.options['uvs'] and face.hasUVs():
                self.faces += map(lambda v: face.getUVIndex(v) + self.uvOffset, range(face.polygonVertexCount()))
            if self.options['normals']:
                self._exportFaceVertexNormals(face)
    def _exportFaceBitmask(self, face, typeBitmask, hasMaterial=True):
        """Append the per-face type bitmask (three.js format 3 flags):
        bit 0 = quad, bit 1 = has material, bit 3 = has face-vertex UVs."""
        if face.polygonVertexCount() == 4:
            faceBitmask = 1
        else:
            faceBitmask = 0
        if hasMaterial:
            faceBitmask |= (1 << 1)
        if self.options['uvs'] and face.hasUVs():
            faceBitmask |= (1 << 3)
        self.faces.append(typeBitmask | faceBitmask)
    def _exportFaceVertexNormals(self, face):
        # One normal index per face vertex, offset into the global list.
        for i in range(face.polygonVertexCount()):
            self.faces.append(face.normalIndex(i) + self.normalOffset)
    def _exportNormals(self, mesh):
        # Flat [x, y, z, ...] list of rounded normal components.
        for normal in mesh.getNormals():
            self.normals += [round(normal.x, FLOAT_PRECISION), round(normal.y, FLOAT_PRECISION), round(normal.z, FLOAT_PRECISION)]
    def _exportUVs(self, mesh):
        # Interleave u and v into a flat [u, v, u, v, ...] list.
        us, vs = mesh.getUVs()
        for i, u in enumerate(us):
            self.uvs.append(u)
            self.uvs.append(vs[i])
    def _getTypeBitmask(self):
        # Mesh-wide flags: bit 5 set = faces carry vertex normals.
        bitmask = 0
        if self.options['normals']:
            bitmask |= 32
        return bitmask
    def _exportMaterials(self):
        """Collect every lambert-derived material in the export set."""
        hist = listHistory( self._allMeshes(), f=1 )
        mats = listConnections( hist, type='lambert' )
        for mat in mats:
            # pymel nodes are string-like, so concatenation works here.
            print("material: " + mat)
            self.materials.append(self._exportMaterial(mat))
    def _exportMaterial(self, mat):
        """Convert one Maya material into a three.js material dict."""
        result = {
            "DbgName": mat.name(),
            "blending": "NormalBlending",
            "colorDiffuse": map(lambda i: i * mat.getDiffuseCoeff(), mat.getColor().rgb),
            "depthTest": True,
            "depthWrite": True,
            "shading": mat.__class__.__name__,
            "opacity": mat.getTransparency().r,
            "transparent": mat.getTransparency().r != 1.0,
            "vertexColors": False
        }
        # Phong materials additionally carry specular settings.
        if isinstance(mat, nodetypes.Phong):
            result["colorSpecular"] = mat.getSpecularColor().rgb
            result["reflectivity"] = mat.getReflectivity()
            result["specularCoef"] = mat.getCosPower()
            if self.options["specularMaps"]:
                self._exportSpecularMap(result, mat)
        if self.options["bumpMaps"]:
            self._exportBumpMap(result, mat)
        if self.options["colorMaps"]:
            self._exportColorMap(result, mat)
        return result
    def _exportBumpMap(self, result, mat):
        """Export a file texture connected through a bump2d node as the
        normal map."""
        for bump in mat.listConnections(type='bump2d'):
            for f in bump.listConnections(type='file'):
                result["mapNormalFactor"] = 1
                self._exportFile(result, f, "Normal")
    def _exportColorMap(self, result, mat):
        """Export the file texture feeding the color attribute as the
        diffuse map."""
        for f in mat.attr('color').inputs():
            result["colorDiffuse"] = f.attr('defaultColor').get()
            self._exportFile(result, f, "Diffuse")
    def _exportSpecularMap(self, result, mat):
        """Export the file texture feeding specularColor as the specular
        map."""
        for f in mat.attr('specularColor').inputs():
            result["colorSpecular"] = f.attr('defaultColor').get()
            self._exportFile(result, f, "Specular")
    def _exportFile(self, result, mapFile, mapType):
        """Record texture metadata (and optionally copy the file next to the
        exported JSON) under the map<Type>* keys."""
        src = mapFile.ftn.get()
        targetDir = os.path.dirname(self.path)
        fName = os.path.basename(src)
        if self.options['copyTextures']:
            shutil.copy2(src, os.path.join(targetDir, fName))
        result["map" + mapType] = fName
        result["map" + mapType + "Repeat"] = [1, 1]
        result["map" + mapType + "Wrap"] = ["repeat", "repeat"]
        result["map" + mapType + "Anisotropy"] = 4
    def _exportBones(self):
        """Export every joint influencing the exported meshes, with its
        parent index, bind position and orientation quaternion."""
        hist = listHistory( self._allMeshes(), f=1 )
        joints = listConnections( hist, type="joint")
        for joint in joints:
            if joint.getParent():
                parentIndex = self._indexOfJoint(joint.getParent().name())
            else:
                parentIndex = -1
            rotq = joint.getRotation(quaternion=True) * joint.getOrientation()
            pos = joint.getTranslation()
            self.bones.append({
                "parent": parentIndex,
                "name": joint.name(),
                "pos": self._roundPos(pos),
                "rotq": self._roundQuat(rotq)
            })
    def _indexOfJoint(self, name):
        """Return the scene-wide index of the named joint, or -1."""
        if not hasattr(self, '_jointNames'):
            # Lazy cache of joint name -> index over all joints in the scene.
            self._jointNames = dict([(joint.name(), i) for i, joint in enumerate(ls(type='joint'))])
        if name in self._jointNames:
            return self._jointNames[name]
        else:
            return -1
    def _exportKeyframeAnimations(self):
        """Export each joint's keyframes as one skeletal animation clip.

        NOTE(review): 'parent' here is a running counter starting at -1, not
        the joint's actual parent index, and 'fps' is hard-coded to 1 while
        times are already divided by the real frame rate — verify against the
        three.js loader before changing."""
        hierarchy = []
        i = -1
        frameRate = FramesPerSecond(currentUnit(query=True, time=True)).value()
        hist = listHistory( self._allMeshes(), f=1 )
        joints = listConnections( hist, type="joint")
        for joint in joints:
            hierarchy.append({
                "parent": i,
                "keys": self._getKeyframes(joint, frameRate)
            })
            i += 1
        self.animations.append({
            "name": "skeletalAction.001",
            "length": (playbackOptions(maxTime=True, query=True) - playbackOptions(minTime=True, query=True)) / frameRate,
            "fps": 1,
            "hierarchy": hierarchy
        })
    def _getKeyframes(self, joint, frameRate):
        """Sample the joint at every keyed frame plus the range endpoints."""
        firstFrame = playbackOptions(minTime=True, query=True)
        lastFrame = playbackOptions(maxTime=True, query=True)
        frames = sorted(list(set(keyframe(joint, query=True) + [firstFrame, lastFrame])))
        keys = []
        print("joint " + joint.name() + " has " + str(len(frames)) + " keyframes")
        for frame in frames:
            self._goToFrame(frame)
            keys.append(self._getCurrentKeyframe(joint, frame, frameRate))
        return keys
    def _getCurrentKeyframe(self, joint, frame, frameRate):
        """Capture the joint's translation/rotation at the current frame as
        a three.js keyframe dict (time is in seconds from range start)."""
        pos = joint.getTranslation()
        rot = joint.getRotation(quaternion=True) * joint.getOrientation()
        return {
            'time': (frame - playbackOptions(minTime=True, query=True)) / frameRate,
            'pos': self._roundPos(pos),
            'rot': self._roundQuat(rot),
            'scl': [1,1,1]
        }
    def _roundPos(self, pos):
        # [x, y, z] rounded to the export precision.
        return map(lambda x: round(x, FLOAT_PRECISION), [pos.x, pos.y, pos.z])
    def _roundQuat(self, rot):
        # [x, y, z, w] rounded to the export precision.
        return map(lambda x: round(x, FLOAT_PRECISION), [rot.x, rot.y, rot.z, rot.w])
    def _exportSkins(self):
        """Export per-vertex skin weights, padded/truncated to exactly
        influencesPerVertex entries per vertex; meshes without a skin
        cluster get all-zero weights."""
        for mesh in self._allMeshes():
            print("exporting skins for mesh: " + mesh.name())
            hist = listHistory( mesh, f=1 )
            skins = listConnections( hist, type='skinCluster')
            if len(skins) > 0:
                print("mesh has " + str(len(skins)) + " skins")
                skin = skins[0]
                joints = skin.influenceObjects()
                for weights in skin.getWeights(mesh.vtx):
                    numWeights = 0
                    # Keep only non-zero influences, mapping each to its
                    # scene-wide joint index.
                    for i in range(0, len(weights)):
                        if weights[i] > 0:
                            self.skinWeights.append(weights[i])
                            self.skinIndices.append(self._indexOfJoint(joints[i].name()))
                            numWeights += 1
                    if numWeights > self.options["influencesPerVertex"]:
                        raise Exception("More than " + str(self.options["influencesPerVertex"]) + " influences on a vertex in " + mesh.name() + ".")
                    # Zero-pad so every vertex has the same influence count.
                    for i in range(0, self.options["influencesPerVertex"] - numWeights):
                        self.skinWeights.append(0)
                        self.skinIndices.append(0)
            else:
                print("mesh has no skins, appending 0")
                for i in range(0, len(mesh.getPoints()) * self.options["influencesPerVertex"]):
                    self.skinWeights.append(0)
                    self.skinIndices.append(0)
class NullAnimCurve(object):
    """Stand-in for a missing animation curve; every sample evaluates to
    zero."""
    def getValue(self, index):
        # No keys to evaluate, so the value is constant.
        return 0.0
class ThreeJsTranslator(MPxFileTranslator):
    """Maya file translator that exports scenes to three.js JSON via
    ThreeJsWriter."""
    def __init__(self):
        MPxFileTranslator.__init__(self)
    def haveWriteMethod(self):
        # Export-only translator: Maya will call writer(), never reader().
        return True
    def filter(self):
        # File-browser filter shown in the export dialog.
        return '*.json'
    def defaultExtension(self):
        return 'json'
    def writer(self, fileObject, optionString, accessMode):
        """Entry point invoked by Maya to perform the export."""
        path = fileObject.fullName()
        writer = ThreeJsWriter()
        writer.write(path, optionString, accessMode)
def translatorCreator():
    """Factory handed to Maya's plugin registry; asMPxPtr transfers
    ownership of the new translator to Maya."""
    return asMPxPtr(ThreeJsTranslator())
def initializePlugin(mobject):
    """Maya plugin hook: register the Three.js file translator."""
    mplugin = MFnPlugin(mobject)
    try:
        mplugin.registerFileTranslator(kPluginTranslatorTypeName, None, translatorCreator, kOptionScript, kDefaultOptionsString)
    except:
        # Report and re-raise so Maya marks the plugin load as failed.
        sys.stderr.write('Failed to register translator: %s' % kPluginTranslatorTypeName)
        raise
def uninitializePlugin(mobject):
    """Maya plugin hook: deregister the Three.js file translator."""
    mplugin = MFnPlugin(mobject)
    try:
        mplugin.deregisterFileTranslator(kPluginTranslatorTypeName)
    except:
        # Report and re-raise so Maya marks the plugin unload as failed.
        sys.stderr.write('Failed to deregister translator: %s' % kPluginTranslatorTypeName)
        raise
class FramesPerSecond(object):
    """Maps a Maya time-unit string (e.g. 'film', 'ntsc', '100fps') to an
    integer frame rate."""
    # Maya's named time units and their frame rates.
    MAYA_VALUES = {
        'game': 15,
        'film': 24,
        'pal': 25,
        'ntsc': 30,
        'show': 48,
        'palf': 50,
        'ntscf': 60
    }
    def __init__(self, fpsString):
        # fpsString: value returned by currentUnit(query=True, time=True).
        self.fpsString = fpsString
    def value(self):
        """Return the frame rate as an integer.

        Named units are looked up in MAYA_VALUES; anything else (such as
        '48fps') is parsed by keeping only its digit characters.
        """
        if self.fpsString in FramesPerSecond.MAYA_VALUES:
            return FramesPerSecond.MAYA_VALUES[self.fpsString]
        # ''.join keeps this working on both Python 2 and Python 3; the
        # previous int(filter(...)) relied on filter() returning a str,
        # which is Python 2 only.
        return int(''.join(c for c in self.fpsString if c.isdigit()))
###################################################################
## The code below was taken from the Blender 3JS Exporter
## It's purpose is to fix the JSON output so that it does not
## put each array value on it's own line, which is ridiculous
## for this type of output.
###################################################################
ROUND = 6
## THREE override function
def _json_floatstr(o):
if ROUND is not None:
o = round(o, ROUND)
return '%g' % o
def _make_iterencode(markers, _default, _encoder, _indent, _floatstr,
        _key_separator, _item_separator, _sort_keys, _skipkeys, _one_shot,
        ## HACK: hand-optimized bytecode; turn globals into locals
        ValueError=ValueError,
        basestring=basestring,
        dict=dict,
        float=float,
        id=id,
        int=int,
        isinstance=isinstance,
        list=list,
        long=long,
        str=str,
        tuple=tuple,
    ):
    """Modified copy of CPython 2's json.encoder._make_iterencode.

    The only functional change is in _iterencode_list: the indentation
    logic is commented out so lists are always emitted on a single line,
    even when json.dumps is called with indent= (dicts still indent).
    Returns the _iterencode generator function used by json.dumps."""
    def _iterencode_list(lst, _current_indent_level):
        # Yield the JSON chunks for a list/tuple, always on one line.
        if not lst:
            yield '[]'
            return
        if markers is not None:
            # Cycle detection: remember containers currently being encoded.
            markerid = id(lst)
            if markerid in markers:
                raise ValueError("Circular reference detected")
            markers[markerid] = lst
        buf = '['
        # Indentation deliberately disabled for lists (see module banner).
        #if _indent is not None:
        #    _current_indent_level += 1
        #    newline_indent = '\n' + (' ' * (_indent * _current_indent_level))
        #    separator = _item_separator + newline_indent
        #    buf += newline_indent
        #else:
        newline_indent = None
        separator = _item_separator
        first = True
        for value in lst:
            if first:
                first = False
            else:
                buf = separator
            if isinstance(value, basestring):
                yield buf + _encoder(value)
            elif value is None:
                yield buf + 'null'
            elif value is True:
                yield buf + 'true'
            elif value is False:
                yield buf + 'false'
            elif isinstance(value, (int, long)):
                yield buf + str(value)
            elif isinstance(value, float):
                yield buf + _floatstr(value)
            else:
                # Containers and custom objects: recurse.
                yield buf
                if isinstance(value, (list, tuple)):
                    chunks = _iterencode_list(value, _current_indent_level)
                elif isinstance(value, dict):
                    chunks = _iterencode_dict(value, _current_indent_level)
                else:
                    chunks = _iterencode(value, _current_indent_level)
                for chunk in chunks:
                    yield chunk
        if newline_indent is not None:
            _current_indent_level -= 1
            yield '\n' + (' ' * (_indent * _current_indent_level))
        yield ']'
        if markers is not None:
            del markers[markerid]
    def _iterencode_dict(dct, _current_indent_level):
        # Yield the JSON chunks for a dict; indentation still honoured here.
        if not dct:
            yield '{}'
            return
        if markers is not None:
            markerid = id(dct)
            if markerid in markers:
                raise ValueError("Circular reference detected")
            markers[markerid] = dct
        yield '{'
        if _indent is not None:
            _current_indent_level += 1
            newline_indent = '\n' + (' ' * (_indent * _current_indent_level))
            item_separator = _item_separator + newline_indent
            yield newline_indent
        else:
            newline_indent = None
            item_separator = _item_separator
        first = True
        if _sort_keys:
            items = sorted(dct.items(), key=lambda kv: kv[0])
        else:
            items = dct.iteritems()
        for key, value in items:
            if isinstance(key, basestring):
                pass
            # JavaScript is weakly typed for these, so it makes sense to
            # also allow them. Many encoders seem to do something like this.
            elif isinstance(key, float):
                key = _floatstr(key)
            elif key is True:
                key = 'true'
            elif key is False:
                key = 'false'
            elif key is None:
                key = 'null'
            elif isinstance(key, (int, long)):
                key = str(key)
            elif _skipkeys:
                continue
            else:
                raise TypeError("key " + repr(key) + " is not a string")
            if first:
                first = False
            else:
                yield item_separator
            yield _encoder(key)
            yield _key_separator
            if isinstance(value, basestring):
                yield _encoder(value)
            elif value is None:
                yield 'null'
            elif value is True:
                yield 'true'
            elif value is False:
                yield 'false'
            elif isinstance(value, (int, long)):
                yield str(value)
            elif isinstance(value, float):
                yield _floatstr(value)
            else:
                if isinstance(value, (list, tuple)):
                    chunks = _iterencode_list(value, _current_indent_level)
                elif isinstance(value, dict):
                    chunks = _iterencode_dict(value, _current_indent_level)
                else:
                    chunks = _iterencode(value, _current_indent_level)
                for chunk in chunks:
                    yield chunk
        if newline_indent is not None:
            _current_indent_level -= 1
            yield '\n' + (' ' * (_indent * _current_indent_level))
        yield '}'
        if markers is not None:
            del markers[markerid]
    def _iterencode(o, _current_indent_level):
        # Top-level dispatcher over all supported value types.
        if isinstance(o, basestring):
            yield _encoder(o)
        elif o is None:
            yield 'null'
        elif o is True:
            yield 'true'
        elif o is False:
            yield 'false'
        elif isinstance(o, (int, long)):
            yield str(o)
        elif isinstance(o, float):
            yield _floatstr(o)
        elif isinstance(o, (list, tuple)):
            for chunk in _iterencode_list(o, _current_indent_level):
                yield chunk
        elif isinstance(o, dict):
            for chunk in _iterencode_dict(o, _current_indent_level):
                yield chunk
        else:
            # Unknown type: let the user-supplied default() convert it,
            # guarding against self-referential conversions.
            if markers is not None:
                markerid = id(o)
                if markerid in markers:
                    raise ValueError("Circular reference detected")
                markers[markerid] = o
            o = _default(o)
            for chunk in _iterencode(o, _current_indent_level):
                yield chunk
            if markers is not None:
                del markers[markerid]
    return _iterencode
# override the encoder
# Monkey-patch json's iterencode factory globally so every json.dumps call
# made by this plugin uses the compact list encoding defined above.
json.encoder._make_iterencode = _make_iterencode
| mit |
mgeisler/satori | satori/sysinfo/posh_ohai.py | 2 | 5515 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# pylint: disable=W0622
"""PoSh-Ohai Data Plane Discovery Module."""
import json
import logging
import ipaddress as ipaddress_module
import six
from satori import bash
from satori import errors
from satori import utils
LOG = logging.getLogger(__name__)
def get_systeminfo(ipaddress, config, interactive=False):
    """Run data plane discovery using this module against a host.

    :param ipaddress: address to the host to discover.
    :param config: arguments and configuration supplied to satori.
    :keyword interactive: whether to prompt the user for information.
    """
    # Local addresses (including loopback) are probed through a local shell;
    # anything else is reached over a remote shell session.
    if (ipaddress in utils.get_local_ips() or
            ipaddress_module.ip_address(six.text_type(ipaddress)).is_loopback):
        client = bash.LocalShell()
        client.host = "localhost"
        client.port = 0
        perform_install(client)
        return system_info(client)
    else:
        with bash.RemoteShell(
                ipaddress, username=config['host_username'],
                private_key=config['host_key'],
                interactive=interactive) as client:
            perform_install(client)
            return system_info(client)
def system_info(client, with_install=False):
    """Run PoSh-Ohai on a remote system and gather the output.

    :param client: :class:`bash.LocalShell` or :class:`bash.RemoteShell`
        instance connected to the target host.
    :keyword with_install: install/update PoSh-Ohai before querying.
    :returns: dict -- system information from PoSh-Ohai
    :raises: SystemInfoNotJson if the command output is not valid JSON,
        SystemInfoMissingJson if no JSON can be extracted from the output,
        PlatformNotSupported if the target is not Windows.
    """
    if with_install:
        perform_install(client)
    if client.is_windows():
        powershell_command = 'Get-ComputerConfiguration'
        output = client.execute(powershell_command)
        unicode_output = "%s" % output
        try:
            results = json.loads(unicode_output)
        except ValueError:
            # Output may carry banner/noise around the JSON payload; try to
            # carve out the JSON substring before giving up.
            try:
                clean_output = get_json(unicode_output)
                results = json.loads(clean_output)
            except ValueError as err:
                raise errors.SystemInfoNotJson(err)
        return results
    else:
        raise errors.PlatformNotSupported(
            "PoSh-Ohai is a Windows-only sytem info provider. "
            "Target platform was %s", client.platform_info['dist'])
def perform_install(client):
    """Install (or update) PoSh-Ohai on the remote Windows system.

    :param client: connected shell client for the target host.
    :returns: command output from the remote installer script.
    :raises: PlatformNotSupported if the target is not Windows.
    """
    LOG.info("Installing (or updating) PoSh-Ohai on device %s at %s:%d",
             client.host, client.host, client.port)
    # Check is it is a windows box, but fail safely to Linux
    # (a probe failure is treated as "not Windows" rather than an error).
    is_windows = False
    try:
        is_windows = client.is_windows()
    except Exception:
        pass
    if is_windows:
        # Download and immediately invoke the deploy script on the target.
        powershell_command = ('[scriptblock]::Create((New-Object -TypeName '
                              'System.Net.WebClient).DownloadString('
                              '"http://readonly.configdiscovery.rackspace.com'
                              '/deploy.ps1")).Invoke()')
        # check output to ensure that installation was successful
        # if not, raise SystemInfoCommandInstallFailed
        output = client.execute(powershell_command)
        return output
    else:
        raise errors.PlatformNotSupported(
            "PoSh-Ohai is a Windows-only sytem info provider. "
            "Target platform was %s", client.platform_info['dist'])
def remove_remote(client):
    """Remove PoSh-Ohai from a specific remote Windows system.

    Deletes the PoSh-Ohai module directory from the system-wide PowerShell
    module path.

    :param client: connected shell client for the target host.
    :returns: command output from the removal command.
    :raises: PlatformNotSupported if the target is not Windows.
    """
    if client.is_windows():
        powershell_command = ('Remove-Item -Path (Join-Path -Path '
                              '$($env:PSModulePath.Split(";") '
                              '| Where-Object { $_.StartsWith('
                              '$env:SystemRoot)}) -ChildPath '
                              '"PoSh-Ohai") -Recurse -Force -ErrorAction '
                              'SilentlyContinue')
        output = client.execute(powershell_command)
        return output
    else:
        raise errors.PlatformNotSupported(
            "PoSh-Ohai is a Windows-only sytem info provider. "
            "Target platform was %s", client.platform_info['dist'])
def get_json(data):
    """Extract the JSON object embedded in *data*.

    :param data: string possibly surrounded by non-JSON noise
    :returns: string -- substring from the first '{' through the last '}'
    :raises: SystemInfoMissingJson if no JSON delimiters are found
    """
    try:
        start = data.index('{')
        end = data.rindex('}') + 1
    except ValueError as exc:
        context = {"ValueError": "%s" % exc}
        raise errors.SystemInfoMissingJson(context)
    return data[start:end]
| apache-2.0 |
jtyr/ansible-modules-extras | network/f5/bigip_ssl_certificate.py | 23 | 16161 | #!/usr/bin/python
#
# (c) 2016, Kevin Coming (@waffie1)
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
module: bigip_ssl_certificate
short_description: Import/Delete certificates from BIG-IP
description:
- This module will import/delete SSL certificates on BIG-IP LTM.
Certificates can be imported from certificate and key files on the local
disk, in PEM format.
version_added: 2.2
options:
cert_content:
description:
- When used instead of 'cert_src', sets the contents of a certificate directly
to the specified value. This is used with lookup plugins or for anything
with formatting or templating. Either one of C(key_src),
C(key_content), C(cert_src) or C(cert_content) must be provided when
C(state) is C(present).
required: false
key_content:
description:
- When used instead of 'key_src', sets the contents of a certificate key
directly to the specified value. This is used with lookup plugins or for
anything with formatting or templating. Either one of C(key_src),
C(key_content), C(cert_src) or C(cert_content) must be provided when
C(state) is C(present).
required: false
state:
description:
- Certificate and key state. This determines if the provided certificate
and key is to be made C(present) on the device or C(absent).
required: true
default: present
choices:
- present
- absent
partition:
description:
- BIG-IP partition to use when adding/deleting certificate.
required: false
default: Common
name:
description:
- SSL Certificate Name. This is the cert/key pair name used
when importing a certificate/key into the F5. It also
determines the filenames of the objects on the LTM
(:Partition:name.cer_11111_1 and :Partition_name.key_11111_1).
required: true
cert_src:
description:
- This is the local filename of the certificate. Either one of C(key_src),
C(key_content), C(cert_src) or C(cert_content) must be provided when
C(state) is C(present).
required: false
key_src:
description:
- This is the local filename of the private key. Either one of C(key_src),
C(key_content), C(cert_src) or C(cert_content) must be provided when
C(state) is C(present).
required: false
passphrase:
description:
- Passphrase on certificate private key
required: false
notes:
- Requires the f5-sdk Python package on the host. This is as easy as pip
install f5-sdk.
- Requires the netaddr Python package on the host.
- If you use this module, you will not be able to remove the certificates
and keys that are managed, via the web UI. You can only remove them via
tmsh or these modules.
extends_documentation_fragment: f5
requirements:
- f5-sdk >= 1.5.0
- BigIP >= v12
author:
- Kevin Coming (@waffie1)
- Tim Rupp (@caphrim007)
'''
EXAMPLES = '''
- name: Import PEM Certificate from local disk
bigip_ssl_certificate:
name: "certificate-name"
server: "lb.mydomain.com"
user: "admin"
password: "secret"
state: "present"
cert_src: "/path/to/cert.crt"
key_src: "/path/to/key.key"
delegate_to: localhost
- name: Use a file lookup to import PEM Certificate
bigip_ssl_certificate:
name: "certificate-name"
server: "lb.mydomain.com"
user: "admin"
password: "secret"
state: "present"
cert_content: "{{ lookup('file', '/path/to/cert.crt') }}"
key_content: "{{ lookup('file', '/path/to/key.key') }}"
delegate_to: localhost
- name: "Delete Certificate"
bigip_ssl_certificate:
name: "certificate-name"
server: "lb.mydomain.com"
user: "admin"
password: "secret"
state: "absent"
delegate_to: localhost
'''
# Return-value documentation rendered by ansible-doc. The key_checksum and
# cert_checksum entries previously used the wrong YAML key 'return:'; the
# Ansible RETURN spec (and the other entries here) use 'returned:'.
RETURN = '''
cert_name:
    description: >
        The name of the SSL certificate. The C(cert_name) and
        C(key_name) will be equal to each other.
    returned:
        - created
        - changed
        - deleted
    type: string
    sample: "cert1"
key_name:
    description: >
        The name of the SSL certificate key. The C(key_name) and
        C(cert_name) will be equal to each other.
    returned:
        - created
        - changed
        - deleted
    type: string
    sample: "key1"
partition:
    description: Partition in which the cert/key was created
    returned:
        - changed
        - created
        - deleted
    type: string
    sample: "Common"
key_checksum:
    description: SHA1 checksum of the key that was provided
    returned:
        - changed
        - created
    type: string
    sample: "cf23df2207d99a74fbe169e3eba035e633b65d94"
cert_checksum:
    description: SHA1 checksum of the cert that was provided
    returned:
        - changed
        - created
    type: string
    sample: "f7ff9e8b7bb2e09b70935a5d785e0cc5d9d0abf0"
'''
try:
from f5.bigip.contexts import TransactionContextManager
from f5.bigip import ManagementRoot
from icontrol.session import iControlUnexpectedHTTPError
HAS_F5SDK = True
except ImportError:
HAS_F5SDK = False
import hashlib
import StringIO
class BigIpSslCertificate(object):
    """Manage an SSL certificate/key pair on a BIG-IP device.

    PEM content (supplied inline or read from local files) is uploaded
    to the device and installed as sys/file ssl-cert and ssl-key
    objects. ``state=absent`` removes both objects.
    """

    def __init__(self, *args, **kwargs):
        if not HAS_F5SDK:
            raise F5ModuleError("The python f5-sdk module is required")

        required_args = ['key_content', 'key_src', 'cert_content', 'cert_src']

        # When *_src file paths were given, read them into the *_content
        # params so the rest of the module deals only with in-memory text.
        ksource = kwargs['key_src']
        if ksource:
            with open(ksource) as f:
                kwargs['key_content'] = f.read()

        csource = kwargs['cert_src']
        if csource:
            with open(csource) as f:
                kwargs['cert_content'] = f.read()

        if kwargs['state'] == 'present':
            if not any(kwargs[k] is not None for k in required_args):
                raise F5ModuleError(
                    "Either 'key_content', 'key_src', 'cert_content' or "
                    "'cert_src' must be provided"
                )

        # This is the remote BIG-IP path from where it will look for certs
        # to install.
        self.dlpath = '/var/config/rest/downloads'

        # The params that change in the module
        self.cparams = dict()

        # Stores the params that are sent to the module
        self.params = kwargs
        self.api = ManagementRoot(kwargs['server'],
                                  kwargs['user'],
                                  kwargs['password'],
                                  port=kwargs['server_port'])

    def exists(self):
        """Return True only when both the cert and the key exist."""
        cert = self.cert_exists()
        key = self.key_exists()

        if cert and key:
            return True
        else:
            return False

    def get_hash(self, content):
        """Return the SHA1 hex digest of *content*, read in 1 KiB chunks."""
        k = hashlib.sha1()
        s = StringIO.StringIO(content)
        while True:
            data = s.read(1024)
            if not data:
                break
            k.update(data)
        return k.hexdigest()

    def present(self):
        """Ensure the cert/key pair exists and matches the given content.

        Returns True when anything was (or, in check mode, would be)
        changed.
        """
        current = self.read()
        changed = False
        do_key = False
        do_cert = False
        chash = None
        khash = None

        check_mode = self.params['check_mode']

        name = self.params['name']
        partition = self.params['partition']
        cert_content = self.params['cert_content']
        key_content = self.params['key_content']
        passphrase = self.params['passphrase']

        # Technically you dont need to provide us with anything in the form
        # of content for your cert, but that's kind of illogical, so we just
        # return saying you didn't "do" anything if you left the cert and keys
        # empty.
        if not cert_content and not key_content:
            return False

        # Compare SHA1 checksums against the device to decide whether each
        # object needs to be created or updated.
        if key_content is not None:
            if 'key_checksum' in current:
                khash = self.get_hash(key_content)
                if khash not in current['key_checksum']:
                    do_key = "update"
            else:
                do_key = "create"

        if cert_content is not None:
            if 'cert_checksum' in current:
                chash = self.get_hash(cert_content)
                if chash not in current['cert_checksum']:
                    do_cert = "update"
            else:
                do_cert = "create"

        if do_cert or do_key:
            changed = True
            params = dict()
            params['cert_name'] = name
            params['key_name'] = name
            params['partition'] = partition
            if khash:
                params['key_checksum'] = khash
            if chash:
                params['cert_checksum'] = chash
            self.cparams = params

        if check_mode:
            return changed

        if not do_cert and not do_key:
            return False

        # Upload and install both objects inside a single transaction so a
        # partial failure does not leave a mismatched cert/key pair.
        tx = self.api.tm.transactions.transaction
        with TransactionContextManager(tx) as api:
            if do_cert:
                # Upload the content of a certificate as a StringIO object
                cstring = StringIO.StringIO(cert_content)
                filename = "%s.crt" % (name)
                filepath = os.path.join(self.dlpath, filename)
                api.shared.file_transfer.uploads.upload_stringio(
                    cstring,
                    filename
                )

                if do_cert == "update":
                    # Install the certificate
                    params = {
                        'name': name,
                        'partition': partition
                    }
                    cert = api.tm.sys.file.ssl_certs.ssl_cert.load(**params)

                    # This works because, while the source path is the same,
                    # calling update causes the file to be re-read
                    cert.update()
                    changed = True
                elif do_cert == "create":
                    # Install the certificate
                    params = {
                        'sourcePath': "file://" + filepath,
                        'name': name,
                        'partition': partition
                    }
                    api.tm.sys.file.ssl_certs.ssl_cert.create(**params)
                    changed = True

            if do_key:
                # Upload the content of a certificate key as a StringIO object
                kstring = StringIO.StringIO(key_content)
                filename = "%s.key" % (name)
                filepath = os.path.join(self.dlpath, filename)
                api.shared.file_transfer.uploads.upload_stringio(
                    kstring,
                    filename
                )

                if do_key == "update":
                    # Install the key
                    params = {
                        'name': name,
                        'partition': partition
                    }
                    key = api.tm.sys.file.ssl_keys.ssl_key.load(**params)

                    params = dict()
                    if passphrase:
                        params['passphrase'] = passphrase
                    else:
                        params['passphrase'] = None

                    key.update(**params)
                    changed = True
                elif do_key == "create":
                    # Install the key
                    params = {
                        'sourcePath': "file://" + filepath,
                        'name': name,
                        'partition': partition
                    }
                    if passphrase:
                        params['passphrase'] = self.params['passphrase']
                    else:
                        params['passphrase'] = None

                    api.tm.sys.file.ssl_keys.ssl_key.create(**params)
                    changed = True
        return changed

    def key_exists(self):
        """True when the named ssl-key exists in the partition."""
        return self.api.tm.sys.file.ssl_keys.ssl_key.exists(
            name=self.params['name'],
            partition=self.params['partition']
        )

    def cert_exists(self):
        """True when the named ssl-cert exists in the partition."""
        return self.api.tm.sys.file.ssl_certs.ssl_cert.exists(
            name=self.params['name'],
            partition=self.params['partition']
        )

    def read(self):
        """Return the current device state: name plus any checksums found."""
        p = dict()
        name = self.params['name']
        partition = self.params['partition']

        if self.key_exists():
            key = self.api.tm.sys.file.ssl_keys.ssl_key.load(
                name=name,
                partition=partition
            )
            if hasattr(key, 'checksum'):
                p['key_checksum'] = str(key.checksum)

        if self.cert_exists():
            cert = self.api.tm.sys.file.ssl_certs.ssl_cert.load(
                name=name,
                partition=partition
            )
            if hasattr(cert, 'checksum'):
                p['cert_checksum'] = str(cert.checksum)

        p['name'] = name
        return p

    def flush(self):
        """Apply the requested state and return the module result dict."""
        result = dict()
        state = self.params['state']

        try:
            if state == "present":
                changed = self.present()
            elif state == "absent":
                changed = self.absent()
        except iControlUnexpectedHTTPError as e:
            raise F5ModuleError(str(e))

        result.update(**self.cparams)
        result.update(dict(changed=changed))
        return result

    def absent(self):
        """Remove the pair when present; True when something was deleted."""
        changed = False

        if self.exists():
            changed = self.delete()

        return changed

    def delete(self):
        """Delete whichever of the cert/key objects currently exist."""
        changed = False
        check_mode = self.params['check_mode']

        delete_cert = self.cert_exists()
        delete_key = self.key_exists()

        if not delete_cert and not delete_key:
            return changed

        if check_mode:
            # BUGFIX: these values were previously read from undefined bare
            # locals `name`/`partition`, raising NameError in check mode.
            params = dict()
            params['cert_name'] = self.params['name']
            params['key_name'] = self.params['name']
            params['partition'] = self.params['partition']
            self.cparams = params
            return True

        # Delete both objects inside a single transaction.
        tx = self.api.tm.transactions.transaction
        with TransactionContextManager(tx) as api:
            if delete_cert:
                # Delete the certificate
                c = api.tm.sys.file.ssl_certs.ssl_cert.load(
                    name=self.params['name'],
                    partition=self.params['partition']
                )
                c.delete()
                changed = True

            if delete_key:
                # Delete the certificate key
                # BUGFIX: use the transaction-scoped `api` here (was
                # `self.api`, which silently bypassed the transaction).
                k = api.tm.sys.file.ssl_keys.ssl_key.load(
                    name=self.params['name'],
                    partition=self.params['partition']
                )
                k.delete()
                changed = True
        return changed
def main():
    """Entry point: build the argument spec, run the worker, report back."""
    argument_spec = f5_argument_spec()
    argument_spec.update(dict(
        name=dict(type='str', required=True),
        cert_content=dict(type='str', default=None),
        cert_src=dict(type='path', default=None),
        key_content=dict(type='str', default=None),
        key_src=dict(type='path', default=None),
        passphrase=dict(type='str', default=None, no_log=True)
    ))

    # Inline content and file-path sources are alternatives, never both.
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        mutually_exclusive=[
            ['key_content', 'key_src'],
            ['cert_content', 'cert_src']
        ]
    )

    try:
        manager = BigIpSslCertificate(check_mode=module.check_mode,
                                      **module.params)
        module.exit_json(**manager.flush())
    except F5ModuleError as e:
        module.fail_json(msg=str(e))
from ansible.module_utils.basic import *
from ansible.module_utils.f5 import *
if __name__ == '__main__':
main()
| gpl-3.0 |
pilou-/ansible | test/integration/targets/vault/test-vault-client.py | 139 | 1818 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
import argparse
import sys
# Hard-coded fixture table mapping vault-id -> password, mirroring the
# password files used by the vault integration tests.
# TODO: could read these from the files I suppose...
secrets = {'vault-password': 'test-vault-password',
           'vault-password-wrong': 'hunter42',
           'vault-password-ansible': 'ansible',
           'password': 'password',
           'vault-client-password-1': 'password-1',
           'vault-client-password-2': 'password-2'}
def build_arg_parser():
    """Construct the CLI parser for this test vault-password client."""
    parser = argparse.ArgumentParser(
        description='Get a vault password from user keyring')

    # (flags, kwargs) pairs keep the option table easy to scan and extend.
    options = [
        (['--vault-id'],
         dict(action='store', default=None, dest='vault_id',
              help='name of the vault secret to get from keyring')),
        (['--username'],
         dict(action='store', default=None,
              help='the username whose keyring is queried')),
        (['--set'],
         dict(action='store_true', default=False, dest='set_password',
              help='set the password instead of getting it')),
    ]
    for flags, kwargs in options:
        parser.add_argument(*flags, **kwargs)

    return parser
def get_secret(keyname):
    """Look up *keyname* in the fixture table; None when unknown."""
    return secrets.get(keyname)
def main():
    """Print the secret for the requested vault-id; return an exit code."""
    args = build_arg_parser().parse_args()
    keyname = args.vault_id or 'ansible'

    if args.set_password:
        print('--set is not supported yet')
        sys.exit(1)

    secret = get_secret(keyname)
    if secret is None:
        # Exit code 2 signals "key not found" to the caller.
        sys.stderr.write(
            'test-vault-client could not find key for vault-id="%s"\n' % keyname)
        return 2

    sys.stdout.write('%s\n' % secret)
    return 0
if __name__ == '__main__':
sys.exit(main())
| gpl-3.0 |
kohnle-lernmodule/palama | twisted/application/service.py | 14 | 9400 | # Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
#
"""Service architecture for Twisted
Services are arranged in a hierarchy. At the leafs of the hierarchy,
the services which actually interact with the outside world are started.
Services can be named or anonymous -- usually, they will be named if
there is need to access them through the hierarchy (from a parent or
a sibling).
API Stability: unstable
Maintainer: U{Moshe Zadka<mailto:moshez@twistedmatrix.com>}
"""
from zope.interface import implements
from twisted.python import components
from twisted.internet import defer
from twisted.persisted import sob
class IService(components.Interface):
    """
    A service.

    Run start-up and shut-down code at the appropriate times.

    @type name: C{string}
    @ivar name: The name of the service (or None)

    @type running: C{boolean}
    @ivar running: Whether the service is running.
    """

    def setName(self, name):
        """Set the name of the service.

        @type name: C{str}
        @raise L{RuntimeError}: Raised if the service already has a parent.
        """

    def setServiceParent(self, parent):
        """Set the parent of the service.

        @type parent: C{IServiceCollection}
        @raise L{RuntimeError}: Raised if the service already has a parent
        or if the service has a name and the parent already has a child
        by that name.
        """

    def disownServiceParent(self):
        """Remove the parent of the service.

        @rtype: C{Deferred}
        @return: a deferred which is triggered when the service has
        finished shutting down. If shutting down is immediate,
        a value can be returned (usually, None).
        """

    def startService(self):
        """Start the service."""

    def stopService(self):
        """Stop the service.

        @rtype: C{Deferred}
        @return: a deferred which is triggered when the service has
        finished shutting down. If shutting down is immediate,
        a value can be returned (usually, None).
        """

    def privilegedStartService(self):
        """Do preparation work for starting the service.

        Here things which should be done before changing directory,
        root or shedding privileges are done."""
class Service:
    """
    Base class for services.

    Most services should inherit from this class. It handles the
    book-keeping responsibilities of starting and stopping, as well
    as not serializing this book-keeping information.
    """

    implements(IService)

    running = 0
    name = None
    parent = None

    def __getstate__(self):
        """Serialize the instance without the transient 'running' flag."""
        # Use 'in' rather than the Python-2-only dict.has_key(), and avoid
        # shadowing the builtin name `dict`.
        state = self.__dict__.copy()
        if "running" in state:
            del state["running"]
        return state

    def setName(self, name):
        """Set the service name; disallowed once a parent is attached."""
        if self.parent is not None:
            raise RuntimeError("cannot change name when parent exists")
        self.name = name

    def setServiceParent(self, parent):
        """Attach to *parent*, detaching from any previous parent first."""
        if self.parent is not None:
            self.disownServiceParent()
        parent = IServiceCollection(parent, parent)
        self.parent = parent
        self.parent.addService(self)

    def disownServiceParent(self):
        """Detach from the parent; returns the parent's removal Deferred."""
        d = self.parent.removeService(self)
        self.parent = None
        return d

    def privilegedStartService(self):
        """Privileged start hook; no-op by default."""
        pass

    def startService(self):
        self.running = 1

    def stopService(self):
        self.running = 0
class IServiceCollection(components.Interface):
    """Collection of services.

    Contain several services, and manage their start-up/shut-down.
    Services can be accessed by name if they have a name, and it
    is always possible to iterate over them.
    """

    def getServiceNamed(self, name):
        """Get the child service with a given name.

        @type name: C{str}
        @rtype: C{IService}
        @raise L{KeyError}: Raised if the service has no child with the
        given name.
        """

    def __iter__(self):
        """Get an iterator over all child services"""

    def addService(self, service):
        """Add a child service.

        @type service: C{IService}
        @raise L{RuntimeError}: Raised if the service has a child with
        the given name.
        """

    def removeService(self, service):
        """Remove a child service.

        @type service: C{IService}
        @raise L{ValueError}: Raised if the given service is not a child.
        @rtype: C{Deferred}
        @return: a deferred which is triggered when the service has
        finished shutting down. If shutting down is immediate,
        a value can be returned (usually, None).
        """
class MultiService(Service):
    """Straightforward Service Container.

    Hold a collection of services, and manage them in a simplistic
    way. No service will wait for another, but this object itself
    will not finish shutting down until all of its child services
    will finish.
    """

    implements(IServiceCollection)

    def __init__(self):
        self.services = []
        self.namedServices = {}
        self.parent = None

    def privilegedStartService(self):
        Service.privilegedStartService(self)
        for service in self:
            service.privilegedStartService()

    def startService(self):
        Service.startService(self)
        for service in self:
            service.startService()

    def stopService(self):
        """Stop children in reverse start order; gather their Deferreds."""
        Service.stopService(self)
        l = []
        services = list(self)
        services.reverse()
        for service in services:
            l.append(defer.maybeDeferred(service.stopService))
        return defer.DeferredList(l)

    def getServiceNamed(self, name):
        return self.namedServices[name]

    def __iter__(self):
        return iter(self.services)

    def addService(self, service):
        if service.name is not None:
            # Use 'in' rather than the Python-2-only dict.has_key().
            if service.name in self.namedServices:
                raise RuntimeError("cannot have two services with same name"
                                   " '%s'" % service.name)
            self.namedServices[service.name] = service
        self.services.append(service)
        if self.running:
            # It may be too late for that, but we will do our best
            service.privilegedStartService()
            service.startService()

    def removeService(self, service):
        if service.name:
            del self.namedServices[service.name]
        self.services.remove(service)
        if self.running:
            # Returning this so as not to lose information from the
            # MultiService.stopService deferred.
            return service.stopService()
        else:
            return None
class IProcess(components.Interface):
    """Process running parameters.

    Represents parameters for how processes should be run.

    @ivar processName: the name the process should have in ps (or None)
    @type processName: C{str}

    @ivar uid: the user-id the process should run under.
    @type uid: C{int}

    @ivar gid: the group-id the process should run under.
    @type gid: C{int}
    """
class Process:
    """Holds the run-as parameters (process name, uid, gid) of a process.

    C{processName} defaults to C{None}; uid and gid are captured by the
    constructor.
    """
    implements(IProcess)
    processName = None

    def __init__(self, uid=None, gid=None):
        """Remember the uid/gid to run as.

        A C{None} value for either means no attempt will be made to
        change that ID when the process is executed.
        """
        self.gid = gid
        self.uid = uid
def Application(name, uid=None, gid=None):
    """Return a compound class.

    Return an object supporting the C{IService}, C{IServiceCollection},
    C{IProcess} and C{sob.IPersistable} interfaces, with the given
    parameters. Always access the return value by explicit casting to
    one of the interfaces.
    """
    ret = components.Componentized()
    # ignoreClass=1 registers each component for every interface it
    # provides rather than just its concrete class.
    for comp in (MultiService(), sob.Persistent(ret, name), Process(uid, gid)):
        ret.addComponent(comp, ignoreClass=1)
    IService(ret).setName(name)
    return ret
def loadApplication(filename, kind, passphrase=None):
    """Load Application from file

    @type filename: C{str}
    @type kind: C{str}
    @type passphrase: C{str}

    Load application from a given file. The serialization format it
    was saved in should be given as C{kind}, and is one of 'pickle', 'source',
    'xml' or 'python'. If C{passphrase} is given, the application was encrypted
    with the given passphrase.
    """
    if kind == 'python':
        application = sob.loadValueFromFile(filename, 'application', passphrase)
    else:
        application = sob.load(filename, kind, passphrase)
    # Older saved applications may not provide IService; wrap them via the
    # compatibility layer.
    if IService(application, None) is None:
        from twisted.application import compat
        application = compat.convert(application)
    return application
# add backwards compatible __implements__ attribute
components.backwardsCompatImplements(Service)
components.backwardsCompatImplements(MultiService)
components.backwardsCompatImplements(Process)
__all__ = ['IService', 'Service', 'IServiceCollection', 'MultiService',
'IProcess', 'Process', 'Application', 'loadApplication']
| gpl-2.0 |
tedelhourani/ansible | lib/ansible/modules/storage/netapp/netapp_e_amg.py | 29 | 10491 | #!/usr/bin/python
# (c) 2016, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: netapp_e_amg
short_description: Create, Remove, and Update Asynchronous Mirror Groups
description:
- Allows for the creation, removal and updating of Asynchronous Mirror Groups for NetApp E-series storage arrays
version_added: '2.2'
author: Kevin Hulquest (@hulquest)
extends_documentation_fragment:
- netapp.eseries
options:
name:
description:
- The name of the async array you wish to target, or create.
- If C(state) is present and the name isn't found, it will attempt to create.
required: yes
secondaryArrayId:
description:
- The ID of the secondary array to be used in mirroing process
required: yes
syncIntervalMinutes:
description:
- The synchronization interval in minutes
required: no
default: 10
manualSync:
description:
- Setting this to true will cause other synchronization values to be ignored
required: no
default: no
recoveryWarnThresholdMinutes:
description:
- Recovery point warning threshold (minutes). The user will be warned when the age of the last good failures point exceeds this value
required: no
default: 20
repoUtilizationWarnThreshold:
description:
- Recovery point warning threshold
required: no
default: 80
interfaceType:
description:
- The intended protocol to use if both Fibre and iSCSI are available.
choices:
- iscsi
- fibre
required: no
default: null
syncWarnThresholdMinutes:
description:
- The threshold (in minutes) for notifying the user that periodic synchronization has taken too long to complete.
required: no
default: 10
state:
description:
- A C(state) of present will either create or update the async mirror group.
- A C(state) of absent will remove the async mirror group.
required: yes
"""
EXAMPLES = """
- name: AMG removal
na_eseries_amg:
state: absent
ssid: "{{ ssid }}"
secondaryArrayId: "{{amg_secondaryArrayId}}"
api_url: "{{ netapp_api_url }}"
api_username: "{{ netapp_api_username }}"
api_password: "{{ netapp_api_password }}"
new_name: "{{amg_array_name}}"
name: "{{amg_name}}"
when: amg_create
- name: AMG create
netapp_e_amg:
state: present
ssid: "{{ ssid }}"
secondaryArrayId: "{{amg_secondaryArrayId}}"
api_url: "{{ netapp_api_url }}"
api_username: "{{ netapp_api_username }}"
api_password: "{{ netapp_api_password }}"
new_name: "{{amg_array_name}}"
name: "{{amg_name}}"
when: amg_create
"""
RETURN = """
msg:
description: Successful removal
returned: success
type: string
sample: "Async mirror group removed."
msg:
description: Successful creation
returned: success
type: string
sample: '{"changed": true, "connectionType": "fc", "groupRef": "3700000060080E5000299C24000006E857AC7EEC", "groupState": "optimal", "id": "3700000060080E5000299C24000006E857AC7EEC", "label": "amg_made_by_ansible", "localRole": "primary", "mirrorChannelRemoteTarget": "9000000060080E5000299C24005B06E557AC7EEC", "orphanGroup": false, "recoveryPointAgeAlertThresholdMinutes": 20, "remoteRole": "secondary", "remoteTarget": {"nodeName": {"ioInterfaceType": "fc", "iscsiNodeName": null, "remoteNodeWWN": "20040080E5299F1C"}, "remoteRef": "9000000060080E5000299C24005B06E557AC7EEC", "scsiinitiatorTargetBaseProperties": {"ioInterfaceType": "fc", "iscsiinitiatorTargetBaseParameters": null}}, "remoteTargetId": "ansible2", "remoteTargetName": "Ansible2", "remoteTargetWwn": "60080E5000299F880000000056A25D56", "repositoryUtilizationWarnThreshold": 80, "roleChangeProgress": "none", "syncActivity": "idle", "syncCompletionTimeAlertThresholdMinutes": 10, "syncIntervalMinutes": 10, "worldWideName": "60080E5000299C24000006E857AC7EEC"}'
""" # NOQA
import json
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
from ansible.module_utils.netapp import request, eseries_host_argument_spec
HEADERS = {
"Content-Type": "application/json",
"Accept": "application/json",
}
def has_match(module, ssid, api_url, api_pwd, api_usr, body):
    """Look up the async mirror group named in *body* on the array.

    Returns a 4-tuple: (label_exists, matches_spec, api_data, async_id),
    where matches_spec is True when the existing group already has the
    desired tunable values.
    """
    compare_keys = ['syncIntervalMinutes', 'syncWarnThresholdMinutes',
                    'recoveryWarnThresholdMinutes', 'repoUtilizationWarnThreshold']
    desired_state = dict((x, (body.get(x))) for x in compare_keys)
    label_exists = False
    matches_spec = False
    current_state = None
    async_id = None
    api_data = None
    desired_name = body.get('name')
    endpoint = 'storage-systems/%s/async-mirrors' % ssid
    url = api_url + endpoint
    try:
        rc, data = request(url, url_username=api_usr, url_password=api_pwd, headers=HEADERS)
    except Exception as e:
        # BUGFIX: was module.exit_json(), which reported success on failure.
        module.fail_json(msg="Error finding a match. Message: %s" % to_native(e), exception=traceback.format_exc())

    for async_group in data:
        if async_group['label'] == desired_name:
            label_exists = True
            api_data = async_group
            async_id = async_group['groupRef']
            current_state = dict(
                syncIntervalMinutes=async_group['syncIntervalMinutes'],
                syncWarnThresholdMinutes=async_group['syncCompletionTimeAlertThresholdMinutes'],
                recoveryWarnThresholdMinutes=async_group['recoveryPointAgeAlertThresholdMinutes'],
                repoUtilizationWarnThreshold=async_group['repositoryUtilizationWarnThreshold'],
            )

    if current_state == desired_state:
        matches_spec = True

    return label_exists, matches_spec, api_data, async_id
def create_async(module, ssid, api_url, api_pwd, api_usr, body):
    """POST a new async mirror group; return the created group's data."""
    endpoint = 'storage-systems/%s/async-mirrors' % ssid
    url = api_url + endpoint
    post_data = json.dumps(body)
    try:
        rc, data = request(url, data=post_data, method='POST', url_username=api_usr, url_password=api_pwd,
                           headers=HEADERS)
    except Exception as e:
        # BUGFIX: was module.exit_json(), which reported success on failure;
        # also fixed the "aysnc" typo in the message.
        module.fail_json(msg="Exception while creating async mirror group. Message: %s" % to_native(e),
                         exception=traceback.format_exc())
    return data
def update_async(module, ssid, api_url, pwd, user, body, new_name, async_id):
    """POST updated tunables (and optional rename) to an existing group."""
    endpoint = 'storage-systems/%s/async-mirrors/%s' % (ssid, async_id)
    url = api_url + endpoint
    compare_keys = ['syncIntervalMinutes', 'syncWarnThresholdMinutes',
                    'recoveryWarnThresholdMinutes', 'repoUtilizationWarnThreshold']
    desired_state = dict((x, (body.get(x))) for x in compare_keys)

    if new_name:
        desired_state['new_name'] = new_name

    post_data = json.dumps(desired_state)

    try:
        rc, data = request(url, data=post_data, method='POST', headers=HEADERS,
                           url_username=user, url_password=pwd)
    except Exception as e:
        # BUGFIX: was module.exit_json(), which reported success on failure.
        module.fail_json(msg="Exception while updating async mirror group. Message: %s" % to_native(e),
                         exception=traceback.format_exc())

    return data
def remove_amg(module, ssid, api_url, pwd, user, async_id):
    """DELETE the async mirror group identified by *async_id*."""
    endpoint = 'storage-systems/%s/async-mirrors/%s' % (ssid, async_id)
    url = api_url + endpoint
    try:
        rc, data = request(url, method='DELETE', url_username=user, url_password=pwd,
                           headers=HEADERS)
    except Exception as e:
        # BUGFIX: was module.exit_json(), which reported success on failure.
        module.fail_json(msg="Exception while removing async mirror group. Message: %s" % to_native(e),
                         exception=traceback.format_exc())

    return
def main():
    """Module entry point: reconcile the AMG toward the requested state."""
    # Base spec shared by all E-Series modules (api_url, credentials, ssid).
    argument_spec = eseries_host_argument_spec()
    argument_spec.update(dict(
        name=dict(required=True, type='str'),
        new_name=dict(required=False, type='str'),
        secondaryArrayId=dict(required=True, type='str'),
        syncIntervalMinutes=dict(required=False, default=10, type='int'),
        manualSync=dict(required=False, default=False, type='bool'),
        recoveryWarnThresholdMinutes=dict(required=False, default=20, type='int'),
        repoUtilizationWarnThreshold=dict(required=False, default=80, type='int'),
        interfaceType=dict(required=False, choices=['fibre', 'iscsi'], type='str'),
        state=dict(required=True, choices=['present', 'absent']),
        syncWarnThresholdMinutes=dict(required=False, default=10, type='int')
    ))

    module = AnsibleModule(argument_spec=argument_spec)
    p = module.params

    # Pop connection/bookkeeping values so `p` holds only the AMG body.
    ssid = p.pop('ssid')
    api_url = p.pop('api_url')
    user = p.pop('api_username')
    pwd = p.pop('api_password')
    new_name = p.pop('new_name')
    state = p.pop('state')

    if not api_url.endswith('/'):
        api_url += '/'

    name_exists, spec_matches, api_data, async_id = has_match(module, ssid, api_url, pwd, user, p)

    if state == 'present':
        if name_exists and spec_matches:
            module.exit_json(changed=False, msg="Desired state met", **api_data)
        elif name_exists and not spec_matches:
            results = update_async(module, ssid, api_url, pwd, user,
                                   p, new_name, async_id)
            module.exit_json(changed=True,
                             msg="Async mirror group updated", async_id=async_id,
                             **results)
        elif not name_exists:
            # BUGFIX: credentials were passed as (user, pwd), but
            # create_async's signature is (..., api_pwd, api_usr, body);
            # pass (pwd, user) like every other call site.
            results = create_async(module, ssid, api_url, pwd, user, p)
            module.exit_json(changed=True, **results)

    elif state == 'absent':
        if name_exists:
            remove_amg(module, ssid, api_url, pwd, user, async_id)
            module.exit_json(changed=True, msg="Async mirror group removed.",
                             async_id=async_id)
        else:
            module.exit_json(changed=False,
                             msg="Async Mirror group: %s already absent" % p['name'])
| gpl-3.0 |
danilito19/django | tests/admin_inlines/admin.py | 293 | 5354 | from django import forms
from django.contrib import admin
from .models import (
Author, BinaryTree, CapoFamiglia, Chapter, ChildModel1, ChildModel2,
Consigliere, EditablePKBook, ExtraTerrestrial, Fashionista, Holder,
Holder2, Holder3, Holder4, Inner, Inner2, Inner3, Inner4Stacked,
Inner4Tabular, NonAutoPKBook, Novel, ParentModelWithCustomPk, Poll,
Profile, ProfileCollection, Question, ReadOnlyInline, ShoppingWeakness,
Sighting, SomeChildModel, SomeParentModel, SottoCapo, Title,
TitleCollection,
)
site = admin.AdminSite(name="admin")
class BookInline(admin.TabularInline):
    # Inline over the M2M through table rather than the Book model itself.
    model = Author.books.through


class NonAutoPKBookTabularInline(admin.TabularInline):
    model = NonAutoPKBook


class NonAutoPKBookStackedInline(admin.StackedInline):
    model = NonAutoPKBook


class EditablePKBookTabularInline(admin.TabularInline):
    model = EditablePKBook


class EditablePKBookStackedInline(admin.StackedInline):
    model = EditablePKBook


class AuthorAdmin(admin.ModelAdmin):
    # Exercises tabular and stacked inlines with non-auto and editable PKs.
    inlines = [BookInline,
               NonAutoPKBookTabularInline, NonAutoPKBookStackedInline,
               EditablePKBookTabularInline, EditablePKBookStackedInline]


class InnerInline(admin.StackedInline):
    model = Inner
    can_delete = False
    readonly_fields = ('readonly',)  # For bug #13174 tests.


class HolderAdmin(admin.ModelAdmin):

    class Media:
        # ModelAdmin-level media only (see media-inheritance tests below).
        js = ('my_awesome_admin_scripts.js',)


class ReadOnlyInlineInline(admin.TabularInline):
    model = ReadOnlyInline
    readonly_fields = ['name']


class InnerInline2(admin.StackedInline):
    model = Inner2

    class Media:
        js = ('my_awesome_inline_scripts.js',)


class InnerInline3(admin.StackedInline):
    model = Inner3

    class Media:
        js = ('my_awesome_inline_scripts.js',)


class TitleForm(forms.ModelForm):

    def clean(self):
        # Cross-field validation: the two title fields must agree.
        cleaned_data = self.cleaned_data
        title1 = cleaned_data.get("title1")
        title2 = cleaned_data.get("title2")
        if title1 != title2:
            raise forms.ValidationError("The two titles must be the same")
        return cleaned_data


class TitleInline(admin.TabularInline):
    model = Title
    form = TitleForm
    extra = 1
class Inner4StackedInline(admin.StackedInline):
    model = Inner4Stacked
    show_change_link = True


class Inner4TabularInline(admin.TabularInline):
    model = Inner4Tabular
    show_change_link = True


class Holder4Admin(admin.ModelAdmin):
    inlines = [Inner4StackedInline, Inner4TabularInline]


class InlineWeakness(admin.TabularInline):
    model = ShoppingWeakness
    extra = 1


class QuestionInline(admin.TabularInline):
    model = Question
    readonly_fields = ['call_me']

    def call_me(self, obj):
        # Distinct return values let tests tell which call_me was used.
        return 'Callable in QuestionInline'


class PollAdmin(admin.ModelAdmin):
    inlines = [QuestionInline]

    def call_me(self, obj):
        return 'Callable in PollAdmin'


class ChapterInline(admin.TabularInline):
    model = Chapter
    readonly_fields = ['call_me']

    def call_me(self, obj):
        return 'Callable in ChapterInline'


class NovelAdmin(admin.ModelAdmin):
    inlines = [ChapterInline]


class ConsigliereInline(admin.TabularInline):
    model = Consigliere


class SottoCapoInline(admin.TabularInline):
    model = SottoCapo


class ProfileInline(admin.TabularInline):
    model = Profile
    extra = 1


# admin for #18433
class ChildModel1Inline(admin.TabularInline):
    model = ChildModel1


class ChildModel2Inline(admin.StackedInline):
    model = ChildModel2


# admin for #19425 and #18388
class BinaryTreeAdmin(admin.TabularInline):
    model = BinaryTree

    def get_extra(self, request, obj=None, **kwargs):
        # Show up to two blank forms, reduced by existing children.
        extra = 2
        if obj:
            return extra - obj.binarytree_set.count()
        return extra

    def get_max_num(self, request, obj=None, **kwargs):
        # Cap the total number of forms at three, counting existing children.
        max_num = 3
        if obj:
            return max_num - obj.binarytree_set.count()
        return max_num


# admin for #19524
class SightingInline(admin.TabularInline):
    model = Sighting


# admin and form for #18263
class SomeChildModelForm(forms.ModelForm):

    class Meta:
        fields = '__all__'
        model = SomeChildModel
        widgets = {
            'position': forms.HiddenInput,
        }


class SomeChildModelInline(admin.TabularInline):
    model = SomeChildModel
    form = SomeChildModelForm
# Wire every ModelAdmin/inline combination into the test AdminSite.
site.register(TitleCollection, inlines=[TitleInline])
# Test bug #12561 and #12778
# only ModelAdmin media
site.register(Holder, HolderAdmin, inlines=[InnerInline])
# ModelAdmin and Inline media
site.register(Holder2, HolderAdmin, inlines=[InnerInline2])
# only Inline media
site.register(Holder3, inlines=[InnerInline3])
site.register(Poll, PollAdmin)
site.register(Novel, NovelAdmin)
site.register(Fashionista, inlines=[InlineWeakness])
site.register(Holder4, Holder4Admin)
site.register(Author, AuthorAdmin)
site.register(CapoFamiglia, inlines=[ConsigliereInline, SottoCapoInline, ReadOnlyInlineInline])
site.register(ProfileCollection, inlines=[ProfileInline])
site.register(ParentModelWithCustomPk, inlines=[ChildModel1Inline, ChildModel2Inline])
site.register(BinaryTree, inlines=[BinaryTreeAdmin])
site.register(ExtraTerrestrial, inlines=[SightingInline])
site.register(SomeParentModel, inlines=[SomeChildModelInline])
# Models registered without a custom ModelAdmin.
site.register([Question, Inner4Stacked, Inner4Tabular])
| bsd-3-clause |
mano3m/CouchPotatoServer | libs/rtorrent/lib/xmlrpc/http.py | 180 | 1195 | # Copyright (c) 2013 Chris Lucas, <chris@chrisjlucas.com>
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from rtorrent.compat import xmlrpclib

# Plain-HTTP transport needs no customisation: the standard xmlrpclib
# ServerProxy already speaks XML-RPC over HTTP, so it is simply re-exported
# under this module's naming scheme.
HTTPServerProxy = xmlrpclib.ServerProxy
| gpl-3.0 |
nomad-vino/SPSE-1 | Module 4/4.3.py | 1 | 1323 | #!/usr/bin/python
# NOTE(review): Python 2 script (print statements, urllib.urlopen); it will
# not run unmodified under Python 3.

# Course banner.
print " __ "
print " |__|____ ___ __ "
print " | \__ \\\\ \/ / "
print " | |/ __ \\\\ / "
print " /\__| (____ /\_/ "
print " \______| \/ "
print " "
print 'Module 4'
print 'Attacking Web Applications'
print 'Part 3'
print
"""
Coding a screen scraper
"""
import urllib
from bs4 import BeautifulSoup

# Download the page to scrape.
url ="http://securitytube.net/video/3000"
response = urllib.urlopen(url)
html = response.read()
# Parse the raw HTML with the lxml backend.
bs = BeautifulSoup(html, 'lxml')
# NOTE(review): an earlier comment claimed BeautifulSoup(html, 'lxml').read()
# was equivalent -- it is not; .read() belongs to the response object, not to
# the BeautifulSoup object.
# find the description paragraph and its content
#description = bs.find('div', id='description')
description = bs.find('p', align='justify')
print description
print
print description.string
print
print description.get_text()
# all links in description
print description.find_all('a')
print
# get a video link
videoLink = bs.find('iframe', {'title':'YouTube video player'})  # could add multiple characteristics to the dictionary in case one of them was shared by multiple objects
print videoLink
print videoLink['src']
print
# find all the form fields
forms = bs.find_all('form')
print forms
# not such a good way to enumerate form fields -- fine for this demo
| gpl-3.0 |
40023255/-w16b_test | static/Brython3.1.3-20150514-095342/Lib/unittest/test/support.py | 770 | 3379 | import unittest
class TestEquality(object):
"""Used as a mixin for TestCase"""
# Check for a valid __eq__ implementation
def test_eq(self):
for obj_1, obj_2 in self.eq_pairs:
self.assertEqual(obj_1, obj_2)
self.assertEqual(obj_2, obj_1)
# Check for a valid __ne__ implementation
def test_ne(self):
for obj_1, obj_2 in self.ne_pairs:
self.assertNotEqual(obj_1, obj_2)
self.assertNotEqual(obj_2, obj_1)
class TestHashing(object):
"""Used as a mixin for TestCase"""
# Check for a valid __hash__ implementation
def test_hash(self):
for obj_1, obj_2 in self.eq_pairs:
try:
if not hash(obj_1) == hash(obj_2):
self.fail("%r and %r do not hash equal" % (obj_1, obj_2))
except KeyboardInterrupt:
raise
except Exception as e:
self.fail("Problem hashing %r and %r: %s" % (obj_1, obj_2, e))
for obj_1, obj_2 in self.ne_pairs:
try:
if hash(obj_1) == hash(obj_2):
self.fail("%s and %s hash equal, but shouldn't" %
(obj_1, obj_2))
except KeyboardInterrupt:
raise
except Exception as e:
self.fail("Problem hashing %s and %s: %s" % (obj_1, obj_2, e))
class LoggingResult(unittest.TestResult):
    """A TestResult that appends the name of every event to a log list.

    The caller passes in (and keeps a reference to) ``log``; after running
    tests against this result it can assert on the exact sequence of
    events, e.g. ``['startTest', 'addSuccess', 'stopTest']``.  Every
    override records its event name first and then delegates to
    ``unittest.TestResult`` so the normal bookkeeping (``testsRun``,
    ``failures``, ...) still happens.

    (Cleanup: the original mixed zero-argument ``super()`` with the legacy
    ``super(LoggingResult, self)`` form; all calls now use the modern
    zero-argument form -- behavior is unchanged.)
    """

    def __init__(self, log):
        # The log list is shared with the caller, not copied.
        self._events = log
        super().__init__()

    def startTest(self, test):
        self._events.append('startTest')
        super().startTest(test)

    def startTestRun(self):
        self._events.append('startTestRun')
        super().startTestRun()

    def stopTest(self, test):
        self._events.append('stopTest')
        super().stopTest(test)

    def stopTestRun(self):
        self._events.append('stopTestRun')
        super().stopTestRun()

    def addFailure(self, *args):
        self._events.append('addFailure')
        super().addFailure(*args)

    def addSuccess(self, *args):
        self._events.append('addSuccess')
        super().addSuccess(*args)

    def addError(self, *args):
        self._events.append('addError')
        super().addError(*args)

    def addSkip(self, *args):
        self._events.append('addSkip')
        super().addSkip(*args)

    def addExpectedFailure(self, *args):
        self._events.append('addExpectedFailure')
        super().addExpectedFailure(*args)

    def addUnexpectedSuccess(self, *args):
        self._events.append('addUnexpectedSuccess')
        super().addUnexpectedSuccess(*args)
class ResultWithNoStartTestRunStopTestRun(object):
    """An object honouring TestResult before startTestRun/stopTestRun.

    Mimics the public surface of an old-style ``unittest.TestResult``
    that predates the startTestRun()/stopTestRun() hooks: the bookkeeping
    attributes exist, the event methods accept calls but record nothing,
    and the run always reports success.
    """

    # Names of the list-valued bookkeeping attributes on a TestResult.
    _LIST_ATTRS = ("failures", "errors", "skipped",
                   "expectedFailures", "unexpectedSuccesses")

    def __init__(self):
        for attr in self._LIST_ATTRS:
            setattr(self, attr, [])
        self.testsRun = 0
        self.shouldStop = False

    # Event hooks are deliberate no-ops: this double only needs to expose
    # the pre-startTestRun/stopTestRun API surface.
    def startTest(self, test):
        pass

    def stopTest(self, test):
        pass

    def addError(self, test):
        pass

    def addFailure(self, test):
        pass

    def addSuccess(self, test):
        pass

    def wasSuccessful(self):
        return True
| agpl-3.0 |
camptocamp/QGIS | python/plugins/processing/algs/ftools/ExtentFromLayer.py | 1 | 5382 | # -*- coding: utf-8 -*-
"""
***************************************************************************
ExtentFromLayer.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from PyQt4.QtCore import *
from qgis.core import *
from processing.core.GeoAlgorithm import GeoAlgorithm
from processing.tools import dataobjects, vector
from processing.parameters.ParameterVector import ParameterVector
from processing.parameters.ParameterBoolean import ParameterBoolean
from processing.outputs.OutputVector import OutputVector
class ExtentFromLayer(GeoAlgorithm):
    """Writes the bounding box of a layer (or of each of its features) to a
    polygon output layer.

    Each output feature carries the rectangle statistics as attributes:
    MINX/MINY/MAXX/MAXY, centre (CNTX/CNTY), AREA, PERIM, HEIGHT and WIDTH.
    """

    INPUT_LAYER = "INPUT_LAYER"
    BY_FEATURE = "BY_FEATURE"
    OUTPUT = "OUTPUT"

    def defineCharacteristics(self):
        self.name = "Polygon from layer extent"
        self.group = "Vector general tools"
        self.addParameter(ParameterVector(self.INPUT_LAYER, "Input layer",
                                          [ParameterVector.VECTOR_TYPE_ANY]))
        self.addParameter(ParameterBoolean(
            self.BY_FEATURE,
            "Calculate extent for each feature separately", False))
        self.addOutput(OutputVector(self.OUTPUT, "Output layer"))

    def processAlgorithm(self, progress):
        layer = dataobjects.getObjectFromUri(
            self.getParameterValue(self.INPUT_LAYER))
        byFeature = self.getParameterValue(self.BY_FEATURE)

        # Field order must match the attribute order in _rectFeature().
        fields = [QgsField(name, QVariant.Double)
                  for name in ("MINX", "MINY", "MAXX", "MAXY", "CNTX",
                               "CNTY", "AREA", "PERIM", "HEIGHT", "WIDTH")]

        writer = self.getOutputFromName(self.OUTPUT).getVectorWriter(
            fields, QGis.WKBPolygon, layer.crs())

        if byFeature:
            self.featureExtent(layer, writer, progress)
        else:
            self.layerExtent(layer, writer, progress)

        # Deleting the writer flushes and closes the output file.
        del writer

    @staticmethod
    def _rectFeature(rect):
        """Return a QgsFeature whose geometry is *rect* and whose attributes
        are the rectangle statistics, in the same order as the output fields.
        """
        minx = rect.xMinimum()
        miny = rect.yMinimum()
        maxx = rect.xMaximum()
        maxy = rect.yMaximum()
        height = rect.height()
        width = rect.width()
        cntx = minx + (width / 2.0)
        cnty = miny + (height / 2.0)
        area = width * height
        perim = (2 * width) + (2 * height)

        # Closed ring tracing the rectangle, starting and ending at the
        # lower-left corner.
        ring = [QgsPoint(minx, miny),
                QgsPoint(minx, maxy),
                QgsPoint(maxx, maxy),
                QgsPoint(maxx, miny),
                QgsPoint(minx, miny)]

        feat = QgsFeature()
        feat.setGeometry(QgsGeometry().fromPolygon([ring]))
        feat.setAttributes([minx, miny, maxx, maxy, cntx, cnty, area, perim,
                            height, width])
        return feat

    def layerExtent(self, layer, writer, progress):
        """Write a single feature covering the whole layer extent."""
        writer.addFeature(self._rectFeature(layer.extent()))

    def featureExtent(self, layer, writer, progress):
        """Write one bounding-box feature per input feature."""
        features = vector.features(layer)
        count = len(features)
        if not count:
            # An empty layer previously raised ZeroDivisionError while
            # computing the progress step.
            return
        step = 100.0 / float(count)
        for current, f in enumerate(features, start=1):
            writer.addFeature(self._rectFeature(f.geometry().boundingBox()))
            progress.setPercentage(int(current * step))
ajnelson/ceph | src/gtest/test/gtest_list_tests_unittest.py | 1068 | 5415 | #!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for Google Test's --gtest_list_tests flag.
A user can ask Google Test to list all tests by specifying the
--gtest_list_tests flag. This script tests such functionality
by invoking gtest_list_tests_unittest_ (a program written with
Google Test) the command line flags.
"""
__author__ = 'phanna@google.com (Patrick Hanna)'
import gtest_test_utils
# Constants.
# The command line flag for enabling/disabling listing all tests.
LIST_TESTS_FLAG = 'gtest_list_tests'
# Path to the gtest_list_tests_unittest_ program.
EXE_PATH = gtest_test_utils.GetTestExecutablePath('gtest_list_tests_unittest_')
# The expected output when running gtest_list_tests_unittest_ with
# --gtest_list_tests
EXPECTED_OUTPUT_NO_FILTER = """FooDeathTest.
Test1
Foo.
Bar1
Bar2
DISABLED_Bar3
Abc.
Xyz
Def
FooBar.
Baz
FooTest.
Test1
DISABLED_Test2
Test3
"""
# The expected output when running gtest_list_tests_unittest_ with
# --gtest_list_tests and --gtest_filter=Foo*.
EXPECTED_OUTPUT_FILTER_FOO = """FooDeathTest.
Test1
Foo.
Bar1
Bar2
DISABLED_Bar3
FooBar.
Baz
FooTest.
Test1
DISABLED_Test2
Test3
"""
# Utilities.
def Run(args):
    """Runs gtest_list_tests_unittest_ and returns the list of tests printed."""
    command = [EXE_PATH] + args
    process = gtest_test_utils.Subprocess(command, capture_stderr=False)
    return process.output
# The unit test.
class GTestListTestsUnitTest(gtest_test_utils.TestCase):
    """Tests using the --gtest_list_tests flag to list all tests."""

    def RunAndVerify(self, flag_value, expected_output, other_flag):
        """Runs gtest_list_tests_unittest_ and verifies that it prints
        the correct tests.

        Args:
          flag_value:      value of the --gtest_list_tests flag;
                           None if the flag should not be present.

          expected_output: the expected output after running command;
                           None means only check that the output differs
                           from the full unfiltered listing.

          other_flag:      a different flag to be passed to command
                           along with gtest_list_tests;
                           None if the flag should not be present.
        """
        if flag_value is None:
            flag = ''
            flag_expression = 'not set'
        elif flag_value == '0':
            flag = '--%s=0' % LIST_TESTS_FLAG
            flag_expression = '0'
        else:
            flag = '--%s' % LIST_TESTS_FLAG
            flag_expression = '1'

        args = [flag]

        if other_flag is not None:
            args += [other_flag]

        output = Run(args)

        msg = ('when %s is %s, the output of "%s" is "%s".' %
               (LIST_TESTS_FLAG, flag_expression, ' '.join(args), output))

        # self.assert_ was a deprecated alias of assertTrue and was removed
        # in Python 3.12; use the canonical name.
        if expected_output is not None:
            self.assertTrue(output == expected_output, msg)
        else:
            self.assertTrue(output != EXPECTED_OUTPUT_NO_FILTER, msg)

    def testDefaultBehavior(self):
        """Tests the behavior of the default mode."""
        self.RunAndVerify(flag_value=None,
                          expected_output=None,
                          other_flag=None)

    def testFlag(self):
        """Tests using the --gtest_list_tests flag."""
        self.RunAndVerify(flag_value='0',
                          expected_output=None,
                          other_flag=None)
        self.RunAndVerify(flag_value='1',
                          expected_output=EXPECTED_OUTPUT_NO_FILTER,
                          other_flag=None)

    def testOverrideNonFilterFlags(self):
        """Tests that --gtest_list_tests overrides the non-filter flags."""
        self.RunAndVerify(flag_value='1',
                          expected_output=EXPECTED_OUTPUT_NO_FILTER,
                          other_flag='--gtest_break_on_failure')

    def testWithFilterFlags(self):
        """Tests that --gtest_list_tests takes into account the
        --gtest_filter flag."""
        self.RunAndVerify(flag_value='1',
                          expected_output=EXPECTED_OUTPUT_FILTER_FOO,
                          other_flag='--gtest_filter=Foo*')
# Let the shared gtest Python test-runner drive this module's test cases.
if __name__ == '__main__':
    gtest_test_utils.Main()
| lgpl-2.1 |
TNT-Samuel/Coding-Projects | DNS Server/Source - Copy/Lib/lib2to3/pgen2/conv.py | 134 | 9642 | # Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Convert graminit.[ch] spit out by pgen to Python code.
Pgen is the Python parser generator. It is useful to quickly create a
parser from a grammar file in Python's grammar notation. But I don't
want my parsers to be written in C (yet), so I'm translating the
parsing tables to Python data structures and writing a Python parse
engine.
Note that the token numbers are constants determined by the standard
Python tokenizer. The standard token module defines these numbers and
their names (the names are not used much). The token numbers are
hardcoded into the Python tokenizer and into pgen. A Python
implementation of the Python tokenizer is also available, in the
standard tokenize module.
On the other hand, symbol numbers (representing the grammar's
non-terminals) are assigned by pgen based on the actual grammar
input.
Note: this module is pretty much obsolete; the pgen module generates
equivalent grammar tables directly from the Grammar.txt input file
without having to invoke the Python pgen C program.
"""
# Python imports
import re
# Local imports
from pgen2 import grammar, token
class Converter(grammar.Grammar):
    """Grammar subclass that reads classic pgen output files.

    The run() method reads the tables as produced by the pgen parser
    generator, typically contained in two C files, graminit.h and
    graminit.c.  The other methods are for internal use only.

    See the base class for more documentation.
    """

    def run(self, graminit_h, graminit_c):
        """Load the grammar tables from the text files written by pgen."""
        self.parse_graminit_h(graminit_h)
        self.parse_graminit_c(graminit_c)
        self.finish_off()

    def parse_graminit_h(self, filename):
        """Parse the .h file written by pgen.  (Internal)

        This file is a sequence of #define statements defining the
        nonterminals of the grammar as numbers.  We build two tables
        mapping the numbers to names and back.

        Returns True on success, False if the file could not be opened.
        """
        try:
            f = open(filename)
        except OSError as err:
            print("Can't open %s: %s" % (filename, err))
            return False
        self.symbol2number = {}
        self.number2symbol = {}
        lineno = 0
        with f:  # ensure the file handle is closed (it previously leaked)
            for line in f:
                lineno += 1
                mo = re.match(r"^#define\s+(\w+)\s+(\d+)$", line)
                if not mo:
                    # Report unparseable lines; skip blank lines silently.
                    # (Previously a blank line fell through to mo.groups()
                    # and crashed with AttributeError on None.)
                    if line.strip():
                        print("%s(%s): can't parse %s" % (filename, lineno,
                                                          line.strip()))
                    continue
                symbol, number = mo.groups()
                number = int(number)
                assert symbol not in self.symbol2number
                assert number not in self.number2symbol
                self.symbol2number[symbol] = number
                self.number2symbol[number] = symbol
        return True

    def parse_graminit_c(self, filename):
        """Parse the .c file written by pgen.  (Internal)

        The file looks as follows.  The first two lines are always this:

        #include "pgenheaders.h"
        #include "grammar.h"

        After that come four blocks:

        1) one or more state definitions
        2) a table defining dfas
        3) a table defining labels
        4) a struct defining the grammar

        A state definition has the following form:
        - one or more arc arrays, each of the form:
          static arc arcs_<n>_<m>[<k>] = {
                  {<i>, <j>},
                  ...
          };
        - followed by a state array, of the form:
          static state states_<s>[<t>] = {
                  {<k>, arcs_<n>_<m>},
                  ...
          };
        """
        try:
            f = open(filename)
        except OSError as err:
            print("Can't open %s: %s" % (filename, err))
            return False
        with f:  # ensure the file handle is closed (it previously leaked)
            # The code below essentially uses f's iterator-ness!
            lineno = 0

            # Expect the two #include lines
            lineno, line = lineno+1, next(f)
            assert line == '#include "pgenheaders.h"\n', (lineno, line)
            lineno, line = lineno+1, next(f)
            assert line == '#include "grammar.h"\n', (lineno, line)

            # Parse the state definitions.  The outer loop handles one state
            # group per iteration: its arc arrays followed by the states array.
            lineno, line = lineno+1, next(f)
            allarcs = {}
            states = []
            while line.startswith("static arc "):
                while line.startswith("static arc "):
                    mo = re.match(r"static arc arcs_(\d+)_(\d+)\[(\d+)\] = {$",
                                  line)
                    assert mo, (lineno, line)
                    n, m, k = list(map(int, mo.groups()))
                    arcs = []
                    for _ in range(k):
                        lineno, line = lineno+1, next(f)
                        mo = re.match(r"\s+{(\d+), (\d+)},$", line)
                        assert mo, (lineno, line)
                        i, j = list(map(int, mo.groups()))
                        arcs.append((i, j))
                    lineno, line = lineno+1, next(f)
                    assert line == "};\n", (lineno, line)
                    allarcs[(n, m)] = arcs
                    lineno, line = lineno+1, next(f)
                mo = re.match(r"static state states_(\d+)\[(\d+)\] = {$", line)
                assert mo, (lineno, line)
                s, t = list(map(int, mo.groups()))
                assert s == len(states), (lineno, line)
                state = []
                for _ in range(t):
                    lineno, line = lineno+1, next(f)
                    mo = re.match(r"\s+{(\d+), arcs_(\d+)_(\d+)},$", line)
                    assert mo, (lineno, line)
                    k, n, m = list(map(int, mo.groups()))
                    arcs = allarcs[n, m]
                    assert k == len(arcs), (lineno, line)
                    state.append(arcs)
                states.append(state)
                lineno, line = lineno+1, next(f)
                assert line == "};\n", (lineno, line)
                lineno, line = lineno+1, next(f)
            self.states = states

            # Parse the dfas
            dfas = {}
            mo = re.match(r"static dfa dfas\[(\d+)\] = {$", line)
            assert mo, (lineno, line)
            ndfas = int(mo.group(1))
            for _ in range(ndfas):
                lineno, line = lineno+1, next(f)
                mo = re.match(r'\s+{(\d+), "(\w+)", (\d+), (\d+), states_(\d+),$',
                              line)
                assert mo, (lineno, line)
                symbol = mo.group(2)
                number, x, y, z = list(map(int, mo.group(1, 3, 4, 5)))
                assert self.symbol2number[symbol] == number, (lineno, line)
                assert self.number2symbol[number] == symbol, (lineno, line)
                assert x == 0, (lineno, line)
                state = states[z]
                assert y == len(state), (lineno, line)
                lineno, line = lineno+1, next(f)
                mo = re.match(r'\s+("(?:\\\d\d\d)*")},$', line)
                assert mo, (lineno, line)
                first = {}
                rawbitset = eval(mo.group(1))
                # Decode the first-set bitset: bit (bytenum*8 + j) set means
                # the label with that index is in the first set of this dfa.
                # (The loop variable used to shadow the enclosing dfa index.)
                for bytenum, ch in enumerate(rawbitset):
                    byte = ord(ch)
                    for j in range(8):
                        if byte & (1 << j):
                            first[bytenum*8 + j] = 1
                dfas[number] = (state, first)
            lineno, line = lineno+1, next(f)
            assert line == "};\n", (lineno, line)
            self.dfas = dfas

            # Parse the labels
            labels = []
            lineno, line = lineno+1, next(f)
            mo = re.match(r"static label labels\[(\d+)\] = {$", line)
            assert mo, (lineno, line)
            nlabels = int(mo.group(1))
            for _ in range(nlabels):
                lineno, line = lineno+1, next(f)
                mo = re.match(r'\s+{(\d+), (0|"\w+")},$', line)
                assert mo, (lineno, line)
                x, y = mo.groups()
                x = int(x)
                if y == "0":
                    y = None
                else:
                    y = eval(y)
                labels.append((x, y))
            lineno, line = lineno+1, next(f)
            assert line == "};\n", (lineno, line)
            self.labels = labels

            # Parse the grammar struct
            lineno, line = lineno+1, next(f)
            assert line == "grammar _PyParser_Grammar = {\n", (lineno, line)
            lineno, line = lineno+1, next(f)
            mo = re.match(r"\s+(\d+),$", line)
            assert mo, (lineno, line)
            ndfas = int(mo.group(1))
            assert ndfas == len(self.dfas)
            lineno, line = lineno+1, next(f)
            assert line == "\tdfas,\n", (lineno, line)
            lineno, line = lineno+1, next(f)
            mo = re.match(r"\s+{(\d+), labels},$", line)
            assert mo, (lineno, line)
            nlabels = int(mo.group(1))
            assert nlabels == len(self.labels), (lineno, line)
            lineno, line = lineno+1, next(f)
            mo = re.match(r"\s+(\d+)$", line)
            assert mo, (lineno, line)
            start = int(mo.group(1))
            assert start in self.number2symbol, (lineno, line)
            self.start = start
            # The grammar struct must be the last thing in the file.
            try:
                lineno, line = lineno+1, next(f)
            except StopIteration:
                pass
            else:
                assert 0, (lineno, line)

    def finish_off(self):
        """Create additional useful structures.  (Internal)."""
        self.keywords = {}  # map from keyword strings to arc labels
        self.tokens = {}  # map from numeric token values to arc labels
        for ilabel, (type, value) in enumerate(self.labels):
            if type == token.NAME and value is not None:
                self.keywords[value] = ilabel
            elif value is None:
                self.tokens[type] = ilabel
| gpl-3.0 |
waseem18/oh-mainline | vendor/packages/whoosh/src/whoosh/matching.py | 17 | 55551 | # Copyright 2010 Matt Chaput. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY MATT CHAPUT ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL MATT CHAPUT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are
# those of the authors and should not be interpreted as representing official
# policies, either expressed or implied, of Matt Chaput.
"""
This module contains "matcher" classes. Matchers deal with posting lists. The
most basic matcher, which reads the list of postings for a term, will be
provided by the backend implementation (for example,
:class:`whoosh.filedb.filepostings.FilePostingReader`). The classes in this
module provide additional functionality, such as combining the results of two
matchers, or modifying the results of a matcher.
You do not need to deal with the classes in this module unless you need to
write your own Matcher implementation to provide some new functionality. These
classes are not instantiated by the user. They are usually created by a
:class:`~whoosh.query.Query` object's :meth:`~whoosh.query.Query.matcher()`
method, which returns the appropriate matcher to implement the query (for
example, the :class:`~whoosh.query.Or` query's
:meth:`~whoosh.query.Or.matcher()` method returns a
:py:class:`~whoosh.matching.UnionMatcher` object).
Certain backends support "quality" optimizations. These backends have the
ability to skip ahead if it knows the current block of postings can't
contribute to the top N documents. If the matcher tree and backend support
these optimizations, the matcher's :meth:`Matcher.supports_block_quality()`
method will return ``True``.
"""
import sys
from itertools import repeat
from whoosh.compat import izip, xrange
from whoosh.util import abstractmethod
class ReadTooFar(Exception):
    """Raised when :meth:`~whoosh.matching.Matcher.next()` or
    :meth:`~whoosh.matching.Matcher.skip_to()` are called on an inactive
    matcher (one whose posting list is already exhausted).
    """
class NoQualityAvailable(Exception):
    """Raised when quality methods are called on a matcher that does not
    support block quality optimizations (see
    :meth:`Matcher.supports_block_quality`).
    """
# Matchers
class Matcher(object):
    """Base class for all matchers.
    """

    @abstractmethod
    def is_active(self):
        """Returns True if this matcher is still "active", that is, it has not
        yet reached the end of the posting list.
        """

        raise NotImplementedError

    @abstractmethod
    def reset(self):
        """Returns to the start of the posting list.

        Note that reset() may not do what you expect after you call
        :meth:`Matcher.replace()`, since this can mean calling reset() not on
        the original matcher, but on an optimized replacement.
        """

        raise NotImplementedError

    def term(self):
        """Returns a ``("fieldname", "termtext")`` tuple for the term this
        matcher matches, or None if this matcher is not a term matcher.
        """

        return None

    def term_matchers(self):
        """Returns an iterator of term matchers in this tree.
        """

        # A matcher with a term is a leaf; otherwise recurse into children.
        if self.term() is not None:
            yield self
        else:
            for cm in self.children():
                for m in cm.term_matchers():
                    yield m

    def matching_terms(self, id=None):
        """Returns an iterator of ``("fieldname", "termtext")`` tuples for the
        **currently matching** term matchers in this tree.
        """

        if not self.is_active():
            return

        if id is None:
            id = self.id()
        elif id != self.id():
            # A submatcher not parked on the requested document does not
            # contribute any terms.
            return

        t = self.term()
        if t is None:
            for c in self.children():
                for t in c.matching_terms(id):
                    yield t
        else:
            yield t

    def children(self):
        """Returns an (possibly empty) list of the submatchers of this
        matcher.
        """

        return []

    def replace(self, minquality=0):
        """Returns a possibly-simplified version of this matcher. For example,
        if one of the children of a UnionMatcher is no longer active, calling
        this method on the UnionMatcher will return the other child.
        """

        return self

    @abstractmethod
    def copy(self):
        """Returns a copy of this matcher.
        """

        raise NotImplementedError

    def depth(self):
        """Returns the depth of the tree under this matcher, or 0 if this
        matcher does not have any children.
        """

        return 0

    def supports_block_quality(self):
        """Returns True if this matcher supports the use of ``quality`` and
        ``block_quality``.
        """

        return False

    def block_quality(self):
        """Returns a quality measurement of the current block of postings,
        according to the current weighting algorithm. Raises
        ``NoQualityAvailable`` if the matcher or weighting do not support
        quality measurements.
        """

        raise NoQualityAvailable(self.__class__)

    @abstractmethod
    def id(self):
        """Returns the ID of the current posting.
        """

        raise NotImplementedError

    def all_ids(self):
        """Returns a generator of all IDs in the matcher.

        What this method returns for a matcher that has already read some
        postings (whether it only yields the remaining postings or all
        postings from the beginning) is undefined, so it's best to only use
        this method on fresh matchers.
        """

        i = 0
        while self.is_active():
            yield self.id()
            self.next()
            i += 1
            if i == 10:
                # Periodically give the matcher a chance to swap itself for
                # a cheaper equivalent (e.g. when a submatcher has died).
                self = self.replace()
                i = 0

    def all_items(self):
        """Returns a generator of all (ID, encoded value) pairs in the
        matcher.

        What this method returns for a matcher that has already read some
        postings (whether it only yields the remaining postings or all
        postings from the beginning) is undefined, so it's best to only use
        this method on fresh matchers.
        """

        i = 0
        while self.is_active():
            yield (self.id(), self.value())
            self.next()
            i += 1
            if i == 10:
                # Same periodic self-replacement trick as all_ids().
                self = self.replace()
                i = 0

    def items_as(self, astype):
        """Returns a generator of all (ID, decoded value) pairs in the
        matcher.

        What this method returns for a matcher that has already read some
        postings (whether it only yields the remaining postings or all
        postings from the beginning) is undefined, so it's best to only use
        this method on fresh matchers.
        """

        while self.is_active():
            yield (self.id(), self.value_as(astype))
            # BUG FIX: the generator previously never advanced, so any
            # active matcher made this loop forever on its first posting
            # (compare all_items(), which calls next()).
            self.next()

    @abstractmethod
    def value(self):
        """Returns the encoded value of the current posting.
        """

        raise NotImplementedError

    @abstractmethod
    def supports(self, astype):
        """Returns True if the field's format supports the named data type,
        for example 'frequency' or 'characters'.
        """

        raise NotImplementedError("supports not implemented in %s"
                                  % self.__class__)

    @abstractmethod
    def value_as(self, astype):
        """Returns the value(s) of the current posting as the given type.
        """

        raise NotImplementedError("value_as not implemented in %s"
                                  % self.__class__)

    def spans(self):
        """Returns a list of :class:`whoosh.spans.Span` objects for the
        matches in this document. Raises an exception if the field being
        searched does not store positions.
        """

        from whoosh.spans import Span

        if self.supports("characters"):
            return [Span(pos, startchar=startchar, endchar=endchar)
                    for pos, startchar, endchar in self.value_as("characters")]
        elif self.supports("positions"):
            return [Span(pos) for pos in self.value_as("positions")]
        else:
            raise Exception("Field does not support spans")

    def skip_to(self, id):
        """Moves this matcher to the first posting with an ID equal to or
        greater than the given ID.
        """

        # Default implementation is a linear scan; subclasses may override
        # with something smarter.
        while self.is_active() and self.id() < id:
            self.next()

    def skip_to_quality(self, minquality):
        """Moves this matcher to the next block with greater than the given
        minimum quality value.
        """

        raise NotImplementedError(self.__class__.__name__)

    @abstractmethod
    def next(self):
        """Moves this matcher to the next posting.
        """

        raise NotImplementedError(self.__class__.__name__)

    def weight(self):
        """Returns the weight of the current posting.
        """

        return self.value_as("weight")

    @abstractmethod
    def score(self):
        """Returns the score of the current posting.
        """

        raise NotImplementedError(self.__class__.__name__)

    # NOTE(review): the comparison dunders below compare only the *classes*
    # of the two matchers, not their state -- presumably intentional (used
    # for ordering heterogeneous matchers); confirm before changing.
    def __eq__(self, other):
        return self.__class__ is type(other)

    def __lt__(self, other):
        return type(other) is self.__class__

    def __ne__(self, other):
        return not self.__eq__(other)

    def __gt__(self, other):
        return not (self.__lt__(other) or self.__eq__(other))

    def __le__(self, other):
        return self.__eq__(other) or self.__lt__(other)

    def __ge__(self, other):
        return self.__eq__(other) or self.__gt__(other)
class NullMatcherClass(Matcher):
    """Matcher with no postings which is never active.
    """

    def is_active(self):
        return False

    def reset(self):
        pass

    def copy(self):
        # There is nothing to copy; the singleton stands in for itself.
        return self

    def all_ids(self):
        return []

    def supports_block_quality(self):
        return True

    def block_quality(self):
        return 0

    def skip_to_quality(self, minquality):
        return 0

    def max_quality(self):
        return 0

    def __call__(self):
        # Calling the singleton returns itself, so ``NullMatcher()`` works
        # the same as referencing ``NullMatcher`` directly.
        return self


# Shared singleton instance; code elsewhere uses ``NullMatcher`` directly.
NullMatcher = NullMatcherClass()
class ListMatcher(Matcher):
    """Synthetic matcher backed by a list of IDs.
    """

    def __init__(self, ids, weights=None, values=None, format=None,
                 scorer=None, position=0, all_weights=None, term=None,
                 terminfo=None):
        """
        :param ids: a list of doc IDs.
        :param weights: a list of weights corresponding to the list of IDs.
            If this argument is not supplied, a list of 1.0 values is used.
        :param values: a list of encoded values corresponding to the list of
            IDs.
        :param format: a :class:`whoosh.formats.Format` object representing
            the format of the field.
        :param scorer: a :class:`whoosh.scoring.BaseScorer` object for
            scoring the postings.
        :param position: index into ``ids`` of the current posting (used when
            copying a partially-consumed matcher).
        :param all_weights: a single weight shared by every posting; takes
            precedence over ``weights`` when both are given.
        :param term: a ``("fieldname", "text")`` tuple, or None if this is
            not a term matcher.
        :param terminfo: per-term statistics object backing the ``block_*``
            methods, or None.
        """

        self._ids = ids
        self._weights = weights
        self._all_weights = all_weights
        self._values = values
        self._i = position
        self._format = format
        self._scorer = scorer
        self._term = term
        self._terminfo = terminfo

    def __repr__(self):
        return "<%s>" % self.__class__.__name__

    def is_active(self):
        return self._i < len(self._ids)

    def reset(self):
        self._i = 0

    def term(self):
        return self._term

    def copy(self):
        # Pass term and terminfo through: previously they were dropped, so
        # a copy lost its term() identity and its terminfo-backed block
        # statistics.
        return self.__class__(self._ids, self._weights, self._values,
                              self._format, self._scorer, self._i,
                              self._all_weights, term=self._term,
                              terminfo=self._terminfo)

    def replace(self, minquality=0):
        # Replace with the null matcher when exhausted or when this matcher
        # cannot contribute the required quality.
        if not self.is_active() or (minquality
                                    and self.max_quality() < minquality):
            return NullMatcher()
        else:
            return self

    def max_quality(self):
        return self.block_max_weight()

    def supports_block_quality(self):
        return (self._scorer is not None
                and self._scorer.supports_block_quality())

    def block_quality(self):
        return self._scorer.block_quality(self)

    def skip_to_quality(self, minquality):
        # NOTE(review): ``quality()`` is not defined on this class or on
        # Matcher in this module; this path looks stale (block_quality()?).
        # Confirm against callers before changing.
        self._i += 1
        while self._i < len(self._ids) and self.quality() <= minquality:
            self._i += 1
        return 0

    def id(self):
        return self._ids[self._i]

    def all_ids(self):
        return iter(self._ids)

    def all_items(self):
        values = self._values
        if values is None:
            # No stored values: pair every ID with an empty value.
            values = repeat('')

        return izip(self._ids, values)

    def value(self):
        if self._values:
            return self._values[self._i]
        else:
            return ''

    def value_as(self, astype):
        decoder = self._format.decoder(astype)
        return decoder(self.value())

    def supports(self, astype):
        return self._format.supports(astype)

    def next(self):
        self._i += 1

    def weight(self):
        # Precedence: uniform weight > per-posting weights > default 1.0.
        if self._all_weights:
            return self._all_weights
        elif self._weights:
            return self._weights[self._i]
        else:
            return 1.0

    def block_min_length(self):
        return self._terminfo.min_length()

    def block_max_length(self):
        return self._terminfo.max_length()

    def block_max_weight(self):
        if self._all_weights:
            return self._all_weights
        elif self._weights:
            return max(self._weights)
        elif self._terminfo is not None:
            return self._terminfo.max_weight()
        else:
            return 1.0

    def block_max_wol(self):
        return self._terminfo.max_wol()

    def score(self):
        if self._scorer:
            return self._scorer.score(self)
        else:
            return self.weight()
class WrappingMatcher(Matcher):
    """Base class for matchers that wrap sub-matchers.

    Forwards most of the matcher protocol to ``self.child``, multiplying
    score-related results by ``self.boost``.
    """

    def __init__(self, child, boost=1.0):
        # child: the wrapped Matcher; boost: multiplier applied by
        # weight()/score()/block_quality().
        self.child = child
        self.boost = boost

    def __repr__(self):
        return "%s(%r, boost=%s)" % (self.__class__.__name__, self.child,
                                     self.boost)

    def copy(self):
        # Only forward boost if this (sub)class actually keeps one.
        kwargs = {}
        if hasattr(self, "boost"):
            kwargs["boost"] = self.boost

        return self.__class__(self.child.copy(), **kwargs)

    def depth(self):
        return 1 + self.child.depth()

    def _replacement(self, newchild):
        # Hook for subclasses whose __init__ takes extra arguments; used by
        # replace() below.
        return self.__class__(newchild, boost=self.boost)

    def replace(self, minquality=0):
        # Replace the child matcher
        r = self.child.replace(minquality)
        if not r.is_active():
            # If the replaced child is inactive, return an inactive matcher
            return NullMatcher()
        elif r is not self.child:
            # If the child changed, return a new wrapper on the new child
            try:
                # Subclasses of WrappingMatcher can override _replacement() to
                # get the __init__ signature they need
                return self._replacement(r)
            except TypeError:
                e = sys.exc_info()[1]
                raise TypeError("Class %s got exception %s trying "
                                "to replace itself" % (self.__class__, e))
        else:
            return self

    def max_quality(self):
        return self.child.max_quality()

    def id(self):
        return self.child.id()

    def all_ids(self):
        return self.child.all_ids()

    def is_active(self):
        return self.child.is_active()

    def reset(self):
        self.child.reset()

    def children(self):
        return [self.child]

    def supports(self, astype):
        return self.child.supports(astype)

    def value(self):
        return self.child.value()

    def value_as(self, astype):
        return self.child.value_as(astype)

    def spans(self):
        return self.child.spans()

    def skip_to(self, id):
        return self.child.skip_to(id)

    def next(self):
        self.child.next()

    def supports_block_quality(self):
        return self.child.supports_block_quality()

    def skip_to_quality(self, minquality):
        # Divide by boost so the child sees the threshold in its own
        # (un-boosted) scale.
        return self.child.skip_to_quality(minquality / self.boost)

    def block_quality(self):
        return self.child.block_quality() * self.boost

    def weight(self):
        return self.child.weight() * self.boost

    def score(self):
        return self.child.score() * self.boost
class MultiMatcher(Matcher):
    """Serializes the results of a list of sub-matchers.

    Each sub-matcher covers its own local ID space; the corresponding entry
    in ``idoffsets`` is added to a sub-matcher's IDs to produce global IDs.
    """

    def __init__(self, matchers, idoffsets, current=0):
        """
        :param matchers: a list of Matcher objects.
        :param idoffsets: a list of offsets corresponding to items in the
            ``matchers`` list.
        :param current: index of the sub-matcher to start at.
        """
        self.matchers = matchers
        self.offsets = idoffsets
        self.current = current
        self._next_matcher()

    def __repr__(self):
        return "%s(%r, %r, current=%s)" % (self.__class__.__name__,
                                           self.matchers, self.offsets,
                                           self.current)

    def is_active(self):
        return self.current < len(self.matchers)

    def reset(self):
        for mr in self.matchers:
            mr.reset()
        self.current = 0

    def children(self):
        return [self.matchers[self.current]]

    def _next_matcher(self):
        # Advance self.current past any exhausted sub-matchers.
        matchers = self.matchers
        while (self.current < len(matchers)
               and not matchers[self.current].is_active()):
            self.current += 1

    def copy(self):
        return self.__class__([mr.copy() for mr in self.matchers],
                              self.offsets, current=self.current)

    def depth(self):
        if self.is_active():
            return 1 + max(mr.depth() for mr in self.matchers[self.current:])
        else:
            return 0

    def replace(self, minquality=0):
        m = self
        if minquality:
            # Skip sub-matchers that don't have a high enough max quality to
            # contribute
            while (m.is_active()
                   and m.matchers[m.current].max_quality() < minquality):
                m = self.__class__(self.matchers, self.offsets, m.current + 1)
                m._next_matcher()

        if not m.is_active():
            return NullMatcher()

        # TODO: Possible optimization: if the last matcher is current, replace
        # this with the last matcher, but wrap it with a matcher that adds the
        # offset. Have to check whether that's actually faster, though.
        return m

    def max_quality(self):
        return self.matchers[self.current].max_quality()

    def id(self):
        current = self.current
        return self.matchers[current].id() + self.offsets[current]

    def all_ids(self):
        offsets = self.offsets
        for i, mr in enumerate(self.matchers):
            for id in mr.all_ids():
                yield id + offsets[i]

    def spans(self):
        return self.matchers[self.current].spans()

    def supports(self, astype):
        return self.matchers[self.current].supports(astype)

    def value(self):
        return self.matchers[self.current].value()

    def value_as(self, astype):
        return self.matchers[self.current].value_as(astype)

    def next(self):
        if not self.is_active():
            raise ReadTooFar

        self.matchers[self.current].next()
        # Move to the next sub-matcher if the current one is exhausted.
        if not self.matchers[self.current].is_active():
            self._next_matcher()

    def skip_to(self, id):
        if not self.is_active():
            raise ReadTooFar
        if id <= self.id():
            return

        matchers = self.matchers
        offsets = self.offsets
        r = False

        # Skip forward, translating the global ID into each sub-matcher's
        # local space, until some sub-matcher reaches the target.
        while self.current < len(matchers) and id > self.id():
            mr = matchers[self.current]
            sr = mr.skip_to(id - offsets[self.current])
            r = sr or r
            if mr.is_active():
                break

            self._next_matcher()

        return r

    def supports_block_quality(self):
        # NOTE(review): this can return True, but no skip_to_quality()
        # override is defined in this class -- presumably the inherited
        # Matcher implementation is used; confirm.
        return all(mr.supports_block_quality() for mr
                   in self.matchers[self.current:])

    def block_quality(self):
        return self.matchers[self.current].block_quality()

    def weight(self):
        return self.matchers[self.current].weight()

    def score(self):
        return self.matchers[self.current].score()
def ExcludeMatcher(child, excluded, boost=1.0):
    """Return a matcher that yields only postings from ``child`` whose IDs
    are NOT in the ``excluded`` set.

    Convenience wrapper around :class:`FilterMatcher` in exclusion mode.
    """
    matcher = FilterMatcher(child, excluded, boost=boost, exclude=True)
    return matcher
class FilterMatcher(WrappingMatcher):
    """Filters the postings from the wrapped based on whether the IDs are
    present in or absent from a set.
    """

    def __init__(self, child, ids, exclude=False, boost=1.0):
        """
        :param child: the child matcher.
        :param ids: a set of IDs to filter by.
        :param exclude: by default, only IDs from the wrapped matcher that are
            **in** the set are used. If this argument is True, only IDs from
            the wrapped matcher that are **not in** the set are used.
        """
        super(FilterMatcher, self).__init__(child)
        self._ids = ids
        self._exclude = exclude
        self.boost = boost
        self._find_next()

    def __repr__(self):
        return "%s(%r, %r, %r, boost=%s)" % (self.__class__.__name__,
                                             self.child, self._ids,
                                             self._exclude, self.boost)

    def reset(self):
        self.child.reset()
        self._find_next()

    def copy(self):
        return self.__class__(self.child.copy(), self._ids, self._exclude,
                              boost=self.boost)

    def _replacement(self, newchild):
        return self.__class__(newchild, self._ids, exclude=self._exclude,
                              boost=self.boost)

    def _find_next(self):
        # Advance the child until it sits on an ID that passes the filter
        # (or goes inactive). Returns True if the child was moved.
        child = self.child
        ids = self._ids
        r = False

        if self._exclude:
            while child.is_active() and child.id() in ids:
                r = child.next() or r
        else:
            while child.is_active() and child.id() not in ids:
                r = child.next() or r
        return r

    def next(self):
        self.child.next()
        self._find_next()

    def skip_to(self, id):
        self.child.skip_to(id)
        self._find_next()

    def all_ids(self):
        ids = self._ids
        if self._exclude:
            return (id for id in self.child.all_ids() if id not in ids)
        else:
            return (id for id in self.child.all_ids() if id in ids)

    def all_items(self):
        ids = self._ids
        if self._exclude:
            return (item for item in self.child.all_items()
                    if item[0] not in ids)
        else:
            return (item for item in self.child.all_items() if item[0] in ids)
class BiMatcher(Matcher):
    """Base class for matchers that combine the results of two sub-matchers in
    some way.
    """

    def __init__(self, a, b):
        super(BiMatcher, self).__init__()
        self.a = a
        self.b = b

    def __repr__(self):
        return "%s(%r, %r)" % (self.__class__.__name__, self.a, self.b)

    def reset(self):
        # Rewind both sides.
        self.a.reset()
        self.b.reset()

    def children(self):
        return [self.a, self.b]

    def copy(self):
        return self.__class__(self.a.copy(), self.b.copy())

    def depth(self):
        return 1 + max(self.a.depth(), self.b.depth())

    def skip_to(self, id):
        if not self.is_active():
            raise ReadTooFar
        moved_a = self.a.skip_to(id)
        moved_b = self.b.skip_to(id)
        return moved_a or moved_b

    def supports_block_quality(self):
        return (self.a.supports_block_quality()
                and self.b.supports_block_quality())

    def supports(self, astype):
        return self.a.supports(astype) and self.b.supports(astype)
class AdditiveBiMatcher(BiMatcher):
    """Base class for binary matchers where the scores of the sub-matchers are
    added together.
    """

    def max_quality(self):
        # Sum the max qualities of whichever sub-matchers are still active.
        q = 0.0
        if self.a.is_active():
            q += self.a.max_quality()
        if self.b.is_active():
            q += self.b.max_quality()
        return q

    def block_quality(self):
        bq = 0.0
        if self.a.is_active():
            bq += self.a.block_quality()
        if self.b.is_active():
            bq += self.b.block_quality()
        return bq

    def weight(self):
        return (self.a.weight() + self.b.weight())

    def score(self):
        return (self.a.score() + self.b.score())

    # NOTE(review): these comparisons test only class identity, so two
    # instances of the same class compare both "equal" and "less than" each
    # other. Presumably this exists only to give matchers an arbitrary but
    # total ordering (e.g. as tie-breakers in sorted structures) -- confirm
    # before relying on them for anything else.
    def __eq__(self, other):
        return self.__class__ is type(other)

    def __lt__(self, other):
        return type(other) is self.__class__

    def __ne__(self, other):
        return not self.__eq__(other)

    def __gt__(self, other):
        return not (self.__lt__(other) or self.__eq__(other))

    def __le__(self, other):
        return self.__eq__(other) or self.__lt__(other)

    def __ge__(self, other):
        return self.__eq__(other) or self.__gt__(other)
class UnionMatcher(AdditiveBiMatcher):
    """Matches the union (OR) of the postings in the two sub-matchers.
    """

    # Cache of the current (minimum) ID of the two sub-matchers; set to None
    # whenever either side may have moved.
    _id = None

    def replace(self, minquality=0):
        a = self.a
        b = self.b
        a_active = a.is_active()
        b_active = b.is_active()

        # If neither sub-matcher on its own has a high enough max quality to
        # contribute, convert to an intersection matcher
        if (minquality and a_active and b_active
            and a.max_quality() < minquality and b.max_quality() < minquality):
            return IntersectionMatcher(a, b).replace(minquality)

        # If one or both of the sub-matchers are inactive, convert
        if not (a_active or b_active):
            return NullMatcher()
        elif not a_active:
            return b.replace(minquality)
        elif not b_active:
            return a.replace(minquality)

        # Each side only needs to be able to contribute what the other side's
        # maximum can't cover.
        a = a.replace(minquality - b.max_quality() if minquality else 0)
        b = b.replace(minquality - a.max_quality() if minquality else 0)
        # If one of the sub-matchers changed, return a new union
        if a is not self.a or b is not self.b:
            return self.__class__(a, b)
        else:
            self._id = None
            return self

    def is_active(self):
        if self._id is not None:
            return True
        return self.a.is_active() or self.b.is_active()

    def skip_to(self, id):
        ra = rb = False

        if self.a.is_active():
            ra = self.a.skip_to(id)
        if self.b.is_active():
            rb = self.b.skip_to(id)

        # Invalidate the cached ID; either side may have moved.
        self._id = None
        return ra or rb

    def id(self):
        _id = self._id
        if _id is not None:
            return _id

        a = self.a
        b = self.b
        if not a.is_active():
            _id = b.id()
        elif not b.is_active():
            _id = a.id()
        else:
            _id = min(a.id(), b.id())
        self._id = _id
        return _id

    # Using sets is faster in most cases, but could potentially use a lot of
    # memory. Comment out this method override to not use sets.
    def all_ids(self):
        return iter(sorted(set(self.a.all_ids()) | set(self.b.all_ids())))

    def next(self):
        self._id = None

        a = self.a
        b = self.b
        a_active = a.is_active()
        b_active = b.is_active()

        # Shortcut when one matcher is inactive
        if not (a_active or b_active):
            raise ReadTooFar
        elif not a_active:
            return b.next()
        elif not b_active:
            return a.next()

        a_id = a.id()
        b_id = b.id()
        ar = br = None

        # After all that, here's the actual implementation: advance whichever
        # side(s) currently sit on the minimum ID.
        if a_id <= b_id:
            ar = a.next()
        if b_id <= a_id:
            br = b.next()
        return ar or br

    def spans(self):
        if not self.a.is_active():
            return self.b.spans()
        if not self.b.is_active():
            return self.a.spans()

        id_a = self.a.id()
        id_b = self.b.id()
        if id_a < id_b:
            return self.a.spans()
        elif id_b < id_a:
            return self.b.spans()
        else:
            # Both sides are on the same document: merge their spans.
            return sorted(set(self.a.spans()) | set(self.b.spans()))

    def weight(self):
        a = self.a
        b = self.b

        if not a.is_active():
            return b.weight()
        if not b.is_active():
            return a.weight()

        id_a = a.id()
        id_b = b.id()
        if id_a < id_b:
            return a.weight()
        elif id_b < id_a:
            return b.weight()
        else:
            return (a.weight() + b.weight())

    def score(self):
        a = self.a
        b = self.b

        if not a.is_active():
            return b.score()
        if not b.is_active():
            return a.score()

        id_a = a.id()
        id_b = b.id()
        if id_a < id_b:
            return a.score()
        elif id_b < id_a:
            return b.score()
        else:
            return (a.score() + b.score())

    def skip_to_quality(self, minquality):
        self._id = None

        a = self.a
        b = self.b
        if not (a.is_active() or b.is_active()):
            raise ReadTooFar

        # Short circuit if one matcher is inactive
        if not a.is_active():
            return b.skip_to_quality(minquality)
        elif not b.is_active():
            return a.skip_to_quality(minquality)

        # Repeatedly skip the lower-quality side until the combined block
        # quality can exceed the threshold.
        skipped = 0
        aq = a.block_quality()
        bq = b.block_quality()
        while a.is_active() and b.is_active() and aq + bq <= minquality:
            if aq < bq:
                skipped += a.skip_to_quality(minquality - bq)
                aq = a.block_quality()
            else:
                skipped += b.skip_to_quality(minquality - aq)
                bq = b.block_quality()

        return skipped
class DisjunctionMaxMatcher(UnionMatcher):
    """Matches the union (OR) of two sub-matchers. Where both sub-matchers
    match the same posting, returns the weight/score of the higher-scoring
    posting.
    """

    # TODO: this class inherits from AdditiveBiMatcher (through UnionMatcher)
    # but it does not add the scores of the sub-matchers together (it
    # overrides all methods that perform addition). Need to clean up the
    # inheritance.

    def __init__(self, a, b, tiebreak=0.0):
        """
        :param a: the first sub-matcher.
        :param b: the second sub-matcher.
        :param tiebreak: carried through copy(); not currently used by
            score().
        """
        super(DisjunctionMaxMatcher, self).__init__(a, b)
        self.tiebreak = tiebreak

    def copy(self):
        return self.__class__(self.a.copy(), self.b.copy(),
                              tiebreak=self.tiebreak)

    def replace(self, minquality=0):
        a = self.a
        b = self.b
        a_active = a.is_active()
        b_active = b.is_active()

        # DisMax takes the max of the sub-matcher qualities instead of adding
        # them, so we need special logic here
        if minquality and a_active and b_active:
            a_max = a.max_quality()
            b_max = b.max_quality()

            if a_max < minquality and b_max < minquality:
                # If neither sub-matcher has a high enough max quality to
                # contribute, return an inactive matcher
                return NullMatcher()
            elif b_max < minquality:
                # If the b matcher can't contribute, return a
                return a.replace(minquality)
            elif a_max < minquality:
                # If the a matcher can't contribute, return b
                return b.replace(minquality)

        if not (a_active or b_active):
            return NullMatcher()
        elif not a_active:
            return b.replace(minquality)
        elif not b_active:
            return a.replace(minquality)

        # We CAN pass the minquality down here, since we don't add the two
        # scores together
        a = a.replace(minquality)
        b = b.replace(minquality)
        a_active = a.is_active()
        b_active = b.is_active()
        # It's kind of tedious to check for inactive sub-matchers all over
        # again here after we replace them, but it's probably better than
        # returning a replacement with an inactive sub-matcher.
        # Bug fix: this first test used "and", which made the two elif
        # branches below unreachable and wrongly returned NullMatcher when
        # only ONE side was exhausted (a union should degrade to the
        # remaining side, as UnionMatcher.replace does).
        if not (a_active or b_active):
            return NullMatcher()
        elif not a_active:
            return b
        elif not b_active:
            return a
        elif a is not self.a or b is not self.b:
            # If one of the sub-matchers changed, return a new DisMax
            return self.__class__(a, b)
        else:
            return self

    def max_quality(self):
        return max(self.a.max_quality(), self.b.max_quality())

    def score(self):
        if not self.a.is_active():
            return self.b.score()
        elif not self.b.is_active():
            return self.a.score()
        else:
            return max(self.a.score(), self.b.score())

    def block_quality(self):
        return max(self.a.block_quality(), self.b.block_quality())

    def skip_to_quality(self, minquality):
        a = self.a
        b = self.b

        # Short circuit if one matcher is inactive
        if not a.is_active():
            sk = b.skip_to_quality(minquality)
            return sk
        elif not b.is_active():
            return a.skip_to_quality(minquality)

        skipped = 0
        aq = a.block_quality()
        bq = b.block_quality()
        while a.is_active() and b.is_active() and max(aq, bq) <= minquality:
            if aq <= minquality:
                skipped += a.skip_to_quality(minquality)
                aq = a.block_quality()
            if bq <= minquality:
                skipped += b.skip_to_quality(minquality)
                bq = b.block_quality()
        return skipped
class IntersectionMatcher(AdditiveBiMatcher):
    """Matches the intersection (AND) of the postings in the two sub-matchers.
    """

    def __init__(self, a, b):
        super(IntersectionMatcher, self).__init__(a, b)
        self._find_first()

    def reset(self):
        self.a.reset()
        self.b.reset()
        self._find_first()

    def _find_first(self):
        # Align the two sub-matchers on their first common ID (if any).
        if (self.a.is_active()
            and self.b.is_active()
            and self.a.id() != self.b.id()):
            self._find_next()

    def replace(self, minquality=0):
        a = self.a
        b = self.b
        a_active = a.is_active()
        b_active = b.is_active()

        if not (a_active and b_active):
            # Intersection matcher requires that both sub-matchers be active
            return NullMatcher()

        if minquality:
            a_max = a.max_quality()
            b_max = b.max_quality()
            if a_max + b_max < minquality:
                # If the combined quality of the sub-matchers can't contribute,
                # return an inactive matcher
                return NullMatcher()
            # Require that the replacements be able to contribute results
            # higher than the minquality
            a_min = minquality - b_max
            b_min = minquality - a_max
        else:
            a_min = b_min = 0

        a = a.replace(a_min)
        b = b.replace(b_min)
        a_active = a.is_active()
        b_active = b.is_active()
        if not (a_active or b_active):
            return NullMatcher()
        elif not a_active:
            return b
        elif not b_active:
            return a
        elif a is not self.a or b is not self.b:
            return self.__class__(a, b)
        else:
            return self

    def is_active(self):
        return self.a.is_active() and self.b.is_active()

    def _find_next(self):
        # Leapfrog the two sub-matchers until their IDs agree or one side
        # runs out. Returns True if either side was moved.
        a = self.a
        b = self.b
        a_id = a.id()
        b_id = b.id()
        assert a_id != b_id
        r = False

        while a.is_active() and b.is_active() and a_id != b_id:
            if a_id < b_id:
                ra = a.skip_to(b_id)
                if not a.is_active():
                    return
                r = r or ra
                a_id = a.id()
            else:
                rb = b.skip_to(a_id)
                if not b.is_active():
                    return
                r = r or rb
                b_id = b.id()
        return r

    def id(self):
        return self.a.id()

    # Using sets is faster in some cases, but could potentially use a lot of
    # memory
    def all_ids(self):
        return iter(sorted(set(self.a.all_ids()) & set(self.b.all_ids())))

    def skip_to(self, id):
        if not self.is_active():
            raise ReadTooFar
        ra = self.a.skip_to(id)
        rb = self.b.skip_to(id)
        if self.is_active():
            rn = False
            if self.a.id() != self.b.id():
                rn = self._find_next()
            return ra or rb or rn

    def skip_to_quality(self, minquality):
        a = self.a
        b = self.b
        minquality = minquality  # NOTE(review): no-op assignment; harmless.

        skipped = 0
        aq = a.block_quality()
        bq = b.block_quality()
        while a.is_active() and b.is_active() and aq + bq <= minquality:
            # Skip the side with the lower block quality, letting the other
            # side's quality offset the threshold.
            if aq < bq:
                skipped += a.skip_to_quality(minquality - bq)
            else:
                skipped += b.skip_to_quality(minquality - aq)

            if not a.is_active() or not b.is_active():
                break

            if a.id() != b.id():
                self._find_next()
            aq = a.block_quality()
            bq = b.block_quality()

        return skipped

    def next(self):
        if not self.is_active():
            raise ReadTooFar

        # We must assume that the ids are equal whenever next() is called (they
        # should have been made equal by _find_next), so advance them both
        ar = self.a.next()
        if self.is_active():
            nr = self._find_next()
            return ar or nr

    def spans(self):
        # Both sides are on the same document here, so merge their spans.
        return sorted(set(self.a.spans()) | set(self.b.spans()))
class AndNotMatcher(BiMatcher):
    """Matches the postings in the first sub-matcher that are NOT present in
    the second sub-matcher.
    """

    def __init__(self, a, b):
        # a: required ("positive") matcher; b: prohibited ("negative")
        # matcher.
        super(AndNotMatcher, self).__init__(a, b)
        self._find_first()

    def reset(self):
        self.a.reset()
        self.b.reset()
        self._find_first()

    def _find_first(self):
        # If both sides start on the same ID, advance past the overlap.
        if (self.a.is_active()
            and self.b.is_active()
            and self.a.id() == self.b.id()):
            self._find_next()

    def is_active(self):
        return self.a.is_active()

    def _find_next(self):
        # Advance the positive matcher past any IDs also present in the
        # negative matcher. Returns True if the positive matcher was moved.
        pos = self.a
        neg = self.b
        if not neg.is_active():
            return
        pos_id = pos.id()
        r = False

        if neg.id() < pos_id:
            neg.skip_to(pos_id)

        while pos.is_active() and neg.is_active() and pos_id == neg.id():
            nr = pos.next()
            if not pos.is_active():
                break

            r = r or nr
            pos_id = pos.id()
            neg.skip_to(pos_id)

        return r

    def supports_block_quality(self):
        return self.a.supports_block_quality()

    def replace(self, minquality=0):
        if not self.a.is_active():
            # The a matcher is required, so if it's inactive, return an
            # inactive matcher
            return NullMatcher()
        elif (minquality
              and self.a.max_quality() < minquality):
            # If the quality of the required matcher isn't high enough to
            # contribute, return an inactive matcher
            return NullMatcher()
        elif not self.b.is_active():
            # If the prohibited matcher is inactive, convert to just the
            # required matcher
            return self.a.replace(minquality)

        a = self.a.replace(minquality)
        b = self.b.replace()
        if a is not self.a or b is not self.b:
            # If one of the sub-matchers was replaced, return a new AndNot
            return self.__class__(a, b)
        else:
            return self

    def max_quality(self):
        return self.a.max_quality()

    def block_quality(self):
        return self.a.block_quality()

    def skip_to_quality(self, minquality):
        skipped = self.a.skip_to_quality(minquality)
        self._find_next()
        return skipped

    def id(self):
        return self.a.id()

    def all_ids(self):
        return iter(sorted(set(self.a.all_ids()) - set(self.b.all_ids())))

    def next(self):
        if not self.a.is_active():
            raise ReadTooFar
        ar = self.a.next()
        nr = False
        if self.a.is_active() and self.b.is_active():
            nr = self._find_next()
        return ar or nr

    def skip_to(self, id):
        if not self.a.is_active():
            raise ReadTooFar
        if id < self.a.id():
            return

        self.a.skip_to(id)
        if self.b.is_active():
            self.b.skip_to(id)
            self._find_next()

    def weight(self):
        return self.a.weight()

    def score(self):
        return self.a.score()

    def supports(self, astype):
        return self.a.supports(astype)

    def value(self):
        return self.a.value()

    def value_as(self, astype):
        return self.a.value_as(astype)
class InverseMatcher(WrappingMatcher):
    """Synthetic matcher, generates postings that are NOT present in the
    wrapped matcher.
    """

    def __init__(self, child, limit, missing=None, weight=1.0):
        """
        :param child: the matcher whose postings are inverted.
        :param limit: one past the highest document ID to generate.
        :param missing: optional predicate ``missing(id) -> bool``; IDs for
            which it returns True are skipped as well.
        :param weight: constant weight/score reported for every posting.
        """
        super(InverseMatcher, self).__init__(child)
        self.limit = limit
        self._weight = weight
        self.missing = missing or (lambda id: False)
        self._id = 0
        self._find_next()

    def copy(self):
        return self.__class__(self.child.copy(), self.limit,
                              weight=self._weight, missing=self.missing)

    def _replacement(self, newchild):
        # Bug fix: this previously passed ``weight=self.weight`` -- the bound
        # method object -- instead of the stored weight value, so replacement
        # matchers reported a method as their weight/score.
        return self.__class__(newchild, self.limit, missing=self.missing,
                              weight=self._weight)

    def is_active(self):
        return self._id < self.limit

    def reset(self):
        self.child.reset()
        self._id = 0
        self._find_next()

    def supports_block_quality(self):
        return False

    def _find_next(self):
        # Advance self._id to the next ID that is neither "missing" nor
        # matched by the child.
        child = self.child
        missing = self.missing

        # If the child is exhausted and the current ID isn't "missing", the
        # current ID is automatically a hit.
        # NOTE(review): when the child is exhausted but missing(self._id) is
        # True, the loop below never runs (it requires child.is_active()), so
        # the matcher can sit on a "missing" ID -- confirm intended.
        if not child.is_active() and not missing(self._id):
            return

        if child.is_active() and child.id() < self._id:
            child.skip_to(self._id)

        # While self._id is missing or is in the child matcher, increase it
        while child.is_active() and self._id < self.limit:
            if missing(self._id):
                self._id += 1
                continue

            if self._id == child.id():
                self._id += 1
                child.next()
                continue

            break

    def id(self):
        return self._id

    def all_ids(self):
        missing = self.missing
        negs = set(self.child.all_ids())
        return (id for id in xrange(self.limit)
                if id not in negs and not missing(id))

    def next(self):
        if self._id >= self.limit:
            raise ReadTooFar
        self._id += 1
        self._find_next()

    def skip_to(self, id):
        if self._id >= self.limit:
            raise ReadTooFar
        if id < self._id:
            return
        self._id = id
        self._find_next()

    def weight(self):
        return self._weight

    def score(self):
        return self._weight
class RequireMatcher(WrappingMatcher):
    """Matches postings that are in both sub-matchers, but only uses scores
    from the first.
    """

    def __init__(self, a, b):
        """
        :param a: the scoring matcher; its weights/scores are reported.
        :param b: the required matcher; its postings constrain the results
            but do not contribute to the score.
        """
        self.a = a
        self.b = b
        self.child = IntersectionMatcher(a, b)

    def copy(self):
        return self.__class__(self.a.copy(), self.b.copy())

    def supports_block_quality(self):
        return self.a.supports_block_quality()

    def replace(self, minquality=0):
        if not self.child.is_active():
            # If one of the sub-matchers is inactive, go inactive
            return NullMatcher()
        elif minquality and self.a.max_quality() < minquality:
            # If the required matcher doesn't have a high enough max quality
            # to possibly contribute, return an inactive matcher
            return NullMatcher()

        new_a = self.a.replace(minquality)
        new_b = self.b.replace()
        if not new_a.is_active():
            return NullMatcher()
        elif new_a is not self.a or new_b is not self.b:
            # If one of the sub-matchers changed, return a new Require.
            # Bug fix: this previously constructed the replacement with the
            # *old* self.b even though new_b was computed (and compared)
            # above, discarding b's replacement.
            return self.__class__(new_a, new_b)
        else:
            return self

    def max_quality(self):
        return self.a.max_quality()

    def block_quality(self):
        return self.a.block_quality()

    def skip_to_quality(self, minquality):
        skipped = self.a.skip_to_quality(minquality)
        # Re-align the intersection after moving the scoring matcher.
        self.child._find_next()
        return skipped

    def weight(self):
        return self.a.weight()

    def score(self):
        return self.a.score()

    def supports(self, astype):
        return self.a.supports(astype)

    def value(self):
        return self.a.value()

    def value_as(self, astype):
        return self.a.value_as(astype)
class AndMaybeMatcher(AdditiveBiMatcher):
    """Matches postings in the first sub-matcher, and if the same posting is
    in the second sub-matcher, adds their scores.
    """

    def __init__(self, a, b):
        """
        :param a: the required matcher; its postings define what matches.
        :param b: the optional matcher; it only contributes to the score.
        """
        self.a = a
        self.b = b
        self._first_b()

    def reset(self):
        self.a.reset()
        self.b.reset()
        self._first_b()

    def _first_b(self):
        # Align b with a's current posting so weight()/score() can compare
        # IDs cheaply.
        a = self.a
        b = self.b
        if a.is_active() and b.is_active() and a.id() != b.id():
            b.skip_to(a.id())

    def is_active(self):
        return self.a.is_active()

    def id(self):
        return self.a.id()

    def next(self):
        if not self.a.is_active():
            raise ReadTooFar

        ar = self.a.next()
        br = False
        if self.a.is_active() and self.b.is_active():
            br = self.b.skip_to(self.a.id())
        return ar or br

    def skip_to(self, id):
        if not self.a.is_active():
            raise ReadTooFar

        ra = self.a.skip_to(id)
        rb = False
        if self.a.is_active() and self.b.is_active():
            rb = self.b.skip_to(id)
        return ra or rb

    def replace(self, minquality=0):
        a = self.a
        b = self.b
        a_active = a.is_active()
        b_active = b.is_active()

        if not a_active:
            return NullMatcher()
        elif minquality and b_active:
            if a.max_quality() + b.max_quality() < minquality:
                # If the combined max quality of the sub-matchers isn't high
                # enough to possibly contribute, return an inactive matcher
                return NullMatcher()
            elif a.max_quality() < minquality:
                # If the max quality of the main sub-matcher isn't high enough
                # to ever contribute without the optional sub-matcher, change
                # into an IntersectionMatcher
                return IntersectionMatcher(self.a, self.b)
        elif not b_active:
            return a.replace(minquality)

        new_a = a.replace(minquality - b.max_quality())
        new_b = b.replace(minquality - a.max_quality())
        if new_a is not a or new_b is not b:
            # If one of the sub-matchers changed, return a new AndMaybe
            return self.__class__(new_a, new_b)
        else:
            return self

    def skip_to_quality(self, minquality):
        # (Removed a useless ``minquality = minquality`` self-assignment.)
        a = self.a
        b = self.b

        if not a.is_active():
            raise ReadTooFar
        if not b.is_active():
            return a.skip_to_quality(minquality)

        skipped = 0
        aq = a.block_quality()
        bq = b.block_quality()
        while a.is_active() and b.is_active() and aq + bq <= minquality:
            if aq < bq:
                skipped += a.skip_to_quality(minquality - bq)
                aq = a.block_quality()
            else:
                skipped += b.skip_to_quality(minquality - aq)
                bq = b.block_quality()
        return skipped

    def weight(self):
        # Bug fix: mirror score() by checking b.is_active() before calling
        # b.id(); previously this read the optional matcher's ID even after
        # it was exhausted.
        if self.b.is_active() and self.a.id() == self.b.id():
            return self.a.weight() + self.b.weight()
        else:
            return self.a.weight()

    def score(self):
        if self.b.is_active() and self.a.id() == self.b.id():
            return self.a.score() + self.b.score()
        else:
            return self.a.score()

    def supports(self, astype):
        return self.a.supports(astype)

    def value(self):
        return self.a.value()

    def value_as(self, astype):
        return self.a.value_as(astype)
class ConstantScoreMatcher(WrappingMatcher):
    """Wraps a matcher and reports a fixed score for every matched posting."""

    def __init__(self, child, score=1.0):
        super(ConstantScoreMatcher, self).__init__(child)
        self._score = score

    def score(self):
        # The wrapped matcher's own scoring is ignored entirely.
        return self._score

    def block_quality(self):
        return self._score

    def copy(self):
        duplicate = self.child.copy()
        return self.__class__(duplicate, score=self._score)

    def _replacement(self, newchild):
        return self.__class__(newchild, score=self._score)
#class PhraseMatcher(WrappingMatcher):
# """Matches postings where a list of sub-matchers occur next to each other
# in order.
# """
#
# def __init__(self, wordmatchers, slop=1, boost=1.0):
# self.wordmatchers = wordmatchers
# self.child = make_binary_tree(IntersectionMatcher, wordmatchers)
# self.slop = slop
# self.boost = boost
# self._spans = None
# self._find_next()
#
# def copy(self):
# return self.__class__(self.wordmatchers[:], slop=self.slop,
# boost=self.boost)
#
# def replace(self, minquality=0):
# if not self.is_active():
# return NullMatcher()
# return self
#
# def all_ids(self):
# # Need to redefine this because the WrappingMatcher parent class
# # forwards to the submatcher, which in this case is just the
# # IntersectionMatcher.
# while self.is_active():
# yield self.id()
# self.next()
#
# def next(self):
# ri = self.child.next()
# rn = self._find_next()
# return ri or rn
#
# def skip_to(self, id):
# rs = self.child.skip_to(id)
# rn = self._find_next()
# return rs or rn
#
# def skip_to_quality(self, minquality):
# skipped = 0
# while self.is_active() and self.quality() <= minquality:
# # TODO: doesn't count the documents matching the phrase yet
# skipped += self.child.skip_to_quality(minquality/self.boost)
# self._find_next()
# return skipped
#
# def positions(self):
# if not self.is_active():
# raise ReadTooFar
# if not self.wordmatchers:
# return []
# return self.wordmatchers[0].positions()
#
# def _find_next(self):
# isect = self.child
# slop = self.slop
#
# # List of "active" positions
# current = []
#
# while not current and isect.is_active():
# # [[list of positions for word 1],
# # [list of positions for word 2], ...]
# poses = [m.positions() for m in self.wordmatchers]
#
# # Set the "active" position list to the list of positions of the
# # first word. We will then iteratively update this list with the
# # positions of subsequent words if they are within the "slop"
# # distance of the positions in the active list.
# current = poses[0]
#
# # For each list of positions for the subsequent words...
# for poslist in poses[1:]:
# # A list to hold the new list of active positions
# newposes = []
#
# # For each position in the list of positions in this next word
# for newpos in poslist:
# # Use bisect to only check the part of the current list
# # that could contain positions within the "slop" distance
# # of the new position
# start = bisect_left(current, newpos - slop)
# end = bisect_right(current, newpos)
#
# #
# for curpos in current[start:end]:
# delta = newpos - curpos
# if delta > 0 and delta <= slop:
# newposes.append(newpos)
#
# current = newposes
# if not current: break
#
# if not current:
# isect.next()
#
# self._count = len(current)
#
#
#class VectorPhraseMatcher(BasePhraseMatcher):
# """Phrase matcher for fields with a vector with positions (i.e. Positions
# or CharacterPositions format).
# """
#
# def __init__(self, searcher, fieldname, words, isect, slop=1, boost=1.0):
# """
# :param searcher: a Searcher object.
# :param fieldname: the field in which to search.
# :param words: a sequence of token texts representing the words in the
# phrase.
# :param isect: an intersection matcher for the words in the phrase.
# :param slop:
# """
#
# decodefn = searcher.field(fieldname).vector.decoder("positions")
# self.reader = searcher.reader()
# self.fieldname = fieldname
# self.words = words
# self.sortedwords = sorted(self.words)
# super(VectorPhraseMatcher, self).__init__(isect, decodefn, slop=slop,
# boost=boost)
#
# def _poses(self):
# vreader = self.reader.vector(self.child.id(), self.fieldname)
# poses = {}
# decode_positions = self.decode_positions
# for word in self.sortedwords:
# vreader.skip_to(word)
# if vreader.id() != word:
# raise Exception("Phrase query: %r in term index but not in"
# " vector (possible analyzer mismatch)" % word)
# poses[word] = decode_positions(vreader.value())
# # Now put the position lists in phrase order
# return [poses[word] for word in self.words]
| agpl-3.0 |
mlperf/inference_results_v0.5 | closed/FuriosaAI/code/mobilenet/furiosa-loadgen/resize-imagenet.py | 1 | 1710 | import logging
import sys
import time
import os
import cv2
import numpy as np
def center_crop(img, out_height, out_width):
    """Crop the central ``out_height`` x ``out_width`` region of an HWC image."""
    h, w, _ = img.shape
    y0 = int((h - out_height) / 2)
    y1 = int((h + out_height) / 2)
    x0 = int((w - out_width) / 2)
    x1 = int((w + out_width) / 2)
    return img[y0:y1, x0:x1]
def resize_with_aspectratio(img, out_height, out_width, scale=87.5, inter_pol=cv2.INTER_LINEAR):
    """Resize ``img`` preserving aspect ratio, inflated by 100/scale.

    The target is enlarged so that a subsequent centre crop of
    (out_height, out_width) keeps ``scale`` percent of the resized image.

    Args:
        img: numpy array of shape (height, width, channels).
        out_height: final (post-crop) height in pixels.
        out_width: final (post-crop) width in pixels.
        scale: percentage of the resized image the final crop should cover.
        inter_pol: OpenCV interpolation flag passed to ``cv2.resize``.

    Returns:
        The resized image; the shorter side matches the inflated target.
    """
    height, width, _ = img.shape
    target_h = int(100. * out_height / scale)
    target_w = int(100. * out_width / scale)
    # Scale the shorter side to the target and grow the longer side
    # proportionally so the aspect ratio is unchanged.
    if height > width:
        new_w = target_w
        new_h = int(target_h * height / width)
    else:
        new_h = target_h
        new_w = int(target_w * width / height)
    return cv2.resize(img, (new_w, new_h), interpolation=inter_pol)
def pre_process_mobilenet(img, dims=None, need_transpose=False):
    """Apply MobileNet preprocessing to a BGR image read by OpenCV.

    Args:
        img: HWC BGR image array (as returned by ``cv2.imread``).
        dims: (output_height, output_width, channels) target dimensions.
            NOTE(review): the ``None`` default is never usable -- ``dims`` is
            unpacked unconditionally, so callers must always pass it.
        need_transpose: when True, return CHW layout instead of HWC.

    Returns:
        uint8 numpy array containing the resized, centre-cropped image.
    """
    # OpenCV decodes to BGR; the model expects RGB channel order.
    rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    out_h, out_w, _ = dims
    # Resize preserving aspect ratio, then crop the centre region.
    rgb = resize_with_aspectratio(rgb, out_h, out_w, inter_pol=cv2.INTER_LINEAR)
    rgb = center_crop(rgb, out_h, out_w)
    # Round-trip through float32 exactly as the reference code does.
    rgb = np.asarray(rgb, dtype='float32')
    if need_transpose:
        rgb = rgb.transpose([2, 0, 1])
    return np.asarray(rgb, dtype=np.uint8)
# Usage: python resize-imagenet.py <imagenet_dir> <output_dir>
# Preprocess every *.JPEG file in <imagenet_dir> for MobileNet (224x224)
# and store each result as <output_dir>/<name>.JPEG.npy.
imageloc = sys.argv[1]
outloc = sys.argv[2]
for filename in os.listdir(imageloc):
    if not filename.endswith(".JPEG"):
        continue
    image = cv2.imread(os.path.join(imageloc, filename))
    preprocessed = pre_process_mobilenet(image, [224, 224, 3])
    np.save(os.path.join(outloc, filename + ".npy"), preprocessed)
| apache-2.0 |
hurricanerix/swift | test/unit/common/middleware/crypto/test_encrypter.py | 8 | 41463 | # Copyright (c) 2015-2016 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import hashlib
import hmac
import json
import os
import unittest
import mock
from six.moves.urllib import parse as urlparse
from swift.common.middleware.crypto import encrypter
from swift.common.middleware.crypto.crypto_utils import (
CRYPTO_KEY_CALLBACK, Crypto)
from swift.common.swob import (
Request, HTTPException, HTTPCreated, HTTPAccepted, HTTPOk, HTTPBadRequest)
from swift.common.utils import FileLikeIter
from test.unit import FakeLogger, EMPTY_ETAG
from test.unit.common.middleware.crypto.crypto_helpers import (
fetch_crypto_keys, md5hex, FAKE_IV, encrypt)
from test.unit.common.middleware.helpers import FakeSwift, FakeAppThatExcepts
@mock.patch('swift.common.middleware.crypto.crypto_utils.Crypto.create_iv',
lambda *args: FAKE_IV)
class TestEncrypter(unittest.TestCase):
    def setUp(self):
        """Wrap a FakeSwift app in the Encrypter under test; use a
        FakeLogger so tests can inspect logged error lines."""
        self.app = FakeSwift()
        self.encrypter = encrypter.Encrypter(self.app, {})
        self.encrypter.logger = FakeLogger()
    def _verify_user_metadata(self, req_hdrs, name, value, key):
        """Assert that user meta item ``name`` was sent to the app encrypted.

        :param req_hdrs: headers captured by the FakeSwift app.
        :param name: user metadata item name (without the header prefix).
        :param value: expected plaintext value.
        :param key: key expected to have been used for encryption.
        """
        # verify encrypted version of user metadata
        self.assertNotIn('X-Object-Meta-' + name, req_hdrs)
        expected_hdr = 'X-Object-Transient-Sysmeta-Crypto-Meta-' + name
        self.assertIn(expected_hdr, req_hdrs)
        enc_val, param = req_hdrs[expected_hdr].split(';')
        param = param.strip()
        self.assertTrue(param.startswith('swift_meta='))
        actual_meta = json.loads(
            urlparse.unquote_plus(param[len('swift_meta='):]))
        self.assertEqual(Crypto.cipher, actual_meta['cipher'])
        meta_iv = base64.b64decode(actual_meta['iv'])
        self.assertEqual(FAKE_IV, meta_iv)
        self.assertEqual(
            base64.b64encode(encrypt(value, key, meta_iv)),
            enc_val)
        # if there is any encrypted user metadata then this header should exist
        self.assertIn('X-Object-Transient-Sysmeta-Crypto-Meta', req_hdrs)
        common_meta = json.loads(urlparse.unquote_plus(
            req_hdrs['X-Object-Transient-Sysmeta-Crypto-Meta']))
        self.assertDictEqual({'cipher': Crypto.cipher,
                              'key_id': {'v': 'fake', 'path': '/a/c/fake'}},
                             common_meta)
    def test_PUT_req(self):
        """PUT: body, etag, container-update etag and user metadata are all
        encrypted on the way to the app; content-type and sysmeta are not."""
        body_key = os.urandom(32)
        object_key = fetch_crypto_keys()['object']
        plaintext = 'FAKE APP'
        plaintext_etag = md5hex(plaintext)
        ciphertext = encrypt(plaintext, body_key, FAKE_IV)
        ciphertext_etag = md5hex(ciphertext)
        env = {'REQUEST_METHOD': 'PUT',
               CRYPTO_KEY_CALLBACK: fetch_crypto_keys}
        hdrs = {'etag': plaintext_etag,
                'content-type': 'text/plain',
                'content-length': str(len(plaintext)),
                'x-object-meta-etag': 'not to be confused with the Etag!',
                'x-object-meta-test': 'encrypt me',
                'x-object-sysmeta-test': 'do not encrypt me'}
        req = Request.blank(
            '/v1/a/c/o', environ=env, body=plaintext, headers=hdrs)
        self.app.register('PUT', '/v1/a/c/o', HTTPCreated, {})
        with mock.patch(
                'swift.common.middleware.crypto.crypto_utils.'
                'Crypto.create_random_key',
                return_value=body_key):
            resp = req.get_response(self.encrypter)
        self.assertEqual('201 Created', resp.status)
        self.assertEqual(plaintext_etag, resp.headers['Etag'])
        # verify metadata items
        self.assertEqual(1, len(self.app.calls), self.app.calls)
        self.assertEqual('PUT', self.app.calls[0][0])
        req_hdrs = self.app.headers[0]
        # verify body crypto meta
        actual = req_hdrs['X-Object-Sysmeta-Crypto-Body-Meta']
        actual = json.loads(urlparse.unquote_plus(actual))
        self.assertEqual(Crypto().cipher, actual['cipher'])
        self.assertEqual(FAKE_IV, base64.b64decode(actual['iv']))
        # verify wrapped body key
        expected_wrapped_key = encrypt(body_key, object_key, FAKE_IV)
        self.assertEqual(expected_wrapped_key,
                         base64.b64decode(actual['body_key']['key']))
        self.assertEqual(FAKE_IV,
                         base64.b64decode(actual['body_key']['iv']))
        self.assertEqual(fetch_crypto_keys()['id'], actual['key_id'])
        # verify etag
        self.assertEqual(ciphertext_etag, req_hdrs['Etag'])
        encrypted_etag, _junk, etag_meta = \
            req_hdrs['X-Object-Sysmeta-Crypto-Etag'].partition('; swift_meta=')
        # verify crypto_meta was appended to this etag
        self.assertTrue(etag_meta)
        actual_meta = json.loads(urlparse.unquote_plus(etag_meta))
        self.assertEqual(Crypto().cipher, actual_meta['cipher'])
        # verify encrypted version of plaintext etag
        actual = base64.b64decode(encrypted_etag)
        etag_iv = base64.b64decode(actual_meta['iv'])
        enc_etag = encrypt(plaintext_etag, object_key, etag_iv)
        self.assertEqual(enc_etag, actual)
        # verify etag MAC for conditional requests
        actual_hmac = base64.b64decode(
            req_hdrs['X-Object-Sysmeta-Crypto-Etag-Mac'])
        self.assertEqual(actual_hmac, hmac.new(
            object_key, plaintext_etag, hashlib.sha256).digest())
        # verify encrypted etag for container update
        self.assertIn(
            'X-Object-Sysmeta-Container-Update-Override-Etag', req_hdrs)
        parts = req_hdrs[
            'X-Object-Sysmeta-Container-Update-Override-Etag'].rsplit(';', 1)
        self.assertEqual(2, len(parts))
        # extract crypto_meta from end of etag for container update
        param = parts[1].strip()
        crypto_meta_tag = 'swift_meta='
        self.assertTrue(param.startswith(crypto_meta_tag), param)
        actual_meta = json.loads(
            urlparse.unquote_plus(param[len(crypto_meta_tag):]))
        self.assertEqual(Crypto().cipher, actual_meta['cipher'])
        self.assertEqual(fetch_crypto_keys()['id'], actual_meta['key_id'])
        cont_key = fetch_crypto_keys()['container']
        cont_etag_iv = base64.b64decode(actual_meta['iv'])
        self.assertEqual(FAKE_IV, cont_etag_iv)
        self.assertEqual(encrypt(plaintext_etag, cont_key, cont_etag_iv),
                         base64.b64decode(parts[0]))
        # content-type is not encrypted
        self.assertEqual('text/plain', req_hdrs['Content-Type'])
        # user meta is encrypted
        self._verify_user_metadata(req_hdrs, 'Test', 'encrypt me', object_key)
        self._verify_user_metadata(
            req_hdrs, 'Etag', 'not to be confused with the Etag!', object_key)
        # sysmeta is not encrypted
        self.assertEqual('do not encrypt me',
                         req_hdrs['X-Object-Sysmeta-Test'])
        # verify object is encrypted by getting direct from the app
        get_req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'GET'})
        resp = get_req.get_response(self.app)
        self.assertEqual(ciphertext, resp.body)
        self.assertEqual(ciphertext_etag, resp.headers['Etag'])
    def test_PUT_zero_size_object(self):
        """PUT of a zero-byte object: body and etag encryption is skipped,
        but user metadata is still encrypted."""
        # object body encryption should be skipped for zero sized object body
        object_key = fetch_crypto_keys()['object']
        plaintext_etag = EMPTY_ETAG
        env = {'REQUEST_METHOD': 'PUT',
               CRYPTO_KEY_CALLBACK: fetch_crypto_keys}
        hdrs = {'etag': EMPTY_ETAG,
                'content-type': 'text/plain',
                'content-length': '0',
                'x-object-meta-etag': 'not to be confused with the Etag!',
                'x-object-meta-test': 'encrypt me',
                'x-object-sysmeta-test': 'do not encrypt me'}
        req = Request.blank(
            '/v1/a/c/o', environ=env, body='', headers=hdrs)
        self.app.register('PUT', '/v1/a/c/o', HTTPCreated, {})
        resp = req.get_response(self.encrypter)
        self.assertEqual('201 Created', resp.status)
        self.assertEqual(plaintext_etag, resp.headers['Etag'])
        self.assertEqual(1, len(self.app.calls), self.app.calls)
        self.assertEqual('PUT', self.app.calls[0][0])
        req_hdrs = self.app.headers[0]
        # verify that there is no body crypto meta
        self.assertNotIn('X-Object-Sysmeta-Crypto-Meta', req_hdrs)
        # verify etag is md5 of plaintext
        self.assertEqual(EMPTY_ETAG, req_hdrs['Etag'])
        # verify there is no etag crypto meta
        self.assertNotIn('X-Object-Sysmeta-Crypto-Etag', req_hdrs)
        # verify there is no container update override for etag
        self.assertNotIn(
            'X-Object-Sysmeta-Container-Update-Override-Etag', req_hdrs)
        # user meta is still encrypted
        self._verify_user_metadata(req_hdrs, 'Test', 'encrypt me', object_key)
        self._verify_user_metadata(
            req_hdrs, 'Etag', 'not to be confused with the Etag!', object_key)
        # sysmeta is not encrypted
        self.assertEqual('do not encrypt me',
                         req_hdrs['X-Object-Sysmeta-Test'])
        # verify object is empty by getting direct from the app
        get_req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'GET'})
        resp = get_req.get_response(self.app)
        self.assertEqual('', resp.body)
        self.assertEqual(EMPTY_ETAG, resp.headers['Etag'])
    def _test_PUT_with_other_footers(self, override_etag):
        """Check PUT when another middleware supplies footers via the
        ``swift.callback.update_footers`` callback.

        :param override_etag: plaintext container-update override etag that
            the other middleware places in its footers; it should reach the
            app encrypted with the container key.
        """
        # verify handling of another middleware's footer callback
        cont_key = fetch_crypto_keys()['container']
        body_key = os.urandom(32)
        object_key = fetch_crypto_keys()['object']
        plaintext = 'FAKE APP'
        plaintext_etag = md5hex(plaintext)
        ciphertext = encrypt(plaintext, body_key, FAKE_IV)
        ciphertext_etag = md5hex(ciphertext)
        other_footers = {
            'Etag': plaintext_etag,
            'X-Object-Sysmeta-Other': 'other sysmeta',
            'X-Object-Sysmeta-Container-Update-Override-Size':
                'other override',
            'X-Object-Sysmeta-Container-Update-Override-Etag':
                override_etag}
        env = {'REQUEST_METHOD': 'PUT',
               CRYPTO_KEY_CALLBACK: fetch_crypto_keys,
               'swift.callback.update_footers':
                   lambda footers: footers.update(other_footers)}
        hdrs = {'content-type': 'text/plain',
                'content-length': str(len(plaintext)),
                'Etag': 'correct etag is in footers'}
        req = Request.blank(
            '/v1/a/c/o', environ=env, body=plaintext, headers=hdrs)
        self.app.register('PUT', '/v1/a/c/o', HTTPCreated, {})
        with mock.patch(
                'swift.common.middleware.crypto.crypto_utils.'
                'Crypto.create_random_key',
                lambda *args: body_key):
            resp = req.get_response(self.encrypter)
        self.assertEqual('201 Created', resp.status)
        self.assertEqual(plaintext_etag, resp.headers['Etag'])
        # verify metadata items
        self.assertEqual(1, len(self.app.calls), self.app.calls)
        self.assertEqual('PUT', self.app.calls[0][0])
        req_hdrs = self.app.headers[0]
        # verify that other middleware's footers made it to app, including any
        # container update overrides but nothing Etag-related
        other_footers.pop('Etag')
        other_footers.pop('X-Object-Sysmeta-Container-Update-Override-Etag')
        for k, v in other_footers.items():
            self.assertEqual(v, req_hdrs[k])
        # verify encryption footers are ok
        encrypted_etag, _junk, etag_meta = \
            req_hdrs['X-Object-Sysmeta-Crypto-Etag'].partition('; swift_meta=')
        self.assertTrue(etag_meta)
        actual_meta = json.loads(urlparse.unquote_plus(etag_meta))
        self.assertEqual(Crypto().cipher, actual_meta['cipher'])
        self.assertEqual(ciphertext_etag, req_hdrs['Etag'])
        actual = base64.b64decode(encrypted_etag)
        etag_iv = base64.b64decode(actual_meta['iv'])
        self.assertEqual(encrypt(plaintext_etag, object_key, etag_iv), actual)
        # verify encrypted etag for container update
        self.assertIn(
            'X-Object-Sysmeta-Container-Update-Override-Etag', req_hdrs)
        parts = req_hdrs[
            'X-Object-Sysmeta-Container-Update-Override-Etag'].rsplit(';', 1)
        self.assertEqual(2, len(parts))
        # extract crypto_meta from end of etag for container update
        param = parts[1].strip()
        crypto_meta_tag = 'swift_meta='
        self.assertTrue(param.startswith(crypto_meta_tag), param)
        actual_meta = json.loads(
            urlparse.unquote_plus(param[len(crypto_meta_tag):]))
        self.assertEqual(Crypto().cipher, actual_meta['cipher'])
        cont_key = fetch_crypto_keys()['container']
        cont_etag_iv = base64.b64decode(actual_meta['iv'])
        self.assertEqual(FAKE_IV, cont_etag_iv)
        self.assertEqual(encrypt(override_etag, cont_key, cont_etag_iv),
                         base64.b64decode(parts[0]))
        # verify body crypto meta
        actual = req_hdrs['X-Object-Sysmeta-Crypto-Body-Meta']
        actual = json.loads(urlparse.unquote_plus(actual))
        self.assertEqual(Crypto().cipher, actual['cipher'])
        self.assertEqual(FAKE_IV, base64.b64decode(actual['iv']))
        # verify wrapped body key
        expected_wrapped_key = encrypt(body_key, object_key, FAKE_IV)
        self.assertEqual(expected_wrapped_key,
                         base64.b64decode(actual['body_key']['key']))
        self.assertEqual(FAKE_IV,
                         base64.b64decode(actual['body_key']['iv']))
        self.assertEqual(fetch_crypto_keys()['id'], actual['key_id'])
    def test_PUT_with_other_footers(self):
        """Footer callback with an arbitrary override etag value."""
        self._test_PUT_with_other_footers('override etag')
    def test_PUT_with_other_footers_and_empty_etag(self):
        """Footer override etag of EMPTY_ETAG is still encrypted when the
        request body is non-zero length."""
        # verify that an override etag value of EMPTY_ETAG will be encrypted
        # when there was a non-zero body length
        self._test_PUT_with_other_footers(EMPTY_ETAG)
    def _test_PUT_with_etag_override_in_headers(self, override_etag):
        """Check PUT when the container-update override etag arrives in
        request *headers* (rather than footers) from another middleware.

        :param override_etag: plaintext override etag expected to reach the
            app encrypted with the container key.
        """
        # verify handling of another middleware's
        # container-update-override-etag in headers
        plaintext = 'FAKE APP'
        plaintext_etag = md5hex(plaintext)
        env = {'REQUEST_METHOD': 'PUT',
               CRYPTO_KEY_CALLBACK: fetch_crypto_keys}
        hdrs = {'content-type': 'text/plain',
                'content-length': str(len(plaintext)),
                'Etag': plaintext_etag,
                'X-Object-Sysmeta-Container-Update-Override-Etag':
                    override_etag}
        req = Request.blank(
            '/v1/a/c/o', environ=env, body=plaintext, headers=hdrs)
        self.app.register('PUT', '/v1/a/c/o', HTTPCreated, {})
        resp = req.get_response(self.encrypter)
        self.assertEqual('201 Created', resp.status)
        self.assertEqual(plaintext_etag, resp.headers['Etag'])
        # verify metadata items
        self.assertEqual(1, len(self.app.calls), self.app.calls)
        self.assertEqual(('PUT', '/v1/a/c/o'), self.app.calls[0])
        req_hdrs = self.app.headers[0]
        # verify encrypted etag for container update
        self.assertIn(
            'X-Object-Sysmeta-Container-Update-Override-Etag', req_hdrs)
        parts = req_hdrs[
            'X-Object-Sysmeta-Container-Update-Override-Etag'].rsplit(';', 1)
        self.assertEqual(2, len(parts))
        cont_key = fetch_crypto_keys()['container']
        # extract crypto_meta from end of etag for container update
        param = parts[1].strip()
        crypto_meta_tag = 'swift_meta='
        self.assertTrue(param.startswith(crypto_meta_tag), param)
        actual_meta = json.loads(
            urlparse.unquote_plus(param[len(crypto_meta_tag):]))
        self.assertEqual(Crypto().cipher, actual_meta['cipher'])
        self.assertEqual(fetch_crypto_keys()['id'], actual_meta['key_id'])
        cont_etag_iv = base64.b64decode(actual_meta['iv'])
        self.assertEqual(FAKE_IV, cont_etag_iv)
        self.assertEqual(encrypt(override_etag, cont_key, cont_etag_iv),
                         base64.b64decode(parts[0]))
    def test_PUT_with_etag_override_in_headers(self):
        """Header-supplied override etag with an arbitrary value."""
        self._test_PUT_with_etag_override_in_headers('override_etag')
    def test_PUT_with_etag_override_in_headers_and_empty_etag(self):
        """Header override etag of EMPTY_ETAG is still encrypted when the
        request body is non-zero length."""
        # verify that an override etag value of EMPTY_ETAG will be encrypted
        # when there was a non-zero body length
        self._test_PUT_with_etag_override_in_headers(EMPTY_ETAG)
    def test_PUT_with_bad_etag_in_other_footers(self):
        """A bad etag in footers overrides a good header etag and makes the
        PUT fail with 422."""
        # verify that etag supplied in footers from other middleware overrides
        # header etag when validating inbound plaintext etags
        plaintext = 'FAKE APP'
        plaintext_etag = md5hex(plaintext)
        other_footers = {
            'Etag': 'bad etag',
            'X-Object-Sysmeta-Other': 'other sysmeta',
            'X-Object-Sysmeta-Container-Update-Override-Etag':
                'other override'}
        env = {'REQUEST_METHOD': 'PUT',
               CRYPTO_KEY_CALLBACK: fetch_crypto_keys,
               'swift.callback.update_footers':
                   lambda footers: footers.update(other_footers)}
        hdrs = {'content-type': 'text/plain',
                'content-length': str(len(plaintext)),
                'Etag': plaintext_etag}
        req = Request.blank(
            '/v1/a/c/o', environ=env, body=plaintext, headers=hdrs)
        self.app.register('PUT', '/v1/a/c/o', HTTPCreated, {})
        resp = req.get_response(self.encrypter)
        self.assertEqual('422 Unprocessable Entity', resp.status)
        self.assertNotIn('Etag', resp.headers)
    def test_PUT_with_bad_etag_in_headers_and_other_footers(self):
        """A bad header etag is used for validation (and fails with 422)
        when the footers do not supply an etag."""
        # verify that etag supplied in headers from other middleware is used if
        # none is supplied in footers when validating inbound plaintext etags
        plaintext = 'FAKE APP'
        other_footers = {
            'X-Object-Sysmeta-Other': 'other sysmeta',
            'X-Object-Sysmeta-Container-Update-Override-Etag':
                'other override'}
        env = {'REQUEST_METHOD': 'PUT',
               CRYPTO_KEY_CALLBACK: fetch_crypto_keys,
               'swift.callback.update_footers':
                   lambda footers: footers.update(other_footers)}
        hdrs = {'content-type': 'text/plain',
                'content-length': str(len(plaintext)),
                'Etag': 'bad etag'}
        req = Request.blank(
            '/v1/a/c/o', environ=env, body=plaintext, headers=hdrs)
        self.app.register('PUT', '/v1/a/c/o', HTTPCreated, {})
        resp = req.get_response(self.encrypter)
        self.assertEqual('422 Unprocessable Entity', resp.status)
        self.assertNotIn('Etag', resp.headers)
    def test_PUT_nothing_read(self):
        """If a downstream app never reads the input stream, no encryption
        footers are emitted, but an upstream footer callback's override etag
        for a non-empty body is still encrypted (and an EMPTY_ETAG override
        is passed through unencrypted)."""
        # simulate an artificial scenario of a downstream filter/app not
        # actually reading the input stream from encrypter.
        class NonReadingApp(object):
            def __call__(self, env, start_response):
                # note: no read from wsgi.input
                req = Request(env)
                env['swift.callback.update_footers'](req.headers)
                call_headers.append(req.headers)
                resp = HTTPCreated(req=req, headers={'Etag': 'response etag'})
                return resp(env, start_response)
        env = {'REQUEST_METHOD': 'PUT',
               CRYPTO_KEY_CALLBACK: fetch_crypto_keys}
        hdrs = {'content-type': 'text/plain',
                'content-length': 0,
                'etag': 'etag from client'}
        req = Request.blank('/v1/a/c/o', environ=env, body='', headers=hdrs)
        call_headers = []
        resp = req.get_response(encrypter.Encrypter(NonReadingApp(), {}))
        self.assertEqual('201 Created', resp.status)
        self.assertEqual('response etag', resp.headers['Etag'])
        self.assertEqual(1, len(call_headers))
        self.assertEqual('etag from client', call_headers[0]['etag'])
        # verify no encryption footers
        for k in call_headers[0]:
            self.assertFalse(k.lower().startswith('x-object-sysmeta-crypto-'))
        # check that an upstream footer callback gets called
        other_footers = {
            'Etag': EMPTY_ETAG,
            'X-Object-Sysmeta-Other': 'other sysmeta',
            'X-Object-Sysmeta-Container-Update-Override-Etag':
                'other override'}
        env.update({'swift.callback.update_footers':
                    lambda footers: footers.update(other_footers)})
        req = Request.blank('/v1/a/c/o', environ=env, body='', headers=hdrs)
        call_headers = []
        resp = req.get_response(encrypter.Encrypter(NonReadingApp(), {}))
        self.assertEqual('201 Created', resp.status)
        self.assertEqual('response etag', resp.headers['Etag'])
        self.assertEqual(1, len(call_headers))
        # verify encrypted override etag for container update.
        self.assertIn(
            'X-Object-Sysmeta-Container-Update-Override-Etag', call_headers[0])
        parts = call_headers[0][
            'X-Object-Sysmeta-Container-Update-Override-Etag'].rsplit(';', 1)
        self.assertEqual(2, len(parts))
        cont_key = fetch_crypto_keys()['container']
        param = parts[1].strip()
        crypto_meta_tag = 'swift_meta='
        self.assertTrue(param.startswith(crypto_meta_tag), param)
        actual_meta = json.loads(
            urlparse.unquote_plus(param[len(crypto_meta_tag):]))
        self.assertEqual(Crypto().cipher, actual_meta['cipher'])
        self.assertEqual(fetch_crypto_keys()['id'], actual_meta['key_id'])
        cont_etag_iv = base64.b64decode(actual_meta['iv'])
        self.assertEqual(FAKE_IV, cont_etag_iv)
        self.assertEqual(encrypt('other override', cont_key, cont_etag_iv),
                         base64.b64decode(parts[0]))
        # verify that other middleware's footers made it to app
        other_footers.pop('X-Object-Sysmeta-Container-Update-Override-Etag')
        for k, v in other_footers.items():
            self.assertEqual(v, call_headers[0][k])
        # verify no encryption footers
        for k in call_headers[0]:
            self.assertFalse(k.lower().startswith('x-object-sysmeta-crypto-'))
        # if upstream footer override etag is for an empty body then check that
        # it is not encrypted
        other_footers = {
            'Etag': EMPTY_ETAG,
            'X-Object-Sysmeta-Container-Update-Override-Etag': EMPTY_ETAG}
        env.update({'swift.callback.update_footers':
                    lambda footers: footers.update(other_footers)})
        req = Request.blank('/v1/a/c/o', environ=env, body='', headers=hdrs)
        call_headers = []
        resp = req.get_response(encrypter.Encrypter(NonReadingApp(), {}))
        self.assertEqual('201 Created', resp.status)
        self.assertEqual('response etag', resp.headers['Etag'])
        self.assertEqual(1, len(call_headers))
        # verify that other middleware's footers made it to app
        for k, v in other_footers.items():
            self.assertEqual(v, call_headers[0][k])
        # verify no encryption footers
        for k in call_headers[0]:
            self.assertFalse(k.lower().startswith('x-object-sysmeta-crypto-'))
    def test_POST_req(self):
        """POST: user metadata is encrypted (except empty values), sysmeta
        passes through unchanged, and no Etag appears in the response."""
        body = 'FAKE APP'
        env = {'REQUEST_METHOD': 'POST',
               CRYPTO_KEY_CALLBACK: fetch_crypto_keys}
        hdrs = {'x-object-meta-test': 'encrypt me',
                'x-object-meta-test2': '',
                'x-object-sysmeta-test': 'do not encrypt me'}
        req = Request.blank('/v1/a/c/o', environ=env, body=body, headers=hdrs)
        key = fetch_crypto_keys()['object']
        self.app.register('POST', '/v1/a/c/o', HTTPAccepted, {})
        resp = req.get_response(self.encrypter)
        self.assertEqual('202 Accepted', resp.status)
        self.assertNotIn('Etag', resp.headers)
        # verify metadata items
        self.assertEqual(1, len(self.app.calls), self.app.calls)
        self.assertEqual('POST', self.app.calls[0][0])
        req_hdrs = self.app.headers[0]
        # user meta is encrypted
        self._verify_user_metadata(req_hdrs, 'Test', 'encrypt me', key)
        # unless it had no value
        self.assertEqual('', req_hdrs['X-Object-Meta-Test2'])
        # sysmeta is not encrypted
        self.assertEqual('do not encrypt me',
                         req_hdrs['X-Object-Sysmeta-Test'])
    def _test_no_user_metadata(self, method):
        """Assert no transient crypto-meta header is added by ``method``
        requests that carry no user metadata."""
        # verify that x-object-transient-sysmeta-crypto-meta is not set when
        # there is no user metadata
        env = {'REQUEST_METHOD': method,
               CRYPTO_KEY_CALLBACK: fetch_crypto_keys}
        req = Request.blank('/v1/a/c/o', environ=env, body='body')
        self.app.register(method, '/v1/a/c/o', HTTPAccepted, {})
        resp = req.get_response(self.encrypter)
        self.assertEqual('202 Accepted', resp.status)
        self.assertEqual(1, len(self.app.calls), self.app.calls)
        self.assertEqual(method, self.app.calls[0][0])
        self.assertNotIn('x-object-transient-sysmeta-crypto-meta',
                         self.app.headers[0])
    def test_PUT_no_user_metadata(self):
        """PUT without user metadata adds no transient crypto-meta."""
        self._test_no_user_metadata('PUT')
    def test_POST_no_user_metadata(self):
        """POST without user metadata adds no transient crypto-meta."""
        self._test_no_user_metadata('POST')
    def _test_if_match(self, method, match_header_name):
        """Verify conditional-request handling for ``method`` requests.

        Plaintext etags in ``match_header_name`` should be supplemented with
        HMAC-masked values, the alternate etag location advertised via
        X-Backend-Etag-Is-At, and the request environ restored afterwards.
        """
        def do_test(method, plain_etags, expected_plain_etags=None):
            env = {CRYPTO_KEY_CALLBACK: fetch_crypto_keys}
            match_header_value = ', '.join(plain_etags)
            req = Request.blank(
                '/v1/a/c/o', environ=env, method=method,
                headers={match_header_name: match_header_value})
            app = FakeSwift()
            app.register(method, '/v1/a/c/o', HTTPOk, {})
            resp = req.get_response(encrypter.Encrypter(app, {}))
            self.assertEqual('200 OK', resp.status)
            self.assertEqual(1, len(app.calls), app.calls)
            self.assertEqual(method, app.calls[0][0])
            actual_headers = app.headers[0]
            # verify the alternate etag location has been specified
            if match_header_value and match_header_value != '*':
                self.assertIn('X-Backend-Etag-Is-At', actual_headers)
                self.assertEqual('X-Object-Sysmeta-Crypto-Etag-Mac',
                                 actual_headers['X-Backend-Etag-Is-At'])
            # verify etags have been supplemented with masked values
            self.assertIn(match_header_name, actual_headers)
            actual_etags = set(actual_headers[match_header_name].split(', '))
            key = fetch_crypto_keys()['object']
            masked_etags = [
                '"%s"' % base64.b64encode(hmac.new(
                    key, etag.strip('"'), hashlib.sha256).digest())
                for etag in plain_etags if etag not in ('*', '')]
            expected_etags = set((expected_plain_etags or plain_etags) +
                                 masked_etags)
            self.assertEqual(expected_etags, actual_etags)
            # check that the request environ was returned to original state
            self.assertEqual(set(plain_etags),
                             set(req.headers[match_header_name].split(', ')))
        do_test(method, [''])
        do_test(method, ['"an etag"'])
        do_test(method, ['"an etag"', '"another_etag"'])
        do_test(method, ['*'])
        # rfc2616 does not allow wildcard *and* etag but test it anyway
        do_test(method, ['*', '"an etag"'])
        # etags should be quoted but check we can cope if they are not
        do_test(
            method, ['*', 'an etag', 'another_etag'],
            expected_plain_etags=['*', '"an etag"', '"another_etag"'])
    def test_GET_if_match(self):
        """GET with If-Match gets masked etags added."""
        self._test_if_match('GET', 'If-Match')
    def test_HEAD_if_match(self):
        """HEAD with If-Match gets masked etags added."""
        self._test_if_match('HEAD', 'If-Match')
    def test_GET_if_none_match(self):
        """GET with If-None-Match gets masked etags added."""
        self._test_if_match('GET', 'If-None-Match')
    def test_HEAD_if_none_match(self):
        """HEAD with If-None-Match gets masked etags added."""
        self._test_if_match('HEAD', 'If-None-Match')
    def _test_existing_etag_is_at_header(self, method, match_header_name):
        """Assert that an X-Backend-Etag-Is-At value set by an earlier
        middleware is appended to, not overwritten."""
        # if another middleware has already set X-Backend-Etag-Is-At then
        # encrypter should not override that value
        env = {CRYPTO_KEY_CALLBACK: fetch_crypto_keys}
        req = Request.blank(
            '/v1/a/c/o', environ=env, method=method,
            headers={match_header_name: "an etag",
                     'X-Backend-Etag-Is-At': 'X-Object-Sysmeta-Other-Etag'})
        self.app.register(method, '/v1/a/c/o', HTTPOk, {})
        resp = req.get_response(self.encrypter)
        self.assertEqual('200 OK', resp.status)
        self.assertEqual(1, len(self.app.calls), self.app.calls)
        self.assertEqual(method, self.app.calls[0][0])
        actual_headers = self.app.headers[0]
        self.assertIn('X-Backend-Etag-Is-At', actual_headers)
        self.assertEqual(
            'X-Object-Sysmeta-Other-Etag,X-Object-Sysmeta-Crypto-Etag-Mac',
            actual_headers['X-Backend-Etag-Is-At'])
        actual_etags = set(actual_headers[match_header_name].split(', '))
        self.assertIn('"an etag"', actual_etags)
    def test_GET_if_match_with_existing_etag_is_at_header(self):
        """GET/If-Match preserves a pre-existing etag-is-at header."""
        self._test_existing_etag_is_at_header('GET', 'If-Match')
    def test_HEAD_if_match_with_existing_etag_is_at_header(self):
        """HEAD/If-Match preserves a pre-existing etag-is-at header."""
        self._test_existing_etag_is_at_header('HEAD', 'If-Match')
    def test_GET_if_none_match_with_existing_etag_is_at_header(self):
        """GET/If-None-Match preserves a pre-existing etag-is-at header."""
        self._test_existing_etag_is_at_header('GET', 'If-None-Match')
    def test_HEAD_if_none_match_with_existing_etag_is_at_header(self):
        """HEAD/If-None-Match preserves a pre-existing etag-is-at header."""
        self._test_existing_etag_is_at_header('HEAD', 'If-None-Match')
    def _test_etag_is_at_not_duplicated(self, method):
        """Assert the Crypto-Etag-Mac location appears only once in
        X-Backend-Etag-Is-At even when both If-Match and If-None-Match are
        present on the request."""
        # verify only one occurrence of X-Object-Sysmeta-Crypto-Etag-Mac in
        # X-Backend-Etag-Is-At
        key = fetch_crypto_keys()['object']
        env = {CRYPTO_KEY_CALLBACK: fetch_crypto_keys}
        req = Request.blank(
            '/v1/a/c/o', environ=env, method=method,
            headers={'If-Match': '"an etag"',
                     'If-None-Match': '"another etag"'})
        self.app.register(method, '/v1/a/c/o', HTTPOk, {})
        resp = req.get_response(self.encrypter)
        self.assertEqual('200 OK', resp.status)
        self.assertEqual(1, len(self.app.calls), self.app.calls)
        self.assertEqual(method, self.app.calls[0][0])
        actual_headers = self.app.headers[0]
        self.assertIn('X-Backend-Etag-Is-At', actual_headers)
        self.assertEqual('X-Object-Sysmeta-Crypto-Etag-Mac',
                         actual_headers['X-Backend-Etag-Is-At'])
        self.assertIn('"%s"' % base64.b64encode(
            hmac.new(key, 'an etag', hashlib.sha256).digest()),
            actual_headers['If-Match'])
        self.assertIn('"another etag"', actual_headers['If-None-Match'])
        self.assertIn('"%s"' % base64.b64encode(
            hmac.new(key, 'another etag', hashlib.sha256).digest()),
            actual_headers['If-None-Match'])
    def test_GET_etag_is_at_not_duplicated(self):
        """GET: etag-is-at location listed once only."""
        self._test_etag_is_at_not_duplicated('GET')
    def test_HEAD_etag_is_at_not_duplicated(self):
        """HEAD: etag-is-at location listed once only."""
        self._test_etag_is_at_not_duplicated('HEAD')
    def test_PUT_response_inconsistent_etag_is_not_replaced(self):
        """A successful PUT whose response etag does not match the
        ciphertext md5 keeps the app's etag rather than the plaintext one."""
        # if response is success but etag does not match the ciphertext md5
        # then verify that we do *not* replace it with the plaintext etag
        body = 'FAKE APP'
        env = {'REQUEST_METHOD': 'PUT',
               CRYPTO_KEY_CALLBACK: fetch_crypto_keys}
        hdrs = {'content-type': 'text/plain',
                'content-length': str(len(body))}
        req = Request.blank('/v1/a/c/o', environ=env, body=body, headers=hdrs)
        self.app.register('PUT', '/v1/a/c/o', HTTPCreated,
                          {'Etag': 'not the ciphertext etag'})
        resp = req.get_response(self.encrypter)
        self.assertEqual('201 Created', resp.status)
        self.assertEqual('not the ciphertext etag', resp.headers['Etag'])
    def test_PUT_multiseg_no_client_etag(self):
        """Chunked request body with no client etag is encrypted."""
        body_key = os.urandom(32)
        chunks = ['some', 'chunks', 'of data']
        body = ''.join(chunks)
        env = {'REQUEST_METHOD': 'PUT',
               CRYPTO_KEY_CALLBACK: fetch_crypto_keys,
               'wsgi.input': FileLikeIter(chunks)}
        hdrs = {'content-type': 'text/plain',
                'content-length': str(len(body))}
        req = Request.blank('/v1/a/c/o', environ=env, headers=hdrs)
        self.app.register('PUT', '/v1/a/c/o', HTTPCreated, {})
        with mock.patch(
                'swift.common.middleware.crypto.crypto_utils.'
                'Crypto.create_random_key',
                lambda *args: body_key):
            resp = req.get_response(self.encrypter)
        self.assertEqual('201 Created', resp.status)
        # verify object is encrypted by getting direct from the app
        get_req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'GET'})
        self.assertEqual(encrypt(body, body_key, FAKE_IV),
                         get_req.get_response(self.app).body)
    def test_PUT_multiseg_good_client_etag(self):
        """Chunked request body with a matching client etag succeeds and the
        stored body is encrypted."""
        body_key = os.urandom(32)
        chunks = ['some', 'chunks', 'of data']
        body = ''.join(chunks)
        env = {'REQUEST_METHOD': 'PUT',
               CRYPTO_KEY_CALLBACK: fetch_crypto_keys,
               'wsgi.input': FileLikeIter(chunks)}
        hdrs = {'content-type': 'text/plain',
                'content-length': str(len(body)),
                'Etag': md5hex(body)}
        req = Request.blank('/v1/a/c/o', environ=env, headers=hdrs)
        self.app.register('PUT', '/v1/a/c/o', HTTPCreated, {})
        with mock.patch(
                'swift.common.middleware.crypto.crypto_utils.'
                'Crypto.create_random_key',
                lambda *args: body_key):
            resp = req.get_response(self.encrypter)
        self.assertEqual('201 Created', resp.status)
        # verify object is encrypted by getting direct from the app
        get_req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'GET'})
        self.assertEqual(encrypt(body, body_key, FAKE_IV),
                         get_req.get_response(self.app).body)
    def test_PUT_multiseg_bad_client_etag(self):
        """Chunked request body with a mismatching client etag fails 422."""
        chunks = ['some', 'chunks', 'of data']
        body = ''.join(chunks)
        env = {'REQUEST_METHOD': 'PUT',
               CRYPTO_KEY_CALLBACK: fetch_crypto_keys,
               'wsgi.input': FileLikeIter(chunks)}
        hdrs = {'content-type': 'text/plain',
                'content-length': str(len(body)),
                'Etag': 'badclientetag'}
        req = Request.blank('/v1/a/c/o', environ=env, headers=hdrs)
        self.app.register('PUT', '/v1/a/c/o', HTTPCreated, {})
        resp = req.get_response(self.encrypter)
        self.assertEqual('422 Unprocessable Entity', resp.status)
    def test_PUT_missing_key_callback(self):
        """PUT with no keymaster callback in the environ fails with 500 and
        logs a 'missing callback' error."""
        body = 'FAKE APP'
        env = {'REQUEST_METHOD': 'PUT'}
        hdrs = {'content-type': 'text/plain',
                'content-length': str(len(body))}
        req = Request.blank('/v1/a/c/o', environ=env, body=body, headers=hdrs)
        resp = req.get_response(self.encrypter)
        self.assertEqual('500 Internal Error', resp.status)
        self.assertIn('missing callback',
                      self.encrypter.logger.get_lines_for_level('error')[0])
        self.assertEqual('Unable to retrieve encryption keys.', resp.body)
    def test_PUT_error_in_key_callback(self):
        """PUT whose keymaster callback raises fails with 500 and logs the
        callback exception."""
        def raise_exc():
            raise Exception('Testing')
        body = 'FAKE APP'
        env = {'REQUEST_METHOD': 'PUT',
               CRYPTO_KEY_CALLBACK: raise_exc}
        hdrs = {'content-type': 'text/plain',
                'content-length': str(len(body))}
        req = Request.blank('/v1/a/c/o', environ=env, body=body, headers=hdrs)
        resp = req.get_response(self.encrypter)
        self.assertEqual('500 Internal Error', resp.status)
        self.assertIn('from callback: Testing',
                      self.encrypter.logger.get_lines_for_level('error')[0])
        self.assertEqual('Unable to retrieve encryption keys.', resp.body)
    def test_PUT_encryption_override(self):
        """With swift.crypto.override set, the body and footers pass through
        to the app without encryption."""
        # set crypto override to disable encryption.
        # simulate another middleware wanting to set footers
        other_footers = {
            'Etag': 'other etag',
            'X-Object-Sysmeta-Other': 'other sysmeta',
            'X-Object-Sysmeta-Container-Update-Override-Etag':
                'other override'}
        body = 'FAKE APP'
        env = {'REQUEST_METHOD': 'PUT',
               'swift.crypto.override': True,
               'swift.callback.update_footers':
                   lambda footers: footers.update(other_footers)}
        hdrs = {'content-type': 'text/plain',
                'content-length': str(len(body))}
        req = Request.blank('/v1/a/c/o', environ=env, body=body, headers=hdrs)
        self.app.register('PUT', '/v1/a/c/o', HTTPCreated, {})
        resp = req.get_response(self.encrypter)
        self.assertEqual('201 Created', resp.status)
        # verify that other middleware's footers made it to app
        req_hdrs = self.app.headers[0]
        for k, v in other_footers.items():
            self.assertEqual(v, req_hdrs[k])
        # verify object is NOT encrypted by getting direct from the app
        get_req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'GET'})
        self.assertEqual(body, get_req.get_response(self.app).body)
    def _test_constraints_checking(self, method):
        """Assert metadata constraint checks are enforced for *method*.

        :param method: HTTP verb to exercise ('PUT' or 'POST')
        """
        # verify that the check_metadata function is called on PUT and POST
        body = 'FAKE APP'
        env = {'REQUEST_METHOD': method,
               CRYPTO_KEY_CALLBACK: fetch_crypto_keys}
        hdrs = {'content-type': 'text/plain',
                'content-length': str(len(body))}
        req = Request.blank('/v1/a/c/o', environ=env, body=body, headers=hdrs)
        mocked_func = 'swift.common.middleware.crypto.encrypter.check_metadata'
        with mock.patch(mocked_func) as mocked:
            # Force the constraint check to fail so we can observe both the
            # resulting client error and the arguments it was called with.
            mocked.side_effect = [HTTPBadRequest('testing')]
            resp = req.get_response(self.encrypter)
        self.assertEqual('400 Bad Request', resp.status)
        self.assertEqual(1, mocked.call_count)
        mocked.assert_called_once_with(mock.ANY, 'object')
        # The request passed to check_metadata must be the client request.
        self.assertEqual(req.headers,
                         mocked.call_args_list[0][0][0].headers)
    def test_PUT_constraints_checking(self):
        """PUT requests must be subject to metadata constraint checks."""
        self._test_constraints_checking('PUT')
    def test_POST_constraints_checking(self):
        """POST requests must be subject to metadata constraint checks."""
        self._test_constraints_checking('POST')
def test_config_true_value_on_disable_encryption(self):
app = FakeSwift()
self.assertFalse(encrypter.Encrypter(app, {}).disable_encryption)
for val in ('true', '1', 'yes', 'on', 't', 'y'):
app = encrypter.Encrypter(app,
{'disable_encryption': val})
self.assertTrue(app.disable_encryption)
    def test_PUT_app_exception(self):
        """An HTTPException raised by the wrapped app propagates unchanged."""
        app = encrypter.Encrypter(FakeAppThatExcepts(HTTPException), {})
        req = Request.blank('/', environ={'REQUEST_METHOD': 'PUT'})
        with self.assertRaises(HTTPException) as catcher:
            req.get_response(app)
        self.assertEqual(FakeAppThatExcepts.MESSAGE, catcher.exception.body)
def test_encrypt_header_val(self):
# Prepare key and Crypto instance
object_key = fetch_crypto_keys()['object']
# - Normal string can be crypted
encrypted = encrypter.encrypt_header_val(Crypto(), 'aaa', object_key)
# sanity: return value is 2 item tuple
self.assertEqual(2, len(encrypted))
crypted_val, crypt_info = encrypted
expected_crypt_val = base64.b64encode(
encrypt('aaa', object_key, FAKE_IV))
expected_crypt_info = {
'cipher': 'AES_CTR_256', 'iv': 'This is an IV123'}
self.assertEqual(expected_crypt_val, crypted_val)
self.assertEqual(expected_crypt_info, crypt_info)
# - Empty string raises a ValueError for safety
with self.assertRaises(ValueError) as cm:
encrypter.encrypt_header_val(Crypto(), '', object_key)
self.assertEqual('empty value is not acceptable',
cm.exception.message)
# - None also raises a ValueError for safety
with self.assertRaises(ValueError) as cm:
encrypter.encrypt_header_val(Crypto(), None, object_key)
self.assertEqual('empty value is not acceptable',
cm.exception.message)
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
yugangw-msft/azure-cli | src/azure-cli/azure/cli/command_modules/servicefabric/tests/latest/test_sf_managed_cluster.py | 1 | 7862 | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import unittest
from azure.cli.command_modules.servicefabric.tests.latest.test_util import (
_create_keyvault,
_add_selfsigned_cert_to_keyvault
)
from azure.cli.core.util import CLIError
from azure.cli.testsdk import ScenarioTest, LiveScenarioTest, ResourceGroupPreparer
from azure.mgmt.servicefabric.models import ErrorModelException
class ServiceFabricManagedClustersTests(ScenarioTest):
    """Live scenario tests for the ``az sf managed-cluster`` and
    ``az sf managed-node-type`` command groups.

    Each test provisions a Service Fabric managed cluster inside a
    throwaway resource group supplied by ``ResourceGroupPreparer`` and
    deletes it at the end, verifying the 'not found' exit afterwards.

    NOTE(review): ``assertRaisesRegexp`` is a deprecated alias of
    ``assertRaisesRegex`` on Python 3 -- confirm target runtimes before
    renaming.
    """

    @ResourceGroupPreparer()
    def test_basic_cluster(self):
        """Create a cluster + primary node type, then delete the cluster."""
        self.kwargs.update({
            'cert_tp': '123BDACDCDFB2C7B250192C6078E47D1E1DB119B',
            'loc': 'eastasia',
            'cluster_name': self.create_random_name('sfrp-cli-', 24),
            'vm_password': self.create_random_name('Pass@', 9)
        })

        self.cmd('az sf managed-cluster create -g {rg} -c {cluster_name} -l {loc} --cert-thumbprint {cert_tp} --cert-is-admin --admin-password {vm_password}',
                 checks=[self.check('provisioningState', 'Succeeded'),
                         self.check('clusterState', 'WaitingForNodes')])

        self.cmd('az sf managed-node-type create -g {rg} -c {cluster_name} -n pnt --instance-count 5 --primary',
                 checks=[self.check('provisioningState', 'Succeeded')])

        # Deleting the only primary node type must be rejected:
        # 'InvalidParameter - Cluster must have at least one active primary node type'
        with self.assertRaisesRegexp(ErrorModelException, 'Cluster must have at least one active primary node type'):
            self.cmd('az sf managed-node-type delete -g {rg} -c {cluster_name} -n pnt')

        self.cmd('az sf managed-cluster show -g {rg} -c {cluster_name}',
                 checks=[self.check('clusterState', 'Deploying')])

        self.cmd('az sf managed-cluster delete -g {rg} -c {cluster_name}')

        # SystemExit 3 'not found'
        with self.assertRaisesRegexp(SystemExit, '3'):
            self.cmd('az sf managed-cluster show -g {rg} -c {cluster_name}')

    @ResourceGroupPreparer()
    def test_node_type_operation(self):
        """Exercise node-type CRUD plus per-node restart/delete/reimage."""
        self.kwargs.update({
            'cert_tp': '123BDACDCDFB2C7B250192C6078E47D1E1DB119B',
            'loc': 'eastasia',
            'cluster_name': self.create_random_name('sfrp-cli-', 24),
            'vm_password': self.create_random_name('Pass@', 9)
        })

        # Secondary node types require the Standard SKU.
        self.cmd('az sf managed-cluster create -g {rg} -c {cluster_name} -l {loc} --cert-thumbprint {cert_tp} --cert-is-admin --admin-password {vm_password} --sku Standard',
                 checks=[self.check('provisioningState', 'Succeeded'),
                         self.check('clusterState', 'WaitingForNodes')])

        self.cmd('az sf managed-node-type create -g {rg} -c {cluster_name} -n pnt --instance-count 5 --primary',
                 checks=[self.check('provisioningState', 'Succeeded')])

        self.cmd('az sf managed-node-type list -g {rg} -c {cluster_name}',
                 checks=[self.check('length(@)', 1)])

        self.cmd('az sf managed-node-type create -g {rg} -c {cluster_name} -n snt --instance-count 6',
                 checks=[self.check('provisioningState', 'Succeeded')])

        self.cmd('az sf managed-node-type list -g {rg} -c {cluster_name}',
                 checks=[self.check('length(@)', 2)])

        # Node-level operations against the secondary node type.
        self.cmd('az sf managed-node-type node restart -g {rg} -c {cluster_name} -n snt --node-name snt_0 snt_1')

        self.cmd('az sf managed-node-type node delete -g {rg} -c {cluster_name} -n snt --node-name snt_1')

        self.cmd('az sf managed-node-type node reimage -g {rg} -c {cluster_name} -n snt --node-name snt_3')

        self.cmd('az sf managed-node-type delete -g {rg} -c {cluster_name} -n snt')

        # SystemExit 3 'not found'
        with self.assertRaisesRegexp(SystemExit, '3'):
            self.cmd('az sf managed-node-type show -g {rg} -c {cluster_name} -n snt')

        self.cmd('az sf managed-node-type list -g {rg} -c {cluster_name}',
                 checks=[self.check('length(@)', 1)])

        self.cmd('az sf managed-cluster delete -g {rg} -c {cluster_name}')

        # SystemExit 3 'not found'
        with self.assertRaisesRegexp(SystemExit, '3'):
            self.cmd('az sf managed-cluster show -g {rg} -c {cluster_name}')

    @ResourceGroupPreparer()
    def test_cert_and_ext(self):
        """Add a VM extension, a key-vault secret and a client certificate
        to a node type / cluster, then remove the client certificate."""
        self.kwargs.update({
            'cert_tp': '123BDACDCDFB2C7B250192C6078E47D1E1DB119B',
            'cert_tp2': '123BDACDCDFB2C7B250192C6078E47D1E1DB7777',
            'loc': 'eastasia',
            'cluster_name': self.create_random_name('sfrp-cli-', 24),
            'vm_password': self.create_random_name('Pass@', 9),
            'extName': 'csetest',
            'publisher': 'Microsoft.Compute',
            'extType': 'BGInfo',
            'extVer': '2.1',
            'kv_name': self.create_random_name('sfrp-cli-kv-', 24),
            'cert_name': self.create_random_name('sfrp-cli-', 24)
        })

        self.cmd('az sf managed-cluster create -g {rg} -c {cluster_name} -l {loc} --cert-thumbprint {cert_tp} --cert-is-admin --admin-password {vm_password}',
                 checks=[self.check('provisioningState', 'Succeeded'),
                         self.check('clusterState', 'WaitingForNodes')])

        self.cmd('az sf managed-node-type create -g {rg} -c {cluster_name} -n pnt --instance-count 5 --primary',
                 checks=[self.check('provisioningState', 'Succeeded')])

        # add extension
        self.cmd('az sf managed-node-type vm-extension add -g {rg} -c {cluster_name} -n pnt '
                 ' --extension-name {extName} --publisher {publisher} --extension-type {extType} --type-handler-version {extVer} --auto-upgrade-minor-version',
                 checks=[self.check('provisioningState', 'Succeeded')])

        self.cmd('az sf managed-node-type show -g {rg} -c {cluster_name} -n pnt',
                 checks=[self.check('length(vmExtensions)', 1)])

        # add secret
        kv = _create_keyvault(self, self.kwargs)
        self.kwargs.update({'kv_id': kv['id']})
        cert = _add_selfsigned_cert_to_keyvault(self, self.kwargs)
        cert_secret_id = cert['sid']
        self.kwargs.update({'cert_secret_id': cert_secret_id})

        self.cmd('az sf managed-node-type vm-secret add -g {rg} -c {cluster_name} -n pnt '
                 ' --source-vault-id {kv_id} --certificate-url {cert_secret_id} --certificate-store my',
                 checks=[self.check('provisioningState', 'Succeeded')])

        self.cmd('az sf managed-node-type show -g {rg} -c {cluster_name} -n pnt',
                 checks=[self.check('length(vmSecrets)', 1)])

        # add client cert (admin cert from creation + this one = 2 clients)
        self.cmd('az sf managed-cluster client-certificate add -g {rg} -c {cluster_name} --thumbprint {cert_tp2}',
                 checks=[self.check('provisioningState', 'Succeeded')])

        self.cmd('az sf managed-cluster show -g {rg} -c {cluster_name}',
                 checks=[self.check('length(clients)', 2)])

        # delete client cert
        self.cmd('az sf managed-cluster client-certificate delete -g {rg} -c {cluster_name} --thumbprint {cert_tp2}',
                 checks=[self.check('provisioningState', 'Succeeded')])

        self.cmd('az sf managed-cluster show -g {rg} -c {cluster_name}',
                 checks=[self.check('length(clients)', 1)])

        self.cmd('az sf managed-cluster delete -g {rg} -c {cluster_name}')

        # SystemExit 3 'not found'
        with self.assertRaisesRegexp(SystemExit, '3'):
            self.cmd('az sf managed-cluster show -g {rg} -c {cluster_name}')
if __name__ == '__main__':
unittest.main()
| mit |
PeachstoneIO/peachbox | peachbox/model/schema.py | 1 | 2622 | import pyspark.sql.types
from peachbox.model.file_format import FileFormat
import peachbox.model
class MasterSchema():
    """Base class for master-mart data unit schemas.

    Subclasses describe their columns via ``schema`` (a list of
    ``{'field': name, 'type': type_name}`` dicts); an implicit leading
    ``true_as_of_seconds`` integer column is always prepended.  The
    Spark artefacts (StructType schema, Row factory and lookup tables)
    are generated lazily and cached on the class.
    """

    data_unit_index = None        # integer identifying the data unit
    partition_key = None          # field used to derive partitions
    partition_granularity = None  # partition width (e.g. seconds)
    output_format = FileFormat.Parquet
    schema = None                 # column definitions, set by subclasses
    mart = 'master'

    # Lazily generated Spark artefacts (class-level caches).
    _spark_schema = None
    _spark_row = None
    _spark_indices = None
    _types = None
    _initialized = False

    @classmethod
    def target(cls):
        """Return the output target name derived from the data unit index."""
        return str(cls.data_unit_index)

    @classmethod
    def spark_schema(cls):
        """Return the cached StructType, generating it on first use."""
        if not cls._spark_schema:
            cls.generate_spark_schema()
        return cls._spark_schema

    @classmethod
    def initialize(cls):
        """Generate all Spark artefacts and mark the class as ready."""
        cls.generate_spark_schema()
        cls.generate_spark_row_definition()
        cls._initialized = True

    @classmethod
    def spark_row(cls, **kwargs):
        """Build a pyspark Row from keyword arguments, type-checking each.

        Fields that are not supplied remain None (all columns are
        nullable).  Raises TypeError when a value's exact type does not
        match the declared one.
        """
        if not cls._initialized:
            cls.initialize()
        # Size by the full column count so omitted fields become None
        # instead of producing a short, misaligned argument list for the
        # Row factory (which has one slot per schema column).
        values = [None] * len(cls._types)
        for field, value in kwargs.items():  # items() works on Python 2 and 3
            idx = cls._spark_indices[field]
            # Deliberately strict: type() comparison rather than
            # isinstance(), so e.g. bool is rejected where int is declared.
            if type(value) is not (cls._types[idx]):
                raise TypeError("%s:%s (type:%s) must be of type %s" %
                        (field, value, type(value), cls._types[idx]))
            else:
                values[idx] = value
        return cls._spark_row(*values)

    @classmethod
    def generate_spark_schema(cls):
        """Build the StructType plus the field-index and type lookup tables."""
        fields = []
        types = [None] * (len(cls.schema) + 1)
        indices = {}
        current_idx = 0

        # Implicit leading column recording when the row became valid.
        type_int = peachbox.model.Types.spark_type('IntegerType')
        fields.append(pyspark.sql.types.StructField('true_as_of_seconds', type_int, True))
        types[current_idx] = int
        indices['true_as_of_seconds'] = current_idx
        current_idx += 1

        for field in cls.schema:
            spark_type = peachbox.model.Types.spark_type(field['type'])
            fields.append(pyspark.sql.types.StructField(field['field'], spark_type, True))
            types[current_idx] = peachbox.model.Types.python_type(field['type'])
            indices[field['field']] = current_idx
            current_idx += 1

        cls._spark_schema = pyspark.sql.types.StructType(fields)
        cls._spark_indices = indices
        cls._types = types

    @classmethod
    def get_field_index(cls, field_name):
        """Return the positional index of *field_name* in the schema."""
        return cls._spark_indices[field_name]

    @classmethod
    def generate_spark_row_definition(cls):
        """Create the Row factory whose field order matches the schema."""
        names = [field.name for field in cls.spark_schema().fields]
        cls._spark_row = pyspark.sql.Row(*names)
| apache-2.0 |
appapantula/scikit-learn | examples/applications/plot_tomography_l1_reconstruction.py | 204 | 5442 | """
======================================================================
Compressive sensing: tomography reconstruction with L1 prior (Lasso)
======================================================================
This example shows the reconstruction of an image from a set of parallel
projections, acquired along different angles. Such a dataset is acquired in
**computed tomography** (CT).
Without any prior information on the sample, the number of projections
required to reconstruct the image is of the order of the linear size
``l`` of the image (in pixels). For simplicity we consider here a sparse
image, where only pixels on the boundary of objects have a non-zero
value. Such data could correspond for example to a cellular material.
Note however that most images are sparse in a different basis, such as
the Haar wavelets. Only ``l/7`` projections are acquired, therefore it is
necessary to use prior information available on the sample (its
sparsity): this is an example of **compressive sensing**.
The tomography projection operation is a linear transformation. In
addition to the data-fidelity term corresponding to a linear regression,
we penalize the L1 norm of the image to account for its sparsity. The
resulting optimization problem is called the :ref:`lasso`. We use the
class :class:`sklearn.linear_model.Lasso`, that uses the coordinate descent
algorithm. Importantly, this implementation is more computationally efficient
on a sparse matrix, than the projection operator used here.
The reconstruction with L1 penalization gives a result with zero error
(all pixels are successfully labeled with 0 or 1), even if noise was
added to the projections. In comparison, an L2 penalization
(:class:`sklearn.linear_model.Ridge`) produces a large number of labeling
errors for the pixels. Important artifacts are observed on the
reconstructed image, contrary to the L1 penalization. Note in particular
the circular artifact separating the pixels in the corners, that have
contributed to fewer projections than the central disk.
"""
print(__doc__)
# Author: Emmanuelle Gouillart <emmanuelle.gouillart@nsup.org>
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from scipy import ndimage
from sklearn.linear_model import Lasso
from sklearn.linear_model import Ridge
import matplotlib.pyplot as plt
def _weights(x, dx=1, orig=0):
x = np.ravel(x)
floor_x = np.floor((x - orig) / dx)
alpha = (x - orig - floor_x * dx) / dx
return np.hstack((floor_x, floor_x + 1)), np.hstack((1 - alpha, alpha))
def _generate_center_coordinates(l_x):
X, Y = np.mgrid[:l_x, :l_x]
center = l_x / 2.
X += 0.5 - center
Y += 0.5 - center
return X, Y
def build_projection_operator(l_x, n_dir):
    """ Compute the tomography design matrix.

    Parameters
    ----------

    l_x : int
        linear size of image array

    n_dir : int
        number of angles at which projections are acquired.
        NOTE(review): must be a true int -- np.linspace rejects a float
        ``num`` on modern NumPy.

    Returns
    -------
    p : sparse matrix of shape (n_dir l_x, l_x**2)
        Row = one detector bin at one angle, column = one image pixel;
        entries are the linear-interpolation weights.
    """
    X, Y = _generate_center_coordinates(l_x)
    angles = np.linspace(0, np.pi, n_dir, endpoint=False)
    data_inds, weights, camera_inds = [], [], []
    # Pixel indices are duplicated because each pixel contributes to the
    # two detector bins bracketing its projected position.
    data_unravel_indices = np.arange(l_x ** 2)
    data_unravel_indices = np.hstack((data_unravel_indices,
                                      data_unravel_indices))
    for i, angle in enumerate(angles):
        # Project every pixel center onto the detector axis for this angle.
        Xrot = np.cos(angle) * X - np.sin(angle) * Y
        inds, w = _weights(Xrot, dx=1, orig=X.min())
        # Drop contributions that fall outside the detector range.
        mask = np.logical_and(inds >= 0, inds < l_x)
        weights += list(w[mask])
        camera_inds += list(inds[mask] + i * l_x)
        data_inds += list(data_unravel_indices[mask])
    proj_operator = sparse.coo_matrix((weights, (camera_inds, data_inds)))
    return proj_operator
def generate_synthetic_data(l_x=None):
    """ Generate a synthetic sparse binary image.

    Parameters
    ----------
    l_x : int, optional
        Linear size of the (square) image; defaults to the module-level
        image size ``l``.

    Returns
    -------
    ndarray of bool, shape (l_x, l_x)
        Boundary pixels of random smoothed blobs inside a circular support.
    """
    if l_x is None:
        l_x = l  # fall back to the module-level image size
    rs = np.random.RandomState(0)
    n_pts = 36  # number of seed points; must be an int for rs.rand
    x, y = np.ogrid[0:l_x, 0:l_x]
    mask_outer = (x - l_x / 2.) ** 2 + (y - l_x / 2.) ** 2 < (l_x / 2.) ** 2
    mask = np.zeros((l_x, l_x))
    points = l_x * rs.rand(2, n_pts)
    # np.int was removed in NumPy 1.24; the builtin int dtype is equivalent.
    mask[(points[0]).astype(int), (points[1]).astype(int)] = 1
    mask = ndimage.gaussian_filter(mask, sigma=l_x / float(n_pts))
    res = np.logical_and(mask > mask.mean(), mask_outer)
    # '-' on boolean arrays was removed from NumPy; XOR keeps exactly the
    # boundary pixels (the region minus its binary erosion).
    return np.logical_xor(res, ndimage.binary_erosion(res))
# Generate synthetic images, and projections
l = 128
# Acquire l // 7 projection angles: np.linspace requires an integer count,
# so use floor division instead of the former float expression l / 7.
proj_operator = build_projection_operator(l, l // 7)
data = generate_synthetic_data()
proj = proj_operator * data.ravel()[:, np.newaxis]
proj += 0.15 * np.random.randn(*proj.shape)

# Reconstruction with L2 (Ridge) penalization
rgr_ridge = Ridge(alpha=0.2)
rgr_ridge.fit(proj_operator, proj.ravel())
rec_l2 = rgr_ridge.coef_.reshape(l, l)

# Reconstruction with L1 (Lasso) penalization
# the best value of alpha was determined using cross validation
# with LassoCV
rgr_lasso = Lasso(alpha=0.001)
rgr_lasso.fit(proj_operator, proj.ravel())
rec_l1 = rgr_lasso.coef_.reshape(l, l)

# Show the original image next to the two reconstructions.
plt.figure(figsize=(8, 3.3))
plt.subplot(131)
plt.imshow(data, cmap=plt.cm.gray, interpolation='nearest')
plt.axis('off')
plt.title('original image')
plt.subplot(132)
plt.imshow(rec_l2, cmap=plt.cm.gray, interpolation='nearest')
plt.title('L2 penalization')
plt.axis('off')
plt.subplot(133)
plt.imshow(rec_l1, cmap=plt.cm.gray, interpolation='nearest')
plt.title('L1 penalization')
plt.axis('off')

plt.subplots_adjust(hspace=0.01, wspace=0.01, top=1, bottom=0, left=0,
                    right=1)
plt.show()
| bsd-3-clause |
StackStorm/st2 | st2auth/tests/unit/test_validation_utils.py | 3 | 2505 | # Copyright 2020 The StackStorm Authors.
# Copyright 2019 Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest2
from oslo_config import cfg
from st2auth.validation import validate_auth_backend_is_correctly_configured
from st2tests import config as tests_config
__all__ = ["ValidationUtilsTestCase"]
class ValidationUtilsTestCase(unittest2.TestCase):
    """Tests for validate_auth_backend_is_correctly_configured().

    Overrides oslo.config values to simulate the various auth backend
    configurations.
    """

    def setUp(self):
        super(ValidationUtilsTestCase, self).setUp()
        tests_config.parse_args()

    def test_validate_auth_backend_is_correctly_configured_success(self):
        """The default test config must validate cleanly."""
        result = validate_auth_backend_is_correctly_configured()
        self.assertTrue(result)

    def test_validate_auth_backend_is_correctly_configured_invalid_backend(self):
        """An unknown auth mode must be rejected with a descriptive error."""
        cfg.CONF.set_override(group="auth", name="mode", override="invalid")

        expected_msg = (
            'Invalid auth mode "invalid" specified in the config. '
            "Valid modes are: proxy, standalone"
        )
        self.assertRaisesRegexp(
            ValueError, expected_msg, validate_auth_backend_is_correctly_configured
        )

    def test_validate_auth_backend_is_correctly_configured_backend_doesnt_expose_groups(
        self,
    ):
        # Flat file backend doesn't expose user group membership information
        # and hence doesn't provide the "has group info" capability
        cfg.CONF.set_override(group="auth", name="backend", override="flat_file")
        cfg.CONF.set_override(
            group="auth", name="backend_kwargs", override='{"file_path": "dummy"}'
        )
        cfg.CONF.set_override(group="rbac", name="enable", override=True)
        cfg.CONF.set_override(group="rbac", name="sync_remote_groups", override=True)

        expected_msg = (
            "Configured auth backend doesn't expose user group information. Disable "
            "remote group synchronization or"
        )
        self.assertRaisesRegexp(
            ValueError, expected_msg, validate_auth_backend_is_correctly_configured
        )
| apache-2.0 |
zombi-x/android_kernel_oppo_msm8974 | tools/perf/scripts/python/futex-contention.py | 11261 | 1486 | # futex contention
# (c) 2010, Arnaldo Carvalho de Melo <acme@redhat.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Translation of:
#
# http://sourceware.org/systemtap/wiki/WSFutexContention
#
# to perf python scripting.
#
# Measures futex contention
import os, sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from Util import *
process_names = {}
thread_thislock = {}
thread_blocktime = {}
lock_waits = {} # long-lived stats on (tid,lock) blockage elapsed time
process_names = {} # long-lived pid-to-execname mapping
def syscalls__sys_enter_futex(event, ctxt, cpu, s, ns, tid, comm,
	nr, uaddr, op, val, utime, uaddr2, val3):
	# Record the moment a thread starts blocking in FUTEX_WAIT on the
	# lock at uaddr; matched later by the sys_exit_futex handler.
	cmd = op & FUTEX_CMD_MASK
	if cmd != FUTEX_WAIT:
		return # we don't care about originators of WAKE events

	process_names[tid] = comm
	thread_thislock[tid] = uaddr
	thread_blocktime[tid] = nsecs(s, ns)
def syscalls__sys_exit_futex(event, ctxt, cpu, s, ns, tid, comm,
	nr, ret):
	# A futex syscall returned: if this thread was blocked in FUTEX_WAIT,
	# account the elapsed block time against its (tid, lock) key.
	# 'in' replaces the Python 2-only dict.has_key(), which no longer
	# exists when perf embeds a Python 3 interpreter.
	if tid in thread_blocktime:
		elapsed = nsecs(s, ns) - thread_blocktime[tid]
		add_stats(lock_waits, (tid, thread_thislock[tid]), elapsed)
		del thread_blocktime[tid]
		del thread_thislock[tid]
def trace_begin():
	"""Called by perf before event processing starts."""
	# print() call form runs under both Python 2 and 3; the bare print
	# statement is a syntax error on a Python 3 scripting engine.
	print("Press control+C to stop and show the summary")
def trace_end():
	"""Called by perf at shutdown: dump per-(thread, lock) contention stats."""
	for (tid, lock) in lock_waits:
		# Renamed from min/max to avoid shadowing the builtins; only the
		# average and count are printed.
		lo, hi, avg, count = lock_waits[tid, lock]
		# print() call form runs under both Python 2 and 3.
		print("%s[%d] lock %x contended %d times, %d avg ns" %
		      (process_names[tid], tid, lock, count, avg))
| gpl-2.0 |
kirca/odoo | addons/account_check_writing/report/check_print.py | 320 | 2943 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import osv
from openerp.report import report_sxw
class report_print_check(report_sxw.rml_parse):
    """RML parser for the cheque (check) printing report.

    Exposes the ``get_lines`` and ``fill_stars`` helpers to the report
    template through ``localcontext``.
    """

    def __init__(self, cr, uid, name, context):
        super(report_print_check, self).__init__(cr, uid, name, context)
        self.number_lines = 0
        self.number_add = 0
        self.localcontext.update({
            'time': time,
            'get_lines': self.get_lines,
            'fill_stars' : self.fill_stars,
        })

    def fill_stars(self, amount):
        # Pad the amount text with '*' up to 100 characters so the line
        # cannot be altered after the cheque is printed.
        if len(amount) < 100:
            stars = 100 - len(amount)
            return ' '.join([amount,'*'*stars])
        else: return amount

    def get_lines(self, voucher_lines):
        # Build the row dicts shown on the cheque stub (at most 10).
        result = []
        self.number_lines = len(voucher_lines)
        for i in range(0, min(10,self.number_lines)):
            # NOTE(review): the loop bound is min(10, number_lines), so
            # 'i < self.number_lines' is always true and the else branch
            # below is unreachable; it also uses an 'amount_due' key where
            # the live branch uses 'amount_unreconciled'. Probably the
            # intent was to pad the stub to 10 rows -- confirm before
            # changing behavior.
            if i < self.number_lines:
                res = {
                    'date_due' : voucher_lines[i].date_due,
                    'name' : voucher_lines[i].name,
                    'amount_original' : voucher_lines[i].amount_original and voucher_lines[i].amount_original or False,
                    'amount_unreconciled' : voucher_lines[i].amount_unreconciled and voucher_lines[i].amount_unreconciled or False,
                    'amount' : voucher_lines[i].amount and voucher_lines[i].amount or False,
                }
            else :
                res = {
                    'date_due' : False,
                    'name' : False,
                    'amount_original' : False,
                    'amount_due' : False,
                    'amount' : False,
                }
            result.append(res)
        return result
class report_check(osv.AbstractModel):
    """Report model binding the cheque QWeb template to its parser class."""
    _name = 'report.account_check_writing.report_check'
    _inherit = 'report.abstract_report'
    _template = 'account_check_writing.report_check'
    _wrapped_report_class = report_print_check
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
sanjeevtripurari/hue | desktop/core/ext-py/Django-1.6.10/tests/utils_tests/test_dateformat.py | 57 | 6340 | from __future__ import unicode_literals
from datetime import datetime, date
import os
import time
from django.utils.dateformat import format
from django.utils import dateformat, translation, unittest
from django.utils.timezone import utc
from django.utils.tzinfo import FixedOffset, LocalTimezone
class DateFormatTests(unittest.TestCase):
    """Tests for django.utils.dateformat.

    Runs with a fixed timezone (Europe/Copenhagen) and locale (en-us),
    both restored in tearDown; timezone-dependent assertions are skipped
    on platforms without time.tzset().
    """

    def setUp(self):
        self.old_TZ = os.environ.get('TZ')
        os.environ['TZ'] = 'Europe/Copenhagen'
        self._orig_lang = translation.get_language()
        translation.activate('en-us')

        try:
            # Check if a timezone has been set
            time.tzset()
            self.tz_tests = True
        except AttributeError:
            # No timezone available. Don't run the tests that require a TZ
            self.tz_tests = False

    def tearDown(self):
        translation.activate(self._orig_lang)
        if self.old_TZ is None:
            del os.environ['TZ']
        else:
            os.environ['TZ'] = self.old_TZ

        # Cleanup - force re-evaluation of TZ environment variable.
        if self.tz_tests:
            time.tzset()

    def test_date(self):
        # 'U' renders the Unix timestamp; it must round-trip to the date.
        d = date(2009, 5, 16)
        self.assertEqual(date.fromtimestamp(int(format(d, 'U'))), d)

    def test_naive_datetime(self):
        dt = datetime(2009, 5, 16, 5, 30, 30)
        self.assertEqual(datetime.fromtimestamp(int(format(dt, 'U'))), dt)

    def test_datetime_with_local_tzinfo(self):
        ltz = LocalTimezone(datetime.now())
        dt = datetime(2009, 5, 16, 5, 30, 30, tzinfo=ltz)
        self.assertEqual(datetime.fromtimestamp(int(format(dt, 'U')), ltz), dt)
        self.assertEqual(datetime.fromtimestamp(int(format(dt, 'U'))), dt.replace(tzinfo=None))

    def test_datetime_with_tzinfo(self):
        # -510 minutes: an arbitrary non-local fixed offset.
        tz = FixedOffset(-510)
        ltz = LocalTimezone(datetime.now())
        dt = datetime(2009, 5, 16, 5, 30, 30, tzinfo=tz)
        self.assertEqual(datetime.fromtimestamp(int(format(dt, 'U')), tz), dt)
        self.assertEqual(datetime.fromtimestamp(int(format(dt, 'U')), ltz), dt)
        self.assertEqual(datetime.fromtimestamp(int(format(dt, 'U'))), dt.astimezone(ltz).replace(tzinfo=None))
        self.assertEqual(datetime.fromtimestamp(int(format(dt, 'U')), tz).utctimetuple(), dt.utctimetuple())
        self.assertEqual(datetime.fromtimestamp(int(format(dt, 'U')), ltz).utctimetuple(), dt.utctimetuple())

    def test_epoch(self):
        udt = datetime(1970, 1, 1, tzinfo=utc)
        self.assertEqual(format(udt, 'U'), '0')

    def test_empty_format(self):
        my_birthday = datetime(1979, 7, 8, 22, 00)

        self.assertEqual(dateformat.format(my_birthday, ''), '')

    def test_am_pm(self):
        my_birthday = datetime(1979, 7, 8, 22, 00)

        self.assertEqual(dateformat.format(my_birthday, 'a'), 'p.m.')

    def test_microsecond(self):
        # Regression test for #18951
        dt = datetime(2009, 5, 16, microsecond=123)
        self.assertEqual(dateformat.format(dt, 'u'), '000123')

    def test_date_formats(self):
        my_birthday = datetime(1979, 7, 8, 22, 00)
        timestamp = datetime(2008, 5, 19, 11, 45, 23, 123456)

        self.assertEqual(dateformat.format(my_birthday, 'A'), 'PM')
        self.assertEqual(dateformat.format(timestamp, 'c'), '2008-05-19T11:45:23.123456')
        self.assertEqual(dateformat.format(my_birthday, 'd'), '08')
        self.assertEqual(dateformat.format(my_birthday, 'j'), '8')
        self.assertEqual(dateformat.format(my_birthday, 'l'), 'Sunday')
        self.assertEqual(dateformat.format(my_birthday, 'L'), 'False')
        self.assertEqual(dateformat.format(my_birthday, 'm'), '07')
        self.assertEqual(dateformat.format(my_birthday, 'M'), 'Jul')
        self.assertEqual(dateformat.format(my_birthday, 'b'), 'jul')
        self.assertEqual(dateformat.format(my_birthday, 'n'), '7')
        self.assertEqual(dateformat.format(my_birthday, 'N'), 'July')

    def test_time_formats(self):
        my_birthday = datetime(1979, 7, 8, 22, 00)

        self.assertEqual(dateformat.format(my_birthday, 'P'), '10 p.m.')
        self.assertEqual(dateformat.format(my_birthday, 's'), '00')
        self.assertEqual(dateformat.format(my_birthday, 'S'), 'th')
        self.assertEqual(dateformat.format(my_birthday, 't'), '31')
        self.assertEqual(dateformat.format(my_birthday, 'w'), '0')
        self.assertEqual(dateformat.format(my_birthday, 'W'), '27')
        self.assertEqual(dateformat.format(my_birthday, 'y'), '79')
        self.assertEqual(dateformat.format(my_birthday, 'Y'), '1979')
        self.assertEqual(dateformat.format(my_birthday, 'z'), '189')

    def test_dateformat(self):
        my_birthday = datetime(1979, 7, 8, 22, 00)

        # Backslash escapes literal characters in the format string.
        self.assertEqual(dateformat.format(my_birthday, r'Y z \C\E\T'), '1979 189 CET')

        self.assertEqual(dateformat.format(my_birthday, r'jS \o\f F'), '8th of July')

    def test_futuredates(self):
        the_future = datetime(2100, 10, 25, 0, 00)
        self.assertEqual(dateformat.format(the_future, r'Y'), '2100')

    def test_timezones(self):
        my_birthday = datetime(1979, 7, 8, 22, 00)
        summertime = datetime(2005, 10, 30, 1, 00)
        wintertime = datetime(2005, 10, 30, 4, 00)
        timestamp = datetime(2008, 5, 19, 11, 45, 23, 123456)

        if self.tz_tests:
            self.assertEqual(dateformat.format(my_birthday, 'O'), '+0100')
            self.assertEqual(dateformat.format(my_birthday, 'r'), 'Sun, 8 Jul 1979 22:00:00 +0100')
            self.assertEqual(dateformat.format(my_birthday, 'T'), 'CET')
            self.assertEqual(dateformat.format(my_birthday, 'U'), '300315600')
            self.assertEqual(dateformat.format(timestamp, 'u'), '123456')
            self.assertEqual(dateformat.format(my_birthday, 'Z'), '3600')
            self.assertEqual(dateformat.format(summertime, 'I'), '1')
            self.assertEqual(dateformat.format(summertime, 'O'), '+0200')
            self.assertEqual(dateformat.format(wintertime, 'I'), '0')
            self.assertEqual(dateformat.format(wintertime, 'O'), '+0100')

        # Ticket #16924 -- We don't need timezone support to test this
        # 3h30m to the west of UTC
        tz = FixedOffset(-3*60 - 30)
        dt = datetime(2009, 5, 16, 5, 30, 30, tzinfo=tz)
        self.assertEqual(dateformat.format(dt, 'O'), '-0330')
| apache-2.0 |
silly-wacky-3-town-toon/SOURCE-COD | Panda3D-1.10.0/python/Lib/lib-tk/Tkconstants.py | 375 | 1493 | # Symbolic constants for Tk
# Boolean option values.
NO = FALSE = OFF = 0
YES = TRUE = ON = 1

# Values for -anchor and -sticky.
N = 'n'
S = 's'
W = 'w'
E = 'e'
NW = 'nw'
SW = 'sw'
NE = 'ne'
SE = 'se'
NS = 'ns'
EW = 'ew'
NSEW = 'nsew'
CENTER = 'center'

# Values for -fill.
NONE = 'none'
X = 'x'
Y = 'y'
BOTH = 'both'

# Values for -side.
LEFT = 'left'
TOP = 'top'
RIGHT = 'right'
BOTTOM = 'bottom'

# Values for -relief.
RAISED = 'raised'
SUNKEN = 'sunken'
FLAT = 'flat'
RIDGE = 'ridge'
GROOVE = 'groove'
SOLID = 'solid'

# Values for -orient.
HORIZONTAL = 'horizontal'
VERTICAL = 'vertical'

# Values for -tabs.
NUMERIC = 'numeric'

# Values for -wrap.
CHAR = 'char'
WORD = 'word'

# Values for -align.
BASELINE = 'baseline'

# Values for -bordermode.
INSIDE = 'inside'
OUTSIDE = 'outside'

# Special tags, marks and insert positions.
SEL = 'sel'
SEL_FIRST = 'sel.first'
SEL_LAST = 'sel.last'
END = 'end'
INSERT = 'insert'
CURRENT = 'current'
ANCHOR = 'anchor'
ALL = 'all'  # e.g. Canvas.delete(ALL)

# Text widget and button states.
NORMAL = 'normal'
DISABLED = 'disabled'
ACTIVE = 'active'

# Canvas state.
HIDDEN = 'hidden'

# Menu item types.
CASCADE = 'cascade'
CHECKBUTTON = 'checkbutton'
COMMAND = 'command'
RADIOBUTTON = 'radiobutton'
SEPARATOR = 'separator'

# Selection modes for list boxes.
SINGLE = 'single'
BROWSE = 'browse'
MULTIPLE = 'multiple'
EXTENDED = 'extended'

# Activestyle for list boxes (NONE = 'none' is also valid).
DOTBOX = 'dotbox'
UNDERLINE = 'underline'

# Various canvas styles.
PIESLICE = 'pieslice'
CHORD = 'chord'
ARC = 'arc'
FIRST = 'first'
LAST = 'last'
BUTT = 'butt'
PROJECTING = 'projecting'
ROUND = 'round'
BEVEL = 'bevel'
MITER = 'miter'

# Arguments to xview/yview.
MOVETO = 'moveto'
SCROLL = 'scroll'
UNITS = 'units'
PAGES = 'pages'
| apache-2.0 |
asmacdo/pulp-automation | tests/consumer_agent_tests/test_09_consumer_auth.py | 2 | 3615 | import unittest, logging, nose
from tests.conf.roles import ROLES
from pulp_auto import Pulp, format_response
from pulp_auto.handler.profile import PROFILE
from pulp_auto.consumer import (Consumer, Binding)
from pulp_auto.task import (Task, TaskFailure, TaskTimeoutError)
from pulp_auto.agent import Agent
from pulp_auto.qpid_handle import QpidHandle
from pulp_auto.authenticator import Authenticator
import pulp_auto.handler
from M2Crypto import (RSA, BIO)
from tests.pulp_test import requires, requires_any, PulpTest, agent_test
from tests.conf.facade.yum import YumRepo
@requires('qpid.url')
@requires_any('repos', lambda repo: repo.type == 'rpm')
class ConsumerAuthTest(PulpTest):
    '''Integration tests for consumer qpid-message authentication.

    A consumer is registered with a "primary" RSA public key; the tests then
    drive a repo-bind request over qpid signed with either the matching
    primary private key (expected to succeed) or a different "secondary"
    key (expected to fail with a TaskFailure).
    '''
    @classmethod
    def setUpClass(cls):
        super(ConsumerAuthTest, cls).setUpClass()
        cls.ROLES = ROLES
        cls.PROFILE = PROFILE
        # load both RSA key pairs; only the primary public key is registered
        cls.rsa_primary = RSA.load_key('/usr/share/pulp_auto/tests/data/fake-consumer.pem')
        cls.rsa_secondary = RSA.load_key('/usr/share/pulp_auto/tests/data/fake-consumer-secondary.pem')
        # serialize the public halves to PEM via in-memory BIO buffers
        bio_fd = BIO.MemoryBuffer()
        cls.rsa_primary.save_pub_key_bio(bio_fd)
        cls.pub_pem_primary = bio_fd.getvalue()
        bio_fd = BIO.MemoryBuffer()
        cls.rsa_secondary.save_pub_key_bio(bio_fd)
        cls.pub_pem_secondary = bio_fd.getvalue()
        # create an rpm repo from the first matching role definition
        repo_role = [repo for repo in cls.ROLES.repos if repo.type == 'rpm'][0]
        cls.repo, cls.importer, [cls.distributor] = YumRepo.from_role(repo_role).create(cls.pulp)
        # register the consumer with the primary public key ...
        cls.consumer = Consumer.register(cls.pulp, cls.__name__ + '_consumer', rsa_pub=cls.pub_pem_primary)
        cls.agent = Agent(pulp_auto.handler, PROFILE=pulp_auto.handler.profile.PROFILE)
        # ... and sign the agent's qpid traffic with the matching private key
        cls.qpid_handle = QpidHandle(cls.ROLES.qpid.url, cls.consumer.id, auth=Authenticator(signing_key=cls.rsa_primary, verifying_key=cls.pulp.pubkey))
    @classmethod
    def tearDownClass(cls):
        # repo deletion is a server-side task; process it with a valid agent
        with \
            cls.pulp.asserting(True), \
            cls.agent.catching(False), \
            cls.agent.running(cls.qpid_handle, frequency=10) \
        :
            Task.wait_for_report(cls.pulp, cls.repo.delete(cls.pulp))
        cls.consumer.delete(cls.pulp)
        super(ConsumerAuthTest, cls).tearDownClass()
    def tearDown(self):
        '''delete repo binding; runs within a "correct" agent running ctx'''
        with self.pulp.asserting(True), \
            self.agent.catching(True), \
            self.agent.running(self.qpid_handle, frequency=10) \
        :
            report = self.consumer.unbind_distributor(self.pulp, self.repo.id, self.distributor.id)
            self.assertPulp(code=202)
            Task.wait_for_report(self.pulp, report)
    def bindRepo(self):
        '''test cases are performed on a repo bind call; to be run within agent running ctx'''
        with self.pulp.asserting(True) :
            report = self.consumer.bind_distributor(self.pulp,self.repo.id, self.distributor.id)
            self.assertPulp(code=202)
            Task.wait_for_report(self.pulp, report, timeout=5)
    def test_01_valid_consumer_auth(self):
        # signing with the registered (primary) key: bind must succeed
        with self.agent.catching(True), self.agent.running(self.qpid_handle, frequency=10):
            self.bindRepo()
    # bz: 1104788
    @unittest.expectedFailure
    def test_02_invalid_consumer_auth(self):
        # signing with a key the server does not know: bind must fail
        invalid_qpid_handle = QpidHandle(self.ROLES.qpid.url, self.consumer.id, auth=Authenticator(signing_key=self.rsa_secondary, verifying_key=self.pulp.pubkey))
        with self.agent.catching(True), self.agent.running(invalid_qpid_handle, frequency=10):
            with self.assertRaises(TaskFailure):
                self.bindRepo()
| gpl-2.0 |
detiber/lib_openshift | lib_openshift/models/v1_local_subject_access_review.py | 2 | 13159 | # coding: utf-8
"""
OpenAPI spec version:
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pprint import pformat
from six import iteritems
import re
class V1LocalSubjectAccessReview(object):
    """
    Swagger model for a namespace-scoped ("local") subject access review.

    NOTE: This class corresponds to a definition auto generated by the
    swagger code generator program. The repetitive per-attribute
    getter/setter boilerplate has been replaced by properties generated
    from ``swagger_types`` (see ``_add_swagger_properties`` below); the
    public attribute interface is unchanged.
    """

    # REST operations this model participates in (consumed by the API layer).
    operations = [
        {
            'class': 'OapiV1',
            'type': 'create',
            'method': 'create_namespaced_localsubjectaccessreview',
            'namespaced': True
        },
        {
            'class': 'OapiV1',
            'type': 'create',
            'method': 'create_localsubjectaccessreview',
            'namespaced': False
        },
    ]

    # The key is attribute name and the value is attribute type.
    swagger_types = {
        'kind': 'str',
        'api_version': 'str',
        'namespace': 'str',
        'verb': 'str',
        'resource_api_group': 'str',
        'resource_api_version': 'str',
        'resource': 'str',
        'resource_name': 'str',
        'content': 'str',
        'user': 'str',
        'groups': 'list[str]'
    }

    # The key is attribute name and the value is json key in definition.
    attribute_map = {
        'kind': 'kind',
        'api_version': 'apiVersion',
        'namespace': 'namespace',
        'verb': 'verb',
        'resource_api_group': 'resourceAPIGroup',
        'resource_api_version': 'resourceAPIVersion',
        'resource': 'resource',
        'resource_name': 'resourceName',
        'content': 'content',
        'user': 'user',
        'groups': 'groups'
    }

    # Human-readable description of each attribute; used as the docstring of
    # the generated property of the same name.
    attribute_docs = {
        'kind': "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#types-kinds",
        'api_version': "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#resources",
        'namespace': "Namespace is the namespace of the action being requested. Currently, there is no distinction between no namespace and all namespaces",
        'verb': "Verb is one of: get, list, watch, create, update, delete",
        'resource_api_group': "Group is the API group of the resource Serialized as resourceAPIGroup to avoid confusion with the 'groups' field when inlined",
        'resource_api_version': "Version is the API version of the resource Serialized as resourceAPIVersion to avoid confusion with TypeMeta.apiVersion and ObjectMeta.resourceVersion when inlined",
        'resource': "Resource is one of the existing resource types",
        'resource_name': "ResourceName is the name of the resource being requested for a \"get\" or deleted for a \"delete\"",
        'content': "Content is the actual content of the request for create and update",
        'user': "User is optional. If both User and Groups are empty, the current authenticated user is used.",
        'groups': "Groups is optional. Groups is the list of groups to which the User belongs.",
    }

    def __init__(self, kind=None, api_version=None, namespace=None, verb=None, resource_api_group=None, resource_api_version=None, resource=None, resource_name=None, content=None, user=None, groups=None):
        """
        V1LocalSubjectAccessReview - a model defined in Swagger
        """
        self._kind = kind
        self._api_version = api_version
        self._namespace = namespace
        self._verb = verb
        self._resource_api_group = resource_api_group
        self._resource_api_version = resource_api_version
        self._resource = resource
        self._resource_name = resource_name
        self._content = content
        self._user = user
        self._groups = groups

    def to_dict(self):
        """
        Returns the model properties as a dict, recursing into any value that
        is itself a model (i.e. exposes a ``to_dict`` method).
        """
        result = {}
        # Iterate the declared attributes rather than six.iteritems(); this
        # works identically on python 2 and 3 and drops the six dependency.
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [item.to_dict() if hasattr(item, "to_dict") else item
                                for item in value]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(
                    (k, v.to_dict() if hasattr(v, "to_dict") else v)
                    for k, v in value.items())
            else:
                result[attr] = value
        return result

    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())

    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()

    def __eq__(self, other):
        """
        Returns true if both objects are equal.

        Bug fix: comparing against an object without a ``__dict__`` (e.g. an
        int or str) used to raise AttributeError; it now returns False.
        """
        if not isinstance(other, V1LocalSubjectAccessReview):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other


def _add_swagger_properties(cls):
    """Attach a read/write property for every attribute in ``cls.swagger_types``.

    Each property proxies the private ``_<name>`` slot assigned in
    ``__init__``, exactly mirroring the getter/setter pairs the swagger
    generator emits, with the original descriptions as property docstrings.
    """
    for name in cls.swagger_types:
        private = '_' + name

        # bind `private` per-iteration via a default argument
        def _getter(self, _private=private):
            return getattr(self, _private)

        def _setter(self, value, _private=private):
            setattr(self, _private, value)

        setattr(cls, name, property(_getter, _setter,
                                    doc=cls.attribute_docs.get(name)))


_add_swagger_properties(V1LocalSubjectAccessReview)
| apache-2.0 |
alirizakeles/zato | code/zato-web-admin/src/zato/admin/web/templatetags/extras.py | 1 | 1103 | # -*- coding: utf-8 -*-
"""
Copyright (C) 2012 Dariusz Suchojad <dsuch at zato.io>
Licensed under LGPLv3, see LICENSE.txt for terms and conditions.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
from django import template
register = template.Library()
# Taken from https://djangosnippets.org/snippets/38/ and slightly updated
@register.filter
def bunchget(obj, args):
    """Look up a key in a dict-like (Bunch) object, with an optional default.

    ``args`` is a string of the form ``"key"`` or ``"key,default"``.
    Example: {% if block|bunchget:"editable,True" %}
    Beware that the default is always a string, if you want this
    to return False, pass an empty second argument:
    {% if block|bunchget:"editable," %}
    """
    # Split on the first comma only, so a default containing commas works;
    # the previous unconditional split crashed the 2-tuple unpack below.
    parts = str(args).split(',', 1)
    if len(parts) == 1:
        attribute, default = parts[0], ''
    else:
        attribute, default = parts
    if attribute in obj:
        return obj[attribute]
    return default
# Taken from https://stackoverflow.com/a/16609498
@register.simple_tag
def url_replace(request, field, value):
    """Return the current query string with ``field`` replaced by ``value``."""
    params = request.GET.copy()  # mutable copy of the request's QueryDict
    params[field] = value
    return params.urlencode()
| gpl-3.0 |
jimsize/PySolFC | pysollib/games/unionsquare.py | 1 | 7698 | #!/usr/bin/env python
# -*- mode: python; coding: utf-8; -*-
# ---------------------------------------------------------------------------##
#
# Copyright (C) 1998-2003 Markus Franz Xaver Johannes Oberhumer
# Copyright (C) 2003 Mt. Hood Playing Card Co.
# Copyright (C) 2005-2009 Skomoroh
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# ---------------------------------------------------------------------------##
# imports
# PySol imports
from pysollib.gamedb import registerGame, GameInfo, GI
from pysollib.mfxutil import kwdefault
from pysollib.game import Game
import pysollib.game
from pysollib.layout import Layout
from pysollib.hint import CautiousDefaultHint
from pysollib.util import ACE, ANY_RANK, NO_RANK
from pysollib.stack import \
AbstractFoundationStack, \
OpenStack, \
Stack, \
UD_SS_RowStack, \
WasteStack, \
WasteTalonStack, \
StackWrapper
# ************************************************************************
# *
# ************************************************************************
class UnionSquare_Foundation(AbstractFoundationStack):
    def acceptsCards(self, from_stack, cards):
        """Accept ranks ascending for the first 13 cards, then descending
        back down for the remaining 13 (26 cards per foundation)."""
        if not AbstractFoundationStack.acceptsCards(self, from_stack, cards):
            return False
        # positions 0..12 require rank == position; positions 13..25 require
        # rank == 25 - position (the pile "turns around" after the king)
        position = len(self.cards)
        wanted = 25 - position if position > 12 else position
        return cards[0].rank == wanted
class UnionSquare_RowStack(OpenStack):
    '''Row pile that builds up OR down in suit; the first two cards fix the
    direction, which is then enforced for the rest of the pile.'''
    def __init__(self, x, y, game, **cap):
        # mod=8192 effectively disables rank wrap-around; dir=0 because the
        # build direction is decided dynamically in acceptsCards()
        kwdefault(cap, mod=8192, dir=0, base_rank=ANY_RANK,
               max_accept=1, max_move=1)
        OpenStack.__init__(self, x, y, game, **cap)
        # self.CARD_YOFFSET = 1
    def acceptsCards(self, from_stack, cards):
        if not OpenStack.acceptsCards(self, from_stack, cards):
            return False
        # an empty pile accepts any card
        if not self.cards:
            return True
        # must follow suit
        if cards[0].suit != self.cards[0].suit:
            return False
        if len(self.cards) == 1:
            # the second card may go one rank up or down, fixing the direction
            card_dir = cards[0].rank - self.cards[-1].rank
            return card_dir == 1 or card_dir == -1
        else:
            # direction established by the first two cards; enforce it
            stack_dir = (self.cards[1].rank - self.cards[0].rank) % \
                self.cap.mod
            return (self.cards[-1].rank + stack_dir) % \
                self.cap.mod == cards[0].rank
    getBottomImage = Stack._getReserveBottomImage
# ************************************************************************
# *
# ************************************************************************
class UnionSquare(pysollib.game.StartDealRowAndCards, Game):
    '''Union Square: talon/waste on the left, a grid of row piles in the
    middle, four 26-card up-then-down foundations on the right.'''
    Hint_Class = CautiousDefaultHint
    Foundation_Class = StackWrapper(UnionSquare_Foundation, max_cards=26)
    RowStack_Class = UnionSquare_RowStack
    #
    # game layout
    #
    def createGame(self, rows=16):
        # create layout
        l, s = Layout(self, card_y_space=20), self.s
        # set window
        self.setSize(l.XM + (5+rows//4)*l.XS, l.YM + 4*l.YS)
        # create stacks
        x, y, = l.XM, l.YM
        s.talon = WasteTalonStack(x, y, self, max_rounds=1)
        l.createText(s.talon, "s")
        x = x + l.XS
        s.waste = WasteStack(x, y, self)
        l.createText(s.waste, "s")
        # rows//4 columns by 4 rows of row piles, starting 3 columns in
        for i in range(4):
            x = 3*l.XS
            for j in range(rows//4):
                stack = self.RowStack_Class(x, y, self)
                stack.CARD_XOFFSET, stack.CARD_YOFFSET = 0, 1
                s.rows.append(stack)
                x = x + l.XS
            y = y + l.YS
        # one foundation per suit along the right edge
        x, y = self.width-l.XS, l.YM
        for i in range(4):
            stack = self.Foundation_Class(x, y, self, suit=i,
                                          max_move=0, dir=0)
            l.createText(stack, "sw")
            s.foundations.append(stack)
            y = y + l.YS
        # define stack-groups
        l.defaultStackGroups()
    #
    # game overrides
    #
    shallHighlightMatch = Game._shallHighlightMatch_SS
    def getHighlightPilesStacks(self):
        # no "same rank" pile highlighting in this game
        return ()
# ************************************************************************
# * Solid Square
# ************************************************************************
class SolidSquare(UnionSquare):
    '''Solid Square variant: 20 row piles, up-or-down in suit with rank
    wrap-around (mod=13); aces start on the foundations and empty row
    piles are refilled automatically from the waste/talon.'''
    RowStack_Class = StackWrapper(UD_SS_RowStack, base_rank=NO_RANK,
                                  max_accept=1, max_move=1, mod=13)
    def createGame(self):
        UnionSquare.createGame(self, rows=20)
    def _shuffleHook(self, cards):
        # move one ace per suit (from the first deck) to the top of the
        # talon so they can be dealt to the foundations first
        return self._shuffleHookMoveToTop(
            cards,
            lambda c: (c.rank == ACE and c.deck == 0, c.suit))
    def startGame(self):
        # deal the prepared aces onto the foundations, then the normal deal
        self.s.talon.dealRow(rows=self.s.foundations, frames=0)
        UnionSquare.startGame(self)
    def fillStack(self, stack):
        # auto-refill an emptied row pile from the waste (dealing from the
        # talon first if the waste is empty)
        if stack in self.s.rows and not stack.cards:
            old_state = self.enterState(self.S_FILL)
            if not self.s.waste.cards:
                self.s.talon.dealCards()
            if self.s.waste.cards:
                self.s.waste.moveMove(1, stack)
            self.leaveState(old_state)
    shallHighlightMatch = Game._shallHighlightMatch_SSW
# ************************************************************************
# * Boomerang
# ************************************************************************
class Boomerang_Foundation(AbstractFoundationStack):
    def acceptsCards(self, from_stack, cards):
        """Accept the fixed "boomerang" rank sequence, one card at a time."""
        if not AbstractFoundationStack.acceptsCards(self, from_stack, cards):
            return False
        # Required rank for each pile position:
        # 7, 8, 9, 10, J, Q, K, A, K, Q, J, 10, 9, 8, 7, A
        wanted = (6, 7, 8, 9, 10, 11, 12, ACE,
                  12, 11, 10, 9, 8, 7, 6, ACE)
        position = len(self.cards)
        if position < len(wanted):
            return cards[0].rank == wanted[position]
        return cards[0].rank == ACE  # max_cards caps the pile at 16 anyway
class Boomerang(UnionSquare):
    '''Boomerang variant: 12 row piles and foundations that build
    7..K, A, K..7, A (see Boomerang_Foundation); empty row piles are
    refilled automatically from the waste/talon.'''
    Foundation_Class = StackWrapper(Boomerang_Foundation,
                                    base_rank=6, max_cards=16)
    RowStack_Class = StackWrapper(UnionSquare_RowStack, base_rank=NO_RANK)
    def createGame(self):
        UnionSquare.createGame(self, rows=12)
    def fillStack(self, stack):
        # auto-refill an emptied row pile from the waste (dealing from the
        # talon first if the waste is empty)
        if stack in self.s.rows and not stack.cards:
            old_state = self.enterState(self.S_FILL)
            if not self.s.waste.cards:
                self.s.talon.dealCards()
            if self.s.waste.cards:
                self.s.waste.moveMove(1, stack)
            self.leaveState(old_state)
# register the game
# (all three are two-deck games; Boomerang uses a stripped deck: aces plus 7..K)
registerGame(GameInfo(35, UnionSquare, "Union Square",
                      GI.GT_2DECK_TYPE, 2, 0, GI.SL_MOSTLY_SKILL,
                      altnames=('British Square',),
                      ))
registerGame(GameInfo(439, SolidSquare, "Solid Square",
                      GI.GT_2DECK_TYPE, 2, 0, GI.SL_BALANCED))
registerGame(GameInfo(738, Boomerang, "Boomerang",
                      GI.GT_2DECK_TYPE, 2, 0, GI.SL_BALANCED,
                      ranks=(0, 6, 7, 8, 9, 10, 11, 12),
                      ))
| gpl-3.0 |
bbuckingham/katello | cli/test/katello/tests/core/user/user_report_test.py | 3 | 1074 | import unittest
import os
from katello.tests.core.action_test_utils import CLIOptionTestCase, CLIActionTestCase
import katello.client.core.user
from katello.client.core.user import Report
from katello.client.core.utils import convert_to_mime_type
class UserReportTest(CLIActionTestCase):
    '''CLI tests for the user Report action: the report API must be called
    with the MIME type matching the requested --format (default text).'''
    def setUp(self):
        self.set_action(Report())
        self.set_module(katello.client.core.user)
        # stub out the server call and the file-saving helper
        self.mock(self.action.api, 'report', ('', ''))
        self.mock(self.module, 'save_report')
    def tearDown(self):
        self.restore_mocks()
    def test_it_calls_report_api_with_default_format(self):
        # no --format given -> plain text
        self.run_action()
        self.action.api.report.assert_called_once_with('text/plain')
    def test_it_uses_format_parameter(self):
        self.mock_options({'format': 'pdf'})
        self.run_action()
        self.action.api.report.assert_called_once_with(convert_to_mime_type('pdf'))
    def test_it_saves_pdf_report(self):
        # pdf output is binary, so it must be written to disk
        self.mock_options({'format': 'pdf'})
        self.run_action()
        self.module.save_report.assert_called_once()
pombredanne/bokeh | examples/plotting/file/line_select.py | 7 | 1295 | """ Example demonstrating the picking of line objects.
"""
import numpy as np
from bokeh.models import TapTool, CustomJS, ColumnDataSource
from bokeh.plotting import output_file, show, figure
# The data is setup to have very different scales in x and y, to verify
# that picking happens in pixels. Different widths are used to test that
# you can click anywhere on the visible line.
#
# Note that the get_view() function used here is not documented and
# might change in future versions of Bokeh.
t = np.linspace(0, 0.1, 100)
code = """
d0 = cb_obj.get("selected")["0d"];
if (d0.glyph) {
var color = d0.get_view().visuals.line.line_color.value();
var data = source.get('data');
data['text'] = ['Selected the ' + color + ' line'];
source.trigger('change');
}
"""
# use a source to easily update the text of the text-glyph
source = ColumnDataSource(data=dict(text=['no line selected']))
p = figure()
l1 = p.line(t, 100*np.sin(t*50), color='red', line_width=25)
l2 = p.line(t, 100*np.sin(t*50+1), color='green', line_width=5)
l3 = p.line(t, 100*np.sin(t*50+2), color='blue', line_width=1)
p.text(0, -100, source=source)
p.add_tools(TapTool(callback=CustomJS(code=code, args=dict(source=source))))
output_file("line_select.html", title="line_select.py example")
show(p)
| bsd-3-clause |
kornicameister/ansible-modules-extras | cloud/misc/ovirt.py | 12 | 17729 | #!/usr/bin/python
# (c) 2013, Vincent Van der Kussen <vincent at vanderkussen.org>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: ovirt
author: "Vincent Van der Kussen (@vincentvdk)"
short_description: oVirt/RHEV platform management
description:
- allows you to create new instances, either from scratch or an image, in addition to deleting or stopping instances on the oVirt/RHEV platform
version_added: "1.4"
options:
user:
description:
- the user to authenticate with
default: null
required: true
aliases: []
url:
description:
- the url of the oVirt instance
default: null
required: true
aliases: []
instance_name:
description:
- the name of the instance to use
default: null
required: true
aliases: [ vmname ]
password:
description:
- password of the user to authenticate with
default: null
required: true
aliases: []
image:
description:
- template to use for the instance
default: null
required: false
aliases: []
resource_type:
description:
- whether you want to deploy an image or create an instance from scratch.
default: null
required: false
aliases: []
choices: [ 'new', 'template' ]
zone:
description:
- deploy the image to this oVirt cluster
default: null
required: false
aliases: []
instance_disksize:
description:
- size of the instance's disk in GB
default: null
required: false
aliases: [ vm_disksize]
instance_cpus:
description:
- the instance's number of cpu's
default: 1
required: false
aliases: [ vmcpus ]
instance_nic:
description:
- name of the network interface in oVirt/RHEV
default: null
required: false
aliases: [ vmnic ]
instance_network:
description:
- the logical network the machine should belong to
default: rhevm
required: false
aliases: [ vmnetwork ]
instance_mem:
description:
- the instance's amount of memory in MB
default: null
required: false
aliases: [ vmmem ]
instance_type:
description:
- define if the instance is a server or desktop
default: server
required: false
aliases: [ vmtype ]
choices: [ 'server', 'desktop' ]
disk_alloc:
description:
- define if disk is thin or preallocated
default: thin
required: false
aliases: []
choices: [ 'thin', 'preallocated' ]
disk_int:
description:
- interface type of the disk
default: virtio
required: false
aliases: []
choices: [ 'virtio', 'ide' ]
instance_os:
description:
- type of Operating System
default: null
required: false
aliases: [ vmos ]
instance_cores:
description:
- define the instance's number of cores
default: 1
required: false
aliases: [ vmcores ]
sdomain:
description:
- the Storage Domain where you want to create the instance's disk on.
default: null
required: false
aliases: []
region:
description:
- the oVirt/RHEV datacenter where you want to deploy to
default: null
required: false
aliases: []
instance_dns:
description:
- define the instance's Primary DNS server
required: false
aliases: [ dns ]
version_added: "2.1"
instance_domain:
description:
- define the instance's Domain
required: false
aliases: [ domain ]
version_added: "2.1"
instance_hostname:
description:
- define the instance's Hostname
required: false
aliases: [ hostname ]
version_added: "2.1"
instance_ip:
description:
- define the instance's IP
required: false
aliases: [ ip ]
version_added: "2.1"
instance_netmask:
description:
- define the instance's Netmask
required: false
aliases: [ netmask ]
version_added: "2.1"
instance_rootpw:
description:
- define the instance's Root password
required: false
aliases: [ rootpw ]
version_added: "2.1"
instance_key:
description:
- define the instance's Authorized key
required: false
aliases: [ key ]
version_added: "2.1"
state:
description:
- create, terminate or remove instances
default: 'present'
required: false
aliases: []
choices: ['present', 'absent', 'shutdown', 'started', 'restarted']
requirements:
- "python >= 2.6"
- "ovirt-engine-sdk-python"
'''
# Fixed: stray trailing quotes after two values, and the unsupported
# "state: stopped" (the module's choices are present/absent/shutdown/
# started/restart) replaced with "state: shutdown".
EXAMPLES = '''
# Basic example provisioning from image.
ovirt:
    user: admin@internal
    url: https://ovirt.example.com
    instance_name: ansiblevm04
    password: secret
    image: centos_64
    zone: cluster01
    resource_type: template
# Full example to create new instance from scratch
ovirt:
    instance_name: testansible
    resource_type: new
    instance_type: server
    user: admin@internal
    password: secret
    url: https://ovirt.example.com
    instance_disksize: 10
    zone: cluster01
    region: datacenter1
    instance_cpus: 1
    instance_nic: nic1
    instance_network: rhevm
    instance_mem: 1000
    disk_alloc: thin
    sdomain: FIBER01
    instance_cores: 1
    instance_os: rhel_6x64
    disk_int: virtio
# stopping an instance
ovirt:
    instance_name: testansible
    state: shutdown
    user: admin@internal
    password: secret
    url: https://ovirt.example.com
# starting an instance
ovirt:
    instance_name: testansible
    state: started
    user: admin@internal
    password: secret
    url: https://ovirt.example.com
# starting an instance with cloud init information
ovirt:
    instance_name: testansible
    state: started
    user: admin@internal
    password: secret
    url: https://ovirt.example.com
    hostname: testansible
    domain: ansible.local
    ip: 192.0.2.100
    netmask: 255.255.255.0
    gateway: 192.0.2.1
    rootpw: bigsecret
'''
try:
from ovirtsdk.api import API
from ovirtsdk.xml import params
HAS_OVIRTSDK = True
except ImportError:
HAS_OVIRTSDK = False
# ------------------------------------------------------------------- #
# create connection with API
#
def conn(url, user, password):
    """Create and sanity-check an oVirt API connection.

    Returns the connected ovirtsdk ``API`` object; raises ``Exception``
    (with the underlying cause in the message) if the probe call fails.
    """
    api = API(url=url, username=user, password=password, insecure=True)
    try:
        # probe the connection; the return value itself is not used
        api.test()
    except Exception as exc:
        # narrowed from a bare "except:" so SystemExit/KeyboardInterrupt
        # still propagate, and the original failure is no longer discarded
        raise Exception("error connecting to the oVirt API: %s" % exc)
    return api
# ------------------------------------------------------------------- #
# Create VM from scratch
def create_vm(conn, vmtype, vmname, zone, vmdisk_size, vmcpus, vmnic, vmnetwork, vmmem, vmdisk_alloc, sdomain, vmcores, vmos, vmdisk_int):
    """Create a VM from the Blank template and attach a system disk and a NIC.

    :param conn: connected ovirtsdk API object
    :param vmdisk_alloc: 'thin' (sparse, cow image) or 'preallocated' (raw)
    Raises Exception if creating the VM, the disk or the NIC fails.
    """
    # The two allocation policies only differ in sparseness and image format,
    # so the previously duplicated branches are collapsed into one path.
    sparse = (vmdisk_alloc == 'thin')
    disk_format = 'cow' if sparse else 'raw'
    vmparams = params.VM(name=vmname,
                         cluster=conn.clusters.get(name=zone),
                         os=params.OperatingSystem(type_=vmos),
                         template=conn.templates.get(name="Blank"),
                         memory=1024 * 1024 * int(vmmem),
                         cpu=params.CPU(topology=params.CpuTopology(cores=int(vmcores))),
                         type_=vmtype)
    vmdisk = params.Disk(size=1024 * 1024 * 1024 * int(vmdisk_size),
                         wipe_after_delete=True,
                         sparse=sparse,
                         interface=vmdisk_int,
                         type_="System",
                         format=disk_format,
                         storage_domains=params.StorageDomains(
                             storage_domain=[conn.storagedomains.get(name=sdomain)]))
    # NOTE: the 'thin' branch used to hard-code the NIC name as 'nic1',
    # silently ignoring the vmnic argument; both policies now honour it.
    nic_net1 = params.NIC(name=vmnic,
                          network=params.Network(name=vmnetwork),
                          interface='virtio')
    try:
        conn.vms.add(vmparams)
    except Exception:
        raise Exception("Error creating VM with specified parameters")
    vm = conn.vms.get(name=vmname)
    try:
        vm.disks.add(vmdisk)
    except Exception:
        raise Exception("Error attaching disk")
    try:
        vm.nics.add(nic_net1)
    except Exception:
        raise Exception("Error adding nic")
# create an instance from a template
def create_vm_template(conn, vmname, image, zone):
    """Clone a new VM named *vmname* from template *image* in cluster *zone*."""
    vmparams = params.VM(name=vmname,
                         cluster=conn.clusters.get(name=zone),
                         template=conn.templates.get(name=image),
                         disks=params.Disks(clone=True))
    try:
        conn.vms.add(vmparams)
    except Exception:
        # narrowed from a bare "except:" so KeyboardInterrupt/SystemExit escape
        raise Exception('error adding template %s' % image)
# start instance
def vm_start(conn, vmname, hostname=None, ip=None, netmask=None, gateway=None,
             domain=None, dns=None, rootpw=None, key=None):
    """Start a VM, optionally passing cloud-init personalisation data.

    cloud-init is enabled when at least one personalisation argument is
    supplied; a static NIC configuration is built only when ip, netmask
    and gateway are all given.
    """
    vm = conn.vms.get(name=vmname)
    use_cloud_init = bool(hostname or ip or netmask or gateway or
                          domain or dns or rootpw or key)
    nics = None
    if ip and netmask and gateway:
        ipinfo = params.IP(address=ip, netmask=netmask, gateway=gateway)
        nic = params.GuestNicConfiguration(name='eth0', boot_protocol='STATIC',
                                           ip=ipinfo, on_boot=True)
        # (a dead intermediate "params.Nics()" assignment was removed here)
        nics = params.GuestNicsConfiguration(nic_configuration=[nic])
    initialization = params.Initialization(
        regenerate_ssh_keys=True, host_name=hostname, domain=domain,
        user_name='root', root_password=rootpw, nic_configurations=nics,
        dns_servers=dns, authorized_ssh_keys=key)
    action = params.Action(use_cloud_init=use_cloud_init,
                           vm=params.VM(initialization=initialization))
    vm.start(action=action)
# Stop instance
def vm_stop(conn, vmname):
    """Power off the VM named *vmname*."""
    conn.vms.get(name=vmname).stop()
# restart instance
def vm_restart(conn, vmname):
    """Stop the VM, poll until it reports 'down', then start it again.

    NOTE: blocks indefinitely if the VM never reaches the 'down' state.
    """
    # NOTE(review): could not confirm a module-level "import time" in this
    # file, so it is imported locally to be safe.
    import time
    # (an unused "state = vm_status(...)" pre-check was removed here)
    vm = conn.vms.get(name=vmname)
    vm.stop()
    # keyword form (name=vmname) for consistency with the other helpers
    while conn.vms.get(name=vmname).get_status().get_state() != 'down':
        time.sleep(5)
    vm.start()
# remove an instance
def vm_remove(conn, vmname):
    """Delete the VM named *vmname* from oVirt."""
    conn.vms.get(name=vmname).delete()
# ------------------------------------------------------------------- #
# VM statuses
#
# Get the VMs status
def vm_status(conn, vmname):
    """Return the current state string (e.g. 'up', 'down') of *vmname*."""
    return conn.vms.get(name=vmname).status.state
# Get VM object and return it's name if object exists
def get_vm(conn, vmname):
    """Return the name of the VM called *vmname*, or the sentinel string
    "empty" when no such VM exists (callers compare against "empty")."""
    vm = conn.vms.get(name=vmname)
    # 'is None' rather than '== None': identity is the correct idiom and
    # avoids surprises from SDK objects overriding __eq__.
    if vm is None:
        return "empty"
    return vm.get_name()
# ------------------------------------------------------------------- #
# Hypervisor operations
#
# not available yet
# ------------------------------------------------------------------- #
# Main
def main():
    """Entry point of the Ansible module.

    Parses the module arguments, connects to the oVirt API and dispatches on
    the requested ``state`` (present / started / shutdown / restart / absent),
    reporting the result via module.exit_json / module.fail_json.
    """
    module = AnsibleModule(
        argument_spec = dict(
            state = dict(default='present', choices=['present', 'absent', 'shutdown', 'started', 'restart']),
            #name = dict(required=True),
            user = dict(required=True),
            url = dict(required=True),
            instance_name = dict(required=True, aliases=['vmname']),
            password = dict(required=True, no_log=True),
            image = dict(),
            resource_type = dict(choices=['new', 'template']),
            zone = dict(),
            instance_disksize = dict(aliases=['vm_disksize']),
            instance_cpus = dict(default=1, aliases=['vmcpus']),
            instance_nic = dict(aliases=['vmnic']),
            instance_network = dict(default='rhevm', aliases=['vmnetwork']),
            instance_mem = dict(aliases=['vmmem']),
            instance_type = dict(default='server', aliases=['vmtype'], choices=['server', 'desktop']),
            disk_alloc = dict(default='thin', choices=['thin', 'preallocated']),
            disk_int = dict(default='virtio', choices=['virtio', 'ide']),
            instance_os = dict(aliases=['vmos']),
            instance_cores = dict(default=1, aliases=['vmcores']),
            instance_hostname = dict(aliases=['hostname']),
            instance_ip = dict(aliases=['ip']),
            instance_netmask = dict(aliases=['netmask']),
            instance_gateway = dict(aliases=['gateway']),
            instance_domain = dict(aliases=['domain']),
            instance_dns = dict(aliases=['dns']),
            instance_rootpw = dict(aliases=['rootpw']),
            instance_key = dict(aliases=['key']),
            sdomain = dict(),
            region = dict(),
        )
    )
    if not HAS_OVIRTSDK:
        module.fail_json(msg='ovirtsdk required for this module')
    # Unpack module parameters into locals for readability below.
    state = module.params['state']
    user = module.params['user']
    url = module.params['url']
    vmname = module.params['instance_name']
    password = module.params['password']
    image = module.params['image'] # name of the image to deploy
    resource_type = module.params['resource_type'] # template or from scratch
    zone = module.params['zone'] # oVirt cluster
    vmdisk_size = module.params['instance_disksize'] # disksize
    vmcpus = module.params['instance_cpus'] # number of cpu
    vmnic = module.params['instance_nic'] # network interface
    vmnetwork = module.params['instance_network'] # logical network
    vmmem = module.params['instance_mem'] # mem size
    vmdisk_alloc = module.params['disk_alloc'] # thin, preallocated
    vmdisk_int = module.params['disk_int'] # disk interface virtio or ide
    vmos = module.params['instance_os'] # Operating System
    vmtype = module.params['instance_type'] # server or desktop
    vmcores = module.params['instance_cores'] # number of cores
    sdomain = module.params['sdomain'] # storage domain to store disk on
    region = module.params['region'] # oVirt Datacenter
    # cloud-init guest provisioning parameters (all optional)
    hostname = module.params['instance_hostname']
    ip = module.params['instance_ip']
    netmask = module.params['instance_netmask']
    gateway = module.params['instance_gateway']
    domain = module.params['instance_domain']
    dns = module.params['instance_dns']
    rootpw = module.params['instance_rootpw']
    key = module.params['instance_key']
    #initialize connection
    try:
        c = conn(url+"/api", user, password)
    except Exception, e:
        module.fail_json(msg='%s' % e)
    # state=present: create the VM (from template or from scratch) unless it
    # already exists; idempotent when the VM is found.
    if state == 'present':
        if get_vm(c, vmname) == "empty":
            if resource_type == 'template':
                try:
                    create_vm_template(c, vmname, image, zone)
                except Exception, e:
                    module.fail_json(msg='%s' % e)
                module.exit_json(changed=True, msg="deployed VM %s from template %s" % (vmname,image))
            elif resource_type == 'new':
                # FIXME: refactor, use keyword args.
                try:
                    create_vm(c, vmtype, vmname, zone, vmdisk_size, vmcpus, vmnic, vmnetwork, vmmem, vmdisk_alloc, sdomain, vmcores, vmos, vmdisk_int)
                except Exception, e:
                    module.fail_json(msg='%s' % e)
                module.exit_json(changed=True, msg="deployed VM %s from scratch" % vmname)
            else:
                module.exit_json(changed=False, msg="You did not specify a resource type")
        else:
            module.exit_json(changed=False, msg="VM %s already exists" % vmname)
    # state=started: boot the VM (with optional cloud-init settings) if it is
    # not already up.
    if state == 'started':
        if vm_status(c, vmname) == 'up':
            module.exit_json(changed=False, msg="VM %s is already running" % vmname)
        else:
            #vm_start(c, vmname)
            vm_start(c, vmname, hostname, ip, netmask, gateway, domain, dns, rootpw, key)
            module.exit_json(changed=True, msg="VM %s started" % vmname)
    # state=shutdown: power the VM off if it is not already down.
    if state == 'shutdown':
        if vm_status(c, vmname) == 'down':
            module.exit_json(changed=False, msg="VM %s is already shutdown" % vmname)
        else:
            vm_stop(c, vmname)
            module.exit_json(changed=True, msg="VM %s is shutting down" % vmname)
    # state=restart: only restarts a running VM; a stopped VM is left alone.
    if state == 'restart':
        if vm_status(c, vmname) == 'up':
            vm_restart(c, vmname)
            module.exit_json(changed=True, msg="VM %s is restarted" % vmname)
        else:
            module.exit_json(changed=False, msg="VM %s is not running" % vmname)
    # state=absent: delete the VM if it exists.
    if state == 'absent':
        if get_vm(c, vmname) == "empty":
            module.exit_json(changed=False, msg="VM %s does not exist" % vmname)
        else:
            vm_remove(c, vmname)
            module.exit_json(changed=True, msg="VM %s removed" % vmname)
# import module snippets
from ansible.module_utils.basic import *
main()
| gpl-3.0 |
cg31/glmark2 | waflib/Tools/c_config.py | 11 | 21756 | #! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! http://waf.googlecode.com/git/docs/wafbook/single.html#_obtaining_the_waf_file
import sys
if sys.hexversion < 0x020400f0: from sets import Set as set
import os,imp,sys,re,shlex,shutil
from waflib import Build,Utils,Configure,Task,Options,Logs,TaskGen,Errors,ConfigSet,Runner
from waflib.TaskGen import before_method,after_method,feature
from waflib.Configure import conf
WAF_CONFIG_H='config.h'
DEFKEYS='define_key'
INCKEYS='include_key'
cfg_ver={'atleast-version':'>=','exact-version':'==','max-version':'<=',}
SNIP_FUNCTION='''
int main() {
void *p;
p=(void*)(%s);
return 0;
}
'''
SNIP_TYPE='''
int main() {
if ((%(type_name)s *) 0) return 0;
if (sizeof (%(type_name)s)) return 0;
}
'''
SNIP_CLASS='''
int main() {
if (
}
'''
SNIP_EMPTY_PROGRAM='''
int main() {
return 0;
}
'''
SNIP_FIELD='''
int main() {
char *off;
off = (char*) &((%(type_name)s*)0)->%(field_name)s;
return (size_t) off < sizeof(%(type_name)s);
}
'''
MACRO_TO_DESTOS={'__linux__':'linux','__GNU__':'gnu','__FreeBSD__':'freebsd','__NetBSD__':'netbsd','__OpenBSD__':'openbsd','__sun':'sunos','__hpux':'hpux','__sgi':'irix','_AIX':'aix','__CYGWIN__':'cygwin','__MSYS__':'msys','_UWIN':'uwin','_WIN64':'win32','_WIN32':'win32','__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__':'darwin','__ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__':'darwin','__QNX__':'qnx','__native_client__':'nacl'}
MACRO_TO_DEST_CPU={'__x86_64__':'x86_64','__i386__':'x86','__ia64__':'ia','__mips__':'mips','__sparc__':'sparc','__alpha__':'alpha','__arm__':'arm','__hppa__':'hppa','__powerpc__':'powerpc',}
def parse_flags(self,line,uselib,env=None,force_static=False):
assert(isinstance(line,str))
env=env or self.env
app=env.append_value
appu=env.append_unique
lex=shlex.shlex(line,posix=False)
lex.whitespace_split=True
lex.commenters=''
lst=list(lex)
while lst:
x=lst.pop(0)
st=x[:2]
ot=x[2:]
if st=='-I'or st=='/I':
if not ot:ot=lst.pop(0)
appu('INCLUDES_'+uselib,[ot])
elif st=='-include':
tmp=[x,lst.pop(0)]
app('CFLAGS',tmp)
app('CXXFLAGS',tmp)
elif st=='-D'or(self.env.CXX_NAME=='msvc'and st=='/D'):
if not ot:ot=lst.pop(0)
app('DEFINES_'+uselib,[ot])
elif st=='-l':
if not ot:ot=lst.pop(0)
prefix=force_static and'STLIB_'or'LIB_'
appu(prefix+uselib,[ot])
elif st=='-L':
if not ot:ot=lst.pop(0)
appu('LIBPATH_'+uselib,[ot])
elif x=='-pthread'or x.startswith('+')or x.startswith('-std'):
app('CFLAGS_'+uselib,[x])
app('CXXFLAGS_'+uselib,[x])
app('LINKFLAGS_'+uselib,[x])
elif x=='-framework':
appu('FRAMEWORK_'+uselib,[lst.pop(0)])
elif x.startswith('-F'):
appu('FRAMEWORKPATH_'+uselib,[x[2:]])
elif x.startswith('-Wl'):
app('LINKFLAGS_'+uselib,[x])
elif x.startswith('-m')or x.startswith('-f')or x.startswith('-dynamic'):
app('CFLAGS_'+uselib,[x])
app('CXXFLAGS_'+uselib,[x])
elif x.startswith('-bundle'):
app('LINKFLAGS_'+uselib,[x])
elif x.startswith('-undefined'):
arg=lst.pop(0)
app('LINKFLAGS_'+uselib,[x,arg])
elif x.startswith('-arch')or x.startswith('-isysroot'):
tmp=[x,lst.pop(0)]
app('CFLAGS_'+uselib,tmp)
app('CXXFLAGS_'+uselib,tmp)
app('LINKFLAGS_'+uselib,tmp)
elif x.endswith('.a')or x.endswith('.so')or x.endswith('.dylib'):
appu('LINKFLAGS_'+uselib,[x])
def ret_msg(self,f,kw):
	"""Return *f* unchanged when it is a plain string, otherwise treat it as a callable and return f(kw)."""
	if not isinstance(f,str):
		return f(kw)
	return f
def validate_cfg(self,kw):
if not'path'in kw:
if not self.env.PKGCONFIG:
self.find_program('pkg-config',var='PKGCONFIG')
kw['path']=self.env.PKGCONFIG
if'atleast_pkgconfig_version'in kw:
if not'msg'in kw:
kw['msg']='Checking for pkg-config version >= %r'%kw['atleast_pkgconfig_version']
return
if not'okmsg'in kw:
kw['okmsg']='yes'
if not'errmsg'in kw:
kw['errmsg']='not found'
if'modversion'in kw:
if not'msg'in kw:
kw['msg']='Checking for %r version'%kw['modversion']
return
for x in cfg_ver.keys():
y=x.replace('-','_')
if y in kw:
if not'package'in kw:
raise ValueError('%s requires a package'%x)
if not'msg'in kw:
kw['msg']='Checking for %r %s %s'%(kw['package'],cfg_ver[x],kw[y])
return
if not'msg'in kw:
kw['msg']='Checking for %r'%(kw['package']or kw['path'])
def exec_cfg(self,kw):
if'atleast_pkgconfig_version'in kw:
cmd=[kw['path'],'--atleast-pkgconfig-version=%s'%kw['atleast_pkgconfig_version']]
self.cmd_and_log(cmd)
if not'okmsg'in kw:
kw['okmsg']='yes'
return
for x in cfg_ver:
y=x.replace('-','_')
if y in kw:
self.cmd_and_log([kw['path'],'--%s=%s'%(x,kw[y]),kw['package']])
if not'okmsg'in kw:
kw['okmsg']='yes'
self.define(self.have_define(kw.get('uselib_store',kw['package'])),1,0)
break
if'modversion'in kw:
version=self.cmd_and_log([kw['path'],'--modversion',kw['modversion']]).strip()
self.define('%s_VERSION'%Utils.quote_define_name(kw.get('uselib_store',kw['modversion'])),version)
return version
lst=[kw['path']]
defi=kw.get('define_variable',None)
if not defi:
defi=self.env.PKG_CONFIG_DEFINES or{}
for key,val in defi.items():
lst.append('--define-variable=%s=%s'%(key,val))
if kw['package']:
lst.extend(Utils.to_list(kw['package']))
if'variables'in kw:
env=kw.get('env',self.env)
uselib=kw.get('uselib_store',kw['package'].upper())
vars=Utils.to_list(kw['variables'])
for v in vars:
val=self.cmd_and_log(lst+['--variable='+v]).strip()
var='%s_%s'%(uselib,v)
env[var]=val
if not'okmsg'in kw:
kw['okmsg']='yes'
return
static=False
if'args'in kw:
args=Utils.to_list(kw['args'])
if'--static'in args or'--static-libs'in args:
static=True
lst+=args
ret=self.cmd_and_log(lst)
if not'okmsg'in kw:
kw['okmsg']='yes'
self.define(self.have_define(kw.get('uselib_store',kw['package'])),1,0)
self.parse_flags(ret,kw.get('uselib_store',kw['package'].upper()),kw.get('env',self.env),force_static=static)
return ret
def check_cfg(self,*k,**kw):
if k:
lst=k[0].split()
kw['package']=lst[0]
kw['args']=' '.join(lst[1:])
self.validate_cfg(kw)
if'msg'in kw:
self.start_msg(kw['msg'])
ret=None
try:
ret=self.exec_cfg(kw)
except self.errors.WafError ,e:
if'errmsg'in kw:
self.end_msg(kw['errmsg'],'YELLOW')
if Logs.verbose>1:
raise
else:
self.fatal('The configuration failed')
else:
kw['success']=ret
if'okmsg'in kw:
self.end_msg(self.ret_msg(kw['okmsg'],kw))
return ret
def validate_c(self,kw):
if not'env'in kw:
kw['env']=self.env.derive()
env=kw['env']
if not'compiler'in kw and not'features'in kw:
kw['compiler']='c'
if env['CXX_NAME']and Task.classes.get('cxx',None):
kw['compiler']='cxx'
if not self.env['CXX']:
self.fatal('a c++ compiler is required')
else:
if not self.env['CC']:
self.fatal('a c compiler is required')
if not'compile_mode'in kw:
kw['compile_mode']='c'
if'cxx'in Utils.to_list(kw.get('features',[]))or kw.get('compiler','')=='cxx':
kw['compile_mode']='cxx'
if not'type'in kw:
kw['type']='cprogram'
if not'features'in kw:
kw['features']=[kw['compile_mode'],kw['type']]
else:
kw['features']=Utils.to_list(kw['features'])
if not'compile_filename'in kw:
kw['compile_filename']='test.c'+((kw['compile_mode']=='cxx')and'pp'or'')
def to_header(dct):
if'header_name'in dct:
dct=Utils.to_list(dct['header_name'])
return''.join(['#include <%s>\n'%x for x in dct])
return''
if'framework_name'in kw:
fwkname=kw['framework_name']
if not'uselib_store'in kw:
kw['uselib_store']=fwkname.upper()
if not kw.get('no_header',False):
if not'header_name'in kw:
kw['header_name']=[]
fwk='%s/%s.h'%(fwkname,fwkname)
if kw.get('remove_dot_h',None):
fwk=fwk[:-2]
kw['header_name']=Utils.to_list(kw['header_name'])+[fwk]
kw['msg']='Checking for framework %s'%fwkname
kw['framework']=fwkname
if'function_name'in kw:
fu=kw['function_name']
if not'msg'in kw:
kw['msg']='Checking for function %s'%fu
kw['code']=to_header(kw)+SNIP_FUNCTION%fu
if not'uselib_store'in kw:
kw['uselib_store']=fu.upper()
if not'define_name'in kw:
kw['define_name']=self.have_define(fu)
elif'type_name'in kw:
tu=kw['type_name']
if not'header_name'in kw:
kw['header_name']='stdint.h'
if'field_name'in kw:
field=kw['field_name']
kw['code']=to_header(kw)+SNIP_FIELD%{'type_name':tu,'field_name':field}
if not'msg'in kw:
kw['msg']='Checking for field %s in %s'%(field,tu)
if not'define_name'in kw:
kw['define_name']=self.have_define((tu+'_'+field).upper())
else:
kw['code']=to_header(kw)+SNIP_TYPE%{'type_name':tu}
if not'msg'in kw:
kw['msg']='Checking for type %s'%tu
if not'define_name'in kw:
kw['define_name']=self.have_define(tu.upper())
elif'header_name'in kw:
if not'msg'in kw:
kw['msg']='Checking for header %s'%kw['header_name']
l=Utils.to_list(kw['header_name'])
assert len(l)>0,'list of headers in header_name is empty'
kw['code']=to_header(kw)+SNIP_EMPTY_PROGRAM
if not'uselib_store'in kw:
kw['uselib_store']=l[0].upper()
if not'define_name'in kw:
kw['define_name']=self.have_define(l[0])
if'lib'in kw:
if not'msg'in kw:
kw['msg']='Checking for library %s'%kw['lib']
if not'uselib_store'in kw:
kw['uselib_store']=kw['lib'].upper()
if'stlib'in kw:
if not'msg'in kw:
kw['msg']='Checking for static library %s'%kw['stlib']
if not'uselib_store'in kw:
kw['uselib_store']=kw['stlib'].upper()
if'fragment'in kw:
kw['code']=kw['fragment']
if not'msg'in kw:
kw['msg']='Checking for code snippet'
if not'errmsg'in kw:
kw['errmsg']='no'
for(flagsname,flagstype)in[('cxxflags','compiler'),('cflags','compiler'),('linkflags','linker')]:
if flagsname in kw:
if not'msg'in kw:
kw['msg']='Checking for %s flags %s'%(flagstype,kw[flagsname])
if not'errmsg'in kw:
kw['errmsg']='no'
if not'execute'in kw:
kw['execute']=False
if kw['execute']:
kw['features'].append('test_exec')
if not'errmsg'in kw:
kw['errmsg']='not found'
if not'okmsg'in kw:
kw['okmsg']='yes'
if not'code'in kw:
kw['code']=SNIP_EMPTY_PROGRAM
if self.env[INCKEYS]:
kw['code']='\n'.join(['#include <%s>'%x for x in self.env[INCKEYS]])+'\n'+kw['code']
if not kw.get('success'):kw['success']=None
if'define_name'in kw:
self.undefine(kw['define_name'])
assert'msg'in kw,'invalid parameters, read http://freehackers.org/~tnagy/wafbook/single.html#config_helpers_c'
def post_check(self,*k,**kw):
is_success=0
if kw['execute']:
if kw['success']is not None:
if kw.get('define_ret',False):
is_success=kw['success']
else:
is_success=(kw['success']==0)
else:
is_success=(kw['success']==0)
if'define_name'in kw:
if'header_name'in kw or'function_name'in kw or'type_name'in kw or'fragment'in kw:
nm=kw['define_name']
if kw['execute']and kw.get('define_ret',None)and isinstance(is_success,str):
self.define(kw['define_name'],is_success,quote=kw.get('quote',1))
else:
self.define_cond(kw['define_name'],is_success)
else:
self.define_cond(kw['define_name'],is_success)
if'header_name'in kw:
if kw.get('auto_add_header_name',False):
self.env.append_value(INCKEYS,Utils.to_list(kw['header_name']))
if is_success and'uselib_store'in kw:
from waflib.Tools import ccroot
_vars=set([])
for x in kw['features']:
if x in ccroot.USELIB_VARS:
_vars|=ccroot.USELIB_VARS[x]
for k in _vars:
lk=k.lower()
if k=='INCLUDES':lk='includes'
if k=='DEFINES':lk='defines'
if lk in kw:
val=kw[lk]
if isinstance(val,str):
val=val.rstrip(os.path.sep)
self.env.append_unique(k+'_'+kw['uselib_store'],val)
return is_success
def check(self,*k,**kw):
self.validate_c(kw)
self.start_msg(kw['msg'])
ret=None
try:
ret=self.run_c_code(*k,**kw)
except self.errors.ConfigurationError ,e:
self.end_msg(kw['errmsg'],'YELLOW')
if Logs.verbose>1:
raise
else:
self.fatal('The configuration failed')
else:
kw['success']=ret
self.end_msg(self.ret_msg(kw['okmsg'],kw))
ret=self.post_check(*k,**kw)
if not ret:
self.fatal('The configuration failed %r'%ret)
return ret
class test_exec(Task.Task):
color='PINK'
def run(self):
if getattr(self.generator,'rpath',None):
if getattr(self.generator,'define_ret',False):
self.generator.bld.retval=self.generator.bld.cmd_and_log([self.inputs[0].abspath()])
else:
self.generator.bld.retval=self.generator.bld.exec_command([self.inputs[0].abspath()])
else:
env=self.env.env or{}
env.update(dict(os.environ))
for var in('LD_LIBRARY_PATH','DYLD_LIBRARY_PATH','PATH'):
env[var]=self.inputs[0].parent.abspath()+os.path.pathsep+env.get(var,'')
if getattr(self.generator,'define_ret',False):
self.generator.bld.retval=self.generator.bld.cmd_and_log([self.inputs[0].abspath()],env=env)
else:
self.generator.bld.retval=self.generator.bld.exec_command([self.inputs[0].abspath()],env=env)
def test_exec_fun(self):
self.create_task('test_exec',self.link_task.outputs[0])
CACHE_RESULTS=1
COMPILE_ERRORS=2
def run_c_code(self,*k,**kw):
lst=[str(v)for(p,v)in kw.items()if p!='env']
h=Utils.h_list(lst)
dir=self.bldnode.abspath()+os.sep+(not Utils.is_win32 and'.'or'')+'conf_check_'+Utils.to_hex(h)
try:
os.makedirs(dir)
except:
pass
try:
os.stat(dir)
except:
self.fatal('cannot use the configuration test folder %r'%dir)
cachemode=getattr(Options.options,'confcache',None)
if cachemode==CACHE_RESULTS:
try:
proj=ConfigSet.ConfigSet(os.path.join(dir,'cache_run_c_code'))
ret=proj['cache_run_c_code']
except:
pass
else:
if isinstance(ret,str)and ret.startswith('Test does not build'):
self.fatal(ret)
return ret
bdir=os.path.join(dir,'testbuild')
if not os.path.exists(bdir):
os.makedirs(bdir)
self.test_bld=bld=Build.BuildContext(top_dir=dir,out_dir=bdir)
bld.init_dirs()
bld.progress_bar=0
bld.targets='*'
if kw['compile_filename']:
node=bld.srcnode.make_node(kw['compile_filename'])
node.write(kw['code'])
bld.logger=self.logger
bld.all_envs.update(self.all_envs)
bld.env=kw['env']
o=bld(features=kw['features'],source=kw['compile_filename'],target='testprog')
for k,v in kw.items():
setattr(o,k,v)
self.to_log("==>\n%s\n<=="%kw['code'])
bld.targets='*'
ret=-1
try:
try:
bld.compile()
except Errors.WafError:
ret='Test does not build: %s'%Utils.ex_stack()
self.fatal(ret)
else:
ret=getattr(bld,'retval',0)
finally:
proj=ConfigSet.ConfigSet()
proj['cache_run_c_code']=ret
proj.store(os.path.join(dir,'cache_run_c_code'))
return ret
def check_cxx(self,*k,**kw):
	"""Shortcut for conf.check() that forces the C++ compiler."""
	return self.check(*k,**dict(kw,compiler='cxx'))
def check_cc(self,*k,**kw):
	"""Shortcut for conf.check() that forces the C compiler."""
	return self.check(*k,**dict(kw,compiler='c'))
def define(self,key,val,quote=True):
	"""Add or replace the ``key=val`` entry in env.DEFINES; string values are double-quoted unless *quote* is false. The key is also recorded under DEFKEYS."""
	assert key and isinstance(key,str)
	if isinstance(val,(int,float)):
		fmt='%s=%s'
	else:
		fmt=quote and'%s="%s"'or'%s=%s'
	entry=fmt%(key,str(val))
	prefix=key+'='
	defs=self.env['DEFINES']
	for pos,item in enumerate(defs):
		if item.startswith(prefix):
			defs[pos]=entry
			break
	else:
		self.env.append_value('DEFINES',entry)
	self.env.append_unique(DEFKEYS,key)
def undefine(self,key):
	"""Remove every ``key=...`` entry from env.DEFINES, still recording *key* under DEFKEYS."""
	assert key and isinstance(key,str)
	prefix=key+'='
	self.env['DEFINES']=[d for d in self.env['DEFINES']if not d.startswith(prefix)]
	self.env.append_unique(DEFKEYS,key)
def define_cond(self,key,val):
	"""define(key, 1) when *val* is truthy, otherwise undefine(key)."""
	assert key and isinstance(key,str)
	if not val:
		self.undefine(key)
	else:
		self.define(key,1)
def is_defined(self,key):
	"""Return True when env.DEFINES holds an entry for *key*."""
	assert key and isinstance(key,str)
	prefix=key+'='
	return any(d.startswith(prefix)for d in self.env['DEFINES'])
def get_define(self,key):
	"""Return the value stored for *key* in env.DEFINES, or None when absent."""
	assert key and isinstance(key,str)
	prefix=key+'='
	for entry in self.env['DEFINES']:
		if entry.startswith(prefix):
			return entry[len(prefix):]
	return None
def have_define(self,key):
	# Build the 'HAVE_<KEY>' define name for *key*; an instance attribute
	# HAVE_PAT, when set, overrides the default 'HAVE_%s' pattern.
	return self.__dict__.get('HAVE_PAT','HAVE_%s')%Utils.quote_define_name(key)
def write_config_header(self,configfile='',guard='',top=False,env=None,defines=True,headers=False,remove=True):
if not configfile:configfile=WAF_CONFIG_H
waf_guard=guard or'_%s_WAF'%Utils.quote_define_name(configfile)
node=top and self.bldnode or self.path.get_bld()
node=node.make_node(configfile)
node.parent.mkdir()
lst=['/* WARNING! All changes made to this file will be lost! */\n']
lst.append('#ifndef %s\n#define %s\n'%(waf_guard,waf_guard))
lst.append(self.get_config_header(defines,headers))
lst.append('\n#endif /* %s */\n'%waf_guard)
node.write('\n'.join(lst))
env=env or self.env
env.append_unique(Build.CFG_FILES,[node.abspath()])
if remove:
for key in self.env[DEFKEYS]:
self.undefine(key)
self.env[DEFKEYS]=[]
def get_config_header(self,defines=True,headers=False):
	"""Build the body of a config header: optional '#include' lines (from INCKEYS) followed by '#define'/'#undef' lines (from DEFKEYS)."""
	out=[]
	if headers:
		out.extend('#include <%s>'%h for h in self.env[INCKEYS])
	if defines:
		for key in self.env[DEFKEYS]:
			if self.is_defined(key):
				out.append('#define %s %s'%(key,self.get_define(key)))
			else:
				out.append('/* #undef %s */'%key)
	return'\n'.join(out)
def cc_add_flags(conf):
	"""Import CPPFLAGS (into CFLAGS) and CFLAGS from the OS environment."""
	for names in(('CPPFLAGS','CFLAGS'),('CFLAGS',)):
		conf.add_os_flags(*names)
def cxx_add_flags(conf):
	"""Import CPPFLAGS (into CXXFLAGS) and CXXFLAGS from the OS environment."""
	for names in(('CPPFLAGS','CXXFLAGS'),('CXXFLAGS',)):
		conf.add_os_flags(*names)
def link_add_flags(conf):
	"""Import LINKFLAGS and LDFLAGS (into LINKFLAGS) from the OS environment."""
	for names in(('LINKFLAGS',),('LDFLAGS','LINKFLAGS')):
		conf.add_os_flags(*names)
def cc_load_tools(conf):
	# Default DEST_OS from the build host platform before loading the C tool.
	if not conf.env.DEST_OS:
		conf.env.DEST_OS=Utils.unversioned_sys_platform()
	conf.load('c')
def cxx_load_tools(conf):
	# Default DEST_OS from the build host platform before loading the C++ tool.
	if not conf.env.DEST_OS:
		conf.env.DEST_OS=Utils.unversioned_sys_platform()
	conf.load('cxx')
def get_cc_version(conf,cc,gcc=False,icc=False):
cmd=cc+['-dM','-E','-']
env=conf.env.env or None
try:
p=Utils.subprocess.Popen(cmd,stdin=Utils.subprocess.PIPE,stdout=Utils.subprocess.PIPE,stderr=Utils.subprocess.PIPE,env=env)
p.stdin.write('\n')
out=p.communicate()[0]
except:
conf.fatal('Could not determine the compiler version %r'%cmd)
if not isinstance(out,str):
out=out.decode(sys.stdout.encoding)
if gcc:
if out.find('__INTEL_COMPILER')>=0:
conf.fatal('The intel compiler pretends to be gcc')
if out.find('__GNUC__')<0:
conf.fatal('Could not determine the compiler type')
if icc and out.find('__INTEL_COMPILER')<0:
conf.fatal('Not icc/icpc')
k={}
if icc or gcc:
out=out.split('\n')
for line in out:
lst=shlex.split(line)
if len(lst)>2:
key=lst[1]
val=lst[2]
k[key]=val
def isD(var):
return var in k
def isT(var):
return var in k and k[var]!='0'
if not conf.env.DEST_OS:
conf.env.DEST_OS=''
for i in MACRO_TO_DESTOS:
if isD(i):
conf.env.DEST_OS=MACRO_TO_DESTOS[i]
break
else:
if isD('__APPLE__')and isD('__MACH__'):
conf.env.DEST_OS='darwin'
elif isD('__unix__'):
conf.env.DEST_OS='generic'
if isD('__ELF__'):
conf.env.DEST_BINFMT='elf'
elif isD('__WINNT__')or isD('__CYGWIN__'):
conf.env.DEST_BINFMT='pe'
conf.env.LIBDIR=conf.env['PREFIX']+'/bin'
elif isD('__APPLE__'):
conf.env.DEST_BINFMT='mac-o'
if not conf.env.DEST_BINFMT:
conf.env.DEST_BINFMT=Utils.destos_to_binfmt(conf.env.DEST_OS)
for i in MACRO_TO_DEST_CPU:
if isD(i):
conf.env.DEST_CPU=MACRO_TO_DEST_CPU[i]
break
Logs.debug('ccroot: dest platform: '+' '.join([conf.env[x]or'?'for x in('DEST_OS','DEST_BINFMT','DEST_CPU')]))
if icc:
ver=k['__INTEL_COMPILER']
conf.env['CC_VERSION']=(ver[:-2],ver[-2],ver[-1])
else:
conf.env['CC_VERSION']=(k['__GNUC__'],k['__GNUC_MINOR__'],k['__GNUC_PATCHLEVEL__'])
return k
def get_xlc_version(conf,cc):
	# Detect the IBM XL C/C++ compiler version by parsing 'cc -qversion'.
	# The banner looks like 'IBM XL C/C++ ..., V<major>.<minor>'.
	version_re=re.compile(r"IBM XL C/C\+\+.*, V(?P<major>\d*)\.(?P<minor>\d*)",re.I).search
	cmd=cc+['-qversion']
	try:
		out,err=conf.cmd_and_log(cmd,output=0)
	except Errors.WafError:
		conf.fatal('Could not find xlc %r'%cmd)
	# Some xlc releases print the banner on stderr instead of stdout.
	if out:match=version_re(out)
	else:match=version_re(err)
	if not match:
		conf.fatal('Could not determine the XLC version.')
	k=match.groupdict()
	# Stored as a (major, minor) tuple of strings.
	conf.env['CC_VERSION']=(k['major'],k['minor'])
def add_as_needed(self):
	# On ELF targets built with gcc, link with --as-needed so unused shared
	# libraries are not recorded as DT_NEEDED entries.
	# NOTE(review): the flag is added bare ('--as-needed'), not as
	# '-Wl,--as-needed' -- confirm the compiler driver forwards it to the linker.
	if self.env.DEST_BINFMT=='elf'and'gcc'in(self.env.CXX_NAME,self.env.CC_NAME):
		self.env.append_unique('LINKFLAGS','--as-needed')
class cfgtask(Task.TaskBase):
def display(self):
return''
def runnable_status(self):
return Task.RUN_ME
def run(self):
conf=self.conf
bld=Build.BuildContext(top_dir=conf.srcnode.abspath(),out_dir=conf.bldnode.abspath())
bld.env=conf.env
bld.init_dirs()
bld.in_msg=1
bld.logger=self.logger
try:
bld.check(**self.args)
except:
return 1
def multicheck(self,*k,**kw):
self.start_msg(kw.get('msg','Executing %d configuration tests'%len(k)))
class par(object):
def __init__(self):
self.keep=False
self.cache_global=Options.cache_global
self.nocache=Options.options.nocache
self.returned_tasks=[]
def total(self):
return len(tasks)
def to_log(self,*k,**kw):
return
bld=par()
tasks=[]
for dct in k:
x=cfgtask(bld=bld)
tasks.append(x)
x.args=dct
x.bld=bld
x.conf=self
x.args=dct
x.logger=Logs.make_mem_logger(str(id(x)),self.logger)
def it():
yield tasks
while 1:
yield[]
p=Runner.Parallel(bld,Options.options.jobs)
p.biter=it()
p.start()
for x in tasks:
x.logger.memhandler.flush()
for x in tasks:
if x.hasrun!=Task.SUCCESS:
self.end_msg(kw.get('errmsg','no'),color='YELLOW')
self.fatal(kw.get('fatalmsg',None)or'One of the tests has failed, see the config.log for more information')
self.end_msg('ok')
conf(parse_flags)
conf(ret_msg)
conf(validate_cfg)
conf(exec_cfg)
conf(check_cfg)
conf(validate_c)
conf(post_check)
conf(check)
feature('test_exec')(test_exec_fun)
after_method('apply_link')(test_exec_fun)
conf(run_c_code)
conf(check_cxx)
conf(check_cc)
conf(define)
conf(undefine)
conf(define_cond)
conf(is_defined)
conf(get_define)
conf(have_define)
conf(write_config_header)
conf(get_config_header)
conf(cc_add_flags)
conf(cxx_add_flags)
conf(link_add_flags)
conf(cc_load_tools)
conf(cxx_load_tools)
conf(get_cc_version)
conf(get_xlc_version)
conf(add_as_needed)
conf(multicheck) | gpl-3.0 |
tcanabarro/dynamic-dynamodb | dynamic_dynamodb/log_handler.py | 9 | 3339 | # -*- coding: utf-8 -*-
"""
Logging management for Dynamic DynamoDB
APACHE LICENSE 2.0
Copyright 2013-2014 Sebastian Dahlgren
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging
import os.path
import sys
from logutils import dictconfig
import config_handler
# Python logging configuration in logging.config dictConfig schema form.
LOG_CONFIG = {
    'version': 1,
    # BUGFIX: this key was misspelled 'disable_existing_LOGGERs'. dictConfig
    # silently ignores unknown keys, so the schema default (True) applied and
    # pre-existing loggers were being disabled.
    'disable_existing_loggers': False,
    'formatters': {
        'standard': {
            'format': (
                '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
            )
        },
        # Same layout with a 'dryrun' marker; selected at startup when the
        # tool runs in dry-run mode.
        'dry-run': {
            'format': (
                '%(asctime)s - %(name)s - %(levelname)s - dryrun - %(message)s'
            )
        },
    },
    'handlers': {
        'console': {
            'level': 'DEBUG',
            'class': 'logging.StreamHandler',
            'formatter': 'standard'
        }
    },
    'loggers': {
        # Root logger: warnings and above go to the console.
        '': {
            'handlers': ['console'],
            'level': 'WARNING',
            'propagate': True
        },
        # Application logger: full debug output, not propagated to root.
        'dynamic-dynamodb': {
            'handlers': ['console'],
            'level': 'DEBUG',
            'propagate': False
        }
    }
}
if config_handler.get_logging_option('log_config_file'):
    # Read configuration from an external Python logging file
    # NOTE(review): relies on the logging.config submodule being importable as
    # an attribute of 'logging'; 'import logging' alone does not guarantee the
    # submodule is loaded -- confirm an earlier import makes it available.
    logging.config.fileConfig(os.path.expanduser(
        config_handler.get_logging_option('log_config_file')))
else:
    # File handler
    if config_handler.get_logging_option('log_file'):
        log_file = os.path.expanduser(
            config_handler.get_logging_option('log_file'))
        # Daily-rotating file handler, keeping 5 old files.
        LOG_CONFIG['handlers']['file'] = {
            'level': 'DEBUG',
            'class': 'logging.handlers.TimedRotatingFileHandler',
            'formatter': 'standard',
            'filename': log_file,
            'when': 'midnight',
            'backupCount': 5
        }
        LOG_CONFIG['loggers']['']['handlers'].append('file')
        LOG_CONFIG['loggers']['dynamic-dynamodb']['handlers'].append('file')
    # Configure a custom log level
    if config_handler.get_logging_option('log_level'):
        LOG_CONFIG['handlers']['console']['level'] = \
            config_handler.get_logging_option('log_level').upper()
        if 'file' in LOG_CONFIG['handlers']:
            LOG_CONFIG['handlers']['file']['level'] = \
                config_handler.get_logging_option('log_level').upper()
    # Add dry-run to the formatter if in dry-run mode
    if config_handler.get_global_option('dry_run'):
        LOG_CONFIG['handlers']['console']['formatter'] = 'dry-run'
        if 'file' in LOG_CONFIG['handlers']:
            LOG_CONFIG['handlers']['file']['formatter'] = 'dry-run'
try:
    dictconfig.dictConfig(LOG_CONFIG)
except ValueError as error:
    print('Error configuring logger: {0}'.format(error))
    sys.exit(1)
# NOTE(review): this clause only re-raises and is therefore a no-op;
# it could be dropped without changing behavior.
except:
    raise
# Module-wide logger used by the rest of the package.
LOGGER = logging.getLogger('dynamic-dynamodb')
| apache-2.0 |
PetePriority/home-assistant | tests/components/sensor/test_mfi.py | 4 | 6956 | """The tests for the mFi sensor platform."""
import unittest
import unittest.mock as mock
import requests
from homeassistant.setup import setup_component
import homeassistant.components.sensor as sensor
import homeassistant.components.sensor.mfi as mfi
from homeassistant.const import TEMP_CELSIUS
from tests.common import get_test_home_assistant
class TestMfiSensorSetup(unittest.TestCase):
    """Test the mFi sensor platform."""
    # Platform under test and the Home Assistant component it belongs to.
    PLATFORM = mfi
    COMPONENT = sensor
    THING = 'sensor'
    # A complete, valid platform configuration used as the baseline; tests
    # copy it and remove/alter keys to exercise specific code paths.
    GOOD_CONFIG = {
        'sensor': {
            'platform': 'mfi',
            'host': 'foo',
            'port': 6123,
            'username': 'user',
            'password': 'pass',
            'ssl': True,
            'verify_ssl': True,
        }
    }
    def setup_method(self, method):
        """Set up things to be run when tests are started."""
        self.hass = get_test_home_assistant()
    def teardown_method(self, method):
        """Stop everything that was started."""
        self.hass.stop()
    @mock.patch('mficlient.client.MFiClient')
    def test_setup_missing_config(self, mock_client):
        """Test setup with missing configuration."""
        config = {
            'sensor': {
                'platform': 'mfi',
            }
        }
        # Setup succeeds (component loads) but the client is never contacted.
        assert setup_component(self.hass, 'sensor', config)
        assert not mock_client.called
    @mock.patch('mficlient.client.MFiClient')
    def test_setup_failed_login(self, mock_client):
        """Test setup with login failure."""
        from mficlient.client import FailedToLogin
        mock_client.side_effect = FailedToLogin
        assert not self.PLATFORM.setup_platform(
            self.hass, dict(self.GOOD_CONFIG), None)
    @mock.patch('mficlient.client.MFiClient')
    def test_setup_failed_connect(self, mock_client):
        """Test setup with connection failure."""
        mock_client.side_effect = requests.exceptions.ConnectionError
        assert not self.PLATFORM.setup_platform(
            self.hass, dict(self.GOOD_CONFIG), None)
    @mock.patch('mficlient.client.MFiClient')
    def test_setup_minimum(self, mock_client):
        """Test setup with minimum configuration."""
        config = dict(self.GOOD_CONFIG)
        del config[self.THING]['port']
        assert setup_component(self.hass, self.COMPONENT.DOMAIN, config)
        assert mock_client.call_count == 1
        # With no port configured and ssl=True, the platform defaults to 6443.
        assert mock_client.call_args == \
            mock.call(
                'foo', 'user', 'pass', port=6443, use_tls=True, verify=True
            )
    @mock.patch('mficlient.client.MFiClient')
    def test_setup_with_port(self, mock_client):
        """Test setup with port."""
        config = dict(self.GOOD_CONFIG)
        config[self.THING]['port'] = 6123
        assert setup_component(self.hass, self.COMPONENT.DOMAIN, config)
        assert mock_client.call_count == 1
        # An explicit port must be passed through unchanged.
        assert mock_client.call_args == \
            mock.call(
                'foo', 'user', 'pass', port=6123, use_tls=True, verify=True
            )
    @mock.patch('mficlient.client.MFiClient')
    def test_setup_with_tls_disabled(self, mock_client):
        """Test setup without TLS."""
        config = dict(self.GOOD_CONFIG)
        del config[self.THING]['port']
        config[self.THING]['ssl'] = False
        config[self.THING]['verify_ssl'] = False
        assert setup_component(self.hass, self.COMPONENT.DOMAIN, config)
        assert mock_client.call_count == 1
        # With ssl disabled and no port configured, the default port is 6080.
        assert mock_client.call_args == \
            mock.call(
                'foo', 'user', 'pass', port=6080, use_tls=False, verify=False
            )
    @mock.patch('mficlient.client.MFiClient')
    @mock.patch('homeassistant.components.sensor.mfi.MfiSensor')
    def test_setup_adds_proper_devices(self, mock_sensor, mock_client):
        """Test if setup adds devices."""
        # One port per supported sensor model, plus one unsupported model
        # ('notasensor') that must be filtered out by the platform.
        ports = {i: mock.MagicMock(model=model)
                 for i, model in enumerate(mfi.SENSOR_MODELS)}
        ports['bad'] = mock.MagicMock(model='notasensor')
        mock_client.return_value.get_devices.return_value = \
            [mock.MagicMock(ports=ports)]
        assert setup_component(self.hass, sensor.DOMAIN, self.GOOD_CONFIG)
        for ident, port in ports.items():
            if ident != 'bad':
                mock_sensor.assert_any_call(port, self.hass)
        assert mock.call(ports['bad'], self.hass) not in mock_sensor.mock_calls
class TestMfiSensor(unittest.TestCase):
    """Unit tests for the mFi sensor entity wrapping a single port."""

    def setup_method(self, method):
        """Create a test hass instance and a sensor around a mock port."""
        self.hass = get_test_home_assistant()
        self.port = mock.MagicMock()
        self.sensor = mfi.MfiSensor(self.port, self.hass)

    def teardown_method(self, method):
        """Shut the test hass instance down."""
        self.hass.stop()

    def test_name(self):
        """The sensor name mirrors the port label."""
        assert self.sensor.name == self.port.label

    def test_uom_temp(self):
        """Temperature-tagged ports report degrees Celsius."""
        self.port.tag = 'temperature'
        assert self.sensor.unit_of_measurement == TEMP_CELSIUS

    def test_uom_power(self):
        """Active-power ports report Watts."""
        self.port.tag = 'active_pwr'
        assert self.sensor.unit_of_measurement == 'Watts'

    def test_uom_digital(self):
        """Digital-input ports report the generic 'State' unit."""
        self.port.model = 'Input Digital'
        assert self.sensor.unit_of_measurement == 'State'

    def test_uom_unknown(self):
        """An unrecognized tag is passed through as the unit."""
        self.port.tag = 'balloons'
        assert self.sensor.unit_of_measurement == 'balloons'

    def test_uom_uninitialized(self):
        """A port without a readable tag falls back to 'State'."""
        type(self.port).tag = mock.PropertyMock(side_effect=ValueError)
        assert self.sensor.unit_of_measurement == 'State'

    def test_state_digital(self):
        """Digital inputs map 0 to OFF and any non-zero value to ON."""
        self.port.model = 'Input Digital'
        for raw_value, expected in ((0, mfi.STATE_OFF),
                                    (1, mfi.STATE_ON),
                                    (2, mfi.STATE_ON)):
            self.port.value = raw_value
            assert self.sensor.state == expected

    def test_state_digits(self):
        """Analog values are rounded per the DIGITS table (default 0)."""
        self.port.tag = 'didyoucheckthedict?'
        self.port.value = 1.25
        with mock.patch.dict(mfi.DIGITS, {'didyoucheckthedict?': 1}):
            assert self.sensor.state == 1.2
        with mock.patch.dict(mfi.DIGITS, {}):
            assert self.sensor.state == 1.0

    def test_state_uninitialized(self):
        """A port without a readable tag reports STATE_OFF."""
        type(self.port).tag = mock.PropertyMock(side_effect=ValueError)
        assert self.sensor.state == mfi.STATE_OFF

    def test_update(self):
        """update() refreshes the underlying port exactly once."""
        self.sensor.update()
        assert self.port.refresh.call_count == 1
        assert self.port.refresh.call_args == mock.call()
| apache-2.0 |
RaD/django-south | setup.py | 60 | 2289 | #!/usr/bin/env python
# Use setuptools if we can; fall back to plain distutils otherwise.
# BUG FIX: the original read ``from setuptools.core import setup`` -- no
# such module exists, so the ImportError always fell through to distutils
# and setuptools-only features (e.g. eggs) were silently unavailable.
try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup

from south import __version__


setup(
    name='South',
    version=__version__,
    description='South: Migrations for Django',
    long_description='South is an intelligent database migrations library for the Django web framework. It is database-independent and DVCS-friendly, as well as a whole host of other features.',
    author='Andrew Godwin & Andy McCurdy',
    author_email='south@aeracode.org',
    url='http://south.aeracode.org/',
    download_url='http://south.aeracode.org/wiki/Download',
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "Framework :: Django",
        "Intended Audience :: Developers",
        # NOTE: a duplicate "System Administrators" entry was removed.
        "Intended Audience :: System Administrators",
        "License :: OSI Approved :: Apache Software License",
        "Operating System :: OS Independent",
        "Topic :: Software Development",
        "Programming Language :: Python :: 3.3",
        "Programming Language :: Python :: 2.6",
        "Programming Language :: Python :: 2.7",
    ],
    packages=[
        'south',
        'south.creator',
        'south.db',
        'south.management',
        'south.introspection_plugins',
        'south.hacks',
        'south.migration',
        'south.tests',
        'south.db.sql_server',
        'south.management.commands',
        'south.tests.circular_a',
        'south.tests.emptyapp',
        'south.tests.deps_a',
        'south.tests.fakeapp',
        'south.tests.brokenapp',
        'south.tests.circular_b',
        'south.tests.otherfakeapp',
        'south.tests.deps_c',
        'south.tests.deps_b',
        'south.tests.non_managed',
        'south.tests.circular_a.migrations',
        'south.tests.emptyapp.migrations',
        'south.tests.deps_a.migrations',
        'south.tests.fakeapp.migrations',
        'south.tests.brokenapp.migrations',
        'south.tests.circular_b.migrations',
        'south.tests.otherfakeapp.migrations',
        'south.tests.deps_c.migrations',
        'south.tests.deps_b.migrations',
        'south.tests.non_managed.migrations',
        'south.utils',
    ],
)
| apache-2.0 |
ciudadanointeligente/votainteligente-portal-electoral | preguntales/views.py | 1 | 7058 | # coding=utf-8
from django.views.generic import DetailView, CreateView
from elections.models import Election, Candidate
from preguntales.forms import MessageForm
from django.core.urlresolvers import reverse
from django.views.generic.base import View
from django.views.decorators.csrf import csrf_exempt
from django.http import HttpResponse
from django.core.mail import mail_admins
from elections.views import CandidateDetailView
from django.db.models import Q
from preguntales.models import Message, Answer
from operator import itemgetter
from django.shortcuts import get_object_or_404
class MessageDetailView(DetailView):
    """Detail page for a single question (Message) within an election."""

    model = Message
    context_object_name = 'votainteligentemessage'

    def get_context_data(self, **kwargs):
        """Add the election identified by the URL slug to the context."""
        context = super(MessageDetailView, self).get_context_data(**kwargs)
        context['election'] = Election.objects.get(
            slug=self.kwargs['election_slug'])
        return context
class ElectionAskCreateView(CreateView):
    """Form view for asking a new question (Message) in an election."""

    model = Message
    form_class = MessageForm

    def dispatch(self, request, *args, **kwargs):
        # Cache the election once; the hooks below reuse it instead of
        # hitting the database again on every access.
        if 'slug' in kwargs:
            self.election = Election.objects.get(slug=kwargs['slug'])
        return super(ElectionAskCreateView, self).dispatch(request, *args, **kwargs)

    def get_context_data(self, **kwargs):
        """Expose the election and its existing messages to the template."""
        context = super(ElectionAskCreateView, self).get_context_data(**kwargs)
        context['election'] = self.election
        context['writeitmessages'] = Message.objects.filter(election=self.election)
        return context

    def get_form_kwargs(self):
        """Pass the election to the form.

        Fix: reuse the election cached in dispatch() instead of issuing a
        second, redundant ``Election.objects.get`` query.
        """
        kwargs = super(ElectionAskCreateView, self).get_form_kwargs()
        kwargs['election'] = self.election
        return kwargs

    def get_success_url(self):
        """Redirect back to the question list of the same election."""
        return reverse('ask_detail_view',
                       kwargs={'slug': self.kwargs['slug']})
class QuestionsPerCandidateView(CandidateDetailView):
    """Candidate detail page listing the questions sent to that candidate."""

    def get_queryset(self):
        """Restrict candidates to the election given in the URL.

        Bug fix: ``QuerySet.filter()`` returns a *new* queryset; the
        original code discarded the result, so the election filter was
        never actually applied.
        """
        queryset = super(QuestionsPerCandidateView, self).get_queryset()
        election_slug = self.kwargs['election_slug']
        return queryset.filter(Q(elections__slug=election_slug))

    def get_context_data(self, **kwargs):
        """Add the messages addressed to this candidate to the context."""
        context = super(QuestionsPerCandidateView, self)\
            .get_context_data(**kwargs)
        context['questions'] = Message.objects.filter(people=self.object)
        return context
class RankingMixin(object):
    """Rank election candidates by how well they answer questions.

    The host view must assign ``candidate_queryset`` before calling any
    ranking method.  NOTE(review): the arithmetic relies on Python 2
    integer division (``count()/2`` and the negative slice bound in
    ``get_bad``); running this unchanged under Python 3 would change
    behavior -- confirm before porting.
    """
    # Set by the host view (e.g. ElectionRankingView.get_object).
    candidate_queryset = None
    # Lazy cache filled by all_messages().
    votainteligentemessages = None

    def __init__(self, *args, **kwargs):
        super(RankingMixin, self).__init__(*args, **kwargs)

    def get_ranking(self):
        # Currently just exposes the raw candidate queryset.
        return self.candidate_queryset

    def all_messages(self):
        """Return (and cache) every distinct message sent to the candidates."""
        if not self.votainteligentemessages:
            self.votainteligentemessages = Message.objects\
                .filter(people__in=self.candidate_queryset).distinct()
        return self.votainteligentemessages

    def all_possible_answers(self):
        """Count one potential answer per (message, addressed person) pair."""
        answers = self.all_messages()
        total_possible_answers = 0
        for answer in answers:
            total_possible_answers += answer.people.count()
        return total_possible_answers

    def actual_answers(self):
        """Count the answers actually given across all messages."""
        messages = self.all_messages()
        actual_count = 0
        for message in messages:
            actual_count += message.answers.count()
        return actual_count

    def success_index(self):
        """Return possible/actual answer ratio (>= 1 when some unanswered).

        Raises ZeroDivisionError if no answer exists at all; the only
        caller, get_clasified(), checks for answers first.
        """
        all_possible_answers = float(self.all_possible_answers())
        actual_answers = float(self.actual_answers())
        return all_possible_answers/actual_answers

    def get_clasified(self):
        """Build one unsorted score dict per candidate.

        points = (index + 1) * possible * actual - possible**2, so a fully
        answering candidate scores positively while one who answered
        nothing scores -possible**2.
        """
        clasified = []
        messages = self.all_messages()
        if not messages:
            return []
        are_there_answers = Answer.objects.\
            filter(message__in=messages).exists()
        if not are_there_answers:
            # Guard: success_index() would divide by zero otherwise.
            return []
        success_index = self.success_index()
        for candidate in self.candidate_queryset:
            possible_answers = Message.objects.\
                filter(Q(people=candidate)).count()
            actual_answers = Answer.objects.\
                filter(Q(person=candidate) & Q(message__in=messages)).\
                count()
            points = (success_index + 1)*possible_answers*actual_answers\
                - possible_answers*possible_answers
            clasified.append({'id': candidate.id,
                              'name': candidate.name,
                              'candidate': candidate,
                              'possible_answers': possible_answers,
                              'actual_answers': actual_answers,
                              'points': points
                              })
        return clasified

    def get_ordered(self):
        """Return the scores sorted best-first."""
        clasified = self.get_clasified()
        clasified = sorted(clasified, key=itemgetter('points'),
                           reverse=True)
        return clasified

    def get_good(self):
        """Top half of the ranking, keeping only candidates who answered."""
        amount_of_good_ones = self.candidate_queryset.count()/2
        good = []
        ordered = self.get_ordered()
        for i in range(0, min(amount_of_good_ones, len(ordered))):
            if ordered[i]["actual_answers"] > 0:
                good.append(ordered[i])
        return good

    def get_bad(self):
        """Bottom half (worst first), extended while candidates answered nothing."""
        amount_of_bad_ones = -self.candidate_queryset.count()/2
        # Reverse so the worst-scoring candidates come first.
        ordered = self.get_ordered()[::-1]
        bad = ordered[:amount_of_bad_ones]
        # Keep appending zero-answer candidates past the cutoff; stop at
        # the first one who actually answered something.
        for item in ordered[amount_of_bad_ones:]:
            if item["actual_answers"] > 0:
                break
            bad.append(item)
        return bad
class ElectionRankingView(DetailView, RankingMixin):
    """Election page showing the best and worst answering candidates."""

    model = Election

    def get_object(self, queryset=None):
        """Fetch the election and feed its candidates to the ranking mixin."""
        the_object = super(ElectionRankingView, self).get_object(queryset)
        queryset = the_object.candidates.all()
        self.candidate_queryset = queryset
        return the_object

    def get_context_data(self, **kwargs):
        """Add the good/bad candidate rankings to the template context.

        Bug fix: the original called ``super(DetailView, self)``, which
        skipped DetailView.get_context_data and left the election object
        out of the context.  Use this class in the super() call so the
        normal DetailView context (``object``/``election``) is included.
        """
        context = super(ElectionRankingView, self).get_context_data(**kwargs)
        context['good'] = self.get_good()
        context['bad'] = self.get_bad()
        return context
class ConfirmationView(DetailView):
    """Confirm a pending message via the key mailed to its author."""

    model = Message
    template_name = 'preguntales/confirmation.html'

    def get_queryset(self):
        """Only messages with a pending (unconfirmed) confirmation qualify."""
        pending = self.model.objects.filter(confirmation__isnull=False)
        return pending.filter(confirmation__when_confirmed__isnull=True)

    def get_object(self, queryset=None):
        """Look up the message by its confirmation key and confirm it."""
        if queryset is None:
            queryset = self.get_queryset()
        self.key = self.kwargs['key']
        message = get_object_or_404(queryset, confirmation__key=self.key)
        # Side effect: viewing this page marks the message as confirmed.
        message.confirm()
        return message
| gpl-3.0 |
g-vidal/upm | examples/python/eboled.py | 6 | 1563 | #!/usr/bin/env python
# Author: Jon Trulson <jtrulson@ics.com>
# Copyright (c) 2015 Intel Corporation.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from __future__ import print_function
import time, sys
from upm import pyupm_lcd as lcdObj
def main():
    """Draw a short greeting on the EBOLED display, then pause."""
    # Instantiate the OLED with its default bus/pin values.
    lcd = lcdObj.EBOLED()

    lcd.clear()
    lcd.setCursor(10, 15)
    lcd.write("Hello")
    lcd.setCursor(30, 15)
    lcd.write("World!")
    lcd.refresh()

    print("Sleeping for 5 seconds...")
    time.sleep(5)


if __name__ == '__main__':
    main()
| mit |
Semillas/semillas_backend | semillas_backend/users/migrations/0007_auto_20170226_1801.py | 2 | 1142 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-02-26 18:01
from __future__ import unicode_literals
import django.contrib.auth.validators
from django.db import migrations, models
import phonenumber_field.modelfields
class Migration(migrations.Migration):
    """Add ``phone``/``phone_verified`` to User and widen ``username``.

    Auto-generated by Django 1.10.5; operation contents left untouched.
    """

    dependencies = [
        ('users', '0006_auto_20161112_1132'),
    ]

    operations = [
        # Optional contact phone number (may be left blank).
        migrations.AddField(
            model_name='user',
            name='phone',
            field=phonenumber_field.modelfields.PhoneNumberField(blank=True, max_length=128),
        ),
        # Whether the phone number has been verified; off by default.
        migrations.AddField(
            model_name='user',
            name='phone_verified',
            field=models.BooleanField(default=False),
        ),
        # Django 1.10 username definition: 150 chars, unicode validator.
        migrations.AlterField(
            model_name='user',
            name='username',
            field=models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username'),
        ),
    ]
| mit |
afandria/mojo | mojo/public/third_party/jinja2/_compat.py | 638 | 4042 | # -*- coding: utf-8 -*-
"""
jinja2._compat
~~~~~~~~~~~~~~
Some py2/py3 compatibility support based on a stripped down
version of six so we don't have to depend on a specific version
of it.
:copyright: Copyright 2013 by the Jinja team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import sys
# True on any Python 2.x interpreter; selects the shim branch below.
PY2 = sys.version_info[0] == 2
# True when running on PyPy.
PYPY = hasattr(sys, 'pypy_translation_info')
# No-op decorator used when a compatibility wrapper is not needed.
_identity = lambda x: x

if not PY2:
    # Python 3: native names, plus lambdas mimicking the 2.x dict API.
    unichr = chr
    range_type = range
    text_type = str
    string_types = (str,)

    iterkeys = lambda d: iter(d.keys())
    itervalues = lambda d: iter(d.values())
    iteritems = lambda d: iter(d.items())

    import pickle
    from io import BytesIO, StringIO
    # "Native" strings are unicode on Python 3.
    NativeStringIO = StringIO

    def reraise(tp, value, tb=None):
        """Re-raise *value*, attaching *tb* as its traceback if needed."""
        if value.__traceback__ is not tb:
            raise value.with_traceback(tb)
        raise value

    ifilter = filter
    imap = map
    izip = zip
    intern = sys.intern

    implements_iterator = _identity
    implements_to_string = _identity
    encode_filename = _identity
    get_next = lambda x: x.__next__
else:
    # Python 2: keep 2.x builtins under stable names and adapt classes
    # written with the 3.x protocols (__next__/__str__) to 2.x.
    unichr = unichr
    text_type = unicode
    range_type = xrange
    string_types = (str, unicode)

    iterkeys = lambda d: d.iterkeys()
    itervalues = lambda d: d.itervalues()
    iteritems = lambda d: d.iteritems()

    import cPickle as pickle
    from cStringIO import StringIO as BytesIO, StringIO
    # "Native" strings are bytes on Python 2.
    NativeStringIO = BytesIO

    # The three-argument raise statement is a syntax error on Python 3,
    # so it is hidden inside exec() to keep this module importable.
    exec('def reraise(tp, value, tb=None):\n raise tp, value, tb')

    from itertools import imap, izip, ifilter
    intern = intern

    def implements_iterator(cls):
        # Map the 3.x __next__ protocol onto 2.x's next().
        cls.next = cls.__next__
        del cls.__next__
        return cls

    def implements_to_string(cls):
        # __str__ (unicode text on 3.x) becomes __unicode__, and the 2.x
        # __str__ emits its UTF-8 encoding.
        cls.__unicode__ = cls.__str__
        cls.__str__ = lambda x: x.__unicode__().encode('utf-8')
        return cls

    get_next = lambda x: x.next

    def encode_filename(filename):
        # Filesystem APIs on 2.x expect byte strings.
        if isinstance(filename, unicode):
            return filename.encode('utf-8')
        return filename
try:
    # Python 2.6+ ships the next() builtin; re-export it.
    next = next
except NameError:
    # Very old interpreters: emulate it via the 2.x iterator protocol.
    def next(it):
        return it.next()
def with_metaclass(meta, *bases):
    """Return a throwaway base class so that ``class C(with_metaclass(M, B)):``
    builds ``C`` with metaclass ``M`` and bases ``bases`` on both Python
    major versions.

    A one-shot metaclass replaces itself with the real one at the first
    (and only) class construction, so -- unlike six.with_metaclass -- no
    dummy class is left in the final MRO.  ``__call__`` and ``__init__``
    are pinned back to ``type``'s versions to keep one construction level
    close to plain ``type`` and satisfy internal type checks.
    """
    class _OneShotMeta(meta):
        __call__ = type.__call__
        __init__ = type.__init__

        def __new__(cls, name, this_bases, d):
            if this_bases is None:
                # Building the temporary placeholder class itself.
                return type.__new__(cls, name, (), d)
            # Building the user's class: hand over to the real metaclass.
            return meta(name, bases, d)

    return _OneShotMeta('temporary_class', None, {})
try:
    from collections import Mapping as mapping_types
except ImportError:
    # Very old Python 2: approximate "mapping" with the classic types.
    import UserDict
    mapping_types = (UserDict.UserDict, UserDict.DictMixin, dict)


# common types. These do exist in the special types module too which however
# does not exist in IronPython out of the box. Also that way we don't have
# to deal with implementation specific stuff here
class _C(object):
    def method(self): pass


def _func():
    yield None

function_type = type(_func)
generator_type = type(_func())
method_type = type(_C().method)
code_type = type(_C.method.__code__)

try:
    raise TypeError()
except TypeError:
    # Raise and catch once to grab a live traceback, from which the
    # traceback and frame types are derived.
    _tb = sys.exc_info()[2]
    traceback_type = type(_tb)
    frame_type = type(_tb.tb_frame)


try:
    # Python 3 location first, then the Python 2 fallback.
    from urllib.parse import quote_from_bytes as url_quote
except ImportError:
    from urllib import quote as url_quote


try:
    # Python 2 thread module, then threading, then the single-threaded stub.
    from thread import allocate_lock
except ImportError:
    try:
        from threading import Lock as allocate_lock
    except ImportError:
        from dummy_thread import allocate_lock
| bsd-3-clause |
sunny94/temp | sympy/polys/tests/test_orderings.py | 24 | 4262 | """Tests of monomial orderings. """
from sympy.polys.orderings import (
monomial_key, lex, grlex, grevlex, ilex, igrlex, igrevlex,
LexOrder, InverseOrder, ProductOrder, build_product_order,
)
from sympy.abc import x, y, z, t
from sympy.core import S
from sympy.utilities.pytest import raises
def test_lex_order():
    """lex: pure lexicographic comparison of exponent tuples."""
    assert lex((1, 2, 3)) == (1, 2, 3)
    assert str(lex) == 'lex'

    assert lex((1, 2, 3)) == lex((1, 2, 3))

    # Raising any single exponent makes the monomial larger ...
    for bigger in [(2, 2, 3), (1, 3, 3), (1, 2, 4)]:
        assert lex(bigger) > lex((1, 2, 3))
    # ... and lowering any single exponent makes it smaller.
    for smaller in [(0, 2, 3), (1, 1, 3), (1, 2, 2)]:
        assert lex(smaller) < lex((1, 2, 3))

    assert lex.is_global is True
    assert lex == LexOrder()
    assert lex != grlex
def test_grlex_order():
    """grlex: compare by total degree first, then lexicographically."""
    assert grlex((1, 2, 3)) == (6, (1, 2, 3))
    assert str(grlex) == 'grlex'

    assert grlex((1, 2, 3)) == grlex((1, 2, 3))

    base = (1, 2, 3)
    for bigger in [(2, 2, 3), (1, 3, 3), (1, 2, 4)]:
        assert grlex(bigger) > grlex(base)
    for smaller in [(0, 2, 3), (1, 1, 3), (1, 2, 2)]:
        assert grlex(smaller) < grlex(base)

    # Equal total degree: ties are broken lexicographically.
    for a, b in [((2, 2, 3), (1, 2, 4)),
                 ((1, 3, 3), (1, 2, 4)),
                 ((0, 1, 1), (0, 0, 2))]:
        assert grlex(a) > grlex(b)
    for a, b in [((0, 2, 3), (1, 2, 2)),
                 ((1, 1, 3), (1, 2, 2)),
                 ((0, 3, 1), (2, 2, 1))]:
        assert grlex(a) < grlex(b)

    assert grlex.is_global is True
def test_grevlex_order():
    """grevlex: total degree first, then reversed lex on negated exponents."""
    assert grevlex((1, 2, 3)) == (6, (-3, -2, -1))
    assert str(grevlex) == 'grevlex'

    assert grevlex((1, 2, 3)) == grevlex((1, 2, 3))

    base = (1, 2, 3)
    for bigger in [(2, 2, 3), (1, 3, 3), (1, 2, 4)]:
        assert grevlex(bigger) > grevlex(base)
    for smaller in [(0, 2, 3), (1, 1, 3), (1, 2, 2)]:
        assert grevlex(smaller) < grevlex(base)

    # Equal total degree: ties broken by the graded-reverse rule.
    for a, b in [((2, 2, 3), (1, 2, 4)),
                 ((1, 3, 3), (1, 2, 4)),
                 ((0, 1, 1), (0, 0, 2))]:
        assert grevlex(a) > grevlex(b)
    for a, b in [((0, 2, 3), (1, 2, 2)),
                 ((1, 1, 3), (1, 2, 2)),
                 ((0, 3, 1), (2, 2, 1))]:
        assert grevlex(a) < grevlex(b)

    assert grevlex.is_global is True
def test_InverseOrder():
    """InverseOrder flips the sense of the wrapped (global) order."""
    inv_lex = InverseOrder(lex)
    inv_grlex = InverseOrder(grlex)

    assert inv_lex((1, 2, 3)) > inv_lex((2, 0, 3))
    assert inv_grlex((1, 2, 3)) < inv_grlex((0, 2, 3))
    assert str(inv_lex) == "ilex"
    assert str(inv_grlex) == "igrlex"
    assert inv_lex.is_global is False
    assert inv_grlex.is_global is False
    assert inv_lex != inv_grlex
    assert inv_lex == InverseOrder(LexOrder())
def test_ProductOrder():
    """ProductOrder compares sub-orders on projected exponent slices."""
    order = ProductOrder((grlex, lambda m: m[:2]), (grlex, lambda m: m[2:]))
    assert order((1, 3, 3, 4, 5)) > order((2, 1, 5, 5, 5))
    assert str(order) == "ProductOrder(grlex, grlex)"

    # Globality: all-global -> True, mixed -> unknown, none-global -> False.
    assert order.is_global is True
    assert ProductOrder((grlex, None), (ilex, None)).is_global is None
    assert ProductOrder((igrlex, None), (ilex, None)).is_global is False
def test_monomial_key():
    """monomial_key: resolve an order by name and build sort key callables."""
    assert monomial_key() == lex
    assert monomial_key('lex') == lex
    assert monomial_key('grlex') == grlex
    assert monomial_key('grevlex') == grevlex

    # Unknown names and non-string arguments are rejected.
    raises(ValueError, lambda: monomial_key('foo'))
    raises(ValueError, lambda: monomial_key(1))

    # Sorting expressions by each order with generator list [z, y, x].
    M = [x, x**2*z**2, x*y, x**2, S(1), y**2, x**3, y, z, x*y**2*z, x**2*y**2]
    assert sorted(M, key=monomial_key('lex', [z, y, x])) == \
        [S(1), x, x**2, x**3, y, x*y, y**2, x**2*y**2, z, x*y**2*z, x**2*z**2]
    assert sorted(M, key=monomial_key('grlex', [z, y, x])) == \
        [S(1), x, y, z, x**2, x*y, y**2, x**3, x**2*y**2, x*y**2*z, x**2*z**2]
    assert sorted(M, key=monomial_key('grevlex', [z, y, x])) == \
        [S(1), x, y, z, x**2, x*y, y**2, x**3, x**2*y**2, x**2*z**2, x*y**2*z]
def test_build_product_order():
    """build_product_order: compose (order, vars) specs into one sort key."""
    assert build_product_order((("grlex", x, y), ("grlex", z, t)), [x, y, z, t])((4, 5, 6, 7)) == \
        ((9, (4, 5)), (13, (6, 7)))
    # Identical specifications compare equal.
    assert build_product_order((("grlex", x, y), ("grlex", z, t)), [x, y, z, t]) == \
        build_product_order((("grlex", x, y), ("grlex", z, t)), [x, y, z, t])
| bsd-3-clause |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.