text stringlengths 4 1.02M | meta dict |
|---|---|
from __future__ import absolute_import, division, print_function
import copy
import numpy as np
from matplotlib.patches import Ellipse, Polygon, Rectangle, Path as MplPath, PathPatch
from matplotlib.transforms import IdentityTransform, blended_transform_factory
from glue.core.exceptions import UndefinedROI
from glue.utils import points_inside_poly
# Silence numpy floating-point warnings process-wide; ROI math routinely
# operates on partially-defined regions.
np.seterr(all='ignore')

__all__ = ['Roi', 'RectangularROI', 'CircularROI', 'PolygonalROI',
           'AbstractMplRoi', 'MplRectangularROI', 'MplCircularROI',
           'MplPolygonalROI', 'MplXRangeROI', 'MplYRangeROI',
           'XRangeROI', 'RangeROI', 'YRangeROI', 'VertexROIBase',
           'CategoricalROI']

# Default color for ROI preview patches.
PATCH_COLOR = '#FFFF00'
# Modifier key that switches a drag gesture into "scrub" (move) mode.
SCRUBBING_KEY = 'control'
def aspect_ratio(axes):
    """Return the pixel height / width of a box that spans one data
    unit in x and one data unit in y on *axes*.
    """
    bbox = axes.get_position()
    fig = axes.figure
    width_pix = bbox.width * fig.get_figwidth()
    height_pix = bbox.height * fig.get_figheight()
    x0, x1 = axes.get_xlim()
    y0, y1 = axes.get_ylim()
    return (height_pix / width_pix) * (x1 - x0) / (y1 - y0)
def data_to_norm(axes, x, y):
    """Convert data coordinates to normalized (0-1) axes coordinates."""
    points = np.vstack((np.asarray(x).ravel(), np.asarray(y).ravel())).T
    display = axes.transData.transform(points)
    return axes.transAxes.inverted().transform(display)
def data_to_pixel(axes, x, y):
    """Convert data coordinates to display (pixel) coordinates."""
    points = np.vstack((np.asarray(x).ravel(), np.asarray(y).ravel())).T
    return axes.transData.transform(points)
def pixel_to_data(axes, x, y):
    """Convert display (pixel) coordinates to data coordinates."""
    points = np.vstack((np.asarray(x).ravel(), np.asarray(y).ravel())).T
    return axes.transData.inverted().transform(points)
class Roi(object):  # pragma: no cover
    """
    A geometrical 2D region of interest.

    Glue uses Roi's to represent user-drawn regions on plots. There
    are many specific subtypes of Roi, but they all have a ``contains``
    method to test whether a collection of 2D points lies inside the region.
    """

    def contains(self, x, y):
        """Return True/False for each (x, y) pair.

        :param x: Array of X locations
        :param y: Array of Y locations
        :returns: A boolean array whose elements are True where the
                  corresponding (x, y) point lies inside the Roi.
        :raises: UndefinedROI if the region is not yet defined
        """
        raise NotImplementedError()

    def center(self):
        """Return the (x, y) coordinates of the ROI center."""
        raise NotImplementedError()

    def move_to(self, x, y):
        """Translate the ROI so that its center lies at (x, y)."""
        raise NotImplementedError()

    def defined(self):
        """Return whether the region has been fully specified."""
        raise NotImplementedError()

    def to_polygon(self):
        """Return (x, y) vertex lists approximating the ROI as a polygon."""
        raise NotImplementedError

    def copy(self):
        """Return a shallow clone of this ROI."""
        return copy.copy(self)
class PointROI(Roi):
    """Degenerate ROI marking a single (x, y) location."""

    def __init__(self, x=None, y=None):
        self.x = x  # x coordinate, or None when unset
        self.y = y  # y coordinate, or None when unset

    def contains(self, x, y):
        # A zero-area point never contains anything.
        return False

    def move_to(self, x, y):
        self.x, self.y = x, y

    def defined(self):
        try:
            return np.isfinite([self.x, self.y]).all()
        except TypeError:
            # One of the coordinates is None / non-numeric.
            return False

    def center(self):
        return self.x, self.y

    def reset(self):
        """Forget the stored location."""
        self.x = self.y = None
class RectangularROI(Roi):
    """
    A 2D, axis-aligned rectangular region of interest.
    """

    def __init__(self, xmin=None, xmax=None, ymin=None, ymax=None):
        super(RectangularROI, self).__init__()
        self.xmin = xmin
        self.xmax = xmax
        self.ymin = ymin
        self.ymax = ymax

    def __str__(self):
        if not self.defined():
            return "Undefined Rectangular ROI"
        return "x=[%0.3f, %0.3f], y=[%0.3f, %0.3f]" % (
            self.xmin, self.xmax, self.ymin, self.ymax)

    def center(self):
        """Return the (x, y) midpoint of the rectangle."""
        return self.xmin + self.width() / 2, self.ymin + self.height() / 2

    def move_to(self, x, y):
        """Translate the rectangle so its center lies at (x, y)."""
        cx, cy = self.center()
        shift_x = x - cx
        shift_y = y - cy
        self.xmin += shift_x
        self.xmax += shift_x
        self.ymin += shift_y
        self.ymax += shift_y

    def transpose(self, copy=True):
        """Swap the x and y bounds, on a copy (default) or in place."""
        if copy:
            other = self.copy()
            other.xmin, other.xmax = self.ymin, self.ymax
            other.ymin, other.ymax = self.xmin, self.xmax
            return other

        self.xmin, self.ymin = self.ymin, self.xmin
        self.xmax, self.ymax = self.ymax, self.xmax

    def corner(self):
        """Return the (xmin, ymin) corner."""
        return (self.xmin, self.ymin)

    def width(self):
        """Return the extent along x."""
        return self.xmax - self.xmin

    def height(self):
        """Return the extent along y."""
        return self.ymax - self.ymin

    def contains(self, x, y):
        """
        Test whether a set of (x, y) points falls strictly within
        the region of interest.

        :param x: A scalar or numpy array of x points
        :param y: A scalar or numpy array of y points
        :returns: Boolean scalar/array, True where (x, y) is inside
        :raises: UndefinedROI if the rectangle has not been set
        """
        if not self.defined():
            raise UndefinedROI

        inside_x = (x > self.xmin) & (x < self.xmax)
        inside_y = (y > self.ymin) & (y < self.ymax)
        return inside_x & inside_y

    def update_limits(self, xmin, ymin, xmax, ymax):
        """
        Update the bounds, normalizing so min <= max on each axis.
        """
        self.xmin, self.xmax = min(xmin, xmax), max(xmin, xmax)
        self.ymin, self.ymax = min(ymin, ymax), max(ymin, ymax)

    def reset(self):
        """
        Clear all four bounds.
        """
        self.xmin = self.xmax = self.ymin = self.ymax = None

    def defined(self):
        # All four limits are set together, so checking one suffices.
        return self.xmin is not None

    def to_polygon(self):
        """Return the rectangle as closed (x, y) vertex lists."""
        if not self.defined():
            return [], []
        xs = [self.xmin, self.xmax, self.xmax, self.xmin, self.xmin]
        ys = [self.ymin, self.ymin, self.ymax, self.ymax, self.ymin]
        return xs, ys

    def __gluestate__(self, context):
        return dict(xmin=self.xmin, xmax=self.xmax,
                    ymin=self.ymin, ymax=self.ymax)

    @classmethod
    def __setgluestate__(cls, rec, context):
        return cls(xmin=rec['xmin'], xmax=rec['xmax'],
                   ymin=rec['ymin'], ymax=rec['ymax'])
class RangeROI(Roi):
    """An interval along a single (x or y) axis."""

    def __init__(self, orientation, min=None, max=None):
        """:param orientation: 'x' or 'y'. Sets which axis to range"""
        super(RangeROI, self).__init__()
        self.min = min
        self.max = max
        self.ori = orientation

    @property
    def ori(self):
        # Which axis the range applies to: 'x' or 'y'.
        return self._ori

    @ori.setter
    def ori(self, value):
        if value not in ('x', 'y'):
            raise ValueError("Orientation must be one of 'x', 'y'")
        self._ori = value

    def __str__(self):
        if not self.defined():
            return "Undefined %s" % type(self).__name__
        return "%0.3f < %s < %0.3f" % (self.min, self.ori, self.max)

    def range(self):
        """Return the (min, max) bounds."""
        return self.min, self.max

    def center(self):
        """Return the midpoint of the interval."""
        return (self.min + self.max) / 2

    def set_range(self, lo, hi):
        """Set the interval bounds to (lo, hi)."""
        self.min, self.max = lo, hi

    def move_to(self, center):
        """Translate the interval so its midpoint lies at *center*."""
        shift = center - self.center()
        self.min += shift
        self.max += shift

    def contains(self, x, y):
        """Return True where the relevant coordinate lies inside the range."""
        if not self.defined():
            raise UndefinedROI()
        coord = x if self.ori == 'x' else y
        return (coord > self.min) & (coord < self.max)

    def reset(self):
        """Clear both bounds."""
        self.min = self.max = None

    def defined(self):
        return self.min is not None and self.max is not None

    def to_polygon(self):
        """Approximate the unbounded strip as a very large rectangle."""
        if not self.defined():
            return [], []
        along = [self.min, self.max, self.max, self.min, self.min]
        across = [-1e100, -1e100, 1e100, 1e100, -1e100]
        if self.ori == 'x':
            return along, across
        return across, along

    def __gluestate__(self, context):
        return dict(ori=self.ori, min=self.min, max=self.max)

    @classmethod
    def __setgluestate__(cls, rec, context):
        return cls(rec['ori'], min=rec['min'], max=rec['max'])
class XRangeROI(RangeROI):
    """A :class:`RangeROI` fixed to the x axis."""

    def __init__(self, min=None, max=None):
        super(XRangeROI, self).__init__('x', min=min, max=max)
class YRangeROI(RangeROI):
    """A :class:`RangeROI` fixed to the y axis."""

    def __init__(self, min=None, max=None):
        super(YRangeROI, self).__init__('y', min=min, max=max)
class CircularROI(Roi):
    """
    A 2D circular region of interest.
    """

    def __init__(self, xc=None, yc=None, radius=None):
        super(CircularROI, self).__init__()
        self.xc = xc          # center x
        self.yc = yc          # center y
        self.radius = radius  # circle radius

    def contains(self, x, y):
        """
        Test whether a set of (x, y) points falls strictly within
        the circle.

        :param x: A list of x points
        :param y: A list of y points
        :returns: Boolean array, True where (x, y) is inside
        :raises: UndefinedROI if center or radius is unset
        """
        if not self.defined():
            raise UndefinedROI

        x = np.asarray(x)
        y = np.asarray(y)
        return (x - self.xc) ** 2 + (y - self.yc) ** 2 < self.radius ** 2

    def set_center(self, x, y):
        """Move the circle's center to (x, y)."""
        self.xc = x
        self.yc = y

    def set_radius(self, radius):
        """Set the circle's radius."""
        self.radius = radius

    def get_center(self):
        """Return the (x, y) center."""
        return self.xc, self.yc

    def get_radius(self):
        """Return the radius."""
        return self.radius

    def reset(self):
        """Forget the center and collapse the radius to zero."""
        self.xc = None
        self.yc = None
        self.radius = 0.

    def defined(self):
        """ Returns True if the ROI is defined """
        return (self.xc is not None and
                self.yc is not None and
                self.radius is not None)

    def to_polygon(self):
        """Return (x, y) vertex arrays approximating the circle."""
        if not self.defined():
            return [], []
        angles = np.linspace(0, 2 * np.pi, num=20)
        return (self.xc + self.radius * np.cos(angles),
                self.yc + self.radius * np.sin(angles))

    def __gluestate__(self, context):
        return dict(xc=self.xc, yc=self.yc, radius=self.radius)

    @classmethod
    def __setgluestate__(cls, rec, context):
        return cls(xc=rec['xc'], yc=rec['yc'], radius=rec['radius'])
class VertexROIBase(Roi):
    """Common vertex-list storage for polygon/path-style ROIs."""

    def __init__(self, vx=None, vy=None):
        """
        :param vx: initial x vertices
        :type vx: list
        :param vy: initial y vertices
        :type vy: list
        """
        super(VertexROIBase, self).__init__()
        self.vx = [] if vx is None else vx
        self.vy = [] if vy is None else vy

    def add_point(self, x, y):
        """
        Append a vertex at (x, y).

        :param x: The x coordinate
        :param y: The y coordinate
        """
        self.vx.append(x)
        self.vy.append(y)

    def reset(self):
        """
        Discard all vertices.
        """
        self.vx = []
        self.vy = []

    def replace_last_point(self, x, y):
        """Overwrite the most recently added vertex (no-op when empty)."""
        if len(self.vx) > 0:
            self.vx[-1] = x
            self.vy[-1] = y

    def remove_point(self, x, y, thresh=None):
        """Remove the vertex closest to a reference (xy) point

        :param x: The x coordinate of the reference point
        :param y: The y coordinate of the reference point
        :param thresh: An optional threshhold. If present, the vertex
            closest to (x, y) is only removed when its distance is
            less than thresh
        """
        if len(self.vx) == 0:
            return

        # Squared distance from each vertex to the reference point.
        dist = [(x - vx) ** 2 + (y - vy) ** 2
                for vx, vy in zip(self.vx, self.vy)]
        near = min(range(len(dist)), key=dist.__getitem__)

        if thresh is not None and dist[near] > thresh ** 2:
            return

        # Rebuild (rather than mutate) the vertex lists without the hit.
        self.vx = [v for i, v in enumerate(self.vx) if i != near]
        self.vy = [v for i, v in enumerate(self.vy) if i != near]

    def defined(self):
        # vx may be a list or an array; test length rather than truthiness.
        return len(self.vx) > 0

    def to_polygon(self):
        """Return the raw (vx, vy) vertex sequences."""
        return self.vx, self.vy

    def __gluestate__(self, context):
        return dict(vx=np.asarray(self.vx).tolist(),
                    vy=np.asarray(self.vy).tolist())

    @classmethod
    def __setgluestate__(cls, rec, context):
        return cls(vx=rec['vx'], vy=rec['vy'])
class PolygonalROI(VertexROIBase):
    """
    A class to define 2D polygonal regions-of-interest
    """

    def __str__(self):
        verts = ','.join('(%s, %s)' % pair
                         for pair in zip(self.vx, self.vy))
        return 'Polygonal ROI (%s)' % verts

    def contains(self, x, y):
        """
        Test whether a set of (x, y) points falls within
        the polygon.

        :param x: A list of x points
        :param y: A list of y points
        :returns: Boolean array, True where (x, y) is inside
        :raises: UndefinedROI if no vertices are defined
        """
        if not self.defined():
            raise UndefinedROI

        x = np.asarray(x)
        y = np.asarray(y)

        inside = points_inside_poly(x.flat, y.flat, self.vx, self.vy)
        # Points with NaN/inf coordinates can never be inside.
        finite = np.isfinite(x.flat) & np.isfinite(y.flat)
        inside[~finite] = False
        inside.shape = x.shape
        return inside

    def move_to(self, xdelta, ydelta):
        """Shift every vertex by (xdelta, ydelta).

        Note: unlike :meth:`Roi.move_to`, the arguments here are
        offsets, not an absolute center.
        """
        self.vx = [vx + xdelta for vx in self.vx]
        self.vy = [vy + ydelta for vy in self.vy]
class Path(VertexROIBase):
    """An open sequence of vertices (a polyline rather than a polygon)."""

    def __str__(self):
        verts = ','.join('(%s, %s)' % pair
                         for pair in zip(self.vx, self.vy))
        return 'Path (%s)' % verts
class AbstractMplRoi(object):  # pragma: no cover
    """ Base class for objects which use
    Matplotlib user events to edit/display ROIs
    """

    def __init__(self, axes):
        """
        :param axes: The Matplotlib Axes object to draw to
        """
        self._axes = axes
        # The ROI being edited; its concrete type comes from the subclass.
        self._roi = self._roi_factory()
        # Snapshot taken when a gesture starts, for abort/rollback.
        self._previous_roi = None
        # True while a click-drag selection is in progress.
        self._mid_selection = False
        # True while an existing ROI is being dragged ("scrubbed").
        self._scrubbing = False

    def _draw(self):
        # Force a full canvas redraw.
        self._axes.figure.canvas.draw()

    def _roi_factory(self):
        """Create and return the ROI instance this editor manipulates."""
        raise NotImplementedError()

    def roi(self):
        """Return a copy of the current ROI."""
        return self._roi.copy()

    def reset(self, include_roi=True):
        """Clear gesture state and, optionally, the ROI itself."""
        self._mid_selection = False
        self._scrubbing = False
        if include_roi:
            self._roi.reset()
        self._sync_patch()

    def active(self):
        """Return True while a selection gesture is in progress."""
        return self._mid_selection

    def start_selection(self, event):
        raise NotImplementedError()

    def update_selection(self, event):
        raise NotImplementedError()

    def finalize_selection(self, event):
        raise NotImplementedError()

    def abort_selection(self, event):
        """Cancel an in-progress gesture, restoring the stored ROI."""
        if self._mid_selection:
            self._roi_restore()
        self.reset(include_roi=False)

    def _sync_patch(self):
        """Update the matplotlib patch to match the current ROI."""
        raise NotImplementedError()

    def _roi_store(self):
        # Remember the current ROI so an aborted gesture can restore it.
        self._previous_roi = self._roi.copy()

    def _roi_restore(self):
        self._roi = self._previous_roi
class MplPickROI(AbstractMplRoi):
    """Tracks a single picked point; draws nothing on the canvas."""

    def _draw(self):
        # Nothing to render for a point pick.
        pass

    def _roi_factory(self):
        return PointROI()

    def start_selection(self, event):
        # Record the point under the cursor (data coordinates).
        self._roi.x = event.xdata
        self._roi.y = event.ydata

    def update_selection(self, event):
        self._roi.x = event.xdata
        self._roi.y = event.ydata

    def finalize_selection(self, event):
        self._roi.x = event.xdata
        self._roi.y = event.ydata

    def _sync_patch(self):
        # No patch to synchronize.
        pass
class MplRectangularROI(AbstractMplRoi):
    """
    A subclass of RectangularROI that also renders the ROI to a plot

    *Attributes*:

        plot_opts:

            Dictionary instance
            A dictionary of plot keywords that are passed to
            the patch representing the ROI. These control
            the visual properties of the ROI
    """

    def __init__(self, axes):
        """
        :param axes: A matplotlib Axes object to attach the graphical ROI to
        """

        AbstractMplRoi.__init__(self, axes)

        # Data coordinates of the initial button press.
        self._xi = None
        self._yi = None

        self.plot_opts = {'edgecolor': PATCH_COLOR, 'facecolor': PATCH_COLOR,
                          'alpha': 0.3}

        self._patch = Rectangle((0., 0.), 1., 1.)
        self._patch.set_zorder(100)  # draw on top of plot contents
        self._setup_patch()

    def _setup_patch(self):
        # Attach the (initially hidden) rectangle patch to the axes.
        self._axes.add_patch(self._patch)
        self._patch.set_visible(False)
        self._sync_patch()

    def _roi_factory(self):
        return RectangularROI()

    def start_selection(self, event):
        """Begin a drag (new rectangle) or a scrub (move existing one)."""

        if event.inaxes != self._axes:
            return False

        if event.key == SCRUBBING_KEY:
            # Scrubbing requires an existing ROI under the cursor.
            if not self._roi.defined():
                return False
            elif not self._roi.contains(event.xdata, event.ydata):
                return False

        self._roi_store()
        self._xi = event.xdata
        self._yi = event.ydata

        if event.key == SCRUBBING_KEY:
            self._scrubbing = True
            self._cx, self._cy = self._roi.center()
        else:
            self.reset()
            # Start from a zero-size rectangle at the press point.
            self._roi.update_limits(event.xdata, event.ydata,
                                    event.xdata, event.ydata)

        self._mid_selection = True
        self._sync_patch()

    def update_selection(self, event):
        """Grow the rectangle (drag) or translate it (scrub)."""

        if not self._mid_selection or event.inaxes != self._axes:
            return False

        if event.key == SCRUBBING_KEY:
            if not self._roi.defined():
                return False

        if self._scrubbing:
            # Move the ROI by the cursor displacement since the press.
            self._roi.move_to(self._cx + event.xdata - self._xi,
                              self._cy + event.ydata - self._yi)
        else:
            self._roi.update_limits(min(event.xdata, self._xi),
                                    min(event.ydata, self._yi),
                                    max(event.xdata, self._xi),
                                    max(event.ydata, self._yi))

        self._sync_patch()

    def finalize_selection(self, event):
        """End the gesture and hide the preview patch."""
        self._scrubbing = False
        self._mid_selection = False
        self._patch.set_visible(False)
        self._draw()

    def _sync_patch(self):
        """Mirror the ROI geometry/visibility onto the matplotlib patch."""
        if self._roi.defined():
            corner = self._roi.corner()
            width = self._roi.width()
            height = self._roi.height()
            self._patch.set_xy(corner)
            self._patch.set_width(width)
            self._patch.set_height(height)
            self._patch.set(**self.plot_opts)
            self._patch.set_visible(True)
        else:
            self._patch.set_visible(False)

        self._draw()

    def __str__(self):
        return "MPL Rectangle: %s" % self._patch
class MplXRangeROI(AbstractMplRoi):
    """Interactively selects a range along the x axis of a plot."""

    def __init__(self, axes):
        """
        :param axes: A matplotlib Axes object to attach the graphical ROI to
        """

        AbstractMplRoi.__init__(self, axes)

        # x data coordinate of the initial button press.
        self._xi = None

        self.plot_opts = {'edgecolor': PATCH_COLOR, 'facecolor': PATCH_COLOR,
                          'alpha': 0.3}
        # Blended transform: x in data units, y in axes-fraction units,
        # so the patch always spans the full height of the axes.
        trans = blended_transform_factory(self._axes.transData,
                                          self._axes.transAxes)
        self._patch = Rectangle((0., 0.), 1., 1., transform=trans)
        self._patch.set_zorder(100)
        self._setup_patch()

    def _setup_patch(self):
        # Attach the (initially hidden) rectangle patch to the axes.
        self._axes.add_patch(self._patch)
        self._patch.set_visible(False)
        self._sync_patch()

    def _roi_factory(self):
        return XRangeROI()

    def start_selection(self, event):
        """Begin a drag (new range) or a scrub (move existing range)."""

        if event.inaxes != self._axes:
            return False

        if event.key == SCRUBBING_KEY:
            # Scrubbing requires an existing ROI under the cursor.
            if not self._roi.defined():
                return False
            elif not self._roi.contains(event.xdata, event.ydata):
                return False

        self._roi_store()
        if event.key == SCRUBBING_KEY:
            self._scrubbing = True
            # Offset between the press point and the ROI center.
            self._dx = event.xdata - self._roi.center()
        else:
            self.reset()
            # Start from a zero-width range at the press point.
            self._roi.set_range(event.xdata, event.xdata)
            self._xi = event.xdata

        self._mid_selection = True
        self._sync_patch()

    def update_selection(self, event):
        """Widen the range (drag) or translate it (scrub)."""

        if not self._mid_selection or event.inaxes != self._axes:
            return False

        if event.key == SCRUBBING_KEY:
            if not self._roi.defined():
                return False

        if self._scrubbing:
            self._roi.move_to(event.xdata + self._dx)
        else:
            self._roi.set_range(min(event.xdata, self._xi),
                                max(event.xdata, self._xi))

        self._sync_patch()

    def finalize_selection(self, event):
        """End the gesture and hide the preview patch."""
        self._scrubbing = False
        self._mid_selection = False
        self._patch.set_visible(False)
        self._draw()

    def _sync_patch(self):
        """Mirror the ROI geometry/visibility onto the matplotlib patch."""
        if self._roi.defined():
            rng = self._roi.range()
            self._patch.set_xy((rng[0], 0))
            self._patch.set_width(rng[1] - rng[0])
            self._patch.set_height(1)
            self._patch.set(**self.plot_opts)
            self._patch.set_visible(True)
        else:
            self._patch.set_visible(False)

        self._draw()
class MplYRangeROI(AbstractMplRoi):
    """Interactively selects a range along the y axis of a plot."""

    def __init__(self, axes):
        """
        :param axes: A matplotlib Axes object to attach the graphical ROI to
        """

        AbstractMplRoi.__init__(self, axes)

        # NOTE: despite the name, _xi stores the *y* data coordinate of
        # the initial press (mirrors MplXRangeROI's attribute name).
        self._xi = None

        self.plot_opts = {'edgecolor': PATCH_COLOR, 'facecolor': PATCH_COLOR,
                          'alpha': 0.3}
        # Blended transform: x in axes-fraction units, y in data units,
        # so the patch always spans the full width of the axes.
        trans = blended_transform_factory(self._axes.transAxes,
                                          self._axes.transData)
        self._patch = Rectangle((0., 0.), 1., 1., transform=trans)
        self._patch.set_zorder(100)
        self._setup_patch()

    def _setup_patch(self):
        # Attach the (initially hidden) rectangle patch to the axes.
        self._axes.add_patch(self._patch)
        self._patch.set_visible(False)
        self._sync_patch()

    def _roi_factory(self):
        return YRangeROI()

    def start_selection(self, event):
        """Begin a drag (new range) or a scrub (move existing range)."""

        if event.inaxes != self._axes:
            return False

        if event.key == SCRUBBING_KEY:
            # Scrubbing requires an existing ROI under the cursor.
            if not self._roi.defined():
                return False
            elif not self._roi.contains(event.xdata, event.ydata):
                return False

        self._roi_store()
        if event.key == SCRUBBING_KEY:
            self._scrubbing = True
            # Offset between the press point and the ROI center.
            self._dy = event.ydata - self._roi.center()
        else:
            self.reset()
            # Start from a zero-height range at the press point.
            self._roi.set_range(event.ydata, event.ydata)
            self._xi = event.ydata

        self._mid_selection = True
        self._sync_patch()

    def update_selection(self, event):
        """Widen the range (drag) or translate it (scrub)."""

        if not self._mid_selection or event.inaxes != self._axes:
            return False

        if event.key == SCRUBBING_KEY:
            if not self._roi.defined():
                return False

        if self._scrubbing:
            self._roi.move_to(event.ydata + self._dy)
        else:
            self._roi.set_range(min(event.ydata, self._xi),
                                max(event.ydata, self._xi))

        self._sync_patch()

    def finalize_selection(self, event):
        """End the gesture and hide the preview patch."""
        self._scrubbing = False
        self._mid_selection = False
        self._patch.set_visible(False)
        self._draw()

    def _sync_patch(self):
        """Mirror the ROI geometry/visibility onto the matplotlib patch."""
        if self._roi.defined():
            rng = self._roi.range()
            self._patch.set_xy((0, rng[0]))
            self._patch.set_height(rng[1] - rng[0])
            self._patch.set_width(1)
            self._patch.set(**self.plot_opts)
            self._patch.set_visible(True)
        else:
            self._patch.set_visible(False)

        self._draw()
class MplCircularROI(AbstractMplRoi):
    """
    Class to display / edit circular ROIs using matplotlib

    Since circles on the screen may not be circles in the data
    (due, e.g., to logarithmic scalings on the axes), the
    ultimate ROI that is created is a polygonal ROI

    :param plot_opts:

        A dictionary of plot keywords that are passed to
        the patch representing the ROI. These control
        the visual properties of the ROI
    """

    def __init__(self, axes):
        """
        :param axes: A matplotlib Axes object to attach the graphical ROI to
        """

        AbstractMplRoi.__init__(self, axes)
        self.plot_opts = {'edgecolor': PATCH_COLOR, 'facecolor': PATCH_COLOR,
                          'alpha': 0.3}

        # NOTE: the underlying CircularROI is maintained in *pixel*
        # coordinates (see start_selection); roi() converts it to a
        # data-space polygon.
        self._xi = None
        self._yi = None

        self._setup_patch()

    def _setup_patch(self):
        # IdentityTransform: the ellipse geometry is given directly in
        # display (pixel) coordinates.
        self._patch = Ellipse((0., 0.), transform=IdentityTransform(),
                              width=0., height=0.,)
        self._patch.set_zorder(100)
        self._patch.set(**self.plot_opts)
        self._axes.add_patch(self._patch)
        self._patch.set_visible(False)
        self._sync_patch()

    def _roi_factory(self):
        return CircularROI()

    def _sync_patch(self):
        # Update geometry
        if not self._roi.defined():
            self._patch.set_visible(False)
        else:
            # ROI center/radius are in pixel coordinates.
            xy = self._roi.get_center()
            r = self._roi.get_radius()
            self._patch.center = xy
            self._patch.width = 2. * r
            self._patch.height = 2. * r
            self._patch.set_visible(True)

        # Update appearance
        self._patch.set(**self.plot_opts)

        # Refresh
        self._axes.figure.canvas.draw()

    def start_selection(self, event):
        """Begin a drag (new circle) or a scrub (move existing circle)."""

        if event.inaxes != self._axes:
            return False

        # Work in pixel coordinates throughout the gesture.
        xy = data_to_pixel(self._axes, [event.xdata], [event.ydata])
        xi = xy[0, 0]
        yi = xy[0, 1]

        if event.key == SCRUBBING_KEY:
            # Scrubbing requires an existing ROI under the cursor
            # (contains() is called with pixel coordinates here).
            if not self._roi.defined():
                return False
            elif not self._roi.contains(xi, yi):
                return False

        self._roi_store()
        if event.key == SCRUBBING_KEY:
            self._scrubbing = True
            (xc, yc) = self._roi.get_center()
            # Offset between the press point and the circle center.
            self._dx = xc - xi
            self._dy = yc - yi
        else:
            self.reset()
            # Start from a zero-radius circle at the press point.
            self._roi.set_center(xi, yi)
            self._roi.set_radius(0.)
            self._xi = xi
            self._yi = yi

        self._mid_selection = True
        self._sync_patch()

    def update_selection(self, event):
        """Resize the circle (drag) or translate it (scrub)."""

        if not self._mid_selection or event.inaxes != self._axes:
            return False

        xy = data_to_pixel(self._axes, [event.xdata], [event.ydata])
        xi = xy[0, 0]
        yi = xy[0, 1]
        if event.key == SCRUBBING_KEY:
            if not self._roi.defined():
                return False

        if self._scrubbing:
            self._roi.set_center(xi + self._dx, yi + self._dy)
        else:
            # Radius = pixel distance from the initial press point.
            dx = xy[0, 0] - self._xi
            dy = xy[0, 1] - self._yi
            self._roi.set_radius(np.hypot(dx, dy))

        self._sync_patch()

    def roi(self):
        """Return the selection as a data-space PolygonalROI.

        The on-screen circle is sampled at 200 points in pixel space and
        mapped back through the axes transform, so the result stays
        correct under nonlinear (e.g. log-scaled) axes.
        """
        if not self._roi.defined():
            return PolygonalROI()

        theta = np.linspace(0, 2 * np.pi, num=200)
        xy_center = self._roi.get_center()
        rad = self._roi.get_radius()
        x = xy_center[0] + rad * np.cos(theta)
        y = xy_center[1] + rad * np.sin(theta)
        xy_data = pixel_to_data(self._axes, x, y)
        vx = xy_data[:, 0].ravel().tolist()
        vy = xy_data[:, 1].ravel().tolist()
        result = PolygonalROI(vx, vy)
        return result

    def finalize_selection(self, event):
        """End the gesture and hide the preview patch."""
        self._scrubbing = False
        self._mid_selection = False
        self._patch.set_visible(False)
        self._axes.figure.canvas.draw()
class MplPolygonalROI(AbstractMplRoi):
    """
    Defines and displays polygonal ROIs on matplotlib plots

    Attributes:

        plot_opts: Dictionary instance
            A dictionary of plot keywords that are passed to
            the patch representing the ROI. These control
            the visual properties of the ROI
    """

    def __init__(self, axes):
        """
        :param axes: A matplotlib Axes object to attach the graphical ROI to
        """

        AbstractMplRoi.__init__(self, axes)
        self.plot_opts = {'edgecolor': PATCH_COLOR, 'facecolor': PATCH_COLOR,
                          'alpha': 0.3}

        self._setup_patch()

    def _setup_patch(self):
        # Dummy 2-vertex polygon; real vertices are set in _sync_patch.
        self._patch = Polygon(np.array(list(zip([0, 1], [0, 1]))))
        self._patch.set_zorder(100)
        self._patch.set(**self.plot_opts)
        self._axes.add_patch(self._patch)
        self._patch.set_visible(False)
        self._sync_patch()

    def _roi_factory(self):
        return PolygonalROI()

    def _sync_patch(self):
        # Update geometry
        if not self._roi.defined():
            self._patch.set_visible(False)
        else:
            x, y = self._roi.to_polygon()
            # Close the polygon by repeating the first vertex.
            self._patch.set_xy(list(zip(x + [x[0]],
                                        y + [y[0]])))
            self._patch.set_visible(True)

        # Update appearance
        self._patch.set(**self.plot_opts)

        # Refresh
        self._axes.figure.canvas.draw()

    def start_selection(self, event):
        """Begin a freehand draw or a scrub (move existing polygon)."""

        if event.inaxes != self._axes:
            return False

        if event.key == SCRUBBING_KEY:
            # Scrubbing requires an existing ROI under the cursor.
            if not self._roi.defined():
                return False
            elif not self._roi.contains(event.xdata, event.ydata):
                return False

        self._roi_store()
        if event.key == SCRUBBING_KEY:
            self._scrubbing = True
            # Last cursor position; deltas are accumulated per-event.
            self._cx = event.xdata
            self._cy = event.ydata
        else:
            self.reset()
            self._roi.add_point(event.xdata, event.ydata)

        self._mid_selection = True
        self._sync_patch()

    def update_selection(self, event):
        """Append a vertex (draw) or translate the polygon (scrub)."""

        if not self._mid_selection or event.inaxes != self._axes:
            return False

        if event.key == SCRUBBING_KEY:
            if not self._roi.defined():
                return False

        if self._scrubbing:
            # move_to takes *offsets* for polygons; pass the incremental
            # cursor displacement and remember the new reference point.
            self._roi.move_to(event.xdata - self._cx,
                              event.ydata - self._cy)
            self._cx = event.xdata
            self._cy = event.ydata
        else:
            self._roi.add_point(event.xdata, event.ydata)

        self._sync_patch()

    def finalize_selection(self, event):
        """End the gesture and hide the preview patch."""
        self._scrubbing = False
        self._mid_selection = False
        self._patch.set_visible(False)
        self._axes.figure.canvas.draw()
class MplPathROI(MplPolygonalROI):
    """Displays and edits an open path (polyline) on a matplotlib plot.

    Like :class:`MplPolygonalROI`, but the vertices describe an open
    :class:`Path` rather than a closed polygon, and the patch is
    rebuilt from scratch on every sync.
    """

    def _roi_factory(self):
        # Bug fix: this method was previously named ``roi_factory`` and so
        # never overrode AbstractMplRoi._roi_factory -- the editor silently
        # built a PolygonalROI instead of a Path.
        return Path()

    def _setup_patch(self):
        # The patch is created lazily in _sync_patch, because a matplotlib
        # Path needs at least one vertex.
        self._patch = None

    def _sync_patch(self):
        # Throw away any stale patch; it is rebuilt below when needed.
        if self._patch is not None:
            self._patch.remove()
            self._patch = None

        # Update geometry
        if not self._roi.defined():
            return

        x, y = self._roi.to_polygon()
        p = MplPath(np.column_stack((x, y)))
        self._patch = PathPatch(p)
        # Bug fix: the patch must be attached to the axes, both to render
        # and so that the ``remove()`` call above is valid next sync.
        self._axes.add_patch(self._patch)
        self._patch.set_visible(True)

        # Update appearance
        self._patch.set(**self.plot_opts)

        # Refresh
        self._axes.figure.canvas.draw()

    def finalize_selection(self, event):
        """End the gesture and hide the preview patch."""
        # Bug fix: also clear the scrubbing flag (the inherited
        # start/update_selection may have set it).
        self._scrubbing = False
        self._mid_selection = False
        if self._patch is not None:
            self._patch.set_visible(False)
        self._axes.figure.canvas.draw()
class CategoricalROI(Roi):
    """
    A ROI abstraction to represent selections of categorical data.
    """

    def __init__(self, categories=None):
        """
        :param categories: Optional array-like of categories (or a
            categorical component) used to initialize the selection.
        """
        if categories is None:
            self.categories = None
        else:
            self.update_categories(categories)

    def to_polygon(self):
        """ Just not possible.
        """
        raise NotImplementedError

    def _categorical_helper(self, indata):
        """
        A helper function to do the rigamaroll of getting categorical data.

        :param indata: Any type of input data
        :return: The best guess at the categorical data associated with indata
        """
        try:
            if indata.categorical:
                return indata._categorical_data
            else:
                return indata[:]
        except AttributeError:
            return np.asarray(indata)

    def contains(self, x, y):
        """
        Test whether a set of categorical elements fall within
        the region of interest

        :param x: Any array-like object of categories
            (includes CategoricalComponenets)
        :param y: Unused but required for compatibility

        *Returns*

            A list of True/False values, for whether each x value falls
            within the ROI
        """
        check = self._categorical_helper(x)
        if self.categories is None or len(self.categories) == 0:
            # Nothing selected: everything is outside. Bug fix: derive the
            # shape from the coerced array (not ``x.shape``) so plain lists
            # are accepted too.
            return np.zeros(np.asarray(check).shape, dtype=bool)
        # searchsorted finds each element's insertion point; clamping to
        # the last index keeps the lookup in-bounds, and the equality test
        # rejects values that are not actually in the selection.
        index = np.minimum(np.searchsorted(self.categories, check),
                           len(self.categories) - 1)
        return self.categories[index] == check

    def update_categories(self, categories):
        """Set the selection to the unique categories in *categories*."""
        self.categories = np.unique(self._categorical_helper(categories))

    def defined(self):
        """ Returns True if the ROI is defined """
        return self.categories is not None

    def reset(self):
        """Clear the selection."""
        self.categories = None

    @staticmethod
    def from_range(cat_comp, lo, hi):
        """
        Utility function to help construct the Roi from a range.

        :param cat_comp: Anything understood by ._categorical_helper ... array, list or component
        :param lo: lower bound of the range
        :param hi: upper bound of the range
        :return: CategoricalROI object
        """
        # Convert lo and hi to integers. Note that if lo or hi are negative,
        # which can happen if the user zoomed out, we need to reset the to zero
        # otherwise they will have strange effects when slicing the categories.
        # Note that we used ceil for lo, because if lo is 0.9 then we should
        # only select 1 and above.
        lo = np.intp(np.ceil(lo) if lo > 0 else 0)
        hi = np.intp(np.ceil(hi) if hi > 0 else 0)
        roi = CategoricalROI()
        cat_data = cat_comp.categories
        roi.update_categories(cat_data[lo:hi])
        return roi
| {
"content_hash": "f10d3404c272f4746cd1d64afa148c7c",
"timestamp": "",
"source": "github",
"line_count": 1265,
"max_line_length": 97,
"avg_line_length": 27.87114624505929,
"alnum_prop": 0.5380208185608532,
"repo_name": "saimn/glue",
"id": "e1e90366ca69919b820ecd79b8e15c1a1557ea84",
"size": "35257",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "glue/core/roi.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "1609137"
},
{
"name": "Shell",
"bytes": "1603"
}
],
"symlink_target": ""
} |
import k3d
import os
def generate():
    """Build a k3d plot showing a basic PNG texture and return its
    inline snapshot."""
    asset = os.path.join(os.path.abspath(os.path.dirname(__file__)),
                         '../../assets/factory/arcade_carpet_512.png')
    with open(asset, 'rb') as img:
        raw = img.read()

    texture = k3d.texture(raw,
                          file_format='png')

    plot = k3d.plot()
    plot += texture
    plot.snapshot_type = 'inline'
    return plot.get_snapshot()
| {
"content_hash": "cd13e1f5c0ca65db97d9007c8d0500a0",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 73,
"avg_line_length": 23.63157894736842,
"alnum_prop": 0.5478841870824054,
"repo_name": "K3D-tools/K3D-jupyter",
"id": "553849fcf69f202b7643826deb3bd9993b26c514",
"size": "449",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "docs/source/reference/plots/factory/texture_basic_plot.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "164"
},
{
"name": "CSS",
"bytes": "1326"
},
{
"name": "GLSL",
"bytes": "33792"
},
{
"name": "HTML",
"bytes": "8112"
},
{
"name": "JavaScript",
"bytes": "599147"
},
{
"name": "Jupyter Notebook",
"bytes": "5311"
},
{
"name": "Python",
"bytes": "1949685"
},
{
"name": "Shell",
"bytes": "268"
}
],
"symlink_target": ""
} |
import time
from simplejson import dumps
from django.shortcuts import render_to_response
from django.http import HttpResponse
from django.template import RequestContext
# ZeroMQ Connection
from gevent import spawn
from gevent_zeromq import zmq
# Single shared ZeroMQ context and the PUB socket every greenlet
# publishes chat/room messages through; listeners connect SUB sockets
# back to this same endpoint.
context = zmq.Context()
publisher = context.socket(zmq.PUB)
publisher.bind("tcp://127.0.0.1:5000")

# Names of chat rooms created during this process's lifetime.
ACTIVE_ROOMS = set([])
# Message Coroutines
def send_message(socket, room, text):
    """Publish *text* on the given PUB socket, prefixed with its room."""
    payload = "{0}:{1}".format(room, text)
    socket.send_unicode(payload)
def message_listener(socketio, room):
    """Forward chat messages published for *room* to a socket.io client.

    Runs forever; intended to be spawned as a gevent greenlet.
    """
    # For too many threads spawning new connection will cause a
    # "too many mailboxes" error, but for small amounts of
    # threads this is fine.
    subscriber = context.socket(zmq.SUB)
    subscriber.connect("tcp://127.0.0.1:5000")

    # setsockopt doesn't like unicode
    subscriber.setsockopt(zmq.SUBSCRIBE, str(room))

    socketio.send({'message': 'connected: ' + room})

    while True:
        msg = subscriber.recv()
        if msg:
            # Messages arrive as "room:text"; strip the room prefix.
            # NOTE(review): text containing ':' is truncated here — confirm.
            socketio.send({'message': msg.split(":")[1]})
# Room Coroutines
def new_room(socket, room_name):
    """Announce a newly created room on the 'room' channel."""
    socket.send("room:" + str(room_name))
def room_listener(socketio):
    """Forward new-room announcements to a socket.io client.

    Runs forever; intended to be spawned as a gevent greenlet.
    """
    # For too many threads spawning new connection will cause a
    # "too many mailboxes" error, but for small amounts of
    # threads this is fine.
    subscriber = context.socket(zmq.SUB)
    subscriber.connect("tcp://127.0.0.1:5000")
    subscriber.setsockopt(zmq.SUBSCRIBE, 'room')

    while True:
        msg = subscriber.recv()
        if msg:
            # Announcements arrive as "room:<name>".
            socketio.send({'room_name': msg.split(":")[1]})
        time.sleep(5)
def room(request, room_name=None, template_name='room.html'):
    """Render a chat-room page, announcing the room if it is new."""
    # NOTE: this local dict shadows the module-level zmq ``context``.
    context = {
        'room_name': room_name,
        'initial_rooms': dumps(list(ACTIVE_ROOMS)),
    }
    if room_name not in ACTIVE_ROOMS:
        # First visit to this room: broadcast its existence and remember it.
        spawn(new_room, publisher, room_name)
        ACTIVE_ROOMS.add(room_name)
    return render_to_response(template_name, context,
                              context_instance=RequestContext(request))
def room_list(request, template_name='room_list.html'):
    """Render the list of currently known chat rooms."""
    context = {
        'initial_rooms': dumps(list(ACTIVE_ROOMS)),
    }
    return render_to_response(template_name, context,
                              context_instance=RequestContext(request))
# SocketIO Handler
def socketio(request):
    """Main socket.io endpoint: dispatches subscribe/message commands.

    Protocol (one packet per recv): "subscribe:rooms",
    "subscribe:<room>", or "message:<room>,<text>".
    """
    socketio = request.environ['socketio']
    while True:
        message = socketio.recv()

        if len(message) == 1:
            action, arg = message[0].split(':')
            if action == 'subscribe':
                if arg == 'rooms':
                    # Stream announcements of newly created rooms.
                    spawn(room_listener, socketio)
                else:
                    # Stream chat messages for the named room.
                    spawn(message_listener, socketio, arg)
            elif action == 'message':
                room, text = arg.split(',')
                #timestamp = time.strftime("(%H.%M.%S)", time.localtime())
                # Tag the message with the sender's IP address.
                ip_addr = request.META['REMOTE_ADDR']
                message = "(%s) %s" % (ip_addr, text)
                # join() so the publish completes before the next recv.
                spawn(send_message, publisher, room, message).join()
    return HttpResponse()
| {
"content_hash": "b001e002d6c416bb284b4bbb11309441",
"timestamp": "",
"source": "github",
"line_count": 110,
"max_line_length": 74,
"avg_line_length": 27.436363636363637,
"alnum_prop": 0.621272365805169,
"repo_name": "MechanisM/zeromq-chat",
"id": "705544ed8db2e6aa9da08e0c526f0a29459d5db1",
"size": "3018",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "apps/chat/views.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
} |
import unittest
import numpy as np
import chainer
from chainer import testing
from chainer.dataset import tabular
class TestFromData(unittest.TestCase):
    """Exercise tabular.from_data with each supported argument form."""

    def test_unary_args(self):
        # A single unnamed array: one auto-generated key, mode is None.
        dataset = tabular.from_data(np.arange(10))
        self.assertIsInstance(dataset, chainer.dataset.TabularDataset)
        self.assertEqual(len(dataset), 10)
        self.assertEqual(len(dataset.keys), 1)
        self.assertIsNone(dataset.mode)
        output = dataset.slice[[1, 3]].fetch()
        np.testing.assert_equal(output, [1, 3])
        self.assertIsInstance(output, np.ndarray)

    def test_unary_args_with_key(self):
        # A single ('name', data) pair: the given key is used.
        dataset = tabular.from_data(('a', np.arange(10)))
        self.assertIsInstance(dataset, chainer.dataset.TabularDataset)
        self.assertEqual(len(dataset), 10)
        self.assertEqual(dataset.keys, ('a',))
        self.assertIsNone(dataset.mode)
        output = dataset.slice[[1, 3]].fetch()
        np.testing.assert_equal(output, [1, 3])
        self.assertIsInstance(output, np.ndarray)

    def test_unary_kwargs(self):
        # A single keyword argument: the keyword becomes the key.
        dataset = tabular.from_data(a=np.arange(10))
        self.assertIsInstance(dataset, chainer.dataset.TabularDataset)
        self.assertEqual(len(dataset), 10)
        self.assertEqual(dataset.keys, ('a',))
        self.assertIsNone(dataset.mode)
        output = dataset.slice[[1, 3]].fetch()
        np.testing.assert_equal(output, [1, 3])
        self.assertIsInstance(output, np.ndarray)

    def test_tuple(self):
        # Multiple positional columns: mode becomes tuple; each column
        # keeps its own container type (ndarray vs list) on fetch.
        dataset = tabular.from_data(
            np.arange(10),
            ('b', [2, 7, 1, 8, 4, 5, 9, 0, 3, 6]))
        self.assertIsInstance(dataset, chainer.dataset.TabularDataset)
        self.assertEqual(len(dataset), 10)
        self.assertEqual(len(dataset.keys), 2)
        self.assertEqual(dataset.keys[1], 'b')
        self.assertEqual(dataset.mode, tuple)
        output = dataset.slice[[1, 3]].fetch()
        np.testing.assert_equal(output, ([1, 3], [7, 8]))
        self.assertIsInstance(output[0], np.ndarray)
        self.assertIsInstance(output[1], list)

    def test_tuple_unique(self):
        # Auto-generated keys must not collide across datasets.
        dataset_a = tabular.from_data(
            np.arange(10),
            [3, 1, 4, 5, 9, 2, 6, 8, 7, 0])
        dataset_b = tabular.from_data(
            [2, 7, 1, 8, 4, 5, 9, 0, 3, 6],
            -np.arange(10))
        self.assertFalse(set(dataset_a.keys) & set(dataset_b.keys))

    def test_dict(self):
        # Multiple keyword columns: mode becomes dict.
        dataset = tabular.from_data(
            a=np.arange(10),
            b=[2, 7, 1, 8, 4, 5, 9, 0, 3, 6])
        self.assertIsInstance(dataset, chainer.dataset.TabularDataset)
        self.assertEqual(len(dataset), 10)
        self.assertEqual(set(dataset.keys), {'a', 'b'})
        self.assertEqual(dataset.mode, dict)
        output = dataset.slice[[1, 3]].fetch()
        np.testing.assert_equal(output, {'a': [1, 3], 'b': [7, 8]})
        self.assertIsInstance(output['a'], np.ndarray)
        self.assertIsInstance(output['b'], list)
testing.run_module(__name__, __file__)
| {
"content_hash": "811b1639ee9eb9dfedb36371bd507912",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 70,
"avg_line_length": 33.831460674157306,
"alnum_prop": 0.6067751577548987,
"repo_name": "okuta/chainer",
"id": "11a1d017bcfde06434c2b27c021702bf2320588d",
"size": "3011",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/chainer_tests/dataset_tests/tabular_tests/test_from_data.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "3368"
},
{
"name": "C",
"bytes": "70"
},
{
"name": "C++",
"bytes": "1548487"
},
{
"name": "CMake",
"bytes": "51604"
},
{
"name": "Cuda",
"bytes": "128377"
},
{
"name": "Dockerfile",
"bytes": "1457"
},
{
"name": "PowerShell",
"bytes": "7197"
},
{
"name": "Python",
"bytes": "5851909"
},
{
"name": "Shell",
"bytes": "41045"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import abc
import six
from st2common.util import concurrency
__all__ = ["Sensor", "PollingSensor"]
@six.add_metaclass(abc.ABCMeta)
class BaseSensor(object):
    """
    Abstract parent of all sensor classes - not to be instantiated directly.
    """
    def __init__(self, sensor_service, config=None):
        """
        :param sensor_service: Sensor Service instance.
        :type sensor_service: :class:``st2reactor.container.sensor_wrapper.SensorService``
        :keyword config: Sensor config.
        :type config: ``dict`` or None
        """
        # The non-underscored attributes are the supported API; the
        # leading-underscore twins alias the same objects and are kept only
        # for backward compatibility (to be deprecated in the future).
        self.sensor_service = sensor_service
        self._sensor_service = sensor_service
        self.config = config or {}
        self._config = self.config
    @abc.abstractmethod
    def setup(self):
        """
        Perform any one-time sensor initialization (if any).
        """
        pass
    @abc.abstractmethod
    def run(self):
        """
        Start the sensor's main processing.
        """
        pass
    @abc.abstractmethod
    def cleanup(self):
        """
        Release any resources held by the sensor (if any).
        """
        pass
    @abc.abstractmethod
    def add_trigger(self, trigger):
        """
        Called when a trigger is created.
        """
        pass
    @abc.abstractmethod
    def update_trigger(self, trigger):
        """
        Called when a trigger is updated.
        """
        pass
    @abc.abstractmethod
    def remove_trigger(self, trigger):
        """
        Called when a trigger is deleted.
        """
        pass
class Sensor(BaseSensor):
    """
    Parent class for passive sensors; inherit from this and implement run().
    """
    @abc.abstractmethod
    def run(self):
        pass
class PollingSensor(BaseSensor):
    """
    Parent class for active sensors, which periodically poll a 3rd party
    system for new information.
    """
    def __init__(self, sensor_service, config=None, poll_interval=5):
        super(PollingSensor, self).__init__(
            sensor_service=sensor_service, config=config
        )
        self._poll_interval = poll_interval
    @abc.abstractmethod
    def poll(self):
        """
        Query the 3rd party system for new information.
        """
        pass
    def run(self):
        # Poll forever, pausing ``poll_interval`` seconds between iterations.
        while True:
            self.poll()
            concurrency.sleep(self._poll_interval)
    def get_poll_interval(self):
        """
        Return the current poll interval.

        :return: Current poll interval (seconds).
        :rtype: ``float``
        """
        return self._poll_interval
    def set_poll_interval(self, poll_interval):
        """
        Change the poll interval.

        :param poll_interval: Poll interval (seconds) to use.
        :type poll_interval: ``float``
        """
        self._poll_interval = poll_interval
| {
"content_hash": "53d4a0b35e7227bd05035c20ca2960cd",
"timestamp": "",
"source": "github",
"line_count": 125,
"max_line_length": 90,
"avg_line_length": 22.808,
"alnum_prop": 0.5762890213960014,
"repo_name": "Plexxi/st2",
"id": "a8309ba292b412b15659395f7f5acce6d7cafbf2",
"size": "3479",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "st2reactor/st2reactor/sensor/base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "198"
},
{
"name": "JavaScript",
"bytes": "444"
},
{
"name": "Jinja",
"bytes": "174532"
},
{
"name": "Makefile",
"bytes": "75242"
},
{
"name": "PowerShell",
"bytes": "856"
},
{
"name": "Python",
"bytes": "6453910"
},
{
"name": "Shell",
"bytes": "93607"
},
{
"name": "Starlark",
"bytes": "7236"
}
],
"symlink_target": ""
} |
""" Heap sort. A O(n * log(n)) operation to sort
a list in increasing order. It's based on a MinHeap
implementation (Priority Queue).
"""
from heaps.min_heap import MinHeap
from utils.arrays import create_random_array
def heap_sort(source_list=None):
    """Return a new list with the elements of *source_list* in increasing order.

    Runs in O(n * log(n)): building the heap inserts n elements at
    O(log(n)) each, and each of the n removals is O(log(n)) as well.
    The input list itself is not modified.

    :param source_list: the list of comparable elements to sort.
    :raises TypeError: if *source_list* is not a list.
    """
    if not isinstance(source_list, list):
        # TypeError is the precise exception for a wrong argument type; it is
        # still a subclass of Exception, so existing ``except Exception``
        # callers keep working.
        raise TypeError("You must provide a list in order to execute this process.")
    # Building the heap is an O(n * log(n)) operation, because inserting
    # an element in the heap is an O(log(n)) operation, and this operation
    # must be executed n times (one for every element on the source list).
    heap = MinHeap(source_list)
    sorted_data = []
    while heap.size > 0:
        sorted_data.append(heap.remove())
    return sorted_data
if __name__ == '__main__':
    random_array = create_random_array(100)
    sorted_array = heap_sort(random_array)
    # Walk consecutive pairs and verify non-decreasing order.
    for index, (previous_value, current_value) in enumerate(
            zip(sorted_array, sorted_array[1:])):
        if current_value < previous_value:
            raise Exception(
                "The array isn't sorted. index {} has value {}, and index {} has value {}".format(
                    index,
                    previous_value,
                    index + 1,
                    current_value
                )
            )
    print("The array was correctly sorted.")
| {
"content_hash": "4f474d8ac8fb1eda28c8414211920d9d",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 98,
"avg_line_length": 35.390243902439025,
"alnum_prop": 0.5933838731909028,
"repo_name": "rcanepa/cs-fundamentals",
"id": "75a3fc5f2f6c5f6a77c1eae70d0a322c9e6851b1",
"size": "1451",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/sorting/heap_sort.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "78962"
},
{
"name": "Python",
"bytes": "256458"
}
],
"symlink_target": ""
} |
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# See: http://effbot.org/tkinterbook/canvas.htm
"""
TODO...
"""
__all__ = ['TkGUI']
from jdhp.tictactoe.game import Game
from jdhp.tictactoe.player.greedy import GreedyPlayer
from jdhp.tictactoe.player.human import HumanPlayer
from jdhp.tictactoe.player.random import RandomPlayer
import random
import tkinter as tk
SQUARE_NUM = 3 # squares per side
class TkGUI:
    """Tkinter front-end for the tic-tac-toe game.

    Builds the main window (player-type option menus, the 3x3 drawing
    canvas, a status label and a Start/Quit button) and drives the game
    defined by jdhp.tictactoe.game.Game with the selected player classes.
    """
    def __init__(self):
        """Create the game objects, the main window and all its widgets."""
        # Game attributes #############
        self.game = Game()
        self.player_list = None
        self.current_player_index = None
        self.current_state = None
        # GUI parameters ##############
        # TODO...
        self.square_size = 128 # pixels
        self.symbol_offset = 20
        self.symbol_line_width = 12
        self.grid_line_width = 8
        self.square_color = "white"
        self.player1_over_square_color = "red"
        self.player2_over_square_color = "green"
        self.over_wrong_square_color = "white" # "gray"
        self.player1_symbol_color = "red"
        self.player2_symbol_color = "green"
        # Make the main window ########
        self.root = tk.Tk() # TODO
        self.root.resizable(False, False) # <- Lock the size of the window
        # Make widgets ################
        self.available_player_type_list = ["Human",
                                           "Computer (easy)",
                                           "Computer (very easy)"]
        # Player1 option menu
        player1_label = tk.Label(self.root, text="Player 1 (X):")
        player1_label.grid(row=0, column=0, padx=8, sticky="w")
        self._player1_var = tk.StringVar()
        self._player1_var.set(self.available_player_type_list[0])
        self.player1_optionmenu = tk.OptionMenu(self.root,
                                                self._player1_var,
                                                *self.available_player_type_list)
                                                #command=self.set_player1)
        self.player1_optionmenu.config(width=24)
        self.player1_optionmenu.grid(row=0, column=1, sticky="we")
        # Player2 option menu
        player2_label = tk.Label(self.root, text="Player 2 (O):")
        player2_label.grid(row=1, column=0, padx=8, sticky="w")
        self._player2_var = tk.StringVar()
        self._player2_var.set(self.available_player_type_list[0])
        self.player2_optionmenu = tk.OptionMenu(self.root,
                                                self._player2_var,
                                                *self.available_player_type_list)
                                                #command=self.set_player2)
        self.player2_optionmenu.config(width=24)
        self.player2_optionmenu.grid(row=1, column=1, sticky="we")
        # Canvas
        self.canvas = tk.Canvas(self.root,
                                width=SQUARE_NUM*self.square_size,
                                height=SQUARE_NUM*self.square_size)
        self.canvas.grid(row=2, column=0, columnspan=2, sticky="nswe")
        self.lock_canvas()
        self.canvas.tag_bind("square", # a tag string or an item id
                             "<Button-1>", # the event descriptor
                             self.click_on_canvas_callback, # the callback function
                             add="+") # "+" to add this binding to the previous one(s) (i.e. keep the previous binding(s)) or None to replace it or them
        self.canvas.create_text((self.square_size * 1.5, self.square_size * 1.5),
                                text="Define players then press start",
                                font="Sans 12 bold",
                                fill="gray",
                                anchor="center") # n, ne, e, se, s, sw, w, nw, or center
        # Status label
        self.status_label = tk.Label(self.root,
                                     font="Sans 12 bold",
                                     foreground="#000000",
                                     text="")
        self.status_label.grid(row=3, column=0, columnspan=2, pady=10, sticky="we")
        # Start / Quit / Restart button
        self.button = tk.Button(self.root, text="Start", command=self.start)
        self.button.grid(row=4, column=0, columnspan=2, sticky="we")
    ###################################
    def run(self):
        """Enter the Tk event loop; blocks until the window is closed."""
        # Tk event loop
        # TODO ???
        self.root.mainloop()
    ###################################
    def start(self):
        """Start a new game.

        Locks the player menus, turns the button into "Quit", builds the
        two player objects from the menu selections, picks a random first
        player, draws the empty grid and enters the play loop.
        """
        self.lock_player_option_menus()
        self.status_label["text"] = ""
        #self.status_label["font"] = "Sans 12 bold"
        #self.status_label["foreground"] = "#000000"
        # Change button's label and callback funtion
        self.button["text"] = "Quit"
        self.button["command"] = self.stop
        # Init game state
        self.player_list = [None, None] # TODO
        if self.get_player1_type() == "Human": # TODO
            self.player_list[0] = HumanPlayer("X")
        elif self.get_player1_type() == "Computer (easy)": # TODO
            self.player_list[0] = GreedyPlayer("X")
        elif self.get_player1_type() == "Computer (very easy)": # TODO
            self.player_list[0] = RandomPlayer("X")
        else:
            raise Exception("Internal error")
        if self.get_player2_type() == "Human": # TODO
            self.player_list[1] = HumanPlayer("O")
        elif self.get_player2_type() == "Computer (easy)": # TODO
            self.player_list[1] = GreedyPlayer("O")
        elif self.get_player2_type() == "Computer (very easy)": # TODO
            self.player_list[1] = RandomPlayer("O")
        else:
            raise Exception("Internal error")
        self.current_player_index = random.randint(0, 1) # TODO
        self.current_state = self.game.getInitialState()
        # Display the game grid
        self.draw_current_state()
        # Call the play loop
        self.play_loop()
    def stop(self):
        """End the current game: lock the board, restore the menus and the
        "Start" button, and display the result in the status label."""
        self.lock_canvas()
        self.unlock_player_option_menus()
        # Change button's label and callback funtion
        self.button["text"] = "Start"
        self.button["command"] = self.start
        # Display score
        if self.game.isFinal(self.current_state, self.player_list):
            if self.game.hasWon(self.player_list[0], self.current_state):
                self.status_label["text"] = "Player1 has won!"
                #self.status_label["font"] = "Sans 12 bold"
                #self.status_label["foreground"] = "#000000"
            elif self.game.hasWon(self.player_list[1], self.current_state):
                self.status_label["text"] = "Player2 has won!"
                #self.status_label["font"] = "Sans 12 bold"
                #self.status_label["foreground"] = "#000000"
            else:
                self.status_label["text"] = "Draw!"
                #self.status_label["font"] = "Sans 12 bold"
                #self.status_label["foreground"] = "#000000"
    def lock_player_option_menus(self):
        """
        Lock the player selection menu so that the selection cannot change.
        """
        self.player1_optionmenu["state"] = "disabled"
        self.player2_optionmenu["state"] = "disabled"
    def unlock_player_option_menus(self):
        """
        Unlock the player selection menu so that users can change the
        selection.
        """
        self.player1_optionmenu["state"] = "normal"
        self.player2_optionmenu["state"] = "normal"
    def lock_canvas(self):
        """
        Lock the canvas so that users cannot play.
        """
        self.canvas["state"] = "disabled"
    def unlock_canvas(self):
        """
        Unlock the canvas so that users can play.
        """
        self.canvas["state"] = "normal"
    ###################################
    def get_player1_type(self):
        """Return the player 1 type currently selected in the option menu."""
        return self._player1_var.get()
    def get_player2_type(self):
        """Return the player 2 type currently selected in the option menu."""
        return self._player2_var.get()
    ###################################
    def play_loop(self):
        """Let computer players take their turns, then wait for the human.

        Runs synchronously until the current player is a HumanPlayer or the
        game is over; human moves then arrive asynchronously through
        click_on_canvas_callback once the canvas is unlocked.
        """
        current_player = self.player_list[self.current_player_index]
        is_final = self.game.isFinal(self.current_state, self.player_list)
        # While computer plays
        while (not isinstance(current_player, HumanPlayer)) and (not is_final):
            action = self.player_list[self.current_player_index].play(self.game, self.current_state) # TODO: execute this function in a separate thread to avoid Tk lock
            self.play(action)
            current_player = self.player_list[self.current_player_index] # TODO
            is_final = self.game.isFinal(self.current_state, self.player_list) # TODO
        # Let (human) user play
        if not is_final: # TODO
            self.draw_current_state() # TODO (to update the "over square color"...)
            self.status_label["text"] = "{} Turn".format(current_player.symbol)
            self.unlock_canvas()
    def play(self, action):
        """Apply *action* for the current player, redraw the board and either
        finish the game or hand the turn to the other player."""
        current_player = self.player_list[self.current_player_index]
        self.current_state = self.game.nextState(self.current_state,
                                                 action,
                                                 current_player)
        self.draw_current_state()
        if self.game.isFinal(self.current_state, self.player_list):
            self.stop()
        else:
            self.current_player_index = (self.current_player_index + 1) % 2
    ###################################
    def click_on_canvas_callback(self, event): # event is a tkinter.Event object
        """Handle a left-click on a board square: decode the square index
        from the item's tag and play it if the move is valid."""
        id_tuple = self.canvas.find_withtag("current") # get the item which is under the mouse cursor
        if len(id_tuple) > 0:
            item_id = id_tuple[0]
            #print(self.canvas.gettags(item_id))
            item_tag1, item_tag2, item_tag3 = self.canvas.gettags(item_id)
            #print("square {} (item #{})".format(item_tag2, item_id))
            # The second tag carries the square index (set in draw_current_state).
            action = int(item_tag2)
            if self.game.isValidAction(self.current_state, action):
                self.lock_canvas()
                self.play(action) # TODO
                self.play_loop() # TODO
            else:
                raise Exception("Unexpected error")
    def draw_current_state(self):
        """Redraw the whole board: squares, X/O symbols and grid lines."""
        # Clear the canvas (remove all shapes)
        self.canvas.delete(tk.ALL)
        for row_index in range(SQUARE_NUM):
            # Make squares
            for col_index in range(SQUARE_NUM):
                square_index = row_index * 3 + col_index
                color = self.square_color
                tags = ("square", "{}".format(square_index))
                if self.current_state is not None:
                    if self.current_state[square_index] != " ":
                        active_fill_color = self.over_wrong_square_color
                    elif self.current_player_index == 0:
                        active_fill_color = self.player1_over_square_color
                    elif self.current_player_index == 1:
                        active_fill_color = self.player2_over_square_color
                    else:
                        raise Exception("Internal error")
                # NOTE(review): active_fill_color is only bound inside the
                # "current_state is not None" branch above; if this method
                # were ever called before start() sets the state, the call
                # below would raise NameError — confirm intended call order.
                self.canvas.create_rectangle(self.square_size * col_index, # x1
                                             self.square_size * (2 - row_index), # y1
                                             self.square_size * (col_index + 1), # x2
                                             self.square_size * (3 - row_index), # y2
                                             tag=tags,
                                             fill=color,
                                             activefill=active_fill_color,
                                             width=0)
                if self.current_state is not None:
                    off = self.symbol_offset
                    if self.current_state[square_index] == "X":
                        line_coordinates = (self.square_size * col_index + off, # x1
                                            self.square_size * (2 - row_index) + off, # y1
                                            self.square_size * (col_index + 1) - off, # x2
                                            self.square_size * (3 - row_index) - off) # y2
                        self.canvas.create_line(line_coordinates,
                                                fill=self.player1_symbol_color,
                                                width=self.symbol_line_width)
                        line_coordinates = (self.square_size * col_index + off, # x1
                                            self.square_size * (3 - row_index) - off, # y1
                                            self.square_size * (col_index + 1) - off, # x2
                                            self.square_size * (2 - row_index) + off) # y2
                        self.canvas.create_line(line_coordinates,
                                                fill=self.player1_symbol_color,
                                                width=self.symbol_line_width)
                    elif self.current_state[square_index] == "O":
                        line_coordinates = (self.square_size * col_index + off, # x1
                                            self.square_size * (2 - row_index) + off, # y1
                                            self.square_size * (col_index + 1) - off, # x2
                                            self.square_size * (3 - row_index) - off) # y2
                        self.canvas.create_oval(line_coordinates,
                                                outline=self.player2_symbol_color,
                                                width=self.symbol_line_width)
        # Draw vertical lines
        for col_index in range(1, SQUARE_NUM):
            line_coordinates = (self.square_size * col_index, # x1
                                0, # y1
                                self.square_size * col_index, # x2
                                self.square_size * SQUARE_NUM) # y2
            self.canvas.create_line(line_coordinates,
                                    fill="black",
                                    width=self.grid_line_width)
        # Draw horizontal lines
        for row_index in range(1, SQUARE_NUM):
            line_coordinates = (0, # x1
                                self.square_size * row_index, # y1
                                self.square_size * SQUARE_NUM, # x2
                                self.square_size * row_index) # y2
            self.canvas.create_line(line_coordinates,
                                    fill="black",
                                    width=self.grid_line_width)
def main():
    """Build the GUI and hand control to the Tk event loop."""
    TkGUI().run()
if __name__ == '__main__':
    main()
| {
"content_hash": "90f09b5af2bfb2700d58de7e7312ef95",
"timestamp": "",
"source": "github",
"line_count": 415,
"max_line_length": 174,
"avg_line_length": 38.41927710843373,
"alnum_prop": 0.48306573005519315,
"repo_name": "jeremiedecock/tictactoe-py",
"id": "76f2dd51421a2e35332a767cedb66d45746abd01",
"size": "16630",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "jdhp/tictactoe/gui.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "4552"
},
{
"name": "Python",
"bytes": "64076"
},
{
"name": "Shell",
"bytes": "8145"
}
],
"symlink_target": ""
} |
import unittest
from test import test_support
from weakref import proxy, ref, WeakSet
import operator
import copy
import string
import os
from random import randrange, shuffle
import sys
import warnings
import collections
import gc
import contextlib
from UserString import UserString as ustr
class Foo:
    # Minimal class whose instances are weak-referenceable and accept
    # arbitrary attributes; used as disposable elements in the GC tests.
    pass
class SomeClass(object):
    """Weak-referenceable wrapper whose equality and hash follow *value*."""
    def __init__(self, value):
        self.value = value
    def __eq__(self, other):
        # Equality is deliberately restricted to the exact same class.
        if type(self) is not type(other):
            return False
        return self.value == other.value
    def __ne__(self, other):
        # Python 2 does not derive __ne__ from __eq__, so spell it out.
        return not self.__eq__(other)
    def __hash__(self):
        return hash((SomeClass, self.value))
class RefCycle(object):
    # Each instance is part of a reference cycle with itself, so it is only
    # reclaimed by the cyclic garbage collector, not by plain refcounting.
    def __init__(self):
        self.cycle = self
class TestWeakSet(unittest.TestCase):
def setUp(self):
# need to keep references to them
self.items = [SomeClass(c) for c in ('a', 'b', 'c')]
self.items2 = [SomeClass(c) for c in ('x', 'y', 'z')]
self.letters = [SomeClass(c) for c in string.ascii_letters]
self.ab_items = [SomeClass(c) for c in 'ab']
self.abcde_items = [SomeClass(c) for c in 'abcde']
self.def_items = [SomeClass(c) for c in 'def']
self.ab_weakset = WeakSet(self.ab_items)
self.abcde_weakset = WeakSet(self.abcde_items)
self.def_weakset = WeakSet(self.def_items)
self.s = WeakSet(self.items)
self.d = dict.fromkeys(self.items)
self.obj = SomeClass('F')
self.fs = WeakSet([self.obj])
def test_methods(self):
weaksetmethods = dir(WeakSet)
for method in dir(set):
if method == 'test_c_api' or method.startswith('_'):
continue
self.assertIn(method, weaksetmethods,
"WeakSet missing method " + method)
def test_new_or_init(self):
self.assertRaises(TypeError, WeakSet, [], 2)
def test_len(self):
self.assertEqual(len(self.s), len(self.d))
self.assertEqual(len(self.fs), 1)
del self.obj
test_support.gc_collect()
self.assertEqual(len(self.fs), 0)
def test_contains(self):
for c in self.letters:
self.assertEqual(c in self.s, c in self.d)
# 1 is not weakref'able, but that TypeError is caught by __contains__
self.assertNotIn(1, self.s)
self.assertIn(self.obj, self.fs)
del self.obj
test_support.gc_collect()
self.assertNotIn(SomeClass('F'), self.fs)
def test_union(self):
u = self.s.union(self.items2)
for c in self.letters:
self.assertEqual(c in u, c in self.d or c in self.items2)
self.assertEqual(self.s, WeakSet(self.items))
self.assertEqual(type(u), WeakSet)
self.assertRaises(TypeError, self.s.union, [[]])
for C in set, frozenset, dict.fromkeys, list, tuple:
x = WeakSet(self.items + self.items2)
c = C(self.items2)
self.assertEqual(self.s.union(c), x)
del c
self.assertEqual(len(u), len(self.items) + len(self.items2))
self.items2.pop()
gc.collect()
self.assertEqual(len(u), len(self.items) + len(self.items2))
def test_or(self):
i = self.s.union(self.items2)
self.assertEqual(self.s | set(self.items2), i)
self.assertEqual(self.s | frozenset(self.items2), i)
def test_intersection(self):
s = WeakSet(self.letters)
i = s.intersection(self.items2)
for c in self.letters:
self.assertEqual(c in i, c in self.items2 and c in self.letters)
self.assertEqual(s, WeakSet(self.letters))
self.assertEqual(type(i), WeakSet)
for C in set, frozenset, dict.fromkeys, list, tuple:
x = WeakSet([])
self.assertEqual(i.intersection(C(self.items)), x)
self.assertEqual(len(i), len(self.items2))
self.items2.pop()
gc.collect()
self.assertEqual(len(i), len(self.items2))
def test_isdisjoint(self):
self.assertTrue(self.s.isdisjoint(WeakSet(self.items2)))
self.assertTrue(not self.s.isdisjoint(WeakSet(self.letters)))
def test_and(self):
i = self.s.intersection(self.items2)
self.assertEqual(self.s & set(self.items2), i)
self.assertEqual(self.s & frozenset(self.items2), i)
def test_difference(self):
i = self.s.difference(self.items2)
for c in self.letters:
self.assertEqual(c in i, c in self.d and c not in self.items2)
self.assertEqual(self.s, WeakSet(self.items))
self.assertEqual(type(i), WeakSet)
self.assertRaises(TypeError, self.s.difference, [[]])
def test_sub(self):
i = self.s.difference(self.items2)
self.assertEqual(self.s - set(self.items2), i)
self.assertEqual(self.s - frozenset(self.items2), i)
def test_symmetric_difference(self):
i = self.s.symmetric_difference(self.items2)
for c in self.letters:
self.assertEqual(c in i, (c in self.d) ^ (c in self.items2))
self.assertEqual(self.s, WeakSet(self.items))
self.assertEqual(type(i), WeakSet)
self.assertRaises(TypeError, self.s.symmetric_difference, [[]])
self.assertEqual(len(i), len(self.items) + len(self.items2))
self.items2.pop()
gc.collect()
self.assertEqual(len(i), len(self.items) + len(self.items2))
def test_xor(self):
i = self.s.symmetric_difference(self.items2)
self.assertEqual(self.s ^ set(self.items2), i)
self.assertEqual(self.s ^ frozenset(self.items2), i)
def test_sub_and_super(self):
self.assertTrue(self.ab_weakset <= self.abcde_weakset)
self.assertTrue(self.abcde_weakset <= self.abcde_weakset)
self.assertTrue(self.abcde_weakset >= self.ab_weakset)
self.assertFalse(self.abcde_weakset <= self.def_weakset)
self.assertFalse(self.abcde_weakset >= self.def_weakset)
self.assertTrue(set('a').issubset('abc'))
self.assertTrue(set('abc').issuperset('a'))
self.assertFalse(set('a').issubset('cbs'))
self.assertFalse(set('cbs').issuperset('a'))
def test_lt(self):
self.assertTrue(self.ab_weakset < self.abcde_weakset)
self.assertFalse(self.abcde_weakset < self.def_weakset)
self.assertFalse(self.ab_weakset < self.ab_weakset)
self.assertFalse(WeakSet() < WeakSet())
def test_gt(self):
self.assertTrue(self.abcde_weakset > self.ab_weakset)
self.assertFalse(self.abcde_weakset > self.def_weakset)
self.assertFalse(self.ab_weakset > self.ab_weakset)
self.assertFalse(WeakSet() > WeakSet())
def test_gc(self):
# Create a nest of cycles to exercise overall ref count check
s = WeakSet(Foo() for i in range(1000))
for elem in s:
elem.cycle = s
elem.sub = elem
elem.set = WeakSet([elem])
def test_subclass_with_custom_hash(self):
# Bug #1257731
class H(WeakSet):
def __hash__(self):
return int(id(self) & 0x7fffffff)
s=H()
f=set()
f.add(s)
self.assertIn(s, f)
f.remove(s)
f.add(s)
f.discard(s)
def test_init(self):
s = WeakSet()
s.__init__(self.items)
self.assertEqual(s, self.s)
s.__init__(self.items2)
self.assertEqual(s, WeakSet(self.items2))
self.assertRaises(TypeError, s.__init__, s, 2);
self.assertRaises(TypeError, s.__init__, 1);
def test_constructor_identity(self):
s = WeakSet(self.items)
t = WeakSet(s)
self.assertNotEqual(id(s), id(t))
def test_hash(self):
self.assertRaises(TypeError, hash, self.s)
def test_clear(self):
self.s.clear()
self.assertEqual(self.s, WeakSet([]))
self.assertEqual(len(self.s), 0)
def test_copy(self):
dup = self.s.copy()
self.assertEqual(self.s, dup)
self.assertNotEqual(id(self.s), id(dup))
def test_add(self):
x = SomeClass('Q')
self.s.add(x)
self.assertIn(x, self.s)
dup = self.s.copy()
self.s.add(x)
self.assertEqual(self.s, dup)
self.assertRaises(TypeError, self.s.add, [])
self.fs.add(Foo())
test_support.gc_collect()
self.assertTrue(len(self.fs) == 1)
self.fs.add(self.obj)
self.assertTrue(len(self.fs) == 1)
def test_remove(self):
x = SomeClass('a')
self.s.remove(x)
self.assertNotIn(x, self.s)
self.assertRaises(KeyError, self.s.remove, x)
self.assertRaises(TypeError, self.s.remove, [])
def test_discard(self):
a, q = SomeClass('a'), SomeClass('Q')
self.s.discard(a)
self.assertNotIn(a, self.s)
self.s.discard(q)
self.assertRaises(TypeError, self.s.discard, [])
def test_pop(self):
for i in range(len(self.s)):
elem = self.s.pop()
self.assertNotIn(elem, self.s)
self.assertRaises(KeyError, self.s.pop)
def test_update(self):
retval = self.s.update(self.items2)
self.assertEqual(retval, None)
for c in (self.items + self.items2):
self.assertIn(c, self.s)
self.assertRaises(TypeError, self.s.update, [[]])
def test_update_set(self):
self.s.update(set(self.items2))
for c in (self.items + self.items2):
self.assertIn(c, self.s)
def test_ior(self):
self.s |= set(self.items2)
for c in (self.items + self.items2):
self.assertIn(c, self.s)
def test_intersection_update(self):
retval = self.s.intersection_update(self.items2)
self.assertEqual(retval, None)
for c in (self.items + self.items2):
if c in self.items2 and c in self.items:
self.assertIn(c, self.s)
else:
self.assertNotIn(c, self.s)
self.assertRaises(TypeError, self.s.intersection_update, [[]])
def test_iand(self):
self.s &= set(self.items2)
for c in (self.items + self.items2):
if c in self.items2 and c in self.items:
self.assertIn(c, self.s)
else:
self.assertNotIn(c, self.s)
def test_difference_update(self):
retval = self.s.difference_update(self.items2)
self.assertEqual(retval, None)
for c in (self.items + self.items2):
if c in self.items and c not in self.items2:
self.assertIn(c, self.s)
else:
self.assertNotIn(c, self.s)
self.assertRaises(TypeError, self.s.difference_update, [[]])
self.assertRaises(TypeError, self.s.symmetric_difference_update, [[]])
def test_isub(self):
self.s -= set(self.items2)
for c in (self.items + self.items2):
if c in self.items and c not in self.items2:
self.assertIn(c, self.s)
else:
self.assertNotIn(c, self.s)
def test_symmetric_difference_update(self):
retval = self.s.symmetric_difference_update(self.items2)
self.assertEqual(retval, None)
for c in (self.items + self.items2):
if (c in self.items) ^ (c in self.items2):
self.assertIn(c, self.s)
else:
self.assertNotIn(c, self.s)
self.assertRaises(TypeError, self.s.symmetric_difference_update, [[]])
def test_ixor(self):
self.s ^= set(self.items2)
for c in (self.items + self.items2):
if (c in self.items) ^ (c in self.items2):
self.assertIn(c, self.s)
else:
self.assertNotIn(c, self.s)
def test_inplace_on_self(self):
t = self.s.copy()
t |= t
self.assertEqual(t, self.s)
t &= t
self.assertEqual(t, self.s)
t -= t
self.assertEqual(t, WeakSet())
t = self.s.copy()
t ^= t
self.assertEqual(t, WeakSet())
def test_eq(self):
# issue 5964
self.assertTrue(self.s == self.s)
self.assertTrue(self.s == WeakSet(self.items))
self.assertFalse(self.s == set(self.items))
self.assertFalse(self.s == list(self.items))
self.assertFalse(self.s == tuple(self.items))
self.assertFalse(self.s == 1)
def test_ne(self):
self.assertTrue(self.s != set(self.items))
s1 = WeakSet()
s2 = WeakSet()
self.assertFalse(s1 != s2)
def test_weak_destroy_while_iterating(self):
# Issue #7105: iterators shouldn't crash when a key is implicitly removed
# Create new items to be sure no-one else holds a reference
items = [SomeClass(c) for c in ('a', 'b', 'c')]
s = WeakSet(items)
it = iter(s)
next(it) # Trigger internal iteration
# Destroy an item
del items[-1]
test_support.gc_collect()
# We have removed either the first consumed items, or another one
self.assertIn(len(list(it)), [len(items), len(items) - 1])
del it
test_support.gc_collect()
# The removal has been committed
self.assertEqual(len(s), len(items))
def test_weak_destroy_and_mutate_while_iterating(self):
# Issue #7105: iterators shouldn't crash when a key is implicitly removed
items = [SomeClass(c) for c in string.ascii_letters]
s = WeakSet(items)
@contextlib.contextmanager
def testcontext():
try:
it = iter(s)
next(it)
# Schedule an item for removal and recreate it
u = SomeClass(str(items.pop()))
gc.collect() # just in case
yield u
finally:
it = None # should commit all removals
with testcontext() as u:
self.assertNotIn(u, s)
with testcontext() as u:
self.assertRaises(KeyError, s.remove, u)
self.assertNotIn(u, s)
with testcontext() as u:
s.add(u)
self.assertIn(u, s)
t = s.copy()
with testcontext() as u:
s.update(t)
self.assertEqual(len(s), len(t))
with testcontext() as u:
s.clear()
self.assertEqual(len(s), 0)
def test_len_cycles(self):
N = 20
items = [RefCycle() for i in range(N)]
s = WeakSet(items)
del items
it = iter(s)
try:
next(it)
except StopIteration:
pass
gc.collect()
n1 = len(s)
del it
test_support.gc_collect()
n2 = len(s)
# one item may be kept alive inside the iterator
self.assertIn(n1, (0, 1))
self.assertEqual(n2, 0)
def test_len_race(self):
# Extended sanity checks for len() in the face of cyclic collection
#self.addCleanup(gc.set_threshold, *gc.get_threshold())
for th in range(1, 100):
N = 20
test_support.gc_collect()
#gc.set_threshold(th, th, th)
items = [RefCycle() for i in range(N)]
s = WeakSet(items)
del items
# All items will be collected at next garbage collection pass
it = iter(s)
try:
next(it)
except StopIteration:
pass
n1 = len(s)
del it
n2 = len(s)
self.assertGreaterEqual(n1, 0)
self.assertLessEqual(n1, N)
self.assertGreaterEqual(n2, 0)
self.assertLessEqual(n2, n1)
def test_weak_destroy_while_iterating(self):
# Issue #7105: iterators shouldn't crash when a key is implicitly removed
# Create new items to be sure no-one else holds a reference
items = [ustr(c) for c in ('a', 'b', 'c')]
s = WeakSet(items)
it = iter(s)
next(it) # Trigger internal iteration
# Destroy an item
del items[-1]
gc.collect() # just in case
# We have removed either the first consumed items, or another one
self.assertIn(len(list(it)), [len(items), len(items) - 1])
del it
# The removal has been committed
self.assertEqual(len(s), len(items))
    def test_weak_destroy_and_mutate_while_iterating(self):
        # Issue #7105: iterators shouldn't crash when a key is implicitly removed
        # Unlike test_weak_destroy_while_iterating, this also mutates the
        # set (remove/add/update/clear) while an iterator is suspended.
        items = [ustr(c) for c in string.ascii_letters]
        s = WeakSet(items)
        @contextlib.contextmanager
        def testcontext():
            # Yields a ustr equal to an element whose last strong reference
            # was just dropped, while `it` keeps iteration suspended; the
            # `finally` drops the iterator so pending removals are applied.
            try:
                it = iter(s)
                # Start iterator
                yielded = ustr(str(next(it)))
                # Schedule an item for removal and recreate it
                u = ustr(str(items.pop()))
                if yielded == u:
                    # The iterator still has a reference to the removed item,
                    # advance it (issue #20006).
                    next(it)
                gc.collect() # just in case
                yield u
            finally:
                it = None # should commit all removals
        with testcontext() as u:
            self.assertFalse(u in s)
        with testcontext() as u:
            self.assertRaises(KeyError, s.remove, u)
            self.assertFalse(u in s)
        with testcontext() as u:
            s.add(u)
            self.assertTrue(u in s)
        t = s.copy()
        with testcontext() as u:
            s.update(t)
            self.assertEqual(len(s), len(t))
        with testcontext() as u:
            s.clear()
            self.assertEqual(len(s), 0)
def test_main(verbose=None):
    # Entry point for the legacy regrtest-style harness.
    test_support.run_unittest(TestWeakSet)
if __name__ == "__main__":
    test_main(verbose=True)
| {
"content_hash": "5b744cad2292a47955f3126b2cd29266",
"timestamp": "",
"source": "github",
"line_count": 514,
"max_line_length": 81,
"avg_line_length": 34.81517509727627,
"alnum_prop": 0.5668063704945515,
"repo_name": "shiblon/pytour",
"id": "c939dad27f23d2245e4607fdfc6ea051ebecdd90",
"size": "17895",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "static/js/pypyjs/pypy-nojit.js-0.3.1/lib/modules/test/test_weakset.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "195977"
},
{
"name": "HTML",
"bytes": "2110262"
},
{
"name": "JavaScript",
"bytes": "5106892"
},
{
"name": "Python",
"bytes": "15081380"
},
{
"name": "Shell",
"bytes": "1018"
}
],
"symlink_target": ""
} |
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

# NOTE(review): this region contained an unresolved git merge conflict
# (<<<<<<< / ======= / >>>>>>> markers) wrapping three byte-identical
# copies of the module, which made the file unparseable. Resolved by
# keeping a single copy; stray trailing semicolons were also dropped.

"""Unit tests for the common.py file."""

import gyp.common
import unittest
import sys


class TestTopologicallySorted(unittest.TestCase):
  def test_Valid(self):
    """Test that sorting works on a valid graph with one possible order."""
    graph = {
      'a': ['b', 'c'],
      'b': [],
      'c': ['d'],
      'd': ['b'],
    }
    def GetEdge(node):
      return tuple(graph[node])
    self.assertEqual(
      gyp.common.TopologicallySorted(graph.keys(), GetEdge),
      ['a', 'c', 'd', 'b'])

  def test_Cycle(self):
    """Test that an exception is thrown on a cyclic graph."""
    graph = {
      'a': ['b'],
      'b': ['c'],
      'c': ['d'],
      'd': ['a'],
    }
    def GetEdge(node):
      return tuple(graph[node])
    self.assertRaises(
      gyp.common.CycleError, gyp.common.TopologicallySorted,
      graph.keys(), GetEdge)


class TestGetFlavor(unittest.TestCase):
  """Test that gyp.common.GetFlavor works as intended"""
  original_platform = ''

  def setUp(self):
    # GetFlavor() keys off sys.platform; remember the real value so
    # tearDown can restore it after each test monkey-patches it.
    self.original_platform = sys.platform

  def tearDown(self):
    sys.platform = self.original_platform

  def assertFlavor(self, expected, argument, param):
    """Assert GetFlavor(param) == expected when sys.platform == argument."""
    sys.platform = argument
    self.assertEqual(expected, gyp.common.GetFlavor(param))

  def test_platform_default(self):
    self.assertFlavor('freebsd', 'freebsd9' , {})
    self.assertFlavor('freebsd', 'freebsd10', {})
    self.assertFlavor('openbsd', 'openbsd5' , {})
    self.assertFlavor('solaris', 'sunos5'   , {})
    self.assertFlavor('solaris', 'sunos'    , {})
    self.assertFlavor('linux'  , 'linux2'   , {})
    self.assertFlavor('linux'  , 'linux3'   , {})

  def test_param(self):
    # An explicit 'flavor' entry in the params dict wins over sys.platform.
    self.assertFlavor('foobar', 'linux2' , {'flavor': 'foobar'})


if __name__ == '__main__':
  unittest.main()
| {
"content_hash": "3475512182878930d6a9b3ece892036c",
"timestamp": "",
"source": "github",
"line_count": 222,
"max_line_length": 75,
"avg_line_length": 27.25225225225225,
"alnum_prop": 0.608099173553719,
"repo_name": "ArcherSys/ArcherSys",
"id": "c94412fcf666582ec4ca48f51275a48387f25977",
"size": "6050",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "node_modules/npm/node_modules/node-gyp/gyp/pylib/gyp/common_test.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
} |
'''
Covenant Add-on
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import os,sys,urlparse
from resources.lib.modules import control
from resources.lib.modules import trakt
from resources.lib.modules import cache
# Module-level plugin state, resolved once per invocation.
# sysaddon is the plugin:// base URL, syshandle the Kodi directory handle.
sysaddon = sys.argv[0] ; syshandle = int(sys.argv[1]) ; control.moderator()
artPath = control.artPath() ; addonFanart = control.addonFanart()
# Credential/indicator flags drive which menu entries are shown below.
imdbCredentials = False if control.setting('imdb.user') == '' else True
traktCredentials = trakt.getTraktCredentialsInfo()
traktIndicators = trakt.getTraktIndicatorsInfo()
# Localized "add to queue" context-menu label (Python 2: byte string).
queueMenu = control.lang(32065).encode('utf-8')
class navigator:
    """Builds the add-on's Kodi directory menus.

    Each menu method emits entries via ``addDirectoryItem`` and finishes
    the listing with ``endDirectory``. Python 2 era Kodi plugin code:
    labels are byte-encoded with ``.encode('utf-8')`` throughout, and the
    first argument to ``addDirectoryItem`` is a localization string id.
    """
    def root(self):
        """Top-level menu: movies, TV, widgets, tools, downloads, search."""
        self.addDirectoryItem(32001, 'movieNavigator', 'movies.png', 'DefaultMovies.png')
        self.addDirectoryItem(32002, 'tvNavigator', 'tvshows.png', 'DefaultTVShows.png')
        if not control.setting('lists.widget') == '0':
            self.addDirectoryItem(32003, 'mymovieNavigator', 'mymovies.png', 'DefaultVideoPlaylists.png')
            self.addDirectoryItem(32004, 'mytvNavigator', 'mytvshows.png', 'DefaultVideoPlaylists.png')
        if not control.setting('movie.widget') == '0':
            self.addDirectoryItem(32005, 'movieWidget', 'latest-movies.png', 'DefaultRecentlyAddedMovies.png')
        # Trakt users are governed by the alternate 'tv.widget.alt' setting.
        if (traktIndicators == True and not control.setting('tv.widget.alt') == '0') or (traktIndicators == False and not control.setting('tv.widget') == '0'):
            self.addDirectoryItem(32006, 'tvWidget', 'latest-episodes.png', 'DefaultRecentlyAddedEpisodes.png')
        self.addDirectoryItem(32007, 'channels', 'channels.png', 'DefaultMovies.png')
        self.addDirectoryItem(32008, 'toolNavigator', 'tools.png', 'DefaultAddonProgram.png')
        # Show the downloads entry only when downloading is enabled and at
        # least one configured download folder is non-empty.
        downloads = True if control.setting('downloads') == 'true' and (len(control.listDir(control.setting('movie.download.path'))[0]) > 0 or len(control.listDir(control.setting('tv.download.path'))[0]) > 0) else False
        if downloads == True:
            self.addDirectoryItem(32009, 'downloadNavigator', 'downloads.png', 'DefaultFolder.png')
        self.addDirectoryItem(32010, 'searchNavigator', 'search.png', 'DefaultFolder.png')
        self.endDirectory()
    def movies(self, lite=False):
        """Movies menu. ``lite=True`` omits the personal-lists/search tail."""
        self.addDirectoryItem(32011, 'movieGenres', 'genres.png', 'DefaultMovies.png')
        self.addDirectoryItem(32012, 'movieYears', 'years.png', 'DefaultMovies.png')
        self.addDirectoryItem(32013, 'moviePersons', 'people.png', 'DefaultMovies.png')
        self.addDirectoryItem(32014, 'movieLanguages', 'languages.png', 'DefaultMovies.png')
        self.addDirectoryItem(32015, 'movieCertificates', 'certificates.png', 'DefaultMovies.png')
        self.addDirectoryItem(32017, 'movies&url=trending', 'people-watching.png', 'DefaultRecentlyAddedMovies.png')
        self.addDirectoryItem(32018, 'movies&url=popular', 'most-popular.png', 'DefaultMovies.png')
        self.addDirectoryItem(32019, 'movies&url=views', 'most-voted.png', 'DefaultMovies.png')
        self.addDirectoryItem(32020, 'movies&url=boxoffice', 'box-office.png', 'DefaultMovies.png')
        self.addDirectoryItem(32021, 'movies&url=oscars', 'oscar-winners.png', 'DefaultMovies.png')
        self.addDirectoryItem(32022, 'movies&url=theaters', 'in-theaters.png', 'DefaultRecentlyAddedMovies.png')
        self.addDirectoryItem(32005, 'movieWidget', 'latest-movies.png', 'DefaultRecentlyAddedMovies.png')
        if lite == False:
            if not control.setting('lists.widget') == '0':
                self.addDirectoryItem(32003, 'mymovieliteNavigator', 'mymovies.png', 'DefaultVideoPlaylists.png')
            self.addDirectoryItem(32028, 'moviePerson', 'people-search.png', 'DefaultMovies.png')
            self.addDirectoryItem(32010, 'movieSearch', 'search.png', 'DefaultMovies.png')
        self.endDirectory()
    def mymovies(self, lite=False):
        """Personal movie lists; entries depend on trakt/IMDb credentials."""
        self.accountCheck()
        if traktCredentials == True and imdbCredentials == True:
            self.addDirectoryItem(32032, 'movies&url=traktcollection', 'trakt.png', 'DefaultMovies.png', queue=True, context=(32551, 'moviesToLibrary&url=traktcollection'))
            self.addDirectoryItem(32033, 'movies&url=traktwatchlist', 'trakt.png', 'DefaultMovies.png', queue=True, context=(32551, 'moviesToLibrary&url=traktwatchlist'))
            self.addDirectoryItem(32034, 'movies&url=imdbwatchlist', 'imdb.png', 'DefaultMovies.png', queue=True)
        elif traktCredentials == True:
            self.addDirectoryItem(32032, 'movies&url=traktcollection', 'trakt.png', 'DefaultMovies.png', queue=True, context=(32551, 'moviesToLibrary&url=traktcollection'))
            self.addDirectoryItem(32033, 'movies&url=traktwatchlist', 'trakt.png', 'DefaultMovies.png', queue=True, context=(32551, 'moviesToLibrary&url=traktwatchlist'))
        elif imdbCredentials == True:
            self.addDirectoryItem(32032, 'movies&url=imdbwatchlist', 'imdb.png', 'DefaultMovies.png', queue=True)
            self.addDirectoryItem(32033, 'movies&url=imdbwatchlist2', 'imdb.png', 'DefaultMovies.png', queue=True)
        if traktCredentials == True:
            self.addDirectoryItem(32035, 'movies&url=traktfeatured', 'trakt.png', 'DefaultMovies.png', queue=True)
        elif imdbCredentials == True:
            self.addDirectoryItem(32035, 'movies&url=featured', 'imdb.png', 'DefaultMovies.png', queue=True)
        if traktIndicators == True:
            self.addDirectoryItem(32036, 'movies&url=trakthistory', 'trakt.png', 'DefaultMovies.png', queue=True)
        self.addDirectoryItem(32039, 'movieUserlists', 'userlists.png', 'DefaultMovies.png')
        if lite == False:
            self.addDirectoryItem(32031, 'movieliteNavigator', 'movies.png', 'DefaultMovies.png')
            self.addDirectoryItem(32028, 'moviePerson', 'people-search.png', 'DefaultMovies.png')
            self.addDirectoryItem(32010, 'movieSearch', 'search.png', 'DefaultMovies.png')
        self.endDirectory()
    def tvshows(self, lite=False):
        """TV shows menu. ``lite=True`` omits the personal-lists/search tail."""
        self.addDirectoryItem(32011, 'tvGenres', 'genres.png', 'DefaultTVShows.png')
        self.addDirectoryItem(32016, 'tvNetworks', 'networks.png', 'DefaultTVShows.png')
        self.addDirectoryItem(32014, 'tvLanguages', 'languages.png', 'DefaultTVShows.png')
        self.addDirectoryItem(32015, 'tvCertificates', 'certificates.png', 'DefaultTVShows.png')
        self.addDirectoryItem(32017, 'tvshows&url=trending', 'people-watching.png', 'DefaultRecentlyAddedEpisodes.png')
        self.addDirectoryItem(32018, 'tvshows&url=popular', 'most-popular.png', 'DefaultTVShows.png')
        self.addDirectoryItem(32023, 'tvshows&url=rating', 'highly-rated.png', 'DefaultTVShows.png')
        self.addDirectoryItem(32019, 'tvshows&url=views', 'most-voted.png', 'DefaultTVShows.png')
        self.addDirectoryItem(32024, 'tvshows&url=airing', 'airing-today.png', 'DefaultTVShows.png')
        #self.addDirectoryItem(32025, 'tvshows&url=active', 'returning-tvshows.png', 'DefaultTVShows.png')
        self.addDirectoryItem(32026, 'tvshows&url=premiere', 'new-tvshows.png', 'DefaultTVShows.png')
        self.addDirectoryItem(32006, 'calendar&url=added', 'latest-episodes.png', 'DefaultRecentlyAddedEpisodes.png', queue=True)
        self.addDirectoryItem(32027, 'calendars', 'calendar.png', 'DefaultRecentlyAddedEpisodes.png')
        if lite == False:
            if not control.setting('lists.widget') == '0':
                self.addDirectoryItem(32004, 'mytvliteNavigator', 'mytvshows.png', 'DefaultVideoPlaylists.png')
            self.addDirectoryItem(32028, 'tvPerson', 'people-search.png', 'DefaultTVShows.png')
            self.addDirectoryItem(32010, 'tvSearch', 'search.png', 'DefaultTVShows.png')
        self.endDirectory()
    def mytvshows(self, lite=False):
        """Personal TV lists; entries depend on trakt/IMDb credentials."""
        self.accountCheck()
        if traktCredentials == True and imdbCredentials == True:
            self.addDirectoryItem(32032, 'tvshows&url=traktcollection', 'trakt.png', 'DefaultTVShows.png', context=(32551, 'tvshowsToLibrary&url=traktcollection'))
            self.addDirectoryItem(32033, 'tvshows&url=traktwatchlist', 'trakt.png', 'DefaultTVShows.png', context=(32551, 'tvshowsToLibrary&url=traktwatchlist'))
            self.addDirectoryItem(32034, 'tvshows&url=imdbwatchlist', 'imdb.png', 'DefaultTVShows.png')
        elif traktCredentials == True:
            self.addDirectoryItem(32032, 'tvshows&url=traktcollection', 'trakt.png', 'DefaultTVShows.png', context=(32551, 'tvshowsToLibrary&url=traktcollection'))
            self.addDirectoryItem(32033, 'tvshows&url=traktwatchlist', 'trakt.png', 'DefaultTVShows.png', context=(32551, 'tvshowsToLibrary&url=traktwatchlist'))
        elif imdbCredentials == True:
            self.addDirectoryItem(32032, 'tvshows&url=imdbwatchlist', 'imdb.png', 'DefaultTVShows.png')
            self.addDirectoryItem(32033, 'tvshows&url=imdbwatchlist2', 'imdb.png', 'DefaultTVShows.png')
        if traktCredentials == True:
            self.addDirectoryItem(32035, 'tvshows&url=traktfeatured', 'trakt.png', 'DefaultTVShows.png')
        elif imdbCredentials == True:
            self.addDirectoryItem(32035, 'tvshows&url=trending', 'imdb.png', 'DefaultMovies.png', queue=True)
        if traktIndicators == True:
            self.addDirectoryItem(32036, 'calendar&url=trakthistory', 'trakt.png', 'DefaultTVShows.png', queue=True)
        self.addDirectoryItem(32037, 'calendar&url=progress', 'trakt.png', 'DefaultRecentlyAddedEpisodes.png', queue=True)
        self.addDirectoryItem(32038, 'calendar&url=mycalendar', 'trakt.png', 'DefaultRecentlyAddedEpisodes.png', queue=True)
        self.addDirectoryItem(32040, 'tvUserlists', 'userlists.png', 'DefaultTVShows.png')
        if traktCredentials == True:
            self.addDirectoryItem(32041, 'episodeUserlists', 'userlists.png', 'DefaultTVShows.png')
        if lite == False:
            self.addDirectoryItem(32031, 'tvliteNavigator', 'tvshows.png', 'DefaultTVShows.png')
            self.addDirectoryItem(32028, 'tvPerson', 'people-search.png', 'DefaultTVShows.png')
            self.addDirectoryItem(32010, 'tvSearch', 'search.png', 'DefaultTVShows.png')
        self.endDirectory()
    def tools(self):
        """Tools menu: settings sections, library, views, cache maintenance."""
        self.addDirectoryItem(32043, 'openSettings&query=0.0', 'tools.png', 'DefaultAddonProgram.png')
        self.addDirectoryItem(32044, 'openSettings&query=3.1', 'tools.png', 'DefaultAddonProgram.png')
        self.addDirectoryItem(32045, 'openSettings&query=1.0', 'tools.png', 'DefaultAddonProgram.png')
        self.addDirectoryItem(32046, 'openSettings&query=6.0', 'tools.png', 'DefaultAddonProgram.png')
        self.addDirectoryItem(32047, 'openSettings&query=2.0', 'tools.png', 'DefaultAddonProgram.png')
        self.addDirectoryItem(32556, 'libraryNavigator', 'tools.png', 'DefaultAddonProgram.png')
        self.addDirectoryItem(32048, 'openSettings&query=5.0', 'tools.png', 'DefaultAddonProgram.png')
        self.addDirectoryItem(32049, 'viewsNavigator', 'tools.png', 'DefaultAddonProgram.png')
        self.addDirectoryItem(32050, 'clearSources', 'tools.png', 'DefaultAddonProgram.png')
        self.addDirectoryItem(32604, 'clearCacheSearch', 'tools.png', 'DefaultAddonProgram.png')
        self.addDirectoryItem(32052, 'clearCache', 'tools.png', 'DefaultAddonProgram.png')
        self.addDirectoryItem(32073, 'authTrakt', 'trakt.png', 'DefaultAddonProgram.png')
        self.endDirectory()
    def library(self):
        """Library menu: settings, update, local folders, trakt sync entries."""
        self.addDirectoryItem(32557, 'openSettings&query=4.0', 'tools.png', 'DefaultAddonProgram.png')
        self.addDirectoryItem(32558, 'updateLibrary&query=tool', 'library_update.png', 'DefaultAddonProgram.png')
        # isAction=False: these two entries point directly at folder paths.
        self.addDirectoryItem(32559, control.setting('library.movie'), 'movies.png', 'DefaultMovies.png', isAction=False)
        self.addDirectoryItem(32560, control.setting('library.tv'), 'tvshows.png', 'DefaultTVShows.png', isAction=False)
        if trakt.getTraktCredentialsInfo():
            self.addDirectoryItem(32561, 'moviesToLibrary&url=traktcollection', 'trakt.png', 'DefaultMovies.png')
            self.addDirectoryItem(32562, 'moviesToLibrary&url=traktwatchlist', 'trakt.png', 'DefaultMovies.png')
            self.addDirectoryItem(32563, 'tvshowsToLibrary&url=traktcollection', 'trakt.png', 'DefaultTVShows.png')
            self.addDirectoryItem(32564, 'tvshowsToLibrary&url=traktwatchlist', 'trakt.png', 'DefaultTVShows.png')
        self.endDirectory()
    def downloads(self):
        """Downloads menu: link non-empty movie/TV download folders."""
        movie_downloads = control.setting('movie.download.path')
        tv_downloads = control.setting('tv.download.path')
        if len(control.listDir(movie_downloads)[0]) > 0:
            self.addDirectoryItem(32001, movie_downloads, 'movies.png', 'DefaultMovies.png', isAction=False)
        if len(control.listDir(tv_downloads)[0]) > 0:
            self.addDirectoryItem(32002, tv_downloads, 'tvshows.png', 'DefaultTVShows.png', isAction=False)
        self.endDirectory()
    def search(self):
        """Search menu: title and person search for movies and TV."""
        self.addDirectoryItem(32001, 'movieSearch', 'search.png', 'DefaultMovies.png')
        self.addDirectoryItem(32002, 'tvSearch', 'search.png', 'DefaultTVShows.png')
        self.addDirectoryItem(32029, 'moviePerson', 'people-search.png', 'DefaultMovies.png')
        self.addDirectoryItem(32030, 'tvPerson', 'people-search.png', 'DefaultTVShows.png')
        self.endDirectory()
    def views(self):
        """Prompt for a content type, then open the view-selection dialog.

        NOTE(review): failures are swallowed by the bare ``except`` below
        (Kodi best-effort UI idiom); any error simply aborts the dialog.
        """
        try:
            control.idle()
            items = [ (control.lang(32001).encode('utf-8'), 'movies'), (control.lang(32002).encode('utf-8'), 'tvshows'), (control.lang(32054).encode('utf-8'), 'seasons'), (control.lang(32038).encode('utf-8'), 'episodes') ]
            select = control.selectDialog([i[0] for i in items], control.lang(32049).encode('utf-8'))
            if select == -1: return
            content = items[select][1]
            title = control.lang(32059).encode('utf-8')
            url = '%s?action=addView&content=%s' % (sys.argv[0], content)
            poster, banner, fanart = control.addonPoster(), control.addonBanner(), control.addonFanart()
            item = control.item(label=title)
            item.setInfo(type='Video', infoLabels = {'title': title})
            item.setArt({'icon': poster, 'thumb': poster, 'poster': poster, 'banner': banner})
            item.setProperty('Fanart_Image', fanart)
            control.addItem(handle=int(sys.argv[1]), url=url, listitem=item, isFolder=False)
            control.content(int(sys.argv[1]), content)
            control.directory(int(sys.argv[1]), cacheToDisc=True)
            from resources.lib.modules import views
            views.setView(content, {})
        except:
            return
    def accountCheck(self):
        """Abort (sys.exit) with a warning dialog when no account is set up."""
        if traktCredentials == False and imdbCredentials == False:
            control.idle()
            control.infoDialog(control.lang(32042).encode('utf-8'), sound=True, icon='WARNING')
            sys.exit()
    def infoCheck(self, version):
        """Show a transient info dialog; always returns '1'.

        NOTE(review): `version` is unused and both paths return '1' --
        presumably a vestigial update-check hook; confirm before removing.
        """
        try:
            control.infoDialog('', control.lang(32074).encode('utf-8'), time=5000, sound=False)
            return '1'
        except:
            return '1'
    def clearCache(self):
        """Clear the general cache after a yes/no confirmation."""
        control.idle()
        yes = control.yesnoDialog(control.lang(32056).encode('utf-8'), '', '')
        if not yes: return
        from resources.lib.modules import cache
        cache.cache_clear()
        control.infoDialog(control.lang(32057).encode('utf-8'), sound=True, icon='INFO')
    def clearCacheMeta(self):
        """Clear the metadata cache after a yes/no confirmation."""
        control.idle()
        yes = control.yesnoDialog(control.lang(32056).encode('utf-8'), '', '')
        if not yes: return
        from resources.lib.modules import cache
        cache.cache_clear_meta()
        control.infoDialog(control.lang(32057).encode('utf-8'), sound=True, icon='INFO')
    def clearCacheProviders(self):
        """Clear the providers cache after a yes/no confirmation."""
        control.idle()
        yes = control.yesnoDialog(control.lang(32056).encode('utf-8'), '', '')
        if not yes: return
        from resources.lib.modules import cache
        cache.cache_clear_providers()
        control.infoDialog(control.lang(32057).encode('utf-8'), sound=True, icon='INFO')
    def clearCacheSearch(self):
        """Clear the search-history cache after a yes/no confirmation."""
        control.idle()
        yes = control.yesnoDialog(control.lang(32056).encode('utf-8'), '', '')
        if not yes: return
        from resources.lib.modules import cache
        cache.cache_clear_search()
        control.infoDialog(control.lang(32057).encode('utf-8'), sound=True, icon='INFO')
    def clearCacheAll(self):
        """Clear every cache after a yes/no confirmation."""
        control.idle()
        yes = control.yesnoDialog(control.lang(32056).encode('utf-8'), '', '')
        if not yes: return
        from resources.lib.modules import cache
        cache.cache_clear_all()
        control.infoDialog(control.lang(32057).encode('utf-8'), sound=True, icon='INFO')
    def addDirectoryItem(self, name, query, thumb, icon, context=None, queue=False, isAction=True, isFolder=True):
        """Add one Kodi list item.

        name: localization id (or literal label); query: plugin action query
        (or a raw path when isAction=False); context: (label_id, action)
        extra context-menu entry; queue: add the "queue" context entry.
        """
        try: name = control.lang(name).encode('utf-8')
        except: pass
        url = '%s?action=%s' % (sysaddon, query) if isAction == True else query
        thumb = os.path.join(artPath, thumb) if not artPath == None else icon
        cm = []
        if queue == True: cm.append((queueMenu, 'RunPlugin(%s?action=queueItem)' % sysaddon))
        if not context == None: cm.append((control.lang(context[0]).encode('utf-8'), 'RunPlugin(%s?action=%s)' % (sysaddon, context[1])))
        item = control.item(label=name)
        item.addContextMenuItems(cm)
        item.setArt({'icon': thumb, 'thumb': thumb})
        if not addonFanart == None: item.setProperty('Fanart_Image', addonFanart)
        control.addItem(handle=syshandle, url=url, listitem=item, isFolder=isFolder)
    def endDirectory(self):
        """Finalize the current listing (content type 'addons')."""
        control.content(syshandle, 'addons')
        control.directory(syshandle, cacheToDisc=True)
| {
"content_hash": "ec5d6d8477f4c8ba3e9fa81d48a1926a",
"timestamp": "",
"source": "github",
"line_count": 345,
"max_line_length": 222,
"avg_line_length": 53.26086956521739,
"alnum_prop": 0.6765714285714286,
"repo_name": "TheWardoctor/Wardoctors-repo",
"id": "0d8e11260719ecc7d34ed58038b9c4841ba531d3",
"size": "18400",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "script.module.uncoded/lib/resources/lib/indexers/navigator.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "3208"
},
{
"name": "JavaScript",
"bytes": "115722"
},
{
"name": "Python",
"bytes": "34405207"
},
{
"name": "Shell",
"bytes": "914"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from __future__ import print_function
import PyOpenWorm as P
P.connect('default.conf')
class NC_neighbor(P.Property):
    """Property that expands class-level 'neighbor' links to member neurons.

    Wraps (and replaces) the owner's ``neighbor`` Property: setting a
    NeuronClass as a neighbor fans the connection out to the individual
    left/right member neurons whose name suffix (side) matches.
    """
    def __init__(self, *args, **kwargs):
        P.Property.__init__(self, '_nb', *args, **kwargs)
        # Keep the original Property so set() can still record the
        # class-to-class link before fanning out to members.
        self.real_neighbor = self.owner.neighbor
        # Re-assigning neighbor Property
        self.owner.neighbor = self
    def get(self,**kwargs):
        # get the members of the class
        # NOTE(review): owner.neighbor was rebound to this object in
        # __init__, so this call presumably dispatches through
        # P.Property.__call__ rather than recursing -- confirm.
        for x in self.owner.neighbor():
            yield x
    def set(self, ob, **kwargs):
        self.real_neighbor(ob)
        if isinstance(ob, NeuronClass):
            ob_name = ob.name()
            this_name = self.owner.name()
            for x in ob.member():
                # Derive the matching member name: take the side suffix
                # ("L"/"R") of the neighbor member and append it to this
                # class's name.
                # XXX:
                try:
                    n = x.name()
                    side = n[n.find(ob_name)+len(ob_name):]
                    name_here = this_name + side
                    this_neuron = P.Neuron(name_here)
                    self.owner.member(this_neuron)
                    this_neuron.neighbor(x,**kwargs)
                except ValueError:
                    # XXX: could default to all-to-all semantics
                    print('Do not recognize the membership of this neuron/neuron class', ob)
        elif isinstance(ob, P.Neuron):
            # BUGFIX: was bare ``Neuron`` (undefined name -- only ``P`` is
            # imported in this module), which raised NameError whenever a
            # plain Neuron was assigned as a neighbor.
            for x in self.owner.member:
                x.neighbor(ob)
    def triples(self,*args,**kwargs):
        """ Stub. All of the actual relationships are encoded in Neuron.neighbor and NeuronClass.member """
        return []
# Circuit from http://www.ncbi.nlm.nih.gov/pmc/articles/PMC2760495/
class NeuronClass(P.Neuron):
    """A class of neurons (e.g. 'RMG' covering 'RMGL'/'RMGR').

    Adds a multi-valued 'member' ObjectProperty and swaps in NC_neighbor
    so neighbor links fan out to member neurons (see NC_neighbor).
    """
    def __init__(self, name=False, **kwargs):
        P.Neuron.__init__(self,**kwargs)
        NeuronClass.ObjectProperty('member', owner=self, value_type=P.Neuron, multiple=True)
        NC_neighbor(owner=self)
        if name:
            self.name(name)
# Register the new DataObject type with PyOpenWorm.
NeuronClass.register()
# A neuron class should be a way for saying what all neurons of a class have in common
# (The notation below is a bit of a mish-mash. basically it's PyOpenWorm without
# iterators, type notations with ':', and some Python string operations)
#
# nc : NeuronClass
# n : Neuron
# p : Property
# a : DataObject
# | Literal ;
# bc : NeuronClass
# b : Neuron
# d : Neuron
# p.linkName not in {'name', 'connection', 'neighbor'}
# nc.p(a)
#
# bc.member(b)
# b.name(bc.name() + n.name()[-1])
# nc.member(n)
# nc.neighbor(bc)
# nc.neighbor(d)
# class_name = nc.name()
# ------------------------------------[implies]-------
# n.p(a) # Any property except name, connection, and neighbor is the same as in nc
# n.neighbor(d) # For neighbors, if the neighbor is a neuron, then just the connection
# # holds for all members of the class
# n.neighbor(b) # Otherwise, the neuron (b) in the connected class on the same side as
# # n (i.e., the one for which the last character in its name matches the
# # last in n's name) in the neighbor
# n.name()[:-1] == nc.name()
#
# Setting up the data
#
# Evidence object citing the paper whose circuit this script encodes.
ev = P.Evidence(title="A Hub-and-Spoke Circuit Drives Pheromone Attraction and Social Behavior in C. elegans",
                uri="http://www.ncbi.nlm.nih.gov/pmc/articles/PMC2760495/",
                year=2009)
w = P.Worm("C. elegans")
net = P.Network()
w.neuron_network(net)
ev.asserts(w)
def setup(name,type):
    # Create a NeuronClass with left/right members (<name>L / <name>R)
    # and attach it to the network. NOTE(review): the parameter ``type``
    # shadows the builtin; kept as-is to preserve the published example.
    n = NeuronClass(name)
    n.type(type)
    n.member(P.Neuron(name+"R"))
    n.member(P.Neuron(name+"L"))
    net.neuron(n)
    return n
# The neuron classes participating in the hub-and-spoke circuit.
rmg = setup("RMG",'interneuron')
rmh = setup("RMH",'motor')
ask = setup("ASK",'sensory')
ash = setup("ASH",'sensory')
adl = setup("ADL",'sensory')
urx = setup("URX",'sensory')
awb = setup("AWB",'sensory')
il2 = setup("IL2",'sensory')
# describing the connections
# (source class, 'gj' gap junction | 'sn' send/chemical, target class)
d = [(ask, 'gj', rmg),
     (rmh, 'gj', rmg),
     (urx, 'gj', rmg),
     (urx, 'sn', rmg),
     (awb, 'gj', rmg),
     (il2, 'gj', rmg),
     (adl, 'gj', rmg),
     (ash, 'sn', rmg),
     (ash, 'gj', rmg),
     (rmg, 'sn', ash)]
for p,x,o in d:
    # Translate the shorthand into PyOpenWorm synapse type names.
    if x == 'gj':
        x='GapJunction'
    else:
        x='Send'
    p.neighbor(o, syntype=x)
ev.save()
# Query back: list every sensory neuron class and its member neurons.
nc = NeuronClass()
nc.type('sensory')
print('Sensory neuron classes in the circuit and their neurons')
# XXX: Add an evidence query like ev.asserts(nc.member(P.Neuron("ADLL")))
for x in nc.load():
    print(x.name(), "has:")
    for y in x.member():
        print(" ", y.name(), "of type", ",".join(y.type()))
P.disconnect()
| {
"content_hash": "2f0bde1f267d4607f668937b3cc742fa",
"timestamp": "",
"source": "github",
"line_count": 148,
"max_line_length": 110,
"avg_line_length": 30.236486486486488,
"alnum_prop": 0.580782122905028,
"repo_name": "gsarma/PyOpenWorm",
"id": "6de0874900b227734953b30e8a6bf9bed87cf212",
"size": "4475",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "examples/rmgr.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Prolog",
"bytes": "149462"
},
{
"name": "Python",
"bytes": "422141"
},
{
"name": "Shell",
"bytes": "493"
},
{
"name": "TeX",
"bytes": "7280"
}
],
"symlink_target": ""
} |
import six
from couchdbkit import ResourceNotFound
from dimagi.utils.couch.database import iter_docs
from .interface import DocumentStore
from pillowtop.dao.exceptions import DocumentMissingError, DocumentDeletedError, DocumentNotFoundError
ID_CHUNK_SIZE = 10000
class CouchDocumentStore(DocumentStore):
    """Document store backed by a couchdbkit database.

    ``domain`` and ``doc_type`` are only required for ``iter_document_ids``.
    """

    def __init__(self, couch_db, domain=None, doc_type=None):
        self._couch_db = couch_db
        self.domain = domain
        self.doc_type = doc_type

    def get_document(self, doc_id):
        """Fetch one document, mapping couch 404s to pillowtop errors."""
        try:
            return self._couch_db.get(doc_id)
        except ResourceNotFound as err:
            # couchdbkit reports a never-existed doc as 'missing'; any other
            # not-found reason means the document was deleted.
            reason = six.text_type(err)
            if reason == 'missing':
                raise DocumentMissingError()
            raise DocumentDeletedError()

    def iter_document_ids(self):
        """Yield all doc ids for this store's domain/doc_type in chunks."""
        from corehq.apps.domain.dbaccessors import iterate_doc_ids_in_domain_by_type
        if not (self.domain and self.doc_type):
            raise ValueError('This function requires a domain and doc_type set!')
        return iterate_doc_ids_in_domain_by_type(
            self.domain, self.doc_type,
            chunk_size=ID_CHUNK_SIZE, database=self._couch_db)

    def iter_documents(self, ids):
        """Stream the full documents for ``ids``, fetched 500 at a time."""
        return iter_docs(self._couch_db, ids, chunksize=500)
# Maps a couch doc_type to the attribute carrying its reference timestamp.
# NOTE(review): consumers are outside this chunk -- presumably used to
# order/filter documents by date; confirm against callers.
_DATE_MAP = {
    'XFormInstance': 'received_on',
    'CommCareCase': 'opened_on',
}
| {
"content_hash": "9cc2b215ef5a9193b11d795a60c75714",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 102,
"avg_line_length": 29.29787234042553,
"alnum_prop": 0.6405228758169934,
"repo_name": "dimagi/commcare-hq",
"id": "ba21e1746575fe12270c09a7df34a73376552eb2",
"size": "1377",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "corehq/ex-submodules/pillowtop/dao/couch.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "82928"
},
{
"name": "Dockerfile",
"bytes": "2341"
},
{
"name": "HTML",
"bytes": "2589268"
},
{
"name": "JavaScript",
"bytes": "5889543"
},
{
"name": "Jinja",
"bytes": "3693"
},
{
"name": "Less",
"bytes": "176180"
},
{
"name": "Makefile",
"bytes": "1622"
},
{
"name": "PHP",
"bytes": "2232"
},
{
"name": "PLpgSQL",
"bytes": "66704"
},
{
"name": "Python",
"bytes": "21779773"
},
{
"name": "Roff",
"bytes": "150"
},
{
"name": "Shell",
"bytes": "67473"
}
],
"symlink_target": ""
} |
from core.checks import * # NOQA
# Legacy Django (< 3.2) AppConfig hook pointing at this app's config class.
default_app_config = 'panel.apps.PanelConfig'
| {
"content_hash": "0f052da56d97547fb93b8d78b2fd5676",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 45,
"avg_line_length": 20.5,
"alnum_prop": 0.7317073170731707,
"repo_name": "ikcam/django-skeleton",
"id": "cda7d92fa600ccd8e9b81bec9dd8c3289f6085b3",
"size": "82",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "panel/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "198458"
},
{
"name": "HTML",
"bytes": "75155"
},
{
"name": "JavaScript",
"bytes": "28974"
},
{
"name": "Python",
"bytes": "217638"
},
{
"name": "Shell",
"bytes": "1972"
}
],
"symlink_target": ""
} |
import pytest
from django.core.exceptions import ImproperlyConfigured
from modernrpc.views import RPCEntryPoint
def test_invalid_entry_point_no_handler(settings, rf):
    """An entry point configured with zero handlers must refuse to serve."""
    settings.MODERNRPC_HANDLERS = []
    view = RPCEntryPoint.as_view()
    with pytest.raises(ImproperlyConfigured, match=r"At least 1 handler must be instantiated"):
        view(rf.post("xxx"))
| {
"content_hash": "b2fb444c0702d63ca824fcc2bf1d8f86",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 62,
"avg_line_length": 31.384615384615383,
"alnum_prop": 0.75,
"repo_name": "alorence/django-modern-rpc",
"id": "e89678b847fb58f7ab8e8491842aeadede768751",
"size": "424",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/unit/test_view.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "6088"
},
{
"name": "Python",
"bytes": "165018"
}
],
"symlink_target": ""
} |
from django.core import validators
from django.db import models
from rest_framework.serializers import WritableField
class MoneyFieldDRF(WritableField):
    """DRF writable field for integer money values bounded to a range."""

    def __init__(self, max_value=9999999999, min_value=0, *args, **kwargs):
        # Append range validators to any the caller already supplied.
        field_validators = kwargs.setdefault("validators", [])
        for bound, validator_cls in (
                (max_value, validators.MaxValueValidator),
                (min_value, validators.MinValueValidator)):
            if bound is not None:
                field_validators.append(validator_cls(bound))
        super(MoneyFieldDRF, self).__init__(*args, **kwargs)
class MoneyField(models.BigIntegerField):
    """
    Stores money to nearest penny as integer. e.g. £10.22 would be 1022
    """

    def __init__(self, max_value=9999999999, min_value=0, *args, **kwargs):
        self.max_value, self.min_value = max_value, min_value
        super(MoneyField, self).__init__(*args, **kwargs)
        # Enforce the configured range on top of BigIntegerField's checks.
        for bound, validator_cls in (
                (max_value, validators.MaxValueValidator),
                (min_value, validators.MinValueValidator)):
            if bound is not None:
                self.validators.append(validator_cls(bound))
| {
"content_hash": "ca2c979e1dd8e001c97af2b64c954d6b",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 80,
"avg_line_length": 39.32258064516129,
"alnum_prop": 0.6628383921246924,
"repo_name": "ministryofjustice/cla_backend",
"id": "66f025de58eabe8116ed532a9784fcb9d5f1602e",
"size": "1235",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cla_backend/apps/legalaid/fields.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "45941"
},
{
"name": "Dockerfile",
"bytes": "1272"
},
{
"name": "HTML",
"bytes": "14794"
},
{
"name": "JavaScript",
"bytes": "2762"
},
{
"name": "Mustache",
"bytes": "3607"
},
{
"name": "Python",
"bytes": "1577558"
},
{
"name": "Shell",
"bytes": "11204"
},
{
"name": "Smarty",
"bytes": "283906"
}
],
"symlink_target": ""
} |
# Generated regression test: run the explainer pipeline for the
# FourClass_100 dataset with the LogisticRegression_4 model.
from sklearn_explain.tests.skl_datasets import skl_datasets_test as skltest

skltest.test_class_dataset_and_model("FourClass_100" , "LogisticRegression_4")
| {
"content_hash": "6da165c9c69eec69396376b56212a299",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 78,
"avg_line_length": 39.25,
"alnum_prop": 0.802547770700637,
"repo_name": "antoinecarme/sklearn_explain",
"id": "cb7a1341043b9c948d4231254ce63989dc8ce509",
"size": "157",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/skl_datasets/FourClass_100/skl_dataset_FourClass_100_LogisticRegression_4_code_gen.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "110343"
}
],
"symlink_target": ""
} |
import django.forms as forms
from string import Template
from django.utils.safestring import mark_safe
from django.conf import settings
import os
class ImageUploaderWidget(forms.TextInput):
    """Form widget that renders the stored value as an inline image preview.

    Instead of a text input, the stored file name is shown as an ``<img>``
    tag pointing at the file under ``MEDIA_URL``.
    """

    def render(self, name, value, attrs=None):
        """Return safe HTML displaying *value* as an image.

        :param name: field name (unused; part of the widget API).
        :param value: image path relative to ``settings.MEDIA_URL``.
        :param attrs: extra HTML attributes (unused).
        """
        # Removed a leftover ``print(attrs)`` debug statement that wrote to
        # stdout on every render.
        tpl = Template(u'<img src="$url" />')
        url = "%s/%s" % (settings.MEDIA_URL, value)
        return mark_safe(tpl.substitute(url=url))
"content_hash": "5fea43053b798c50b69db49f58150ed7",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 51,
"avg_line_length": 33.833333333333336,
"alnum_prop": 0.6945812807881774,
"repo_name": "Mercy-Nekesa/sokoapp",
"id": "afd92a7a579439f03ea9bd6ebc0a70820d8d818b",
"size": "406",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sokoapp/gallery/widgets.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "11371"
},
{
"name": "JavaScript",
"bytes": "94912"
},
{
"name": "Python",
"bytes": "574289"
}
],
"symlink_target": ""
} |
"""Run inference on a single image with a MUSIQ checkpoint."""
import collections
import io
from typing import Dict, Sequence, Text, TypeVar
from absl import app
from absl import flags
import jax
import jax.numpy as jnp
import ml_collections
import numpy as np
import tensorflow.compat.v1 as tf
import musiq.model.multiscale_transformer as model_mod
import musiq.model.preprocessing as pp_lib
FLAGS = flags.FLAGS

flags.DEFINE_string('ckpt_path', '', 'Path to checkpoint.')
flags.DEFINE_string('image_path', '', 'Path to input image.')
flags.DEFINE_integer(
    'num_classes', 1,
    'Number of scores to predict. 10 for AVA and 1 for the other datasets.')

# Image preprocessing config.
_PP_CONFIG = {
    'patch_size': 32,
    'patch_stride': 32,
    'hse_grid_size': 10,
    # The longer-side length for the resized variants.
    'longer_side_lengths': [224, 384],
    # -1 means using all the patches from the full-size image.
    'max_seq_len_from_original_res': -1,
}

# Model backbone config, expanded into keyword args of the model below.
_MODEL_CONFIG = {
    'hidden_size': 384,
    'representation_size': None,
    'resnet_emb': {
        'num_layers': 5
    },
    'transformer': {
        'attention_dropout_rate': 0,
        'dropout_rate': 0,
        'mlp_dim': 1152,
        'num_heads': 6,
        'num_layers': 14,
        'num_scales': 3,
        'spatial_pos_grid_size': 10,
        'use_scale_emb': True,
        'use_sinusoid_pos_emb': False,
    }
}

T = TypeVar('T')  # Declare type variable
def recover_tree(keys, values):
  """Rebuilds a nested dict from flat '/'-separated names and leaf values.

  This function is useful to analyze checkpoints that are saved by our
  programs without needing the exact source code of the experiment. In
  particular, it can be used to extract and reuse various subtrees of the
  checkpoint, e.g. a subtree of parameters.

  Args:
    keys: a list of names, where '/' separates nesting levels.
    values: a list of leaf values, parallel to `keys`.

  Returns:
    A nested tree-like dict.
  """
  tree = {}
  children = collections.defaultdict(list)
  for name, leaf in zip(keys, values):
    if '/' in name:
      prefix, suffix = name.split('/', 1)
      children[prefix].append((suffix, leaf))
    else:
      tree[name] = leaf
  # Recurse into each grouped prefix to build its subtree.
  for prefix, grouped in children.items():
    sub_keys, sub_values = zip(*grouped)
    tree[prefix] = recover_tree(sub_keys, sub_values)
  return tree
def prepare_image(image_path, pp_config):
  """Processes image to multi-scale representation.

  Args:
    image_path: input image path.
    pp_config: image preprocessing config (see _PP_CONFIG).

  Returns:
    An array representing image patches and input position annotations,
    with a leading batch dimension of 1.
  """
  with tf.compat.v1.gfile.FastGFile(image_path, 'rb') as f:
    encoded_str = f.read()
  data = dict(image=tf.constant(encoded_str))
  pp_fn = pp_lib.get_preprocess_fn(**pp_config)
  data = pp_fn(data)
  image = data['image']
  # Shape (1, length, dim)
  image = tf.expand_dims(image, axis=0)
  # NOTE(review): .numpy() requires TF eager execution even though the
  # tf.compat.v1 API is imported -- confirm the runtime enables it.
  image = image.numpy()
  return image
def run_model_single_image(model_config, num_classes, pp_config, params,
                           image_path):
  """Runs the model.

  Args:
    model_config: the parameters used in building the model backbone.
    num_classes: number of outputs. 1 for single mos prediction.
    pp_config: image preprocessing config.
    params: model parameters loaded from checkpoint.
    image_path: input image path.

  Returns:
    Model prediction for MOS score.
  """
  image = prepare_image(image_path, pp_config)
  model = model_mod.Model.partial(
      num_classes=num_classes, train=False, **model_config)
  logits = model.call(params, image)
  preds = logits
  if num_classes > 1:
    # Multi-class head (e.g. AVA): the MOS is the expectation of the
    # softmax distribution over the score values 1..num_classes.
    preds = jax.nn.softmax(logits)
    score_values = jnp.arange(1, num_classes + 1, dtype=np.float32)
    preds = jnp.sum(preds * score_values, axis=-1)
  # Batch size is 1, so return the single prediction.
  return preds[0]
def get_params_and_config(ckpt_path):
  """Returns (model config, preprocessing config, model params from ckpt)."""
  model_config = ml_collections.ConfigDict(_MODEL_CONFIG)
  pp_config = ml_collections.ConfigDict(_PP_CONFIG)
  with tf.compat.v1.gfile.FastGFile(ckpt_path, 'rb') as f:
    data = f.read()
  # The checkpoint is a flat mapping of 'a/b/c'-style names to arrays;
  # rebuild the nested parameter tree from it.
  values = np.load(io.BytesIO(data))
  params = recover_tree(*zip(*values.items()))
  params = params['opt']['target']
  if not model_config.representation_size:
    # No pre-logits layer when representation_size is unset.
    params['pre_logits'] = {}
  return model_config, pp_config, params
def main(_):
  """Loads the checkpoint, scores FLAGS.image_path, and prints the MOS."""
  model_config, pp_config, params = get_params_and_config(FLAGS.ckpt_path)
  pred_mos = run_model_single_image(model_config, FLAGS.num_classes, pp_config,
                                    params, FLAGS.image_path)
  # Fixed typo in the output label ('Precited' -> 'Predicted').
  print('============== Predicted MOS:', pred_mos)
if __name__ == '__main__':
app.run(main)
| {
"content_hash": "f51cbf1adf338a4a1498aed1cb7af76d",
"timestamp": "",
"source": "github",
"line_count": 163,
"max_line_length": 80,
"avg_line_length": 28.803680981595093,
"alnum_prop": 0.6602768903088392,
"repo_name": "google-research/google-research",
"id": "a6fdcbc5b4b1baa9f635e7f7c0ea90e53e2598be",
"size": "5303",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "musiq/run_predict_image.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "9817"
},
{
"name": "C++",
"bytes": "4166670"
},
{
"name": "CMake",
"bytes": "6412"
},
{
"name": "CSS",
"bytes": "27092"
},
{
"name": "Cuda",
"bytes": "1431"
},
{
"name": "Dockerfile",
"bytes": "7145"
},
{
"name": "Gnuplot",
"bytes": "11125"
},
{
"name": "HTML",
"bytes": "77599"
},
{
"name": "ImageJ Macro",
"bytes": "50488"
},
{
"name": "Java",
"bytes": "487585"
},
{
"name": "JavaScript",
"bytes": "896512"
},
{
"name": "Julia",
"bytes": "67986"
},
{
"name": "Jupyter Notebook",
"bytes": "71290299"
},
{
"name": "Lua",
"bytes": "29905"
},
{
"name": "MATLAB",
"bytes": "103813"
},
{
"name": "Makefile",
"bytes": "5636"
},
{
"name": "NASL",
"bytes": "63883"
},
{
"name": "Perl",
"bytes": "8590"
},
{
"name": "Python",
"bytes": "53790200"
},
{
"name": "R",
"bytes": "101058"
},
{
"name": "Roff",
"bytes": "1208"
},
{
"name": "Rust",
"bytes": "2389"
},
{
"name": "Shell",
"bytes": "730444"
},
{
"name": "Smarty",
"bytes": "5966"
},
{
"name": "Starlark",
"bytes": "245038"
}
],
"symlink_target": ""
} |
import sys
from pywrap.import_hook import CppFinder
from pywrap.testing import PREFIX
from pywrap.utils import remove_files
from nose.tools import assert_equal, assert_raises
def test_import_hook_missing_header():
    """Importing a name with no matching C++ header raises ImportError."""
    sys.meta_path[:] = []
    sys.meta_path.append(CppFinder(import_path=PREFIX))
    try:
        assert_raises(ImportError, __import__, "missing")
    finally:
        # Always restore an empty meta_path so other tests are unaffected.
        sys.meta_path[:] = []
def test_import_hook():
    """A C++ module is compiled and imported transparently via the hook."""
    sys.meta_path[:] = []
    sys.meta_path.append(CppFinder(import_path=PREFIX))
    try:
        import doubleindoubleout
        instance = doubleindoubleout.A()
        assert_equal(instance.plus2(2.0), 4.0)
    finally:
        # Clean up the compiled artifact and the installed finder.
        remove_files(["doubleindoubleout.so"])
        sys.meta_path[:] = []
| {
"content_hash": "b0bc19a46d6957cd4722ed7ae4d20c5e",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 57,
"avg_line_length": 27.615384615384617,
"alnum_prop": 0.6727019498607242,
"repo_name": "AlexanderFabisch/cythonwrapper",
"id": "c86c1c8fa3c0438d8ffafc9c6e38eaefbbc023e2",
"size": "718",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_import_hook.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C++",
"bytes": "10201"
},
{
"name": "Makefile",
"bytes": "126"
},
{
"name": "Python",
"bytes": "150538"
}
],
"symlink_target": ""
} |
"""Test the datalab interface functions.
Note that the calls to do analysis, training, and prediction are all mocked.
"""
from __future__ import absolute_import
from __future__ import print_function
import os
import sys
# Set up the path so that we can import local packages.
sys.path.append(
os.path.abspath(os.path.join(os.path.dirname(__file__),
'../../solutionbox/structured_data/'))) # noqa
from test_mltoolbox.test_package_functions import * # noqa
| {
"content_hash": "38bfd0c15bf0cbbdd125cd6479189c8a",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 76,
"avg_line_length": 30.3125,
"alnum_prop": 0.7010309278350515,
"repo_name": "jdanbrown/pydatalab",
"id": "734a183ec9845f27bee7376f0ae6860d2310a2a3",
"size": "1073",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "tests/mltoolbox_structured_data/dl_interface_tests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "7596"
},
{
"name": "Python",
"bytes": "1686612"
},
{
"name": "Shell",
"bytes": "3366"
},
{
"name": "TypeScript",
"bytes": "105129"
}
],
"symlink_target": ""
} |
"""
====================================
Displaying key timeseries statistics
====================================
Visualizing characteristics of a time series is a key component to effective
forecasting. In this example, we'll look at a very simple method to examine
critical statistics of a time series object.
.. raw:: html
<br/>
"""
print(__doc__)
# Author: Taylor Smith <taylor.smith@alkaline-ml.com>
import pmdarima as pm
from pmdarima import datasets
from pmdarima import preprocessing
# We'll use the sunspots dataset for this example
y = datasets.load_sunspots(True)
print("Data shape: {}".format(y.shape[0]))
print("Data head:")
print(y.head())
# Let's look at the series, its ACF plot, and a histogram of its values
pm.tsdisplay(y, lag_max=90, title="Sunspots", show=True)
# Notice that the histogram is very skewed. This is a prime candidate for
# box-cox transformation
y_bc, _ = preprocessing.BoxCoxEndogTransformer(lmbda2=1e-6).fit_transform(y)
pm.tsdisplay(
y_bc, lag_max=90, title="Sunspots (BoxCox-transformed)", show=True)
print("""
As evidenced by the more normally distributed values in the transformed series,
using a Box-Cox transformation may prove useful prior to fitting your model.
""")
| {
"content_hash": "f018b56a4eeebbb8b04873f9fcc30482",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 79,
"avg_line_length": 30.024390243902438,
"alnum_prop": 0.7059301380991064,
"repo_name": "alkaline-ml/pmdarima",
"id": "a78b5fde2459e1c906c70b843501687b91af5c5e",
"size": "1231",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "examples/utils/example_tsdisplay.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "277"
},
{
"name": "Cython",
"bytes": "9659"
},
{
"name": "Jupyter Notebook",
"bytes": "19652"
},
{
"name": "Makefile",
"bytes": "2159"
},
{
"name": "Python",
"bytes": "554553"
},
{
"name": "Shell",
"bytes": "10301"
}
],
"symlink_target": ""
} |
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin

from custom_auth.models import CustomUser

# Manage CustomUser through the stock Django UserAdmin interface.
admin.site.register(CustomUser, UserAdmin)
| {
"content_hash": "ab93661403ef6128c2bfec99695dcc70",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 47,
"avg_line_length": 24.142857142857142,
"alnum_prop": 0.834319526627219,
"repo_name": "Larhard/Elgassia",
"id": "4e42b2991fbb7a8345ff9228a783ece5a42b4935",
"size": "169",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "custom_auth/admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4460"
},
{
"name": "HTML",
"bytes": "22649"
},
{
"name": "JavaScript",
"bytes": "2983"
},
{
"name": "Python",
"bytes": "27470"
},
{
"name": "Shell",
"bytes": "954"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, division, print_function, unicode_literals
import pytest ; pytest
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
# External imports
from textwrap import dedent
# Bokeh imports
from bokeh.models import Slider
# Module under test
from bokeh.models import FuncTickFormatter
#-----------------------------------------------------------------------------
# Setup
#-----------------------------------------------------------------------------
pscript = pytest.importorskip("pscript")
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
def test_functickformatter_from_py_func_no_args():

    def convert_to_minutes():
        return tick * 60  # noqa

    # from_py_func should embed the transpiled function plus a zero-arg call;
    # stripping the transpiled code must leave only the call wrapper.
    formatter = FuncTickFormatter.from_py_func(convert_to_minutes)
    js_code = pscript.py2js(convert_to_minutes, 'formatter')

    function_wrapper = formatter.code.replace(js_code, '')

    assert function_wrapper == "return formatter();\n"
def test_functickformatter_from_py_func_with_args():

    slider = Slider()

    def convert_to_minutes(x=slider):
        return tick * 60  # noqa

    # Bokeh-model default arguments become entries in formatter.args and are
    # forwarded to the generated JS wrapper call.
    formatter = FuncTickFormatter.from_py_func(convert_to_minutes)
    js_code = pscript.py2js(convert_to_minutes, 'formatter')

    function_wrapper = formatter.code.replace(js_code, '')

    assert function_wrapper == "return formatter(x);\n"
    assert formatter.args['x'] is slider
def test_functickformatter_bad_pyfunc_formats():
    # Plain positional arguments are rejected.
    def has_positional_arg(x):
        return None
    with pytest.raises(ValueError):
        FuncTickFormatter.from_py_func(has_positional_arg)

    # A positional argument is rejected even when keyword args are present.
    def has_positional_arg_with_kwargs(y, x=5):
        return None
    with pytest.raises(ValueError):
        FuncTickFormatter.from_py_func(has_positional_arg_with_kwargs)

    # Keyword defaults must be Bokeh models, not arbitrary Python values.
    def has_non_Model_keyword_argument(x=10):
        return None
    with pytest.raises(ValueError):
        FuncTickFormatter.from_py_func(has_non_Model_keyword_argument)
def test_functickformatter_from_coffeescript_no_arg():
    # CoffeeScript source is compiled to the equivalent JavaScript.
    coffee_code = dedent("""
        square = (x) -> x * x
        return square(tick)
        """)

    formatter = FuncTickFormatter.from_coffeescript(code=coffee_code)

    assert formatter.code == dedent("""\
        var square;
        square = function (x) {
            return x * x;
        };
        return square(tick);
        """)

    assert formatter.args == {}
def test_functickformatter_from_coffeescript_with_args():
    # CoffeeScript integer division (//) compiles to Math.floor(...), and
    # supplied args are carried through to formatter.args unchanged.
    coffee_code = dedent("""
        return slider.get("value") // 2 + tick
        """)

    slider = Slider()
    formatter = FuncTickFormatter.from_coffeescript(code=coffee_code, args={"slider": slider})

    assert formatter.code == dedent("""\
        return Math.floor(slider.get("value") / 2) + tick;
        """)

    assert formatter.args == {"slider": slider}
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| {
"content_hash": "1a4babc2bcbb32d7c42e24de3472b6ca",
"timestamp": "",
"source": "github",
"line_count": 112,
"max_line_length": 94,
"avg_line_length": 31.758928571428573,
"alnum_prop": 0.4877705931965139,
"repo_name": "timsnyder/bokeh",
"id": "d1319e69595cce010c2b13471e18964a89794a4c",
"size": "4061",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "bokeh/models/tests/test_formatters.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1442"
},
{
"name": "CSS",
"bytes": "24877"
},
{
"name": "Dockerfile",
"bytes": "4099"
},
{
"name": "HTML",
"bytes": "54062"
},
{
"name": "JavaScript",
"bytes": "27797"
},
{
"name": "Makefile",
"bytes": "886"
},
{
"name": "PowerShell",
"bytes": "713"
},
{
"name": "Python",
"bytes": "3827067"
},
{
"name": "Roff",
"bytes": "495"
},
{
"name": "Shell",
"bytes": "9953"
},
{
"name": "TypeScript",
"bytes": "2145262"
}
],
"symlink_target": ""
} |
from .version import __version__
from .sendgrid import SendGridClient
from .exceptions import SendGridError, SendGridClientError, SendGridServerError

# v2 API: legacy mail construction
from .message import Mail

# v3 API: current Web API client
from .client import SendGridAPIClient
"content_hash": "2b15c5c9c0ef924bc734421af967810c",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 79,
"avg_line_length": 32.714285714285715,
"alnum_prop": 0.834061135371179,
"repo_name": "thinkingserious/sendgrid-python",
"id": "270c2895eb2cd459f71a459c5ec91125b1ad8a2f",
"size": "229",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "sendgrid/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "43112"
},
{
"name": "Shell",
"bytes": "145"
}
],
"symlink_target": ""
} |
"""
So, you have some structure from wild. Say this will be some JSON over API.
But you cant change this JSON structure.
"""
sample_data = {
'userNameFirst': 'Adam',
'userNameSecond': 'Smith',
'userPassword': 'supersecretpassword',
'userEmail': 'adam@smith.math.edu',
'userRoles': 'teacher, worker, admin',
}
"""
And you will not save this structure like this, actually you want something
like this
"""
import hashlib
desired_data = {
'name': 'Adam',
'second_name': 'Smith',
'password': hashlib.md5('supersecretpassword').hexdigest(),
'email': 'adam@smith.math.edu',
'roles': ['teacher', 'worker', 'admin'],
}
"""
Ok, so you need to convert it somehow. You will write it simple
"""
new_data = {
'name': sample_data['userNameFirst'],
'second_name': sample_data['userNameSecond'],
'password': hashlib.md5(sample_data['userPassword']).hexdigest(),
'email': sample_data['userEmail'],
'roles': [s.strip() for s in sample_data['userRoles'].split(',')]
}
assert new_data == desired_data, 'Uh oh'
"""
And then you will figure out that you can get much more fields
and decide to optimize your solution with DRY in mind
"""
FIELDS = {
'userNameFirst': 'name',
'userNameSecond': 'second_name',
'userEmail': 'email',
}
new_data = dict((n2, sample_data[n1]) for n1, n2 in FIELDS.items())
new_data['roles'] = [s.strip() for s in sample_data['userRoles'].split(',')]
new_data['password'] = hashlib.md5(sample_data['userPassword']).hexdigest()
assert new_data == desired_data, 'Uh oh'
"""
Not so bad, if you have many fields it will save your time. But now you get
new information - 'userEmail' is optional field.
And specification added field 'userTitle', that must be 'bachelor' if not provided'.
Lets solve it!
"""
desired_data['title'] = 'Bachelor' # Update our test to new reality
from collections import namedtuple
Field = namedtuple('Field', 'name optional default')
FIELDS = {
'userNameFirst': 'name',
'userNameSecond': 'second_name',
'userEmail': ('email', '__optional'),
'userTitle': ('title', 'Bachelor'),
}
new_data = {}
for old, new in FIELDS.items():
if isinstance(new, tuple):
new, default = new
if old not in sample_data:
if default == '__optional':
continue
new_data[new] = default
else:
new_data[new] = sample_data[old]
new_data['roles'] = [s.strip() for s in sample_data['userRoles'].split(',')]
new_data['password'] = hashlib.md5(sample_data['userPassword']).hexdigest()
assert new_data == desired_data, 'Uh oh'
"""
Hm, ok, so much code, uh oh. I think first variant were more straightforward.
"""
new_data = {
'name': sample_data['userNameFirst'],
'second_name': sample_data['userNameSecond'],
'password': hashlib.md5(sample_data['userPassword']).hexdigest(),
'roles': [s.strip() for s in sample_data['userRoles'].split(',')]
}
if 'userEmail' in sample_data:
new_data['email'] = sample_data['userEmail']
new_data['title'] = sample_data.get('userTitle', 'Bachelor')
assert new_data == desired_data, 'Uh oh'
"""
Good old code without complicate smell, good, yeah.
But what if you will have more fields? I mean much more, and what will you do?
Dont panic, I have answer, look below.
"""
import trafaret as t
hash_md5 = lambda d: hashlib.md5(d).hexdigest()
comma_to_list = lambda d: [s.strip() for s in d.split(',')]
converter = t.Dict({
t.Key('userNameFirst') >> 'name': t.String,
t.Key('userNameSecond') >> 'second_name': t.String,
t.Key('userPassword') >> 'password': hash_md5,
t.Key('userEmail', optional=True, to_name='email'): t.String,
t.Key('userTitle', default='Bachelor', to_name='title'): t.String,
t.Key('userRoles', to_name='roles'): comma_to_list,
})
assert converter.check(sample_data) == desired_data
| {
"content_hash": "5e79daa892eaf341edab2e134abb92df",
"timestamp": "",
"source": "github",
"line_count": 127,
"max_line_length": 84,
"avg_line_length": 31.1496062992126,
"alnum_prop": 0.6352376137512639,
"repo_name": "rrader/trafaret",
"id": "49f915e8528b24f1a483c24758efa170778c366f",
"size": "3956",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "samples/sample3.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "HTML",
"bytes": "331"
},
{
"name": "JavaScript",
"bytes": "6563"
},
{
"name": "Python",
"bytes": "72922"
}
],
"symlink_target": ""
} |
import unittest
import pythran
import os.path
#pythran export a((float,(int,uintp),str list) list list)
#pythran export a(str)
#pythran export a( (str,str), int, intp list list)
#pythran export a( float set )
#pythran export a( bool:str dict )
#pythran export a( float )
#pythran export a( int8[] )
#pythran export a( int8[][] order (F))
#pythran export a( byte )
#pythran export a0( uint8 )
#pythran export a1( int16 )
#pythran export a2( uint16 )
#pythran export a3( int32 )
#pythran export a4( uint32 )
#pythran export a5( int64 )
#pythran export a6( uint64 )
#pythran export a7( float32 )
#pythran export a8( float64 )
#pythran export a9( complex64 )
#pythran export a10( complex128 )
#pythran export a( int8 set )
#pythran export b( int8 set? )
#pythran export a( uint8 list)
#pythran export a( int16 [], slice)
#pythran export a( uint16 [][] order(C))
#pythran export a( uint16 [::][])
#pythran export a( uint16 [:,:,:])
#pythran export a( uint16 [:,::,:])
#pythran export a( uint16 [,,,,])
#pythran export a( (int32, ( uint32 , int64 ) ) )
#pythran export a( uint64:float32 dict )
#pythran export a( float64, complex64, complex128 )
class TestSpecParser(unittest.TestCase):
    """Exercises pythran's '#pythran export' spec parser."""

    def test_parser(self):
        """Every export spec in this very file must parse."""
        real_path = os.path.splitext(os.path.realpath(__file__))[0] + ".py"
        with open(real_path) as fd:
            print(pythran.spec_parser(fd.read()))

    def test_invalid_specs0(self):
        """Exporting a 0-arg signature for a 1-arg function is an error."""
        code = '#pythran export foo()\ndef foo(n): return n'
        with self.assertRaises(pythran.syntax.PythranSyntaxError):
            pythran.compile_pythrancode("dumber", code)

    def test_invalid_specs1(self):
        """Exporting a 1-arg signature for a 0-arg function is an error."""
        code = '#pythran export boo(int)\ndef boo(): pass'
        with self.assertRaises(pythran.syntax.PythranSyntaxError):
            pythran.compile_pythrancode("dumber", code)

    def test_invalid_specs2(self):
        """Exporting a name that is not defined is an error."""
        code = '#pythran export bar(int)\ndef foo(): pass'
        with self.assertRaises(pythran.syntax.PythranSyntaxError):
            pythran.compile_pythrancode("dumber", code)

    def test_invalid_specs3(self):
        """A non-optional parameter may not follow an optional one."""
        code = '#pythran export bar(int, int?, int)\ndef bar(x, y=1, z=1): pass'
        with self.assertRaises(pythran.syntax.PythranSyntaxError):
            pythran.compile_pythrancode("dumber", code)

    def test_multiline_spec0(self):
        """A spec may span several comment lines."""
        code = '''
#pythran export foo(
# )
def foo(): return
'''
        self.assertTrue(pythran.spec_parser(code))

    def test_multiline_spec1(self):
        code = '''
#pythran export foo(int
#, int
# )
def foo(i,j): return
'''
        self.assertTrue(pythran.spec_parser(code))

    def test_multiline_spec2(self):
        code = '''
# pythran export foo(int,
# float
#, int
# )
def foo(i,j,k): return
'''
        self.assertTrue(pythran.spec_parser(code))

    def test_crappy_spec0(self):
        """Trailing prose after a spec is tolerated."""
        code = '''
# pythran export soo(int) this is an int test
def soo(i): return
'''
        self.assertTrue(pythran.spec_parser(code))

    def test_crappy_spec1(self):
        code = '''
# pythran export poo(int)
#this is a pythran export test
def poo(i): return
'''
        self.assertTrue(pythran.spec_parser(code))

    def test_middle_spec0(self):
        """A spec may appear after the function it exports."""
        code = '''
def too(i): return
# pythran export too(int)
#this is a pythran export test
def bar(i): return
'''
        self.assertTrue(pythran.spec_parser(code))

    def test_middle_spec1(self):
        code = '''
def zoo(i): return
#this is a pythran export test
# pythran export zoo(int)
#this is an export test
# pythran export zoo(str)
def bar(i): return
'''
        # assertEquals is a deprecated alias (removed in Python 3.12);
        # use assertEqual instead.
        self.assertEqual(len(pythran.spec_parser(code).functions), 1)
        self.assertEqual(len(pythran.spec_parser(code).functions['zoo']), 2)

    def test_var_export0(self):
        """Module-level variables can be exported without a signature."""
        code = '''
# pythran export coo
coo = 1
'''
        self.assertTrue(pythran.spec_parser(code))
| {
"content_hash": "22bb86121c5f4b66c9956dbed6a00853",
"timestamp": "",
"source": "github",
"line_count": 135,
"max_line_length": 80,
"avg_line_length": 29.585185185185185,
"alnum_prop": 0.6236855282924386,
"repo_name": "pombredanne/pythran",
"id": "a8f5d1150f13a2e3a2a0c3527df0e8f3697eb620",
"size": "3994",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pythran/tests/test_spec_parser.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C++",
"bytes": "1366767"
},
{
"name": "Makefile",
"bytes": "1185"
},
{
"name": "Python",
"bytes": "1209572"
},
{
"name": "Shell",
"bytes": "264"
}
],
"symlink_target": ""
} |
import datetime
import re
import uuid
from functools import lru_cache
from django.conf import settings
from django.db.backends.base.operations import BaseDatabaseOperations
from django.db.backends.utils import strip_quotes, truncate_name
from django.db.utils import DatabaseError
from django.utils import timezone
from django.utils.encoding import force_bytes, force_str
from django.utils.functional import cached_property
from .base import Database
from .utils import BulkInsertMapper, InsertIdVar, Oracle_datetime
class DatabaseOperations(BaseDatabaseOperations):
    # Oracle uses NUMBER(11) and NUMBER(19) for integer fields.
    integer_field_ranges = {
        'SmallIntegerField': (-99999999999, 99999999999),
        'IntegerField': (-99999999999, 99999999999),
        'BigIntegerField': (-9999999999999999999, 9999999999999999999),
        'PositiveSmallIntegerField': (0, 99999999999),
        'PositiveIntegerField': (0, 99999999999),
    }
    # Oracle spells the standard SQL EXCEPT set operator as MINUS.
    set_operators = {**BaseDatabaseOperations.set_operators, 'difference': 'MINUS'}

    # PL/SQL block that advances the column's identity sequence until it
    # passes MAX(column), so future inserts don't collide with loaded rows.
    # TODO: colorize this SQL code with style.SQL_KEYWORD(), etc.
    _sequence_reset_sql = """
DECLARE
    table_value integer;
    seq_value integer;
    seq_name user_tab_identity_cols.sequence_name%%TYPE;
BEGIN
    BEGIN
        SELECT sequence_name INTO seq_name FROM user_tab_identity_cols
        WHERE  table_name = '%(table_name)s' AND
               column_name = '%(column_name)s';
        EXCEPTION WHEN NO_DATA_FOUND THEN
            seq_name := '%(no_autofield_sequence_name)s';
    END;
    SELECT NVL(MAX(%(column)s), 0) INTO table_value FROM %(table)s;
    SELECT NVL(last_number - cache_size, 0) INTO seq_value FROM user_sequences
    WHERE sequence_name = seq_name;
    WHILE table_value > seq_value LOOP
        EXECUTE IMMEDIATE 'SELECT "'||seq_name||'".nextval FROM DUAL'
        INTO seq_value;
    END LOOP;
END;
/"""

    # Oracle doesn't support string without precision; use the max string size.
    cast_char_field_without_max_length = 'NVARCHAR2(2000)'
    cast_data_types = {
        'AutoField': 'NUMBER(11)',
        'BigAutoField': 'NUMBER(19)',
        'TextField': cast_char_field_without_max_length,
    }
    def cache_key_culling_sql(self):
        """Return SQL selecting the cull boundary key from the cache table.

        Uses Oracle's OFFSET ... FETCH FIRST syntax; the table name and the
        row offset are interpolated/bound by the caller.
        """
        return 'SELECT cache_key FROM %s ORDER BY cache_key OFFSET %%s ROWS FETCH FIRST 1 ROWS ONLY'
def date_extract_sql(self, lookup_type, field_name):
if lookup_type == 'week_day':
# TO_CHAR(field, 'D') returns an integer from 1-7, where 1=Sunday.
return "TO_CHAR(%s, 'D')" % field_name
elif lookup_type == 'week':
# IW = ISO week number
return "TO_CHAR(%s, 'IW')" % field_name
elif lookup_type == 'quarter':
return "TO_CHAR(%s, 'Q')" % field_name
elif lookup_type == 'iso_year':
return "TO_CHAR(%s, 'IYYY')" % field_name
else:
# https://docs.oracle.com/en/database/oracle/oracle-database/18/sqlrf/EXTRACT-datetime.html
return "EXTRACT(%s FROM %s)" % (lookup_type.upper(), field_name)
def date_trunc_sql(self, lookup_type, field_name):
# https://docs.oracle.com/en/database/oracle/oracle-database/18/sqlrf/ROUND-and-TRUNC-Date-Functions.html
if lookup_type in ('year', 'month'):
return "TRUNC(%s, '%s')" % (field_name, lookup_type.upper())
elif lookup_type == 'quarter':
return "TRUNC(%s, 'Q')" % field_name
elif lookup_type == 'week':
return "TRUNC(%s, 'IW')" % field_name
else:
return "TRUNC(%s)" % field_name
# Oracle crashes with "ORA-03113: end-of-file on communication channel"
# if the time zone name is passed in parameter. Use interpolation instead.
# https://groups.google.com/forum/#!msg/django-developers/zwQju7hbG78/9l934yelwfsJ
# This regexp matches all time zone names from the zoneinfo database.
_tzname_re = re.compile(r'^[\w/:+-]+$')
    def _convert_field_to_tz(self, field_name, tzname):
        """Return SQL converting `field_name` from the connection's time zone
        to `tzname`.

        `tzname` is validated against _tzname_re because it is interpolated
        into the SQL rather than bound as a parameter (see comment above).

        :raises ValueError: if `tzname` is not a plausible zoneinfo name.
        """
        if not settings.USE_TZ:
            return field_name
        if not self._tzname_re.match(tzname):
            raise ValueError("Invalid time zone name: %s" % tzname)
        # Convert from connection timezone to the local time, returning
        # TIMESTAMP WITH TIME ZONE and cast it back to TIMESTAMP to strip the
        # TIME ZONE details.
        if self.connection.timezone_name != tzname:
            return "CAST((FROM_TZ(%s, '%s') AT TIME ZONE '%s') AS TIMESTAMP)" % (
                field_name,
                self.connection.timezone_name,
                tzname,
            )
        return field_name
def datetime_cast_date_sql(self, field_name, tzname):
field_name = self._convert_field_to_tz(field_name, tzname)
return 'TRUNC(%s)' % field_name
    def datetime_cast_time_sql(self, field_name, tzname):
        """Cast a TIMESTAMP-stored time value to the specified time zone.

        No truncation is needed here: the date part is simply ignored by
        TimeField consumers.
        """
        # Since `TimeField` values are stored as TIMESTAMP where only the date
        # part is ignored, convert the field to the specified timezone.
        return self._convert_field_to_tz(field_name, tzname)
    def datetime_extract_sql(self, lookup_type, field_name, tzname):
        """Extract `lookup_type` from a datetime column after localizing it
        to `tzname`; delegates to date_extract_sql for the SQL itself."""
        field_name = self._convert_field_to_tz(field_name, tzname)
        return self.date_extract_sql(lookup_type, field_name)
def datetime_trunc_sql(self, lookup_type, field_name, tzname):
field_name = self._convert_field_to_tz(field_name, tzname)
# https://docs.oracle.com/en/database/oracle/oracle-database/18/sqlrf/ROUND-and-TRUNC-Date-Functions.html
if lookup_type in ('year', 'month'):
sql = "TRUNC(%s, '%s')" % (field_name, lookup_type.upper())
elif lookup_type == 'quarter':
sql = "TRUNC(%s, 'Q')" % field_name
elif lookup_type == 'week':
sql = "TRUNC(%s, 'IW')" % field_name
elif lookup_type == 'day':
sql = "TRUNC(%s)" % field_name
elif lookup_type == 'hour':
sql = "TRUNC(%s, 'HH24')" % field_name
elif lookup_type == 'minute':
sql = "TRUNC(%s, 'MI')" % field_name
else:
sql = "CAST(%s AS DATE)" % field_name # Cast to DATE removes sub-second precision.
return sql
def time_trunc_sql(self, lookup_type, field_name):
# The implementation is similar to `datetime_trunc_sql` as both
# `DateTimeField` and `TimeField` are stored as TIMESTAMP where
# the date part of the later is ignored.
if lookup_type == 'hour':
sql = "TRUNC(%s, 'HH24')" % field_name
elif lookup_type == 'minute':
sql = "TRUNC(%s, 'MI')" % field_name
elif lookup_type == 'second':
sql = "CAST(%s AS DATE)" % field_name # Cast to DATE removes sub-second precision.
return sql
    def get_db_converters(self, expression):
        """Return the chain of converter callables applied to values read
        from the database for `expression`, based on its output field type."""
        converters = super().get_db_converters(expression)
        internal_type = expression.output_field.get_internal_type()
        if internal_type == 'TextField':
            converters.append(self.convert_textfield_value)
        elif internal_type == 'BinaryField':
            converters.append(self.convert_binaryfield_value)
        elif internal_type in ['BooleanField', 'NullBooleanField']:
            converters.append(self.convert_booleanfield_value)
        elif internal_type == 'DateTimeField':
            # Only needed when USE_TZ: naive timestamps must be made aware.
            if settings.USE_TZ:
                converters.append(self.convert_datetimefield_value)
        elif internal_type == 'DateField':
            converters.append(self.convert_datefield_value)
        elif internal_type == 'TimeField':
            converters.append(self.convert_timefield_value)
        elif internal_type == 'UUIDField':
            converters.append(self.convert_uuidfield_value)
        # Oracle stores empty strings as null. If the field accepts the empty
        # string, undo this to adhere to the Django convention of using
        # the empty string instead of null.
        if expression.field.empty_strings_allowed:
            converters.append(
                self.convert_empty_bytes
                if internal_type == 'BinaryField' else
                self.convert_empty_string
            )
        return converters
    def convert_textfield_value(self, value, expression, connection):
        """Materialize a cx_Oracle LOB handle into its string contents;
        pass non-LOB values through unchanged."""
        if isinstance(value, Database.LOB):
            value = value.read()
        return value
    def convert_binaryfield_value(self, value, expression, connection):
        """Materialize a cx_Oracle LOB handle into bytes; pass non-LOB
        values through unchanged."""
        if isinstance(value, Database.LOB):
            value = force_bytes(value.read())
        return value
def convert_booleanfield_value(self, value, expression, connection):
if value in (0, 1):
value = bool(value)
return value
    # cx_Oracle always returns datetime.datetime objects for
    # DATE and TIMESTAMP columns, but Django wants to see a
    # python datetime.date, .time, or .datetime.
    def convert_datetimefield_value(self, value, expression, connection):
        """Attach the connection's time zone to the naive datetime Oracle
        returned (only registered when USE_TZ is enabled)."""
        if value is not None:
            value = timezone.make_aware(value, self.connection.timezone)
        return value
    def convert_datefield_value(self, value, expression, connection):
        """Strip the time part from the datetime cx_Oracle returns for a
        DATE column, yielding a datetime.date."""
        if isinstance(value, Database.Timestamp):
            value = value.date()
        return value
    def convert_timefield_value(self, value, expression, connection):
        """Strip the date part from the datetime cx_Oracle returns for a
        time column, yielding a datetime.time."""
        if isinstance(value, Database.Timestamp):
            value = value.time()
        return value
def convert_uuidfield_value(self, value, expression, connection):
if value is not None:
value = uuid.UUID(value)
return value
@staticmethod
def convert_empty_string(value, expression, connection):
return '' if value is None else value
@staticmethod
def convert_empty_bytes(value, expression, connection):
return b'' if value is None else value
def deferrable_sql(self):
return " DEFERRABLE INITIALLY DEFERRED"
    def fetch_returned_insert_id(self, cursor):
        """Return the id bound by RETURNING ... INTO on the last insert.

        :raises DatabaseError: when the driver swallowed ORA-1403 and no id
            was actually returned.
        """
        value = cursor._insert_id_var.getvalue()
        if value is None or value == []:
            # cx_Oracle < 6.3 returns None, >= 6.3 returns empty list.
            raise DatabaseError(
                'The database did not return a new row id. Probably "ORA-1403: '
                'no data found" was raised internally but was hidden by the '
                'Oracle OCI library (see https://code.djangoproject.com/ticket/28859).'
            )
        # cx_Oracle < 7 returns value, >= 7 returns list with single value.
        return value[0] if isinstance(value, list) else value
def field_cast_sql(self, db_type, internal_type):
if db_type and db_type.endswith('LOB'):
return "DBMS_LOB.SUBSTR(%s)"
else:
return "%s"
    def no_limit_value(self):
        # None signals "no LIMIT clause needed" to the SQL compiler.
        return None
def limit_offset_sql(self, low_mark, high_mark):
fetch, offset = self._get_limit_offset_params(low_mark, high_mark)
return ' '.join(sql for sql in (
('OFFSET %d ROWS' % offset) if offset else None,
('FETCH FIRST %d ROWS ONLY' % fetch) if fetch else None,
) if sql)
def last_executed_query(self, cursor, sql, params):
# https://cx-oracle.readthedocs.io/en/latest/cursor.html#Cursor.statement
# The DB API definition does not define this attribute.
statement = cursor.statement
# Unlike Psycopg's `query` and MySQLdb`'s `_executed`, cx_Oracle's
# `statement` doesn't contain the query parameters. Substitute
# parameters manually.
if isinstance(params, (tuple, list)):
for i, param in enumerate(params):
statement = statement.replace(':arg%d' % i, force_str(param, errors='replace'))
elif isinstance(params, dict):
for key, param in params.items():
statement = statement.replace(':%s' % key, force_str(param, errors='replace'))
return statement
def last_insert_id(self, cursor, table_name, pk_name):
sq_name = self._get_sequence_name(cursor, strip_quotes(table_name), pk_name)
cursor.execute('"%s".currval' % sq_name)
return cursor.fetchone()[0]
def lookup_cast(self, lookup_type, internal_type=None):
if lookup_type in ('iexact', 'icontains', 'istartswith', 'iendswith'):
return "UPPER(%s)"
return "%s"
    def max_in_list_size(self):
        # Oracle limits IN (...) lists to 1000 expressions (ORA-01795).
        return 1000
    def max_name_length(self):
        # Maximum identifier length; 30 bytes on Oracle versions before 12.2.
        return 30
    def pk_default_value(self):
        # Value inserted for the pk column to trigger the identity/sequence.
        return "NULL"
    def prep_for_iexact_query(self, x):
        # No-op: case-insensitivity is handled via UPPER() in lookup_cast().
        return x
def process_clob(self, value):
if value is None:
return ''
return value.read()
def quote_name(self, name):
# SQL92 requires delimited (quoted) names to be case-sensitive. When
# not quoted, Oracle has case-insensitive behavior for identifiers, but
# always defaults to uppercase.
# We simplify things by making Oracle identifiers always uppercase.
if not name.startswith('"') and not name.endswith('"'):
name = '"%s"' % truncate_name(name.upper(), self.max_name_length())
# Oracle puts the query text into a (query % args) construct, so % signs
# in names need to be escaped. The '%%' will be collapsed back to '%' at
# that stage so we aren't really making the name longer here.
name = name.replace('%', '%%')
return name.upper()
    def random_function_sql(self):
        # Oracle's PL/SQL random number generator package.
        return "DBMS_RANDOM.RANDOM"
def regex_lookup(self, lookup_type):
if lookup_type == 'regex':
match_option = "'c'"
else:
match_option = "'i'"
return 'REGEXP_LIKE(%%s, %%s, %s)' % match_option
    def return_insert_id(self):
        # RETURNING ... INTO clause template plus the bind variable the
        # generated id will be written into (read back by
        # fetch_returned_insert_id()).
        return "RETURNING %s INTO %%s", (InsertIdVar(),)
    def __foreign_key_constraints(self, table_name, recursive):
        """Return (table_name, constraint_name) rows for foreign keys
        referencing `table_name`; with recursive=True, walk the whole
        dependency chain via CONNECT BY (used by sql_flush)."""
        with self.connection.cursor() as cursor:
            if recursive:
                cursor.execute("""
                    SELECT
                        user_tables.table_name, rcons.constraint_name
                    FROM
                        user_tables
                    JOIN
                        user_constraints cons
                        ON (user_tables.table_name = cons.table_name AND cons.constraint_type = ANY('P', 'U'))
                    LEFT JOIN
                        user_constraints rcons
                        ON (user_tables.table_name = rcons.table_name AND rcons.constraint_type = 'R')
                    START WITH user_tables.table_name = UPPER(%s)
                    CONNECT BY NOCYCLE PRIOR cons.constraint_name = rcons.r_constraint_name
                    GROUP BY
                        user_tables.table_name, rcons.constraint_name
                    HAVING user_tables.table_name != UPPER(%s)
                    ORDER BY MAX(level) DESC
                """, (table_name, table_name))
            else:
                cursor.execute("""
                    SELECT
                        cons.table_name, cons.constraint_name
                    FROM
                        user_constraints cons
                    WHERE
                        cons.constraint_type = 'R'
                        AND cons.table_name = UPPER(%s)
                """, (table_name,))
            return cursor.fetchall()
    @cached_property
    def _foreign_key_constraints(self):
        """Memoized wrapper around __foreign_key_constraints.

        NOTE: lru_cache on a bound method would normally pin instances in
        memory, but here the cache lives on this single operations instance
        via cached_property, so its lifetime matches the connection's.
        """
        # 512 is large enough to fit the ~330 tables (as of this writing) in
        # Django's test suite.
        return lru_cache(maxsize=512)(self.__foreign_key_constraints)
    def sql_flush(self, style, tables, sequences, allow_cascade=False):
        """Return the statements needed to empty `tables`: disable their FK
        constraints, TRUNCATE, re-enable the constraints, then reset the
        sequences."""
        if tables:
            truncated_tables = {table.upper() for table in tables}
            constraints = set()
            # Oracle's TRUNCATE CASCADE only works with ON DELETE CASCADE
            # foreign keys which Django doesn't define. Emulate the
            # PostgreSQL behavior which truncates all dependent tables by
            # manually retrieving all foreign key constraints and resolving
            # dependencies.
            for table in tables:
                for foreign_table, constraint in self._foreign_key_constraints(table, recursive=allow_cascade):
                    if allow_cascade:
                        truncated_tables.add(foreign_table)
                    constraints.add((foreign_table, constraint))
            sql = [
                "%s %s %s %s %s %s %s %s;" % (
                    style.SQL_KEYWORD('ALTER'),
                    style.SQL_KEYWORD('TABLE'),
                    style.SQL_FIELD(self.quote_name(table)),
                    style.SQL_KEYWORD('DISABLE'),
                    style.SQL_KEYWORD('CONSTRAINT'),
                    style.SQL_FIELD(self.quote_name(constraint)),
                    style.SQL_KEYWORD('KEEP'),
                    style.SQL_KEYWORD('INDEX'),
                ) for table, constraint in constraints
            ] + [
                "%s %s %s;" % (
                    style.SQL_KEYWORD('TRUNCATE'),
                    style.SQL_KEYWORD('TABLE'),
                    style.SQL_FIELD(self.quote_name(table)),
                ) for table in truncated_tables
            ] + [
                "%s %s %s %s %s %s;" % (
                    style.SQL_KEYWORD('ALTER'),
                    style.SQL_KEYWORD('TABLE'),
                    style.SQL_FIELD(self.quote_name(table)),
                    style.SQL_KEYWORD('ENABLE'),
                    style.SQL_KEYWORD('CONSTRAINT'),
                    style.SQL_FIELD(self.quote_name(constraint)),
                ) for table, constraint in constraints
            ]
            # Since we've just deleted all the rows, running our sequence
            # ALTER code will reset the sequence to 0.
            sql.extend(self.sequence_reset_by_name_sql(style, sequences))
            return sql
        else:
            return []
def sequence_reset_by_name_sql(self, style, sequences):
sql = []
for sequence_info in sequences:
no_autofield_sequence_name = self._get_no_autofield_sequence_name(sequence_info['table'])
table = self.quote_name(sequence_info['table'])
column = self.quote_name(sequence_info['column'] or 'id')
query = self._sequence_reset_sql % {
'no_autofield_sequence_name': no_autofield_sequence_name,
'table': table,
'column': column,
'table_name': strip_quotes(table),
'column_name': strip_quotes(column),
}
sql.append(query)
return sql
    def sequence_reset_sql(self, style, model_list):
        """Return statements resetting the sequences behind every AutoField
        and auto-created M2M table of the models in `model_list`."""
        from django.db import models
        output = []
        query = self._sequence_reset_sql
        for model in model_list:
            for f in model._meta.local_fields:
                if isinstance(f, models.AutoField):
                    no_autofield_sequence_name = self._get_no_autofield_sequence_name(model._meta.db_table)
                    table = self.quote_name(model._meta.db_table)
                    column = self.quote_name(f.column)
                    output.append(query % {
                        'no_autofield_sequence_name': no_autofield_sequence_name,
                        'table': table,
                        'column': column,
                        'table_name': strip_quotes(table),
                        'column_name': strip_quotes(column),
                    })
                    # Only one AutoField is allowed per model, so don't
                    # continue to loop
                    break
            for f in model._meta.many_to_many:
                # Only auto-created through tables have their own sequence;
                # explicit through models are handled as regular models.
                if not f.remote_field.through:
                    no_autofield_sequence_name = self._get_no_autofield_sequence_name(f.m2m_db_table())
                    table = self.quote_name(f.m2m_db_table())
                    column = self.quote_name('id')
                    output.append(query % {
                        'no_autofield_sequence_name': no_autofield_sequence_name,
                        'table': table,
                        'column': column,
                        'table_name': strip_quotes(table),
                        'column_name': 'ID',
                    })
        return output
    def start_transaction_sql(self):
        # Oracle starts transactions implicitly; no BEGIN statement needed.
        return ''
def tablespace_sql(self, tablespace, inline=False):
if inline:
return "USING INDEX TABLESPACE %s" % self.quote_name(tablespace)
else:
return "TABLESPACE %s" % self.quote_name(tablespace)
    def adapt_datefield_value(self, value):
        """
        Transform a date value to an object compatible with what is expected
        by the backend driver for date columns.
        The default implementation transforms the date to text, but that is not
        necessary for Oracle.
        """
        # cx_Oracle binds datetime.date objects natively.
        return value
    def adapt_datetimefield_value(self, value):
        """
        Transform a datetime value to an object compatible with what is expected
        by the backend driver for datetime columns.
        If naive datetime is passed assumes that is in UTC. Normally Django
        models.DateTimeField makes sure that if USE_TZ is True passed datetime
        is timezone aware.

        :raises ValueError: for aware datetimes when USE_TZ is False.
        """
        if value is None:
            return None
        # Expression values are adapted by the database.
        if hasattr(value, 'resolve_expression'):
            return value
        # cx_Oracle doesn't support tz-aware datetimes
        if timezone.is_aware(value):
            if settings.USE_TZ:
                value = timezone.make_naive(value, self.connection.timezone)
            else:
                raise ValueError("Oracle backend does not support timezone-aware datetimes when USE_TZ is False.")
        return Oracle_datetime.from_datetime(value)
    def adapt_timefield_value(self, value):
        """Adapt a time value for binding: Oracle has no TIME type, so times
        are stored as timestamps on the sentinel date 1900-01-01.

        :raises ValueError: for timezone-aware times.
        """
        if value is None:
            return None
        # Expression values are adapted by the database.
        if hasattr(value, 'resolve_expression'):
            return value
        if isinstance(value, str):
            return datetime.datetime.strptime(value, '%H:%M:%S')
        # Oracle doesn't support tz-aware times
        if timezone.is_aware(value):
            raise ValueError("Oracle backend does not support timezone-aware times.")
        return Oracle_datetime(1900, 1, 1, value.hour, value.minute,
                               value.second, value.microsecond)
def combine_expression(self, connector, sub_expressions):
lhs, rhs = sub_expressions
if connector == '%%':
return 'MOD(%s)' % ','.join(sub_expressions)
elif connector == '&':
return 'BITAND(%s)' % ','.join(sub_expressions)
elif connector == '|':
return 'BITAND(-%(lhs)s-1,%(rhs)s)+%(lhs)s' % {'lhs': lhs, 'rhs': rhs}
elif connector == '<<':
return '(%(lhs)s * POWER(2, %(rhs)s))' % {'lhs': lhs, 'rhs': rhs}
elif connector == '>>':
return 'FLOOR(%(lhs)s / POWER(2, %(rhs)s))' % {'lhs': lhs, 'rhs': rhs}
elif connector == '^':
return 'POWER(%s)' % ','.join(sub_expressions)
return super().combine_expression(connector, sub_expressions)
    def _get_no_autofield_sequence_name(self, table):
        """
        Manually created sequence name to keep backward compatibility for
        AutoFields that aren't Oracle identity columns.
        """
        # Reserve 3 characters for the '_SQ' suffix within the name limit.
        name_length = self.max_name_length() - 3
        return '%s_SQ' % truncate_name(strip_quotes(table), name_length).upper()
    def _get_sequence_name(self, cursor, table, pk_name):
        """Return the identity-column sequence backing table.pk_name, or the
        legacy manually-created '<TABLE>_SQ' name when none exists."""
        cursor.execute("""
            SELECT sequence_name
            FROM user_tab_identity_cols
            WHERE table_name = UPPER(%s)
            AND column_name = UPPER(%s)""", [table, pk_name])
        row = cursor.fetchone()
        return self._get_no_autofield_sequence_name(table) if row is None else row[0]
    def bulk_insert_sql(self, fields, placeholder_rows):
        """Return the SELECT ... UNION ALL body used for multi-row inserts
        (Oracle has no multi-row VALUES syntax)."""
        query = []
        for row in placeholder_rows:
            select = []
            for i, placeholder in enumerate(row):
                # A model without any fields has fields=[None].
                if fields[i]:
                    internal_type = getattr(fields[i], 'target_field', fields[i]).get_internal_type()
                    placeholder = BulkInsertMapper.types.get(internal_type, '%s') % placeholder
                # Add columns aliases to the first select to avoid "ORA-00918:
                # column ambiguously defined" when two or more columns in the
                # first select have the same value.
                if not query:
                    placeholder = '%s col_%s' % (placeholder, i)
                select.append(placeholder)
            query.append('SELECT %s FROM DUAL' % ', '.join(select))
        # Bulk insert to tables with Oracle identity columns causes Oracle to
        # add sequence.nextval to it. Sequence.nextval cannot be used with the
        # UNION operator. To prevent incorrect SQL, move UNION to a subquery.
        return 'SELECT * FROM (%s)' % ' UNION ALL '.join(query)
def subtract_temporals(self, internal_type, lhs, rhs):
if internal_type == 'DateField':
lhs_sql, lhs_params = lhs
rhs_sql, rhs_params = rhs
return "NUMTODSINTERVAL(TO_NUMBER(%s - %s), 'DAY')" % (lhs_sql, rhs_sql), lhs_params + rhs_params
return super().subtract_temporals(internal_type, lhs, rhs)
def bulk_batch_size(self, fields, objs):
"""Oracle restricts the number of parameters in a query."""
if fields:
return self.connection.features.max_query_params // len(fields)
return len(objs)
| {
"content_hash": "2e212befdada77bc4ec89bb37e84dd12",
"timestamp": "",
"source": "github",
"line_count": 596,
"max_line_length": 114,
"avg_line_length": 43.17617449664429,
"alnum_prop": 0.582015311079159,
"repo_name": "fenginx/django",
"id": "77d330c4111f6e60dbdf764052c582f5d28ba652",
"size": "25733",
"binary": false,
"copies": "10",
"ref": "refs/heads/master",
"path": "django/db/backends/oracle/operations.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "48399"
},
{
"name": "HTML",
"bytes": "175296"
},
{
"name": "JavaScript",
"bytes": "238848"
},
{
"name": "Makefile",
"bytes": "125"
},
{
"name": "Python",
"bytes": "11137863"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "130"
}
],
"symlink_target": ""
} |
from make_apex_cubes import all_lines
from spectral_cube import SpectralCube,BooleanArrayMask
import pyspeckit
from astropy.io import fits
from numpy.lib.stride_tricks import as_strided
from paths import mpath
import numpy as np
import time
from astropy import log
from astropy import constants
from astropy import units as u
# Load the merged high-frequency APEX H2CO cube plus its 2D noise map and
# coverage (hits) map.
cube_merge_high = SpectralCube.read(mpath('APEX_H2CO_merge_high_plait_all.fits'))
noise = fits.getdata(mpath('APEX_H2CO_merge_high_plait_all_noise.fits'))
nhits = fits.getdata(mpath('APEX_H2CO_merge_high_nhits.fits'))
# Blank out poorly-covered pixels (fewer than 5 hits).
noise[nhits<5] = np.nan
# Broadcast the 2D noise map along the spectral axis WITHOUT copying: a zero
# stride on axis 0 makes every channel view the same 2D array.
noise_cube = as_strided(noise, shape=cube_merge_high.shape,
                        strides=(0,)+noise.strides)
noise_spcube = SpectralCube(data=noise_cube, wcs=cube_merge_high.wcs)
# Same again for the spatially smoothed version of the cube.
cube_merge_high_sm = SpectralCube.read(mpath('APEX_H2CO_merge_high_plait_all_smooth.fits'))
noise_sm = fits.getdata(mpath('APEX_H2CO_merge_high_plait_all_smooth_noise.fits'))
noise_cube_sm = as_strided(noise_sm, shape=cube_merge_high_sm.shape,
                           strides=(0,)+noise_sm.strides)
noise_spcube_sm = SpectralCube(data=noise_cube_sm, wcs=cube_merge_high_sm.wcs)
# Create a cutout of the cube covering the H2CO lines
# it's barely worth it; cuts off 10% of pixels
f1 = all_lines['H2CO_303_202']*u.GHz
f2 = all_lines['H2CO_321_220']*u.GHz
# Pad the slab by 150 km/s below f1 and 100 km/s above f2 (Doppler factors
# applied to the rest frequencies).
h2co_cube_merge_high = cube_merge_high.spectral_slab(f1*(1-(150*u.km/u.s/constants.c)),
                                                     f2*(1+(100*u.km/u.s/constants.c)))
h2co_noise_cube = noise_spcube.spectral_slab(f1*(1-(150*u.km/u.s/constants.c)),
                                             f2*(1+(100*u.km/u.s/constants.c)))
h2co_cube_merge_high_sm = cube_merge_high_sm.spectral_slab(f1*(1-(150*u.km/u.s/constants.c)),
                                                           f2*(1+(100*u.km/u.s/constants.c)))
h2co_noise_cube_sm = noise_spcube_sm.spectral_slab(f1*(1-(150*u.km/u.s/constants.c)),
                                                   f2*(1+(100*u.km/u.s/constants.c)))
# Pyspeckit cube made from spectralcube
pcube_merge_high = pyspeckit.Cube(cube=h2co_cube_merge_high._data,
                                  errorcube=h2co_noise_cube._data,
                                  header=h2co_cube_merge_high.header,
                                  xarr=h2co_cube_merge_high.spectral_axis,
                                  )
# Reference frequency in GHz -- presumably the H2CO 3_03-2_02 rest frequency;
# confirm against all_lines['H2CO_303_202'].
pcube_merge_high.xarr.refX = 218.22219
pcube_merge_high.xarr.refX_units = 'GHz'
pcube_merge_high_sm = pyspeckit.Cube(cube=h2co_cube_merge_high_sm._data,
                                     errorcube=h2co_noise_cube_sm._data,
                                     header=h2co_cube_merge_high_sm.header,
                                     xarr=h2co_cube_merge_high_sm.spectral_axis,
                                     )
pcube_merge_high_sm.xarr.refX = 218.22219
pcube_merge_high_sm.xarr.refX_units = 'GHz'
| {
"content_hash": "8eeea2fc88c24a86fe84043b07047c33",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 93,
"avg_line_length": 50.07017543859649,
"alnum_prop": 0.6121233356692362,
"repo_name": "adamginsburg/APEX_CMZ_H2CO",
"id": "480d50c259cb889f9e4a1616347fcc28ee651e6a",
"size": "2854",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "analysis/full_cubes.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "840749"
},
{
"name": "Shell",
"bytes": "3036"
},
{
"name": "TeX",
"bytes": "133946"
}
],
"symlink_target": ""
} |
from builtins import object
import json
import os
from PyAnalysisTools.base import _logger
from PyAnalysisTools.base.Singleton import Singleton
from PyAnalysisTools.base.ShellUtils import copy
from future.utils import with_metaclass
class JSONHandle(with_metaclass(Singleton, object)):
    """Singleton accumulating run arguments into a config.json file.

    In "copy" mode, dump() duplicates a previously loaded config file
    instead of serializing the collected data.
    """

    def __init__(self, *args, **kwargs):
        kwargs.setdefault('copy', False)
        self.data = {}
        self.file_name = os.path.join(args[0], "config.json")
        self.copy = kwargs['copy']
        self.input_file = None

    def add_args(self, **kwargs):
        """Merge all keyword arguments into the stored configuration."""
        self.data.update(kwargs)

    def reset_path(self, path):
        """Point the handle at a different output directory."""
        self.file_name = os.path.join(path, "config.json")

    def dump(self):
        """Write the configuration file (or copy the loaded one)."""
        if not self.copy:
            with open(self.file_name, 'w') as outfile:
                json.dump(self.data, outfile)
            return
        if self.input_file is None:
            _logger.warning('Try copying json file, but no input file provided')
            return
        copy(self.input_file, self.file_name)

    def load(self):
        """Read the config file; later dump() calls will copy it verbatim."""
        self.copy = True
        self.input_file = self.file_name
        with open(self.file_name, 'r') as source:
            return json.load(source)
| {
"content_hash": "6d758e4da67432ccf3b60c5a45148e74",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 84,
"avg_line_length": 32.35897435897436,
"alnum_prop": 0.6164817749603804,
"repo_name": "morgenst/PyAnalysisTools",
"id": "1c7fe854fe2e4b2bf071972c7ed2789bc58423e6",
"size": "1262",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "PyAnalysisTools/base/JSONHandle.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "2887"
},
{
"name": "Dockerfile",
"bytes": "319"
},
{
"name": "Python",
"bytes": "1156688"
},
{
"name": "Shell",
"bytes": "2314"
}
],
"symlink_target": ""
} |
from __future__ import print_function, unicode_literals, absolute_import
import os
import os.path
import shutil
import errno
import logging
logger = logging.getLogger(__name__)
# ANSI escape sequences keyed by human-readable color name. The "bg*"
# entries set the background; "reset" restores the terminal default.
COLOR_CODES = {
    "reset": "\033[0m",
    "black": "\033[1;30m",
    "red": "\033[1;31m",
    "green": "\033[1;32m",
    "yellow": "\033[1;33m",
    "blue": "\033[1;34m",
    "magenta": "\033[1;35m",
    "cyan": "\033[1;36m",
    "white": "\033[1;37m",
    "bgred": "\033[1;41m",
    "bggrey": "\033[1;100m",
}


def color_msg(color, msg):
    """Wrap *msg* in the ANSI sequence for *color*, then reset."""
    return "{0}{1}{2}".format(COLOR_CODES[color], msg, COLOR_CODES["reset"])
def check_extension(filename):
    """Return True when *filename* carries a Markdown extension.

    The `fnmatch` module can also get the suffix:
        patterns = ["*.md", "*.mkd", "*.markdown"]
        fnmatch.filter(files, pattern)
    """
    _, extension = os.path.splitext(filename)
    return extension in (".md", ".mkd", ".mdown", ".markdown")
def copytree(src, dst, symlinks=False, ignore=None):
    """Recursively copy *src* into *dst*, creating *dst* if needed.

    NOTE(review): `symlinks` and `ignore` are forwarded to recursive calls
    but never acted upon here -- confirm whether that is intended.
    """
    if not os.path.exists(dst):
        os.makedirs(dst)
    for entry in os.listdir(src):
        src_path = os.path.join(src, entry)
        dst_path = os.path.join(dst, entry)
        if os.path.isdir(src_path):
            copytree(src_path, dst_path, symlinks, ignore)
        else:
            shutil.copy2(src_path, dst_path)
def emptytree(directory, exclude_list=None):
    """Delete all the files and dirs under specified directory.

    Entries whose basename appears in *exclude_list* are kept.

    Fixed: the original called the Python-2-only builtin ``unicode``, which
    raises NameError on Python 3; a bytes path is now decoded portably.
    Also uses the module-level ``logger`` consistently (one call previously
    went through ``logging.debug``).
    """
    if isinstance(directory, bytes):
        # Equivalent of the old `unicode(directory, 'utf-8')`, but portable.
        directory = directory.decode('utf-8')
    if not exclude_list:
        exclude_list = []
    for p in os.listdir(directory):
        if p in exclude_list:
            continue
        fp = os.path.join(directory, p)
        if os.path.isdir(fp):
            try:
                shutil.rmtree(fp)
                logger.debug("Delete directory %s", fp)
            except OSError as e:
                # Let logging stringify the exception lazily.
                logger.error("Unable to delete directory %s: %s", fp, e)
        elif os.path.isfile(fp):
            try:
                logger.debug("Delete file %s", fp)
                os.remove(fp)
            except OSError as e:
                logger.error("Unable to delete file %s: %s", fp, e)
        else:
            logger.error("Unable to delete %s, unknown filetype", fp)
def mkdir_p(path):
    """Make parent directories as needed, like `mkdir -p`"""
    try:
        os.makedirs(path)
    except OSError as exc:  # Python >2.5
        # Re-raise unless the path already exists as a directory.
        if not (exc.errno == errno.EEXIST and os.path.isdir(path)):
            raise
def listdir_nohidden(path):
    """Yield the non-hidden entries (no leading '.') under *path*.

    Fixed: the original called the Python-2-only builtin ``unicode``, which
    raises NameError on Python 3 whenever os.listdir() returns bytes
    entries (i.e. when *path* is a bytes path).
    """
    for entry in os.listdir(path):
        if isinstance(entry, bytes):
            # Portable replacement for the old `unicode(f, "utf-8")`.
            entry = entry.decode("utf-8")
        if not entry.startswith('.'):
            yield entry
if __name__ == "__main__":
    # Manual demo: print a sample message in every supported color.
    print(color_msg("black", "Black"))
    print(color_msg("red", "Red"))
    print(color_msg("green", "Green"))
    print(color_msg("yellow", "Yellow"))
    print(color_msg("blue", "Blue"))
    print(color_msg("magenta", "Magenta"))
    print(color_msg("cyan", "Cyan"))
    print(color_msg("white", "White"))
    print(color_msg("bgred", "Background Red"))
    print(color_msg("bggrey", "Background Grey"))
| {
"content_hash": "35b0ec0697f5521d74d8501d09e4fef0",
"timestamp": "",
"source": "github",
"line_count": 114,
"max_line_length": 76,
"avg_line_length": 28.780701754385966,
"alnum_prop": 0.5650716245047241,
"repo_name": "zhaochunqi/simiki",
"id": "fd34cdb6607dcac76834e3e8666cf0185293fc50",
"size": "3327",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "simiki/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "39753"
},
{
"name": "HTML",
"bytes": "9694"
},
{
"name": "Makefile",
"bytes": "493"
},
{
"name": "Python",
"bytes": "53815"
},
{
"name": "Shell",
"bytes": "598"
}
],
"symlink_target": ""
} |
from oslo_policy import policy as common_policy
from keystone.common import policies
import keystone.conf
from keystone import exception
CONF = keystone.conf.CONF
_ENFORCER = None
def reset():
    """Discard the global enforcer so the next init() rebuilds it."""
    global _ENFORCER
    _ENFORCER = None
def init():
    """Lazily create the global Enforcer and register the default rules."""
    global _ENFORCER
    if not _ENFORCER:
        _ENFORCER = common_policy.Enforcer(CONF)
        register_rules(_ENFORCER)
def enforce(credentials, action, target, do_raise=True):
    """Verify that the action is valid on the target in this context.

    :param credentials: user credentials
    :param action: string representing the action to be checked, which should
                   be colon separated for clarity (e.g. identity:list_users).
    :param target: dictionary representing the object of the action; for
                   object creation this should describe the location of the
                   object, e.g. {'project_id': object.project_id}
    :param do_raise: when True, a failed check raises instead of returning
                     False.
    :raises keystone.exception.Forbidden: If verification fails.
    """
    init()
    kwargs = {}
    if do_raise:
        # Have oslo.policy raise keystone's own Forbidden error directly.
        kwargs = {
            'exc': exception.ForbiddenAction,
            'action': action,
            'do_raise': do_raise,
        }
    return _ENFORCER.enforce(action, target, credentials, **kwargs)
def register_rules(enforcer):
    """Register keystone's default policy rules with *enforcer*."""
    enforcer.register_defaults(policies.list_rules())
| {
"content_hash": "43f162ab9e5211448a4fd430f2b644b5",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 78,
"avg_line_length": 26.581818181818182,
"alnum_prop": 0.6757865937072504,
"repo_name": "rajalokan/keystone",
"id": "4ec0a0f996bae2f7c95d07c52d779b68424ea20c",
"size": "2008",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "keystone/common/policy.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "665"
},
{
"name": "Python",
"bytes": "3865941"
},
{
"name": "Shell",
"bytes": "4861"
}
],
"symlink_target": ""
} |
"""Downloads and processes the Brown Corpus (http://www.nltk.org/nltk_data)."""
import os
import random
import string
from xml.etree import ElementTree
import lingvo.compat as tf
# Command-line flag: where to download and write the processed corpus.
tf.flags.DEFINE_string("outdir", "/tmp/punctuator_data",
                       "The output directory.")
FLAGS = tf.flags.FLAGS
def main(_):
  """Download the Brown Corpus (TEI XML), rebuild plain-text sentences with
  punctuation/quoting restored, filter malformed ones, and write shuffled
  95%/5% train/test splits to FLAGS.outdir."""
  tf.logging.set_verbosity(tf.logging.INFO)
  basename = "brown_tei"
  fname = basename + ".zip"
  url = ("https://raw.githubusercontent.com/nltk/nltk_data/" +
         "gh-pages/packages/corpora/" + fname)
  sha256 = "335bec1ea6362751d5d5c46970137ebb01c80bf7d7d75558787729d275e0a687"
  tf.keras.utils.get_file(
      fname, url, file_hash=sha256, cache_subdir=FLAGS.outdir, extract=True)
  tf.logging.info("\nDownload completed. Preprocessing...")
  with open(os.path.join(FLAGS.outdir, basename, "Corpus.xml"), "r") as xml:
    # Strip the TEI default namespace so findall() paths stay simple.
    root = ElementTree.fromstring(xml.read().replace(
        'xmlns="http://www.tei-c.org/ns/1.0"', ""))
  sentences = []
  for sentence in root.findall("./TEI/text/body/p/s"):
    # Example input sentence:
    # <s n="1"><w type="AT">The</w> <w subtype="TL" type="NP">Fulton</w>
    # <w subtype="TL" type="NN">County</w> <w subtype="TL" type="JJ">Grand</w>
    # <w subtype="TL" type="NN">Jury</w> <w type="VBD">said</w>
    # <w type="NR">Friday</w> <w type="AT">an</w> <w type="NN">investigation</w>
    # <w type="IN">of</w> <w type="NPg">Atlanta's</w> <w type="JJ">recent</w>
    # <w type="NN">primary</w> <w type="NN">election</w>
    # <w type="VBD">produced</w> <c type="pct">``</c> <w type="AT">no</w>
    # <w type="NN">evidence</w> <c type="pct">''</c> <w type="CS">that</w>
    # <w type="DTI">any</w> <w type="NNS">irregularities</w>
    # <w type="VBD">took</w> <w type="NN">place</w> <c type="pct">.</c> </s>
    # Example output text:
    # The Fulton County Grand Jury said Friday an investigation of Atlanta's
    # recent primary election produced "no evidence" that any irregularities
    # took place.
    text = ""
    # prepend_space tracks whether the NEXT token needs a leading space
    # (suppressed after opening quotes/brackets, forced after words).
    prepend_space = False
    for child in sentence:
      if child.tag == "w":
        if prepend_space:
          text += " "
        text += child.text
        prepend_space = True
      elif child.tag == "c":
        if child.text == "``":
          if prepend_space:
            text += " "
          text += '"'
          prepend_space = False
        elif child.text == "''":
          text += '"'
          prepend_space = True
        elif child.text == "'":
          # Opening vs closing single quote is inferred from spacing state.
          if prepend_space:
            text += " '"
            prepend_space = False
          else:
            text += "'"
            prepend_space = True
        elif child.text == "(" or child.text == "[":
          if prepend_space:
            text += " "
          text += child.text
          prepend_space = False
        elif child.text == "-" or child.text == "--":
          if prepend_space:
            text += " "
          text += child.text
          prepend_space = True
        else:
          text += child.text
          prepend_space = True
    # Collapse doubled punctuation artifacts from the corpus markup.
    text = text.replace("!!", "!").replace("??", "?").replace("--", "-")
    text = text.replace("**", "*").replace(";;", ";").replace("::", ":")
    text = text.replace(",,", ",")
    # Filter out bad sentences.
    if not set(text) & set(string.ascii_letters):
      # No letters.
      continue
    if text.count('"') % 2 != 0:
      # Uneven number of quotes.
      continue
    if text.count("(") != text.count(")") or text.count("[") != text.count("]"):
      # Unbalanced parenthesis.
      continue
    if (text[0] == '"' and text[-1] == '"' or
        text[0] == "(" and text[-1] == ")" or
        text[0] == "[" and text[-1] == "]"):
      text = text[1:-1]
    if text[0] not in string.ascii_letters and text[0] not in string.digits:
      # Doesn't start with a letter or number.
      continue
    # Capitalize the first character.
    text = text[:1].upper() + text[1:]
    sentences.append(text)
  # Deduplicate, then shuffle deterministically for a reproducible split.
  sentences = sorted(set(sentences))
  random.seed(1234)
  random.shuffle(sentences)
  with open(os.path.join(FLAGS.outdir, "train.txt"), "w") as f:
    for line in sentences[:int(len(sentences) * 0.95)]:
      f.write("%s\n" % line)
  with open(os.path.join(FLAGS.outdir, "test.txt"), "w") as f:
    for line in sentences[int(len(sentences) * 0.95):]:
      f.write("%s\n" % line)
  tf.logging.info("All done.")
if __name__ == "__main__":
  # tf.app.run parses flags before invoking main.
  tf.app.run(main)
| {
"content_hash": "717bcfccbeff38638299c1a9c51ad17c",
"timestamp": "",
"source": "github",
"line_count": 125,
"max_line_length": 80,
"avg_line_length": 34.936,
"alnum_prop": 0.552553240210671,
"repo_name": "tensorflow/lingvo",
"id": "1a7f47c053c76af949c659b5b07d4de261f7f13c",
"size": "5056",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lingvo/tasks/punctuator/tools/download_brown_corpus.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "5163"
},
{
"name": "C++",
"bytes": "556344"
},
{
"name": "Dockerfile",
"bytes": "8484"
},
{
"name": "Jupyter Notebook",
"bytes": "36721"
},
{
"name": "Python",
"bytes": "9574124"
},
{
"name": "Shell",
"bytes": "50408"
},
{
"name": "Starlark",
"bytes": "182688"
},
{
"name": "TeX",
"bytes": "37275"
}
],
"symlink_target": ""
} |
"""
<description>
"""
import pytest as pt
import teetool as tt
# @pt.mark.xfail(reason="out of the blue stopped working on Travis. Mayavi/VTK *sigh*")
def test_visual_3d():
    """Smoke-test that the Mayavi-based 3-D visualiser can produce figures."""
    # Construct a small world at a deliberately coarse resolution.
    world = tt.World(name="Example 3D", ndim=3, resolution=[10, 10, 10])
    # extreme reduced resolution
    # world.setResolution(xstep=3, ystep=3, zstep=3)

    # Populate it with two toy trajectory clusters.
    list_icluster = [0, 1]
    for mtype in list_icluster:
        cluster_name = "toy {0}".format(mtype)
        cluster_data = tt.helpers.get_trajectories(mtype, ndim=3, ntraj=20)
        world.addCluster(cluster_data, cluster_name)

    # Fit a resampling model with 10 Gaussians to all clusters.
    world.buildModel({"model_type": "resampling", "ngaus": 10})

    # NOTE: this part is Python 2.7 [ONLY] due to Mayavi / VTK dependencies.
    visual = tt.visual_3d.Visual_3d(world)

    # Exercise each plotting entry point once.
    visual.plotTrajectories(list_icluster, ntraj=3)
    visual.plotSamples(list_icluster, ntraj=3)
    visual.plotTrajectoriesPoints(x1=0.0, list_icluster=None, ntraj=3)
    visual.plotLogLikelihood()
    visual.plotComplexityMap()
    visual.plotTube()
    visual.plotTubeDifference([0, 1])
    visual.plotOutline()

    visual.close()
| {
"content_hash": "cebbece9a2f5ac00a3f928e9e20caf6d",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 88,
"avg_line_length": 27.155172413793103,
"alnum_prop": 0.6374603174603175,
"repo_name": "WillemEerland/teetool",
"id": "2f2401b8ea2c86835752ca86d47818e880d9ccc5",
"size": "1575",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_visual_3d.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "1822884"
},
{
"name": "Makefile",
"bytes": "191"
},
{
"name": "Python",
"bytes": "140561"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
import wagtail.wagtailcore.fields
class Migration(migrations.Migration):
    """Adds English-language body/title fields to the StaticPage model."""

    dependencies = [
        ('cms_pages', '0007_auto_20151025_0021'),
    ]

    operations = [
        migrations.AddField(
            model_name='staticpage',
            name='body_en',
            # Verbose name is the Unicode-escaped label "[EN] Текст сторінки"
            # (Ukrainian for "[EN] page text").
            field=wagtail.wagtailcore.fields.RichTextField(default='', verbose_name='[EN] \u0422\u0435\u043a\u0441\u0442 \u0441\u0442\u043e\u0440\u0456\u043d\u043a\u0438'),
        ),
        migrations.AddField(
            model_name='staticpage',
            name='title_en',
            field=models.CharField(default='', max_length=255),
        ),
    ]
| {
"content_hash": "0b7ac9c6d57c0a521f642baa184e1cb6",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 172,
"avg_line_length": 29.541666666666668,
"alnum_prop": 0.6191819464033851,
"repo_name": "dchaplinsky/pep.org.ua",
"id": "297102a89edd19e8911f8e5b1a7b9392b7b4c005",
"size": "733",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pepdb/cms_pages/migrations/0008_auto_20151025_0051.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "311924"
},
{
"name": "HTML",
"bytes": "387137"
},
{
"name": "JavaScript",
"bytes": "601123"
},
{
"name": "Python",
"bytes": "1272457"
}
],
"symlink_target": ""
} |
"""Graphviz visualizations of Program Graphs."""
from absl import logging # pylint: disable=unused-import
import pygraphviz
from python_graphs import program_graph_dataclasses as pb
import six
def to_graphviz(graph):
    """Creates a graphviz representation of a ProgramGraph.

    Args:
      graph: A ProgramGraph object to visualize.
    Returns:
      A pygraphviz object representing the ProgramGraph.
    """
    viz = pygraphviz.AGraph(strict=False, directed=True)

    # One graphviz node per program-graph node; nodes without an AST type
    # are drawn as points instead of labelled boxes.
    node_type_colors = {}
    for node in graph.nodes.values():
        attrs = {}
        if node.ast_type:
            attrs['label'] = six.ensure_str(node.ast_type, 'utf-8')
        else:
            attrs['shape'] = 'point'
        if node.node_type in node_type_colors:
            attrs['color'] = node_type_colors[node.node_type]
            attrs['colorscheme'] = 'svg'
        viz.add_node(node.id, **attrs)

    # One graphviz edge per program-graph edge; last-read/last-write
    # (data-flow) edges are highlighted in red.
    edge_colors = {
        pb.EdgeType.LAST_READ: 'red',
        pb.EdgeType.LAST_WRITE: 'red',
    }
    for edge in graph.edges:
        attrs = {'label': edge.type.name}
        if edge.type in edge_colors:
            attrs['color'] = edge_colors[edge.type]
            attrs['colorscheme'] = 'svg'
        viz.add_edge(edge.id1, edge.id2, **attrs)

    return viz
def render(graph, path='/tmp/graph.png'):
    """Render a ProgramGraph to an image file using graphviz's 'dot' layout."""
    g = to_graphviz(graph)
    g.draw(path, prog='dot')
| {
"content_hash": "b8847023e711000f07159625ff79a1d4",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 66,
"avg_line_length": 28.659574468085108,
"alnum_prop": 0.6547884187082406,
"repo_name": "google-research/python-graphs",
"id": "046bf176ac227ba73753a40308d8214afbf9e3e1",
"size": "1923",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "python_graphs/program_graph_graphviz.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "208298"
}
],
"symlink_target": ""
} |
import sys
import os
import argparse
from ete2 import Tree
try:
print "importing image"
import Image
import ImageFont
import ImageDraw
import ImageMath
except ImportError:
print "importing image from pil"
from PIL import Image
from PIL import ImageFont
from PIL import ImageDraw
from PIL import ImageMath
import math
import tempfile
#ls trees/*.tree | xargs -I{} -P 20 bash -c 'echo {}; ./newick_to_png.py --input {} --inlist pimp_problems.lst; ./newick_to_png.py {} cherry.lst;'
# Module-level configuration.
print_ascii = False  # when True, also dump an ASCII rendering of each tree
transparent_color = (255, 255, 255)  # background colour to make transparent (white)
transparent_thresh = 5  # squared-RGB-distance tolerance for the transparency match
frame_prop = 0.05  # frame around the tree, as a fraction of the image width
TMP_DIR = '/var/run/shm'  # shared-memory tmpfs used for scratch files
#http://stackoverflow.com/questions/765736/using-pil-to-make-all-white-pixels-transparent
def distance2(a, b):
    """Squared Euclidean distance between the first three (RGB) components."""
    dr = a[0] - b[0]
    dg = a[1] - b[1]
    db = a[2] - b[2]
    return dr * dr + dg * dg + db * db
def makeColorTransparent(image, color, thresh2=0):
    """Return `image` (as RGBA) with pixels close to `color` made transparent.

    A pixel becomes fully transparent when its squared RGB distance to
    `color` is within `thresh2`; other pixels keep their existing alpha.
    Based on http://stackoverflow.com/questions/765736.
    """
    image = image.convert("RGBA")
    red, green, blue, alpha = image.split()
    # Per-pixel: the sign bit of (thresh2 - distance) is turned into a 0/1
    # multiplier for the original alpha channel, zeroing alpha for pixels
    # near the target colour.
    image.putalpha(ImageMath.eval("""convert(((((t - d(c, (r, g, b))) >> 31) + 1) ^ 1) * a, 'L')""",
                                  t=thresh2, d=distance2, c=color, r=red, g=green, b=blue, a=alpha))
    return image
def main():
    """Command-line entry point: parse options and render the input tree."""
    parser = argparse.ArgumentParser(description='Convert Newick file to PNG.')
    parser.add_argument('--infile' , dest='infile' , default=None , action='store' , nargs='?', required=True, type=str , help='Input Newick file' )
    parser.add_argument('--inlist' , dest='inlist' , default=None , action='store' , nargs='?', type=str , help='Input rename list' )
    parser.add_argument('--caption' , dest='caption' , default=None , action='store' , nargs='?', type=str , help='Image caption' )
    parser.add_argument('--prefix' , dest='prefix' , default=None , action='store' , nargs='?', type=str , help='File prefix' )
    parser.add_argument('--output' , dest='output' , default=None , action='store' , nargs='?', type=str , help='Output name' )
    parser.add_argument('--extension' , dest='extension' , default="png" , action='store' , nargs='?', type=str , help='Image extension' )
    parser.add_argument('--dpi' , dest='dpi' , default=1200 , action='store' , nargs='?', type=int , help='Image DPI' )
    parser.add_argument('--fontsize' , dest='fontsize' , default=14 , action='store' , nargs='?', type=int , help='Font size' )
    parser.add_argument('--no_ladderize' , dest='ladderize' , action='store_false', help="Don't ladderize image" )
    parser.add_argument('--no_addcaption', dest='addcaption' , action='store_false', help='Do not add caption to image')
    parser.add_argument('--show_distance', dest='show_distance', action='store_true' , help='Plot with distance')

    options = parser.parse_args()

    print options

    # argparse already enforces required=True; this is a belt-and-braces check.
    if options.infile is None:
        print "No input file given"
        parser.print_help()
        sys.exit(1)

    run(options.infile,
        inlist = options.inlist ,
        capt = options.caption ,
        ofp = options.prefix ,
        output = options.output ,
        ladderize = options.ladderize ,
        addcaption = options.addcaption ,
        extension = options.extension ,
        dpi = options.dpi ,
        show_distance = options.show_distance,
        fontsize = options.fontsize)
def run(infile, inlist=None, capt=None, ofp=None, output=None, ladderize=True, addcaption=True, extension="png", dpi=1200, fontsize=14, show_distance=False):
    """Public wrapper around add_file: render `infile` to an image file."""
    add_file(infile, inlist=inlist, capt=capt, ofp=ofp, output=output, ladderize=ladderize, addcaption=addcaption, extension=extension, dpi=dpi, fontsize=fontsize, show_distance=show_distance)
def add_file(infile, inlist=None, capt=None, ofp=None, output=None, ladderize=True, addcaption=True, extension="png", dpi=1200, fontsize=14, show_distance=False):
    """Read a Newick tree from `infile`, optionally prune it, and render an image.

    Returns the path of the written image file. Exits the process when
    `infile` does not exist.
    """
    if not os.path.exists( infile ):
        print "input file %s does not exists" % infile
        sys.exit( 1 )

    print "reading input file %s" % infile

    # Default caption and output name are derived from the input path
    # (slashes replaced by underscores for the caption).
    caption = infile
    caption = caption.replace("/", "_")
    if capt:
        caption = capt

    outfile = infile + "." + extension
    if ofp:
        outfile = ofp + "." + extension

    if show_distance:
        # ete format 0: flexible Newick with branch distances -- TODO confirm
        tree = Tree(infile, format=0)
    else:
        #tree = Tree(infile, format=2)
        #tree = Tree(infile, format=5)
        # ete format 9: topology with leaf names only (no distances)
        tree = Tree(infile, format=9)
    #tree = Tree(open(infile, 'r').read())
    #root = tree.get_tree_root()
    #print tree.children
    #print tree.get_children()
    #print root.get_children()

    if inlist is not None:
        # Restrict the tree to the taxa listed in `inlist`; caption and
        # output file name are extended to reflect the list used.
        prune(inlist, tree, ladderize=ladderize)
        caption = infile + "_" + inlist
        if capt:
            caption = capt + "_" + inlist
        caption = caption.replace("/", "_")
        outfile = infile + "_" + inlist + "." + extension
        if ofp:
            outfile = ofp + "_" + inlist + "." + extension
    elif ladderize:
        tree.ladderize()

    # An explicit output name overrides everything derived above.
    if output:
        outfile = output

    makeimage(infile, outfile, caption, tree, addcaption=addcaption, dpi=dpi, fontsize=fontsize)

    return outfile
def add_seq(inseq, inlist=None, capt=None, ladderize=True, addcaption=False, extension="png", dpi=1200, fontsize=14):
    """Render a Newick string (not a file) and return the image bytes.

    The string is written to a temp file in TMP_DIR, rendered via add_file,
    and the resulting image is read back and both temp files removed.
    Returns None when rendering failed.
    """
    fnm = tempfile.mkstemp(suffix=".tree", prefix=os.path.basename(sys.argv[0]) + '_tmp_', text=True, dir=TMP_DIR)[1]
    print "saving tree", fnm
    with open(fnm, 'w') as fhi:
        fhi.write(inseq)
    ofn = add_file(fnm, inlist=inlist, capt=capt, ladderize=ladderize, addcaption=addcaption, extension=extension, dpi=dpi, fontsize=fontsize)
    data = None
    print "opening png", ofn
    if os.path.exists( ofn ):
        with open(ofn, 'rb') as fho:
            data = fho.read()
        os.remove(ofn)
    else:
        print "tree image %s does not exists" % ofn
    os.remove(fnm)
    return data
def prune(inlist, tree, ladderize=True):
    """Prune `tree` in place to only the leaf names listed in file `inlist`.

    Blank lines and lines starting with '#' in the list file are skipped.
    Returns the (modified) tree.
    """
    print "pruning", inlist
    reqlist = []
    with open( inlist, 'r' ) as fhd:
        for line in fhd:
            line = line.strip()
            if len( line ) == 0:
                continue
            if line[0] == "#":
                continue
            # NOTE(review): the message says "excluding" but Tree.prune()
            # KEEPS the listed leaves -- the wording is misleading.
            print "excluding %s" % line
            reqlist.append( line )
    print reqlist
    # preserve_branch_length keeps cumulative distances correct after pruning.
    tree.prune( reqlist, preserve_branch_length=True )
    if ladderize:
        tree.ladderize()
    return tree
def makeimage(infile, outfile, caption, tree, addcaption=True, dpi=1200, fontsize=14):
    """Render `tree` to `outfile`, make its background transparent, add a
    horizontal frame and (optionally) a wrapped caption underneath."""
    if os.path.exists( outfile ):
        os.remove( outfile )
    #print tree.get_midpoint_outgroup()
    #print tree.get_sisters()
    #print tree.get_tree_root()
    #root = tree.get_tree_root()
    #tree.delete( root )
    #print "root", root
    #root.unroot()
    if print_ascii:
        print "redering tree", infile, "to", outfile,'in ASCII'
        print tree.get_ascii()
        print tree.write()

    print "redering tree", infile, "to", outfile
    tree.render( outfile, dpi=dpi )

    if not os.path.exists( outfile ):
        print "redering tree", infile, "to", outfile, 'FAILED'
        return None

    orig = Image.open( outfile )
    # Make the (white) background transparent before compositing.
    orig = makeColorTransparent(orig, transparent_color, thresh2=transparent_thresh);
    (orig_w, orig_h) = orig.size
    orig_dpi = orig.info["dpi"]
    print "ORIG width",orig_w,"height",orig_h,"dpi",orig_dpi

    # Rough number of caption characters that fit on one line at this
    # font size (1.6 approximates the height/width ratio of the font).
    charsperline = int( math.floor( orig_w/math.ceil(fontsize/1.6) ) )
    textlines = []
    if addcaption:
        print "charsperline", charsperline
        print "caption",caption
        # Hard-wrap the caption into chunks of `charsperline` characters.
        for pos in xrange(0, len(caption), charsperline):
            #print "pos",pos,"end", pos+charsperline, caption[pos: pos+charsperline]
            textlines.append( caption[pos: pos+charsperline] )

    numlines = len(textlines)
    print "numlines", numlines
    htext = (fontsize*numlines)
    himgtext = orig_h + htext

    # Add a horizontal frame of frame_prop * width on each side.
    frame_w = int( orig_w * frame_prop )
    orig_w_frame = orig_w + ( 2 * frame_w )

    out = Image.new( 'RGBA', (orig_w_frame, himgtext), (255,255,255) )
    out.info["dpi"] = (dpi, dpi)

    # Start from a fully transparent canvas, then paste the tree image.
    maski = Image.new('L', (orig_w_frame, himgtext), color=255)
    mask = ImageDraw.Draw( maski )
    mask.rectangle((0, 0, orig_w_frame, himgtext), fill=0)
    out.putalpha( maski )
    out.paste( orig, ( frame_w, 0 ) )

    if addcaption:
        fontname = 'Consolas.ttf'
        # The caption is only drawn when the font file is present in the
        # working directory.
        if os.path.exists( fontname ):
            # Draw at 20x size, then downscale for anti-aliased text.
            zoomout = 20
            font = ImageFont.truetype(fontname, fontsize*zoomout)
            texti = Image.new("RGBA", (orig_w*zoomout, htext*zoomout))
            text = ImageDraw.Draw( texti )
            for linepos in xrange( len(textlines) ):
                hline = linepos * fontsize
                line = textlines[linepos]
                text.text( (fontsize*zoomout, hline*zoomout), line, (0,0,0), font=font)
            texti = texti.resize((orig_w,htext), Image.ANTIALIAS)
            out.paste( texti, ( frame_w, orig_h ) )

    (out_w, out_h) = out.size
    out_dpi = out.info["dpi"]
    print "OUT width", out_w, "height", out_h, "dpi", out_dpi

    out.save( outfile, optimize=True, dpi=(dpi,dpi) )
    print "saved to %s" % outfile
    return
# Script entry point.
if __name__ == '__main__':
    main()
| {
"content_hash": "d26fe9e89fec6fce12bfd863eddcd150",
"timestamp": "",
"source": "github",
"line_count": 286,
"max_line_length": 192,
"avg_line_length": 34.80419580419581,
"alnum_prop": 0.5577657223226844,
"repo_name": "sauloal/introgressionbrowser",
"id": "afe34af472aa41cabcb06044cfac44000b9dcf42",
"size": "9973",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vcfmerger/newick_to_png.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "25"
},
{
"name": "C",
"bytes": "13801"
},
{
"name": "C++",
"bytes": "41714"
},
{
"name": "CSS",
"bytes": "12352"
},
{
"name": "HTML",
"bytes": "158965"
},
{
"name": "JavaScript",
"bytes": "486509"
},
{
"name": "Makefile",
"bytes": "3295"
},
{
"name": "Python",
"bytes": "553887"
},
{
"name": "R",
"bytes": "3449"
},
{
"name": "Ruby",
"bytes": "9178"
},
{
"name": "Shell",
"bytes": "19481"
}
],
"symlink_target": ""
} |
"""Testing utilities."""
# Copyright (c) 2011, 2012
# Authors: Pietro Berkes,
# Andreas Muller
# Mathieu Blondel
# Olivier Grisel
# Arnaud Joly
# Denis Engemann
# License: BSD 3 clause
import os
import inspect
import pkgutil
import warnings
import sys
import re
import platform
import scipy as sp
import scipy.io
from functools import wraps
try:
# Python 2
from urllib2 import urlopen
from urllib2 import HTTPError
except ImportError:
# Python 3+
from urllib.request import urlopen
from urllib.error import HTTPError
import sklearn
from sklearn.base import BaseEstimator
# Conveniently import all assertions in one place.
from nose.tools import assert_equal
from nose.tools import assert_not_equal
from nose.tools import assert_true
from nose.tools import assert_false
from nose.tools import assert_raises
from nose.tools import raises
from nose import SkipTest
from nose import with_setup
from numpy.testing import assert_almost_equal
from numpy.testing import assert_array_equal
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_less
import numpy as np
from sklearn.base import (ClassifierMixin, RegressorMixin, TransformerMixin,
ClusterMixin)
__all__ = ["assert_equal", "assert_not_equal", "assert_raises",
"assert_raises_regexp", "raises", "with_setup", "assert_true",
"assert_false", "assert_almost_equal", "assert_array_equal",
"assert_array_almost_equal", "assert_array_less",
"assert_less", "assert_less_equal",
"assert_greater", "assert_greater_equal"]
# Prefer nose's implementations; fall back to minimal equivalents for
# nose < 1.0.0, which did not provide assert_in / assert_not_in.
try:
    from nose.tools import assert_in, assert_not_in
except ImportError:
    # Nose < 1.0.0

    def assert_in(x, container):
        """Fail unless `x` is a member of `container`."""
        assert_true(x in container, msg="%r in %r" % (x, container))

    def assert_not_in(x, container):
        """Fail if `x` is a member of `container`."""
        assert_false(x in container, msg="%r in %r" % (x, container))
# Prefer nose's assert_raises_regex; provide a fallback for Python 2.6
# environments where it is unavailable.
try:
    from nose.tools import assert_raises_regex
except ImportError:
    # for Py 2.6

    def assert_raises_regex(expected_exception, expected_regexp,
                            callable_obj=None, *args, **kwargs):
        """Helper function to check for message patterns in exceptions.

        Fails when `callable_obj` does not raise, or raises with a message
        that does not match `expected_regexp`.
        """
        not_raised = False
        try:
            callable_obj(*args, **kwargs)
            not_raised = True
        except Exception as e:
            error_message = str(e)
            if not re.compile(expected_regexp).search(error_message):
                raise AssertionError("Error message should match pattern "
                                     "%r. %r does not." %
                                     (expected_regexp, error_message))
        if not_raised:
            raise AssertionError("Should have raised %r" %
                                 expected_exception(expected_regexp))

# assert_raises_regexp is deprecated in Python 3.4 in favor of
# assert_raises_regex but let's keep the backward compat in scikit-learn with
# the old name for now
assert_raises_regexp = assert_raises_regex
def _assert_less(a, b, msg=None):
message = "%r is not lower than %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a < b, message
def _assert_greater(a, b, msg=None):
message = "%r is not greater than %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a > b, message
def assert_less_equal(a, b, msg=None):
    """Fail unless ``a <= b``, appending `msg` to the error when given."""
    suffix = "" if msg is None else ": " + msg
    assert a <= b, "%r is not lower than or equal to %r" % (a, b) + suffix
def assert_greater_equal(a, b, msg=None):
    """Fail unless ``a >= b``, appending `msg` to the error when given."""
    suffix = "" if msg is None else ": " + msg
    assert a >= b, "%r is not greater than or equal to %r" % (a, b) + suffix
def assert_warns(warning_class, func, *args, **kw):
    """Test that a certain warning occurs.

    Parameters
    ----------
    warning_class : the warning class
        The class to test for, e.g. UserWarning.

    func : callable
        Callable object used to trigger warnings.

    *args : the positional arguments to `func`.

    **kw : the keyword arguments to `func`

    Returns
    -------
    result : the return value of `func`
    """
    # very important to avoid uncontrolled state propagation
    clean_warning_registry()
    with warnings.catch_warnings(record=True) as w:
        # Cause all warnings to always be triggered.
        warnings.simplefilter("always")
        # Trigger a warning.
        result = func(*args, **kw)
        if hasattr(np, 'VisibleDeprecationWarning'):
            # Filter out numpy-specific warnings in numpy >= 1.9
            w = [e for e in w
                 if e.category is not np.VisibleDeprecationWarning]

        # Verify that at least one warning was recorded and that one of
        # them is exactly the requested class (identity, not subclass).
        if not len(w) > 0:
            raise AssertionError("No warning raised when calling %s"
                                 % func.__name__)

        found = any(warning.category is warning_class for warning in w)
        if not found:
            raise AssertionError("%s did not give warning: %s( is %s)"
                                 % (func.__name__, warning_class, w))
    return result
def assert_warns_message(warning_class, message, func, *args, **kw):
    # very important to avoid uncontrolled state propagation
    """Test that a certain warning occurs and with a certain message.

    Parameters
    ----------
    warning_class : the warning class
        The class to test for, e.g. UserWarning.

    message : str | callable
        The entire message or a substring to test for. If callable,
        it takes a string as argument and will trigger an assertion error
        if it returns `False`.

    func : callable
        Callable object used to trigger warnings.

    *args : the positional arguments to `func`.

    **kw : the keyword arguments to `func`.

    Returns
    -------
    result : the return value of `func`
    """
    clean_warning_registry()
    with warnings.catch_warnings(record=True) as w:
        # Cause all warnings to always be triggered.
        warnings.simplefilter("always")
        if hasattr(np, 'VisibleDeprecationWarning'):
            # Let's not catch the numpy internal DeprecationWarnings
            warnings.simplefilter('ignore', np.VisibleDeprecationWarning)
        # Trigger a warning.
        result = func(*args, **kw)
        # Verify some things
        if not len(w) > 0:
            raise AssertionError("No warning raised when calling %s"
                                 % func.__name__)

        found = [warning.category is warning_class for warning in w]
        if not any(found):
            raise AssertionError("No warning raised for %s with class "
                                 "%s"
                                 % (func.__name__, warning_class))

        message_found = False
        # Check the message of every recorded warning whose category matched.
        for index in [i for i, x in enumerate(found) if x]:
            # substring will match, the entire message with typo won't
            msg = w[index].message  # For Python 3 compatibility
            msg = str(msg.args[0] if hasattr(msg, 'args') else msg)
            if callable(message):  # add support for certain tests
                check_in_message = message
            else:
                check_in_message = lambda msg: message in msg

            if check_in_message(msg):
                message_found = True
                break

        if not message_found:
            raise AssertionError("Did not receive the message you expected "
                                 "('%s') for <%s>."
                                 % (message, func.__name__))

    return result
# To remove when we support numpy 1.7
def assert_no_warnings(func, *args, **kw):
    """Fail if calling ``func(*args, **kw)`` raises any warning.

    Returns the value returned by `func`.
    """
    # XXX: once we may depend on python >= 2.6, this can be replaced by the
    # warnings module context manager.
    # very important to avoid uncontrolled state propagation
    clean_warning_registry()
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')

        result = func(*args, **kw)
        if hasattr(np, 'VisibleDeprecationWarning'):
            # Filter out numpy-specific warnings in numpy >= 1.9
            w = [e for e in w
                 if e.category is not np.VisibleDeprecationWarning]

        if len(w) > 0:
            raise AssertionError("Got warnings when calling %s: %s"
                                 % (func.__name__, w))
    return result
def ignore_warnings(obj=None):
    """Context manager and decorator to ignore warnings.

    Note: using this (in both variants) will clear all warnings from all
    python modules loaded. In case you need to test
    cross-module-warning-logging this is not your tool of choice.

    Examples
    --------
    >>> with ignore_warnings():
    ...     warnings.warn('buhuhuhu')

    >>> def nasty_warn():
    ...     warnings.warn('buhuhuhu')
    ...     print(42)

    >>> ignore_warnings(nasty_warn)()
    42
    """
    # Used as a bare decorator (``@ignore_warnings``) `obj` is the function
    # to wrap; otherwise act as a context-manager factory.
    return _ignore_warnings(obj) if callable(obj) else _IgnoreWarnings()
def _ignore_warnings(fn):
    """Decorator to catch and hide warnings without visual nesting.

    All warnings raised while `fn` runs are recorded (and therefore
    suppressed); `fn`'s return value is passed through unchanged.
    """
    @wraps(fn)
    def wrapper(*args, **kwargs):
        # very important to avoid uncontrolled state propagation
        clean_warning_registry()
        with warnings.catch_warnings(record=True):
            warnings.simplefilter('always')
            return fn(*args, **kwargs)
            # NOTE: the original had an unreachable `w[:] = []` after this
            # `return`; it was dead code and has been removed.

    return wrapper
class _IgnoreWarnings(object):
    """Improved and simplified Python warnings context manager

    Copied from Python 2.7.5 and modified as required.
    """

    def __init__(self):
        """Initialise with recording enabled and an empty warning log."""
        # Always record caught warnings into self.log.
        self._record = True
        # Keep a direct handle on the warnings module so its state can be
        # saved/restored even if sys.modules is later manipulated.
        self._module = sys.modules['warnings']
        self._entered = False
        self.log = []

    def __repr__(self):
        args = []
        if self._record:
            args.append("record=True")
        if self._module is not sys.modules['warnings']:
            args.append("module=%r" % self._module)
        name = type(self).__name__
        return "%s(%s)" % (name, ", ".join(args))

    def __enter__(self):
        clean_warning_registry()  # be safe and not propagate state + chaos
        warnings.simplefilter('always')
        if self._entered:
            raise RuntimeError("Cannot enter %r twice" % self)
        self._entered = True
        # Save the current filter list and showwarning hook so that
        # __exit__ can restore them.
        self._filters = self._module.filters
        self._module.filters = self._filters[:]
        self._showwarning = self._module.showwarning
        if self._record:
            self.log = []

            def showwarning(*args, **kwargs):
                # Capture each warning instead of printing it.
                self.log.append(warnings.WarningMessage(*args, **kwargs))
            self._module.showwarning = showwarning
            return self.log
        else:
            return None

    def __exit__(self, *exc_info):
        if not self._entered:
            raise RuntimeError("Cannot exit %r without entering first" % self)
        # Restore the saved filters and showwarning hook, then drop the log.
        self._module.filters = self._filters
        self._module.showwarning = self._showwarning
        self.log[:] = []
        clean_warning_registry()  # be safe and not propagate state + chaos
# Use nose's assert_less / assert_greater when available; otherwise fall
# back to the local implementations defined in this module.
try:
    from nose.tools import assert_less
except ImportError:
    assert_less = _assert_less

try:
    from nose.tools import assert_greater
except ImportError:
    assert_greater = _assert_greater
def _assert_allclose(actual, desired, rtol=1e-7, atol=0,
err_msg='', verbose=True):
actual, desired = np.asanyarray(actual), np.asanyarray(desired)
if np.allclose(actual, desired, rtol=rtol, atol=atol):
return
msg = ('Array not equal to tolerance rtol=%g, atol=%g: '
'actual %s, desired %s') % (rtol, atol, actual, desired)
raise AssertionError(msg)
# Prefer numpy's own implementation when the installed numpy provides it.
if hasattr(np.testing, 'assert_allclose'):
    assert_allclose = np.testing.assert_allclose
else:
    assert_allclose = _assert_allclose
def assert_raise_message(exception, message, function, *args, **kwargs):
    """Check that `function` raises `exception` containing `message`."""
    try:
        function(*args, **kwargs)
        # Reaching this line means nothing was raised.
        raise AssertionError("Should have raised %r" % exception(message))
    except exception as e:
        assert_in(message, str(e))
def fake_mldata(columns_dict, dataname, matfile, ordering=None):
    """Create a fake mldata data set.

    Parameters
    ----------
    columns_dict : dict, keys=str, values=ndarray
        Contains data as columns_dict[column_name] = array of data.

    dataname : string
        Name of data set.

    matfile : string or file object
        The file name string or the file-like object of the output file.

    ordering : list, default None
        List of column_names, determines the ordering in the data set.

    Notes
    -----
    This function transposes all arrays, while fetch_mldata only transposes
    'data', keep that into account in the tests.
    """
    datasets = dict(columns_dict)

    # transpose all variables
    for name in datasets:
        datasets[name] = datasets[name].T

    if ordering is None:
        ordering = sorted(list(datasets.keys()))
    # NOTE: setting up this array is tricky, because of the way Matlab
    # re-packages 1D arrays
    # Use np.empty here: scipy's `sp.empty` was a deprecated alias of
    # numpy.empty and has been removed from scipy's main namespace
    # (SciPy >= 1.12), so relying on it breaks on modern installs.
    datasets['mldata_descr_ordering'] = np.empty((1, len(ordering)),
                                                 dtype='object')
    for i, name in enumerate(ordering):
        datasets['mldata_descr_ordering'][0, i] = name

    scipy.io.savemat(matfile, datasets, oned_as='column')
class mock_mldata_urlopen(object):
    """Callable that fakes urlopen requests to mldata for known datasets."""

    def __init__(self, mock_datasets):
        """Object that mocks the urlopen function to fake requests to mldata.

        `mock_datasets` is a dictionary of {dataset_name: data_dict}, or
        {dataset_name: (data_dict, ordering)}.
        `data_dict` itself is a dictionary of {column_name: data_array},
        and `ordering` is a list of column_names to determine the ordering
        in the data set (see `fake_mldata` for details).

        When requesting a dataset with a name that is in mock_datasets,
        this object creates a fake dataset in a StringIO object and
        returns it. Otherwise, it raises an HTTPError.
        """
        self.mock_datasets = mock_datasets

    def __call__(self, urlname):
        # The dataset name is the last component of the requested URL.
        dataset_name = urlname.split('/')[-1]
        if dataset_name not in self.mock_datasets:
            raise HTTPError(urlname, 404, dataset_name + " is not available",
                            [], None)

        from io import BytesIO
        matfile = BytesIO()

        dataset = self.mock_datasets[dataset_name]
        ordering = None
        if isinstance(dataset, tuple):
            dataset, ordering = dataset
        fake_mldata(dataset, '_' + dataset_name, matfile, ordering)

        matfile.seek(0)
        return matfile
def install_mldata_mock(mock_datasets):
    """Replace sklearn.datasets.mldata's urlopen with a mock serving `mock_datasets`."""
    # Lazy import to avoid mutually recursive imports
    from sklearn import datasets
    datasets.mldata.urlopen = mock_mldata_urlopen(mock_datasets)
def uninstall_mldata_mock():
    """Restore the real urlopen in sklearn.datasets.mldata after mocking."""
    # Lazy import to avoid mutually recursive imports
    from sklearn import datasets
    datasets.mldata.urlopen = urlopen
# Meta estimators need another estimator to be instantiated.
META_ESTIMATORS = ["OneVsOneClassifier",
                   "OutputCodeClassifier", "OneVsRestClassifier", "RFE",
                   "RFECV", "BaseEnsemble"]

# estimators that there is no way to default-construct sensibly
OTHER = ["Pipeline", "FeatureUnion", "GridSearchCV", "RandomizedSearchCV"]

# some strange ones that require special handling and are skipped by default
DONT_TEST = ['SparseCoder', 'EllipticEnvelope', 'DictVectorizer',
             'LabelBinarizer', 'LabelEncoder', 'MultiLabelBinarizer',
             'TfidfTransformer', 'IsotonicRegression', 'OneHotEncoder',
             'RandomTreesEmbedding', 'FeatureHasher', 'DummyClassifier',
             'DummyRegressor', 'TruncatedSVD', 'PolynomialFeatures',
             'GaussianRandomProjectionHash']
def all_estimators(include_meta_estimators=False, include_other=False,
                   type_filter=None, include_dont_test=False):
    """Get a list of all estimators from sklearn.

    This function crawls the module and gets all classes that inherit
    from BaseEstimator. Classes that are defined in test-modules are not
    included.
    By default meta_estimators such as GridSearchCV are also not included.

    Parameters
    ----------
    include_meta_estimators : boolean, default=False
        Whether to include meta-estimators that can be constructed using
        an estimator as their first argument. These are currently
        BaseEnsemble, OneVsOneClassifier, OutputCodeClassifier,
        OneVsRestClassifier, RFE, RFECV.

    include_other : boolean, default=False
        Whether to include meta-estimators that are somehow special and can
        not be default-constructed sensibly. These are currently
        Pipeline, FeatureUnion and GridSearchCV

    include_dont_test : boolean, default=False
        Whether to include "special" label estimator or test processors.

    type_filter : string, list of string, or None, default=None
        Which kind of estimators should be returned. If None, no filter is
        applied and all estimators are returned. Possible values are
        'classifier', 'regressor', 'cluster' and 'transformer' to get
        estimators only of these specific types, or a list of these to
        get the estimators that fit at least one of the types.

    Returns
    -------
    estimators : list of tuples
        List of (name, class), where ``name`` is the class name as string
        and ``class`` is the actual type of the class.
    """
    def is_abstract(c):
        # A class is abstract iff it still has unimplemented abstract methods.
        if not(hasattr(c, '__abstractmethods__')):
            return False
        if not len(c.__abstractmethods__):
            return False
        return True

    all_classes = []
    # get parent folder
    path = sklearn.__path__
    for importer, modname, ispkg in pkgutil.walk_packages(
            path=path, prefix='sklearn.', onerror=lambda x: None):
        if ".tests." in modname:
            continue
        module = __import__(modname, fromlist="dummy")
        classes = inspect.getmembers(module, inspect.isclass)
        all_classes.extend(classes)

    all_classes = set(all_classes)

    estimators = [c for c in all_classes
                  if (issubclass(c[1], BaseEstimator)
                      and c[0] != 'BaseEstimator')]
    # get rid of abstract base classes
    estimators = [c for c in estimators if not is_abstract(c[1])]

    if not include_dont_test:
        estimators = [c for c in estimators if not c[0] in DONT_TEST]

    if not include_other:
        estimators = [c for c in estimators if not c[0] in OTHER]
    # possibly get rid of meta estimators
    if not include_meta_estimators:
        estimators = [c for c in estimators if not c[0] in META_ESTIMATORS]
    if type_filter is not None:
        if not isinstance(type_filter, list):
            type_filter = [type_filter]
        else:
            type_filter = list(type_filter)  # copy
        filtered_estimators = []
        filters = {'classifier': ClassifierMixin,
                   'regressor': RegressorMixin,
                   'transformer': TransformerMixin,
                   'cluster': ClusterMixin}
        for name, mixin in filters.items():
            if name in type_filter:
                type_filter.remove(name)
                filtered_estimators.extend([est for est in estimators
                                            if issubclass(est[1], mixin)])
        estimators = filtered_estimators
        # Anything left in type_filter was not a recognised category.
        if type_filter:
            raise ValueError("Parameter type_filter must be 'classifier', "
                             "'regressor', 'transformer', 'cluster' or None, got"
                             " %s." % repr(type_filter))

    # drop duplicates, sort for reproducibility
    return sorted(set(estimators))
def set_random_state(estimator, random_state=0):
    """Set `estimator`'s ``random_state`` parameter, if it has one.

    Estimators without a ``random_state`` parameter are left untouched.
    """
    # Membership test directly on the dict; the redundant `.keys()` call
    # in the original added an intermediate view object for no benefit.
    if "random_state" in estimator.get_params():
        estimator.set_params(random_state=random_state)
def if_matplotlib(func):
    """Test decorator that skips test if matplotlib not installed. """
    @wraps(func)
    def run_test(*args, **kwargs):
        try:
            import matplotlib
            matplotlib.use('Agg', warn=False)
            # this fails if no $DISPLAY specified
            matplotlib.pylab.figure()
        except Exception:
            # Catch concrete failures (ImportError, backend/display errors).
            # The original bare `except:` also swallowed KeyboardInterrupt
            # and SystemExit, which should never be converted into a skip.
            raise SkipTest('Matplotlib not available.')
        else:
            return func(*args, **kwargs)
    return run_test
def if_not_mac_os(versions=('10.7', '10.8', '10.9'),
                  message='Multi-process bug in Mac OS X >= 10.7 '
                          '(see issue #636)'):
    """Test decorator that skips test if OS is Mac OS X and its
    major version is one of ``versions``.
    """
    major_minor = '.'.join(platform.mac_ver()[0].split('.')[:2])
    skip = major_minor in versions

    def decorator(func):
        # On unaffected platforms the function is returned untouched.
        if not skip:
            return func

        @wraps(func)
        def func(*args, **kwargs):
            raise SkipTest(message)
        return func
    return decorator
def clean_warning_registry():
    """Safe way to reset warnings: clear filters and per-module registries."""
    warnings.resetwarnings()
    registry_attr = "__warningregistry__"
    for name, module in list(sys.modules.items()):
        # six.moves installs lazy loader modules whose attribute access can
        # trigger imports; leave them alone.
        if 'six.moves' in name:
            continue
        if hasattr(module, registry_attr):
            getattr(module, registry_attr).clear()
def check_skip_network():
    """Skip the calling test when SKLEARN_SKIP_NETWORK_TESTS is set truthy."""
    flag = os.environ.get('SKLEARN_SKIP_NETWORK_TESTS', 0)
    if int(flag):
        raise SkipTest("Text tutorial requires large dataset download")
def check_skip_travis():
    """Skip test if being run on Travis."""
    running_on_travis = os.environ.get('TRAVIS') == "true"
    if running_on_travis:
        raise SkipTest("This test needs to be skipped on Travis")
# Nose-style fixtures: decorators that run the matching skip-check before
# each wrapped test (``with_setup`` is imported elsewhere in this module,
# presumably from nose -- TODO confirm).
with_network = with_setup(check_skip_network)
with_travis = with_setup(check_skip_travis)
| {
"content_hash": "cd41d05ed9c535032ddcc46df32a5dc1",
"timestamp": "",
"source": "github",
"line_count": 665,
"max_line_length": 81,
"avg_line_length": 33.4390977443609,
"alnum_prop": 0.6112784997976346,
"repo_name": "ycaihua/scikit-learn",
"id": "f0ba189bd8c0fb52422ccbf86057666670df8060",
"size": "22237",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "sklearn/utils/testing.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "18569015"
},
{
"name": "C++",
"bytes": "1810938"
},
{
"name": "CSS",
"bytes": "1503"
},
{
"name": "JavaScript",
"bytes": "20564"
},
{
"name": "Makefile",
"bytes": "4897"
},
{
"name": "PowerShell",
"bytes": "13427"
},
{
"name": "Python",
"bytes": "5887845"
},
{
"name": "Shell",
"bytes": "8730"
}
],
"symlink_target": ""
} |
from django.shortcuts import render, redirect
from django.http import HttpRequest
from schools.models import School
from django.views.generic import *
# Create your views here.
class SchoolList(ListView):
    """Generic list view over all ``School`` records.

    Template name and context variable follow Django ``ListView`` defaults.
    """
    model = School
def stthomas_home(request):
    """Render the St. Thomas home page."""
    template = 'stthomas/home.html'
    return render(request, template)
def stthomas_about(request):
    """Render the St. Thomas about page."""
    template = 'stthomas/about.html'
    return render(request, template)
def stthomas_announcements(request):
    """Render the St. Thomas announcements page."""
    template = 'stthomas/announcements.html'
    return render(request, template)
def stthomas_gallery(request):
    """Render the St. Thomas gallery page."""
    template = 'stthomas/gallery.html'
    return render(request, template)
def stthomas_managemant(request):
    """Render the St. Thomas management page.

    NOTE(review): the function name is misspelled ("managemant"). It is kept
    unchanged because URL configuration elsewhere may reference it; prefer
    the correctly spelled alias below in new code.
    """
    return render(request, 'stthomas/management.html')

# Backward-compatible, correctly spelled alias.
stthomas_management = stthomas_managemant
def stthomas_results(request):
    """Render the St. Thomas results page."""
    template = 'stthomas/results.html'
    return render(request, template)
def stxaviers_home(request):
    """Redirect to the external St. Xavier's school website."""
    external_url = 'http://www.stxaviersschool.com'
    return redirect(external_url)
def jvmshyamali_home(request):
    """Redirect to the external JVM Shyamali school website."""
    external_url = 'http://www.jvmshyamali.com'
    return redirect(external_url)
"content_hash": "1529fee8215b0525f976d1a5d5ba2946",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 57,
"avg_line_length": 33.25925925925926,
"alnum_prop": 0.7661469933184856,
"repo_name": "AnimeshSinha1309/Website-Edunet",
"id": "5dd99ffc0a2625da80640f8cbbbbd92ae802fb0f",
"size": "900",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "WebsiteEdunet/schools/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "40871"
},
{
"name": "JavaScript",
"bytes": "6934"
},
{
"name": "PHP",
"bytes": "173675"
}
],
"symlink_target": ""
} |
'''
Config file generator
~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2016 by Jihoon Kang <kang@ghoon.net>
:license: Apache 2, see LICENSE for more details
'''
from jinja2 import Environment, FileSystemLoader
import yaml
def generate_config(project_name, project_rootdir, build_target,
                    tmpl_name='default', tmpl_dir='configs', output='config'):
    """Render the Jinja2 template ``<tmpl_name>.config`` from ``tmpl_dir``
    and write the result to ``output``.

    :param project_name: name exposed to the template
    :param project_rootdir: project root path exposed to the template
    :param build_target: build target exposed to the template
    :param tmpl_name: template base name (without the ``.config`` suffix)
    :param tmpl_dir: directory searched for templates
    :param output: path of the generated config file
    """
    env = Environment(loader=FileSystemLoader(tmpl_dir),
                      trim_blocks=True)
    tmpl = env.get_template('%s.config' % tmpl_name)
    # Pass the template context explicitly instead of dumping locals(),
    # which previously leaked unrelated names (env, tmpl, the open file
    # handle) into the template namespace.
    context = {
        'project_name': project_name,
        'project_rootdir': project_rootdir,
        'build_target': build_target,
        'tmpl_name': tmpl_name,
        'tmpl_dir': tmpl_dir,
        'output': output,
    }
    with open(output, 'w') as f:
        f.write(tmpl.render(context))
def read_config(filename):
    """Load a YAML config file and return the parsed data.

    :param filename: path to the YAML file
    :returns: parsed YAML content (dict/list/scalar)
    """
    with open(filename) as f:
        # safe_load avoids arbitrary Python object construction from the
        # YAML stream; bare yaml.load without a Loader is deprecated and
        # unsafe for untrusted input.
        configs = yaml.safe_load(f)
    return configs
| {
"content_hash": "32088b1b3c27afbdbdd6adbae3222bb7",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 66,
"avg_line_length": 26.73076923076923,
"alnum_prop": 0.6431654676258993,
"repo_name": "kghoon/epgen",
"id": "edc66905cc3be0936f0e48d38f8d285a01d3b606",
"size": "720",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "epgen/confgen.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "9074"
}
],
"symlink_target": ""
} |
import collections
import logging
from pip._vendor import six
from pip._vendor.packaging.utils import canonicalize_name
from pip._internal.exceptions import (
DistributionNotFound,
InstallationError,
UnsupportedPythonVersion,
UnsupportedWheel,
)
from pip._internal.models.wheel import Wheel
from pip._internal.req.req_install import InstallRequirement
from pip._internal.utils.compatibility_tags import get_supported
from pip._internal.utils.hashes import Hashes
from pip._internal.utils.misc import (
dist_in_site_packages,
dist_in_usersite,
get_installed_distributions,
)
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
from pip._internal.utils.virtualenv import running_under_virtualenv
from .candidates import (
AlreadyInstalledCandidate,
EditableCandidate,
ExtrasCandidate,
LinkCandidate,
RequiresPythonCandidate,
)
from .requirements import (
ExplicitRequirement,
RequiresPythonRequirement,
SpecifierRequirement,
)
if MYPY_CHECK_RUNNING:
from typing import (
FrozenSet,
Dict,
Iterable,
List,
Optional,
Sequence,
Set,
Tuple,
TypeVar,
)
from pip._vendor.packaging.specifiers import SpecifierSet
from pip._vendor.packaging.version import _BaseVersion
from pip._vendor.pkg_resources import Distribution
from pip._vendor.resolvelib import ResolutionImpossible
from pip._internal.cache import CacheEntry, WheelCache
from pip._internal.index.package_finder import PackageFinder
from pip._internal.models.link import Link
from pip._internal.operations.prepare import RequirementPreparer
from pip._internal.resolution.base import InstallRequirementProvider
from .base import Candidate, Requirement
from .candidates import BaseCandidate
C = TypeVar("C")
Cache = Dict[Link, C]
VersionCandidates = Dict[_BaseVersion, Candidate]
logger = logging.getLogger(__name__)
class Factory(object):
    """Bridge between pip internals and the resolvelib resolver.

    Creates Candidate and Requirement objects on demand, caching
    link-based candidates and tracking installed distributions so that
    already-installed versions can be reused instead of re-downloaded.
    """
    def __init__(
        self,
        finder, # type: PackageFinder
        preparer, # type: RequirementPreparer
        make_install_req, # type: InstallRequirementProvider
        wheel_cache, # type: Optional[WheelCache]
        use_user_site, # type: bool
        force_reinstall, # type: bool
        ignore_installed, # type: bool
        ignore_requires_python, # type: bool
        py_version_info=None, # type: Optional[Tuple[int, ...]]
        lazy_wheel=False, # type: bool
    ):
        # type: (...) -> None
        self._finder = finder
        self.preparer = preparer
        self._wheel_cache = wheel_cache
        self._python_candidate = RequiresPythonCandidate(py_version_info)
        self._make_install_req_from_spec = make_install_req
        self._use_user_site = use_user_site
        self._force_reinstall = force_reinstall
        self._ignore_requires_python = ignore_requires_python
        self.use_lazy_wheel = lazy_wheel
        # Per-link caches so the same Link always maps to one candidate.
        self._link_candidate_cache = {} # type: Cache[LinkCandidate]
        self._editable_candidate_cache = {} # type: Cache[EditableCandidate]
        # Map canonical project name -> installed Distribution; left empty
        # when installed distributions are to be ignored entirely.
        if not ignore_installed:
            self._installed_dists = {
                canonicalize_name(dist.project_name): dist
                for dist in get_installed_distributions()
            }
        else:
            self._installed_dists = {}
    @property
    def force_reinstall(self):
        # type: () -> bool
        """Whether --force-reinstall was requested."""
        return self._force_reinstall
    def _make_candidate_from_dist(
        self,
        dist, # type: Distribution
        extras, # type: FrozenSet[str]
        template, # type: InstallRequirement
    ):
        # type: (...) -> Candidate
        """Wrap an installed distribution as a candidate (with extras)."""
        base = AlreadyInstalledCandidate(dist, template, factory=self)
        if extras:
            return ExtrasCandidate(base, extras)
        return base
    def _make_candidate_from_link(
        self,
        link, # type: Link
        extras, # type: FrozenSet[str]
        template, # type: InstallRequirement
        name, # type: Optional[str]
        version, # type: Optional[_BaseVersion]
    ):
        # type: (...) -> Candidate
        """Create (or fetch from cache) a candidate for a link.

        Editable and non-editable candidates are cached separately.
        """
        # TODO: Check already installed candidate, and use it if the link and
        # editable flag match.
        if template.editable:
            if link not in self._editable_candidate_cache:
                self._editable_candidate_cache[link] = EditableCandidate(
                    link, template, factory=self, name=name, version=version,
                )
            base = self._editable_candidate_cache[link] # type: BaseCandidate
        else:
            if link not in self._link_candidate_cache:
                self._link_candidate_cache[link] = LinkCandidate(
                    link, template, factory=self, name=name, version=version,
                )
            base = self._link_candidate_cache[link]
        if extras:
            return ExtrasCandidate(base, extras)
        return base
    def _iter_found_candidates(
        self,
        ireqs, # type: Sequence[InstallRequirement]
        specifier, # type: SpecifierSet
    ):
        # type: (...) -> Iterable[Candidate]
        """Find index (and installed) candidates satisfying all ``ireqs``."""
        if not ireqs:
            return ()
        # The InstallRequirement implementation requires us to give it a
        # "template". Here we just choose the first requirement to represent
        # all of them.
        # Hopefully the Project model can correct this mismatch in the future.
        template = ireqs[0]
        name = canonicalize_name(template.req.name)
        hashes = Hashes()
        extras = frozenset() # type: FrozenSet[str]
        # Intersect specifiers and union hashes/extras across all the
        # requirements for this project.
        for ireq in ireqs:
            specifier &= ireq.req.specifier
            hashes |= ireq.hashes(trust_internet=False)
            extras |= frozenset(ireq.extras)
        # We use this to ensure that we only yield a single candidate for
        # each version (the finder's preferred one for that version). The
        # requirement needs to return only one candidate per version, so we
        # implement that logic here so that requirements using this helper
        # don't all have to do the same thing later.
        candidates = collections.OrderedDict() # type: VersionCandidates
        # Get the installed version, if it matches, unless the user
        # specified `--force-reinstall`, when we want the version from
        # the index instead.
        installed_version = None
        installed_candidate = None
        if not self._force_reinstall and name in self._installed_dists:
            installed_dist = self._installed_dists[name]
            installed_version = installed_dist.parsed_version
            if specifier.contains(installed_version, prereleases=True):
                installed_candidate = self._make_candidate_from_dist(
                    dist=installed_dist,
                    extras=extras,
                    template=template,
                )
        found = self._finder.find_best_candidate(
            project_name=name,
            specifier=specifier,
            hashes=hashes,
        )
        for ican in found.iter_applicable():
            # Prefer the installed candidate when the versions match.
            if ican.version == installed_version and installed_candidate:
                candidate = installed_candidate
            else:
                candidate = self._make_candidate_from_link(
                    link=ican.link,
                    extras=extras,
                    template=template,
                    name=name,
                    version=ican.version,
                )
            candidates[ican.version] = candidate
        # Yield the installed version even if it is not found on the index.
        if installed_version and installed_candidate:
            candidates[installed_version] = installed_candidate
        return six.itervalues(candidates)
    def find_candidates(self, requirements, constraint):
        # type: (Sequence[Requirement], SpecifierSet) -> Iterable[Candidate]
        """Return candidates satisfying every requirement and the constraint."""
        explicit_candidates = set() # type: Set[Candidate]
        ireqs = [] # type: List[InstallRequirement]
        for req in requirements:
            cand, ireq = req.get_candidate_lookup()
            if cand is not None:
                explicit_candidates.add(cand)
            if ireq is not None:
                ireqs.append(ireq)
        # If none of the requirements want an explicit candidate, we can ask
        # the finder for candidates.
        if not explicit_candidates:
            return self._iter_found_candidates(ireqs, constraint)
        # Explicit (direct link/path) candidates cannot be version-constrained.
        if constraint:
            name = explicit_candidates.pop().name
            raise InstallationError(
                "Could not satisfy constraints for {!r}: installation from "
                "path or url cannot be constrained to a version".format(name)
            )
        return (
            c for c in explicit_candidates
            if all(req.is_satisfied_by(c) for req in requirements)
        )
    def make_requirement_from_install_req(self, ireq, requested_extras):
        # type: (InstallRequirement, Iterable[str]) -> Optional[Requirement]
        """Convert an InstallRequirement into a resolver Requirement.

        Returns None when the requirement's markers do not match the
        current environment.
        """
        if not ireq.match_markers(requested_extras):
            logger.info(
                "Ignoring %s: markers '%s' don't match your environment",
                ireq.name, ireq.markers,
            )
            return None
        if not ireq.link:
            return SpecifierRequirement(ireq)
        # Direct links to wheels must be installable on this platform.
        if ireq.link.is_wheel:
            wheel = Wheel(ireq.link.filename)
            if not wheel.supported(self._finder.target_python.get_tags()):
                msg = "{} is not a supported wheel on this platform.".format(
                    wheel.filename,
                )
                raise UnsupportedWheel(msg)
        cand = self._make_candidate_from_link(
            ireq.link,
            extras=frozenset(ireq.extras),
            template=ireq,
            name=canonicalize_name(ireq.name) if ireq.name else None,
            version=None,
        )
        return self.make_requirement_from_candidate(cand)
    def make_requirement_from_candidate(self, candidate):
        # type: (Candidate) -> ExplicitRequirement
        """Wrap a known candidate as an ExplicitRequirement."""
        return ExplicitRequirement(candidate)
    def make_requirement_from_spec(
        self,
        specifier, # type: str
        comes_from, # type: InstallRequirement
        requested_extras=(), # type: Iterable[str]
    ):
        # type: (...) -> Optional[Requirement]
        """Build a Requirement from a PEP 508 specifier string."""
        ireq = self._make_install_req_from_spec(specifier, comes_from)
        return self.make_requirement_from_install_req(ireq, requested_extras)
    def make_requires_python_requirement(self, specifier):
        # type: (Optional[SpecifierSet]) -> Optional[Requirement]
        """Build a Requires-Python requirement, unless it is being ignored."""
        if self._ignore_requires_python or specifier is None:
            return None
        return RequiresPythonRequirement(specifier, self._python_candidate)
    def get_wheel_cache_entry(self, link, name):
        # type: (Link, Optional[str]) -> Optional[CacheEntry]
        """Look up the link in the wheel cache.
        If ``preparer.require_hashes`` is True, don't use the wheel cache,
        because cached wheels, always built locally, have different hashes
        than the files downloaded from the index server and thus throw false
        hash mismatches. Furthermore, cached wheels at present have
        nondeterministic contents due to file modification times.
        """
        if self._wheel_cache is None or self.preparer.require_hashes:
            return None
        return self._wheel_cache.get_cache_entry(
            link=link,
            package_name=name,
            supported_tags=get_supported(),
        )
    def get_dist_to_uninstall(self, candidate):
        # type: (Candidate) -> Optional[Distribution]
        """Return the installed distribution that must be removed before
        installing ``candidate``, or None if nothing needs uninstalling.
        """
        # TODO: Are there more cases this needs to return True? Editable?
        dist = self._installed_dists.get(candidate.name)
        if dist is None: # Not installed, no uninstallation required.
            return None
        # We're installing into global site. The current installation must
        # be uninstalled, no matter it's in global or user site, because the
        # user site installation has precedence over global.
        if not self._use_user_site:
            return dist
        # We're installing into user site. Remove the user site installation.
        if dist_in_usersite(dist):
            return dist
        # We're installing into user site, but the installed incompatible
        # package is in global site. We can't uninstall that, and would let
        # the new user installation to "shadow" it. But shadowing won't work
        # in virtual environments, so we error out.
        if running_under_virtualenv() and dist_in_site_packages(dist):
            raise InstallationError(
                "Will not install to the user site because it will "
                "lack sys.path precedence to {} in {}".format(
                    dist.project_name, dist.location,
                )
            )
        return None
    def _report_requires_python_error(
        self,
        requirement, # type: RequiresPythonRequirement
        template, # type: Candidate
    ):
        # type: (...) -> UnsupportedPythonVersion
        """Build the error for an unsatisfiable Requires-Python constraint."""
        message_format = (
            "Package {package!r} requires a different Python: "
            "{version} not in {specifier!r}"
        )
        message = message_format.format(
            package=template.name,
            version=self._python_candidate.version,
            specifier=str(requirement.specifier),
        )
        return UnsupportedPythonVersion(message)
    def get_installation_error(self, e):
        # type: (ResolutionImpossible) -> InstallationError
        """Translate a resolvelib ResolutionImpossible into a user-facing
        InstallationError with a readable explanation of the conflict.
        """
        assert e.causes, "Installation error reported with no cause"
        # If one of the things we can't solve is "we need Python X.Y",
        # that is what we report.
        for cause in e.causes:
            if isinstance(cause.requirement, RequiresPythonRequirement):
                return self._report_requires_python_error(
                    cause.requirement,
                    cause.parent,
                )
        # Otherwise, we have a set of causes which can't all be satisfied
        # at once.
        # The simplest case is when we have *one* cause that can't be
        # satisfied. We just report that case.
        if len(e.causes) == 1:
            req, parent = e.causes[0]
            if parent is None:
                req_disp = str(req)
            else:
                req_disp = '{} (from {})'.format(req, parent.name)
            logger.critical(
                "Could not find a version that satisfies the requirement %s",
                req_disp,
            )
            return DistributionNotFound(
                'No matching distribution found for {}'.format(req)
            )
        # OK, we now have a list of requirements that can't all be
        # satisfied at once.
        # A couple of formatting helpers
        def text_join(parts):
            # type: (List[str]) -> str
            # Join as "a, b and c" for readable output.
            if len(parts) == 1:
                return parts[0]
            return ", ".join(parts[:-1]) + " and " + parts[-1]
        def readable_form(cand):
            # type: (Candidate) -> str
            # NOTE(review): not referenced within this method -- appears unused.
            return "{} {}".format(cand.name, cand.version)
        def describe_trigger(parent):
            # type: (Candidate) -> str
            # Prefer naming whatever requirement pulled `parent` in.
            ireq = parent.get_install_requirement()
            if not ireq or not ireq.comes_from:
                return "{} {}".format(parent.name, parent.version)
            if isinstance(ireq.comes_from, InstallRequirement):
                return str(ireq.comes_from.name)
            return str(ireq.comes_from)
        triggers = []
        for req, parent in e.causes:
            if parent is None:
                # This is a root requirement, so we can report it directly
                trigger = req.format_for_error()
            else:
                trigger = describe_trigger(parent)
            triggers.append(trigger)
        if triggers:
            info = text_join(triggers)
        else:
            info = "the requested packages"
        msg = "Cannot install {} because these package versions " \
            "have conflicting dependencies.".format(info)
        logger.critical(msg)
        msg = "\nThe conflict is caused by:"
        for req, parent in e.causes:
            msg = msg + "\n "
            if parent:
                msg = msg + "{} {} depends on ".format(
                    parent.name,
                    parent.version
                )
            else:
                msg = msg + "The user requested "
            msg = msg + req.format_for_error()
        msg = msg + "\n\n" + \
            "To fix this you could try to:\n" + \
            "1. loosen the range of package versions you've specified\n" + \
            "2. remove package versions to allow pip attempt to solve " + \
            "the dependency conflict\n"
        logger.info(msg)
        return DistributionNotFound(
            "ResolutionImpossible: for help visit "
            "https://pip.pypa.io/en/latest/user_guide/"
            "#fixing-conflicting-dependencies"
        )
| {
"content_hash": "e0e04479d10f9fd116912463845379f4",
"timestamp": "",
"source": "github",
"line_count": 459,
"max_line_length": 78,
"avg_line_length": 37.40522875816993,
"alnum_prop": 0.5961908090162502,
"repo_name": "sserrot/champion_relationships",
"id": "dab23aa09d19b1e1988c30a882c4dd12d3f999bb",
"size": "17169",
"binary": false,
"copies": "11",
"ref": "refs/heads/master",
"path": "venv/Lib/site-packages/pip/_internal/resolution/resolvelib/factory.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "128"
},
{
"name": "HTML",
"bytes": "18324224"
},
{
"name": "Jupyter Notebook",
"bytes": "9131072"
},
{
"name": "Python",
"bytes": "10702"
}
],
"symlink_target": ""
} |
import json
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
from PIL import Image, ImageDraw
def reformat_face_corners(df):
    """Split the 4-element ``face_corners`` list into explicit columns.

    Adds face_left_x / face_upper_y / face_right_x / face_lower_y and
    drops the original ``face_corners`` column.
    """
    corner_columns = ('face_left_x', 'face_upper_y',
                      'face_right_x', 'face_lower_y')
    for position, column in enumerate(corner_columns):
        df[column] = df.apply(lambda row, i=position: row.face_corners[i],
                              axis=1)
    return df.drop('face_corners', axis=1)
def get_image_dims(df):
    """Add ``image_width``/``image_height`` columns read from each image file.

    Opens every path in ``local_img_path`` with PIL and takes the bounding
    box right/lower coordinates as the image dimensions.
    """
    bounding_boxes = df.local_img_path.apply(
        lambda path: Image.open(path).getbbox())
    df['image_width'] = bounding_boxes.apply(lambda box: box[2])
    df['image_height'] = bounding_boxes.apply(lambda box: box[3])
    return df
def plot_with_boxed_face(df):
    """Display each image in ``df`` with its face bounding box drawn.

    Bug fixes vs. the original: ``DataFrame.iterrows`` yields
    ``(index, row)`` tuples (the tuple was previously treated as the row
    itself), and the box coordinates were referenced as undefined bare
    names instead of row attributes.
    """
    assert df.shape[0] < 100 # we almost never want to print that many images
    for _, row in df.iterrows():
        img = Image.open(row.local_img_path)
        dr = ImageDraw.Draw(img)
        dr.rectangle(xy=(row.face_left_x, row.face_upper_y,
                         row.face_right_x, row.face_lower_y))
        img.show()
    return
def add_face_dims_and_margins(df):
    """Return ``df`` with face size and image-margin columns added.

    Margins are the distances from each side of the face box to the
    corresponding image edge.
    """
    out = df.assign(
        face_width=df.face_right_x - df.face_left_x,
        face_height=df.face_lower_y - df.face_upper_y,
        face_left_margin=df.face_left_x,
        face_right_margin=df.image_width - df.face_right_x,
        face_top_margin=df.face_upper_y,
        face_lower_margin=df.image_height - df.face_lower_y,
    )
    return out
def filter_by_face_size_and_loc(df,
                                min_face_size=55, # many of the smaller face boxes are inaccurate
                                min_pct_horiz_face_border=0.05,
                                min_pct_vert_face_border=0.25):
    """Keep rows whose face box is large enough and far enough from the
    image edges (margins expressed as a fraction of face height).
    """
    keep = (
        (df.face_width > min_face_size)
        & (df.face_top_margin / df.face_height > min_pct_vert_face_border)
        & (df.face_lower_margin / df.face_height > min_pct_vert_face_border)
        & (df.face_right_margin / df.face_height > min_pct_horiz_face_border)
        & (df.face_left_margin / df.face_height > min_pct_horiz_face_border)
    )
    return df[keep]
if __name__ == "__main__":
    # Pipeline: load face-detection output, keep only images with at least
    # one detected face, derive image/face dimensions, filter by size and
    # border distance, and write the result to CSV.
    with open('work/facial_feats_data.json', 'r') as f:
        img_dat = json.load(f)
    imgs_with_faces = [i for i in img_dat if len(i['face_corners'])>0]
    img_df = (pd.DataFrame(imgs_with_faces)
                .pipe(get_image_dims)
                .pipe(reformat_face_corners)
                .pipe(add_face_dims_and_margins)
                .pipe(filter_by_face_size_and_loc)
                )
    img_df.to_csv('work/image_and_face_dims.csv', index=False)
    # Optional diagnostic plot of the face-width distribution.
    #sns.distplot(img_df.face_width, bins=40, kde=True)
    #plt.show()
| {
"content_hash": "8c9b01348ac48f130f1ef63996c41ca0",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 98,
"avg_line_length": 43.55384615384615,
"alnum_prop": 0.5856587778170258,
"repo_name": "dansbecker/what-celebrity",
"id": "f30b3833b597d36adc0047fb79dbe9b6e88cb91c",
"size": "2831",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "get_face_coords_csv.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "15393"
}
],
"symlink_target": ""
} |
"""Support for CO2 sensor connected to a serial port."""
from datetime import timedelta
import logging
from pmsensor import co2sensor
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA, SensorEntity
from homeassistant.const import (
ATTR_TEMPERATURE,
CONCENTRATION_PARTS_PER_MILLION,
CONF_MONITORED_CONDITIONS,
CONF_NAME,
TEMP_FAHRENHEIT,
)
import homeassistant.helpers.config_validation as cv
from homeassistant.util import Throttle
from homeassistant.util.temperature import celsius_to_fahrenheit
_LOGGER = logging.getLogger(__name__)
# Configuration key for the serial device path the sensor is attached to.
CONF_SERIAL_DEVICE = "serial_device"
# Minimum interval between sensor reads (enforced via Throttle).
MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=10)
DEFAULT_NAME = "CO2 Sensor"
ATTR_CO2_CONCENTRATION = "co2_concentration"
SENSOR_TEMPERATURE = "temperature"
SENSOR_CO2 = "co2"
# Sensor type -> [friendly name, unit]. The temperature unit is filled in
# at setup time from the configured Home Assistant unit system.
SENSOR_TYPES = {
    SENSOR_TEMPERATURE: ["Temperature", None],
    SENSOR_CO2: ["CO2", CONCENTRATION_PARTS_PER_MILLION],
}
# Platform configuration schema: serial device is required; monitored
# conditions default to CO2 only.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {
        vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
        vol.Required(CONF_SERIAL_DEVICE): cv.string,
        vol.Optional(CONF_MONITORED_CONDITIONS, default=[SENSOR_CO2]): vol.All(
            cv.ensure_list, [vol.In(SENSOR_TYPES)]
        ),
    }
)
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the available CO2 sensors."""
    # Probe the serial device once up front so misconfiguration fails fast.
    try:
        co2sensor.read_mh_z19(config.get(CONF_SERIAL_DEVICE))
    except OSError as err:
        _LOGGER.error(
            "Could not open serial connection to %s (%s)",
            config.get(CONF_SERIAL_DEVICE),
            err,
        )
        return False
    # Fill in the temperature unit from the configured unit system
    # (mutates the module-level SENSOR_TYPES table).
    SENSOR_TYPES[SENSOR_TEMPERATURE][1] = hass.config.units.temperature_unit
    data = MHZClient(co2sensor, config.get(CONF_SERIAL_DEVICE))
    dev = []
    name = config.get(CONF_NAME)
    # One entity per monitored condition, all sharing the same reader.
    for variable in config[CONF_MONITORED_CONDITIONS]:
        dev.append(MHZ19Sensor(data, variable, SENSOR_TYPES[variable][1], name))
    add_entities(dev, True)
    return True
class MHZ19Sensor(SensorEntity):
    """Representation of an CO2 sensor."""
    def __init__(self, mhz_client, sensor_type, temp_unit, name):
        """Initialize a new PM sensor."""
        # Shared MHZClient instance that actually talks to the hardware.
        self._mhz_client = mhz_client
        # One of SENSOR_CO2 / SENSOR_TEMPERATURE.
        self._sensor_type = sensor_type
        self._temp_unit = temp_unit
        self._name = name
        self._unit_of_measurement = SENSOR_TYPES[sensor_type][1]
        # Last readings; None until the first successful update.
        self._ppm = None
        self._temperature = None
    @property
    def name(self):
        """Return the name of the sensor."""
        return f"{self._name}: {SENSOR_TYPES[self._sensor_type][0]}"
    @property
    def state(self):
        """Return the state of the sensor."""
        return self._ppm if self._sensor_type == SENSOR_CO2 else self._temperature
    @property
    def unit_of_measurement(self):
        """Return the unit of measurement of this entity, if any."""
        return self._unit_of_measurement
    def update(self):
        """Read from sensor and update the state."""
        self._mhz_client.update()
        data = self._mhz_client.data
        self._temperature = data.get(SENSOR_TEMPERATURE)
        # Client readings are Celsius; convert if HA is set to Fahrenheit.
        if self._temperature is not None and self._temp_unit == TEMP_FAHRENHEIT:
            self._temperature = round(celsius_to_fahrenheit(self._temperature), 1)
        self._ppm = data.get(SENSOR_CO2)
    @property
    def extra_state_attributes(self):
        """Return the state attributes."""
        # Each entity also exposes the *other* reading as an attribute.
        result = {}
        if self._sensor_type == SENSOR_TEMPERATURE and self._ppm is not None:
            result[ATTR_CO2_CONCENTRATION] = self._ppm
        if self._sensor_type == SENSOR_CO2 and self._temperature is not None:
            result[ATTR_TEMPERATURE] = self._temperature
        return result
class MHZClient:
    """Get the latest data from the MH-Z sensor."""
    def __init__(self, co2sens, serial):
        """Initialize the sensor."""
        self.co2sensor = co2sens
        self._serial = serial
        self.data = {}
    @Throttle(MIN_TIME_BETWEEN_UPDATES)
    def update(self):
        """Get the latest data the MH-Z19 sensor."""
        self.data = {}
        try:
            reading = self.co2sensor.read_mh_z19_with_temperature(self._serial)
        except OSError as err:
            _LOGGER.error(
                "Could not open serial connection to %s (%s)", self._serial, err
            )
            return
        if reading is None:
            return
        co2, temperature = reading
        if temperature is not None:
            self.data[SENSOR_TEMPERATURE] = temperature
        # Plausibility window for the CO2 reading.
        if co2 is not None and 0 < co2 <= 5000:
            self.data[SENSOR_CO2] = co2
| {
"content_hash": "de63277631fb55855d885da27a56aad3",
"timestamp": "",
"source": "github",
"line_count": 147,
"max_line_length": 82,
"avg_line_length": 31.714285714285715,
"alnum_prop": 0.6342771342771343,
"repo_name": "kennedyshead/home-assistant",
"id": "0f0735dd5dadb6afbf0db42b84e62de7e0cb80ee",
"size": "4662",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "homeassistant/components/mhz19/sensor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1795"
},
{
"name": "Python",
"bytes": "33970989"
},
{
"name": "Shell",
"bytes": "4900"
}
],
"symlink_target": ""
} |
from sqlalchemy import Column, Integer, String, Boolean, Sequence
from sqlalchemy import BigInteger, Date, DateTime, Float, Numeric
from pow_comments.powlib import relation
from pow_comments.sqldblib import Base
# Uncomment to declare one-to-many relations to other models:
#@relation.has_many("<plural_other_models>")
@relation.is_tree()
@relation.setup_schema()
class Comment(Base):
    """Tree-structured comment model; columns are generated from ``schema``
    by the ``relation.setup_schema`` decorator."""
    #
    # put your column definition here:
    #
    #
    # sqlalchemy classic style
    # which offer you all sqlalchemy options
    #
    #title = Column(String(50))
    #text = Column(String)
    #
    # or the new (cerberus) schema style
    # which offer you immediate validation
    #
    schema = {
        # string sqltypes can be TEXT or UNICODE or nothing
        'author': {
            'type': 'string', 'maxlength' : 35,
            # the sql "sub"key lets you declare "raw" sql(alchemy) Column options
            # the options below are implemented so far.
            "sql" : {
                "primary_key" : False,
                "default" : "No Author Name",
                "unique" : True,
                "nullable" : False
            }
        },
        'text': {'type': 'string'}
    }
    # Constructor: delegate attribute population to the framework hook.
    def __init__(self, **kwargs):
        self.init_on_load(**kwargs)
    # your methods down here
| {
"content_hash": "0cf103f25954a563b4dd43d1b2b07682",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 81,
"avg_line_length": 28.84090909090909,
"alnum_prop": 0.5728920409771474,
"repo_name": "pythononwheels/pow_devel",
"id": "ce2b562580db068aa1234038f50087c81326fde4",
"size": "1289",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pythononwheels/start/stuff/comment.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3448946"
},
{
"name": "Dockerfile",
"bytes": "3363"
},
{
"name": "HTML",
"bytes": "1128"
},
{
"name": "JavaScript",
"bytes": "3002089"
},
{
"name": "Mako",
"bytes": "493"
},
{
"name": "PLSQL",
"bytes": "10276"
},
{
"name": "Python",
"bytes": "414691"
}
],
"symlink_target": ""
} |
import os
import time
import hashlib
import functools
import mimetypes
import itertools
import time
import logging
import asyncio
from datetime import datetime, timedelta
from ..toolbox.config import get_description
from . import server as web
from . import api
from .async import asyncweb, webcoroutine
from .settings import rename_example
from .utils import SessionPlugin, CachePlugin, shview, static_file_from_zip, abspath_to_zippath
from ..utils import episode_status_icon_info
from ..coffee import cscompile_with_cache
from ..config import config
log = logging.getLogger('stagehand.web.app')
# Install plugins globally so every route gets session and cache support.
web.install(SessionPlugin())
web.install(CachePlugin())
@web.get('/static/:filename#.*#')
def static(filename):
    """Serve a static asset from the zip bundle or the filesystem,
    transparently compiling CoffeeScript to JavaScript on demand.
    """
    manager = web.request['stagehand.manager']
    root = os.path.join(manager.paths.data, 'web')
    ziproot = abspath_to_zippath(root)
    response = None
    if ziproot:
        # Running from a zip bundle: serve the (precompiled) asset from it.
        try:
            target = filename + '.compiled' if filename.endswith('.coffee') else filename
            response = static_file_from_zip(ziproot, target)
        except AttributeError:
            pass
    if not response:
        # Load static file from filesystem.
        if filename.endswith('.coffee'):
            # This is CoffeeScript, so we need to return the compiled JavaScript
            # instead. Ok, not exactly static, strictly speaking. Close enough.
            src = os.path.abspath(os.path.join(root, filename))
            if not src.startswith(root):
                raise web.HTTPError(403, 'Access denied.')
            elif not os.path.exists(src):
                # Before we give up, is there a pre-compiled version? If not,
                # static_file() will return a 404.
                response = web.static_file(filename + '.compiled', root=root)
            else:
                cached, data = cscompile_with_cache(src, web.request['coffee.cachedir'])
                # BUGFIX: parenthesize the conditional. Previously the ternary
                # bound looser than '%', so the uncached branch set logextra to
                # the bare string 'compiled on demand' without the (CS ...)
                # wrapper.
                web.response.logextra = '(CS %s)' % ('cached' if cached else 'compiled on demand')
                web.response.content_type = 'application/javascript'
                web.response['Cache-Control'] = 'max-age=3600'
                return data
        else:
            response = web.static_file(filename, root=root)
    if filename.endswith('.gz') and not isinstance(response, web.HTTPError):
        # static_file() does the right thing with respect to Content-Type
        # and Content-Encoding for gz files. But if the client doesn't have
        # gzip in Accept-Encoding, we need to decompress it on the fly.
        if 'gzip' not in web.request.headers.get('Accept-Encoding', ''):
            import gzip
            response.body = gzip.GzipFile(fileobj=response.body)
    #elif filename.endswith('.coffee'):
    # response['X-SourceMap'] = '/static/' + filename + '.map'
    return response
@web.get('/')
@shview('home.tmpl')
def home():
    """Render the home page; no extra template variables."""
    return {}
@web.get('/tv/')
@shview('tv/library.tmpl')
def tv_library():
    """Render the TV library page; no extra template variables."""
    return {}
@web.get('/tv/<id>', method='GET')
@shview('tv/show.tmpl')
def tv_show(id):
    """Render the detail page for a single series looked up by ``id``.

    Responds 404 when the id does not match a known series.
    """
    tvdb = web.request['stagehand.manager'].tvdb
    series = tvdb.get_series_by_id(id)
    if not series:
        raise web.HTTPError(404, 'Invalid show.')
    return {
        'series': series,
        'providers': tvdb.providers.values()
    }
@web.get('/tv/add', method='GET')
@shview('tv/add.tmpl')
def tv_add():
    """Render the add-series page; no extra template variables."""
    return {}
@web.get('/tv/upcoming')
@shview('tv/upcoming.tmpl')
def tv_upcoming():
    """Render the upcoming-episodes page; no extra template variables."""
    return {}
@web.get('/downloads/')
@shview('downloads/downloads.tmpl')
def downloads():
    """Render the downloads page.

    Query parameters: ``weeks`` (history window in weeks, default 1) and
    ``status`` (default ``'have'``).
    """
    manager = web.request['stagehand.manager']
    weeks = int(web.request.query.weeks) if web.request.query.weeks.isdigit() else 1
    status = web.request.query.status or 'have'
    # Construct a list of episodes, sorted by air date, that are either needed
    # or match the criteria for inclusion (based on status and weeks). Episodes
    # in the download queue aren't included as they're displayed separately.
    today = datetime.now().replace(hour=0, minute=0, second=0, microsecond=0)
    # The most recent past Sunday (or today, if today is Sunday)
    sunday = today if today.weekday() == 6 else today - timedelta(days=today.weekday() + 1)
    episodes = []
    for s in manager.tvdb.series:
        for ep in s.episodes:
            if ep.status != ep.STATUS_NEED_FORCED and (not ep.aired or manager.is_episode_queued_for_retrieval(ep)):
                continue
            if s.cfg.paused:
                # Don't show episodes for paused series, even if they are needed.
                continue
            icon, title = episode_status_icon_info(ep)
            if ep.airdate:
                # week 0 is anything on or after sunday
                week = (max(0, (sunday - ep.airdate).days) + 6) // 7
                if (icon in ('ignore', 'have') and week >= weeks) or (icon == 'ignore' and status == 'have'):
                    continue
            else:
                # Episode is STATUS_NEED_FORCED without an airdate.
                week = None
            episodes.append((ep, icon, title, week))
    # For episodes without an airdate, just use 1900-01-01 for sorting
    # purposes, so they sort last.
    episodes.sort(key=lambda i: (i[0].airdatetime or datetime(1900, 1, 1), i[0].name), reverse=True)
    return {
        'weeks': weeks,
        'status': status,
        'episodes': episodes
    }
@web.get('/settings/')
@shview('settings/general.tmpl')
def settings_general():
    """Render the general settings page, including a rename preview built
    from the current naming configuration."""
    return {
        # Config descriptions with paragraph breaks converted to HTML.
        'desc': lambda x: get_description(x).replace('\n\n', '<br/><br/>'),
        'rename_example':
            rename_example(config.misc.tvdir, config.naming.separator, config.naming.season_dir_format,
                config.naming.code_style, config.naming.episode_format)
    }
@web.get('/settings/rename_example')
def settings_rename_example():
    """Return a rename preview for the naming options given in the query."""
    query = web.request.query
    return rename_example(config.misc.tvdir,
                          query.separator,
                          query.season_dir_format,
                          query.code_style,
                          query.episode_format)
@web.get('/settings/searchers')
@shview('settings/searchers.tmpl')
def settings_searchers():
    """Render the searcher settings page; the template needs no context."""
    return {}
@web.get('/settings/retrievers')
@shview('settings/retrievers.tmpl')
def settings_retrievers():
    """Render the retriever settings page; the template needs no context."""
    return {}
@web.get('/settings/notifiers')
@shview('settings/notifiers.tmpl')
def settings_notifiers():
    """Render the notifier settings page; the template needs no context."""
    return {}
@web.get('/log/')
@shview('log/application.tmpl')
def log_application():
    """Render the application log viewer; the template needs no context."""
    return {}
@web.get('/log/web')
@shview('log/web.tmpl')
def log_web():
    """Render the web-access log viewer; the template needs no context."""
    return {}
| {
"content_hash": "899eb0cb701f2a98e5de1176be6c8f36",
"timestamp": "",
"source": "github",
"line_count": 198,
"max_line_length": 116,
"avg_line_length": 32.73232323232323,
"alnum_prop": 0.6292238852029007,
"repo_name": "jtackaberry/stagehand",
"id": "6e294e700dd6492d68416ebeaa7dadfe10542650",
"size": "6481",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "stagehand/web/app.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "11070"
},
{
"name": "CoffeeScript",
"bytes": "8397"
},
{
"name": "JavaScript",
"bytes": "2294"
},
{
"name": "Python",
"bytes": "650370"
}
],
"symlink_target": ""
} |
import logging
import os
import time
from concurrent.futures import ThreadPoolExecutor, as_completed
from random import randrange
from collections import namedtuple, deque
from copy import deepcopy
from urllib.parse import urljoin, urlparse
import requests
from .decorators import with_history
from .exceptions import CrawlerError
from .helpers import ForcedInteger
from .parser import HtmlParser
from .scraper import Scraper
from .descriptors import (
Useragent,
Proxy,
Headers
)
# Maps Content-Type substrings to the parser class used for that response.
# Every supported type currently routes to HtmlParser.
# NOTE(review): 'text/json' is a non-standard media type -- presumably kept
# for servers that mislabel JSON responses; confirm before removing.
PARSERS = {
    'text/html': HtmlParser,
    'text/plain': HtmlParser,
    'text/json': HtmlParser,
    'application/xml': HtmlParser,
    'application/json': HtmlParser
}
class Crawler(Scraper):
    """Browser mimicking object. Mostly wrapper on Requests and Lxml libraries.

    :param history: (optional) bool, turns off/on history usage in Crawler
    :param max_history: (optional) int, max items held in history
    :param absolute_links: (optional) bool, makes always all links absolute

    Features:

    - To some extent, acts like a browser
    - Allows visiting pages, form posting, content scraping, cookie handling etc.
    - Wraps ``requests.Session()``

    Simple usage::

        >>> c = Crawler()
        >>> response = c.open('https://httpbin.org/html')
        >>> response.status_code
        200

    Form submit::

        >>> c = Crawler()
        >>> response = c.open('https://httpbin.org/forms/post')
        >>> forms = c.forms()

    Filling up fields values:

        >>> form = forms[0]
        >>> form.fields = {
        ...     'custname': 'Ruben Rybnik',
        ...     'custemail': 'ruben.rybnik@fakemail.com',
        ...     'size': 'medium',
        ...     'topping': ['bacon', 'cheese'],
        ...     'custtel': '+48606505888'
        ... }
        >>> submit_result = c.submit(form)
        >>> submit_result.status_code
        200

    Checking if form post ended with success:

        >>> c.submit_check(
        ...     form,
        ...     phrase="Ruben Rybnik",
        ...     url='https://httpbin.org/post',
        ...     status_codes=[200])
        True

    Form file upload::

        >>> c = Crawler()
        >>> c.open('http://cgi-lib.berkeley.edu/ex/fup.html')
        <Response [200]>
        >>> forms = c.forms()
        >>> upload_form = forms[0]
        >>> upload_form.fields = {
        ...     'note': 'Text file with quote',
        ...     'upfile': open('test/test_file.txt', 'r')
        ... }
        >>> c.submit(upload_form, action='http://cgi-lib.berkeley.edu/ex/fup.cgi')
        <Response [200]>
        >>> c.submit_check(
        ...     upload_form,
        ...     phrase="road is easy",
        ...     status_codes=[200]
        ... )
        True

    Cookies handling::

        >>> c = Crawler()
        >>> c.open('https://httpbin.org/cookies', cookies={
        ...     'cookie_1': '1000101000101010',
        ...     'cookie_2': 'ABABHDBSBAJSLLWO',
        ... })
        <Response [200]>

    Find links::

        >>> c = Crawler()
        >>> c.open('https://httpbin.org/links/10/0')
        <Response [200]>

    Links can be filtered by some html tags and filters
    like: id, text, title and class:

        >>> links = c.links(
        ...     tags = ('style', 'link', 'script', 'a'),
        ...     filters = {
        ...         'text': '7'
        ...     },
        ...     match='NOT_EQUAL'
        ... )
        >>> len(links)
        8

    Find images::

        >>> c = Crawler()
        >>> c.open('https://www.python.org/')
        <Response [200]>

    First image path with 'python-logo' in string:

        >>> next(
        ...     image_path for image_path in c.images()
        ...     if 'python-logo' in image_path
        ... )
        'https://www.python.org/static/img/python-logo.png'

    Download file::

        >>> import os
        >>> c = Crawler()
        >>> local_file_path = c.download(
        ...     local_path='test',
        ...     url='https://httpbin.org/image/png',
        ...     name='test.png'
        ... )
        >>> os.path.isfile(local_file_path)
        True

    Download files list in parallel::

        >>> c = Crawler()
        >>> c.open('https://xkcd.com/')
        <Response [200]>
        >>> full_images_urls = [c.join_url(src) for src in c.images()]
        >>> downloaded_files = c.download_files('test', files=full_images_urls)
        >>> len(full_images_urls) == len(downloaded_files)
        True

    Traversing through history::

        >>> c = Crawler()
        >>> c.open('http://quotes.toscrape.com/')
        <Response [200]>
        >>> tags_links = c.links(filters={'class': 'tag'})
        >>> c.follow(tags_links[0])
        <Response [200]>
        >>> c.follow(tags_links[1])
        <Response [200]>
        >>> c.follow(tags_links[2])
        <Response [200]>
        >>> history = c.history()
        >>> c.back()
        >>> c.get_url() == history[-2].url
        True
    """
    # Descriptor-backed configuration attributes; validation lives in the
    # descriptor classes imported from .descriptors.
    useragent = Useragent()
    proxy = Proxy()
    headers = Headers()
    max_retries = ForcedInteger('max_retries')

    def __init__(self, history=True, max_history=5, absolute_links=True):
        """Crawler initialization

        :param history: bool, turns on/off history handling
        :param max_history: max items stored in flow
        :param absolute_links: globally make links absolute
        """
        super().__init__(
            history=history,
            max_history=max_history,
            absolute_links=absolute_links
        )
        self._session = requests.Session()
        self._history = history
        self._max_history = max_history
        # Bounded history: appending beyond maxlen silently drops the oldest.
        self._flow = deque(maxlen=self._max_history)
        self._index = 0
        self._parser = None
        self._current_response = None
        self._absolute_links = absolute_links
        self._useragent = None
        self._headers = {}
        self._proxy = {}
        self._loop = None
        self._executor = None
        self._max_retries = 0
        self._retries = 0
        self._logging = False
        self._logger = None
        self._random_timeout = None

    @property
    def logging(self):
        """Whether request logging is enabled."""
        return self._logging

    @logging.setter
    def logging(self, value):
        # Create/drop the logger together with the flag.
        self._logging = value
        self._logger = logging.getLogger(__name__) if value else None

    @property
    def random_timeout(self):
        """(min, max) seconds slept after each successful request, or None."""
        return self._random_timeout

    @random_timeout.setter
    def random_timeout(self, value):
        if isinstance(value, (list, tuple)):
            self._random_timeout = value
        else:
            raise TypeError('Expected list or tuple.')

    def fit_parser(self, response):
        """Fits parser according to response type.

        :param response: class::`Response <Response>` object
        :return: matched parser object like: class::`HtmlParser <HtmlParser>`
            object, or None when the content type is unsupported
        """
        content_type = response.headers.get('Content-type', '')
        for _type, parser_cls in PARSERS.items():
            # Substring match so values like 'text/html; charset=utf-8' fit.
            if _type in content_type:
                self._parser = parser_cls(response, session=self._session)
                return self._parser
        if self._logging:
            self._logger.info("Couldn't fit parser for {}.".format(content_type))

    def handle_response(self):
        """Called after request. Make operations according to attribute
        settings (absolute links, history recording)."""
        if self._absolute_links:
            self._parser.make_links_absolute()
        if self._history:
            self._flow.append({'parser': deepcopy(self._parser)})

    def open(self, url, method='get', **kwargs):
        """Opens url. Wraps functionality of `Session` from `Requests` library.

        :param url: visiting url str
        :param method: 'get', 'post' etc. str
        :param kwargs: additional keywords like headers, cookies etc.
        :return: class::`Response <Response>` object
        """
        self._retries = 0
        self._current_response = None
        # Point the history index at the slot the new entry will occupy.
        # When the deque is already full, appending drops the oldest item so
        # the newest entry always lands at position max_history - 1.  (The
        # previous `if len < max` guard left a stale index after back()
        # followed by open(), which then updated the wrong history entry.)
        self._index = min(len(self._flow), self._max_history - 1)
        self.add_customized_kwargs(kwargs)
        while True:
            try:
                self._current_response = self._session.request(method, url, **kwargs)
                if self._random_timeout:
                    # Polite crawling: random pause within the configured range.
                    time.sleep(randrange(*self._random_timeout))
                if self._logging:
                    self._logger.info(
                        'Open method: {} request: url={}, status code={}, kwargs={} '.format(
                            method.upper(),
                            url,
                            self._current_response.status_code,
                            kwargs
                        ))
            except requests.exceptions.ConnectionError:
                # Linear back-off: wait `retries` seconds before retrying.
                self._retries += 1
                time.sleep(self._retries)
                if self.logging:
                    self._logger.error(
                        'Failed, try {}, method: {} request: url={}, kwargs={} '.format(
                            self._retries,
                            method.upper(),
                            url,
                            kwargs
                        ))
                if self._retries >= self._max_retries:
                    raise
                continue
            break
        if self._current_response and self.fit_parser(self._current_response):
            self.handle_response()
            if self._history:
                self._flow[self._index].update({'response': deepcopy(self._current_response)})
        return self._current_response

    def add_customized_kwargs(self, kwargs):
        """Adds request keyword arguments customized by setting `Crawler`
        attributes like proxy, useragent, headers. Arguments won't be passed
        if they are already set as `open` method kwargs.
        """
        if self._proxy:
            kwargs.setdefault('proxies', self._proxy)
        if self._headers:
            kwargs.setdefault('headers', self._headers)

    def response(self):
        """Get current response."""
        return self._current_response

    def get_url(self):
        """Get URL of current document."""
        return self._current_response.url

    def join_url(self, url_path):
        """Returns absolute_url. Path joined with url_root."""
        return urljoin(
            self._current_response.url,
            url_path
        )

    @with_history
    def back(self, step=1):
        """Go back n steps in history and make that response current."""
        # >= 0 so the very first history entry (index 0) stays reachable;
        # the previous `> 0` check made it impossible to go all the way back.
        if self._index - step >= 0:
            self._index -= step
            self._current_response = self._flow[self._index]['response']
        else:
            raise CrawlerError("Out of history boundaries")

    @with_history
    def forward(self, step=1):
        """Go forward n steps in history and make that response current."""
        # Bound by the number of entries actually stored, not max_history:
        # the previous check allowed stepping onto empty slots, raising
        # IndexError instead of CrawlerError.
        if self._index + step < len(self._flow):
            self._index += step
            self._current_response = self._flow[self._index]['response']
        else:
            raise CrawlerError("Out of history boundaries")

    def follow(self, url, method='get', **kwargs):
        """Follows url relative to the current document."""
        # open() applies the customized kwargs itself; no need to do it twice.
        return self.open(self.join_url(url), method, **kwargs)

    @with_history
    def flow(self):
        """Return flow"""
        return self._flow

    def clear(self):
        """Clears all flow, session, headers etc."""
        self._flow.clear()
        self._index = 0
        self._session.cookies.clear()
        self._headers = {}
        self._proxy = {}

    @with_history
    def history(self):
        """Return urls history and status codes"""
        Visit = namedtuple('Visited', 'url method response')
        return [
            Visit(
                entry['response'].url,
                entry['response'].request.method,
                entry['response'].status_code
            )
            for entry in self._flow
        ]

    def request_history(self):
        """Returns current request history (like list of redirects to finally
        accomplish request)
        """
        return self._current_response.history

    @property
    def cookies(self):
        """Wraps `RequestsCookieJar` object from requests library.

        :return: `RequestsCookieJar` object
        """
        return self._current_response.cookies

    def current_parser(self):
        """Return parser associated with current flow item.

        :return: matched parser object like: class::`HtmlParser <HtmlParser>` object
        """
        return self._flow[self._index]['parser']

    def forms(self, filters=None):
        """Return iterable over forms. Doesn't find javascript forms yet (but will be).

        example_filters = {
            'id': 'searchbox',
            'name': 'name',
            'action': 'action',
            'has_fields': ['field1', 'field2']
        }

        Usage::

            >>> c = Crawler()
            >>> response = c.open('http://cgi-lib.berkeley.edu/ex/fup.html')
            >>> forms = c.forms()
            >>> forms[0].fields['note'].get('tag')
            'input'
        """
        filters = filters or {}
        if self._history:
            return self.current_parser().find_forms(filters)
        return self._parser.find_forms(filters)

    def submit(self, form=None, action=None, data=None):
        """Submits form

        :param form: `FormWrapper` object
        :param action: custom action url
        :param data: additional custom values to submit
        :return: submit result
        """
        if form:
            action = action or form.action_url()
            values = form.form_values()
            form.append_extra_values(values, data)
            form.result = self.open(
                action,
                form.method,
                data=values,
                files=form.files,
            )
        else:
            # No form object given: fall back to a plain POST.
            self.direct_submit(url=action, data=data)
        return self._current_response

    def direct_submit(self, url=None, data=None):
        """Direct submit. Used when quick post to form is needed or if there
        are no forms found by the parser.

        Usage::

            >>> data = {'name': 'Piccolo'}
            >>> c = Crawler()
            >>> result = c.submit(action='https://httpbin.org/post', data=data)
            >>> result.status_code
            200

        :param url: submit url, form action url, str
        :param data: submit parameters, dict
        :return: class::`Response <Response>` object
        """
        current_url = None
        if self._current_response:
            current_url = self._current_response.url
        return self.open(
            url or current_url,
            method='post',
            data=data or {}
        )

    def submit_check(self, form, phrase=None, url=None, status_codes=None):
        """Checks if success conditions of form submit are met

        :param form: `FormWrapper` object
        :param phrase: expected phrase in text
        :param url: expected url
        :param status_codes: list of expected status codes
        :return: bool
        """
        return all([
            phrase in form.result.text if phrase else True,
            form.result.url == url if url else True,
            form.result.status_code in status_codes if status_codes else True
        ])

    def encoding(self):
        """Returns current response encoding."""
        # History entries are dicts; the encoding lives on the stored
        # response (the previous code read `.encoding` off the dict itself,
        # which always raised AttributeError).
        return self._flow[self._index]['response'].encoding

    def download(self, local_path=None, url=None, name=None):
        """Download a single file.

        :param local_path: target directory
        :param url: remote file url
        :param name: optional explicit file name; defaults to the url's last
            path segment
        :return: path of the downloaded file, or None when no file name
            could be derived
        """
        file_name = name or os.path.split(urlparse(url).path)[-1]
        if file_name:
            download_path = os.path.join(local_path, file_name)
            with open(download_path, 'wb') as f:
                f.write(self._session.get(url).content)
            return download_path

    def download_files(self, local_path, files=None, workers=10):
        """Download list of files in parallel.

        :param workers: number of threads
        :param local_path: download path
        :param files: list of files
        :return: list with downloaded files paths
        """
        files = files or []
        results = []
        with ThreadPoolExecutor(max_workers=workers) as executor:
            for future in as_completed(
                    executor.submit(self.download, local_path, file)
                    for file in files
            ):
                results.append(future.result())
        return results
if __name__ == '__main__':
    # Run the usage examples embedded in the docstrings above as doctests.
    import doctest
    doctest.testmod()
| {
"content_hash": "093a6daea555d840f7aba1600872dcac",
"timestamp": "",
"source": "github",
"line_count": 530,
"max_line_length": 97,
"avg_line_length": 31.477358490566036,
"alnum_prop": 0.5407300845171732,
"repo_name": "nuncjo/Delver",
"id": "f77ba590f83bcbf14cd2fab161a328625750686c",
"size": "16708",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "delver/crawler.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "2283"
},
{
"name": "Python",
"bytes": "61373"
}
],
"symlink_target": ""
} |
import logging
import requests
import json
from datetime import datetime
from django.core.management.base import BaseCommand
from seaserv import ccnet_api
from seahub.work_weixin.utils import handler_work_weixin_api_response, \
get_work_weixin_access_token, admin_work_weixin_departments_check
from seahub.work_weixin.settings import WORK_WEIXIN_DEPARTMENTS_URL, \
WORK_WEIXIN_PROVIDER
from seahub.auth.models import ExternalDepartment
logger = logging.getLogger(__name__)
class Command(BaseCommand):
    """Back-fill ExternalDepartment rows for work-weixin departments that
    were imported as groups but never recorded in the database."""
    help = "Fix sync the imported work-weixin departments to the database."

    def println(self, msg):
        # Timestamped echo to stdout for interactive runs.
        stamp = str(datetime.now())
        self.stdout.write('[%s] %s\n' % (stamp, msg))

    def log_error(self, msg):
        # Log to the module logger first, then mirror on stdout.
        logger.error(msg)
        self.println(msg)

    def log_info(self, msg):
        logger.info(msg)
        self.println(msg)

    def log_debug(self, msg):
        logger.debug(msg)
        self.println(msg)

    def handle(self, *args, **options):
        """Entry point invoked by Django's management framework."""
        self.log_debug('Start fix sync work-weixin departments...')
        self.do_action()
        self.log_debug('Finish fix sync work-weixin departments.\n')

    def get_group_by_name(self, group_name):
        """Return the ccnet group whose name matches exactly, or None."""
        candidates = ccnet_api.search_groups(group_name, -1, -1)
        return next(
            (group for group in candidates if group.group_name == group_name),
            None)

    def list_departments_from_work_weixin(self, access_token):
        """Fetch the full department list from the work-weixin API, or None."""
        # https://work.weixin.qq.com/api/doc/90000/90135/90208
        params = {
            'access_token': access_token,
        }
        resp = handler_work_weixin_api_response(
            requests.get(WORK_WEIXIN_DEPARTMENTS_URL, params=params))
        if not resp:
            self.log_error('can not get work weixin departments response')
            return None
        if 'department' not in resp:
            self.log_error(json.dumps(resp))
            self.log_error(
                'can not get department list in work weixin departments response')
            return None
        return resp['department']

    def do_action(self):
        """Sync every work-weixin department that has a matching ccnet group."""
        # Bail out unless the integration is configured and enabled.
        if not admin_work_weixin_departments_check():
            self.log_error('Feature is not enabled.')
            return
        token = get_work_weixin_access_token()
        if not token:
            self.log_error('can not get work weixin access_token')
            return
        departments = self.list_departments_from_work_weixin(token)
        if departments is None:
            self.log_error('获取企业微信组织架构失败')
            return
        departments.sort(key=lambda dept: dept['id'])
        self.log_debug(
            'Total %d work-weixin departments.' % len(departments))
        synced = 0
        already_synced = 0
        for dept in departments:
            name = dept.get('name')
            outer_id = dept.get('id')
            if outer_id is None or not name:
                continue
            # Skip departments that already have a mapping row in the db.
            if ExternalDepartment.objects.get_by_provider_and_outer_id(
                    WORK_WEIXIN_PROVIDER, outer_id):
                already_synced += 1
                continue
            group = self.get_group_by_name(name)
            if group:
                ExternalDepartment.objects.create(
                    group_id=group.id,
                    provider=WORK_WEIXIN_PROVIDER,
                    outer_id=outer_id,
                )
                synced += 1
        self.log_debug('%d work-weixin departments exists in db.' % already_synced)
        self.log_debug('Sync %d work-weixin departments to db.' % synced)
| {
"content_hash": "bad0595551b5802c75e3556c7e1b68cf",
"timestamp": "",
"source": "github",
"line_count": 121,
"max_line_length": 87,
"avg_line_length": 33.429752066115704,
"alnum_prop": 0.6032138442521632,
"repo_name": "miurahr/seahub",
"id": "0d7c08763fa05251f470bbd18dcab5ddc05a338e",
"size": "4069",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "seahub/work_weixin/management/commands/fix_work_weixin_departments_sync.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "231001"
},
{
"name": "HTML",
"bytes": "750509"
},
{
"name": "JavaScript",
"bytes": "2430915"
},
{
"name": "Python",
"bytes": "1500021"
},
{
"name": "Shell",
"bytes": "8856"
}
],
"symlink_target": ""
} |
import kivy
kivy.require('1.7.1')
from kivy.app import App
from kivy.properties import ObjectProperty
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.label import Label
from kivy.uix.popup import Popup
from plyer import tts
__version__ = '1.1.12'
class SayThis(BoxLayout):
    """Root widget: a text box whose contents are spoken via plyer's TTS."""
    saywhat_text = ObjectProperty(None)

    def say_something(self, text):
        """Speak *text* aloud, or show a popup when TTS is unsupported."""
        try:
            tts.speak(text)
        except NotImplementedError:
            # plyer raises NotImplementedError on platforms without a TTS
            # backend; tell the user instead of crashing.
            warning = Popup(
                title='TTS Not Implemented',
                content=Label(text='Sorry. TTS is not available.'),
                size_hint=(None, None),
                size=(300, 300))
            warning.open()

    def clear(self):
        """Empty the input box and give it focus again."""
        self.saywhat_text.text = ""
        self.saywhat_text.focus = True
class SayThisApp(App):
    """Kivy application wrapper around the SayThis root widget."""
    def build(self):
        # Root widget; its kv rules are auto-loaded from the app's .kv file.
        return SayThis()
    def on_pause(self):
        # Returning True lets the app survive being paused (Android lifecycle).
        return True
    def on_resume(self):
        # Nothing to restore after a pause.
        pass
if __name__ == '__main__':
    # Start the Kivy event loop.
    SayThisApp().run()
| {
"content_hash": "2583c86feff85ffc23b1a36ceea5ebb4",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 77,
"avg_line_length": 21.82608695652174,
"alnum_prop": 0.5796812749003984,
"repo_name": "brousch/saythis",
"id": "ade345f09e4d2c809fa6035f0679efcb156f9dc6",
"size": "1004",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "saythis/main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2382"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import django.core.files.storage
from django.db import models, migrations
import simple_classroom.apps.downloads
import simple_classroom.apps.downloads.models
class Migration(migrations.Migration):
    """Auto-generated Django migration: adds ordering/verbose-name options to
    Download and switches both file fields to explicit storages/upload paths.
    Kept byte-identical apart from comments -- Django compares migration
    state, so editing the operations here would desynchronize it.
    """
    dependencies = [
        ('downloads', '0008_auto_20150314_1327'),
    ]
    operations = [
        migrations.AlterModelOptions(
            name='download',
            # 'Descarga(s)' are the Spanish admin labels for Download.
            options={'ordering': ('upload_date',), 'verbose_name': 'Descarga', 'verbose_name_plural': 'Descargas'},
        ),
        migrations.AlterField(
            model_name='download',
            name='data',
            field=models.FileField(storage=django.core.files.storage.FileSystemStorage(), null=True, upload_to=b'files', blank=True),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='sitedownload',
            name='data',
            # sitedownload derives its path at runtime via get_upload_path.
            field=models.FileField(storage=django.core.files.storage.FileSystemStorage(), null=True, upload_to=simple_classroom.apps.downloads.models.get_upload_path, blank=True),
            preserve_default=True,
        ),
    ]
| {
"content_hash": "9f0df2b79fc29995551ddef134c45e34",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 179,
"avg_line_length": 35.34375,
"alnum_prop": 0.6427939876215738,
"repo_name": "maxicecilia/simple_classroom",
"id": "f0c8b490bd3bbcb1cdcc4358aa4c04cd3bea70d9",
"size": "1155",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "simple_classroom/apps/downloads/migrations/0009_auto_20150409_0140.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "48156"
},
{
"name": "HTML",
"bytes": "30114"
},
{
"name": "JavaScript",
"bytes": "1896"
},
{
"name": "Python",
"bytes": "84195"
}
],
"symlink_target": ""
} |
import configparser
import multiprocessing
import os
import pickle
import random
import sys
import time
from Modes import Base_Mode
from multiprocessing import Manager
from InterfaceAPI import InterfaceAPI, ApiError, ApiError404, ApiError403
ATTEMPTS = 3
class DataDownloader:
    """Downloads ranked games played on a given patch for one region.

    :param database: root directory of the local database
    :param patch: patch string, e.g. '9.3'
    :param region: platform id, e.g. 'euw1'
    :param leagues: leagues whose players' match histories are crawled
    :param timestamped_patches: shared dict patch -> [first_seen, last_seen]
    """

    def __init__(self, database, patch, region, leagues, timestamped_patches):
        self.api = InterfaceAPI()
        self.database = database
        self.region = region
        self.patch = patch
        self.timestamped_patches = timestamped_patches
        self.db = os.path.join(self.database, 'patches', self.patch, self.region)
        if not os.path.exists(self.db):
            os.makedirs(self.db)
        downloadedFile_name = self.region + '.txt'
        self.downloadedGamesPath = os.path.join(self.database, 'patches', self.patch, downloadedFile_name)
        # Held as a set for O(1) membership tests (the per-game check below
        # runs for every game of every summoner); the on-disk file keeps the
        # append order.
        if os.path.isfile(self.downloadedGamesPath):
            with open(self.downloadedGamesPath, 'r') as f:
                self.downloadedGames = {x.strip() for x in f}
        else:
            self.downloadedGames = set()
        # Loop invariants hoisted out of downloadData(): the neighbouring
        # patch names and the numeric patch version only depend on the patch
        # this downloader was created for.
        self.previousPatch = self._adjacent_patch(-1)
        self.nextPatch = self._adjacent_patch(1)
        self.patchVersion = tuple(map(int, self.patch.split('.')))
        self.summonerIDs = []
        if os.path.isfile(os.path.join(database, 'player_listing', region, 'players')):
            players = pickle.load(open(os.path.join(database, 'player_listing', region, 'players'), 'rb'))
            for league in leagues:
                self.summonerIDs.extend(players[league])
        random.shuffle(self.summonerIDs)

    def _adjacent_patch(self, offset):
        """Return the patch whose minor version differs by *offset*,
        e.g. '9.3' -> '9.2' for offset=-1."""
        parts = self.patch.split('.')
        parts[1] = str(int(parts[1]) + offset)
        return '.'.join(parts)

    def downloadData(self):
        """Crawl the listed summoners' match histories and save every game
        of self.patch to disk.

        :return: None when all data is downloaded, or the ApiError403 that
            aborted the crawl.
        """
        while self.summonerIDs:  # if the API is unavailable, or the sumID is unreachable for w/e reason, just skip to the next
            sumID = self.summonerIDs.pop()
            try:
                accountID = self.api.getData('https://%s.api.riotgames.com/lol/summoner/v4/summoners/%s' % (self.region, sumID))['accountId']
                games = \
                    self.api.getData('https://%s.api.riotgames.com/lol/match/v4/matchlists/by-account/%s' % (self.region, accountID), {'queue': 420})[
                        'matches']
            except ApiError403 as e:
                print(e, file=sys.stderr)
                return e
            except ApiError as e:
                print(e, file=sys.stderr)
                continue
            for game in games:  # from most recent to oldest
                gameID = str(game['gameId'])
                # Already downloaded ? This means we are up-to-date
                if gameID in self.downloadedGames:
                    break
                # Wrong timestamp?
                timestamp = game['timestamp']
                if self.previousPatch in self.timestamped_patches and self.timestamped_patches[self.previousPatch][1] > timestamp:
                    # game is too old; all the following games are older still
                    break
                if self.nextPatch in self.timestamped_patches and self.timestamped_patches[self.nextPatch][0] < timestamp:
                    # game is too recent; need to go further back
                    continue
                try:
                    gameData = self.api.getData('https://%s.api.riotgames.com/lol/match/v4/matches/%s' % (self.region, gameID))
                except ApiError403 as e:
                    print(e, file=sys.stderr)
                    return e
                except ApiError404 as e:
                    print(e, file=sys.stderr)
                    break
                except ApiError as e:
                    print(e, file=sys.stderr)
                    continue
                # update timestamps: gameData['gameCreation'] == game['timestamp']
                gamePatch = '.'.join(gameData['gameVersion'].split('.')[:2])
                timestamp = gameData['gameCreation']
                if gamePatch not in self.timestamped_patches:
                    self.timestamped_patches[gamePatch] = [timestamp, timestamp]
                else:  # first seen and last seen
                    if self.timestamped_patches[gamePatch][0] > timestamp:
                        self.timestamped_patches[gamePatch][0] = timestamp
                    elif self.timestamped_patches[gamePatch][1] < timestamp:
                        self.timestamped_patches[gamePatch][1] = timestamp
                # Game too old / too recent? Compare numeric versions so that
                # e.g. '9.10' sorts after '9.2'.
                gameVersion = tuple(map(int, gameData['gameVersion'].split('.')[:2]))
                if gameVersion < self.patchVersion:  # too old history
                    break
                if gameVersion > self.patchVersion:  # too recent history
                    continue
                # saving game
                file_path = os.path.join(self.db, gameID)
                try:
                    pickle.dump(gameData, open(file_path, 'wb'))
                except FileNotFoundError as e:
                    print(e, file=sys.stderr)
                    time.sleep(1)
                    continue
                self.downloadedGames.add(gameID)
                print(self.patch, self.region, gameID)
                try:
                    with open(self.downloadedGamesPath, 'a+') as f:
                        f.write(gameID + '\n')
                except FileNotFoundError as e:
                    print(e, file=sys.stderr)
                    time.sleep(1)
                    continue
        return None  # No data left to download
def keepDownloading(database, patches, region, leagues, timestamped_patches, attempts=ATTEMPTS):
    """Download games for every patch in `patches` for a single region.

    Runs until every listed patch is exhausted or the region's API
    connection permanently fails.

    :param database: root directory of the local database
    :param patches: iterable of patch strings to download
    :param region: platform id, e.g. 'euw1'
    :param leagues: leagues whose players are crawled
    :param timestamped_patches: shared dict patch -> [first_seen, last_seen]
    :param attempts: connection attempts before giving up on the region
    """
    print('Starting data collection for', region, patches, file=sys.stderr)
    for patch in patches:
        dd = None
        while True:
            if not dd:
                try:
                    dd = DataDownloader(database, patch, region, leagues, timestamped_patches)
                except ApiError403 as e:
                    # 403 on construction is fatal: abort the region.
                    print('FATAL ERROR', patch, region, e, file=sys.stderr)
                    return
                except ApiError as e:
                    # Transient API failure: back off 5 minutes, up to
                    # `attempts` times, before giving up on the region.
                    print(e, file=sys.stderr)
                    attempts -= 1
                    if attempts <= 0:
                        print(region, 'initial connection failed. End of connection attempts.', file=sys.stderr)
                        return
                    print(region, 'initial connection failed. Retrying in 5 minutes. Attempts left:', attempts, file=sys.stderr)
                    time.sleep(300)
                    continue
            e = dd.downloadData()
            # NOTE(review): downloadData() only returns non-None for a fatal
            # ApiError403, yet this loop retries immediately and forever --
            # presumably the intent was to abort like the constructor path
            # above; confirm before changing.
            while e is not None:
                print('FATAL ERROR', patch, region, e, file=sys.stderr)
                e = dd.downloadData()
            print(region, patch, 'all games downloaded', file=sys.stderr)
            break
    print(region, 'download complete')
def saveLastSeen(timestamped_patches, save_interval, end):
    """Periodically persist the patch timestamps to config.ini.

    Runs until `end` is set, saving every `save_interval` seconds, then
    performs one final save so the last state is never lost on shutdown.

    :param timestamped_patches: shared dict patch -> [first_seen, last_seen]
    :param save_interval: seconds between periodic saves
    :param end: Event signalling shutdown
    """
    cfg = configparser.ConfigParser()
    cfg.read('config.ini')

    def _persist():
        # Snapshot the shared dict into the PATCHES section, rewrite the file.
        for key, value in timestamped_patches.items():
            cfg['PATCHES'][key] = ','.join(map(str, value))
        with open('config.ini', 'w') as configfile:
            cfg.write(configfile)
        print('patch timestamps saved')

    last_save = time.time()
    # wait(timeout=1) wakes at most once a second (same cadence as the old
    # sleep(1) poll) but returns immediately when the event is set.
    while not end.wait(timeout=1):
        if last_save + save_interval < time.time():
            _persist()
            last_save = time.time()
    _persist()
def run(mode):
    """Spawn one downloader process per region plus a timestamp-saver
    process, then wait for everything to finish.

    :param mode: a Base_Mode providing config, regions, patches, leagues
        and the database location
    :raises TypeError: when `mode` is not a Base_Mode (the previous bare
        assert was stripped under ``python -O``)
    """
    if not isinstance(mode, Base_Mode):
        raise TypeError('Unrecognized mode {}'.format(mode))
    manager = Manager()
    last_seen_from_patch = manager.dict()
    endUpdate = manager.Event()
    # Seed the shared dict with timestamps already known from config.ini.
    for key, value in mode.config['PATCHES'].items():
        last_seen_from_patch[key] = list(map(int, value.split(',')))  # first seen and last seen
    kdprocs = []
    for region in mode.REGIONS:
        kdprocs.append(
            multiprocessing.Process(target=keepDownloading,
                                    args=(mode.DATABASE, mode.PATCHES_TO_DOWNLOAD, region, mode.LEAGUES, last_seen_from_patch)))
        kdprocs[-1].start()
    # The saver flushes the shared dict to config.ini every 5 minutes until
    # signalled to stop.
    slsproc = multiprocessing.Process(target=saveLastSeen, args=(last_seen_from_patch, 300, endUpdate))
    slsproc.start()
    for kdproc in kdprocs:
        kdproc.join()
    # All downloaders finished: tell the saver to do its final save and exit.
    # (The previous version set the event a second, redundant time after the
    # join.)
    endUpdate.set()
    slsproc.join()
    print('-- Download complete --')
if __name__ == '__main__':
    # Default entry point: download everything described by the base config.
    m = Base_Mode()
    run(m)
| {
"content_hash": "f5f8365e36b714b800ffb604b8b24cfd",
"timestamp": "",
"source": "github",
"line_count": 218,
"max_line_length": 150,
"avg_line_length": 41.440366972477065,
"alnum_prop": 0.5551250830197033,
"repo_name": "vingtfranc/LoLAnalyzer",
"id": "8d2b96034eae66294b9a67ae355f94845a3b45ab",
"size": "9110",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "DataDownloader.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "100372"
}
],
"symlink_target": ""
} |
import pytest
import sh
from molecule import config
from molecule.verifier.lint import flake8
@pytest.fixture
def molecule_verifier_lint_section_data():
    # Minimal molecule config fragment enabling the flake8 verifier-linter
    # with one option and one env var; merged into config_instance below.
    return {
        'verifier': {
            'name': 'testinfra',
            'lint': {
                'name': 'flake8',
                'options': {
                    'foo': 'bar',
                },
                'env': {
                    'foo': 'bar',
                },
            }
        }
    }
@pytest.fixture
def flake8_instance(molecule_verifier_lint_section_data, config_instance):
    # Build a Flake8 verifier-linter from a config that has the lint
    # section above merged in.
    config_instance.merge_dicts(config_instance.config,
                                molecule_verifier_lint_section_data)
    return flake8.Flake8(config_instance)
def test_config_private_member(flake8_instance):
    # The linter must keep a reference to the molecule Config it was built from.
    assert isinstance(flake8_instance._config, config.Config)
def test_default_options_property(flake8_instance):
    """Flake8 ships with no default CLI options."""
    assert flake8_instance.default_options == {}
def test_default_env_property(flake8_instance):
    """The default env must expose all molecule runtime variables."""
    env = flake8_instance.default_env
    for required in ('MOLECULE_FILE', 'MOLECULE_INVENTORY_FILE',
                     'MOLECULE_SCENARIO_DIRECTORY', 'MOLECULE_INSTANCE_CONFIG'):
        assert required in env
def test_name_property(flake8_instance):
    """The linter identifies itself as 'flake8'."""
    assert flake8_instance.name == 'flake8'
def test_enabled_property(flake8_instance):
    # Lint is enabled by default in the merged config fixture.
    assert flake8_instance.enabled
def test_options_property(flake8_instance):
    """Options come straight from the verifier.lint.options section."""
    expected = {
        'foo': 'bar',
    }
    assert flake8_instance.options == expected
def test_options_property_handles_cli_args(flake8_instance):
    """CLI args must not leak into the flake8 options."""
    flake8_instance._config.args = {'debug': True}
    # Does nothing. The `flake8` command does not support
    # a `debug` flag.
    assert flake8_instance.options == {
        'foo': 'bar',
    }
def test_bake(flake8_instance):
    """bake() joins the binary, options and test paths into one command."""
    flake8_instance._tests = ['test1', 'test2', 'test3']
    flake8_instance.bake()

    expected = '{} --foo=bar test1 test2 test3'.format(str(sh.flake8))
    assert flake8_instance._flake8_command == expected
def test_execute(patched_logger_info, patched_logger_success,
                 patched_run_command, flake8_instance):
    """execute() runs the baked command and logs start/success messages."""
    flake8_instance._tests = ['test1', 'test2', 'test3']
    flake8_instance._flake8_command = 'patched-command'
    flake8_instance.execute()

    patched_run_command.assert_called_once_with('patched-command', debug=False)
    info_msg = 'Executing Flake8 on files found in {}/...'.format(
        flake8_instance._config.verifier.directory)
    patched_logger_info.assert_called_once_with(info_msg)
    patched_logger_success.assert_called_once_with(
        'Lint completed successfully.')
def test_execute_does_not_execute(patched_run_command, patched_logger_warn,
                                  flake8_instance):
    """With lint disabled, execute() warns and never runs the command."""
    flake8_instance._config.config['verifier']['lint']['enabled'] = False
    flake8_instance.execute()

    assert not patched_run_command.called
    patched_logger_warn.assert_called_once_with(
        'Skipping, verifier_lint is disabled.')
def test_does_not_execute_without_tests(patched_run_command,
                                        patched_logger_warn, flake8_instance):
    """Without discovered tests, execute() warns and skips the run."""
    flake8_instance.execute()

    assert not patched_run_command.called
    patched_logger_warn.assert_called_once_with('Skipping, no tests found.')
def test_execute_bakes(patched_run_command, flake8_instance):
    """execute() bakes the command itself when none was set beforehand."""
    flake8_instance._tests = ['test1', 'test2', 'test3']
    flake8_instance.execute()

    assert flake8_instance._flake8_command is not None
    expected = '{} --foo=bar test1 test2 test3'.format(str(sh.flake8))
    patched_run_command.assert_called_once_with(expected, debug=False)
def test_executes_catches_and_exits_return_code(
        patched_run_command, patched_get_tests, flake8_instance):
    """A failing flake8 run exits the process with flake8's return code."""
    patched_run_command.side_effect = sh.ErrorReturnCode_1(sh.flake8, b'', b'')
    with pytest.raises(SystemExit) as e:
        flake8_instance.execute()
    assert e.value.code == 1
| {
"content_hash": "86b7ff9905a75e1de5f9fa9462a9c403",
"timestamp": "",
"source": "github",
"line_count": 137,
"max_line_length": 79,
"avg_line_length": 29.204379562043794,
"alnum_prop": 0.6548362909272681,
"repo_name": "kireledan/molecule",
"id": "be4741f0ec6406d551b7030412188827475d0b63",
"size": "5121",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "test/unit/verifier/lint/test_flake8.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "577315"
}
],
"symlink_target": ""
} |
import functools
from oslo.config import cfg
from neutron.common import constants as n_const
from neutron.common import exceptions as n_exc
from neutron.common import rpc as n_rpc
from neutron.common import topics
from neutron.db import agents_db
from neutron.db import db_base_plugin_v2
from neutron.db import external_net_db
from neutron.db import l3_gwmode_db
from neutron.db import portbindings_db
from neutron.db import quota_db # noqa
from neutron.extensions import portbindings
from neutron.openstack.common import excutils
from neutron.openstack.common import log as logging
from neutron.openstack.common import rpc
from neutron.openstack.common.rpc import proxy
from neutron.plugins.ibm.common import config # noqa
from neutron.plugins.ibm.common import constants
from neutron.plugins.ibm.common import exceptions as sdnve_exc
from neutron.plugins.ibm import sdnve_api as sdnve
from neutron.plugins.ibm import sdnve_api_fake as sdnve_fake
LOG = logging.getLogger(__name__)
class SdnveRpcCallbacks():
    """Server-side RPC callbacks for the SDN-VE plugin."""

    def __init__(self, notifier):
        # Notifier used to fan information updates out to the agents.
        self.notifier = notifier

    def create_rpc_dispatcher(self):
        """Return the rpc dispatcher for this manager.

        If a manager would like to set an rpc API version, or support more
        than one class as the target of rpc messages, override this method.
        """
        callbacks = [self, agents_db.AgentExtRpcCallback()]
        return n_rpc.PluginRpcDispatcher(callbacks)

    def sdnve_info(self, rpc_context, **kwargs):
        """Broadcast new information to all listening agents and echo it."""
        info = kwargs.get('info')
        self.notifier.info_update(rpc_context, info)
        return info
class AgentNotifierApi(proxy.RpcProxy):
    '''Agent side of the SDN-VE rpc API.'''
    # Version of the rpc API this proxy speaks.
    BASE_RPC_API_VERSION = '1.0'
    def __init__(self, topic):
        '''Build the proxy and precompute the fanout topic for info updates.'''
        super(AgentNotifierApi, self).__init__(
            topic=topic, default_version=self.BASE_RPC_API_VERSION)
        # Topic used for fanout casts of info updates to all agents.
        self.topic_info_update = topics.get_topic_name(topic,
                                                       constants.INFO,
                                                       topics.UPDATE)
    def info_update(self, context, info):
        '''Fan an 'info_update' message out to every listening agent.'''
        self.fanout_cast(context,
                         self.make_msg('info_update',
                                       info=info),
                         topic=self.topic_info_update)
def _ha(func):
'''Supports the high availability feature of the controller.'''
@functools.wraps(func)
def hawrapper(self, *args, **kwargs):
'''This wrapper sets the new controller if necessary
When a controller is detected to be not responding, and a
new controller is chosen to be used in its place, this decorator
makes sure the existing integration bridges are set to point
to the new controleer by calling the set_controller method.
'''
ret_func = func(self, *args, **kwargs)
self.set_controller(args[0])
return ret_func
return hawrapper
class SdnvePluginV2(db_base_plugin_v2.NeutronDbPluginV2,
external_net_db.External_net_db_mixin,
portbindings_db.PortBindingMixin,
l3_gwmode_db.L3_NAT_db_mixin,
agents_db.AgentDbMixin,
):
'''
Implement the Neutron abstractions using SDN-VE SDN Controller.
'''
__native_bulk_support = False
__native_pagination_support = False
__native_sorting_support = False
supported_extension_aliases = ["binding", "router", "external-net",
"agent", "quotas"]
def __init__(self, configfile=None):
self.base_binding_dict = {
portbindings.VIF_TYPE: portbindings.VIF_TYPE_OVS,
portbindings.VIF_DETAILS: {portbindings.CAP_PORT_FILTER: False}}
super(SdnvePluginV2, self).__init__()
self.setup_rpc()
self.sdnve_controller_select()
if self.fake_controller:
self.sdnve_client = sdnve_fake.FakeClient()
else:
self.sdnve_client = sdnve.Client()
def sdnve_controller_select(self):
self.fake_controller = cfg.CONF.SDNVE.use_fake_controller
def setup_rpc(self):
# RPC support
self.topic = topics.PLUGIN
self.conn = rpc.create_connection(new=True)
self.notifier = AgentNotifierApi(topics.AGENT)
self.callbacks = SdnveRpcCallbacks(self.notifier)
self.dispatcher = self.callbacks.create_rpc_dispatcher()
self.conn.create_consumer(self.topic, self.dispatcher,
fanout=False)
# Consume from all consumers in a thread
self.conn.consume_in_thread()
def _update_base_binding_dict(self, tenant_type):
if tenant_type == constants.TENANT_TYPE_OVERLAY:
self.base_binding_dict[
portbindings.VIF_TYPE] = portbindings.VIF_TYPE_BRIDGE
if tenant_type == constants.TENANT_TYPE_OF:
self.base_binding_dict[
portbindings.VIF_TYPE] = portbindings.VIF_TYPE_OVS
    def set_controller(self, context):
        '''Notify all agents of the currently chosen controller, if any.'''
        LOG.info(_("Set a new controller if needed."))
        # Ask the client which controller should be used; a falsy answer
        # means no update needs to be pushed to the agents.
        new_controller = self.sdnve_client.sdnve_get_controller()
        if new_controller:
            self.notifier.info_update(
                context,
                {'new_controller': new_controller})
            LOG.info(_("Set the controller to a new controller: %s"),
                     new_controller)
def _process_request(self, request, current):
new_request = dict(
(k, v) for k, v in request.items()
if v != current.get(k))
msg = _("Original SDN-VE HTTP request: %(orig)s; New request: %(new)s")
LOG.debug(msg, {'orig': request, 'new': new_request})
return new_request
#
# Network
#
@_ha
def create_network(self, context, network):
LOG.debug(_("Create network in progress: %r"), network)
session = context.session
tenant_id = self._get_tenant_id_for_create(context, network['network'])
# Create a new SDN-VE tenant if need be
sdnve_tenant = self.sdnve_client.sdnve_check_and_create_tenant(
tenant_id)
if sdnve_tenant is None:
raise sdnve_exc.SdnveException(
msg=_('Create net failed: no SDN-VE tenant.'))
with session.begin(subtransactions=True):
net = super(SdnvePluginV2, self).create_network(context, network)
self._process_l3_create(context, net, network['network'])
# Create SDN-VE network
(res, data) = self.sdnve_client.sdnve_create('network', net)
if res not in constants.HTTP_ACCEPTABLE:
super(SdnvePluginV2, self).delete_network(context, net['id'])
raise sdnve_exc.SdnveException(
msg=(_('Create net failed in SDN-VE: %s') % res))
LOG.debug(_("Created network: %s"), net['id'])
return net
@_ha
def update_network(self, context, id, network):
LOG.debug(_("Update network in progress: %r"), network)
session = context.session
processed_request = {}
with session.begin(subtransactions=True):
original_network = super(SdnvePluginV2, self).get_network(
context, id)
processed_request['network'] = self._process_request(
network['network'], original_network)
net = super(SdnvePluginV2, self).update_network(
context, id, network)
self._process_l3_update(context, net, network['network'])
if processed_request['network']:
(res, data) = self.sdnve_client.sdnve_update(
'network', id, processed_request['network'])
if res not in constants.HTTP_ACCEPTABLE:
net = super(SdnvePluginV2, self).update_network(
context, id, {'network': original_network})
raise sdnve_exc.SdnveException(
msg=(_('Update net failed in SDN-VE: %s') % res))
return net
    @_ha
    def delete_network(self, context, id):
        '''Delete a network from the DB, then from the SDN-VE controller.

        A controller-side failure is only logged: the DB deletion has
        already been committed and is not rolled back.
        '''
        LOG.debug(_("Delete network in progress: %s"), id)
        session = context.session
        with session.begin(subtransactions=True):
            # Remove the L3 resources tied to the network before the
            # network row itself.
            self._process_l3_delete(context, id)
            super(SdnvePluginV2, self).delete_network(context, id)
        (res, data) = self.sdnve_client.sdnve_delete('network', id)
        if res not in constants.HTTP_ACCEPTABLE:
            LOG.error(
                _("Delete net failed after deleting the network in DB: %s"),
                res)
@_ha
def get_network(self, context, id, fields=None):
LOG.debug(_("Get network in progress: %s"), id)
return super(SdnvePluginV2, self).get_network(context, id, fields)
    @_ha
    def get_networks(self, context, filters=None, fields=None, sorts=None,
                     limit=None, marker=None, page_reverse=False):
        '''List networks; a pure DB read delegated to the base plugin.'''
        LOG.debug(_("Get networks in progress"))
        return super(SdnvePluginV2, self).get_networks(
            context, filters, fields, sorts, limit, marker, page_reverse)
#
# Port
#
@_ha
def create_port(self, context, port):
LOG.debug(_("Create port in progress: %r"), port)
session = context.session
# Set port status as 'ACTIVE' to avoid needing the agent
port['port']['status'] = n_const.PORT_STATUS_ACTIVE
port_data = port['port']
with session.begin(subtransactions=True):
port = super(SdnvePluginV2, self).create_port(context, port)
if 'id' not in port:
return port
# If the tenant_id is set to '' by create_port, add the id to
# the request being sent to the controller as the controller
# requires a tenant id
tenant_id = port.get('tenant_id')
if not tenant_id:
LOG.debug(_("Create port does not have tenant id info"))
original_network = super(SdnvePluginV2, self).get_network(
context, port['network_id'])
original_tenant_id = original_network['tenant_id']
port['tenant_id'] = original_tenant_id
LOG.debug(
_("Create port does not have tenant id info; "
"obtained is: %s"),
port['tenant_id'])
os_tenant_id = tenant_id
id_na, tenant_type = self.sdnve_client.sdnve_get_tenant_byid(
os_tenant_id)
self._update_base_binding_dict(tenant_type)
self._process_portbindings_create_and_update(context,
port_data, port)
# NOTE(mb): Remove this block when controller is updated
# Remove the information that the controller does not accept
sdnve_port = port.copy()
sdnve_port.pop('device_id', None)
sdnve_port.pop('device_owner', None)
(res, data) = self.sdnve_client.sdnve_create('port', sdnve_port)
if res not in constants.HTTP_ACCEPTABLE:
super(SdnvePluginV2, self).delete_port(context, port['id'])
raise sdnve_exc.SdnveException(
msg=(_('Create port failed in SDN-VE: %s') % res))
LOG.debug(_("Created port: %s"), port.get('id', 'id not found'))
return port
@_ha
def update_port(self, context, id, port):
LOG.debug(_("Update port in progress: %r"), port)
session = context.session
processed_request = {}
with session.begin(subtransactions=True):
original_port = super(SdnvePluginV2, self).get_port(
context, id)
processed_request['port'] = self._process_request(
port['port'], original_port)
updated_port = super(SdnvePluginV2, self).update_port(
context, id, port)
os_tenant_id = updated_port['tenant_id']
id_na, tenant_type = self.sdnve_client.sdnve_get_tenant_byid(
os_tenant_id)
self._update_base_binding_dict(tenant_type)
self._process_portbindings_create_and_update(context,
port['port'],
updated_port)
if processed_request['port']:
(res, data) = self.sdnve_client.sdnve_update(
'port', id, processed_request['port'])
if res not in constants.HTTP_ACCEPTABLE:
updated_port = super(SdnvePluginV2, self).update_port(
context, id, {'port': original_port})
raise sdnve_exc.SdnveException(
msg=(_('Update port failed in SDN-VE: %s') % res))
return updated_port
@_ha
def delete_port(self, context, id, l3_port_check=True):
LOG.debug(_("Delete port in progress: %s"), id)
# if needed, check to see if this is a port owned by
# an l3-router. If so, we should prevent deletion.
if l3_port_check:
self.prevent_l3_port_deletion(context, id)
self.disassociate_floatingips(context, id)
super(SdnvePluginV2, self).delete_port(context, id)
(res, data) = self.sdnve_client.sdnve_delete('port', id)
if res not in constants.HTTP_ACCEPTABLE:
LOG.error(
_("Delete port operation failed in SDN-VE "
"after deleting the port from DB: %s"), res)
#
# Subnet
#
@_ha
def create_subnet(self, context, subnet):
LOG.debug(_("Create subnet in progress: %r"), subnet)
new_subnet = super(SdnvePluginV2, self).create_subnet(context, subnet)
# Note(mb): Use of null string currently required by controller
sdnve_subnet = new_subnet.copy()
if subnet.get('gateway_ip') is None:
sdnve_subnet['gateway_ip'] = 'null'
(res, data) = self.sdnve_client.sdnve_create('subnet', sdnve_subnet)
if res not in constants.HTTP_ACCEPTABLE:
super(SdnvePluginV2, self).delete_subnet(context,
new_subnet['id'])
raise sdnve_exc.SdnveException(
msg=(_('Create subnet failed in SDN-VE: %s') % res))
LOG.debug(_("Subnet created: %s"), new_subnet['id'])
return new_subnet
@_ha
def update_subnet(self, context, id, subnet):
LOG.debug(_("Update subnet in progress: %r"), subnet)
session = context.session
processed_request = {}
with session.begin(subtransactions=True):
original_subnet = super(SdnvePluginV2, self).get_subnet(
context, id)
processed_request['subnet'] = self._process_request(
subnet['subnet'], original_subnet)
updated_subnet = super(SdnvePluginV2, self).update_subnet(
context, id, subnet)
if processed_request['subnet']:
# Note(mb): Use of string containing null required by controller
if 'gateway_ip' in processed_request['subnet']:
if processed_request['subnet'].get('gateway_ip') is None:
processed_request['subnet']['gateway_ip'] = 'null'
(res, data) = self.sdnve_client.sdnve_update(
'subnet', id, processed_request['subnet'])
if res not in constants.HTTP_ACCEPTABLE:
for key in subnet['subnet'].keys():
subnet['subnet'][key] = original_subnet[key]
super(SdnvePluginV2, self).update_subnet(
context, id, subnet)
raise sdnve_exc.SdnveException(
msg=(_('Update subnet failed in SDN-VE: %s') % res))
return updated_subnet
@_ha
def delete_subnet(self, context, id):
LOG.debug(_("Delete subnet in progress: %s"), id)
super(SdnvePluginV2, self).delete_subnet(context, id)
(res, data) = self.sdnve_client.sdnve_delete('subnet', id)
if res not in constants.HTTP_ACCEPTABLE:
LOG.error(_("Delete subnet operation failed in SDN-VE after "
"deleting the subnet from DB: %s"), res)
#
# Router
#
    @_ha
    def create_router(self, context, router):
        '''Create a router in the DB and mirror it to the SDN-VE controller.

        admin_state_up=False is not supported and is forced to True with a
        warning. The DB create is rolled back if the controller rejects it.
        '''
        LOG.debug(_("Create router in progress: %r"), router)
        if router['router']['admin_state_up'] is False:
            LOG.warning(_('Ignoring admin_state_up=False for router=%r. '
                          'Overriding with True'), router)
            router['router']['admin_state_up'] = True
        tenant_id = self._get_tenant_id_for_create(context, router['router'])
        # Create a new SDN-VE tenant if need be
        sdnve_tenant = self.sdnve_client.sdnve_check_and_create_tenant(
            tenant_id)
        if sdnve_tenant is None:
            raise sdnve_exc.SdnveException(
                msg=_('Create router failed: no SDN-VE tenant.'))
        new_router = super(SdnvePluginV2, self).create_router(context, router)
        # Create the router on the SDN-VE controller side
        (res, data) = self.sdnve_client.sdnve_create('router', new_router)
        if res not in constants.HTTP_ACCEPTABLE:
            super(SdnvePluginV2, self).delete_router(context, new_router['id'])
            raise sdnve_exc.SdnveException(
                msg=(_('Create router failed in SDN-VE: %s') % res))
        LOG.debug(_("Router created: %r"), new_router)
        return new_router
@_ha
def update_router(self, context, id, router):
LOG.debug(_("Update router in progress: id=%(id)s "
"router=%(router)r"),
{'id': id, 'router': router})
session = context.session
processed_request = {}
if not router['router'].get('admin_state_up', True):
raise n_exc.NotImplementedError(_('admin_state_up=False '
'routers are not '
'supported.'))
with session.begin(subtransactions=True):
original_router = super(SdnvePluginV2, self).get_router(
context, id)
processed_request['router'] = self._process_request(
router['router'], original_router)
updated_router = super(SdnvePluginV2, self).update_router(
context, id, router)
if processed_request['router']:
egw = processed_request['router'].get('external_gateway_info')
# Check for existing empty set (different from None) in request
if egw == {}:
processed_request['router'][
'external_gateway_info'] = {'network_id': 'null'}
(res, data) = self.sdnve_client.sdnve_update(
'router', id, processed_request['router'])
if res not in constants.HTTP_ACCEPTABLE:
super(SdnvePluginV2, self).update_router(
context, id, {'router': original_router})
raise sdnve_exc.SdnveException(
msg=(_('Update router failed in SDN-VE: %s') % res))
return updated_router
@_ha
def delete_router(self, context, id):
LOG.debug(_("Delete router in progress: %s"), id)
super(SdnvePluginV2, self).delete_router(context, id)
(res, data) = self.sdnve_client.sdnve_delete('router', id)
if res not in constants.HTTP_ACCEPTABLE:
LOG.error(
_("Delete router operation failed in SDN-VE after "
"deleting the router in DB: %s"), res)
@_ha
def add_router_interface(self, context, router_id, interface_info):
LOG.debug(_("Add router interface in progress: "
"router_id=%(router_id)s "
"interface_info=%(interface_info)r"),
{'router_id': router_id, 'interface_info': interface_info})
new_interface = super(SdnvePluginV2, self).add_router_interface(
context, router_id, interface_info)
LOG.debug(
_("SdnvePluginV2.add_router_interface called. Port info: %s"),
new_interface)
request_info = interface_info.copy()
request_info['port_id'] = new_interface['port_id']
# Add the subnet_id to the request sent to the controller
if 'subnet_id' not in interface_info:
request_info['subnet_id'] = new_interface['subnet_id']
(res, data) = self.sdnve_client.sdnve_update(
'router', router_id + '/add_router_interface', request_info)
if res not in constants.HTTP_ACCEPTABLE:
super(SdnvePluginV2, self).remove_router_interface(
context, router_id, interface_info)
raise sdnve_exc.SdnveException(
msg=(_('Update router-add-interface failed in SDN-VE: %s') %
res))
LOG.debug(_("Added router interface: %r"), new_interface)
return new_interface
def _add_router_interface_only(self, context, router_id, interface_info):
LOG.debug(_("Add router interface only called: "
"router_id=%(router_id)s "
"interface_info=%(interface_info)r"),
{'router_id': router_id, 'interface_info': interface_info})
port_id = interface_info.get('port_id')
if port_id:
(res, data) = self.sdnve_client.sdnve_update(
'router', router_id + '/add_router_interface', interface_info)
if res not in constants.HTTP_ACCEPTABLE:
LOG.error(_("SdnvePluginV2._add_router_interface_only: "
"failed to add the interface in the roll back."
" of a remove_router_interface operation"))
@_ha
def remove_router_interface(self, context, router_id, interface_info):
LOG.debug(_("Remove router interface in progress: "
"router_id=%(router_id)s "
"interface_info=%(interface_info)r"),
{'router_id': router_id, 'interface_info': interface_info})
subnet_id = interface_info.get('subnet_id')
port_id = interface_info.get('port_id')
if not subnet_id:
if not port_id:
raise sdnve_exc.BadInputException(msg=_('No port ID'))
myport = super(SdnvePluginV2, self).get_port(context, port_id)
LOG.debug(_("SdnvePluginV2.remove_router_interface port: %s"),
myport)
myfixed_ips = myport.get('fixed_ips')
if not myfixed_ips:
raise sdnve_exc.BadInputException(msg=_('No fixed IP'))
subnet_id = myfixed_ips[0].get('subnet_id')
if subnet_id:
interface_info['subnet_id'] = subnet_id
LOG.debug(
_("SdnvePluginV2.remove_router_interface subnet_id: %s"),
subnet_id)
else:
if not port_id:
# The backend requires port id info in the request
subnet = super(SdnvePluginV2, self).get_subnet(context,
subnet_id)
df = {'device_id': [router_id],
'device_owner': [n_const.DEVICE_OWNER_ROUTER_INTF],
'network_id': [subnet['network_id']]}
ports = self.get_ports(context, filters=df)
if ports:
pid = ports[0]['id']
interface_info['port_id'] = pid
msg = ("SdnvePluginV2.remove_router_interface "
"subnet_id: %(sid)s port_id: %(pid)s")
LOG.debug(msg, {'sid': subnet_id, 'pid': pid})
(res, data) = self.sdnve_client.sdnve_update(
'router', router_id + '/remove_router_interface', interface_info)
if res not in constants.HTTP_ACCEPTABLE:
raise sdnve_exc.SdnveException(
msg=(_('Update router-remove-interface failed SDN-VE: %s') %
res))
session = context.session
with session.begin(subtransactions=True):
try:
info = super(SdnvePluginV2, self).remove_router_interface(
context, router_id, interface_info)
except Exception:
with excutils.save_and_reraise_exception():
self._add_router_interface_only(context,
router_id, interface_info)
return info
#
# Floating Ip
#
@_ha
def create_floatingip(self, context, floatingip):
LOG.debug(_("Create floatingip in progress: %r"),
floatingip)
new_floatingip = super(SdnvePluginV2, self).create_floatingip(
context, floatingip)
(res, data) = self.sdnve_client.sdnve_create(
'floatingip', {'floatingip': new_floatingip})
if res not in constants.HTTP_ACCEPTABLE:
super(SdnvePluginV2, self).delete_floatingip(
context, new_floatingip['id'])
raise sdnve_exc.SdnveException(
msg=(_('Creating floating ip operation failed '
'in SDN-VE controller: %s') % res))
LOG.debug(_("Created floatingip : %r"), new_floatingip)
return new_floatingip
@_ha
def update_floatingip(self, context, id, floatingip):
LOG.debug(_("Update floatingip in progress: %r"), floatingip)
session = context.session
processed_request = {}
with session.begin(subtransactions=True):
original_floatingip = super(
SdnvePluginV2, self).get_floatingip(context, id)
processed_request['floatingip'] = self._process_request(
floatingip['floatingip'], original_floatingip)
updated_floatingip = super(
SdnvePluginV2, self).update_floatingip(context, id, floatingip)
if processed_request['floatingip']:
(res, data) = self.sdnve_client.sdnve_update(
'floatingip', id,
{'floatingip': processed_request['floatingip']})
if res not in constants.HTTP_ACCEPTABLE:
super(SdnvePluginV2, self).update_floatingip(
context, id, {'floatingip': original_floatingip})
raise sdnve_exc.SdnveException(
msg=(_('Update floating ip failed in SDN-VE: %s') % res))
return updated_floatingip
@_ha
def delete_floatingip(self, context, id):
LOG.debug(_("Delete floatingip in progress: %s"), id)
super(SdnvePluginV2, self).delete_floatingip(context, id)
(res, data) = self.sdnve_client.sdnve_delete('floatingip', id)
if res not in constants.HTTP_ACCEPTABLE:
LOG.error(_("Delete floatingip failed in SDN-VE: %s"), res)
| {
"content_hash": "8ba058349f1a0a48213a2abf4c2e335a",
"timestamp": "",
"source": "github",
"line_count": 657,
"max_line_length": 79,
"avg_line_length": 41.23896499238965,
"alnum_prop": 0.5711596663467926,
"repo_name": "SnabbCo/neutron",
"id": "3d2ecd381c3f0294c6c06f8199fbff097d4076e6",
"size": "27766",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "neutron/plugins/ibm/sdnve_neutron_plugin.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
"""
Test case for the bug report:
"[ 1088979 ] Unnecessary rebuild with generated header file"
(<http://sourceforge.net/tracker/index.php?func=detail&aid=1088979&group_id=30337&atid=398971>).
Unnecessary rebuild with generated header file
Scons rebuilds some nodes when invoked twice. The
trigger seems to be a generated C++ source file that
includes a header file that also is generated.
A tarball with a minimal test case is attached.
Transcript for reproducing:
cd /tmp
tar xzf scons_rebuild_debug.tar.gz
cd scons_rebuild_debug
scons target.o
scons target.o
Note that the bug is not triggered when scons is run
without arguments.
This may be a duplicate of bug 1019683.
"""
import os
import sys
import TestSCons
test = TestSCons.TestSCons()
_obj = TestSCons._obj
# Pick a platform-appropriate generator script for the test fixture.
if sys.platform == 'win32':
    # On Windows, use a batch file; `@echo` suppresses command echoing.
    generator_name = 'generator.bat'
    test.write(generator_name, '@echo #include "header.hh"')
    kernel_action = "$SOURCES > $TARGET"
else:
    # Elsewhere, use a shell script invoked explicitly via `sh`.
    generator_name = 'generator.sh'
    test.write(generator_name, 'echo \'#include "header.hh"\'')
    kernel_action = "sh $SOURCES > $TARGET"
test.write('SConstruct', """\
env = Environment()
kernelDefines = env.Command("header.hh",
"header.hh.in",
Copy('$TARGET', '$SOURCE'))
kernelImporterSource = env.Command(
"generated.cc", ["%s"],
"%s")
kernelImporter = env.Program(
kernelImporterSource + ["main.cc"])
kernelImports = env.Command(
"KernelImport.hh", kernelImporter,
".%s$SOURCE > $TARGET")
osLinuxModule = env.StaticObject(
["target.cc"])
""" % (generator_name, kernel_action, os.sep))
test.write('main.cc', """\
int
main(int, char *[])
{
return (0);
}
""")
test.write('target.cc', """\
#if 0
#include "KernelImport.hh"
#endif
""")
test.write("header.hh.in", "#define HEADER_HH 1\n")
test.run(arguments = 'target' + _obj)
expected_stdout = """\
scons: Reading SConscript files ...
scons: done reading SConscript files.
scons: Building targets ...
scons: `target%(_obj)s' is up to date.
scons: done building targets.
""" % locals()
test.run(arguments = 'target' + _obj, stdout=expected_stdout)
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| {
"content_hash": "902a38f7d950601136b6bd8cedb5cb39",
"timestamp": "",
"source": "github",
"line_count": 105,
"max_line_length": 96,
"avg_line_length": 22.571428571428573,
"alnum_prop": 0.6654008438818565,
"repo_name": "andrewyoung1991/scons",
"id": "6beba7419e34c0209e23f60b29dd7b9cbe9372ae",
"size": "3472",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "test/rebuild-generated.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "2437"
},
{
"name": "C",
"bytes": "746"
},
{
"name": "C++",
"bytes": "518"
},
{
"name": "CSS",
"bytes": "18502"
},
{
"name": "D",
"bytes": "1817"
},
{
"name": "DTrace",
"bytes": "180"
},
{
"name": "HTML",
"bytes": "857084"
},
{
"name": "Java",
"bytes": "6860"
},
{
"name": "JavaScript",
"bytes": "215495"
},
{
"name": "Makefile",
"bytes": "3795"
},
{
"name": "Perl",
"bytes": "44714"
},
{
"name": "Python",
"bytes": "7385906"
},
{
"name": "Ruby",
"bytes": "10888"
},
{
"name": "Shell",
"bytes": "52194"
},
{
"name": "XSLT",
"bytes": "7567242"
}
],
"symlink_target": ""
} |
"""
Unit tests for the nova-policy-check CLI interfaces.
"""
import fixtures
import mock
from six.moves import StringIO
from nova.cmd import policy_check
import nova.conf
from nova import context as nova_context
from nova import db
from nova import exception
from nova.policies import base as base_policies
from nova.policies import instance_actions as ia_policies
from nova import test
from nova.tests.unit import fake_instance
from nova.tests.unit import policy_fixture
CONF = nova.conf.CONF
class TestPolicyCheck(test.NoDBTestCase):
def setUp(self):
super(TestPolicyCheck, self).setUp()
self.output = StringIO()
self.useFixture(fixtures.MonkeyPatch('sys.stdout', self.output))
self.policy = self.useFixture(policy_fixture.RealPolicyFixture())
self.cmd = policy_check.PolicyCommands()
@mock.patch.object(policy_check.PolicyCommands, '_filter_rules')
@mock.patch.object(policy_check.PolicyCommands, '_get_target')
@mock.patch.object(policy_check.PolicyCommands, '_get_context')
def test_check(self, mock_get_context, mock_get_target,
mock_filter_rules):
fake_rules = ['fake:rule', 'faux:roule']
mock_filter_rules.return_value = fake_rules
self.cmd.check(target=mock.sentinel.target)
mock_get_context.assert_called_once_with()
mock_get_target.assert_called_once_with(mock_get_context.return_value,
mock.sentinel.target)
mock_filter_rules.assert_called_once_with(
mock_get_context.return_value, '', mock_get_target.return_value)
self.assertEqual('\n'.join(fake_rules) + '\n', self.output.getvalue())
@mock.patch.object(nova_context, 'RequestContext')
@mock.patch.object(policy_check, 'CONF')
def test_get_context(self, mock_CONF, mock_RequestContext):
context = self.cmd._get_context()
self.assertEqual(mock_RequestContext.return_value, context)
mock_RequestContext.assert_called_once_with(
roles=mock_CONF.os_roles,
user_id=mock_CONF.os_user_id,
project_id=mock_CONF.os_tenant_id)
def test_get_target_none(self):
target = self.cmd._get_target(mock.sentinel.context, None)
self.assertIsNone(target)
def test_get_target_invalid_attribute(self):
self.assertRaises(exception.InvalidAttribute, self.cmd._get_target,
mock.sentinel.context, ['nope=nada'])
def test_get_target(self):
expected_target = {
'project_id': 'fake-proj',
'user_id': 'fake-user',
'quota_class': 'fake-quota-class',
'availability_zone': 'fake-az',
}
given_target = ['='.join([key, val])
for key, val in expected_target.items()]
actual_target = self.cmd._get_target(mock.sentinel.context,
given_target)
self.assertDictEqual(expected_target, actual_target)
@mock.patch.object(nova_context, 'get_admin_context')
@mock.patch.object(db, 'instance_get_by_uuid')
def test_get_target_instance(self, mock_instance_get,
mock_get_admin_context):
admin_context = nova_context.RequestContext(is_admin=True)
mock_get_admin_context.return_value = admin_context
given_target = ['instance_id=fake_id']
mock_instance_get.return_value = fake_instance.fake_db_instance()
target = self.cmd._get_target(mock.sentinel.context,
given_target)
self.assertEqual(target,
{'user_id': 'fake-user', 'project_id': 'fake-project'})
mock_instance_get.assert_called_once_with(admin_context,
'fake_id')
    def _check_filter_rules(self, context=None, target=None,
                            expected_rules=None):
        """Assert _filter_rules passes exactly `expected_rules`.

        Defaults to an admin context and, when `expected_rules` is None,
        to every os-instance-actions rule.
        """
        context = context or nova_context.get_admin_context()
        if expected_rules is None:
            expected_rules = [
                r.name for r in ia_policies.list_rules()]
        passing_rules = self.cmd._filter_rules(
            context, 'os-instance-actions', target)
        self.assertEqual(set(expected_rules), set(passing_rules))
def test_filter_rules_non_admin(self):
context = nova_context.RequestContext()
rule_conditions = [base_policies.RULE_ANY,
base_policies.RULE_ADMIN_OR_OWNER]
expected_rules = [r.name for r in ia_policies.list_rules() if
r.check_str in rule_conditions]
self._check_filter_rules(context, expected_rules=expected_rules)
def test_filter_rules_admin(self):
self._check_filter_rules()
def test_filter_rules_instance_non_admin(self):
db_context = nova_context.RequestContext(user_id='fake-user',
project_id='fake-project')
instance = fake_instance.fake_instance_obj(db_context)
context = nova_context.RequestContext()
expected_rules = [r.name for r in ia_policies.list_rules() if
r.check_str == base_policies.RULE_ANY]
self._check_filter_rules(context, instance, expected_rules)
def test_filter_rules_instance_admin(self):
db_context = nova_context.RequestContext(user_id='fake-user',
project_id='fake-project')
instance = fake_instance.fake_instance_obj(db_context)
self._check_filter_rules(target=instance)
def test_filter_rules_instance_owner(self):
db_context = nova_context.RequestContext(user_id='fake-user',
project_id='fake-project')
instance = fake_instance.fake_instance_obj(db_context)
rule_conditions = [base_policies.RULE_ANY,
base_policies.RULE_ADMIN_OR_OWNER]
expected_rules = [r.name for r in ia_policies.list_rules() if
r.check_str in rule_conditions]
self._check_filter_rules(db_context, instance, expected_rules)
    @mock.patch.object(policy_check.config, 'parse_args')
    @mock.patch.object(policy_check, 'CONF')
    def _check_main(self, mock_CONF, mock_parse_args,
                    category_name='check', expected_return_value=0):
        """Run policy_check.main() with a stubbed CONF and check its result.

        Also verifies that the CLI options and the category option are
        each registered exactly once.
        """
        mock_CONF.category.name = category_name
        return_value = policy_check.main()
        self.assertEqual(expected_return_value, return_value)
        mock_CONF.register_cli_opts.assert_called_once_with(
            policy_check.cli_opts)
        mock_CONF.register_cli_opt.assert_called_once_with(
            policy_check.category_opt)
@mock.patch.object(policy_check.version, 'version_string_with_package',
return_value="x.x.x")
def test_main_version(self, mock_version_string):
self._check_main(category_name='version')
self.assertEqual("x.x.x\n", self.output.getvalue())
@mock.patch.object(policy_check.cmd_common, 'print_bash_completion')
def test_main_bash_completion(self, mock_print_bash):
self._check_main(category_name='bash-completion')
mock_print_bash.assert_called_once_with(policy_check.CATEGORIES)
@mock.patch.object(policy_check.cmd_common, 'get_action_fn')
def test_main(self, mock_get_action_fn):
mock_fn = mock.Mock()
mock_fn_args = [mock.sentinel.arg]
mock_fn_kwargs = {'key': mock.sentinel.value}
mock_get_action_fn.return_value = (mock_fn, mock_fn_args,
mock_fn_kwargs)
self._check_main(expected_return_value=mock_fn.return_value)
mock_fn.assert_called_once_with(mock.sentinel.arg,
key=mock.sentinel.value)
@mock.patch.object(policy_check.cmd_common, 'get_action_fn')
def test_main_error(self, mock_get_action_fn):
    """A raising action maps to exit code 1 and an error message."""
    failing_action = mock.Mock(side_effect=Exception)
    mock_get_action_fn.return_value = (failing_action, [], {})

    self._check_main(expected_return_value=1)

    self.assertIn("error: ", self.output.getvalue())
| {
"content_hash": "1bb74181d2ace4100bafdc4ea2bff467",
"timestamp": "",
"source": "github",
"line_count": 186,
"max_line_length": 78,
"avg_line_length": 44.05913978494624,
"alnum_prop": 0.6183038438071995,
"repo_name": "Juniper/nova",
"id": "bfb719885a4f0ac7246a54479b469aca4fb384a7",
"size": "8834",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "nova/tests/unit/cmd/test_policy_check.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "601"
},
{
"name": "PHP",
"bytes": "23962"
},
{
"name": "Python",
"bytes": "19816434"
},
{
"name": "Shell",
"bytes": "27717"
},
{
"name": "Smarty",
"bytes": "339635"
}
],
"symlink_target": ""
} |
"""
Domino Public API
Public API endpoints for Custom Metrics # noqa: E501
The version of the OpenAPI document: 5.3.0
Generated by: https://openapi-generator.tech
"""
__version__ = "1.0.0"
# import ApiClient
from domino._impl.custommetrics.api_client import ApiClient
# import Configuration
from domino._impl.custommetrics.configuration import Configuration
# import exceptions
from domino._impl.custommetrics.exceptions import OpenApiException
from domino._impl.custommetrics.exceptions import ApiAttributeError
from domino._impl.custommetrics.exceptions import ApiTypeError
from domino._impl.custommetrics.exceptions import ApiValueError
from domino._impl.custommetrics.exceptions import ApiKeyError
from domino._impl.custommetrics.exceptions import ApiException
| {
"content_hash": "4b75514614ede4b5080dcbde740558ba",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 67,
"avg_line_length": 32.833333333333336,
"alnum_prop": 0.8058375634517766,
"repo_name": "dominodatalab/python-domino",
"id": "d9593b53f24cb2c80a8b0698433be709302fe9eb",
"size": "821",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "domino/_impl/custommetrics/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "518781"
},
{
"name": "Shell",
"bytes": "142"
}
],
"symlink_target": ""
} |
"""
Thinkful Python Unit 1 Lesson 6 Assignment 4
Implement Bicycle in Python using Classes
Qasim Ijaz
"""
import random
class Wheels(object):
    """A wheel model with its per-unit weight and cost."""

    def __init__(self, weight, cost, model):
        self.model = model
        self.weight = weight
        self.cost = cost
class Frames(object):
    """A bicycle frame described by its material, weight and cost."""

    def __init__(self, material, weight, cost):
        self.material = material
        self.weight = weight
        self.cost = cost
class Manufacturers(object):
    """A bike maker and the percentage it adds on top of parts cost."""

    def __init__(self, name, percent):
        self.name = name
        self.percent = percent
class Bicycles(object):
    """A complete bicycle assembled from one frame and two wheels.

    Total weight and cost are the frame's plus twice the wheel's.
    Wholesale/retail pricing helpers existed only as commented-out
    stubs in the original and are intentionally not implemented.
    """

    def __init__(self, name, manufacturer, frame, wheel):
        self.name = name
        self.manufacturer = manufacturer
        self.frame = frame
        self.wheel = wheel
        # One frame plus a pair of wheels.
        self.weight = frame.weight + 2 * wheel.weight
        self.cost = frame.cost + 2 * wheel.cost
class Shops(object):
    """A bike shop holding an inventory of bicycles for sale.

    :param name: display name of the shop
    :param margin: retail margin percentage applied on top of cost
                   (defaults to the previously hard-coded 20)
    """

    def __init__(self, name, margin=20):
        self.name = name
        # Bug fix: the margin argument was previously ignored and the
        # attribute hard-coded to 20. Honor the caller's value, keeping
        # 20 as the default for backward compatibility.
        self.margin = margin
        self.inventory = []
        self.profit = 0  # we start with no profit

    def add_inventory(self, bicycle):
        """Stock one bicycle (inventory is populated by the driver script)."""
        self.inventory.append(bicycle)
class Customers(object):
    """A shopper with a budget and a list of purchased bikes."""

    def __init__(self, name, fund):
        self.name = name
        self.fund = fund
        self.bikes = []

    def buy_bike(self, bicycle):
        """Record the purchase of *bicycle* and log it.

        Bug fix: the original did ``"..." + bicycle`` which raises
        TypeError when *bicycle* is a Bicycles object rather than a
        string; print its name (falling back to the object itself).
        """
        self.bikes.append(bicycle)
        label = getattr(bicycle, "name", bicycle)
        print("add to customer inventory: " + str(label))
| {
"content_hash": "32272c734edf49421674b50714fa8d99",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 117,
"avg_line_length": 24.91780821917808,
"alnum_prop": 0.6118746564046179,
"repo_name": "qasimchadhar/thinkful",
"id": "554b8a05c601b4d0984f35c7360124206826f2f7",
"size": "1819",
"binary": false,
"copies": "1",
"ref": "refs/heads/gh-pages",
"path": "bicycle.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "421"
},
{
"name": "Python",
"bytes": "6892"
}
],
"symlink_target": ""
} |
from selenium import webdriver
from page_objects import *
from datetime import datetime
import unittest
import argparse
tender_id = None
class SeleniumMixin(unittest.TestCase):
    """Shared Selenium assertion helpers for the test cases below."""

    def assertlementIsPresentById(self, elem_id):
        # NOTE(review): the method name has a typo ("assertlement" for
        # "assertElement"); kept as-is because external callers may
        # depend on it. NoSuchElementException is presumably provided by
        # the module's `from page_objects import *` — TODO confirm.
        try:
            self.driver.find_element_by_id(elem_id)
        except NoSuchElementException as error:
            # Element missing: convert the lookup failure into a test failure.
            self.fail(error)
class TestCreateTender(SeleniumMixin):
    """End-to-end check that a tender owner can create a tender.

    Outcomes are appended to load_results_create.txt so repeated load
    runs can be tallied afterwards.
    """

    def setUp(self):
        self.driver = webdriver.Chrome()
        self.driver.get(broker['url'])
        self.driver.set_window_size(1200, 1000)
        self.login_page_owner = LoginPage(
            owner_users['email'], owner_users['password'], self.driver
        )
        self.create_tender_page = CreateTenderPage(self.driver)
        self.find_tender = FindTenderPage(self.driver)

    def test_create_tender(self):
        self.login_page_owner.login_as_owner()
        self.create_tender_page.create_tender()
        is_found = False
        # Poll for the tender id, refreshing the page between attempts.
        for _attempt in range(1, 10):
            try:
                given_tender_id = self.find_tender.get_tender_id()
                print(given_tender_id)
                if given_tender_id:
                    # `with` closes the file; the explicit close() the
                    # original added was redundant.
                    with open('load_results_create.txt', 'a') as f:
                        f.write(
                            'Tender_Owner created tender with {}, finished at {} ------------------ PASSED\n'.format(
                                given_tender_id, datetime.now())
                        )
                    is_found = True
                    break
            # Bug fix: `except A or B:` evaluated `A or B` to the first
            # class, so UnicodeEncodeError was never caught; a tuple of
            # exception types catches both.
            except (NoSuchElementException, UnicodeEncodeError):
                sleep(30)
                self.driver.refresh()
                self.driver.execute_script("window.scrollTo(0, 1582);")
        if not is_found:
            with open('load_results_create.txt', 'a') as f:
                f.write('Tender_Owner did NOT create tender, finished at {} ------------ FAILED\n'.format(
                    datetime.now())
                )
            return False

    def tearDown(self):
        self.driver.close()
if __name__ == '__main__':
    # The parser carries only the prog name; no CLI options are defined.
    arg_parser = argparse.ArgumentParser(prog='create_tender')
    single_test = unittest.TestSuite()
    single_test.addTest(TestCreateTender("test_create_tender"))
    unittest.TextTestRunner().run(single_test)
"content_hash": "26beb1d8bfc9e3be69ec4ef393d6f28b",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 117,
"avg_line_length": 34.710144927536234,
"alnum_prop": 0.5436325678496868,
"repo_name": "lesiavl/selenium_perfomance_tests",
"id": "530ffb81accf2b531b3935a1c514a2ede6db80a0",
"size": "2441",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "aps_load/create_tender.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "170994"
}
],
"symlink_target": ""
} |
from django.core.exceptions import ValidationError
from .factories import PhotoSizeFactory
from .helpers import PhotologueBaseTest
class PhotoSizeNameTest(PhotologueBaseTest):

    def test_valid_name(self):
        """PhotoSize names are restricted to plain lowercase slugs."""
        # A null name is rejected outright.
        size = PhotoSizeFactory()
        size.name = None
        with self.assertRaisesMessage(ValidationError, 'This field cannot be null.'):
            size.full_clean()

        # So is an empty string.
        size = PhotoSizeFactory(name='')
        with self.assertRaisesMessage(ValidationError, 'This field cannot be blank.'):
            size.full_clean()

        # Spaces, uppercase and punctuation all trip the slug validator.
        slug_error = 'Use only plain lowercase letters (ASCII), numbers and underscores.'
        for bad_name in ('a space', 'UPPERCASE', 'bad?chars'):
            size = PhotoSizeFactory(name=bad_name)
            with self.assertRaisesMessage(ValidationError, slug_error):
                size.full_clean()

        # Lowercase words and underscored names validate cleanly.
        for good_name in ('label', '2_words'):
            size = PhotoSizeFactory(name=good_name)
            size.full_clean()
| {
"content_hash": "3c4402636b447e686c8890b6d9f503e9",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 129,
"avg_line_length": 34.8,
"alnum_prop": 0.6657088122605364,
"repo_name": "MathieuDuponchelle/my_patched_photologue",
"id": "47da0ff277a2fc99b2269be6d7482eb945fd3dfe",
"size": "1044",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "photologue/tests/test_photosize.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "296"
},
{
"name": "Python",
"bytes": "231864"
},
{
"name": "Shell",
"bytes": "5118"
}
],
"symlink_target": ""
} |
from typing import Any, AsyncIterable, Callable, Dict, IO, Optional, TypeVar, Union, cast, overload
import urllib.parse
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._virtual_machines_operations import (
build_capture_request,
build_convert_to_managed_disks_request,
build_create_or_update_request,
build_deallocate_request,
build_delete_request,
build_generalize_request,
build_get_extensions_request,
build_get_request,
build_instance_view_request,
build_list_all_request,
build_list_available_sizes_request,
build_list_by_location_request,
build_list_request,
build_perform_maintenance_request,
build_power_off_request,
build_redeploy_request,
build_restart_request,
build_run_command_request,
build_start_request,
)
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class VirtualMachinesOperations:  # pylint: disable=too-many-public-methods
    """
    .. warning::
        **DO NOT** instantiate this class directly.

        Instead, you should access the following operations through
        :class:`~azure.mgmt.compute.v2017_03_30.aio.ComputeManagementClient`'s
        :attr:`virtual_machines` attribute.
    """

    models = _models

    def __init__(self, *args, **kwargs) -> None:
        # Auto-generated plumbing: the client, its configuration and the
        # (de)serializers are injected by the owning management client,
        # either positionally (in this fixed order) or by keyword.
        input_args = list(args)
        self._client = input_args.pop(0) if input_args else kwargs.pop("client")
        self._config = input_args.pop(0) if input_args else kwargs.pop("config")
        self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
        self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@distributed_trace_async
async def get_extensions(
    self, resource_group_name: str, vm_name: str, expand: Optional[str] = None, **kwargs: Any
) -> _models.VirtualMachineExtensionsListResult:
    """The operation to get all extensions of a Virtual Machine.

    :param resource_group_name: The name of the resource group. Required.
    :type resource_group_name: str
    :param vm_name: The name of the virtual machine containing the extension. Required.
    :type vm_name: str
    :param expand: The expand expression to apply on the operation. Default value is None.
    :type expand: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: VirtualMachineExtensionsListResult or the result of cls(response)
    :rtype: ~azure.mgmt.compute.v2017_03_30.models.VirtualMachineExtensionsListResult
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    # Map HTTP status codes to azure-core exception types; callers may
    # extend/override the mapping via the optional "error_map" kwarg.
    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
        304: ResourceNotModifiedError,
    }
    error_map.update(kwargs.pop("error_map", {}) or {})

    _headers = kwargs.pop("headers", {}) or {}
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    # The service API version can be overridden per call; defaults to
    # this module's version.
    api_version = kwargs.pop("api_version", _params.pop("api-version", "2017-03-30"))  # type: str
    cls = kwargs.pop("cls", None)  # type: ClsType[_models.VirtualMachineExtensionsListResult]

    request = build_get_extensions_request(
        resource_group_name=resource_group_name,
        vm_name=vm_name,
        subscription_id=self._config.subscription_id,
        expand=expand,
        api_version=api_version,
        template_url=self.get_extensions.metadata["url"],
        headers=_headers,
        params=_params,
    )
    request = _convert_request(request)
    request.url = self._client.format_url(request.url)  # type: ignore

    # Single (non-streaming) round trip through the client pipeline.
    pipeline_response = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
        request, stream=False, **kwargs
    )

    response = pipeline_response.http_response

    # Only 200 is a success for this operation.
    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    deserialized = self._deserialize("VirtualMachineExtensionsListResult", pipeline_response)

    if cls:
        # Hand the raw response through the caller-supplied transform.
        return cls(pipeline_response, deserialized, {})

    return deserialized

get_extensions.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/extensions"}  # type: ignore
@distributed_trace
def list_by_location(self, location: str, **kwargs: Any) -> AsyncIterable["_models.VirtualMachine"]:
    """Gets all the virtual machines under the specified subscription for the specified location.

    :param location: The location for which virtual machines under the subscription are queried.
     Required.
    :type location: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either VirtualMachine or the result of cls(response)
    :rtype:
     ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.compute.v2017_03_30.models.VirtualMachine]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    _headers = kwargs.pop("headers", {}) or {}
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version = kwargs.pop("api_version", _params.pop("api-version", "2017-03-30"))  # type: str
    cls = kwargs.pop("cls", None)  # type: ClsType[_models.VirtualMachineListResult]

    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
        304: ResourceNotModifiedError,
    }
    error_map.update(kwargs.pop("error_map", {}) or {})

    def prepare_request(next_link=None):
        # Build the first-page request, or a follow-up request from the
        # server-provided next_link.
        if not next_link:

            request = build_list_by_location_request(
                location=location,
                subscription_id=self._config.subscription_id,
                api_version=api_version,
                template_url=self.list_by_location.metadata["url"],
                headers=_headers,
                params=_params,
            )
            request = _convert_request(request)
            request.url = self._client.format_url(request.url)  # type: ignore

        else:
            # make call to next link with the client's api-version
            _parsed_next_link = urllib.parse.urlparse(next_link)
            _next_request_params = case_insensitive_dict(
                {
                    key: [urllib.parse.quote(v) for v in value]
                    for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
                }
            )
            _next_request_params["api-version"] = self._config.api_version
            request = HttpRequest(
                "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
            )
            request = _convert_request(request)
            request.url = self._client.format_url(request.url)  # type: ignore
            request.method = "GET"
        return request

    async def extract_data(pipeline_response):
        # Deserialize one page and return (next-page link, items).
        deserialized = self._deserialize("VirtualMachineListResult", pipeline_response)
        list_of_elem = deserialized.value
        if cls:
            list_of_elem = cls(list_of_elem)
        return deserialized.next_link or None, AsyncList(list_of_elem)

    async def get_next(next_link=None):
        # Fetch one page through the pipeline, mapping HTTP errors.
        request = prepare_request(next_link)

        pipeline_response = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
            request, stream=False, **kwargs
        )
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        return pipeline_response

    # Lazy paging: pages are requested only as the iterator is consumed.
    return AsyncItemPaged(get_next, extract_data)

list_by_location.metadata = {"url": "/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/virtualMachines"}  # type: ignore
async def _capture_initial(
    self,
    resource_group_name: str,
    vm_name: str,
    parameters: Union[_models.VirtualMachineCaptureParameters, IO],
    **kwargs: Any
) -> Optional[_models.VirtualMachineCaptureResult]:
    # Issues the initial HTTP request of the capture long-running
    # operation; begin_capture() wraps this with a poller.
    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
        304: ResourceNotModifiedError,
    }
    error_map.update(kwargs.pop("error_map", {}) or {})

    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version = kwargs.pop("api_version", _params.pop("api-version", "2017-03-30"))  # type: str
    content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None))  # type: Optional[str]
    cls = kwargs.pop("cls", None)  # type: ClsType[Optional[_models.VirtualMachineCaptureResult]]

    content_type = content_type or "application/json"
    _json = None
    _content = None
    # Raw IO/bytes bodies pass through untouched; model objects are
    # serialized to JSON.
    if isinstance(parameters, (IO, bytes)):
        _content = parameters
    else:
        _json = self._serialize.body(parameters, "VirtualMachineCaptureParameters")

    request = build_capture_request(
        resource_group_name=resource_group_name,
        vm_name=vm_name,
        subscription_id=self._config.subscription_id,
        api_version=api_version,
        content_type=content_type,
        json=_json,
        content=_content,
        template_url=self._capture_initial.metadata["url"],
        headers=_headers,
        params=_params,
    )
    request = _convert_request(request)
    request.url = self._client.format_url(request.url)  # type: ignore

    pipeline_response = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
        request, stream=False, **kwargs
    )

    response = pipeline_response.http_response

    if response.status_code not in [200, 202]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    # 202 (accepted, still running) yields None; only a 200 carries a
    # deserializable result body.
    deserialized = None
    if response.status_code == 200:
        deserialized = self._deserialize("VirtualMachineCaptureResult", pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})

    return deserialized

_capture_initial.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/capture"}  # type: ignore
@overload
async def begin_capture(
    self,
    resource_group_name: str,
    vm_name: str,
    parameters: _models.VirtualMachineCaptureParameters,
    *,
    content_type: str = "application/json",
    **kwargs: Any
) -> AsyncLROPoller[_models.VirtualMachineCaptureResult]:
    # Signature-only overload (typed model body); the implementation
    # follows the second overload.
    """Captures the VM by copying virtual hard disks of the VM and outputs a template that can be used
    to create similar VMs.

    :param resource_group_name: The name of the resource group. Required.
    :type resource_group_name: str
    :param vm_name: The name of the virtual machine. Required.
    :type vm_name: str
    :param parameters: Parameters supplied to the Capture Virtual Machine operation. Required.
    :type parameters: ~azure.mgmt.compute.v2017_03_30.models.VirtualMachineCaptureParameters
    :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
     Default value is "application/json".
    :paramtype content_type: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
     this operation to not poll, or pass in your own initialized polling object for a personal
     polling strategy.
    :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
     Retry-After header is present.
    :return: An instance of AsyncLROPoller that returns either VirtualMachineCaptureResult or the
     result of cls(response)
    :rtype:
     ~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2017_03_30.models.VirtualMachineCaptureResult]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
@overload
async def begin_capture(
    self,
    resource_group_name: str,
    vm_name: str,
    parameters: IO,
    *,
    content_type: str = "application/json",
    **kwargs: Any
) -> AsyncLROPoller[_models.VirtualMachineCaptureResult]:
    # Signature-only overload (raw IO body); the implementation follows.
    """Captures the VM by copying virtual hard disks of the VM and outputs a template that can be used
    to create similar VMs.

    :param resource_group_name: The name of the resource group. Required.
    :type resource_group_name: str
    :param vm_name: The name of the virtual machine. Required.
    :type vm_name: str
    :param parameters: Parameters supplied to the Capture Virtual Machine operation. Required.
    :type parameters: IO
    :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
     Default value is "application/json".
    :paramtype content_type: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
     this operation to not poll, or pass in your own initialized polling object for a personal
     polling strategy.
    :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
     Retry-After header is present.
    :return: An instance of AsyncLROPoller that returns either VirtualMachineCaptureResult or the
     result of cls(response)
    :rtype:
     ~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2017_03_30.models.VirtualMachineCaptureResult]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
@distributed_trace_async
async def begin_capture(
    self,
    resource_group_name: str,
    vm_name: str,
    parameters: Union[_models.VirtualMachineCaptureParameters, IO],
    **kwargs: Any
) -> AsyncLROPoller[_models.VirtualMachineCaptureResult]:
    """Captures the VM by copying virtual hard disks of the VM and outputs a template that can be used
    to create similar VMs.

    :param resource_group_name: The name of the resource group. Required.
    :type resource_group_name: str
    :param vm_name: The name of the virtual machine. Required.
    :type vm_name: str
    :param parameters: Parameters supplied to the Capture Virtual Machine operation. Is either a
     model type or a IO type. Required.
    :type parameters: ~azure.mgmt.compute.v2017_03_30.models.VirtualMachineCaptureParameters or IO
    :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
     Default value is None.
    :paramtype content_type: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
     this operation to not poll, or pass in your own initialized polling object for a personal
     polling strategy.
    :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
     Retry-After header is present.
    :return: An instance of AsyncLROPoller that returns either VirtualMachineCaptureResult or the
     result of cls(response)
    :rtype:
     ~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2017_03_30.models.VirtualMachineCaptureResult]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version = kwargs.pop("api_version", _params.pop("api-version", "2017-03-30"))  # type: str
    content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None))  # type: Optional[str]
    cls = kwargs.pop("cls", None)  # type: ClsType[_models.VirtualMachineCaptureResult]
    polling = kwargs.pop("polling", True)  # type: Union[bool, AsyncPollingMethod]
    lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
    cont_token = kwargs.pop("continuation_token", None)  # type: Optional[str]
    # The initial request is only sent when not resuming from a saved
    # continuation token.
    if cont_token is None:
        raw_result = await self._capture_initial(  # type: ignore
            resource_group_name=resource_group_name,
            vm_name=vm_name,
            parameters=parameters,
            api_version=api_version,
            content_type=content_type,
            cls=lambda x, y, z: x,
            headers=_headers,
            params=_params,
            **kwargs
        )
    kwargs.pop("error_map", None)

    def get_long_running_output(pipeline_response):
        # Final-response hook: deserialize the completed LRO payload.
        deserialized = self._deserialize("VirtualMachineCaptureResult", pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized

    # polling=True selects ARM polling; False disables polling entirely;
    # any other value is treated as a caller-supplied polling method.
    if polling is True:
        polling_method = cast(
            AsyncPollingMethod,
            AsyncARMPolling(lro_delay, lro_options={"final-state-via": "azure-async-operation"}, **kwargs),
        )  # type: AsyncPollingMethod
    elif polling is False:
        polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
    else:
        polling_method = polling
    if cont_token:
        # Resume an in-flight operation from the saved token.
        return AsyncLROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output,
        )
    return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)

begin_capture.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/capture"}  # type: ignore
async def _create_or_update_initial(
    self, resource_group_name: str, vm_name: str, parameters: Union[_models.VirtualMachine, IO], **kwargs: Any
) -> _models.VirtualMachine:
    # Issues the initial HTTP request of the create-or-update
    # long-running operation; begin_create_or_update() wraps this.
    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
        304: ResourceNotModifiedError,
    }
    error_map.update(kwargs.pop("error_map", {}) or {})

    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version = kwargs.pop("api_version", _params.pop("api-version", "2017-03-30"))  # type: str
    content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None))  # type: Optional[str]
    cls = kwargs.pop("cls", None)  # type: ClsType[_models.VirtualMachine]

    content_type = content_type or "application/json"
    _json = None
    _content = None
    # Raw IO/bytes bodies pass through untouched; model objects are
    # serialized to JSON.
    if isinstance(parameters, (IO, bytes)):
        _content = parameters
    else:
        _json = self._serialize.body(parameters, "VirtualMachine")

    request = build_create_or_update_request(
        resource_group_name=resource_group_name,
        vm_name=vm_name,
        subscription_id=self._config.subscription_id,
        api_version=api_version,
        content_type=content_type,
        json=_json,
        content=_content,
        template_url=self._create_or_update_initial.metadata["url"],
        headers=_headers,
        params=_params,
    )
    request = _convert_request(request)
    request.url = self._client.format_url(request.url)  # type: ignore

    pipeline_response = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
        request, stream=False, **kwargs
    )

    response = pipeline_response.http_response

    if response.status_code not in [200, 201]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    # Both 200 (updated) and 201 (created) carry a VirtualMachine body.
    if response.status_code == 200:
        deserialized = self._deserialize("VirtualMachine", pipeline_response)

    if response.status_code == 201:
        deserialized = self._deserialize("VirtualMachine", pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})

    return deserialized

_create_or_update_initial.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}"}  # type: ignore
@overload
async def begin_create_or_update(
    self,
    resource_group_name: str,
    vm_name: str,
    parameters: _models.VirtualMachine,
    *,
    content_type: str = "application/json",
    **kwargs: Any
) -> AsyncLROPoller[_models.VirtualMachine]:
    # Signature-only overload (typed model body); the implementation
    # follows the second overload.
    """The operation to create or update a virtual machine.

    :param resource_group_name: The name of the resource group. Required.
    :type resource_group_name: str
    :param vm_name: The name of the virtual machine. Required.
    :type vm_name: str
    :param parameters: Parameters supplied to the Create Virtual Machine operation. Required.
    :type parameters: ~azure.mgmt.compute.v2017_03_30.models.VirtualMachine
    :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
     Default value is "application/json".
    :paramtype content_type: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
     this operation to not poll, or pass in your own initialized polling object for a personal
     polling strategy.
    :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
     Retry-After header is present.
    :return: An instance of AsyncLROPoller that returns either VirtualMachine or the result of
     cls(response)
    :rtype:
     ~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2017_03_30.models.VirtualMachine]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
@overload
async def begin_create_or_update(
    self,
    resource_group_name: str,
    vm_name: str,
    parameters: IO,
    *,
    content_type: str = "application/json",
    **kwargs: Any
) -> AsyncLROPoller[_models.VirtualMachine]:
    # Signature-only overload (raw IO body); the implementation follows.
    """The operation to create or update a virtual machine.

    :param resource_group_name: The name of the resource group. Required.
    :type resource_group_name: str
    :param vm_name: The name of the virtual machine. Required.
    :type vm_name: str
    :param parameters: Parameters supplied to the Create Virtual Machine operation. Required.
    :type parameters: IO
    :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
     Default value is "application/json".
    :paramtype content_type: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
     this operation to not poll, or pass in your own initialized polling object for a personal
     polling strategy.
    :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
     Retry-After header is present.
    :return: An instance of AsyncLROPoller that returns either VirtualMachine or the result of
     cls(response)
    :rtype:
     ~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2017_03_30.models.VirtualMachine]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
@distributed_trace_async
async def begin_create_or_update(
    self, resource_group_name: str, vm_name: str, parameters: Union[_models.VirtualMachine, IO], **kwargs: Any
) -> AsyncLROPoller[_models.VirtualMachine]:
    """The operation to create or update a virtual machine.

    :param resource_group_name: The name of the resource group. Required.
    :type resource_group_name: str
    :param vm_name: The name of the virtual machine. Required.
    :type vm_name: str
    :param parameters: Parameters supplied to the Create Virtual Machine operation. Is either a
     model type or a IO type. Required.
    :type parameters: ~azure.mgmt.compute.v2017_03_30.models.VirtualMachine or IO
    :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
     Default value is None.
    :paramtype content_type: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
     this operation to not poll, or pass in your own initialized polling object for a personal
     polling strategy.
    :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
     Retry-After header is present.
    :return: An instance of AsyncLROPoller that returns either VirtualMachine or the result of
     cls(response)
    :rtype:
     ~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2017_03_30.models.VirtualMachine]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    # Per-call pipeline options are popped from kwargs before anything is forwarded.
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version = kwargs.pop("api_version", _params.pop("api-version", "2017-03-30"))  # type: str
    content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None))  # type: Optional[str]
    cls = kwargs.pop("cls", None)  # type: ClsType[_models.VirtualMachine]
    polling = kwargs.pop("polling", True)  # type: Union[bool, AsyncPollingMethod]
    lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
    cont_token = kwargs.pop("continuation_token", None)  # type: Optional[str]
    # Only issue the initial PUT when not resuming from a saved poller state.
    if cont_token is None:
        raw_result = await self._create_or_update_initial(  # type: ignore
            resource_group_name=resource_group_name,
            vm_name=vm_name,
            parameters=parameters,
            api_version=api_version,
            content_type=content_type,
            cls=lambda x, y, z: x,  # keep the raw pipeline response for the poller
            headers=_headers,
            params=_params,
            **kwargs
        )
    kwargs.pop("error_map", None)

    def get_long_running_output(pipeline_response):
        # Deserialize the terminal response once polling completes.
        deserialized = self._deserialize("VirtualMachine", pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized

    # Resolve the polling strategy: default ARM polling, no polling, or a caller-supplied method.
    if polling is True:
        polling_method = cast(AsyncPollingMethod, AsyncARMPolling(lro_delay, **kwargs))  # type: AsyncPollingMethod
    elif polling is False:
        polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
    else:
        polling_method = polling
    if cont_token:
        return AsyncLROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output,
        )
    return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)

begin_create_or_update.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}"}  # type: ignore
async def _delete_initial(
    self, resource_group_name: str, vm_name: str, **kwargs: Any
) -> Optional[_models.OperationStatusResponse]:
    """Issue the initial DELETE request of the delete long-running operation.

    Returns a deserialized body only for a 200 response; 202/204 yield ``None``.
    """
    # Map well-known HTTP statuses to typed exceptions; callers may extend via error_map.
    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
        304: ResourceNotModifiedError,
    }
    error_map.update(kwargs.pop("error_map", {}) or {})

    _headers = kwargs.pop("headers", {}) or {}
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version = kwargs.pop("api_version", _params.pop("api-version", "2017-03-30"))  # type: str
    cls = kwargs.pop("cls", None)  # type: ClsType[Optional[_models.OperationStatusResponse]]

    request = build_delete_request(
        resource_group_name=resource_group_name,
        vm_name=vm_name,
        subscription_id=self._config.subscription_id,
        api_version=api_version,
        template_url=self._delete_initial.metadata["url"],
        headers=_headers,
        params=_params,
    )
    request = _convert_request(request)
    request.url = self._client.format_url(request.url)  # type: ignore

    pipeline_response = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
        request, stream=False, **kwargs
    )

    response = pipeline_response.http_response

    if response.status_code not in [200, 202, 204]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    deserialized = None
    if response.status_code == 200:
        deserialized = self._deserialize("OperationStatusResponse", pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})

    return deserialized

_delete_initial.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}"}  # type: ignore
@distributed_trace_async
async def begin_delete(
    self, resource_group_name: str, vm_name: str, **kwargs: Any
) -> AsyncLROPoller[_models.OperationStatusResponse]:
    """The operation to delete a virtual machine.

    :param resource_group_name: The name of the resource group. Required.
    :type resource_group_name: str
    :param vm_name: The name of the virtual machine. Required.
    :type vm_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
     this operation to not poll, or pass in your own initialized polling object for a personal
     polling strategy.
    :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
     Retry-After header is present.
    :return: An instance of AsyncLROPoller that returns either OperationStatusResponse or the
     result of cls(response)
    :rtype:
     ~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2017_03_30.models.OperationStatusResponse]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    # Per-call pipeline options are popped from kwargs before anything is forwarded.
    _headers = kwargs.pop("headers", {}) or {}
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version = kwargs.pop("api_version", _params.pop("api-version", "2017-03-30"))  # type: str
    cls = kwargs.pop("cls", None)  # type: ClsType[_models.OperationStatusResponse]
    polling = kwargs.pop("polling", True)  # type: Union[bool, AsyncPollingMethod]
    lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
    cont_token = kwargs.pop("continuation_token", None)  # type: Optional[str]
    # Only issue the initial DELETE when not resuming from a saved poller state.
    if cont_token is None:
        raw_result = await self._delete_initial(  # type: ignore
            resource_group_name=resource_group_name,
            vm_name=vm_name,
            api_version=api_version,
            cls=lambda x, y, z: x,  # keep the raw pipeline response for the poller
            headers=_headers,
            params=_params,
            **kwargs
        )
    kwargs.pop("error_map", None)

    def get_long_running_output(pipeline_response):
        # Deserialize the terminal response once polling completes.
        deserialized = self._deserialize("OperationStatusResponse", pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized

    # Resolve the polling strategy: default ARM polling, no polling, or a caller-supplied method.
    if polling is True:
        polling_method = cast(AsyncPollingMethod, AsyncARMPolling(lro_delay, **kwargs))  # type: AsyncPollingMethod
    elif polling is False:
        polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
    else:
        polling_method = polling
    if cont_token:
        return AsyncLROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output,
        )
    return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)

begin_delete.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}"}  # type: ignore
@distributed_trace_async
async def get(
    self, resource_group_name: str, vm_name: str, expand: str = "instanceView", **kwargs: Any
) -> _models.VirtualMachine:
    """Retrieves information about the model view or the instance view of a virtual machine.

    :param resource_group_name: The name of the resource group. Required.
    :type resource_group_name: str
    :param vm_name: The name of the virtual machine. Required.
    :type vm_name: str
    :param expand: The expand expression to apply on the operation. Known values are "instanceView"
     and None. Default value is "instanceView".
    :type expand: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: VirtualMachine or the result of cls(response)
    :rtype: ~azure.mgmt.compute.v2017_03_30.models.VirtualMachine
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    # Typed-exception mapping for well-known statuses; callers may override via error_map.
    status_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
        304: ResourceNotModifiedError,
    }
    status_map.update(kwargs.pop("error_map", {}) or {})

    opt_headers = kwargs.pop("headers", {}) or {}
    opt_params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version = kwargs.pop("api_version", opt_params.pop("api-version", "2017-03-30"))  # type: str
    cls = kwargs.pop("cls", None)  # type: ClsType[_models.VirtualMachine]

    # Build and normalize the GET request, then dispatch it through the client pipeline.
    http_request = _convert_request(
        build_get_request(
            resource_group_name=resource_group_name,
            vm_name=vm_name,
            subscription_id=self._config.subscription_id,
            expand=expand,
            api_version=api_version,
            template_url=self.get.metadata["url"],
            headers=opt_headers,
            params=opt_params,
        )
    )
    http_request.url = self._client.format_url(http_request.url)  # type: ignore

    pipeline_result = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
        http_request, stream=False, **kwargs
    )
    http_response = pipeline_result.http_response

    # Anything other than 200 is surfaced as a typed exception.
    if http_response.status_code not in (200,):
        map_error(status_code=http_response.status_code, response=http_response, error_map=status_map)
        raise HttpResponseError(response=http_response, error_format=ARMErrorFormat)

    vm = self._deserialize("VirtualMachine", pipeline_result)
    return cls(pipeline_result, vm, {}) if cls else vm

get.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}"}  # type: ignore
@distributed_trace_async
async def instance_view(
    self, resource_group_name: str, vm_name: str, **kwargs: Any
) -> _models.VirtualMachineInstanceView:
    """Retrieves information about the run-time state of a virtual machine.

    :param resource_group_name: The name of the resource group. Required.
    :type resource_group_name: str
    :param vm_name: The name of the virtual machine. Required.
    :type vm_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: VirtualMachineInstanceView or the result of cls(response)
    :rtype: ~azure.mgmt.compute.v2017_03_30.models.VirtualMachineInstanceView
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    # Typed-exception mapping for well-known statuses; callers may override via error_map.
    status_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
        304: ResourceNotModifiedError,
    }
    status_map.update(kwargs.pop("error_map", {}) or {})

    opt_headers = kwargs.pop("headers", {}) or {}
    opt_params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version = kwargs.pop("api_version", opt_params.pop("api-version", "2017-03-30"))  # type: str
    cls = kwargs.pop("cls", None)  # type: ClsType[_models.VirtualMachineInstanceView]

    # Build and normalize the request, then dispatch it through the client pipeline.
    http_request = _convert_request(
        build_instance_view_request(
            resource_group_name=resource_group_name,
            vm_name=vm_name,
            subscription_id=self._config.subscription_id,
            api_version=api_version,
            template_url=self.instance_view.metadata["url"],
            headers=opt_headers,
            params=opt_params,
        )
    )
    http_request.url = self._client.format_url(http_request.url)  # type: ignore

    pipeline_result = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
        http_request, stream=False, **kwargs
    )
    http_response = pipeline_result.http_response

    # Anything other than 200 is surfaced as a typed exception.
    if http_response.status_code not in (200,):
        map_error(status_code=http_response.status_code, response=http_response, error_map=status_map)
        raise HttpResponseError(response=http_response, error_format=ARMErrorFormat)

    view = self._deserialize("VirtualMachineInstanceView", pipeline_result)
    return cls(pipeline_result, view, {}) if cls else view

instance_view.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/instanceView"}  # type: ignore
async def _convert_to_managed_disks_initial(
    self, resource_group_name: str, vm_name: str, **kwargs: Any
) -> Optional[_models.OperationStatusResponse]:
    """Issue the initial POST of the convert-to-managed-disks long-running operation.

    Returns a deserialized body only for a 200 response; 202 yields ``None``.
    """
    # Map well-known HTTP statuses to typed exceptions; callers may extend via error_map.
    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
        304: ResourceNotModifiedError,
    }
    error_map.update(kwargs.pop("error_map", {}) or {})

    _headers = kwargs.pop("headers", {}) or {}
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version = kwargs.pop("api_version", _params.pop("api-version", "2017-03-30"))  # type: str
    cls = kwargs.pop("cls", None)  # type: ClsType[Optional[_models.OperationStatusResponse]]

    request = build_convert_to_managed_disks_request(
        resource_group_name=resource_group_name,
        vm_name=vm_name,
        subscription_id=self._config.subscription_id,
        api_version=api_version,
        template_url=self._convert_to_managed_disks_initial.metadata["url"],
        headers=_headers,
        params=_params,
    )
    request = _convert_request(request)
    request.url = self._client.format_url(request.url)  # type: ignore

    pipeline_response = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
        request, stream=False, **kwargs
    )

    response = pipeline_response.http_response

    if response.status_code not in [200, 202]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    deserialized = None
    if response.status_code == 200:
        deserialized = self._deserialize("OperationStatusResponse", pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})

    return deserialized

_convert_to_managed_disks_initial.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/convertToManagedDisks"}  # type: ignore
@distributed_trace_async
async def begin_convert_to_managed_disks(
    self, resource_group_name: str, vm_name: str, **kwargs: Any
) -> AsyncLROPoller[_models.OperationStatusResponse]:
    """Converts virtual machine disks from blob-based to managed disks. Virtual machine must be
    stop-deallocated before invoking this operation.

    :param resource_group_name: The name of the resource group. Required.
    :type resource_group_name: str
    :param vm_name: The name of the virtual machine. Required.
    :type vm_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
     this operation to not poll, or pass in your own initialized polling object for a personal
     polling strategy.
    :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
     Retry-After header is present.
    :return: An instance of AsyncLROPoller that returns either OperationStatusResponse or the
     result of cls(response)
    :rtype:
     ~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2017_03_30.models.OperationStatusResponse]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    # Per-call pipeline options are popped from kwargs before anything is forwarded.
    _headers = kwargs.pop("headers", {}) or {}
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version = kwargs.pop("api_version", _params.pop("api-version", "2017-03-30"))  # type: str
    cls = kwargs.pop("cls", None)  # type: ClsType[_models.OperationStatusResponse]
    polling = kwargs.pop("polling", True)  # type: Union[bool, AsyncPollingMethod]
    lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
    cont_token = kwargs.pop("continuation_token", None)  # type: Optional[str]
    # Only issue the initial POST when not resuming from a saved poller state.
    if cont_token is None:
        raw_result = await self._convert_to_managed_disks_initial(  # type: ignore
            resource_group_name=resource_group_name,
            vm_name=vm_name,
            api_version=api_version,
            cls=lambda x, y, z: x,  # keep the raw pipeline response for the poller
            headers=_headers,
            params=_params,
            **kwargs
        )
    kwargs.pop("error_map", None)

    def get_long_running_output(pipeline_response):
        # Deserialize the terminal response once polling completes.
        deserialized = self._deserialize("OperationStatusResponse", pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized

    # Default polling tracks the Azure-AsyncOperation header for the final state.
    if polling is True:
        polling_method = cast(
            AsyncPollingMethod,
            AsyncARMPolling(lro_delay, lro_options={"final-state-via": "azure-async-operation"}, **kwargs),
        )  # type: AsyncPollingMethod
    elif polling is False:
        polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
    else:
        polling_method = polling
    if cont_token:
        return AsyncLROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output,
        )
    return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)

begin_convert_to_managed_disks.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/convertToManagedDisks"}  # type: ignore
async def _deallocate_initial(
    self, resource_group_name: str, vm_name: str, **kwargs: Any
) -> Optional[_models.OperationStatusResponse]:
    """Send the initial deallocate POST; a body is returned only for a 200 response."""
    # Typed-exception mapping for well-known statuses; callers may override via error_map.
    status_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
        304: ResourceNotModifiedError,
    }
    status_map.update(kwargs.pop("error_map", {}) or {})

    opt_headers = kwargs.pop("headers", {}) or {}
    opt_params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version = kwargs.pop("api_version", opt_params.pop("api-version", "2017-03-30"))  # type: str
    cls = kwargs.pop("cls", None)  # type: ClsType[Optional[_models.OperationStatusResponse]]

    # Build and normalize the request, then dispatch it through the client pipeline.
    http_request = _convert_request(
        build_deallocate_request(
            resource_group_name=resource_group_name,
            vm_name=vm_name,
            subscription_id=self._config.subscription_id,
            api_version=api_version,
            template_url=self._deallocate_initial.metadata["url"],
            headers=opt_headers,
            params=opt_params,
        )
    )
    http_request.url = self._client.format_url(http_request.url)  # type: ignore

    pipeline_result = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
        http_request, stream=False, **kwargs
    )
    http_response = pipeline_result.http_response

    if http_response.status_code not in (200, 202):
        map_error(status_code=http_response.status_code, response=http_response, error_map=status_map)
        raise HttpResponseError(response=http_response, error_format=ARMErrorFormat)

    # 202 Accepted carries no body worth deserializing.
    body = None
    if http_response.status_code == 200:
        body = self._deserialize("OperationStatusResponse", pipeline_result)

    if cls:
        return cls(pipeline_result, body, {})
    return body

_deallocate_initial.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/deallocate"}  # type: ignore
@distributed_trace_async
async def begin_deallocate(
    self, resource_group_name: str, vm_name: str, **kwargs: Any
) -> AsyncLROPoller[_models.OperationStatusResponse]:
    """Shuts down the virtual machine and releases the compute resources. You are not billed for the
    compute resources that this virtual machine uses.

    :param resource_group_name: The name of the resource group. Required.
    :type resource_group_name: str
    :param vm_name: The name of the virtual machine. Required.
    :type vm_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
     this operation to not poll, or pass in your own initialized polling object for a personal
     polling strategy.
    :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
     Retry-After header is present.
    :return: An instance of AsyncLROPoller that returns either OperationStatusResponse or the
     result of cls(response)
    :rtype:
     ~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2017_03_30.models.OperationStatusResponse]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    # Per-call pipeline options are popped from kwargs before anything is forwarded.
    _headers = kwargs.pop("headers", {}) or {}
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version = kwargs.pop("api_version", _params.pop("api-version", "2017-03-30"))  # type: str
    cls = kwargs.pop("cls", None)  # type: ClsType[_models.OperationStatusResponse]
    polling = kwargs.pop("polling", True)  # type: Union[bool, AsyncPollingMethod]
    lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
    cont_token = kwargs.pop("continuation_token", None)  # type: Optional[str]
    # Only issue the initial POST when not resuming from a saved poller state.
    if cont_token is None:
        raw_result = await self._deallocate_initial(  # type: ignore
            resource_group_name=resource_group_name,
            vm_name=vm_name,
            api_version=api_version,
            cls=lambda x, y, z: x,  # keep the raw pipeline response for the poller
            headers=_headers,
            params=_params,
            **kwargs
        )
    kwargs.pop("error_map", None)

    def get_long_running_output(pipeline_response):
        # Deserialize the terminal response once polling completes.
        deserialized = self._deserialize("OperationStatusResponse", pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized

    # Default polling tracks the Azure-AsyncOperation header for the final state.
    if polling is True:
        polling_method = cast(
            AsyncPollingMethod,
            AsyncARMPolling(lro_delay, lro_options={"final-state-via": "azure-async-operation"}, **kwargs),
        )  # type: AsyncPollingMethod
    elif polling is False:
        polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
    else:
        polling_method = polling
    if cont_token:
        return AsyncLROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output,
        )
    return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)

begin_deallocate.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/deallocate"}  # type: ignore
@distributed_trace_async
async def generalize(
    self, resource_group_name: str, vm_name: str, **kwargs: Any
) -> _models.OperationStatusResponse:
    """Sets the state of the virtual machine to generalized.

    :param resource_group_name: The name of the resource group. Required.
    :type resource_group_name: str
    :param vm_name: The name of the virtual machine. Required.
    :type vm_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: OperationStatusResponse or the result of cls(response)
    :rtype: ~azure.mgmt.compute.v2017_03_30.models.OperationStatusResponse
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    # Typed-exception mapping for well-known statuses; callers may override via error_map.
    status_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
        304: ResourceNotModifiedError,
    }
    status_map.update(kwargs.pop("error_map", {}) or {})

    opt_headers = kwargs.pop("headers", {}) or {}
    opt_params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version = kwargs.pop("api_version", opt_params.pop("api-version", "2017-03-30"))  # type: str
    cls = kwargs.pop("cls", None)  # type: ClsType[_models.OperationStatusResponse]

    # Build and normalize the request, then dispatch it through the client pipeline.
    http_request = _convert_request(
        build_generalize_request(
            resource_group_name=resource_group_name,
            vm_name=vm_name,
            subscription_id=self._config.subscription_id,
            api_version=api_version,
            template_url=self.generalize.metadata["url"],
            headers=opt_headers,
            params=opt_params,
        )
    )
    http_request.url = self._client.format_url(http_request.url)  # type: ignore

    pipeline_result = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
        http_request, stream=False, **kwargs
    )
    http_response = pipeline_result.http_response

    # Anything other than 200 is surfaced as a typed exception.
    if http_response.status_code not in (200,):
        map_error(status_code=http_response.status_code, response=http_response, error_map=status_map)
        raise HttpResponseError(response=http_response, error_format=ARMErrorFormat)

    status = self._deserialize("OperationStatusResponse", pipeline_result)
    return cls(pipeline_result, status, {}) if cls else status

generalize.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/generalize"}  # type: ignore
@distributed_trace
def list(self, resource_group_name: str, **kwargs: Any) -> AsyncIterable["_models.VirtualMachine"]:
    """Lists all of the virtual machines in the specified resource group. Use the nextLink property in
    the response to get the next page of virtual machines.

    :param resource_group_name: The name of the resource group. Required.
    :type resource_group_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either VirtualMachine or the result of cls(response)
    :rtype:
     ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.compute.v2017_03_30.models.VirtualMachine]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    _headers = kwargs.pop("headers", {}) or {}
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version = kwargs.pop("api_version", _params.pop("api-version", "2017-03-30"))  # type: str
    cls = kwargs.pop("cls", None)  # type: ClsType[_models.VirtualMachineListResult]

    # Map well-known HTTP statuses to typed exceptions; callers may extend via error_map.
    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
        304: ResourceNotModifiedError,
    }
    error_map.update(kwargs.pop("error_map", {}) or {})

    def prepare_request(next_link=None):
        # First page: build the templated list request. Later pages: follow next_link,
        # re-encoding its query string and re-applying the client's api-version.
        if not next_link:
            request = build_list_request(
                resource_group_name=resource_group_name,
                subscription_id=self._config.subscription_id,
                api_version=api_version,
                template_url=self.list.metadata["url"],
                headers=_headers,
                params=_params,
            )
            request = _convert_request(request)
            request.url = self._client.format_url(request.url)  # type: ignore
        else:
            # make call to next link with the client's api-version
            _parsed_next_link = urllib.parse.urlparse(next_link)
            _next_request_params = case_insensitive_dict(
                {
                    key: [urllib.parse.quote(v) for v in value]
                    for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
                }
            )
            _next_request_params["api-version"] = self._config.api_version
            request = HttpRequest(
                "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
            )
            request = _convert_request(request)
            request.url = self._client.format_url(request.url)  # type: ignore
            request.method = "GET"
        return request

    async def extract_data(pipeline_response):
        # Split one page into (next_link, items) as AsyncItemPaged expects.
        deserialized = self._deserialize("VirtualMachineListResult", pipeline_response)
        list_of_elem = deserialized.value
        if cls:
            list_of_elem = cls(list_of_elem)
        return deserialized.next_link or None, AsyncList(list_of_elem)

    async def get_next(next_link=None):
        # Fetch one page; non-200 responses become typed exceptions.
        request = prepare_request(next_link)

        pipeline_response = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
            request, stream=False, **kwargs
        )
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        return pipeline_response

    return AsyncItemPaged(get_next, extract_data)

list.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines"}  # type: ignore
@distributed_trace
def list_all(self, **kwargs: Any) -> AsyncIterable["_models.VirtualMachine"]:
    """Lists all of the virtual machines in the specified subscription. Use the nextLink property in
    the response to get the next page of virtual machines.

    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either VirtualMachine or the result of cls(response)
    :rtype:
     ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.compute.v2017_03_30.models.VirtualMachine]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    _headers = kwargs.pop("headers", {}) or {}
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version = kwargs.pop("api_version", _params.pop("api-version", "2017-03-30"))  # type: str
    cls = kwargs.pop("cls", None)  # type: ClsType[_models.VirtualMachineListResult]

    # Map well-known HTTP statuses to typed exceptions; callers may extend via error_map.
    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
        304: ResourceNotModifiedError,
    }
    error_map.update(kwargs.pop("error_map", {}) or {})

    def prepare_request(next_link=None):
        # First page: build the templated list request. Later pages: follow next_link,
        # re-encoding its query string and re-applying the client's api-version.
        if not next_link:
            request = build_list_all_request(
                subscription_id=self._config.subscription_id,
                api_version=api_version,
                template_url=self.list_all.metadata["url"],
                headers=_headers,
                params=_params,
            )
            request = _convert_request(request)
            request.url = self._client.format_url(request.url)  # type: ignore
        else:
            # make call to next link with the client's api-version
            _parsed_next_link = urllib.parse.urlparse(next_link)
            _next_request_params = case_insensitive_dict(
                {
                    key: [urllib.parse.quote(v) for v in value]
                    for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
                }
            )
            _next_request_params["api-version"] = self._config.api_version
            request = HttpRequest(
                "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
            )
            request = _convert_request(request)
            request.url = self._client.format_url(request.url)  # type: ignore
            request.method = "GET"
        return request

    async def extract_data(pipeline_response):
        # Split one page into (next_link, items) as AsyncItemPaged expects.
        deserialized = self._deserialize("VirtualMachineListResult", pipeline_response)
        list_of_elem = deserialized.value
        if cls:
            list_of_elem = cls(list_of_elem)
        return deserialized.next_link or None, AsyncList(list_of_elem)

    async def get_next(next_link=None):
        # Fetch one page; non-200 responses become typed exceptions.
        request = prepare_request(next_link)

        pipeline_response = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
            request, stream=False, **kwargs
        )
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        return pipeline_response

    return AsyncItemPaged(get_next, extract_data)

list_all.metadata = {"url": "/subscriptions/{subscriptionId}/providers/Microsoft.Compute/virtualMachines"}  # type: ignore
@distributed_trace
def list_available_sizes(
    self, resource_group_name: str, vm_name: str, **kwargs: Any
) -> AsyncIterable["_models.VirtualMachineSize"]:
    """Lists all available virtual machine sizes to which the specified virtual machine can be
    resized.

    :param resource_group_name: The name of the resource group. Required.
    :type resource_group_name: str
    :param vm_name: The name of the virtual machine. Required.
    :type vm_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either VirtualMachineSize or the result of cls(response)
    :rtype:
     ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.compute.v2017_03_30.models.VirtualMachineSize]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    _headers = kwargs.pop("headers", {}) or {}
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version = kwargs.pop("api_version", _params.pop("api-version", "2017-03-30"))  # type: str
    cls = kwargs.pop("cls", None)  # type: ClsType[_models.VirtualMachineSizeListResult]

    # Map well-known HTTP statuses to typed exceptions; callers may extend via error_map.
    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
        304: ResourceNotModifiedError,
    }
    error_map.update(kwargs.pop("error_map", {}) or {})

    def prepare_request(next_link=None):
        # First page: build the templated request. The next_link branch is kept for the
        # shared paging scaffolding even though extract_data below never yields a next link.
        if not next_link:
            request = build_list_available_sizes_request(
                resource_group_name=resource_group_name,
                vm_name=vm_name,
                subscription_id=self._config.subscription_id,
                api_version=api_version,
                template_url=self.list_available_sizes.metadata["url"],
                headers=_headers,
                params=_params,
            )
            request = _convert_request(request)
            request.url = self._client.format_url(request.url)  # type: ignore
        else:
            # make call to next link with the client's api-version
            _parsed_next_link = urllib.parse.urlparse(next_link)
            _next_request_params = case_insensitive_dict(
                {
                    key: [urllib.parse.quote(v) for v in value]
                    for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
                }
            )
            _next_request_params["api-version"] = self._config.api_version
            request = HttpRequest(
                "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
            )
            request = _convert_request(request)
            request.url = self._client.format_url(request.url)  # type: ignore
            request.method = "GET"
        return request

    async def extract_data(pipeline_response):
        # Single-page result: the next-link slot is always None.
        deserialized = self._deserialize("VirtualMachineSizeListResult", pipeline_response)
        list_of_elem = deserialized.value
        if cls:
            list_of_elem = cls(list_of_elem)
        return None, AsyncList(list_of_elem)

    async def get_next(next_link=None):
        # Fetch the page; non-200 responses become typed exceptions.
        request = prepare_request(next_link)

        pipeline_response = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
            request, stream=False, **kwargs
        )
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        return pipeline_response

    return AsyncItemPaged(get_next, extract_data)

list_available_sizes.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/vmSizes"}  # type: ignore
async def _power_off_initial(
    self, resource_group_name: str, vm_name: str, **kwargs: Any
) -> Optional[_models.OperationStatusResponse]:
    """Issue the raw powerOff request; returns the status model on 200, None on 202."""
    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
        304: ResourceNotModifiedError,
    }
    error_map.update(kwargs.pop("error_map", {}) or {})

    _headers = kwargs.pop("headers", {}) or {}
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version = kwargs.pop("api_version", _params.pop("api-version", "2017-03-30"))  # type: str
    cls = kwargs.pop("cls", None)  # type: ClsType[Optional[_models.OperationStatusResponse]]

    req = build_power_off_request(
        resource_group_name=resource_group_name,
        vm_name=vm_name,
        subscription_id=self._config.subscription_id,
        api_version=api_version,
        template_url=self._power_off_initial.metadata["url"],
        headers=_headers,
        params=_params,
    )
    req = _convert_request(req)
    req.url = self._client.format_url(req.url)  # type: ignore

    pipeline_resp = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
        req, stream=False, **kwargs
    )
    http_resp = pipeline_resp.http_response

    if http_resp.status_code not in (200, 202):
        map_error(status_code=http_resp.status_code, response=http_resp, error_map=error_map)
        raise HttpResponseError(response=http_resp, error_format=ARMErrorFormat)

    # 202 (Accepted) carries no body; only 200 is deserialized.
    deserialized = (
        self._deserialize("OperationStatusResponse", pipeline_resp)
        if http_resp.status_code == 200
        else None
    )
    if cls:
        return cls(pipeline_resp, deserialized, {})
    return deserialized

_power_off_initial.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/powerOff"}  # type: ignore
@distributed_trace_async
async def begin_power_off(
    self, resource_group_name: str, vm_name: str, **kwargs: Any
) -> AsyncLROPoller[_models.OperationStatusResponse]:
    """The operation to power off (stop) a virtual machine. The virtual machine can be restarted with
    the same provisioned resources. You are still charged for this virtual machine.

    :param resource_group_name: The name of the resource group. Required.
    :type resource_group_name: str
    :param vm_name: The name of the virtual machine. Required.
    :type vm_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
     this operation to not poll, or pass in your own initialized polling object for a personal
     polling strategy.
    :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
     Retry-After header is present.
    :return: An instance of AsyncLROPoller that returns either OperationStatusResponse or the
     result of cls(response)
    :rtype:
     ~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2017_03_30.models.OperationStatusResponse]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    _headers = kwargs.pop("headers", {}) or {}
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version = kwargs.pop("api_version", _params.pop("api-version", "2017-03-30"))  # type: str
    cls = kwargs.pop("cls", None)  # type: ClsType[_models.OperationStatusResponse]
    polling = kwargs.pop("polling", True)  # type: Union[bool, AsyncPollingMethod]
    lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
    cont_token = kwargs.pop("continuation_token", None)  # type: Optional[str]
    if cont_token is None:
        # Fresh LRO: send the initial request.  ``cls`` is overridden with a pass-through
        # lambda so the raw pipeline response (needed by the poller) is returned, not a model.
        raw_result = await self._power_off_initial(  # type: ignore
            resource_group_name=resource_group_name,
            vm_name=vm_name,
            api_version=api_version,
            cls=lambda x, y, z: x,
            headers=_headers,
            params=_params,
            **kwargs
        )
    # Remove any caller-supplied error_map so it is not forwarded to the
    # polling method via **kwargs below.
    kwargs.pop("error_map", None)

    def get_long_running_output(pipeline_response):
        # Deserialize the final LRO response into the documented model (or cls()).
        deserialized = self._deserialize("OperationStatusResponse", pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized

    if polling is True:
        polling_method = cast(
            AsyncPollingMethod,
            AsyncARMPolling(lro_delay, lro_options={"final-state-via": "azure-async-operation"}, **kwargs),
        )  # type: AsyncPollingMethod
    elif polling is False:
        polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
    else:
        polling_method = polling
    if cont_token:
        # Resume a previously started operation from its saved state.
        return AsyncLROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output,
        )
    return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)

begin_power_off.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/powerOff"}  # type: ignore
async def _restart_initial(
    self, resource_group_name: str, vm_name: str, **kwargs: Any
) -> Optional[_models.OperationStatusResponse]:
    """Issue the raw restart request; returns the status model on 200, None on 202."""
    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
        304: ResourceNotModifiedError,
    }
    error_map.update(kwargs.pop("error_map", {}) or {})

    _headers = kwargs.pop("headers", {}) or {}
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version = kwargs.pop("api_version", _params.pop("api-version", "2017-03-30"))  # type: str
    cls = kwargs.pop("cls", None)  # type: ClsType[Optional[_models.OperationStatusResponse]]

    req = build_restart_request(
        resource_group_name=resource_group_name,
        vm_name=vm_name,
        subscription_id=self._config.subscription_id,
        api_version=api_version,
        template_url=self._restart_initial.metadata["url"],
        headers=_headers,
        params=_params,
    )
    req = _convert_request(req)
    req.url = self._client.format_url(req.url)  # type: ignore

    pipeline_resp = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
        req, stream=False, **kwargs
    )
    http_resp = pipeline_resp.http_response

    if http_resp.status_code not in (200, 202):
        map_error(status_code=http_resp.status_code, response=http_resp, error_map=error_map)
        raise HttpResponseError(response=http_resp, error_format=ARMErrorFormat)

    # 202 (Accepted) carries no body; only 200 is deserialized.
    deserialized = (
        self._deserialize("OperationStatusResponse", pipeline_resp)
        if http_resp.status_code == 200
        else None
    )
    if cls:
        return cls(pipeline_resp, deserialized, {})
    return deserialized

_restart_initial.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/restart"}  # type: ignore
@distributed_trace_async
async def begin_restart(
    self, resource_group_name: str, vm_name: str, **kwargs: Any
) -> AsyncLROPoller[_models.OperationStatusResponse]:
    """The operation to restart a virtual machine.

    :param resource_group_name: The name of the resource group. Required.
    :type resource_group_name: str
    :param vm_name: The name of the virtual machine. Required.
    :type vm_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
     this operation to not poll, or pass in your own initialized polling object for a personal
     polling strategy.
    :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
     Retry-After header is present.
    :return: An instance of AsyncLROPoller that returns either OperationStatusResponse or the
     result of cls(response)
    :rtype:
     ~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2017_03_30.models.OperationStatusResponse]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    _headers = kwargs.pop("headers", {}) or {}
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version = kwargs.pop("api_version", _params.pop("api-version", "2017-03-30"))  # type: str
    cls = kwargs.pop("cls", None)  # type: ClsType[_models.OperationStatusResponse]
    polling = kwargs.pop("polling", True)  # type: Union[bool, AsyncPollingMethod]
    lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
    cont_token = kwargs.pop("continuation_token", None)  # type: Optional[str]
    if cont_token is None:
        # Fresh LRO: send the initial request.  ``cls`` is overridden with a pass-through
        # lambda so the raw pipeline response (needed by the poller) is returned, not a model.
        raw_result = await self._restart_initial(  # type: ignore
            resource_group_name=resource_group_name,
            vm_name=vm_name,
            api_version=api_version,
            cls=lambda x, y, z: x,
            headers=_headers,
            params=_params,
            **kwargs
        )
    # Remove any caller-supplied error_map so it is not forwarded to the
    # polling method via **kwargs below.
    kwargs.pop("error_map", None)

    def get_long_running_output(pipeline_response):
        # Deserialize the final LRO response into the documented model (or cls()).
        deserialized = self._deserialize("OperationStatusResponse", pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized

    if polling is True:
        polling_method = cast(
            AsyncPollingMethod,
            AsyncARMPolling(lro_delay, lro_options={"final-state-via": "azure-async-operation"}, **kwargs),
        )  # type: AsyncPollingMethod
    elif polling is False:
        polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
    else:
        polling_method = polling
    if cont_token:
        # Resume a previously started operation from its saved state.
        return AsyncLROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output,
        )
    return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)

begin_restart.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/restart"}  # type: ignore
async def _start_initial(
    self, resource_group_name: str, vm_name: str, **kwargs: Any
) -> Optional[_models.OperationStatusResponse]:
    """Issue the raw start request; returns the status model on 200, None on 202."""
    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
        304: ResourceNotModifiedError,
    }
    error_map.update(kwargs.pop("error_map", {}) or {})

    _headers = kwargs.pop("headers", {}) or {}
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version = kwargs.pop("api_version", _params.pop("api-version", "2017-03-30"))  # type: str
    cls = kwargs.pop("cls", None)  # type: ClsType[Optional[_models.OperationStatusResponse]]

    req = build_start_request(
        resource_group_name=resource_group_name,
        vm_name=vm_name,
        subscription_id=self._config.subscription_id,
        api_version=api_version,
        template_url=self._start_initial.metadata["url"],
        headers=_headers,
        params=_params,
    )
    req = _convert_request(req)
    req.url = self._client.format_url(req.url)  # type: ignore

    pipeline_resp = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
        req, stream=False, **kwargs
    )
    http_resp = pipeline_resp.http_response

    if http_resp.status_code not in (200, 202):
        map_error(status_code=http_resp.status_code, response=http_resp, error_map=error_map)
        raise HttpResponseError(response=http_resp, error_format=ARMErrorFormat)

    # 202 (Accepted) carries no body; only 200 is deserialized.
    deserialized = (
        self._deserialize("OperationStatusResponse", pipeline_resp)
        if http_resp.status_code == 200
        else None
    )
    if cls:
        return cls(pipeline_resp, deserialized, {})
    return deserialized

_start_initial.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/start"}  # type: ignore
@distributed_trace_async
async def begin_start(
    self, resource_group_name: str, vm_name: str, **kwargs: Any
) -> AsyncLROPoller[_models.OperationStatusResponse]:
    """The operation to start a virtual machine.

    :param resource_group_name: The name of the resource group. Required.
    :type resource_group_name: str
    :param vm_name: The name of the virtual machine. Required.
    :type vm_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
     this operation to not poll, or pass in your own initialized polling object for a personal
     polling strategy.
    :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
     Retry-After header is present.
    :return: An instance of AsyncLROPoller that returns either OperationStatusResponse or the
     result of cls(response)
    :rtype:
     ~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2017_03_30.models.OperationStatusResponse]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    _headers = kwargs.pop("headers", {}) or {}
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version = kwargs.pop("api_version", _params.pop("api-version", "2017-03-30"))  # type: str
    cls = kwargs.pop("cls", None)  # type: ClsType[_models.OperationStatusResponse]
    polling = kwargs.pop("polling", True)  # type: Union[bool, AsyncPollingMethod]
    lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
    cont_token = kwargs.pop("continuation_token", None)  # type: Optional[str]
    if cont_token is None:
        # Fresh LRO: send the initial request.  ``cls`` is overridden with a pass-through
        # lambda so the raw pipeline response (needed by the poller) is returned, not a model.
        raw_result = await self._start_initial(  # type: ignore
            resource_group_name=resource_group_name,
            vm_name=vm_name,
            api_version=api_version,
            cls=lambda x, y, z: x,
            headers=_headers,
            params=_params,
            **kwargs
        )
    # Remove any caller-supplied error_map so it is not forwarded to the
    # polling method via **kwargs below.
    kwargs.pop("error_map", None)

    def get_long_running_output(pipeline_response):
        # Deserialize the final LRO response into the documented model (or cls()).
        deserialized = self._deserialize("OperationStatusResponse", pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized

    if polling is True:
        polling_method = cast(
            AsyncPollingMethod,
            AsyncARMPolling(lro_delay, lro_options={"final-state-via": "azure-async-operation"}, **kwargs),
        )  # type: AsyncPollingMethod
    elif polling is False:
        polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
    else:
        polling_method = polling
    if cont_token:
        # Resume a previously started operation from its saved state.
        return AsyncLROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output,
        )
    return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)

begin_start.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/start"}  # type: ignore
async def _redeploy_initial(
    self, resource_group_name: str, vm_name: str, **kwargs: Any
) -> Optional[_models.OperationStatusResponse]:
    """Issue the raw redeploy request; returns the status model on 200, None on 202."""
    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
        304: ResourceNotModifiedError,
    }
    error_map.update(kwargs.pop("error_map", {}) or {})

    _headers = kwargs.pop("headers", {}) or {}
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version = kwargs.pop("api_version", _params.pop("api-version", "2017-03-30"))  # type: str
    cls = kwargs.pop("cls", None)  # type: ClsType[Optional[_models.OperationStatusResponse]]

    req = build_redeploy_request(
        resource_group_name=resource_group_name,
        vm_name=vm_name,
        subscription_id=self._config.subscription_id,
        api_version=api_version,
        template_url=self._redeploy_initial.metadata["url"],
        headers=_headers,
        params=_params,
    )
    req = _convert_request(req)
    req.url = self._client.format_url(req.url)  # type: ignore

    pipeline_resp = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
        req, stream=False, **kwargs
    )
    http_resp = pipeline_resp.http_response

    if http_resp.status_code not in (200, 202):
        map_error(status_code=http_resp.status_code, response=http_resp, error_map=error_map)
        raise HttpResponseError(response=http_resp, error_format=ARMErrorFormat)

    # 202 (Accepted) carries no body; only 200 is deserialized.
    deserialized = (
        self._deserialize("OperationStatusResponse", pipeline_resp)
        if http_resp.status_code == 200
        else None
    )
    if cls:
        return cls(pipeline_resp, deserialized, {})
    return deserialized

_redeploy_initial.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/redeploy"}  # type: ignore
@distributed_trace_async
async def begin_redeploy(
    self, resource_group_name: str, vm_name: str, **kwargs: Any
) -> AsyncLROPoller[_models.OperationStatusResponse]:
    """Shuts down the virtual machine, moves it to a new node, and powers it back on.

    :param resource_group_name: The name of the resource group. Required.
    :type resource_group_name: str
    :param vm_name: The name of the virtual machine. Required.
    :type vm_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
     this operation to not poll, or pass in your own initialized polling object for a personal
     polling strategy.
    :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
     Retry-After header is present.
    :return: An instance of AsyncLROPoller that returns either OperationStatusResponse or the
     result of cls(response)
    :rtype:
     ~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2017_03_30.models.OperationStatusResponse]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    _headers = kwargs.pop("headers", {}) or {}
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version = kwargs.pop("api_version", _params.pop("api-version", "2017-03-30"))  # type: str
    cls = kwargs.pop("cls", None)  # type: ClsType[_models.OperationStatusResponse]
    polling = kwargs.pop("polling", True)  # type: Union[bool, AsyncPollingMethod]
    lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
    cont_token = kwargs.pop("continuation_token", None)  # type: Optional[str]
    if cont_token is None:
        # Fresh LRO: send the initial request.  ``cls`` is overridden with a pass-through
        # lambda so the raw pipeline response (needed by the poller) is returned, not a model.
        raw_result = await self._redeploy_initial(  # type: ignore
            resource_group_name=resource_group_name,
            vm_name=vm_name,
            api_version=api_version,
            cls=lambda x, y, z: x,
            headers=_headers,
            params=_params,
            **kwargs
        )
    # Remove any caller-supplied error_map so it is not forwarded to the
    # polling method via **kwargs below.
    kwargs.pop("error_map", None)

    def get_long_running_output(pipeline_response):
        # Deserialize the final LRO response into the documented model (or cls()).
        deserialized = self._deserialize("OperationStatusResponse", pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized

    if polling is True:
        polling_method = cast(
            AsyncPollingMethod,
            AsyncARMPolling(lro_delay, lro_options={"final-state-via": "azure-async-operation"}, **kwargs),
        )  # type: AsyncPollingMethod
    elif polling is False:
        polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
    else:
        polling_method = polling
    if cont_token:
        # Resume a previously started operation from its saved state.
        return AsyncLROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output,
        )
    return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)

begin_redeploy.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/redeploy"}  # type: ignore
async def _perform_maintenance_initial(
    self, resource_group_name: str, vm_name: str, **kwargs: Any
) -> Optional[_models.OperationStatusResponse]:
    """Issue the raw performMaintenance request; returns the status model on 200, None on 202."""
    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
        304: ResourceNotModifiedError,
    }
    error_map.update(kwargs.pop("error_map", {}) or {})

    _headers = kwargs.pop("headers", {}) or {}
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version = kwargs.pop("api_version", _params.pop("api-version", "2017-03-30"))  # type: str
    cls = kwargs.pop("cls", None)  # type: ClsType[Optional[_models.OperationStatusResponse]]

    req = build_perform_maintenance_request(
        resource_group_name=resource_group_name,
        vm_name=vm_name,
        subscription_id=self._config.subscription_id,
        api_version=api_version,
        template_url=self._perform_maintenance_initial.metadata["url"],
        headers=_headers,
        params=_params,
    )
    req = _convert_request(req)
    req.url = self._client.format_url(req.url)  # type: ignore

    pipeline_resp = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
        req, stream=False, **kwargs
    )
    http_resp = pipeline_resp.http_response

    if http_resp.status_code not in (200, 202):
        map_error(status_code=http_resp.status_code, response=http_resp, error_map=error_map)
        raise HttpResponseError(response=http_resp, error_format=ARMErrorFormat)

    # 202 (Accepted) carries no body; only 200 is deserialized.
    deserialized = (
        self._deserialize("OperationStatusResponse", pipeline_resp)
        if http_resp.status_code == 200
        else None
    )
    if cls:
        return cls(pipeline_resp, deserialized, {})
    return deserialized

_perform_maintenance_initial.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/performMaintenance"}  # type: ignore
@distributed_trace_async
async def begin_perform_maintenance(
    self, resource_group_name: str, vm_name: str, **kwargs: Any
) -> AsyncLROPoller[_models.OperationStatusResponse]:
    """The operation to perform maintenance on a virtual machine.

    :param resource_group_name: The name of the resource group. Required.
    :type resource_group_name: str
    :param vm_name: The name of the virtual machine. Required.
    :type vm_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
     this operation to not poll, or pass in your own initialized polling object for a personal
     polling strategy.
    :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
     Retry-After header is present.
    :return: An instance of AsyncLROPoller that returns either OperationStatusResponse or the
     result of cls(response)
    :rtype:
     ~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2017_03_30.models.OperationStatusResponse]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    _headers = kwargs.pop("headers", {}) or {}
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version = kwargs.pop("api_version", _params.pop("api-version", "2017-03-30"))  # type: str
    cls = kwargs.pop("cls", None)  # type: ClsType[_models.OperationStatusResponse]
    polling = kwargs.pop("polling", True)  # type: Union[bool, AsyncPollingMethod]
    lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
    cont_token = kwargs.pop("continuation_token", None)  # type: Optional[str]
    if cont_token is None:
        # Fresh LRO: send the initial request.  ``cls`` is overridden with a pass-through
        # lambda so the raw pipeline response (needed by the poller) is returned, not a model.
        raw_result = await self._perform_maintenance_initial(  # type: ignore
            resource_group_name=resource_group_name,
            vm_name=vm_name,
            api_version=api_version,
            cls=lambda x, y, z: x,
            headers=_headers,
            params=_params,
            **kwargs
        )
    # Remove any caller-supplied error_map so it is not forwarded to the
    # polling method via **kwargs below.
    kwargs.pop("error_map", None)

    def get_long_running_output(pipeline_response):
        # Deserialize the final LRO response into the documented model (or cls()).
        deserialized = self._deserialize("OperationStatusResponse", pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized

    if polling is True:
        polling_method = cast(
            AsyncPollingMethod,
            AsyncARMPolling(lro_delay, lro_options={"final-state-via": "azure-async-operation"}, **kwargs),
        )  # type: AsyncPollingMethod
    elif polling is False:
        polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
    else:
        polling_method = polling
    if cont_token:
        # Resume a previously started operation from its saved state.
        return AsyncLROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output,
        )
    return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)

begin_perform_maintenance.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/performMaintenance"}  # type: ignore
async def _run_command_initial(
    self, resource_group_name: str, vm_name: str, parameters: Union[_models.RunCommandInput, IO], **kwargs: Any
) -> Optional[_models.RunCommandResult]:
    """Issue the raw runCommand request; returns the result model on 200, None on 202."""
    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
        304: ResourceNotModifiedError,
    }
    error_map.update(kwargs.pop("error_map", {}) or {})

    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version = kwargs.pop("api_version", _params.pop("api-version", "2017-03-30"))  # type: str
    content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None))  # type: Optional[str]
    cls = kwargs.pop("cls", None)  # type: ClsType[Optional[_models.RunCommandResult]]

    content_type = content_type or "application/json"
    # Raw stream/bytes bodies are sent as-is; model bodies are serialized to JSON.
    if isinstance(parameters, (IO, bytes)):
        _json, _content = None, parameters
    else:
        _json, _content = self._serialize.body(parameters, "RunCommandInput"), None

    req = build_run_command_request(
        resource_group_name=resource_group_name,
        vm_name=vm_name,
        subscription_id=self._config.subscription_id,
        api_version=api_version,
        content_type=content_type,
        json=_json,
        content=_content,
        template_url=self._run_command_initial.metadata["url"],
        headers=_headers,
        params=_params,
    )
    req = _convert_request(req)
    req.url = self._client.format_url(req.url)  # type: ignore

    pipeline_resp = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
        req, stream=False, **kwargs
    )
    http_resp = pipeline_resp.http_response

    if http_resp.status_code not in (200, 202):
        map_error(status_code=http_resp.status_code, response=http_resp, error_map=error_map)
        raise HttpResponseError(response=http_resp, error_format=ARMErrorFormat)

    # 202 (Accepted) carries no body; only 200 is deserialized.
    deserialized = (
        self._deserialize("RunCommandResult", pipeline_resp) if http_resp.status_code == 200 else None
    )
    if cls:
        return cls(pipeline_resp, deserialized, {})
    return deserialized

_run_command_initial.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/runCommand"}  # type: ignore
@overload
async def begin_run_command(
    self,
    resource_group_name: str,
    vm_name: str,
    parameters: _models.RunCommandInput,
    *,
    content_type: str = "application/json",
    **kwargs: Any
) -> AsyncLROPoller[_models.RunCommandResult]:
    # Overload for a model-typed ``parameters``; the shared implementation follows below.
    """Run command on the VM.

    :param resource_group_name: The name of the resource group. Required.
    :type resource_group_name: str
    :param vm_name: The name of the virtual machine. Required.
    :type vm_name: str
    :param parameters: Parameters supplied to the Run command operation. Required.
    :type parameters: ~azure.mgmt.compute.v2017_03_30.models.RunCommandInput
    :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
     Default value is "application/json".
    :paramtype content_type: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
     this operation to not poll, or pass in your own initialized polling object for a personal
     polling strategy.
    :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
     Retry-After header is present.
    :return: An instance of AsyncLROPoller that returns either RunCommandResult or the result of
     cls(response)
    :rtype:
     ~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2017_03_30.models.RunCommandResult]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
@overload
async def begin_run_command(
    self,
    resource_group_name: str,
    vm_name: str,
    parameters: IO,
    *,
    content_type: str = "application/json",
    **kwargs: Any
) -> AsyncLROPoller[_models.RunCommandResult]:
    # Overload for a raw IO/bytes ``parameters`` body; the shared implementation follows below.
    """Run command on the VM.

    :param resource_group_name: The name of the resource group. Required.
    :type resource_group_name: str
    :param vm_name: The name of the virtual machine. Required.
    :type vm_name: str
    :param parameters: Parameters supplied to the Run command operation. Required.
    :type parameters: IO
    :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
     Known values are: 'application/json', 'text/json'. Default value is "application/json".
    :paramtype content_type: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
     this operation to not poll, or pass in your own initialized polling object for a personal
     polling strategy.
    :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
     Retry-After header is present.
    :return: An instance of AsyncLROPoller that returns either RunCommandResult or the result of
     cls(response)
    :rtype:
     ~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2017_03_30.models.RunCommandResult]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
@distributed_trace_async
async def begin_run_command(
    self, resource_group_name: str, vm_name: str, parameters: Union[_models.RunCommandInput, IO], **kwargs: Any
) -> AsyncLROPoller[_models.RunCommandResult]:
    """Run command on the VM.

    Long-running operation: issues the initial request, then returns a poller
    that tracks completion via the Azure-AsyncOperation header.

    :param resource_group_name: The name of the resource group. Required.
    :type resource_group_name: str
    :param vm_name: The name of the virtual machine. Required.
    :type vm_name: str
    :param parameters: Parameters supplied to the Run command operation. Is either a model type or
     a IO type. Required.
    :type parameters: ~azure.mgmt.compute.v2017_03_30.models.RunCommandInput or IO
    :keyword content_type: Body Parameter content-type. Known values are: 'application/json',
     'text/json'. Default value is None.
    :paramtype content_type: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
     this operation to not poll, or pass in your own initialized polling object for a personal
     polling strategy.
    :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
     Retry-After header is present.
    :return: An instance of AsyncLROPoller that returns either RunCommandResult or the result of
     cls(response)
    :rtype:
     ~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2017_03_30.models.RunCommandResult]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    # Pop per-call header/query overrides out of kwargs so they are not
    # forwarded twice to the pipeline.
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version = kwargs.pop("api_version", _params.pop("api-version", "2017-03-30"))  # type: str
    content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None))  # type: Optional[str]
    cls = kwargs.pop("cls", None)  # type: ClsType[_models.RunCommandResult]
    polling = kwargs.pop("polling", True)  # type: Union[bool, AsyncPollingMethod]
    lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
    cont_token = kwargs.pop("continuation_token", None)  # type: Optional[str]
    # Only issue the initial request when not resuming from a saved poller state.
    if cont_token is None:
        raw_result = await self._run_command_initial(  # type: ignore
            resource_group_name=resource_group_name,
            vm_name=vm_name,
            parameters=parameters,
            api_version=api_version,
            content_type=content_type,
            cls=lambda x, y, z: x,  # keep the raw pipeline response for the poller
            headers=_headers,
            params=_params,
            **kwargs
        )
    kwargs.pop("error_map", None)

    def get_long_running_output(pipeline_response):
        # Deserialize the terminal LRO response into the model (or hand both
        # to the caller-supplied `cls` callback).
        deserialized = self._deserialize("RunCommandResult", pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized

    # Resolve the polling strategy: True -> ARM polling, False -> no polling,
    # anything else is treated as a caller-provided polling method.
    if polling is True:
        polling_method = cast(
            AsyncPollingMethod,
            AsyncARMPolling(lro_delay, lro_options={"final-state-via": "azure-async-operation"}, **kwargs),
        )  # type: AsyncPollingMethod
    elif polling is False:
        polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
    else:
        polling_method = polling
    if cont_token:
        return AsyncLROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output,
        )
    return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)

begin_run_command.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/runCommand"}  # type: ignore
| {
"content_hash": "eed318111bbe19d13f71c695756be248",
"timestamp": "",
"source": "github",
"line_count": 2262,
"max_line_length": 217,
"avg_line_length": 47.98187444739169,
"alnum_prop": 0.6376192011793431,
"repo_name": "Azure/azure-sdk-for-python",
"id": "ee3f45f46a9ad78fa63313b6b20f195fa55422a7",
"size": "109035",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2017_03_30/aio/operations/_virtual_machines_operations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
# URLconf for the attachments app.
#
# NOTE(review): this uses the pre-Django-1.4 API -- ``django.conf.urls.defaults``
# plus ``patterns('', ...)`` with dotted-string view references -- both of which
# were removed in later Django releases.
from django.conf.urls.defaults import *

urlpatterns = patterns('',
    # Attach a file to an arbitrary model instance, addressed by app label,
    # model (module) name and primary key.
    url(r'^add-for/(?P<app_label>[\w\-]+)/(?P<module_name>[\w\-]+)/(?P<pk>\d+)/$', 'attachments.views.add_attachment', name="add_attachment"),
    # Delete a single attachment by its own primary key.
    url(r'^delete/(?P<attachment_pk>\d+)/$', 'attachments.views.delete_attachment', name="delete_attachment"),
)
"content_hash": "9432c5ab00be888885a81da551c66af3",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 142,
"avg_line_length": 53.833333333333336,
"alnum_prop": 0.6470588235294118,
"repo_name": "vitan/django-attachments",
"id": "d7c94d42f80b223f014baf97e3acdeb4802e8309",
"size": "323",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "attachments/urls.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "9210"
}
],
"symlink_target": ""
} |
from django.core.cache import cache
from devserver.modules import DevServerModule
class CacheSummaryModule(DevServerModule):
    """
    Outputs a summary of cache events once a response is ready.
    """

    # When False, cache calls are only aggregated and reported once in
    # process_complete(); subclasses may set True to pass the logger to the
    # tracking wrapper so calls can be reported as they happen.
    real_time = False
    logger_name = 'cache'

    # django.core.cache backend methods that get wrapped for tracking.
    attrs_to_track = ['set', 'get', 'delete', 'add', 'get_many']

    def process_init(self, request):
        from devserver.utils.stats import track

        # save our current attributes
        self.old = dict((k, getattr(cache, k)) for k in self.attrs_to_track)

        # Monkey-patch each tracked method with a stats-recording wrapper.
        # Must happen after the originals were saved above.
        for k in self.attrs_to_track:
            setattr(cache, k, track(getattr(cache, k), 'cache', self.logger if self.real_time else None))

    def process_complete(self, request):
        from devserver.utils.stats import stats

        calls = stats.get_total_calls('cache')
        hits = stats.get_total_hits('cache')
        # Misses are only counted for the read operations (get / get_many).
        misses = stats.get_total_misses_for_function('cache', cache.get) + stats.get_total_misses_for_function('cache', cache.get_many)

        if calls and (hits or misses):
            ratio = int(hits / float(misses + hits) * 100)
        else:
            # No reads recorded; report 100% rather than dividing by zero.
            ratio = 100

        if not self.real_time:
            self.logger.info('%(calls)s calls made with a %(ratio)d%% hit percentage (%(misses)s misses)' % dict(
                calls=calls,
                ratio=ratio,
                hits=hits,
                misses=misses,
            ), duration=stats.get_total_time('cache'))

        # set our attributes back to their defaults
        for k, v in self.old.items():
            setattr(cache, k, v)
class CacheRealTimeModule(CacheSummaryModule):
    """
    Variant of CacheSummaryModule that hands the module logger to the cache
    tracking wrapper (see process_init), so cache activity is reported as it
    happens instead of only in the end-of-response summary.
    """

    real_time = True
| {
"content_hash": "cfa4ea3a6307f391fbeb9d443eb4f085",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 135,
"avg_line_length": 32.09803921568628,
"alnum_prop": 0.6078191814294441,
"repo_name": "coagulant/django-devserver",
"id": "e8e9b1333c27cfbde196732f4974131880ce4b82",
"size": "1637",
"binary": false,
"copies": "4",
"ref": "refs/heads/dev",
"path": "devserver/modules/cache.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "47850"
}
],
"symlink_target": ""
} |
# Python 2 script: prints letter and word frequency tables for the text of
# "Pride and Prejudice", using helpers from the project-local Functions module.
import Functions
from operator import itemgetter

f = open("PrideAndPrejudice.txt")
Text = f.read()

LetterFrequency = {}
WordFrequency = {}

#The following code uses CreateList and DictionaryUpdate to create a list then
#updating the associated dictionary. The list of letters is then displayed in
#descending order of their usage.
for a in Text:
    # NOTE(review): CreateList is invoked once per character of the text --
    # presumably it normalises/filters the character; confirm in Functions.py.
    LetterList = Functions.CreateList(a)
    for Letter in LetterList:
        Functions.DictionaryUpdate(Letter,LetterFrequency)

#The dictionary is sorted then reverse to display in descending order.
print "This program displays the letter and words used in Pride and Prejudice, sorted by their amount of usage.\n"
print "The Frequency of Letters is given in the following table:\n"
print "{:<8} {:<15}".format('Letter','Frequency')
for key, value in sorted(LetterFrequency.items(), key=itemgetter(1), reverse=True):
    print "{:<8} {:<15}".format(key,value)

#This portion of the code deals with counting words and displaying them in descending order.
#For limits, displaying has been restricted to words with 500 occurences in the text.
print "\nThe Frequency of Words is given in the following table (Restricted to usage of 500 and above)\n"
print "{:<8} {:<15}".format('Word','Frequency')
Functions.WordCount(Text,WordFrequency)
for key,value in sorted(WordFrequency.items(), key=itemgetter(1), reverse=True):
    if value >= 500:
        print "{:<8} {:<15}".format(key,value)

f.close()
raw_input("\nPress Enter to Exit.")
| {
"content_hash": "84cfdb67a04548782588b095e31f4330",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 114,
"avg_line_length": 44.73529411764706,
"alnum_prop": 0.7232084155161078,
"repo_name": "samarthmenon/word-count-prideandprejudice",
"id": "c8de1cbd4c41e9cfe6b861e5655a3b5e159296e8",
"size": "1785",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "WordAndLetterFrequency.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "3373"
}
],
"symlink_target": ""
} |
import re
import urllib
import urlparse
import collections
class HeatIdentifier(collections.Mapping):
    """Identity of a Heat stack, usable as a read-only mapping of its fields."""

    # The identity exposes exactly these four keys, both as mapping keys
    # and (via __getattr__) as attributes.
    FIELDS = (
        TENANT, STACK_NAME, STACK_ID, PATH
    ) = (
        'tenant', 'stack_name', 'stack_id', 'path'
    )

    # Matches the "stacks/<name>/<id><path>" tail of a Heat ARN.
    path_re = re.compile(r'stacks/([^/]+)/([^/]+)(.*)')

    def __init__(self, tenant, stack_name, stack_id, path=''):
        '''
        Initialise a HeatIdentifier from a Tenant ID, Stack name, Stack ID
        and optional path. If a path is supplied and it does not begin with
        "/", a "/" will be prepended.
        '''
        if path and not path.startswith('/'):
            path = '/' + path

        if '/' in stack_name:
            raise ValueError('Stack name may not contain "/"')

        self.identity = {
            self.TENANT: tenant,
            self.STACK_NAME: stack_name,
            self.STACK_ID: str(stack_id),
            self.PATH: path,
        }

    @classmethod
    def from_arn(cls, arn):
        '''
        Return a new HeatIdentifier generated by parsing the supplied ARN.
        '''
        fields = arn.split(':')

        if len(fields) < 6 or fields[0].lower() != 'arn':
            raise ValueError('"%s" is not a valid ARN' % arn)

        # The stack path itself may contain ":" characters, so re-join
        # everything after the fifth separator before matching.
        id_fragment = ':'.join(fields[5:])
        path = cls.path_re.match(id_fragment)

        if fields[1] != 'openstack' or fields[2] != 'heat' or not path:
            raise ValueError('"%s" is not a valid Heat ARN' % arn)

        return cls(urllib.unquote(fields[4]),
                   urllib.unquote(path.group(1)),
                   urllib.unquote(path.group(2)),
                   urllib.unquote(path.group(3)))

    @classmethod
    def from_arn_url(cls, url):
        '''
        Return a new HeatIdentifier generated by parsing the supplied URL.

        The URL is expected to contain a valid arn as part of the path.
        '''
        # Sanity check the URL
        urlp = urlparse.urlparse(url)
        if (urlp.scheme not in ('http', 'https') or
                not urlp.netloc or not urlp.path):
            raise ValueError('"%s" is not a valid URL' % url)

        # Remove any query-string and extract the ARN; the "%3A" sequences
        # are the URL-quoted ":" separators of the ARN prefix.
        arn_url_prefix = '/arn%3Aopenstack%3Aheat%3A%3A'
        match = re.search(arn_url_prefix, urlp.path, re.IGNORECASE)
        if match is None:
            raise ValueError('"%s" is not a valid ARN URL' % url)
        # the +1 is to skip the leading /
        url_arn = urlp.path[match.start() + 1:]
        arn = urllib.unquote(url_arn)
        return cls.from_arn(arn)

    def arn(self):
        '''
        Return an ARN of the form:
            arn:openstack:heat::<tenant>:stacks/<stack_name>/<stack_id><path>
        '''
        return 'arn:openstack:heat::%s:%s' % (urllib.quote(self.tenant, ''),
                                              self._tenant_path())

    def arn_url_path(self):
        '''
        Return an ARN quoted correctly for use in a URL.
        '''
        return '/' + urllib.quote(self.arn(), '')

    def url_path(self):
        '''
        Return a URL-encoded path segment of a URL in the form:
            <tenant>/stacks/<stack_name>/<stack_id><path>
        '''
        return '/'.join((urllib.quote(self.tenant, ''), self._tenant_path()))

    def _tenant_path(self):
        '''
        Return a URL-encoded path segment of a URL within a particular tenant,
        in the form:
            stacks/<stack_name>/<stack_id><path>
        '''
        # Name and id are quoted with safe='' (so "/" is escaped too);
        # the trailing path keeps the default safe chars, preserving "/".
        return 'stacks/%s/%s%s' % (urllib.quote(self.stack_name, ''),
                                   urllib.quote(self.stack_id, ''),
                                   urllib.quote(self.path))

    def _path_components(self):
        '''Return a list of the path components.'''
        return self.path.lstrip('/').split('/')

    def __getattr__(self, attr):
        '''
        Return one of the components of the identity when accessed as an
        attribute.
        '''
        if attr not in self.FIELDS:
            raise AttributeError('Unknown attribute "%s"' % attr)

        return self.identity[attr]

    def __getitem__(self, key):
        '''Return one of the components of the identity.'''
        if key not in self.FIELDS:
            raise KeyError('Unknown attribute "%s"' % key)

        return self.identity[key]

    def __len__(self):
        '''Return the number of components in an identity.'''
        return len(self.FIELDS)

    def __contains__(self, key):
        # Mapping membership is over the fixed field names, not their values.
        return key in self.FIELDS

    def __iter__(self):
        return iter(self.FIELDS)

    def __repr__(self):
        return repr(dict(self))
class ResourceIdentifier(HeatIdentifier):
    '''An identifier for a resource.'''

    RESOURCE_NAME = 'resource_name'

    def __init__(self, tenant, stack_name, stack_id, path,
                 resource_name=None):
        '''
        Build a Resource identifier from the identity components of the
        owning stack plus the resource name.
        '''
        if resource_name is None:
            full_path = path
        else:
            if '/' in resource_name:
                raise ValueError('Resource name may not contain "/"')
            full_path = '%s/resources/%s' % (path.rstrip('/'), resource_name)
        super(ResourceIdentifier, self).__init__(tenant, stack_name,
                                                 stack_id, full_path)

    def __getattr__(self, attr):
        '''
        Expose the resource name as an attribute, alongside the identity
        fields handled by the base class.
        '''
        if attr != self.RESOURCE_NAME:
            return HeatIdentifier.__getattr__(self, attr)
        return self._path_components()[-1]

    def stack(self):
        '''
        Return a HeatIdentifier for the stack that owns this resource.
        '''
        owner_path = '/'.join(self._path_components()[:-2])
        return HeatIdentifier(self.tenant, self.stack_name, self.stack_id,
                              owner_path)
class EventIdentifier(HeatIdentifier):
    '''An identifier for an event.'''

    (RESOURCE_NAME, EVENT_ID) = (ResourceIdentifier.RESOURCE_NAME, 'event_id')

    def __init__(self, tenant, stack_name, stack_id, path,
                 event_id=None):
        '''
        Build an Event identifier from the identity components of the
        associated resource plus the event ID.
        '''
        full_path = path
        if event_id is not None:
            full_path = '%s/events/%s' % (path.rstrip('/'), event_id)
        super(EventIdentifier, self).__init__(tenant, stack_name,
                                              stack_id, full_path)

    def __getattr__(self, attr):
        '''
        Expose the event ID and resource name as attributes, alongside the
        identity fields handled by the base class.
        '''
        if attr == self.EVENT_ID:
            return self._path_components()[-1]
        if attr == self.RESOURCE_NAME:
            return getattr(self.resource(), attr)
        return HeatIdentifier.__getattr__(self, attr)

    def resource(self):
        '''
        Return a ResourceIdentifier for the resource this event belongs to.
        '''
        owner_path = '/'.join(self._path_components()[:-2])
        return ResourceIdentifier(self.tenant, self.stack_name,
                                  self.stack_id, owner_path)

    def stack(self):
        '''
        Return a HeatIdentifier for the stack that owns this event.
        '''
        return self.resource().stack()
| {
"content_hash": "f0934e3e4d1fbee6fc559fd93002ae51",
"timestamp": "",
"source": "github",
"line_count": 224,
"max_line_length": 78,
"avg_line_length": 33.17857142857143,
"alnum_prop": 0.5291980624327234,
"repo_name": "rickerc/heat_audit",
"id": "9a44aaf305dba8071efc1958c9468eb34caae3bc",
"size": "8051",
"binary": false,
"copies": "4",
"ref": "refs/heads/cis-havana-staging",
"path": "heat/common/identifier.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2811491"
},
{
"name": "Shell",
"bytes": "21618"
}
],
"symlink_target": ""
} |
import os
import unittest
from external.wip import work_in_progress
from nose.plugins.attrib import attr
from rmgpy import settings
from rmgpy.data.rmg import RMGDatabase, database
from rmgpy.rmg.main import RMG
from rmgpy.rmg.model import Species
from rmgpy.data.thermo import *
from rmgpy.molecule.molecule import Molecule
import rmgpy
################################################################################
def setUpModule():
    """A function that is run ONCE before all unit tests in this module."""
    global database
    # Loading the thermo database is expensive, so it is done once here and
    # shared by every test class via the module-level `database` global.
    database = RMGDatabase()
    database.loadThermo(os.path.join(settings['database.directory'], 'thermo'))
def tearDownModule():
    """A function that is run ONCE after all unit tests in this module."""
    from rmgpy.data import rmg
    # Drop the global database reference so later test modules start clean.
    rmg.database = None
class TestThermoDatabaseLoading(unittest.TestCase):
    """Tests of error handling while loading the thermo database."""

    def testFailingLoadsThermoLibraries(self):
        """Requesting a nonexistent thermo library must raise an exception."""
        thermo_db = ThermoDatabase()
        requested = [
            'primaryThermoLibrary',
            'GRI-Mech3.0',
            'I am a library not existing in official RMG',
        ]
        library_dir = os.path.join(settings['database.directory'],
                                   'thermo', 'libraries')
        with self.assertRaises(Exception):
            thermo_db.loadLibraries(library_dir, requested)
class TestThermoDatabase(unittest.TestCase):
    """
    Contains unit tests of the ThermoDatabase class.
    """

    @classmethod
    def setUpClass(self):
        """A function that is run ONCE before all unit tests in this class."""
        # NOTE(review): the first argument of this classmethod is conventionally
        # named `cls`; the assignments below create class attributes.
        global database
        self.database = database.thermo

        # A second database with no libraries, so group-additivity estimates
        # can be exercised without library matches taking precedence.
        self.databaseWithoutLibraries = ThermoDatabase()
        self.databaseWithoutLibraries.load(os.path.join(settings['database.directory'], 'thermo'), libraries=[])

    def testPickle(self):
        """
        Test that a ThermoDatabase object can be successfully pickled and
        unpickled with no loss of information.
        """
        import cPickle
        thermodb0 = cPickle.loads(cPickle.dumps(self.database))

        self.assertEqual(thermodb0.libraryOrder, self.database.libraryOrder)
        self.assertEqual(sorted(thermodb0.depository.keys()),
                         sorted(self.database.depository.keys()))
        self.assertEqual(sorted(thermodb0.libraries.keys()),
                         sorted(self.database.libraries.keys()))
        self.assertEqual(sorted(thermodb0.groups.keys()),
                         sorted(self.database.groups.keys()))

        # Bug fix: these used to be two-argument assertTrue calls, which always
        # pass (the second argument is the failure message) -- assertEqual on
        # the types is what was intended.
        for key, depository0 in thermodb0.depository.iteritems():
            depository = self.database.depository[key]
            self.assertEqual(type(depository0), type(depository))
            self.assertEqual(sorted(depository0.entries.keys()), sorted(depository.entries.keys()))
        for key, library0 in thermodb0.libraries.iteritems():
            library = self.database.libraries[key]
            self.assertEqual(type(library0), type(library))
            self.assertEqual(sorted(library0.entries.keys()), sorted(library.entries.keys()))
        for key, group0 in thermodb0.groups.iteritems():
            group = self.database.groups[key]
            self.assertEqual(type(group0), type(group))
            self.assertEqual(sorted(group0.entries.keys()), sorted(group.entries.keys()))

    def testSymmetryAddedByGetThermoData(self):
        """
        Test that `getThermoData` properly accounts for symmetry in thermo
        by comparing with the method `estimateThermoViaGroupAdditivity`.
        """
        spc = Species(molecule=[Molecule().fromSMILES('C[CH]C=CC')])

        thermoWithSym = self.databaseWithoutLibraries.getThermoData(spc)
        thermoWithoutSym = self.databaseWithoutLibraries.estimateThermoViaGroupAdditivity(spc.molecule[0])

        symmetryNumber = spc.getSymmetryNumber()
        self.assertNotEqual(symmetryNumber, spc.molecule[0].getSymmetryNumber(),
                            'For this test to be robust, species symmetry ({}) and molecule symmetry ({}) must be different'.format(symmetryNumber, spc.molecule[0].getSymmetryNumber()))

        symmetryContributionToEntropy = - constants.R * math.log(symmetryNumber)
        # Bug fix: the message used to be passed as the third POSITIONAL
        # argument of assertAlmostEqual, which is `places`, not `msg`, and
        # would raise a TypeError when evaluated.
        self.assertAlmostEqual(thermoWithSym.getEntropy(298.),
                               thermoWithoutSym.getEntropy(298.) + symmetryContributionToEntropy,
                               msg='The symmetry contribution is wrong {:.3f} /= {:.3f} + {:.3f}'.format(thermoWithSym.getEntropy(298.), thermoWithoutSym.getEntropy(298.), symmetryContributionToEntropy))

    def testSymmetryContributionRadicals(self):
        """
        Test that the symmetry contribution is correctly added for radicals
        estimated via the HBI method.

        This is done by testing thermoData from a database and from group
        additivity and ensuring they give the correct value.
        """
        spc = Species(molecule=[Molecule().fromSMILES('[CH3]')])

        thermoData_lib = self.database.getThermoData(spc)
        thermoData_ga = self.databaseWithoutLibraries.getThermoData(spc)

        self.assertAlmostEqual(thermoData_lib.getEntropy(298.), thermoData_ga.getEntropy(298.), 0)

    def testParseThermoComments(self):
        """
        Test that the ThermoDatabase.extractSourceFromComments function works properly
        on various thermo comments.
        """
        from rmgpy.thermo import NASA, NASAPolynomial

        # Pure group additivity thermo.
        GAVspecies = Species(index=3, label="c1c(O)c(O)c(CC(C)CC)cc1", thermo=NASA(polynomials=[NASAPolynomial(coeffs=[-1.18833,0.11272,-4.26393e-05,-2.12017e-08,1.441e-11,-51642.9,38.8904], Tmin=(100,'K'), Tmax=(1078.35,'K')),
            NASAPolynomial(coeffs=[26.6057,0.0538434,-2.22538e-05,4.22393e-09,-3.00808e-13,-60208.4,-109.218], Tmin=(1078.35,'K'), Tmax=(5000,'K'))],
            Tmin=(100,'K'), Tmax=(5000,'K'), comment="""Thermo group additivity estimation: group(Cs-CsCsCsH) + group(Cs-CsCsHH) + longDistanceInteraction_noncyclic(CsCs-ST) +
group(Cs-CbCsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cb-Cs) + group(Cb-Os) + group(Cb-Os) + group(Cb-H) +
group(Cb-H) + group(Cb-H) + group(Os-CbH) + group(Os-CbH) + longDistanceInteraction_cyclic(o_OH_OH) +
longDistanceInteraction_cyclic(o_OH_OH) + ring(Benzene)"""), molecule=[Molecule(SMILES="c1c(O)c(O)c(CC(C)CC)cc1")])

        source = self.database.extractSourceFromComments(GAVspecies)
        self.assertTrue('GAV' in source, 'Should have found that the thermo source is GAV.')
        self.assertEqual(len(source['GAV']['group']), 8)
        self.assertEqual(len(source['GAV']['longDistanceInteraction_noncyclic']), 1)
        self.assertEqual(len(source['GAV']['longDistanceInteraction_cyclic']), 1)
        self.assertEqual(len(source['GAV']['ring']), 1)

        # Pure library thermo
        dipk = Species(index=1, label="DIPK", thermo=
            NASA(polynomials=[NASAPolynomial(coeffs=[3.35002,0.017618,-2.46235e-05,1.7684e-08,-4.87962e-12,35555.7,5.75335], Tmin=(100,'K'), Tmax=(888.28,'K')),
            NASAPolynomial(coeffs=[6.36001,0.00406378,-1.73509e-06,5.05949e-10,-4.49975e-14,35021,-8.41155], Tmin=(888.28,'K'), Tmax=(5000,'K'))],
            Tmin=(100,'K'), Tmax=(5000,'K'), comment="""Thermo library: DIPK"""), molecule=[Molecule(SMILES="CC(C)C(=O)C(C)C")])

        source = self.database.extractSourceFromComments(dipk)
        self.assertTrue('Library' in source)

        # Mixed library and HBI thermo
        dipk_rad = Species(index=4, label="R_tert", thermo=NASA(polynomials=[NASAPolynomial(coeffs=[2.90061,0.0298018,-7.06268e-05,6.9636e-08,-2.42414e-11,54431,5.44492], Tmin=(100,'K'), Tmax=(882.19,'K')),
            NASAPolynomial(coeffs=[6.70999,0.000201027,6.65617e-07,-7.99543e-11,4.08721e-15,54238.6,-9.73662], Tmin=(882.19,'K'), Tmax=(5000,'K'))],
            Tmin=(100,'K'), Tmax=(5000,'K'), comment="""Thermo library: DIPK + radical(C2CJCHO)"""), molecule=[Molecule(SMILES="C[C](C)C(=O)C(C)C"), Molecule(SMILES="CC(C)=C([O])C(C)C")])

        source = self.database.extractSourceFromComments(dipk_rad)
        self.assertTrue('Library' in source)
        self.assertTrue('GAV' in source)
        self.assertEqual(len(source['GAV']['radical']),1)

        # Pure QM thermo
        cineole = Species(index=6, label="Cineole", thermo=NASA(polynomials=[NASAPolynomial(coeffs=[-0.324129,0.0619667,9.71008e-05,-1.60598e-07,6.28285e-11,-38699.9,29.3686], Tmin=(100,'K'), Tmax=(985.52,'K')),
            NASAPolynomial(coeffs=[20.6043,0.0586913,-2.22152e-05,4.19949e-09,-3.06046e-13,-46791,-91.4152], Tmin=(985.52,'K'), Tmax=(5000,'K'))],
            Tmin=(100,'K'), Tmax=(5000,'K'), comment="""QM MopacMolPM3 calculation attempt 1"""), molecule=[Molecule(SMILES="CC12CCC(CC1)C(C)(C)O2")])

        source = self.database.extractSourceFromComments(cineole)
        self.assertTrue('QM' in source)

        # Mixed QM and HBI thermo
        cineole_rad = Species(index=7, label="CineoleRad", thermo=NASA(polynomials=[NASAPolynomial(coeffs=[-0.2897,0.0627717,8.63299e-05,-1.47868e-07,5.81665e-11,-14017.6,31.0266], Tmin=(100,'K'), Tmax=(988.76,'K')),
            NASAPolynomial(coeffs=[20.4836,0.0562555,-2.13903e-05,4.05725e-09,-2.96023e-13,-21915,-88.1205], Tmin=(988.76,'K'), Tmax=(5000,'K'))],
            Tmin=(100,'K'), Tmax=(5000,'K'), comment="""QM MopacMolPM3 calculation attempt 1 + radical(Cs_P)"""), molecule=[Molecule(SMILES="[CH2]C12CCC(CC1)C(C)(C)O2")])

        source = self.database.extractSourceFromComments(cineole_rad)
        self.assertTrue('QM' in source)
        self.assertTrue('GAV' in source)
        self.assertEqual(len(source['GAV']['radical']),1)

        # No thermo comments
        other = Species(index=7, label="CineoleRad", thermo=NASA(polynomials=[NASAPolynomial(coeffs=[-0.2897,0.0627717,8.63299e-05,-1.47868e-07,5.81665e-11,-14017.6,31.0266], Tmin=(100,'K'), Tmax=(988.76,'K')),
            NASAPolynomial(coeffs=[20.4836,0.0562555,-2.13903e-05,4.05725e-09,-2.96023e-13,-21915,-88.1205], Tmin=(988.76,'K'), Tmax=(5000,'K'))],
            Tmin=(100,'K'), Tmax=(5000,'K'), ), molecule=[Molecule(SMILES="[CH2]C12CCC(CC1)C(C)(C)O2")])

        # Function should complain if there's no thermo comments.
        # Bug fix: this assertion used to call
        # assertRaises(extractSourceFromComments(cineole_rad)), which asserted
        # nothing and left `other` (the comment-less species) unused.
        with self.assertRaises(Exception):
            self.database.extractSourceFromComments(other)

        # Check a dummy species that has plus and minus thermo group contributions
        polycyclic = Species(index=7, label="dummy", thermo=NASA(polynomials=[NASAPolynomial(coeffs=[-0.2897,0.0627717,8.63299e-05,-1.47868e-07,5.81665e-11,-14017.6,31.0266], Tmin=(100,'K'), Tmax=(988.76,'K')),
            NASAPolynomial(coeffs=[20.4836,0.0562555,-2.13903e-05,4.05725e-09,-2.96023e-13,-21915,-88.1205], Tmin=(988.76,'K'), Tmax=(5000,'K'))],
            Tmin=(100,'K'), Tmax=(5000,'K'), comment="""Thermo group additivity estimation: group(Cs-CsCsHH) + group(Cs-CsCsHH) - ring(Benzene)"""), molecule=[Molecule(SMILES="[CH2]C12CCC(CC1)C(C)(C)O2")])

        source = self.database.extractSourceFromComments(polycyclic)
        self.assertTrue('GAV' in source)
        self.assertEqual(source['GAV']['ring'][0][1],-1)  # the weight of benzene contribution should be -1
        self.assertEqual(source['GAV']['group'][0][1],2)  # weight of the group(Cs-CsCsHH) conbtribution should be 2

    def testSpeciesThermoGenerationHBILibrary(self):
        """Test thermo generation for species objects for HBI correction on library value.

        Ensure that molecule list is only reordered, and not changed after matching library value.
        """
        spec = Species().fromSMILES('C[CH]c1ccccc1')
        spec.generate_resonance_structures()
        initial = list(spec.molecule)  # Make a copy of the list
        thermo = self.database.getThermoData(spec)

        self.assertEqual(len(initial), len(spec.molecule))
        self.assertEqual(set(initial), set(spec.molecule))
        self.assertTrue('library' in thermo.comment, 'Thermo not found from library, test purpose not fulfilled.')

    def testSpeciesThermoGenerationHBIGAV(self):
        """Test thermo generation for species objects for HBI correction on group additivity value.

        Ensure that molecule list is only reordered, and not changed after group additivity.
        """
        spec = Species().fromSMILES('C[CH]c1ccccc1')
        spec.generate_resonance_structures()
        initial = list(spec.molecule)  # Make a copy of the list
        thermo = self.databaseWithoutLibraries.getThermoData(spec)

        self.assertEqual(len(initial), len(spec.molecule))
        self.assertEqual(set(initial), set(spec.molecule))
        self.assertTrue('group additivity' in thermo.comment, 'Thermo not found from GAV, test purpose not fulfilled.')

    def testSpeciesThermoGenerationLibrary(self):
        """Test thermo generation for species objects for library value.

        Ensure that the matched molecule is placed at the beginning of the list.
        """
        spec = Species().fromSMILES('c12ccccc1c(C=[CH])ccc2')
        arom = Molecule().fromAdjacencyList("""
multiplicity 2
1 C u0 p0 c0 {2,B} {3,B} {5,B}
2 C u0 p0 c0 {1,B} {4,B} {7,B}
3 C u0 p0 c0 {1,B} {6,B} {11,S}
4 C u0 p0 c0 {2,B} {8,B} {13,S}
5 C u0 p0 c0 {1,B} {9,B} {16,S}
6 C u0 p0 c0 {3,B} {10,B} {17,S}
7 C u0 p0 c0 {2,B} {10,B} {19,S}
8 C u0 p0 c0 {4,B} {9,B} {14,S}
9 C u0 p0 c0 {5,B} {8,B} {15,S}
10 C u0 p0 c0 {6,B} {7,B} {18,S}
11 C u0 p0 c0 {3,S} {12,D} {20,S}
12 C u1 p0 c0 {11,D} {21,S}
13 H u0 p0 c0 {4,S}
14 H u0 p0 c0 {8,S}
15 H u0 p0 c0 {9,S}
16 H u0 p0 c0 {5,S}
17 H u0 p0 c0 {6,S}
18 H u0 p0 c0 {10,S}
19 H u0 p0 c0 {7,S}
20 H u0 p0 c0 {11,S}
21 H u0 p0 c0 {12,S}
""")
        spec.generate_resonance_structures()

        self.assertTrue(arom.isIsomorphic(spec.molecule[1]))  # The aromatic structure should be the second one

        initial = list(spec.molecule)  # Make a copy of the list
        thermo = self.database.getThermoData(spec)

        self.assertEqual(len(initial), len(spec.molecule))
        self.assertEqual(set(initial), set(spec.molecule))
        self.assertTrue(arom.isIsomorphic(spec.molecule[0]))  # The aromatic structure should now be the first one
        self.assertTrue('library' in thermo.comment, 'Thermo not found from library, test purpose not fulfilled.')

    def testThermoEstimationNotAffectDatabase(self):
        """
        Test that estimating thermo for a polycyclic species does not mutate
        the data stored on the PolycyclicRing group tree root.
        """
        poly_root = self.database.groups['polycyclic'].entries['PolycyclicRing']
        previous_enthalpy = poly_root.data.getEnthalpy(298) / 4184.0
        smiles = 'C1C2CC1C=CC=C2'
        spec = Species().fromSMILES(smiles)
        spec.generate_resonance_structures()

        thermo_gav = self.database.getThermoDataFromGroups(spec)
        _, polycyclicGroups = self.database.getRingGroupsFromComments(thermo_gav)

        polycyclicGroupLabels = [polycyclicGroup.label for polycyclicGroup in polycyclicGroups]
        self.assertIn('PolycyclicRing', polycyclicGroupLabels)

        latter_enthalpy = poly_root.data.getEnthalpy(298) / 4184.0
        self.assertAlmostEqual(previous_enthalpy, latter_enthalpy, 2)
class TestThermoAccuracy(unittest.TestCase):
    """
    Contains tests for accuracy of thermo estimates and symmetry calculations.
    """

    @classmethod
    def setUpClass(self):
        """A function that is run ONCE before all unit tests in this class."""
        # NOTE(review): the first argument of this classmethod is conventionally
        # named `cls`; the assignment below creates a class attribute.
        global database
        self.database = database.thermo

    def setUp(self):
        """
        A function run before each unit test in this class.
        """
        self.Tlist = [300, 400, 500, 600, 800, 1000, 1500]

        # Each row: SMILES, expected symmetry number, H298 (kcal/mol via the
        # /4184 conversions below), S298 and Cp values at the Tlist temperatures
        # (cal/mol/K via the /4.184 conversions below).
        self.testCases = [
            # SMILES         symm  H298   S298  Cp300  Cp400  Cp500  Cp600  Cp800  Cp1000  Cp1500

            # 1,3-hexadiene decomposition products
            ['C=CC=CCC', 3, 13.45, 86.37, 29.49, 37.67, 44.54, 50.12, 58.66, 64.95, 74.71],
            ['[CH]=CC=CCC', 3, 72.55, 87.76, 29.30, 36.92, 43.18, 48.20, 55.84, 61.46, 70.18],
            ['C=[C]C=CCC', 3, 61.15, 87.08, 29.68, 36.91, 43.03, 48.11, 55.96, 61.78, 71.54],
            ['C=C[C]=CCC', 3, 61.15, 87.08, 29.68, 36.91, 43.03, 48.11, 55.96, 61.78, 71.54],
            ['C=CC=[C]CC', 3, 70.35, 88.18, 29.15, 36.46, 42.6, 47.6, 55.32, 61.04, 69.95],
            ['C=CC=C[CH]C', 6, 38.24, 84.41, 27.79, 35.46, 41.94, 47.43, 55.74, 61.92, 71.86],
            ['C=CC=CC[CH2]', 2, 62.45, 89.78, 28.72, 36.31, 42.63, 47.72, 55.50, 61.21, 70.05],
            ['[CH3]', 6, 34.81, 46.37, 9.14, 10.18, 10.81, 11.34, 12.57, 13.71, 15.2],
            ['C=CC=C[CH2]', 2, 46.11, 75.82, 22.54, 28.95, 34.24, 38.64, 45.14, 49.97, 57.85],
            ['[CH2]C', 6, 28.6, 59.87, 11.73, 14.47, 17.05, 19.34, 23.02, 25.91, 31.53],
            ['C=CC=[CH]', 1, 85.18, 69.37, 18.93, 23.55, 27.16, 29.92, 34.02, 37.03, 41.81],
            ['C=[CH]', 1, 71.62, 56.61, 10.01, 11.97, 13.66, 15.08, 17.32, 19.05, 21.85],
            ['[CH]=CCC', 3, 58.99, 75.0, 20.38, 25.34, 29.68, 33.36, 39.14, 43.48, 50.22],

            # Cyclic Structures
            ['C1CCCCC1', 12, -29.45, 69.71, 27.20, 37.60, 46.60, 54.80, 67.50, 76.20, 88.50],
            ['C1CCC1', 8, 6.51, 63.35, 17.39, 23.91, 29.86, 34.76, 42.40, 47.98, 56.33],
            ['C1C=CC=C1', 2, 32.5, 65.5, 18.16, 24.71, 30.25, 34.7, 41.25, 45.83, 52.61],
        ]

    @work_in_progress
    def testNewThermoGeneration(self):
        """
        Test that the new ThermoDatabase generates appropriate thermo data.
        """
        for smiles, symm, H298, S298, Cp300, Cp400, Cp500, Cp600, Cp800, Cp1000, Cp1500 in self.testCases:
            Cplist = [Cp300, Cp400, Cp500, Cp600, Cp800, Cp1000, Cp1500]
            species = Species().fromSMILES(smiles)
            species.generate_resonance_structures()
            thermoData = self.database.getThermoDataFromGroups(species)
            # Select the resonance structure with the lowest H298 estimate.
            molecule = species.molecule[0]
            for mol in species.molecule[1:]:
                thermoData0 = self.database.getAllThermoData(Species(molecule=[mol]))[0][0]
                for data in self.database.getAllThermoData(Species(molecule=[mol]))[1:]:
                    if data[0].getEnthalpy(298) < thermoData0.getEnthalpy(298):
                        thermoData0 = data[0]
                if thermoData0.getEnthalpy(298) < thermoData.getEnthalpy(298):
                    thermoData = thermoData0
                    molecule = mol
            # J/mol -> kcal/mol and J/mol/K -> cal/mol/K conversions below.
            self.assertAlmostEqual(H298, thermoData.getEnthalpy(298) / 4184, places=1,
                                   msg="H298 error for {0}. Expected {1}, but calculated {2}.".format(smiles, H298, thermoData.getEnthalpy(298) / 4184))
            self.assertAlmostEqual(S298, thermoData.getEntropy(298) / 4.184, places=1,
                                   msg="S298 error for {0}. Expected {1}, but calculated {2}.".format(smiles, S298, thermoData.getEntropy(298) / 4.184))
            for T, Cp in zip(self.Tlist, Cplist):
                self.assertAlmostEqual(Cp, thermoData.getHeatCapacity(T) / 4.184, places=1,
                                       msg="Cp{3} error for {0}. Expected {1} but calculated {2}.".format(smiles, Cp, thermoData.getHeatCapacity(T) / 4.184, T))

    @work_in_progress
    def testSymmetryNumberGeneration(self):
        """
        Test we generate symmetry numbers correctly.

        This uses the new thermo database to generate the H298, used
        to select the stablest resonance isomer.
        """
        for smiles, symm, H298, S298, Cp300, Cp400, Cp500, Cp600, Cp800, Cp1000, Cp1500 in self.testCases:
            species = Species().fromSMILES(smiles)
            species.generate_resonance_structures()
            thermoData = self.database.getThermoDataFromGroups(species)
            # pick the molecule with lowest H298
            molecule = species.molecule[0]
            for mol in species.molecule[1:]:
                thermoData0 = self.database.getAllThermoData(Species(molecule=[mol]))[0][0]
                for data in self.database.getAllThermoData(Species(molecule=[mol]))[1:]:
                    if data[0].getEnthalpy(298) < thermoData0.getEnthalpy(298):
                        thermoData0 = data[0]
                if thermoData0.getEnthalpy(298) < thermoData.getEnthalpy(298):
                    thermoData = thermoData0
                    molecule = mol
            self.assertEqual(symm, molecule.calculateSymmetryNumber(),
                             msg="Symmetry number error for {0}. Expected {1} but calculated {2}.".format(smiles, symm, molecule.calculateSymmetryNumber()))
class TestThermoAccuracyAromatics(TestThermoAccuracy):
    """
    Contains tests for accuracy of thermo estimates and symmetry calculations for aromatics only.
    A copy of the above class, but with different test compounds.
    """

    def setUp(self):
        """Replace the parent's test-case table with aromatic compounds only."""
        self.Tlist = [300, 400, 500, 600, 800, 1000, 1500]
        self.testCases = [
            # SMILES           symm  H298   S298   Cp300  Cp400  Cp500  Cp600  Cp800  Cp1000 Cp1500
            ['c1ccccc1', 12, 19.80, 64.24, 19.44, 26.64, 32.76, 37.80, 45.24, 50.46, 58.38],
            ['c1ccc2ccccc2c1', 4, 36.0, 79.49, 31.94, 42.88, 52.08, 59.62, 70.72, 78.68, 90.24],
        ]

    def __init__(self, *args, **kwargs):
        super(TestThermoAccuracyAromatics, self).__init__(*args, **kwargs)
        # Tag each inherited test's docstring so a failure report makes clear
        # it ran against the aromatic test-case table, not the parent's.
        self._testMethodDoc = self._testMethodDoc.strip().split('\n')[0] + " for Aromatics.\n"

    def testLongDistanceInteractionInAromaticMolecule(self):
        """
        Test long distance interaction is properly calculated for aromatic molecule.
        """
        spec = Species().fromSMILES('c(O)1c(O)c(C=O)c(C=O)c(O)c(C=O)1')
        spec.generate_resonance_structures()
        thermo = self.database.getThermoDataFromGroups(spec)
        self.assertIn('o_OH_OH', thermo.comment)
        self.assertIn('o_OH_CHO', thermo.comment)
        self.assertIn('o_CHO_CHO', thermo.comment)
        self.assertIn('m_CHO_CHO', thermo.comment)
        self.assertIn('p_OH_OH', thermo.comment)
        self.assertIn('p_OH_CHO', thermo.comment)
        self.assertIn('p_CHO_CHO', thermo.comment)

    def testLongDistanceInteractionInAromaticRadical(self):
        """
        Test long distance interaction is properly calculated for aromatic radical.
        """
        spec = Species().fromSMILES('c([O])1c(C=O)c(C=O)c(OC)cc1')
        spec.generate_resonance_structures()
        thermo = self.database.getThermoDataFromGroups(spec)
        self.assertNotIn('o_OH_CHO', thermo.comment)
        self.assertNotIn('p_OH_MeO', thermo.comment)
        self.assertIn('o_Oj_CHO', thermo.comment)
        self.assertIn('m_Oj_CHO', thermo.comment)
        self.assertIn('p_Oj_OCH3', thermo.comment)
        self.assertIn('o_CHO_CHO', thermo.comment)
        self.assertIn('o_CHO_MeO', thermo.comment)

    def testLongDistanceInteractionInAromaticBiradical(self):
        """
        Test long distance interaction is properly calculated for aromatic biradical.
        """
        spec = Species().fromSMILES('c([O])1c([C]=O)cc(C=O)cc1')
        spec.generate_resonance_structures()
        # Single estimation call; the original duplicated this expensive
        # getThermoDataFromGroups call back-to-back.
        thermo = self.database.getThermoDataFromGroups(spec)
        self.assertNotIn('o_OH_CHO', thermo.comment)
        self.assertNotIn('m_CHO_CHO', thermo.comment)
        self.assertNotIn('p_OH_CHO', thermo.comment)
        self.assertNotIn('o_Oj_CHO', thermo.comment)
        self.assertIn('m_Cj=O_CHO', thermo.comment)
class TestCyclicThermo(unittest.TestCase):
    """
    Contains unit tests of the ThermoDatabase class, focusing on cyclic and
    polycyclic ring corrections.
    """
    @classmethod
    def setUpClass(self):
        """A function that is run ONCE before all unit tests in this class."""
        # Reuse the module-level RMG database loaded once for the test session.
        global database
        self.database = database.thermo

    def testComputeGroupAdditivityThermoForTwoRingMolecule(self):
        """
        The molecule being tested has two rings, one is 13cyclohexadiene5methylene
        the other is benzene ring. This method is to test thermo estimation will
        give two different corrections accordingly.
        """
        spec = Species().fromSMILES('CCCCCCCCCCCC(CC=C1C=CC=CC1)c1ccccc1')
        spec.generate_resonance_structures()
        thermo = self.database.getThermoDataFromGroups(spec)
        # Two monocyclic corrections expected, no polycyclic correction.
        ringGroups, polycyclicGroups = self.database.getRingGroupsFromComments(thermo)
        self.assertEqual(len(ringGroups),2)
        self.assertEqual(len(polycyclicGroups),0)
        expected_matchedRingsLabels = ['13cyclohexadiene5methylene', 'Benzene']
        expected_matchedRings = [self.database.groups['ring'].entries[label] for label in expected_matchedRingsLabels]
        self.assertEqual(set(ringGroups), set(expected_matchedRings))

    def testThermoForMonocyclicAndPolycyclicSameMolecule(self):
        """
        Test a molecule that has both a polycyclic and a monocyclic ring in the same molecule
        """
        spec = Species().fromSMILES('C(CCC1C2CCC1CC2)CC1CCC1')
        spec.generate_resonance_structures()
        thermo = self.database.getThermoDataFromGroups(spec)
        ringGroups, polycyclicGroups = self.database.getRingGroupsFromComments(thermo)
        self.assertEqual(len(ringGroups),1)
        self.assertEqual(len(polycyclicGroups),1)
        # The isolated four-membered ring matches the Cyclobutane entry.
        expected_matchedRingsLabels = ['Cyclobutane']
        expected_matchedRings = [self.database.groups['ring'].entries[label] for label in expected_matchedRingsLabels]
        self.assertEqual(set(ringGroups), set(expected_matchedRings))
        # The fused bicyclic part matches the s3_5_5_ane polycyclic entry.
        expected_matchedPolyringsLabels = ['s3_5_5_ane']
        expected_matchedPolyrings = [self.database.groups['polycyclic'].entries[label] for label in expected_matchedPolyringsLabels]
        self.assertEqual(set(polycyclicGroups), set(expected_matchedPolyrings))

    def testGetRingGroupsFromComments(self):
        """
        Test that getRingGroupsFromComments method works for fused polycyclics.
        """
        from rmgpy.thermo.thermoengine import generateThermoData
        # two norbornane rings fused together
        smi = 'C12C(C3CCC2C3)C4CCC1C4'
        spc = Species().fromSMILES(smi)
        spc.thermo = generateThermoData(spc)
        # Should parse the thermo comment without raising.
        self.database.getRingGroupsFromComments(spc.thermo)

    def testRemoveGroup(self):
        """
        Test removing groups via nodes near the root of the radical group tree.
        """
        # load up test data designed for this test
        database2 = ThermoDatabase()
        path = os.path.join(os.path.dirname(rmgpy.__file__),'data/test_data/')
        database2.load(os.path.join(path, 'thermo'), depository = False)
        # load up the thermo radical database as a test
        radGroup = database2.groups['radical']
        # use root as removed group's parent, which should be a LogicOr node
        root = radGroup.top[0]
        # use group to remove as
        groupToRemove = radGroup.entries['RJ']
        children = groupToRemove.children
        # remove the group
        radGroup.removeGroup(groupToRemove)
        # afterwards groupToRemove should not be in the database or root's children
        self.assertFalse(groupToRemove in radGroup.entries.values())
        self.assertFalse(groupToRemove in root.children)
        for child in children:
            # groupToRemove's children should all be in root's item.components and children attributes
            self.assertTrue(child.label in root.item.components)
            self.assertTrue(child in root.children)
            # the children should all have root as their parent now
            self.assertTrue(child.parent is root)
        # Specific to ThermoDatabase, (above tests apply to all base class Database)
        # we check that unicode entry.data pointers are correctly reassigned
        # if groupToRemove is a pointer and another node pointed to it, we copy
        # groupToRemove pointer
        self.assertTrue(radGroup.entries['OJ'].data is groupToRemove.data)
        # Remove an entry with a ThermoData object
        groupToRemove2 = radGroup.entries['CsJ']
        radGroup.removeGroup(groupToRemove2)
        # If groupToRemove was a data object, we point toward parent instead
        self.assertTrue(radGroup.entries['RJ2_triplet'].data == groupToRemove2.parent.label)
        # If the parent pointed toward groupToRemove, we should have copied the data object
        Tlist=[300, 400, 500, 600, 800, 1000, 1500]
        self.assertFalse(isinstance(groupToRemove2.parent.data, basestring))
        self.assertTrue(groupToRemove2.parent.data.getEnthalpy(298) == groupToRemove2.data.getEnthalpy(298))
        self.assertTrue(groupToRemove2.parent.data.getEntropy(298) == groupToRemove2.data.getEntropy(298))
        self.assertFalse(False in [groupToRemove2.parent.data.getHeatCapacity(x) == groupToRemove2.data.getHeatCapacity(x) for x in Tlist])

    def testIsRingPartialMatched(self):
        """
        Test that a polyring that only partially matches the generic
        PolycyclicRing group is flagged as a partial match.
        """
        # create testing molecule
        smiles = 'C1CC2CCCC3CCCC(C1)C23'
        mol = Molecule().fromSMILES(smiles)
        polyring = [atom for atom in mol.atoms if atom.isNonHydrogen()]
        # create matched group
        matched_group = self.database.groups['polycyclic'].entries['PolycyclicRing'].item
        # test
        self.assertTrue(isRingPartialMatched(polyring, matched_group))

    def testAddRingCorrectionThermoDataFromTreeForExistingTricyclic(self):
        """
        Test that a tricyclic already present in the polycyclic database is
        matched exactly by the tree search.
        """
        # create testing molecule: C1CC2C3CCC(C3)C2C1
        # this tricyclic molecule is already in polycyclic database
        # so algorithm should give complete match: s2-3_5_5_5_ane
        smiles = 'C1CC2C3CCC(C3)C2C1'
        mol = Molecule().fromSMILES(smiles)
        polyring = mol.getDisparateRings()[1][0]
        poly_groups = self.database.groups['polycyclic']
        _, matched_entry, _ = self.database._ThermoDatabase__addRingCorrectionThermoDataFromTree(None, poly_groups, mol, polyring)
        self.assertEqual(matched_entry.label, 's2-3_5_5_5_ane')

    def testAddPolyRingCorrectionThermoDataFromHeuristicUsingPyrene(self):
        """
        Test the heuristic polycyclic correction on pyrene isomers that mix
        aromatic and kekulized rings.
        """
        # create testing molecule: Pyrene with two rings of aromatic version
        # the other two rings of kekulized version
        #
        # creating it seems not natural in RMG, that's because
        # RMG cannot parse the adjacencyList of that isomer correctly
        # so here we start with pyrene radical and get the two aromatic ring isomer
        # then saturate it.
        smiles = 'C1C=C2C=CC=C3C=CC4=CC=CC=1C4=C23'
        spe = Species().fromSMILES(smiles)
        spe.generate_resonance_structures()
        # collect the resonance structures that have exactly two aromatic rings
        mols = []
        for mol in spe.molecule:
            sssr0 = mol.getSmallestSetOfSmallestRings()
            aromaticRingNum = 0
            for sr0 in sssr0:
                sr0mol = Molecule(atoms=sr0)
                if isAromaticRing(sr0mol):
                    aromaticRingNum += 1
            if aromaticRingNum == 2:
                mols.append(mol)
        ringGroupLabels = []
        polycyclicGroupLabels = []
        for mol in mols:
            polyring = mol.getDisparateRings()[1][0]
            # zeroed ThermoData accumulator for the correction terms
            thermoData = ThermoData(
                Tdata = ([300,400,500,600,800,1000,1500],"K"),
                Cpdata = ([0.0,0.0,0.0,0.0,0.0,0.0,0.0],"J/(mol*K)"),
                H298 = (0.0,"kJ/mol"),
                S298 = (0.0,"J/(mol*K)"),
            )
            self.database._ThermoDatabase__addPolyRingCorrectionThermoDataFromHeuristic(
                thermoData, polyring)
            ringGroups, polycyclicGroups = self.database.getRingGroupsFromComments(thermoData)
            ringGroupLabels += [ringGroup.label for ringGroup in ringGroups]
            polycyclicGroupLabels += [polycyclicGroup.label for polycyclicGroup in polycyclicGroups]
        self.assertIn('Benzene', ringGroupLabels)
        self.assertIn('Cyclohexene', ringGroupLabels)
        self.assertIn('s2_6_6_ben_ene_1', polycyclicGroupLabels)
        self.assertIn('s2_6_6_diene_2_7', polycyclicGroupLabels)

    def testAddPolyRingCorrectionThermoDataFromHeuristicUsingAromaticTricyclic(self):
        """
        Test the heuristic polycyclic correction on an aromatic tricyclic.
        """
        # create testing molecule
        #
        # creating it seems not natural in RMG, that's because
        # RMG cannot parse the adjacencyList of that isomer correctly
        # so here we start with kekulized version and generate_resonance_structures
        # and pick the one with two aromatic rings
        smiles = 'C1=CC2C=CC=C3C=CC(=C1)C=23'
        spe = Species().fromSMILES(smiles)
        spe.generate_resonance_structures()
        for mol in spe.molecule:
            sssr0 = mol.getSmallestSetOfSmallestRings()
            aromaticRingNum = 0
            for sr0 in sssr0:
                sr0mol = Molecule(atoms=sr0)
                if isAromaticRing(sr0mol):
                    aromaticRingNum += 1
            if aromaticRingNum == 2:
                break
        # extract polyring from the molecule
        polyring = mol.getDisparateRings()[1][0]
        # zeroed ThermoData accumulator for the correction terms
        thermoData = ThermoData(
            Tdata = ([300,400,500,600,800,1000,1500],"K"),
            Cpdata = ([0.0,0.0,0.0,0.0,0.0,0.0,0.0],"J/(mol*K)"),
            H298 = (0.0,"kJ/mol"),
            S298 = (0.0,"J/(mol*K)"),
        )
        self.database._ThermoDatabase__addPolyRingCorrectionThermoDataFromHeuristic(
            thermoData, polyring)
        ringGroups, polycyclicGroups = self.database.getRingGroupsFromComments(thermoData)
        ringGroupLabels = [ringGroup.label for ringGroup in ringGroups]
        polycyclicGroupLabels = [polycyclicGroup.label for polycyclicGroup in polycyclicGroups]
        self.assertIn('Benzene', ringGroupLabels)
        self.assertIn('Cyclopentene', ringGroupLabels)
        self.assertIn('s2_5_6_indene', polycyclicGroupLabels)
        self.assertIn('s2_6_6_naphthalene', polycyclicGroupLabels)

    def testAddPolyRingCorrectionThermoDataFromHeuristicUsingAlkaneTricyclic(self):
        """
        Test the heuristic polycyclic correction on a saturated alkane tricyclic.
        """
        # create testing molecule
        smiles = 'C1CC2CCCC3C(C1)C23'
        mol = Molecule().fromSMILES(smiles)
        # extract polyring from the molecule
        polyring = mol.getDisparateRings()[1][0]
        # zeroed ThermoData accumulator for the correction terms
        thermoData = ThermoData(
            Tdata = ([300,400,500,600,800,1000,1500],"K"),
            Cpdata = ([0.0,0.0,0.0,0.0,0.0,0.0,0.0],"J/(mol*K)"),
            H298 = (0.0,"kJ/mol"),
            S298 = (0.0,"J/(mol*K)"),
        )
        self.database._ThermoDatabase__addPolyRingCorrectionThermoDataFromHeuristic(
            thermoData, polyring)
        ringGroups, polycyclicGroups = self.database.getRingGroupsFromComments(thermoData)
        ringGroupLabels = [ringGroup.label for ringGroup in ringGroups]
        polycyclicGroupLabels = [polycyclicGroup.label for polycyclicGroup in polycyclicGroups]
        self.assertIn('Cyclohexane', ringGroupLabels)
        self.assertIn('Cyclopropane', ringGroupLabels)
        self.assertIn('s2_6_6_ane', polycyclicGroupLabels)
        self.assertIn('s2_3_6_ane', polycyclicGroupLabels)

    def testAddPolyRingCorrectionThermoDataFromHeuristicUsingHighlyUnsaturatedPolycyclics1(self):
        """
        Test proper thermo estimation for highly unsaturated polycyclic whose decomposed
        bicyclics are not stored in database. Those bicyclics thermo will be estimated through
        a heuristic formula.

        In the future, the test assertion may be updated if some of the decomposed bicyclics
        have been added to database.
        """
        # create testing molecule
        smiles = '[CH]=C1C2=C=C3C=CC1C=C32'
        mol = Molecule().fromSMILES(smiles)
        # extract polyring from the molecule
        polyring = mol.getDisparateRings()[1][0]
        # zeroed ThermoData accumulator for the correction terms
        thermoData = ThermoData(
            Tdata = ([300,400,500,600,800,1000,1500],"K"),
            Cpdata = ([0.0,0.0,0.0,0.0,0.0,0.0,0.0],"J/(mol*K)"),
            H298 = (0.0,"kJ/mol"),
            S298 = (0.0,"J/(mol*K)"),
        )
        self.database._ThermoDatabase__addPolyRingCorrectionThermoDataFromHeuristic(
            thermoData, polyring)
        ringGroups, polycyclicGroups = self.database.getRingGroupsFromComments(thermoData)
        ringGroupLabels = [ringGroup.label for ringGroup in ringGroups]
        polycyclicGroupLabels = [polycyclicGroup.label for polycyclicGroup in polycyclicGroups]
        self.assertIn('1,4-Cyclohexadiene', ringGroupLabels)
        self.assertIn('Cyclopentene', ringGroupLabels)
        self.assertIn('cyclobutadiene_13', ringGroupLabels)
        self.assertIn('s3_5_6_ane', polycyclicGroupLabels)
        self.assertIn('s2_4_6_ane', polycyclicGroupLabels)
        self.assertIn('s2_4_5_ane', polycyclicGroupLabels)

    def testAddPolyRingCorrectionThermoDataFromHeuristicUsingHighlyUnsaturatedPolycyclics2(self):
        """
        Test proper thermo estimation for highly unsaturated polycyclic whose decomposed
        bicyclics are not stored in database. Those bicyclics thermo will be estimated through
        a heuristic formula.

        In the future, the test assertion may be updated if some of the decomposed bicyclics
        have been added to database.
        """
        # create testing molecule
        smiles = 'C1=C2C#CC3C=CC1C=C23'
        mol = Molecule().fromSMILES(smiles)
        # extract polyring from the molecule
        polyring = mol.getDisparateRings()[1][0]
        # zeroed ThermoData accumulator for the correction terms
        thermoData = ThermoData(
            Tdata = ([300,400,500,600,800,1000,1500],"K"),
            Cpdata = ([0.0,0.0,0.0,0.0,0.0,0.0,0.0],"J/(mol*K)"),
            H298 = (0.0,"kJ/mol"),
            S298 = (0.0,"J/(mol*K)"),
        )
        self.database._ThermoDatabase__addPolyRingCorrectionThermoDataFromHeuristic(
            thermoData, polyring)
        ringGroups, polycyclicGroups = self.database.getRingGroupsFromComments(thermoData)
        ringGroupLabels = [ringGroup.label for ringGroup in ringGroups]
        polycyclicGroupLabels = [polycyclicGroup.label for polycyclicGroup in polycyclicGroups]
        self.assertIn('1,4-Cyclohexadiene', ringGroupLabels)
        self.assertIn('Cyclopentyne', ringGroupLabels)
        self.assertIn('Cyclopentadiene', ringGroupLabels)
        self.assertIn('s3_5_6_ane', polycyclicGroupLabels)
        self.assertIn('s2_5_6_ane', polycyclicGroupLabels)
        self.assertIn('s2_5_5_ane', polycyclicGroupLabels)

    def testGetBicyclicCorrectionThermoDataFromHeuristic1(self):
        """
        Test bicyclic correction estimated properly from heuristic formula
        The test molecule "C1=CCC2C1=C2" has a shared atom with Cd atomtype,
        but in the correction estimation we still expect the five-member ring
        part to match Cyclopentene
        """
        smiles = 'C1=CCC2C1=C2'
        mol = Molecule().fromSMILES(smiles)
        # extract polyring from the molecule
        polyring = mol.getDisparateRings()[1][0]
        thermoData = self.database.getBicyclicCorrectionThermoDataFromHeuristic(polyring)
        ringGroups, polycyclicGroups = self.database.getRingGroupsFromComments(thermoData)
        ringGroupLabels = [ringGroup.label for ringGroup in ringGroups]
        polycyclicGroupLabels = [polycyclicGroup.label for polycyclicGroup in polycyclicGroups]
        self.assertIn('Cyclopentane', ringGroupLabels)
        self.assertIn('Cyclopropane', ringGroupLabels)
        self.assertIn('Cyclopentene', ringGroupLabels)
        self.assertIn('Cyclopropene', ringGroupLabels)
        self.assertIn('s2_3_5_ane', polycyclicGroupLabels)

    def testGetBicyclicCorrectionThermoDataFromHeuristic2(self):
        """
        Test bicyclic correction estimated properly from heuristic formula
        The test molecule "C1=CCC2=C1C2" doesn't have controversial shared
        atomtypes in correction estimation, which is regarded as a simple case.
        """
        smiles = 'C1=CCC2=C1C2'
        mol = Molecule().fromSMILES(smiles)
        # extract polyring from the molecule
        polyring = mol.getDisparateRings()[1][0]
        thermoData = self.database.getBicyclicCorrectionThermoDataFromHeuristic(polyring)
        ringGroups, polycyclicGroups = self.database.getRingGroupsFromComments(thermoData)
        ringGroupLabels = [ringGroup.label for ringGroup in ringGroups]
        polycyclicGroupLabels = [polycyclicGroup.label for polycyclicGroup in polycyclicGroups]
        self.assertIn('Cyclopentane', ringGroupLabels)
        self.assertIn('Cyclopropane', ringGroupLabels)
        self.assertIn('Cyclopentadiene', ringGroupLabels)
        self.assertIn('Cyclopropene', ringGroupLabels)
        self.assertIn('s2_3_5_ane', polycyclicGroupLabels)
class TestMolecularManipulationInvolvedInThermoEstimation(unittest.TestCase):
    """
    Contains unit tests for methods of molecular manipulations for thermo estimation
    """

    def testConvertRingToSubMolecule(self):
        """
        Test that rings of various sizes are converted into stand-alone
        submolecules with the expected atom and bond counts.
        """
        # list out testing molecules
        smiles1 = 'C1CCCCC1'
        smiles2 = 'C1CCC2CCCCC2C1'
        smiles3 = 'C1CC2CCCC3CCCC(C1)C23'
        mol1 = Molecule().fromSMILES(smiles1)
        mol2 = Molecule().fromSMILES(smiles2)
        mol3 = Molecule().fromSMILES(smiles3)
        # get ring structure by only extracting non-hydrogens
        ring1 = [atom for atom in mol1.atoms if atom.isNonHydrogen()]
        ring2 = [atom for atom in mol2.atoms if atom.isNonHydrogen()]
        ring3 = [atom for atom in mol3.atoms if atom.isNonHydrogen()]
        # convert to submolecules
        submol1, _ = convertRingToSubMolecule(ring1)
        submol2, _ = convertRingToSubMolecule(ring2)
        submol3, _ = convertRingToSubMolecule(ring3)
        # test against expected submolecules
        self.assertEqual(len(submol1.atoms), 6)
        self.assertEqual(len(submol2.atoms), 10)
        self.assertEqual(len(submol3.atoms), 13)
        # count unique bonds in each submolecule (each bond appears in both
        # endpoint atoms' edge dicts, so deduplicate)
        bonds1 = []
        for atom in submol1.atoms:
            for bondAtom, bond in atom.edges.iteritems():
                if bond not in bonds1:
                    bonds1.append(bond)
        bonds2 = []
        for atom in submol2.atoms:
            for bondAtom, bond in atom.edges.iteritems():
                if bond not in bonds2:
                    bonds2.append(bond)
        bonds3 = []
        for atom in submol3.atoms:
            for bondAtom, bond in atom.edges.iteritems():
                if bond not in bonds3:
                    bonds3.append(bond)
        self.assertEqual(len(bonds1), 6)
        self.assertEqual(len(bonds2), 11)
        self.assertEqual(len(bonds3), 15)

    def testGetCopyForOneRing(self):
        """
        This method tests the getCopyForOneRing method, which returns
        an atom object list that contains deep copies of the atoms
        """
        testAtomList=Molecule(SMILES='C1CCCCC1').atoms
        copiedAtomList=getCopyForOneRing(testAtomList)
        testMolecule=Molecule(atoms=testAtomList)
        copiedMolecule=Molecule(atoms=copiedAtomList)
        # the copies must be distinct objects but structurally equal
        self.assertTrue(testAtomList!=copiedAtomList)
        self.assertTrue(len(testAtomList)==len(copiedAtomList))
        self.assertTrue(testMolecule.is_equal(copiedMolecule))

    def testToFailCombineTwoRingsIntoSubMolecule(self):
        """
        Test that two non-overlapping rings lead to AssertionError
        """
        smiles1 = 'C1CCCCC1'
        smiles2 = 'C1CCCCC1'
        mol1 = Molecule().fromSMILES(smiles1)
        mol2 = Molecule().fromSMILES(smiles2)
        ring1 = [atom for atom in mol1.atoms if atom.isNonHydrogen()]
        ring2 = [atom for atom in mol2.atoms if atom.isNonHydrogen()]
        with self.assertRaises(AssertionError):
            combined = combineTwoRingsIntoSubMolecule(ring1, ring2)

    def testCombineTwoRingsIntoSubMolecule(self):
        """
        Test that two fused rings sharing atoms combine into a single
        submolecule with the expected atom and bond counts.
        """
        # create testing molecule
        smiles1 = 'C1CCC2CCCCC2C1'
        mol1 = Molecule().fromSMILES(smiles1)
        # get two SSSRs
        SSSR = mol1.getSmallestSetOfSmallestRings()
        ring1 = SSSR[0]
        ring2 = SSSR[1]
        # combine two rings into submolecule
        submol, _ = combineTwoRingsIntoSubMolecule(ring1, ring2)
        self.assertEqual(len(submol.atoms), 10)
        # count unique bonds (deduplicated across endpoint atoms)
        bonds = []
        for atom in submol.atoms:
            for bondAtom, bond in atom.edges.iteritems():
                if bond not in bonds:
                    bonds.append(bond)
        self.assertEqual(len(bonds), 11)

    def testIsAromaticRing(self):
        """
        Test aromaticity detection: saturated rings are not aromatic,
        a benzene ring built from benzene-type bonds is.
        """
        # create testing rings
        smiles1 = 'C1CCC1'
        smiles2 = 'C1CCCCC1'
        # benzene given explicitly via adjacency list with benzene ('B') bonds
        adj3 = """1  C u0 p0 c0 {2,B} {6,B} {7,S}
2  C u0 p0 c0 {1,B} {3,B} {8,S}
3  C u0 p0 c0 {2,B} {4,B} {9,S}
4  C u0 p0 c0 {3,B} {5,B} {10,S}
5  C u0 p0 c0 {4,B} {6,B} {11,S}
6  C u0 p0 c0 {1,B} {5,B} {12,S}
7  H u0 p0 c0 {1,S}
8  H u0 p0 c0 {2,S}
9  H u0 p0 c0 {3,S}
10 H u0 p0 c0 {4,S}
11 H u0 p0 c0 {5,S}
12 H u0 p0 c0 {6,S}
        """
        mol1 = Molecule().fromSMILES(smiles1)
        mol2 = Molecule().fromSMILES(smiles2)
        mol3 = Molecule().fromAdjacencyList(adj3)
        ring1mol = Molecule(atoms=[atom for atom in mol1.atoms if atom.isNonHydrogen()])
        ring2mol = Molecule(atoms=[atom for atom in mol2.atoms if atom.isNonHydrogen()])
        ring3mol = Molecule(atoms=[atom for atom in mol3.atoms if atom.isNonHydrogen()])
        # check with expected results
        self.assertEqual(isAromaticRing(ring1mol), False)
        self.assertEqual(isAromaticRing(ring2mol), False)
        self.assertEqual(isAromaticRing(ring3mol), True)

    def testIsBicyclic1(self):
        """
        Test isBicyclic identifies bicyclic correctly
        The test molecule is bicyclic, we expect isBicyclic()
        returns True.
        """
        smiles = 'C1=CCC2C1=C2'
        mol = Molecule().fromSMILES(smiles)
        polyring = mol.getDisparateRings()[1][0]
        self.assertTrue(isBicyclic(polyring))

    def testIsBicyclic2(self):
        """
        Test isBicyclic identifies bicyclic correctly
        The test molecule is tetracyclic, we expect
        isBicyclic() returns False
        """
        smiles = 'C1C=C2C=CC=C3C=CC4=CC=CC=1C4=C23'
        mol = Molecule().fromSMILES(smiles)
        polyring = mol.getDisparateRings()[1][0]
        self.assertFalse(isBicyclic(polyring))

    def testFindAromaticBondsFromSubMolecule(self):
        """
        Test that each aromatic ring of naphthalene contributes six
        aromatic bonds.
        """
        smiles = "C1=CC=C2C=CC=CC2=C1"
        spe = Species().fromSMILES(smiles)
        spe.generate_resonance_structures()
        # use the aromatic resonance structure
        mol = spe.molecule[1]
        # get two SSSRs
        SSSR = mol.getSmallestSetOfSmallestRings()
        ring1 = SSSR[0]
        ring2 = SSSR[1]
        # create two testing submols
        submol1 = Molecule(atoms=ring1)
        submol2 = Molecule(atoms=ring2)
        # check with expected results
        self.assertEqual(len(findAromaticBondsFromSubMolecule(submol1)), 6)
        self.assertEqual(len(findAromaticBondsFromSubMolecule(submol2)), 6)

    def testBicyclicDecompositionForPolyringUsingPyrene(self):
        """
        Test bicyclic decomposition of a half-aromatic pyrene isomer.
        """
        # create testing molecule: Pyrene with two rings of aromatic version
        # the other two rings of kekulized version
        #
        # creating it seems not natural in RMG, that's because
        # RMG cannot parse the adjacencyList of that isomer correctly
        # so here we start with pyrene radical and get the two aromatic ring isomer
        # then saturate it.
        smiles = 'C1C=C2C=CC=C3C=CC4=CC=CC=1C4=C23'
        spe = Species().fromSMILES(smiles)
        spe.generate_resonance_structures()
        for mol in spe.molecule:
            sssr0 = mol.getSmallestSetOfSmallestRings()
            aromaticRingNum = 0
            for sr0 in sssr0:
                sr0mol = Molecule(atoms=sr0)
                if isAromaticRing(sr0mol):
                    aromaticRingNum += 1
            if aromaticRingNum == 2:
                break
        # extract polyring from the molecule
        polyring = mol.getDisparateRings()[1][0]
        bicyclicList, ringOccurancesDict = bicyclicDecompositionForPolyring(polyring)
        # 1st test: number of cores
        self.assertEqual(len(bicyclicList), 5)
        # 2nd test: ringOccurancesDict
        ringInCoreOccurances = sorted(ringOccurancesDict.values())
        expectedRingInCoreOccurances = [2, 2, 3, 3]
        self.assertEqual(ringInCoreOccurances, expectedRingInCoreOccurances)
        # 3rd test: size of each bicyclic core
        bicyclicSizes = sorted([len(bicyclic.atoms) for bicyclic in bicyclicList])
        expectedBicyclicSizes = [10, 10, 10, 10, 10]
        self.assertEqual(bicyclicSizes, expectedBicyclicSizes)
        # 4th test: bond info for members of each core
        aromaticBondNumInBicyclics = []
        for bicyclic in bicyclicList:
            aromaticBondNum = len(findAromaticBondsFromSubMolecule(bicyclic))
            aromaticBondNumInBicyclics.append(aromaticBondNum)
        aromaticBondNumInBicyclics = sorted(aromaticBondNumInBicyclics)
        expectedAromaticBondNumInBicyclics = [0, 6, 6, 6, 6]
        self.assertEqual(aromaticBondNumInBicyclics, expectedAromaticBondNumInBicyclics)

    def testBicyclicDecompositionForPolyringUsingAromaticTricyclic(self):
        """
        Test bicyclic decomposition of an aromatic tricyclic.
        """
        # create testing molecule
        #
        # creating it seems not natural in RMG, that's because
        # RMG cannot parse the adjacencyList of that isomer correctly
        # so here we start with kekulized version and generate_resonance_structures
        # and pick the one with two aromatic rings
        smiles = 'C1=CC2C=CC=C3C=CC(=C1)C=23'
        spe = Species().fromSMILES(smiles)
        spe.generate_resonance_structures()
        for mol in spe.molecule:
            sssr0 = mol.getSmallestSetOfSmallestRings()
            aromaticRingNum = 0
            for sr0 in sssr0:
                sr0mol = Molecule(atoms=sr0)
                if isAromaticRing(sr0mol):
                    aromaticRingNum += 1
            if aromaticRingNum == 2:
                break
        # extract polyring from the molecule
        polyring = mol.getDisparateRings()[1][0]
        bicyclicList, ringOccurancesDict = bicyclicDecompositionForPolyring(polyring)
        # 1st test: number of cores
        self.assertEqual(len(bicyclicList), 3)
        # 2nd test: ringOccurancesDict
        ringInCoreOccurances = sorted(ringOccurancesDict.values())
        expectedRingInCoreOccurances = [2, 2, 2]
        self.assertEqual(ringInCoreOccurances, expectedRingInCoreOccurances)
        # 3rd test: size of each bicyclic core
        bicyclicSizes = sorted([len(bicyclic.atoms) for bicyclic in bicyclicList])
        expectedBicyclicSizes = [9, 9, 10]
        self.assertEqual(bicyclicSizes, expectedBicyclicSizes)
        # 4th test: bond info for members of each core
        aromaticBondNumInBicyclics = []
        for bicyclic in bicyclicList:
            aromaticBondNum = len(findAromaticBondsFromSubMolecule(bicyclic))
            aromaticBondNumInBicyclics.append(aromaticBondNum)
        aromaticBondNumInBicyclics = sorted(aromaticBondNumInBicyclics)
        expectedAromaticBondNumInBicyclics = [6, 6, 11]
        self.assertEqual(aromaticBondNumInBicyclics, expectedAromaticBondNumInBicyclics)

    def testBicyclicDecompositionForPolyringUsingAlkaneTricyclic(self):
        """
        Test bicyclic decomposition of a saturated alkane tricyclic.
        """
        # create testing molecule
        smiles = 'C1CC2CCCC3C(C1)C23'
        mol = Molecule().fromSMILES(smiles)
        # extract polyring from the molecule
        polyring = mol.getDisparateRings()[1][0]
        bicyclicList, ringOccurancesDict = bicyclicDecompositionForPolyring(polyring)
        # 1st test: number of cores
        self.assertEqual(len(bicyclicList), 3)
        # 2nd test: ringOccurancesDict
        ringInCoreOccurances = sorted(ringOccurancesDict.values())
        expectedRingInCoreOccurances = [2, 2, 2]
        self.assertEqual(ringInCoreOccurances, expectedRingInCoreOccurances)
        # 3rd test: size of each bicyclic core
        bicyclicSizes = sorted([len(bicyclic.atoms) for bicyclic in bicyclicList])
        expectedBicyclicSizes = [7, 7, 10]
        self.assertEqual(bicyclicSizes, expectedBicyclicSizes)
        # 4th test: bond info for members of each core
        aromaticBondNumInBicyclics = []
        for bicyclic in bicyclicList:
            aromaticBondNum = len(findAromaticBondsFromSubMolecule(bicyclic))
            aromaticBondNumInBicyclics.append(aromaticBondNum)
        aromaticBondNumInBicyclics = sorted(aromaticBondNumInBicyclics)
        expectedAromaticBondNumInBicyclics = [0, 0, 0]
        self.assertEqual(aromaticBondNumInBicyclics, expectedAromaticBondNumInBicyclics)

    def testCombineCycles(self):
        """
        This method tests the combineCycles method, which simply joins two lists
        together without duplication.
        """
        mainCycle=Molecule(SMILES='C1CCC2CCCCC2C1').atoms
        # overlapping slices: atoms 6 and 7 appear in both test cycles
        testCycle1=mainCycle[0:8]
        testCycle2=mainCycle[6:]
        joinedCycle=combineCycles(testCycle1,testCycle2)
        self.assertTrue(sorted(mainCycle)==sorted(joinedCycle))

    def testSplitBicyclicIntoSingleRings1(self):
        """
        Test bicyclic molecule "C1=CCC2C1=C2" can be divided into
        individual rings properly
        """
        smiles = 'C1=CCC2C1=C2'
        mol = Molecule().fromSMILES(smiles)
        bicyclic = mol.getDisparateRings()[1][0]
        bicyclic_submol = convertRingToSubMolecule(bicyclic)[0]
        single_ring_submols = splitBicyclicIntoSingleRings(bicyclic_submol)
        self.assertEqual(len(single_ring_submols), 2)
        # order the two rings by size: a = smaller, b = larger
        single_ring_submol_a, single_ring_submol_b = sorted(single_ring_submols,
                                                            key=lambda submol: len(submol.atoms))
        single_ring_submol_a.updateAtomTypes()
        single_ring_submol_b.updateAtomTypes()
        expected_submol_a = Molecule().fromSMILES('C1=CC1')
        expected_submol_a.deleteHydrogens()
        expected_submol_a.updateConnectivityValues()
        expected_submol_b = Molecule().fromSMILES('C1=CCCC1')
        expected_submol_b.deleteHydrogens()
        expected_submol_b.updateConnectivityValues()
        self.assertTrue(single_ring_submol_a.isIsomorphic(expected_submol_a))
        self.assertTrue(single_ring_submol_b.isIsomorphic(expected_submol_b))

    def testSplitBicyclicIntoSingleRings2(self):
        """
        Test bicyclic molecule "C1=CCC2=C1C2" can be divided into
        individual rings properly
        """
        smiles = 'C1=CCC2=C1C2'
        mol = Molecule().fromSMILES(smiles)
        bicyclic = mol.getDisparateRings()[1][0]
        bicyclic_submol = convertRingToSubMolecule(bicyclic)[0]
        single_ring_submols = splitBicyclicIntoSingleRings(bicyclic_submol)
        self.assertEqual(len(single_ring_submols), 2)
        # order the two rings by size: a = smaller, b = larger
        single_ring_submol_a, single_ring_submol_b = sorted(single_ring_submols,
                                                            key=lambda submol: len(submol.atoms))
        single_ring_submol_a.updateAtomTypes()
        single_ring_submol_b.updateAtomTypes()
        expected_submol_a = Molecule().fromSMILES('C1=CC1')
        # remove hydrogen
        expected_submol_a.deleteHydrogens()
        expected_submol_a.updateConnectivityValues()
        expected_submol_b = Molecule().fromSMILES('C1=CC=CC1')
        # remove hydrogen
        expected_submol_b.deleteHydrogens()
        expected_submol_b.updateConnectivityValues()
        self.assertTrue(single_ring_submol_a.isIsomorphic(expected_submol_a))
        self.assertTrue(single_ring_submol_b.isIsomorphic(expected_submol_b))

    def testSaturateRingBonds1(self):
        """
        Test unsaturated bonds of "C1=CCC2=C1C2" to be saturated properly
        """
        smiles = 'C1=CCC2=C1C2'
        mol = Molecule().fromSMILES(smiles)
        ring_submol = convertRingToSubMolecule(mol.getDisparateRings()[1][0])[0]
        saturated_ring_submol, alreadySaturated = saturateRingBonds(ring_submol)
        expected_saturated_ring_submol = Molecule().fromSMILES('C1CCC2C1C2')
        # remove hydrogen
        expected_saturated_ring_submol.deleteHydrogens()
        expected_saturated_ring_submol.updateConnectivityValues()
        # the input had double bonds, so saturation must have done work
        self.assertFalse(alreadySaturated)
        self.assertEqual(saturated_ring_submol.multiplicity,
                         expected_saturated_ring_submol.multiplicity)
        self.assertTrue(saturated_ring_submol.isIsomorphic(expected_saturated_ring_submol))

    def testSaturateRingBonds2(self):
        """
        Test unsaturated bonds of "C1=CC=C2CCCCC2=C1" to be saturated properly
        """
        smiles = 'C1=CC=C2CCCCC2=C1'
        spe = Species().fromSMILES(smiles)
        spe.generate_resonance_structures()
        # use the aromatic resonance structure
        mol = spe.molecule[1]
        ring_submol = convertRingToSubMolecule(mol.getDisparateRings()[1][0])[0]
        saturated_ring_submol, alreadySaturated = saturateRingBonds(ring_submol)
        expected_spe = Species().fromSMILES('C1=CC=C2CCCCC2=C1')
        expected_spe.generate_resonance_structures()
        expected_saturated_ring_submol = expected_spe.molecule[1]
        # remove hydrogen
        expected_saturated_ring_submol.deleteHydrogens()
        expected_saturated_ring_submol.updateConnectivityValues()
        # aromatic bonds are left alone, so the ring counts as already saturated
        self.assertTrue(alreadySaturated)
        self.assertEqual(saturated_ring_submol.multiplicity,
                         expected_saturated_ring_submol.multiplicity)
        self.assertTrue(saturated_ring_submol.isIsomorphic(expected_saturated_ring_submol))

    def testSaturateRingBonds3(self):
        """
        Test unsaturated bonds of "C1=CC=C2CC=CCC2=C1" to be saturated properly
        """
        smiles = 'C1=CC=C2CC=CCC2=C1'
        spe = Species().fromSMILES(smiles)
        spe.generate_resonance_structures()
        # use the aromatic resonance structure
        mol = spe.molecule[1]
        ring_submol = convertRingToSubMolecule(mol.getDisparateRings()[1][0])[0]
        saturated_ring_submol, alreadySaturated = saturateRingBonds(ring_submol)
        expected_spe = Species().fromSMILES('C1=CC=C2CCCCC2=C1')
        expected_spe.generate_resonance_structures()
        expected_saturated_ring_submol = expected_spe.molecule[1]
        # remove hydrogen
        expected_saturated_ring_submol.deleteHydrogens()
        expected_saturated_ring_submol.updateConnectivityValues()
        # the non-aromatic C=C in the second ring must have been saturated
        self.assertFalse(alreadySaturated)
        self.assertEqual(saturated_ring_submol.multiplicity,
                         expected_saturated_ring_submol.multiplicity)
        self.assertTrue(saturated_ring_submol.isIsomorphic(expected_saturated_ring_submol))
# These tests talk to a live (remote) thermo central MongoDB instance; the
# 'auth' attribute lets runners exclude them when credentials are unavailable.
@attr('auth')
class TestThermoCentralDatabaseInterface(unittest.TestCase):
    """
    Contains unit tests for methods of ThermoCentralDatabaseInterface
    """
    @classmethod
    def setUpClass(self):
        """A function that is run ONCE before all unit tests in this class."""
        # NOTE(review): the first parameter of a classmethod is conventionally
        # named `cls`, not `self`; it works here because only class-attribute
        # assignment is performed.
        global database
        self.database = database.thermo
    def connectToTestCentralDatabase(self):
        # Helper: open a connection to the testing central database using
        # credentials taken from the TCD_* environment variables (see
        # getTestingTCDAuthenticationInfo below).
        host, port, username, password = getTestingTCDAuthenticationInfo()
        application = 'test'
        tcdi = ThermoCentralDatabaseInterface(host, port, username, password, application)
        return tcdi
    def testConnectFailure(self):
        # With bogus credentials the interface should fail quietly and leave
        # `client` unset rather than raising.
        host = 'somehost'
        port = 27017
        username = 'me'
        password = 'pswd'
        application = 'test'
        tcdi = ThermoCentralDatabaseInterface(host, port, username, password, application)
        self.assertTrue(tcdi.client is None)
    def testConnectSuccess(self):
        # With valid credentials a client connection should be established.
        tcdi = self.connectToTestCentralDatabase()
        self.assertTrue(tcdi.client is not None)
    def testSatisfyRegistrationRequirements1(self):
        """
        the species is non-cyclic, currently regarded no need to
        register in thermo central database
        """
        tcdi = self.connectToTestCentralDatabase()
        species = Species().fromSMILES('C[CH2]')
        thermoData = self.database.getThermoDataFromGroups(species)
        self.assertFalse(tcdi.satisfyRegistrationRequirements(species, thermoData, self.database))
    def testSatisfyRegistrationRequirements2(self):
        """
        the species is non-cyclic, so there is no need to register it in the
        thermo central database
        """
        tcdi = self.connectToTestCentralDatabase()
        species = Species().fromSMILES('CC')
        thermoData = self.database.getThermoDataFromGroups(species)
        self.assertFalse(tcdi.satisfyRegistrationRequirements(species, thermoData, self.database))
    def testSatisfyRegistrationRequirements3(self):
        """
        the thermo is exact match, so no need to register in
        thermo central database
        """
        tcdi = self.connectToTestCentralDatabase()
        species = Species().fromSMILES('C1CC1')
        thermoData = self.database.getThermoDataFromGroups(species)
        self.assertFalse(tcdi.satisfyRegistrationRequirements(species, thermoData, self.database))
    def testSatisfyRegistrationRequirements4(self):
        """
        the thermo is from library, so no need to register in
        thermo central database
        """
        tcdi = self.connectToTestCentralDatabase()
        species = Species().fromSMILES('[H][H]')
        # note: library lookup here, unlike the group-based tests above
        thermoData = self.database.getThermoDataFromLibraries(species)
        self.assertFalse(tcdi.satisfyRegistrationRequirements(species, thermoData, self.database))
    def testSatisfyRegistrationRequirements5(self):
        """
        the thermo is matching generic node, so it needs to register in
        thermo central database
        In the future, if RMG-database includes corresponding exact match
        this test should be modified.
        """
        tcdi = self.connectToTestCentralDatabase()
        species = Species().fromSMILES('C1C=CC2C=CC2=C1')
        thermoData = self.database.getThermoDataFromGroups(species)
        self.assertTrue(tcdi.satisfyRegistrationRequirements(species, thermoData, self.database))
    def testSatisfyRegistrationRequirements6(self):
        """
        the thermo is matching generic node, so it needs to register in
        thermo central database
        In the future, if RMG-database includes corresponding exact match
        this test should be modified.
        """
        tcdi = self.connectToTestCentralDatabase()
        species = Species().fromSMILES('C1=C=C2CC23C=CC=1C=C3')
        thermoData = self.database.getThermoDataFromGroups(species)
        self.assertTrue(tcdi.satisfyRegistrationRequirements(species, thermoData, self.database))
    def testRegisterInCentralThermoDB1(self):
        """
        Test situation where both registration_table and results_table have no
        species as the one going to be registered
        """
        # connect to thermo central database
        host, port, username, password = getTestingTCDAuthenticationInfo()
        application = 'test'
        tcdi = ThermoCentralDatabaseInterface(host, port, username, password, application)
        # prepare species to register
        species = Species().fromSMILES('C1=C=C2CC23C=CC=1C=C3')
        expected_aug_inchi = "InChI=1S/C10H6/c1-2-9-7-10(9)5-3-8(1)4-6-10/h3-6H,7H2"
        # select registration table
        # and clean previous data
        db = getattr(tcdi.client, 'thermoCentralDB')
        registration_table = getattr(db, 'registration_table')
        results_table = getattr(db, 'results_table')
        registration_table.delete_many({"aug_inchi": expected_aug_inchi})
        results_table.delete_many({"aug_inchi": expected_aug_inchi})
        tcdi.registerInCentralThermoDB(species)
        registered_species_entries = list(registration_table.find({"aug_inchi": expected_aug_inchi}))
        # should expect only one registered such species
        self.assertEqual(len(registered_species_entries), 1)
        registered_species_entry = registered_species_entries[0]
        # check all the columns are expected
        registered_species = Species().fromSMILES(str(registered_species_entry['SMILES_input']))
        self.assertEqual(registered_species_entry['aug_inchi'], expected_aug_inchi)
        self.assertTrue(registered_species.isIsomorphic(species))
        self.assertIn(registered_species_entry['status'], ['pending', 'submitted'])
        # clean up the table
        registration_table.delete_many({"aug_inchi": expected_aug_inchi})
    def testRegisterInCentralThermoDB2(self):
        """
        Test situation where registration_table has species as the one going
        to be registered
        """
        # connect to thermo central database
        host, port, username, password = getTestingTCDAuthenticationInfo()
        application = 'test'
        tcdi = ThermoCentralDatabaseInterface(host, port, username, password, application)
        # prepare species to register
        species = Species().fromSMILES('C1=C=C2CC23C=CC=1C=C3')
        expected_aug_inchi = "InChI=1S/C10H6/c1-2-9-7-10(9)5-3-8(1)4-6-10/h3-6H,7H2"
        # select registration table
        # and clean previous data
        db = getattr(tcdi.client, 'thermoCentralDB')
        registration_table = getattr(db, 'registration_table')
        results_table = getattr(db, 'results_table')
        registration_table.delete_many({"aug_inchi": expected_aug_inchi})
        # pre-seed the registration table so registration hits the
        # "already registered" path
        registration_table.insert_one({"aug_inchi": expected_aug_inchi})
        results_table.delete_many({"aug_inchi": expected_aug_inchi})
        tcdi.registerInCentralThermoDB(species)
        registered_species_entries = list(registration_table.find({"aug_inchi": expected_aug_inchi}))
        # should expect only one registered such species
        self.assertEqual(len(registered_species_entries), 1)
        registered_species_entry = registered_species_entries[0]
        # check all the columns are expected
        self.assertEqual(registered_species_entry['aug_inchi'], expected_aug_inchi)
        # NOTE(review): assertTrue here treats `2` as the failure message, so
        # this only checks truthiness; assertEqual(len(...), 2) was probably
        # intended — confirm before changing, as the stricter check could fail.
        self.assertTrue(len(registered_species_entry), 2)
        # clean up the table
        registration_table.delete_many({"aug_inchi": expected_aug_inchi})
    def testRegisterInCentralThermoDB3(self):
        """
        Test situation where results_table has species as the one going
        to be registered
        """
        # connect to thermo central database
        host, port, username, password = getTestingTCDAuthenticationInfo()
        application = 'test'
        tcdi = ThermoCentralDatabaseInterface(host, port, username, password, application)
        # prepare species to register
        species = Species().fromSMILES('C1=C=C2CC23C=CC=1C=C3')
        expected_aug_inchi = "InChI=1S/C10H6/c1-2-9-7-10(9)5-3-8(1)4-6-10/h3-6H,7H2"
        # select registration table
        # and clean previous data
        db = getattr(tcdi.client, 'thermoCentralDB')
        registration_table = getattr(db, 'registration_table')
        results_table = getattr(db, 'results_table')
        registration_table.delete_many({"aug_inchi": expected_aug_inchi})
        results_table.delete_many({"aug_inchi": expected_aug_inchi})
        # pre-seed the results table: a species with results must not be
        # re-registered
        results_table.insert_one({"aug_inchi": expected_aug_inchi})
        tcdi.registerInCentralThermoDB(species)
        registered_species_entries = list(registration_table.find({"aug_inchi": expected_aug_inchi}))
        # should expect only one registered such species
        self.assertEqual(len(registered_species_entries), 0)
        # clean up the table
        results_table.delete_many({"aug_inchi": expected_aug_inchi})
def getTestingTCDAuthenticationInfo():
    """Read thermo central database credentials from the environment.

    Returns the tuple (host, port, username, password) built from the
    TCD_HOST / TCD_PORT / TCD_USER / TCD_PW environment variables; the port
    is converted to an int. When any variable is missing, prints a warning
    and returns the placeholder tuple ('None', 0, 'None', 'None').
    """
    try:
        host = os.environ['TCD_HOST']
        username = os.environ['TCD_USER']
        password = os.environ['TCD_PW']
        port = int(os.environ['TCD_PORT'])
    except KeyError:
        print('Thermo Central Database Authentication Environment Variables Not Completely Set!')
        return 'None', 0, 'None', 'None'
    return host, port, username, password
################################################################################
if __name__ == '__main__':
    # Run this module's test suite with verbose per-test output.
    runner = unittest.TextTestRunner(verbosity=2)
    unittest.main(testRunner=runner)
| {
"content_hash": "fc51844adaea1f7847691161fb6c4c0c",
"timestamp": "",
"source": "github",
"line_count": 1543,
"max_line_length": 227,
"avg_line_length": 44.081011017498376,
"alnum_prop": 0.6497640295808401,
"repo_name": "Molecular-Image-Recognition/Molecular-Image-Recognition",
"id": "33115fe90e07e8fabcfeb8872fb050091600c0ee",
"size": "69504",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "code/rmgpy/data/thermoTest.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "4715"
},
{
"name": "Python",
"bytes": "5599677"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import json
import struct
import re
import base64
import httplib
import sys
# Global configuration mapping; populated from the key=value config file
# parsed in the __main__ block below.
settings = {}
class BitcoinRPC:
    """Minimal JSON-RPC 1.1 client for talking to a bitcoind-style node."""
    def __init__(self, host, port, username, password):
        # HTTP Basic auth header derived from the RPC credentials.
        authpair = "%s:%s" % (username, password)
        self.authhdr = "Basic %s" % (base64.b64encode(authpair))
        # Non-strict connection with a 30-second timeout.
        self.conn = httplib.HTTPConnection(host, port, False, 30)
    def execute(self, obj):
        """POST `obj` as JSON and return the decoded reply (None if no response)."""
        headers = { 'Authorization' : self.authhdr,
                    'Content-type' : 'application/json' }
        self.conn.request('POST', '/', json.dumps(obj), headers)
        resp = self.conn.getresponse()
        if resp is None:
            print("JSON-RPC: no response", file=sys.stderr)
            return None
        return json.loads(resp.read())
    @staticmethod
    def build_request(idx, method, params):
        """Return a JSON-RPC 1.1 request object for `method` with id `idx`."""
        obj = { 'version' : '1.1',
                'method' : method,
                'id' : idx }
        obj['params'] = [] if params is None else params
        return obj
    @staticmethod
    def response_is_error(resp_obj):
        """Return True when the reply carries a non-null 'error' member."""
        return resp_obj.get('error') is not None
def get_block_hashes(settings, max_blocks_per_call=10000):
    """Print block hashes for heights min_height..max_height (inclusive),
    issuing batched getblockhash JSON-RPC calls of at most
    `max_blocks_per_call` requests each."""
    rpc = BitcoinRPC(settings['host'], settings['port'],
                     settings['rpcuser'], settings['rpcpassword'])
    height = settings['min_height']
    last = settings['max_height']
    while height <= last:
        count = min(last + 1 - height, max_blocks_per_call)
        # One batched request covering the next `count` heights.
        batch = [rpc.build_request(i, 'getblockhash', [height + i])
                 for i in range(count)]
        reply = rpc.execute(batch)
        for i, resp_obj in enumerate(reply):
            if rpc.response_is_error(resp_obj):
                print('JSON-RPC: error at height', height+i, ': ', resp_obj['error'], file=sys.stderr)
                exit(1)
            assert(resp_obj['id'] == i) # assume replies are in-sequence
            print(resp_obj['result'])
        height += count
if __name__ == '__main__':
    if len(sys.argv) != 2:
        print("Usage: linearize-hashes.py CONFIG-FILE")
        sys.exit(1)
    # Parse the key=value config file; `with` guarantees the handle is closed.
    with open(sys.argv[1]) as f:
        for line in f:
            # skip comment lines (raw strings avoid invalid-escape warnings)
            m = re.search(r'^\s*#', line)
            if m:
                continue
            # parse key=value lines
            m = re.search(r'^(\w+)\s*=\s*(\S.*)$', line)
            if m is None:
                continue
            settings[m.group(1)] = m.group(2)
    # Apply defaults for any options not present in the config file.
    if 'host' not in settings:
        settings['host'] = '127.0.0.1'
    if 'port' not in settings:
        settings['port'] = 9886
    if 'min_height' not in settings:
        settings['min_height'] = 0
    if 'max_height' not in settings:
        settings['max_height'] = 313000
    if 'rpcuser' not in settings or 'rpcpassword' not in settings:
        # BUG FIX: this previously used `file=stderr`, an undefined name that
        # raised NameError instead of printing the intended error message.
        print("Missing username and/or password in cfg file", file=sys.stderr)
        sys.exit(1)
    # Normalize numeric settings (they arrive as strings from the cfg file).
    settings['port'] = int(settings['port'])
    settings['min_height'] = int(settings['min_height'])
    settings['max_height'] = int(settings['max_height'])
    get_block_hashes(settings)
| {
"content_hash": "0905077ee4c690947cf2e17be1d208ee",
"timestamp": "",
"source": "github",
"line_count": 104,
"max_line_length": 90,
"avg_line_length": 26.365384615384617,
"alnum_prop": 0.6542669584245077,
"repo_name": "inkvisit/sarmacoins",
"id": "0dff8733f0530402a64431fe699502eff66aabcb",
"size": "3036",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "contrib/linearize/linearize-hashes.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "7639"
},
{
"name": "C",
"bytes": "1014042"
},
{
"name": "C++",
"bytes": "4171326"
},
{
"name": "CSS",
"bytes": "39920"
},
{
"name": "Groff",
"bytes": "18192"
},
{
"name": "HTML",
"bytes": "50621"
},
{
"name": "Java",
"bytes": "2100"
},
{
"name": "M4",
"bytes": "141795"
},
{
"name": "Makefile",
"bytes": "87610"
},
{
"name": "Objective-C",
"bytes": "2023"
},
{
"name": "Objective-C++",
"bytes": "7241"
},
{
"name": "Protocol Buffer",
"bytes": "2308"
},
{
"name": "Python",
"bytes": "211499"
},
{
"name": "QMake",
"bytes": "26363"
},
{
"name": "Shell",
"bytes": "40963"
}
],
"symlink_target": ""
} |
import os.path as op
from copy import deepcopy
import numpy as np
import pandas as pd
import nibabel as nib
import nipype
import pytest
from .. import model
class TestModelWorkflows(object):
    """Tests for the model-fit and model-results nipype workflows.

    These tests rely on project pytest fixtures (``lyman_info``,
    ``timeseries``, ``modelfit``, ``modelres``) defined elsewhere in the
    test suite; each fixture supplies pre-built info objects and file paths.
    """
    def test_model_fit_workflow_creation(self, lyman_info):
        info = lyman_info["info"]
        subjects = lyman_info["subjects"]
        sessions = lyman_info["sessions"]
        wf = model.define_model_fit_workflow(info, subjects, sessions)
        # Check basic information about the workflow
        assert isinstance(wf, nipype.Workflow)
        assert wf.name == "model_fit"
        assert wf.base_dir == op.join(info.cache_dir, info.experiment_name)
        # Check root directory of output
        data_out = wf.get_node("data_output")
        assert data_out.inputs.base_directory == info.proc_dir
        # Check the list of nodes we expect
        expected_nodes = ["subject_source", "run_source", "save_info",
                          "data_input", "fit_model", "data_output"]
        expected_nodes.sort()
        assert wf.list_node_names() == expected_nodes
    def test_model_results_workflow_creation(self, lyman_info):
        info = lyman_info["info"]
        subjects = lyman_info["subjects"]
        wf = model.define_model_results_workflow(info, subjects)
        # Check basic information about the workflow
        assert isinstance(wf, nipype.Workflow)
        assert wf.name == "model_results"
        assert wf.base_dir == op.join(info.cache_dir, info.experiment_name)
        # Check root directory of output
        run_out = wf.get_node("run_output")
        assert run_out.inputs.base_directory == info.proc_dir
        subject_out = wf.get_node("subject_output")
        assert subject_out.inputs.base_directory == info.proc_dir
        # Check the list of nodes we expect
        expected_nodes = ["subject_source", "run_source", "data_input",
                          "estimate_contrasts", "model_results", "save_info",
                          "run_output", "results_path", "subject_output"]
        expected_nodes.sort()
        assert wf.list_node_names() == expected_nodes
    def test_model_iterables(self, lyman_info):
        # generate_iterables maps (experiment, subjects[, sessions]) onto the
        # subject list and the per-subject (session, run) tuples used to
        # parameterize the workflow source nodes.
        info = lyman_info["info"]
        scan_info = info.scan_info
        # -- Test full iterables
        iterables = model.generate_iterables(
            scan_info, "exp_alpha", ["subj01", "subj02"],
        )
        expected_iterables = (
            ["subj01", "subj02"],
            {"subj01":
             [("sess01", "run01"),
              ("sess01", "run02"),
              ("sess02", "run01")],
             "subj02":
             [("sess01", "run01"),
              ("sess01", "run02"),
              ("sess01", "run03")]},
        )
        assert iterables == expected_iterables
        # -- Test iterables as set in workflow
        wf = model.define_model_fit_workflow(info, ["subj01", "subj02"], None)
        subject_source = wf.get_node("subject_source")
        assert subject_source.iterables == ("subject", iterables[0])
        run_source = wf.get_node("run_source")
        assert run_source.iterables == ("run", iterables[1])
        wf = model.define_model_results_workflow(info, ["subj01", "subj02"])
        subject_source = wf.get_node("subject_source")
        assert subject_source.iterables == ("subject", iterables[0])
        run_source = wf.get_node("run_source")
        assert run_source.iterables == ("run", iterables[1])
        # -- Test single subject
        iterables = model.generate_iterables(
            scan_info, "exp_alpha", ["subj01"],
        )
        expected_iterables = (
            ["subj01"],
            {"subj01":
             [("sess01", "run01"),
              ("sess01", "run02"),
              ("sess02", "run01")]}
        )
        assert iterables == expected_iterables
        # -- Test different experiment
        iterables = model.generate_iterables(
            scan_info, "exp_beta", ["subj01", "subj02"],
        )
        expected_iterables = (
            ["subj01"],
            {"subj01":
             [("sess02", "run01"),
              ("sess02", "run02"),
              ("sess02", "run03")]},
        )
        assert iterables == expected_iterables
        # -- Test single subject, single session
        iterables = model.generate_iterables(
            scan_info, "exp_alpha", ["subj01"], ["sess02"],
        )
        expected_iterables = (
            ["subj01"],
            {"subj01":
             [("sess02", "run01")]},
        )
        assert iterables == expected_iterables
    def test_model_results_path(self):
        # ModelResultsPath should compose
        # <proc_dir>/<subject>/<experiment>/<model>/results
        proc_dir = op.realpath(".")
        subject = "subj01"
        experiment = "exp_a"
        model_name = "model_alpha"
        ifc = model.ModelResultsPath(
            proc_dir=str(proc_dir),
            subject=subject,
            experiment=experiment,
            model=model_name,
        )
        out = ifc.run().outputs
        expected_path = op.join(proc_dir, subject,
                                experiment, model_name, "results")
        assert out.output_path == expected_path
    def test_model_fit_input(self, timeseries):
        # ModelFitInput should echo the identifiers and resolve every input
        # file path from the timeseries fixture.
        subject = timeseries["subject"]
        run_tuple = session, run = timeseries["session"], timeseries["run"]
        exp_name = timeseries["info"].experiment_name
        model_name = timeseries["info"].model_name
        out = model.ModelFitInput(
            experiment=exp_name,
            model=model_name,
            proc_dir=str(timeseries["proc_dir"]),
            subject=subject,
            run_tuple=run_tuple,
        ).run().outputs
        assert out.subject == subject
        assert out.session == session
        assert out.run == run
        assert out.seg_file == timeseries["seg_file"]
        assert out.surf_file == timeseries["surf_file"]
        assert out.mask_file == timeseries["mask_file"]
        assert out.edge_file == timeseries["edge_file"]
        assert out.ts_file == timeseries["ts_file"]
        assert out.noise_file == timeseries["noise_file"]
        assert out.mc_file == timeseries["mc_file"]
        assert out.output_path == timeseries["model_dir"]
    def test_model_results_input(self, modelfit):
        # ModelResultsInput resolves the model-fit outputs needed by the
        # results stage for one (subject, session, run).
        subject = modelfit["subject"]
        run_tuple = session, run = modelfit["session"], modelfit["run"]
        exp_name = modelfit["info"].experiment_name
        model_name = modelfit["info"].model_name
        out = model.ModelResultsInput(
            experiment=exp_name,
            model=model_name,
            proc_dir=str(modelfit["proc_dir"]),
            subject=subject,
            run_tuple=run_tuple,
        ).run().outputs
        assert out.subject == subject
        assert out.session == session
        assert out.run == run
        assert out.anat_file == modelfit["anat_file"]
        assert out.mask_file == modelfit["mask_file"]
        assert out.beta_file == modelfit["beta_file"]
        assert out.ols_file == modelfit["ols_file"]
        assert out.error_file == modelfit["error_file"]
        assert out.output_path == modelfit["model_dir"]
    @pytest.mark.parametrize(
        "percent_change,nuisance_regression",
        [(True, True), (False, False)],
    )
    def test_model_fit(self, execdir, timeseries,
                       percent_change, nuisance_regression):
        # Run the ModelFit interface end-to-end on the fixture timeseries and
        # check output file names and image shapes (not numerical values).
        info = timeseries["info"]
        info.percent_change = percent_change
        if not nuisance_regression:
            info.nuisance_components = {}
        out = model.ModelFit(
            subject=timeseries["subject"],
            session=timeseries["session"],
            run=timeseries["run"],
            data_dir=str(timeseries["data_dir"]),
            info=info.trait_get(),
            seg_file=timeseries["seg_file"],
            surf_file=timeseries["surf_file"],
            edge_file=timeseries["edge_file"],
            ts_file=timeseries["ts_file"],
            mask_file=timeseries["mask_file"],
            noise_file=timeseries["noise_file"],
            mc_file=timeseries["mc_file"],
        ).run().outputs
        # Test output file names
        assert out.mask_file == execdir.join("mask.nii.gz")
        assert out.beta_file == execdir.join("beta.nii.gz")
        assert out.error_file == execdir.join("error.nii.gz")
        assert out.ols_file == execdir.join("ols.nii.gz")
        assert out.resid_file == execdir.join("resid.nii.gz")
        assert out.model_file == execdir.join("model.csv")
        assert out.resid_plot == execdir.join("resid.png")
        assert out.model_plot == execdir.join("model.png")
        assert out.error_plot == execdir.join("error.png")
        if nuisance_regression:
            assert out.nuisance_plot == execdir.join("nuisance.png")
        n_x, n_y, n_z = timeseries["vol_shape"]
        n_tp = timeseries["n_tp"]
        X = pd.read_csv(out.model_file)
        n_params = X.shape[1]
        # Test output image shapes
        mask_img = nib.load(out.mask_file)
        assert mask_img.shape == (n_x, n_y, n_z)
        beta_img = nib.load(out.beta_file)
        assert beta_img.shape == (n_x, n_y, n_z, n_params)
        error_img = nib.load(out.error_file)
        assert error_img.shape == (n_x, n_y, n_z)
        ols_img = nib.load(out.ols_file)
        assert ols_img.shape == (n_x, n_y, n_z, n_params ** 2)
        resid_img = nib.load(out.resid_file)
        assert resid_img.shape == (n_x, n_y, n_z, n_tp)
        model_matrix = pd.read_csv(out.model_file)
        assert model_matrix.shape == (n_tp, n_params)
    def test_estimate_contrasts(self, execdir, modelfit):
        # EstimateContrasts should emit one volume (and one name) per
        # contrast defined in the fixture info.
        out = model.EstimateContrasts(
            info=modelfit["info"].trait_get(),
            mask_file=modelfit["mask_file"],
            beta_file=modelfit["beta_file"],
            ols_file=modelfit["ols_file"],
            error_file=modelfit["error_file"],
            model_file=modelfit["model_file"],
        ).run().outputs
        # Test output file names
        assert out.contrast_file == execdir.join("contrast.nii.gz")
        assert out.variance_file == execdir.join("variance.nii.gz")
        assert out.tstat_file == execdir.join("tstat.nii.gz")
        assert out.name_file == execdir.join("contrast.txt")
        # Test output image shapes
        n_contrasts = len(modelfit["info"].contrasts)
        assert nib.load(out.contrast_file).shape[-1] == n_contrasts
        assert nib.load(out.variance_file).shape[-1] == n_contrasts
        assert nib.load(out.tstat_file).shape[-1] == n_contrasts
        assert len(np.loadtxt(out.name_file, str)) == n_contrasts
    def test_missing_contrasts(self, execdir, modelfit):
        # A contrast referencing a regressor absent from the design ("d")
        # should be silently skipped, leaving the original contrast count.
        info = deepcopy(modelfit["info"].trait_get())
        n_contrasts = len(info["contrasts"])
        info["contrasts"].append(("d", ["d"], [1]))
        out = model.EstimateContrasts(
            info=info,
            mask_file=modelfit["mask_file"],
            beta_file=modelfit["beta_file"],
            ols_file=modelfit["ols_file"],
            error_file=modelfit["error_file"],
            model_file=modelfit["model_file"],
        ).run().outputs
        # Test output image shapes
        assert nib.load(out.contrast_file).shape[-1] == n_contrasts
        assert nib.load(out.variance_file).shape[-1] == n_contrasts
        assert nib.load(out.tstat_file).shape[-1] == n_contrasts
        assert len(np.loadtxt(out.name_file, str)) == n_contrasts
    def test_model_results(self, execdir, modelres):
        # ModelResults should create one result directory per contrast name.
        out = model.ModelResults(
            info=modelres["info"].trait_get(),
            anat_file=modelres["anat_file"],
            contrast_files=modelres["contrast_files"],
            variance_files=modelres["variance_files"],
            name_files=modelres["name_files"],
        ).run().outputs
        contrast_names = [c for c, _, _ in modelres["info"].contrasts]
        result_directories = [execdir.join(c) for c in contrast_names]
        assert out.result_directories == result_directories
| {
"content_hash": "4d640a2e4bd52584affb35b31241b1e9",
"timestamp": "",
"source": "github",
"line_count": 340,
"max_line_length": 78,
"avg_line_length": 35.358823529411765,
"alnum_prop": 0.5686241889868574,
"repo_name": "mwaskom/lyman",
"id": "ff49c7cbd9adefb99e91775524706b07b09265b7",
"size": "12022",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lyman/workflows/tests/test_model.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "152"
},
{
"name": "Python",
"bytes": "321558"
}
],
"symlink_target": ""
} |
"""
This module converts requested URLs to callback view functions.
RegexURLResolver is the main class here. Its resolve() method takes a URL (as
a string) and returns a ResolverMatch object which provides access to all
attributes of the resolved URL match.
"""
import functools
import re
import threading
from importlib import import_module
from urllib.parse import quote
from django.conf import settings
from django.core.checks import Warning
from django.core.checks.urls import check_resolver
from django.core.exceptions import ImproperlyConfigured
from django.utils.datastructures import MultiValueDict
from django.utils.functional import cached_property
from django.utils.http import RFC3986_SUBDELIMS
from django.utils.regex_helper import normalize
from django.utils.translation import get_language
from .exceptions import NoReverseMatch, Resolver404
from .utils import get_callable
class ResolverMatch:
    """Holds the resolved view callable together with its arguments and
    namespace information. Unpacks like a (func, args, kwargs) tuple."""
    def __init__(self, func, args, kwargs, url_name=None, app_names=None, namespaces=None):
        self.func = func
        self.args = args
        self.kwargs = kwargs
        self.url_name = url_name
        # If a URLRegexResolver doesn't have a namespace or app_name, it passes
        # in an empty value — drop those empties here.
        self.app_names = [entry for entry in app_names if entry] if app_names else []
        self.app_name = ':'.join(self.app_names)
        self.namespaces = [entry for entry in namespaces if entry] if namespaces else []
        self.namespace = ':'.join(self.namespaces)
        if hasattr(func, '__name__'):
            # A function-based view
            self._func_path = func.__module__ + '.' + func.__name__
        else:
            # A class-based view
            self._func_path = func.__class__.__module__ + '.' + func.__class__.__name__
        view_path = url_name or self._func_path
        self.view_name = ':'.join(self.namespaces + [view_path])
    def __getitem__(self, index):
        as_tuple = (self.func, self.args, self.kwargs)
        return as_tuple[index]
    def __repr__(self):
        return "ResolverMatch(func=%s, args=%s, kwargs=%s, url_name=%s, app_names=%s, namespaces=%s)" % (
            self._func_path, self.args, self.kwargs, self.url_name,
            self.app_names, self.namespaces,
        )
@functools.lru_cache(maxsize=None)
def get_resolver(urlconf=None):
    """Return a cached root URL resolver for `urlconf`, defaulting to
    settings.ROOT_URLCONF when no urlconf is given."""
    if urlconf is not None:
        return RegexURLResolver(r'^/', urlconf)
    from django.conf import settings
    return RegexURLResolver(r'^/', settings.ROOT_URLCONF)
@functools.lru_cache(maxsize=None)
def get_ns_resolver(ns_pattern, resolver):
    """Return a cached resolver that nests `resolver`'s patterns under
    `ns_pattern`. This makes it possible to have captured parameters in
    the parent URLconf pattern."""
    inner = RegexURLResolver(ns_pattern, resolver.url_patterns)
    return RegexURLResolver(r'^/', [inner])
class LocaleRegexDescriptor:
    def __get__(self, instance, cls=None):
        """
        Return a compiled regular expression based on the active language.
        """
        if instance is None:
            return self
        # As a performance optimization, if the given regex string is a regular
        # string (not a lazily-translated string proxy), compile it once and
        # avoid per-language compilation.
        pattern = instance._regex
        if isinstance(pattern, str):
            compiled = self._compile(pattern)
            instance.__dict__['regex'] = compiled
            return compiled
        language_code = get_language()
        cache = instance._regex_dict
        if language_code not in cache:
            cache[language_code] = self._compile(str(pattern))
        return cache[language_code]
    def _compile(self, regex):
        """
        Compile and return the given regular expression, converting re.error
        into ImproperlyConfigured with a descriptive message.
        """
        try:
            return re.compile(regex)
        except re.error as e:
            raise ImproperlyConfigured(
                '"%s" is not a valid regular expression: %s' % (regex, e)
            )
class LocaleRegexProvider:
    """
    A mixin to provide a default regex property which can vary by active
    language.
    """
    def __init__(self, regex):
        # regex is either a string representing a regular expression, or a
        # translatable string (using gettext_lazy) representing a regular
        # expression.
        self._regex = regex
        self._regex_dict = {}
    regex = LocaleRegexDescriptor()
    def describe(self):
        """
        Format the URL pattern for display in warning messages.
        """
        text = "'{}'".format(self.regex.pattern)
        name = getattr(self, 'name', False)
        if name:
            text += " [name='{}']".format(name)
        return text
    def _check_pattern_startswith_slash(self):
        """
        Check that the pattern does not begin with a forward slash.
        """
        # Accessing self.regex here also forces compilation, matching the
        # original evaluation order.
        regex_pattern = self.regex.pattern
        if not settings.APPEND_SLASH:
            # Skip check as it can be useful to start a URL pattern with a slash
            # when APPEND_SLASH=False.
            return []
        starts_with_slash = regex_pattern.startswith(('/', '^/'))
        if starts_with_slash and not regex_pattern.endswith('/'):
            return [Warning(
                "Your URL pattern {} has a regex beginning with a '/'. Remove this "
                "slash as it is unnecessary. If this pattern is targeted in an "
                "include(), ensure the include() pattern has a trailing '/'.".format(
                    self.describe()
                ),
                id="urls.W002",
            )]
        return []
class RegexURLPattern(LocaleRegexProvider):
    """A single URL pattern mapping a regex to a view callable."""
    def __init__(self, regex, callback, default_args=None, name=None):
        LocaleRegexProvider.__init__(self, regex)
        self.callback = callback  # the view
        self.default_args = default_args or {}
        self.name = name
    def __repr__(self):
        return '<%s %s %s>' % (self.__class__.__name__, self.name, self.regex.pattern)
    def check(self):
        # Name problems take precedence; slash checks run only when the
        # name is clean.
        warnings = self._check_pattern_name()
        return warnings or self._check_pattern_startswith_slash()
    def _check_pattern_name(self):
        """
        Check that the pattern name does not contain a colon.
        """
        if self.name is None or ":" not in self.name:
            return []
        return [Warning(
            "Your URL pattern {} has a name including a ':'. Remove the colon, to "
            "avoid ambiguous namespace references.".format(self.describe()),
            id="urls.W003",
        )]
    def resolve(self, path):
        """Match `path` against this pattern; return a ResolverMatch or None."""
        match = self.regex.search(path)
        if not match:
            return None
        # If there are any named groups, use those as kwargs, ignoring
        # non-named groups. Otherwise, pass all non-named arguments as
        # positional arguments.
        kwargs = match.groupdict()
        args = () if kwargs else match.groups()
        # In both cases, pass any extra_kwargs as **kwargs.
        kwargs.update(self.default_args)
        return ResolverMatch(self.callback, args, kwargs, self.name)
    @cached_property
    def lookup_str(self):
        """
        A string that identifies the view (e.g. 'path.to.view_function' or
        'path.to.ClassBasedView').
        """
        callback = self.callback
        # Python 3.5 collapses nested partials, so can change "while" to "if"
        # when it's the minimum supported version.
        while isinstance(callback, functools.partial):
            callback = callback.func
        if hasattr(callback, '__name__'):
            return callback.__module__ + "." + callback.__qualname__
        return callback.__module__ + "." + callback.__class__.__name__
class RegexURLResolver(LocaleRegexProvider):
def __init__(self, regex, urlconf_name, default_kwargs=None, app_name=None, namespace=None):
LocaleRegexProvider.__init__(self, regex)
# urlconf_name is the dotted Python path to the module defining
# urlpatterns. It may also be an object with an urlpatterns attribute
# or urlpatterns itself.
self.urlconf_name = urlconf_name
self.callback = None
self.default_kwargs = default_kwargs or {}
self.namespace = namespace
self.app_name = app_name
self._reverse_dict = {}
self._namespace_dict = {}
self._app_dict = {}
# set of dotted paths to all functions and classes that are used in
# urlpatterns
self._callback_strs = set()
self._populated = False
self._local = threading.local()
def __repr__(self):
if isinstance(self.urlconf_name, list) and len(self.urlconf_name):
# Don't bother to output the whole list, it can be huge
urlconf_repr = '<%s list>' % self.urlconf_name[0].__class__.__name__
else:
urlconf_repr = repr(self.urlconf_name)
return '<%s %s (%s:%s) %s>' % (
self.__class__.__name__, urlconf_repr, self.app_name,
self.namespace, self.regex.pattern,
)
def check(self):
warnings = self._check_include_trailing_dollar()
for pattern in self.url_patterns:
warnings.extend(check_resolver(pattern))
if not warnings:
warnings = self._check_pattern_startswith_slash()
return warnings
def _check_include_trailing_dollar(self):
"""
Check that include is not used with a regex ending with a dollar.
"""
regex_pattern = self.regex.pattern
if regex_pattern.endswith('$') and not regex_pattern.endswith(r'\$'):
warning = Warning(
"Your URL pattern {} uses include with a regex ending with a '$'. "
"Remove the dollar from the regex to avoid problems including "
"URLs.".format(self.describe()),
id="urls.W001",
)
return [warning]
else:
return []
def _populate(self):
# Short-circuit if called recursively in this thread to prevent
# infinite recursion. Concurrent threads may call this at the same
# time and will need to continue, so set 'populating' on a
# thread-local variable.
if getattr(self._local, 'populating', False):
return
self._local.populating = True
lookups = MultiValueDict()
namespaces = {}
apps = {}
language_code = get_language()
for pattern in reversed(self.url_patterns):
if isinstance(pattern, RegexURLPattern):
self._callback_strs.add(pattern.lookup_str)
p_pattern = pattern.regex.pattern
if p_pattern.startswith('^'):
p_pattern = p_pattern[1:]
if isinstance(pattern, RegexURLResolver):
if pattern.namespace:
namespaces[pattern.namespace] = (p_pattern, pattern)
if pattern.app_name:
apps.setdefault(pattern.app_name, []).append(pattern.namespace)
else:
parent_pat = pattern.regex.pattern
for name in pattern.reverse_dict:
for matches, pat, defaults in pattern.reverse_dict.getlist(name):
new_matches = normalize(parent_pat + pat)
lookups.appendlist(
name,
(
new_matches,
p_pattern + pat,
dict(defaults, **pattern.default_kwargs),
)
)
for namespace, (prefix, sub_pattern) in pattern.namespace_dict.items():
namespaces[namespace] = (p_pattern + prefix, sub_pattern)
for app_name, namespace_list in pattern.app_dict.items():
apps.setdefault(app_name, []).extend(namespace_list)
if not getattr(pattern._local, 'populating', False):
pattern._populate()
self._callback_strs.update(pattern._callback_strs)
else:
bits = normalize(p_pattern)
lookups.appendlist(pattern.callback, (bits, p_pattern, pattern.default_args))
if pattern.name is not None:
lookups.appendlist(pattern.name, (bits, p_pattern, pattern.default_args))
self._reverse_dict[language_code] = lookups
self._namespace_dict[language_code] = namespaces
self._app_dict[language_code] = apps
self._populated = True
self._local.populating = False
@property
def reverse_dict(self):
language_code = get_language()
if language_code not in self._reverse_dict:
self._populate()
return self._reverse_dict[language_code]
@property
def namespace_dict(self):
language_code = get_language()
if language_code not in self._namespace_dict:
self._populate()
return self._namespace_dict[language_code]
@property
def app_dict(self):
language_code = get_language()
if language_code not in self._app_dict:
self._populate()
return self._app_dict[language_code]
    def _is_callback(self, name):
        """Return True if *name* is the lookup string of a registered view callback."""
        if not self._populated:
            self._populate()
        return name in self._callback_strs
    def resolve(self, path):
        """Resolve *path* to a ResolverMatch by trying each sub-pattern in turn.

        Raises ``Resolver404`` carrying the list of patterns tried (so callers
        can build a useful debug page) when nothing matches.
        """
        path = str(path)  # path may be a reverse_lazy object
        tried = []
        match = self.regex.search(path)
        if match:
            # Strip this resolver's own prefix before delegating downward.
            new_path = path[match.end():]
            for pattern in self.url_patterns:
                try:
                    sub_match = pattern.resolve(new_path)
                except Resolver404 as e:
                    # Accumulate the failing sub-pattern chains for the
                    # final Resolver404 payload.
                    sub_tried = e.args[0].get('tried')
                    if sub_tried is not None:
                        tried.extend([pattern] + t for t in sub_tried)
                    else:
                        tried.append([pattern])
                else:
                    if sub_match:
                        # Merge captured arguments in match with submatch
                        sub_match_dict = dict(match.groupdict(), **self.default_kwargs)
                        sub_match_dict.update(sub_match.kwargs)
                        # If there are *any* named groups, ignore all non-named groups.
                        # Otherwise, pass all non-named arguments as positional arguments.
                        sub_match_args = sub_match.args
                        if not sub_match_dict:
                            sub_match_args = match.groups() + sub_match.args
                        return ResolverMatch(
                            sub_match.func,
                            sub_match_args,
                            sub_match_dict,
                            sub_match.url_name,
                            [self.app_name] + sub_match.app_names,
                            [self.namespace] + sub_match.namespaces,
                        )
                    tried.append([pattern])
            raise Resolver404({'tried': tried, 'path': new_path})
        raise Resolver404({'path': path})
@cached_property
def urlconf_module(self):
if isinstance(self.urlconf_name, str):
return import_module(self.urlconf_name)
else:
return self.urlconf_name
@cached_property
def url_patterns(self):
# urlconf_module might be a valid set of patterns, so we default to it
patterns = getattr(self.urlconf_module, "urlpatterns", self.urlconf_module)
try:
iter(patterns)
except TypeError:
msg = (
"The included URLconf '{name}' does not appear to have any "
"patterns in it. If you see valid patterns in the file then "
"the issue is probably caused by a circular import."
)
raise ImproperlyConfigured(msg.format(name=self.urlconf_name))
return patterns
def resolve_error_handler(self, view_type):
callback = getattr(self.urlconf_module, 'handler%s' % view_type, None)
if not callback:
# No handler specified in file; use lazy import, since
# django.conf.urls imports this file.
from django.conf import urls
callback = getattr(urls, 'handler%s' % view_type)
return get_callable(callback), {}
    def reverse(self, lookup_view, *args, **kwargs):
        """Reverse *lookup_view* to a URL path (delegates with an empty prefix)."""
        return self._reverse_with_prefix(lookup_view, '', *args, **kwargs)
    def _reverse_with_prefix(self, lookup_view, _prefix, *args, **kwargs):
        """Build a URL for *lookup_view* from positional OR keyword arguments.

        Tries every registered ``(result, params)`` possibility for the view
        and returns the first candidate URL that round-trips through the
        pattern's regex; raises ``NoReverseMatch`` with a descriptive message
        otherwise.
        """
        if args and kwargs:
            raise ValueError("Don't mix *args and **kwargs in call to reverse()!")
        # Substitution values must be strings before being %-formatted below.
        text_args = [str(v) for v in args]
        text_kwargs = {k: str(v) for (k, v) in kwargs.items()}
        if not self._populated:
            self._populate()
        possibilities = self.reverse_dict.getlist(lookup_view)
        for possibility, pattern, defaults in possibilities:
            for result, params in possibility:
                if args:
                    # Positional mode: the arg count must match exactly.
                    if len(args) != len(params):
                        continue
                    candidate_subs = dict(zip(params, text_args))
                else:
                    # Keyword mode: provided + default keys must cover the
                    # pattern's parameters exactly, and any overridden
                    # defaults must keep their default value.
                    if set(kwargs) | set(defaults) != set(params) | set(defaults):
                        continue
                    matches = True
                    for k, v in defaults.items():
                        if kwargs.get(k, v) != v:
                            matches = False
                            break
                    if not matches:
                        continue
                    candidate_subs = text_kwargs
                # WSGI provides decoded URLs, without %xx escapes, and the URL
                # resolver operates on such URLs. First substitute arguments
                # without quoting to build a decoded URL and look for a match.
                # Then, if we have a match, redo the substitution with quoted
                # arguments in order to return a properly encoded URL.
                candidate_pat = _prefix.replace('%', '%%') + result
                if re.search('^%s%s' % (re.escape(_prefix), pattern), candidate_pat % candidate_subs):
                    # safe characters from `pchar` definition of RFC 3986
                    url = quote(candidate_pat % candidate_subs, safe=RFC3986_SUBDELIMS + '/~:@')
                    # Don't allow construction of scheme relative urls.
                    if url.startswith('//'):
                        url = '/%%2F%s' % url[2:]
                    return url
        # lookup_view can be URL name or callable, but callables are not
        # friendly in error messages.
        m = getattr(lookup_view, '__module__', None)
        n = getattr(lookup_view, '__name__', None)
        if m is not None and n is not None:
            lookup_view_s = "%s.%s" % (m, n)
        else:
            lookup_view_s = lookup_view
        patterns = [pattern for (possibility, pattern, defaults) in possibilities]
        if patterns:
            if args:
                arg_msg = "arguments '%s'" % (args,)
            elif kwargs:
                arg_msg = "keyword arguments '%s'" % (kwargs,)
            else:
                arg_msg = "no arguments"
            msg = (
                "Reverse for '%s' with %s not found. %d pattern(s) tried: %s" %
                (lookup_view_s, arg_msg, len(patterns), patterns)
            )
        else:
            msg = (
                "Reverse for '%(view)s' not found. '%(view)s' is not "
                "a valid view function or pattern name." % {'view': lookup_view_s}
            )
        raise NoReverseMatch(msg)
class LocaleRegexURLResolver(RegexURLResolver):
    """
    A URL resolver that always matches the active language code as URL prefix.

    Rather than taking a regex argument, we just override the ``regex``
    function to always return the active language-code as regex.
    """

    def __init__(
        self, urlconf_name, default_kwargs=None, app_name=None, namespace=None,
        prefix_default_language=True,
    ):
        super().__init__(None, urlconf_name, default_kwargs, app_name, namespace)
        self.prefix_default_language = prefix_default_language

    @property
    def regex(self):
        # Compile and cache one prefix regex per active language.
        lang = get_language() or settings.LANGUAGE_CODE
        if lang not in self._regex_dict:
            skip_prefix = (
                lang == settings.LANGUAGE_CODE and not self.prefix_default_language
            )
            pattern = '' if skip_prefix else '^%s/' % lang
            self._regex_dict[lang] = re.compile(pattern)
        return self._regex_dict[lang]
| {
"content_hash": "a6327145aec1b76f4a70dc0c17c79051",
"timestamp": "",
"source": "github",
"line_count": 514,
"max_line_length": 113,
"avg_line_length": 40.929961089494164,
"alnum_prop": 0.5641220648350603,
"repo_name": "whs/django",
"id": "03bac717f810d0555bc9ad3506d872e945bd0f00",
"size": "21038",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "django/urls/resolvers.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "55935"
},
{
"name": "HTML",
"bytes": "203581"
},
{
"name": "JavaScript",
"bytes": "252581"
},
{
"name": "Makefile",
"bytes": "125"
},
{
"name": "Python",
"bytes": "11896742"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "130"
}
],
"symlink_target": ""
} |
import re, operator
from flask import Blueprint, request, render_template, flash, g, session, \
redirect, url_for, jsonify, abort, make_response, Response
from dataviva import db
from dataviva.utils.make_query import make_query
from dataviva.secex.models import Yb_secex, Yw, Yp, Ybw, Ybp, Ypw, Ybpw
# Blueprint exposing the SECEX trade-data API routes under /secex.
mod = Blueprint('secex', __name__, url_prefix='/secex')


@mod.errorhandler(404)
def page_not_found(error):
    """Return the raised error payload with an explicit 404 status code."""
    return error, 404


@mod.after_request
def per_request_callbacks(response):
    """Stamp gzip headers on non-redirect, non-CSV responses.

    NOTE(review): only the headers are set here -- the body is presumably
    compressed elsewhere in the stack; confirm, otherwise clients will try
    to gunzip plain data.
    """
    if response.status_code != 302 and response.mimetype != "text/csv":
        response.headers['Content-Encoding'] = 'gzip'
        response.headers['Content-Length'] = str(len(response.data))
    return response
############################################################
# ----------------------------------------------------------
# 2 variable views
#
############################################################
@mod.route('/all/<bra_id>/all/all/')
@mod.route('/<year>/<bra_id>/all/all/')
def secex_yb(**kwargs):
    """Query the Yb_secex table; URL captures (year, bra_id) arrive in **kwargs."""
    return make_response(make_query(Yb_secex, request.args, g.locale, **kwargs))


@mod.route('/all/all/<hs_id>/all/')
@mod.route('/<year>/all/<hs_id>/all/')
def secex_yp(**kwargs):
    """Query the Yp table; URL captures (year, hs_id) arrive in **kwargs."""
    return make_response(make_query(Yp, request.args, g.locale, **kwargs))


@mod.route('/all/all/all/<wld_id>/')
@mod.route('/<year>/all/all/<wld_id>/')
def secex_yw(**kwargs):
    """Query the Yw table; URL captures (year, wld_id) arrive in **kwargs."""
    return make_response(make_query(Yw, request.args, g.locale, **kwargs))
############################################################
# ----------------------------------------------------------
# 3 variable views
#
############################################################
@mod.route('/all/<bra_id>/all/<wld_id>/')
@mod.route('/<year>/<bra_id>/all/<wld_id>/')
def secex_ybw(**kwargs):
    """Query the Ybw table with the captured URL parameters."""
    return make_response(make_query(Ybw, request.args, g.locale, **kwargs))


@mod.route('/all/<bra_id>/<hs_id>/all/')
@mod.route('/<year>/<bra_id>/<hs_id>/all/')
def secex_ybp(**kwargs):
    """Query the Ybp table, joining in the 'pci' column from Yp."""
    # Join the 'pci' column from the Yp table on (year, hs_id).
    kwargs["join"] = [{
        "table": Yp,
        "columns": ["pci"],
        "on": ["year", "hs_id"]
    }]
    return make_response(make_query(Ybp, request.args, g.locale, **kwargs))


@mod.route('/all/all/<hs_id>/<wld_id>/')
@mod.route('/<year>/all/<hs_id>/<wld_id>/')
def secex_ypw(**kwargs):
    """Query the Ypw table with the captured URL parameters."""
    return make_response(make_query(Ypw, request.args, g.locale, **kwargs))
############################################################
# ----------------------------------------------------------
# 4 variable views
#
############################################################
@mod.route('/all/<bra_id>/<hs_id>/<wld_id>/')
@mod.route('/<year>/<bra_id>/<hs_id>/<wld_id>/')
def secex_ybpw(**kwargs):
    """Query the Ybpw table with all four captured URL parameters."""
    return make_response(make_query(Ybpw, request.args, g.locale, **kwargs)) | {
"content_hash": "6e9d34ad2f39cd482602f1ecb0d7bbfd",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 80,
"avg_line_length": 36.41558441558441,
"alnum_prop": 0.503922967189729,
"repo_name": "dogobox/datavivamaster",
"id": "1e86bb76591f1593961e913e88770e47264b09b6",
"size": "2804",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dataviva/secex/views.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "223703"
},
{
"name": "JavaScript",
"bytes": "205565"
},
{
"name": "Python",
"bytes": "244375"
},
{
"name": "Shell",
"bytes": "198"
}
],
"symlink_target": ""
} |
from django.http import Http404
from django.conf import settings
from .views import page
class PageFallbackMiddleware(object):
    """Retry 404 responses against the ``page`` view using the request path.

    NOTE(review): this appears to be an old-style (pre-Django-1.10)
    response middleware -- confirm against the project's MIDDLEWARE setting.
    """

    def process_response(self, request, response):
        if response.status_code != 404:
            return response # No need to check for a flatpage for non-404 responses.
        try:
            return page(request, request.path_info, True)
        # Return the original response if any errors happened. Because this
        # is a middleware, we can't assume the errors will be caught elsewhere.
        except Http404:
            return response
        # NOTE(review): the bare 'except' below also swallows SystemExit and
        # KeyboardInterrupt; 'except Exception' would be safer.
        except:
            if settings.DEBUG:
                raise
            return response | {
"content_hash": "238daac323f286002267b70433b6da08",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 84,
"avg_line_length": 35.78947368421053,
"alnum_prop": 0.6470588235294118,
"repo_name": "kimus/django-blocks",
"id": "bc88051add7ec35b2600bb2ea750f231cb8b2bf5",
"size": "680",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "blocks/middleware.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2345"
},
{
"name": "JavaScript",
"bytes": "23810"
},
{
"name": "Python",
"bytes": "111560"
}
],
"symlink_target": ""
} |
from elasticsearch import Elasticsearch
from id_getter import get_ids_with_response_status
import time
import sys
if __name__ == '__main__':
    # Python 2 script: dumps 200-status auction responses from a local
    # Elasticsearch "gor" index into the file named by the first CLI arg.
    es = Elasticsearch(['http://localhost:9200'])
    print "Starting to extract responses from Elasticsearch"
    filename = sys.argv[1]
    with open(str(filename), 'w') as output_file:
        status = 200
        # NOTE(review): time.clock() is CPU time on Unix (removed in Py3.8);
        # time.time() was probably intended for wall-clock timing -- confirm.
        start_time = time.clock()
        ids_list = get_ids_with_response_status(str(status))
        # Summary counters: all docs vs. docs whose Resp_Status matches.
        total_num_docs = es.count(index="gor", body={"query": {"match_all": {}}})['count']
        filtered_num = es.count(index="gor", body={"query": {"bool": {"must": { "match": { "Resp_Status": str(status) }}}}})['count']
        output_file.write("\n")
        output_file.write("Total number of docs(auctions) in the index is: " + str(total_num_docs))
        output_file.write("\n")
        output_file.write("Number of docs(auctions) that has 200 and non octet-stream response body is: " + str(filtered_num))
        output_file.write("\n")
        output_file.write("Responses...\n")
        for doc_id in ids_list:
            # Fetch only the two placement fields for each matching doc.
            #extracted_output = es.get(index="gor", id=str(doc_id), doc_type="RequestResponse", fields="Resp_Body")
            extracted_competingIds = es.get(index="gor", id=str(doc_id), doc_type="RequestResponse", fields="Resp_Competing-Placements")
            extracted_winningId = es.get(index="gor", id=str(doc_id), doc_type="RequestResponse", fields="Resp_Winning-Placement")
            #if 'fields' in extracted_output.keys():
                #if type(extracted_output['fields']['Resp_Body'][0]) == unicode:
                    #write_body = repr(extracted_output['fields']['Resp_Body'][0])
                #else:
                    #write_body = str(extracted_output['fields']['Resp_Body'][0])
            # 'fields' is absent when the doc lacks the requested fields.
            if 'fields' in extracted_competingIds.keys():
                output_file.write("\n")
                output_file.write("\n")
                output_file.write("COMPETING PLACEMENTS: " + str(extracted_competingIds['fields']['Resp_Competing-Placements'][0]))
                output_file.write("\n")
                output_file.write("WINNING PLACEMENT: " + str(extracted_winningId['fields']['Resp_Winning-Placement'][0]))
                output_file.write("\n")
                output_file.write("--------------------------------------------Next Response----------------------------------------------------------")
    print "...Done"
    print "It took --- %s seconds --- " % (time.clock() - start_time)
| {
"content_hash": "6a6dc30bd874d2b96b598dcabf97e421",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 138,
"avg_line_length": 47.16326530612245,
"alnum_prop": 0.617914322803981,
"repo_name": "cemkoc/GorReplay_and_Log",
"id": "ad885e78cbfafa814a2ced48bce077b3ae39b21f",
"size": "2334",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "create_GorAuction_logs.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "5525"
},
{
"name": "Shell",
"bytes": "1888"
}
],
"symlink_target": ""
} |
import json
import threading
__author__ = 'Daniel Puschmann'
import abc
import os
from virtualisation.misc.jsonobject import JSONObject
from messagebus.rabbitmq import RabbitMQ
from virtualisation.misc.threads import QueueThread
from virtualisation.annotation.genericannotation import GenericAnnotation
from virtualisation.triplestore.threadedtriplestoreadapter import ThreadedTriplestoreAdapter
import zipfile
class AbstractEventWrapper(object):
    """Base class for event wrappers (Python 2 style).

    Receives events from the RabbitMQ message bus, annotates them via
    ``GenericAnnotation`` and republishes the annotated graphs. Subclasses
    must implement :meth:`getEventDescription`.
    """
    __metaclass__ = abc.ABCMeta  # Python 2 ABC declaration

    def __init__(self):
        # Global configuration lives one directory above this package.
        self.config = JSONObject(file(os.path.join(os.path.dirname(__file__), "..", "config.json"), "rb"))
        self.host = self.config.rabbitmq.host
        self.port = self.config.rabbitmq.port
        # self.rabbitmqconnection, self.rabbitmqchannel = RabbitMQ.establishConnection(self.host, self.port)
        # Worker queues decouple message-bus I/O from annotation work.
        self.messageBusReceiveQueue = QueueThread(handler=self.receiveEventHandler)
        self.messageBusSendQueue = QueueThread(handler=self.sendAnnotatedEventHandler)
        self.wrappers = []
        self.splitters = None
        self.annotator = GenericAnnotation()
        self.exchange = RabbitMQ.exchange_annotated_event

    @abc.abstractmethod
    def getEventDescription(self):
        """
        :return: a event description
        """
        pass

    @classmethod
    def getFileObject(cls, currentfile, filename, mode="r"):
        """Open *filename* relative to *currentfile*, reading from a
        surrounding .zip archive transparently when there is one."""
        parent = os.path.dirname(currentfile)
        if parent.endswith(".zip"):
            zFile = zipfile.ZipFile(parent)
            return zFile.open(filename, mode)
        else:
            return file(os.path.join(parent, filename), mode)

    # def addWrapper(self, wrapper):
    #     """
    #     adds a wrapper to the internal wrapper list
    #     :param wrapper:
    #     :return:
    #     """
    #     if not isinstance(wrapper, AbstractEventWrapper):
    #         raise Exception(error="trying to add a wrapper of the wrong instance. Requires AbstractEventWRapper")
    #     self.wrappers.append(wrapper)

    def start(self):
        """Declare an anonymous queue and bind it to this event's routing key.

        NOTE(review, was inline): "The ResourceManagement declares all
        available exchanges. I guess this is unnecessary therefore."
        """
        # RabbitMQ.declareExchange(self.rabbitmqchannel, self.exchange, _type="topic")
        queue = RabbitMQ.channel.queue_declare()
        queue_name = queue.method.queue
        # in the following line the exchange should be RabbitMQ.exchange_event
        RabbitMQ.channel.queue_bind(exchange=self.exchange, queue=queue_name,
                                    routing_key=self.getEventDescription().messagebus.routingKey)

    def run(self):
        """Start listening on the event detection component in a new thread."""
        # self.__forEachWrapper("run")
        self.runthread = threading.Thread(target=self._run)
        self.runthread.start()

    def _run(self):
        # NOTE(review): self.channel is never assigned in this class --
        # presumably RabbitMQ.channel is meant; confirm before use.
        self.channel.basic_consume(self.receiveEventHandler, no_ack=True)
        self.channel.start_consuming()

    def receiveEventHandler(self, channel, method, properties, body):
        """
        Receives messages through the message bus, annotates the event
        and queues the annotated event for sending.
        :param channel:
        :param method:
        :param properties:
        :param body: JSON-encoded event payload
        :return:
        """
        event = json.loads(body)
        annotatedevent = self.annotateEvent(event)
        self.messageBusSendQueue.add(annotatedevent)

    def annotateEvent(self, event):
        """
        Annotates the event and saves the graph in the triple store
        :param event:
        :return: returns the annotated graph of the event
        """
        graph = self.annotator.annotateEvent(event, self.getEventDescription())
        ThreadedTriplestoreAdapter.getOrMake(self.getEventDescription().graphname)
        return graph

    def sendAnnotatedEventHandler(self, annotatedevent):
        """Serialize the annotated graph as N3 and publish it on the bus."""
        key = self.getEventDescription().messagebus.routingKey
        message = annotatedevent.serialize(format='n3')
        RabbitMQ.sendMessage(message, self.exchange, key) | {
"content_hash": "1414809ad4a575e623faf41b659d3596",
"timestamp": "",
"source": "github",
"line_count": 105,
"max_line_length": 116,
"avg_line_length": 38.34285714285714,
"alnum_prop": 0.6686537506209638,
"repo_name": "CityPulse/CP_Resourcemanagement",
"id": "a9d5cd68c5a90d6e4d4562ac4c16d1f8cd0fb15c",
"size": "4026",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "virtualisation/events/abstracteventwrapper.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1373596"
},
{
"name": "Shell",
"bytes": "708"
}
],
"symlink_target": ""
} |
"""Tests for tensorflow.ops.test_util."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
import random
import threading
import unittest
import weakref
from absl.testing import parameterized
import numpy as np
from google.protobuf import text_format
from tensorflow.core.framework import graph_pb2
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.python import pywrap_sanitizers
from tensorflow.python.compat import compat
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import combinations
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import test_ops # pylint: disable=unused-import
from tensorflow.python.framework import test_util
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
class TestUtilTest(test_util.TensorFlowTestCase, parameterized.TestCase):
def test_assert_ops_in_graph(self):
with ops.Graph().as_default():
constant_op.constant(["hello", "taffy"], name="hello")
test_util.assert_ops_in_graph({"hello": "Const"}, ops.get_default_graph())
self.assertRaises(ValueError, test_util.assert_ops_in_graph,
{"bye": "Const"}, ops.get_default_graph())
self.assertRaises(ValueError, test_util.assert_ops_in_graph,
{"hello": "Variable"}, ops.get_default_graph())
@test_util.run_deprecated_v1
def test_session_functions(self):
with self.test_session() as sess:
sess_ref = weakref.ref(sess)
with self.cached_session(graph=None, config=None) as sess2:
# We make sure that sess2 is sess.
assert sess2 is sess
# We make sure we raise an exception if we use cached_session with
# different values.
with self.assertRaises(ValueError):
with self.cached_session(graph=ops.Graph()) as sess2:
pass
with self.assertRaises(ValueError):
with self.cached_session(force_gpu=True) as sess2:
pass
# We make sure that test_session will cache the session even after the
# with scope.
assert not sess_ref()._closed
with self.session() as unique_sess:
unique_sess_ref = weakref.ref(unique_sess)
with self.session() as sess2:
assert sess2 is not unique_sess
# We make sure the session is closed when we leave the with statement.
assert unique_sess_ref()._closed
def test_assert_equal_graph_def(self):
with ops.Graph().as_default() as g:
def_empty = g.as_graph_def()
constant_op.constant(5, name="five")
constant_op.constant(7, name="seven")
def_57 = g.as_graph_def()
with ops.Graph().as_default() as g:
constant_op.constant(7, name="seven")
constant_op.constant(5, name="five")
def_75 = g.as_graph_def()
# Comparing strings is order dependent
self.assertNotEqual(str(def_57), str(def_75))
# assert_equal_graph_def doesn't care about order
test_util.assert_equal_graph_def(def_57, def_75)
# Compare two unequal graphs
with self.assertRaisesRegex(AssertionError,
r"^Found unexpected node '{{node seven}}"):
test_util.assert_equal_graph_def(def_57, def_empty)
def test_assert_equal_graph_def_hash_table(self):
def get_graph_def():
with ops.Graph().as_default() as g:
x = constant_op.constant([2, 9], name="x")
keys = constant_op.constant([1, 2], name="keys")
values = constant_op.constant([3, 4], name="values")
default = constant_op.constant(-1, name="default")
table = lookup_ops.StaticHashTable(
lookup_ops.KeyValueTensorInitializer(keys, values), default)
_ = table.lookup(x)
return g.as_graph_def()
def_1 = get_graph_def()
def_2 = get_graph_def()
# The unique shared_name of each table makes the graph unequal.
with self.assertRaisesRegex(AssertionError, "hash_table_"):
test_util.assert_equal_graph_def(def_1, def_2,
hash_table_shared_name=False)
# That can be ignored. (NOTE: modifies GraphDefs in-place.)
test_util.assert_equal_graph_def(def_1, def_2,
hash_table_shared_name=True)
def testIsGoogleCudaEnabled(self):
# The test doesn't assert anything. It ensures the py wrapper
# function is generated correctly.
if test_util.IsGoogleCudaEnabled():
print("GoogleCuda is enabled")
else:
print("GoogleCuda is disabled")
def testIsMklEnabled(self):
# This test doesn't assert anything.
# It ensures the py wrapper function is generated correctly.
if test_util.IsMklEnabled():
print("MKL is enabled")
else:
print("MKL is disabled")
@test_util.disable_asan("Skip test if ASAN is enabled.")
def testDisableAsan(self):
self.assertFalse(pywrap_sanitizers.is_asan_enabled())
@test_util.disable_msan("Skip test if MSAN is enabled.")
def testDisableMsan(self):
self.assertFalse(pywrap_sanitizers.is_msan_enabled())
@test_util.disable_tsan("Skip test if TSAN is enabled.")
def testDisableTsan(self):
self.assertFalse(pywrap_sanitizers.is_tsan_enabled())
@test_util.disable_ubsan("Skip test if UBSAN is enabled.")
def testDisableUbsan(self):
self.assertFalse(pywrap_sanitizers.is_ubsan_enabled())
@test_util.run_in_graph_and_eager_modes
def testAssertProtoEqualsStr(self):
graph_str = "node { name: 'w1' op: 'params' }"
graph_def = graph_pb2.GraphDef()
text_format.Merge(graph_str, graph_def)
# test string based comparison
self.assertProtoEquals(graph_str, graph_def)
# test original comparison
self.assertProtoEquals(graph_def, graph_def)
@test_util.run_in_graph_and_eager_modes
def testAssertProtoEqualsAny(self):
# Test assertProtoEquals with a protobuf.Any field.
meta_graph_def_str = """
meta_info_def {
meta_graph_version: "outer"
any_info {
[type.googleapis.com/tensorflow.MetaGraphDef] {
meta_info_def {
meta_graph_version: "inner"
}
}
}
}
"""
meta_graph_def_outer = meta_graph_pb2.MetaGraphDef()
meta_graph_def_outer.meta_info_def.meta_graph_version = "outer"
meta_graph_def_inner = meta_graph_pb2.MetaGraphDef()
meta_graph_def_inner.meta_info_def.meta_graph_version = "inner"
meta_graph_def_outer.meta_info_def.any_info.Pack(meta_graph_def_inner)
self.assertProtoEquals(meta_graph_def_str, meta_graph_def_outer)
self.assertProtoEquals(meta_graph_def_outer, meta_graph_def_outer)
# Check if the assertion failure message contains the content of
# the inner proto.
with self.assertRaisesRegex(AssertionError, r'meta_graph_version: "inner"'):
self.assertProtoEquals("", meta_graph_def_outer)
@test_util.run_in_graph_and_eager_modes
def testNDArrayNear(self):
a1 = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
a2 = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
a3 = np.array([[10.0, 20.0, 30.0], [40.0, 50.0, 60.0]])
self.assertTrue(self._NDArrayNear(a1, a2, 1e-5))
self.assertFalse(self._NDArrayNear(a1, a3, 1e-5))
@test_util.run_in_graph_and_eager_modes
def testCheckedThreadSucceeds(self):
def noop(ev):
ev.set()
event_arg = threading.Event()
self.assertFalse(event_arg.is_set())
t = self.checkedThread(target=noop, args=(event_arg,))
t.start()
t.join()
self.assertTrue(event_arg.is_set())
@test_util.run_in_graph_and_eager_modes
def testCheckedThreadFails(self):
def err_func():
return 1 // 0
t = self.checkedThread(target=err_func)
t.start()
with self.assertRaises(self.failureException) as fe:
t.join()
self.assertTrue("integer division or modulo by zero" in str(fe.exception))
@test_util.run_in_graph_and_eager_modes
def testCheckedThreadWithWrongAssertionFails(self):
x = 37
def err_func():
self.assertTrue(x < 10)
t = self.checkedThread(target=err_func)
t.start()
with self.assertRaises(self.failureException) as fe:
t.join()
self.assertTrue("False is not true" in str(fe.exception))
@test_util.run_in_graph_and_eager_modes
def testMultipleThreadsWithOneFailure(self):
def err_func(i):
self.assertTrue(i != 7)
threads = [
self.checkedThread(
target=err_func, args=(i,)) for i in range(10)
]
for t in threads:
t.start()
for i, t in enumerate(threads):
if i == 7:
with self.assertRaises(self.failureException):
t.join()
else:
t.join()
def _WeMustGoDeeper(self, msg):
with self.assertRaisesOpError(msg):
with ops.Graph().as_default():
node_def = ops._NodeDef("IntOutput", "name")
node_def_orig = ops._NodeDef("IntOutput", "orig")
op_orig = ops.Operation(node_def_orig, ops.get_default_graph())
op = ops.Operation(node_def, ops.get_default_graph(),
original_op=op_orig)
raise errors.UnauthenticatedError(node_def, op, "true_err")
@test_util.run_in_graph_and_eager_modes
def testAssertRaisesOpErrorDoesNotPassMessageDueToLeakedStack(self):
with self.assertRaises(AssertionError):
self._WeMustGoDeeper("this_is_not_the_error_you_are_looking_for")
self._WeMustGoDeeper("true_err")
self._WeMustGoDeeper("name")
self._WeMustGoDeeper("orig")
@test_util.run_in_graph_and_eager_modes
def testAllCloseTensors(self):
a_raw_data = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
a = constant_op.constant(a_raw_data)
b = math_ops.add(1, constant_op.constant([[0, 1, 2], [3, 4, 5], [6, 7, 8]]))
self.assertAllClose(a, b)
self.assertAllClose(a, a_raw_data)
a_dict = {"key": a}
b_dict = {"key": b}
self.assertAllClose(a_dict, b_dict)
x_list = [a, b]
y_list = [a_raw_data, b]
self.assertAllClose(x_list, y_list)
@test_util.run_in_graph_and_eager_modes
def testAllCloseScalars(self):
self.assertAllClose(7, 7 + 1e-8)
with self.assertRaisesRegex(AssertionError, r"Not equal to tolerance"):
self.assertAllClose(7, 7 + 1e-5)
@test_util.run_in_graph_and_eager_modes
def testAllCloseList(self):
with self.assertRaisesRegex(AssertionError, r"not close dif"):
self.assertAllClose([0], [1])
@test_util.run_in_graph_and_eager_modes
def testAllCloseDictToNonDict(self):
with self.assertRaisesRegex(ValueError, r"Can't compare dict to non-dict"):
self.assertAllClose(1, {"a": 1})
with self.assertRaisesRegex(ValueError, r"Can't compare dict to non-dict"):
self.assertAllClose({"a": 1}, 1)
@test_util.run_in_graph_and_eager_modes
def testAllCloseNamedtuples(self):
a = 7
b = (2., 3.)
c = np.ones((3, 2, 4)) * 7.
expected = {"a": a, "b": b, "c": c}
my_named_tuple = collections.namedtuple("MyNamedTuple", ["a", "b", "c"])
# Identity.
self.assertAllClose(expected, my_named_tuple(a=a, b=b, c=c))
self.assertAllClose(
my_named_tuple(a=a, b=b, c=c), my_named_tuple(a=a, b=b, c=c))
@test_util.run_in_graph_and_eager_modes
def testAllCloseDicts(self):
a = 7
b = (2., 3.)
c = np.ones((3, 2, 4)) * 7.
expected = {"a": a, "b": b, "c": c}
# Identity.
self.assertAllClose(expected, expected)
self.assertAllClose(expected, dict(expected))
# With each item removed.
for k in expected:
actual = dict(expected)
del actual[k]
with self.assertRaisesRegex(AssertionError, r"mismatched keys"):
self.assertAllClose(expected, actual)
# With each item changed.
with self.assertRaisesRegex(AssertionError, r"Not equal to tolerance"):
self.assertAllClose(expected, {"a": a + 1e-5, "b": b, "c": c})
with self.assertRaisesRegex(AssertionError, r"Shape mismatch"):
self.assertAllClose(expected, {"a": a, "b": b + (4.,), "c": c})
c_copy = np.array(c)
c_copy[1, 1, 1] += 1e-5
with self.assertRaisesRegex(AssertionError, r"Not equal to tolerance"):
self.assertAllClose(expected, {"a": a, "b": b, "c": c_copy})
@test_util.run_in_graph_and_eager_modes
def testAllCloseListOfNamedtuples(self):
my_named_tuple = collections.namedtuple("MyNamedTuple", ["x", "y"])
l1 = [
my_named_tuple(x=np.array([[2.3, 2.5]]), y=np.array([[0.97, 0.96]])),
my_named_tuple(x=np.array([[3.3, 3.5]]), y=np.array([[0.98, 0.99]]))
]
l2 = [
([[2.3, 2.5]], [[0.97, 0.96]]),
([[3.3, 3.5]], [[0.98, 0.99]]),
]
self.assertAllClose(l1, l2)
@test_util.run_in_graph_and_eager_modes
def testAllCloseNestedStructure(self):
a = {"x": np.ones((3, 2, 4)) * 7, "y": (2, [{"nested": {"m": 3, "n": 4}}])}
self.assertAllClose(a, a)
b = copy.deepcopy(a)
self.assertAllClose(a, b)
# Test mismatched values
b["y"][1][0]["nested"]["n"] = 4.2
with self.assertRaisesRegex(AssertionError,
r"\[y\]\[1\]\[0\]\[nested\]\[n\]"):
self.assertAllClose(a, b)
@test_util.run_in_graph_and_eager_modes
def testArrayNear(self):
a = [1, 2]
b = [1, 2, 5]
with self.assertRaises(AssertionError):
self.assertArrayNear(a, b, 0.001)
a = [1, 2]
b = [[1, 2], [3, 4]]
with self.assertRaises(TypeError):
self.assertArrayNear(a, b, 0.001)
a = [1, 2]
b = [1, 2]
self.assertArrayNear(a, b, 0.001)
@test_util.skip_if(True) # b/117665998
def testForceGPU(self):
with self.assertRaises(errors.InvalidArgumentError):
with self.test_session(force_gpu=True):
# this relies on us not having a GPU implementation for assert, which
# seems sensible
x = constant_op.constant(True)
y = [15]
control_flow_ops.Assert(x, y).run()
@test_util.run_in_graph_and_eager_modes
def testAssertAllCloseAccordingToType(self):
# test plain int
self.assertAllCloseAccordingToType(1, 1, rtol=1e-8, atol=1e-8)
# test float64
self.assertAllCloseAccordingToType(
np.asarray([1e-8], dtype=np.float64),
np.asarray([2e-8], dtype=np.float64),
rtol=1e-8, atol=1e-8
)
self.assertAllCloseAccordingToType(
constant_op.constant([1e-8], dtype=dtypes.float64),
constant_op.constant([2e-8], dtype=dtypes.float64),
rtol=1e-8,
atol=1e-8)
with (self.assertRaises(AssertionError)):
self.assertAllCloseAccordingToType(
np.asarray([1e-7], dtype=np.float64),
np.asarray([2e-7], dtype=np.float64),
rtol=1e-8, atol=1e-8
)
# test float32
self.assertAllCloseAccordingToType(
np.asarray([1e-7], dtype=np.float32),
np.asarray([2e-7], dtype=np.float32),
rtol=1e-8, atol=1e-8,
float_rtol=1e-7, float_atol=1e-7
)
self.assertAllCloseAccordingToType(
constant_op.constant([1e-7], dtype=dtypes.float32),
constant_op.constant([2e-7], dtype=dtypes.float32),
rtol=1e-8,
atol=1e-8,
float_rtol=1e-7,
float_atol=1e-7)
with (self.assertRaises(AssertionError)):
self.assertAllCloseAccordingToType(
np.asarray([1e-6], dtype=np.float32),
np.asarray([2e-6], dtype=np.float32),
rtol=1e-8, atol=1e-8,
float_rtol=1e-7, float_atol=1e-7
)
# test float16
self.assertAllCloseAccordingToType(
np.asarray([1e-4], dtype=np.float16),
np.asarray([2e-4], dtype=np.float16),
rtol=1e-8, atol=1e-8,
float_rtol=1e-7, float_atol=1e-7,
half_rtol=1e-4, half_atol=1e-4
)
self.assertAllCloseAccordingToType(
constant_op.constant([1e-4], dtype=dtypes.float16),
constant_op.constant([2e-4], dtype=dtypes.float16),
rtol=1e-8,
atol=1e-8,
float_rtol=1e-7,
float_atol=1e-7,
half_rtol=1e-4,
half_atol=1e-4)
with (self.assertRaises(AssertionError)):
self.assertAllCloseAccordingToType(
np.asarray([1e-3], dtype=np.float16),
np.asarray([2e-3], dtype=np.float16),
rtol=1e-8, atol=1e-8,
float_rtol=1e-7, float_atol=1e-7,
half_rtol=1e-4, half_atol=1e-4
)
@test_util.run_in_graph_and_eager_modes
def testAssertAllEqual(self):
  """assertAllEqual accepts variables, constants and op outputs alike."""
  i = variables.Variable([100] * 3, dtype=dtypes.int32, name="i")
  j = constant_op.constant([20] * 3, dtype=dtypes.int32, name="j")
  k = math_ops.add(i, j, name="k")
  self.evaluate(variables.global_variables_initializer())
  self.assertAllEqual([100] * 3, i)
  self.assertAllEqual([120] * 3, k)
  self.assertAllEqual([20] * 3, j)
  # The failure message should identify which side mismatched.
  with self.assertRaisesRegex(AssertionError, r"not equal lhs"):
    self.assertAllEqual([0] * 3, k)
@test_util.run_in_graph_and_eager_modes
def testAssertNotAllEqual(self):
  """assertNotAllEqual passes on differing values and reports the custom msg on failure."""
  i = variables.Variable([100], dtype=dtypes.int32, name="i")
  j = constant_op.constant([20], dtype=dtypes.int32, name="j")
  k = math_ops.add(i, j, name="k")
  self.evaluate(variables.global_variables_initializer())
  self.assertNotAllEqual([100] * 3, i)
  self.assertNotAllEqual([120] * 3, k)
  self.assertNotAllEqual([20] * 3, j)
  # When the values ARE equal, the error should include the extra message.
  with self.assertRaisesRegex(
      AssertionError, r"two values are equal at all elements.*extra message"):
    self.assertNotAllEqual([120], k, msg="extra message")
@test_util.run_in_graph_and_eager_modes
def testAssertNotAllClose(self):
  """assertNotAllClose succeeds on distant values and fails on close ones."""
  # Test with arrays
  self.assertNotAllClose([0.1], [0.2])
  with self.assertRaises(AssertionError):
    self.assertNotAllClose([-1.0, 2.0], [-1.0, 2.0])
  # Test with tensors
  x = constant_op.constant([1.0, 1.0], name="x")
  y = math_ops.add(x, x)
  self.assertAllClose([2.0, 2.0], y)
  self.assertNotAllClose([0.9, 1.0], x)
  with self.assertRaises(AssertionError):
    self.assertNotAllClose([1.0, 1.0], x)
@test_util.run_in_graph_and_eager_modes
def testAssertNotAllCloseRTol(self):
  """A wide rtol makes nearby values count as close, so assertNotAllClose fails."""
  # Test with arrays
  with self.assertRaises(AssertionError):
    self.assertNotAllClose([1.1, 2.1], [1.0, 2.0], rtol=0.2)
  # Test with tensors
  x = constant_op.constant([1.0, 1.0], name="x")
  y = math_ops.add(x, x)
  self.assertAllClose([2.0, 2.0], y)
  with self.assertRaises(AssertionError):
    self.assertNotAllClose([0.9, 1.0], x, rtol=0.2)
@test_util.run_in_graph_and_eager_modes
def testAssertNotAllCloseATol(self):
  """A wide atol makes nearby values count as close, so assertNotAllClose fails."""
  # Test with arrays
  with self.assertRaises(AssertionError):
    self.assertNotAllClose([1.1, 2.1], [1.0, 2.0], atol=0.2)
  # Test with tensors
  x = constant_op.constant([1.0, 1.0], name="x")
  y = math_ops.add(x, x)
  self.assertAllClose([2.0, 2.0], y)
  with self.assertRaises(AssertionError):
    self.assertNotAllClose([0.9, 1.0], x, atol=0.2)
@test_util.run_in_graph_and_eager_modes
def testAssertAllGreaterLess(self):
  """assertAllGreater/assertAllLess enforce a strict bound on every element."""
  x = constant_op.constant([100.0, 110.0, 120.0], dtype=dtypes.float32)
  y = constant_op.constant([10.0] * 3, dtype=dtypes.float32)
  z = math_ops.add(x, y)
  self.assertAllClose([110.0, 120.0, 130.0], z)
  self.assertAllGreater(x, 95.0)
  self.assertAllLess(x, 125.0)
  # Bounds inside or beyond the data range must fail for at least one element.
  with self.assertRaises(AssertionError):
    self.assertAllGreater(x, 105.0)
  with self.assertRaises(AssertionError):
    self.assertAllGreater(x, 125.0)
  with self.assertRaises(AssertionError):
    self.assertAllLess(x, 115.0)
  with self.assertRaises(AssertionError):
    self.assertAllLess(x, 95.0)
@test_util.run_in_graph_and_eager_modes
def testAssertAllGreaterLessEqual(self):
  """assertAllGreaterEqual/assertAllLessEqual enforce a non-strict bound on every element."""
  x = constant_op.constant([100.0, 110.0, 120.0], dtype=dtypes.float32)
  y = constant_op.constant([10.0] * 3, dtype=dtypes.float32)
  z = math_ops.add(x, y)
  self.assertAllEqual([110.0, 120.0, 130.0], z)
  self.assertAllGreaterEqual(x, 95.0)
  self.assertAllLessEqual(x, 125.0)
  with self.assertRaises(AssertionError):
    self.assertAllGreaterEqual(x, 105.0)
  with self.assertRaises(AssertionError):
    self.assertAllGreaterEqual(x, 125.0)
  with self.assertRaises(AssertionError):
    self.assertAllLessEqual(x, 115.0)
  with self.assertRaises(AssertionError):
    self.assertAllLessEqual(x, 95.0)
def testAssertAllInRangeWithNonNumericValuesFails(self):
  """assertAllInRange rejects string, complex and boolean tensors outright."""
  s1 = constant_op.constant("Hello, ", name="s1")
  c = constant_op.constant([1 + 2j, -3 + 5j], name="c")
  b = constant_op.constant([False, True], name="b")
  with self.assertRaises(AssertionError):
    self.assertAllInRange(s1, 0.0, 1.0)
  with self.assertRaises(AssertionError):
    self.assertAllInRange(c, 0.0, 1.0)
  with self.assertRaises(AssertionError):
    self.assertAllInRange(b, 0, 1)
@test_util.run_in_graph_and_eager_modes
def testAssertAllInRange(self):
  """Closed bounds include the endpoints; open_*_bound flags exclude them."""
  x = constant_op.constant([10.0, 15.0], name="x")
  self.assertAllInRange(x, 10, 15)
  with self.assertRaises(AssertionError):
    self.assertAllInRange(x, 10, 15, open_lower_bound=True)
  with self.assertRaises(AssertionError):
    self.assertAllInRange(x, 10, 15, open_upper_bound=True)
  with self.assertRaises(AssertionError):
    self.assertAllInRange(
        x, 10, 15, open_lower_bound=True, open_upper_bound=True)
@test_util.run_in_graph_and_eager_modes
def testAssertAllInRangeScalar(self):
  """assertAllInRange works on scalar tensors and rejects NaN."""
  x = constant_op.constant(10.0, name="x")
  nan = constant_op.constant(np.nan, name="nan")
  self.assertAllInRange(x, 5, 15)
  with self.assertRaises(AssertionError):
    self.assertAllInRange(nan, 5, 15)
  with self.assertRaises(AssertionError):
    self.assertAllInRange(x, 10, 15, open_lower_bound=True)
  with self.assertRaises(AssertionError):
    self.assertAllInRange(x, 1, 2)
@test_util.run_in_graph_and_eager_modes
def testAssertAllInRangeErrorMessageEllipses(self):
  """A large out-of-range tensor still raises (its message gets elided)."""
  x_init = np.array([[10.0, 15.0]] * 12)
  x = constant_op.constant(x_init, name="x")
  with self.assertRaises(AssertionError):
    self.assertAllInRange(x, 5, 10)
@test_util.run_in_graph_and_eager_modes
def testAssertAllInRangeDetectsNaNs(self):
  """NaN entries are never in range, regardless of the bounds."""
  x = constant_op.constant(
      [[np.nan, 0.0], [np.nan, np.inf], [np.inf, np.nan]], name="x")
  with self.assertRaises(AssertionError):
    self.assertAllInRange(x, 0.0, 2.0)
@test_util.run_in_graph_and_eager_modes
def testAssertAllInRangeWithInfinities(self):
  """+inf is inside a closed upper bound of inf but outside an open one."""
  x = constant_op.constant([10.0, np.inf], name="x")
  self.assertAllInRange(x, 10, np.inf)
  with self.assertRaises(AssertionError):
    self.assertAllInRange(x, 10, np.inf, open_upper_bound=True)
@test_util.run_in_graph_and_eager_modes
def testAssertAllInSet(self):
  """assertAllInSet accepts list, tuple and set containers of allowed values."""
  b = constant_op.constant([True, False], name="b")
  x = constant_op.constant([13, 37], name="x")
  self.assertAllInSet(b, [False, True])
  self.assertAllInSet(b, (False, True))
  self.assertAllInSet(b, {False, True})
  self.assertAllInSet(x, [0, 13, 37, 42])
  self.assertAllInSet(x, (0, 13, 37, 42))
  self.assertAllInSet(x, {0, 13, 37, 42})
  # Any element outside the allowed set must raise.
  with self.assertRaises(AssertionError):
    self.assertAllInSet(b, [False])
  with self.assertRaises(AssertionError):
    self.assertAllInSet(x, (42,))
def testRandomSeed(self):
  """setUp() must reseed python, numpy and TF RNGs identically each time."""
  # Call setUp again for WithCApi case (since it makes a new default graph
  # after setup).
  # TODO(skyewm): remove this when C API is permanently enabled.
  with context.eager_mode():
    self.setUp()
    a = random.randint(1, 1000)
    a_np_rand = np.random.rand(1)
    a_rand = random_ops.random_normal([1])
    # ensure that randomness in multiple testCases is deterministic.
    self.setUp()
    b = random.randint(1, 1000)
    b_np_rand = np.random.rand(1)
    b_rand = random_ops.random_normal([1])
    self.assertEqual(a, b)
    self.assertEqual(a_np_rand, b_np_rand)
    self.assertAllEqual(a_rand, b_rand)
@test_util.run_in_graph_and_eager_modes
def test_callable_evaluate(self):
  """self.evaluate() accepts a zero-arg callable, not just tensors."""
  def model():
    return resource_variable_ops.ResourceVariable(
        name="same_name",
        initial_value=1) + 1

  with context.eager_mode():
    self.assertEqual(2, self.evaluate(model))
@test_util.run_in_graph_and_eager_modes
def test_nested_tensors_evaluate(self):
  """self.evaluate() recurses into nested dict structures of tensors."""
  expected = {"a": 1, "b": 2, "nested": {"d": 3, "e": 4}}
  nested = {"a": constant_op.constant(1),
            "b": constant_op.constant(2),
            "nested": {"d": constant_op.constant(3),
                       "e": constant_op.constant(4)}}
  self.assertEqual(expected, self.evaluate(nested))
def test_run_in_graph_and_eager_modes(self):
  """The decorator runs the wrapped test once per mode, with and without call parens."""
  l = []

  def inc(self, with_brackets):
    del self  # self argument is required by run_in_graph_and_eager_modes.
    mode = "eager" if context.executing_eagerly() else "graph"
    with_brackets = "with_brackets" if with_brackets else "without_brackets"
    l.append((with_brackets, mode))

  # Used as a bare decorator.
  f = test_util.run_in_graph_and_eager_modes(inc)
  f(self, with_brackets=False)
  # Used as a decorator factory.
  f = test_util.run_in_graph_and_eager_modes()(inc)  # pylint: disable=assignment-from-no-return
  f(self, with_brackets=True)

  self.assertEqual(len(l), 4)
  self.assertEqual(set(l), {
      ("with_brackets", "graph"),
      ("with_brackets", "eager"),
      ("without_brackets", "graph"),
      ("without_brackets", "eager"),
  })
def test_get_node_def_from_graph(self):
  """get_node_def_from_graph returns the matching NodeDef or None."""
  graph_def = graph_pb2.GraphDef()
  node_foo = graph_def.node.add()
  node_foo.name = "foo"
  self.assertIs(test_util.get_node_def_from_graph("foo", graph_def), node_foo)
  self.assertIsNone(test_util.get_node_def_from_graph("bar", graph_def))
def test_run_in_eager_and_graph_modes_test_class(self):
  """Applying the decorator to a class (not a method) raises ValueError."""
  msg = "`run_in_graph_and_eager_modes` only supports test methods.*"
  with self.assertRaisesRegex(ValueError, msg):

    @test_util.run_in_graph_and_eager_modes()
    class Foo(object):
      pass

    del Foo  # Make pylint unused happy.
def test_run_in_eager_and_graph_modes_skip_graph_runs_eager(self):
  """Skipping the graph half still lets the eager half run."""
  modes = []

  def _test(self):
    if not context.executing_eagerly():
      self.skipTest("Skipping in graph mode")
    modes.append("eager" if context.executing_eagerly() else "graph")

  test_util.run_in_graph_and_eager_modes(_test)(self)
  self.assertEqual(modes, ["eager"])
def test_run_in_eager_and_graph_modes_skip_eager_runs_graph(self):
  """Skipping the eager half still lets the graph half run."""
  modes = []

  def _test(self):
    if context.executing_eagerly():
      self.skipTest("Skipping in eager mode")
    modes.append("eager" if context.executing_eagerly() else "graph")

  test_util.run_in_graph_and_eager_modes(_test)(self)
  self.assertEqual(modes, ["graph"])
def test_run_in_graph_and_eager_modes_setup_in_same_mode(self):
  """setUp() must execute in the same mode as the test body it precedes."""
  modes = []
  mode_name = lambda: "eager" if context.executing_eagerly() else "graph"

  class ExampleTest(test_util.TensorFlowTestCase):

    def runTest(self):
      pass

    def setUp(self):
      modes.append("setup_" + mode_name())

    @test_util.run_in_graph_and_eager_modes
    def testBody(self):
      modes.append("run_" + mode_name())

  e = ExampleTest()
  e.setUp()
  e.testBody()
  # Expected order: setup_graph, run_graph, setup_eager, run_eager
  # (the decorator re-runs setUp before the eager pass).
  self.assertEqual(modes[1:2], ["run_graph"])
  self.assertEqual(modes[2:], ["setup_eager", "run_eager"])
@parameterized.named_parameters(dict(testcase_name="argument",
                                     arg=True))
@test_util.run_in_graph_and_eager_modes
def test_run_in_graph_and_eager_works_with_parameterized_keyword(self, arg):
  """The decorator composes with absl parameterized keyword arguments."""
  self.assertEqual(arg, True)
@combinations.generate(combinations.combine(arg=True))
@test_util.run_in_graph_and_eager_modes
def test_run_in_graph_and_eager_works_with_combinations(self, arg):
  """The decorator composes with tf.__internal__ test combinations."""
  self.assertEqual(arg, True)
def test_build_as_function_and_v1_graph(self):
  """build_as_function_and_v1_graph creates both _v1_graph and _function variants."""

  class GraphModeAndFunctionTest(parameterized.TestCase):

    def __init__(inner_self):  # pylint: disable=no-self-argument
      super(GraphModeAndFunctionTest, inner_self).__init__()
      inner_self.graph_mode_tested = False
      inner_self.inside_function_tested = False

    def runTest(self):
      del self

    @test_util.build_as_function_and_v1_graph
    def test_modes(inner_self):  # pylint: disable=no-self-argument
      # Each variant must run exactly once: one inside a tf.function,
      # one in a plain v1 graph.
      if ops.inside_function():
        self.assertFalse(inner_self.inside_function_tested)
        inner_self.inside_function_tested = True
      else:
        self.assertFalse(inner_self.graph_mode_tested)
        inner_self.graph_mode_tested = True

  test_object = GraphModeAndFunctionTest()
  test_object.test_modes_v1_graph()
  test_object.test_modes_function()
  self.assertTrue(test_object.graph_mode_tested)
  self.assertTrue(test_object.inside_function_tested)
@test_util.run_in_graph_and_eager_modes
def test_consistent_random_seed_in_assert_all_equal(self):
  """assertAllEqual must evaluate both operands in one session/step."""
  random_seed.set_seed(1066)
  index = random_ops.random_shuffle([0, 1, 2, 3, 4], seed=2021)
  # This failed when `a` and `b` were evaluated in separate sessions.
  self.assertAllEqual(index, index)
def test_with_forward_compatibility_horizons(self):
  """The decorator runs the test under each listed compat horizon (None = today)."""
  tested_codepaths = set()

  def some_function_with_forward_compat_behavior():
    if compat.forward_compatible(2050, 1, 1):
      tested_codepaths.add("future")
    else:
      tested_codepaths.add("present")

  @test_util.with_forward_compatibility_horizons(None, [2051, 1, 1])
  def some_test(self):
    del self  # unused
    some_function_with_forward_compat_behavior()

  some_test(None)
  self.assertEqual(tested_codepaths, set(["present", "future"]))
class SkipTestTest(test_util.TensorFlowTestCase):
  """Tests for test_util.skip_if_error, including inside setUp/tearDown."""

  def _verify_test_in_set_up_or_tear_down(self):
    """Exercise skip_if_error both when it should skip and when it should re-raise."""
    # Matching message -> SkipTest is raised.
    with self.assertRaises(unittest.SkipTest):
      with test_util.skip_if_error(self, ValueError,
                                   ["foo bar", "test message"]):
        raise ValueError("test message")
    # Non-matching message -> original error propagates, no skip.
    try:
      with self.assertRaisesRegex(ValueError, "foo bar"):
        with test_util.skip_if_error(self, ValueError, "test message"):
          raise ValueError("foo bar")
    except unittest.SkipTest:
      raise RuntimeError("Test is not supposed to skip.")

  def setUp(self):
    super(SkipTestTest, self).setUp()
    self._verify_test_in_set_up_or_tear_down()

  def tearDown(self):
    super(SkipTestTest, self).tearDown()
    self._verify_test_in_set_up_or_tear_down()

  def test_skip_if_error_should_skip(self):
    """A single matching message string triggers a skip."""
    with self.assertRaises(unittest.SkipTest):
      with test_util.skip_if_error(self, ValueError, "test message"):
        raise ValueError("test message")

  def test_skip_if_error_should_skip_with_list(self):
    """Any message in a list of messages triggers a skip."""
    with self.assertRaises(unittest.SkipTest):
      with test_util.skip_if_error(self, ValueError,
                                   ["foo bar", "test message"]):
        raise ValueError("test message")

  def test_skip_if_error_should_skip_without_expected_message(self):
    """With no expected message, any matching error type skips."""
    with self.assertRaises(unittest.SkipTest):
      with test_util.skip_if_error(self, ValueError):
        raise ValueError("test message")

  def test_skip_if_error_should_skip_without_error_message(self):
    """An empty-message error still skips when no message filter is given."""
    with self.assertRaises(unittest.SkipTest):
      with test_util.skip_if_error(self, ValueError):
        raise ValueError()

  def test_skip_if_error_should_raise_message_mismatch(self):
    """A mismatched message must re-raise the original error, not skip."""
    try:
      with self.assertRaisesRegex(ValueError, "foo bar"):
        with test_util.skip_if_error(self, ValueError, "test message"):
          raise ValueError("foo bar")
    except unittest.SkipTest:
      raise RuntimeError("Test is not supposed to skip.")

  def test_skip_if_error_should_raise_no_message(self):
    """An empty-message error must re-raise when a message filter is set."""
    try:
      with self.assertRaisesRegex(ValueError, ""):
        with test_util.skip_if_error(self, ValueError, "test message"):
          raise ValueError()
    except unittest.SkipTest:
      raise RuntimeError("Test is not supposed to skip.")
# Its own test case to reproduce variable sharing issues which only pop up when
# setUp() is overridden and super() is not called.
class GraphAndEagerNoVariableSharing(test_util.TensorFlowTestCase):
  """Regression test: get_variable must not collide across modes without base setUp()."""

  def setUp(self):
    pass  # Intentionally does not call TensorFlowTestCase's super()

  @test_util.run_in_graph_and_eager_modes
  def test_no_variable_sharing(self):
    # Creating the same-named variable in both modes must not raise a
    # variable-sharing error.
    variable_scope.get_variable(
        name="step_size",
        initializer=np.array(1e-5, np.float32),
        use_resource=True,
        trainable=False)
class GarbageCollectionTest(test_util.TensorFlowTestCase):
  """Tests for the leak-detection decorators in test_util."""

  def test_no_reference_cycle_decorator(self):
    """assert_no_garbage_created fails on a reference cycle, passes otherwise."""

    class ReferenceCycleTest(object):

      def __init__(inner_self):  # pylint: disable=no-self-argument
        inner_self.assertEqual = self.assertEqual  # pylint: disable=invalid-name

      @test_util.assert_no_garbage_created
      def test_has_cycle(self):
        a = []
        a.append(a)  # self-referential list -> collectible garbage

      @test_util.assert_no_garbage_created
      def test_has_no_cycle(self):
        pass

    with self.assertRaises(AssertionError):
      ReferenceCycleTest().test_has_cycle()

    ReferenceCycleTest().test_has_no_cycle()

  @test_util.run_in_graph_and_eager_modes
  def test_no_leaked_tensor_decorator(self):
    """assert_no_new_tensors fails when a tensor outlives the test body."""

    class LeakedTensorTest(object):

      def __init__(inner_self):  # pylint: disable=no-self-argument
        inner_self.assertEqual = self.assertEqual  # pylint: disable=invalid-name

      @test_util.assert_no_new_tensors
      def test_has_leak(self):
        # Storing on self keeps the tensor alive past the decorator's check.
        self.a = constant_op.constant([3.], name="leak")

      @test_util.assert_no_new_tensors
      def test_has_no_leak(self):
        constant_op.constant([3.], name="no-leak")

    with self.assertRaisesRegex(AssertionError, "Tensors not deallocated"):
      LeakedTensorTest().test_has_leak()

    LeakedTensorTest().test_has_no_leak()

  def test_no_new_objects_decorator(self):
    """assert_no_new_pyobjects_executing_eagerly catches python-object growth."""

    class LeakedObjectTest(unittest.TestCase):

      def __init__(self, *args, **kwargs):
        super(LeakedObjectTest, self).__init__(*args, **kwargs)
        self.accumulation = []

      @unittest.expectedFailure
      @test_util.assert_no_new_pyobjects_executing_eagerly
      def test_has_leak(self):
        self.accumulation.append([1.])

      @test_util.assert_no_new_pyobjects_executing_eagerly
      def test_has_no_leak(self):
        self.not_accumulating = [1.]

    self.assertTrue(LeakedObjectTest("test_has_leak").run().wasSuccessful())
    self.assertTrue(LeakedObjectTest("test_has_no_leak").run().wasSuccessful())
class RunFunctionsEagerlyInV2Test(test_util.TensorFlowTestCase,
                                  parameterized.TestCase):
  """Tests for test_util.run_functions_eagerly in TF2."""

  @parameterized.named_parameters(
      [("_RunEagerly", True), ("_RunGraph", False)])
  def test_run_functions_eagerly(self, run_eagerly):  # pylint: disable=g-wrong-blank-lines
    """Traced tensors are EagerTensors iff functions run eagerly in eager mode."""
    results = []

    @def_function.function
    def add_two(x):
      for _ in range(5):
        x += 2
        results.append(x)
      return x

    with test_util.run_functions_eagerly(run_eagerly):
      add_two(constant_op.constant(2.))
      if context.executing_eagerly():
        # BUG FIX: the original passed a bare generator expression to
        # assertTrue, which is always truthy, so nothing was checked.
        # Wrap with all(...) so each intermediate tensor is verified.
        if run_eagerly:
          self.assertTrue(all(isinstance(t, ops.EagerTensor) for t in results))
        else:
          self.assertTrue(all(isinstance(t, ops.Tensor) for t in results))
      else:
        self.assertTrue(all(isinstance(t, ops.Tensor) for t in results))
# Standard TF test entry point.
if __name__ == "__main__":
  googletest.main()
| {
"content_hash": "23ff61c22a5e90599130b516805bbf5b",
"timestamp": "",
"source": "github",
"line_count": 1021,
"max_line_length": 98,
"avg_line_length": 35.081292850146916,
"alnum_prop": 0.6547545926629069,
"repo_name": "sarvex/tensorflow",
"id": "85cc7b88581c524c735ddd6b01996e1b0cd39e6c",
"size": "36507",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "tensorflow/python/framework/test_util_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "148184"
},
{
"name": "C++",
"bytes": "6224499"
},
{
"name": "CSS",
"bytes": "107"
},
{
"name": "HTML",
"bytes": "650478"
},
{
"name": "Java",
"bytes": "53519"
},
{
"name": "JavaScript",
"bytes": "6659"
},
{
"name": "Jupyter Notebook",
"bytes": "777935"
},
{
"name": "Objective-C",
"bytes": "1288"
},
{
"name": "Protocol Buffer",
"bytes": "61743"
},
{
"name": "Python",
"bytes": "3474762"
},
{
"name": "Shell",
"bytes": "45640"
},
{
"name": "TypeScript",
"bytes": "283668"
}
],
"symlink_target": ""
} |
import io
import os
import pytest
from cookiecutter import repository
@pytest.fixture
def template():
    """Fixture: name of the template directory used by these tests."""
    return 'cookiecutter-pytest-plugin'
@pytest.fixture
def cloned_cookiecutter_path(user_config_data, template):
    """Fixture: simulate an already-cloned template in the cookiecutters dir.

    Creates ``<cookiecutters_dir>/<template>/cookiecutter.json`` (empty) and
    returns the template directory path.
    """
    cookiecutters_dir = user_config_data['cookiecutters_dir']

    cloned_template_path = os.path.join(cookiecutters_dir, template)
    os.mkdir(cloned_template_path)

    # BUG FIX: the original called io.open(...) without closing the returned
    # handle, leaking a file descriptor (and on Windows potentially blocking
    # cleanup). Use a context manager so the handle is closed deterministically.
    with io.open(os.path.join(cloned_template_path, 'cookiecutter.json'), 'w'):
        pass

    return cloned_template_path
def test_should_find_existing_cookiecutter(
        template, user_config_data, cloned_cookiecutter_path):
    """determine_repo_dir should reuse a template already present on disk."""
    project_dir = repository.determine_repo_dir(
        template,
        abbreviations={},
        clone_to_dir=user_config_data['cookiecutters_dir'],
        checkout=None,
        no_input=True,
    )
    # No clone should happen; the pre-existing path is returned as-is.
    assert cloned_cookiecutter_path == project_dir
| {
"content_hash": "089d6209ec34ac3312d4c0a5795755bb",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 73,
"avg_line_length": 24.62857142857143,
"alnum_prop": 0.7088167053364269,
"repo_name": "stevepiercy/cookiecutter",
"id": "7b274a35e711d5b237c2c3430e2453597fba3988",
"size": "886",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/repository/test_determine_repo_dir_finds_existing_cookiecutter.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "2581"
},
{
"name": "Python",
"bytes": "188773"
},
{
"name": "Shell",
"bytes": "161"
}
],
"symlink_target": ""
} |
import json
import click
from tabulate import tabulate
@click.command('users', short_help='List users')
@click.option('--role', 'roles', multiple=True, help='Filter users by role')
@click.pass_obj
def cli(obj, roles):
    """List users.

    With ``--output json`` the raw API response is dumped; otherwise the
    users are rendered as a table (tabulate) in the configured timezone.
    """
    client = obj['client']
    # Repeat the 'roles' query parameter once per requested role.
    query = [('roles', r) for r in roles]
    if obj['output'] == 'json':
        r = client.http.get('/users', query)
        click.echo(json.dumps(r['users'], sort_keys=True, indent=4, ensure_ascii=False))
    else:
        timezone = obj['timezone']
        headers = {'id': 'ID', 'name': 'USER', 'email': 'EMAIL', 'roles': 'ROLES', 'status': 'STATUS', 'text': 'TEXT',
                   'createTime': 'CREATED', 'updateTime': 'LAST UPDATED', 'lastLogin': 'LAST LOGIN', 'email_verified': 'VERIFIED'}
        click.echo(
            tabulate([u.tabular(timezone) for u in client.get_users(query)], headers=headers, tablefmt=obj['output'])
        )
| {
"content_hash": "cbb7d5c1ebdf8b7daad5ebd8f6e2dce9",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 130,
"avg_line_length": 38.416666666666664,
"alnum_prop": 0.5997830802603037,
"repo_name": "alerta/python-alerta",
"id": "ee2ed62014423dea9cb0296db4c960b4b389c3b5",
"size": "922",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "alertaclient/commands/cmd_users.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "106941"
}
],
"symlink_target": ""
} |
"""
Created on Wed Oct 22 10:53:57 2014
@author: Natural Solutions (Thomas)
"""
| {
"content_hash": "25926b0e9f4cb624e05bae6b93fe381f",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 35,
"avg_line_length": 13.666666666666666,
"alnum_prop": 0.6707317073170732,
"repo_name": "NaturalSolutions/ecoReleve-Server",
"id": "d82ece3a603de50a25c837c3e7de34bd9316f0a8",
"size": "82",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "ecorelevesensor/renderers/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2874"
},
{
"name": "Python",
"bytes": "413882"
},
{
"name": "Shell",
"bytes": "761"
}
],
"symlink_target": ""
} |
import json
import os
import pytest
@pytest.mark.integration
def test_bigquery_uri():
    """A bare project_id should produce a bigquery://<project> SQLAlchemy URL."""
    from datahub.ingestion.source.sql.bigquery import BigQueryConfig

    config = BigQueryConfig.parse_obj(
        {
            "project_id": "test-project",
        }
    )
    assert config.get_sql_alchemy_url() == "bigquery://test-project"
@pytest.mark.integration
def test_bigquery_uri_with_credential():
    """An inline credential should be materialized to a JSON file matching the expected service-account document."""
    from datahub.ingestion.source.sql.bigquery import BigQueryConfig

    expected_credential_json = {
        "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs",
        "auth_uri": "https://accounts.google.com/o/oauth2/auth",
        "client_email": "test@acryl.io",
        "client_id": "test_client-id",
        "client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/test@acryl.io",
        "private_key": "random_private_key",
        "private_key_id": "test-private-key",
        "project_id": "test-project",
        "token_uri": "https://oauth2.googleapis.com/token",
        "type": "service_account",
    }
    config = BigQueryConfig.parse_obj(
        {
            "project_id": "test-project",
            "credential": {
                "project_id": "test-project",
                "private_key_id": "test-private-key",
                "private_key": "random_private_key",
                "client_email": "test@acryl.io",
                "client_id": "test_client-id",
            },
        }
    )
    try:
        assert config.get_sql_alchemy_url() == "bigquery://test-project"
        assert config.credentials_path

        with open(config.credentials_path) as jsonFile:
            json_credential = json.load(jsonFile)
        # Compare canonical (sorted-key) serializations so dict ordering is irrelevant.
        credential = json.dumps(json_credential, sort_keys=True)
        expected_credential = json.dumps(expected_credential_json, sort_keys=True)
        assert expected_credential == credential
    finally:
        # BUG FIX: the original only unlinked the temp credentials file when an
        # assertion failed, leaking it on success. Always clean it up.
        if config.credentials_path:
            os.unlink(str(config.credentials_path))
| {
"content_hash": "878476bd0d86571fc6d385b9124d1c45",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 98,
"avg_line_length": 31.676923076923078,
"alnum_prop": 0.6032054395337543,
"repo_name": "linkedin/WhereHows",
"id": "7bc769d29f54990c52064a0fd0cc8b39fe1238eb",
"size": "2059",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "metadata-ingestion/tests/unit/test_bigquery_source.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "110129"
},
{
"name": "Dockerfile",
"bytes": "2521"
},
{
"name": "HTML",
"bytes": "131513"
},
{
"name": "Java",
"bytes": "1307442"
},
{
"name": "JavaScript",
"bytes": "148450"
},
{
"name": "Nearley",
"bytes": "2837"
},
{
"name": "Python",
"bytes": "1419332"
},
{
"name": "Shell",
"bytes": "2564"
},
{
"name": "TSQL",
"bytes": "42644"
},
{
"name": "TypeScript",
"bytes": "641014"
}
],
"symlink_target": ""
} |
import mock
from novaclient import exceptions as exc
from novaclient.tests.unit import utils
from novaclient.tests.unit.v2 import fakes
from novaclient.v2 import versions
class VersionsTest(utils.TestCase):
    """Tests for the novaclient versions manager (list / get_current)."""

    def setUp(self):
        super(VersionsTest, self).setUp()
        self.cs = fakes.FakeClient()
        self.service_type = versions.Version

    @mock.patch.object(versions.VersionManager, '_is_session_client',
                       return_value=False)
    def test_list_services_with_http_client(self, mock_is_session_client):
        """With the legacy HTTP client, list() hits the catalog root (no URL)."""
        self.cs.versions.list()
        self.cs.assert_called('GET', None)

    @mock.patch.object(versions.VersionManager, '_is_session_client',
                       return_value=True)
    def test_list_services_with_session_client(self, mock_is_session_client):
        """With a session client, list() GETs the bare endpoint root."""
        self.cs.versions.list()
        self.cs.assert_called('GET', 'http://nova-api:8774/')

    @mock.patch.object(versions.VersionManager, '_is_session_client',
                       return_value=False)
    @mock.patch.object(versions.VersionManager, 'list')
    def test_get_current_with_http_client(self, mock_list,
                                          mock_is_session_client):
        """get_current() picks the version whose link matches the client endpoint."""
        current_version = versions.Version(
            None, {"links": [{"href": "http://nova-api:8774/v2.1"}]},
            loaded=True)

        mock_list.return_value = [
            versions.Version(
                None, {"links": [{"href": "http://url/v1"}]}, loaded=True),
            versions.Version(
                None, {"links": [{"href": "http://url/v2"}]}, loaded=True),
            versions.Version(
                None, {"links": [{"href": "http://url/v3"}]}, loaded=True),
            current_version,
            versions.Version(
                None, {"links": [{"href": "http://url/v21"}]}, loaded=True)]
        self.assertEqual(current_version, self.cs.versions.get_current())

    @mock.patch.object(versions.VersionManager, '_is_session_client',
                       return_value=True)
    def test_get_current_with_session_client(self, mock_is_session_client):
        """With a session client, get_current() GETs the versioned endpoint."""
        self.cs.callback = []
        self.cs.versions.get_current()
        self.cs.assert_called('GET', 'http://nova-api:8774/v2.1/')

    @mock.patch.object(versions.VersionManager, '_is_session_client',
                       return_value=True)
    @mock.patch.object(versions.VersionManager, '_get',
                       side_effect=exc.Unauthorized("401 RAX"))
    def test_get_current_with_rax_workaround(self, session, get):
        """Rackspace returns 401 on version discovery; get_current() yields None."""
        self.cs.callback = []
        self.assertIsNone(self.cs.versions.get_current())

    @mock.patch.object(versions.VersionManager, '_is_session_client',
                       return_value=False)
    @mock.patch.object(versions.VersionManager, '_list',
                       side_effect=exc.Unauthorized("401 RAX"))
    def test_get_current_with_rax_auth_plugin_workaround(self, session, _list):
        """Same Rackspace 401 workaround via the auth-plugin (_list) path."""
        self.cs.callback = []
        self.assertIsNone(self.cs.versions.get_current())
| {
"content_hash": "687f3e56a1f96b9f0bd8216bf2916dfb",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 79,
"avg_line_length": 43.73913043478261,
"alnum_prop": 0.6033797216699801,
"repo_name": "takeshineshiro/python-novaclient",
"id": "aa8e84a6ef3ede9cbaf721ddb73637be24f1d43b",
"size": "3649",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "novaclient/tests/unit/v2/test_versions.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1104314"
},
{
"name": "Shell",
"bytes": "6319"
}
],
"symlink_target": ""
} |
from .test_commons import conn_info, VerticaTestCase
from .. import connect
class ColumnTestCase(VerticaTestCase):
    """Tests that cursor.description reflects column aliases from a query."""

    def test_column_names_query(self):
        """Aliased columns in a UNION query appear, in order, in cursor.description."""
        column_0 = 'isocode'
        column_1 = 'name'

        query = """
            select 'US' as {column_0}, 'United States' as {column_1}
            union all
            select 'CA', 'Canada'
            union all
            select 'MX', 'Mexico'
        """.format(column_0=column_0, column_1=column_1)

        with connect(**conn_info) as conn:
            cur = conn.cursor()
            cur.execute(query)
            description = cur.description
            assert description[0].name == column_0
            assert description[1].name == column_1
| {
"content_hash": "7a6e43c2ff662c97bc081906ef86b04d",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 64,
"avg_line_length": 33.285714285714285,
"alnum_prop": 0.5779685264663805,
"repo_name": "dennisobrien/vertica-python",
"id": "f1d889d74b65609306b931ec586eb487df17b235",
"size": "699",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vertica_python/tests/column_tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "66237"
}
],
"symlink_target": ""
} |
import importlib
import asyncio
import pytest
import os
from aiohttp.web import Application, AppRunner, TCPSite
from aiohttp_json_rpc import JsonRpc, JsonRpcClient
# Detect an importable Django project: if DJANGO_SETTINGS_MODULE resolves and
# exposes a WSGI application, enable the Django-specific fixtures below.
# Any failure (unset env var, missing settings, broken WSGI path) simply
# disables them rather than breaking non-Django test runs.
try:
    django_settings = importlib.import_module(
        os.environ.get('DJANGO_SETTINGS_MODULE'))

    django_wsgi_application = importlib.import_module(
        '.'.join(django_settings.WSGI_APPLICATION.split('.')[0:-1])
    ).application

    DJANGO = True

except Exception:
    DJANGO = False

# Public pytest plugin API; django_rpc_context is only exported when Django
# was successfully detected above.
__all__ = [
    'rpc_context',
]

if DJANGO:
    __all__.append('django_rpc_context')
class RpcContext(object):
    """Bundles a running RPC app with helpers to open and tear down clients."""

    def __init__(self, app, rpc, host, port, url):
        self.app = app
        self.rpc = rpc
        self.host = host
        self.port = port
        self.url = url
        self.clients = []

    async def make_clients(self, count, cookies=None):
        """Connect `count` new JsonRpcClients concurrently and track them."""
        cookie_jar = cookies or {}
        new_clients = [JsonRpcClient() for _ in range(count)]

        connect_coros = [
            client.connect(self.host, self.port, url=self.url,
                           cookies=cookie_jar)
            for client in new_clients
        ]
        await asyncio.gather(*connect_coros)

        self.clients.extend(new_clients)
        return new_clients

    async def make_client(self, cookies=None):
        """Convenience wrapper: connect and return a single client."""
        (client,) = await self.make_clients(1, cookies=cookies)
        return client

    async def finish_connections(self):
        """Disconnect every client this context has handed out, in parallel."""
        await asyncio.gather(*[client.disconnect() for client in self.clients])
def gen_rpc_context(loop, host, port, rpc, rpc_route, routes=(),
                    RpcContext=RpcContext):
    """Generator: start an aiohttp app serving `rpc`, yield an RpcContext, tear down.

    Setup/teardown ordering matters: clients must be disconnected before the
    AppRunner is cleaned up, or pending websocket tasks error out.
    """
    # make app
    app = Application()
    app.router.add_route(*rpc_route)

    for route in routes:
        app.router.add_route(*route)

    # make app runner
    runner = AppRunner(app)
    loop.run_until_complete(runner.setup())
    site = TCPSite(runner, host, port)
    loop.run_until_complete(site.start())

    # create RpcContext
    rpc_context = RpcContext(app, rpc, host, port, rpc_route[1])

    yield rpc_context

    # teardown clients
    loop.run_until_complete(rpc_context.finish_connections())

    # teardown server
    loop.run_until_complete(runner.cleanup())
# MODERNIZATION: pytest.yield_fixture was deprecated since pytest 3.0 and
# removed in pytest 6.0; pytest.fixture natively supports yield fixtures.
@pytest.fixture
def rpc_context(event_loop, unused_tcp_port):
    """Fixture: a plain JsonRpc app served on an unused port, as an RpcContext."""
    rpc = JsonRpc(loop=event_loop, max_workers=4)
    rpc_route = ('*', '/rpc', rpc.handle_request)

    for context in gen_rpc_context(event_loop, 'localhost', unused_tcp_port,
                                   rpc, rpc_route):
        yield context
# MODERNIZATION: pytest.yield_fixture was deprecated since pytest 3.0 and
# removed in pytest 6.0; pytest.fixture natively supports yield fixtures.
@pytest.fixture
def django_rpc_context(db, event_loop, unused_tcp_port):
    """Fixture: a Django-auth JsonRpc app plus a catch-all WSGI route for Django views."""
    from aiohttp_json_rpc.auth.django import DjangoAuthBackend
    from aiohttp_wsgi import WSGIHandler

    rpc = JsonRpc(loop=event_loop,
                  auth_backend=DjangoAuthBackend(generic_orm_methods=True),
                  max_workers=4)

    rpc_route = ('*', '/rpc', rpc.handle_request)

    # Everything that is not /rpc is served by the Django WSGI application.
    routes = [
        ('*', '/{path_info:.*}', WSGIHandler(django_wsgi_application)),
    ]

    for context in gen_rpc_context(event_loop, 'localhost',
                                   unused_tcp_port, rpc, rpc_route,
                                   routes):
        yield context
@pytest.fixture
def django_staff_user(db):
    """Fixture: a superuser 'admin' with password 'admin' for auth tests."""
    from django.contrib.auth import get_user_model

    user = get_user_model().objects.create(username='admin', is_active=True,
                                           is_staff=True, is_superuser=True)

    user.set_password('admin')
    user.save()
    # Keep the raw password around so tests can log in with it.
    user._password = 'admin'

    return user
| {
"content_hash": "20e687f7a8d15de3550fe90e06c7154a",
"timestamp": "",
"source": "github",
"line_count": 134,
"max_line_length": 76,
"avg_line_length": 25.51492537313433,
"alnum_prop": 0.6089499853758409,
"repo_name": "pengutronix/aiohttp-json-rpc",
"id": "e57c26ff4ebb52bd794ed37760e97fc06c3851ee",
"size": "3419",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "aiohttp_json_rpc/pytest.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "353"
},
{
"name": "JavaScript",
"bytes": "8530"
},
{
"name": "Makefile",
"bytes": "823"
},
{
"name": "Python",
"bytes": "95707"
}
],
"symlink_target": ""
} |
import logging
import os
from dart.client.python.dart_client import Dart
from dart.config.config import configuration
from dart.engine.es.metadata import ElasticsearchActionTypes
from dart.model.engine import Engine, EngineData
_logger = logging.getLogger(__name__)
def add_elasticsearch_engine(config):
    """Create or update the 'elasticsearch_engine' registration in Dart.

    Reuses the id of any existing engine with the same name so the save acts
    as an in-place update.  When ``use_local_engines`` is set, no ECS task
    definition is attached.

    :param config: dart configuration mapping; must provide
        ``config['engines']['elasticsearch_engine']`` and ``config['dart']``.
    """
    engine_config = config['engines']['elasticsearch_engine']
    opts = engine_config['options']
    dart = Dart(opts['dart_host'], opts['dart_port'], opts['dart_api_version'])
    assert isinstance(dart, Dart)
    _logger.info('saving elasticsearch_engine')

    # If the engine is already registered, keep its id so we update in place
    # (last match wins, matching the original behavior).
    engine_id = None
    for e in dart.get_engines():
        if e.data.name == 'elasticsearch_engine':
            engine_id = e.id

    ecs_task_definition = None if config['dart']['use_local_engines'] else {
        'family': 'dart-%s-elasticsearch_engine' % config['dart']['env_name'],
        'containerDefinitions': [
            {
                'name': 'dart-elasticsearch_engine',
                'cpu': 64,
                'memory': 256,
                'image': engine_config['docker_image'],
                'logConfiguration': {'logDriver': 'syslog'},
                'environment': [
                    {'name': 'DART_ROLE', 'value': 'worker:engine_elasticsearch'},
                    {'name': 'DART_CONFIG', 'value': engine_config['config']},
                    {'name': 'AWS_DEFAULT_REGION', 'value': opts['region']}
                ],
                'mountPoints': [
                    {
                        'containerPath': '/mnt/ecs_agent_data',
                        'sourceVolume': 'ecs-agent-data',
                        'readOnly': True
                    }
                ],
            }
        ],
        'volumes': [
            {
                'host': {'sourcePath': '/var/lib/ecs/data'},
                'name': 'ecs-agent-data'
            }
        ],
    }
    e1 = dart.save_engine(Engine(id=engine_id, data=EngineData(
        name='elasticsearch_engine',
        description='For Elasticsearch clusters',
        options_json_schema={
            'type': 'object',
            'properties': {
                'access_key_id': {
                    'type': 'string',
                    'default': '',
                    'minLength': 0,
                    'maxLength': 20,
                    'description': 'the access_key_id for accessing this elasticsearch cluster. '
                                   + 'Leave blank to use Dart\'s instance profile credentials'
                },
                'secret_access_key': {
                    'type': 'string',
                    'default': '',
                    'minLength': 0,
                    'maxLength': 40,
                    'x-dart-secret': True,
                    'description': 'the secret_access_key for accessing this elasticsearch cluster. '
                                   + 'Leave blank to use Dart\'s instance profile credentials'
                },
                'endpoint': {
                    'type': 'string',
                    'minLength': 1,
                    'maxLength': 256,
                    # Fix: raw string.  The original non-raw literal relied on
                    # Python leaving the unknown escapes \- and \. intact,
                    # which is deprecated syntax; the pattern bytes are
                    # unchanged.
                    'pattern': r'^[a-zA-Z0-9]+[a-zA-Z0-9\-\.]*\.es\.amazonaws\.com$',
                    'description': 'The AWS Elasticsearch domain endpoint that you use to submit index and search requests.'
                },
            },
            'additionalProperties': False,
            'required': ['endpoint']
        },
        supported_action_types=[
            ElasticsearchActionTypes.data_check,
            ElasticsearchActionTypes.create_index,
            ElasticsearchActionTypes.create_mapping,
            ElasticsearchActionTypes.create_template,
            ElasticsearchActionTypes.delete_index,
            ElasticsearchActionTypes.delete_template,
            ElasticsearchActionTypes.force_merge_index,
        ],
        ecs_task_definition=ecs_task_definition
    )))
    _logger.info('saved elasticsearch_engine: %s' % e1.id)
# Entry point: DART_CONFIG must point at the dart configuration to load.
if __name__ == '__main__':
    add_elasticsearch_engine(configuration(os.environ['DART_CONFIG']))
| {
"content_hash": "d2925fada0ee677eb7ae9b797eb449df",
"timestamp": "",
"source": "github",
"line_count": 104,
"max_line_length": 124,
"avg_line_length": 39.01923076923077,
"alnum_prop": 0.5066535239034007,
"repo_name": "RetailMeNotSandbox/dart",
"id": "481af0264413c8a40b7e925340b3048a38ec9107",
"size": "4058",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/python/dart/engine/es/add_engine.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "103727"
},
{
"name": "HTML",
"bytes": "67636"
},
{
"name": "JavaScript",
"bytes": "2762304"
},
{
"name": "Nginx",
"bytes": "996"
},
{
"name": "PLpgSQL",
"bytes": "1475"
},
{
"name": "Python",
"bytes": "1025954"
},
{
"name": "Ruby",
"bytes": "5523"
},
{
"name": "Shell",
"bytes": "3100"
}
],
"symlink_target": ""
} |
import falcon
import falcon.testing as testing
from belzoni.operation.api.api import OperationAPI
#class falcon.testing.TestBase(methodName='runTest'):
class OperationAPITest(testing.TestBase):
    """Functional tests for the /operation route, via falcon's TestBase."""

    def before(self):
        # TestBase provides an instance of falcon.API to use along with
        # simulate_request (see below).
        # NOTE(review): hard-coded local path — this test only works on a
        # machine where the directory exists; consider a tmpdir fixture.
        storage_path ="/home/mehdi/Pictures/"
        operation_api = OperationAPI(storage_path)
        self.api.add_route('/operation', operation_api)

    def test_grace(self):
        # simulate_request issues a WSGI request without standing up a real
        # server; decode='utf-8' converts the raw response into a unicode
        # string.  A method= kwarg could select a different HTTP verb.
        body = self.simulate_request('/operation?id=1234', decode='utf-8')
        # self.srmock (a StartResponseMock) captures the status code and
        # headers the Falcon app passed to WSGI's start_response callback.
        self.assertEqual(self.srmock.status, falcon.HTTP_200)
        #self.assertEqual(body, QUOTE)
| {
"content_hash": "5a804465a4ac8a491029f9b33fd10bea",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 74,
"avg_line_length": 41.689655172413794,
"alnum_prop": 0.6898263027295285,
"repo_name": "SmartInfrastructures/belzoni-workload-manager",
"id": "5e42670d6889f6bf00d79d2679be07a93fb65561",
"size": "1209",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "belzoni/tests/operation/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "API Blueprint",
"bytes": "17682"
},
{
"name": "Python",
"bytes": "8048"
},
{
"name": "Shell",
"bytes": "164"
}
],
"symlink_target": ""
} |
"""Define various utilities for WWLLN tests."""
import pytest
from homeassistant.components.wwlln import CONF_WINDOW, DOMAIN
from homeassistant.const import (
CONF_LATITUDE,
CONF_LONGITUDE,
CONF_RADIUS,
CONF_UNIT_SYSTEM,
)
from tests.common import MockConfigEntry
@pytest.fixture
def config_entry():
    """Create a mock WWLLN config entry."""
    entry_data = {
        CONF_LATITUDE: 39.128712,
        CONF_LONGITUDE: -104.9812612,
        CONF_RADIUS: 25,
        CONF_UNIT_SYSTEM: "metric",
        CONF_WINDOW: 3600,
    }
    return MockConfigEntry(
        domain=DOMAIN,
        data=entry_data,
        title="39.128712, -104.9812612",
    )
| {
"content_hash": "92c0dd757fdc385c5e2e12ea898da631",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 62,
"avg_line_length": 23.857142857142858,
"alnum_prop": 0.624251497005988,
"repo_name": "postlund/home-assistant",
"id": "787b68aebcca51bd08f1da564f2985009fd391e9",
"size": "668",
"binary": false,
"copies": "7",
"ref": "refs/heads/dev",
"path": "tests/components/wwlln/conftest.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "20215859"
},
{
"name": "Shell",
"bytes": "6663"
}
],
"symlink_target": ""
} |
import sys, os, time
import doctest
sys.path.append("..")
from mas.multiagent import *
def test_context_basic() :
    '''
    >>> test_context_basic()
    Initialization.
    Context: <<multiagent.Context has_oracle=1>>
    Time: 0.0000
    Object: <<multiagent.Object name=0>>
    Obstacle: <<multiagent.Obstacle name=0>>
    '''
    # NOTE: the docstring above is an executable doctest — keep its expected
    # output in sync with the prints below.
    print("Initialization.")
    # Build a context with one object and one circular-ended obstacle segment.
    oracle = OracleSpace()
    context = Context(oracle = oracle, delta = 0.02, objs = [Object(name = "0"), ], obts = [Obstacle(name ="0", a = (0.0, 1.0), b = (1.0, 0.0), radius = 2.0), ])
    print("Context: %s" % context.info())
    print("Time: %.4f" % context.time)
    for name, obj in context.oracle.objs.items() :
        print("Object: %s" % obj.info())
    for name, obt in context.oracle.obts.items() :
        print("Obstacle: %s" % obt.info())
def test_context_add() :
    '''
    >>> test_context_add()
    Initialization.
    Context: <<multiagent.Context has_oracle=1>>
    Add object: <<multiagent.Object name=0>>
    Add object: <<multiagent.Object name=1>>
    Object: <<multiagent.Object name=1>>
    Object: <<multiagent.Object name=0>>
    Add obstacle: <<multiagent.Obstacle name=0>>
    Obstacle: <<multiagent.Obstacle name=0>>
    '''
    # Doctest exercising Context.add_obj / add_obt on an initially empty
    # context.  The docstring output order is part of the test contract.
    print("Initialization.")
    context = Context()
    print("Context: %s" % context.info())
    obj = Object(name = "0")
    print("Add object: %s" % obj.info())
    context.add_obj(obj)
    obj = Object(name = "1")
    print("Add object: %s" % obj.info())
    context.add_obj(obj)
    for name, obj in context.oracle.objs.items() :
        print("Object: %s" % obj.info())
    obt = Obstacle(name ="0", a = (0.0, 1.0), b = (1.0, 0.0), radius = 2.0)
    print("Add obstacle: %s" % obt.info())
    context.add_obt(obt)
    for name, obt in context.oracle.obts.items() :
        print("Obstacle: %s" % obt.info())
def test_context_get() :
    '''
    >>> test_context_get()
    Initialization.
    Context: <<multiagent.Context has_oracle=1>>
    Get time by 10 steps.
    Time: 0.1
    Objects within distance 10 from center (0, 0).
    Object: <<multiagent.Object name=0>>
    Obstacles within distance 10 from center (0, 0).
    Obstacle: <<multiagent.Obstacle name=0>>
    '''
    # Doctest exercising the query helpers: get_time_by_steps and the
    # distance-based get_objs_at / get_obts_at lookups.
    print("Initialization.")
    context = Context(objs = [Object(name = "0"), ], obts = [Obstacle(name ="0", a = (0.0, 1.0), b = (1.0, 0.0), radius = 2.0), ])
    print("Context: %s" % context.info())
    print("Get time by 10 steps.")
    print("Time: %s" % context.get_time_by_steps(steps = 10))
    print("Objects within distance 10 from center (0, 0).")
    for obj in context.get_objs_at(pos = (0, 0), d = 10) :
        print("Object: %s" % obj.info())
    print("Obstacles within distance 10 from center (0, 0).")
    for obt in context.get_obts_at(pos = (0, 0), d = 10) :
        print("Obstacle: %s" % obt.info())
def test_context_para() :
    '''
    >>> test_context_para()
    Initialization.
    Context: <<multiagent.Context has_oracle=1>>
    Time: 0.0000
    Change parameters by directly apply 'context.paras = paras', where 'paras' is a map storing the new values.
    Time: 0.5000
    '''
    # Doctest showing that assigning a string-valued map to context.paras
    # updates the corresponding attributes (here: time).
    print("Initialization.")
    context = Context()
    print("Context: %s" % context.info())
    print("Time: %.4f" % context.time)
    print("Change parameters by directly apply 'context.paras = paras', where 'paras' is a map storing the new values.")
    paras = {"time" : "0.5"}
    context.paras = paras
    print("Time: %.4f" % context.time)
# Run all doctests in this module and print a short pass/fail summary.
if __name__ == '__main__' :
    result = doctest.testmod()
    print("-" * 50)
    print("[Context Test] attempted/failed tests: %d/%d" % (result.attempted, result.failed))
| {
"content_hash": "6995bfa54c6b3056e0b6665141214218",
"timestamp": "",
"source": "github",
"line_count": 101,
"max_line_length": 161,
"avg_line_length": 36.01980198019802,
"alnum_prop": 0.597306212204508,
"repo_name": "csningli/MultiAgent",
"id": "bcae97221a5461e1dd5e183e7d79e50bf60df016",
"size": "3698",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_context.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "142545"
},
{
"name": "Shell",
"bytes": "75"
}
],
"symlink_target": ""
} |
import sklearn_crfsuite
from sklearn_crfsuite import scorers
from sklearn_crfsuite import metrics
import json
import codecs
import random
from crf_ner import get_features, get_X_y, train_model
# config
ENCODING = 'utf-8'  # encoding of the training-data JSON file
FILE_PATH = '../data/ner/train.json'  # NER training data, relative to this script
SEED = 7  # RNG seed so the shuffle (and hence the split) is reproducible
SPLIT = 0.20  # fraction of sentences held out as the test set
def load():
    """Load the NER training file and return its list of sentences.

    Fix: the original passed ``codecs.open(...)`` straight into
    ``json.load`` and never closed the handle; a with-block now releases it.
    """
    with codecs.open(FILE_PATH, 'r', 'utf-8') as json_file:
        json_data = json.load(json_file, encoding=ENCODING)
    return json_data['sentences']
def shuffle(sentences):
    # Shuffle the sentence list in place with a fixed seed so that every
    # run produces the same train/test split.
    random.seed(SEED)
    random.shuffle(sentences)
def split(sentences):
    """Split sentences into {'train', 'test'}; the first SPLIT fraction
    (after shuffling) becomes the test set.

    Fix: the local variable was named ``split``, shadowing this function's
    own name; renamed to ``cut`` (same value, same behavior).
    """
    cut = int(len(sentences) * SPLIT)
    return {'train': sentences[cut:], 'test': sentences[:cut]}
def evaluate_model(test, crf):
    # Evaluate a trained CRF on held-out sentences: prints the weighted F1
    # and a per-label classification report.  The 'O' (outside) label is
    # excluded so it does not dominate the averages.
    # NOTE: Python 2 print statements — this module targets Python 2.
    X_test, y_test = get_X_y(test)
    labels = list(crf.classes_)
    labels.remove('O')
    y_pred = crf.predict(X_test)
    # F1 score
    f1_score = metrics.flat_f1_score(y_test, y_pred,
                                     average='weighted', labels=labels)
    print 'F1 score : ', f1_score
    print 'NE label wise analysis'
    # Sort labels by entity name first, then by their B-/I- prefix, so
    # related labels appear next to each other in the report.
    sorted_labels = sorted(
        labels,
        key=lambda name: (name[1:], name[0])
    )
    print metrics.flat_classification_report(
        y_test, y_pred, labels=sorted_labels, digits=3
    )
if __name__ == '__main__':
    sentences = load()
    # Count occurrences of each NE tag; the tag is the last element of each
    # word tuple.
    ne_count = {}
    for sentence in sentences:
        for word in sentence:
            ne_count[word[len(word) - 1]
                     ] = ne_count.get(word[(len(word) - 1)], 0) + 1
    output = ''  # NOTE(review): unused — looks like a leftover
    # Analysis of the NE types
    print 'Analysis of the NE types in the entire Data'
    for ne_type in sorted(ne_count, key=ne_count.get, reverse=True):
        print ne_type, ne_count[ne_type]
    # Deterministic shuffle, then hold out the first SPLIT fraction as test.
    shuffle(sentences)
    data = split(sentences)
    train, test = data['train'], data['test']
    # Analysis of the dataset
    print '\n\nTotal # sentences : \t', len(sentences)
    print '# sentences in train: \t', len(train)
    print '# sentences in test: \t', len(test)
    # Sample feature extraction
    temp_X, temp_y = get_X_y([train[0]])
    print '\n\n-----------\n'
    print 'Sample feature extraction'
    print temp_X[0][0], temp_y[0][0]
    # Train on the training split, then evaluate on the held-out data.
    crf = train_model(train)
    print '\n\n-----------\n'
    evaluate_model(test, crf)
| {
"content_hash": "bfdbfc73b3d0331560ef8a4a9975393a",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 71,
"avg_line_length": 24.274725274725274,
"alnum_prop": 0.6034404708012675,
"repo_name": "lastmansleeping/hindi-toolkit",
"id": "d1cddd791e38b71e65763d576ae6f97cde68f78e",
"size": "2226",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hindi_toolkit/ner/evaluate_model.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "29570"
}
],
"symlink_target": ""
} |
import _plotly_utils.basevalidators
class B0Validator(_plotly_utils.basevalidators.NumberValidator):
    """Number validator for the carpet trace's 'b0' property."""

    def __init__(self, plotly_name="b0", parent_name="carpet", **kwargs):
        super(B0Validator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            # Default edit_type is 'calc' unless the caller overrides it.
            edit_type=kwargs.pop("edit_type", "calc"),
            **kwargs,
        )
| {
"content_hash": "7c7658d3041fd093a9c563b341d49b1f",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 73,
"avg_line_length": 34.63636363636363,
"alnum_prop": 0.6062992125984252,
"repo_name": "plotly/plotly.py",
"id": "7f6b8166b206eda3dd7153b2a4ca9d3fff64fee3",
"size": "381",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/carpet/_b0.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "545"
},
{
"name": "JavaScript",
"bytes": "2074"
},
{
"name": "PostScript",
"bytes": "565328"
},
{
"name": "Python",
"bytes": "31506317"
},
{
"name": "TypeScript",
"bytes": "71337"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, unicode_literals
from gaebusiness.business import CommandExecutionException
from gaecookie.decorator import no_csrf
from gaepermission.decorator import login_required
from tekton.gae.middleware.json_middleware import JsonResponse, JsonUnsecureResponse
from book_app import facade
@login_required
def index():
    """List every book, serialized through the short form, as JSON."""
    books = facade.list_books_cmd()()
    form = facade.book_short_form()
    serialized = [form.fill_with_model(book) for book in books]
    return JsonResponse(serialized)
@login_required
def save(_resp, **book_properties):
    """Persist a new book; return the saved model or errors as JSON."""
    save_cmd = facade.save_book_cmd(**book_properties)
    return _save_or_update_json_response(_resp, save_cmd)
@login_required
def update(_resp, id, **book_properties):
    """Update book *id* with the given properties; return it (or the
    command's validation errors) as JSON.

    Fix: added @login_required for consistency with the sibling save/delete
    endpoints — its absence here looked like an oversight, leaving the
    update route unauthenticated.
    """
    cmd = facade.update_book_cmd(id, **book_properties)
    return _save_or_update_json_response(_resp, cmd)
@login_required
def delete(id):
    """Delete the book identified by *id*."""
    delete_cmd = facade.delete_book_cmd(id)
    delete_cmd()
def _save_or_update_json_response(_resp, cmd):
    """Run a save/update command and serialize the outcome.

    On success, returns the saved book rendered through the short form.
    On CommandExecutionException, sets the response status to 500 and
    returns the command's error dict instead.

    Fix: removed the stray ``_resp.get('nome')`` call in the error branch —
    its result was unused and it looked like leftover debugging.
    """
    try:
        book = cmd()
    except CommandExecutionException:
        _resp.status_code = 500
        return JsonResponse(cmd.errors)
    short_form = facade.book_short_form()
    model = short_form.fill_with_model(book)
    return JsonResponse(model)
| {
"content_hash": "5ea0defbe730f1bde3465b0db2972e0c",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 84,
"avg_line_length": 29.27906976744186,
"alnum_prop": 0.7148530579825259,
"repo_name": "renzon/fatec-script-2",
"id": "afa193246b933b04289947fb45cb667d774fa17c",
"size": "1283",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "backend/appengine/routes/books/rest.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "128733"
},
{
"name": "JavaScript",
"bytes": "10573"
},
{
"name": "Python",
"bytes": "78172"
},
{
"name": "Shell",
"bytes": "1771"
}
],
"symlink_target": ""
} |
__revision__ = "src/engine/SCons/Tool/MSCommon/__init__.py 2014/09/27 12:51:43 garyo"
__doc__ = """
Common functions for Microsoft Visual Studio and Visual C/C++.
"""
import copy
import os
import re
import subprocess
import SCons.Errors
import SCons.Platform.win32
import SCons.Util
from SCons.Tool.MSCommon.sdk import mssdk_exists, \
mssdk_setup_env
from SCons.Tool.MSCommon.vc import msvc_exists, \
msvc_setup_env, \
msvc_setup_env_once
from SCons.Tool.MSCommon.vs import get_default_version, \
get_vs_by_version, \
merge_default_version, \
msvs_exists, \
query_versions
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| {
"content_hash": "d6a9ed8a12da0830fdbb7ebd8c3dbf3d",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 86,
"avg_line_length": 28.333333333333332,
"alnum_prop": 0.5454545454545454,
"repo_name": "smandy/locke",
"id": "7c05452bf84fc61aaed005fc820609745299cb68",
"size": "2048",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "scons-local-2.3.4/SCons/Tool/MSCommon/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "165"
},
{
"name": "D",
"bytes": "29663"
},
{
"name": "Python",
"bytes": "1921409"
}
],
"symlink_target": ""
} |
from GenSched import Schedule, Theater_Room
class SpecGenreSchedule(Schedule):
    """Scheduling strategy that only books customers into theaters whose
    genre matches the request."""

    def __init__(self):
        # Intentionally does not call Schedule.__init__: theaters are shared
        # state set up elsewhere.  (The original had a bare `1` placeholder
        # here to avoid an empty body.)
        pass

    def schedule(self, name, genre):
        """Book *name* into the first open hourly slot (0-23) of a theater
        showing *genre*; return a human-readable result message.

        Fix: the failure message referenced an undefined name ``not_genre``,
        so every failed scheduling attempt raised NameError instead of
        returning the message.  It now interpolates ``genre``.
        """
        for i in range(0, len(self.theaters)):
            if(self.theaters[i].get_genre() == genre):
                for j in range(0, 24):
                    if(self.theaters[i].fill_in_time_slot(name, j) != False):
                        return "Scheduled " + name + " at " + str(j) + " for movie: " + self.theaters[i].get_name() + " using SpecGenreSchedule of type: " + self.theaters[i].get_genre()
        return "Could not schedule " + name + " at all" + " for a movie of type: " + genre + " using SpecGenreSchedule"
#Test cases
if __name__ == "__main__":
    # NOTE(review): g (a separate SpecGenreSchedule instance) appears to rely
    # on the theaters created by s — presumably shared class-level state in
    # Schedule; confirm against GenSched.
    s = Schedule([("The Little Mermaid", "Family"), ("Saw 3", "Horror"), ("Kung Fu Panda", "Family")])
    g = SpecGenreSchedule()
    # Fill Family slots repeatedly, then probe a few more requests.
    for i in range(0, 25):
        print(g.schedule("Bobby", "Family"))
    print(g.schedule("Davey", "Family"))
    print(g.schedule("Davey", "Family"))
    print(g.schedule("Davey", "Horror"))
    s.print_theaters()
| {
"content_hash": "ce1c350d559e430312eacb0cd7116285",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 167,
"avg_line_length": 42.68,
"alnum_prop": 0.6457357075913777,
"repo_name": "askii93/Strategy-Movies",
"id": "3b4cdd65d0894ccfb974853b61bdc8540821d3bd",
"size": "1067",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "SpecGenreSched.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "8560"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, division, print_function, unicode_literals
import unittest
from nose.plugins.attrib import attr
import hgvs.variant
@attr(tags=["quick","models"])
class Test_SequenceVariant(unittest.TestCase):
    """Smoke test for SequenceVariant's string rendering."""

    def test_SequenceVariant(self):
        # str() renders as "<ac>:<type>.<posedit>".
        var = hgvs.variant.SequenceVariant(ac='AC',type='B',posedit='1234DE>FG')
        self.assertEqual( str(var) , 'AC:B.1234DE>FG' )
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
## <LICENSE>
## Copyright 2014 HGVS Contributors (https://bitbucket.org/hgvs/hgvs)
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
## </LICENSE>
| {
"content_hash": "9ce0ba90be3280b5ab4a3d5c98870874",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 82,
"avg_line_length": 33.90909090909091,
"alnum_prop": 0.7184986595174263,
"repo_name": "jmuhlich/hgvs",
"id": "3852a2d2b906268cb8698d70e089d5d5f92cbe61",
"size": "1143",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_hgvs_variant.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "6322"
},
{
"name": "Perl",
"bytes": "2833"
},
{
"name": "Python",
"bytes": "281828"
},
{
"name": "Shell",
"bytes": "3269"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    """Auto-generated schema migration for the 'servisformu' app.

    Adds a delivery-date field to Urunler and alters the generated default
    codes on Musteriler.Kodu and ServisForm.FormNo.  The literal defaults
    below were frozen in at generation time — do not edit them.
    """

    dependencies = [
        ('servisformu', '0004_auto_20160114_1526'),
    ]

    operations = [
        migrations.AddField(
            model_name='urunler',
            name='TeslimatTarihi',
            field=models.DateField(blank=True, default=django.utils.timezone.now, verbose_name='Teslimat Tarihi'),
        ),
        migrations.AlterField(
            model_name='musteriler',
            name='Kodu',
            field=models.CharField(default='98930143', max_length=8, verbose_name='M\xfc\u015fteri Kodu'),
        ),
        migrations.AlterField(
            model_name='servisform',
            name='FormNo',
            field=models.CharField(default='21352374', max_length=8, verbose_name='Form No'),
        ),
    ]
| {
"content_hash": "3cf8b5c42fcef6890cedc5ebc2bdb717",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 114,
"avg_line_length": 30.79310344827586,
"alnum_prop": 0.606942889137738,
"repo_name": "muslu/django-teknikservis-py3",
"id": "bb7f1c5bb18c433d23506ab42f64ee8c2da3904f",
"size": "965",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "servisformu/migrations/0005_auto_20160115_1151.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "49828"
},
{
"name": "HTML",
"bytes": "52141"
},
{
"name": "JavaScript",
"bytes": "101554"
},
{
"name": "Python",
"bytes": "30730"
}
],
"symlink_target": ""
} |
from netmiko.huawei.huawei_ssh import HuaweiSSH
__all__ = ['HuaweiSSH']
| {
"content_hash": "683c0b54d8cc67c2e8fa5693352f033f",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 47,
"avg_line_length": 24.333333333333332,
"alnum_prop": 0.726027397260274,
"repo_name": "sbyount/my_python",
"id": "af11986699f49f9b316659baf37e3b77ce29c07b",
"size": "73",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "netmiko-master/netmiko/huawei/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "198854"
},
{
"name": "Shell",
"bytes": "5383"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from __future__ import print_function
from contextlib import contextmanager
from typing import (cast, Any, Callable, Dict, Generator, Iterable, Iterator, List, Mapping,
Optional, Sized, Tuple, Union)
from django.core.urlresolvers import LocaleRegexURLResolver
from django.conf import settings
from django.test import TestCase
from django.test.client import (
BOUNDARY, MULTIPART_CONTENT, encode_multipart,
)
from django.template import loader
from django.http import HttpResponse
from django.db.utils import IntegrityError
from django.utils.translation import ugettext as _
from zerver.lib.initial_password import initial_password
from zerver.lib.db import TimeTrackingCursor
from zerver.lib.str_utils import force_text
from zerver.lib import cache
from zerver.tornado import event_queue
from zerver.tornado.handlers import allocate_handler_id
from zerver.worker import queue_processors
from zerver.lib.actions import (
check_send_message, create_stream_if_needed, bulk_add_subscriptions,
get_display_recipient, bulk_remove_subscriptions
)
from zerver.models import (
get_realm,
get_stream,
get_user_profile_by_email,
Client,
Message,
Realm,
Recipient,
Stream,
Subscription,
UserMessage,
UserProfile,
)
from zerver.lib.request import JsonableError
import collections
import base64
import mock
import os
import re
import sys
import time
import ujson
import unittest
from six.moves import urllib
from six import text_type, binary_type
from zerver.lib.str_utils import NonBinaryStr
from contextlib import contextmanager
import six
@contextmanager
def simulated_queue_client(client):
    # type: (type) -> Iterator[None]
    """Temporarily replace queue_processors.SimpleQueueClient with *client*.

    Fix: the original did not restore the real class if the with-body
    raised; the try/finally makes the monkey-patch exception-safe.
    """
    real_SimpleQueueClient = queue_processors.SimpleQueueClient
    queue_processors.SimpleQueueClient = client  # type: ignore # https://github.com/JukkaL/mypy/issues/1152
    try:
        yield
    finally:
        queue_processors.SimpleQueueClient = real_SimpleQueueClient  # type: ignore # https://github.com/JukkaL/mypy/issues/1152
@contextmanager
def tornado_redirected_to_list(lst):
    # type: (List[Mapping[str, Any]]) -> Iterator[None]
    """Capture Tornado event-queue notifications into *lst* instead of
    processing them.

    Fix: restoration now happens in a finally block, so an exception inside
    the with-body can no longer leave process_notification patched.
    """
    real_event_queue_process_notification = event_queue.process_notification
    event_queue.process_notification = lst.append
    try:
        yield
    finally:
        event_queue.process_notification = real_event_queue_process_notification
@contextmanager
def simulated_empty_cache():
    # type: () -> Generator[List[Tuple[str, Union[text_type, List[text_type]], text_type]], None, None]
    """Patch the cache getters to always miss, recording every lookup.

    Yields the list of (operation, key(s), cache_name) tuples made while the
    context is active.  Fix: the real getters are restored in a finally
    block, so an exception in the with-body cannot leave the cache patched.
    """
    cache_queries = []  # type: List[Tuple[str, Union[text_type, List[text_type]], text_type]]

    def my_cache_get(key, cache_name=None):
        # type: (text_type, Optional[str]) -> Any
        cache_queries.append(('get', key, cache_name))
        return None

    def my_cache_get_many(keys, cache_name=None):
        # type: (List[text_type], Optional[str]) -> Dict[text_type, Any]
        cache_queries.append(('getmany', keys, cache_name))
        # NOTE(review): returns None where real cache_get_many presumably
        # returns a dict — confirm callers tolerate this simulated miss.
        return None

    old_get = cache.cache_get
    old_get_many = cache.cache_get_many
    cache.cache_get = my_cache_get
    cache.cache_get_many = my_cache_get_many
    try:
        yield cache_queries
    finally:
        cache.cache_get = old_get
        cache.cache_get_many = old_get_many
@contextmanager
def queries_captured(include_savepoints=False):
    # type: (Optional[bool]) -> Generator[List[Dict[str, Union[str, binary_type]]], None, None]
    '''
    Allow a user to capture just the queries executed during
    the with statement.

    Fix: the cursor methods are restored in a finally block, so an
    exception inside the with statement can no longer leave
    TimeTrackingCursor monkey-patched.
    '''
    queries = []  # type: List[Dict[str, Union[str, binary_type]]]

    def wrapper_execute(self, action, sql, params=()):
        # type: (TimeTrackingCursor, Callable, NonBinaryStr, Iterable[Any]) -> None
        # Time the query; record it unless it is a SAVEPOINT we were asked
        # to ignore.  The finally ensures failed queries are recorded too.
        start = time.time()
        try:
            return action(sql, params)
        finally:
            stop = time.time()
            duration = stop - start
            if include_savepoints or ('SAVEPOINT' not in sql):
                queries.append({
                    'sql': self.mogrify(sql, params).decode('utf-8'),
                    'time': "%.3f" % duration,
                })

    old_execute = TimeTrackingCursor.execute
    old_executemany = TimeTrackingCursor.executemany

    def cursor_execute(self, sql, params=()):
        # type: (TimeTrackingCursor, NonBinaryStr, Iterable[Any]) -> None
        return wrapper_execute(self, super(TimeTrackingCursor, self).execute, sql, params)  # type: ignore # https://github.com/JukkaL/mypy/issues/1167
    TimeTrackingCursor.execute = cursor_execute  # type: ignore # https://github.com/JukkaL/mypy/issues/1167

    def cursor_executemany(self, sql, params=()):
        # type: (TimeTrackingCursor, NonBinaryStr, Iterable[Any]) -> None
        return wrapper_execute(self, super(TimeTrackingCursor, self).executemany, sql, params)  # type: ignore # https://github.com/JukkaL/mypy/issues/1167
    TimeTrackingCursor.executemany = cursor_executemany  # type: ignore # https://github.com/JukkaL/mypy/issues/1167

    try:
        yield queries
    finally:
        TimeTrackingCursor.execute = old_execute  # type: ignore # https://github.com/JukkaL/mypy/issues/1167
        TimeTrackingCursor.executemany = old_executemany  # type: ignore # https://github.com/JukkaL/mypy/issues/1167
def make_client(name):
    # type: (str) -> Client
    """Fetch or create a Client row with the given name."""
    client, _ = Client.objects.get_or_create(name=name)
    return client
def find_key_by_email(address):
    # type: (text_type) -> text_type
    """Return the newest confirmation key mailed to *address*.

    Scans Django's test outbox newest-first.  Implicitly returns None when
    no message was sent to the address; raises AttributeError if a matching
    message lacks a confirmation link (regex search returns None).
    """
    from django.core.mail import outbox
    key_regex = re.compile("accounts/do_confirm/([a-f0-9]{40})>")
    for message in reversed(outbox):
        if address in message.to:
            return key_regex.search(message.body).groups()[0]
def message_ids(result):
    # type: (Dict[str, Any]) -> Set[int]
    """Extract the set of message ids from a get-messages API result."""
    return {msg['id'] for msg in result['messages']}
def message_stream_count(user_profile):
    # type: (UserProfile) -> int
    """Count all UserMessage rows belonging to *user_profile*."""
    rows = UserMessage.objects.select_related("message")
    return rows.filter(user_profile=user_profile).count()
def most_recent_usermessage(user_profile):
    # type: (UserProfile) -> UserMessage
    """Return the user's newest UserMessage (highest message id)."""
    rows = (UserMessage.objects
            .select_related("message")
            .filter(user_profile=user_profile)
            .order_by('-message'))
    return rows[0]  # Django applies a LIMIT for this indexing
def most_recent_message(user_profile):
    # type: (UserProfile) -> Message
    """Return the newest Message delivered to *user_profile*."""
    return most_recent_usermessage(user_profile).message
def get_user_messages(user_profile):
    # type: (UserProfile) -> List[Message]
    """Return all of the user's messages, oldest first."""
    user_messages = (UserMessage.objects
                     .select_related("message")
                     .filter(user_profile=user_profile)
                     .order_by('message'))
    return [row.message for row in user_messages]
class DummyHandler(object):
    """Minimal stand-in for a Tornado handler; only registers a handler id."""

    def __init__(self):
        # type: () -> None
        allocate_handler_id(self)  # type: ignore # this is a testing mock
class POSTRequestMock(object):
    """Minimal stand-in for a Django POST request, for view-level tests."""
    method = "POST"

    def __init__(self, post_data, user_profile):
        # type: (Dict[str, Any], UserProfile) -> None
        self.GET = {}  # type: Dict[str, Any]
        self.POST = post_data
        self.user = user_profile
        # Tornado-specific plumbing some request-handling code paths expect.
        self._tornado_handler = DummyHandler()
        self._log_data = {}  # type: Dict[str, Any]
        self.META = {'PATH_INFO': 'test'}
class HostRequestMock(object):
    """A mock request object where get_host() works. Useful for testing
    routes that use Zulip's subdomains feature"""

    def __init__(self, host=settings.EXTERNAL_HOST):
        # type: (text_type) -> None
        # Defaults to the server's own external host (no subdomain).
        self.host = host

    def get_host(self):
        # type: () -> text_type
        return self.host
# URL-coverage instrumentation is opt-in via this environment variable.
INSTRUMENTING = os.environ.get('TEST_INSTRUMENT_URL_COVERAGE', '') == 'TRUE'
INSTRUMENTED_CALLS = []  # type: List[Dict[str, Any]]  # one record per instrumented request

UrlFuncT = Callable[..., HttpResponse]  # TODO: make more specific
def instrument_url(f):
    # type: (UrlFuncT) -> UrlFuncT
    """Decorator that records url/status/timing for URL-coverage reports.

    Returns *f* unchanged unless INSTRUMENTING is set.

    Fix: the wrapper used a mutable default argument (``info={}``), which is
    shared across all calls; a None sentinel now yields a fresh dict per
    call while preserving the value passed through to *f* and recorded in
    INSTRUMENTED_CALLS.
    """
    if not INSTRUMENTING:
        return f
    else:
        def wrapper(self, url, info=None, **kwargs):
            # type: (Any, text_type, Optional[Dict[str, Any]], **Any) -> HttpResponse
            if info is None:
                info = {}
            start = time.time()
            result = f(self, url, info, **kwargs)
            delay = time.time() - start
            test_name = self.id()
            # Split off the query string so coverage matching sees the path.
            if '?' in url:
                url, extra_info = url.split('?', 1)
            else:
                extra_info = ''
            INSTRUMENTED_CALLS.append(dict(
                url=url,
                status_code=result.status_code,
                method=f.__name__,
                delay=delay,
                extra_info=extra_info,
                info=info,
                test_name=test_name,
                kwargs=kwargs))
            return result
        return wrapper
def write_instrumentation_reports(full_suite):
    # type: (bool) -> None
    """When instrumenting, dump per-call URL coverage to var/url_coverage.txt
    and, for full suites, fail the run if any non-exempt URL pattern was
    never exercised successfully."""
    if INSTRUMENTING:
        calls = INSTRUMENTED_CALLS
        from zproject.urls import urlpatterns, v1_api_and_json_patterns
        # Find our untested urls.
        pattern_cnt = collections.defaultdict(int)  # type: Dict[str, int]

        def re_strip(r):
            # type: (Any) -> str
            # Canonicalize a URL regex by dropping its ^/$ anchors.
            return str(r).lstrip('^').rstrip('$')

        def find_patterns(patterns, prefixes):
            # type: (List[Any], List[str]) -> None
            for pattern in patterns:
                find_pattern(pattern, prefixes)

        def cleanup_url(url):
            # type: (str) -> str
            # Strip the leading slash and any known test-server origin.
            if url.startswith('/'):
                url = url[1:]
            if url.startswith('http://testserver/'):
                url = url[len('http://testserver/'):]
            if url.startswith('http://zulip.testserver/'):
                url = url[len('http://zulip.testserver/'):]
            if url.startswith('http://testserver:9080/'):
                url = url[len('http://testserver:9080/'):]
            return url

        def find_pattern(pattern, prefixes):
            # type: (Any, List[str]) -> None
            # Attribute each recorded call to at most one URL pattern
            # (tagged via call['pattern']); count only successful statuses.
            if isinstance(pattern, type(LocaleRegexURLResolver)):
                return
            if hasattr(pattern, 'url_patterns'):
                return
            canon_pattern = prefixes[0] + re_strip(pattern.regex.pattern)
            cnt = 0
            for call in calls:
                if 'pattern' in call:
                    continue
                url = cleanup_url(call['url'])
                for prefix in prefixes:
                    if url.startswith(prefix):
                        match_url = url[len(prefix):]
                        if pattern.regex.match(match_url):
                            if call['status_code'] in [200, 204, 301, 302]:
                                cnt += 1
                            call['pattern'] = canon_pattern
            pattern_cnt[canon_pattern] += cnt

        find_patterns(urlpatterns, ['', 'en/', 'de/'])
        find_patterns(v1_api_and_json_patterns, ['api/v1/', 'json/'])
        # Sanity check that we actually walked the URL conf.
        assert len(pattern_cnt) > 100
        untested_patterns = set([p for p in pattern_cnt if pattern_cnt[p] == 0])
        # We exempt some patterns that are called via Tornado.
        exempt_patterns = set([
            'api/v1/events',
            'api/v1/register',
        ])
        untested_patterns -= exempt_patterns
        var_dir = 'var'  # TODO make sure path is robust here
        fn = os.path.join(var_dir, 'url_coverage.txt')
        with open(fn, 'w') as f:
            for call in calls:
                try:
                    line = ujson.dumps(call)
                    f.write(line + '\n')
                except OverflowError:
                    print('''
A JSON overflow error was encountered while
producing the URL coverage report. Sometimes
this indicates that a test is passing objects
into methods like client_post(), which is
unnecessary and leads to false positives.
''')
                    print(call)
        if full_suite:
            print('INFO: URL coverage report is in %s' % (fn,))
            print('INFO: Try running: ./tools/create-test-api-docs')
        if full_suite and len(untested_patterns):
            print("\nERROR: Some URLs are untested! Here's the list of untested URLs:")
            for untested_pattern in sorted(untested_patterns):
                print(" %s" % (untested_pattern,))
            sys.exit(1)
def get_all_templates():
    # type: () -> List[str]
    """Collect the relative path of every template file known to Django's
    template engines, skipping hidden files, ``__init__`` files and
    Markdown (.md) files."""
    found = []

    def _usable(full_path, basename):
        # type: (text_type, text_type) -> bool
        # A template is usable unless it is hidden, an __init__ file,
        # Markdown documentation, or not a regular file at all.
        if basename.startswith('.') or basename.startswith('__init__'):
            return False
        if basename.endswith(".md"):
            return False
        return os.path.isfile(full_path)

    for engine in loader.engines.all():
        for base_dir in engine.template_dirs:
            if not os.path.exists(base_dir):
                continue
            base_dir = os.path.normpath(base_dir)
            for current_dir, _subdirs, filenames in os.walk(base_dir):
                for basename in filenames:
                    full_path = os.path.join(current_dir, basename)
                    if _usable(full_path, basename):
                        found.append(os.path.relpath(full_path, base_dir))
    return found
| {
"content_hash": "4970c5e636bd50727e2ba8f035f9a92b",
"timestamp": "",
"source": "github",
"line_count": 379,
"max_line_length": 154,
"avg_line_length": 35.39841688654354,
"alnum_prop": 0.6066636851520573,
"repo_name": "Jianchun1/zulip",
"id": "d811bd3714db277911a36b425395766156e5fb9b",
"size": "13416",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "zerver/lib/test_helpers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "242541"
},
{
"name": "Groovy",
"bytes": "5509"
},
{
"name": "HTML",
"bytes": "457584"
},
{
"name": "JavaScript",
"bytes": "1470909"
},
{
"name": "Nginx",
"bytes": "1280"
},
{
"name": "Pascal",
"bytes": "1113"
},
{
"name": "Perl",
"bytes": "401825"
},
{
"name": "Puppet",
"bytes": "82466"
},
{
"name": "Python",
"bytes": "2987663"
},
{
"name": "Ruby",
"bytes": "249748"
},
{
"name": "Shell",
"bytes": "37195"
}
],
"symlink_target": ""
} |
import random
from enum import Enum
X, O = TOKENS = 'XO'

# Winning lines as 9-bit masks (cell 1 -> bit 0 ... cell 9 -> bit 8):
# three columns, three rows, two diagonals.
WIN_MASKS = [
    0b100100100,
    0b10010010,
    0b1001001,
    0b111000000,
    0b111000,
    0b111,
    0b100010001,
    0b1010100,
]
SIZE = 9
# `space` is a bitmask of *empty* cells: all ones on an empty board.
EMPTY = 2 ** SIZE - 1
FULL = 0

Events = Enum('Events', 'Start Move Win Draw')


class Tree:
    # Placeholder for a future game-tree implementation (currently unused).
    pass


class State:
    """Immutable tic-tac-toe position.

    ``board`` has a bit set for every cell occupied by O; X's cells are the
    occupied cells not in ``board``, i.e. ``(board ^ EMPTY) ^ space``.
    ``space`` has a bit set for every empty cell.
    """

    def __init__(self, board=0, space=EMPTY):
        self.board = board
        self.space = space

    def next(self, move):
        """Return the successor state after the current player takes
        *move* (1-9).

        Fixed: the previous branchless XOR expression did not record which
        player owned the new stone, so ownership flipped every turn.  This
        restores the (previously commented-out) intent: only O's stones
        are recorded in ``board``.
        """
        bit = 2 ** (move - 1)
        space = self.space ^ bit  # mark the cell as occupied
        board = self.board
        if self.token == O:
            board |= bit
        return State(board, space)

    @property
    def id(self):
        # Fixed: `raise NotImplemented` raises a TypeError in Python 3
        # because NotImplemented is not an exception class.
        raise NotImplementedError

    @property
    def token(self):
        """Token of the player to move; X always moves first."""
        return TOKENS[1 - len(self.moves) % 2]

    @property
    def finished(self):
        """Truthy when the game is over (a winner exists or no cell is
        left)."""
        # Fixed: `self.space is FULL` relied on CPython small-int caching;
        # value equality is what is meant.
        return self.winner or self.space == FULL

    @property
    def winner(self):
        """Return 'X' or 'O' if that side holds a winning line, else None.

        Fixed: the previous version referenced an unbound ``token`` name
        (NameError on any win) and only examined O's stones; this restores
        the commented-out per-side check.
        """
        sides = {
            X: (self.board ^ EMPTY) ^ self.space,
            O: self.board,
        }
        for token, board in sides.items():
            for mask in WIN_MASKS:
                if (board & mask) == mask:
                    return token

    @property
    def moves(self):
        """Sorted list of still-playable cell numbers (1-9)."""
        return [i + 1 for i in range(SIZE) if self.space & 2 ** i]

    @property
    def turn(self):
        """Number of moves already played."""
        return SIZE - len(self.moves)

    def __repr__(self):
        points = []
        for i in range(SIZE):
            point = str(i + 1)
            if not self.space & 2 ** i:
                # Occupied: bit present in `board` means O, otherwise X.
                point = TOKENS[bool(self.board & 2 ** i)]
            points.append(' ' + point)
            if i % 3 == 2:  # fixed: `is 2` compared identity, not value
                points.append("\n")
        return "".join(points)
class Game:
    """Drives one tic-tac-toe match between two players.

    ``players`` is a pair of Player factories (callables taking the game);
    index 0 plays X, index 1 plays O.  Events are reported to ``ui``
    (a Console by default).
    """

    def __init__(self, players, state=None, ui=None):
        self.players = {X: players[0](self), O: players[1](self)}
        self.state = state or State()
        self.ui = ui or Console()

    def run(self):
        """Play moves until the game is finished, then report the result.

        Fixed: removed a leftover debug ``print`` of the raw board/space
        bitmasks that polluted every turn's output.
        """
        self.ui.update(Events.Start, self.players)
        while not self.state.finished:
            token = self.state.token
            player = self.players[token]
            move = player.play()
            self.state = self.state.next(move)
            turn = self.state.turn
            # locals() supplies the template fields (player, token, move,
            # turn); str.format ignores the extra names.
            self.ui.update(Events.Move, locals())
        if self.state.winner is None:
            self.ui.update(Events.Draw)
        else:
            token = self.state.winner
            player = self.players[token]
            self.ui.update(Events.Win, locals())
class Console:
    """Plain-text UI: prints event messages and reads moves from stdin."""

    # One message template per game event; fields are filled from the
    # mapping passed to update().
    Templates = {
        Events.Start: 'Game started: {X} "X" vs {O} "O"',
        Events.Move: '{player} "{token}" chooses {move} [turn {turn}]',
        Events.Win: '{player} "{token}" wins',
        Events.Draw: 'draw'
    }

    def update(self, event, data=None):
        """Print the message for *event*; *data* maps template fields.

        Fixed: replaced the mutable default argument ``data={}`` with
        None (same behavior, no shared-state hazard).
        """
        print(self.Templates[event].format(**(data or {})))

    def input_move(self, state):
        """Prompt until the user enters one of the legal moves.

        Fixed: non-numeric input now re-prompts instead of crashing with
        an unhandled ValueError from int().
        """
        moves = state.moves
        while True:
            try:
                move = int(input("Enter a number {}: ".format(moves)))
            except ValueError:
                continue
            if move in moves:
                return move

    def show_state(self, state):
        """Print the board (State.__repr__)."""
        print(state)
class Player:
    """Abstract player; subclasses implement play() to choose a move."""

    def __init__(self, game):
        # The game gives the player access to the current state and UI.
        self.game = game

    def play(self):
        """Return the chosen move (1-9).  Subclasses must override.

        Fixed: `raise NotImplemented` raises a TypeError in Python 3
        (NotImplemented is not an exception); NotImplementedError is the
        correct abstract-method signal.
        """
        raise NotImplementedError

    def __repr__(self):
        return self.__class__.__name__
class Human(Player):
    """Player controlled interactively through the game's UI."""

    def play(self):
        # Show the current board, then delegate move selection to the UI.
        game = self.game
        game.ui.show_state(game.state)
        return game.ui.input_move(game.state)
class Random(Player):
    """Player that picks uniformly among the legal moves."""

    def play(self):
        legal_moves = self.game.state.moves
        return random.choice(legal_moves)
# Demo entry point: X is driven by random moves, O by console input.
g = Game([Random, Human])
g.run()

# Scratch experiments left by the author for manually checking
# State.winner on hand-built positions:
# g = State(0b110101100, 0)
# print(g)
# print(g.winner)
#
# print()
#
# g = State(0b010101001, 0)
# print(g)
# print(g.winner)
#
# print()
#
# g = State(0b010101001, 0b1)
# print(g)
# print(g.winner)
| {
"content_hash": "a72167ab9d79556721324626658453fb",
"timestamp": "",
"source": "github",
"line_count": 179,
"max_line_length": 69,
"avg_line_length": 19.810055865921786,
"alnum_prop": 0.5910885504794134,
"repo_name": "jorgebg/tictactoe",
"id": "b6eb8257e16dfa67cbe9d9a9a9d9a3d924007d68",
"size": "3547",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "todo/game.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "23519"
}
],
"symlink_target": ""
} |
import datetime
from operator import attrgetter
from .models import (
Country, Person, Group, Membership, Friendship, Article,
ArticleTranslation, ArticleTag, ArticleIdea, NewsArticle)
from django.test import TestCase
from django.utils.translation import activate
from django.core.exceptions import FieldError
from django import forms
# Note that these tests are testing internal implementation details.
# ForeignObject is not part of public API.
class MultiColumnFKTests(TestCase):
    """Exercise multi-column foreign keys (ForeignObject) across forward,
    reverse, m2m-through and translated-relation queries.

    Note: these tests rely on internal implementation details;
    ForeignObject is not part of the public API.
    """

    def setUp(self):
        # Creating countries
        self.usa = Country.objects.create(name="United States of America")
        self.soviet_union = Country.objects.create(name="Soviet Union")
        # NOTE(review): stray statement -- creates and discards an unsaved
        # Person; looks like leftover scratch code, safe to delete.
        Person()
        # Creating People
        self.bob = Person()
        self.bob.name = 'Bob'
        self.bob.person_country = self.usa
        self.bob.save()
        self.jim = Person.objects.create(name='Jim', person_country=self.usa)
        self.george = Person.objects.create(name='George', person_country=self.usa)
        self.jane = Person.objects.create(name='Jane', person_country=self.soviet_union)
        self.mark = Person.objects.create(name='Mark', person_country=self.soviet_union)
        self.sam = Person.objects.create(name='Sam', person_country=self.soviet_union)
        # Creating Groups
        self.kgb = Group.objects.create(name='KGB', group_country=self.soviet_union)
        self.cia = Group.objects.create(name='CIA', group_country=self.usa)
        self.republican = Group.objects.create(name='Republican', group_country=self.usa)
        self.democrat = Group.objects.create(name='Democrat', group_country=self.usa)

    def test_get_succeeds_on_multicolumn_match(self):
        # Membership objects have access to their related Person if both
        # country_ids match between them
        membership = Membership.objects.create(
            membership_country_id=self.usa.id, person_id=self.bob.id, group_id=self.cia.id)
        person = membership.person
        self.assertEqual((person.id, person.name), (self.bob.id, "Bob"))

    def test_get_fails_on_multicolumn_mismatch(self):
        # Membership objects returns DoesNotExist error when there is no
        # Person with the same id and country_id
        membership = Membership.objects.create(
            membership_country_id=self.usa.id, person_id=self.jane.id, group_id=self.cia.id)
        self.assertRaises(Person.DoesNotExist, getattr, membership, 'person')

    def test_reverse_query_returns_correct_result(self):
        # Creating a valid membership because it has the same country as the person
        Membership.objects.create(
            membership_country_id=self.usa.id, person_id=self.bob.id, group_id=self.cia.id)
        # Creating an invalid membership because it has a different country than the person
        Membership.objects.create(
            membership_country_id=self.soviet_union.id, person_id=self.bob.id,
            group_id=self.republican.id)
        self.assertQuerysetEqual(
            self.bob.membership_set.all(), [
                self.cia.id
            ],
            attrgetter("group_id")
        )

    def test_query_filters_correctly(self):
        # Creating two valid memberships
        Membership.objects.create(
            membership_country_id=self.usa.id, person_id=self.bob.id, group_id=self.cia.id)
        Membership.objects.create(
            membership_country_id=self.usa.id, person_id=self.jim.id,
            group_id=self.cia.id)
        # Creating an invalid membership
        Membership.objects.create(membership_country_id=self.soviet_union.id,
                                  person_id=self.george.id, group_id=self.cia.id)
        self.assertQuerysetEqual(
            Membership.objects.filter(person__name__contains='o'), [
                self.bob.id
            ],
            attrgetter("person_id")
        )

    def test_reverse_query_filters_correctly(self):
        timemark = datetime.datetime.utcnow()
        timedelta = datetime.timedelta(days=1)
        # Creating two valid memberships
        Membership.objects.create(
            membership_country_id=self.usa.id, person_id=self.bob.id,
            group_id=self.cia.id, date_joined=timemark - timedelta)
        Membership.objects.create(
            membership_country_id=self.usa.id, person_id=self.jim.id,
            group_id=self.cia.id, date_joined=timemark + timedelta)
        # Creating an invalid membership
        Membership.objects.create(
            membership_country_id=self.soviet_union.id, person_id=self.george.id,
            group_id=self.cia.id, date_joined=timemark + timedelta)
        self.assertQuerysetEqual(
            Person.objects.filter(membership__date_joined__gte=timemark), [
                'Jim'
            ],
            attrgetter('name')
        )

    def test_forward_in_lookup_filters_correctly(self):
        Membership.objects.create(membership_country_id=self.usa.id, person_id=self.bob.id,
                                  group_id=self.cia.id)
        Membership.objects.create(membership_country_id=self.usa.id, person_id=self.jim.id,
                                  group_id=self.cia.id)
        # Creating an invalid membership
        Membership.objects.create(
            membership_country_id=self.soviet_union.id, person_id=self.george.id,
            group_id=self.cia.id)
        self.assertQuerysetEqual(
            Membership.objects.filter(person__in=[self.george, self.jim]), [
                self.jim.id,
            ],
            attrgetter('person_id')
        )
        self.assertQuerysetEqual(
            Membership.objects.filter(person__in=Person.objects.filter(name='Jim')), [
                self.jim.id,
            ],
            attrgetter('person_id')
        )

    def test_select_related_foreignkey_forward_works(self):
        Membership.objects.create(membership_country=self.usa, person=self.bob, group=self.cia)
        Membership.objects.create(membership_country=self.usa, person=self.jim, group=self.democrat)
        # select_related should fetch the people in the same (single) query.
        with self.assertNumQueries(1):
            people = [m.person for m in Membership.objects.select_related('person').order_by('pk')]
        normal_people = [m.person for m in Membership.objects.all().order_by('pk')]
        self.assertEqual(people, normal_people)

    def test_prefetch_foreignkey_forward_works(self):
        Membership.objects.create(membership_country=self.usa, person=self.bob, group=self.cia)
        Membership.objects.create(membership_country=self.usa, person=self.jim, group=self.democrat)
        # prefetch_related uses one query for memberships plus one for people.
        with self.assertNumQueries(2):
            people = [
                m.person for m in Membership.objects.prefetch_related('person').order_by('pk')]
        normal_people = [m.person for m in Membership.objects.order_by('pk')]
        self.assertEqual(people, normal_people)

    def test_prefetch_foreignkey_reverse_works(self):
        Membership.objects.create(membership_country=self.usa, person=self.bob, group=self.cia)
        Membership.objects.create(membership_country=self.usa, person=self.jim, group=self.democrat)
        with self.assertNumQueries(2):
            membership_sets = [
                list(p.membership_set.all())
                for p in Person.objects.prefetch_related('membership_set').order_by('pk')]
        normal_membership_sets = [list(p.membership_set.all())
                                  for p in Person.objects.order_by('pk')]
        self.assertEqual(membership_sets, normal_membership_sets)

    def test_m2m_through_forward_returns_valid_members(self):
        # We start out by making sure that the Group 'CIA' has no members.
        self.assertQuerysetEqual(
            self.cia.members.all(),
            []
        )
        Membership.objects.create(membership_country=self.usa, person=self.bob, group=self.cia)
        Membership.objects.create(membership_country=self.usa, person=self.jim, group=self.cia)
        # Let's check to make sure that it worked.  Bob and Jim should be members of the CIA.
        self.assertQuerysetEqual(
            self.cia.members.all(), [
                'Bob',
                'Jim'
            ], attrgetter("name")
        )

    def test_m2m_through_reverse_returns_valid_members(self):
        # We start out by making sure that Bob is in no groups.
        self.assertQuerysetEqual(
            self.bob.groups.all(),
            []
        )
        Membership.objects.create(membership_country=self.usa, person=self.bob, group=self.cia)
        Membership.objects.create(membership_country=self.usa, person=self.bob,
                                  group=self.republican)
        # Bob should be in the CIA and a Republican
        self.assertQuerysetEqual(
            self.bob.groups.all(), [
                'CIA',
                'Republican'
            ], attrgetter("name")
        )

    def test_m2m_through_forward_ignores_invalid_members(self):
        # We start out by making sure that the Group 'CIA' has no members.
        self.assertQuerysetEqual(
            self.cia.members.all(),
            []
        )
        # Something adds jane to group CIA but Jane is in Soviet Union which isn't CIA's country
        Membership.objects.create(membership_country=self.usa, person=self.jane, group=self.cia)
        # There should still be no members in CIA
        self.assertQuerysetEqual(
            self.cia.members.all(),
            []
        )

    def test_m2m_through_reverse_ignores_invalid_members(self):
        # We start out by making sure that Jane has no groups.
        self.assertQuerysetEqual(
            self.jane.groups.all(),
            []
        )
        # Something adds jane to group CIA but Jane is in Soviet Union which isn't CIA's country
        Membership.objects.create(membership_country=self.usa, person=self.jane, group=self.cia)
        # Jane should still not be in any groups
        self.assertQuerysetEqual(
            self.jane.groups.all(),
            []
        )

    def test_m2m_through_on_self_works(self):
        self.assertQuerysetEqual(
            self.jane.friends.all(),
            []
        )
        Friendship.objects.create(
            from_friend_country=self.jane.person_country, from_friend=self.jane,
            to_friend_country=self.george.person_country, to_friend=self.george)
        self.assertQuerysetEqual(
            self.jane.friends.all(),
            ['George'], attrgetter("name")
        )

    def test_m2m_through_on_self_ignores_mismatch_columns(self):
        self.assertQuerysetEqual(self.jane.friends.all(), [])
        # Note that we use ids instead of instances.  This is because instances on ForeignObject
        # properties will set all related field off of the given instance
        Friendship.objects.create(
            from_friend_id=self.jane.id, to_friend_id=self.george.id,
            to_friend_country_id=self.jane.person_country_id,
            from_friend_country_id=self.george.person_country_id)
        self.assertQuerysetEqual(self.jane.friends.all(), [])

    def test_prefetch_related_m2m_foward_works(self):
        # NOTE(review): "foward" typo in the method name; renaming would
        # change the public test name, so it is left as-is.
        Membership.objects.create(membership_country=self.usa, person=self.bob, group=self.cia)
        Membership.objects.create(membership_country=self.usa, person=self.jim, group=self.democrat)
        with self.assertNumQueries(2):
            members_lists = [list(g.members.all())
                             for g in Group.objects.prefetch_related('members')]
        normal_members_lists = [list(g.members.all()) for g in Group.objects.all()]
        self.assertEqual(members_lists, normal_members_lists)

    def test_prefetch_related_m2m_reverse_works(self):
        Membership.objects.create(membership_country=self.usa, person=self.bob, group=self.cia)
        Membership.objects.create(membership_country=self.usa, person=self.jim, group=self.democrat)
        with self.assertNumQueries(2):
            groups_lists = [list(p.groups.all()) for p in Person.objects.prefetch_related('groups')]
        normal_groups_lists = [list(p.groups.all()) for p in Person.objects.all()]
        self.assertEqual(groups_lists, normal_groups_lists)

    def test_translations(self):
        # The active language selects which ArticleTranslation row backs
        # Article.active_translation.
        activate('fi')
        a1 = Article.objects.create(pub_date=datetime.date.today())
        at1_fi = ArticleTranslation(article=a1, lang='fi', title='Otsikko', body='Diipadaapa')
        at1_fi.save()
        at2_en = ArticleTranslation(article=a1, lang='en', title='Title', body='Lalalalala')
        at2_en.save()
        with self.assertNumQueries(1):
            fetched = Article.objects.select_related('active_translation').get(
                active_translation__title='Otsikko')
            self.assertTrue(fetched.active_translation.title == 'Otsikko')
        a2 = Article.objects.create(pub_date=datetime.date.today())
        at2_fi = ArticleTranslation(article=a2, lang='fi', title='Atsikko', body='Diipadaapa',
                                    abstract='dipad')
        at2_fi.save()
        a3 = Article.objects.create(pub_date=datetime.date.today())
        at3_en = ArticleTranslation(article=a3, lang='en', title='A title', body='lalalalala',
                                    abstract='lala')
        at3_en.save()
        # Test model initialization with active_translation field.
        a3 = Article(id=a3.id, pub_date=a3.pub_date, active_translation=at3_en)
        a3.save()
        self.assertEqual(
            list(Article.objects.filter(active_translation__abstract=None)),
            [a1, a3])
        self.assertEqual(
            list(Article.objects.filter(active_translation__abstract=None,
                                        active_translation__pk__isnull=False)),
            [a1])
        activate('en')
        self.assertEqual(
            list(Article.objects.filter(active_translation__abstract=None)),
            [a1, a2])

    def test_foreign_key_raises_informative_does_not_exist(self):
        referrer = ArticleTranslation()
        with self.assertRaisesMessage(Article.DoesNotExist, 'ArticleTranslation has no article'):
            referrer.article

    def test_foreign_key_related_query_name(self):
        # related_query_name 'tag' is usable; the default 'tags' is not.
        a1 = Article.objects.create(pub_date=datetime.date.today())
        ArticleTag.objects.create(article=a1, name="foo")
        self.assertEqual(Article.objects.filter(tag__name="foo").count(), 1)
        self.assertEqual(Article.objects.filter(tag__name="bar").count(), 0)
        with self.assertRaises(FieldError):
            Article.objects.filter(tags__name="foo")

    def test_many_to_many_related_query_name(self):
        # related_query_name 'idea_things' is usable; 'ideas' is not.
        a1 = Article.objects.create(pub_date=datetime.date.today())
        i1 = ArticleIdea.objects.create(name="idea1")
        a1.ideas.add(i1)
        self.assertEqual(Article.objects.filter(idea_things__name="idea1").count(), 1)
        self.assertEqual(Article.objects.filter(idea_things__name="idea2").count(), 0)
        with self.assertRaises(FieldError):
            Article.objects.filter(ideas__name="idea1")

    def test_inheritance(self):
        # The translated relation also works on the NewsArticle subclass.
        activate("fi")
        na = NewsArticle.objects.create(pub_date=datetime.date.today())
        ArticleTranslation.objects.create(
            article=na, lang="fi", title="foo", body="bar")
        self.assertQuerysetEqual(
            NewsArticle.objects.select_related('active_translation'),
            [na], lambda x: x
        )
        with self.assertNumQueries(1):
            self.assertEqual(
                NewsArticle.objects.select_related(
                    'active_translation')[0].active_translation.title,
                "foo")
class FormsTests(TestCase):
    """Check ModelForm interaction with ForeignObject fields."""

    # ForeignObjects should not have any form fields, currently the user needs
    # to manually deal with the foreignobject relation.
    class ArticleForm(forms.ModelForm):
        class Meta:
            model = Article
            fields = '__all__'

    def test_foreign_object_form(self):
        # A very crude test checking that the non-concrete fields do not get form fields.
        form = FormsTests.ArticleForm()
        self.assertIn('id_pub_date', form.as_table())
        self.assertNotIn('active_translation', form.as_table())
        # Creating via the form works without the non-concrete field.
        form = FormsTests.ArticleForm(data={'pub_date': str(datetime.date.today())})
        self.assertTrue(form.is_valid())
        a = form.save()
        self.assertEqual(a.pub_date, datetime.date.today())
        # Editing via the form updates the same row rather than creating a new one.
        form = FormsTests.ArticleForm(instance=a, data={'pub_date': '2013-01-01'})
        a2 = form.save()
        self.assertEqual(a.pk, a2.pk)
        self.assertEqual(a2.pub_date, datetime.date(2013, 1, 1))
| {
"content_hash": "13a6b91b0fa9e2d3a1bee399ee117ff0",
"timestamp": "",
"source": "github",
"line_count": 385,
"max_line_length": 100,
"avg_line_length": 43.25454545454546,
"alnum_prop": 0.6324385996517145,
"repo_name": "alu0100207385/dsi_3Django",
"id": "cd81cc68a24a940125dd662b55c648425c2dff08",
"size": "16653",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/foreign_object/tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "92853"
},
{
"name": "JavaScript",
"bytes": "195552"
},
{
"name": "Python",
"bytes": "13033122"
},
{
"name": "Shell",
"bytes": "12135"
}
],
"symlink_target": ""
} |
import inspect
from util.log import *
class Callback(object):
    """Adapter that forwards arbitrary keyword events to a subclass's
    ``callback`` (and optional ``canceled``) method, passing only the
    parameters each method declares; undeclared-but-missing parameters
    are filled with None."""

    def __init__(self):
        if not callable(getattr(self, "callback", None)):
            ERROR("callback method not found.")
            return
        # Remember which parameter names callback() accepts.
        self.callback_param_names = inspect.getargspec(self.callback)[0]
        DEBUG(self.callback_param_names)
        if "self" in self.callback_param_names:
            self.callback_param_names.remove("self")
        # Same bookkeeping for the optional canceled() hook.
        if callable(getattr(self, "canceled", None)):
            self.canceled_param_names = inspect.getargspec(self.canceled)[0]
            DEBUG(self.canceled_param_names)
            if "self" in self.canceled_param_names:
                self.canceled_param_names.remove("self")

    def initialize(self, **kwargs):
        """Attach every keyword as an attribute, then run init() if any."""
        for name, value in kwargs.items():
            setattr(self, name, value)
        init_hook = getattr(self, "init", None)
        if callable(init_hook):
            init_hook()

    def internal_callback(self, **kwargs):
        """Invoke callback() with just its declared parameters."""
        selected = {name: kwargs.get(name)
                    for name in self.callback_param_names}
        return self.callback(**selected)

    def internal_canceled(self, **kwargs):
        """Invoke canceled() the same way, when the subclass defines it."""
        if not callable(getattr(self, "canceled", None)):
            return
        selected = {name: kwargs.get(name)
                    for name in self.canceled_param_names}
        return self.canceled(**selected)
| {
"content_hash": "77b266a2bc6a9b785015cb659716408e",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 73,
"avg_line_length": 35.04,
"alnum_prop": 0.5176940639269406,
"repo_name": "fangjing828/LEHome",
"id": "64b600a29244ce9264974243ce175b71617f1c85",
"size": "2394",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "lib/model/Callback.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "307005"
},
{
"name": "Shell",
"bytes": "4154"
}
],
"symlink_target": ""
} |
"""Django middleware for NDB."""
__author__ = 'James A. Morrison'
from . import eventloop, tasklets
class NdbDjangoMiddleware(object):
    """Django middleware for NDB.

    To use NDB with django, add

        'ndb.NdbDjangoMiddleware',

    to the MIDDLEWARE_CLASSES entry in your Django settings.py file.
    Or, if you are using the ndb version from the SDK, use

        'google.appengine.ext.ndb.NdbDjangoMiddleware',

    It's best to insert it in front of any other middleware classes,
    since some other middleware may make datastore calls and those won't be
    handled properly if that middleware is invoked before this middleware.

    See http://docs.djangoproject.com/en/dev/topics/http/middleware/.
    """

    def process_request(self, unused_request):
        """Called by Django before deciding which view to execute."""
        # Compare to the first half of toplevel() in context.py.
        tasklets._state.clear_all_pending()
        # Create and install a new context.
        ctx = tasklets.make_default_context()
        tasklets.set_context(ctx)

    @staticmethod
    def _finish():
        """Tear down the per-request NDB context, flushing pending work."""
        # Compare to the finally clause in toplevel() in context.py.
        ctx = tasklets.get_context()
        tasklets.set_context(None)
        ctx.flush().check_success()
        eventloop.run()  # Ensure writes are flushed, etc.

    def process_response(self, request, response):
        """Called by Django just before returning a response."""
        self._finish()
        return response

    def process_exception(self, unused_request, unused_exception):
        """Called by Django when a view raises an exception."""
        self._finish()
        # Returning None lets Django's normal exception handling proceed.
        return None
| {
"content_hash": "c55fb4a8ea7d9e062c376b1c5a83ccc6",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 73,
"avg_line_length": 31.137254901960784,
"alnum_prop": 0.7103274559193955,
"repo_name": "GoogleCloudPlatform/python-compat-runtime",
"id": "fce0682f3626843d25ce04b73363d4ab673b8206",
"size": "2192",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "appengine-compat/exported_appengine_sdk/google/appengine/ext/ndb/django_middleware.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "30211"
},
{
"name": "HTML",
"bytes": "171272"
},
{
"name": "JavaScript",
"bytes": "414229"
},
{
"name": "Makefile",
"bytes": "2138"
},
{
"name": "PHP",
"bytes": "3132250"
},
{
"name": "Python",
"bytes": "11709249"
},
{
"name": "Shell",
"bytes": "1787"
}
],
"symlink_target": ""
} |
from flask.ext.wtf import Form
from wtforms import StringField, BooleanField, TextAreaField
from wtforms.validators import DataRequired, Length
class LoginForm(Form):
    """OpenID sign-in form."""
    # OpenID identity URL; required.
    openid = StringField('openid', validators=[DataRequired()])
    # Whether to persist the session beyond the browser session.
    remember_me = BooleanField('remember_me', default=False)
class EditForm(Form):
    """Profile editing form."""
    nickname = StringField('nickname', validators=[DataRequired()])
    # NOTE(review): missing closing ')' if this line is ever re-enabled.
    # about_me = TextAreaField('about_me', validators=[Length(min=0, max=140)]
"content_hash": "be97f20bfa899cb935d078b911272fa6",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 74,
"avg_line_length": 41.36363636363637,
"alnum_prop": 0.7670329670329671,
"repo_name": "kweztah/com.sociavist",
"id": "7130153fb6b36722ef722308c7e948402a995ca9",
"size": "455",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "application/forms.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "12171"
}
],
"symlink_target": ""
} |
from django.conf.urls import url
from . import views
# Dashboard tax section routes: the tax list, per-country details
# (two-letter ISO country code), tax configuration and rate fetching.
urlpatterns = [
    url(r'^$', views.tax_list, name='taxes'),
    url(r'^(?P<country_code>[A-Z]{2})/details/$', views.tax_details,
        name='tax-details'),
    url(r'^configure-taxes/$', views.configure_taxes,
        name='configure-taxes'),
    url(r'^fetch-tax-rates/$', views.fetch_tax_rates,
        name='fetch-tax-rates')]
| {
"content_hash": "9e17a334fa6e1ec4a8cec06002b92cfa",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 68,
"avg_line_length": 32.416666666666664,
"alnum_prop": 0.6169665809768637,
"repo_name": "UITools/saleor",
"id": "1d22e4e2fc6f3475b918fbdd48b5e30836b0c31b",
"size": "389",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "saleor/dashboard/taxes/urls.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "96006"
},
{
"name": "Dockerfile",
"bytes": "1859"
},
{
"name": "HTML",
"bytes": "556961"
},
{
"name": "JavaScript",
"bytes": "64679"
},
{
"name": "Python",
"bytes": "2316144"
},
{
"name": "Shell",
"bytes": "1265"
},
{
"name": "TypeScript",
"bytes": "2526265"
}
],
"symlink_target": ""
} |
__author__ = 'sshnaidm'
import sys
import xml.etree.ElementTree as Et

# Print "classname.name" for every failed testcase in the JUnit-style XML
# report given as the first CLI argument; exit silently if none failed.
with open(sys.argv[1]) as f:
    xml = f.read()
exml = Et.fromstring(xml)
# Fixed: Element.getchildren() was deprecated and removed in Python 3.9;
# iterating the element (list(elem)) is the supported equivalent.  The
# old `if i.getchildren()` guard was redundant (an empty child list
# yields no iterations anyway) and has been dropped.
tmp_fails = [case.attrib["classname"] + "." + case.attrib["name"]
             for case in list(exml)
             for child in list(case)
             if "failure" in child.tag]
fails = [name for name in tmp_fails if "process-returncode" not in name]
if fails:
    print("\n".join(fails))
else:
    sys.exit()
"content_hash": "5c98e2e40ba7f48d8af9418504956742",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 63,
"avg_line_length": 23.05,
"alnum_prop": 0.6052060737527115,
"repo_name": "sshnaidm/openstack-sqe",
"id": "bd9d01fa9bd064ef151bb174ad76b734afcf4e41",
"size": "461",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/tempest-scripts/extract_failures.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "12580"
},
{
"name": "Python",
"bytes": "157602"
},
{
"name": "Shell",
"bytes": "4668"
}
],
"symlink_target": ""
} |
import urlparse
import urllib
import cgi
import hashlib
from six import moves
from w3lib.util import unicode_to_str
import tldextract
# Python 2.x urllib.always_safe become private in Python 3.x;
# its content is copied here
_ALWAYS_SAFE_BYTES = (b'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
                      b'abcdefghijklmnopqrstuvwxyz'
                      b'0123456789' b'_.-')
_reserved = b';/?:@&=+$|,#'  # RFC 3986 (Generic Syntax)
_unreserved_marks = b"-_.!~*'()"  # RFC 3986 sec 2.3
# Full set of characters that safe_url_string() must NOT percent-escape.
_safe_chars = _ALWAYS_SAFE_BYTES + b'%' + _reserved + _unreserved_marks
def parse_url(url, encoding=None):
    """Return *url* as a urlparse.ParseResult; an already-parsed value is
    returned unchanged, otherwise it is byte-encoded and parsed."""
    if isinstance(url, urlparse.ParseResult):
        return url
    return urlparse.urlparse(unicode_to_str(url, encoding))
def parse_domain_from_url(url):
    """Split *url* into (netloc, name, scheme, sld, tld, subdomain).

    Examples:
        http://www.google.com  -> ('www.google.com', 'google.com', 'http',
                                   'google', 'com', 'www')
        http://docs.google.com -> ('docs.google.com', 'google.com', 'http',
                                   'google', 'com', 'docs')
        https://google.es/mail -> ('google.es', 'google.es', 'https',
                                   'google', 'es', '')
    """
    parts = tldextract.extract(url)
    scheme = parse_url(url).scheme
    # Registered name is sld.tld; drop the dot when there is no suffix.
    name = parts.domain + '.' + parts.suffix if parts.suffix else parts.domain
    # Full netloc prepends the subdomain, when present.
    netloc = parts.subdomain + '.' + name if parts.subdomain else name
    return netloc, name, scheme, parts.domain, parts.suffix, parts.subdomain
def parse_domain_from_url_fast(url):
    """Like parse_domain_from_url() but without tld/subdomain analysis;
    the last three tuple slots are always empty strings."""
    parsed = parse_url(url)
    return parsed.netloc, parsed.hostname, parsed.scheme, "", "", ""
def safe_url_string(url, encoding='utf8'):
    """Percent-escape unsafe characters in *url* per RFC-3986 and return
    a str.

    Unicode input is first encoded with *encoding* (defaults to 'utf-8';
    pass the encoding of the page the url was extracted from).  A url
    that is already safe comes back unmodified.
    """
    raw = unicode_to_str(url, encoding)
    return moves.urllib.parse.quote(raw, _safe_chars)
def _unquotepath(path):
    """Unquote a URL path while keeping '/' (%2F) and '?' (%3F) escaped,
    so that unquoting can never introduce new path/query separators."""
    for hexcode in ('2f', '2F', '3f', '3F'):
        # Double-escape these sequences (%XY -> %25XY) before unquoting.
        path = path.replace('%' + hexcode, '%25' + hexcode.upper())
    return urllib.unquote(path)
def canonicalize_url(url, keep_blank_values=True, keep_fragments=False):
    """Canonicalize the given url by applying the following procedures:

    - sort query arguments, first by key, then by value
    - percent encode paths and query arguments. non-ASCII characters are
      percent-encoded using UTF-8 (RFC-3986)
    - normalize all spaces (in query arguments) '+' (plus symbol)
    - normalize percent encodings case (%2f -> %2F)
    - remove query arguments with blank values (unless keep_blank_values is True)
    - remove fragments (unless keep_fragments is True)

    The url passed can be a str or unicode, while the url returned is always a
    str.

    For examples see the tests in scrapy.tests.test_utils_url
    """
    scheme, netloc, path, params, query, fragment = parse_url(url)
    # Sorting the key/value pairs gives a stable argument order; urlencode
    # re-quotes them, normalizing spaces and percent-encoding case.
    keyvals = cgi.parse_qsl(query, keep_blank_values)
    keyvals.sort()
    query = urllib.urlencode(keyvals)
    # Re-quote the path keeping %2F/%3F escaped; an empty path becomes '/'.
    path = safe_url_string(_unquotepath(path)) or '/'
    fragment = '' if not keep_fragments else fragment
    # Hostnames are case-insensitive, so the netloc is lowercased.
    return urlparse.urlunparse((scheme, netloc.lower(), path, params, query, fragment))
| {
"content_hash": "ece4320b108ae3f8ae8f87bc6801020b",
"timestamp": "",
"source": "github",
"line_count": 111,
"max_line_length": 107,
"avg_line_length": 37.153153153153156,
"alnum_prop": 0.6006304558680893,
"repo_name": "rahulsharma1991/frontera",
"id": "8a990d75a6d07147559e97aa1b45a44e3b60b4fa",
"size": "4124",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "frontera/utils/url.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "210178"
}
],
"symlink_target": ""
} |
import itertools
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import netutils
from heat.common import exception
from heat.common.i18n import _
from heat.engine import resource
from heat.engine.resources.openstack.neutron import port as neutron_port
LOG = logging.getLogger(__name__)
class ServerNetworkMixin(object):
def _validate_network(self, network):
    """Validate one entry of the server's networks property.

    Raises StackValidationFailed when none of network id, port or subnet
    is given, or when a port is specified without Neutron, and
    ResourcePropertyConflict when both fixed_ip and port are supplied.
    """
    net_id = network.get(self.NETWORK_ID)
    port = network.get(self.NETWORK_PORT)
    subnet = network.get(self.NETWORK_SUBNET)
    fixed_ip = network.get(self.NETWORK_FIXED_IP)

    # At least one way of identifying the target network must be present.
    if net_id is None and port is None and subnet is None:
        msg = _('One of the properties "%(id)s", "%(port_id)s" '
                'or "%(subnet)s" should be set for the '
                'specified network of server "%(server)s".'
                '') % dict(id=self.NETWORK_ID,
                           port_id=self.NETWORK_PORT,
                           subnet=self.NETWORK_SUBNET,
                           server=self.name)
        raise exception.StackValidationFailed(message=msg)

    # Pre-created ports are a Neutron-only concept.
    if port and not self.is_using_neutron():
        msg = _('Property "%s" is supported only for '
                'Neutron.') % self.NETWORK_PORT
        raise exception.StackValidationFailed(message=msg)

    # Nova doesn't allow specify ip and port at the same time
    if fixed_ip and port:
        raise exception.ResourcePropertyConflict(
            "/".join([self.NETWORKS, self.NETWORK_FIXED_IP]),
            "/".join([self.NETWORKS, self.NETWORK_PORT]))
def _validate_belonging_subnet_to_net(self, network):
if network.get(self.NETWORK_PORT) is None and self.is_using_neutron():
net = self._get_network_id(network)
# check if there are subnet and network both specified that
# subnet belongs to specified network
subnet = network.get(self.NETWORK_SUBNET)
if (subnet is not None and net is not None):
subnet_net = self.client_plugin(
'neutron').network_id_from_subnet_id(
self._get_subnet_id(subnet))
if subnet_net != net:
msg = _('Specified subnet %(subnet)s does not belongs to '
'network %(network)s.') % {
'subnet': subnet,
'network': net}
raise exception.StackValidationFailed(message=msg)
def _create_internal_port(self, net_data, net_number):
name = _('%(server)s-port-%(number)s') % {'server': self.name,
'number': net_number}
kwargs = self._prepare_internal_port_kwargs(net_data)
kwargs['name'] = name
port = self.client('neutron').create_port({'port': kwargs})['port']
# Store ids (used for floating_ip association, updating, etc.)
# in resource's data.
self._data_update_ports(port['id'], 'add')
return port['id']
def _prepare_internal_port_kwargs(self, net_data):
kwargs = {'network_id': self._get_network_id(net_data)}
fixed_ip = net_data.get(self.NETWORK_FIXED_IP)
subnet = net_data.get(self.NETWORK_SUBNET)
body = {}
if fixed_ip:
body['ip_address'] = fixed_ip
if subnet:
body['subnet_id'] = self._get_subnet_id(subnet)
# we should add fixed_ips only if subnet or ip were provided
if body:
kwargs.update({'fixed_ips': [body]})
if net_data.get(self.SECURITY_GROUPS):
sec_uuids = self.client_plugin(
'neutron').get_secgroup_uuids(net_data.get(
self.SECURITY_GROUPS))
kwargs['security_groups'] = sec_uuids
extra_props = net_data.get(self.NETWORK_PORT_EXTRA)
if extra_props is not None:
port_extra_keys = list(neutron_port.Port.EXTRA_PROPERTIES)
port_extra_keys.remove(neutron_port.Port.ALLOWED_ADDRESS_PAIRS)
for key in port_extra_keys:
if extra_props.get(key) is not None:
kwargs[key] = extra_props.get(key)
allowed_address_pairs = extra_props.get(
neutron_port.Port.ALLOWED_ADDRESS_PAIRS)
if allowed_address_pairs is not None:
for pair in allowed_address_pairs:
if (neutron_port.Port.ALLOWED_ADDRESS_PAIR_MAC_ADDRESS
in pair and pair.get(
neutron_port.Port.ALLOWED_ADDRESS_PAIR_MAC_ADDRESS)
is None):
del pair[
neutron_port.Port.ALLOWED_ADDRESS_PAIR_MAC_ADDRESS]
port_address_pairs = neutron_port.Port.ALLOWED_ADDRESS_PAIRS
kwargs[port_address_pairs] = allowed_address_pairs
return kwargs
def _delete_internal_port(self, port_id):
"""Delete physical port by id."""
with self.client_plugin('neutron').ignore_not_found:
self.client('neutron').delete_port(port_id)
self._data_update_ports(port_id, 'delete')
def _delete_internal_ports(self):
for port_data in self._data_get_ports():
self._delete_internal_port(port_data['id'])
self.data_delete('internal_ports')
def _data_update_ports(self, port_id, action, port_type='internal_ports'):
data = self._data_get_ports(port_type)
if action == 'add':
data.append({'id': port_id})
elif action == 'delete':
for port in data:
if port_id == port['id']:
data.remove(port)
break
self.data_set(port_type, jsonutils.dumps(data))
def _data_get_ports(self, port_type='internal_ports'):
data = self.data().get(port_type)
return jsonutils.loads(data) if data else []
def store_external_ports(self):
"""Store in resource's data IDs of ports created by nova for server.
If no port property is specified and no internal port has been created,
nova client takes no port-id and calls port creating into server
creating. We need to store information about that ports, so store
their IDs to data with key `external_ports`.
"""
if not self.is_using_neutron():
return
# check if os-attach-interfaces extension is available on this cloud.
# If it's not, then novaclient's interface_list method cannot be used
# to get the list of interfaces.
if not self.client_plugin().has_extension('os-attach-interfaces'):
return
server = self.client().servers.get(self.resource_id)
ifaces = server.interface_list()
external_port_ids = set(iface.port_id for iface in ifaces)
# need to make sure external_ports data doesn't store ids of non-exist
# ports. Delete such port_id if it's needed.
data_external_port_ids = set(
port['id'] for port in self._data_get_ports('external_ports'))
for port_id in data_external_port_ids - external_port_ids:
self._data_update_ports(port_id, 'delete',
port_type='external_ports')
internal_port_ids = set(port['id'] for port in self._data_get_ports())
# add ids of new external ports which not contains in external_ports
# data yet. Also, exclude ids of internal ports.
new_ports = ((external_port_ids - internal_port_ids) -
data_external_port_ids)
for port_id in new_ports:
self._data_update_ports(port_id, 'add', port_type='external_ports')
def _build_nics(self, networks):
if not networks:
return None
nics = []
for idx, net in enumerate(networks):
self._validate_belonging_subnet_to_net(net)
nic_info = {'net-id': self._get_network_id(net)}
if net.get(self.NETWORK_PORT):
nic_info['port-id'] = net[self.NETWORK_PORT]
elif self.is_using_neutron() and net.get(self.NETWORK_SUBNET):
nic_info['port-id'] = self._create_internal_port(net, idx)
# if nic_info including 'port-id', do not set ip for nic
if not nic_info.get('port-id'):
if net.get(self.NETWORK_FIXED_IP):
ip = net[self.NETWORK_FIXED_IP]
if netutils.is_valid_ipv6(ip):
nic_info['v6-fixed-ip'] = ip
else:
nic_info['v4-fixed-ip'] = ip
if net.get(self.NETWORK_FLOATING_IP) and nic_info.get('port-id'):
floating_ip_data = {'port_id': nic_info['port-id']}
if net.get(self.NETWORK_FIXED_IP):
floating_ip_data.update(
{'fixed_ip_address':
net.get(self.NETWORK_FIXED_IP)})
self._floating_ip_neutron_associate(
net.get(self.NETWORK_FLOATING_IP), floating_ip_data)
nics.append(nic_info)
return nics
def _floating_ip_neutron_associate(self, floating_ip, floating_ip_data):
if self.is_using_neutron():
self.client('neutron').update_floatingip(
floating_ip, {'floatingip': floating_ip_data})
def _floating_ip_nova_associate(self, floating_ip):
fl_ip = self.client().floating_ips.get(floating_ip)
if fl_ip and self.resource_id:
self.client().servers.add_floating_ip(self.resource_id, fl_ip.ip)
def _floating_ips_disassociate(self):
networks = self.properties[self.NETWORKS] or []
for network in networks:
floating_ip = network.get(self.NETWORK_FLOATING_IP)
if floating_ip is not None:
self._floating_ip_disassociate(floating_ip)
def _floating_ip_disassociate(self, floating_ip):
if self.is_using_neutron():
with self.client_plugin('neutron').ignore_not_found:
self.client('neutron').update_floatingip(
floating_ip, {'floatingip': {'port_id': None}})
else:
with self.client_plugin().ignore_conflict_and_not_found:
fl_ip = self.client().floating_ips.get(floating_ip)
self.client().servers.remove_floating_ip(self.resource_id,
fl_ip.ip)
def _exclude_not_updated_networks(self, old_nets, new_nets):
# make networks similar by adding None vlues for not used keys
for key in self._NETWORK_KEYS:
# if _net.get(key) is '', convert to None
for _net in itertools.chain(new_nets, old_nets):
_net[key] = _net.get(key) or None
# find matches and remove them from old and new networks
not_updated_nets = [net for net in old_nets if net in new_nets]
for net in not_updated_nets:
old_nets.remove(net)
new_nets.remove(net)
return not_updated_nets
def _get_network_id(self, net):
net_id = net.get(self.NETWORK_ID) or None
subnet = net.get(self.NETWORK_SUBNET) or None
if net_id:
if self.is_using_neutron():
net_id = self.client_plugin(
'neutron').find_resourceid_by_name_or_id('network',
net_id)
else:
net_id = self.client_plugin(
'nova').get_nova_network_id(net_id)
elif subnet:
net_id = self.client_plugin('neutron').network_id_from_subnet_id(
self._get_subnet_id(subnet))
return net_id
def _get_subnet_id(self, subnet):
return self.client_plugin('neutron').find_resourceid_by_name_or_id(
'subnet', subnet)
def update_networks_matching_iface_port(self, nets, interfaces):
def find_equal(port, net_id, ip, nets):
for net in nets:
if (net.get('port') == port or
(net.get('fixed_ip') == ip and
self._get_network_id(net) == net_id)):
return net
def find_poor_net(net_id, nets):
for net in nets:
if (not net.get('port') and not net.get('fixed_ip') and
self._get_network_id(net) == net_id):
return net
for iface in interfaces:
# get interface properties
props = {'port': iface.port_id,
'net_id': iface.net_id,
'ip': iface.fixed_ips[0]['ip_address'],
'nets': nets}
# try to match by port or network_id with fixed_ip
net = find_equal(**props)
if net is not None:
net['port'] = props['port']
continue
# find poor net that has only network_id
net = find_poor_net(props['net_id'], nets)
if net is not None:
net['port'] = props['port']
def calculate_networks(self, old_nets, new_nets, ifaces):
remove_ports = []
add_nets = []
attach_first_free_port = False
if not new_nets:
new_nets = []
attach_first_free_port = True
# if old nets is None, it means that the server got first
# free port. so we should detach this interface.
if old_nets is None:
for iface in ifaces:
remove_ports.append(iface.port_id)
# if we have any information in networks field, we should:
# 1. find similar networks, if they exist
# 2. remove these networks from new_nets and old_nets
# lists
# 3. detach unmatched networks, which were present in old_nets
# 4. attach unmatched networks, which were present in new_nets
else:
# remove not updated networks from old and new networks lists,
# also get list these networks
not_updated_nets = self._exclude_not_updated_networks(old_nets,
new_nets)
self.update_networks_matching_iface_port(
old_nets + not_updated_nets, ifaces)
# according to nova interface-detach command detached port
# will be deleted
for net in old_nets:
if net.get(self.NETWORK_PORT):
remove_ports.append(net.get(self.NETWORK_PORT))
if self.data().get('internal_ports'):
# if we have internal port with such id, remove it
# instantly.
self._delete_internal_port(net.get(self.NETWORK_PORT))
if net.get(self.NETWORK_FLOATING_IP):
self._floating_ip_disassociate(
net.get(self.NETWORK_FLOATING_IP))
handler_kwargs = {'port_id': None, 'net_id': None, 'fip': None}
# if new_nets is None, we should attach first free port,
# according to similar behavior during instance creation
if attach_first_free_port:
add_nets.append(handler_kwargs)
# attach section similar for both variants that
# were mentioned above
for idx, net in enumerate(new_nets):
handler_kwargs = {'port_id': None,
'net_id': None,
'fip': None}
if net.get(self.NETWORK_PORT):
handler_kwargs['port_id'] = net.get(self.NETWORK_PORT)
elif self.is_using_neutron() and net.get(self.NETWORK_SUBNET):
handler_kwargs['port_id'] = self._create_internal_port(net,
idx)
if not handler_kwargs['port_id']:
handler_kwargs['net_id'] = self._get_network_id(net)
if handler_kwargs['net_id']:
handler_kwargs['fip'] = net.get('fixed_ip')
floating_ip = net.get(self.NETWORK_FLOATING_IP)
if floating_ip:
flip_associate = {'port_id': handler_kwargs.get('port_id')}
if net.get('fixed_ip'):
flip_associate['fixed_ip_address'] = net.get('fixed_ip')
self.update_floating_ip_association(floating_ip,
flip_associate)
add_nets.append(handler_kwargs)
return remove_ports, add_nets
def update_floating_ip_association(self, floating_ip, flip_associate):
if self.is_using_neutron() and flip_associate.get('port_id'):
self._floating_ip_neutron_associate(floating_ip, flip_associate)
elif not self.is_using_neutron():
self._floating_ip_nova_associate(floating_ip)
def prepare_ports_for_replace(self):
if not self.is_using_neutron():
return
data = {'external_ports': [],
'internal_ports': []}
port_data = list(itertools.chain(
[('internal_ports', port) for port in self._data_get_ports()],
[('external_ports', port)
for port in self._data_get_ports('external_ports')]))
for port_type, port in port_data:
# store port fixed_ips for restoring after failed update
port_details = self.client('neutron').show_port(port['id'])['port']
fixed_ips = port_details.get('fixed_ips', [])
data[port_type].append({'id': port['id'], 'fixed_ips': fixed_ips})
if data.get('internal_ports'):
self.data_set('internal_ports',
jsonutils.dumps(data['internal_ports']))
if data.get('external_ports'):
self.data_set('external_ports',
jsonutils.dumps(data['external_ports']))
# reset fixed_ips for these ports by setting for each of them
# fixed_ips to []
for port_type, port in port_data:
self.client('neutron').update_port(
port['id'], {'port': {'fixed_ips': []}})
def restore_ports_after_rollback(self, convergence):
if not self.is_using_neutron():
return
# In case of convergence, during rollback, the previous rsrc is
# already selected and is being acted upon.
backup_res = self.stack._backup_stack().resources.get(self.name)
prev_server = self if convergence else backup_res
if convergence:
rsrc, rsrc_owning_stack, stack = resource.Resource.load(
prev_server.context, prev_server.replaced_by, True,
prev_server.stack.cache_data
)
existing_server = rsrc
else:
existing_server = self
port_data = itertools.chain(
existing_server._data_get_ports(),
existing_server._data_get_ports('external_ports')
)
for port in port_data:
# reset fixed_ips to [] for new resource
self.client('neutron').update_port(port['id'],
{'port': {'fixed_ips': []}})
# restore ip for old port
prev_port_data = itertools.chain(
prev_server._data_get_ports(),
prev_server._data_get_ports('external_ports'))
for port in prev_port_data:
fixed_ips = port['fixed_ips']
self.client('neutron').update_port(
port['id'], {'port': {'fixed_ips': fixed_ips}})
| {
"content_hash": "7e6df147e31fa8da11eff6c80e8000b8",
"timestamp": "",
"source": "github",
"line_count": 458,
"max_line_length": 79,
"avg_line_length": 43.146288209606986,
"alnum_prop": 0.5502251910328425,
"repo_name": "dims/heat",
"id": "16f0c898848560db2c0cde339147ae0dcb9f970b",
"size": "20336",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "heat/engine/resources/openstack/nova/server_network_mixin.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "7618889"
},
{
"name": "Shell",
"bytes": "32548"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.