| text (string, length 6-947k) | repo_name (string, length 5-100) | path (string, length 4-231) | language (1 class) | license (15 classes) | size (int64, 6-947k) | score (float64, 0-0.34) |
|---|---|---|---|---|---|---|
"""
The MIT License (MIT)
Copyright (c) 2016 Daniele Linguaglossa <d.linguaglossa@mseclab.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from pyjfuzz.core.pjf_configuration import PJFConfiguration
import unittest
import argparse
import sys
__TITLE__ = "Testing PJFConfiguration object"
class TestPJFConfiguration(unittest.TestCase):
def test_json_configuration(self):
sys.argv.append("--J")
sys.argv.append("[1]")
sys.argv.append("--no-logo")
parser = argparse.ArgumentParser(description='', formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('--J', type=str, default=None)
parser.add_argument('--no-logo', action='store_true', dest='nologo', default=False, required=False)
parsed = parser.parse_args()
args = PJFConfiguration(parsed)
for arg in parsed.__dict__:
self.assertTrue(arg in args.__dict__)
def test():
print("=" * len(__TITLE__))
print(__TITLE__)
print("=" * len(__TITLE__))
suite = unittest.TestLoader().loadTestsFromTestCase(TestPJFConfiguration)
unittest.TextTestRunner(verbosity=2).run(suite)
| mseclab/PyJFuzz | test/test_pjf_configuration.py | Python | mit | 2,126 | 0.001881 |
# This file is part of MyPaint.
# Copyright (C) 2007-2008 by Martin Renold <martinxyz@gmx.ch>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
"""This module implements an unbounded tiled surface for painting."""
## Imports
from __future__ import division, print_function
import time
import sys
import os
import contextlib
import logging
from gettext import gettext as _
import numpy as np
import mypaintlib
import helpers
import pixbufsurface
import lib.surface
from lib.surface import TileAccessible, TileBlittable, TileCompositable
from errors import FileHandlingError
import lib.fileutils
import lib.modes
logger = logging.getLogger(__name__)
## Constants
TILE_SIZE = N = mypaintlib.TILE_SIZE
MAX_MIPMAP_LEVEL = mypaintlib.MAX_MIPMAP_LEVEL
## Tile class and marker tile constants
class _Tile (object):
"""Internal tile storage, with readonly flag
Note: pixels are stored with premultiplied alpha.
15 bits are used, but fully opaque or white is stored as 2**15
(requiring 16 bits). This is to allow many calculations to divide by
2**15 instead of (2**16-1).
"""
def __init__(self, copy_from=None):
super(_Tile, self).__init__()
if copy_from is None:
self.rgba = np.zeros((N, N, 4), 'uint16')
else:
self.rgba = copy_from.rgba.copy()
self.readonly = False
def copy(self):
return _Tile(copy_from=self)
# tile for read-only operations on empty spots
transparent_tile = _Tile()
transparent_tile.readonly = True
# tile with invalid pixel memory (needs refresh)
mipmap_dirty_tile = _Tile()
del mipmap_dirty_tile.rgba
## Class defs: surfaces
class _SurfaceSnapshot (object):
pass
# TODO:
# - move the tile storage from MyPaintSurface to a separate class
class MyPaintSurface (TileAccessible, TileBlittable, TileCompositable):
"""Tile-based surface
The C++ part of this class is in tiledsurface.hpp
"""
def __init__(self, mipmap_level=0, mipmap_surfaces=None,
looped=False, looped_size=(0, 0)):
super(MyPaintSurface, self).__init__()
# TODO: pass just what it needs access to, not all of self
self._backend = mypaintlib.TiledSurface(self)
self.tiledict = {}
self.observers = []
# Used to implement repeating surfaces, like Background
if looped_size[0] % N or looped_size[1] % N:
raise ValueError('Looped size must be multiples of tile size')
self.looped = looped
self.looped_size = looped_size
self.mipmap_level = mipmap_level
if mipmap_level == 0:
assert mipmap_surfaces is None
self._mipmaps = self._create_mipmap_surfaces()
else:
assert mipmap_surfaces is not None
self._mipmaps = mipmap_surfaces
# Forwarding API
self.set_symmetry_state = self._backend.set_symmetry_state
self.begin_atomic = self._backend.begin_atomic
self.get_color = self._backend.get_color
self.get_alpha = self._backend.get_alpha
self.draw_dab = self._backend.draw_dab
def _create_mipmap_surfaces(self):
"""Internal: initializes an internal mipmap lookup table
Overridable to avoid unnecessary work when initializing the background
surface subclass.
"""
assert self.mipmap_level == 0
mipmaps = [self]
for level in range(1, MAX_MIPMAP_LEVEL+1):
s = MyPaintSurface(mipmap_level=level, mipmap_surfaces=mipmaps)
mipmaps.append(s)
# for quick lookup
for level, s in enumerate(mipmaps):
try:
s.parent = mipmaps[level-1]
except IndexError:
s.parent = None
try:
s.mipmap = mipmaps[level+1]
except IndexError:
s.mipmap = None
return mipmaps
def end_atomic(self):
bbox = self._backend.end_atomic()
if (bbox[2] > 0 and bbox[3] > 0):
self.notify_observers(*bbox)
@property
def backend(self):
return self._backend
def notify_observers(self, *args):
for f in self.observers:
f(*args)
def clear(self):
tiles = self.tiledict.keys()
self.tiledict = {}
self.notify_observers(*lib.surface.get_tiles_bbox(tiles))
if self.mipmap:
self.mipmap.clear()
def trim(self, rect):
"""Trim the layer to a rectangle, discarding data outside it
:param rect: A trimming rectangle in model coordinates
:type rect: tuple (x, y, w, h)
Only complete tiles are discarded by this method.
If a tile is neither fully inside nor fully outside the
rectangle, the part of the tile outside the rectangle will be
cleared.
"""
x, y, w, h = rect
logger.info("Trim %dx%d%+d%+d", w, h, x, y)
trimmed = []
for tx, ty in list(self.tiledict.keys()):
if tx*N+N < x or ty*N+N < y or tx*N > x+w or ty*N > y+h:
trimmed.append((tx, ty))
self.tiledict.pop((tx, ty))
self._mark_mipmap_dirty(tx, ty)
elif (tx*N < x and x < tx*N+N
or ty*N < y and y < ty*N+N
or tx*N < x+w and x+w < tx*N+N
or ty*N < y+h and y+h < ty*N+N):
trimmed.append((tx, ty))
with self.tile_request(tx, ty, readonly=False) as rgba:
if tx*N < x and x < tx*N+N:
rgba[:, 0:(x - tx*N), :] = 0 # Clear left edge
if ty*N < y and y < ty*N+N:
rgba[0:(y - ty*N), :, :] = 0 # Clear top edge
if tx*N < x+w and x+w < tx*N+N:
# This slice is [N-1-c for c in range(tx*N+N - (x+w))].
rgba[:, (x+w - tx*N):N, :] = 0 # Clear right edge
if ty*N < y+h and y+h < ty*N+N:
# This slice is [N-1-r for r in range(ty*N+N - (y+h))].
rgba[(y+h - ty*N):N, :, :] = 0 # Clear bottom edge
self._mark_mipmap_dirty(tx, ty)
self.notify_observers(*lib.surface.get_tiles_bbox(trimmed))
@contextlib.contextmanager
def tile_request(self, tx, ty, readonly):
"""Get a tile as a NumPy array, then put it back
:param int tx: Tile X coord (multiply by TILE_SIZE for pixels)
:param int ty: Tile Y coord (multiply by TILE_SIZE for pixels)
:param bool readonly: get a read-only tile
Context manager that fetches a tile as a NumPy array,
and then puts the potentially modified tile back into the
tile backing store. To be used with the 'with' statement.
Read/write tile requests on empty slots get you a new
writeable tile::
>>> surf = MyPaintSurface()
>>> with surf.tile_request(1, 2, readonly=False) as t1:
... t1[...] = (1<<15)
>>> with surf.tile_request(1, 2, readonly=False) as t2:
... assert t2 is t1
... assert (t2 == t1).all()
Read-only tile requests on empty addresses yield the special
transparent tile, which is marked as read-only::
>>> with surf.tile_request(666, 666, readonly=True) as tr:
... assert tr is transparent_tile.rgba
Snapshotting a surface makes all its tiles read-only as a side
effect, so the next read/write tile request will yield a copy
for you to work on::
>>> sshot = surf.save_snapshot()
>>> with surf.tile_request(1, 2, readonly=True) as t3:
... assert t3 is t1
... assert (t3 == t1).all()
>>> with surf.tile_request(1, 2, readonly=False) as t4:
... assert t4 is not t1
... assert (t4 == t1).all()
"""
numpy_tile = self._get_tile_numpy(tx, ty, readonly)
yield numpy_tile
self._set_tile_numpy(tx, ty, numpy_tile, readonly)
def _regenerate_mipmap(self, t, tx, ty):
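# Rebuild one tile of this mipmap level by downscaling the four parent-level
# tiles that cover it; if all four sources are transparent, the entry is
# dropped again and the shared transparent tile is returned instead.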
t = _Tile()
self.tiledict[(tx, ty)] = t
empty = True
for x in xrange(2):
for y in xrange(2):
src = self.parent.tiledict.get((tx*2 + x, ty*2 + y), transparent_tile)
if src is mipmap_dirty_tile:
src = self.parent._regenerate_mipmap(src, tx*2 + x, ty*2 + y)
mypaintlib.tile_downscale_rgba16(src.rgba, t.rgba,
x * N // 2,
y * N // 2)
if src.rgba is not transparent_tile.rgba:
empty = False
if empty:
# rare case, no need to speed it up
del self.tiledict[(tx, ty)]
t = transparent_tile
return t
def _get_tile_numpy(self, tx, ty, readonly):
# OPTIMIZE: do some profiling to check if this function is a bottleneck
# yes it is
# Note: we must return memory that stays valid for writing until the
# last end_atomic(), because of the caching in tiledsurface.hpp.
if self.looped:
tx = tx % (self.looped_size[0] // N)
ty = ty % (self.looped_size[1] // N)
t = self.tiledict.get((tx, ty))
if t is None:
if readonly:
t = transparent_tile
else:
t = _Tile()
self.tiledict[(tx, ty)] = t
if t is mipmap_dirty_tile:
t = self._regenerate_mipmap(t, tx, ty)
if t.readonly and not readonly:
# shared memory, get a private copy for writing
t = t.copy()
self.tiledict[(tx, ty)] = t
if not readonly:
# assert self.mipmap_level == 0
self._mark_mipmap_dirty(tx, ty)
return t.rgba
def _set_tile_numpy(self, tx, ty, obj, readonly):
pass # Data can be modified directly, no action needed
def _mark_mipmap_dirty(self, tx, ty):
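# Mark the covering tile at every mipmap level as needing regeneration;
# stop early once a level is already flagged, since its own ancestors were
# flagged at the time it was marked.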
#assert self.mipmap_level == 0
if not self._mipmaps:
return
for level, mipmap in enumerate(self._mipmaps):
if level == 0:
continue
fac = 2**(level)
if mipmap.tiledict.get((tx // fac, ty // fac),
None) == mipmap_dirty_tile:
break
mipmap.tiledict[(tx // fac, ty // fac)] = mipmap_dirty_tile
def blit_tile_into(self, dst, dst_has_alpha, tx, ty, mipmap_level=0,
*args, **kwargs):
"""Copy one tile from this object into a destination array
See lib.surface.TileBlittable for the parameters. This
implementation adds an extra param:
:param int mipmap_level: layer mipmap level to use
"""
# used mainly for saving (transparent PNG)
#assert dst_has_alpha is True
if self.mipmap_level < mipmap_level:
return self.mipmap.blit_tile_into(dst, dst_has_alpha, tx, ty, mipmap_level)
assert dst.shape[2] == 4
if dst.dtype not in ('uint16', 'uint8'):
raise ValueError('Unsupported destination buffer type %r' % (dst.dtype,))
dst_is_uint16 = (dst.dtype == 'uint16')
with self.tile_request(tx, ty, readonly=True) as src:
if src is transparent_tile.rgba:
#dst[:] = 0 # <-- notably slower than memset()
if dst_is_uint16:
mypaintlib.tile_clear_rgba16(dst)
else:
mypaintlib.tile_clear_rgba8(dst)
else:
if dst_is_uint16:
# this will do memcpy; not worth bothering to skip the u channel
mypaintlib.tile_copy_rgba16_into_rgba16(src, dst)
else:
if dst_has_alpha:
mypaintlib.tile_convert_rgba16_to_rgba8(src, dst)
else:
mypaintlib.tile_convert_rgbu16_to_rgbu8(src, dst)
def composite_tile(self, dst, dst_has_alpha, tx, ty, mipmap_level=0,
opacity=1.0, mode=mypaintlib.CombineNormal,
*args, **kwargs):
"""Composite one tile of this surface over a NumPy array.
See lib.surface.TileCompositable for the parameters. This
implementation adds two further ones:
:param float opacity: opacity multiplier
:param int mode: mode to use when compositing
"""
# Apply zero-alpha-source optimizations if possible.
# Sometimes this can be done without issuing a tile request.
if opacity == 0:
if dst_has_alpha:
if mode in lib.modes.MODES_CLEARING_BACKDROP_AT_ZERO_ALPHA:
mypaintlib.tile_clear_rgba16(dst)
return
if mode not in lib.modes.MODES_EFFECTIVE_AT_ZERO_ALPHA:
return
# Tile request needed, but may need to satisfy it from a deeper
# mipmap level.
if self.mipmap_level < mipmap_level:
self.mipmap.composite_tile(dst, dst_has_alpha, tx, ty,
mipmap_level, opacity, mode)
return
# Tile request at the required level.
# Try optimizations again if we got the special marker tile
with self.tile_request(tx, ty, readonly=True) as src:
if src is transparent_tile.rgba:
if dst_has_alpha:
if mode in lib.modes.MODES_CLEARING_BACKDROP_AT_ZERO_ALPHA:
mypaintlib.tile_clear_rgba16(dst)
return
if mode not in lib.modes.MODES_EFFECTIVE_AT_ZERO_ALPHA:
return
mypaintlib.tile_combine(mode, src, dst, dst_has_alpha, opacity)
## Snapshotting
def save_snapshot(self):
"""Creates and returns a snapshot of the surface
Snapshotting marks all the tiles of the surface as read-only,
then just shallow-copies the tiledict. It's quick. See
tile_request() for how new read/write tiles can be unlocked.
"""
sshot = _SurfaceSnapshot()
for t in self.tiledict.itervalues():
t.readonly = True
sshot.tiledict = self.tiledict.copy()
return sshot
def load_snapshot(self, sshot):
"""Loads a saved snapshot, replacing the internal tiledict"""
self._load_tiledict(sshot.tiledict)
def _load_tiledict(self, d):
"""Efficiently loads a tiledict, and notifies the observers"""
if d == self.tiledict:
# common case optimization, called via stroke.redo()
# testcase: comparison above (if equal) takes 0.6ms, code below 30ms
return
old = set(self.tiledict.iteritems())
self.tiledict = d.copy()
new = set(self.tiledict.iteritems())
dirty = old.symmetric_difference(new)
for pos, tile in dirty:
self._mark_mipmap_dirty(*pos)
bbox = lib.surface.get_tiles_bbox(pos for (pos, tile) in dirty)
if not bbox.empty():
self.notify_observers(*bbox)
## Loading tile data
def load_from_surface(self, other):
"""Loads tile data from another surface, via a snapshot"""
self.load_snapshot(other.save_snapshot())
def _load_from_pixbufsurface(self, s):
dirty_tiles = set(self.tiledict.keys())
self.tiledict = {}
for tx, ty in s.get_tiles():
with self.tile_request(tx, ty, readonly=False) as dst:
s.blit_tile_into(dst, True, tx, ty)
dirty_tiles.update(self.tiledict.keys())
bbox = lib.surface.get_tiles_bbox(dirty_tiles)
self.notify_observers(*bbox)
def load_from_numpy(self, arr, x, y):
"""Loads tile data from a numpy array
:param arr: Array containing the pixel data
:type arr: numpy.ndarray of uint8, dimensions HxWx3 or HxWx4
:param x: X coordinate for the array
:param y: Y coordinate for the array
:returns: the dimensions of the loaded surface, as (x,y,w,h)
"""
h, w, channels = arr.shape
if h <= 0 or w <= 0:
return (x, y, w, h)
if arr.dtype == 'uint8':
s = pixbufsurface.Surface(x, y, w, h, data=arr)
self._load_from_pixbufsurface(s)
else:
raise ValueError("Only uint8 data is supported by MyPaintSurface")
return (x, y, w, h)
def load_from_png(self, filename, x, y, feedback_cb=None,
convert_to_srgb=True,
**kwargs):
"""Load from a PNG, one tilerow at a time, discarding empty tiles.
:param str filename: The file to load
:param int x: X-coordinate at which to load the replacement data
:param int y: Y-coordinate at which to load the replacement data
:param bool convert_to_srgb: If True, convert to sRGB
:param callable feedback_cb: Called every few tile rows
:param dict \*\*kwargs: Ignored
Raises a `lib.errors.FileHandlingError` with a descriptive
string when conversion or PNG reading fails.
"""
dirty_tiles = set(self.tiledict.keys())
self.tiledict = {}
state = {}
state['buf'] = None # array of height N, width depends on image
state['ty'] = y // N # current tile row being filled into buf
state['frame_size'] = None
def get_buffer(png_w, png_h):
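# Callback passed to mypaintlib.load_png_fast_progressive() below: hands the
# PNG reader a buffer strip, one tile-row high, into which it decodes the
# next rows of the image; full strips are flushed to tiles via consume_buf().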
state['frame_size'] = x, y, png_w, png_h
if feedback_cb:
feedback_cb()
buf_x0 = x // N * N
buf_x1 = ((x + png_w - 1) // N + 1) * N
buf_y0 = state['ty']*N
buf_y1 = buf_y0+N
buf_w = buf_x1-buf_x0
buf_h = buf_y1-buf_y0
assert buf_w % N == 0
assert buf_h == N
if state['buf'] is not None:
consume_buf()
else:
state['buf'] = np.empty((buf_h, buf_w, 4), 'uint8')
png_x0 = x
png_x1 = x+png_w
subbuf = state['buf'][:, png_x0-buf_x0:png_x1-buf_x0]
if 1: # optimize: only needed for first and last
state['buf'].fill(0)
png_y0 = max(buf_y0, y)
png_y1 = min(buf_y0+buf_h, y+png_h)
assert png_y1 > png_y0
subbuf = subbuf[png_y0-buf_y0:png_y1-buf_y0, :]
state['ty'] += 1
return subbuf
def consume_buf():
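# Slice the just-filled row buffer into NxN tiles, skip tiles whose alpha
# channel is entirely zero, and convert the rest into 16-bit surface tiles.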
ty = state['ty']-1
for i in xrange(state['buf'].shape[1] // N):
tx = x // N + i
src = state['buf'][:, i*N:(i+1)*N, :]
if src[:, :, 3].any():
with self.tile_request(tx, ty, readonly=False) as dst:
mypaintlib.tile_convert_rgba8_to_rgba16(src, dst)
if sys.platform == 'win32':
filename_sys = filename.encode("utf-8")
else:
filename_sys = filename.encode(sys.getfilesystemencoding()) # FIXME: should not do that, should use open(unicode_object)
try:
flags = mypaintlib.load_png_fast_progressive(
filename_sys,
get_buffer,
convert_to_srgb,
)
except (IOError, OSError, RuntimeError) as ex:
raise FileHandlingError(_("PNG reader failed: %s") % str(ex))
consume_buf() # also process the final chunk of data
logger.debug("PNG loader flags: %r", flags)
dirty_tiles.update(self.tiledict.keys())
bbox = lib.surface.get_tiles_bbox(dirty_tiles)
self.notify_observers(*bbox)
# return the bbox of the loaded image
return state['frame_size']
def render_as_pixbuf(self, *args, **kwargs):
if not self.tiledict:
logger.warning('empty surface')
t0 = time.time()
kwargs['alpha'] = True
res = pixbufsurface.render_as_pixbuf(self, *args, **kwargs)
logger.debug('%.3fs rendering layer as pixbuf', time.time() - t0)
return res
def save_as_png(self, filename, *args, **kwargs):
if 'alpha' not in kwargs:
kwargs['alpha'] = True
if len(self.tiledict) == 1 and self.looped:
kwargs['single_tile_pattern'] = True
lib.surface.save_as_png(self, filename, *args, **kwargs)
def get_bbox(self):
return lib.surface.get_tiles_bbox(self.tiledict)
def get_tiles(self):
return self.tiledict
def is_empty(self):
return not self.tiledict
def remove_empty_tiles(self):
"""Removes tiles from the tiledict which contain no data"""
for pos, data in self.tiledict.items():
if not data.rgba.any():
self.tiledict.pop(pos)
def get_move(self, x, y, sort=True):
"""Returns a move object for this surface
:param x: Start position for the move, X coord
:param y: Start position for the move, Y coord
:param sort: If true, sort tiles to move by distance from (x,y)
:rtype: _TiledSurfaceMove
It's up to the caller to ensure that only one move is active at any
single instant in time.
"""
return _TiledSurfaceMove(self, x, y, sort=sort)
def flood_fill(self, x, y, color, bbox, tolerance, dst_surface):
"""Fills connected areas of this surface into another
:param x: Starting point X coordinate
:param y: Starting point Y coordinate
:param color: an RGB color
:type color: tuple
:param bbox: Bounding box: limits the fill
:type bbox: lib.helpers.Rect or equivalent 4-tuple
:param tolerance: how much filled pixels are permitted to vary
:type tolerance: float [0.0, 1.0]
:param dst_surface: Target surface
:type dst_surface: lib.tiledsurface.MyPaintSurface
See also `lib.layer.Layer.flood_fill()` and `fill.flood_fill()`.
"""
flood_fill(self, x, y, color, bbox, tolerance, dst_surface)
class _TiledSurfaceMove (object):
"""Ongoing move state for a tiled surface, processed in chunks
Tile move processing involves slicing and copying data from a
snapshot of the surface's original tile arrays into an active
surface within the model document. It's therefore potentially very
slow for huge layers: doing this interactively requires the move to
be processed in chunks in idle routines.
Moves are created by a surface's get_move() method starting at a
particular point in model coordinates.
>>> surf = MyPaintSurface()
>>> with surf.tile_request(10, 10, readonly=False) as a:
... a[...] = 1<<15
>>> len(surf.tiledict)
1
>>> move = surf.get_move(N/2, N/2, sort=True)
During an interactive move, the move object is typically updated in
response to the user moving the pointer,
>>> move.update(N/2, N/2)
>>> move.update(N/2 + 1, N/2 + 3)
while being processed in chunks of a few hundred tiles in an idle
routine.
>>> while move.process():
... pass
When the user is done moving things and releases the layer, or quits
the layer moving mode, the conventional way of finalizing things is
>>> move.process(n=-1)
False
>>> move.cleanup()
After the cleanup, the move should not be updated or processed any
further.
Moves which are not an exact multiple of the tile size generally
make more tiles due to slicing and recombining.
>>> len(surf.tiledict)
4
Moves which are an exact multiple of the tile size are processed
faster (and never add tiles to the layer).
>>> surf = MyPaintSurface()
>>> with surf.tile_request(-3, 2, readonly=False) as a:
... a[...] = 1<<15
>>> surf.tiledict.keys()
[(-3, 2)]
>>> move = surf.get_move(0, 0, sort=False)
>>> move.update(N*3, -N*2)
>>> move.process(n=1) # single op suffices
False
>>> move.cleanup()
>>> surf.tiledict.keys()
[(0, 0)]
>>> # Please excuse the doctest for this special case
>>> # just regression-proofing.
Moves can be processed non-interactively by calling all the
different phases together, as above.
"""
def __init__(self, surface, x, y, sort=True):
"""Starts the move, recording state in the Move object
:param x: Where to start, model X coordinate
:param y: Where to start, model Y coordinate
:param sort: If true, sort tiles to move by distance from (x,y)
Sorting tiles by distance makes the move look nicer when moving
interactively, but it's pointless for non-interactive moves.
"""
object.__init__(self)
self.surface = surface
self.snapshot = surface.save_snapshot()
self.chunks = self.snapshot.tiledict.keys()
self.sort = sort
tx = x // N
ty = y // N
self.start_pos = (x, y)
if self.sort:
manhattan_dist = lambda p: abs(tx - p[0]) + abs(ty - p[1])
self.chunks.sort(key=manhattan_dist)
# High water mark of chunks processed so far.
# This is reset on every call to update().
self.chunks_i = 0
# Tile state tracking for individual update cycles
self.written = set()
self.blank_queue = []
# Tile offsets which we'll be applying,
# initially the move is zero.
self.slices_x = calc_translation_slices(0)
self.slices_y = calc_translation_slices(0)
def update(self, dx, dy):
"""Updates the offset during a move
:param dx: New move offset: relative to the constructor x.
:param dy: New move offset: relative to the constructor y.
This causes all the move's work to be re-queued.
"""
# Nothing has been written in this pass yet
self.written = set()
# Tile indices to be cleared during processing,
# unless they've been written to
self.blank_queue = self.surface.tiledict.keys() # fresh!
if self.sort:
x, y = self.start_pos
tx = (x + dx) // N
ty = (y + dy) // N
manhattan_dist = lambda p: abs(tx - p[0]) + abs(ty - p[1])
self.blank_queue.sort(key=manhattan_dist)
# Calculate offsets
self.slices_x = calc_translation_slices(int(dx))
self.slices_y = calc_translation_slices(int(dy))
# Need to process every source chunk
self.chunks_i = 0
def cleanup(self):
"""Cleans up after processing the move.
This must be called after the move has been processed fully, and
should only be called after `process()` indicates that all tiles have
been sliced and moved.
"""
# Process any remaining work. Caller should have done this already.
if self.chunks_i < len(self.chunks) or len(self.blank_queue) > 0:
logger.warning("Stuff left to do at end of move cleanup(). May "
"result in poor interactive appearance. "
"chunks=%d/%d, blanks=%d", self.chunks_i,
len(self.chunks), len(self.blank_queue))
logger.warning("Doing cleanup now...")
self.process(n=-1)
assert self.chunks_i >= len(self.chunks)
assert len(self.blank_queue) == 0
# Remove empty tiles created by Layer Move
self.surface.remove_empty_tiles()
def process(self, n=200):
"""Process a number of pending tile moves
:param int n: The number of source tiles to process in this call
:returns: whether there are any more tiles to process
:rtype: bool
Specify zero or negative `n` to process all remaining tiles.
"""
updated = set()
moves_remaining = self._process_moves(n, updated)
blanks_remaining = self._process_blanks(n, updated)
for pos in updated:
self.surface._mark_mipmap_dirty(*pos)
bbox = lib.surface.get_tiles_bbox(updated)
self.surface.notify_observers(*bbox)
return blanks_remaining or moves_remaining
def _process_moves(self, n, updated):
"""Internal: process pending tile moves
:param int n: as for process()
:param set updated: Set of tile indices to be redrawn (in+out)
:returns: Whether moves need to be processed
:rtype: bool
"""
if self.chunks_i > len(self.chunks):
return False
if n <= 0:
n = len(self.chunks) # process all remaining
is_integral = len(self.slices_x) == 1 and len(self.slices_y) == 1
for src_t in self.chunks[self.chunks_i:self.chunks_i + n]:
src_tx, src_ty = src_t
src_tile = self.snapshot.tiledict[src_t]
for slice_x in self.slices_x:
(src_x0, src_x1), (targ_tdx, targ_x0, targ_x1) = slice_x
for slice_y in self.slices_y:
(src_y0, src_y1), (targ_tdy, targ_y0, targ_y1) = slice_y
targ_tx = src_tx + targ_tdx
targ_ty = src_ty + targ_tdy
targ_t = targ_tx, targ_ty
if is_integral:
# We're lucky. Perform a straight data copy.
self.surface.tiledict[targ_t] = src_tile.copy()
updated.add(targ_t)
self.written.add(targ_t)
continue
# Get a tile to write
targ_tile = None
if targ_t in self.written:
# Reuse a target tile made earlier in this
# update cycle
targ_tile = self.surface.tiledict.get(targ_t, None)
if targ_tile is None:
# Create and store a new blank target tile
# to avoid corruption
targ_tile = _Tile()
self.surface.tiledict[targ_t] = targ_tile
self.written.add(targ_t)
# Copy this source slice to the destination
targ_tile.rgba[targ_y0:targ_y1, targ_x0:targ_x1] \
= src_tile.rgba[src_y0:src_y1, src_x0:src_x1]
updated.add(targ_t)
# The source tile has been fully processed at this point,
# and can be removed from the output dict if it hasn't
# also been written to.
if src_t in self.surface.tiledict and src_t not in self.written:
self.surface.tiledict.pop(src_t, None)
updated.add(src_t)
# Move on, and return whether we're complete
self.chunks_i += n
return self.chunks_i < len(self.chunks)
def _process_blanks(self, n, updated):
"""Internal: process blanking-out queue
:param int n: as for process()
:param set updated: Set of tile indices to be redrawn (in+out)
:returns: Whether any tiles remain in the blanking queue
:rtype: bool
"""
if n <= 0:
n = len(self.blank_queue)
while len(self.blank_queue) > 0 and n > 0:
t = self.blank_queue.pop(0)
if t not in self.written:
self.surface.tiledict.pop(t, None)
updated.add(t)
n -= 1
return len(self.blank_queue) > 0
def calc_translation_slices(dc):
"""Returns a list of offsets and slice extents for a translation
:param dc: translation amount along the axis of interest (pixels)
:type dc: int
:returns: list of offsets and slice extents
The returned slice list's members are of the form
((src_c0, src_c1), (targ_tdc, targ_c0, targ_c1))
where ``src_c0`` and ``src_c1`` determine the extents of the source
slice within a tile, their ``targ_`` equivalents specify where to
put that slice in the target tile, and ``targ_tdc`` is the tile
offset. For example,
>>> assert N == 64, "FIXME: test only valid for 64 pixel tiles"
>>> calc_translation_slices(N*2)
[((0, 64), (2, 0, 64))]
This indicates that all data from each tile is to be put exactly two
tiles after the current tile index. In this case, a simple copy will
suffice. Normally though, translations require slices.
>>> calc_translation_slices(-16)
[((0, 16), (-1, 48, 64)), ((16, 64), (0, 0, 48))]
Two slices are needed for each tile: one strip of 16 pixels at the
start to be copied to the end of the output tile immediately before the
current tile, and one strip of 48px to be copied to the start of the
output tile with the same index as the input.
"""
dcr = dc % N
tdc = (dc // N)
if dcr == 0:
return [
((0, N), (tdc, 0, N))
]
else:
return [
((0, N-dcr), (tdc, dcr, N)),
((N-dcr, N), (tdc+1, 0, dcr))
]
# Set which surface backend to use
Surface = MyPaintSurface
def _new_backend_surface():
"""Fetches a new backend surface object for C test code to use.
Used by mypaintlib internals during tests: see lib/tiledsurface.hpp.
The resultant pointer, after swizzling with SWIG_ConvertPtr(),
exposes the libmypaint "MyPaintSurface" interface.
"""
surface = Surface()
return surface.backend
class BackgroundError(Exception):
"""Errors raised by Background during failed initiailizations"""
pass
class Background (Surface):
"""A background layer surface, with a repeating image"""
def __init__(self, obj, mipmap_level=0):
"""Construct from a color or from a NumPy array
:param obj: RGB triple (uint8), or a HxWx4 or HxWx3 numpy array which
can be either uint8 or uint16.
:param mipmap_level: mipmap level, used internally. Root is zero.
"""
if not isinstance(obj, np.ndarray):
r, g, b = obj
obj = np.zeros((N, N, 3), dtype='uint8')
obj[:, :, :] = r, g, b
height, width = obj.shape[0:2]
if height % N or width % N:
raise BackgroundError('unsupported background tile size: %dx%d' % (width, height))
super(Background, self).__init__(mipmap_level=0, looped=True,
looped_size=(width, height))
self.load_from_numpy(obj, 0, 0)
# Generate mipmap
if mipmap_level <= MAX_MIPMAP_LEVEL:
mipmap_obj = np.zeros((height, width, 4), dtype='uint16')
for ty in range(height // N * 2):
for tx in range(width // N * 2):
with self.tile_request(tx, ty, readonly=True) as src:
mypaintlib.tile_downscale_rgba16(src, mipmap_obj,
tx * N // 2,
ty * N // 2)
self.mipmap = Background(mipmap_obj, mipmap_level+1)
self.mipmap.parent = self
self.mipmap_level = mipmap_level
def _create_mipmap_surfaces(self):
"""Internal override: Background uses a different mipmap impl."""
return None
def load_from_numpy(self, arr, x, y):
"""Loads tile data from a numpy array
This extends the base class's implementation with additional support
for tile-aligned uint16 data.
"""
h, w, channels = arr.shape
if h <= 0 or w <= 0:
return (x, y, w, h)
if arr.dtype == 'uint16':
assert w % N == 0 and h % N == 0
assert x == 0 and y == 0
for ty in range(h // N):
for tx in range(w // N):
with self.tile_request(tx, ty, readonly=False) as dst:
dst[:, :, :] = arr[ty*N:(ty+1)*N, tx*N:(tx+1)*N, :]
return (x, y, w, h)
else:
return super(Background, self).load_from_numpy(arr, x, y)
def flood_fill(src, x, y, color, bbox, tolerance, dst):
"""Fills connected areas of one surface into another
:param src: Source surface-like object
:type src: Anything supporting readonly tile_request()
:param x: Starting point X coordinate
:param y: Starting point Y coordinate
:param color: an RGB color
:type color: tuple
:param bbox: Bounding box: limits the fill
:type bbox: lib.helpers.Rect or equivalent 4-tuple
:param tolerance: how much filled pixels are permitted to vary
:type tolerance: float [0.0, 1.0]
:param dst: Target surface
:type dst: lib.tiledsurface.MyPaintSurface
See also `lib.layer.Layer.flood_fill()`.
"""
# Color to fill with
fill_r, fill_g, fill_b = color
# Limits
tolerance = helpers.clamp(tolerance, 0.0, 1.0)
# Maximum area to fill: tile and in-tile pixel extents
bbx, bby, bbw, bbh = bbox
if bbh <= 0 or bbw <= 0:
return
bbbrx = bbx + bbw - 1
bbbry = bby + bbh - 1
min_tx = int(bbx // N)
min_ty = int(bby // N)
max_tx = int(bbbrx // N)
max_ty = int(bbbry // N)
min_px = int(bbx % N)
min_py = int(bby % N)
max_px = int(bbbrx % N)
max_py = int(bbbry % N)
# Tile and pixel addressing for the seed point
tx, ty = int(x // N), int(y // N)
px, py = int(x % N), int(y % N)
# Sample the pixel color there to obtain the target color
with src.tile_request(tx, ty, readonly=True) as start:
targ_r, targ_g, targ_b, targ_a = [int(c) for c in start[py][px]]
if targ_a == 0:
targ_r = 0
targ_g = 0
targ_b = 0
targ_a = 0
# Flood-fill loop
filled = {}
tileq = [
((tx, ty),
[(px, py)])
]
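# Each queue entry is a tile index plus the seed pixels to fill from within
# that tile; tile_flood_fill() reports seeds that overflow each edge, and
# those are queued for the neighbouring tiles until the queue drains.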
while len(tileq) > 0:
(tx, ty), seeds = tileq.pop(0)
# Bbox-derived limits
if tx > max_tx or ty > max_ty:
continue
if tx < min_tx or ty < min_ty:
continue
# Pixel limits within this tile...
min_x = 0
min_y = 0
max_x = N-1
max_y = N-1
# ... vary at the edges
if tx == min_tx:
min_x = min_px
if ty == min_ty:
min_y = min_py
if tx == max_tx:
max_x = max_px
if ty == max_ty:
max_y = max_py
# Flood-fill one tile
with src.tile_request(tx, ty, readonly=True) as src_tile:
dst_tile = filled.get((tx, ty), None)
if dst_tile is None:
dst_tile = np.zeros((N, N, 4), 'uint16')
filled[(tx, ty)] = dst_tile
overflows = mypaintlib.tile_flood_fill(
src_tile, dst_tile, seeds,
targ_r, targ_g, targ_b, targ_a,
fill_r, fill_g, fill_b,
min_x, min_y, max_x, max_y,
tolerance
)
seeds_n, seeds_e, seeds_s, seeds_w = overflows
# Enqueue overflows in each cardinal direction
if seeds_n and ty > min_ty:
tpos = (tx, ty-1)
tileq.append((tpos, seeds_n))
if seeds_w and tx > min_tx:
tpos = (tx-1, ty)
tileq.append((tpos, seeds_w))
if seeds_s and ty < max_ty:
tpos = (tx, ty+1)
tileq.append((tpos, seeds_s))
if seeds_e and tx < max_tx:
tpos = (tx+1, ty)
tileq.append((tpos, seeds_e))
# Composite filled tiles into the destination surface
mode = mypaintlib.CombineNormal
for (tx, ty), src_tile in filled.iteritems():
with dst.tile_request(tx, ty, readonly=False) as dst_tile:
mypaintlib.tile_combine(mode, src_tile, dst_tile, True, 1.0)
dst._mark_mipmap_dirty(tx, ty)
bbox = lib.surface.get_tiles_bbox(filled)
dst.notify_observers(*bbox)
class PNGFileUpdateTask (object):
"""Piecemeal callable: writes to or replaces a PNG file
See lib.autosave.Autosaveable.
"""
def __init__(self, surface, filename, rect, alpha,
single_tile_pattern=False,
save_srgb_chunks=False,
**kwargs):
super(PNGFileUpdateTask, self).__init__()
self._final_filename = filename
# Sizes. Save at least one tile to allow empty docs to be written
if not rect:
rect = surface.get_bbox()
x, y, w, h = rect
if w == 0 or h == 0:
x, y, w, h = (0, 0, 1, 1)
rect = (x, y, w, h)
# Snapshot and recreate
clone_surface = Surface(
looped=surface.looped,
looped_size=surface.looped_size,
)
clone_surface.load_snapshot(surface.save_snapshot())
# Open a tempfile for writing
tmp_filename = filename + ".tmp"
if os.path.exists(tmp_filename):
os.unlink(tmp_filename)
tmp_fp = open(tmp_filename, "wb")
self._png_writer = mypaintlib.ProgressivePNGWriter(
tmp_fp,
w, h,
alpha,
save_srgb_chunks,
)
self._tmp_filename = tmp_filename
self._tmp_fp = tmp_fp
# What to write
self._strips_iter = lib.surface.scanline_strips_iter(
clone_surface, rect, alpha=alpha,
single_tile_pattern=single_tile_pattern,
**kwargs
)
logger.debug("autosave: scheduled update of %r", self._final_filename)
def __call__(self, *args, **kwargs):
if not (self._png_writer and self._strips_iter):
raise RuntimeError("Called too many times")
try:
strip = next(self._strips_iter)
self._png_writer.write(strip)
return True
except StopIteration:
self._png_writer.close()
self._png_writer = None
self._strips_iter = None
self._tmp_fp.close()
lib.fileutils.replace(
self._tmp_filename,
self._final_filename,
)
logger.debug("autosave: updated %r", self._final_filename)
return False
except:
self._png_writer.close()
self._png_writer = None
self._strips_iter = None
self._tmp_fp.close()
if os.path.exists(self._tmp_filename):
os.unlink(self._tmp_filename)
raise
if __name__ == '__main__':
import doctest
doctest.testmod()
| achadwick/mypaint | lib/tiledsurface.py | Python | gpl-2.0 | 42,966 | 0.000535 |
from direct.directnotify import DirectNotifyGlobal
from direct.distributed import DistributedObject
from otp.speedchat import SpeedChatGlobals
class DistributedScavengerHuntTarget(DistributedObject.DistributedObject):
notify = DirectNotifyGlobal.directNotify.newCategory('DistributedScavengerHuntTarget')
def __init__(self, cr):
DistributedObject.DistributedObject.__init__(self, cr)
def setupListenerDetails(self):
self.triggered = False
self.triggerDelay = 15
self.accept(SpeedChatGlobals.SCCustomMsgEvent, self.phraseSaid)
def phraseSaid(self, phraseId):
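# Fires when a SpeedChat custom phrase is heard: the 'help' phrase (id 10003)
# triggers one scavenger hunt attempt, then further triggers are suppressed
# for triggerDelay seconds by the doMethodLater reset task.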
self.notify.debug('Checking if phrase was said')
helpPhrase = 10003
def reset():
self.triggered = False
if phraseId == helpPhrase and not self.triggered:
self.triggered = True
self.attemptScavengerHunt()
taskMgr.doMethodLater(self.triggerDelay, reset, 'ScavengerHunt-phrase-reset', extraArgs=[])
def announceGenerate(self):
DistributedObject.DistributedObject.announceGenerate(self)
DistributedScavengerHuntTarget.notify.debug('announceGenerate')
self.setupListenerDetails()
def delete(self):
self.ignoreAll()
taskMgr.remove('ScavengerHunt-phrase-reset')
DistributedObject.DistributedObject.delete(self)
def attemptScavengerHunt(self):
DistributedScavengerHuntTarget.notify.debug('attemptScavengerHunt')
self.sendUpdate('attemptScavengerHunt', [])
| ksmit799/Toontown-Source | toontown/ai/DistributedScavengerHuntTarget.py | Python | mit | 1,518 | 0.001976 |
from __future__ import absolute_import
import os
import shutil
from qgis.PyQt import uic
from qgis.PyQt.QtGui import QIcon, QPixmap
from qgis.PyQt.QtWidgets import QDialog, QMessageBox
from os import path
from . import extra_sources
from .data_source_info import DataSourceInfo
from .data_source_serializer import DataSourceSerializer
from .data_sources_list import DataSourcesList
from .group_info import GroupInfo
from .groups_list import GroupsList
from .supported_drivers import KNOWN_DRIVERS
from .gui.editor_widget_gdal import EditorWidgetGdal
from .gui.editor_widget_tms import EditorWidgetTms
from .gui.editor_widget_wms import EditorWidgetWms
from .gui.editor_widget_wfs import EditorWidgetWfs
from .gui.editor_widget_geojson import EditorWidgetGeoJson
from .gui.line_edit_color_validator import LineEditColorValidator
from .plugin_settings import PluginSettings
from .compat2qgis import getOpenFileName
FORM_CLASS, _ = uic.loadUiType(os.path.join(
os.path.dirname(__file__), 'ds_edit_dialog.ui'))
def is_same(file1, file2):
return os.path.normcase(os.path.normpath(file1)) == \
os.path.normcase(os.path.normpath(file2))
class DsEditDialog(QDialog, FORM_CLASS):
def __init__(self, parent=None):
"""Constructor."""
super(DsEditDialog, self).__init__(parent)
self.setupUi(self)
self.DRV_WIDGETS = {
KNOWN_DRIVERS.GDAL: EditorWidgetGdal(),
KNOWN_DRIVERS.TMS: EditorWidgetTms(),
KNOWN_DRIVERS.WMS: EditorWidgetWms(),
KNOWN_DRIVERS.WFS: EditorWidgetWfs(),
KNOWN_DRIVERS.GEOJSON: EditorWidgetGeoJson(),
}
# init icon selector
# self.txtIcon.set_dialog_ext(self.tr('Icons (*.ico *.jpg *.jpeg *.png *.svg);;All files (*.*)'))
# self.txtIcon.set_dialog_title(self.tr('Select icon for data source'))
self.iconChooseButton.clicked.connect(self.choose_icon)
# init combos
self.init_groups_cmb()
self.init_types_cmb()
self.change_spec_tab()
# validators
self.id_validator = LineEditColorValidator(self.txtId, '^[A-Za-z0-9_]+$', error_tooltip=self.tr('Any text'))
self.alias_validator = LineEditColorValidator(self.txtAlias, '^[A-Za-z0-9_ ]+$', error_tooltip=self.tr('Any text'))
# events
self.cmbType.currentIndexChanged.connect(self.change_spec_tab)
# vars
self.ds_info = None
self.init_with_existing = False
self._editor_tab = None
self.set_icon(
os.path.join(
os.path.dirname(__file__),
'icons',
'mapservices.png'
)
)
def init_groups_cmb(self):
ds_groups = GroupsList()
for ds_group in ds_groups.groups.values():
self.cmbGroup.addItem(QIcon(ds_group.icon), self.tr(ds_group.alias), ds_group)
def init_types_cmb(self):
for drv in KNOWN_DRIVERS.ALL_DRIVERS:
self.cmbType.addItem(drv, drv)
def change_spec_tab(self, index=0):
# remove old widget
self.tabWidget.removeTab(2) # bad!
drv = self.cmbType.itemData(self.cmbType.currentIndex())
self.tabWidget.addTab(self.DRV_WIDGETS[drv], drv)
def set_ds_info(self, ds_info):
self.ds_info = ds_info
self.init_with_existing = True
# feel fields
self.feel_common_fields()
self.feel_specific_fields()
def fill_ds_info(self, ds_info):
self.ds_info = ds_info
self.init_with_existing = False
# feel fields
self.feel_common_fields()
self.feel_specific_fields()
def choose_icon(self):
icon_path = getOpenFileName(
self,
self.tr('Select icon for data source'),
PluginSettings.get_default_user_icon_path(),
self.tr('Icons (*.ico *.jpg *.jpeg *.png *.svg);;All files (*.*)')
)
if icon_path != "":
PluginSettings.set_default_user_icon_path(icon_path)
self.set_icon(icon_path)
def set_icon(self, icon_path):
self.__ds_icon = icon_path
self.iconPreview.setPixmap(
QPixmap(self.__ds_icon)
)
def feel_common_fields(self):
self.txtId.setText(self.ds_info.id)
self.txtAlias.setText(self.ds_info.alias)
# self.txtIcon.set_path(self.ds_info.icon_path)
self.set_icon(self.ds_info.icon_path)
# license
self.txtLicense.setText(self.ds_info.lic_name)
self.txtLicenseLink.setText(self.ds_info.lic_link)
self.txtCopyrightText.setText(self.ds_info.copyright_text)
self.txtCopyrightLink.setText(self.ds_info.copyright_link)
self.txtTermsOfUse.setText(self.ds_info.terms_of_use)
# set group
group_index = None
for i in range(self.cmbGroup.count()):
if self.cmbGroup.itemData(i).id == self.ds_info.group:
group_index = i
break
if group_index is not None:
self.cmbGroup.setCurrentIndex(group_index)
else:
non_ex_group = GroupInfo(group_id=self.ds_info.group)
self.cmbGroup.addItem(self.ds_info.group, non_ex_group)
self.cmbGroup.setCurrentIndex(self.cmbGroup.count()-1)
def feel_specific_fields(self):
# set type
self.cmbType.setCurrentIndex(self.cmbType.findData(self.ds_info.type))
# feel widgets
for spec_widget in self.DRV_WIDGETS.values():
spec_widget.feel_form(self.ds_info)
def accept(self):
new_ds_info = DataSourceInfo()
self.feel_ds_info(new_ds_info)
if not self.validate(new_ds_info):
return
if self.init_with_existing:
res = self.save_existing(new_ds_info)
else:
res = self.create_new(new_ds_info)
if res:
super(DsEditDialog, self).accept()
def save_existing(self, ds_info):
if ds_info.id != self.ds_info.id and not self.check_existing_id(ds_info.id):
return False
if ds_info == self.ds_info:
return True
# replace icon if need
if not is_same(ds_info.icon_path, self.ds_info.icon_path):
os.remove(self.ds_info.icon_path)
dir_path = os.path.dirname(self.ds_info.file_path)
ico_file_name = path.basename(ds_info.icon_path)
ico_path = path.join(dir_path, ico_file_name)
shutil.copy(ds_info.icon_path, ico_path)
# replace gdal_conf if need
if ds_info.type == KNOWN_DRIVERS.GDAL:
def copy_new_gdal_file():
dir_path = os.path.dirname(self.ds_info.file_path)
gdal_file_name = path.basename(ds_info.gdal_source_file)
gdal_file_path = path.join(dir_path, gdal_file_name)
shutil.copy(ds_info.gdal_source_file, gdal_file_path)
# old ds = gdal
if self.ds_info.type == KNOWN_DRIVERS.GDAL:
if ds_info.gdal_source_file != self.ds_info.gdal_source_file:
os.remove(self.ds_info.gdal_source_file)
copy_new_gdal_file()
else:
copy_new_gdal_file()
# write config
DataSourceSerializer.write_to_ini(ds_info, self.ds_info.file_path)
return True
def create_new(self, ds_info):
if not self.check_existing_id(ds_info.id):
return False
# set paths
dir_path = path.join(extra_sources.USER_DIR_PATH, extra_sources.DATA_SOURCES_DIR_NAME, ds_info.id)
if path.exists(dir_path):
salt = 0
while path.exists(dir_path + str(salt)):
salt += 1
dir_path += str(salt)
ini_path = path.join(dir_path, 'metadata.ini')
ico_path = path.join(dir_path, ds_info.icon)
# create dir
os.mkdir(dir_path)
# copy icon
shutil.copy(ds_info.icon_path, ico_path)
if ds_info.type == KNOWN_DRIVERS.GDAL:
# copy gdal file
gdal_file_name = path.basename(ds_info.gdal_source_file)
gdal_file_path = path.join(dir_path, gdal_file_name)
shutil.copy(ds_info.gdal_source_file, gdal_file_path)
# write config
DataSourceSerializer.write_to_ini(ds_info, ini_path)
return True
def check_existing_id(self, ds_id):
gl = DataSourcesList()
if ds_id in gl.data_sources.keys():
QMessageBox.critical(self, self.tr('Error on save group'),
self.tr('Data source with such id already exists! Select new id for data source!'))
return False
return True
def feel_ds_info(self, ds_info):
ds_info.id = self.txtId.text()
ds_info.alias = self.txtAlias.text()
# ds_info.icon = os.path.basename(self.txtIcon.get_path())
ds_info.icon = os.path.basename(self.__ds_icon)
ds_info.lic_name = self.txtLicense.text()
ds_info.lic_link = self.txtLicenseLink.text()
ds_info.copyright_text = self.txtCopyrightText.text()
ds_info.copyright_link = self.txtCopyrightLink.text()
ds_info.terms_of_use = self.txtTermsOfUse.text()
ds_info.group = self.cmbGroup.itemData(self.cmbGroup.currentIndex()).id
ds_info.type = self.cmbType.itemData(self.cmbType.currentIndex())
self.DRV_WIDGETS[ds_info.type].feel_ds_info(ds_info)
ds_info.icon_path = self.__ds_icon
# ds_info.icon_path = self.txtIcon.get_path()
def validate(self, ds_info):
# validate common fields
checks = [
(ds_info.id, self.tr('Please, enter data source id')),
(ds_info.alias, self.tr('Please, enter data source alias')),
(ds_info.icon, self.tr('Please, select icon for data source')),
(ds_info.group, self.tr('Please, select group for data source')),
(ds_info.type, self.tr('Please, select type for data source')),
]
for val, comment in checks:
if not val:
QMessageBox.critical(self, self.tr('Error on save data source'), self.tr(comment))
return False
checks_correct = [
(self.id_validator, self.tr('Please, enter correct value for data source id')),
(self.alias_validator, self.tr('Please, enter correct value for data source alias')),
]
for val, comment in checks_correct:
if not val.is_valid():
QMessageBox.critical(self, self.tr('Error on save data source'), self.tr(comment))
return False
# validate special fields
if not self.DRV_WIDGETS[ds_info.type].validate(ds_info):
return False
return True
| nextgis/quickmapservices | src/ds_edit_dialog.py | Python | gpl-2.0 | 10,803 | 0.001574 |
"""log model admin."""
from django.contrib import admin
from django.db import models
from django.forms.widgets import TextInput
from apps.managers.challenge_mgr import challenge_mgr
from apps.managers.log_mgr.models import MakahikiLog
from apps.admin.admin import challenge_designer_site, challenge_manager_site, developer_site
class MakahikiLogAdmin(admin.ModelAdmin):
"""admin"""
list_display = ('request_url', "remote_user", 'remote_ip', 'request_time',
'request_method', 'response_status')
list_filter = ('response_status', 'remote_user')
search_fields = ('request_url', 'remote_ip')
ordering = ["-request_time"]
date_hierarchy = "request_time"
formfield_overrides = {
models.CharField: {'widget': TextInput(attrs={'size': '100'})},
}
def has_add_permission(self, request):
return False
admin.site.register(MakahikiLog, MakahikiLogAdmin)
challenge_designer_site.register(MakahikiLog, MakahikiLogAdmin)
challenge_manager_site.register(MakahikiLog, MakahikiLogAdmin)
developer_site.register(MakahikiLog, MakahikiLogAdmin)
challenge_mgr.register_admin_challenge_info_model("Status", 1, MakahikiLog, 1)
challenge_mgr.register_developer_challenge_info_model("Status", 4, MakahikiLog, 1)
| yongwen/makahiki | makahiki/apps/managers/log_mgr/admin.py | Python | mit | 1,266 | 0.00237 |
from ni.core.selection import Selection
from ni.core.text import char_pos_to_tab_pos
from ni.core.document import InsertDelta, DeleteDelta
class Action(object):
"""Base class for all view actions."""
def __init__(self, view):
self.grouped = False
self.editor = view.editor
self.view = view
def execute(self):
raise NotImplementedError
class MoveCursorAction(Action):
"""Base class for all actions that involve moving the cursor around."""
def __init__(self, view, is_select=False):
super(MoveCursorAction, self).__init__(view)
self.is_select = is_select
def execute(self):
view = self.view
doc = view.document
original_position = view.cursor_pos
original_scroll = view.scroll_pos
self.move()
if original_position != view.cursor_pos or \
original_scroll != view.scroll_pos:
view.invalidate()
if self.is_select:
if view.selection:
end_offset = doc.cursor_pos_to_offset(view.cursor_pos)
view.selection.end = end_offset
else:
start_offset = doc.cursor_pos_to_offset(original_position)
end_offset = doc.cursor_pos_to_offset(view.cursor_pos)
#print original_position, view.cursor_pos, start_offset, end_offset
view.selection = Selection(doc, start_offset, end_offset)
def move(self):
raise NotImplementedError
class EditAction(Action):
"""Base class for all undoable actions."""
def __init__(self, view):
super(EditAction, self).__init__(view)
self.before_cursor_pos = None
self.before_last_x_pos = None
self.before_scroll_pos = None
self.after_cursor_pos = None
self.after_last_x_pos = None
self.after_scroll_pos = None
self.deltas = []
self.is_executed = False
def execute(self):
"""
Save positions so that we can return later and call self.do().
"""
self.is_executed = True
view = self.view
# for undo purposes
self.before_cursor_pos = view.cursor_pos
self.before_last_x_pos = view.last_x_pos
self.before_scroll_pos = view.scroll_pos
self.do()
# recalculate last_x_pos based on where the cursor is now
doc = view.document
y, x = view.cursor_pos
line = doc.get_line(y)
view.last_x_pos = char_pos_to_tab_pos(line, x, doc.tab_size)
# for redo purposes
self.after_cursor_pos = view.cursor_pos
self.after_last_x_pos = view.last_x_pos
self.after_scroll_pos = view.scroll_pos
view.invalidate()
def delete_selection(self):
"""
Common code for deleting a selection used by many edit actions.
"""
view = self.view
doc = view.document
# delete the selection
selection = view.selection.get_normalised()
d = DeleteDelta(doc, selection.start, selection.end-selection.start+1)
d.do()
self.deltas.append(d)
view.selection = None
# move the cursor (insert point) to the start of where the selection
# was before we deleted it
view.cursor_pos = doc.offset_to_cursor_pos(selection.start)
def do(self):
"""
Subclasses should implement this.
"""
raise NotImplementedError
def undo(self):
if not self.is_executed:
raise RuntimeError("Not executed")
for d in reversed(self.deltas):
d.undo()
# reset the cursor and scroll positions to where they were
self.view.cursor_pos = self.before_cursor_pos
self.view.last_x_pos = self.before_last_x_pos
self.view.scroll_pos = self.before_scroll_pos
self.view.invalidate()
def redo(self):
if not self.is_executed:
raise RuntimeError("Not executed")
for d in self.deltas:
d.do()
# reset the cursor and scroll positions to where they were
self.view.cursor_pos = self.after_cursor_pos
self.view.last_x_pos = self.after_last_x_pos
self.view.scroll_pos = self.after_scroll_pos
self.view.invalidate()
class ToggleComment(EditAction):
def __init__(self, view, comment_string):
self.comment_string = comment_string
super(ToggleComment, self).__init__(view)
def do(self):
view = self.view
doc = view.document
settings = self.editor.settings
if view.selection:
selection = view.selection.get_normalised()
from_line = doc.offset_to_cursor_pos(selection.start)[0]
to_line = doc.offset_to_cursor_pos(selection.end)[0]
else:
from_line = view.cursor_pos[0]
to_line = from_line
for y in xrange(from_line, to_line+1):
line = doc.get_line(y)
offset = doc.cursor_pos_to_offset((y, 0))
if line[:len(self.comment_string)] == self.comment_string:
d = DeleteDelta(doc, offset, len(self.comment_string))
else:
d = InsertDelta(doc, offset, self.comment_string)
d.do()
self.deltas.append(d)
# move the cursor if necessary
y, x = view.cursor_pos
line = doc.get_line(y)
if line[:len(self.comment_string)] == self.comment_string:
# we added comment_string, so increase cursor pos
if x != 0:
x += len(self.comment_string)
if x > len(line):
x = len(line)
view.cursor_pos = (y, x)
else:
# we removed comment_string, so decrease cursor pos
x -= len(self.comment_string)
if x < 0:
x = 0
view.cursor_pos = (y, x)
# not sure how best to grow/shrink the selection right now,
# so just destroying it for now
view.selection = None
| lerouxb/ni | actions/base.py | Python | mit | 6,191 | 0.002746 |
#!/usr/bin/env python
from glob import glob
from distutils.core import setup
setup( name="mythutils_recfail_alarm",
version="1.0",
description="Autoamtically notify on Recorder Failed via Prowl service",
author="Wylie Swanson",
author_email="wylie@pingzero.net",
url="http://www.pingzero.net",
scripts=glob("bin/*"),
data_files=[
( '/etc/mythutils/', glob('etc/mythutils/*') ),
( '/etc/cron.d/', glob('etc/cron.d/*') ),
]
)
| wylieswanson/mythutils | mythutils_recfail_alarm/setup.py | Python | gpl-3.0 | 441 | 0.054422 |
##
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for xlmvapich2 compiler toolchain (includes IBM XL compilers (xlc, xlf) and MVAPICH2).
@author: Jack Perdue <j-perdue@tamu.edu> - TAMU HPRC - http://sc.tamu.edu
"""
from easybuild.toolchains.compiler.ibmxl import IBMXL
from easybuild.toolchains.mpi.mvapich2 import Mvapich2
class Xlompi(IBMXL, Mvapich2):
"""
Compiler toolchain with IBM XL compilers (xlc/xlf) and MVAPICH2.
"""
NAME = 'xlmvapich2'
| nesi/easybuild-framework | easybuild/toolchains/xlmvapich2.py | Python | gpl-2.0 | 573 | 0.001745 |
#from django.contrib import admin
from django.contrib.gis import admin
from modeltranslation.admin import TranslationAdmin, TranslationTabularInline
from django.contrib.contenttypes.generic import GenericTabularInline
from cigno.mdtools.models import Connection
from django.utils.translation import ugettext_lazy as _
from geonode.core.models import UserObjectRoleMapping
from django.http import HttpResponseRedirect
from models import *
# reference for resize-fields-in-django-admin
# http://stackoverflow.com/questions/910169/resize-fields-in-django-admin
translation_js = (
'/static/modeltranslation/js/force_jquery.js',
'http://ajax.googleapis.com/ajax/libs/jqueryui/1.8.2/jquery-ui.min.js',
'/static/modeltranslation/js/tabbed_translation_fields.js',
)
translation_css = {
'screen': ('/static/modeltranslation/css/tabbed_translation_fields.css',),
}
class ConnectionInline(GenericTabularInline):
model = Connection
ct_field = 'o_content_type'
ct_fk_field = 'o_object_id'
class InverseConnectionInline(GenericTabularInline):
model = Connection
ct_field = 'd_content_type'
ct_fk_field = 'd_object_id'
class OnlineResourceInline(admin.TabularInline):
model = OnlineResource
classes = ('collapse closed',)
class TemporalExtentInline(admin.TabularInline):
model = TemporalExtent
classes = ('collapse closed',)
extra = 1
class ResourceTemporalExtentInline(admin.TabularInline):
model = ResourceTemporalExtent
classes = ('collapse closed',)
class ReferenceDateInline(admin.TabularInline):
model = ReferenceDate
classes = ('collapse closed',)
extra = 1
class ResourceReferenceDateInline(admin.TabularInline):
model = ResourceReferenceDate
classes = ('collapse closed',)
extra = 1
class ConformityInline(admin.TabularInline):
model = Conformity
classes = ('collapse closed',)
extra = 1
class ResourceConformityInline(admin.TabularInline):
model = ResourceConformity
classes = ('collapse closed',)
extra = 1
class ResponsiblePartyRoleInline(admin.TabularInline):
model = ResponsiblePartyRole
classes = ('collapse closed',)
extra = 1
class ResourceResponsiblePartyRoleInline(admin.TabularInline):
model = ResourceResponsiblePartyRole
classes = ('collapse closed',)
extra = 1
class MdResponsiblePartyRoleInline(admin.TabularInline):
model = MdResponsiblePartyRole
#exclude = ('role',)
readonly_fields = ('role',)
classes = ('collapse closed',)
extra = 1
class ResourceMdResponsiblePartyRoleInline(admin.TabularInline):
model = ResourceMdResponsiblePartyRole
#exclude = ('role',)
readonly_fields = ('role',)
classes = ('collapse closed',)
extra = 1
class BaseCodeAdmin(TranslationAdmin):
list_editable = ['label',]
list_display = ['id', 'label']
class Media:
js = translation_js
css = translation_css
class BaseCodeIsoAdmin(TranslationAdmin):
list_editable = ['label','isoid']
list_display = ['id', 'label', 'isoid']
class Media:
js = translation_js
css = translation_css
class CodeRefSysAdmin(TranslationAdmin):
list_editable = ['label', 'srid']
list_display = ['id', 'label', 'srid']
class Media:
js = translation_js
css = translation_css
class CodeLicenseAdmin(TranslationAdmin):
list_editable = ['label', 'abstract']
list_display = ['id', 'label', 'abstract']
class Media:
js = translation_js
css = translation_css
class CodeDistributionFormatAdmin(TranslationAdmin):
list_editable = ['format','label', 'version', 'mimetype', 'ordering']
list_display = ['id', 'format', 'label', 'version', 'mimetype', 'ordering']
class Media:
js = translation_js
css = translation_css
class ResponsiblePartyAdmin(TranslationAdmin):
# list_editable = ['label', 'version', 'ordering']
# list_display = ['id', 'label', 'version', 'ordering']
class Media:
js = translation_js
css = translation_css
class LayerExtAdmin(TranslationAdmin):
# row-level permissions
# http://www.ibm.com/developerworks/opensource/library/os-django-admin/index.html
def queryset(self, request):
qs = super(LayerExtAdmin, self).queryset(request)
if request.user.is_superuser:
return qs
        return qs.filter(
            id__in=UserObjectRoleMapping.objects.filter(
                user=request.user,
                role__codename__in=('layer_readwrite', 'layer_admin'),
            ).values_list('object_id', flat=True)
        )
list_display = ('titleml',)
inlines = [ # OnlineResourceInline,
TemporalExtentInline,
ReferenceDateInline,
ConformityInline,
ResponsiblePartyRoleInline,
MdResponsiblePartyRoleInline,
# ConnectionInline,
# InverseConnectionInline,
]
#raw_id_fields = ("parent_identifier",)
filter_horizontal = ['presentation_form','spatial_representation_type_ext','topic_category_ext','responsible_party_role','distribution_format','md_responsible_party_role']
# filter_horizontal
#readonly_fields = ['uuid', 'geographic_bounding_box']
# readonly_fields = ['uuid', 'md_uuid', 'geographic_bounding_box', 'md_standard_name', 'md_version_name', 'md_character_set']
search_fields = ['titleml', 'abstractml']
search_fields_verbose = ['Titolo', 'Descrizione'] #GRAPPELLI
list_filter = ('resource_type', 'spatial_representation_type_ext', 'topic_category', 'distribution_format')
list_display = ('id', 'titleml', 'inspire', 'completeness_bar')
fieldsets = (
(_('Metadata'), {
'classes': ('collapse closed',),
'fields': (
'md_uuid',
#'lingua_metadata',
'md_date_stamp',
('md_character_set', 'md_standard_name', 'md_version_name')
)
}),
(_('Identification'), {
'classes': ('collapse closed',),
'fields': (
'titleml', 'abstractml', # 'source_document', # override by resources connections
#'resource_type', 'parent_identifier', 'other_citation_details',
'other_citation_details',
'presentation_form',
'distribution_format'
)
}),
(_('Identification2'), {
'classes': ('collapse closed',),
'fields': (
('resource_type', 'uuid'),
('language', 'character_set'),
'supplemental_information_ml',
'update_frequency',
'spatial_representation_type_ext'
)
}),
(_('Responsible Party'), {
'classes': ('collapse closed',),
'fields': []
}),
(_('Classification e Keywords'), {
'classes': ('collapse closed',),
'fields': (
'inspire', 'topic_category_ext', 'gemetkeywords'
)
}),
(_('Geographic extent'), {
'classes': ('collapse',),
'fields': (
('ref_sys', 'geographic_bounding_box'),
#'geo',
('vertical_datum', 'vertical_extent_min', 'vertical_extent_max', 'uom_vertical_extent')
)
}),
(_('Temporal extent'), {
'classes': ('collapse',),
'fields': []
}),
(_('DataQuality'), {
'classes': ('collapse closed',),
'fields': (
'lineage', ('equivalent_scale', 'distance', 'uom_distance')
)
}),
(_('Conformity'), {
'classes': ('collapse closed',),
'fields': []
}),
# ('Distribution', {
# 'classes': ('collapse closed',),
# 'fields': (
# )
# }),
(_('Constraints'), {
'classes': ('collapse closed',),
'fields': (
'license',
'use_limitation',
('access_constraints', 'use_constraints'),
'other_constraints',
'security_constraints',
)
}),
# ('Relations', {
# 'classes': ('collapse closed',),
# 'fields': []
# }),
#('Sezione sistema - non compilabile', {
# 'classes': ('collapse closed',),
# 'fields': (
# 'geonode_tipo_layer',
# )
# }),
)
class Media:
js = translation_js
css = translation_css
def response_change(self, request, obj):
res = super(LayerExtAdmin, self).response_change(request, obj)
if request.POST.has_key("_save"):
return HttpResponseRedirect(obj.get_absolute_url())
else:
return res
class ResourceAdmin(TranslationAdmin):
# row-level permissions
# http://www.ibm.com/developerworks/opensource/library/os-django-admin/index.html
def queryset(self, request):
qs = super(ResourceAdmin, self).queryset(request)
if request.user.is_superuser:
return qs
        return qs.filter(
            id__in=UserObjectRoleMapping.objects.filter(
                user=request.user,
                role__codename__in=('resource_readwrite', 'resource_admin'),
            ).values_list('object_id', flat=True)
        )
list_display = ('titleml',)
inlines = [ # OnlineResourceInline,
ResourceTemporalExtentInline,
ResourceReferenceDateInline,
ResourceConformityInline,
ResourceResponsiblePartyRoleInline,
ResourceMdResponsiblePartyRoleInline,
# ConnectionInline,
# InverseConnectionInline,
]
#raw_id_fields = ("parent_identifier",)
filter_horizontal = ['presentation_form','spatial_representation_type_ext','topic_category_ext','responsible_party_role','distribution_format','md_responsible_party_role']
# filter_horizontal
#readonly_fields = ['uuid', 'geographic_bounding_box']
#readonly_fields = ['uuid', 'md_uuid', 'geographic_bounding_box', 'md_standard_name', 'md_version_name', 'md_character_set']
readonly_fields = ['uuid', 'md_uuid', 'md_standard_name', 'md_version_name', 'md_character_set']
search_fields = ['titleml', 'abstractml']
search_fields_verbose = ['Titolo', 'Descrizione'] #GRAPPELLI
list_filter = ('resource_type', 'spatial_representation_type_ext', 'topic_category', 'distribution_format')
list_display = ('id', 'titleml', 'inspire') #, 'completeness_bar')
list_editable = ['titleml',]
fieldsets = (
(_('Metadata'), {
'classes': ('collapse closed',),
'fields': (
'md_uuid',
#'lingua_metadata',
'md_date_stamp',
('md_character_set', 'md_standard_name', 'md_version_name')
)
}),
(_('Identification'), {
'classes': ('collapse closed',),
'fields': (
'titleml', 'abstractml', # 'source_document', # override by resources connections
#'resource_type', 'parent_identifier', 'other_citation_details',
'other_citation_details',
'presentation_form',
'distribution_format'
)
}),
(_('Identification2'), {
'classes': ('collapse closed',),
'fields': (
('resource_type', 'uuid'),
('language', 'character_set'),
'supplemental_information_ml',
'update_frequency',
'spatial_representation_type_ext'
)
}),
(_('Responsible Party'), {
'classes': ('collapse closed',),
'fields': []
}),
(_('Classification e Keywords'), {
'classes': ('collapse closed',),
'fields': (
'inspire', 'topic_category_ext', 'gemetkeywords'
)
}),
(_('Geographic extent'), {
'classes': ('collapse',),
'fields': (
#('ref_sys', 'geographic_bounding_box'),
#'geo',
('vertical_datum', 'vertical_extent_min', 'vertical_extent_max', 'uom_vertical_extent')
)
}),
(_('Temporal extent'), {
'classes': ('collapse',),
'fields': []
}),
(_('DataQuality'), {
'classes': ('collapse closed',),
'fields': (
'lineage', ('equivalent_scale', 'distance', 'uom_distance')
)
}),
(_('Conformity'), {
'classes': ('collapse closed',),
'fields': []
}),
# ('Distribution', {
# 'classes': ('collapse closed',),
# 'fields': (
# )
# }),
(_('Constraints'), {
'classes': ('collapse closed',),
'fields': (
'license',
'use_limitation',
('access_constraints', 'use_constraints'),
'other_constraints',
'security_constraints',
)
}),
# ('Relations', {
# 'classes': ('collapse closed',),
# 'fields': []
# }),
#('Sezione sistema - non compilabile', {
# 'classes': ('collapse closed',),
# 'fields': (
# 'geonode_tipo_layer',
# )
# }),
)
class Media:
js = translation_js
css = translation_css
def response_change(self, request, obj):
res = super(ResourceAdmin, self).response_change(request, obj)
if request.POST.has_key("_save"):
return HttpResponseRedirect(obj.get_absolute_url())
else:
return res
admin.site.register(DcCodeResourceType, BaseCodeAdmin)
admin.site.register(CodeScope, BaseCodeIsoAdmin)
admin.site.register(CodeTopicCategory, BaseCodeIsoAdmin)
admin.site.register(CodePresentationForm, BaseCodeIsoAdmin)
admin.site.register(CodeSpatialRepresentationType, BaseCodeIsoAdmin)
admin.site.register(CodeRefSys, CodeRefSysAdmin)
admin.site.register(CodeLicense, CodeLicenseAdmin)
admin.site.register(CodeCharacterSet, BaseCodeIsoAdmin)
admin.site.register(CodeVerticalDatum, BaseCodeAdmin)
admin.site.register(CodeMaintenanceFrequency, BaseCodeIsoAdmin)
admin.site.register(CodeSampleFrequency, BaseCodeIsoAdmin)
admin.site.register(CodeRestriction, BaseCodeIsoAdmin)
admin.site.register(CodeClassification, BaseCodeIsoAdmin)
admin.site.register(CodeTitle, BaseCodeAdmin)
admin.site.register(CodeDateType, BaseCodeIsoAdmin)
admin.site.register(CodeRole, BaseCodeIsoAdmin)
admin.site.register(CodeDistributionFormat, CodeDistributionFormatAdmin)
admin.site.register(ResponsibleParty, ResponsiblePartyAdmin)
admin.site.register(LayerExt, LayerExtAdmin)
admin.site.register(Resource, ResourceAdmin)
class ConnectionTypeAdmin(admin.ModelAdmin):
list_display = ('id', 'url', 'label', 'code', 'inverse')
list_editable = ('url', 'label', 'code', 'inverse')
pass
admin.site.register(ConnectionType, ConnectionTypeAdmin)
admin.site.register(Connection)
|
CIGNo-project/CIGNo
|
cigno/metadata/admin.py
|
Python
|
gpl-3.0
| 16,379
| 0.01044
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2016 CERN.
#
# Invenio is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Invenio Previewer Utilities."""
import cchardet
from flask import current_app
def detect_encoding(fp, default=None):
"""Detect the cahracter encoding of a file.
:param fp: Open Python file pointer.
:param default: Fallback encoding to use.
:returns: The detected encoding.
.. note:: The file pointer is returned at its original read position.
"""
init_pos = fp.tell()
try:
sample = fp.read(
current_app.config.get('PREVIEWER_CHARDET_BYTES', 1024))
# Result contains 'confidence' and 'encoding'
result = cchardet.detect(sample)
threshold = current_app.config.get('PREVIEWER_CHARDET_CONFIDENCE', 0.9)
if result.get('confidence', 0) > threshold:
return result.get('encoding', default)
else:
return default
except Exception:
current_app.logger.warning('Encoding detection failed.', exc_info=True)
return default
finally:
fp.seek(init_pos)
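# ----------------------------------------------------------------------------
# Hedged usage sketch (illustrative only, not part of the original module).
# It assumes an active Flask application context so the PREVIEWER_* config
# lookups above resolve; ``app`` and ``sample.txt`` are hypothetical names.
#
#     with app.app_context():
#         with open('sample.txt', 'rb') as fp:
#             encoding = detect_encoding(fp, default='utf-8')
#             text = fp.read().decode(encoding)
# ----------------------------------------------------------------------------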
|
hachreak/invenio-previewer
|
invenio_previewer/utils.py
|
Python
|
gpl-2.0
| 1,963
| 0
|
#########
# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
from cloudify_rest_client import exceptions
from base_test import BaseServerTestCase
class ProviderContextTestCase(BaseServerTestCase):
def test_post_provider_context(self):
result = self.post('/provider/context', data={
'name': 'test_provider',
'context': {'key': 'value'}
})
self.assertEqual(result.status_code, 201)
self.assertEqual(result.json['status'], 'ok')
def test_get_provider_context(self):
self.test_post_provider_context()
result = self.get('/provider/context').json
self.assertEqual(result['context']['key'], 'value')
self.assertEqual(result['name'], 'test_provider')
def test_post_provider_context_twice_fails(self):
self.test_post_provider_context()
self.assertRaises(self.failureException,
self.test_post_provider_context)
def test_update_provider_context(self):
self.test_post_provider_context()
new_context = {'key': 'new-value'}
self.client.manager.update_context(
'test_provider', new_context)
context = self.client.manager.get_context()
self.assertEqual(context['context'], new_context)
def test_update_empty_provider_context(self):
try:
self.client.manager.update_context(
'test_provider',
{'key': 'value'})
self.fail('Expected failure due to existing context')
except exceptions.CloudifyClientError as e:
self.assertEqual(e.status_code, 404)
self.assertEqual(e.message, 'Provider Context not found')
|
konradxyz/cloudify-manager
|
rest-service/manager_rest/test/test_provider_context.py
|
Python
|
apache-2.0
| 2,268
| 0
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import unittest
from airflow.gcp.hooks.text_to_speech import CloudTextToSpeechHook
from tests.compat import PropertyMock, patch
from tests.gcp.utils.base_gcp_mock import mock_base_gcp_hook_default_project_id
INPUT = {"text": "test text"}
VOICE = {"language_code": "en-US", "ssml_gender": "FEMALE"}
AUDIO_CONFIG = {"audio_encoding": "MP3"}
class TestTextToSpeechHook(unittest.TestCase):
def setUp(self):
with patch(
"airflow.gcp.hooks.base.CloudBaseHook.__init__",
new=mock_base_gcp_hook_default_project_id,
):
self.gcp_text_to_speech_hook = CloudTextToSpeechHook(gcp_conn_id="test")
@patch("airflow.gcp.hooks.text_to_speech.CloudTextToSpeechHook.client_info", new_callable=PropertyMock)
@patch("airflow.gcp.hooks.text_to_speech.CloudTextToSpeechHook._get_credentials")
@patch("airflow.gcp.hooks.text_to_speech.TextToSpeechClient")
def test_text_to_speech_client_creation(self, mock_client, mock_get_creds, mock_client_info):
result = self.gcp_text_to_speech_hook.get_conn()
mock_client.assert_called_once_with(
credentials=mock_get_creds.return_value,
client_info=mock_client_info.return_value
)
self.assertEqual(mock_client.return_value, result)
self.assertEqual(self.gcp_text_to_speech_hook._client, result)
@patch("airflow.gcp.hooks.text_to_speech.CloudTextToSpeechHook.get_conn")
def test_synthesize_speech(self, get_conn):
synthesize_method = get_conn.return_value.synthesize_speech
synthesize_method.return_value = None
self.gcp_text_to_speech_hook.synthesize_speech(
input_data=INPUT, voice=VOICE, audio_config=AUDIO_CONFIG
)
synthesize_method.assert_called_once_with(
input_=INPUT, voice=VOICE, audio_config=AUDIO_CONFIG, retry=None, timeout=None
)
|
Fokko/incubator-airflow
|
tests/gcp/hooks/test_text_to_speech.py
|
Python
|
apache-2.0
| 2,693
| 0.001857
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from .completion import Person
def test_person_suggests_on_all_variants_of_name(write_client):
Person.init(using=write_client)
Person(name='Honza Král', popularity=42).save(refresh=True)
s = Person.search().suggest('t', 'kra', completion={'field': 'suggest'})
response = s.execute()
opts = response.suggest.t[0].options
assert 1 == len(opts)
assert opts[0]._score == 42
assert opts[0]._source.name == 'Honza Král'
|
3lnc/elasticsearch-dsl-py
|
test_elasticsearch_dsl/test_integration/test_examples/test_completion.py
|
Python
|
apache-2.0
| 518
| 0.001938
|
# This module implements the two steps of a 1-D Kalman filter:
# update() combines two Gaussians (measurement update) and predict()
# adds them (motion update), each returning the new mean and variance.
def update(mean1, var1, mean2, var2):
new_mean = ((mean1 * var2) + (mean2*var1))/(var1 + var2)
new_var = 1/(1/var1 + 1/var2)
return [new_mean, new_var]
def predict(mean1, var1, mean2, var2):
new_mean = mean1 + mean2
new_var = var1 + var2
return [new_mean, new_var]
print 'update : ', update(10., 4., 12., 4.)
print predict(10.,4., 12.,4.)
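# Hedged worked example (not part of the original script): the two calls
# above yield [11.0, 2.0] and [22.0, 8.0], since
#   update:  (10*4 + 12*4) / (4 + 4) = 11   and   1 / (1/4 + 1/4) = 2
#   predict: 10 + 12 = 22                   and   4 + 4 = 8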
|
napjon/moocs_solution
|
robotics-udacity/2.4.py
|
Python
|
mit
| 465
| 0.017204
|
"""Access and control log capturing."""
import logging
import os
import re
import sys
from contextlib import contextmanager
from io import StringIO
from pathlib import Path
from typing import AbstractSet
from typing import Dict
from typing import Generator
from typing import List
from typing import Mapping
from typing import Optional
from typing import Tuple
from typing import TypeVar
from typing import Union
from _pytest import nodes
from _pytest._io import TerminalWriter
from _pytest.capture import CaptureManager
from _pytest.compat import final
from _pytest.compat import nullcontext
from _pytest.config import _strtobool
from _pytest.config import Config
from _pytest.config import create_terminal_writer
from _pytest.config import hookimpl
from _pytest.config import UsageError
from _pytest.config.argparsing import Parser
from _pytest.deprecated import check_ispytest
from _pytest.fixtures import fixture
from _pytest.fixtures import FixtureRequest
from _pytest.main import Session
from _pytest.store import StoreKey
from _pytest.terminal import TerminalReporter
DEFAULT_LOG_FORMAT = "%(levelname)-8s %(name)s:%(filename)s:%(lineno)d %(message)s"
DEFAULT_LOG_DATE_FORMAT = "%H:%M:%S"
_ANSI_ESCAPE_SEQ = re.compile(r"\x1b\[[\d;]+m")
caplog_handler_key = StoreKey["LogCaptureHandler"]()
caplog_records_key = StoreKey[Dict[str, List[logging.LogRecord]]]()
def _remove_ansi_escape_sequences(text: str) -> str:
return _ANSI_ESCAPE_SEQ.sub("", text)
class ColoredLevelFormatter(logging.Formatter):
"""A logging formatter which colorizes the %(levelname)..s part of the
log format passed to __init__."""
LOGLEVEL_COLOROPTS: Mapping[int, AbstractSet[str]] = {
logging.CRITICAL: {"red"},
logging.ERROR: {"red", "bold"},
logging.WARNING: {"yellow"},
logging.WARN: {"yellow"},
logging.INFO: {"green"},
logging.DEBUG: {"purple"},
logging.NOTSET: set(),
}
LEVELNAME_FMT_REGEX = re.compile(r"%\(levelname\)([+-.]?\d*s)")
def __init__(self, terminalwriter: TerminalWriter, *args, **kwargs) -> None:
super().__init__(*args, **kwargs)
self._original_fmt = self._style._fmt
self._level_to_fmt_mapping: Dict[int, str] = {}
assert self._fmt is not None
levelname_fmt_match = self.LEVELNAME_FMT_REGEX.search(self._fmt)
if not levelname_fmt_match:
return
levelname_fmt = levelname_fmt_match.group()
for level, color_opts in self.LOGLEVEL_COLOROPTS.items():
formatted_levelname = levelname_fmt % {
"levelname": logging.getLevelName(level)
}
# add ANSI escape sequences around the formatted levelname
color_kwargs = {name: True for name in color_opts}
colorized_formatted_levelname = terminalwriter.markup(
formatted_levelname, **color_kwargs
)
self._level_to_fmt_mapping[level] = self.LEVELNAME_FMT_REGEX.sub(
colorized_formatted_levelname, self._fmt
)
def format(self, record: logging.LogRecord) -> str:
fmt = self._level_to_fmt_mapping.get(record.levelno, self._original_fmt)
self._style._fmt = fmt
return super().format(record)
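# Hedged illustrative sketch (not part of the original module): constructing
# the colored formatter by hand. The format string is an arbitrary example;
# ``tw`` stands for any TerminalWriter instance and ``handler`` is any
# logging handler, both hypothetical here.
#
#     fmt = ColoredLevelFormatter(tw, "%(levelname)-8s %(message)s")
#     handler.setFormatter(fmt)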
class PercentStyleMultiline(logging.PercentStyle):
"""A logging style with special support for multiline messages.
If the message of a record consists of multiple lines, this style
formats the message as if each line were logged separately.
"""
def __init__(self, fmt: str, auto_indent: Union[int, str, bool, None]) -> None:
super().__init__(fmt)
self._auto_indent = self._get_auto_indent(auto_indent)
@staticmethod
def _update_message(
record_dict: Dict[str, object], message: str
) -> Dict[str, object]:
tmp = record_dict.copy()
tmp["message"] = message
return tmp
@staticmethod
def _get_auto_indent(auto_indent_option: Union[int, str, bool, None]) -> int:
"""Determine the current auto indentation setting.
Specify auto indent behavior (on/off/fixed) by passing in
extra={"auto_indent": [value]} to the call to logging.log() or
using a --log-auto-indent [value] command line or the
log_auto_indent [value] config option.
Default behavior is auto-indent off.
Using the string "True" or "on" or the boolean True as the value
turns auto indent on, using the string "False" or "off" or the
boolean False or the int 0 turns it off, and specifying a
positive integer fixes the indentation position to the value
specified.
Any other values for the option are invalid, and will silently be
converted to the default.
:param None|bool|int|str auto_indent_option:
User specified option for indentation from command line, config
or extra kwarg. Accepts int, bool or str. str option accepts the
same range of values as boolean config options, as well as
positive integers represented in str form.
:returns:
Indentation value, which can be
-1 (automatically determine indentation) or
0 (auto-indent turned off) or
>0 (explicitly set indentation position).
"""
if auto_indent_option is None:
return 0
elif isinstance(auto_indent_option, bool):
if auto_indent_option:
return -1
else:
return 0
elif isinstance(auto_indent_option, int):
return int(auto_indent_option)
elif isinstance(auto_indent_option, str):
try:
return int(auto_indent_option)
except ValueError:
pass
try:
if _strtobool(auto_indent_option):
return -1
except ValueError:
return 0
return 0
def format(self, record: logging.LogRecord) -> str:
if "\n" in record.message:
if hasattr(record, "auto_indent"):
# Passed in from the "extra={}" kwarg on the call to logging.log().
auto_indent = self._get_auto_indent(record.auto_indent) # type: ignore[attr-defined]
else:
auto_indent = self._auto_indent
if auto_indent:
lines = record.message.splitlines()
formatted = self._fmt % self._update_message(record.__dict__, lines[0])
if auto_indent < 0:
indentation = _remove_ansi_escape_sequences(formatted).find(
lines[0]
)
else:
# Optimizes logging by allowing a fixed indentation.
indentation = auto_indent
lines[0] = formatted
return ("\n" + " " * indentation).join(lines)
return self._fmt % record.__dict__
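# Hedged illustrative sketch (not part of the original module): per-message
# auto-indentation can be requested through the "extra" mapping described in
# _get_auto_indent() above; the logger name and message are arbitrary examples.
#
#     logging.getLogger("myapp").info(
#         "first line\nsecond line", extra={"auto_indent": True}
#     )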
def get_option_ini(config: Config, *names: str):
for name in names:
ret = config.getoption(name) # 'default' arg won't work as expected
if ret is None:
ret = config.getini(name)
if ret:
return ret
def pytest_addoption(parser: Parser) -> None:
"""Add options to control log capturing."""
group = parser.getgroup("logging")
def add_option_ini(option, dest, default=None, type=None, **kwargs):
parser.addini(
dest, default=default, type=type, help="default value for " + option
)
group.addoption(option, dest=dest, **kwargs)
add_option_ini(
"--log-level",
dest="log_level",
default=None,
metavar="LEVEL",
help=(
"level of messages to catch/display.\n"
"Not set by default, so it depends on the root/parent log handler's"
' effective level, where it is "WARNING" by default.'
),
)
add_option_ini(
"--log-format",
dest="log_format",
default=DEFAULT_LOG_FORMAT,
help="log format as used by the logging module.",
)
add_option_ini(
"--log-date-format",
dest="log_date_format",
default=DEFAULT_LOG_DATE_FORMAT,
help="log date format as used by the logging module.",
)
parser.addini(
"log_cli",
default=False,
type="bool",
help='enable log display during test run (also known as "live logging").',
)
add_option_ini(
"--log-cli-level", dest="log_cli_level", default=None, help="cli logging level."
)
add_option_ini(
"--log-cli-format",
dest="log_cli_format",
default=None,
help="log format as used by the logging module.",
)
add_option_ini(
"--log-cli-date-format",
dest="log_cli_date_format",
default=None,
help="log date format as used by the logging module.",
)
add_option_ini(
"--log-file",
dest="log_file",
default=None,
help="path to a file when logging will be written to.",
)
add_option_ini(
"--log-file-level",
dest="log_file_level",
default=None,
help="log file logging level.",
)
add_option_ini(
"--log-file-format",
dest="log_file_format",
default=DEFAULT_LOG_FORMAT,
help="log format as used by the logging module.",
)
add_option_ini(
"--log-file-date-format",
dest="log_file_date_format",
default=DEFAULT_LOG_DATE_FORMAT,
help="log date format as used by the logging module.",
)
add_option_ini(
"--log-auto-indent",
dest="log_auto_indent",
default=None,
help="Auto-indent multiline messages passed to the logging module. Accepts true|on, false|off or an integer.",
)
_HandlerType = TypeVar("_HandlerType", bound=logging.Handler)
# Not using @contextmanager for performance reasons.
class catching_logs:
"""Context manager that prepares the whole logging machinery properly."""
__slots__ = ("handler", "level", "orig_level")
def __init__(self, handler: _HandlerType, level: Optional[int] = None) -> None:
self.handler = handler
self.level = level
def __enter__(self):
root_logger = logging.getLogger()
if self.level is not None:
self.handler.setLevel(self.level)
root_logger.addHandler(self.handler)
if self.level is not None:
self.orig_level = root_logger.level
root_logger.setLevel(min(self.orig_level, self.level))
return self.handler
def __exit__(self, type, value, traceback):
root_logger = logging.getLogger()
if self.level is not None:
root_logger.setLevel(self.orig_level)
root_logger.removeHandler(self.handler)
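# Hedged illustrative sketch (not part of the original module): attaching a
# temporary handler at DEBUG level for a block of code; the logger name and
# message are arbitrary examples.
#
#     with catching_logs(LogCaptureHandler(), level=logging.DEBUG) as handler:
#         logging.getLogger("myapp").debug("captured")
#     assert handler.records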
class LogCaptureHandler(logging.StreamHandler):
"""A logging handler that stores log records and the log text."""
stream: StringIO
def __init__(self) -> None:
"""Create a new log handler."""
super().__init__(StringIO())
self.records: List[logging.LogRecord] = []
def emit(self, record: logging.LogRecord) -> None:
"""Keep the log records in a list in addition to the log text."""
self.records.append(record)
super().emit(record)
def reset(self) -> None:
self.records = []
self.stream = StringIO()
def handleError(self, record: logging.LogRecord) -> None:
if logging.raiseExceptions:
# Fail the test if the log message is bad (emit failed).
# The default behavior of logging is to print "Logging error"
# to stderr with the call stack and some extra details.
# pytest wants to make such mistakes visible during testing.
raise
@final
class LogCaptureFixture:
"""Provides access and control of log capturing."""
def __init__(self, item: nodes.Node, *, _ispytest: bool = False) -> None:
check_ispytest(_ispytest)
self._item = item
self._initial_handler_level: Optional[int] = None
# Dict of log name -> log level.
self._initial_logger_levels: Dict[Optional[str], int] = {}
def _finalize(self) -> None:
"""Finalize the fixture.
This restores the log levels changed by :meth:`set_level`.
"""
# Restore log levels.
if self._initial_handler_level is not None:
self.handler.setLevel(self._initial_handler_level)
for logger_name, level in self._initial_logger_levels.items():
logger = logging.getLogger(logger_name)
logger.setLevel(level)
@property
def handler(self) -> LogCaptureHandler:
"""Get the logging handler used by the fixture.
:rtype: LogCaptureHandler
"""
return self._item._store[caplog_handler_key]
def get_records(self, when: str) -> List[logging.LogRecord]:
"""Get the logging records for one of the possible test phases.
:param str when:
Which test phase to obtain the records from. Valid values are: "setup", "call" and "teardown".
:returns: The list of captured records at the given stage.
:rtype: List[logging.LogRecord]
.. versionadded:: 3.4
"""
return self._item._store[caplog_records_key].get(when, [])
@property
def text(self) -> str:
"""The formatted log text."""
return _remove_ansi_escape_sequences(self.handler.stream.getvalue())
@property
def records(self) -> List[logging.LogRecord]:
"""The list of log records."""
return self.handler.records
@property
def record_tuples(self) -> List[Tuple[str, int, str]]:
"""A list of a stripped down version of log records intended
for use in assertion comparison.
The format of the tuple is:
(logger_name, log_level, message)
"""
return [(r.name, r.levelno, r.getMessage()) for r in self.records]
@property
def messages(self) -> List[str]:
"""A list of format-interpolated log messages.
Unlike 'records', which contains the format string and parameters for
interpolation, log messages in this list are all interpolated.
Unlike 'text', which contains the output from the handler, log
messages in this list are unadorned with levels, timestamps, etc,
making exact comparisons more reliable.
Note that traceback or stack info (from :func:`logging.exception` or
the `exc_info` or `stack_info` arguments to the logging functions) is
not included, as this is added by the formatter in the handler.
.. versionadded:: 3.7
"""
return [r.getMessage() for r in self.records]
def clear(self) -> None:
"""Reset the list of log records and the captured log text."""
self.handler.reset()
def set_level(self, level: Union[int, str], logger: Optional[str] = None) -> None:
"""Set the level of a logger for the duration of a test.
.. versionchanged:: 3.4
The levels of the loggers changed by this function will be
restored to their initial values at the end of the test.
:param int level: The level.
:param str logger: The logger to update. If not given, the root logger.
"""
logger_obj = logging.getLogger(logger)
# Save the original log-level to restore it during teardown.
self._initial_logger_levels.setdefault(logger, logger_obj.level)
logger_obj.setLevel(level)
if self._initial_handler_level is None:
self._initial_handler_level = self.handler.level
self.handler.setLevel(level)
@contextmanager
def at_level(
self, level: int, logger: Optional[str] = None
) -> Generator[None, None, None]:
"""Context manager that sets the level for capturing of logs. After
the end of the 'with' statement the level is restored to its original
value.
:param int level: The level.
:param str logger: The logger to update. If not given, the root logger.
"""
logger_obj = logging.getLogger(logger)
orig_level = logger_obj.level
logger_obj.setLevel(level)
handler_orig_level = self.handler.level
self.handler.setLevel(level)
try:
yield
finally:
logger_obj.setLevel(orig_level)
self.handler.setLevel(handler_orig_level)
@fixture
def caplog(request: FixtureRequest) -> Generator[LogCaptureFixture, None, None]:
"""Access and control log capturing.
Captured logs are available through the following properties/methods::
* caplog.messages -> list of format-interpolated log messages
* caplog.text -> string containing formatted log output
* caplog.records -> list of logging.LogRecord instances
* caplog.record_tuples -> list of (logger_name, level, message) tuples
* caplog.clear() -> clear captured records and formatted log output string
"""
result = LogCaptureFixture(request.node, _ispytest=True)
yield result
result._finalize()
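# Hedged illustrative sketch (not part of the original module): a minimal test
# using the ``caplog`` fixture documented above; the logger name and message
# are arbitrary examples.
#
#     def test_disk_warning_is_logged(caplog):
#         with caplog.at_level(logging.WARNING, logger="myapp"):
#             logging.getLogger("myapp").warning("disk almost full")
#         assert ("myapp", logging.WARNING,
#                 "disk almost full") in caplog.record_tuples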
def get_log_level_for_setting(config: Config, *setting_names: str) -> Optional[int]:
for setting_name in setting_names:
log_level = config.getoption(setting_name)
if log_level is None:
log_level = config.getini(setting_name)
if log_level:
break
else:
return None
if isinstance(log_level, str):
log_level = log_level.upper()
try:
return int(getattr(logging, log_level, log_level))
except ValueError as e:
# Python logging does not recognise this as a logging level
raise UsageError(
"'{}' is not recognized as a logging level name for "
"'{}'. Please consider passing the "
"logging level num instead.".format(log_level, setting_name)
) from e
# run after terminalreporter/capturemanager are configured
@hookimpl(trylast=True)
def pytest_configure(config: Config) -> None:
config.pluginmanager.register(LoggingPlugin(config), "logging-plugin")
class LoggingPlugin:
"""Attaches to the logging module and captures log messages for each test."""
def __init__(self, config: Config) -> None:
"""Create a new plugin to capture log messages.
The formatter can be safely shared across all handlers so
create a single one for the entire test session here.
"""
self._config = config
# Report logging.
self.formatter = self._create_formatter(
get_option_ini(config, "log_format"),
get_option_ini(config, "log_date_format"),
get_option_ini(config, "log_auto_indent"),
)
self.log_level = get_log_level_for_setting(config, "log_level")
self.caplog_handler = LogCaptureHandler()
self.caplog_handler.setFormatter(self.formatter)
self.report_handler = LogCaptureHandler()
self.report_handler.setFormatter(self.formatter)
# File logging.
self.log_file_level = get_log_level_for_setting(config, "log_file_level")
log_file = get_option_ini(config, "log_file") or os.devnull
if log_file != os.devnull:
directory = os.path.dirname(os.path.abspath(log_file))
if not os.path.isdir(directory):
os.makedirs(directory)
self.log_file_handler = _FileHandler(log_file, mode="w", encoding="UTF-8")
log_file_format = get_option_ini(config, "log_file_format", "log_format")
log_file_date_format = get_option_ini(
config, "log_file_date_format", "log_date_format"
)
log_file_formatter = logging.Formatter(
log_file_format, datefmt=log_file_date_format
)
self.log_file_handler.setFormatter(log_file_formatter)
# CLI/live logging.
self.log_cli_level = get_log_level_for_setting(
config, "log_cli_level", "log_level"
)
if self._log_cli_enabled():
terminal_reporter = config.pluginmanager.get_plugin("terminalreporter")
capture_manager = config.pluginmanager.get_plugin("capturemanager")
# if capturemanager plugin is disabled, live logging still works.
self.log_cli_handler: Union[
_LiveLoggingStreamHandler, _LiveLoggingNullHandler
] = _LiveLoggingStreamHandler(terminal_reporter, capture_manager)
else:
self.log_cli_handler = _LiveLoggingNullHandler()
log_cli_formatter = self._create_formatter(
get_option_ini(config, "log_cli_format", "log_format"),
get_option_ini(config, "log_cli_date_format", "log_date_format"),
get_option_ini(config, "log_auto_indent"),
)
self.log_cli_handler.setFormatter(log_cli_formatter)
def _create_formatter(self, log_format, log_date_format, auto_indent):
# Color option doesn't exist if terminal plugin is disabled.
color = getattr(self._config.option, "color", "no")
if color != "no" and ColoredLevelFormatter.LEVELNAME_FMT_REGEX.search(
log_format
):
formatter: logging.Formatter = ColoredLevelFormatter(
create_terminal_writer(self._config), log_format, log_date_format
)
else:
formatter = logging.Formatter(log_format, log_date_format)
formatter._style = PercentStyleMultiline(
formatter._style._fmt, auto_indent=auto_indent
)
return formatter
def set_log_path(self, fname: str) -> None:
"""Set the filename parameter for Logging.FileHandler().
Creates parent directory if it does not exist.
.. warning::
This is an experimental API.
"""
fpath = Path(fname)
if not fpath.is_absolute():
fpath = self._config.rootpath / fpath
if not fpath.parent.exists():
fpath.parent.mkdir(exist_ok=True, parents=True)
stream = fpath.open(mode="w", encoding="UTF-8")
if sys.version_info >= (3, 7):
old_stream = self.log_file_handler.setStream(stream)
else:
old_stream = self.log_file_handler.stream
self.log_file_handler.acquire()
try:
self.log_file_handler.flush()
self.log_file_handler.stream = stream
finally:
self.log_file_handler.release()
if old_stream:
old_stream.close()
def _log_cli_enabled(self):
"""Return whether live logging is enabled."""
enabled = self._config.getoption(
"--log-cli-level"
) is not None or self._config.getini("log_cli")
if not enabled:
return False
terminal_reporter = self._config.pluginmanager.get_plugin("terminalreporter")
if terminal_reporter is None:
# terminal reporter is disabled e.g. by pytest-xdist.
return False
return True
@hookimpl(hookwrapper=True, tryfirst=True)
def pytest_sessionstart(self) -> Generator[None, None, None]:
self.log_cli_handler.set_when("sessionstart")
with catching_logs(self.log_cli_handler, level=self.log_cli_level):
with catching_logs(self.log_file_handler, level=self.log_file_level):
yield
@hookimpl(hookwrapper=True, tryfirst=True)
def pytest_collection(self) -> Generator[None, None, None]:
self.log_cli_handler.set_when("collection")
with catching_logs(self.log_cli_handler, level=self.log_cli_level):
with catching_logs(self.log_file_handler, level=self.log_file_level):
yield
@hookimpl(hookwrapper=True)
def pytest_runtestloop(self, session: Session) -> Generator[None, None, None]:
if session.config.option.collectonly:
yield
return
if self._log_cli_enabled() and self._config.getoption("verbose") < 1:
# The verbose flag is needed to avoid messy test progress output.
self._config.option.verbose = 1
with catching_logs(self.log_cli_handler, level=self.log_cli_level):
with catching_logs(self.log_file_handler, level=self.log_file_level):
yield # Run all the tests.
@hookimpl
def pytest_runtest_logstart(self) -> None:
self.log_cli_handler.reset()
self.log_cli_handler.set_when("start")
@hookimpl
def pytest_runtest_logreport(self) -> None:
self.log_cli_handler.set_when("logreport")
def _runtest_for(self, item: nodes.Item, when: str) -> Generator[None, None, None]:
"""Implement the internals of the pytest_runtest_xxx() hooks."""
with catching_logs(
self.caplog_handler,
level=self.log_level,
) as caplog_handler, catching_logs(
self.report_handler,
level=self.log_level,
) as report_handler:
caplog_handler.reset()
report_handler.reset()
item._store[caplog_records_key][when] = caplog_handler.records
item._store[caplog_handler_key] = caplog_handler
yield
log = report_handler.stream.getvalue().strip()
item.add_report_section(when, "log", log)
@hookimpl(hookwrapper=True)
def pytest_runtest_setup(self, item: nodes.Item) -> Generator[None, None, None]:
self.log_cli_handler.set_when("setup")
empty: Dict[str, List[logging.LogRecord]] = {}
item._store[caplog_records_key] = empty
yield from self._runtest_for(item, "setup")
@hookimpl(hookwrapper=True)
def pytest_runtest_call(self, item: nodes.Item) -> Generator[None, None, None]:
self.log_cli_handler.set_when("call")
yield from self._runtest_for(item, "call")
@hookimpl(hookwrapper=True)
def pytest_runtest_teardown(self, item: nodes.Item) -> Generator[None, None, None]:
self.log_cli_handler.set_when("teardown")
yield from self._runtest_for(item, "teardown")
del item._store[caplog_records_key]
del item._store[caplog_handler_key]
@hookimpl
def pytest_runtest_logfinish(self) -> None:
self.log_cli_handler.set_when("finish")
@hookimpl(hookwrapper=True, tryfirst=True)
def pytest_sessionfinish(self) -> Generator[None, None, None]:
self.log_cli_handler.set_when("sessionfinish")
with catching_logs(self.log_cli_handler, level=self.log_cli_level):
with catching_logs(self.log_file_handler, level=self.log_file_level):
yield
@hookimpl
def pytest_unconfigure(self) -> None:
# Close the FileHandler explicitly.
# (logging.shutdown might have lost the weakref?!)
self.log_file_handler.close()
class _FileHandler(logging.FileHandler):
"""A logging FileHandler with pytest tweaks."""
def handleError(self, record: logging.LogRecord) -> None:
# Handled by LogCaptureHandler.
pass
class _LiveLoggingStreamHandler(logging.StreamHandler):
"""A logging StreamHandler used by the live logging feature: it will
write a newline before the first log message in each test.
During live logging we must also explicitly disable stdout/stderr
capturing otherwise it will get captured and won't appear in the
terminal.
"""
# Officially stream needs to be a IO[str], but TerminalReporter
# isn't. So force it.
stream: TerminalReporter = None # type: ignore
def __init__(
self,
terminal_reporter: TerminalReporter,
capture_manager: Optional[CaptureManager],
) -> None:
logging.StreamHandler.__init__(self, stream=terminal_reporter) # type: ignore[arg-type]
self.capture_manager = capture_manager
self.reset()
self.set_when(None)
self._test_outcome_written = False
def reset(self) -> None:
"""Reset the handler; should be called before the start of each test."""
self._first_record_emitted = False
def set_when(self, when: Optional[str]) -> None:
"""Prepare for the given test phase (setup/call/teardown)."""
self._when = when
self._section_name_shown = False
if when == "start":
self._test_outcome_written = False
def emit(self, record: logging.LogRecord) -> None:
ctx_manager = (
self.capture_manager.global_and_fixture_disabled()
if self.capture_manager
else nullcontext()
)
with ctx_manager:
if not self._first_record_emitted:
self.stream.write("\n")
self._first_record_emitted = True
elif self._when in ("teardown", "finish"):
if not self._test_outcome_written:
self._test_outcome_written = True
self.stream.write("\n")
if not self._section_name_shown and self._when:
self.stream.section("live log " + self._when, sep="-", bold=True)
self._section_name_shown = True
super().emit(record)
def handleError(self, record: logging.LogRecord) -> None:
# Handled by LogCaptureHandler.
pass
class _LiveLoggingNullHandler(logging.NullHandler):
"""A logging handler used when live logging is disabled."""
def reset(self) -> None:
pass
def set_when(self, when: str) -> None:
pass
def handleError(self, record: logging.LogRecord) -> None:
# Handled by LogCaptureHandler.
pass
|
nicoddemus/pytest
|
src/_pytest/logging.py
|
Python
|
mit
| 29,805
| 0.001309
|
#! /usr/bin/env python
"""
this file converts simple html text into a docbook xml variant.
The mapping of markups and links is far from perfect. But all we
want is the docbook-to-pdf converter and similar technology being
present in the world of docbook-to-anything converters. """
from datetime import date
import match
import sys
m = match.Match
class htm2dbk_conversion_base:
regexlist = [
m()("</[hH]2>(.*)", "m") >> "</title>\n<subtitle>\\1</subtitle>",
m()("<[hH]2>") >> "<sect1 id=\"--filename--\"><title>",
m()("<[Pp]([> ])","m") >> "<para\\1",
m()("</[Pp]>") >> "</para>",
m()("<(pre|PRE)>") >> "<screen>",
m()("</(pre|PRE)>") >> "</screen>",
m()("<[hH]3>") >> "<sect2><title>",
m()("</[hH]3>((?:.(?!<sect2>))*.?)", "s") >> "</title>\\1</sect2>",
m()("<!doctype [^<>]*>","s") >> "",
m()("<!DOCTYPE [^<>]*>","s") >> "",
m()("(<\w+\b[^<>]*\swidth=)(\d+\%)","s") >> "\\1\"\\2\"",
m()("(<\w+\b[^<>]*\s\w+=)(\d+)","s") >> "\\1\"\\2\"",
m()("&&") >> "\&\;\&\;",
m()("\$\<") >> "\$\<\;",
m()("&(\w+[\),])") >> "\&\;\\1",
m()("(</?)span(\s[^<>]*)?>","s") >> "\\1phrase\\2>",
m()("(</?)small(\s[^<>]*)?>","s") >> "\\1note\\2>",
m()("(</?)(b|em|i)>")>> "\\1emphasis>",
m()("(</?)(li)>") >> "\\1listitem>",
m()("(</?)(ul)>") >> "\\1itemizedlist>",
m()("(</?)(ol)>") >> "\\1orderedlist>",
m()("(</?)(dl)>") >> "\\1variablelist>",
m()("<dt\b([^<>]*)>","s") >> "<varlistentry\\1><term>",
m()("</dt\b([^<>]*)>","s") >> "</term>",
m()("<dd\b([^<>]*)>","s") >> "<listitem\\1>",
m()("</dd\b([^<>]*)>","s") >> "</listitem></varlistentry>",
m()("<table\b([^<>]*)>","s")
>> "<informaltable\\1><tgroup cols=\"2\"><tbody>",
m()("</table\b([^<>]*)>","s") >> "</tbody></tgroup></informaltable>",
m()("(</?)tr(\s[^<>]*)?>","s") >> "\\1row\\2>",
m()("(</?)td(\s[^<>]*)?>","s") >> "\\1entry\\2>",
m()("<informaltable\b[^<>]*>\s*<tgroup\b[^<>]*>\s*<tbody>"+
"\s*<row\b[^<>]*>\s*<entry\b[^<>]*>\s*<informaltable\b","s")
>> "<informaltable",
m()("</informaltable>\s*</entry>\s*</row>"+
"\s*</tbody>\s*</tgroup>\s*</informaltable>", "s")
>> "</informaltable>",
m()("(<informaltable[^<>]*\swidth=\"100\%\")","s") >> "\\1 pgwide=\"1\"",
m()("(<tbody>\s*<row[^<>]*>\s*<entry[^<>]*\s)(width=\"50\%\")","s")
>> "<colspec colwidth=\"1*\" /><colspec colwidth=\"1*\" />\n\\1\\2",
m()("<nobr>([\'\`]*)<tt>") >> "<cmdsynopsis>\\1",
m()("</tt>([\'\`]*)</nobr>") >> "\\1</cmdsynopsis>",
m()("<nobr><(?:tt|code)>([\`\"\'])") >> "<cmdsynopsis>\\1",
m()("<(?:tt|code)><nobr>([\`\"\'])") >> "<cmdsynopsis>\\1",
m()("([\`\"\'])</(?:tt|code)></nobr>") >> "\\1</cmdsynopsis>",
m()("([\`\"\'])</nobr></(?:tt|code)>") >> "\\1</cmdsynopsis>",
m()("(</?)tt>") >> "\\1constant>",
m()("(</?)code>") >> "\\1literal>",
m()(">([^<>]+)<br>","s") >> "><highlights>\\1</highlights>",
m()("<br>") >> "<br />",
# m()("<date>") >> "<sect1info><date>",
# m()("</date>") >> "</date></sect1info>",
m()("<reference>") >> "<reference id=\"reference\">" >> 1,
m()("<a\s+href=\"((?:http|ftp|mailto):[^<>]+)\"\s*>((?:.(?!</a>))*.)</a>"
,"s") >> "<ulink url=\"\\1\">\\2</ulink>",
m()("<a\s+href=\"zziplib.html\#([\w_]+)\"\s*>((?:.(?!</a>))*.)</a>","s")
>> "<link linkend=\"$1\">$2</link>",
m()("<a\s+href=\"(zziplib.html)\"\s*>((?:.(?!</a>))*.)</a>","s")
>> "<link linkend=\"reference\">$2</link>",
m()("<a\s+href=\"([\w-]+[.]html)\"\s*>((?:.(?!</a>))*.)</a>","s")
>> "<link linkend=\"\\1\">\\2</link>",
m()("<a\s+href=\"([\w-]+[.](?:h|c|am|txt))\"\s*>((?:.(?!</a>))*.)</a>"
,"s") >> "<ulink url=\"file:\\1\">\\2</ulink>",
m()("<a\s+href=\"([A-Z0-9]+[.][A-Z0-9]+)\"\s*>((?:.(?!</a>))*.)</a>","s")
>> "<ulink url=\"file:\\1\">\\2</ulink>"
# m()("(</?)subtitle>") >> "\\1para>"
# $_ .= "</sect1>" if /<sect1[> ]/
]
regexlist2 = [
m()(r"<br\s*/?>") >> "",
m()(r"(</?)em>") >> r"\1emphasis>",
m()(r"<code>") >> "<userinput>",
m()(r"</code>") >> "</userinput>",
m()(r"<link>") >> "<function>",
m()(r"</link>") >> "</function>",
m()(r"(?s)\s*</screen>") >> "</screen>",
# m()(r"<ul>") >> "</para><programlisting>\n",
# m()(r"</ul>") >> "</programlisting><para>",
m()(r"<ul>") >> "<itemizedlist>",
m()(r"</ul>") >> "</itemizedlist>",
# m()(r"<li>") >> "",
# m()(r"</li>") >> ""
m()(r"<li>") >> "<listitem><para>",
m()(r"</li>") >> "</para></listitem>\n",
]
class htm2dbk_conversion(htm2dbk_conversion_base):
def __init__(self):
self.version = "" # str(date.today)
self.filename = "."
def convert(self,text): # $text
txt = text.replace("<!--VERSION-->", self.version)
for conv in self.regexlist:
txt &= conv
return txt.replace("--filename--", self.filename)
def convert2(self,text): # $text
txt = text.replace("<!--VERSION-->", self.version)
        for conv in self.regexlist2:
txt &= conv
return txt
class htm2dbk_document(htm2dbk_conversion):
""" create document, add(text) and get the value() """
doctype = (
'<!DOCTYPE book PUBLIC "-//OASIS//DTD'+
' DocBook XML V4.1.2//EN"'+"\n"+
' "http://www.oasis-open.org/docbook/xml/4.1.2/docbookx.dtd">'+
"\n")
book_start = '<book><chapter><title>Documentation</title>'+"\n"
book_end_chapters = '</chapter>'+"\n"
book_end = '</book>'+"\n"
def __init__(self):
htm2dbk_conversion.__init__(self)
self.text = self.doctype + self.book_start
def add(self,text):
if self.text & m()("<reference"):
self.text += self.book_end_chapters ; self.book_end_chapters = ""
self.text += self.convert(text).replace(
"<br />","") & (
m()("<link>([^<>]*)</link>") >> "<function>\\1</function>") & (
m()("(?s)(<refentryinfo>\s*)<sect1info>" +
"(<date>[^<>]*</date>)</sect1info>") >> "\\1\\2")
def value(self):
return self.text + self.book_end_chapters + self.book_end
def htm2dbk_files(args):
doc = htm2dbk_document()
for filename in args:
try:
f = open(filename, "r")
doc.filename = filename
doc.add(f.read())
f.close()
except IOError, e:
print >> sys.stderr, "can not open "+filename
return doc.value()
def html2docbook(text):
""" the C comment may contain html markup - simulate with docbook tags """
return htm2dbk_conversion().convert2(text)
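# Hedged illustrative sketch (not part of the original script): html2docbook()
# is meant for short HTML fragments such as the markup found in C comments;
# the fragment below is an arbitrary example.
#
#     html2docbook("open the archive with <code>zzip_open</code>")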
if __name__ == "__main__":
print htm2dbk_files(sys.argv[1:])
|
rivimey/rwmapmaker
|
zziplib/docs/zzipdoc/htm2dbk.py
|
Python
|
gpl-3.0
| 7,044
| 0.017888
|
# Copyright 2013 Donald Stufft
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
import argparse
import pkg_resources
import setuptools
import clint
import requests
import requests_toolbelt
import pkginfo
import twine
from twine._installed import Installed
def _registered_commands(group='twine.registered_commands'):
registered_commands = pkg_resources.iter_entry_points(group=group)
return dict((c.name, c) for c in registered_commands)
def list_dependencies_and_versions():
return [
('pkginfo', Installed(pkginfo).version),
('requests', requests.__version__),
('setuptools', setuptools.__version__),
('requests-toolbelt', requests_toolbelt.__version__),
('clint', clint.__version__),
]
def dep_versions():
return ', '.join(
'{0}: {1}'.format(*dependency)
for dependency in list_dependencies_and_versions()
)
def dispatch(argv):
registered_commands = _registered_commands()
parser = argparse.ArgumentParser(prog="twine")
parser.add_argument(
"--version",
action="version",
version="%(prog)s version {0} ({1})".format(twine.__version__,
dep_versions()),
)
parser.add_argument(
"command",
choices=registered_commands.keys(),
)
parser.add_argument(
"args",
help=argparse.SUPPRESS,
nargs=argparse.REMAINDER,
)
args = parser.parse_args(argv)
main = registered_commands[args.command].load()
main(args.args)
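# Hedged usage sketch (illustrative only, not part of the original module):
# a console-script entry point would typically forward its arguments to
# dispatch(); the argument list shown is only an example.
#
#     import sys
#     dispatch(sys.argv[1:])    # e.g. ["upload", "dist/*"]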
|
sigmavirus24/twine
|
twine/cli.py
|
Python
|
apache-2.0
| 2,154
| 0
|
"""Solvers of systems of polynomial equations. """
from sympy.polys import Poly, groebner, roots
from sympy.polys.polytools import parallel_poly_from_expr
from sympy.polys.polyerrors import (ComputationFailed,
PolificationFailed, CoercionFailed)
from sympy.utilities import postfixes
from sympy.simplify import rcollect
from sympy.core import S
class SolveFailed(Exception):
"""Raised when solver's conditions weren't met. """
def solve_poly_system(seq, *gens, **args):
"""
Solve a system of polynomial equations.
Examples
========
>>> from sympy import solve_poly_system
>>> from sympy.abc import x, y
>>> solve_poly_system([x*y - 2*y, 2*y**2 - x**2], x, y)
[(0, 0), (2, -sqrt(2)), (2, sqrt(2))]
"""
try:
polys, opt = parallel_poly_from_expr(seq, *gens, **args)
except PolificationFailed, exc:
raise ComputationFailed('solve_poly_system', len(seq), exc)
if len(polys) == len(opt.gens) == 2:
f, g = polys
a, b = f.degree_list()
c, d = g.degree_list()
if a <= 2 and b <= 2 and c <= 2 and d <= 2:
try:
return solve_biquadratic(f, g, opt)
except SolveFailed:
pass
return solve_generic(polys, opt)
def solve_biquadratic(f, g, opt):
"""Solve a system of two bivariate quadratic polynomial equations.
Examples
========
>>> from sympy.polys import Options, Poly
>>> from sympy.abc import x, y
>>> from sympy.solvers.polysys import solve_biquadratic
>>> NewOption = Options((x, y), {'domain': 'ZZ'})
>>> a = Poly(y**2 - 4 + x, y, x, domain='ZZ')
>>> b = Poly(y*2 + 3*x - 7, y, x, domain='ZZ')
>>> solve_biquadratic(a, b, NewOption)
[(1/3, 3), (41/27, 11/9)]
>>> a = Poly(y + x**2 - 3, y, x, domain='ZZ')
>>> b = Poly(-y + x - 4, y, x, domain='ZZ')
>>> solve_biquadratic(a, b, NewOption)
[(-sqrt(29)/2 + 7/2, -sqrt(29)/2 - 1/2), (sqrt(29)/2 + 7/2, -1/2 + sqrt(29)/2)]
"""
G = groebner([f, g])
if len(G) == 1 and G[0].is_ground:
return None
if len(G) != 2:
raise SolveFailed
p, q = G
x, y = opt.gens
p = Poly(p, x, expand=False)
q = q.ltrim(-1)
p_roots = [ rcollect(expr, y) for expr in roots(p).keys() ]
q_roots = roots(q).keys()
solutions = []
for q_root in q_roots:
for p_root in p_roots:
solution = (p_root.subs(y, q_root), q_root)
solutions.append(solution)
return sorted(solutions)
def solve_generic(polys, opt):
"""
Solve a generic system of polynomial equations.
Returns all possible solutions over C[x_1, x_2, ..., x_m] of a
set F = { f_1, f_2, ..., f_n } of polynomial equations, using
Groebner basis approach. For now only zero-dimensional systems
are supported, which means F can have at most a finite number
of solutions.
The algorithm works by the fact that, supposing G is the basis
of F with respect to an elimination order (here lexicographic
order is used), G and F generate the same ideal, they have the
same set of solutions. By the elimination property, if G is a
reduced, zero-dimensional Groebner basis, then there exists an
univariate polynomial in G (in its last variable). This can be
solved by computing its roots. Substituting all computed roots
for the last (eliminated) variable in other elements of G, new
polynomial system is generated. Applying the above procedure
recursively, a finite number of solutions can be found.
The ability of finding all solutions by this procedure depends
on the root finding algorithms. If no solutions were found, it
means only that roots() failed, but the system is solvable. To
overcome this difficulty use numerical algorithms instead.
References
==========
.. [Buchberger01] B. Buchberger, Groebner Bases: A Short
Introduction for Systems Theorists, In: R. Moreno-Diaz,
B. Buchberger, J.L. Freire, Proceedings of EUROCAST'01,
February, 2001
.. [Cox97] D. Cox, J. Little, D. O'Shea, Ideals, Varieties
and Algorithms, Springer, Second Edition, 1997, pp. 112
Examples
========
>>> from sympy.polys import Poly, Options
>>> from sympy.solvers.polysys import solve_generic
>>> from sympy.abc import x, y
>>> NewOption = Options((x, y), {'domain': 'ZZ'})
>>> a = Poly(x - y + 5, x, y, domain='ZZ')
>>> b = Poly(x + y - 3, x, y, domain='ZZ')
>>> solve_generic([a, b], NewOption)
[(-1, 4)]
>>> a = Poly(x - 2*y + 5, x, y, domain='ZZ')
>>> b = Poly(2*x - y - 3, x, y, domain='ZZ')
>>> solve_generic([a, b], NewOption)
[(11/3, 13/3)]
>>> a = Poly(x**2 + y, x, y, domain='ZZ')
>>> b = Poly(x + y*4, x, y, domain='ZZ')
>>> solve_generic([a, b], NewOption)
[(0, 0), (1/4, -1/16)]
"""
def _is_univariate(f):
"""Returns True if 'f' is univariate in its last variable. """
for monom in f.monoms():
if any(m > 0 for m in monom[:-1]):
return False
return True
def _subs_root(f, gen, zero):
"""Replace generator with a root so that the result is nice. """
p = f.as_expr({gen: zero})
if f.degree(gen) >= 2:
p = p.expand(deep=False)
return p
def _solve_reduced_system(system, gens, entry=False):
"""Recursively solves reduced polynomial systems. """
if len(system) == len(gens) == 1:
zeros = roots(system[0], gens[-1]).keys()
return [ (zero,) for zero in zeros ]
basis = groebner(system, gens, polys=True)
if len(basis) == 1 and basis[0].is_ground:
if not entry:
return []
else:
return None
univariate = filter(_is_univariate, basis)
if len(univariate) == 1:
f = univariate.pop()
else:
raise NotImplementedError("only zero-dimensional systems supported (finite number of solutions)")
gens = f.gens
gen = gens[-1]
zeros = roots(f.ltrim(gen)).keys()
if not zeros:
return []
if len(basis) == 1:
return [ (zero,) for zero in zeros ]
solutions = []
for zero in zeros:
new_system = []
new_gens = gens[:-1]
for b in basis[:-1]:
eq = _subs_root(b, gen, zero)
if eq is not S.Zero:
new_system.append(eq)
for solution in _solve_reduced_system(new_system, new_gens):
solutions.append(solution + (zero,))
return solutions
try:
result = _solve_reduced_system(polys, opt.gens, entry=True)
except CoercionFailed:
raise NotImplementedError
if result is not None:
return sorted(result)
else:
return None
def solve_triangulated(polys, *gens, **args):
"""
Solve a polynomial system using Gianni-Kalkbrenner algorithm.
The algorithm proceeds by computing one Groebner basis in the ground
domain and then by iteratively computing polynomial factorizations in
appropriately constructed algebraic extensions of the ground domain.
Examples
========
>>> from sympy.solvers.polysys import solve_triangulated
>>> from sympy.abc import x, y, z
>>> F = [x**2 + y + z - 1, x + y**2 + z - 1, x + y + z**2 - 1]
>>> solve_triangulated(F, x, y, z)
[(0, 0, 1), (0, 1, 0), (1, 0, 0)]
References
==========
1. Patrizia Gianni, Teo Mora, Algebraic Solution of System of
Polynomial Equations using Groebner Bases, AAECC-5 on Applied Algebra,
Algebraic Algorithms and Error-Correcting Codes, LNCS 356 247--257, 1989
"""
G = groebner(polys, gens, polys=True)
G = list(reversed(G))
domain = args.get('domain')
if domain is not None:
for i, g in enumerate(G):
G[i] = g.set_domain(domain)
f, G = G[0].ltrim(-1), G[1:]
dom = f.get_domain()
zeros = f.ground_roots()
solutions = set([])
for zero in zeros:
solutions.add(((zero,), dom))
var_seq = reversed(gens[:-1])
vars_seq = postfixes(gens[1:])
for var, vars in zip(var_seq, vars_seq):
_solutions = set([])
for values, dom in solutions:
H, mapping = [], zip(vars, values)
for g in G:
_vars = (var,) + vars
if g.has_only_gens(*_vars) and g.degree(var) != 0:
h = g.ltrim(var).eval(dict(mapping))
if g.degree(var) == h.degree():
H.append(h)
p = min(H, key=lambda h: h.degree())
zeros = p.ground_roots()
for zero in zeros:
if not zero.is_Rational:
dom_zero = dom.algebraic_field(zero)
else:
dom_zero = dom
_solutions.add(((zero,) + values, dom_zero))
solutions = _solutions
solutions = list(solutions)
for i, (solution, _) in enumerate(solutions):
solutions[i] = solution
return sorted(solutions)
|
flacjacket/sympy
|
sympy/solvers/polysys.py
|
Python
|
bsd-3-clause
| 9,192
| 0.001523
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
class Migration(migrations.Migration):
dependencies = [
('rh', '0001_initial'),
('estoque', '0005_auto_20141001_0953'),
('comercial', '0007_auto_20141006_1852'),
('almoxarifado', '0003_auto_20140917_0843'),
]
operations = [
migrations.CreateModel(
name='LinhaListaMaterial',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('quantidade_requisitada', models.DecimalField(max_digits=10, decimal_places=2)),
('quantidade_ja_atendida', models.DecimalField(max_digits=10, decimal_places=2)),
('criado', models.DateTimeField(default=datetime.datetime.now, verbose_name=b'Criado', auto_now_add=True)),
('atualizado', models.DateTimeField(default=datetime.datetime.now, verbose_name=b'Atualizado', auto_now=True)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='LinhaListaMaterialCompra',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('quantidade', models.DecimalField(max_digits=10, decimal_places=2)),
('criado', models.DateTimeField(default=datetime.datetime.now, verbose_name=b'Criado', auto_now_add=True)),
('atualizado', models.DateTimeField(default=datetime.datetime.now, verbose_name=b'Atualizado', auto_now=True)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='LinhaListaMaterialEntregue',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('quantidade', models.DecimalField(max_digits=10, decimal_places=2)),
('criado', models.DateTimeField(default=datetime.datetime.now, verbose_name=b'Criado', auto_now_add=True)),
('atualizado', models.DateTimeField(default=datetime.datetime.now, verbose_name=b'Atualizado', auto_now=True)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='ListaMaterialCompra',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('ativa', models.BooleanField(default=True)),
('criado', models.DateTimeField(default=datetime.datetime.now, verbose_name=b'Criado', auto_now_add=True)),
('atualizado', models.DateTimeField(default=datetime.datetime.now, verbose_name=b'Atualizado', auto_now=True)),
('contrato', models.ForeignKey(blank=True, to='comercial.ContratoFechado', null=True)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='ListaMaterialDoContrato',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('ativa', models.BooleanField(default=True)),
('criado', models.DateTimeField(default=datetime.datetime.now, verbose_name=b'Criado', auto_now_add=True)),
('atualizado', models.DateTimeField(default=datetime.datetime.now, verbose_name=b'Atualizado', auto_now=True)),
('contrato', models.OneToOneField(null=True, blank=True, to='comercial.ContratoFechado')),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='ListaMaterialEntregue',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('entregue', models.BooleanField(default=False)),
('criado', models.DateTimeField(default=datetime.datetime.now, verbose_name=b'Criado', auto_now_add=True)),
('atualizado', models.DateTimeField(default=datetime.datetime.now, verbose_name=b'Atualizado', auto_now=True)),
('contrato', models.ForeignKey(blank=True, to='comercial.ContratoFechado', null=True)),
('entregue_para', models.ForeignKey(related_name=b'entregue_para_set', to='rh.Funcionario')),
('entregue_por', models.ForeignKey(related_name=b'entregue_por_set', to='rh.Funcionario')),
],
options={
},
bases=(models.Model,),
),
migrations.AddField(
model_name='linhalistamaterialentregue',
name='lista',
field=models.ForeignKey(to='almoxarifado.ListaMaterialEntregue'),
preserve_default=True,
),
migrations.AddField(
model_name='linhalistamaterialentregue',
name='produto',
field=models.ForeignKey(to='estoque.Produto'),
preserve_default=True,
),
migrations.AddField(
model_name='linhalistamaterialcompra',
name='lista',
field=models.ForeignKey(to='almoxarifado.ListaMaterialCompra'),
preserve_default=True,
),
migrations.AddField(
model_name='linhalistamaterialcompra',
name='produto',
field=models.ForeignKey(to='estoque.Produto'),
preserve_default=True,
),
migrations.AddField(
model_name='linhalistamaterial',
name='lista',
field=models.ForeignKey(to='almoxarifado.ListaMaterialDoContrato'),
preserve_default=True,
),
migrations.AddField(
model_name='linhalistamaterial',
name='produto',
field=models.ForeignKey(to='estoque.Produto'),
preserve_default=True,
),
]
|
dudanogueira/microerp
|
microerp/almoxarifado/migrations/0004_auto_20141006_1957.py
|
Python
|
lgpl-3.0
| 6,177
| 0.004371
|
__author__ = "Martin Jakomin, Mateja Rojko"
"""
Classes for boolean operators:
- Var
- Neg
- Or
- And
- Const
Functions:
- nnf
- simplify
- cnf
- solve
- simplify_cnf
"""
import itertools
# functions
def nnf(f):
""" Returns negation normal form """
return f.nnf()
def simplify(f):
""" Simplifies the expression """
return nnf(f).simplify()
def cnf(f):
""" Returns conjunctive normal form """
return nnf(f).cnf().simplify()
def solve(f, v):
""" Solves the expression using the variable values v """
return f.solve(v)
def simplify_cnf(f, v):
""" Simplifies the cnf form using the variable values v """
return cnf(f).simplify_cnf(v).simplify()
# classes
class Var():
"""
Variable
"""
def __init__(self, name):
self.name = name
def __str__(self):
return self.name
def solve(self, v):
return v[self.name]
def simplify_cnf(self, v):
if self.name in v:
return Const(v[self.name])
else:
return self
def nnf(self):
return self
def simplify(self):
return self
def cnf(self):
return self
def length(self):
return 1
class Neg():
"""
Negation operator
"""
def __init__(self,v):
self.value = v
def __str__(self):
return "~" + str(self.value.__str__())
def solve(self, v):
return not(self.value.solve(v))
def simplify_cnf(self, v):
if self.value.name in v:
return Const(not(v[self.value.name]))
else:
return self
def nnf(self):
v = self.value
if isinstance(v, Var):
return Neg(v)
elif isinstance(v, Neg):
return v.value.nnf()
elif isinstance(v, And):
return Or([Neg(x) for x in v.value]).nnf()
elif isinstance(v, Or):
return And([Neg(x) for x in v.value]).nnf()
elif isinstance(v, Const):
return v.negate()
def simplify(self):
return self
def cnf(self):
return self
def length(self):
return self.value.length()
class And():
"""
And operator
"""
def __init__(self,lst):
self.value = lst
def __str__(self):
s = "("
for i in self.value:
s += str(i)+" & "
s = s[:len(s)-3]
return s + ")"
def solve(self, v):
for l in self.value:
if l.solve(v) is False:
return False
return True
def simplify_cnf(self, v):
return And([x.simplify_cnf(v) for x in self.value])
def nnf(self):
return And([x.nnf() for x in self.value])
def simplify(self):
s = [x.simplify() for x in self.value]
# And list flatten
ns = []
for x in s:
if isinstance(x, And):
ns.extend(x.value)
else:
ns.append(x)
s = ns
snames = [x.simplify().__str__() for x in s]
s2 = []
for i, x in enumerate(s):
if Neg(x).nnf().__str__() in snames[i+1:]:
return Const(False)
elif isinstance(x, Const):
if x.value is False:
return Const(False)
elif snames[i] not in snames[i+1:]:
s2.append(x)
if len(s2) < 1:
return Const(True)
        elif len(s2) == 1:
return s2[0]
return And(s2)
def cnf(self):
return And([x.cnf().simplify() for x in self.value])
def length(self):
return sum([x.length() for x in self.value])
class Or():
"""
Or operator
"""
def __init__(self, lst):
self.value = lst
def __str__(self):
s = "("
for i in self.value:
s += str(i)+" | "
s = s[:len(s)-3]
return s + ")"
def solve(self, v):
for l in self.value:
if l.solve(v) is True:
return True
return False
def simplify_cnf(self, v):
return Or([x.simplify_cnf(v) for x in self.value])
def nnf(self):
return Or([x.nnf() for x in self.value])
def simplify(self):
s = [x.simplify() for x in self.value]
# Or list flatten
ns = []
for x in s:
if isinstance(x,Or):
ns.extend(x.value)
else:
ns.append(x)
s = ns
snames = [x.simplify().__str__() for x in s]
s2 = []
for i, x in enumerate(s):
if Neg(x).nnf().__str__() in snames[i+1:]:
return Const(True)
elif isinstance(x, Const):
if x.value is True:
return Const(True)
elif snames[i] not in snames[i+1:]:
s2.append(x)
if len(s2) < 1:
return Const(False)
        elif len(s2) == 1:
return s2[0]
return Or(s2)
def cnf(self):
s = [x.cnf().simplify() for x in self.value]
s1 = [x.value if isinstance(x, And) else [x] for x in s]
s2 = []
for e in itertools.product(*s1):
s3 = []
for x in e:
if isinstance(x,Or):
s3.extend(x.value)
else:
s3.append(x)
s2.append(Or(s3))
        if len(s2) == 1:
return s2[0]
return And(s2)
def length(self):
return sum([x.length() for x in self.value])
class Const():
"""
Constant
"""
def __init__(self, c):
self.value = c
def __str__(self):
return str(self.value)
def solve(self, v):
return self.value
def simplify_cnf(self, v):
return self
def nnf(self):
return self
def negate(self):
if self.value is True:
return Const(False)
return Const(True)
def simplify(self):
return self
def cnf(self):
return self
def length(self):
return 1
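# Illustrative usage sketch (added for clarity; not part of the original
# module): build a small formula, convert it to CNF and evaluate it under a
# variable assignment. The variable names and the formula are made up.
if __name__ == "__main__":
    x, y = Var("x"), Var("y")
    f = And([Or([x, Neg(y)]), Neg(x)])
    print(cnf(f))                              # ((x | ~y) & ~x)
    print(solve(f, {"x": False, "y": False}))  # True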
|
MartinGHub/lvr-sat
|
SAT/bool.py
|
Python
|
bsd-3-clause
| 6,053
| 0.000991
|
from time import sleep
from tqdm import tqdm
import requests
url = "http://raw.githubusercontent.com/Alafazam/lecture_notes/master/Cormen%20.pdf"
response = requests.get(url, stream=True)
with open("10MB", "wb") as handle:
total_length = int(response.headers.get('content-length'))/1024
for data in tqdm(response.iter_content(chunk_size=1024),total=total_length, leave=True, unit='KB'):
handle.write(data)
# with open("10MB", 'wb') as f:
# r = requests.get(url, stream=True)
# for chunk in tqdm(r.iter_content()):
# f.write(chunk)
# from tqdm import tqdm
# for i in tqdm(range(10000)):
# sleep(0.01)
|
Alafazam/simple_projects
|
misc/test_tqdm.py
|
Python
|
mit
| 615
| 0.011382
|
from mock import patch
from tests import BaseTestCase
from redash.tasks import refresh_schemas
class TestRefreshSchemas(BaseTestCase):
def test_calls_refresh_of_all_data_sources(self):
self.factory.data_source # trigger creation
with patch(
"redash.tasks.queries.maintenance.refresh_schema.delay"
) as refresh_job:
refresh_schemas()
refresh_job.assert_called()
def test_skips_paused_data_sources(self):
self.factory.data_source.pause()
with patch(
"redash.tasks.queries.maintenance.refresh_schema.delay"
) as refresh_job:
refresh_schemas()
refresh_job.assert_not_called()
self.factory.data_source.resume()
with patch(
"redash.tasks.queries.maintenance.refresh_schema.delay"
) as refresh_job:
refresh_schemas()
refresh_job.assert_called()
|
alexanderlz/redash
|
tests/tasks/test_refresh_schemas.py
|
Python
|
bsd-2-clause
| 934
| 0
|
from pymongo import MongoClient
from passlib.apps import custom_app_context as pwd
client = MongoClient( host = "db" )
ride_sharing = client.ride_sharing
users = ride_sharing.users
users.insert_one( {
'username' : 'sid',
'password_hash' : pwd.encrypt( 'test' ),
'role' : 'driver' } )
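# Illustrative follow-up sketch (added; not part of the original script): read
# the freshly inserted user back and verify a password attempt against the
# stored hash. The 'sid'/'test' credentials are the ones created above.
user = users.find_one({'username': 'sid'})
print(pwd.verify('test', user['password_hash']))  # True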
|
sidthakur/simple-user-management-api
|
user_auth/app/create_db.py
|
Python
|
gpl-3.0
| 299
| 0.040134
|
"""This module prints lists that may or may not contain nested lists"""
def print_lol(the_list):
"""This function takes a positional argument: called "the_list", which is any
Python list which may include nested lists. Each data item in the provided lists
recursively printed to the screen on its own line."""
for each_item in the_list:
if isinstance(each_item,list):
print_lol(each_item)
else:
print(each_item)
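# Illustrative usage sketch (added; not part of the original module): the
# nested list below is made up and is printed flattened, one item per line.
if __name__ == "__main__":
    print_lol(["spam", ["eggs", ["bacon", "toast"]], "coffee"])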
|
tdean1995/HFPythonSandbox
|
dist/nester/nester.py
|
Python
|
apache-2.0
| 481
| 0.008316
|
import os
import PIL
import math
from PIL import Image
class MandelbrotImage:
def __init__(self, folder):
self.folder = folder
self.data_folder = os.path.join(folder, 'data')
self.image_folder = os.path.join(folder, 'image')
if not os.path.isdir(self.image_folder):
os.makedirs(self.image_folder)
def list_data_files(self):
fnames = [fname for fname in os.listdir(self.data_folder)]
fnames = [fname for fname in fnames if fname.endswith('.data')]
fnames.sort(key=lambda x: int(x.split(".")[0]))
return fnames
def data_file_to_data(self, filepath):
with open(os.path.join(self.data_folder, filepath)) as file:
data = file.read()
data = data.split(" ")
width, height, max_iterations, precision = data[:4]
data = data[4:]
return int(width), int(height), int(max_iterations), int(precision), data
def data_to_pixel_data(self, data, coloring_scheme):
pixel_data = []
for i in xrange(0, len(data), 3):
escape_time = data[i]
z_real = data[i+1]
z_imag = data[i+2]
color = coloring_scheme(escape_time, z_real, z_imag, max_iter)
pixel_data.append(color)
return pixel_data
def pixel_data_to_image(self, filename, pixel_data, width, height):
image = Image.new('RGB', (width, height))
image.putdata(pixel_data)
image.save(os.path.join(self.image_folder, filename))
def coloring(escape_time, z_real, z_imag, max_iterations):
escape_time = int(escape_time)
z_real = float(z_real)
z_imag = float(z_imag)
max_iterations = int(max_iterations)
if escape_time == max_iterations + 1:
return (255, 255, 255)
else:
q = escape_time - math.log(math.log((z_real ** 2 + z_imag ** 2))/(2*math.log(2)))
return (int(q*255./max_iterations), 0, 0)
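# Worked example (added for clarity; the numbers are made up): with
# escape_time=10, z = 4+0j and max_iterations=100,
#   q = 10 - log(log(16)/(2*log(2))) = 10 - log(2) ~= 9.31,
# so the pixel colour is (int(9.31*255/100), 0, 0) = (23, 0, 0).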
f = "1"
A = MandelbrotImage("1")
for idx, file in enumerate(A.list_data_files()):
width, height, max_iter, precision, data = A.data_file_to_data(file)
pixel_data = A.data_to_pixel_data(data, coloring)
A.pixel_data_to_image("%s.png" % idx, pixel_data, width, height)
print "Done with file %s" % file
|
alansammarone/mandelbrot
|
mandelbrot_image.py
|
Python
|
gpl-3.0
| 2,037
| 0.025037
|
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Intangible()
result.template = "object/draft_schematic/bio_engineer/bio_component/shared_bio_component_food_duration_2.iff"
result.attribute_template_id = -1
result.stfName("","")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
anhstudios/swganh
|
data/scripts/templates/object/draft_schematic/bio_engineer/bio_component/shared_bio_component_food_duration_2.py
|
Python
|
mit
| 470
| 0.046809
|
'''
Created on Jan 19, 2013
@author: dsnowdon
'''
import os
import tempfile
import datetime
import json
import logging
from naoutil.jsonobj import to_json_string, from_json_string
from naoutil.general import find_class
import robotstate
from event import *
from action import *
from naoutil.naoenv import make_environment
'''
Here we define the memory locations used to store state
'''
MEM_SECURITY_DISTANCE = "WandererSecurityDistance"
MEM_HEADING = "WandererWalkHeading"
MEM_WALK_PATH = "WandererWalkPath"
MEM_DETECTED_FACE_DIRECTION = "WandererFaceDirection"
MEM_PLANNED_ACTIONS = "WandererActionsPlanned"
MEM_CURRENT_ACTIONS = "WandererActionsInProgress"
MEM_COMPLETED_ACTIONS = "WandererActionsCompleted"
MEM_CURRENT_EVENT = "WandererEvent"
MEM_MAP = "WandererMap"
MEM_LOCATION = "WandererLocation"
EVENT_LOOK_FOR_PEOPLE = "WandererEventLookForPeople"
DEFAULT_CONFIG_FILE = "wanderer"
PROPERTY_PLANNER_CLASS = "plannerClass"
DEFAULT_PLANNER_CLASS = "wanderer.randomwalk.RandomWalk"
PROPERTY_EXECUTOR_CLASS = "executorClass"
DEFAULT_EXECUTOR_CLASS = "wanderer.wanderer.PlanExecutor"
PROPERTY_MAPPER_CLASS = "mapperClass"
DEFAULT_MAPPER_CLASS = "wanderer.wanderer.NullMapper"
PROPERTY_UPDATER_CLASSES = "updaterClasses"
PROPERTY_HTTP_PORT = "httpPort"
DEFAULT_HTTP_PORT = 8080
PROPERTY_DATA_COLLECTOR_HOST = "dataCollectorHost"
PROPERTY_DATA_COLLECTOR_PORT = "dataCollectorPort"
PROPERTY_LOOK_FOR_PEOPLE = "lookForPeople"
STATIC_WEB_DIR = "web"
CENTRE_BIAS = False
HEAD_HORIZONTAL_OFFSET = 0
WANDERER_NAME = "wanderer"
# START GLOBALS
# We put instances of planners, executors and mappers here so we don't need to continually create
# new instances
planner_instance = None
executor_instance = None
mapper_instance = None
updater_instances = None
# END GLOBALS
wanderer_logger = logging.getLogger("wanderer.wanderer")
def init_state(env, startPos):
# declare events
env.memory.declareEvent(EVENT_LOOK_FOR_PEOPLE);
# getData & removeData throw errors if the value is not set,
# so ensure all the memory locations we want to use are initialised
env.memory.insertData(MEM_CURRENT_EVENT, None)
# set "security distance"
env.memory.insertData(MEM_SECURITY_DISTANCE, "0.25")
# should we look for people as we go?
lookForPeople = env.get_property(DEFAULT_CONFIG_FILE, PROPERTY_LOOK_FOR_PEOPLE)
if lookForPeople:
env.memory.raiseEvent(EVENT_LOOK_FOR_PEOPLE, True)
env.log("Looking for people")
else:
env.memory.raiseEvent(EVENT_LOOK_FOR_PEOPLE, False)
env.log("Not looking for people")
# set initial position (in list of positions)
env.memory.insertData(MEM_WALK_PATH, [startPos])
# current actions and completed actions
env.memory.insertData(MEM_PLANNED_ACTIONS, "")
env.memory.insertData(MEM_CURRENT_ACTIONS, "")
env.memory.insertData(MEM_COMPLETED_ACTIONS, "")
def shutdown(env):
planner = get_planner_instance(env)
planner.shutdown()
executor = get_executor_instance(env, None)
executor.shutdown()
mapper = get_mapper_instance(env)
mapper.shutdown()
updater_instances = get_updaters(env)
for updater in updater_instances:
updater.shutdown()
'''
Base class for wanderer planning.
Handles generating plans and reacting to events
'''
class Planner(object):
def __init__(self, env_):
super(Planner, self).__init__()
self.env = env_
def handleEvent(self, event, state):
plan = self.dispatch(event, state)
save_plan(self.env, plan)
log_plan(self.env, "New plan", plan)
return plan
# return true if this event should cause the current plan to be executed and
# a new plan created to react to it
def does_event_interrupt_plan(self, event, state):
return True
def dispatch(self, event, state):
methodName = 'handle'+ event.name()
try:
method = getattr(self, methodName)
return method(event, state)
except AttributeError:
self.env.log("Unimplemented event handler for: {}".format(event.name()))
def shutdown(self):
pass
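# Illustrative sketch (added for clarity; not part of the original module):
# dispatch() above routes an event to a method named 'handle' + event.name(),
# so a concrete planner only defines the handlers it needs. The subclass name
# and the "ObstacleDetected" event are hypothetical.
class ObstaclePlanner(Planner):
    def handleObstacleDetected(self, event, state):
        # react to the hypothetical event by planning nothing further
        return []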
'''
Base class for executing plans. Since we may need to trigger choreographe
boxes we delegate actually performing a single action to an actionExecutor
which in most cases will be the choreographe box that called us.
The actionExecutor must implement do_action(action) and all_done()
'''
class PlanExecutor(object):
def __init__(self, env, actionExecutor):
super(PlanExecutor, self).__init__()
self.env = env
self.actionExecutor = actionExecutor
def perform_next_action(self):
self.env.log("perform next action")
# save completed action to history if there is one
completedAction = get_current_action(self.env)
self.env.log("Completed action = {}".format(repr(completedAction)))
if not completedAction is None:
if not isinstance(completedAction, NullAction):
push_completed_action(self.env, completedAction)
# if we have moved, then save current location
if isinstance(completedAction, Move):
self._have_moved_wrapper()
self.env.log("set current action to NullAction")
# ensure that current action is cleared until we have another one
set_current_action(self.env, NullAction())
self.env.log("pop from plan")
# pop first action from plan
action = pop_planned_action(self.env)
if action is None:
self.env.log("No next action")
self.actionExecutor.all_done()
else:
self.env.log("Next action = {}".format(repr(action)))
set_current_action(self.env, action)
self.actionExecutor.do_action(action)
self.env.log("perform_next_action done")
# get current and previous positions and call have_moved
# it's not intended that this method be overridden
def _have_moved_wrapper(self):
self.env.log("Have moved")
pos = get_position(self.env)
lastPos = get_last_position(self.env)
self.have_moved(lastPos, pos)
save_waypoint(self.env, pos)
# hook for base classes to implement additional functionality
# after robot has moved
def have_moved(self, previousPos, currentPos):
pass
def save_position(self):
pos = get_position(self.env)
save_waypoint(self.env, pos)
def shutdown(self):
pass
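# Illustrative sketch (added for clarity; not part of the original module): a
# minimal actionExecutor as required by PlanExecutor above. Only do_action()
# and all_done() are required; the class name and log messages are made up.
class LoggingActionExecutor(object):
    def do_action(self, action):
        # a real executor would trigger the matching choreographe box here
        wanderer_logger.info("executing action: %s", repr(action))
    def all_done(self):
        wanderer_logger.info("plan completed")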
'''
Abstract mapping class
'''
class AbstractMapper(object):
def __init__(self, env):
super(AbstractMapper, self).__init__()
self.env = env
# update map based on new sensor data
def update(self, position, sensors):
pass
# return the current map
def get_map(self):
return None
def shutdown(self):
pass
'''
Null mapper - does nothing, just a place holder for when no mapping is actually required
'''
class NullMapper(AbstractMapper):
def __init__(self, env):
super(NullMapper, self).__init__(env)
'''
Mapper that does no actual mapping, but logs all data to file for future analysis
'''
class FileLoggingMapper(AbstractMapper):
def __init__(self, env, save_data=True):
super(FileLoggingMapper, self).__init__(env)
self.save_data = save_data
if self.save_data:
self.open_data_file()
# save the data to file
def update(self, position, sensors):
if self.save_data:
self.save_update_data(position, sensors)
def open_data_file(self):
self.logFilename = tempfile.mktemp()
self.env.log("Saving sensor data to {}".format(self.logFilename))
self.first_write = True
try:
            # 'w+' so the not-yet-existing temp file is created and can be read back later
            self.logFile = open(self.logFilename, 'w+')
except IOError:
self.env.log("Failed to open file: {}".format(self.logFilename))
self.logFile = None
def save_update_data(self, position, sensors):
if self.logFile:
data = { 'timestamp' : self.timestamp(),
'position' : position,
'leftSonar' : sensors.get_sensor('LeftSonar'),
'rightSonar' : sensors.get_sensor('RightSonar') }
jstr = json.dumps(data)
#self.env.log("Mapper.update: "+jstr)
if not self.first_write:
self.logFile.write(",\n")
self.logFile.write(jstr)
self.first_write = False
self.logFile.flush()
def timestamp(self):
return datetime.datetime.now().strftime('%Y%m%d%H%M%S%f')
# TODO should really block write access while doing this
def write_sensor_data_to_file(self, fp, buffer_size=1024):
if self.logFile:
self.logFile.seek(0)
fp.write('[\n')
while 1:
copy_buffer = self.logFile.read(buffer_size)
if copy_buffer:
fp.write(copy_buffer)
else:
break
fp.write(' ]\n')
self.logFile.seek(0, 2)
def shutdown(self):
if self.logFile:
self.logFile.close()
'''
Get the instance of the planner, creating an instance of the configured class if we don't already
have a planner instance
'''
def get_planner_instance(env):
global planner_instance
if not planner_instance:
fqcn = env.get_property(DEFAULT_CONFIG_FILE, PROPERTY_PLANNER_CLASS, DEFAULT_PLANNER_CLASS)
env.log("Creating a new planner instance of {}".format(fqcn))
klass = find_class(fqcn)
planner_instance = klass(env)
return planner_instance
'''
Get the instance of the plan executor, creating an instance of the class specified in the configuration
file if necessary.
'''
def get_executor_instance(env, actionExecutor):
global executor_instance
if not executor_instance:
fqcn = env.get_property(DEFAULT_CONFIG_FILE, PROPERTY_EXECUTOR_CLASS, DEFAULT_EXECUTOR_CLASS)
env.log("Creating a new executor instance of {}".format(fqcn))
klass = find_class(fqcn)
executor_instance = klass(env, actionExecutor)
# NOT THREAD SAFE
# even if we already had an instance of an executor the choreographe object might have become
# stale so we refresh it. We only have one executor instance at once so this should be OK
executor_instance.actionExecutor = actionExecutor
return executor_instance
'''
Get the instance of the mapper to use
'''
def get_mapper_instance(env):
global mapper_instance
if not mapper_instance:
fqcn = env.get_property(DEFAULT_CONFIG_FILE, PROPERTY_MAPPER_CLASS, DEFAULT_MAPPER_CLASS)
env.log("Creating a new mapper instance of {}".format(fqcn))
klass = find_class(fqcn)
mapper_instance = klass(env)
return mapper_instance
def run_updaters(env, position, sensors):
global wanderer_logger
# do the map update
mapper = get_mapper_instance(env)
if mapper:
try:
mapper.update(position, sensors)
except TypeError as e:
wanderer_logger.error("Error running mapper {0} update: {1}".format(repr(mapper), e))
# run any other updaters
updater_instances = get_updaters(env)
for updater in updater_instances:
try:
            updater.update(position, sensors)
except TypeError as e:
wanderer_logger.error("Error running updater {0} update: {1}".format(repr(updater), e))
def get_updaters(env):
global updater_instances
if not updater_instances:
updater_instances = []
fqcns = env.get_property(DEFAULT_CONFIG_FILE, PROPERTY_UPDATER_CLASSES)
if fqcns:
for fqcn in fqcns:
env.log("Creating a new updater instance of {}".format(fqcn))
klass = find_class(fqcn)
updater = klass(env)
if updater:
updater_instances.append(updater)
return updater_instances
def make_wanderer_environment(box_):
env = make_environment(box_)
env.set_application_name(WANDERER_NAME)
return env
def load_event(env):
return from_json_string(env.memory.getData(MEM_CURRENT_EVENT))
def save_event(env, event):
env.memory.insertData(MEM_CURRENT_EVENT, to_json_string(event))
def load_plan(env):
return from_json_string(env.memory.getData(MEM_PLANNED_ACTIONS))
def save_plan(env, plan):
env.memory.insertData(MEM_PLANNED_ACTIONS, to_json_string(plan))
def load_completed_actions(env):
return from_json_string(env.memory.getData(MEM_COMPLETED_ACTIONS))
def save_completed_actions(env, actions):
env.memory.insertData(MEM_COMPLETED_ACTIONS, to_json_string(actions))
def pop_planned_action(env):
plan = load_plan(env)
action = None
if not plan is None:
if len(plan) > 0:
action = plan[0]
plan = plan[1:]
else:
plan = []
save_plan(env, plan)
return action
def get_current_action(env):
return from_json_string(env.memory.getData(MEM_CURRENT_ACTIONS))
def set_current_action(env, action):
env.memory.insertData(MEM_CURRENT_ACTIONS, to_json_string(action))
def push_completed_action(env, action):
actions = load_completed_actions(env)
if actions is None:
actions = []
actions.append(action)
save_completed_actions(env, actions)
def log_plan(env, msg, plan):
env.log(msg)
for p in plan:
env.log(str(p))
def save_direction(env, hRad):
env.memory.insertData(MEM_HEADING, hRad)
'''
Get the entire path
'''
def get_path(env):
return env.memory.getData(MEM_WALK_PATH)
def set_path(env, path):
env.memory.insertData(MEM_WALK_PATH, path)
'''
Get the last position the robot was at by looking at the path
'''
def get_last_position(env):
path = get_path(env)
pos = None
if not path is None:
try:
pos = path[-1]
except IndexError:
pass
return pos
'''
Get the current position of the robot
'''
def get_position(env):
# 1 = FRAME_WORLD
return env.motion.getPosition("Torso", 1, True)
def save_waypoint(env, waypoint):
path = get_path(env)
if path is None:
path = []
path.append(waypoint)
env.log("Path = "+str(path))
set_path(env, path)
|
davesnowdon/nao-wanderer
|
wanderer/src/main/python/wanderer/wanderer.py
|
Python
|
gpl-2.0
| 14,410
| 0.005968
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('attivita', '0012_attivita_centrale_operativa'),
]
operations = [
migrations.AddField(
model_name='partecipazione',
name='centrale_operativa',
field=models.BooleanField(default=False, db_index=True),
),
]
|
CroceRossaItaliana/jorvik
|
attivita/migrations/0013_partecipazione_centrale_operativa.py
|
Python
|
gpl-3.0
| 448
| 0
|
# -*- coding: utf-8 -*-
#
# RERO ILS
# Copyright (C) 2019 RERO
# Copyright (C) 2020 UCLouvain
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""API for manipulating organisation."""
from functools import partial
from elasticsearch.exceptions import NotFoundError
from .models import OrganisationIdentifier, OrganisationMetadata
from ..api import IlsRecord, IlsRecordsIndexer, IlsRecordsSearch
from ..fetchers import id_fetcher
from ..item_types.api import ItemTypesSearch
from ..libraries.api import LibrariesSearch, Library
from ..minters import id_minter
from ..providers import Provider
from ..utils import sorted_pids
from ..vendors.api import Vendor, VendorsSearch
# provider
OrganisationProvider = type(
'OrganisationProvider',
(Provider,),
dict(identifier=OrganisationIdentifier, pid_type='org')
)
# minter
organisation_id_minter = partial(id_minter, provider=OrganisationProvider)
# fetcher
organisation_id_fetcher = partial(id_fetcher, provider=OrganisationProvider)
class OrganisationsSearch(IlsRecordsSearch):
"""Organisation search."""
class Meta:
"""Meta class."""
index = 'organisations'
doc_types = None
fields = ('*', )
facets = {}
default_filter = None
def get_record_by_viewcode(self, viewcode, fields=None):
"""Search by viewcode."""
query = self.filter('term', code=viewcode).extra(size=1)
if fields:
query = query.source(includes=fields)
response = query.execute()
if response.hits.total.value != 1:
raise NotFoundError(
f'Organisation viewcode {viewcode}: Result not found.')
return response.hits.hits[0]._source
class Organisation(IlsRecord):
"""Organisation class."""
minter = organisation_id_minter
fetcher = organisation_id_fetcher
provider = OrganisationProvider
model_cls = OrganisationMetadata
@classmethod
def get_all(cls):
"""Get all organisations."""
return sorted([
Organisation.get_record_by_id(_id)
for _id in Organisation.get_all_ids()
], key=lambda org: org.get('name'))
@classmethod
def all_code(cls):
"""Get all code."""
return [org.get('code') for org in cls.get_all()]
@classmethod
def get_record_by_viewcode(cls, viewcode):
"""Get record by view code."""
result = OrganisationsSearch().filter(
'term',
code=viewcode
).execute()
if result['hits']['total']['value'] != 1:
raise Exception(
'Organisation (get_record_by_viewcode): Result not found.')
return result['hits']['hits'][0]['_source']
@classmethod
def get_record_by_online_harvested_source(cls, source):
"""Get record by online harvested source.
:param source: the record source
:return: Organisation record or None.
"""
results = OrganisationsSearch().filter(
'term', online_harvested_source=source).scan()
try:
return Organisation.get_record_by_pid(next(results).pid)
except StopIteration:
return None
@property
def organisation_pid(self):
"""Get organisation pid ."""
return self.pid
def online_circulation_category(self):
"""Get the default circulation category for online resources."""
results = ItemTypesSearch().filter(
'term', organisation__pid=self.pid).filter(
'term', type='online').source(['pid']).scan()
try:
return next(results).pid
except StopIteration:
return None
def get_online_locations(self):
"""Get list of online locations."""
return [library.online_location
for library in self.get_libraries() if library.online_location]
def get_libraries_pids(self):
"""Get all libraries pids related to the organisation."""
results = LibrariesSearch().source(['pid'])\
.filter('term', organisation__pid=self.pid)\
.scan()
for result in results:
yield result.pid
def get_libraries(self):
"""Get all libraries related to the organisation."""
pids = self.get_libraries_pids()
for pid in pids:
yield Library.get_record_by_pid(pid)
def get_vendor_pids(self):
"""Get all vendor pids related to the organisation."""
results = VendorsSearch().source(['pid'])\
.filter('term', organisation__pid=self.pid)\
.scan()
for result in results:
yield result.pid
def get_vendors(self):
"""Get all vendors related to the organisation."""
pids = self.get_vendor_pids()
for pid in pids:
yield Vendor.get_record_by_pid(pid)
def get_links_to_me(self, get_pids=False):
"""Record links.
:param get_pids: if True list of linked pids
if False count of linked records
"""
from ..acq_receipts.api import AcqReceiptsSearch
library_query = LibrariesSearch()\
.filter('term', organisation__pid=self.pid)
receipt_query = AcqReceiptsSearch() \
.filter('term', organisation__pid=self.pid)
links = {}
if get_pids:
libraries = sorted_pids(library_query)
receipts = sorted_pids(receipt_query)
else:
libraries = library_query.count()
receipts = receipt_query.count()
if libraries:
links['libraries'] = libraries
if receipts:
links['acq_receipts'] = receipts
return links
def reasons_not_to_delete(self):
"""Get reasons not to delete record."""
cannot_delete = {}
links = self.get_links_to_me()
if links:
cannot_delete['links'] = links
return cannot_delete
def is_test_organisation(self):
"""Check if this is a test organisation."""
if self.get('code') == 'cypress':
return True
return False
class OrganisationsIndexer(IlsRecordsIndexer):
"""Holdings indexing class."""
record_cls = Organisation
def bulk_index(self, record_id_iterator):
"""Bulk index records.
:param record_id_iterator: Iterator yielding record UUIDs.
"""
super().bulk_index(record_id_iterator, doc_type='org')
|
rero/reroils-app
|
rero_ils/modules/organisations/api.py
|
Python
|
gpl-2.0
| 7,019
| 0
|
import numpy as np
from rdkit.Chem import MolFromSmiles
from features import atom_features, bond_features
degrees = [0, 1, 2, 3, 4, 5]
class MolGraph(object):
def __init__(self):
self.nodes = {} # dict of lists of nodes, keyed by node type
def new_node(self, ntype, features=None, rdkit_ix=None):
new_node = Node(ntype, features, rdkit_ix)
self.nodes.setdefault(ntype, []).append(new_node)
return new_node
def add_subgraph(self, subgraph):
old_nodes = self.nodes
new_nodes = subgraph.nodes
for ntype in set(old_nodes.keys()) | set(new_nodes.keys()):
old_nodes.setdefault(ntype, []).extend(new_nodes.get(ntype, []))
def sort_nodes_by_degree(self, ntype):
nodes_by_degree = {i : [] for i in degrees}
for node in self.nodes[ntype]:
nodes_by_degree[len(node.get_neighbors(ntype))].append(node)
new_nodes = []
for degree in degrees:
cur_nodes = nodes_by_degree[degree]
self.nodes[(ntype, degree)] = cur_nodes
new_nodes.extend(cur_nodes)
self.nodes[ntype] = new_nodes
def feature_array(self, ntype):
assert ntype in self.nodes
return np.array([node.features for node in self.nodes[ntype]])
def rdkit_ix_array(self):
return np.array([node.rdkit_ix for node in self.nodes['atom']])
def neighbor_list(self, self_ntype, neighbor_ntype):
assert self_ntype in self.nodes and neighbor_ntype in self.nodes
neighbor_idxs = {n : i for i, n in enumerate(self.nodes[neighbor_ntype])}
return [[neighbor_idxs[neighbor]
for neighbor in self_node.get_neighbors(neighbor_ntype)]
for self_node in self.nodes[self_ntype]]
class Node(object):
__slots__ = ['ntype', 'features', '_neighbors', 'rdkit_ix']
def __init__(self, ntype, features, rdkit_ix):
self.ntype = ntype
self.features = features
self._neighbors = []
self.rdkit_ix = rdkit_ix
def add_neighbors(self, neighbor_list):
for neighbor in neighbor_list:
self._neighbors.append(neighbor)
neighbor._neighbors.append(self)
def get_neighbors(self, ntype):
return [n for n in self._neighbors if n.ntype == ntype]
def graph_from_smiles_tuple(smiles_tuple):
graph_list = [graph_from_smiles(s) for s in smiles_tuple]
big_graph = MolGraph()
for subgraph in graph_list:
big_graph.add_subgraph(subgraph)
# This sorting allows an efficient (but brittle!) indexing later on.
big_graph.sort_nodes_by_degree('atom')
return big_graph
def graph_from_smiles(smiles):
graph = MolGraph()
mol = MolFromSmiles(smiles)
if not mol:
raise ValueError("Could not parse SMILES string:", smiles)
atoms_by_rd_idx = {}
for atom in mol.GetAtoms():
new_atom_node = graph.new_node('atom', features=atom_features(atom), rdkit_ix=atom.GetIdx())
atoms_by_rd_idx[atom.GetIdx()] = new_atom_node
for bond in mol.GetBonds():
atom1_node = atoms_by_rd_idx[bond.GetBeginAtom().GetIdx()]
atom2_node = atoms_by_rd_idx[bond.GetEndAtom().GetIdx()]
new_bond_node = graph.new_node('bond', features=bond_features(bond))
new_bond_node.add_neighbors((atom1_node, atom2_node))
atom1_node.add_neighbors((atom2_node,))
mol_node = graph.new_node('molecule')
mol_node.add_neighbors(graph.nodes['atom'])
return graph
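# Illustrative usage sketch (added; not part of the original module, assumes
# RDKit is installed as the imports above require). Ethanol ('CCO') has three
# heavy atoms and two bonds.
if __name__ == '__main__':
    graph = graph_from_smiles('CCO')
    print(len(graph.nodes['atom']))              # 3
    print(len(graph.nodes['bond']))              # 2
    print(graph.neighbor_list('bond', 'atom'))   # atom indices for each bond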
|
HIPS/neural-fingerprint
|
neuralfingerprint/mol_graph.py
|
Python
|
mit
| 3,492
| 0.002864
|
import csv
import operator
import itertools
import math
import logger1
import re
# main piece of code for calculating & writing alignments from processed data
def calculateAlignments(utterances, markers, smoothing, outputFile, shouldWriteHeader, corpusType='CHILDES'):
markers = checkMarkers(markers)
groupedUtterances = group(utterances)
metaData = metaDataExtractor(groupedUtterances,markers,corpusType)
results = runFormula(metaData, markers, smoothing,corpusType)
writeFile(results, outputFile, shouldWriteHeader)
return results
# Converts list of markers in a message to categories
def determineCategories(msgMarkers,catdict,useREs=False):
msgCats = []
#iterate over catdict items {category: [words/REs]}
for cd in catdict.items():
if useREs:
if any(any(wordre.match(marker) for marker in msgMarkers) for wordre in cd[1]): #if REs, see if any tokens match each RE
msgCats.append(cd[0])
else:
if any(word in msgMarkers for word in cd[1]): #if just words, see if any word in category also in msg
msgCats.append(cd[0])
return msgCats
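# Worked example (added for clarity; the category dictionary and message are
# made up): with catdict = {'pronoun': ['i', 'you']} and
# msgMarkers = ['you', 'walked'], determineCategories returns ['pronoun'].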
# Groups tweets by conversation ids
def group(utterances):
utterances.sort(key=operator.itemgetter('convId'))
list1 = []
for key, items in itertools.groupby(utterances, operator.itemgetter('convId')):
list1.append(list(items))
return list1
#code to convert marker list structure to {category: [words]} structure
def makeCatDict(markers,useREs=False):
mdict = {}
for m in markers:
marker = re.compile(''.join([m["marker"], '$'])) if useREs else m["marker"]
if m["category"] in mdict:
mdict[m["category"]].append(marker)
else:
mdict[m["category"]] = [marker]
#mdict[m["category"]] = mdict.get(m["category"],[]).append(m["marker"]) #Need to swap marker and category labels
#mdict[m["marker"]] = mdict.get(m["marker"],[]).append(m["category"])
return(mdict)
#Given a conversation & the list of markers, extract counts of speaker & replier using each marker
def findMarkersInConvo(markers,convo):
ba = {} # Number of times Person A and person B says the marker["marker"]
bna = {}
nbna = {}
nba = {}
for utterance in convo:
for j, marker in enumerate(markers):
word = marker["marker"]
msgMarker = word in utterance["msgMarkers"]
replyMarker = word in utterance["replyMarkers"]
if msgMarker and replyMarker:
ba[word] = ba.get(word,0) + 1
elif replyMarker and not msgMarker:
bna[word] = bna.get(word,0) + 1
elif not replyMarker and msgMarker:
nba[word] = nba.get(word,0) + 1
else:
nbna[word] = nbna.get(word,0) + 1
return({'ba': ba,'bna': bna,'nba': nba,'nbna': nbna})
#Copying portions of one dictionary to another (faster than copy(), if you can believe it!)
def addFeats(toAppend,utterance,renameIds=True,corpusType=''):
if renameIds:
toAppend["speakerId"] = utterance["msgUserId"]
toAppend["replierId"] = utterance["replyUserId"]
else:
toAppend["speakerId"] = utterance["speakerId"]
toAppend["replierId"] = utterance["replierId"]
if(corpusType=='Twitter'):
toAppend["reciprocity"] = utterance["reciprocity"]
toAppend["verifiedSpeaker"] = bool(utterance["verifiedSpeaker"])
toAppend["verifiedReplier"] = bool(utterance["verifiedReplier"])
toAppend["speakerFollowers"] = utterance["speakerFollowers"]
toAppend["replierFollowers"] = utterance["replierFollowers"]
elif(corpusType=='CHILDES'):
toAppend["corpus"] = utterance["corpus"]
toAppend["docId"] = utterance["docId"]
return(toAppend)
# calculates the marker usage counts from conversations
def metaDataExtractor(groupedUtterances, markers,corpusType=''):
results = []
for i, convo in enumerate(groupedUtterances):
        if(i % 2500 == 10):
logger1.log("On " + str(i) + " of " + str(len(groupedUtterances)))
toAppend = findMarkersInConvo(markers,convo)
toAppend = addFeats(toAppend,convo[0],True,corpusType)
results.append(toAppend)
return results
# extracts a list of markers from the marker dictionary
def allMarkers(markers):
categories = []
for marker in markers:
categories.append(marker["marker"])
return list(set(categories))
# creates a dictionary corresponding to a single row of the final output (speaker-replier-marker triplet)
def createAlignmentDict(category,result,smoothing,corpusType=''):
toAppend = {}
ba = int(result["ba"].get(category, 0))
bna = int(result["bna"].get(category, 0))
nbna = int(result["nbna"].get(category, 0))
nba = int(result["nba"].get(category, 0))
#Calculating alignment only makes sense if we've seen messages with and without the marker
if (((ba+nba)==0 or (bna+nbna)==0)):
return(None)
toAppend = addFeats(toAppend,result,False,corpusType)
toAppend["category"] = category
#Calculating Echoes of Power alignment
powerNum = ba
powerDenom = ba+nba
baseNum = ba+bna
baseDenom = ba+nba+bna+nbna
if(powerDenom != 0 and baseDenom != 0):
dnmalignment = powerNum/powerDenom - baseNum/baseDenom
toAppend["dnmalignment"] = dnmalignment
else:
toAppend["dnmalignment"] = False
powerNum = ba
powerDenom = ba+nba
baseDenom = bna+nbna
baseNum = bna
powerProb = math.log((powerNum+smoothing)/float(powerDenom+2*smoothing))
baseProb = math.log((baseNum+smoothing)/float(baseDenom+2*smoothing))
alignment = powerProb - baseProb
toAppend["alignment"] = alignment
toAppend["ba"] = ba
toAppend["bna"] = bna
toAppend["nba"] = nba
toAppend["nbna"] = nbna
return(toAppend)
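# Worked example (added for clarity; the counts are made up): with smoothing=1,
# ba=6, nba=4, bna=2 and nbna=8, the log-odds alignment computed above is
#   log((6+1)/(10+2)) - log((2+1)/(10+2)) ~= 0.85,
# i.e. the replier uses the marker far more often right after the speaker has.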
# Gets us from the meta-data to the final output file
def runFormula(results, markers, smoothing,corpusType):
toReturn = []
categories = allMarkers(markers)
for i, result in enumerate(results):
        if(i % 1000 == 10):
logger1.log("On result " + str(i) + " of " + str(len(results)))
for j, category in enumerate(categories):
toAppend = createAlignmentDict(category,result,smoothing,corpusType)
if toAppend is not None:
toReturn.append(toAppend)
toReturn = sorted(toReturn, key=lambda k: (k["speakerId"],k["replierId"],k["category"]))
return toReturn
# Writes stuff to the output file
def writeFile(results, outputFile, shouldWriteHeader):
if len(results) == 0:
logger1.log("No results to write =(")
return
toWrite = []
header = sorted(list(results[0].keys()))
for row in results:
toAppend = []
for key in header:
toAppend.append(row[key])
toWrite.append(toAppend)
if shouldWriteHeader:
with open(outputFile, "w", newline='') as f:
writer = csv.writer(f)
writer.writerows([header])
f.close()
with open(outputFile, "a", newline='') as f:
writer = csv.writer(f)
writer.writerows(toWrite)
f.close()
# Reads a list of markers from the markersFile
def readMarkers(markersFile,dialect=None):
if dialect is None:
reader = csv.reader(open(markersFile))
else:
reader = csv.reader(open(markersFile),dialect=dialect)
markers = []
#print('marker\tcategory')
for i, row in enumerate(reader):
toAppend = {}
toAppend["marker"] = row[0]
if(len(row) > 1):
toAppend["category"] = row[1]
else:
toAppend["category"] = row[0]
markers.append(toAppend)
#print(toAppend["marker"]+'\t'+toAppend["category"])
return markers
# checks & adapts the structure of the marker list to the appropriate one
def checkMarkers(markers):
toReturn = []
for marker in markers:
if isinstance(marker, str):
toReturn.append({"marker": marker, "category": marker})
else:
toReturn.append(marker)
return toReturn
|
langcog/alignment
|
parsers/alignment.py
|
Python
|
gpl-2.0
| 7,365
| 0.035709
|
__all__ = ['deque', 'defaultdict', 'namedtuple', 'UserDict', 'UserList',
'UserString', 'Counter', 'OrderedDict', 'ChainMap']
# For backwards compatibility, continue to make the collections ABCs
# available through the collections module.
from _collections_abc import *
import _collections_abc
__all__ += _collections_abc.__all__
from _collections import deque, defaultdict
from operator import itemgetter as _itemgetter, eq as _eq
from keyword import iskeyword as _iskeyword
import sys as _sys
import heapq as _heapq
from _weakref import proxy as _proxy
from itertools import repeat as _repeat, chain as _chain, starmap as _starmap
from reprlib import recursive_repr as _recursive_repr
################################################################################
### OrderedDict
################################################################################
class _Link(object):
__slots__ = 'prev', 'next', 'key', '__weakref__'
class OrderedDict(dict):
'Dictionary that remembers insertion order'
# An inherited dict maps keys to values.
# The inherited dict provides __getitem__, __len__, __contains__, and get.
# The remaining methods are order-aware.
# Big-O running times for all methods are the same as regular dictionaries.
# The internal self.__map dict maps keys to links in a doubly linked list.
# The circular doubly linked list starts and ends with a sentinel element.
# The sentinel element never gets deleted (this simplifies the algorithm).
# The sentinel is in self.__hardroot with a weakref proxy in self.__root.
# The prev links are weakref proxies (to prevent circular references).
# Individual links are kept alive by the hard reference in self.__map.
# Those hard references disappear when a key is deleted from an OrderedDict.
def __init__(self, *args, **kwds):
'''Initialize an ordered dictionary. The signature is the same as
regular dictionaries, but keyword arguments are not recommended because
their insertion order is arbitrary.
'''
if len(args) > 1:
raise TypeError('expected at most 1 arguments, got %d' % len(args))
try:
self.__root
except AttributeError:
self.__hardroot = _Link()
self.__root = root = _proxy(self.__hardroot)
root.prev = root.next = root
self.__map = {}
self.__update(*args, **kwds)
def __setitem__(self, key, value,
dict_setitem=dict.__setitem__, proxy=_proxy, Link=_Link):
'od.__setitem__(i, y) <==> od[i]=y'
# Setting a new item creates a new link at the end of the linked list,
# and the inherited dictionary is updated with the new key/value pair.
if key not in self:
self.__map[key] = link = Link()
root = self.__root
last = root.prev
link.prev, link.next, link.key = last, root, key
last.next = link
root.prev = proxy(link)
dict_setitem(self, key, value)
def __delitem__(self, key, dict_delitem=dict.__delitem__):
'od.__delitem__(y) <==> del od[y]'
# Deleting an existing item uses self.__map to find the link which gets
# removed by updating the links in the predecessor and successor nodes.
dict_delitem(self, key)
link = self.__map.pop(key)
link_prev = link.prev
link_next = link.next
link_prev.next = link_next
link_next.prev = link_prev
def __iter__(self):
'od.__iter__() <==> iter(od)'
# Traverse the linked list in order.
root = self.__root
curr = root.next
while curr is not root:
yield curr.key
curr = curr.next
def __reversed__(self):
'od.__reversed__() <==> reversed(od)'
# Traverse the linked list in reverse order.
root = self.__root
curr = root.prev
while curr is not root:
yield curr.key
curr = curr.prev
def clear(self):
'od.clear() -> None. Remove all items from od.'
root = self.__root
root.prev = root.next = root
self.__map.clear()
dict.clear(self)
def popitem(self, last=True):
'''od.popitem() -> (k, v), return and remove a (key, value) pair.
Pairs are returned in LIFO order if last is true or FIFO order if false.
'''
if not self:
raise KeyError('dictionary is empty')
root = self.__root
if last:
link = root.prev
link_prev = link.prev
link_prev.next = root
root.prev = link_prev
else:
link = root.next
link_next = link.next
root.next = link_next
link_next.prev = root
key = link.key
del self.__map[key]
value = dict.pop(self, key)
return key, value
def move_to_end(self, key, last=True):
'''Move an existing element to the end (or beginning if last==False).
Raises KeyError if the element does not exist.
When last=True, acts like a fast version of self[key]=self.pop(key).
'''
link = self.__map[key]
link_prev = link.prev
link_next = link.next
link_prev.next = link_next
link_next.prev = link_prev
root = self.__root
if last:
last = root.prev
link.prev = last
link.next = root
last.next = root.prev = link
else:
first = root.next
link.prev = root
link.next = first
root.next = first.prev = link
def __sizeof__(self):
sizeof = _sys.getsizeof
n = len(self) + 1 # number of links including root
size = sizeof(self.__dict__) # instance dictionary
size += sizeof(self.__map) * 2 # internal dict and inherited dict
size += sizeof(self.__hardroot) * n # link objects
size += sizeof(self.__root) * n # proxy objects
return size
update = __update = MutableMapping.update
keys = MutableMapping.keys
values = MutableMapping.values
items = MutableMapping.items
__ne__ = MutableMapping.__ne__
__marker = object()
def pop(self, key, default=__marker):
'''od.pop(k[,d]) -> v, remove specified key and return the corresponding
value. If key is not found, d is returned if given, otherwise KeyError
is raised.
'''
if key in self:
result = self[key]
del self[key]
return result
if default is self.__marker:
raise KeyError(key)
return default
def setdefault(self, key, default=None):
'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od'
if key in self:
return self[key]
self[key] = default
return default
@_recursive_repr()
def __repr__(self):
'od.__repr__() <==> repr(od)'
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, list(self.items()))
def __reduce__(self):
'Return state information for pickling'
inst_dict = vars(self).copy()
for k in vars(OrderedDict()):
inst_dict.pop(k, None)
return self.__class__, (), inst_dict or None, None, iter(self.items())
def copy(self):
'od.copy() -> a shallow copy of od'
return self.__class__(self)
@classmethod
def fromkeys(cls, iterable, value=None):
'''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S.
If not specified, the value defaults to None.
'''
self = cls()
for key in iterable:
self[key] = value
return self
def __eq__(self, other):
'''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive
while comparison to a regular mapping is order-insensitive.
'''
if isinstance(other, OrderedDict):
return dict.__eq__(self, other) and all(map(_eq, self, other))
return dict.__eq__(self, other)
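# Illustrative behaviour sketch (added; not part of the stdlib source), in the
# doctest style used elsewhere in this module:
# >>> d = OrderedDict.fromkeys('abcde')
# >>> d.move_to_end('b')
# >>> ''.join(d.keys())
# 'acdeb'
# >>> d.move_to_end('b', last=False)
# >>> ''.join(d.keys())
# 'bacde'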
################################################################################
### namedtuple
################################################################################
_class_template = """\
from builtins import property as _property, tuple as _tuple
from operator import itemgetter as _itemgetter
from collections import OrderedDict
class {typename}(tuple):
'{typename}({arg_list})'
__slots__ = ()
_fields = {field_names!r}
def __new__(_cls, {arg_list}):
'Create new instance of {typename}({arg_list})'
return _tuple.__new__(_cls, ({arg_list}))
@classmethod
def _make(cls, iterable, new=tuple.__new__, len=len):
'Make a new {typename} object from a sequence or iterable'
result = new(cls, iterable)
if len(result) != {num_fields:d}:
raise TypeError('Expected {num_fields:d} arguments, got %d' % len(result))
return result
def _replace(_self, **kwds):
'Return a new {typename} object replacing specified fields with new values'
result = _self._make(map(kwds.pop, {field_names!r}, _self))
if kwds:
raise ValueError('Got unexpected field names: %r' % list(kwds))
return result
def __repr__(self):
'Return a nicely formatted representation string'
return self.__class__.__name__ + '({repr_fmt})' % self
@property
def __dict__(self):
'A new OrderedDict mapping field names to their values'
return OrderedDict(zip(self._fields, self))
def _asdict(self):
'Return a new OrderedDict which maps field names to their values.'
return self.__dict__
def __getnewargs__(self):
'Return self as a plain tuple. Used by copy and pickle.'
return tuple(self)
def __getstate__(self):
'Exclude the OrderedDict from pickling'
return None
{field_defs}
"""
_repr_template = '{name}=%r'
_field_template = '''\
{name} = _property(_itemgetter({index:d}), doc='Alias for field number {index:d}')
'''
def namedtuple(typename, field_names, verbose=False, rename=False):
"""Returns a new subclass of tuple with named fields.
>>> Point = namedtuple('Point', ['x', 'y'])
>>> Point.__doc__ # docstring for the new class
'Point(x, y)'
>>> p = Point(11, y=22) # instantiate with positional args or keywords
>>> p[0] + p[1] # indexable like a plain tuple
33
>>> x, y = p # unpack like a regular tuple
>>> x, y
(11, 22)
    >>> p.x + p.y                       # fields also accessible by name
33
>>> d = p._asdict() # convert to a dictionary
>>> d['x']
11
>>> Point(**d) # convert from a dictionary
Point(x=11, y=22)
>>> p._replace(x=100) # _replace() is like str.replace() but targets named fields
Point(x=100, y=22)
"""
# Validate the field names. At the user's option, either generate an error
# message or automatically replace the field name with a valid name.
if isinstance(field_names, str):
field_names = field_names.replace(',', ' ').split()
field_names = list(map(str, field_names))
typename = str(typename)
if rename:
seen = set()
for index, name in enumerate(field_names):
if (not name.isidentifier()
or _iskeyword(name)
or name.startswith('_')
or name in seen):
field_names[index] = '_%d' % index
seen.add(name)
for name in [typename] + field_names:
if type(name) != str:
raise TypeError('Type names and field names must be strings')
if not name.isidentifier():
raise ValueError('Type names and field names must be valid '
'identifiers: %r' % name)
if _iskeyword(name):
raise ValueError('Type names and field names cannot be a '
'keyword: %r' % name)
seen = set()
for name in field_names:
if name.startswith('_') and not rename:
raise ValueError('Field names cannot start with an underscore: '
'%r' % name)
if name in seen:
raise ValueError('Encountered duplicate field name: %r' % name)
seen.add(name)
# Fill-in the class template
class_definition = _class_template.format(
typename = typename,
field_names = tuple(field_names),
num_fields = len(field_names),
arg_list = repr(tuple(field_names)).replace("'", "")[1:-1],
repr_fmt = ', '.join(_repr_template.format(name=name)
for name in field_names),
field_defs = '\n'.join(_field_template.format(index=index, name=name)
for index, name in enumerate(field_names))
)
# Execute the template string in a temporary namespace and support
# tracing utilities by setting a value for frame.f_globals['__name__']
namespace = dict(__name__='namedtuple_%s' % typename)
exec(class_definition, namespace)
result = namespace[typename]
result._source = class_definition
if verbose:
print(result._source)
# For pickling to work, the __module__ variable needs to be set to the frame
# where the named tuple is created. Bypass this step in environments where
# sys._getframe is not defined (Jython for example) or sys._getframe is not
# defined for arguments greater than 0 (IronPython).
try:
result.__module__ = _sys._getframe(1).f_globals.get('__name__', '__main__')
except (AttributeError, ValueError):
pass
return result
########################################################################
### Counter
########################################################################
def _count_elements(mapping, iterable):
'Tally elements from the iterable.'
mapping_get = mapping.get
for elem in iterable:
mapping[elem] = mapping_get(elem, 0) + 1
try: # Load C helper function if available
from _collections import _count_elements
except ImportError:
pass
class Counter(dict):
'''Dict subclass for counting hashable items. Sometimes called a bag
or multiset. Elements are stored as dictionary keys and their counts
are stored as dictionary values.
>>> c = Counter('abcdeabcdabcaba') # count elements from a string
>>> c.most_common(3) # three most common elements
[('a', 5), ('b', 4), ('c', 3)]
>>> sorted(c) # list all unique elements
['a', 'b', 'c', 'd', 'e']
>>> ''.join(sorted(c.elements())) # list elements with repetitions
'aaaaabbbbcccdde'
>>> sum(c.values()) # total of all counts
15
>>> c['a'] # count of letter 'a'
5
>>> for elem in 'shazam': # update counts from an iterable
... c[elem] += 1 # by adding 1 to each element's count
>>> c['a'] # now there are seven 'a'
7
>>> del c['b'] # remove all 'b'
>>> c['b'] # now there are zero 'b'
0
>>> d = Counter('simsalabim') # make another counter
>>> c.update(d) # add in the second counter
>>> c['a'] # now there are nine 'a'
9
>>> c.clear() # empty the counter
>>> c
Counter()
Note: If a count is set to zero or reduced to zero, it will remain
in the counter until the entry is deleted or the counter is cleared:
>>> c = Counter('aaabbc')
>>> c['b'] -= 2 # reduce the count of 'b' by two
>>> c.most_common() # 'b' is still in, but its count is zero
[('a', 3), ('c', 1), ('b', 0)]
'''
# References:
# http://en.wikipedia.org/wiki/Multiset
# http://www.gnu.org/software/smalltalk/manual-base/html_node/Bag.html
# http://www.demo2s.com/Tutorial/Cpp/0380__set-multiset/Catalog0380__set-multiset.htm
# http://code.activestate.com/recipes/259174/
# Knuth, TAOCP Vol. II section 4.6.3
def __init__(self, iterable=None, **kwds):
'''Create a new, empty Counter object. And if given, count elements
from an input iterable. Or, initialize the count from another mapping
of elements to their counts.
>>> c = Counter() # a new, empty counter
>>> c = Counter('gallahad') # a new counter from an iterable
>>> c = Counter({'a': 4, 'b': 2}) # a new counter from a mapping
>>> c = Counter(a=4, b=2) # a new counter from keyword args
'''
super().__init__()
self.update(iterable, **kwds)
def __missing__(self, key):
'The count of elements not in the Counter is zero.'
# Needed so that self[missing_item] does not raise KeyError
return 0
def most_common(self, n=None):
'''List the n most common elements and their counts from the most
common to the least. If n is None, then list all element counts.
>>> Counter('abcdeabcdabcaba').most_common(3)
[('a', 5), ('b', 4), ('c', 3)]
'''
# Emulate Bag.sortedByCount from Smalltalk
if n is None:
return sorted(self.items(), key=_itemgetter(1), reverse=True)
return _heapq.nlargest(n, self.items(), key=_itemgetter(1))
def elements(self):
'''Iterator over elements repeating each as many times as its count.
>>> c = Counter('ABCABC')
>>> sorted(c.elements())
['A', 'A', 'B', 'B', 'C', 'C']
# Knuth's example for prime factors of 1836: 2**2 * 3**3 * 17**1
>>> prime_factors = Counter({2: 2, 3: 3, 17: 1})
>>> product = 1
>>> for factor in prime_factors.elements(): # loop over factors
... product *= factor # and multiply them
>>> product
1836
Note, if an element's count has been set to zero or is a negative
number, elements() will ignore it.
'''
# Emulate Bag.do from Smalltalk and Multiset.begin from C++.
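        # The line below expands, e.g. for Counter(a=2, b=1):
        # items() -> [('a', 2), ('b', 1)]; starmap(repeat, ...) yields
        # repeat('a', 2) and repeat('b', 1); chaining them gives 'a', 'a', 'b'.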
return _chain.from_iterable(_starmap(_repeat, self.items()))
# Override dict methods where necessary
@classmethod
def fromkeys(cls, iterable, v=None):
# There is no equivalent method for counters because setting v=1
# means that no element can have a count greater than one.
raise NotImplementedError(
'Counter.fromkeys() is undefined. Use Counter(iterable) instead.')
def update(self, iterable=None, **kwds):
'''Like dict.update() but add counts instead of replacing them.
Source can be an iterable, a dictionary, or another Counter instance.
>>> c = Counter('which')
>>> c.update('witch') # add elements from another iterable
>>> d = Counter('watch')
>>> c.update(d) # add elements from another counter
>>> c['h'] # four 'h' in which, witch, and watch
4
'''
# The regular dict.update() operation makes no sense here because the
        # replace behavior results in some of the original untouched counts
        # being mixed in with all of the other counts, a mishmash that
        # doesn't have a straightforward interpretation in most counting
# contexts. Instead, we implement straight-addition. Both the inputs
# and outputs are allowed to contain zero and negative counts.
if iterable is not None:
if isinstance(iterable, Mapping):
if self:
self_get = self.get
for elem, count in iterable.items():
self[elem] = count + self_get(elem, 0)
else:
super().update(iterable) # fast path when counter is empty
else:
_count_elements(self, iterable)
if kwds:
self.update(kwds)
def subtract(self, iterable=None, **kwds):
'''Like dict.update() but subtracts counts instead of replacing them.
Counts can be reduced below zero. Both the inputs and outputs are
allowed to contain zero and negative counts.
Source can be an iterable, a dictionary, or another Counter instance.
>>> c = Counter('which')
>>> c.subtract('witch') # subtract elements from another iterable
>>> c.subtract(Counter('watch')) # subtract elements from another counter
>>> c['h'] # 2 in which, minus 1 in witch, minus 1 in watch
0
>>> c['w'] # 1 in which, minus 1 in witch, minus 1 in watch
-1
'''
if iterable is not None:
self_get = self.get
if isinstance(iterable, Mapping):
for elem, count in iterable.items():
self[elem] = self_get(elem, 0) - count
else:
for elem in iterable:
self[elem] = self_get(elem, 0) - 1
if kwds:
self.subtract(kwds)
def copy(self):
'Return a shallow copy.'
return self.__class__(self)
def __reduce__(self):
return self.__class__, (dict(self),)
def __delitem__(self, elem):
'Like dict.__delitem__() but does not raise KeyError for missing values.'
if elem in self:
super().__delitem__(elem)
def __repr__(self):
if not self:
return '%s()' % self.__class__.__name__
try:
items = ', '.join(map('%r: %r'.__mod__, self.most_common()))
return '%s({%s})' % (self.__class__.__name__, items)
except TypeError:
# handle case where values are not orderable
return '{0}({1!r})'.format(self.__class__.__name__, dict(self))
# Multiset-style mathematical operations discussed in:
# Knuth TAOCP Volume II section 4.6.3 exercise 19
# and at http://en.wikipedia.org/wiki/Multiset
#
# Outputs guaranteed to only include positive counts.
#
# To strip negative and zero counts, add-in an empty counter:
# c += Counter()
def __add__(self, other):
'''Add counts from two counters.
>>> Counter('abbb') + Counter('bcc')
Counter({'b': 4, 'c': 2, 'a': 1})
'''
if not isinstance(other, Counter):
return NotImplemented
result = Counter()
for elem, count in self.items():
newcount = count + other[elem]
if newcount > 0:
result[elem] = newcount
for elem, count in other.items():
if elem not in self and count > 0:
result[elem] = count
return result
def __sub__(self, other):
''' Subtract count, but keep only results with positive counts.
>>> Counter('abbbc') - Counter('bccd')
Counter({'b': 2, 'a': 1})
'''
if not isinstance(other, Counter):
return NotImplemented
result = Counter()
for elem, count in self.items():
newcount = count - other[elem]
if newcount > 0:
result[elem] = newcount
for elem, count in other.items():
if elem not in self and count < 0:
result[elem] = 0 - count
return result
def __or__(self, other):
'''Union is the maximum of value in either of the input counters.
>>> Counter('abbb') | Counter('bcc')
Counter({'b': 3, 'c': 2, 'a': 1})
'''
if not isinstance(other, Counter):
return NotImplemented
result = Counter()
for elem, count in self.items():
other_count = other[elem]
newcount = other_count if count < other_count else count
if newcount > 0:
result[elem] = newcount
for elem, count in other.items():
if elem not in self and count > 0:
result[elem] = count
return result
def __and__(self, other):
''' Intersection is the minimum of corresponding counts.
>>> Counter('abbb') & Counter('bcc')
Counter({'b': 1})
'''
if not isinstance(other, Counter):
return NotImplemented
result = Counter()
for elem, count in self.items():
other_count = other[elem]
newcount = count if count < other_count else other_count
if newcount > 0:
result[elem] = newcount
return result
def __pos__(self):
'Adds an empty counter, effectively stripping negative and zero counts'
return self + Counter()
def __neg__(self):
'''Subtracts from an empty counter. Strips positive and zero counts,
and flips the sign on negative counts.
'''
return Counter() - self
def _keep_positive(self):
'''Internal method to strip elements with a negative or zero count'''
nonpositive = [elem for elem, count in self.items() if not count > 0]
for elem in nonpositive:
del self[elem]
return self
def __iadd__(self, other):
'''Inplace add from another counter, keeping only positive counts.
>>> c = Counter('abbb')
>>> c += Counter('bcc')
>>> c
Counter({'b': 4, 'c': 2, 'a': 1})
'''
for elem, count in other.items():
self[elem] += count
return self._keep_positive()
def __isub__(self, other):
'''Inplace subtract counter, but keep only results with positive counts.
>>> c = Counter('abbbc')
>>> c -= Counter('bccd')
>>> c
Counter({'b': 2, 'a': 1})
'''
for elem, count in other.items():
self[elem] -= count
return self._keep_positive()
def __ior__(self, other):
'''Inplace union is the maximum of value from either counter.
>>> c = Counter('abbb')
>>> c |= Counter('bcc')
>>> c
Counter({'b': 3, 'c': 2, 'a': 1})
'''
for elem, other_count in other.items():
count = self[elem]
if other_count > count:
self[elem] = other_count
return self._keep_positive()
def __iand__(self, other):
'''Inplace intersection is the minimum of corresponding counts.
>>> c = Counter('abbb')
>>> c &= Counter('bcc')
>>> c
Counter({'b': 1})
'''
for elem, count in self.items():
other_count = other[elem]
if other_count < count:
self[elem] = other_count
return self._keep_positive()
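# A minimal usage sketch of the multiset conventions described above (the
# helper name is hypothetical and not part of this module's API); it simply
# exercises the operators defined on Counter.
def _counter_multiset_demo():
    '''Illustrate the multiset conventions for zero and negative counts.
    >>> c = Counter(a=2, b=-1, c=0)
    >>> c += Counter()                      # adding an empty counter strips <= 0 counts
    >>> c
    Counter({'a': 2})
    >>> -Counter(a=-3, b=1)                 # unary minus flips negatives, drops the rest
    Counter({'a': 3})
    >>> Counter('abbb') & Counter('bcc')    # intersection keeps the minimum count
    Counter({'b': 1})
    '''
    return None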
########################################################################
### ChainMap (helper for configparser and string.Template)
########################################################################
class ChainMap(MutableMapping):
''' A ChainMap groups multiple dicts (or other mappings) together
to create a single, updateable view.
    The underlying mappings are stored in a list. That list is public and can be
accessed or updated using the *maps* attribute. There is no other state.
Lookups search the underlying mappings successively until a key is found.
In contrast, writes, updates, and deletions only operate on the first
mapping.
'''
def __init__(self, *maps):
'''Initialize a ChainMap by setting *maps* to the given mappings.
If no mappings are provided, a single empty dictionary is used.
'''
self.maps = list(maps) or [{}] # always at least one map
def __missing__(self, key):
raise KeyError(key)
def __getitem__(self, key):
for mapping in self.maps:
try:
return mapping[key] # can't use 'key in mapping' with defaultdict
except KeyError:
pass
return self.__missing__(key) # support subclasses that define __missing__
def get(self, key, default=None):
return self[key] if key in self else default
def __len__(self):
return len(set().union(*self.maps)) # reuses stored hash values if possible
def __iter__(self):
return iter(set().union(*self.maps))
def __contains__(self, key):
return any(key in m for m in self.maps)
def __bool__(self):
return any(self.maps)
@_recursive_repr()
def __repr__(self):
return '{0.__class__.__name__}({1})'.format(
self, ', '.join(map(repr, self.maps)))
@classmethod
def fromkeys(cls, iterable, *args):
'Create a ChainMap with a single dict created from the iterable.'
return cls(dict.fromkeys(iterable, *args))
def copy(self):
'New ChainMap or subclass with a new copy of maps[0] and refs to maps[1:]'
return self.__class__(self.maps[0].copy(), *self.maps[1:])
__copy__ = copy
def new_child(self, m=None): # like Django's Context.push()
'''
New ChainMap with a new map followed by all previous maps. If no
map is provided, an empty dict is used.
'''
if m is None:
m = {}
return self.__class__(m, *self.maps)
@property
def parents(self): # like Django's Context.pop()
'New ChainMap from maps[1:].'
return self.__class__(*self.maps[1:])
def __setitem__(self, key, value):
self.maps[0][key] = value
def __delitem__(self, key):
try:
del self.maps[0][key]
except KeyError:
raise KeyError('Key not found in the first mapping: {!r}'.format(key))
def popitem(self):
        'Remove and return an item pair from maps[0]. Raise KeyError if maps[0] is empty.'
try:
return self.maps[0].popitem()
except KeyError:
raise KeyError('No keys found in the first mapping.')
def pop(self, key, *args):
'Remove *key* from maps[0] and return its value. Raise KeyError if *key* not in maps[0].'
try:
return self.maps[0].pop(key, *args)
except KeyError:
raise KeyError('Key not found in the first mapping: {!r}'.format(key))
def clear(self):
'Clear maps[0], leaving maps[1:] intact.'
self.maps[0].clear()
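# A minimal usage sketch (the helper name is hypothetical, not part of this
# module) illustrating the behaviour documented above: lookups search the maps
# in order, while writes and deletions only touch maps[0].
def _chainmap_demo():
    '''Illustrate ChainMap lookup and write semantics.
    >>> defaults = {'colour': 'red', 'user': 'guest'}
    >>> overrides = {'user': 'admin'}
    >>> cm = ChainMap(overrides, defaults)
    >>> cm['user']                  # found in the first map
    'admin'
    >>> cm['colour']                # falls through to the second map
    'red'
    >>> cm['colour'] = 'blue'       # writes only affect maps[0]
    >>> overrides['colour'], defaults['colour']
    ('blue', 'red')
    '''
    return None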
################################################################################
### UserDict
################################################################################
class UserDict(MutableMapping):
# Start by filling-out the abstract methods
def __init__(self, dict=None, **kwargs):
self.data = {}
if dict is not None:
self.update(dict)
if len(kwargs):
self.update(kwargs)
def __len__(self): return len(self.data)
def __getitem__(self, key):
if key in self.data:
return self.data[key]
if hasattr(self.__class__, "__missing__"):
return self.__class__.__missing__(self, key)
raise KeyError(key)
def __setitem__(self, key, item): self.data[key] = item
def __delitem__(self, key): del self.data[key]
def __iter__(self):
return iter(self.data)
# Modify __contains__ to work correctly when __missing__ is present
def __contains__(self, key):
return key in self.data
# Now, add the methods in dicts but not in MutableMapping
def __repr__(self): return repr(self.data)
def copy(self):
if self.__class__ is UserDict:
return UserDict(self.data.copy())
import copy
data = self.data
try:
self.data = {}
c = copy.copy(self)
finally:
self.data = data
c.update(self)
return c
@classmethod
def fromkeys(cls, iterable, value=None):
d = cls()
for key in iterable:
d[key] = value
return d
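# A minimal sketch (this subclass is hypothetical, not part of the module)
# showing how __getitem__ defers to a __missing__ hook while __contains__
# still reports only the keys actually stored in self.data.
class _DefaultingUserDict(UserDict):
    '''UserDict subclass that returns a default for missing keys.
    >>> d = _DefaultingUserDict({'a': 1})
    >>> d['a']
    1
    >>> d['zzz']            # handled by __missing__ instead of raising KeyError
    0
    >>> 'zzz' in d          # __contains__ only consults the stored data
    False
    '''
    def __missing__(self, key):
        return 0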
################################################################################
### UserList
################################################################################
class UserList(MutableSequence):
"""A more or less complete user-defined wrapper around list objects."""
def __init__(self, initlist=None):
self.data = []
if initlist is not None:
# XXX should this accept an arbitrary sequence?
if type(initlist) == type(self.data):
self.data[:] = initlist
elif isinstance(initlist, UserList):
self.data[:] = initlist.data[:]
else:
self.data = list(initlist)
def __repr__(self): return repr(self.data)
def __lt__(self, other): return self.data < self.__cast(other)
def __le__(self, other): return self.data <= self.__cast(other)
def __eq__(self, other): return self.data == self.__cast(other)
def __ne__(self, other): return self.data != self.__cast(other)
def __gt__(self, other): return self.data > self.__cast(other)
def __ge__(self, other): return self.data >= self.__cast(other)
def __cast(self, other):
return other.data if isinstance(other, UserList) else other
def __contains__(self, item): return item in self.data
def __len__(self): return len(self.data)
def __getitem__(self, i): return self.data[i]
def __setitem__(self, i, item): self.data[i] = item
def __delitem__(self, i): del self.data[i]
def __add__(self, other):
if isinstance(other, UserList):
return self.__class__(self.data + other.data)
elif isinstance(other, type(self.data)):
return self.__class__(self.data + other)
return self.__class__(self.data + list(other))
def __radd__(self, other):
if isinstance(other, UserList):
return self.__class__(other.data + self.data)
elif isinstance(other, type(self.data)):
return self.__class__(other + self.data)
return self.__class__(list(other) + self.data)
def __iadd__(self, other):
if isinstance(other, UserList):
self.data += other.data
elif isinstance(other, type(self.data)):
self.data += other
else:
self.data += list(other)
return self
def __mul__(self, n):
return self.__class__(self.data*n)
__rmul__ = __mul__
def __imul__(self, n):
self.data *= n
return self
def append(self, item): self.data.append(item)
def insert(self, i, item): self.data.insert(i, item)
def pop(self, i=-1): return self.data.pop(i)
def remove(self, item): self.data.remove(item)
def clear(self): self.data.clear()
def copy(self): return self.__class__(self)
def count(self, item): return self.data.count(item)
def index(self, item, *args): return self.data.index(item, *args)
def reverse(self): self.data.reverse()
def sort(self, *args, **kwds): self.data.sort(*args, **kwds)
def extend(self, other):
if isinstance(other, UserList):
self.data.extend(other.data)
else:
self.data.extend(other)
################################################################################
### UserString
################################################################################
class UserString(Sequence):
def __init__(self, seq):
if isinstance(seq, str):
self.data = seq
elif isinstance(seq, UserString):
self.data = seq.data[:]
else:
self.data = str(seq)
def __str__(self): return str(self.data)
def __repr__(self): return repr(self.data)
def __int__(self): return int(self.data)
def __float__(self): return float(self.data)
def __complex__(self): return complex(self.data)
def __hash__(self): return hash(self.data)
def __eq__(self, string):
if isinstance(string, UserString):
return self.data == string.data
return self.data == string
def __ne__(self, string):
if isinstance(string, UserString):
return self.data != string.data
return self.data != string
def __lt__(self, string):
if isinstance(string, UserString):
return self.data < string.data
return self.data < string
def __le__(self, string):
if isinstance(string, UserString):
return self.data <= string.data
return self.data <= string
def __gt__(self, string):
if isinstance(string, UserString):
return self.data > string.data
return self.data > string
def __ge__(self, string):
if isinstance(string, UserString):
return self.data >= string.data
return self.data >= string
def __contains__(self, char):
if isinstance(char, UserString):
char = char.data
return char in self.data
def __len__(self): return len(self.data)
def __getitem__(self, index): return self.__class__(self.data[index])
def __add__(self, other):
if isinstance(other, UserString):
return self.__class__(self.data + other.data)
elif isinstance(other, str):
return self.__class__(self.data + other)
return self.__class__(self.data + str(other))
def __radd__(self, other):
if isinstance(other, str):
return self.__class__(other + self.data)
return self.__class__(str(other) + self.data)
def __mul__(self, n):
return self.__class__(self.data*n)
__rmul__ = __mul__
def __mod__(self, args):
return self.__class__(self.data % args)
# the following methods are defined in alphabetical order:
def capitalize(self): return self.__class__(self.data.capitalize())
def center(self, width, *args):
return self.__class__(self.data.center(width, *args))
def count(self, sub, start=0, end=_sys.maxsize):
if isinstance(sub, UserString):
sub = sub.data
return self.data.count(sub, start, end)
def encode(self, encoding=None, errors=None): # XXX improve this?
if encoding:
if errors:
return self.__class__(self.data.encode(encoding, errors))
return self.__class__(self.data.encode(encoding))
return self.__class__(self.data.encode())
def endswith(self, suffix, start=0, end=_sys.maxsize):
return self.data.endswith(suffix, start, end)
def expandtabs(self, tabsize=8):
return self.__class__(self.data.expandtabs(tabsize))
def find(self, sub, start=0, end=_sys.maxsize):
if isinstance(sub, UserString):
sub = sub.data
return self.data.find(sub, start, end)
def format(self, *args, **kwds):
return self.data.format(*args, **kwds)
def index(self, sub, start=0, end=_sys.maxsize):
return self.data.index(sub, start, end)
def isalpha(self): return self.data.isalpha()
def isalnum(self): return self.data.isalnum()
def isdecimal(self): return self.data.isdecimal()
def isdigit(self): return self.data.isdigit()
def isidentifier(self): return self.data.isidentifier()
def islower(self): return self.data.islower()
def isnumeric(self): return self.data.isnumeric()
def isspace(self): return self.data.isspace()
def istitle(self): return self.data.istitle()
def isupper(self): return self.data.isupper()
def join(self, seq): return self.data.join(seq)
def ljust(self, width, *args):
return self.__class__(self.data.ljust(width, *args))
def lower(self): return self.__class__(self.data.lower())
def lstrip(self, chars=None): return self.__class__(self.data.lstrip(chars))
def partition(self, sep):
return self.data.partition(sep)
def replace(self, old, new, maxsplit=-1):
if isinstance(old, UserString):
old = old.data
if isinstance(new, UserString):
new = new.data
return self.__class__(self.data.replace(old, new, maxsplit))
def rfind(self, sub, start=0, end=_sys.maxsize):
if isinstance(sub, UserString):
sub = sub.data
return self.data.rfind(sub, start, end)
def rindex(self, sub, start=0, end=_sys.maxsize):
return self.data.rindex(sub, start, end)
def rjust(self, width, *args):
return self.__class__(self.data.rjust(width, *args))
def rpartition(self, sep):
return self.data.rpartition(sep)
def rstrip(self, chars=None):
return self.__class__(self.data.rstrip(chars))
def split(self, sep=None, maxsplit=-1):
return self.data.split(sep, maxsplit)
def rsplit(self, sep=None, maxsplit=-1):
return self.data.rsplit(sep, maxsplit)
def splitlines(self, keepends=False): return self.data.splitlines(keepends)
def startswith(self, prefix, start=0, end=_sys.maxsize):
return self.data.startswith(prefix, start, end)
def strip(self, chars=None): return self.__class__(self.data.strip(chars))
def swapcase(self): return self.__class__(self.data.swapcase())
def title(self): return self.__class__(self.data.title())
def translate(self, *args):
return self.__class__(self.data.translate(*args))
def upper(self): return self.__class__(self.data.upper())
def zfill(self, width): return self.__class__(self.data.zfill(width))
|
Orav/kbengine
|
kbe/src/lib/python/Lib/collections/__init__.py
|
Python
|
lgpl-3.0
| 43,096
| 0.003202
|
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test proper accounting with an equivalent malleability clone
#
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
class TxnMallTest(BitcoinTestFramework):
def __init__(self):
super().__init__()
self.num_nodes = 4
self.setup_clean_chain = False
def add_options(self, parser):
parser.add_option("--mineblock", dest="mine_block", default=False, action="store_true",
help="Test double-spend of 1-confirmed transaction")
def setup_network(self):
# Start with split network:
return super(TxnMallTest, self).setup_network(True)
def run_test(self):
# All nodes should start with 1,250 BTC:
starting_balance = 1250
for i in range(4):
assert_equal(self.nodes[i].getbalance(), starting_balance)
self.nodes[i].getnewaddress("") # bug workaround, coins generated assigned to first getnewaddress!
# Assign coins to foo and bar accounts:
self.nodes[0].settxfee(.005)
node0_address_foo = self.nodes[0].getnewaddress("foo")
fund_foo_txid = self.nodes[0].sendfrom("", node0_address_foo, 1219)
fund_foo_tx = self.nodes[0].gettransaction(fund_foo_txid)
node0_address_bar = self.nodes[0].getnewaddress("bar")
fund_bar_txid = self.nodes[0].sendfrom("", node0_address_bar, 29)
fund_bar_tx = self.nodes[0].gettransaction(fund_bar_txid)
assert_equal(self.nodes[0].getbalance(""),
starting_balance - 1219 - 29 + fund_foo_tx["fee"] + fund_bar_tx["fee"])
# Coins are sent to node1_address
node1_address = self.nodes[1].getnewaddress("from0")
# Send tx1, and another transaction tx2 that won't be cloned
txid1 = self.nodes[0].sendfrom("foo", node1_address, 40, 0)
txid2 = self.nodes[0].sendfrom("bar", node1_address, 20, 0)
# Construct a clone of tx1, to be malleated
rawtx1 = self.nodes[0].getrawtransaction(txid1,1)
clone_inputs = [{"txid":rawtx1["vin"][0]["txid"],"vout":rawtx1["vin"][0]["vout"]}]
clone_outputs = {rawtx1["vout"][0]["scriptPubKey"]["addresses"][0]:rawtx1["vout"][0]["value"],
rawtx1["vout"][1]["scriptPubKey"]["addresses"][0]:rawtx1["vout"][1]["value"]}
clone_locktime = rawtx1["locktime"]
clone_raw = self.nodes[0].createrawtransaction(clone_inputs, clone_outputs, None, clone_locktime)
# createrawtransaction randomizes the order of its outputs, so swap them if necessary.
# output 0 is at version+#inputs+input+sigstub+sequence+#outputs
# 40 BTC serialized is 00286bee00000000
pos0 = 2*(4+1+36+1+4+1)
hex40 = "00286bee00000000"
output_len = 16 + 2 + 2 * int("0x" + clone_raw[pos0 + 16 : pos0 + 16 + 2], 0)
if (rawtx1["vout"][0]["value"] == 40 and clone_raw[pos0 : pos0 + 16] != hex40 or
rawtx1["vout"][0]["value"] != 40 and clone_raw[pos0 : pos0 + 16] == hex40):
output0 = clone_raw[pos0 : pos0 + output_len]
output1 = clone_raw[pos0 + output_len : pos0 + 2 * output_len]
clone_raw = clone_raw[:pos0] + output1 + output0 + clone_raw[pos0 + 2 * output_len:]
# Use a different signature hash type to sign. This creates an equivalent but malleated clone.
# Don't send the clone anywhere yet
tx1_clone = self.nodes[0].signrawtransaction(clone_raw, None, None, "ALL|ANYONECANPAY")
assert_equal(tx1_clone["complete"], True)
# Have node0 mine a block, if requested:
if (self.options.mine_block):
self.nodes[0].generate(1)
sync_blocks(self.nodes[0:2])
tx1 = self.nodes[0].gettransaction(txid1)
tx2 = self.nodes[0].gettransaction(txid2)
# Node0's balance should be starting balance, plus 50BTC for another
# matured block, minus tx1 and tx2 amounts, and minus transaction fees:
expected = starting_balance + fund_foo_tx["fee"] + fund_bar_tx["fee"]
if self.options.mine_block: expected += 50
expected += tx1["amount"] + tx1["fee"]
expected += tx2["amount"] + tx2["fee"]
assert_equal(self.nodes[0].getbalance(), expected)
# foo and bar accounts should be debited:
assert_equal(self.nodes[0].getbalance("foo", 0), 1219 + tx1["amount"] + tx1["fee"])
assert_equal(self.nodes[0].getbalance("bar", 0), 29 + tx2["amount"] + tx2["fee"])
if self.options.mine_block:
assert_equal(tx1["confirmations"], 1)
assert_equal(tx2["confirmations"], 1)
# Node1's "from0" balance should be both transaction amounts:
assert_equal(self.nodes[1].getbalance("from0"), -(tx1["amount"] + tx2["amount"]))
else:
assert_equal(tx1["confirmations"], 0)
assert_equal(tx2["confirmations"], 0)
# Send clone and its parent to miner
self.nodes[2].sendrawtransaction(fund_foo_tx["hex"])
txid1_clone = self.nodes[2].sendrawtransaction(tx1_clone["hex"])
# ... mine a block...
self.nodes[2].generate(1)
# Reconnect the split network, and sync chain:
connect_nodes(self.nodes[1], 2)
self.nodes[2].sendrawtransaction(fund_bar_tx["hex"])
self.nodes[2].sendrawtransaction(tx2["hex"])
self.nodes[2].generate(1) # Mine another block to make sure we sync
sync_blocks(self.nodes)
# Re-fetch transaction info:
tx1 = self.nodes[0].gettransaction(txid1)
tx1_clone = self.nodes[0].gettransaction(txid1_clone)
tx2 = self.nodes[0].gettransaction(txid2)
# Verify expected confirmations
assert_equal(tx1["confirmations"], -2)
assert_equal(tx1_clone["confirmations"], 2)
assert_equal(tx2["confirmations"], 1)
# Check node0's total balance; should be same as before the clone, + 100 BTC for 2 matured,
# less possible orphaned matured subsidy
expected += 100
if (self.options.mine_block):
expected -= 50
assert_equal(self.nodes[0].getbalance(), expected)
assert_equal(self.nodes[0].getbalance("*", 0), expected)
# Check node0's individual account balances.
# "foo" should have been debited by the equivalent clone of tx1
assert_equal(self.nodes[0].getbalance("foo"), 1219 + tx1["amount"] + tx1["fee"])
# "bar" should have been debited by (possibly unconfirmed) tx2
assert_equal(self.nodes[0].getbalance("bar", 0), 29 + tx2["amount"] + tx2["fee"])
# "" should have starting balance, less funding txes, plus subsidies
assert_equal(self.nodes[0].getbalance("", 0), starting_balance
- 1219
+ fund_foo_tx["fee"]
- 29
+ fund_bar_tx["fee"]
+ 100)
# Node1's "from0" account balance
assert_equal(self.nodes[1].getbalance("from0", 0), -(tx1["amount"] + tx2["amount"]))
if __name__ == '__main__':
TxnMallTest().main()
|
wiggi/huntercore
|
qa/rpc-tests/txn_clone.py
|
Python
|
mit
| 7,555
| 0.006353
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
# *****************************************************************
# ** PTS -- Python Toolkit for working with SKIRT **
# ** © Astronomical Observatory, Ghent University **
# *****************************************************************
# Import the relevant PTS classes and modules
from pts.core.basics.configuration import ConfigurationDefinition
# -----------------------------------------------------------------
# Create the configuration
definition = ConfigurationDefinition()
# Add optional arguments
definition.add_section("wavelengths")
definition.sections["wavelengths"].add_optional("unit", str, "the unit of the wavelengths", "micron")
definition.sections["wavelengths"].add_optional("min", float, "the minimum wavelength", 0.09)
definition.sections["wavelengths"].add_optional("max", float, "the maximum wavelength", 2000)
definition.sections["wavelengths"].add_optional("npoints", int, "the number of wavelength points", 100)
definition.sections["wavelengths"].add_optional("min_zoom", float, "the minimum wavelength of the zoomed-in grid", 1)
definition.sections["wavelengths"].add_optional("max_zoom", float, "the maximum wavelength of the zoomed-in grid", 30)
definition.sections["wavelengths"].add_optional("npoints_zoom", int, "the number of wavelength points in the zoomed-in grid", 100)
definition.add_optional("packages", float, "the number of photon packages per wavelength", 2e5)
definition.add_flag("selfabsorption", "enable dust self-absorption")
definition.add_optional("dust_grid", str, "the type of dust grid to use (bintree, octtree or cartesian)", "bintree")
# -----------------------------------------------------------------
|
Stargrazer82301/CAAPR
|
CAAPR/CAAPR_AstroMagic/PTS/pts/modeling/config/initialize_fit.py
|
Python
|
mit
| 1,745
| 0.005161
|
#!/usr/bin/env python2.7
"""Docker From Scratch Workshop - Level 4: Add overlay FS.
Goal: Instead of re-extracting the image, use it as a read-only layer
(lowerdir), and create a copy-on-write layer for changes (upperdir).
HINT: Don't forget that overlay fs also requires a workdir.
Read more on overlay FS here:
https://www.kernel.org/doc/Documentation/filesystems/overlayfs.txt
"""
from __future__ import print_function
import linux
import tarfile
import uuid
import click
import os
import stat
import traceback
def _get_image_path(image_name, image_dir, image_suffix='tar'):
return os.path.join(image_dir, os.extsep.join([image_name, image_suffix]))
def _get_container_path(container_id, container_dir, *subdir_names):
return os.path.join(container_dir, container_id, *subdir_names)
def create_container_root(image_name, image_dir, container_id, container_dir):
image_path = _get_image_path(image_name, image_dir)
assert os.path.exists(image_path), "unable to locate image %s" % image_name
# TODO: Instead of creating the container_root and extracting to it,
# create an images_root.
# keep only one rootfs per image and re-use it
container_root = _get_container_path(container_id, container_dir, 'rootfs')
if not os.path.exists(container_root):
os.makedirs(container_root)
with tarfile.open(image_path) as t:
# Fun fact: tar files may contain *nix devices! *facepalm*
members = [m for m in t.getmembers()
if m.type not in (tarfile.CHRTYPE, tarfile.BLKTYPE)]
t.extractall(container_root, members=members)
    # TODO: create directories for copy-on-write (upperdir), overlay workdir,
# and a mount point
# TODO: mount the overlay (HINT: use the MS_NODEV flag to mount)
return container_root # return the mountpoint for the mounted overlayfs
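# One possible shape for the overlay setup the TODOs above ask for. This is a
# sketch only, not the workshop's reference solution: it reuses the
# linux.mount() wrapper and the MS_NODEV flag named in the hint, while the
# directory names (cow_rw, cow_workdir, rootfs) and the image_root parameter
# are assumptions made for illustration.
def _example_overlay_mount(image_root, container_id, container_dir):
    # copy-on-write layer, overlayfs scratch dir, and the final mount point
    upperdir = _get_container_path(container_id, container_dir, 'cow_rw')
    workdir = _get_container_path(container_id, container_dir, 'cow_workdir')
    rootfs = _get_container_path(container_id, container_dir, 'rootfs')
    for d in (upperdir, workdir, rootfs):
        if not os.path.exists(d):
            os.makedirs(d)
    # lowerdir is the read-only extracted image; all changes land in upperdir
    linux.mount(
        'overlay', rootfs, 'overlay', linux.MS_NODEV,
        'lowerdir={},upperdir={},workdir={}'.format(image_root, upperdir, workdir))
    return rootfs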
@click.group()
def cli():
pass
def makedev(dev_path):
for i, dev in enumerate(['stdin', 'stdout', 'stderr']):
os.symlink('/proc/self/fd/%d' % i, os.path.join(dev_path, dev))
os.symlink('/proc/self/fd', os.path.join(dev_path, 'fd'))
# Add extra devices
DEVICES = {'null': (stat.S_IFCHR, 1, 3), 'zero': (stat.S_IFCHR, 1, 5),
'random': (stat.S_IFCHR, 1, 8), 'urandom': (stat.S_IFCHR, 1, 9),
'console': (stat.S_IFCHR, 136, 1), 'tty': (stat.S_IFCHR, 5, 0),
'full': (stat.S_IFCHR, 1, 7)}
for device, (dev_type, major, minor) in DEVICES.iteritems():
os.mknod(os.path.join(dev_path, device),
0o666 | dev_type, os.makedev(major, minor))
def _create_mounts(new_root):
# Create mounts (/proc, /sys, /dev) under new_root
linux.mount('proc', os.path.join(new_root, 'proc'), 'proc', 0, '')
linux.mount('sysfs', os.path.join(new_root, 'sys'), 'sysfs', 0, '')
linux.mount('tmpfs', os.path.join(new_root, 'dev'), 'tmpfs',
linux.MS_NOSUID | linux.MS_STRICTATIME, 'mode=755')
# Add some basic devices
devpts_path = os.path.join(new_root, 'dev', 'pts')
if not os.path.exists(devpts_path):
os.makedirs(devpts_path)
linux.mount('devpts', devpts_path, 'devpts', 0, '')
makedev(os.path.join(new_root, 'dev'))
def contain(command, image_name, image_dir, container_id, container_dir):
linux.unshare(linux.CLONE_NEWNS) # create a new mount namespace
linux.mount(None, '/', None, linux.MS_PRIVATE | linux.MS_REC, None)
new_root = create_container_root(
image_name, image_dir, container_id, container_dir)
print('Created a new root fs for our container: {}'.format(new_root))
_create_mounts(new_root)
old_root = os.path.join(new_root, 'old_root')
os.makedirs(old_root)
linux.pivot_root(new_root, old_root)
os.chdir('/')
linux.umount2('/old_root', linux.MNT_DETACH) # umount old root
os.rmdir('/old_root') # rmdir the old_root dir
os.execvp(command[0], command)
@cli.command(context_settings=dict(ignore_unknown_options=True,))
@click.option('--image-name', '-i', help='Image name', default='ubuntu')
@click.option('--image-dir', help='Images directory',
default='/workshop/images')
@click.option('--container-dir', help='Containers directory',
default='/workshop/containers')
@click.argument('Command', required=True, nargs=-1)
def run(image_name, image_dir, container_dir, command):
container_id = str(uuid.uuid4())
pid = os.fork()
if pid == 0:
# This is the child, we'll try to do some containment here
try:
contain(command, image_name, image_dir, container_id,
container_dir)
except Exception:
traceback.print_exc()
os._exit(1) # something went wrong in contain()
# This is the parent, pid contains the PID of the forked process
# wait for the forked child, fetch the exit status
_, status = os.waitpid(pid, 0)
print('{} exited with status {}'.format(pid, status))
if __name__ == '__main__':
cli()
|
Fewbytes/rubber-docker
|
levels/04_overlay/rd.py
|
Python
|
mit
| 5,063
| 0
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from collections import Counter, OrderedDict
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from scipy.cluster.hierarchy import ward, dendrogram
from sklearn.decomposition import PCA
from sklearn.metrics.pairwise import euclidean_distances
from sklearn.cluster import AgglomerativeClustering
from sklearn.metrics.pairwise import pairwise_distances
from librosa.segment import agglomerative
from HACluster import VNClusterer, Clusterer
from sklearn.preprocessing import StandardScaler
from ete3 import Tree, NodeStyle, TreeStyle, AttrFace, faces, TextFace
class OrderedCounter(Counter, OrderedDict):
'Counter that remembers the order elements are first encountered'
def __repr__(self):
return '%s(%r)' % (self.__class__.__name__, OrderedDict(self))
def __reduce__(self):
return self.__class__, (OrderedDict(self),)
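# A minimal sketch (not called anywhere in this module) of why OrderedCounter
# is used below: it preserves the order in which keys are first inserted, which
# keeps slice names in corpus order when tallying bootstrap segment boundaries.
def _ordered_counter_demo():
    """
    >>> oc = OrderedCounter()
    >>> for name in ['slice_c', 'slice_a', 'slice_b']:
    ...     oc[name] += 1
    >>> list(oc.keys())
    ['slice_c', 'slice_a', 'slice_b']
    """
    return None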
def pca_cluster(slice_matrix, slice_names, feature_names, prefix='en',
nb_clusters=3):
"""
    Run PCA on the matrix and visualize the samples in the first two principal
    components, with word loadings projected on top. The colouring of the samples
    comes from running a cluster analysis on the samples in these first dimensions.
"""
sns.set_style('dark')
sns.plt.rcParams['axes.linewidth'] = 0.2
fig, ax1 = sns.plt.subplots()
slice_matrix = StandardScaler().fit_transform(slice_matrix)
pca = PCA(n_components=2)
pca_matrix = pca.fit_transform(slice_matrix)
pca_loadings = pca.components_.transpose()
# first plot slices:
x1, x2 = pca_matrix[:,0], pca_matrix[:,1]
ax1.scatter(x1, x2, 100, edgecolors='none', facecolors='none')
# clustering on top (for colouring):
clustering = AgglomerativeClustering(linkage='ward', affinity='euclidean', n_clusters=nb_clusters)
clustering.fit(pca_matrix)
# add slice names:
for x, y, name, cluster_label in zip(x1, x2, slice_names, clustering.labels_):
ax1.text(x, y, name.split('_')[0][:3], ha='center', va="center",
color=plt.cm.spectral(cluster_label / 10.),
fontdict={'family': 'Arial', 'size': 10})
# now loadings on twin axis:
ax2 = ax1.twinx().twiny()
l1, l2 = pca_loadings[:,0], pca_loadings[:,1]
ax2.scatter(l1, l2, 100, edgecolors='none', facecolors='none');
for x, y, l in zip(l1, l2, feature_names):
ax2.text(x, y, l ,ha='center', va="center", size=8, color="darkgrey",
fontdict={'family': 'Arial', 'size': 9})
# control aesthetics:
ax1.set_xlabel('')
ax1.set_ylabel('')
ax1.set_xticklabels([])
ax1.set_xticks([])
ax1.set_yticklabels([])
ax1.set_yticks([])
ax2.set_xticklabels([])
ax2.set_xticks([])
ax2.set_yticklabels([])
ax2.set_yticks([])
sns.plt.tight_layout()
sns.plt.savefig('../outputs/'+prefix+'_pca.pdf', bbox_inches=0)
plt.clf()
def natural_cluster(slice_matrix, slice_names, prefix='en'):
"""
Perform plain cluster analysis on sample matrix, without
taking into account the chronology of the corpus.
"""
slice_matrix = StandardScaler().fit_transform(slice_matrix)
dist_matrix = pairwise_distances(slice_matrix, metric='euclidean')
clusterer = Clusterer(dist_matrix, linkage='ward')
clusterer.cluster(verbose=0)
short_names = [l.split('_')[0][:5]+l.split('_')[1] for l in slice_names]
tree = clusterer.dendrogram.ete_tree(short_names)
tree.write(outfile='../outputs/'+prefix+'_natural_clustering.newick')
def vnc_cluster(slice_matrix, slice_names, prefix='en'):
slice_matrix = StandardScaler().fit_transform(slice_matrix)
dist_matrix = pairwise_distances(slice_matrix, metric='euclidean')
clusterer = VNClusterer(dist_matrix, linkage='ward')
clusterer.cluster(verbose=0)
short_names = [l.split('_')[0][:5]+l.split('_')[1] for l in slice_names]
t = clusterer.dendrogram.ete_tree(short_names)
t.write(outfile='../outputs/'+prefix+"_vnc_clustering.newick")
def segment_cluster(slice_matrix, slice_names, nb_segments):
slice_matrix = StandardScaler().fit_transform(slice_matrix)
slice_matrix = np.asarray(slice_matrix).transpose() # librosa assumes that data[1] = time axis
segment_starts = agglomerative(data=slice_matrix, k=nb_segments)
break_points = []
for i in segment_starts:
if i > 0: # skip first one, since it's always a segm start!
break_points.append(slice_names[i])
return(break_points)
def bootstrap_segmentation(n_iter, nb_mfw_sampled, corpus_matrix,
slice_names, prefix='en', nb_segments=3, random_state=2015):
np.random.seed(random_state)
corpus_matrix = np.asarray(corpus_matrix)
sample_cnts = OrderedCounter()
for sn in slice_names:
sample_cnts[sn] = []
for i in range(nb_segments):
sample_cnts[sn].append(0)
for nb in range(n_iter):
print('===============\niteration:', nb+1)
# sample a subset of the features in our matrix:
rnd_indices = np.random.randint(low=0, high=corpus_matrix.shape[1], size=nb_mfw_sampled)
sampled_matrix = corpus_matrix[:,rnd_indices]
# get which breaks are selected and adjust the cnts:
selected_breaks = segment_cluster(sampled_matrix, slice_names, nb_segments=nb_segments)
for i, break_ in enumerate(selected_breaks):
sample_cnts[break_][i] += 1
plt.rcParams['font.family'] = 'arial'
plt.rcParams['font.size'] = 8
plt.clf()
plt.figure(figsize=(10,20))
sample_names, breakpoints_cnts = zip(*sample_cnts.items())
pos = [i for i, n in enumerate(sample_names)][::-1] # reverse for legibility
plt.yticks(pos, [n[:3].replace('_', '') if n.endswith(('_1', '_0')) else ' ' for n in sample_names])
axes = plt.gca()
axes.set_xlim([0,n_iter])
colors = sns.color_palette('hls', nb_segments)
for i in range(nb_segments-1):
cnts = [c[i] for c in breakpoints_cnts]
plt.barh(pos, cnts, align='center', color=colors[i], linewidth=0, label="Boundary "+str(i+1))
plt.tick_params(axis='y', which='both', left='off', right='off', labelleft='on')
plt.tick_params(axis='x', which='both', top='off')
plt.legend()
plt.savefig('../outputs/'+prefix+'_bootstrap_segment'+str(nb_segments)+'.pdf')
|
mikekestemont/beckett
|
code/analysis.py
|
Python
|
mit
| 6,372
| 0.007062
|
from pycipher import Vigenere
import unittest
class TestVigenere(unittest.TestCase):
def test_encipher(self):
keys = ('GERMAN',
'CIPHERS')
plaintext = ('abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz',
'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz')
ciphertext = ('gftpesmlzvkysrfbqeyxlhwkedrncqkjxtiwqpdzocwvjfuicbpl',
'cjrkiwyjqyrpdfqxfywkmxemfdrteltmkyalsatrfhszhaymozgo')
for i,key in enumerate(keys):
enc = Vigenere(key).encipher(plaintext[i])
self.assertEqual(enc.upper(), ciphertext[i].upper())
def test_decipher(self):
keys = ('GERMAN',
'CIPHERS')
plaintext= ('abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz',
'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz')
ciphertext = ('gftpesmlzvkysrfbqeyxlhwkedrncqkjxtiwqpdzocwvjfuicbpl',
'cjrkiwyjqyrpdfqxfywkmxemfdrteltmkyalsatrfhszhaymozgo')
for i,key in enumerate(keys):
dec = Vigenere(key).decipher(ciphertext[i])
self.assertEqual(dec.upper(), plaintext[i].upper())
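# A small illustrative sketch of the classical Vigenere shift that these test
# vectors encode, written independently of pycipher's implementation: each
# plaintext letter is shifted forward by the alphabet position of the matching
# key letter (letters-only input assumed).
def _vigenere_shift(plaintext, key):
    key = key.upper()
    out = []
    for i, ch in enumerate(plaintext.upper()):
        shift = ord(key[i % len(key)]) - ord('A')
        out.append(chr((ord(ch) - ord('A') + shift) % 26 + ord('A')))
    return ''.join(out)
# e.g. _vigenere_shift('abc', 'GERMAN') == 'GFT', matching the first three
# characters of the expected ciphertext above.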
if __name__ == '__main__':
unittest.main()
|
jameslyons/pycipher
|
tests/test_vigenere.py
|
Python
|
mit
| 1,247
| 0.008019
|
import unicodecsv
from django.contrib.auth.models import User
from django.http import HttpResponse
from django.utils.encoding import force_text
from six.moves import map
from oioioi.base.permissions import make_request_condition
from oioioi.base.utils import request_cached
from oioioi.participants.controllers import ParticipantsController
from oioioi.participants.models import Participant
def is_contest_with_participants(contest):
rcontroller = contest.controller.registration_controller()
return isinstance(rcontroller, ParticipantsController)
def is_onsite_contest(contest):
if not is_contest_with_participants(contest):
return False
from oioioi.participants.admin import OnsiteRegistrationParticipantAdmin
rcontroller = contest.controller.registration_controller()
padmin = rcontroller.participant_admin
return padmin and issubclass(padmin, OnsiteRegistrationParticipantAdmin)
@make_request_condition
def contest_has_participants(request):
return is_contest_with_participants(request.contest)
@make_request_condition
def has_participants_admin(request):
rcontroller = request.contest.controller.registration_controller()
return getattr(rcontroller, 'participant_admin', None) is not None
@make_request_condition
def contest_is_onsite(request):
return is_onsite_contest(request.contest)
@request_cached
def get_participant(request):
try:
return Participant.objects.get(contest=request.contest, user=request.user)
except Participant.DoesNotExist:
return None
@make_request_condition
@request_cached
def can_register(request):
if get_participant(request) is not None:
return False
rcontroller = request.contest.controller.registration_controller()
return rcontroller.can_register(request)
@make_request_condition
@request_cached
def can_edit_registration(request):
participant = get_participant(request)
if participant is None:
return False
rcontroller = request.contest.controller.registration_controller()
return rcontroller.can_edit_registration(request, participant)
@make_request_condition
@request_cached
def can_unregister(request):
participant = get_participant(request)
if participant is None:
return False
rcontroller = request.contest.controller.registration_controller()
return rcontroller.can_unregister(request, participant)
@make_request_condition
@request_cached
def is_participant(request):
rcontroller = request.contest.controller.registration_controller()
qs = User.objects.filter(id=request.user.id)
return rcontroller.filter_participants(qs).exists()
def _fold_registration_models_tree(object):
"""Function for serialize_participants_data. Walks over model of
the object, gets models related to the model and lists
all their fields."""
result = []
objects_used = [object]
# https://docs.djangoproject.com/en/1.9/ref/models/meta/#migrating-old-meta-api
def get_all_related_objects(_meta):
return [
f
for f in _meta.get_fields()
if (f.one_to_many or f.one_to_one) and f.auto_created and not f.concrete
]
objs = [
getattr(object, rel.get_accessor_name())
for rel in get_all_related_objects(object._meta)
if hasattr(object, rel.get_accessor_name())
]
while objs:
current = objs.pop(0)
if current is None:
continue
objects_used.append(current)
for field in current._meta.fields:
if (
field.remote_field is not None
and getattr(current, field.name) not in objects_used
):
objs.append(getattr(current, field.name))
for obj in objects_used:
for field in obj._meta.fields:
if not field.auto_created:
if field.remote_field is None:
result += [(obj, field)]
return result
def serialize_participants_data(request, participants):
"""Serializes all personal data of participants to a table.
:param participants: A QuerySet from table participants.
"""
if not participants.exists():
return {'no_participants': True}
display_email = request.contest.controller.show_email_in_participants_data
keys = ['username', 'user ID', 'first name', 'last name'] + (
['email address'] if display_email else []
)
def key_name(attr):
(obj, field) = attr
return str(obj.__class__.__name__) + ": " + field.verbose_name.title()
set_of_keys = set(keys)
for participant in participants:
for key in map(key_name, _fold_registration_models_tree(participant)):
if key not in set_of_keys:
set_of_keys.add(key)
keys.append(key)
def key_value(attr):
(obj, field) = attr
return (key_name((obj, field)), field.value_to_string(obj))
data = []
for participant in participants:
values = dict(list(map(key_value, _fold_registration_models_tree(participant))))
values['username'] = participant.user.username
values['user ID'] = participant.user.id
values['first name'] = participant.user.first_name
values['last name'] = participant.user.last_name
if display_email:
values['email address'] = participant.user.email
data.append([values.get(key, '') for key in keys])
return {'keys': keys, 'data': data}
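# The dict returned above is consumed by render_participants_data_csv below;
# its shape is, for example (the field values here are illustrative only):
#   {'keys': ['username', 'user ID', 'first name', 'last name', ...],
#    'data': [['jdoe', 42, 'John', 'Doe', ...], ...]}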
def render_participants_data_csv(request, participants, name):
data = serialize_participants_data(request, participants)
response = HttpResponse(content_type='text/csv')
response['Content-Disposition'] = 'attachment; filename=%s-%s.csv' % (
name,
"personal-data",
)
if 'no_participants' not in data:
writer = unicodecsv.writer(response)
writer.writerow(list(map(force_text, data['keys'])))
for row in data['data']:
writer.writerow(list(map(force_text, row)))
return response
|
sio2project/oioioi
|
oioioi/participants/utils.py
|
Python
|
gpl-3.0
| 6,045
| 0.000496
|
"""
Support for Tellstick lights.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/light.tellstick/
"""
from homeassistant.components import tellstick
from homeassistant.components.light import ATTR_BRIGHTNESS, Light
from homeassistant.components.tellstick import (DEFAULT_SIGNAL_REPETITIONS,
ATTR_DISCOVER_DEVICES,
ATTR_DISCOVER_CONFIG)
# pylint: disable=unused-argument
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Setup Tellstick lights."""
if (discovery_info is None or
discovery_info[ATTR_DISCOVER_DEVICES] is None or
tellstick.TELLCORE_REGISTRY is None):
return
signal_repetitions = discovery_info.get(ATTR_DISCOVER_CONFIG,
DEFAULT_SIGNAL_REPETITIONS)
add_devices(TellstickLight(
tellstick.TELLCORE_REGISTRY.get_device(switch_id), signal_repetitions)
for switch_id in discovery_info[ATTR_DISCOVER_DEVICES])
class TellstickLight(tellstick.TellstickDevice, Light):
"""Representation of a Tellstick light."""
def __init__(self, tellstick_device, signal_repetitions):
"""Initialize the light."""
self._brightness = 255
tellstick.TellstickDevice.__init__(self,
tellstick_device,
signal_repetitions)
@property
def is_on(self):
"""Return true if switch is on."""
return self._state
@property
def brightness(self):
"""Return the brightness of this light between 0..255."""
return self._brightness
def set_tellstick_state(self, last_command_sent, last_data_sent):
"""Update the internal representation of the switch."""
from tellcore.constants import TELLSTICK_TURNON, TELLSTICK_DIM
if last_command_sent == TELLSTICK_DIM:
if last_data_sent is not None:
self._brightness = int(last_data_sent)
self._state = self._brightness > 0
else:
self._state = last_command_sent == TELLSTICK_TURNON
def _send_tellstick_command(self, command, data):
"""Handle the turn_on / turn_off commands."""
from tellcore.constants import (TELLSTICK_TURNOFF, TELLSTICK_DIM)
if command == TELLSTICK_TURNOFF:
self.tellstick_device.turn_off()
elif command == TELLSTICK_DIM:
self.tellstick_device.dim(self._brightness)
else:
raise NotImplementedError(
"Command not implemented: {}".format(command))
def turn_on(self, **kwargs):
"""Turn the switch on."""
from tellcore.constants import TELLSTICK_DIM
brightness = kwargs.get(ATTR_BRIGHTNESS)
if brightness is not None:
self._brightness = brightness
self.call_tellstick(TELLSTICK_DIM, self._brightness)
def turn_off(self, **kwargs):
"""Turn the switch off."""
from tellcore.constants import TELLSTICK_TURNOFF
self.call_tellstick(TELLSTICK_TURNOFF)
|
justyns/home-assistant
|
homeassistant/components/light/tellstick.py
|
Python
|
mit
| 3,211
| 0
|
import json
import hashlib
from django.db import models
from django.db.models import Count, Func
from django.contrib.postgres.fields import ArrayField
from django.contrib.auth.models import BaseUserManager, AbstractBaseUser
from django.utils.translation import gettext_lazy as _
# from social.apps.django_app.default.models import UserSocialAuth
from django.contrib.auth.models import Permission, Group, PermissionsMixin
from django.db import transaction
from random import randint
from django.core.cache import cache
from mptt.models import MPTTModel, TreeForeignKey
from netfields import InetAddressField, NetManager
from django_gravatar.helpers import get_gravatar_url
from . import now
# from lazysignup.utils import is_lazy_user
# Travis payload format:
# https://docs.travis-ci.com/user/notifications#Webhooks-Delivery-Format
class SiteUpdate(models.Model):
started = models.DateTimeField(
default=None,
null=True, blank=True,
db_index=True
)
finished = models.DateTimeField(
auto_now_add=True,
db_index=True,
null=True, blank=True
)
sha1 = models.CharField(max_length=40, editable=False, unique=True)
commit_time = models.DateTimeField(
db_index=True,
null=True, blank=True
)
commit_message = models.CharField(
max_length=150,
editable=False,
null=True, blank=True
)
travis_raw = models.TextField(null=True, blank=True)
log = models.TextField(null=True, blank=True)
class Meta:
verbose_name = _("Site update")
verbose_name_plural = _("Site updates")
@property
def travis_raw_pretty(self):
if self.travis_raw:
parsed = json.loads(self.travis_raw)
return json.dumps(parsed, indent=4, sort_keys=True)
else:
return ""
@property
def length(self):
if self.finished and self.started:
return self.finished-self.started
else:
return None
def __str__(self):
return self.sha1
class AddedChanged(models.Model):
added = models.DateTimeField(
auto_now_add=True,
db_index=True,
# default=now,
)
changed = models.DateTimeField(
auto_now=True,
db_index=True,
# default=now
)
# , editable=False
class Meta:
abstract = True
class UserManager(BaseUserManager):
def create_user(self, email, username=None, password=None):
if not email:
raise ValueError('Users must have an email address')
user = self.model(
email=self.normalize_email(email),
username=username,
is_staff=False,
is_active=True,
is_superuser=False,
last_login=now(),
date_joined=now()
)
user.set_password(password)
user.save(using=self._db)
return user
def random(self):
"""TODO"""
# there can be deleted items
with transaction.atomic():
count = self.aggregate(count=Count('id'))['count']
random_index = randint(0, count - 1)
return self.all()[random_index]
def create_superuser(self, email, username, password):
user = self.create_user(email, username, password)
user.is_active = True
user.is_staff = True
user.is_superuser = True
user.save(using=self._db)
return user
class User(AbstractBaseUser, PermissionsMixin):
objects = UserManager()
USERNAME_FIELD = 'email'
email = models.EmailField(
verbose_name='Email',
max_length=255,
unique=True,
db_index=True,
blank=True, null=True,
default=None,
)
username = models.CharField(
max_length=200,
db_index=True,
# unique=True,
default='',
blank=True, null=True,
help_text=_("This is an unique identifier, not actual username. Can be a session \
key for temporary users")
)
# is_superuser = models.BooleanField(default=False)
is_staff = models.BooleanField(
default=False,
help_text=_("Designates whether this user can access the admin site.")
)
is_active = models.BooleanField(default=True)
date_joined = models.DateTimeField(auto_now_add=True, db_index=True)
first_name = models.CharField(
max_length=200,
blank=True, null=True,
)
last_name = models.CharField(
max_length=200,
blank=True, null=True,
)
date_last_pass_sent = models.DateTimeField(null=True)
skype = models.CharField(max_length=200, blank=True, null=True)
discord = models.CharField(max_length=200, blank=True, null=True)
phone = models.CharField(max_length=200, blank=True, null=True)
city = models.CharField(max_length=200, blank=True, null=True)
browser_on_creation = models.CharField(
max_length=300,
db_index=True,
default=None,
blank=True, null=True,
help_text=_("Browser string used when this user was created")
)
created_from_ip = models.GenericIPAddressField(blank=True, null=True)
timezone_str = models.CharField(
max_length=50,
db_index=True,
default='UTC',
)
# avatar = models.ForeignKey(
# 'images.Image',
# null=True,
# blank=True,
# # help_text=_("Avatar image")
# )
permissions = models.ManyToManyField(
Permission,
related_name="permissions",
blank=True
)
groups = models.ManyToManyField(
Group,
related_name="groups",
blank=True
)
telegram_chat_id = models.IntegerField(
blank=True, null=True,
)
class Meta:
verbose_name = _("User")
verbose_name_plural = _("Users")
def gravatar(self, size_in_px=25):
"""Return authorized social accounts"""
return get_gravatar_url(self.email, size=size_in_px)
# @property
# def social_accounts(self):
# """Return authorized social accounts"""
# return UserSocialAuth.objects.filter(user=self)
@property
def is_lazy(self):
return True
# return is_lazy_user(self)
def get_full_name(self):
"Used in Admin. Dajngo wants this to be defined."
return "{} {}".format(self.first_name, self.last_name)
def get_short_name(self):
"Used in Admin. Dajngo wants this to be defined."
return self.email
def __str__(self):
# if self.is_lazy:
# return "{}".format(_('Anonymous'))
if self.first_name:
return self.first_name
elif self.email:
return self.email
else:
return "User {}".format(self.pk)
# pip install django-mptt
class Tree(MPTTModel):
parent = TreeForeignKey(
'self',
default=None,
null=True,
blank=True,
db_index=True,
# related_name="%(app_label)s_%(class)s_parent",
# related_name="%(app_label)s_%(class)s_children",
related_name='children',
verbose_name=_("Parent element"),
on_delete=models.SET_NULL,
)
class Meta:
abstract = True
class Comment(Tree):
author = models.ForeignKey(
'core.User',
default=None,
null=True,
blank=True,
on_delete=models.SET_NULL,
)
src = models.TextField()
class LoginAttempt(models.Model):
'''
A login attempt record (both successful and not).
If user field is set then login was successful.
Instead login and password fields are set.
'''
# https://docs.python.org/3/library/ipaddress.html
# inet = InetAddressField(primary_key=True)
ip = InetAddressField()
login = models.CharField(
max_length=260,
null=True, blank=True,
)
password = models.CharField(
max_length=260,
null=True, blank=True,
)
user = models.ForeignKey(
'core.User',
default=None,
null=True,
blank=True,
on_delete=models.SET_NULL,
)
time = models.DateTimeField(
auto_now_add=True,
db_index=True,
null=True, blank=True,
)
# success = models.BooleanField(default=False)
objects = NetManager()
class Unnest(Func):
function = 'UNNEST'
class IP(models.Model):
# https://docs.python.org/3/library/ipaddress.html
# inet = InetAddressField(primary_key=True)
inet = InetAddressField()
open_ports = ArrayField(
models.IntegerField(),
blank=True,
null=True
)
objects = NetManager()
class Meta:
verbose_name = _('IP')
verbose_name_plural = _('IP-addresses')
@classmethod
def stat(cls):
"""Return Port and how many IPs have it open"""
return cls.objects \
.annotate(port=Unnest('open_ports', distinct=True)) \
.values('port') \
.annotate(count=Count('port')) \
.order_by('-count', '-port')
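        # Yields one row per distinct port, e.g. (values illustrative only):
        # <QuerySet [{'port': 443, 'count': 17}, {'port': 80, 'count': 12}, ...]>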
@classmethod
def with_open_ports(cls, ports):
"""Return Port and how many IPs have it open"""
return cls.objects.filter(open_ports__contains=ports)
def __str__(self):
# from django.contrib.postgres.aggregates import ArrayAgg
# print(IP.objects.aggregate(arrayagg=ArrayAgg('inet')))
# print(IP.objects.values('open_ports')\
# .annotate(number_of_days=Count('open_ports', distinct=True)))
# print(IP.objects.filter()\
# .aggregate(Avg('open_ports')))
# print(IP.objects.aggregate(All('open_ports')))
# print(IP.stat())
# .group_by('inet'))
# print(IP.objects.values('inet').annotate(arr_els=Unnest('open_ports')))
# .values_list('arr_els', flat=True).distinct())
return str(self.inet)
class Hostname(models.Model):
name = models.CharField(
max_length=260,
help_text="example.org, host.example.org"
)
# 2 level domain?
is_domain = models.BooleanField(default=False)
class Meta:
# unique_together = (("name", "domain"),)
# index_together = [["name", "domain"], ]
verbose_name = _("Hostname")
verbose_name_plural = _("Hostnames")
@property
def key(self):
return 'host_{}'.format(
hashlib.md5(str(self).encode('utf-8')).hexdigest()
)
@property
def last_visited(self):
key = self.key+'lastvisit'
return cache.get(key)
@last_visited.setter
def last_visited(self, t):
key = self.key+'lastvisit'
return cache.set(key, t, 60)
def last_visit_older(self, s):
# print(self, self.last_visited)
if self.last_visited is None:
return True
# return now() - self.last_visited > timedelta(seconds=3)
# @classmethod
# def from_string(cls, s):
# host_arr = s.split('.')
# host_part = '.'.join(host_arr[:-2])
# domain_part = '.'.join(host_arr[-2:])
# # try:
# domain, c = Domain.objects.get_or_create(name=domain_part)
# # except:
# # client.captureException()
# domain.clean()
# host, c = Hostname.objects.get_or_create(
# name=host_part,
# domain=domain
# )
# return host
def __eq__(self, other):
if other is None:
return False
if str(self) == str(other):
# if self.name == other.name and \
# self.domain == other.domain:
return True
return False
def __str__(self):
if self.name:
return '{}.{}'.format(self.name, str(self.domain))
else:
return str(self.domain)
# class Country(models.Model):
# name_ru = models.CharField(max_length=150)
# name_en = models.CharField(max_length=150)
# code = models.CharField(max_length=2)
# truecountry = models.IntegerField(default=0, null=False)
# class Meta:
# db_table = 'countries'
# ordering = ('name_en',)
# verbose_name_plural = "Countries"
# def __str__(self):
# lng = django.utils.translation.get_language()
# if 'ru' == lng:
# return self.name_ru
# return self.name_en
# class PersonManager(models.Manager):
# def get_queryset(self):
# return super().get_queryset() \
# .select_related('name',
# class URLScheme(models.Model):
# """http://en.wikipedia.org/wiki/URI_scheme"""
# name = models.CharField(max_length=260)
# class Meta:
# db_table = 'url_scheme'
# def __eq__(self, other):
# if other is None or self is None:
# return False
# if self.name == str(other):
# return True
# return False
# def __str__(self):
# return self.name
# class URL(models.Model):
# """scheme://username:password@example.org:8042/path?query#fragment"""
# cookies_file = '/var/www/xdev/tmp/url_cookies.txt'
# scheme = models.ForeignKey(URLScheme, null=True, blank=True)
# host = models.ForeignKey(Hostname, null=True, blank=True)
# path_str = models.CharField(max_length=260, help_text="/path/in/url",
# null=True, blank=True, default=None)
# # image = models.ForeignKey('Image', null=True, blank=True)
# # query = hstore.DictionaryField(null=True, blank=True)
# query = models.CharField(max_length=260, null=True, blank=True,
# help_text="?query")
# fragment = models.CharField(max_length=260, null=True, blank=True,
# help_text="#fragment")
# # objects = hstore.HStoreManager()
# status_code = models.IntegerField(default=None, null=True, blank=True)
# redirect = models.ForeignKey('self', null=True, blank=True, default=None,
# db_column='redirect_id', related_name='+')
# v = models.IntegerField(default=0, help_text="asd")
# class Meta:
# db_table = 'url'
# unique_together = (("scheme", "host", "path_str",
# "query", "fragment"), )
# # index_together = [["name", "domain"], ]
# verbose_name = "URL"
# verbose_name_plural = "URLs"
# @property
# def sha1(self):
# s = str(self)
# if isinstance(s, six.text_type):
# s = s.encode('utf-8')
# return hashlib.sha1(s).hexdigest()
# @property
# def links_abs(self):
# """Absolute URLs from the page. Return QuerySet of URL models."""
# links = self.soup.find_all('a')
# u = str(self.final_url)
# s = set([urljoin(u, tag.get('href', None)) for tag in links
# if tag.get('href', None) is not None])
# def id(x):
# return URL.from_string(x).id
# ids = list(map(id, s))
# return URL.objects.filter(pk__in=ids)
# @property
# def final_url(self):
# if self.redirect:
# return self.redirect
# return self
# def get(self):
# "Returns [Request object] or None. See 'requests' pkg"
# key = 'url_data_{}_r'.format(self.sha1)
# r = cache.get(key)
# if r is not None:
# return r
# wait = 4
# while not self.host.last_visit_older(wait):
# sleep(wait)
# headers = {
# 'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:29.0)'
# ' Gecko/20100101 Firefox/29.0'
# }
# try:
# r = requests.get(str(self), headers=headers)
# except requests.exceptions.ConnectionError:
# client.captureException()
# return None
# if r.history:
# u_redirected = URL.from_string(r.url)
# if settings.DEBUG:
# print('got redirect to:', u_redirected)
# if self.redirect != u_redirected and self.redirect != self:
# self.redirect = u_redirected
# self.save()
# cache.set(key, r, 60*60)
# self.host.last_visited = now()
# return r
# @property
# def key(self):
# return 'url_data_{}'.format(self.sha1)
# def download(self, wait=4, **kwargs):
# return self.get().content
# @classmethod
# def download_url(cls, url, filename, **kwargs):
# "Download URL and save it to FILENAME."
# # endfile = os.path.basename(url) + '.jpg'
# headers = {'User-Agent':
# 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:29.0)
# Gecko/20100101 Firefox/29.0'}
# import requests
# r = requests.get(url, headers=headers, stream=True)
# if r.status_code == 200:
# with open(filename, 'wb') as f:
# for chunk in r.iter_content(chunk_size=1024):
# if chunk: # filter out keep-alive new chunks
# f.write(chunk)
# else:
# return r.status_code
# return url
# def data_to_unicode(self, **kwargs):
# """Extract META tags from HTML.
#
# and try to convert data to Unicode string"""
# from article.parser.html import guess_html_encoding
# # update = kwargs.get('update', False)
# data = self.download(**kwargs)
# # print(data.decode("cp1251", 'ignore'))
# s, enc = guess_html_encoding(data)
# if enc is not None:
# # print(enc)
# # print(s)
# return s
# try:
# return data.decode('utf-8')
# except UnicodeDecodeError as e:
# try:
# return data.decode('cp1251')
# except UnicodeDecodeError as e:
# return str(e)
# @property
# def soup(self):
# key = 'url_soup_{}'.format(self.sha1)
# soup = cache.get(key)
# if soup is None:
# soup = BeautifulSoup(self.data_to_unicode())
# # cache.set(key, soup)
# return soup
# def matches(self, d=None, h=None, path=None, f=None, q=None):
# if d is not None and not d.lower() == str(self.host.domain).lower():
# return False
# if h is not None and not re.match(h, str(self.host)):
# return False
# if path is not None and not re.match(path, self.path_str):
# return False
# if f is not None and not re.match(f, self.fragment):
# return False
# if q is not None and not re.match(q, self.query):
# return False
# return True
# @property
# def obj(self):
# return 'todo'
# @classmethod
# def from_string(cls, s):
# if isinstance(s, cls):
# return s
# o = urlparse(s)
# scheme, c = URLScheme.objects.get_or_create(name=o.scheme)
# host = Hostname.from_string(o.hostname)
# u, c = cls.objects.get_or_create(scheme=scheme,
# host=host,
# path_str=o.path,
# query=o.query,
# fragment=o.fragment)
# return u
# def __eq__(self, other):
# if other is None or self is None:
# return False
# #if self.url == other.url and self.url is not None:
# # return True
# else:
# if self.scheme == other.scheme and \
# self.host == other.host and \
# self.path_str == other.path_str and \
# self.query == other.query:
# return True
# return False
# return NotImplemented
# def __str__(self):
# s = "{}://{}".format(str(self.scheme), self.host)
# if self.path_str:
# s += self.path_str
# if self.query:
# s += "?" + self.query
# if self.fragment:
# s += "#" + self.fragment
# if self.scheme and self.host:
# return s
# else:
# return NotImplemented
# class UrlObject(models.Model):
# # url = models.ForeignKey(URL, primary_key=True)
# url = models.OneToOneField(URL, primary_key=True)
# # obj = models.ForeignKey(Hostname, null=True, blank=True, default=None)
# content_type = models.ForeignKey(ContentType, null=True)
# object_id = models.PositiveIntegerField(null=True)
# obj = GenericForeignKey('content_type', 'object_id')
# v = models.IntegerField(default=0)
# class Meta:
# db_table = 'obj_from_url'
# # ordering = ('name',)
# def __str__(self):
# return self.obj
# class Translated(models.Model):
# translation_of = models.ForeignKey(
# 'self',
# default=None,
# null=True,
# blank=True,
# related_name="%(app_label)s_%(class)s_translated",
# verbose_name=_("Translation of")
# )
# lng = models.ForeignKey(
# Language,
# default=None,
# null=True,
# blank=True,
# related_name="%(app_label)s_%(class)s_lng",
# verbose_name=_("Language")
# )
# def get_translation(self, language):
# if self.lng == language:
# return self
# if self.translation_of is not None:
# pass
# return
# class Meta:
# abstract = True
# class Language(models.Model):
# name = models.CharField(
# max_length=150,
# help_text="Original language name"
# )
# name_en = models.CharField(max_length=150, help_text="Name in English")
# code = models.CharField(
# max_length=2,
# help_text="2 chars",
# unique=True,
# primary_key=True,
# verbose_name=_("Code")
# )
# class Meta:
# db_table = 'languages'
# ordering = ('name',)
# verbose_name = _("Language")
# verbose_name_plural = _("Languages")
# def __str__(self):
# return self.name
|
pashinin-com/pashinin.com
|
src/core/models.py
|
Python
|
gpl-3.0
| 22,154
| 0.000045
|
from .widget_svg_layout import SVGLayoutBox
from .widget_fullscreen import FullscreenBox
|
openseat/ipylayoutwidgets
|
ipylayoutwidgets/widgets/__init__.py
|
Python
|
bsd-3-clause
| 88
| 0.011364
|
"""
WSGI config for Bilyric project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
from django.core.wsgi import get_wsgi_application
if os.environ.get('DJANGO_SETTINGS_MODULE') == 'config.settings.production':
from raven.contrib.django.raven_compat.middleware.wsgi import Sentry
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "config.settings.production"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.production")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
application = get_wsgi_application()
if os.environ.get('DJANGO_SETTINGS_MODULE') == 'config.settings.production':
application = Sentry(application)
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
|
cuongnb14/bilyric
|
config/wsgi.py
|
Python
|
mit
| 1,707
| 0
|
# Copyright 2016-2017 Capital One Services, LLC
# Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
import logging
from botocore.exceptions import ClientError
import mock
from c7n.exceptions import PolicyValidationError
from c7n.executor import MainThreadExecutor
from c7n.resources.aws import shape_validate
from c7n.resources.ebs import (
CopyInstanceTags,
EncryptInstanceVolumes,
CopySnapshot,
Delete,
ErrorHandler,
SnapshotQueryParser as QueryParser
)
from .common import BaseTest
class SnapshotQueryParse(BaseTest):
def test_query(self):
qfilters = [
{'Name': 'tag:Name', 'Values': ['Snapshot1']},
{'Name': 'status', 'Values': ['completed']}]
self.assertEqual(qfilters, QueryParser.parse(qfilters))
def test_invalid_query(self):
self.assertRaises(
PolicyValidationError, QueryParser.parse, {})
self.assertRaises(
PolicyValidationError, QueryParser.parse, [None])
self.assertRaises(
PolicyValidationError, QueryParser.parse, [{'X': 1}])
self.assertRaises(
PolicyValidationError, QueryParser.parse, [
{'Name': 'status', 'Values': 'completed'}])
self.assertRaises(
PolicyValidationError, QueryParser.parse, [
{'Name': 'status', 'Values': ['Completed']}])
self.assertRaises(
PolicyValidationError, QueryParser.parse, [
{'Name': 'snapshot-id', 'Values': [1]}])
class SnapshotErrorHandler(BaseTest):
def test_tag_error(self):
snaps = [{'SnapshotId': 'aa'}]
error_response = {
"Error": {
"Message": "The snapshot 'aa' does not exist.",
"Code": "InvalidSnapshot.NotFound",
}
}
client = mock.MagicMock()
client.create_tags.side_effect = ClientError(error_response, 'CreateTags')
p = self.load_policy({
"name": "snap-copy",
"resource": "ebs-snapshot",
'actions': [{'type': 'tag', 'tags': {'bar': 'foo'}}]})
tagger = p.resource_manager.actions[0]
tagger.process_resource_set(client, snaps, [{'Key': 'bar', 'Value': 'foo'}])
client.create_tags.assert_called_once()
def test_remove_snapshot(self):
snaps = [{'SnapshotId': 'a'}, {'SnapshotId': 'b'}, {'SnapshotId': 'c'}]
t1 = list(snaps)
ErrorHandler.remove_snapshot('c', t1)
self.assertEqual([t['SnapshotId'] for t in t1], ['a', 'b'])
ErrorHandler.remove_snapshot('d', snaps)
self.assertEqual(len(snaps), 3)
def test_get_bad_snapshot_malformed(self):
operation_name = "DescribeSnapshots"
error_response = {
"Error": {
"Message": 'Invalid id: "snap-malformedsnap"',
"Code": "InvalidSnapshotID.Malformed",
}
}
e = ClientError(error_response, operation_name)
snap = ErrorHandler.extract_bad_snapshot(e)
self.assertEqual(snap, "snap-malformedsnap")
def test_get_bad_snapshot_notfound(self):
operation_name = "DescribeSnapshots"
error_response = {
"Error": {
"Message": "The snapshot 'snap-notfound' does not exist.",
"Code": "InvalidSnapshot.NotFound",
}
}
e = ClientError(error_response, operation_name)
snap = ErrorHandler.extract_bad_snapshot(e)
self.assertEqual(snap, "snap-notfound")
def test_get_bad_volume_malformed(self):
operation_name = "DescribeVolumes"
error_response = {
"Error": {
"Message": 'Invalid id: "vol-malformedvolume"',
"Code": "InvalidVolumeID.Malformed",
}
}
e = ClientError(error_response, operation_name)
vol = ErrorHandler.extract_bad_volume(e)
self.assertEqual(vol, "vol-malformedvolume")
def test_get_bad_volume_notfound(self):
operation_name = "DescribeVolumes"
error_response = {
"Error": {
"Message": "The volume 'vol-notfound' does not exist.",
"Code": "InvalidVolume.NotFound",
}
}
e = ClientError(error_response, operation_name)
vol = ErrorHandler.extract_bad_volume(e)
self.assertEqual(vol, "vol-notfound")
def test_snapshot_copy_related_tags_missing_volumes(self):
factory = self.replay_flight_data(
"test_ebs_snapshot_copy_related_tags_missing_volumes")
p = self.load_policy(
{
"name": "copy-related-tags",
"resource": "aws.ebs-snapshot",
"filters": [{"tag:Test": "Test"}],
"actions": [
{
"type": "copy-related-tag",
"resource": "ebs",
"key": "VolumeId",
"tags": "*"
}
]
},
session_factory=factory
)
try:
resources = p.run()
except ClientError:
# it should filter missing volume and not throw an error
self.fail("This should have been handled in ErrorHandler.extract_bad_volume")
self.assertEqual(len(resources), 1)
try:
factory().client("ec2").describe_volumes(
VolumeIds=[resources[0]["VolumeId"]]
)
except ClientError as e:
# this should not filter missing volume and will throw an error
msg = e.response["Error"]["Message"]
err = e.response["Error"]["Code"]
self.assertEqual(err, "InvalidVolume.NotFound")
self.assertEqual(msg, f"The volume '{resources[0]['VolumeId']}' does not exist.")
class SnapshotAccessTest(BaseTest):
def test_snapshot_access(self):
# pre conditions, 2 snapshots one shared to a separate account, and one
# shared publicly. 2 non matching volumes, one not shared, one shared
# explicitly to its own account.
self.patch(CopySnapshot, "executor_factory", MainThreadExecutor)
factory = self.replay_flight_data("test_ebs_cross_account")
p = self.load_policy(
{
"name": "snap-copy",
"resource": "ebs-snapshot",
"filters": ["cross-account"],
},
session_factory=factory,
)
resources = p.run()
self.assertEqual(len(resources), 2)
self.assertEqual(
{r["SnapshotId"]: r["c7n:CrossAccountViolations"] for r in resources},
{"snap-7f9496cf": ["619193117841"], "snap-af0eb71b": ["all"]},
)
class SnapshotDetachTest(BaseTest):
def test_volume_detach(self):
factory = self.replay_flight_data('test_ebs_detach')
p = self.load_policy(
{
'name': 'volume-detach',
'resource': 'ebs',
'filters': [{'VolumeId': 'vol-0850cf7c8e949c318'}],
'actions': [
{
'type': 'detach'
}
]
}, session_factory=factory)
resources = p.run()
self.assertEqual(len(resources), 1)
client = factory(region="us-east-1").client('ec2')
volumelist = []
volumelist.append(resources[0]['VolumeId'])
response = client.describe_volumes(VolumeIds=volumelist)
for resp in response['Volumes']:
for attachment in resp['Attachments']:
self.assertTrue(attachment['State'] == "detached" or
attachment['State'] == "detaching")
class SnapshotCopyTest(BaseTest):
def test_snapshot_copy(self):
self.patch(CopySnapshot, "executor_factory", MainThreadExecutor)
self.change_environment(AWS_DEFAULT_REGION="us-west-2")
factory = self.replay_flight_data("test_ebs_snapshot_copy")
p = self.load_policy(
{
"name": "snap-copy",
"resource": "ebs-snapshot",
"filters": [{"tag:ASV": "RoadKill"}],
"actions": [
{
"type": "copy",
"target_region": "us-east-1",
"target_key": "82645407-2faa-4d93-be71-7d6a8d59a5fc",
}
],
},
config=dict(region="us-west-2"),
session_factory=factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
client = factory(region="us-east-1").client("ec2")
tags = client.describe_tags(
Filters=[
{"Name": "resource-id", "Values": [resources[0]["c7n:CopiedSnapshot"]]}
]
)[
"Tags"
]
tags = {t["Key"]: t["Value"] for t in tags}
self.assertEqual(tags["ASV"], "RoadKill")
class SnapshotAmiSnapshotTest(BaseTest):
def test_snapshot_ami_snapshot_filter(self):
self.patch(CopySnapshot, "executor_factory", MainThreadExecutor)
# DEFAULT_REGION needs to be set to west for recording
factory = self.replay_flight_data("test_ebs_ami_snapshot_filter")
# first case should return only resources that are ami snapshots
p = self.load_policy(
{
"name": "ami-snap-filter",
"resource": "ebs-snapshot",
"filters": [{"type": "skip-ami-snapshots", "value": False}],
},
session_factory=factory,
)
resources = p.run()
self.assertEqual(len(resources), 3)
# second case should return resources that are NOT ami snapshots
policy = self.load_policy(
{
"name": "non-ami-snap-filter",
"resource": "ebs-snapshot",
"filters": [{"type": "skip-ami-snapshots", "value": True}],
},
session_factory=factory,
)
resources = policy.run()
self.assertEqual(len(resources), 2)
class SnapshotUnusedTest(BaseTest):
def test_snapshot_unused(self):
factory = self.replay_flight_data("test_ebs_snapshot_unused")
p = self.load_policy(
{
"name": "snap-unused",
"resource": "ebs-snapshot",
"filters": [{"type": "unused", "value": True}],
},
session_factory=factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
policy = self.load_policy(
{
"name": "snap-used",
"resource": "ebs-snapshot",
"filters": [{"type": "unused", "value": False}],
},
session_factory=factory,
)
resources = policy.run()
self.assertEqual(len(resources), 2)
class SnapshotTrimTest(BaseTest):
def test_snapshot_trim(self):
factory = self.replay_flight_data("test_ebs_snapshot_delete")
p = self.load_policy(
{
"name": "snapshot-trim",
"resource": "ebs-snapshot",
"filters": [{"tag:InstanceId": "not-null"}],
"actions": ["delete"],
},
session_factory=factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
class AttachedInstanceTest(BaseTest):
def test_ebs_instance_filter(self):
factory = self.replay_flight_data("test_ebs_instance_filter")
p = self.load_policy(
{
"name": "attached-instance-test",
"resource": "ebs",
"filters": [
{"type": "instance", "key": "tag:Name", "value": "CompiledLambda"}
],
},
session_factory=factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
class ResizeTest(BaseTest):
def test_resize_action(self):
factory = self.replay_flight_data("test_ebs_modifyable_action")
client = factory().client("ec2")
        # Change a volume from 32 GB gp2 with 100 IOPS (size-based) to
        # 64 GB and 500 IOPS.
vol_id = "vol-0073dcd216489ea1b"
p = self.load_policy(
{
"name": "resizable",
"resource": "ebs",
"filters": ["modifyable", {"VolumeId": vol_id}],
"actions": [
{
"type": "modify",
"volume-type": "io1",
"size-percent": 200,
"iops-percent": 500,
}
],
},
session_factory=factory,
)
resources = p.run()
self.assertEqual(resources[0]["Iops"], 100)
self.assertEqual(resources[0]["Size"], 32)
vol = client.describe_volumes(VolumeIds=[vol_id])["Volumes"][0]
self.assertEqual(vol["Iops"], 500)
self.assertEqual(vol["Size"], 64)
def test_resize_filter(self):
# precondition, 6 volumes, 4 not modifyable.
factory = self.replay_flight_data("test_ebs_modifyable_filter")
output = self.capture_logging("custodian.filters", level=logging.DEBUG)
p = self.load_policy(
{"name": "resizable", "resource": "ebs", "filters": ["modifyable"]},
session_factory=factory,
)
resources = p.run()
self.assertEqual(
{r["VolumeId"] for r in resources},
{"vol-0073dcd216489ea1b", "vol-0e4cba7adc4764f79"},
)
self.assertEqual(
output.getvalue().strip(),
("filtered 4 of 6 volumes due to [('instance-type', 2), "
"('vol-mutation', 1), ('vol-type', 1)]")
)
class CopyInstanceTagsTest(BaseTest):
def test_copy_instance_tags(self):
        # More a functional/coverage test than a unit test.
self.patch(CopyInstanceTags, "executor_factory", MainThreadExecutor)
factory = self.replay_flight_data("test_ebs_copy_instance_tags")
volume_id = "vol-2b047792"
results = factory().client("ec2").describe_tags(
Filters=[{"Name": "resource-id", "Values": [volume_id]}]
)[
"Tags"
]
tags = {t["Key"]: t["Value"] for t in results}
self.assertEqual(tags, {})
policy = self.load_policy(
{
"name": "test-copy-instance-tags",
"resource": "ebs",
"actions": [{"type": "copy-instance-tags", "tags": ["Name"]}],
},
config={"region": "us-west-2"},
session_factory=factory,
)
policy.run()
results = factory().client("ec2").describe_tags(
Filters=[{"Name": "resource-id", "Values": [volume_id]}]
)[
"Tags"
]
tags = {t["Key"]: t["Value"] for t in results}
self.assertEqual(tags["Name"], "CompileLambda")
class VolumePostFindingTest(BaseTest):
def test_volume_post_finding(self):
factory = self.replay_flight_data('test_ebs_snapshot')
p = self.load_policy({
'name': 'vol-finding',
'resource': 'aws.ebs',
'actions': [{
'type': 'post-finding',
'types': [
'Software and Configuration Checks/OrgStandard/abc-123']}]},
session_factory=factory)
resources = p.resource_manager.resources()
rfinding = p.resource_manager.actions[0].format_resource(
resources[0])
self.maxDiff = None
self.assertEqual(
rfinding,
{'Details': {
'AwsEc2Volume': {
'Attachments': [{'AttachTime': '2017-03-28T14:55:28+00:00',
'DeleteOnTermination': True,
'InstanceId': 'i-0a0b51bcf11a8cdfb',
'Status': 'attached'}],
'CreateTime': '2017-03-28T14:55:28.486000+00:00',
'Size': 8,
'SnapshotId': 'snap-037f1f9e6c8ea4d65'}},
'Id': 'arn:aws:ec2:us-east-1:644160558196:volume/vol-01adbb6a4f175941d',
'Partition': 'aws',
'Region': 'us-east-1',
'Type': 'AwsEc2Volume'})
shape_validate(
rfinding['Details']['AwsEc2Volume'],
'AwsEc2VolumeDetails', 'securityhub')
class VolumeSnapshotTest(BaseTest):
def test_volume_snapshot(self):
factory = self.replay_flight_data("test_ebs_snapshot")
policy = self.load_policy(
{
"name": "test-ebs-snapshot",
"resource": "ebs",
"filters": [{"VolumeId": "vol-01adbb6a4f175941d"}],
"actions": ["snapshot"],
},
session_factory=factory,
)
policy.run()
snapshot_data = factory().client("ec2").describe_snapshots(
Filters=[{"Name": "volume-id", "Values": ["vol-01adbb6a4f175941d"]}]
)
self.assertEqual(len(snapshot_data["Snapshots"]), 1)
def test_volume_snapshot_copy_tags(self):
factory = self.replay_flight_data("test_ebs_snapshot_copy_tags")
policy = self.load_policy(
{
"name": "ebs-test-snapshot",
"resource": "ebs",
"filters": [{"VolumeId": "vol-0252f61378ede9d01"}],
"actions": [{"type": "snapshot", "copy-tags": ['Name', 'Stage']}]
},
session_factory=factory,
)
resources = policy.run()
self.assertEqual(len(resources), 1)
snapshot_data = factory().client("ec2").describe_snapshots(
Filters=[{"Name": "volume-id", "Values": ["vol-0252f61378ede9d01"]}]
)
rtags = {t['Key']: t['Value'] for t in resources[0]['Tags']}
rtags.pop('DoNotCopy')
rtags['custodian_snapshot'] = ''
for s in snapshot_data['Snapshots']:
self.assertEqual(rtags, {t['Key']: t['Value'] for t in s['Tags']})
def test_volume_snapshot_copy_volume_tags(self):
factory = self.replay_flight_data("test_ebs_snapshot_copy_volume_tags")
policy = self.load_policy(
{
"name": "ebs-test-snapshot",
"resource": "ebs",
"filters": [{"VolumeId": "vol-0252f61378ede9d01"}],
"actions": [{"type": "snapshot",
"copy-volume-tags": False,
"tags": {'test-tag': 'custodian'}}]
},
session_factory=factory,
)
resources = policy.run()
self.assertEqual(len(resources), 1)
snapshot_data = factory().client("ec2").describe_snapshots(
Filters=[{"Name": "volume-id", "Values": ["vol-0252f61378ede9d01"]}]
)
for s in snapshot_data['Snapshots']:
self.assertEqual({'test-tag': 'custodian'}, {t['Key']: t['Value'] for t in s['Tags']})
class VolumeDeleteTest(BaseTest):
def test_volume_delete_force(self):
self.patch(Delete, "executor_factory", MainThreadExecutor)
factory = self.replay_flight_data("test_ebs_force_delete")
policy = self.load_policy(
{
"name": "test-ebs",
"resource": "ebs",
"filters": [{"VolumeId": "vol-d0790258"}],
"actions": [{"type": "delete", "force": True}],
},
session_factory=factory,
)
resources = policy.run()
try:
factory().client("ec2").describe_volumes(
VolumeIds=[resources[0]["VolumeId"]]
)
except ClientError as e:
self.assertEqual(e.response["Error"]["Code"], "InvalidVolume.NotFound")
else:
self.fail("Volume still exists")
class EncryptExtantVolumesTest(BaseTest):
def test_encrypt_volumes(self):
self.patch(EncryptInstanceVolumes, "executor_factory", MainThreadExecutor)
session_factory = self.replay_flight_data("test_encrypt_volumes")
policy = self.load_policy(
{
"name": "ebs-remediate-attached",
"resource": "ebs",
"filters": [
{"Encrypted": False}, {"VolumeId": "vol-0f53c81b92b4ecfce"}
],
"actions": [
{
"type": "encrypt-instance-volumes",
"delay": 0.001,
"key": "alias/encryptebs",
}
],
},
session_factory=session_factory,
)
resources = policy.run()
self.assertEqual(len(resources), 1)
for r in resources:
volumes = session_factory().client("ec2").describe_volumes(
Filters=[
{
"Name": "attachment.instance-id",
"Values": [r["Attachments"][0]["InstanceId"]],
}
]
)
for v in volumes["Volumes"]:
self.assertTrue(v["Attachments"][0]["DeleteOnTermination"])
self.assertTrue(v["Encrypted"])
if "Tags" in v:
self.assertNotIn(
"maid-crypt-remediation", [i["Key"] for i in v["Tags"]]
)
self.assertNotIn(
"maid-origin-volume", [i["Key"] for i in v["Tags"]]
)
self.assertNotIn(
"maid-instance-device", [i["Key"] for i in v["Tags"]]
)
class TestKmsAlias(BaseTest):
def test_ebs_kms_alias(self):
session_factory = self.replay_flight_data("test_ebs_aws_managed_kms_keys")
p = self.load_policy(
{
"name": "ebs-aws-managed-kms-keys-filters",
"resource": "ebs",
"filters": [
{
"type": "kms-alias",
"key": "AliasName",
"value": "^(alias/aws/)",
"op": "regex",
}
],
},
config={"region": "us-west-2"},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
self.assertEqual(resources[0]["VolumeId"], "vol-14a3cd9d")
class EbsFaultToleranceTest(BaseTest):
def test_ebs_fault_tolerant(self):
session = self.replay_flight_data("test_ebs_fault_tolerant")
policy = self.load_policy(
{
"name": "ebs-fault-tolerant",
"resource": "ebs",
"filters": ["fault-tolerant"],
},
session_factory=session,
)
resources = policy.run()
self.assertEqual(len(resources), 1)
self.assertEqual(resources[0]["VolumeId"], "vol-c5eaa459")
def test_ebs_non_fault_tolerant(self):
session = self.replay_flight_data("test_ebs_non_fault_tolerant")
policy = self.load_policy(
{
"name": "ebs-non-fault-tolerant",
"resource": "ebs",
"filters": [{"type": "fault-tolerant", "tolerant": False}],
},
session_factory=session,
)
resources = policy.run()
self.assertEqual(len(resources), 1)
self.assertEqual(resources[0]["VolumeId"], "vol-abdb8d37")
class PiopsMetricsFilterTest(BaseTest):
def test_ebs_metrics_percent_filter(self):
session = self.replay_flight_data("test_ebs_metrics_percent_filter")
policy = self.load_policy(
{
"name": "ebs-unused-piops",
"resource": "ebs",
"filters": [
{
"type": "metrics",
"name": "VolumeConsumedReadWriteOps",
"op": "lt",
"value": 50,
"statistics": "Maximum",
"days": 1,
"percent-attr": "Iops",
}
],
},
session_factory=session,
)
resources = policy.run()
self.assertEqual(len(resources), 1)
class HealthEventsFilterTest(BaseTest):
def test_ebs_health_events_filter(self):
session_factory = self.replay_flight_data("test_ebs_health_events_filter")
policy = self.load_policy(
{
"name": "ebs-health-events-filter",
"resource": "ebs",
"filters": [{"type": "health-event", "types": ["AWS_EBS_VOLUME_LOST"]}],
},
session_factory=session_factory,
)
resources = policy.run()
self.assertEqual(len(resources), 1)
for r in resources:
self.assertTrue(
("c7n:HealthEvent" in r) and
("Description" in e for e in r["c7n:HealthEvent"])
)
|
capitalone/cloud-custodian
|
tests/test_ebs.py
|
Python
|
apache-2.0
| 25,361
| 0.000789
|
# -*- coding: utf-8 -*-
__all__ = ['host_blueprint']
from datetime import datetime, timedelta
# dateutil
from dateutil.parser import parse as dtparse
# flask
from flask import (
Flask, request, session, g,
redirect, url_for, abort,
render_template, flash, jsonify,
Blueprint, abort,
send_from_directory,
current_app,
)
# flask login
from flask.ext.login import login_required, fresh_login_required, current_user
# flask-wtf
from flask.ext.wtf import Form
from wtforms import validators
from wtforms import TextField, PasswordField, SelectField, BooleanField
from wtforms_html5 import EmailField
# requests
import requests
from requests.auth import HTTPBasicAuth
# model
from model.db import db
from model.db import object_to_dict, objects_to_list, update_object_with_dict
from model.user import UserAccount, UserQuota
from model.host import Host
host_blueprint = Blueprint('host_blueprint', __name__)
@host_blueprint.route('/hosts', methods=['GET'])
@login_required
def host_hosts():
username = current_user.username
print 'host_hosts:', locals()
# get user account properties
user_account = UserAccount.query.filter_by(username=username).one()
dct = object_to_dict(user_account)
return render_template(
'host-hosts.html',
**dct
)
@host_blueprint.route('/hosts/all', methods=['POST'])
@login_required
def host_hosts_all():
username = current_user.username
usertype = current_user.usertype
print 'host_hosts_all:', locals()
if usertype != 'super':
data = {}
return jsonify(data)
hosts = Host.query.all()
_hosts = objects_to_list(hosts)
data = {
'hosts': _hosts,
}
return jsonify(data)
@host_blueprint.route('/host/create', methods=['POST'])
@login_required
def host_create():
username = current_user.username
usertype = current_user.usertype
_host = request.json['host']
print 'host_add:', locals()
if usertype != 'super':
data = {}
return jsonify(data)
name = _host['name']
host = _host['host']
port = _host['port']
auth_username = _host['auth_username']
auth_password = _host['auth_password']
ram_capacity = _host['ram_capacity']
ram_reserved = _host['ram_reserved']
if '[' in name and '-' in name and ']' in name and \
'[' in host and '-' in host and ']' in host:
_hosts = []
hosts = []
# name base/range
s = name.find('[')
e = name.find(']')
name_base = name[:s]
name_range = name[s + 1:e]
name_range = name_range.strip(' ').strip()
name_range = map(int, name_range.split('-'))
name_range[1] += 1
# host base/range
s = host.find('[')
e = host.find(']')
host_base = host[:s]
host_range = host[s + 1:e]
host_range = host_range.strip(' ').strip()
host_range = map(int, host_range.split('-'))
host_range[1] += 1
for i, j in zip(range(*name_range), range(*host_range)):
__host = {
'name': '%s%i' % (name_base, i),
'host': '%s%i' % (host_base, j),
'port': port,
'auth_username': auth_username,
'auth_password': auth_password,
'ram_capacity': ram_capacity,
'ram_reserved': ram_reserved,
}
__host['created'] = __host['updated'] = datetime.utcnow()
host = Host(**__host)
db.session.add(host)
hosts.append(host)
db.session.commit()
for host in hosts:
__host = object_to_dict(host)
_hosts.append(__host)
data = {
'hosts': _hosts,
}
else:
_host['created'] = _host['updated'] = datetime.utcnow()
host = Host(**_host)
db.session.add(host)
db.session.commit()
_host = object_to_dict(host)
data = {
'host': _host,
}
return jsonify(data)
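# Hedged sketch (hypothetical helper, not part of the original blueprint):
# illustrates the "[start-end]" expansion performed inline by host_create
# above, e.g. "node[1-3]" -> ["node1", "node2", "node3"].
def expand_bracket_range(pattern):
    s = pattern.find('[')
    e = pattern.find(']')
    base = pattern[:s]
    lo, hi = map(int, pattern[s + 1:e].strip().split('-'))
    return ['%s%d' % (base, i) for i in range(lo, hi + 1)]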
@host_blueprint.route('/host/update', methods=['POST'])
@login_required
def host_update():
username = current_user.username
usertype = current_user.usertype
_host = request.json['host']
print 'host_update:', locals()
if usertype != 'super':
data = {}
return jsonify(data)
host = Host.query.get(_host['id'])
assert host is not None
_host['updated'] = datetime.utcnow()
update_object_with_dict(host, _host)
db.session.commit()
_host = object_to_dict(host)
data = {
'host': _host,
}
return jsonify(data)
@host_blueprint.route('/host/remove', methods=['POST'])
@login_required
def host_remove():
username = current_user.username
usertype = current_user.usertype
id = request.json['id']
print 'host_remove:', locals()
if usertype != 'super':
data = {}
return jsonify(data)
host = Host.query.get(id)
assert host is not None
db.session.delete(host)
db.session.commit()
data = {}
return jsonify(data)
|
mtasic85/dockyard
|
host.py
|
Python
|
mit
| 5,231
| 0.006117
|
# -*- encoding:utf-8 -*-
"""
Trade execution proxy module
"""
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from contextlib import contextmanager
from functools import total_ordering
from enum import Enum
import numpy as np
import pandas as pd
from . import ABuTradeDrawer
from . import ABuTradeExecute
__author__ = '阿布'
__weixin__ = 'abu_quant'
class EOrderSameRule(Enum):
"""对order_pd中对order判断为是否相同使用的规则"""
"""order有相同的symbol和买入日期就认为是相同"""
ORDER_SAME_BD = 0
"""order有相同的symbol, 买入日期,和卖出日期,即不考虑价格,只要日期相同就相同"""
ORDER_SAME_BSD = 1
"""order有相同的symbol, 买入日期,相同的买入价格,即单子买入时刻都相同"""
ORDER_SAME_BDP = 2
"""order有相同的symbol, 买入日期, 买入价格, 并且相同的卖出日期和价格才认为是相同,即买入卖出时刻都相同"""
ORDER_SAME_BSPD = 3
@total_ordering
class AbuOrderPdProxy(object):
"""
    Wraps the pd.DataFrame built from trade orders; used externally to debug the
    trading results of factors and to hunt for problems in a trading strategy.
    Supports set-like union, intersection and difference of two orders_pd
    objects, as well as ==, !=, > and < comparisons, e.g.:
        orders_pd1 = AbuOrderPdProxy(orders_pd1)
        with orders_pd1.proxy_work(orders_pd2) as (order1, order2):
            a = order1 | order2 # union of the two trade results
            b = order1 & order2 # intersection of the two trade results
            c = order1 - order2 # difference (trades in order1 but not in order2)
            d = order2 - order1 # difference (trades in order2 but not in order1)
            eq = order1 == order2 # whether the two trade results are identical
            lg = order1 > order2 # whether order1 has more unique trades than order2
            lt = order1 < order2 # whether order1 has fewer unique trades than order2
"""
def __init__(self, orders_pd, same_rule=EOrderSameRule.ORDER_SAME_BSPD):
"""
        The constructor expects a pd.DataFrame object; no type checking is done yet
        :param orders_pd: pd.DataFrame of trade orders produced by a backtest
        :param same_rule: rule used to decide whether two orders are the same, defaults to EOrderSameRule.ORDER_SAME_BSPD,
                          i.e. orders must share symbol, buy date, sell date and prices to count as the same
        """
        # copy is needed because column attributes will be added to orders_pd
        self.orders_pd = orders_pd.copy()
        self.same_rule = same_rule
        # storage for the results of union, intersection and difference operations
self.op_result = None
self.last_op_metrics = {}
@contextmanager
def proxy_work(self, orders_pd):
"""
        Takes the orders_pd to compare against, wraps it in an AbuOrderPdProxy object,
        yields both proxies to the caller, then analyses op_result in one place
        :param orders_pd: pd.DataFrame of trade orders produced by a backtest
        :return:
        """
        # reset the result of set operations
        self.op_result = None
        # build the AbuOrderPdProxy object to compare against
other = AbuOrderPdProxy(orders_pd)
try:
yield self, other
finally:
if isinstance(self.op_result, pd.DataFrame):
                # if a union, intersection or difference result was stored,
from ..MetricsBu.ABuMetricsBase import AbuMetricsBase
metrics = AbuMetricsBase(self.op_result, None, None, None)
metrics.fit_metrics_order()
self.last_op_metrics['win_rate'] = metrics.win_rate
self.last_op_metrics['gains_mean'] = metrics.gains_mean
self.last_op_metrics['losses_mean'] = metrics.losses_mean
self.last_op_metrics['sum_profit'] = self.op_result['profit'].sum()
self.last_op_metrics['sum_profit_cg'] = self.op_result['profit_cg'].sum()
def __and__(self, other):
""" & 操作符的重载,计算两个交易集的交集"""
# self.op = 'intersection(order1 & order2)'
self.op_result = intersection_in_2orders(self.orders_pd, other.orders_pd, same_rule=self.same_rule)
return self.op_result
def __or__(self, other):
""" | 操作符的重载,计算两个交易集的并集"""
# self.op = 'union(order1 | order2)'
self.op_result = union_in_2orders(self.orders_pd, other.orders_pd)
return self.op_result
def __sub__(self, other):
""" - 操作符的重载,计算两个交易集的差集"""
self.op_result = difference_in_2orders(self.orders_pd, other.orders_pd, same_rule=self.same_rule)
return self.op_result
def __eq__(self, other):
""" == 操作符的重载,计算两个交易集的是否相同"""
return (self - other).empty and (other - self).empty
def __gt__(self, other):
""" > 操作符的重载,计算两个交易集的大小, 类被total_ordering装饰,可以支持lt等操作符"""
unique_cnt = find_unique_group_symbol(self.orders_pd).shape[0]
other_unique_cnt = find_unique_group_symbol(other.orders_pd).shape[0]
return unique_cnt > other_unique_cnt
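# Hedged usage sketch (hypothetical caller, not part of the original module):
# compares two backtest results; orders_pd_a and orders_pd_b are assumed to be
# non-empty orders_pd DataFrames produced elsewhere by the backtester and are
# expected to share at least some trades.
def _compare_two_backtests(orders_pd_a, orders_pd_b):
    proxy = AbuOrderPdProxy(orders_pd_a)
    with proxy.proxy_work(orders_pd_b) as (order1, order2):
        only_in_a = order1 - order2   # trades unique to the first run
        overlap = order1 & order2     # trades shared by both runs
    # after the with-block the metrics of the last set operation are cached
    return only_in_a, overlap, proxy.last_op_metrics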
def union_in_2orders(orders_pd, other_orders_pd):
"""
    Union: used when analysing factor or parameter problems; when debugging a strategy it
    collects all distinct trades from the two orders_pd objects. Note that two trades are
    not treated as identical merely because the same stock was bought on the same trading
    day; the two orders_pd are simply concatenated and drop_duplicates removes fully
    identical orders, i.e. the result is the union:
    orders_pd | cmp_orders_pd or orders_pd.union(cmp_orders_pd)
    :param orders_pd: pd.DataFrame of trade orders produced by a backtest
    :param other_orders_pd: pd.DataFrame of trade orders produced by a backtest
:return: orders_pd | cmp_orders_pd
"""
orders_pd = orders_pd.append(other_orders_pd)
orders_pd = orders_pd.drop_duplicates()
return orders_pd
def _same_pd(order, other_orders_pd, same_rule):
"""
    Return the rows of other_orders_pd that match the given order under same_rule
    :param order: one order row taken from orders_pd
    :param other_orders_pd: pd.DataFrame of trade orders produced by a backtest
    :param same_rule: rule used to decide whether two orders are the same
    :return: the rows of other_orders_pd considered the same as the given order
"""
symbol = order.symbol
buy_day = order['buy_date']
buy_price = order['buy_price']
sell_day = order['sell_date']
sell_price = order['sell_price']
if same_rule == EOrderSameRule.ORDER_SAME_BD:
        # match only on buy date and symbol, i.e. buying the same stock on the same trading day makes two trades the same; every other field of the order is ignored
same_pd = other_orders_pd[(other_orders_pd['symbol'] == symbol) & (other_orders_pd['buy_date'] == buy_day)]
elif same_rule == EOrderSameRule.ORDER_SAME_BSD:
        # match on buy date, sell date and symbol
same_pd = other_orders_pd[(other_orders_pd['symbol'] == symbol) & (other_orders_pd['buy_date'] == buy_day)
& (other_orders_pd['sell_date'] == sell_day)]
elif same_rule == EOrderSameRule.ORDER_SAME_BDP:
        # match on buy date, buy price and symbol
same_pd = other_orders_pd[(other_orders_pd['symbol'] == symbol) & (other_orders_pd['buy_date'] == buy_day)
& (other_orders_pd['buy_price'] == buy_price)]
elif same_rule == EOrderSameRule.ORDER_SAME_BSPD:
        # match on buy date, sell date, buy price, sell price and symbol
same_pd = other_orders_pd[(other_orders_pd['symbol'] == symbol) & (other_orders_pd['buy_date'] == buy_day)
& (other_orders_pd['sell_date'] == sell_day)
& (other_orders_pd['buy_price'] == buy_price)
& (other_orders_pd['sell_price'] == sell_price)]
else:
raise TypeError('same_rule type is {}!!'.format(same_rule))
return same_pd
def intersection_in_2orders(orders_pd, other_orders_pd, same_rule=EOrderSameRule.ORDER_SAME_BSPD):
"""
    Intersection: used when analysing factor or parameter problems; when debugging a
    strategy it selects the trades that appear in both orders_pd objects,
    i.e. the result is the intersection: orders_pd & cmp_orders_pd or orders_pd.intersection(cmp_orders_pd)
    :param orders_pd: pd.DataFrame of trade orders produced by a backtest
    :param other_orders_pd: pd.DataFrame of trade orders produced by a backtest
    :param same_rule: rule used to decide whether two orders are the same, defaults to EOrderSameRule.ORDER_SAME_BSPD,
                      i.e. orders must share symbol, buy date, sell date and prices to count as the same
:return: orders_pd & cmp_orders_pd
"""
def _intersection(order):
same_pd = _same_pd(order, other_orders_pd, same_rule)
if same_pd.empty:
            # empty means the order does not intersect
return False
        # it intersects; intersection=1 marks it as part of the intersection
return True
orders_pd['intersection'] = orders_pd.apply(_intersection, axis=1)
return orders_pd[orders_pd['intersection'] == 1]
def difference_in_2orders(orders_pd, other_orders_pd, same_rule=EOrderSameRule.ORDER_SAME_BSPD):
"""
    Difference: used when analysing factor or parameter problems; when debugging a
    strategy it selects the trades that differ between the two orders_pd objects.
    Note that the result contains the trades present in orders_pd but absent from
    cmp_orders_pd, i.e. the difference: orders_pd - cmp_orders_pd or orders_pd.difference(cmp_orders_pd)
    :param orders_pd: pd.DataFrame of trade orders produced by a backtest
    :param other_orders_pd: pd.DataFrame of trade orders produced by a backtest
    :param same_rule: rule used to decide whether two orders are the same, defaults to EOrderSameRule.ORDER_SAME_BSPD,
                      i.e. orders must share symbol, buy date, sell date and prices to count as the same
:return: orders_pd - cmp_orders_pd
"""
def _difference(order):
same_pd = _same_pd(order, other_orders_pd, same_rule)
if same_pd.empty:
            # no matching order, so it belongs to the difference
return True
        # a matching order exists, so it is not part of the difference
return False
orders_pd['difference'] = orders_pd.apply(_difference, axis=1)
return orders_pd[orders_pd['difference'] == 1]
def find_unique_group_symbol(order_pd):
"""
    After grouping by 'buy_date' and 'symbol', keep only the first row of each group, same_group.iloc[0]
:param order_pd:
:return:
"""
def _find_unique_group_symbol(same_group):
        # keep only the first row of the group, i.e. only one order per stock per trading day
return same_group.iloc[0]
    # group by 'buy_date' and 'symbol', then apply the handler to each group
order_pds = order_pd.groupby(['buy_date', 'symbol']).apply(_find_unique_group_symbol)
return order_pds
def find_unique_symbol(order_pd, same_rule=EOrderSameRule.ORDER_SAME_BSPD):
"""
    Filter out orders where a single symbol has multiple trade records on the same buy_date.
    Note: to keep one record out of such duplicates instead, use find_unique_group_symbol
    :param order_pd: pd.DataFrame of trade orders produced by a backtest
    :param same_rule: rule used to decide whether two orders are the same, defaults to EOrderSameRule.ORDER_SAME_BSPD,
                      i.e. orders must share symbol, buy date, sell date and prices to count as the same
"""
def _find_unique_symbol(order):
"""根据order的symbol和buy_date在原始order_pd中进行复合条件筛选,结果same_pd如果只有1个就唯一,否则就是重复的"""
same_pd = _same_pd(order, order_pd, same_rule)
if same_pd.empty or same_pd.shape[0] == 1:
return False
        # when a symbol has multiple records on the same day, none of them are kept; all are filtered out
return True
same_mark = order_pd.apply(_find_unique_symbol, axis=1)
return order_pd[same_mark == 0]
def trade_summary(orders, kl_pd, draw=False, show_info=True):
"""
    Mainly converts a sequence of AbuOrder objects into the pd.DataFrame orders_pd,
    converts the time-ordered orders into a sequence of trade actions, optionally draws
    a detailed chart for each trade, and produces a simple textual summary
    :param orders: sequence of AbuOrder objects
    :param kl_pd: financial time series, a pd.DataFrame object
    :param draw: whether to visualise the trade details chart
    :param show_info: whether to output the textual trade summary
"""
    # convert the sequence of AbuOrder objects into the pd.DataFrame orders_pd
orders_pd = ABuTradeExecute.make_orders_pd(orders, kl_pd)
    # convert the time-ordered orders into a sequence of trade actions
action_pd = ABuTradeExecute.transform_action(orders_pd)
summary = ''
if draw:
        # draw the detailed trade chart for every trade
ABuTradeDrawer.plot_his_trade(orders, kl_pd)
if show_info:
        # "simple" means trading fees are not taken into account
simple_profit = 'simple profit: {} \n'.format(ABuTradeExecute.calc_simple_profit(orders, kl_pd))
summary += simple_profit
        # expected profit per winning trade
mean_win_profit = 'mean win profit {} \n'.format(np.mean(orders_pd[orders_pd.result == 1]['profit']))
summary += mean_win_profit
        # expected loss per losing trade
mean_loss_profit = 'mean loss profit {} \n'.format(np.mean(orders_pd[orders_pd.result == -1]['profit']))
summary += mean_loss_profit
        # number of winning trades
win_cnt = 0 if len(orders_pd[orders_pd.result == 1].result.value_counts().values) <= 0 else \
orders_pd[orders_pd.result == 1].result.value_counts().values[0]
        # number of losing trades
loss_cnt = 0 if len(orders_pd[orders_pd.result == -1].result.value_counts().values) <= 0 else \
orders_pd[orders_pd.result == -1].result.value_counts().values[0]
        # win rate
win_rate = 'win rate ' + str('*@#')
if win_cnt + loss_cnt > 0:
win_rate = 'win rate: {}%'.format(float(win_cnt) / float(float(loss_cnt) + float(win_cnt)))
summary += win_rate
return orders_pd, action_pd, summary
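# Hedged usage sketch (hypothetical caller, not part of the original module):
# ``orders`` is assumed to be a sequence of AbuOrder objects and ``kl_pd`` the
# matching kline pd.DataFrame, both produced elsewhere by the backtester.
def print_trade_summary(orders, kl_pd):
    orders_pd, action_pd, summary = trade_summary(orders, kl_pd, draw=False, show_info=True)
    print(summary)
    return orders_pd, action_pd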
|
bbfamily/abu
|
abupy/TradeBu/ABuTradeProxy.py
|
Python
|
gpl-3.0
| 14,496
| 0.001752
|
from tabulate import tabulate
class Response():
    message = None
    data = None
    def print(self):
        if self.message:
            if isinstance(self.message, str):
                print(self.message)
            elif isinstance(self.message, list):
                for message in self.message:
                    print("{}\n".format(message))
if (self.data):
if len(self.data["rows"]) > 0:
print(tabulate(self.data["rows"], headers=self.data["headers"]))
else:
print("Empty!")
|
mozey/taskmage
|
taskmage/response.py
|
Python
|
mit
| 557
| 0.008977
|
import rply
from ..lexer import lexers
__all__ = ('parsers',)
class Parsers(object):
def __init__(self):
self._fpg = None
self._fp = None
self._spg = None
self._sp = None
@property
def fpg(self):
if self._fpg is None:
self._fpg = rply.ParserGenerator(
[rule.name for rule in lexers.flg.rules],
precedence=[]
)
return self._fpg
@property
def fp(self):
if self._fp is None:
self._fp = self.fpg.build()
return self._fp
@property
def spg(self):
if self._spg is None:
self._spg = rply.ParserGenerator(
[rule.name for rule in lexers.slg.rules],
precedence=[]
)
return self._spg
@property
def sp(self):
if self._sp is None:
self._sp = self.spg.build()
return self._sp
parsers = Parsers()
# Load productions
from .filter import fpg # noqa
from .structure import spg # noqa
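# Hedged usage sketch (hypothetical, not part of the package): the parser
# generators above are built lazily, so the first attribute access pays the
# construction cost and later accesses reuse the cached object.
def _demo_lazy_parsers():
    first = parsers.fp    # builds the filter parser on first access
    second = parsers.fp   # returns the cached instance
    assert first is second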
|
funkybob/rattle
|
rattle/parser/__init__.py
|
Python
|
mit
| 1,048
| 0
|
# -*- coding: utf-8 -*-
# (C) 2017 Muthiah Annamalai
# This file is part of open-tamil examples
# This code is released under public domain
import joblib
# Ref API help from : https://scikit-learn.org
import numpy as np
import random
import string
import time
from sklearn.metrics import accuracy_score
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.model_selection import train_test_split
from sklearn.neural_network import MLPClassifier
from sklearn.preprocessing import StandardScaler
# project modules
from classifier_eng_vs_ta import jaffna_transliterate
from preprocess import Feature
def data1(filename):
x = np.loadtxt(open(filename, "r"), delimiter=",")
y = np.ones(shape=(x.shape[0], 1))
return (x, y)
def data0(filename):
x = np.loadtxt(open(filename, "r"), delimiter=",")
y = np.zeros(shape=(x.shape[0], 1))
return (x, y)
DEBUG = False
x1, y1 = data1("tamilvu_dictionary_words.txt.csv")
x0, y0 = data0("english_dictionary_words.jaffna.csv")
az_x0, az_y0 = data0("english_dictionary_words.azhagi.csv")
cm_x0, cm_y0 = data0("english_dictionary_words.combinational.csv")
x1 = x1.take(range(0, x0.shape[0]), axis=0)
y1 = np.ones((x0.shape[0], 1))
## Scale the data for the training
X = np.concatenate((x0, x1), axis=0)
Y = np.concatenate((y0, y1), axis=0)
# Y = Y.take(range(0,X.shape[0]),axis=0).ravel()
Y = Y.ravel()
X_train, X_test, Y_train, Y_test = train_test_split(X, Y)
scaler = StandardScaler()
scaler.fit(X_train)
joblib.dump(scaler, "test_scaler.pkl") # scaler Dump for webapps
print("Size of Training set => %d" % X_train.shape[0])
print("Size of Test set => %d" % X_test.shape[0])
X_train = scaler.transform(X_train)
X_test = scaler.transform(X_test)
###########
## Build training set for the model
## solver='sgd',activation='logistic',
## We have a 4-layer model
nn = MLPClassifier(hidden_layer_sizes=(15, 15, 10, 5),
activation='logistic',
max_iter=100000, alpha=0.01, solver='lbfgs')
# Try 1-layer simple model with logistic activation
# nn = MLPClassifier(
# hidden_layer_sizes=(8, 8, 7), solver="lbfgs"
# ) # activation='logistic',max_iter=1000,early_stopping=True,solver='lbfgs')
# max_iter=500,solver='sgd',activation='logistic')
print(nn)
nn.fit(X_train, Y_train)
joblib.dump(
nn, "nn-%s.pkl" % time.ctime()
) # change dump name to test_nn.pkl for webapps
Y_pred = nn.predict(X_test)
print(" accuracy => ", accuracy_score(Y_pred.ravel(), Y_test))
score = nn.score(X_test, Y_test)
print("Score => ")
print(score)
print(confusion_matrix(Y_test, Y_pred.ravel()))
print(classification_report(Y_test, Y_pred.ravel()))
def process_word(s):
if any([l in string.ascii_lowercase for l in s]):
s = jaffna_transliterate(s)
print(u"Transliterated to %s" % s)
print(u"Checking in NN '%s'" % s)
try:
f = Feature.get(s)
scaled_feature = scaler.transform(np.array(f.data()).reshape(1, -1))
y = nn.predict(scaled_feature)
print(scaled_feature)
print(y)
if y.ravel() > 0:
print(u"%s -> TAMIL world (most likely)" % s)
else:
print(u"%s -> ENG word (most likely)" % s)
except Exception as ioe:
print("SKIPPING => ", ioe.message)
return
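# Hedged sketch (hypothetical helper, not part of the original script): shows how
# the scaler dumped above could be reloaded elsewhere; the model file name is an
# assumption, since the dump above is timestamped ("nn-%s.pkl" % time.ctime()).
def load_classifier(scaler_path="test_scaler.pkl", model_path="test_nn.pkl"):
    return joblib.load(scaler_path), joblib.load(model_path)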
for w in [
u"hello",
u"ஆரொன்",
u"உகந்த",
u"கம்புயுடர்",
u"கம்ப்யூட்டர்",
u"பியூடிபுல்",
"pupil",
"beautiful",
"summer",
"sinful",
"google",
"facebook",
"microsoft",
"swift",
]:
process_word(w)
while True:
s = input(u">> ").decode("utf-8")
s = s.strip().lower()
if s == "end":
break
if len(s) < 1:
continue
process_word(s)
|
Ezhil-Language-Foundation/open-tamil
|
examples/classifier/modelprocess2.py
|
Python
|
mit
| 3,817
| 0.001339
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import gapic_v1 # type: ignore
from google.api_core import grpc_helpers_async # type: ignore
from google.api_core import operations_v1 # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import packaging.version
import grpc # type: ignore
from grpc.experimental import aio # type: ignore
from google.cloud.aiplatform_v1beta1.types import study
from google.cloud.aiplatform_v1beta1.types import study as gca_study
from google.cloud.aiplatform_v1beta1.types import vizier_service
from google.longrunning import operations_pb2 # type: ignore
from google.protobuf import empty_pb2 # type: ignore
from .base import VizierServiceTransport, DEFAULT_CLIENT_INFO
from .grpc import VizierServiceGrpcTransport
class VizierServiceGrpcAsyncIOTransport(VizierServiceTransport):
"""gRPC AsyncIO backend transport for VizierService.
Vertex Vizier API.
Vizier service is a GCP service to solve blackbox optimization
problems, such as tuning machine learning hyperparameters and
searching over deep learning architectures.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_grpc_channel: aio.Channel
_stubs: Dict[str, Callable] = {}
@classmethod
def create_channel(
cls,
host: str = "aiplatform.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs,
) -> aio.Channel:
"""Create and return a gRPC AsyncIO channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
aio.Channel: A gRPC AsyncIO channel object.
"""
return grpc_helpers_async.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
quota_project_id=quota_project_id,
default_scopes=cls.AUTH_SCOPES,
scopes=scopes,
default_host=cls.DEFAULT_HOST,
**kwargs,
)
def __init__(
self,
*,
host: str = "aiplatform.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
channel: aio.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id=None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
channel (Optional[aio.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for grpc channel. It is ignored if ``channel`` is provided.
client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
A callback to provide client certificate bytes and private key bytes,
both in PEM format. It is used to configure mutual TLS channel. It is
ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
Raises:
google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
creation failed for any reason.
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
self._operations_client = None
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn("client_cert_source is deprecated", DeprecationWarning)
if channel:
# Ignore credentials if a channel was passed.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
else:
if api_mtls_endpoint:
host = api_mtls_endpoint
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
self._ssl_channel_credentials = SslCredentials().ssl_credentials
else:
if client_cert_source_for_mtls and not ssl_channel_credentials:
cert, key = client_cert_source_for_mtls()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
# The base transport sets the host, credentials and scopes
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
always_use_jwt_access=always_use_jwt_access,
)
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
credentials=self._credentials,
credentials_file=credentials_file,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Wrap messages. This must be done after self._grpc_channel exists
self._prep_wrapped_messages(client_info)
@property
def grpc_channel(self) -> aio.Channel:
"""Create the channel designed to connect to this service.
This property caches on the instance; repeated calls return
the same channel.
"""
# Return the channel from cache.
return self._grpc_channel
@property
def operations_client(self) -> operations_v1.OperationsAsyncClient:
"""Create the client designed to process long-running operations.
This property caches on the instance; repeated calls return the same
client.
"""
# Sanity check: Only create a new client if we do not already have one.
if self._operations_client is None:
self._operations_client = operations_v1.OperationsAsyncClient(
self.grpc_channel
)
# Return the client from cache.
return self._operations_client
@property
def create_study(
self,
) -> Callable[[vizier_service.CreateStudyRequest], Awaitable[gca_study.Study]]:
r"""Return a callable for the create study method over gRPC.
Creates a Study. A resource name will be generated
after creation of the Study.
Returns:
Callable[[~.CreateStudyRequest],
Awaitable[~.Study]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "create_study" not in self._stubs:
self._stubs["create_study"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1beta1.VizierService/CreateStudy",
request_serializer=vizier_service.CreateStudyRequest.serialize,
response_deserializer=gca_study.Study.deserialize,
)
return self._stubs["create_study"]
@property
def get_study(
self,
) -> Callable[[vizier_service.GetStudyRequest], Awaitable[study.Study]]:
r"""Return a callable for the get study method over gRPC.
Gets a Study by name.
Returns:
Callable[[~.GetStudyRequest],
Awaitable[~.Study]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_study" not in self._stubs:
self._stubs["get_study"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1beta1.VizierService/GetStudy",
request_serializer=vizier_service.GetStudyRequest.serialize,
response_deserializer=study.Study.deserialize,
)
return self._stubs["get_study"]
@property
def list_studies(
self,
) -> Callable[
[vizier_service.ListStudiesRequest],
Awaitable[vizier_service.ListStudiesResponse],
]:
r"""Return a callable for the list studies method over gRPC.
Lists all the studies in a region for an associated
project.
Returns:
Callable[[~.ListStudiesRequest],
Awaitable[~.ListStudiesResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_studies" not in self._stubs:
self._stubs["list_studies"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1beta1.VizierService/ListStudies",
request_serializer=vizier_service.ListStudiesRequest.serialize,
response_deserializer=vizier_service.ListStudiesResponse.deserialize,
)
return self._stubs["list_studies"]
@property
def delete_study(
self,
) -> Callable[[vizier_service.DeleteStudyRequest], Awaitable[empty_pb2.Empty]]:
r"""Return a callable for the delete study method over gRPC.
Deletes a Study.
Returns:
Callable[[~.DeleteStudyRequest],
Awaitable[~.Empty]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "delete_study" not in self._stubs:
self._stubs["delete_study"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1beta1.VizierService/DeleteStudy",
request_serializer=vizier_service.DeleteStudyRequest.serialize,
response_deserializer=empty_pb2.Empty.FromString,
)
return self._stubs["delete_study"]
@property
def lookup_study(
self,
) -> Callable[[vizier_service.LookupStudyRequest], Awaitable[study.Study]]:
r"""Return a callable for the lookup study method over gRPC.
Looks a study up using the user-defined display_name field
instead of the fully qualified resource name.
Returns:
Callable[[~.LookupStudyRequest],
Awaitable[~.Study]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "lookup_study" not in self._stubs:
self._stubs["lookup_study"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1beta1.VizierService/LookupStudy",
request_serializer=vizier_service.LookupStudyRequest.serialize,
response_deserializer=study.Study.deserialize,
)
return self._stubs["lookup_study"]
@property
def suggest_trials(
self,
) -> Callable[
[vizier_service.SuggestTrialsRequest], Awaitable[operations_pb2.Operation]
]:
r"""Return a callable for the suggest trials method over gRPC.
Adds one or more Trials to a Study, with parameter values
suggested by Vertex Vizier. Returns a long-running operation
associated with the generation of Trial suggestions. When this
long-running operation succeeds, it will contain a
[SuggestTrialsResponse][google.cloud.ml.v1.SuggestTrialsResponse].
Returns:
Callable[[~.SuggestTrialsRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "suggest_trials" not in self._stubs:
self._stubs["suggest_trials"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1beta1.VizierService/SuggestTrials",
request_serializer=vizier_service.SuggestTrialsRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["suggest_trials"]
@property
def create_trial(
self,
) -> Callable[[vizier_service.CreateTrialRequest], Awaitable[study.Trial]]:
r"""Return a callable for the create trial method over gRPC.
Adds a user provided Trial to a Study.
Returns:
Callable[[~.CreateTrialRequest],
Awaitable[~.Trial]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "create_trial" not in self._stubs:
self._stubs["create_trial"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1beta1.VizierService/CreateTrial",
request_serializer=vizier_service.CreateTrialRequest.serialize,
response_deserializer=study.Trial.deserialize,
)
return self._stubs["create_trial"]
@property
def get_trial(
self,
) -> Callable[[vizier_service.GetTrialRequest], Awaitable[study.Trial]]:
r"""Return a callable for the get trial method over gRPC.
Gets a Trial.
Returns:
Callable[[~.GetTrialRequest],
Awaitable[~.Trial]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_trial" not in self._stubs:
self._stubs["get_trial"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1beta1.VizierService/GetTrial",
request_serializer=vizier_service.GetTrialRequest.serialize,
response_deserializer=study.Trial.deserialize,
)
return self._stubs["get_trial"]
@property
def list_trials(
self,
) -> Callable[
[vizier_service.ListTrialsRequest], Awaitable[vizier_service.ListTrialsResponse]
]:
r"""Return a callable for the list trials method over gRPC.
Lists the Trials associated with a Study.
Returns:
Callable[[~.ListTrialsRequest],
Awaitable[~.ListTrialsResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_trials" not in self._stubs:
self._stubs["list_trials"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1beta1.VizierService/ListTrials",
request_serializer=vizier_service.ListTrialsRequest.serialize,
response_deserializer=vizier_service.ListTrialsResponse.deserialize,
)
return self._stubs["list_trials"]
@property
def add_trial_measurement(
self,
) -> Callable[[vizier_service.AddTrialMeasurementRequest], Awaitable[study.Trial]]:
r"""Return a callable for the add trial measurement method over gRPC.
Adds a measurement of the objective metrics to a
Trial. This measurement is assumed to have been taken
before the Trial is complete.
Returns:
Callable[[~.AddTrialMeasurementRequest],
Awaitable[~.Trial]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "add_trial_measurement" not in self._stubs:
self._stubs["add_trial_measurement"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1beta1.VizierService/AddTrialMeasurement",
request_serializer=vizier_service.AddTrialMeasurementRequest.serialize,
response_deserializer=study.Trial.deserialize,
)
return self._stubs["add_trial_measurement"]
@property
def complete_trial(
self,
) -> Callable[[vizier_service.CompleteTrialRequest], Awaitable[study.Trial]]:
r"""Return a callable for the complete trial method over gRPC.
Marks a Trial as complete.
Returns:
Callable[[~.CompleteTrialRequest],
Awaitable[~.Trial]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "complete_trial" not in self._stubs:
self._stubs["complete_trial"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1beta1.VizierService/CompleteTrial",
request_serializer=vizier_service.CompleteTrialRequest.serialize,
response_deserializer=study.Trial.deserialize,
)
return self._stubs["complete_trial"]
@property
def delete_trial(
self,
) -> Callable[[vizier_service.DeleteTrialRequest], Awaitable[empty_pb2.Empty]]:
r"""Return a callable for the delete trial method over gRPC.
Deletes a Trial.
Returns:
Callable[[~.DeleteTrialRequest],
Awaitable[~.Empty]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "delete_trial" not in self._stubs:
self._stubs["delete_trial"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1beta1.VizierService/DeleteTrial",
request_serializer=vizier_service.DeleteTrialRequest.serialize,
response_deserializer=empty_pb2.Empty.FromString,
)
return self._stubs["delete_trial"]
@property
def check_trial_early_stopping_state(
self,
) -> Callable[
[vizier_service.CheckTrialEarlyStoppingStateRequest],
Awaitable[operations_pb2.Operation],
]:
r"""Return a callable for the check trial early stopping
state method over gRPC.
Checks whether a Trial should stop or not. Returns a
long-running operation. When the operation is successful, it
will contain a
[CheckTrialEarlyStoppingStateResponse][google.cloud.ml.v1.CheckTrialEarlyStoppingStateResponse].
Returns:
Callable[[~.CheckTrialEarlyStoppingStateRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "check_trial_early_stopping_state" not in self._stubs:
self._stubs[
"check_trial_early_stopping_state"
] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1beta1.VizierService/CheckTrialEarlyStoppingState",
request_serializer=vizier_service.CheckTrialEarlyStoppingStateRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["check_trial_early_stopping_state"]
@property
def stop_trial(
self,
) -> Callable[[vizier_service.StopTrialRequest], Awaitable[study.Trial]]:
r"""Return a callable for the stop trial method over gRPC.
Stops a Trial.
Returns:
Callable[[~.StopTrialRequest],
Awaitable[~.Trial]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "stop_trial" not in self._stubs:
self._stubs["stop_trial"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1beta1.VizierService/StopTrial",
request_serializer=vizier_service.StopTrialRequest.serialize,
response_deserializer=study.Trial.deserialize,
)
return self._stubs["stop_trial"]
@property
def list_optimal_trials(
self,
) -> Callable[
[vizier_service.ListOptimalTrialsRequest],
Awaitable[vizier_service.ListOptimalTrialsResponse],
]:
r"""Return a callable for the list optimal trials method over gRPC.
Lists the pareto-optimal Trials for multi-objective Study or the
optimal Trials for single-objective Study. The definition of
pareto-optimal can be checked in wiki page.
https://en.wikipedia.org/wiki/Pareto_efficiency
Returns:
Callable[[~.ListOptimalTrialsRequest],
Awaitable[~.ListOptimalTrialsResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_optimal_trials" not in self._stubs:
self._stubs["list_optimal_trials"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1beta1.VizierService/ListOptimalTrials",
request_serializer=vizier_service.ListOptimalTrialsRequest.serialize,
response_deserializer=vizier_service.ListOptimalTrialsResponse.deserialize,
)
return self._stubs["list_optimal_trials"]
__all__ = ("VizierServiceGrpcAsyncIOTransport",)
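# --- Illustrative usage sketch (not part of the generated transport module) ---
# A hedged example of driving one of the wrapped stubs directly through this
# transport. It assumes valid Application Default Credentials and a reachable
# regional endpoint; the project, location and study payload below are placeholders.
async def _example_create_study_via_transport():
    transport = VizierServiceGrpcAsyncIOTransport(
        host="us-central1-aiplatform.googleapis.com"
    )
    request = vizier_service.CreateStudyRequest(
        parent="projects/my-project/locations/us-central1",  # placeholder resource name
        study=gca_study.Study(display_name="example-study"),  # minimal placeholder payload
    )
    # `create_study` is a property returning a unary-unary callable; awaiting the
    # call yields the deserialized Study message.
    return await transport.create_study(request)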
|
sasha-gitg/python-aiplatform
|
google/cloud/aiplatform_v1beta1/services/vizier_service/transports/grpc_asyncio.py
|
Python
|
apache-2.0
| 29,300
| 0.001365
|
"""Response classes used by urllib.
The base class, addbase, defines a minimal file-like interface,
including read() and readline(). The typical response object is an
addinfourl instance, which defines an info() method that returns
headers and a geturl() method that returns the url.
"""
class addbase(object):
"""Base class for addinfo and addclosehook."""
# XXX Add a method to expose the timeout on the underlying socket?
def __init__(self, fp):
# TODO(jhylton): Is there a better way to delegate using io?
self.fp = fp
self.read = self.fp.read
self.readline = self.fp.readline
# TODO(jhylton): Make sure an object with readlines() is also iterable
if hasattr(self.fp, "readlines"):
self.readlines = self.fp.readlines
if hasattr(self.fp, "fileno"):
self.fileno = self.fp.fileno
else:
self.fileno = lambda: None
def __iter__(self):
# Assigning `__iter__` to the instance doesn't work as intended
# because the iter builtin does something like `cls.__iter__(obj)`
# and thus fails to find the _bound_ method `obj.__iter__`.
# Returning just `self.fp` works for built-in file objects but
# might not work for general file-like objects.
return iter(self.fp)
def __repr__(self):
return '<%s at %r whose fp = %r>' % (self.__class__.__name__,
id(self), self.fp)
def close(self):
if self.fp:
self.fp.close()
self.fp = None
self.read = None
self.readline = None
self.readlines = None
self.fileno = None
self.__iter__ = None
self.__next__ = None
def __enter__(self):
if self.fp is None:
raise ValueError("I/O operation on closed file")
return self
def __exit__(self, type, value, traceback):
self.close()
class addclosehook(addbase):
"""Class to add a close hook to an open file."""
def __init__(self, fp, closehook, *hookargs):
addbase.__init__(self, fp)
self.closehook = closehook
self.hookargs = hookargs
def close(self):
if self.closehook:
self.closehook(*self.hookargs)
self.closehook = None
self.hookargs = None
addbase.close(self)
class addinfo(addbase):
"""class to add an info() method to an open file."""
def __init__(self, fp, headers):
addbase.__init__(self, fp)
self.headers = headers
def info(self):
return self.headers
class addinfourl(addbase):
"""class to add info() and geturl() methods to an open file."""
def __init__(self, fp, headers, url, code=None):
addbase.__init__(self, fp)
self.headers = headers
self.url = url
self.code = code
def info(self):
return self.headers
def getcode(self):
return self.code
def geturl(self):
return self.url
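# --- Illustrative usage (demo only, not part of the stdlib module) ---
# Wrapping a plain in-memory stream in addinfourl gives it the familiar
# info()/geturl()/getcode() accessors used by urllib callers; the URL below is
# a placeholder.
if __name__ == '__main__':
    from io import BytesIO
    from email.message import Message
    demo_headers = Message()
    demo_headers['Content-Type'] = 'text/plain'
    with addinfourl(BytesIO(b'hello'), demo_headers, 'http://example.invalid/', code=200) as resp:
        assert resp.read() == b'hello'
        assert resp.info()['Content-Type'] == 'text/plain'
        assert resp.geturl() == 'http://example.invalid/'
        assert resp.getcode() == 200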
|
lfcnassif/MultiContentViewer
|
release/modules/ext/libreoffice/program/python-core-3.3.0/lib/urllib/response.py
|
Python
|
lgpl-3.0
| 3,021
| 0.001324
|
import json
from django.db.models import Q, Subquery
from django.core.management.base import BaseCommand
from readthedocs.oauth.models import RemoteRepository
from readthedocs.oauth.services import registry
from readthedocs.oauth.services.base import SyncServiceError
from readthedocs.projects.models import Project
from readthedocs.organizations.models import Organization
class Command(BaseCommand):
help = "Re-connect RemoteRepository to Project"
def add_arguments(self, parser):
parser.add_argument('organization', nargs='+', type=str)
parser.add_argument(
'--no-dry-run',
action='store_true',
default=False,
help='Update database with the changes proposed.',
)
# If owners does not have their RemoteRepository synced, it could
# happen we don't find a matching Project (see --force-owners-social-resync)
parser.add_argument(
'--only-owners',
action='store_true',
default=False,
help='Connect repositories only to organization owners.',
)
parser.add_argument(
'--force-owners-social-resync',
action='store_true',
default=False,
help='Force to re-sync RemoteRepository for organization owners.',
)
def _force_owners_social_resync(self, organization):
for owner in organization.owners.all():
for service_cls in registry:
for service in service_cls.for_user(owner):
try:
service.sync()
except SyncServiceError:
print(f'Service {service} failed while syncing. Skipping...')
def _connect_repositories(self, organization, no_dry_run, only_owners):
connected_projects = []
        # TODO: consider using the same logic as the RemoteRepository.matches method
# https://github.com/readthedocs/readthedocs.org/blob/49b03f298b6105d755554f7dc7e97a3398f7066f/readthedocs/oauth/models.py#L185-L194
remote_query = (
Q(ssh_url__in=Subquery(organization.projects.values('repo'))) |
Q(clone_url__in=Subquery(organization.projects.values('repo')))
)
for remote in RemoteRepository.objects.filter(remote_query).order_by('created'):
admin = json.loads(remote.json).get('permissions', {}).get('admin')
if only_owners and remote.users.first() not in organization.owners.all():
# Do not connect a RemoteRepository if the User is not owner of the organization
continue
if not admin:
# Do not connect a RemoteRepository where the User is not admin of the repository
continue
if not organization.users.filter(username=remote.users.first().username).exists():
                # Do not connect a RemoteRepository if the user does not belong to the organization
continue
# Projects matching
# - RemoteRepository URL
# - are under the Organization
# - not connected to a RemoteRepository already
# - was not connected previously by this call to the script
projects = Project.objects.filter(
Q(repo=remote.ssh_url) | Q(repo=remote.clone_url),
organizations__in=[organization.pk],
remote_repository__isnull=True
).exclude(slug__in=connected_projects)
for project in projects:
connected_projects.append(project.slug)
if no_dry_run:
remote.project = project
remote.save()
print(f'{project.slug: <40} {remote.pk: <10} {remote.html_url: <60} {remote.users.first().username: <20} {admin: <5}') # noqa
print('Total:', len(connected_projects))
if not no_dry_run:
print(
'Changes WERE NOT applied to the database. '
'Run it with --no-dry-run to save the changes.'
)
def handle(self, *args, **options):
no_dry_run = options.get('no_dry_run')
only_owners = options.get('only_owners')
force_owners_social_resync = options.get('force_owners_social_resync')
for organization in options.get('organization'):
try:
organization = Organization.objects.get(slug=organization)
if force_owners_social_resync:
self._force_owners_social_resync(organization)
self._connect_repositories(organization, no_dry_run, only_owners)
except Organization.DoesNotExist:
print(f'Organization does not exist. organization={organization}')
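# --- Illustrative invocation (not part of the command; slugs are placeholders) ---
# The command is a dry run by default and only writes changes with --no-dry-run:
#   python manage.py reconnect_remoterepositories my-org
#   python manage.py reconnect_remoterepositories my-org --no-dry-run --only-owners
#   python manage.py reconnect_remoterepositories my-org --force-owners-social-resync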
|
rtfd/readthedocs.org
|
readthedocs/oauth/management/commands/reconnect_remoterepositories.py
|
Python
|
mit
| 4,783
| 0.002091
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
class Migration(DataMigration):
def forwards(self, orm):
"Write your forwards methods here."
# Note: Remember to use orm['appname.ModelName'] rather than "from appname.models..."
# Delete stale NICs
orm.NetworkInterface.objects.filter(machine__deleted=True).delete()
for nic in orm.NetworkInterface.objects.select_related('machine',
'network').all():
userid = nic.machine.userid
nic.userid = userid
nic.save()
network = nic.network
for attr in ["ipv4", "ipv6"]:
address = getattr(nic, attr)
if address:
ipversion = 4 if attr == "ipv4" else 6
subnet = nic.network.subnets.get(ipversion=ipversion)
orm.IPAddress.objects.create(network=network,
subnet=subnet,
nic=nic,
userid=userid,
address=address)
def backwards(self, orm):
"Write your backwards methods here."
for ip in orm.IPAddress.objects.filter(deleted=False):
nic = ip.nic
attr = "ipv4" if nic.subnet.ipversion == 4 else "ipv6"
setattr(nic, attr, ip.address)
nic.save()
models = {
'db.backend': {
'Meta': {'ordering': "['clustername']", 'object_name': 'Backend'},
'clustername': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'}),
'ctotal': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'dfree': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'disk_templates': ('synnefo.db.fields.SeparatedValuesField', [], {'null': 'True'}),
'drained': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'dtotal': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'hypervisor': ('django.db.models.fields.CharField', [], {'default': "'kvm'", 'max_length': '32'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'index': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'unique': 'True'}),
'mfree': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'mtotal': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'offline': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'password_hash': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'pinst_cnt': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'port': ('django.db.models.fields.PositiveIntegerField', [], {'default': '5080'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'})
},
'db.backendnetwork': {
'Meta': {'unique_together': "(('network', 'backend'),)", 'object_name': 'BackendNetwork'},
'backend': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'networks'", 'on_delete': 'models.PROTECT', 'to': "orm['db.Backend']"}),
'backendjobid': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'backendjobstatus': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True'}),
'backendlogmsg': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'backendopcode': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True'}),
'backendtime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(1, 1, 1, 0, 0)'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mac_prefix': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'network': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'backend_networks'", 'to': "orm['db.Network']"}),
'operstate': ('django.db.models.fields.CharField', [], {'default': "'PENDING'", 'max_length': '30'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
'db.bridgepooltable': {
'Meta': {'object_name': 'BridgePoolTable'},
'available_map': ('django.db.models.fields.TextField', [], {'default': "''"}),
'base': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'offset': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'reserved_map': ('django.db.models.fields.TextField', [], {'default': "''"}),
'size': ('django.db.models.fields.IntegerField', [], {})
},
'db.flavor': {
'Meta': {'unique_together': "(('cpu', 'ram', 'disk', 'disk_template'),)", 'object_name': 'Flavor'},
'cpu': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'disk': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'disk_template': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ram': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'db.floatingip': {
'Meta': {'object_name': 'FloatingIP'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ipv4': ('django.db.models.fields.IPAddressField', [], {'unique': 'True', 'max_length': '15', 'db_index': 'True'}),
'machine': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'floating_ips'", 'null': 'True', 'to': "orm['db.VirtualMachine']"}),
'network': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'floating_ips'", 'to': "orm['db.Network']"}),
'serial': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'floating_ips'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['db.QuotaHolderSerial']"}),
'userid': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'})
},
'db.ipaddress': {
'Meta': {'unique_together': "(('network', 'address'),)", 'object_name': 'IPAddress'},
'address': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'floating_ip': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'network': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'ips'", 'to': "orm['db.Network']"}),
'nic': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'ips'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['db.NetworkInterface']"}),
'serial': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'ips'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['db.QuotaHolderSerial']"}),
'subnet': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'ips'", 'to': "orm['db.Subnet']"}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'userid': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'})
},
'db.ippooltable': {
'Meta': {'object_name': 'IPPoolTable'},
'available_map': ('django.db.models.fields.TextField', [], {'default': "''"}),
'base': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'offset': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'reserved_map': ('django.db.models.fields.TextField', [], {'default': "''"}),
'size': ('django.db.models.fields.IntegerField', [], {}),
'subnet': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'ip_pools'", 'null': 'True', 'to': "orm['db.Subnet']"})
},
'db.macprefixpooltable': {
'Meta': {'object_name': 'MacPrefixPoolTable'},
'available_map': ('django.db.models.fields.TextField', [], {'default': "''"}),
'base': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'offset': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'reserved_map': ('django.db.models.fields.TextField', [], {'default': "''"}),
'size': ('django.db.models.fields.IntegerField', [], {})
},
'db.network': {
'Meta': {'object_name': 'Network'},
'action': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '32', 'null': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'drained': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'external_router': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'flavor': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'floating_ip_pool': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'link': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'mac_prefix': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'machines': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['db.VirtualMachine']", 'through': "orm['db.NetworkInterface']", 'symmetrical': 'False'}),
'mode': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'serial': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'network'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['db.QuotaHolderSerial']"}),
'state': ('django.db.models.fields.CharField', [], {'default': "'PENDING'", 'max_length': '32'}),
'tags': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'userid': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_index': 'True'})
},
'db.networkinterface': {
'Meta': {'object_name': 'NetworkInterface'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'device_owner': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'dirty': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'firewall_profile': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'index': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'ipv4': ('django.db.models.fields.CharField', [], {'max_length': '15', 'null': 'True'}),
'ipv6': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'mac': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'machine': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'nics'", 'to': "orm['db.VirtualMachine']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'network': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'nics'", 'to': "orm['db.Network']"}),
'security_groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['db.SecurityGroup']", 'null': 'True', 'symmetrical': 'False'}),
'state': ('django.db.models.fields.CharField', [], {'default': "'ACTIVE'", 'max_length': '32'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'userid': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_index': 'True'})
},
'db.quotaholderserial': {
'Meta': {'ordering': "['serial']", 'object_name': 'QuotaHolderSerial'},
'accept': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'pending': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'resolved': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'serial': ('django.db.models.fields.BigIntegerField', [], {'primary_key': 'True', 'db_index': 'True'})
},
'db.securitygroup': {
'Meta': {'object_name': 'SecurityGroup'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'db.subnet': {
'Meta': {'object_name': 'Subnet'},
'cidr': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'dhcp': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'dns_nameservers': ('synnefo.db.fields.SeparatedValuesField', [], {'null': 'True'}),
'gateway': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'host_routes': ('synnefo.db.fields.SeparatedValuesField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ipversion': ('django.db.models.fields.IntegerField', [], {'default': '4'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'network': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'subnets'", 'to': "orm['db.Network']"})
},
'db.virtualmachine': {
'Meta': {'object_name': 'VirtualMachine'},
'action': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '30', 'null': 'True'}),
'backend': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'virtual_machines'", 'null': 'True', 'on_delete': 'models.PROTECT', 'to': "orm['db.Backend']"}),
'backend_hash': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'backendjobid': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'backendjobstatus': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True'}),
'backendlogmsg': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'backendopcode': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True'}),
'backendtime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(1, 1, 1, 0, 0)'}),
'buildpercentage': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'flavor': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['db.Flavor']", 'on_delete': 'models.PROTECT'}),
'hostid': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'imageid': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'operstate': ('django.db.models.fields.CharField', [], {'default': "'BUILD'", 'max_length': '30'}),
'serial': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'virtual_machine'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['db.QuotaHolderSerial']"}),
'suspended': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'task': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'task_job_id': ('django.db.models.fields.BigIntegerField', [], {'null': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'userid': ('django.db.models.fields.CharField', [], {'max_length': '100', 'db_index': 'True'})
},
'db.virtualmachinediagnostic': {
'Meta': {'ordering': "['-created']", 'object_name': 'VirtualMachineDiagnostic'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'details': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'machine': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'diagnostics'", 'to': "orm['db.VirtualMachine']"}),
'message': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'source': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'source_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True'})
},
'db.virtualmachinemetadata': {
'Meta': {'unique_together': "(('meta_key', 'vm'),)", 'object_name': 'VirtualMachineMetadata'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'meta_key': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'meta_value': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'vm': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'metadata'", 'to': "orm['db.VirtualMachine']"})
}
}
complete_apps = ['db']
symmetrical = True
|
grnet/synnefo
|
snf-cyclades-app/synnefo/db/migrations/old/0080_nics_to_ips.py
|
Python
|
gpl-3.0
| 20,524
| 0.008234
|
import logging, openravepy, prpy
from prpy.action import ActionMethod
from prpy.planning.base import PlanningError
from contextlib import contextmanager
logger = logging.getLogger('herbpy')
@ActionMethod
def Grasp(robot, obj, manip=None, preshape=[0., 0., 0., 0.],
tsrlist=None, render=True, **kw_args):
"""
    @param robot The robot performing the grasp
    @param obj The object to grasp
@param manip The manipulator to perform the grasp with
(if None active manipulator is used)
@param preshape The grasp preshape for the hand
@param tsrlist A list of TSRChain objects to use for planning to grasp pose
(if None, the 'grasp' tsr from tsrlibrary is used)
@param render Render tsr samples and push direction vectors during planning
"""
HerbGrasp(robot, obj, manip=manip, preshape=preshape,
tsrlist=tsrlist, render=render)
@ActionMethod
def PushGrasp(robot, obj, push_distance=0.1, manip=None,
preshape=[0., 0., 0., 0.], push_required=True,
tsrlist=None, render=True, **kw_args):
"""
@param robot The robot performing the push grasp
@param obj The object to push grasp
    @param push_distance The distance to push before grasping
@param manip The manipulator to perform the grasp with
(if None active manipulator is used)
@param push_required If true, throw exception if a plan for the pushing
movement cannot be found. If false, continue with grasp even if push
cannot be executed.
@param preshape The grasp preshape for the hand
@param tsrlist A list of TSRChain objects to use for planning to grasp pose
(if None, the 'grasp' tsr from tsrlibrary is used)
@param render Render tsr samples and push direction vectors during planning
"""
if tsrlist is None:
tsrlist = robot.tsrlibrary(obj, 'push_grasp', push_distance=push_distance)
HerbGrasp(robot, obj, manip=manip, preshape=preshape,
push_distance=push_distance,
tsrlist=tsrlist, render=render)
def HerbGrasp(robot, obj, push_distance=None, manip=None,
preshape=[0., 0., 0., 0.],
push_required=False,
tsrlist=None,
render=True,
**kw_args):
"""
    @param robot The robot performing the grasp
    @param obj The object to grasp
    @param push_distance The distance to push before grasping (if None, no pushing)
@param manip The manipulator to perform the grasp with
(if None active manipulator is used)
@param preshape The grasp preshape for the hand
@param push_required If true, throw exception if a plan for the pushing
movement cannot be found. If false, continue with grasp even if push
cannot be executed. (only used if distance is not None)
@param render Render tsr samples and push direction vectors during planning
"""
if manip is None:
with robot.GetEnv():
manip = robot.GetActiveManipulator()
# Move the hand to the grasp preshape
manip.hand.MoveHand(*preshape)
# Get the grasp tsr
if tsrlist is None:
tsrlist = robot.tsrlibrary(obj, 'grasp')
# Plan to the grasp
with prpy.viz.RenderTSRList(tsrlist, robot.GetEnv(), render=render):
manip.PlanToTSR(tsrlist)
if push_distance is not None:
ee_in_world = manip.GetEndEffectorTransform()
push_direction = ee_in_world[:3,2]
# Move the object into the hand
env = robot.GetEnv()
with env:
obj_in_world = obj.GetTransform()
# First move back until collision
stepsize = 0.01
total_distance = 0.0
while not env.CheckCollision(robot, obj) and total_distance <= push_distance:
obj_in_world[:3,3] -= stepsize*push_direction
total_distance += stepsize
obj.SetTransform(obj_in_world)
# Then move forward until just out of collision
stepsize = 0.001
while env.CheckCollision(robot, obj):
obj_in_world[:3,3] += stepsize*push_direction
obj.SetTransform(obj_in_world)
# Manipulator must be active for grab to work properly
p = openravepy.KinBody.SaveParameters
with robot.CreateRobotStateSaver(p.ActiveManipulator):
robot.SetActiveManipulator(manip)
robot.Grab(obj)
# Now execute the straight line movement
with prpy.viz.RenderVector(ee_in_world[:3,3], push_direction,
push_distance, robot.GetEnv(), render=render):
try:
with prpy.rave.Disabled(obj):
manip.PlanToEndEffectorOffset(direction = push_direction,
distance = push_distance,
**kw_args)
            except PlanningError as e:
if push_required:
raise
else:
logger.warn('Could not find a plan for straight line push. Ignoring.')
robot.Release(obj)
# Now close the hand to grasp
manip.hand.CloseHand()
# Manipulator must be active for grab to work properly
p = openravepy.KinBody.SaveParameters
with robot.CreateRobotStateSaver(p.ActiveManipulator):
robot.SetActiveManipulator(manip)
robot.Grab(obj)
@ActionMethod
def Lift(robot, obj, distance=0.05, manip=None, render=True, **kw_args):
"""
    @param robot The robot performing the lift
    @param obj The object to lift
    @param distance The distance to lift the object
@param manip The manipulator to perform the grasp with
(if None active manipulator is used)
@param render Render tsr samples and push direction vectors during planning
"""
if manip is None:
with robot.GetEnv():
manip = robot.GetActiveManipulator()
# Check for collision and disable anything in collision
creport = openravepy.CollisionReport()
disabled_objects = []
# Resolve inconsistencies in grabbed objects
if robot.CheckSelfCollision():
grabbed_objs = robot.GetGrabbed()
for obj in grabbed_objs:
robot.Release(obj)
for obj in grabbed_objs:
robot.Grab(obj)
# Create list of any current collisions so those can be disabled
while robot.GetEnv().CheckCollision(robot, creport):
collision_obj = creport.plink2.GetParent()
disabled_objects.append(collision_obj)
collision_obj.Enable(False)
for obj in disabled_objects:
obj.Enable(True)
# Perform the lift
with prpy.rave.AllDisabled(robot.GetEnv(), disabled_objects):
lift_direction = [0., 0., 1.]
lift_distance = distance
ee_in_world = manip.GetEndEffectorTransform()
with prpy.viz.RenderVector(ee_in_world[:3,3], lift_direction,
distance, robot.GetEnv(), render=render):
manip.PlanToEndEffectorOffset(direction=lift_direction,
distance=lift_distance,
**kw_args)
@ActionMethod
def Place(robot, obj, on_obj, given_point_on=None, manip=None, render=True, **kw_args):
"""
Place an object onto another object
This assumes the 'point_on' tsr is defined for the on_obj and
the 'place' tsr is defined for obj
    @param robot The robot performing the placement
@param obj The object to place
@param on_obj The object to place obj on
    @param given_point_on 4x4 numpy array (pose matrix), the "X"-marked location on on_obj, in on_obj's coordinates
@param manip The manipulator to perform the grasp with
(if None active manipulator is used)
@param render Render tsr samples and push direction vectors during planning
"""
if manip is None:
with robot.GetEnv():
manip = robot.GetActiveManipulator()
# Get a tsr to sample places to put the glass
obj_extents = obj.ComputeAABB().extents()
obj_radius = max(obj_extents[0], obj_extents[1])
    if given_point_on is None:
        dest_tsr = robot.tsrlibrary(on_obj, 'point_on', padding=obj_radius)
    else:
        # Given a point on the on_obj to place obj
        dest_tsr = robot.tsrlibrary(on_obj, 'given_point_on', given_point_on, manip=manip)
# Now use this to get a tsr for sampling ee_poses
place_tsr = robot.tsrlibrary(obj, 'place', pose_tsr_chain = dest_tsr[0], manip=manip)
# Plan to the grasp
with prpy.viz.RenderTSRList(place_tsr, robot.GetEnv(), render=render):
manip.PlanToTSR(place_tsr)
# Open the hand
manip.hand.OpenHand()
# Release the object
robot.Release(obj)
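# --- Illustrative call sequence (sketch only, not part of the module) ---
# ActionMethod-decorated functions are normally invoked through the robot object
# once the environment is initialized; `robot`, `glass` and `table` are placeholders.
def _example_pick_and_place(robot, glass, table):
    robot.PushGrasp(glass, push_distance=0.1)
    robot.Lift(glass, distance=0.05)
    robot.Place(glass, table)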
|
mharding01/herbpy
|
src/herbpy/action/grasping.py
|
Python
|
bsd-3-clause
| 9,013
| 0.005658
|
import pygame
import src.graphics as graphics
import src.colours as colours
import src.config as config
import src.scenes.scenebase as scene_base
from src.minigames.hunt.input_handler import InputHandler
from src.gui.clickable import Clickable
from src.resolution_asset_sizer import ResolutionAssetSizer
from src.tiled_map import TiledMap
from src.game_object.deadly_area import DeadlyArea
from src.minigames.hunt.player import Player
from src.minigames.hunt.collectible import Collectible
class Hunt(scene_base.SceneBase):
"""The Hunt minigame...pretty much snake"""
def __init__(self, previous, current_stage=1):
self.current_stage = current_stage
self.file = './assets/game-data/levels/minigames/hunt/minigame-hunt-' + str(self.current_stage) + '.tmx'
self.surface = graphics.get_window_surface()
self.tiled_map = TiledMap(self.file, self.surface)
self.sprites = self.tiled_map.sprites
self.player = self.get_player()
self.collectibles = pygame.sprite.Group([sprite for sprite in self.sprites if isinstance(sprite, Collectible)])
self.collideables = pygame.sprite.Group([sprite for sprite in self.sprites if isinstance(sprite, DeadlyArea)])
scene_base.SceneBase.__init__(
self,
InputHandler(self),
graphics.get_controller()
)
self.previous = previous
self.width, self.height = pygame.display.get_window_size()
def update(self, delta_time):
self.sprites.update(delta_time, self.tiled_map)
self.player.handle_collision(self.collectibles, self.collideables)
if not self.player.alive():
self.reset()
if self.has_completed_minigame():
self.previous.open_secured_door()
self.switch_to_scene(self.previous)
elif self.has_won():
self.next_stage()
def has_won(self):
has_no_enemies = True
for sprite in self.sprites:
if isinstance(sprite, Collectible):
has_no_enemies = False
return has_no_enemies
def has_completed_minigame(self):
return self.has_won() and self.current_stage == 3
def render(self):
self.surface.fill(colours.RED)
self.sprites.draw(self.surface)
def get_player(self):
for sprite in self.sprites:
if isinstance(sprite, Player):
return sprite
def reset(self):
self.__init__(self.previous, self.current_stage)
def next_stage(self):
self.current_stage += 1
self.__init__(self.previous, self.current_stage)
|
joereynolds/Mr-Figs
|
src/minigames/hunt/game.py
|
Python
|
gpl-3.0
| 2,622
| 0.002288
|
# This file is part of Indico.
# Copyright (C) 2002 - 2022 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from flask import request
from indico.modules.auth.controllers import (RHAccounts, RHAdminImpersonate, RHLinkAccount, RHLogin, RHLoginForm,
RHLogout, RHRegister, RHRemoveAccount, RHResetPassword)
from indico.web.flask.util import make_compat_redirect_func
from indico.web.flask.wrappers import IndicoBlueprint
_bp = IndicoBlueprint('auth', __name__, template_folder='templates', virtual_template_folder='auth')
_bp.add_url_rule('/login/', 'login', RHLogin, methods=('GET', 'POST'))
_bp.add_url_rule('/login/<provider>/', 'login', RHLogin)
_bp.add_url_rule('/login/<provider>/form', 'login_form', RHLoginForm)
_bp.add_url_rule('/login/<provider>/link-account', 'link_account', RHLinkAccount, methods=('GET', 'POST'))
_bp.add_url_rule('/logout/', 'logout', RHLogout)
_bp.add_url_rule('/register/', 'register', RHRegister, methods=('GET', 'POST'), defaults={'provider': None})
_bp.add_url_rule('/register/<provider>', 'register', RHRegister, methods=('GET', 'POST'))
_bp.add_url_rule('/reset-password/', 'resetpass', RHResetPassword, methods=('GET', 'POST'))
_bp.add_url_rule('/admin/users/impersonate', 'admin_impersonate', RHAdminImpersonate, methods=('POST',))
with _bp.add_prefixed_rules('/user/<int:user_id>', '/user'):
_bp.add_url_rule('/accounts/', 'accounts', RHAccounts, methods=('GET', 'POST'))
_bp.add_url_rule('/accounts/<identity>/remove/', 'remove_account', RHRemoveAccount, methods=('POST',))
@_bp.url_defaults
def _add_user_id(endpoint, values):
if endpoint in {'auth.accounts', 'auth.remove_account'} and 'user_id' not in values:
values['user_id'] = request.view_args.get('user_id')
# Legacy URLs
auth_compat_blueprint = _compat_bp = IndicoBlueprint('compat_auth', __name__)
_compat_bp.add_url_rule('/user/login', 'login', make_compat_redirect_func(_bp, 'login'))
_compat_bp.add_url_rule('/user/register', 'register', make_compat_redirect_func(_bp, 'register'))
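# --- Illustrative URL resolution (not part of the blueprint module) ---
# Once the blueprint is registered, the rules above resolve via url_for(), e.g.:
#   url_for('auth.login')                        -> /login/
#   url_for('auth.register', provider='github')  -> /register/github
#   url_for('auth.accounts', user_id=42)         -> /user/42/accounts/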
|
indico/indico
|
indico/modules/auth/blueprint.py
|
Python
|
mit
| 2,168
| 0.005996
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
from django.contrib.auth.models import Group, User
def add_moderator_group(apps, schema_editor):
g = Group.objects.create(name="moderators")
g.save()
for user in User.objects.all():
# add any existing admin users
# to the moderators group when we create it
if user.is_superuser:
g.user_set.add(user)
class Migration(migrations.Migration):
dependencies = [("auth", "0008_alter_user_username_max_length")]
operations = [migrations.RunPython(add_moderator_group)]
|
DemocracyClub/EveryElection
|
every_election/apps/core/migrations/0001_initial.py
|
Python
|
bsd-3-clause
| 621
| 0
|
import nuke
import pyblish.api
class ExtractSceneSave(pyblish.api.Extractor):
"""
"""
hosts = ['nuke']
order = pyblish.api.Extractor.order - 0.45
families = ['scene']
label = 'Scene Save'
def process(self, instance):
self.log.info('saving scene')
nuke.scriptSave()
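# --- Illustrative registration (sketch only, not part of the plug-in) ---
# Inside a Nuke session the extractor is typically made available by
# registering it (or its containing directory) with pyblish:
#   import pyblish.api
#   pyblish.api.register_plugin(ExtractSceneSave)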
|
mkolar/pyblish-kredenc
|
pyblish_kredenc/plugins/nuke/extract_scene_save.py
|
Python
|
lgpl-3.0
| 313
| 0
|
"""Responses."""
from io import StringIO
from csv import DictWriter
from flask import Response, jsonify, make_response
from .__version__ import __version__
class ApiResult:
"""A representation of a generic JSON API result."""
def __init__(self, data, metadata=None, **kwargs):
"""Store input arguments.
Args:
data (dict): A dictionary built up for the API to return
metadata (dict): A dictionary of keys and values to add to the
metadata field of the return object.
"""
self.data = data
self.extra_metadata = metadata
self.kwargs = kwargs
def to_response(self):
"""Make a response from the data."""
metadata = self.metadata(self.extra_metadata)
obj = {
**self.data,
**self.kwargs,
'metadata': metadata
}
return jsonify(obj)
@staticmethod
def metadata(extra_metadata=None):
"""Return metadata."""
from .models import SourceData
obj = {
'version': __version__,
'datasetMetadata': [item.to_json() for item in
SourceData.query.all()]
}
if extra_metadata:
obj.update(extra_metadata)
return obj
class QuerySetApiResult(ApiResult):
"""A representation of a list of records (Python dictionaries)."""
def __init__(self, record_list, return_format, metadata=None, **kwargs):
"""Store the list of records and the format."""
super().__init__(record_list, metadata, **kwargs)
self.record_list = record_list
self.return_format = return_format
def to_response(self):
"""Convert the list of records into a response."""
if self.return_format == 'csv' and self.record_list:
return self.csv_response(self.record_list)
elif self.return_format == 'csv': # and not self.record_list
return make_response('', 204)
# Default is JSON
return self.json_response(self.record_list, self.extra_metadata,
**self.kwargs)
@staticmethod
def csv_response(record_list):
"""CSV Response."""
string_io = StringIO()
header = record_list[0].keys()
writer = DictWriter(f=string_io, fieldnames=header)
writer.writeheader()
writer.writerows((item for item in record_list))
result = string_io.getvalue()
return Response(result, mimetype='text/csv')
@staticmethod
def json_response(record_list, extra_metadata, **kwargs):
"""Convert a list of records into a JSON response."""
obj = {
**kwargs,
'results': record_list,
'resultSize': len(record_list),
'metadata': ApiResult.metadata(extra_metadata)
}
return jsonify(obj)
# TODO: (jef/jkp 2017-08-29) Add methods for:
# * return warnings, errors
# * return version number
# * documentation
# Needs: Decision on how these should be returned.
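# --- Illustrative view usage (sketch only; the helper below is not part of the module) ---
# A resource endpoint can hand its records to QuerySetApiResult and let the
# caller choose JSON (default) or CSV through a query argument.
def example_records_view(records, return_format='json'):
    """Build a Flask response for `records` in the requested format."""
    return QuerySetApiResult(records, return_format).to_response()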
|
jkpr/pma-api
|
pma_api/response.py
|
Python
|
mit
| 3,061
| 0
|
# -*- coding: utf-8 -*-
# Akvo RSR is covered by the GNU Affero General Public License.
# See more details in the license.txt file located at the root folder of the Akvo RSR module.
# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.
def related_activities(project):
"""
Check if related project has an IATI identifier and relation.
:param project: Project object
:return: All checks passed boolean, [Check results]
"""
checks = []
all_checks_passed = True
related_projects_count = project.related_projects.count()
for rp in project.related_projects.prefetch_related('related_project').all():
if not (rp.related_project or rp.related_iati_id):
all_checks_passed = False
checks.append(('error', 'related project or IATI identifier not specified'))
elif rp.related_project and not rp.related_project.iati_activity_id:
all_checks_passed = False
checks.append(('error', 'related project (id: %s) has no IATI identifier specified' %
str(rp.related_project.pk)))
if not rp.relation:
all_checks_passed = False
checks.append(('error', 'relation missing for related project'))
if related_projects_count > 0 and all_checks_passed:
checks.append(('success', 'has valid related project(s)'))
return all_checks_passed, checks
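# --- Illustrative return value (sketch only) ---
# For a project whose single related project carries both an IATI identifier
# and a relation type, the check yields:
#   (True, [('success', 'has valid related project(s)')])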
|
akvo/akvo-rsr
|
akvo/iati/checks/fields/related_activities.py
|
Python
|
agpl-3.0
| 1,444
| 0.003463
|
from formatting import print_call
import credentials
import os.path
import re
import xmlrpclib
def _get_ch_params():
# Initialise variables when required
from core.config import FullConfParser
fcp = FullConfParser()
username = fcp.get("auth.conf").get("certificates").get("username")
ch_host = fcp.get("auth.conf").get("clearinghouse").get("host")
ch_port = fcp.get("auth.conf").get("clearinghouse").get("port")
ch_end = fcp.get("auth.conf").get("clearinghouse").get("endpoint")
return (username, ch_host, ch_port, ch_end)
def api_call(method_name, endpoint=None, params=[], username=None,
verbose=False):
user, _, _, ch_end = _get_ch_params()
username = username or user
endpoint = endpoint or ch_end
key_path = "%s-key.pem" % username
cert_path = "%s-cert.pem" % username
res = ssl_call(method_name, params, endpoint,
key_path=key_path, cert_path=cert_path)
if verbose:
print_call(method_name, params, res)
return res.get("code", None), \
res.get("value", None), res.get("output", None)
def ch_call(method_name, endpoint=None, params=[], username=None,
verbose=False):
user, ch_host, ch_port, ch_end = _get_ch_params()
username = username or user
endpoint = endpoint or ch_end
key_path = "%s-key.pem" % username
cert_path = "%s-cert.pem" % username
res = ssl_call(method_name, params, endpoint, key_path=key_path,
cert_path=cert_path, host=ch_host, port=ch_port)
return res
def handler_call(method_name, params=[], username=None, arg=[]):
if username is None:
user, _, _, _ = _get_ch_params()
verbose = False
if arg in ["-v", "--verbose"]:
verbose = True
return api_call(method_name, "/xmlrpc/geni/3/", params=params,
username=username, verbose=verbose)
class SafeTransportWithCert(xmlrpclib.SafeTransport):
"""
Helper class to force the right certificate for the transport class.
"""
def __init__(self, key_path, cert_path):
# No super because of old-style class
xmlrpclib.SafeTransport.__init__(self)
self._key_path = key_path
self._cert_path = cert_path
def make_connection(self, host):
"""
This method will automatically be called by the ServerProxy class
when a transport channel is needed.
"""
host_with_cert = (host, {"key_file": self._key_path,
"cert_file": self._cert_path})
# No super because of old-style class
return xmlrpclib.SafeTransport.make_connection(self, host_with_cert)
def ssl_call(method_name, params, endpoint, key_path=None, cert_path=None,
host=None, port=None):
username, ch_host, ch_port, ch_end = _get_ch_params()
    key_path = key_path or ("%s-key.pem" % username)
    cert_path = cert_path or ("%s-cert.pem" % username)
host = host or ch_host
port = port or ch_port
endpoint = endpoint or ch_end
# Start logic
creds_path = os.path.normpath(os.path.join(os.path.dirname(__file__),
"../../..", "cert"))
if not os.path.isabs(key_path):
key_path = os.path.join(creds_path, key_path)
if not os.path.isabs(cert_path):
cert_path = os.path.join(creds_path, cert_path)
key_path = os.path.abspath(os.path.expanduser(key_path))
cert_path = os.path.abspath(os.path.expanduser(cert_path))
if not os.path.isfile(key_path) or not os.path.isfile(cert_path):
raise RuntimeError("Key or cert file not found (%s, %s)"
% (key_path, cert_path))
transport = SafeTransportWithCert(key_path, cert_path)
if endpoint and len(endpoint):
if endpoint[0] == "/":
endpoint = endpoint[1:]
proxy = xmlrpclib.ServerProxy("https://%s:%s/%s" % (host, str(port),
endpoint), transport=transport)
# return proxy.get_version()
method = getattr(proxy, method_name)
return method(*params)
def getusercred(geni_api=3):
"""Retrieve your user credential. Useful for debugging.
If you specify the -o option, the credential is saved to a file.
If you specify --usercredfile:
First, it tries to read the user cred from that file.
Second, it saves the user cred to a file by that name
(but with the appropriate extension)
Otherwise, the filename is <username>-<framework nickname from
config file>-usercred.[xml or json, depending on AM API version].
If you specify the --prefix option then that string starts the filename.
If instead of the -o option, you supply the --tostdout option,
then the usercred is printed to STDOUT.
Otherwise the usercred is logged.
The usercred is returned for use by calling scripts.
e.g.:
Get user credential, save to a file:
omni.py -o getusercred
Get user credential, save to a file with filename prefix mystuff:
omni.py -o -p mystuff getusercred
"""
from core.config import FullConfParser
fcp = FullConfParser()
username = fcp.get("auth.conf").get("certificates").get("username")
creds_path = os.path.normpath(
os.path.join(os.path.dirname(__file__), "../../..", "cert"))
cert_path = os.path.join(creds_path, "%s-cert.pem" % username)
# Retrieve new credential by contacting with GCF CH
try:
user_cert = open(cert_path, "r").read()
cred = ch_call("CreateUserCredential", params=[user_cert])
# Exception? -> Retrieve already existing credential from disk (CBAS)
except:
cred_path = os.path.join(creds_path, "%s-cred.xml" % username)
cred = open(cred_path).read()
if geni_api >= 3:
if cred:
cred = credentials.wrap_cred(cred)
credxml = credentials.get_cred_xml(cred)
# pull the username out of the cred
# <owner_urn>urn:publicid:IDN+geni:gpo:gcf+user+alice</owner_urn>
user = ""
usermatch = re.search(
r"\<owner_urn>urn:publicid:IDN\+.+\+user\+(\w+)\<\/owner_urn\>",
credxml)
if usermatch:
user = usermatch.group(1)
return ("Retrieved %s user credential" % user, cred)
|
ict-felix/stack
|
modules/resource/orchestrator/src/core/utils/calls.py
|
Python
|
apache-2.0
| 6,238
| 0.00016
|
from twisted.internet import reactor,protocol
class EchoClient(protocol.Protocol):
def connectionMade(self):
self.transport.write("hello a ")
def dataReceived(self, data):
print('Server said:',data)
self.transport.loseConnection()
def connectionLost(self, reason):
print('connection lost')
class EchoFactory(protocol.ClientFactory):
    protocol = EchoClient
    def clientConnectionFailed(self, connector, reason):
        print('Connection failed - goodbye!')
reactor.stop()
def main():
    f = EchoFactory()
reactor.connectTCP('localhost',9090,f)
reactor.run()
if __name__ == '__main__':
main()
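# The matching server is not part of this file; a minimal counterpart listening
# on port 9090 might look like this (a sketch, not the actual Demo server):
#
#   from twisted.internet import reactor, protocol
#
#   class Echo(protocol.Protocol):
#       def dataReceived(self, data):
#           self.transport.write(data)  # echo everything straight back
#
#   factory = protocol.ServerFactory()
#   factory.protocol = Echo
#   reactor.listenTCP(9090, factory)
#   reactor.run()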
|
XiaJieCom/change
|
Demo/days10/EchoClient.py
|
Python
|
lgpl-2.1
| 658
| 0.018237
|
# -*- coding: utf-8 -*-
"""The function module of dolfin"""
from dolfin.functions import multimeshfunction
from dolfin.functions import functionspace
from dolfin.functions import function
from dolfin.functions import constant
from dolfin.functions import expression
from dolfin.functions import specialfunctions
from .multimeshfunction import *
from .functionspace import *
from .function import *
from .constant import *
from .expression import *
from .specialfunctions import *
# NOTE: The automatic documentation system in DOLFIN requires to _not_ define
# NOTE: classes or functions within this file. Use separate modules for that
# NOTE: purpose.
__all__ = functionspace.__all__ + function.__all__ + constant.__all__ + \
expression.__all__ + specialfunctions.__all__ + \
multimeshfunction.__all__
|
FEniCS/dolfin
|
site-packages/dolfin/functions/__init__.py
|
Python
|
lgpl-3.0
| 827
| 0.001209
|
from pyramid.view import view_config
import logging
@view_config(route_name='hello_json', renderer='json')
def hello_json(request):
logger = logging.getLogger(__name__)
logger.info("Got JSON from name: {n}".format(n = __name__))
request.session['counter'] = request.session.get('counter', 0) + 1
return {
'a': [1,2,request.session['counter']],
'b': ['x', 'y'],
}
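# Illustrative behaviour (sketch): the per-session counter starts at 1, so the
# first request of a fresh session returns {'a': [1, 2, 1], 'b': ['x', 'y']}
# and the last element of 'a' increments on every subsequent request.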
|
jgowans/directionFinder_web
|
directionFinder_web/views/hello_json.py
|
Python
|
gpl-2.0
| 400
| 0.0125
|
#!/usr/bin/env python
"""
File: twitter_analyse.py
Author: Me
Email: 0
Github: 0
Description: Analyse tweets. For details, please refer to the document
```twitter_analyse.notes```
"""
# System lib
from __future__ import division
import json
import os
from math import log
import numpy
# 3-rd party lib
# import nltk
from nltk.classify import NaiveBayesClassifier
from textblob import TextBlob
# Constants
TWEET_DIR = os.path.join('.', 'twitter_data')
OSCAR_DIR = os.path.join(TWEET_DIR, 'oscar')
RAZZIES_DIR = os.path.join(TWEET_DIR, 'razzies')
PREDICT_DIR = os.path.join(TWEET_DIR, 'proof')
CANDIDATE_DIR = os.path.join(TWEET_DIR, 'candidates')
# PREDICT_OSCAR_DIR = os.path.join(PREDICT_DIR, 'oscar')
# PREDICT_RAZZIES_DIR = os.path.join(PREDICT_DIR, 'razzies')
def attribute_to_characteristic(tweet):
"""
Extract attributes from a tweet and form a characteristic of a tweet
@param tweet dict
@return dict
        Characteristic of a tweet
"""
ret = {}
text = tweet['text']
retweets = tweet['retweet_count']
favorites = tweet['favorite_count']
followers = tweet['author_followers']
friends = tweet['author_friends']
publishes = tweet['author_num_of_status']
blob = TextBlob(text)
polarity = blob.sentiment.polarity
ret['scaled_polarity'] = calculate_scaled_polarity(
polarity,
int(retweets),
int(favorites),
int(followers),
int(friends),
int(publishes)
)
ret['retweets'] = retweets
ret['favorites'] = favorites
ret['followers'] = followers
ret['friends'] = friends
ret['publishes'] = publishes
ret['polarity'] = polarity
# print 'p=%.2f re=%d fav=%d, fol=%d, fd=%d, pub=%d' % (
# polarity, retweets, favorites, followers, friends, publishes
# )
return ret
def calculate_scaled_polarity(
polarity, retweets, favorites, followers, friends, publishes):
"""
Return a scaled polarity for a tweet
@param polarity float
@param retweets int
@param favorites int
@param followers int
@param friends int
@param publishes int
@return float
"""
# Avoid zero case and negative value
retweets = retweets if retweets > 0 else 1
favorites = favorites if favorites > 0 else 1
followers = followers if followers > 0 else 1
friends = friends if friends > 0 else 1
publishes = publishes if publishes > 0 else 1
# Entropy
ret = polarity * \
(
log(retweets, 2) +
log(favorites, 2) +
log(followers, 2) +
log(friends, 2) +
log(publishes, 2)
)
return round(ret, 2)
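# Worked example (illustrative numbers only): with polarity=0.5, retweets=4,
# favorites=2, followers=1024, friends=256 and publishes=16 the log2 terms sum
# to 2 + 1 + 10 + 8 + 4 = 25, so the scaled polarity is 0.5 * 25 = 12.5.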
def tweets2film(tweet_characteristics):
"""
    Aggregate tweet characteristics to form a film's characteristics
@param tweet_characteristics list of dict
@return dict
characteristics of a film
"""
ret = {}
retweets_data = []
favorites_data = []
polarities_data = []
friends_data = []
followers_data = []
for t in tweet_characteristics:
retweets_data.append(t['retweets'])
favorites_data.append(t['favorites'])
polarities_data.append(t['polarity'])
friends_data.append(t['friends'])
followers_data.append(t['followers'])
retweets = numpy.array(retweets_data)
favorites = numpy.array(favorites_data)
polarities = numpy.array(polarities_data)
friends = numpy.array(friends_data)
followers = numpy.array(followers_data)
for data_set in [
('retweets', retweets),
('favorites', favorites),
('polarities', polarities),
('friends', friends),
('followers', followers)
]:
data_name = data_set[0]
data_list = data_set[1]
print '|%s| sd: %f mean: %f min: %d max: %d' % (
data_name,
round(data_list.std(), 2),
round(numpy.average(data_list), 2),
data_list.min(),
data_list.max(),
)
# ret['avg_followers'] = round(numpy.average(followers_data), 2)
# ret['avg_friends'] = round(numpy.average(friends_data), 2)
ret['avg_polarity'] = round(numpy.average(polarities_data), 2)
# ret['avg_retweet'] = round(numpy.average(retweets_data), 2)
# ret['std_friends'] = round(friends.std(), 2)
# ret['std_followers'] = round(followers.std(), 2)
# ret['std_polarity'] = round(polarities.std(), 2)
ret['std_retweet'] = round(retweets.std(), 2)
# ret['log_friends'] = round(log(sum(friends_data)) / log(2), 2)
# ret['log_followers'] = round(log(sum(followers_data)) / log(2), 2)
ret['log_retweets'] = round(log(sum(retweets_data)) / log(2), 2)
ret['log_favorites'] = round(log(sum(favorites_data)) / log(2), 2)
return ret
def construct_film_characteristic(film_name, tweet_characteristics):
"""
Construct featuresets for given parameters
@param film_name string
@param tweet_characteristics list of dict
@return featuresets
"""
ret = {}
# Analyze film's attributes
ret['length_of_film'] = len(film_name)
ret['number_of_words'] = len(film_name.split(' '))
# Analyze tweet's characteristics
    aggregated_characteristic = tweets2film(tweet_characteristics)
    # Merge the two characteristic dicts
    ret = dict(ret.items() + aggregated_characteristic.items())
return ret
def predictCandidates():
list_of_files = os.listdir(CANDIDATE_DIR)
for fn in list_of_files:
path = os.path.join(CANDIDATE_DIR, fn)
film_name = os.path.splitext(fn)[0]
with open(path, 'r') as f:
tweets = json.load(f)
tweets = json.loads(tweets)
tweet_characteristics = []
for tweet in tweets:
                # Per-tweet analysis
characteristic = attribute_to_characteristic(tweet)
tweet_characteristics.append(characteristic)
film_characteristic = construct_film_characteristic(
film_name,
tweet_characteristics
)
result = classifier.classify(film_characteristic)
print 'film: |%s| PREDICT: |%s|\n' % (film_name, result)
features = []
for my_dir in [OSCAR_DIR, RAZZIES_DIR]:
label = os.path.basename(my_dir)
print "=========== Training {0} ============".format(label)
for fn in os.listdir(my_dir):
path = os.path.join(my_dir, fn)
film_name = os.path.splitext(fn)[0]
# print 'dir=%s, film_name=%s, path=%s' % (my_dir, film_name, path)
with open(path, 'r') as f:
tweets = json.load(f)
tweets = json.loads(tweets)
tweet_characteristics = []
for tweet in tweets:
                # Per-tweet analysis
characteristic = attribute_to_characteristic(tweet)
tweet_characteristics.append(characteristic)
try:
film_characteristic = construct_film_characteristic(
film_name,
tweet_characteristics
)
except Exception as e:
print '{0}: {1}'.format(film_name, e)
else:
# print 'film: |%s|' % film_name
# print film_characteristic
feature = (film_characteristic, label)
features.append(feature)
# Train the classifier
classifier = NaiveBayesClassifier.train(features)
classifier.show_most_informative_features(10)
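# The `features` list built above follows the (featureset, label) shape that
# NLTK's NaiveBayesClassifier expects, e.g. (values illustrative only):
#   [({'avg_polarity': 0.12, 'std_retweet': 3.4, ...}, 'oscar'),
#    ({'avg_polarity': -0.05, 'std_retweet': 1.1, ...}, 'razzies')]
# classifier.classify(featureset) then returns one of the training labels.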
# Predict the film
report = {}
predict_labels = ['oscar', 'razzies']
for predict_label in predict_labels:
my_dir = os.path.join(PREDICT_DIR, predict_label)
list_of_files = os.listdir(my_dir)
report[predict_label] = {
'number_of_match': 0,
'number_of_films': len(list_of_files)
}
for fn in list_of_files:
path = os.path.join(my_dir, fn)
film_name = os.path.splitext(fn)[0]
with open(path, 'r') as f:
tweets = json.load(f)
tweets = json.loads(tweets)
tweet_characteristics = []
for tweet in tweets:
                # Per-tweet analysis
characteristic = attribute_to_characteristic(tweet)
tweet_characteristics.append(characteristic)
film_characteristic = construct_film_characteristic(
film_name,
tweet_characteristics
)
result = classifier.classify(film_characteristic)
if result == predict_label:
report[predict_label]['number_of_match'] += 1
print film_characteristic
print 'film: |%s| PREDICT: |%s|\n' % (film_name, result)
report['features'] = film_characteristic.keys()
# classifier.show_most_informative_features()
print "# Features in film's characteristic\n"
for f in report['features']:
print '* %s' % f
print '\n# Prediction\n'
for predict_label in predict_labels:
r = report[predict_label]
print '## %s\n' % predict_label
print 'match %d out of %d, accuracy=%d%%\n' % (
r['number_of_match'],
r['number_of_films'],
round(r['number_of_match'] / r['number_of_films'] * 100)
)
print '## overall\n'
print 'match %d out of %d, accuracy=%d%%\n' % (
sum(
[report[p]['number_of_match'] for p in predict_labels]
),
sum(
[report[p]['number_of_films'] for p in predict_labels]
),
round(
sum(
[report[p]['number_of_match'] for p in predict_labels]
) /
sum(
[report[p]['number_of_films'] for p in predict_labels]
) * 100
)
)
predictCandidates()
|
mondwan/ProjectRazzies
|
twitter_analyse.py
|
Python
|
mit
| 9,448
| 0.004128
|
# coding=utf-8
"""
Impact Layer Merge Dialog.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Christian Christelis christian@kartoza.com'
__revision__ = '$Format:%H$'
__date__ = '27/10/2014'
__copyright__ = ('Copyright 2012, Australia Indonesia Facility for '
'Disaster Reduction')
import os
from os.path import expanduser, basename
# This import must come first to force sip2 api
# noinspection PyUnresolvedReferences
# pylint: disable=unused-import
from qgis.core import QGis # force sip2 api
# noinspection PyPackageRequirements
from PyQt4 import QtGui
# noinspection PyPackageRequirements
from PyQt4.QtGui import (
QDialog,
QFileDialog,
QGridLayout,
QPushButton,
QDialogButtonBox,
QMessageBox,
QIcon
)
from PyQt4.QtCore import pyqtSignature, pyqtSlot
from safe.common.resource_parameter import ResourceParameter
from safe_extras.parameters.float_parameter import FloatParameter
from safe_extras.parameters.qt_widgets.parameter_container import (
ParameterContainer)
from safe_extras.parameters.parameter_exceptions import (
ValueOutOfBounds,
InvalidMaximumError,
InvalidMinimumError)
from safe_extras.parameters.string_parameter import StringParameter
from safe_extras.parameters.text_parameter import TextParameter
from safe.utilities.resources import (
resources_path, get_ui_class, html_footer, html_header)
from safe.messaging import styles
from safe.gui.tools.minimum_needs.needs_profile import NeedsProfile
from safe.utilities.i18n import tr
from safe.gui.tools.help.needs_manager_help import needs_manager_helps
INFO_STYLE = styles.INFO_STYLE
FORM_CLASS = get_ui_class('needs_manager_dialog_base.ui')
class NeedsManagerDialog(QDialog, FORM_CLASS):
"""Dialog class for the InaSAFE global minimum needs configuration.
.. versionadded:: 2.2.
"""
def __init__(self, parent=None, dock=None):
"""Constructor for the minimum needs dialog.
:param parent: Parent widget of this dialog.
:type parent: QWidget
:param dock: Dock widget instance that we can notify of changes.
:type dock: Dock
"""
QtGui.QDialog.__init__(self, parent)
self.setupUi(self)
self.dock = dock
# These are in the little button bar at the top
# 'Remove resource' button
# noinspection PyUnresolvedReferences
self.remove_resource_button.clicked.connect(self.remove_resource)
self.remove_resource_button.setIcon(
QIcon(os.path.join(
resources_path(), 'img', 'icons', 'remove.svg')))
# Add resource
# noinspection PyUnresolvedReferences
self.add_resource_button.clicked.connect(self.add_new_resource)
self.add_resource_button.setIcon(
QIcon(os.path.join(
resources_path(), 'img', 'icons', 'add.svg')))
# Edit resource
# noinspection PyUnresolvedReferences
self.edit_resource_button.clicked.connect(self.edit_resource)
self.edit_resource_button.setIcon(
QIcon(os.path.join(
resources_path(), 'img', 'icons', 'edit.svg')))
# Discard changes to a resource
self.discard_changes_button = QPushButton(self.tr('Discard changes'))
self.button_box.addButton(
self.discard_changes_button, QDialogButtonBox.ActionRole)
# noinspection PyUnresolvedReferences
self.discard_changes_button.clicked.connect(self.discard_changes)
# Save changes to a resource
self.save_resource_button = QPushButton(self.tr('Save resource'))
self.button_box.addButton(
self.save_resource_button, QDialogButtonBox.ActionRole)
# noinspection PyUnresolvedReferences
self.save_resource_button.clicked.connect(self.save_resource)
# Export profile button
self.export_profile_button = QPushButton(self.tr('Export ...'))
self.button_box.addButton(
self.export_profile_button, QDialogButtonBox.ActionRole)
# noinspection PyUnresolvedReferences
self.export_profile_button.clicked.connect(self.export_profile)
# Import profile button
self.import_profile_button = QPushButton(self.tr('Import ...'))
self.button_box.addButton(
self.import_profile_button, QDialogButtonBox.ActionRole)
# noinspection PyUnresolvedReferences
self.import_profile_button.clicked.connect(self.import_profile)
# New profile button
self.new_profile_button = QPushButton(self.tr('New'))
self.button_box.addButton(
self.new_profile_button, QDialogButtonBox.ActionRole)
# noinspection PyUnresolvedReferences
self.new_profile_button.clicked.connect(self.new_profile)
# Save profile button
self.save_profile_button = QPushButton(self.tr('Save'))
self.button_box.addButton(
self.save_profile_button, QDialogButtonBox.ActionRole)
# noinspection PyUnresolvedReferences
self.save_profile_button.clicked.connect(self.save_profile)
# 'Save as' profile button
self.save_profile_as_button = QPushButton(self.tr('Save as'))
self.button_box.addButton(
self.save_profile_as_button, QDialogButtonBox.ActionRole)
# noinspection PyUnresolvedReferences
self.save_profile_as_button.clicked.connect(
self.save_profile_as)
# Set up things for context help
self.help_button = self.button_box.button(QtGui.QDialogButtonBox.Help)
# Allow toggling the help button
self.help_button.setCheckable(True)
self.help_button.toggled.connect(self.help_toggled)
self.main_stacked_widget.setCurrentIndex(1)
self.minimum_needs = NeedsProfile()
self.edit_item = None
# Remove profile button
# noinspection PyUnresolvedReferences
self.remove_profile_button.clicked.connect(self.remove_profile)
# These are all buttons that will get hidden on context change
# to the profile editing view
self.profile_editing_buttons = list()
self.profile_editing_buttons.append(self.remove_resource_button)
self.profile_editing_buttons.append(self.add_resource_button)
self.profile_editing_buttons.append(self.edit_resource_button)
self.profile_editing_buttons.append(self.export_profile_button)
self.profile_editing_buttons.append(self.import_profile_button)
self.profile_editing_buttons.append(self.new_profile_button)
self.profile_editing_buttons.append(self.save_profile_button)
self.profile_editing_buttons.append(self.save_profile_as_button)
# We also keep a list of all widgets to disable in context of resource
# editing (not hidden, just disabled)
self.profile_editing_widgets = self.profile_editing_buttons
self.profile_editing_widgets.append(self.remove_profile_button)
self.profile_editing_widgets.append(self.profile_combo)
# These are all buttons that will get hidden on context change
# to the resource editing view
self.resource_editing_buttons = list()
self.resource_editing_buttons.append(self.discard_changes_button)
self.resource_editing_buttons.append(self.save_resource_button)
for item in self.resource_editing_buttons:
item.hide()
self.load_profiles()
# Next 2 lines fixes issues #1388 #1389 #1390 #1391
if self.profile_combo.count() > 0:
self.select_profile(0)
# initial sync profile_combo and resource list
self.clear_resource_list()
self.populate_resource_list()
self.set_up_resource_parameters()
# Only do this afterward load_profiles to avoid the resource list
# being updated
# noinspection PyUnresolvedReferences
self.profile_combo.activated.connect(self.select_profile)
# noinspection PyUnresolvedReferences
self.stacked_widget.currentChanged.connect(self.page_changed)
self.select_profile(self.profile_combo.currentIndex())
def reject(self):
"""Overload the base dialog reject event so we can handle state change.
If the user is in resource editing mode, clicking close button,
window [x] or pressing escape should switch context back to the
profile view, not close the whole window.
See https://github.com/AIFDR/inasafe/issues/1387
"""
if self.stacked_widget.currentWidget() == self.resource_edit_page:
self.edit_item = None
self.switch_context(self.profile_edit_page)
else:
super(NeedsManagerDialog, self).reject()
def populate_resource_list(self):
"""Populate the list resource list.
"""
minimum_needs = self.minimum_needs.get_full_needs()
for full_resource in minimum_needs["resources"]:
self.add_resource(full_resource)
self.provenance.setText(minimum_needs["provenance"])
def clear_resource_list(self):
"""Clear the resource list.
"""
self.resources_list.clear()
def add_resource(self, resource):
"""Add a resource to the minimum needs table.
:param resource: The resource to be added
:type resource: dict
"""
updated_sentence = NeedsProfile.format_sentence(
resource['Readable sentence'], resource)
if self.edit_item:
item = self.edit_item
item.setText(updated_sentence)
self.edit_item = None
else:
item = QtGui.QListWidgetItem(updated_sentence)
item.resource_full = resource
self.resources_list.addItem(item)
def load_profiles(self):
"""Load the profiles into the dropdown list.
"""
for profile in self.minimum_needs.get_profiles():
self.profile_combo.addItem(profile)
minimum_needs = self.minimum_needs.get_full_needs()
self.profile_combo.setCurrentIndex(
self.profile_combo.findText(minimum_needs['profile']))
def select_profile(self, index):
"""Select a given profile by index.
Slot for when profile is selected.
:param index: The selected item's index
:type index: int
"""
new_profile = self.profile_combo.itemText(index)
self.resources_list.clear()
self.minimum_needs.load_profile(new_profile)
self.clear_resource_list()
self.populate_resource_list()
self.minimum_needs.save()
def select_profile_by_name(self, profile_name):
"""Select a given profile by profile name
:param profile_name: The profile name
:type profile_name: str
"""
self.select_profile(self.profile_combo.findText(profile_name))
def mark_current_profile_as_pending(self):
"""Mark the current profile as pending by colouring the text red.
"""
index = self.profile_combo.currentIndex()
item = self.profile_combo.model().item(index)
item.setForeground(QtGui.QColor('red'))
def mark_current_profile_as_saved(self):
"""Mark the current profile as saved by colouring the text black.
"""
index = self.profile_combo.currentIndex()
item = self.profile_combo.model().item(index)
item.setForeground(QtGui.QColor('black'))
def add_new_resource(self):
"""Handle add new resource requests.
"""
parameters_widget = [
self.parameters_scrollarea.layout().itemAt(i) for i in
range(self.parameters_scrollarea.layout().count())][0].widget()
parameter_widgets = [
parameters_widget.vertical_layout.itemAt(i).widget() for i in
range(parameters_widget.vertical_layout.count())]
parameter_widgets[0].set_text('')
parameter_widgets[1].set_text('')
parameter_widgets[2].set_text('')
parameter_widgets[3].set_text('')
parameter_widgets[4].set_text('')
parameter_widgets[5].set_value(10)
parameter_widgets[6].set_value(0)
parameter_widgets[7].set_value(100)
parameter_widgets[8].set_text(tr('weekly'))
parameter_widgets[9].set_text(tr(
"A displaced person should be provided with "
"{{ Default }} {{ Unit }}/{{ Units }}/{{ Unit abbreviation }} of "
"{{ Resource name }}. Though no less than {{ Minimum allowed }} "
"and no more than {{ Maximum allowed }}. This should be provided "
"{{ Frequency }}."))
self.stacked_widget.setCurrentWidget(self.resource_edit_page)
# hide the close button
self.button_box.button(QDialogButtonBox.Close).setHidden(True)
def edit_resource(self):
"""Handle edit resource requests.
"""
self.mark_current_profile_as_pending()
resource = None
for item in self.resources_list.selectedItems()[:1]:
resource = item.resource_full
self.edit_item = item
if not resource:
return
parameters_widget = [
self.parameters_scrollarea.layout().itemAt(i) for i in
range(self.parameters_scrollarea.layout().count())][0].widget()
parameter_widgets = [
parameters_widget.vertical_layout.itemAt(i).widget() for i in
range(parameters_widget.vertical_layout.count())]
parameter_widgets[0].set_text(resource['Resource name'])
parameter_widgets[1].set_text(resource['Resource description'])
parameter_widgets[2].set_text(resource['Unit'])
parameter_widgets[3].set_text(resource['Units'])
parameter_widgets[4].set_text(resource['Unit abbreviation'])
parameter_widgets[5].set_value(float(resource['Default']))
parameter_widgets[6].set_value(float(resource['Minimum allowed']))
parameter_widgets[7].set_value(float(resource['Maximum allowed']))
parameter_widgets[8].set_text(resource['Frequency'])
parameter_widgets[9].set_text(resource['Readable sentence'])
self.switch_context(self.resource_edit_page)
def set_up_resource_parameters(self):
"""Set up the resource parameter for the add/edit view.
"""
name_parameter = StringParameter('UUID-1')
name_parameter.name = tr('Resource name')
name_parameter.help_text = tr(
'Name of the resource that will be provided '
'as part of minimum needs. '
'e.g. Rice, Water etc.')
name_parameter.description = tr(
'A <b>resource</b> is something that you provide to displaced '
'persons in the event of a disaster. The resource will be made '
'available at IDP camps and may need to be stockpiled by '
'contingency planners in their preparations for a disaster.')
name_parameter.is_required = True
name_parameter.value = ''
description_parameter = StringParameter('UUID-2')
description_parameter.name = tr('Resource description')
description_parameter.help_text = tr(
'Description of the resource that will be provided as part of '
'minimum needs.')
description_parameter.description = tr(
'This gives a detailed description of what the resource is and ')
description_parameter.is_required = True
description_parameter.value = ''
unit_parameter = StringParameter('UUID-3')
unit_parameter.name = tr('Unit')
unit_parameter.help_text = tr(
'Single unit for the resources spelled out. e.g. litre, '
'kilogram etc.')
unit_parameter.description = tr(
'A <b>unit</b> is the basic measurement unit used for computing '
'the allowance per individual. For example when planning water '
'rations the unit would be single litre.')
unit_parameter.is_required = True
unit_parameter.value = ''
units_parameter = StringParameter('UUID-4')
units_parameter.name = tr('Units')
units_parameter.help_text = tr(
'Multiple units for the resources spelled out. e.g. litres, '
'kilogram etc.')
units_parameter.description = tr(
'<b>Units</b> are the basic measurement used for computing the '
'allowance per individual. For example when planning water '
'rations the units would be litres.')
units_parameter.is_required = True
units_parameter.value = ''
unit_abbreviation_parameter = StringParameter('UUID-5')
unit_abbreviation_parameter.name = tr('Unit abbreviation')
unit_abbreviation_parameter.help_text = tr(
'Abbreviations of unit for the resources. e.g. l, kg etc.')
unit_abbreviation_parameter.description = tr(
"A <b>unit abbreviation</b> is the basic measurement unit's "
"shortened. For example when planning water rations "
"the units would be l.")
unit_abbreviation_parameter.is_required = True
unit_abbreviation_parameter.value = ''
minimum_parameter = FloatParameter('UUID-6')
minimum_parameter.name = tr('Minimum allowed')
minimum_parameter.is_required = True
minimum_parameter.precision = 2
minimum_parameter.minimum_allowed_value = -99999.0
minimum_parameter.maximum_allowed_value = 99999.0
minimum_parameter.help_text = tr(
'The minimum allowable quantity per person. ')
minimum_parameter.description = tr(
'The <b>minimum</b> is the minimum allowed quantity of the '
'resource per person. For example you may dictate that the water '
'ration per person per day should never be allowed to be less '
'than 0.5l. This is enforced when tweaking a minimum needs set '
'before an impact evaluation')
minimum_parameter.value = 0.00
maximum_parameter = FloatParameter('UUID-7')
maximum_parameter.name = tr('Maximum allowed')
maximum_parameter.is_required = True
maximum_parameter.precision = 2
maximum_parameter.minimum_allowed_value = -99999.0
maximum_parameter.maximum_allowed_value = 99999.0
maximum_parameter.help_text = tr(
'The maximum allowable quantity per person. ')
maximum_parameter.description = tr(
'The <b>maximum</b> is the maximum allowed quantity of the '
'resource per person. For example you may dictate that the water '
'ration per person per day should never be allowed to be more '
'than 67l. This is enforced when tweaking a maximum needs set '
'before an impact evaluation.')
maximum_parameter.value = 100.0
default_parameter = FloatParameter('UUID-8')
default_parameter.name = tr('Default')
default_parameter.is_required = True
default_parameter.precision = 2
default_parameter.minimum_allowed_value = -99999.0
default_parameter.maximum_allowed_value = 99999.0
default_parameter.help_text = tr(
'The default allowable quantity per person. ')
default_parameter.description = tr(
"The <b>default</b> is the default allowed quantity of the "
"resource per person. For example you may indicate that the water "
"ration per person weekly should be 67l.")
default_parameter.value = 10.0
frequency_parameter = StringParameter('UUID-9')
frequency_parameter.name = tr('Frequency')
frequency_parameter.help_text = tr(
"The frequency that this resource needs to be provided to a "
"displaced person. e.g. weekly, daily, once etc.")
frequency_parameter.description = tr(
"The <b>frequency</b> informs the aid worker how regularly this "
"resource needs to be provided to the displaced person.")
frequency_parameter.is_required = True
frequency_parameter.value = tr('weekly')
sentence_parameter = TextParameter('UUID-10')
sentence_parameter.name = tr('Readable sentence')
sentence_parameter.help_text = tr(
'A readable presentation of the resource.')
sentence_parameter.description = tr(
"A <b>readable sentence</b> is a presentation of the resource "
"that displays all pertinent information. If you are unsure then "
"use the default. Properties should be included using double "
"curly brackets '{{' '}}'. Including the resource name would be "
"achieved by including e.g. {{ Resource name }}")
sentence_parameter.is_required = True
sentence_parameter.value = tr(
"A displaced person should be provided with "
"{{ Default }} {{ Unit }}/{{ Units }}/{{ Unit abbreviation }} of "
"{{ Resource name }}. Though no less than {{ Minimum allowed }} "
"and no more than {{ Maximum allowed }}. This should be provided "
"{{ Frequency }}.")
parameters = [
name_parameter,
description_parameter,
unit_parameter,
units_parameter,
unit_abbreviation_parameter,
default_parameter,
minimum_parameter,
maximum_parameter,
frequency_parameter,
sentence_parameter
]
parameter_container = ParameterContainer(parameters)
parameter_container.setup_ui()
layout = QGridLayout()
layout.setContentsMargins(0, 0, 0, 0)
layout.setSpacing(0)
layout.addWidget(parameter_container)
self.parameters_scrollarea.setLayout(layout)
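        # Illustrative only: for a resource dict such as
        #   {'Resource name': 'Rice', 'Default': 2.8, 'Unit': 'kilogram',
        #    'Units': 'kilograms', 'Unit abbreviation': 'kg',
        #    'Minimum allowed': 0, 'Maximum allowed': 100,
        #    'Frequency': 'weekly'}
        # NeedsProfile.format_sentence is expected to expand the {{ ... }}
        # placeholders in the readable sentence above to roughly:
        #   "A displaced person should be provided with 2.8
        #    kilogram/kilograms/kg of Rice. Though no less than 0 and no more
        #    than 100. This should be provided weekly."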
def remove_resource(self):
"""Remove the currently selected resource.
"""
self.mark_current_profile_as_pending()
for item in self.resources_list.selectedItems():
self.resources_list.takeItem(self.resources_list.row(item))
def discard_changes(self):
"""Discard the changes to the resource add/edit.
"""
self.edit_item = None
self.switch_context(self.profile_edit_page)
def save_resource(self):
"""Accept the add/edit of the current resource.
"""
# --
# Hackorama to get this working outside the method that the
        # parameters were defined in.
parameters_widget = [
self.parameters_scrollarea.layout().itemAt(i) for i in
range(self.parameters_scrollarea.layout().count())][0]
parameters = parameters_widget.widget().get_parameters()
resource = {}
for parameter in parameters:
resource[parameter.name] = parameter.value
# verify the parameters are ok - create a throw-away resource param
try:
parameter = ResourceParameter()
parameter.name = resource['Resource name']
parameter.help_text = resource['Resource description']
# Adding in the frequency property. This is not in the
# FloatParameter by default, so maybe we should subclass.
parameter.frequency = resource['Frequency']
parameter.description = NeedsProfile.format_sentence(
resource['Readable sentence'],
resource)
parameter.minimum_allowed_value = float(
resource['Minimum allowed'])
parameter.maximum_allowed_value = float(
resource['Maximum allowed'])
parameter.unit.name = resource['Unit']
parameter.unit.plural = resource['Units']
parameter.unit.abbreviation = resource['Unit abbreviation']
parameter.value = float(resource['Default'])
except ValueOutOfBounds, e:
warning = self.tr(
'Problem - default value is invalid') + '\n' + e.message
# noinspection PyTypeChecker,PyArgumentList
QMessageBox.warning(None, 'InaSAFE', warning)
return
except InvalidMaximumError, e:
warning = self.tr(
'Problem - maximum value is invalid') + '\n' + e.message
# noinspection PyTypeChecker,PyArgumentList
QMessageBox.warning(None, 'InaSAFE', warning)
return
except InvalidMinimumError, e:
warning = self.tr(
'Problem - minimum value is invalid') + '\n' + e.message
# noinspection PyTypeChecker,PyArgumentList
QMessageBox.warning(None, 'InaSAFE', warning)
return
# end of test for parameter validity
self.add_resource(resource)
self.switch_context(self.profile_edit_page)
def import_profile(self):
""" Import minimum needs from an existing json file.
The minimum needs are loaded from a file into the table. This state
is only saved if the form is accepted.
"""
# noinspection PyCallByClass,PyTypeChecker
file_name_dialog = QFileDialog(self)
file_name_dialog.setAcceptMode(QtGui.QFileDialog.AcceptOpen)
file_name_dialog.setNameFilter(self.tr('JSON files (*.json *.JSON)'))
file_name_dialog.setDefaultSuffix('json')
current_directory = os.path.dirname(__file__)
path_name = os.path.join(
current_directory, '..', '..', 'resources', 'minimum_needs')
file_name_dialog.setDirectory(path_name)
if file_name_dialog.exec_():
file_name = file_name_dialog.selectedFiles()[0]
else:
return -1
if self.minimum_needs.read_from_file(file_name) == -1:
return -1
self.clear_resource_list()
self.populate_resource_list()
self.switch_context(self.profile_edit_page)
def export_profile(self):
""" Export minimum needs to a json file.
This method will save the current state of the minimum needs setup.
Then open a dialog allowing the user to browse to the desired
destination location and allow the user to save the needs as a json
file.
"""
file_name_dialog = QFileDialog(self)
file_name_dialog.setAcceptMode(QtGui.QFileDialog.AcceptSave)
file_name_dialog.setNameFilter(self.tr('JSON files (*.json *.JSON)'))
file_name_dialog.setDefaultSuffix('json')
file_name = None
if file_name_dialog.exec_():
file_name = file_name_dialog.selectedFiles()[0]
if file_name != '' and file_name is not None:
self.minimum_needs.write_to_file(file_name)
def save_profile(self):
""" Save the current state of the minimum needs widget.
The minimum needs widget current state is saved to the QSettings via
the appropriate QMinimumNeeds class' method.
"""
minimum_needs = {'resources': []}
for index in xrange(self.resources_list.count()):
item = self.resources_list.item(index)
minimum_needs['resources'].append(item.resource_full)
minimum_needs['provenance'] = self.provenance.text()
minimum_needs['profile'] = self.profile_combo.itemText(
self.profile_combo.currentIndex()
)
self.minimum_needs.update_minimum_needs(minimum_needs)
self.minimum_needs.save()
self.minimum_needs.save_profile(minimum_needs['profile'])
self.mark_current_profile_as_saved()
# Emit combobox function in dock
current_index = self.dock.cboFunction.currentIndex()
self.dock.cboFunction.currentIndexChanged.emit(current_index)
def save_profile_as(self):
"""Save the minimum needs under a new profile name.
"""
# noinspection PyCallByClass,PyTypeChecker
file_name_dialog = QFileDialog(self)
file_name_dialog.setAcceptMode(QtGui.QFileDialog.AcceptSave)
file_name_dialog.setNameFilter(self.tr('JSON files (*.json *.JSON)'))
file_name_dialog.setDefaultSuffix('json')
file_name_dialog.setDirectory(expanduser('~/.qgis2/minimum_needs'))
if file_name_dialog.exec_():
file_name = file_name_dialog.selectedFiles()[0]
else:
return
file_name = basename(file_name)
file_name = file_name.replace('.json', '')
minimum_needs = {'resources': []}
self.mark_current_profile_as_saved()
for index in xrange(self.resources_list.count()):
item = self.resources_list.item(index)
minimum_needs['resources'].append(item.resource_full)
minimum_needs['provenance'] = self.provenance.text()
minimum_needs['profile'] = file_name
self.minimum_needs.update_minimum_needs(minimum_needs)
self.minimum_needs.save()
self.minimum_needs.save_profile(file_name)
if self.profile_combo.findText(file_name) == -1:
self.profile_combo.addItem(file_name)
self.profile_combo.setCurrentIndex(
self.profile_combo.findText(file_name))
def new_profile(self):
"""Create a new profile by name.
"""
# noinspection PyCallByClass,PyTypeChecker
file_name = QFileDialog.getSaveFileName(
self,
self.tr('Create a minimum needs profile'),
expanduser('~/.qgis2/minimum_needs'),
self.tr('JSON files (*.json *.JSON)'),
options=QtGui.QFileDialog.DontUseNativeDialog)
if not file_name:
return
file_name = basename(file_name)
if self.profile_combo.findText(file_name) == -1:
minimum_needs = {
'resources': [], 'provenance': '', 'profile': file_name}
self.minimum_needs.update_minimum_needs(minimum_needs)
self.minimum_needs.save_profile(file_name)
self.profile_combo.addItem(file_name)
self.clear_resource_list()
self.profile_combo.setCurrentIndex(
self.profile_combo.findText(file_name))
else:
self.profile_combo.setCurrentIndex(
self.profile_combo.findText(file_name))
self.select_profile_by_name(file_name)
def page_changed(self, index):
"""Slot for when tab changes in the stacked widget changes.
:param index: Index of the now active tab.
:type index: int
"""
if index == 0: # profile edit page
for item in self.resource_editing_buttons:
item.hide()
for item in self.profile_editing_widgets:
item.setEnabled(True)
for item in self.profile_editing_buttons:
item.show()
else: # resource_edit_page
for item in self.resource_editing_buttons:
item.show()
for item in self.profile_editing_widgets:
item.setEnabled(False)
for item in self.profile_editing_buttons:
item.hide()
def switch_context(self, page):
"""Switch context tabs by tab widget name.
        :param page: The page that should be focused.
:type page: QWidget
"""
# noinspection PyUnresolvedReferences
if page.objectName() == 'profile_edit_page':
self.stacked_widget.setCurrentIndex(0)
self.button_box.button(QDialogButtonBox.Close).setHidden(False)
else: # resource_edit_page
self.stacked_widget.setCurrentIndex(1)
# hide the close button
self.button_box.button(QDialogButtonBox.Close).setHidden(True)
def remove_profile(self):
"""Remove the current profile.
Make sure the user is sure.
"""
profile_name = self.profile_combo.currentText()
# noinspection PyTypeChecker
button_selected = QMessageBox.warning(
None,
'Remove Profile',
self.tr('Remove %s.') % profile_name,
QMessageBox.Ok,
QMessageBox.Cancel
)
if button_selected == QMessageBox.Ok:
self.profile_combo.removeItem(
self.profile_combo.currentIndex()
)
self.minimum_needs.remove_profile(profile_name)
self.select_profile(self.profile_combo.currentIndex())
@pyqtSlot()
@pyqtSignature('bool') # prevents actions being handled twice
def help_toggled(self, flag):
"""Show or hide the help tab in the stacked widget.
.. versionadded: 3.2.1
:param flag: Flag indicating whether help should be shown or hidden.
:type flag: bool
"""
if flag:
self.help_button.setText(self.tr('Hide Help'))
self.show_help()
else:
self.help_button.setText(self.tr('Show Help'))
self.hide_help()
def hide_help(self):
"""Hide the usage info from the user.
.. versionadded: 3.2.1
"""
self.main_stacked_widget.setCurrentIndex(1)
def show_help(self):
"""Show usage info to the user."""
# Read the header and footer html snippets
self.main_stacked_widget.setCurrentIndex(0)
header = html_header()
footer = html_footer()
string = header
message = needs_manager_helps()
string += message.to_html()
string += footer
self.help_web_view.setHtml(string)
|
dynaryu/inasafe
|
safe/gui/tools/minimum_needs/needs_manager_dialog.py
|
Python
|
gpl-3.0
| 33,744
| 0
|
# Copyright 2020 The gRPC Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
from typing import AsyncIterable
import grpc
from grpc.aio._metadata import Metadata
from grpc.aio._typing import MetadataKey
from grpc.aio._typing import MetadataValue
from grpc.aio._typing import MetadatumType
from grpc.experimental import aio
from tests.unit.framework.common import test_constants
ADHOC_METHOD = '/test/AdHoc'
def seen_metadata(expected: Metadata, actual: Metadata) -> bool:
return not bool(set(tuple(expected)) - set(tuple(actual)))
def seen_metadatum(expected_key: MetadataKey, expected_value: MetadataValue,
actual: Metadata) -> bool:
obtained = actual[expected_key]
return obtained == expected_value
async def block_until_certain_state(channel: aio.Channel,
expected_state: grpc.ChannelConnectivity):
state = channel.get_state()
while state != expected_state:
await channel.wait_for_state_change(state)
state = channel.get_state()
def inject_callbacks(call: aio.Call):
first_callback_ran = asyncio.Event()
def first_callback(call):
        # Validate that all responses have been received
# and the call is an end state.
assert call.done()
first_callback_ran.set()
second_callback_ran = asyncio.Event()
def second_callback(call):
# Validate that all responses have been received
# and the call is an end state.
assert call.done()
second_callback_ran.set()
call.add_done_callback(first_callback)
call.add_done_callback(second_callback)
async def validation():
await asyncio.wait_for(
asyncio.gather(first_callback_ran.wait(),
second_callback_ran.wait()),
test_constants.SHORT_TIMEOUT)
return validation()
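# Typical use in a test body (sketch): attach the callbacks before the RPC
# finishes, then await the returned coroutine once it has completed, e.g.
#
#   validation = inject_callbacks(call)
#   await call          # or otherwise drive the RPC to completion
#   await validation    # times out if either done-callback never fired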
class CountingRequestIterator:
def __init__(self, request_iterator):
self.request_cnt = 0
self._request_iterator = request_iterator
async def _forward_requests(self):
async for request in self._request_iterator:
self.request_cnt += 1
yield request
def __aiter__(self):
return self._forward_requests()
class CountingResponseIterator:
def __init__(self, response_iterator):
self.response_cnt = 0
self._response_iterator = response_iterator
async def _forward_responses(self):
async for response in self._response_iterator:
self.response_cnt += 1
yield response
def __aiter__(self):
return self._forward_responses()
class AdhocGenericHandler(grpc.GenericRpcHandler):
"""A generic handler to plugin testing server methods on the fly."""
_handler: grpc.RpcMethodHandler
def __init__(self):
self._handler = None
def set_adhoc_handler(self, handler: grpc.RpcMethodHandler):
self._handler = handler
def service(self, handler_call_details):
if handler_call_details.method == ADHOC_METHOD:
return self._handler
else:
return None
|
stanley-cheung/grpc
|
src/python/grpcio_tests/tests_aio/unit/_common.py
|
Python
|
apache-2.0
| 3,617
| 0
|
# Copyright (c) 2010 Cloud.com, Inc
# Copyright 2012 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Management class for basic VM operations.
"""
import contextlib
import functools
import os
import time
from eventlet import timeout as etimeout
from os_win import constants as os_win_const
from os_win import exceptions as os_win_exc
from os_win import utilsfactory
from oslo_concurrency import processutils
from oslo_log import log as logging
from oslo_service import loopingcall
from oslo_utils import excutils
from oslo_utils import fileutils
from oslo_utils import units
from oslo_utils import uuidutils
from nova.api.metadata import base as instance_metadata
from nova.compute import vm_states
import nova.conf
from nova import exception
from nova.i18n import _, _LI, _LE, _LW
from nova import objects
from nova.objects import fields
from nova import utils
from nova.virt import configdrive
from nova.virt import hardware
from nova.virt.hyperv import block_device_manager
from nova.virt.hyperv import constants
from nova.virt.hyperv import imagecache
from nova.virt.hyperv import pathutils
from nova.virt.hyperv import serialconsoleops
from nova.virt.hyperv import vif as vif_utils
from nova.virt.hyperv import volumeops
LOG = logging.getLogger(__name__)
CONF = nova.conf.CONF
SHUTDOWN_TIME_INCREMENT = 5
REBOOT_TYPE_SOFT = 'SOFT'
REBOOT_TYPE_HARD = 'HARD'
VM_GENERATIONS = {
constants.IMAGE_PROP_VM_GEN_1: constants.VM_GEN_1,
constants.IMAGE_PROP_VM_GEN_2: constants.VM_GEN_2
}
VM_GENERATIONS_CONTROLLER_TYPES = {
constants.VM_GEN_1: constants.CTRL_TYPE_IDE,
constants.VM_GEN_2: constants.CTRL_TYPE_SCSI
}
def check_admin_permissions(function):
@functools.wraps(function)
def wrapper(self, *args, **kwds):
# Make sure the windows account has the required admin permissions.
self._vmutils.check_admin_permissions()
return function(self, *args, **kwds)
return wrapper
class VMOps(object):
# The console log is stored in two files, each should have at most half of
# the maximum console log size.
_MAX_CONSOLE_LOG_FILE_SIZE = units.Mi / 2
_ROOT_DISK_CTRL_ADDR = 0
def __init__(self, virtapi=None):
self._virtapi = virtapi
self._vmutils = utilsfactory.get_vmutils()
self._metricsutils = utilsfactory.get_metricsutils()
self._vhdutils = utilsfactory.get_vhdutils()
self._hostutils = utilsfactory.get_hostutils()
self._pathutils = pathutils.PathUtils()
self._volumeops = volumeops.VolumeOps()
self._imagecache = imagecache.ImageCache()
self._serial_console_ops = serialconsoleops.SerialConsoleOps()
self._block_dev_man = (
block_device_manager.BlockDeviceInfoManager())
self._vif_driver = vif_utils.HyperVVIFDriver()
def list_instance_uuids(self):
instance_uuids = []
for (instance_name, notes) in self._vmutils.list_instance_notes():
if notes and uuidutils.is_uuid_like(notes[0]):
instance_uuids.append(str(notes[0]))
else:
LOG.debug("Notes not found or not resembling a GUID for "
"instance: %s", instance_name)
return instance_uuids
def list_instances(self):
return self._vmutils.list_instances()
def estimate_instance_overhead(self, instance_info):
# NOTE(claudiub): When an instance starts, Hyper-V creates a VM memory
# file on the local disk. The file size is the same as the VM's amount
# of memory. Since disk_gb must be an integer, and memory is MB, round
# up from X512 MB.
return {'memory_mb': 0,
'disk_gb': (instance_info['memory_mb'] + 512) // units.Ki}
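        # e.g. a flavor with 2048 MB of RAM reserves (2048 + 512) // 1024 = 2 GB
        # of local disk for the memory file; 1536 MB also rounds up to 2 GB,
        # while 1535 MB rounds down to 1 GB (illustrative values only).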
def get_info(self, instance):
"""Get information about the VM."""
LOG.debug("get_info called for instance", instance=instance)
instance_name = instance.name
if not self._vmutils.vm_exists(instance_name):
raise exception.InstanceNotFound(instance_id=instance.uuid)
info = self._vmutils.get_vm_summary_info(instance_name)
state = constants.HYPERV_POWER_STATE[info['EnabledState']]
return hardware.InstanceInfo(state=state,
max_mem_kb=info['MemoryUsage'],
mem_kb=info['MemoryUsage'],
num_cpu=info['NumberOfProcessors'],
cpu_time_ns=info['UpTime'])
def _create_root_device(self, context, instance, root_disk_info, vm_gen):
path = None
if root_disk_info['type'] == constants.DISK:
path = self._create_root_vhd(context, instance)
self.check_vm_image_type(instance.uuid, vm_gen, path)
root_disk_info['path'] = path
def _create_root_vhd(self, context, instance, rescue_image_id=None):
is_rescue_vhd = rescue_image_id is not None
base_vhd_path = self._imagecache.get_cached_image(context, instance,
rescue_image_id)
base_vhd_info = self._vhdutils.get_vhd_info(base_vhd_path)
base_vhd_size = base_vhd_info['VirtualSize']
format_ext = base_vhd_path.split('.')[-1]
root_vhd_path = self._pathutils.get_root_vhd_path(instance.name,
format_ext,
is_rescue_vhd)
root_vhd_size = instance.flavor.root_gb * units.Gi
try:
if CONF.use_cow_images:
LOG.debug("Creating differencing VHD. Parent: "
"%(base_vhd_path)s, Target: %(root_vhd_path)s",
{'base_vhd_path': base_vhd_path,
'root_vhd_path': root_vhd_path},
instance=instance)
self._vhdutils.create_differencing_vhd(root_vhd_path,
base_vhd_path)
vhd_type = self._vhdutils.get_vhd_format(base_vhd_path)
if vhd_type == constants.DISK_FORMAT_VHD:
# The base image has already been resized. As differencing
# vhdx images support it, the root image will be resized
# instead if needed.
return root_vhd_path
else:
LOG.debug("Copying VHD image %(base_vhd_path)s to target: "
"%(root_vhd_path)s",
{'base_vhd_path': base_vhd_path,
'root_vhd_path': root_vhd_path},
instance=instance)
self._pathutils.copyfile(base_vhd_path, root_vhd_path)
root_vhd_internal_size = (
self._vhdutils.get_internal_vhd_size_by_file_size(
base_vhd_path, root_vhd_size))
if not is_rescue_vhd and self._is_resize_needed(
root_vhd_path, base_vhd_size,
root_vhd_internal_size, instance):
self._vhdutils.resize_vhd(root_vhd_path,
root_vhd_internal_size,
is_file_max_size=False)
except Exception:
with excutils.save_and_reraise_exception():
if self._pathutils.exists(root_vhd_path):
self._pathutils.remove(root_vhd_path)
return root_vhd_path
def _is_resize_needed(self, vhd_path, old_size, new_size, instance):
if new_size < old_size:
raise exception.FlavorDiskSmallerThanImage(
flavor_size=new_size, image_size=old_size)
elif new_size > old_size:
LOG.debug("Resizing VHD %(vhd_path)s to new "
"size %(new_size)s",
{'new_size': new_size,
'vhd_path': vhd_path},
instance=instance)
return True
return False
def _create_ephemerals(self, instance, ephemerals):
for index, eph in enumerate(ephemerals):
eph['format'] = self._vhdutils.get_best_supported_vhd_format()
eph_name = "eph%s" % index
eph['path'] = self._pathutils.get_ephemeral_vhd_path(
instance.name, eph['format'], eph_name)
self.create_ephemeral_disk(instance.name, eph)
def create_ephemeral_disk(self, instance_name, eph_info):
self._vhdutils.create_dynamic_vhd(eph_info['path'],
eph_info['size'] * units.Gi)
@staticmethod
def _get_vif_metadata(context, instance_id):
vifs = objects.VirtualInterfaceList.get_by_instance_uuid(context,
instance_id)
vif_metadata = []
for vif in vifs:
if 'tag' in vif and vif.tag:
device = objects.NetworkInterfaceMetadata(
mac=vif.address,
bus=objects.PCIDeviceBus(),
tags=[vif.tag])
vif_metadata.append(device)
return vif_metadata
def _save_device_metadata(self, context, instance, block_device_info):
"""Builds a metadata object for instance devices, that maps the user
provided tag to the hypervisor assigned device address.
"""
metadata = []
metadata.extend(self._get_vif_metadata(context, instance.uuid))
if block_device_info:
metadata.extend(self._block_dev_man.get_bdm_metadata(
context, instance, block_device_info))
if metadata:
instance.device_metadata = objects.InstanceDeviceMetadata(
devices=metadata)
def set_boot_order(self, instance_name, vm_gen, block_device_info):
boot_order = self._block_dev_man.get_boot_order(
vm_gen, block_device_info)
LOG.debug("Setting boot order for instance: %(instance_name)s: "
"%(boot_order)s", {'instance_name': instance_name,
'boot_order': boot_order})
self._vmutils.set_boot_order(instance_name, boot_order)
@check_admin_permissions
def spawn(self, context, instance, image_meta, injected_files,
admin_password, network_info, block_device_info=None):
"""Create a new VM and start it."""
LOG.info(_LI("Spawning new instance"), instance=instance)
instance_name = instance.name
if self._vmutils.vm_exists(instance_name):
raise exception.InstanceExists(name=instance_name)
# Make sure we're starting with a clean slate.
self._delete_disk_files(instance_name)
vm_gen = self.get_image_vm_generation(instance.uuid, image_meta)
self._block_dev_man.validate_and_update_bdi(
instance, image_meta, vm_gen, block_device_info)
root_device = block_device_info['root_disk']
self._create_root_device(context, instance, root_device, vm_gen)
self._create_ephemerals(instance, block_device_info['ephemerals'])
try:
with self.wait_vif_plug_events(instance, network_info):
# waiting will occur after the instance is created.
self.create_instance(instance, network_info, root_device,
block_device_info, vm_gen, image_meta)
self._save_device_metadata(context, instance, block_device_info)
if configdrive.required_by(instance):
configdrive_path = self._create_config_drive(context,
instance,
injected_files,
admin_password,
network_info)
self.attach_config_drive(instance, configdrive_path, vm_gen)
self.set_boot_order(instance.name, vm_gen, block_device_info)
self.power_on(instance, network_info=network_info)
except Exception:
with excutils.save_and_reraise_exception():
self.destroy(instance)
@contextlib.contextmanager
def wait_vif_plug_events(self, instance, network_info):
timeout = CONF.vif_plugging_timeout
events = self._get_neutron_events(network_info)
try:
with self._virtapi.wait_for_instance_event(
instance, events, deadline=timeout,
error_callback=self._neutron_failed_callback):
yield
except etimeout.Timeout:
# We never heard from Neutron
LOG.warning(_LW('Timeout waiting for vif plugging callback for '
'instance.'), instance=instance)
if CONF.vif_plugging_is_fatal:
raise exception.VirtualInterfaceCreateException()
def _neutron_failed_callback(self, event_name, instance):
LOG.error(_LE('Neutron Reported failure on event %s'),
event_name, instance=instance)
if CONF.vif_plugging_is_fatal:
raise exception.VirtualInterfaceCreateException()
def _get_neutron_events(self, network_info):
# NOTE(danms): We need to collect any VIFs that are currently
# down that we expect a down->up event for. Anything that is
# already up will not undergo that transition, and for
# anything that might be stale (cache-wise) assume it's
# already up so we don't block on it.
if utils.is_neutron() and CONF.vif_plugging_timeout:
return [('network-vif-plugged', vif['id'])
for vif in network_info if vif.get('active') is False]
else:
return []
def create_instance(self, instance, network_info, root_device,
block_device_info, vm_gen, image_meta):
instance_name = instance.name
instance_path = os.path.join(CONF.instances_path, instance_name)
secure_boot_enabled = self._requires_secure_boot(instance, image_meta,
vm_gen)
memory_per_numa_node, cpus_per_numa_node = (
self._get_instance_vnuma_config(instance, image_meta))
if memory_per_numa_node:
LOG.debug("Instance requires vNUMA topology. Host's NUMA spanning "
"has to be disabled in order for the instance to "
"benefit from it.", instance=instance)
if CONF.hyperv.dynamic_memory_ratio > 1.0:
LOG.warning(_LW(
"Instance vNUMA topology requested, but dynamic memory "
"ratio is higher than 1.0 in nova.conf. Ignoring dynamic "
"memory ratio option."), instance=instance)
dynamic_memory_ratio = 1.0
vnuma_enabled = True
else:
dynamic_memory_ratio = CONF.hyperv.dynamic_memory_ratio
vnuma_enabled = False
if instance.pci_requests.requests:
# NOTE(claudiub): if the instance requires PCI devices, its
# host shutdown action MUST be shutdown.
host_shutdown_action = os_win_const.HOST_SHUTDOWN_ACTION_SHUTDOWN
else:
host_shutdown_action = None
self._vmutils.create_vm(instance_name,
vnuma_enabled,
vm_gen,
instance_path,
[instance.uuid])
self._vmutils.update_vm(instance_name,
instance.flavor.memory_mb,
memory_per_numa_node,
instance.flavor.vcpus,
cpus_per_numa_node,
CONF.hyperv.limit_cpu_features,
dynamic_memory_ratio,
host_shutdown_action=host_shutdown_action)
self._configure_remotefx(instance, vm_gen)
self._vmutils.create_scsi_controller(instance_name)
self._attach_root_device(instance_name, root_device)
self._attach_ephemerals(instance_name, block_device_info['ephemerals'])
self._volumeops.attach_volumes(
block_device_info['block_device_mapping'], instance_name)
# For the moment, we use COM port 1 when getting the serial console
# log as well as interactive sessions. In the future, the way in which
# we consume instance serial ports may become configurable.
#
# Note that Hyper-V instances will always have 2 COM ports
serial_ports = {
constants.DEFAULT_SERIAL_CONSOLE_PORT:
constants.SERIAL_PORT_TYPE_RW}
self._create_vm_com_port_pipes(instance, serial_ports)
for vif in network_info:
LOG.debug('Creating nic for instance', instance=instance)
self._vmutils.create_nic(instance_name,
vif['id'],
vif['address'])
if CONF.hyperv.enable_instance_metrics_collection:
self._metricsutils.enable_vm_metrics_collection(instance_name)
self._set_instance_disk_qos_specs(instance)
if secure_boot_enabled:
certificate_required = self._requires_certificate(image_meta)
self._vmutils.enable_secure_boot(
instance.name, msft_ca_required=certificate_required)
self._attach_pci_devices(instance)
def _attach_pci_devices(self, instance):
for pci_request in instance.pci_requests.requests:
spec = pci_request.spec[0]
for counter in range(pci_request.count):
self._vmutils.add_pci_device(instance.name,
spec['vendor_id'],
spec['product_id'])
def _get_instance_vnuma_config(self, instance, image_meta):
"""Returns the appropriate NUMA configuration for Hyper-V instances,
given the desired instance NUMA topology.
        :param instance: instance containing the flavor and its extra_specs,
where the NUMA topology is defined.
:param image_meta: image's metadata, containing properties related to
the instance's NUMA topology.
:returns: memory amount and number of vCPUs per NUMA node or
(None, None), if instance NUMA topology was not requested.
:raises exception.InstanceUnacceptable:
If the given instance NUMA topology is not possible on Hyper-V.
"""
instance_topology = hardware.numa_get_constraints(instance.flavor,
image_meta)
if not instance_topology:
# instance NUMA topology was not requested.
return None, None
memory_per_numa_node = instance_topology.cells[0].memory
cpus_per_numa_node = len(instance_topology.cells[0].cpuset)
        # validate that the requested NUMA topology is not asymmetric.
# e.g.: it should be like: (X cpus, X cpus, Y cpus), where X == Y.
# same with memory.
for cell in instance_topology.cells:
if len(cell.cpuset) != cpus_per_numa_node:
reason = _("Hyper-V does not support NUMA topologies with "
"uneven number of processors. (%(a)s != %(b)s)") % {
'a': len(cell.cpuset), 'b': cpus_per_numa_node}
raise exception.InstanceUnacceptable(reason=reason,
instance_id=instance.uuid)
if cell.memory != memory_per_numa_node:
reason = _("Hyper-V does not support NUMA topologies with "
"uneven amounts of memory. (%(a)s != %(b)s)") % {
'a': cell.memory, 'b': memory_per_numa_node}
raise exception.InstanceUnacceptable(reason=reason,
instance_id=instance.uuid)
return memory_per_numa_node, cpus_per_numa_node
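    # Illustrative example (assumed flavor values, not from the original
    # code): a flavor with 4 vCPUs, 4096 MB RAM and hw:numa_nodes=2 yields a
    # symmetric topology of two cells with 2 vCPUs / 2048 MB each, so this
    # method returns (2048, 2); an uneven split would raise
    # InstanceUnacceptable instead.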
def _configure_remotefx(self, instance, vm_gen):
extra_specs = instance.flavor.extra_specs
remotefx_max_resolution = extra_specs.get(
constants.FLAVOR_ESPEC_REMOTEFX_RES)
if not remotefx_max_resolution:
# RemoteFX not required.
return
if not CONF.hyperv.enable_remotefx:
raise exception.InstanceUnacceptable(
_("enable_remotefx configuration option needs to be set to "
"True in order to use RemoteFX."))
if not self._hostutils.check_server_feature(
self._hostutils.FEATURE_RDS_VIRTUALIZATION):
raise exception.InstanceUnacceptable(
_("The RDS-Virtualization feature must be installed in order "
"to use RemoteFX."))
if not self._vmutils.vm_gen_supports_remotefx(vm_gen):
raise exception.InstanceUnacceptable(
_("RemoteFX is not supported on generation %s virtual "
"machines on this version of Windows.") % vm_gen)
instance_name = instance.name
LOG.debug('Configuring RemoteFX for instance: %s', instance_name)
remotefx_monitor_count = int(extra_specs.get(
constants.FLAVOR_ESPEC_REMOTEFX_MONITORS) or 1)
remotefx_vram = extra_specs.get(
constants.FLAVOR_ESPEC_REMOTEFX_VRAM)
vram_bytes = int(remotefx_vram) * units.Mi if remotefx_vram else None
self._vmutils.enable_remotefx_video_adapter(
instance_name,
remotefx_monitor_count,
remotefx_max_resolution,
vram_bytes)
def _attach_root_device(self, instance_name, root_dev_info):
if root_dev_info['type'] == constants.VOLUME:
self._volumeops.attach_volume(root_dev_info['connection_info'],
instance_name,
disk_bus=root_dev_info['disk_bus'])
else:
self._attach_drive(instance_name, root_dev_info['path'],
root_dev_info['drive_addr'],
root_dev_info['ctrl_disk_addr'],
root_dev_info['disk_bus'],
root_dev_info['type'])
def _attach_ephemerals(self, instance_name, ephemerals):
for eph in ephemerals:
# if an ephemeral doesn't have a path, it might have been removed
# during resize.
if eph.get('path'):
self._attach_drive(
instance_name, eph['path'], eph['drive_addr'],
eph['ctrl_disk_addr'], eph['disk_bus'],
constants.BDI_DEVICE_TYPE_TO_DRIVE_TYPE[
eph['device_type']])
def _attach_drive(self, instance_name, path, drive_addr, ctrl_disk_addr,
controller_type, drive_type=constants.DISK):
if controller_type == constants.CTRL_TYPE_SCSI:
self._vmutils.attach_scsi_drive(instance_name, path, drive_type)
else:
self._vmutils.attach_ide_drive(instance_name, path, drive_addr,
ctrl_disk_addr, drive_type)
def get_image_vm_generation(self, instance_id, image_meta):
default_vm_gen = self._hostutils.get_default_vm_generation()
image_prop_vm = image_meta.properties.get('hw_machine_type',
default_vm_gen)
if image_prop_vm not in self._hostutils.get_supported_vm_types():
reason = _LE('Requested VM Generation %s is not supported on '
'this OS.') % image_prop_vm
raise exception.InstanceUnacceptable(instance_id=instance_id,
reason=reason)
return VM_GENERATIONS[image_prop_vm]
def check_vm_image_type(self, instance_id, vm_gen, root_vhd_path):
if (vm_gen != constants.VM_GEN_1 and root_vhd_path and
self._vhdutils.get_vhd_format(
root_vhd_path) == constants.DISK_FORMAT_VHD):
reason = _LE('Requested VM Generation %s, but provided VHD '
'instead of VHDX.') % vm_gen
raise exception.InstanceUnacceptable(instance_id=instance_id,
reason=reason)
def _requires_certificate(self, image_meta):
os_type = image_meta.properties.get('os_type')
if os_type == fields.OSType.WINDOWS:
return False
return True
def _requires_secure_boot(self, instance, image_meta, vm_gen):
"""Checks whether the given instance requires Secure Boot.
Secure Boot feature will be enabled by setting the "os_secure_boot"
image property or the "os:secure_boot" flavor extra spec to required.
:raises exception.InstanceUnacceptable: if the given image_meta has
no os_type property set, or if the image property value and the
flavor extra spec value are conflicting, or if Secure Boot is
required, but the instance's VM generation is 1.
"""
img_secure_boot = image_meta.properties.get('os_secure_boot')
flavor_secure_boot = instance.flavor.extra_specs.get(
constants.FLAVOR_SPEC_SECURE_BOOT)
requires_sb = False
conflicting_values = False
if flavor_secure_boot == fields.SecureBoot.REQUIRED:
requires_sb = True
if img_secure_boot == fields.SecureBoot.DISABLED:
conflicting_values = True
elif img_secure_boot == fields.SecureBoot.REQUIRED:
requires_sb = True
if flavor_secure_boot == fields.SecureBoot.DISABLED:
conflicting_values = True
if conflicting_values:
reason = _(
"Conflicting image metadata property and flavor extra_specs "
"values: os_secure_boot (%(image_secure_boot)s) / "
"os:secure_boot (%(flavor_secure_boot)s)") % {
'image_secure_boot': img_secure_boot,
'flavor_secure_boot': flavor_secure_boot}
raise exception.InstanceUnacceptable(instance_id=instance.uuid,
reason=reason)
if requires_sb:
if vm_gen != constants.VM_GEN_2:
reason = _('Secure boot requires generation 2 VM.')
raise exception.InstanceUnacceptable(instance_id=instance.uuid,
reason=reason)
os_type = image_meta.properties.get('os_type')
if not os_type:
reason = _('For secure boot, os_type must be specified in '
'image properties.')
raise exception.InstanceUnacceptable(instance_id=instance.uuid,
reason=reason)
return requires_sb
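    # Decision summary for _requires_secure_boot (derived from the checks
    # above): secure boot is required when either the "os:secure_boot" flavor
    # extra spec or the "os_secure_boot" image property is "required"; if one
    # side says "required" and the other "disabled", the request is rejected;
    # when required, the VM must be generation 2 and the image must declare
    # an os_type.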
def _create_config_drive(self, context, instance, injected_files,
admin_password, network_info, rescue=False):
if CONF.config_drive_format != 'iso9660':
raise exception.ConfigDriveUnsupportedFormat(
format=CONF.config_drive_format)
LOG.info(_LI('Using config drive for instance'), instance=instance)
extra_md = {}
if admin_password and CONF.hyperv.config_drive_inject_password:
extra_md['admin_pass'] = admin_password
inst_md = instance_metadata.InstanceMetadata(
instance, content=injected_files, extra_md=extra_md,
network_info=network_info, request_context=context)
configdrive_path_iso = self._pathutils.get_configdrive_path(
instance.name, constants.DVD_FORMAT, rescue=rescue)
LOG.info(_LI('Creating config drive at %(path)s'),
{'path': configdrive_path_iso}, instance=instance)
with configdrive.ConfigDriveBuilder(instance_md=inst_md) as cdb:
try:
cdb.make_drive(configdrive_path_iso)
except processutils.ProcessExecutionError as e:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Creating config drive failed with '
'error: %s'),
e, instance=instance)
if not CONF.hyperv.config_drive_cdrom:
configdrive_path = self._pathutils.get_configdrive_path(
instance.name, constants.DISK_FORMAT_VHD, rescue=rescue)
utils.execute(CONF.hyperv.qemu_img_cmd,
'convert',
'-f',
'raw',
'-O',
'vpc',
configdrive_path_iso,
configdrive_path,
attempts=1)
self._pathutils.remove(configdrive_path_iso)
else:
configdrive_path = configdrive_path_iso
return configdrive_path
def attach_config_drive(self, instance, configdrive_path, vm_gen):
configdrive_ext = configdrive_path[(configdrive_path.rfind('.') + 1):]
        # Attach the config drive here; if its file format is not listed in
        # constants.DISK_FORMAT_MAP, fail with InvalidDiskFormat.
try:
drive_type = constants.DISK_FORMAT_MAP[configdrive_ext]
controller_type = VM_GENERATIONS_CONTROLLER_TYPES[vm_gen]
self._attach_drive(instance.name, configdrive_path, 1, 0,
controller_type, drive_type)
except KeyError:
raise exception.InvalidDiskFormat(disk_format=configdrive_ext)
def _detach_config_drive(self, instance_name, rescue=False, delete=False):
configdrive_path = self._pathutils.lookup_configdrive_path(
instance_name, rescue=rescue)
if configdrive_path:
self._vmutils.detach_vm_disk(instance_name,
configdrive_path,
is_physical=False)
if delete:
self._pathutils.remove(configdrive_path)
def _delete_disk_files(self, instance_name):
self._pathutils.get_instance_dir(instance_name,
create_dir=False,
remove_dir=True)
def destroy(self, instance, network_info=None, block_device_info=None,
destroy_disks=True):
instance_name = instance.name
LOG.info(_LI("Got request to destroy instance"), instance=instance)
try:
if self._vmutils.vm_exists(instance_name):
# Stop the VM first.
self._vmutils.stop_vm_jobs(instance_name)
self.power_off(instance)
self.unplug_vifs(instance, network_info)
self._vmutils.destroy_vm(instance_name)
self._volumeops.disconnect_volumes(block_device_info)
else:
LOG.debug("Instance not found", instance=instance)
if destroy_disks:
self._delete_disk_files(instance_name)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_LE('Failed to destroy instance: %s'),
instance_name)
def reboot(self, instance, network_info, reboot_type):
"""Reboot the specified instance."""
LOG.debug("Rebooting instance", instance=instance)
if reboot_type == REBOOT_TYPE_SOFT:
if self._soft_shutdown(instance):
self.power_on(instance, network_info=network_info)
return
self._set_vm_state(instance,
os_win_const.HYPERV_VM_STATE_REBOOT)
def _soft_shutdown(self, instance,
timeout=CONF.hyperv.wait_soft_reboot_seconds,
retry_interval=SHUTDOWN_TIME_INCREMENT):
"""Perform a soft shutdown on the VM.
:return: True if the instance was shutdown within time limit,
False otherwise.
"""
LOG.debug("Performing Soft shutdown on instance", instance=instance)
while timeout > 0:
# Perform a soft shutdown on the instance.
            # Wait at most `timeout` seconds for the instance to shut down.
            # If it has not shut down yet, retry until it succeeds or the
            # total time waited reaches the timeout.
wait_time = min(retry_interval, timeout)
try:
LOG.debug("Soft shutdown instance, timeout remaining: %d",
timeout, instance=instance)
self._vmutils.soft_shutdown_vm(instance.name)
if self._wait_for_power_off(instance.name, wait_time):
LOG.info(_LI("Soft shutdown succeeded."),
instance=instance)
return True
except os_win_exc.HyperVException as e:
# Exception is raised when trying to shutdown the instance
# while it is still booting.
LOG.debug("Soft shutdown failed: %s", e, instance=instance)
time.sleep(wait_time)
timeout -= retry_interval
LOG.warning(_LW("Timed out while waiting for soft shutdown."),
instance=instance)
return False
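    # Illustrative timing (values assumed, not from the original code): with
    # timeout=60 and retry_interval=15, soft_shutdown_vm() is attempted up to
    # four times, waiting at most 15 seconds for power-off after each
    # attempt, before giving up and returning False.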
def pause(self, instance):
"""Pause VM instance."""
LOG.debug("Pause instance", instance=instance)
self._set_vm_state(instance,
os_win_const.HYPERV_VM_STATE_PAUSED)
def unpause(self, instance):
"""Unpause paused VM instance."""
LOG.debug("Unpause instance", instance=instance)
self._set_vm_state(instance,
os_win_const.HYPERV_VM_STATE_ENABLED)
def suspend(self, instance):
"""Suspend the specified instance."""
LOG.debug("Suspend instance", instance=instance)
self._set_vm_state(instance,
os_win_const.HYPERV_VM_STATE_SUSPENDED)
def resume(self, instance):
"""Resume the suspended VM instance."""
LOG.debug("Resume instance", instance=instance)
self._set_vm_state(instance,
os_win_const.HYPERV_VM_STATE_ENABLED)
def power_off(self, instance, timeout=0, retry_interval=0):
"""Power off the specified instance."""
LOG.debug("Power off instance", instance=instance)
# We must make sure that the console log workers are stopped,
# otherwise we won't be able to delete or move the VM log files.
self._serial_console_ops.stop_console_handler(instance.name)
if retry_interval <= 0:
retry_interval = SHUTDOWN_TIME_INCREMENT
try:
if timeout and self._soft_shutdown(instance,
timeout,
retry_interval):
return
self._set_vm_state(instance,
os_win_const.HYPERV_VM_STATE_DISABLED)
except os_win_exc.HyperVVMNotFoundException:
# The manager can call the stop API after receiving instance
# power off events. If this is triggered when the instance
            # is being deleted, it might attempt to power off a nonexistent
# instance. We'll just pass in this case.
LOG.debug("Instance not found. Skipping power off",
instance=instance)
def power_on(self, instance, block_device_info=None, network_info=None):
"""Power on the specified instance."""
LOG.debug("Power on instance", instance=instance)
if block_device_info:
self._volumeops.fix_instance_volume_disk_paths(instance.name,
block_device_info)
self._set_vm_state(instance, os_win_const.HYPERV_VM_STATE_ENABLED)
self.plug_vifs(instance, network_info)
def _set_vm_state(self, instance, req_state):
instance_name = instance.name
try:
self._vmutils.set_vm_state(instance_name, req_state)
LOG.debug("Successfully changed state of VM %(instance_name)s"
" to: %(req_state)s", {'instance_name': instance_name,
'req_state': req_state})
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Failed to change vm state of %(instance_name)s"
" to %(req_state)s"),
{'instance_name': instance_name,
'req_state': req_state})
def _get_vm_state(self, instance_name):
summary_info = self._vmutils.get_vm_summary_info(instance_name)
return summary_info['EnabledState']
def _wait_for_power_off(self, instance_name, time_limit):
"""Waiting for a VM to be in a disabled state.
:return: True if the instance is shutdown within time_limit,
False otherwise.
"""
desired_vm_states = [os_win_const.HYPERV_VM_STATE_DISABLED]
def _check_vm_status(instance_name):
if self._get_vm_state(instance_name) in desired_vm_states:
raise loopingcall.LoopingCallDone()
periodic_call = loopingcall.FixedIntervalLoopingCall(_check_vm_status,
instance_name)
try:
# add a timeout to the periodic call.
periodic_call.start(interval=SHUTDOWN_TIME_INCREMENT)
etimeout.with_timeout(time_limit, periodic_call.wait)
except etimeout.Timeout:
# VM did not shutdown in the expected time_limit.
return False
finally:
# stop the periodic call, in case of exceptions or Timeout.
periodic_call.stop()
return True
def resume_state_on_host_boot(self, context, instance, network_info,
block_device_info=None):
"""Resume guest state when a host is booted."""
self.power_on(instance, block_device_info, network_info)
def _create_vm_com_port_pipes(self, instance, serial_ports):
for port_number, port_type in serial_ports.items():
pipe_path = r'\\.\pipe\%s_%s' % (instance.uuid, port_type)
self._vmutils.set_vm_serial_port_connection(
instance.name, port_number, pipe_path)
def copy_vm_dvd_disks(self, vm_name, dest_host):
dvd_disk_paths = self._vmutils.get_vm_dvd_disk_paths(vm_name)
dest_path = self._pathutils.get_instance_dir(
vm_name, remote_server=dest_host)
for path in dvd_disk_paths:
self._pathutils.copyfile(path, dest_path)
def plug_vifs(self, instance, network_info):
if network_info:
for vif in network_info:
self._vif_driver.plug(instance, vif)
def unplug_vifs(self, instance, network_info):
if network_info:
for vif in network_info:
self._vif_driver.unplug(instance, vif)
def _check_hotplug_available(self, instance):
"""Check whether attaching an interface is possible for the given
instance.
:returns: True if attaching / detaching interfaces is possible for the
given instance.
"""
vm_state = self._get_vm_state(instance.name)
if vm_state == os_win_const.HYPERV_VM_STATE_DISABLED:
# can attach / detach interface to stopped VMs.
return True
if not self._hostutils.check_min_windows_version(10, 0):
# TODO(claudiub): add set log level to error after string freeze.
LOG.debug("vNIC hot plugging is supported only in newer "
"versions than Windows Hyper-V / Server 2012 R2.")
return False
if (self._vmutils.get_vm_generation(instance.name) ==
constants.VM_GEN_1):
# TODO(claudiub): add set log level to error after string freeze.
LOG.debug("Cannot hot plug vNIC to a first generation VM.",
instance=instance)
return False
return True
def attach_interface(self, instance, vif):
if not self._check_hotplug_available(instance):
raise exception.InterfaceAttachFailed(instance_uuid=instance.uuid)
LOG.debug('Attaching vif: %s', vif['id'], instance=instance)
self._vmutils.create_nic(instance.name, vif['id'], vif['address'])
self._vif_driver.plug(instance, vif)
def detach_interface(self, instance, vif):
try:
if not self._check_hotplug_available(instance):
raise exception.InterfaceDetachFailed(
instance_uuid=instance.uuid)
LOG.debug('Detaching vif: %s', vif['id'], instance=instance)
self._vif_driver.unplug(instance, vif)
self._vmutils.destroy_nic(instance.name, vif['id'])
except os_win_exc.HyperVVMNotFoundException:
# TODO(claudiub): add set log level to error after string freeze.
LOG.debug("Instance not found during detach interface. It "
"might have been destroyed beforehand.",
instance=instance)
raise exception.InterfaceDetachFailed(instance_uuid=instance.uuid)
def rescue_instance(self, context, instance, network_info, image_meta,
rescue_password):
try:
self._rescue_instance(context, instance, network_info,
image_meta, rescue_password)
except Exception as exc:
with excutils.save_and_reraise_exception():
err_msg = _LE("Instance rescue failed. Exception: %(exc)s. "
"Attempting to unrescue the instance.")
LOG.error(err_msg, {'exc': exc}, instance=instance)
self.unrescue_instance(instance)
def _rescue_instance(self, context, instance, network_info, image_meta,
rescue_password):
rescue_image_id = image_meta.id or instance.image_ref
rescue_vhd_path = self._create_root_vhd(
context, instance, rescue_image_id=rescue_image_id)
rescue_vm_gen = self.get_image_vm_generation(instance.uuid,
image_meta)
vm_gen = self._vmutils.get_vm_generation(instance.name)
if rescue_vm_gen != vm_gen:
err_msg = _('The requested rescue image requires a different VM '
'generation than the actual rescued instance. '
'Rescue image VM generation: %(rescue_vm_gen)s. '
'Rescued instance VM generation: %(vm_gen)s.') % dict(
rescue_vm_gen=rescue_vm_gen,
vm_gen=vm_gen)
raise exception.ImageUnacceptable(reason=err_msg,
image_id=rescue_image_id)
root_vhd_path = self._pathutils.lookup_root_vhd_path(instance.name)
if not root_vhd_path:
err_msg = _('Instance root disk image could not be found. '
'Rescuing instances booted from volume is '
'not supported.')
raise exception.InstanceNotRescuable(reason=err_msg,
instance_id=instance.uuid)
controller_type = VM_GENERATIONS_CONTROLLER_TYPES[vm_gen]
self._vmutils.detach_vm_disk(instance.name, root_vhd_path,
is_physical=False)
self._attach_drive(instance.name, rescue_vhd_path, 0,
self._ROOT_DISK_CTRL_ADDR, controller_type)
self._vmutils.attach_scsi_drive(instance.name, root_vhd_path,
drive_type=constants.DISK)
if configdrive.required_by(instance):
self._detach_config_drive(instance.name)
rescue_configdrive_path = self._create_config_drive(
context,
instance,
injected_files=None,
admin_password=rescue_password,
network_info=network_info,
rescue=True)
self.attach_config_drive(instance, rescue_configdrive_path,
vm_gen)
self.power_on(instance)
def unrescue_instance(self, instance):
self.power_off(instance)
root_vhd_path = self._pathutils.lookup_root_vhd_path(instance.name)
rescue_vhd_path = self._pathutils.lookup_root_vhd_path(instance.name,
rescue=True)
if (instance.vm_state == vm_states.RESCUED and
not (rescue_vhd_path and root_vhd_path)):
err_msg = _('Missing instance root and/or rescue image. '
'The instance cannot be unrescued.')
raise exception.InstanceNotRescuable(reason=err_msg,
instance_id=instance.uuid)
vm_gen = self._vmutils.get_vm_generation(instance.name)
controller_type = VM_GENERATIONS_CONTROLLER_TYPES[vm_gen]
self._vmutils.detach_vm_disk(instance.name, root_vhd_path,
is_physical=False)
if rescue_vhd_path:
self._vmutils.detach_vm_disk(instance.name, rescue_vhd_path,
is_physical=False)
fileutils.delete_if_exists(rescue_vhd_path)
self._attach_drive(instance.name, root_vhd_path, 0,
self._ROOT_DISK_CTRL_ADDR, controller_type)
self._detach_config_drive(instance.name, rescue=True, delete=True)
# Reattach the configdrive, if exists and not already attached.
configdrive_path = self._pathutils.lookup_configdrive_path(
instance.name)
if configdrive_path and not self._vmutils.is_disk_attached(
configdrive_path, is_physical=False):
self.attach_config_drive(instance, configdrive_path, vm_gen)
self.power_on(instance)
def _set_instance_disk_qos_specs(self, instance):
quota_specs = self._get_scoped_flavor_extra_specs(instance, 'quota')
disk_total_bytes_sec = int(
quota_specs.get('disk_total_bytes_sec') or 0)
disk_total_iops_sec = int(
quota_specs.get('disk_total_iops_sec') or
self._volumeops.bytes_per_sec_to_iops(disk_total_bytes_sec))
if disk_total_iops_sec:
local_disks = self._get_instance_local_disks(instance.name)
for disk_path in local_disks:
self._vmutils.set_disk_qos_specs(disk_path,
disk_total_iops_sec)
def _get_instance_local_disks(self, instance_name):
instance_path = self._pathutils.get_instance_dir(instance_name)
instance_disks = self._vmutils.get_vm_storage_paths(instance_name)[0]
local_disks = [disk_path for disk_path in instance_disks
if instance_path in disk_path]
return local_disks
def _get_scoped_flavor_extra_specs(self, instance, scope):
extra_specs = instance.flavor.extra_specs or {}
filtered_specs = {}
for spec, value in extra_specs.items():
if ':' in spec:
_scope, key = spec.split(':')
if _scope == scope:
filtered_specs[key] = value
return filtered_specs
|
vmturbo/nova
|
nova/virt/hyperv/vmops.py
|
Python
|
apache-2.0
| 48,910
| 0
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" cluster.py
Cluster functionality
"""
import os
import time
import logging
import subprocess as sb
from datetime import datetime
from .dax_settings import DAX_Settings
from .errors import ClusterError
__copyright__ = 'Copyright 2013 Vanderbilt University. All Rights Reserved'
DAX_SETTINGS = DAX_Settings()
MAX_TRACE_DAYS = 30
LOGGER = logging.getLogger('dax')
def c_output(output):
"""
    Check whether the command output can be parsed as an integer.
    :param output: command output to check
    :return: True if the output is not an integer (an error), False otherwise.
"""
try:
int(output)
error = False
except ValueError as err:
error = True
LOGGER.error(err)
return error
def count_jobs():
"""
Count the number of jobs in the queue on the cluster
:return: number of jobs in the queue
"""
if command_found(cmd=DAX_SETTINGS.get_cmd_submit()):
cmd = DAX_SETTINGS.get_cmd_count_nb_jobs()
output = sb.check_output(cmd, shell=True)
error = c_output(output)
while error:
            LOGGER.info(' could not parse job count, trying again in 2 '
                        'seconds.')
time.sleep(2)
output = sb.check_output(cmd, shell=True)
error = c_output(output)
if int(output) < 0:
return 0
else:
return int(output)
else:
LOGGER.info(' Running locally. No queue with jobs.')
return 0
def job_status(jobid):
"""
Get the status for a job on the cluster
:param jobid: job id to check
:return: job status
"""
cmd = DAX_SETTINGS.get_cmd_get_job_status()\
.safe_substitute({'jobid': jobid})
LOGGER.debug(str(cmd).strip())
try:
output = sb.check_output(cmd, stderr=sb.STDOUT, shell=True)
LOGGER.debug('output='+str(output))
output = output.decode().strip()
if output == DAX_SETTINGS.get_running_status():
return 'R'
elif output == DAX_SETTINGS.get_queue_status():
return 'Q'
elif output == DAX_SETTINGS.get_complete_status() or len(output) == 0:
return 'C'
else:
return None
except sb.CalledProcessError as e:
LOGGER.debug(str(e))
return None
def is_traceable_date(jobdate):
"""
Check if the job is traceable on the cluster
:param jobdate: launching date of the job
:return: True if traceable, False otherwise.
"""
try:
trace_date = datetime.strptime(jobdate, "%Y-%m-%d")
diff_days = (datetime.today() - trace_date).days
return diff_days <= MAX_TRACE_DAYS
except ValueError:
return False
def tracejob_info(jobid, jobdate):
"""
Trace the job information from the cluster
:param jobid: job id to check
:param jobdate: launching date of the job
:return: dictionary object with 'mem_used', 'walltime_used', 'jobnode'
"""
time_s = datetime.strptime(jobdate, "%Y-%m-%d")
diff_days = (datetime.today() - time_s).days + 1
jobinfo = dict()
jobinfo['mem_used'] = get_job_mem_used(jobid, diff_days)
jobinfo['walltime_used'] = get_job_walltime_used(jobid, diff_days)
jobinfo['jobnode'] = get_job_node(jobid, diff_days)
return jobinfo
def get_job_mem_used(jobid, diff_days):
"""
Get the memory used for the task from cluster
:param jobid: job id to check
:param diff_days: difference of days between starting date and now
:return: string with the memory usage, empty string if error
"""
mem = ''
# Check for blank jobid
if not jobid:
return mem
cmd = DAX_SETTINGS.get_cmd_get_job_memory()\
.safe_substitute({'numberofdays': diff_days,
'jobid': jobid})
try:
output = sb.check_output(cmd, stderr=sb.STDOUT, shell=True)
if output.startswith(b'sacct: error'):
raise ClusterError(output)
if output:
mem = output.strip()
mem = mem.decode()
except (sb.CalledProcessError, ClusterError):
pass
return mem
def get_job_walltime_used(jobid, diff_days):
"""
Get the walltime used for the task from cluster
:param jobid: job id to check
:param diff_days: difference of days between starting date and now
:return: string with the walltime used, empty string if error
"""
walltime = ''
# Check for blank jobid
if not jobid:
return walltime
cmd = DAX_SETTINGS.get_cmd_get_job_walltime()\
.safe_substitute({'numberofdays': diff_days,
'jobid': jobid})
try:
output = sb.check_output(cmd, stderr=sb.STDOUT, shell=True)
if output:
walltime = output.strip()
walltime = walltime.decode()
except sb.CalledProcessError:
pass
if not walltime and diff_days > 3:
walltime = 'NotFound'
return walltime
def get_job_node(jobid, diff_days):
"""
Get the node where the job was running on the cluster
:param jobid: job id to check
:param diff_days: difference of days between starting date and now
:return: string with the node, empty string if error
"""
jobnode = ''
# Check for blank jobid
if not jobid:
return jobnode
if jobid == 'no_qsub':
cmd = 'uname -a'
        output = sb.check_output(cmd, stderr=sb.STDOUT, shell=True)
        # check_output returns bytes on Python 3; decode before splitting.
        output = output.decode()
        if output and len(output.strip().split(' ')) > 1:
            jobnode = output.strip().split(' ')[1]
return jobnode
cmd = DAX_SETTINGS.get_cmd_get_job_node()\
.safe_substitute({'numberofdays': diff_days,
'jobid': jobid})
try:
output = sb.check_output(cmd, stderr=sb.STDOUT, shell=True)
if output:
jobnode = output.strip()
jobnode = jobnode.decode()
except sb.CalledProcessError:
pass
return jobnode
def get_specific_str(big_str, prefix, suffix):
"""
    Extract the substring located between a prefix and a suffix
    :param big_str: string to reduce
    :param prefix: prefix to strip (everything up to and including it is removed)
    :param suffix: suffix to strip (everything from it onwards is removed)
    :return: the reduced string, or an empty string if neither the prefix nor
        the suffix was found
"""
specific_str = big_str
if prefix and len(specific_str.split(prefix)) > 1:
specific_str = specific_str.split(prefix)[1]
if suffix and len(specific_str.split(suffix)) > 1:
specific_str = specific_str.split(suffix)[0]
if specific_str != big_str:
return specific_str
else:
return ''
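# Illustrative example (hypothetical scheduler output, not from the original
# code): if the submit command prints "Your job 12345 has been submitted\n",
# then get_specific_str(output, 'Your job ', ' has been') returns '12345';
# if neither the prefix nor the suffix is found, an empty string is returned.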
def command_found(cmd='qsub'):
""" Return True if the command was found."""
if True in [os.path.isfile(os.path.join(path, cmd)) and
os.access(os.path.join(path, cmd), os.X_OK)
for path in os.environ["PATH"].split(os.pathsep)]:
return True
return False
class PBS(object): # The script file generator class
""" PBS class to generate/submit the cluster file to run a task """
def __init__(self, filename, outfile, cmds, walltime_str, mem_mb=2048,
ppn=1, env=None, email=None,
email_options=DAX_SETTINGS.get_email_opts(), rungroup=None,
xnat_host=None, job_template=None):
"""
Entry point for the PBS class
:param filename: filename for the script
:param outfile: filepath for the outlogs
:param cmds: commands to run in the script
:param walltime_str: walltime to set for the script
:param mem_mb: memory in mb to set for the script
:param ppn: number of processor to set for the script
:param env: Environment file to source for the script
:param email: email address to set for the script
:param email_options: email options to set for the script
:param rungroup: group to run job under on the cluster
:param xnat_host: set the XNAT_HOST for the job (export)
:return: None
"""
self.filename = filename
self.outfile = outfile
self.cmds = cmds
self.walltime_str = walltime_str
self.mem_mb = mem_mb
self.email = email
self.email_options = email_options
self.rungroup = rungroup
self.ppn = ppn
self.job_template = job_template
if env:
self.env = env
else:
self.env = os.path.join(os.environ['HOME'], '.bashrc')
if xnat_host:
self.xnat_host = xnat_host
else:
self.xnat_host = os.environ['XNAT_HOST']
def write(self):
"""
Write the file
:return: None
"""
# pbs_dir
job_dir = os.path.dirname(self.filename)
if not os.path.exists(job_dir):
os.makedirs(job_dir)
        # Fill in the job template with the script settings (defaults below)
job_data = {'job_email': self.email,
'job_email_options': self.email_options,
'job_rungroup': self.rungroup,
'job_ppn': str(self.ppn),
'job_env': str(self.env),
'job_walltime': str(self.walltime_str),
'job_memory': str(self.mem_mb),
'job_output_file': self.outfile,
'job_output_file_options': 'oe',
'job_cmds': '\n'.join(self.cmds),
'xnat_host': self.xnat_host}
with open(self.filename, 'w') as f_obj:
_tmp = DAX_SETTINGS.get_job_template(self.job_template)
_str = _tmp.safe_substitute(job_data)
f_obj.write(_str)
def submit(self, outlog=None, force_no_qsub=False):
"""
Submit the file to the cluster
        :return: tuple of (jobid, failed) as returned by submit_job
"""
return submit_job(self.filename, outlog=outlog,
force_no_qsub=force_no_qsub)
def submit_job(filename, outlog=None, force_no_qsub=False):
"""
Submit the file to the cluster
    :return: tuple of (jobid, failed); failed is True only when the job was
        run locally and produced errors
"""
failed = False
submit_cmd = DAX_SETTINGS.get_cmd_submit()
if command_found(cmd=submit_cmd) and not force_no_qsub:
try:
cmd = '%s %s' % (submit_cmd, filename)
proc = sb.Popen(cmd.split(), stdout=sb.PIPE, stderr=sb.PIPE)
output, error = proc.communicate()
if output:
LOGGER.info(output.decode())
if error:
LOGGER.error(error.decode())
jobid = get_specific_str(
output.decode(), DAX_SETTINGS.get_prefix_jobid(),
DAX_SETTINGS.get_suffix_jobid())
except sb.CalledProcessError as err:
LOGGER.error(err)
jobid = '0'
else:
cmd = 'sh %s' % (filename)
proc = sb.Popen(cmd.split(), stdout=sb.PIPE, stderr=sb.PIPE)
output, error = proc.communicate()
        if outlog:
            # output/error are bytes; decode them before writing to the
            # text-mode log file (iterating over bytes would yield integers
            # on Python 3).
            with open(outlog, 'w') as log_obj:
                log_obj.write(output.decode())
                log_obj.write(error.decode())
if error:
# Set the status to JOB_FAILED
failed = True
jobid = 'no_qsub'
return jobid.strip(), failed
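# Minimal usage sketch (illustrative only; paths, commands and walltime are
# assumptions, not part of this module; assumes XNAT_HOST is set in the
# environment or passed via xnat_host=):
#
#   job = PBS('/tmp/job.pbs', '/tmp/job.out',
#             ['echo "processing"'], walltime_str='01:00:00', mem_mb=4096)
#   job.write()
#   jobid, failed = job.submit(outlog='/tmp/job.log')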
|
VUIIS/dax
|
dax/cluster.py
|
Python
|
mit
| 11,384
| 0
|
import argparse
from cheat_ext.info import info, ls
from cheat_ext.installer import (
install, upgrade, remove
)
from cheat_ext.linker import link, unlink
def _install(args):
install(args.repository)
link(args.repository)
def _upgrade(args):
upgrade(args.repository)
link(args.repository)
def _remove(args):
unlink(args.repository)
remove(args.repository)
def _info(args):
info(args.repository)
def _ls(args):
ls()
parser = argparse.ArgumentParser(description="cheat extension")
subparsers = parser.add_subparsers()
install_parser = subparsers.add_parser("install")
install_parser.add_argument("repository", type=str)
install_parser.set_defaults(func=_install)
upgrade_parser = subparsers.add_parser("upgrade")
upgrade_parser.add_argument("repository", type=str)
upgrade_parser.set_defaults(func=_upgrade)
remove_parser = subparsers.add_parser("remove")
remove_parser.add_argument("repository", type=str)
remove_parser.set_defaults(func=_remove)
info_parser = subparsers.add_parser("info")
info_parser.add_argument("repository", type=str)
info_parser.set_defaults(func=_info)
ls_parser = subparsers.add_parser("ls")
ls_parser.set_defaults(func=_ls)
def main():
options = parser.parse_args()
options.func(options)
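# Entry-point guard added for illustration; the package presumably exposes
# main() through a console_scripts entry point, so this is only needed when
# running the module directly.
if __name__ == "__main__":
    main()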
|
chhsiao90/cheat-ext
|
cheat_ext/main.py
|
Python
|
mit
| 1,279
| 0
|
from datetime import datetime
from os.path import abspath, join, dirname
import alabaster
# Alabaster theme + mini-extension
html_theme_path = [alabaster.get_path()]
extensions = ['alabaster', 'sphinx.ext.intersphinx', 'sphinx.ext.doctest']
# Paths relative to invoking conf.py - not this shared file
html_theme = 'alabaster'
html_theme_options = {
'description': "Pythonic task execution",
'github_user': 'pyinvoke',
'github_repo': 'invoke',
'analytics_id': 'UA-18486793-3',
'travis_button': True,
'codecov_button': True,
}
html_sidebars = {
'**': [
'about.html',
'navigation.html',
'searchbox.html',
'donate.html',
]
}
# Everything intersphinx's to Python
intersphinx_mapping = {
'python': ('https://docs.python.org/2.7/', None),
}
# Doctest settings
doctest_path = [abspath(join(dirname(__file__), '..', 'tests'))]
doctest_global_setup = r"""
from _util import MockSubprocess
"""
# Regular settings
project = 'Invoke'
year = datetime.now().year
copyright = '{} Jeff Forcier'.format(year)
master_doc = 'index'
templates_path = ['_templates']
exclude_trees = ['_build']
source_suffix = '.rst'
default_role = 'obj'
|
mkusz/invoke
|
sites/shared_conf.py
|
Python
|
bsd-2-clause
| 1,189
| 0
|
# -*- coding: utf8 -*-
'''
Copyright 2009 Denis Derman <denis.spir@gmail.com> (former developer)
Copyright 2011-2012 Peter Potrowl <peter017@gmail.com> (current developer)
This file is part of Pijnu.
Pijnu is free software: you can redistribute it and/or modify it
under the terms of the GNU Lesser General Public License as published
by the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Pijnu is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with Pijnu. If not, see <http://www.gnu.org/licenses/>.
'''
""" wikiLine
lineChar : [\x20..\x7e]
rawChar : [\x20..\x7e !!/!_]
DISTINCT : "//" : drop
IMPORTANT : "!!" : drop
MONOSPACE : "__" : drop
rawText : rawChar+ : join
distinctText : DISTINCT inline DISTINCT : liftValue
importantText : IMPORTANT inline IMPORTANT : liftValue
monospaceText : MONOSPACE inline MONOSPACE : liftValue
styledText : distinctText / importantText / monospaceText
text : styledText / rawText
inline : @ text+
"""
from pijnu import *
# title: wikiLine
inline = Recursion()
lineChar = Klass(' !"#$%&\'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~')
rawChar = Klass(' "#$%&\'()*+,-.0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^`abcdefghijklmnopqrstuvwxyz{|}~ ')
DISTINCT = Word('//')(drop)
IMPORTANT = Word('!!')(drop)
MONOSPACE = Word('__')(drop)
rawText = OneOrMore(rawChar)(join)
distinctText = Sequence(DISTINCT, inline, DISTINCT)(liftValue)
importantText = Sequence(IMPORTANT, inline, IMPORTANT)(liftValue)
monospaceText = Sequence(MONOSPACE, inline, MONOSPACE)(liftValue)
styledText = Choice(distinctText, importantText, monospaceText)
text = Choice(styledText, rawText)
inline **= OneOrMore(text)
parser = Parser('wikiLine', locals(), 'inline')
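# Illustrative input (derived from the grammar above, not from the original
# sample): for a line such as
#   plain text with //distinct//, !!important!! and __monospace__ parts
# the "inline" pattern matches a sequence of rawText and styledText nodes,
# with the //, !! and __ markers dropped by the DISTINCT / IMPORTANT /
# MONOSPACE tokens.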
|
peter17/pijnu
|
samples/wikiLine.py
|
Python
|
gpl-3.0
| 2,097
| 0.007153
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
T = TypeVar('T')
JSONType = Any
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_list_request(
subscription_id: str,
resource_group_name: str,
account_name: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2021-08-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NetApp/netAppAccounts/{accountName}/backupPolicies')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
"accountName": _SERIALIZER.url("account_name", account_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_get_request(
subscription_id: str,
resource_group_name: str,
account_name: str,
backup_policy_name: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2021-08-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NetApp/netAppAccounts/{accountName}/backupPolicies/{backupPolicyName}')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
"accountName": _SERIALIZER.url("account_name", account_name, 'str'),
"backupPolicyName": _SERIALIZER.url("backup_policy_name", backup_policy_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_create_request_initial(
subscription_id: str,
resource_group_name: str,
account_name: str,
backup_policy_name: str,
*,
json: JSONType = None,
content: Any = None,
**kwargs: Any
) -> HttpRequest:
content_type = kwargs.pop('content_type', None) # type: Optional[str]
api_version = "2021-08-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NetApp/netAppAccounts/{accountName}/backupPolicies/{backupPolicyName}')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
"accountName": _SERIALIZER.url("account_name", account_name, 'str'),
"backupPolicyName": _SERIALIZER.url("backup_policy_name", backup_policy_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PUT",
url=url,
params=query_parameters,
headers=header_parameters,
json=json,
content=content,
**kwargs
)
def build_update_request_initial(
subscription_id: str,
resource_group_name: str,
account_name: str,
backup_policy_name: str,
*,
json: JSONType = None,
content: Any = None,
**kwargs: Any
) -> HttpRequest:
content_type = kwargs.pop('content_type', None) # type: Optional[str]
api_version = "2021-08-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NetApp/netAppAccounts/{accountName}/backupPolicies/{backupPolicyName}')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
"accountName": _SERIALIZER.url("account_name", account_name, 'str'),
"backupPolicyName": _SERIALIZER.url("backup_policy_name", backup_policy_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PATCH",
url=url,
params=query_parameters,
headers=header_parameters,
json=json,
content=content,
**kwargs
)
def build_delete_request_initial(
subscription_id: str,
resource_group_name: str,
account_name: str,
backup_policy_name: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2021-08-01"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NetApp/netAppAccounts/{accountName}/backupPolicies/{backupPolicyName}')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
"accountName": _SERIALIZER.url("account_name", account_name, 'str'),
"backupPolicyName": _SERIALIZER.url("backup_policy_name", backup_policy_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
return HttpRequest(
method="DELETE",
url=url,
params=query_parameters,
**kwargs
)
class BackupPoliciesOperations(object):
"""BackupPoliciesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.netapp.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def list(
self,
resource_group_name: str,
account_name: str,
**kwargs: Any
) -> Iterable["_models.BackupPoliciesList"]:
"""List backup policies.
        List backup policies for the NetApp account.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param account_name: The name of the NetApp account.
:type account_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either BackupPoliciesList or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.netapp.models.BackupPoliciesList]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.BackupPoliciesList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
account_name=account_name,
template_url=self.list.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
account_name=account_name,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("BackupPoliciesList", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NetApp/netAppAccounts/{accountName}/backupPolicies'} # type: ignore
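    # Illustrative usage (client construction and names are assumptions, not
    # part of this generated module):
    #   for policy in netapp_client.backup_policies.list("myRG", "myAccount"):
    #       print(policy.name)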
@distributed_trace
def get(
self,
resource_group_name: str,
account_name: str,
backup_policy_name: str,
**kwargs: Any
) -> "_models.BackupPolicy":
"""Get a backup Policy.
Get a particular backup Policy.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param account_name: The name of the NetApp account.
:type account_name: str
        :param backup_policy_name: Backup policy name which uniquely identifies the backup policy.
:type backup_policy_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: BackupPolicy, or the result of cls(response)
:rtype: ~azure.mgmt.netapp.models.BackupPolicy
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.BackupPolicy"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
account_name=account_name,
backup_policy_name=backup_policy_name,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('BackupPolicy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NetApp/netAppAccounts/{accountName}/backupPolicies/{backupPolicyName}'} # type: ignore
def _create_initial(
self,
resource_group_name: str,
account_name: str,
backup_policy_name: str,
body: "_models.BackupPolicy",
**kwargs: Any
) -> Optional["_models.BackupPolicy"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.BackupPolicy"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(body, 'BackupPolicy')
request = build_create_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
account_name=account_name,
backup_policy_name=backup_policy_name,
content_type=content_type,
json=_json,
template_url=self._create_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('BackupPolicy', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('BackupPolicy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NetApp/netAppAccounts/{accountName}/backupPolicies/{backupPolicyName}'} # type: ignore
@distributed_trace
def begin_create(
self,
resource_group_name: str,
account_name: str,
backup_policy_name: str,
body: "_models.BackupPolicy",
**kwargs: Any
) -> LROPoller["_models.BackupPolicy"]:
"""Create a backup policy.
        Create a backup policy for the NetApp account.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param account_name: The name of the NetApp account.
:type account_name: str
        :param backup_policy_name: Backup policy name which uniquely identifies the backup policy.
:type backup_policy_name: str
:param body: Backup policy object supplied in the body of the operation.
:type body: ~azure.mgmt.netapp.models.BackupPolicy
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either BackupPolicy or the result of
cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.netapp.models.BackupPolicy]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.BackupPolicy"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_initial(
resource_group_name=resource_group_name,
account_name=account_name,
backup_policy_name=backup_policy_name,
body=body,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('BackupPolicy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NetApp/netAppAccounts/{accountName}/backupPolicies/{backupPolicyName}'} # type: ignore
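    # Illustrative long-running-operation usage (resource names and the
    # BackupPolicy fields shown are assumptions):
    #   poller = netapp_client.backup_policies.begin_create(
    #       "myRG", "myAccount", "dailyPolicy",
    #       BackupPolicy(location="eastus", daily_backups_to_keep=7))
    #   policy = poller.result()   # blocks until the LRO completes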
def _update_initial(
self,
resource_group_name: str,
account_name: str,
backup_policy_name: str,
body: "_models.BackupPolicyPatch",
**kwargs: Any
) -> "_models.BackupPolicy":
cls = kwargs.pop('cls', None) # type: ClsType["_models.BackupPolicy"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(body, 'BackupPolicyPatch')
request = build_update_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
account_name=account_name,
backup_policy_name=backup_policy_name,
content_type=content_type,
json=_json,
template_url=self._update_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('BackupPolicy', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('BackupPolicy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NetApp/netAppAccounts/{accountName}/backupPolicies/{backupPolicyName}'} # type: ignore
@distributed_trace
def begin_update(
self,
resource_group_name: str,
account_name: str,
backup_policy_name: str,
body: "_models.BackupPolicyPatch",
**kwargs: Any
) -> LROPoller["_models.BackupPolicy"]:
"""Patch a backup policy.
Patch a backup policy for Netapp Account.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param account_name: The name of the NetApp account.
:type account_name: str
:param backup_policy_name: Backup policy Name which uniquely identify backup policy.
:type backup_policy_name: str
:param body: Backup policy object supplied in the body of the operation.
:type body: ~azure.mgmt.netapp.models.BackupPolicyPatch
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either BackupPolicy or the result of
cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.netapp.models.BackupPolicy]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.BackupPolicy"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._update_initial(
resource_group_name=resource_group_name,
account_name=account_name,
backup_policy_name=backup_policy_name,
body=body,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('BackupPolicy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NetApp/netAppAccounts/{accountName}/backupPolicies/{backupPolicyName}'} # type: ignore
def _delete_initial(
self,
resource_group_name: str,
account_name: str,
backup_policy_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_delete_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
account_name=account_name,
backup_policy_name=backup_policy_name,
template_url=self._delete_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NetApp/netAppAccounts/{accountName}/backupPolicies/{backupPolicyName}'} # type: ignore
@distributed_trace
def begin_delete(
self,
resource_group_name: str,
account_name: str,
backup_policy_name: str,
**kwargs: Any
) -> LROPoller[None]:
"""Delete a backup policy.
Delete backup policy.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param account_name: The name of the NetApp account.
:type account_name: str
:param backup_policy_name: Backup policy Name which uniquely identify backup policy.
:type backup_policy_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
account_name=account_name,
backup_policy_name=backup_policy_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NetApp/netAppAccounts/{accountName}/backupPolicies/{backupPolicyName}'} # type: ignore
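# ----------------------------------------------------------------------
# Illustrative usage sketch (not part of the generated operations class).
# The client construction below is an assumption for the example only;
# the "<...>" values are placeholders.
#
#   from azure.identity import DefaultAzureCredential
#   from azure.mgmt.netapp import NetAppManagementClient
#
#   client = NetAppManagementClient(DefaultAzureCredential(), "<subscription-id>")
#   poller = client.backup_policies.begin_delete(
#       "<resource-group>", "<netapp-account>", "<backup-policy>")
#   poller.result()  # block until the long-running delete completes
# ----------------------------------------------------------------------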
|
Azure/azure-sdk-for-python
|
sdk/netapp/azure-mgmt-netapp/azure/mgmt/netapp/operations/_backup_policies_operations.py
|
Python
|
mit
| 31,820
| 0.004431
|
# -*- coding: utf-8 -*-
import logging, logging.handlers
from django.conf import settings
def get_logger(name, level=logging.INFO, format='[%(asctime)s] %(message)s', handler=None, filename=None):
new_logger = logging.getLogger(name)
new_logger.setLevel(level)
if not handler:
filename = filename or '%s/logs/%s.log' % (settings.HOME_DIR, name)
handler = logging.FileHandler(filename)
handler.setFormatter(logging.Formatter(format))
new_logger.addHandler(handler)
return new_logger
logger = None
if hasattr(settings, 'LOG_FILENAME') and not logger:
    handler = logging.handlers.TimedRotatingFileHandler(settings.LOG_FILENAME, when='midnight')
    logger = get_logger('default', handler=handler)
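# Illustrative usage sketch; the logger name and message are arbitrary
# examples, and get_logger assumes settings.HOME_DIR contains a writable
# "logs/" directory when no handler or filename is supplied:
#
#   tasks_logger = get_logger('tasks', level=logging.DEBUG)
#   tasks_logger.info('worker started')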
|
leliel12/handy
|
handy/logger.py
|
Python
|
bsd-3-clause
| 733
| 0.006821
|
from collections import deque
from typing import List
class Solution:
def containVirus(self, grid: List[List[int]]) -> int:
current_set_number = 1
grid_set = [[0 for i in range(len(grid[0]))] for j in range(len(grid))]
set_grid = {}
threaten = {}
def getAdjacentCellsSet(row, col) -> List[int]:
answer = []
if row != 0 and grid_set[row-1][col] != 0 and grid_set[row-1][col] not in answer:
answer.append(grid_set[row-1][col])
if col != 0 and grid_set[row][col-1] != 0 and grid_set[row][col-1] not in answer:
answer.append(grid_set[row][col-1])
if row != len(grid)-1 and grid_set[row+1][col] != 0 and grid_set[row+1][col] not in answer:
answer.append(grid_set[row+1][col])
if col != len(grid[0])-1 and grid_set[row][col+1] != 0 and grid_set[row][col+1] not in answer:
answer.append(grid_set[row][col+1])
if -1 in answer:
answer.remove(-1)
if grid_set[row][col] in answer:
answer.remove(grid_set[row][col])
return answer
# Merge all regions to the first one.
def merge(regions: List[int]):
merge_to = regions[0]
for i in range(1, len(regions)):
for x, y in set_grid[regions[i]]:
grid_set[x][y] = merge_to
set_grid[merge_to] += set_grid[regions[i]]
del set_grid[regions[i]]
if regions[i] in threaten:
del threaten[regions[i]]
for i in range(len(grid)):
for j in range(len(grid[0])):
if grid[i][j] == 1:
adjacent_sets = getAdjacentCellsSet(i, j)
set_number = 0
if len(adjacent_sets) == 0:
set_number = current_set_number
current_set_number += 1
elif len(adjacent_sets) == 1:
set_number = adjacent_sets[0]
else: # Merge
merge(adjacent_sets)
set_number = adjacent_sets[0]
grid_set[i][j] = set_number
if set_number not in set_grid:
set_grid[set_number] = []
set_grid[set_number].append((i, j))
def adjacentThreatened(x, y):
answer = []
if x != 0 and grid_set[x-1][y] == 0:
answer.append((x-1, y))
if y != 0 and grid_set[x][y-1] == 0:
answer.append((x, y-1))
if x != len(grid_set)-1 and grid_set[x+1][y] == 0:
answer.append((x+1, y))
if y != len(grid_set[0])-1 and grid_set[x][y+1] == 0:
answer.append((x, y+1))
return answer
def threatenCells():
for i in set_grid:
if i == 0 or i == -1:
continue
threatened = set()
for x, y in set_grid[i]:
threatened = threatened.union(adjacentThreatened(x, y))
threaten[i] = len(threatened)
def contain(set_number):
wall = 0
for x, y in set_grid[set_number]:
grid_set[x][y] = -1
if x != 0 and grid_set[x-1][y] == 0:
wall += 1
if y != 0 and grid_set[x][y-1] == 0:
wall += 1
if x != len(grid_set)-1 and grid_set[x+1][y] == 0:
wall += 1
if y != len(grid_set[0])-1 and grid_set[x][y+1] == 0:
wall += 1
del set_grid[set_number]
del threaten[set_number]
return wall
def spread():
to_spread = deque()
for _, v in set_grid.items():
to_spread.extend(v)
while len(to_spread) > 0:
x, y = to_spread.popleft()
current_set = grid_set[x][y]
if x != 0 and grid_set[x-1][y] == 0:
grid_set[x-1][y] = current_set
set_grid[current_set].append((x-1, y))
adj = getAdjacentCellsSet(x-1, y)
merge([current_set]+adj)
if y != 0 and grid_set[x][y-1] == 0:
grid_set[x][y-1] = current_set
set_grid[current_set].append((x, y-1))
adj = getAdjacentCellsSet(x, y-1)
merge([current_set]+adj)
if x != len(grid_set)-1 and grid_set[x+1][y] == 0:
grid_set[x+1][y] = current_set
set_grid[current_set].append((x+1, y))
adj = getAdjacentCellsSet(x+1, y)
merge([current_set]+adj)
if y != len(grid_set[0])-1 and grid_set[x][y+1] == 0:
grid_set[x][y+1] = current_set
set_grid[current_set].append((x, y+1))
adj = getAdjacentCellsSet(x, y+1)
merge([current_set]+adj)
answer = 0
threatenCells()
# print(grid_set)
# print(answer)
while len(threaten) != 0:
# print(threaten)
largest_infected = sorted(
threaten.items(), key=lambda x: x[1], reverse=True)[0]
answer += contain(largest_infected[0])
spread()
# print(grid_set)
# print(answer)
threatenCells()
return answer
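# Illustrative usage sketch; the grid below is a small made-up example
# (0 = healthy, 1 = infected), not an official test case:
#
#   solver = Solution()
#   walls_used = solver.containVirus([[0, 1, 0],
#                                     [0, 0, 0],
#                                     [0, 0, 1]])
#   print(walls_used)  # total number of walls installed across all days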
|
jianjunz/online-judge-solutions
|
leetcode/0750-contain-virus.py
|
Python
|
mit
| 5,673
| 0.000705
|
import copy
from .point import Point
from .misc import *
'''
Line is defined using two point(s).
'''
class Line(object):
_ID_NAME = '_LINE_ID'
_DB_NAME = '_EXISTING_LINES'
def __init__(self, geom, p0, p1):
def check(p):
if geom is None: return p
if isinstance(p, Point):
found,pid = exist(geom,p)
if found: return pid
else:
if geom.get(Point,p) is not None: return p
return None
assert isinstance(p0, (Point, int, long))
assert isinstance(p1, (Point, int, long))
self.pid = [check(p0), check(p1)]
if self.pid[0] is None: raise RuntimeError("Line: Point p0 does not exist in geo-file")
if self.pid[1] is None: raise RuntimeError("Line: Point p1 does not exist in geo-file")
if self.pid[0] == self.pid[1]: raise RuntimeError("Line: Cannot construct lines of zero length")
return
# for printing to terminal
def __repr__(self):
return "l("+remove_bracket(str(self.dataFromKey(self.key())))+")"
def code(self, geom):
'''
Return the code for use in the geo-file
'''
# we do not allow the same line to be added twice
# self.exist(...) should return a (new) idx if not found
found,idx = exist(geom,self)
if found: return ''
return '\n'.join([('Line(%d) = {%d,%d};') % (idx,self.pid[0], self.pid[1])])
# NOTE: for uniqueness the sorted idx is used as "key" in the database
def key(self, master=False):
keystr=remove_bracket(str(sorted(map(abs,self.pid)) + self.pid))
if master:
return remove_bracket(str(sorted(map(abs,self.pid))))
return keystr
# this is an alternative constructor which can be called directly as "Line.fromkey(keystr)"
@classmethod
def fromkey(cls, keystr):
pid=cls.dataFromKey(keystr)
return Line(None, pid[0], pid[1])
@classmethod
def masterDBKeys(cls, geom):
subkeys=copy.deepcopy(getDB(geom,cls).keys())
for i in range(0,len(subkeys)):
tmp=subkeys[i].split(',')
subkeys[i]=",".join(tmp[:len(tmp)/2])
return subkeys
@staticmethod
def dataFromKey(keystr):
return [int(i) for i in keystr.split(',')][2:]
|
BV-DR/foamBazar
|
pythonScripts/gmshScript/line.py
|
Python
|
gpl-3.0
| 2,331
| 0.014586
|
#!/usr/bin/env python2
# Copyright (C) 2013-2014 Computer Sciences Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import setup, find_packages
setup(
name='ezbake-support-django',
version='2.1',
description='Supporting library for integrating Django applications with EzBake.',
license='Apache License 2.0',
author='EzBake Developers',
author_email='developers@ezbake.io',
namespace_packages=['ezbake', 'ezbake.support'],
packages=find_packages('lib', exclude=['test*']),
package_dir={'': 'lib'},
install_requires=[
'ezbake-security-client==2.1',
'Django>=1.4',
'psycopg2>=2.5',
],
)
|
infochimps-forks/ezbake-common-python
|
support/django/setup.py
|
Python
|
apache-2.0
| 1,200
| 0.000833
|
#!/usr/bin/env python
# encoding: utf-8
"""An example for a function returning a function"""
def surround(tag1, tag2):
def wraps(content):
return '{}{}{}'.format(tag1, content, tag2)
return wraps
def printer(content, transform):
return transform(content)
print printer("foo bar", surround("<a>", "</a>"))
print printer("foo bar", surround('<p>', '</p>'))
|
prodicus/dabble
|
python/decorators/decorate_with_tags.py
|
Python
|
mit
| 381
| 0.010499
|
"""Module containing the logic for our debugging logic."""
from __future__ import print_function
import json
import platform
import setuptools
def print_information(option, option_string, value, parser,
option_manager=None):
"""Print debugging information used in bug reports.
:param option:
The optparse Option instance.
:type option:
optparse.Option
:param str option_string:
The option name
:param value:
The value passed to the callback parsed from the command-line
:param parser:
The optparse OptionParser instance
:type parser:
optparse.OptionParser
:param option_manager:
The Flake8 OptionManager instance.
:type option_manager:
flake8.options.manager.OptionManager
"""
if not option_manager.registered_plugins:
# NOTE(sigmavirus24): Flake8 parses options twice. The first time, we
# will not have any registered plugins. We can skip this one and only
# take action on the second time we're called.
return
print(json.dumps(information(option_manager), indent=2, sort_keys=True))
raise SystemExit(False)
def information(option_manager):
"""Generate the information to be printed for the bug report."""
return {
'version': option_manager.version,
'plugins': plugins_from(option_manager),
'dependencies': dependencies(),
'platform': {
'python_implementation': platform.python_implementation(),
'python_version': platform.python_version(),
'system': platform.system(),
},
}
def plugins_from(option_manager):
"""Generate the list of plugins installed."""
return [{'plugin': plugin, 'version': version}
for (plugin, version) in sorted(option_manager.registered_plugins)]
def dependencies():
"""Generate the list of dependencies we care about."""
return [{'dependency': 'setuptools', 'version': setuptools.__version__}]
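# Illustrative shape of the JSON emitted by print_information(); the plugin
# names and version numbers below are invented for the example and depend on
# the actual installation:
#
#   {
#     "dependencies": [{"dependency": "setuptools", "version": "36.0.1"}],
#     "platform": {
#       "python_implementation": "CPython",
#       "python_version": "2.7.13",
#       "system": "Linux"
#     },
#     "plugins": [{"plugin": "pyflakes", "version": "1.5.0"}],
#     "version": "3.3.0"
#   }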
|
vicky2135/lucious
|
oscar/lib/python2.7/site-packages/flake8/main/debug.py
|
Python
|
bsd-3-clause
| 2,017
| 0
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-08-01 23:15
import autoslug.fields
import common.utils
import datetime
from django.conf import settings
import django.contrib.postgres.fields
from django.db import migrations, models
import django.db.models.deletion
from django.utils.timezone import utc
import open_humans.storage
import private_sharing.models
class Migration(migrations.Migration):
initial = True
dependencies = [
('open_humans', '0003_auto_20151223_1827'),
('oauth2_provider', '__first__'),
('open_humans', '0004_member_badges'),
]
operations = [
migrations.CreateModel(
name='DataRequestProject',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('is_study', models.BooleanField(choices=[(True, 'Study'), (False, 'Activity')], help_text='A "study" is doing human subjects research and must have Institutional Review Board approval or equivalent ethics board oversight. Activities can be anything else, e.g. data visualizations.', verbose_name='Is this project a study or an activity?')),
('name', models.CharField(max_length=100, verbose_name='Project name')),
('leader', models.CharField(max_length=100, verbose_name='Leader(s) or principal investigator(s)')),
('organization', models.CharField(max_length=100, verbose_name='Organization or institution')),
('contact_email', models.EmailField(max_length=254, verbose_name='Contact email for your project')),
('info_url', models.URLField(verbose_name='URL for general information about your project')),
('short_description', models.CharField(max_length=140, verbose_name='A short description')),
('long_description', models.TextField(max_length=1000, verbose_name='A long description')),
('active', models.BooleanField(choices=[(True, 'Yes'), (False, 'No')], default=True, help_text='"Active" status is required to perform authorization\nprocesses, including during drafting stage. If a project is not active,\nit won\'t show up in listings, and new data sharing authorizations cannot occur.\nProjects which are "active" but not approved may have some information shared\nin an "In Development" section, so Open Humans members can see potential\nupcoming studies.')),
('badge_image', models.ImageField(blank=True, help_text="A badge that will be displayed on the user's profile once they've connected your project.", max_length=1024, storage=open_humans.storage.PublicStorage(), upload_to=private_sharing.models.badge_upload_path)),
('request_sources_access', django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=100), help_text='List of sources this project is requesting access to on Open Humans.', size=None, verbose_name="Data sources you're requesting access to")),
('request_message_permission', models.BooleanField(choices=[(True, 'Yes'), (False, 'No')], help_text='Permission to send messages to the member. This does not grant access to their email address.', verbose_name='Are you requesting permission to message users?')),
('request_username_access', models.BooleanField(choices=[(True, 'Yes'), (False, 'No')], help_text="Access to the member's username. This implicitly enables access to anything the user is publicly sharing on Open Humans. Note that this is potentially sensitive and/or identifying.", verbose_name='Are you requesting Open Humans usernames?')),
('approved', models.BooleanField(default=False)),
('created', models.DateTimeField(auto_now_add=True)),
('last_updated', models.DateTimeField(auto_now=True)),
('api_access_secret', models.CharField(max_length=64)),
],
options={
'verbose_name_plural': 'Data request activities',
},
),
migrations.CreateModel(
name='DataRequestProjectMember',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('user_id_code', models.CharField(max_length=16)),
('message_permission', models.BooleanField()),
('username_shared', models.BooleanField()),
('sources_shared', django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=100), size=None)),
('member', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='open_humans.Member')),
],
),
migrations.CreateModel(
name='OAuth2DataRequestProject',
fields=[
('datarequestproject_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='private_sharing.DataRequestProject')),
('enrollment_url', models.URLField(help_text="The URL we direct members to if they're interested in sharing data with your project.", verbose_name='Enrollment URL')),
('redirect_url', models.CharField(help_text='The return URL for our "authorization code" OAuth2 grant\n process. You can <a target="_blank" href="">read more about OAuth2\n "authorization code" transactions here</a>.', max_length=256, verbose_name='Redirect URL')),
('application', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.OAUTH2_PROVIDER_APPLICATION_MODEL)),
],
options={
'verbose_name': 'OAuth2 data request project',
},
bases=('private_sharing.datarequestproject',),
),
migrations.CreateModel(
name='OnSiteDataRequestProject',
fields=[
('datarequestproject_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='private_sharing.DataRequestProject')),
('consent_text', models.TextField(help_text='The "informed consent" text that describes your project to Open Humans members.')),
('post_sharing_url', models.URLField(blank=True, help_text='If provided, after authorizing sharing the\nmember will be taken to this URL. If this URL includes "PROJECT_MEMBER_ID"\nwithin it, we will replace that with the member\'s project-specific\nproject_member_id. This allows you to direct them to an external survey you\noperate (e.g. using Google Forms) where a pre-filled project_member_id field\nallows you to connect those responses to corresponding data in Open Humans.', verbose_name='Post-sharing URL')),
],
options={
'verbose_name': 'On-site data request project',
},
bases=('private_sharing.datarequestproject',),
),
migrations.AddField(
model_name='datarequestprojectmember',
name='project',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='project_members', to='private_sharing.DataRequestProject'),
),
migrations.AddField(
model_name='datarequestproject',
name='coordinator',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='open_humans.Member'),
),
migrations.AlterField(
model_name='datarequestproject',
name='long_description',
field=models.TextField(max_length=1000, verbose_name='A long description (1000 characters max)'),
),
migrations.AlterField(
model_name='datarequestproject',
name='short_description',
field=models.CharField(max_length=140, verbose_name='A short description (140 characters max)'),
),
migrations.AlterField(
model_name='datarequestprojectmember',
name='member',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='open_humans.Member'),
),
migrations.RenameField(
model_name='datarequestprojectmember',
old_name='user_id_code',
new_name='project_member_id',
),
migrations.AlterField(
model_name='datarequestprojectmember',
name='project_member_id',
field=models.CharField(max_length=16, unique=True),
),
migrations.AlterField(
model_name='datarequestproject',
name='request_sources_access',
field=django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=100), default=list, help_text='List of sources this project is requesting access to on Open Humans.', size=None, verbose_name="Data sources you're requesting access to"),
),
migrations.AlterField(
model_name='datarequestproject',
name='active',
field=models.BooleanField(choices=[(True, 'Yes'), (False, 'No')], default=True, help_text='"Active" status is required to perform authorization\nprocesses, including during drafting stage. If a project is not active, it\nwon\'t show up in listings, and new data sharing authorizations cannot occur.\nProjects which are "active" but not approved may have some information shared\nin an "In Development" section, so Open Humans members can see potential\nupcoming studies.'),
),
migrations.AddField(
model_name='datarequestprojectmember',
name='created',
field=models.DateTimeField(auto_now_add=True, default=datetime.datetime(2016, 3, 4, 5, 14, 50, 931889, tzinfo=utc)),
preserve_default=False,
),
migrations.AlterField(
model_name='datarequestprojectmember',
name='message_permission',
field=models.BooleanField(default=False),
),
migrations.AlterField(
model_name='datarequestprojectmember',
name='sources_shared',
field=django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=100), default=list, size=None),
),
migrations.AlterField(
model_name='datarequestprojectmember',
name='username_shared',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='datarequestproject',
name='slug',
field=autoslug.fields.AutoSlugField(editable=False, populate_from='name', unique=True),
),
migrations.AddField(
model_name='datarequestprojectmember',
name='revoked',
field=models.BooleanField(default=False),
),
migrations.AlterModelOptions(
name='datarequestproject',
options={},
),
migrations.AddField(
model_name='datarequestprojectmember',
name='authorized',
field=models.BooleanField(default=False),
),
migrations.AlterField(
model_name='datarequestproject',
name='slug',
field=autoslug.fields.AutoSlugField(always_update=True, editable=False, populate_from='name', unique=True),
),
migrations.AddField(
model_name='datarequestproject',
name='is_academic_or_nonprofit',
field=models.BooleanField(choices=[(True, 'Yes'), (False, 'No')], default=False, verbose_name='Is this institution or organization an academic institution or non-profit organization?'),
preserve_default=False,
),
migrations.AddField(
model_name='datarequestprojectmember',
name='consent_text',
field=models.TextField(blank=True),
),
migrations.RemoveField(
model_name='datarequestproject',
name='api_access_secret',
),
migrations.AddField(
model_name='datarequestproject',
name='master_access_token',
field=models.CharField(default=common.utils.generate_id, max_length=64),
),
migrations.AddField(
model_name='datarequestprojectmember',
name='joined',
field=models.BooleanField(default=False),
),
migrations.AlterField(
model_name='datarequestproject',
name='request_sources_access',
field=django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=100), blank=True, default=list, help_text='List of sources this project is requesting access to on Open Humans.', size=None, verbose_name="Data sources you're requesting access to"),
),
migrations.AlterField(
model_name='datarequestproject',
name='organization',
field=models.CharField(blank=True, max_length=100, verbose_name='Organization or institution'),
),
migrations.AddField(
model_name='datarequestproject',
name='returned_data_description',
field=models.CharField(blank=True, help_text="Leave this blank if your project doesn't plan to add or return new data for your members.", max_length=140, verbose_name='Description of data you plan to upload to member accounts (140 characters max)'),
),
migrations.AlterField(
model_name='datarequestproject',
name='active',
field=models.BooleanField(choices=[(True, 'Yes'), (False, 'No')], default=True, help_text='"Active" status is required to perform authorization\nprocesses, including during drafting stage. If a project is not active, it\nwon\'t show up in listings of activities that can be joined by participants, and\nnew data sharing authorizations cannot occur. Projects which are "active" but\nnot approved may have some information shared in an "In Development" section,\nso Open Humans members can see potential upcoming studies. Removing "active"\nstatus from a project will not remove any uploaded files from a project\nmember\'s profile.'),
),
migrations.AddField(
model_name='datarequestproject',
name='token_expiration_date',
field=models.DateTimeField(default=private_sharing.models.now_plus_24_hours),
),
migrations.AddField(
model_name='datarequestproject',
name='token_expiration_disabled',
field=models.BooleanField(default=False),
),
]
|
PersonalGenomesOrg/open-humans
|
private_sharing/migrations/0001_squashed_0034_auto_20160727_2138.py
|
Python
|
mit
| 14,678
| 0.002861
|
from setuptools import setup, find_packages
setup(
name="simple-crawler",
version="0.1",
url="https://github.com/shonenada/crawler",
author="shonenada",
author_email="shonenada@gmail.com",
description="Simple crawler",
zip_safe=True,
platforms="any",
packages=find_packages(),
install_requires=["requests==2.2.1"],
)
|
shonenada/crawler
|
setup.py
|
Python
|
mit
| 359
| 0
|
# -*- mode: python -*-
# -*- coding: iso8859-15 -*-
##############################################################################
#
# Gestion scolarite IUT
#
# Copyright (c) 2001 - 2006 Emmanuel Viennet. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Emmanuel Viennet emmanuel.viennet@viennet.net
#
##############################################################################
"""Basic User management
"""
???????????????
|
denys-duchier/Scolar
|
notes_users.py
|
Python
|
gpl-2.0
| 1,115
| 0
|
import numpy as np
from ss_generator import geometry
from . import basic
D_MEAN = 3.81
D_STD = 0.02
THETA_MEAN = np.radians(91.8)
THETA_STD = np.radians(3.35)
TAU_MEAN = np.radians(49.5)
TAU_STD = np.radians(7.1)
def theta_tau_to_rotation_matrix(theta, tau):
'''Get the rotation matrix corresponding to the
bond angle theta and dihedral tau.
'''
return np.dot(geometry.rotation_matrix_from_axis_and_angle(np.array([0, 1, 0]), tau),
geometry.rotation_matrix_from_axis_and_angle(np.array([0, 0, 1]), theta - np.pi))
def axis_to_theta_tau(axis):
'''Get the bond angle theta and dihedral tau,
from a rotation axis.
'''
theta = 2 * np.arctan(-axis[1] / axis[0])
tau = 2 * np.arctan(axis[0] / axis[2])
return theta, tau
def check_theta_tau(theta, tau):
    '''Check that theta and tau are each within 3 standard deviations
    of their respective means.
'''
if theta > THETA_MEAN + 3 * THETA_STD or theta < THETA_MEAN - 3 * THETA_STD:
return False
if tau > TAU_MEAN + 3 * TAU_STD or tau < TAU_MEAN - 3 * TAU_STD:
return False
return True
def theta_tau_for_nexus(axis, axis_new):
'''Given an axis, find a pair of (theta, tau) such that
after rotating the coordinate frame by M(theta, tau), the
    coordinates of the axis in the new frame are axis_new.
'''
vx1 = axis[0]
vy1 = axis[1]
vz1 = axis[2]
vx2 = axis_new[0]
vy2 = axis_new[1]
vz2 = axis_new[2]
# Calculate the tau angle
t = 1 / (vz2 + vz1) * (vx1 + np.sign(vx1) * np.sqrt(vx1 ** 2 - (vz2 ** 2 - vz1 ** 2)))
tau = 2 * np.arctan(t)
# Calculate the theta angle
s = np.sin(tau)
c = np.cos(tau)
q = 1 / (vx2 + s * vz1 - c * vx1) * (-vy1 \
- np.sign(vy1) * np.sqrt(vy1 ** 2 - (vx2 ** 2 - (s * vz1 - c * vx1) ** 2)))
theta = 2 * np.arctan(q)
return theta, tau
### Functions to generate a new helix
def generate_alpha_helix_from_screw_axes(screw_axes, relieve_strain=False, M_init=None):
'''Generate an alpha helix from a list of screw axes.
Return a list of Ca coordinates.
'''
thetas, taus, M_rot = get_theta_tau_and_rotation_matrix_from_screw_axes(
screw_axes, relieve_strain=relieve_strain, M_init=M_init)
ca_list = basic.generate_segment_from_internal_coordinates(
[D_MEAN] * (len(screw_axes) + 2), thetas, taus)
return [np.dot(M_rot, ca) for ca in ca_list]
def get_theta_tau_and_rotation_matrix_from_screw_axes(screw_axes, relieve_strain=False, M_init=None):
'''Get internal coordinates theta and tau from a list
of screw axes.
'''
# Get the rotation matrix from the default frame to the first local frame.
# Note that there are infinite number of possible matrices to do so.
if M_init is None:
axis_default = geometry.rotation_matrix_to_axis_and_angle(
theta_tau_to_rotation_matrix(THETA_MEAN, TAU_MEAN))[0]
M_init = geometry.rotation_matrix_to_superimpose_two_vectors(
axis_default, screw_axes[0], theta=np.random.uniform(-np.pi, np.pi))
# Get the internal coordinates
thetas = [THETA_MEAN] * 2
taus = [TAU_MEAN]
M_rot = np.dot(M_init, theta_tau_to_rotation_matrix(THETA_MEAN, TAU_MEAN))
for i in range(1, len(screw_axes)):
local_axis = np.dot(np.transpose(M_rot), screw_axes[i])
theta, tau = axis_to_theta_tau(local_axis)
# Relieve the strain
if relieve_strain and i % 7 == 0 and i + 1 < len(screw_axes):
next_axis = np.dot(np.transpose(M_rot), screw_axes[i + 1])
ideal_axis = geometry.rotation_matrix_to_axis_and_angle(
theta_tau_to_rotation_matrix(THETA_MEAN, TAU_MEAN))[0]
theta, tau = theta_tau_for_nexus(next_axis, ideal_axis)
if not check_theta_tau(theta, tau):
raise Exception("The value of theta or tau beyond the limits.")
M_local = theta_tau_to_rotation_matrix(theta, tau)
M_rot = np.dot(M_rot, M_local)
thetas.append(theta)
taus.append(tau)
return thetas, taus, M_init
def generate_super_coil(axis, omega, pitch_angle, length):
'''Generate a alpha helix super coil.
Return a list of Ca coordinates.
'''
axis = geometry.normalize(axis)
M_rot = geometry.rotation_matrix_from_axis_and_angle(axis, omega)
# Get the screw axes
axis_perpendicular = None
if np.abs(axis[0]) > 0.01:
axis_perpendicular = geometry.normalize(
np.array([axis[1], -axis[0], 0]))
else:
axis_perpendicular = geometry.normalize(
np.array([0, axis[2], -axis[1]]))
screw_seed = np.dot(geometry.rotation_matrix_from_axis_and_angle(
axis_perpendicular, pitch_angle), axis)
screw_axes = [screw_seed]
for i in range(1, length):
screw_axes.append(np.dot(M_rot, screw_axes[i - 1]))
# Generate the helix
return generate_alpha_helix_from_screw_axes(screw_axes, relieve_strain=True)
### Functions to perturb an existing helix
def randomize_a_helix(ca_list, ratio):
'''Randomize internal coordinates of a helix. Only int(ratio * len(ca_list))
residues are perturbed.
'''
ds, thetas, taus = basic.get_internal_coordinates_from_ca_list(ca_list)
num_to_perturb = int(ratio * len(ca_list))
res_to_perturb = np.random.permutation(len(ca_list) - 3)[:num_to_perturb]
for i in res_to_perturb:
theta = np.random.normal(THETA_MEAN, THETA_STD)
tau = np.random.normal(TAU_MEAN, TAU_STD)
if check_theta_tau(theta, tau):
thetas[i] = theta
taus[i] = tau
perturbed_ca_list = basic.generate_segment_from_internal_coordinates(ds, thetas, taus)
# Superimpose the perturbed ca list to the old list
M, t = geometry.get_superimpose_transformation(perturbed_ca_list, ca_list)
perturbed_ca_list = [ np.dot(M, p) + t for p in perturbed_ca_list]
return perturbed_ca_list
def shift_helix_phase(ca_list, phase_shift):
'''Shift the phase of a helix without changing
    its direction.
'''
# Get the screw axes
screw_axes = []
for i in range(1, len(ca_list) - 2):
M1 = geometry.create_frame_from_three_points(
ca_list[i - 1], ca_list[i], ca_list[i + 1])
M2 = geometry.create_frame_from_three_points(
ca_list[i], ca_list[i + 1], ca_list[i + 2])
screw_axes.append(geometry.rotation_matrix_to_axis_and_angle(
np.dot(np.transpose(M2), M1))[0])
# Get the initial rotation matrix for helix generation
M1 = geometry.create_frame_from_three_points(
ca_list[0], ca_list[1], ca_list[2])
M_init = np.dot(geometry.rotation_matrix_from_axis_and_angle(
screw_axes[0], phase_shift), np.transpose(M1))
# Calculate the Ca coordinates
shifted_ca_list = generate_alpha_helix_from_screw_axes(screw_axes, relieve_strain=True, M_init=M_init)
t = np.mean(ca_list, axis=0) - np.mean(shifted_ca_list, axis=0)
for i in range(len(shifted_ca_list)):
shifted_ca_list[i] = shifted_ca_list[i] + t
return shifted_ca_list
def twist_helix(ca_list, axis, pitch_angle, omega, ratio):
    '''Twist a helix, making it closer to a super coil that is defined
by axis, pitch_angle and omega. int(ratio * len(ca_list)) minimum
twist units (each with 6 residues) are perturbed.
'''
ds, thetas, taus = basic.get_internal_coordinates_from_ca_list(ca_list)
M_init = geometry.create_frame_from_three_points(
ca_list[0], ca_list[1], ca_list[2])
# Get residues to perturb
num_to_perturb = int(ratio * len(ca_list))
res_to_perturb = sorted(np.random.permutation(len(taus) - 2)[:num_to_perturb])
# Get the perturbed internal coordinates
for i in range(len(taus)):
if i in res_to_perturb:
new_thetas, new_taus = twist_minimum_unit(thetas[i + 1: i + 4],
taus[i: i + 3], M_init, axis, pitch_angle, omega)
if new_thetas is not None:
for j in range(3):
thetas[i + 1 + j] = new_thetas[j]
taus[i + j] = new_taus[j]
M_init = np.dot(M_init, theta_tau_to_rotation_matrix(thetas[i + 1], taus[i]))
# Get new ca coordinates
new_ca_list = basic.generate_segment_from_internal_coordinates(ds, thetas, taus)
M, t = geometry.get_superimpose_transformation(new_ca_list, ca_list)
new_ca_list = [np.dot(M, p) + t for p in new_ca_list]
return new_ca_list
def twist_minimum_unit(thetas, taus, M_init, axis, pitch_angle, omega):
'''Twist a minimum twist unit.
Return new values for thetas and taus. Return None if the twist failed.
'''
if len(thetas) != 3 or len(taus) != 3:
raise Exception("A minimum twist unit must have 3 angles and 3 torsions!")
screw_axes = []
# Get the new value of the first axis
axis1_local = geometry.rotation_matrix_to_axis_and_angle(
theta_tau_to_rotation_matrix(thetas[0], taus[0]))[0]
axis1_global = np.dot(M_init, axis1_local)
angle_to_rotate = pitch_angle - geometry.angle(axis, axis1_global)
rotate_axis = geometry.normalize(np.cross(axis, axis1_global))
for i in range(1, 5):
M_rot = geometry.rotation_matrix_from_axis_and_angle(
rotate_axis, angle_to_rotate)
new_axis_global = np.dot(M_rot, axis1_global)
new_axis_local = np.dot(np.transpose(M_init), new_axis_global)
theta, tau = axis_to_theta_tau(new_axis_local)
if check_theta_tau(theta, tau):
screw_axes.append(new_axis_global)
break
angle_to_rotate /= 2
if len(screw_axes) == 0: return None, None
# Get the new screw axes
M_rot = geometry.rotation_matrix_from_axis_and_angle(axis, omega)
for i in range(1, 3):
screw_axes.append(np.dot(M_rot, screw_axes[i - 1]))
# Get the internal coordinates
try:
thetas, taus, M = get_theta_tau_and_rotation_matrix_from_screw_axes(
screw_axes, M_init=M_init)
except:
return None, None
return thetas[1:], taus
def thread_backbone_for_helix(ca_list):
'''Thread backbone atoms for a ca list of a helix using
the method and parameters from the G. Grigoryan, W. F. DeGrado paper.
Return a list of residue dictionaries.
'''
    # Make the N-terminal residue
residue_list = [{'ca': ca_list[0],
'n': geometry.cartesian_coord_from_internal_coord(ca_list[2],
ca_list[1], ca_list[0], 1.45, np.radians(95.0), np.radians(65.0)),
'c': geometry.cartesian_coord_from_internal_coord(ca_list[2],
ca_list[1], ca_list[0], 1.52, np.radians(21.0), np.radians(-79.0))}]
# Make middle residues
for i in range(1, len(ca_list) - 1):
residue_list.append({'ca': ca_list[i],
'n': geometry.cartesian_coord_from_internal_coord(ca_list[i - 1],
ca_list[i + 1], ca_list[i], 1.45, np.radians(95.0), np.radians(14.0)),
'c': geometry.cartesian_coord_from_internal_coord(ca_list[i + 1],
ca_list[i - 1], ca_list[i], 1.52, np.radians(104.0), np.radians(16.0))})
    # Make the C-terminal residue
residue_list.append({'ca': ca_list[-1],
'n': geometry.cartesian_coord_from_internal_coord(ca_list[-3],
ca_list[-2], ca_list[-1], 1.45, np.radians(15.0), np.radians(-56.0)),
'c': geometry.cartesian_coord_from_internal_coord(ca_list[-3],
ca_list[-2], ca_list[-1], 1.52, np.radians(104.0), np.radians(67.0))})
    # Build O atoms
for i in range(1, len(ca_list)):
residue_list[i - 1]['o'] = basic.get_o_for_peptide_bond(
residue_list[i - 1]['c'], residue_list[i]['n'], residue_list[i]['ca'])
return residue_list
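# Illustrative usage sketch; the axis, per-residue rotation, pitch angle and
# length below are arbitrary example values rather than recommended
# coiled-coil parameters:
#
#   if __name__ == '__main__':
#       ca_list = generate_super_coil(np.array([0.0, 0.0, 1.0]),
#                                     omega=np.radians(-3.6),
#                                     pitch_angle=np.radians(12.0),
#                                     length=28)
#       residues = thread_backbone_for_helix(ca_list)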
|
xingjiepan/ss_generator
|
ss_generator/ca_tracing/alpha_helix.py
|
Python
|
bsd-3-clause
| 11,909
| 0.005458
|
from common.models import *
from common.localization import txt, verbose_names
@verbose_names
class Patient(models.Model):
# private
first_name = models.CharField(max_length=80)
last_name = models.CharField(max_length=80)
GENDER = (
(txt('M'), txt('male')),
(txt('F'), txt('female'))
)
gender = models.CharField(max_length=1, choices=GENDER)
BLOOD_TYPE = (
(txt('0Rh-'), txt('0Rh-')),
(txt('0Rh+'), txt('0Rh+')),
(txt('ARh-'), txt('ARh-')),
(txt('ARh+'), txt('ARh+')),
(txt('BRh-'), txt('BRh-')),
(txt('BRh+'), txt('BRh+')),
(txt('ABR-'), txt('ABRh-')),
(txt('ABR+'), txt('ABRh+')),
)
blood_type = models.CharField(max_length=4, choices=BLOOD_TYPE, blank=True, null=True)
birth_date = models.DateField()
pesel = PESELField()
# address
country = models.CharField(max_length=80, default="Polska")
city = models.CharField(max_length=80)
address = models.CharField(max_length=80, blank=True, null=True)
# mailing_address
mailing_country = models.CharField(max_length=80, blank=True, null=True)
mailing_city = models.CharField(max_length=80, blank=True, null=True)
mailing_address = models.CharField(max_length=80, blank=True, null=True)
# work
job = models.CharField(max_length=80, blank=True, null=True)
workplace = models.CharField(max_length=80, blank=True, null=True)
# contact
cell_phone = models.CharField(max_length=80, blank=True, null=True)
landline_phone = models.CharField(max_length=80, blank=True, null=True)
email = models.EmailField(blank=True, null=True)
# injury info
date_of_injury = models.DateField()
time_of_injury = models.TimeField(blank=True, null=True)
date_of_operation = models.DateField(blank=True, null=True)
time_of_operation = models.TimeField(blank=True, null=True)
additional_notes = AdditionalNotesField(blank=True, null=True)
def __str__(self):
return "{0} {1}".format(self.first_name, self.last_name)
class Meta:
ordering = ('last_name', 'first_name')
|
wesolutki/voter
|
auth/models.py
|
Python
|
gpl-3.0
| 2,125
| 0.000471
|
# Generated by Django 2.2.15 on 2020-11-24 06:44
from decimal import Decimal
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("assignments", "0015_assignmentvote_delegated_user"),
]
operations = [
migrations.AddField(
model_name="assignmentpoll",
name="db_amount_global_yes",
field=models.DecimalField(
blank=True,
decimal_places=6,
default=Decimal("0"),
max_digits=15,
null=True,
validators=[django.core.validators.MinValueValidator(Decimal("-2"))],
),
),
migrations.AddField(
model_name="assignmentpoll",
name="global_yes",
field=models.BooleanField(default=True),
),
migrations.AlterField(
model_name="assignmentpoll",
name="pollmethod",
field=models.CharField(
choices=[
("votes", "Yes per candidate"),
("N", "No per candidate"),
("YN", "Yes/No per candidate"),
("YNA", "Yes/No/Abstain per candidate"),
],
max_length=5,
),
),
migrations.AlterField(
model_name="assignmentpoll",
name="onehundred_percent_base",
field=models.CharField(
choices=[
("YN", "Yes/No per candidate"),
("YNA", "Yes/No/Abstain per candidate"),
("Y", "Sum of votes including general No/Abstain"),
("valid", "All valid ballots"),
("cast", "All casted ballots"),
("disabled", "Disabled (no percents)"),
],
max_length=8,
),
),
migrations.AlterField(
model_name="assignmentpoll",
name="pollmethod",
field=models.CharField(
choices=[
("Y", "Yes per candidate"),
("N", "No per candidate"),
("YN", "Yes/No per candidate"),
("YNA", "Yes/No/Abstain per candidate"),
],
max_length=5,
),
),
]
|
FinnStutzenstein/OpenSlides
|
server/openslides/assignments/migrations/0016_negative_votes.py
|
Python
|
mit
| 2,395
| 0.000418
|
# -*- coding: utf-8 -*-
'''
Created on 25 April 2014
@author: Kimon Tsitsikas
Copyright © 2013-2014 Kimon Tsitsikas, Delmic
This file is part of Odemis.
Odemis is free software: you can redistribute it and/or modify it under the terms
of the GNU General Public License version 2 as published by the Free Software
Foundation.
Odemis is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with
Odemis. If not, see http://www.gnu.org/licenses/.
'''
from __future__ import division
from concurrent import futures
import logging
import math
from odemis import model
import odemis
from odemis import acq
from odemis.acq import align, stream
from odemis.dataio import hdf5
from odemis.driver.actuator import ConvertStage
from odemis.util import test
import os
import threading
import time
import unittest
from unittest.case import skip
import weakref
logging.basicConfig(format="%(asctime)s %(levelname)-7s %(module)-15s: %(message)s")
logging.getLogger().setLevel(logging.DEBUG)
CONFIG_PATH = os.path.dirname(odemis.__file__) + "/../../install/linux/usr/share/odemis/"
SECOM_LENS_CONFIG = CONFIG_PATH + "sim/secom-sim-lens-align.odm.yaml" # 4x4
class TestAlignment(unittest.TestCase):
"""
Test Spot Alignment functions
"""
backend_was_running = False
@classmethod
def setUpClass(cls):
try:
test.start_backend(SECOM_LENS_CONFIG)
except LookupError:
logging.info("A running backend is already found, skipping tests")
cls.backend_was_running = True
return
except IOError as exp:
logging.error(str(exp))
raise
# find components by their role
cls.ebeam = model.getComponent(role="e-beam")
cls.sed = model.getComponent(role="se-detector")
cls.ccd = model.getComponent(role="ccd")
cls.focus = model.getComponent(role="focus")
cls.align = model.getComponent(role="align")
cls.light = model.getComponent(role="light")
cls.light_filter = model.getComponent(role="filter")
cls.stage = model.getComponent(role="stage")
# Used for OBJECTIVE_MOVE type
cls.aligner_xy = ConvertStage("converter-ab", "stage",
children={"orig": cls.align},
axes=["b", "a"],
rotation=math.radians(45))
@classmethod
def tearDownClass(cls):
if cls.backend_was_running:
return
test.stop_backend()
def setUp(self):
if self.backend_was_running:
self.skipTest("Running backend found")
# image for FakeCCD
self.data = hdf5.read_data("../align/test/one_spot.h5")
C, T, Z, Y, X = self.data[0].shape
self.data[0].shape = Y, X
self.fake_img = self.data[0]
# @skip("skip")
def test_spot_alignment(self):
"""
Test AlignSpot
"""
escan = self.ebeam
ccd = self.ccd
focus = self.focus
f = align.AlignSpot(ccd, self.aligner_xy, escan, focus)
dist, vector = f.result()
self.assertAlmostEqual(dist, 2.41e-05)
# @skip("faster")
def test_spot_alignment_cancelled(self):
"""
Test AlignSpot cancellation
"""
escan = self.ebeam
ccd = self.ccd
focus = self.focus
f = align.AlignSpot(ccd, self.aligner_xy, escan, focus)
time.sleep(0.01) # Cancel almost after the half grid is scanned
f.cancel()
self.assertTrue(f.cancelled())
self.assertTrue(f.done())
with self.assertRaises(futures.CancelledError):
f.result()
def on_done(self, future):
self.done += 1
def on_progress_update(self, future, past, left):
self.past = past
self.left = left
self.updates += 1
def test_aligned_stream(self):
"""
Test the AlignedSEMStream
"""
# Use fake ccd in order to have just one spot
ccd = FakeCCD(self, self.align)
# first try using the metadata correction
st = stream.AlignedSEMStream("sem-md", self.sed, self.sed.data, self.ebeam,
ccd, self.stage, self.focus, shiftebeam=stream.MTD_MD_UPD)
# we don't really care about the SEM image, so the faster the better
self.ebeam.dwellTime.value = self.ebeam.dwellTime.range[0]
# start one image acquisition (so it should do the calibration)
f = acq.acquire([st])
received, _ = f.result()
self.assertTrue(received, "No image received after 30 s")
# Check the correction metadata is there
md = self.sed.getMetadata()
self.assertIn(model.MD_POS_COR, md)
# Check the position of the image is correct
pos_cor = md[model.MD_POS_COR]
pos_dict = self.stage.position.value
pos = (pos_dict["x"], pos_dict["y"])
exp_pos = tuple(p - c for p, c in zip(pos, pos_cor))
imd = received[0].metadata
self.assertEqual(exp_pos, imd[model.MD_POS])
# Check the calibration doesn't happen again on a second acquisition
bad_cor = (-1, -1) # stupid impossible value
self.sed.updateMetadata({model.MD_POS_COR: bad_cor})
f = acq.acquire([st])
received, _ = f.result()
self.assertTrue(received, "No image received after 10 s")
# if calibration has happened (=bad), it has changed the metadata
md = self.sed.getMetadata()
self.assertEqual(bad_cor, md[model.MD_POS_COR],
"metadata has been updated while it shouldn't have")
# Check calibration happens again after a stage move
f = self.stage.moveRel({"x": 100e-6})
f.result() # make sure the move is over
time.sleep(0.1) # make sure the stream had time to detect position has changed
received = st.image.value
f = acq.acquire([st])
received, _ = f.result()
self.assertTrue(received, "No image received after 30 s")
# if calibration has happened (=good), it has changed the metadata
md = self.sed.getMetadata()
self.assertNotEqual(bad_cor, md[model.MD_POS_COR],
"metadata hasn't been updated while it should have")
class FakeCCD(model.HwComponent):
"""
Fake CCD component that returns a spot image
"""
def __init__(self, testCase, align):
super(FakeCCD, self).__init__("testccd", "ccd")
self.testCase = testCase
self.align = align
self.exposureTime = model.FloatContinuous(1, (1e-6, 1000), unit="s")
self.binning = model.TupleContinuous((1, 1), [(1, 1), (8, 8)],
cls=(int, long, float), unit="")
self.resolution = model.ResolutionVA((2160, 2560), [(1, 1), (2160, 2560)])
self.data = CCDDataFlow(self)
self._acquisition_thread = None
self._acquisition_lock = threading.Lock()
self._acquisition_init_lock = threading.Lock()
self._acquisition_must_stop = threading.Event()
self.fake_img = self.testCase.fake_img
def start_acquire(self, callback):
with self._acquisition_lock:
self._wait_acquisition_stopped()
target = self._acquire_thread
self._acquisition_thread = threading.Thread(target=target,
name="FakeCCD acquire flow thread",
args=(callback,))
self._acquisition_thread.start()
def stop_acquire(self):
with self._acquisition_lock:
with self._acquisition_init_lock:
self._acquisition_must_stop.set()
def _wait_acquisition_stopped(self):
"""
Waits until the acquisition thread is fully finished _iff_ it was requested
to stop.
"""
# "if" is to not wait if it's already finished
if self._acquisition_must_stop.is_set():
logging.debug("Waiting for thread to stop.")
self._acquisition_thread.join(10) # 10s timeout for safety
if self._acquisition_thread.isAlive():
logging.exception("Failed to stop the acquisition thread")
# Now let's hope everything is back to normal...
# ensure it's not set, even if the thread died prematurely
self._acquisition_must_stop.clear()
def _simulate_image(self):
"""
Generates the fake output.
"""
with self._acquisition_lock:
self.fake_img.metadata[model.MD_ACQ_DATE] = time.time()
output = model.DataArray(self.fake_img, self.fake_img.metadata)
return self.fake_img
def _acquire_thread(self, callback):
"""
Thread that simulates the CCD acquisition.
"""
try:
while not self._acquisition_must_stop.is_set():
# dummy
duration = 1
if self._acquisition_must_stop.wait(duration):
break
callback(self._simulate_image())
except:
logging.exception("Unexpected failure during image acquisition")
finally:
logging.debug("Acquisition thread closed")
self._acquisition_must_stop.clear()
class CCDDataFlow(model.DataFlow):
"""
This is an extension of model.DataFlow. It receives notifications from the
FakeCCD component once the fake output is generated. This is the dataflow to
which the CCD acquisition streams subscribe.
"""
def __init__(self, ccd):
model.DataFlow.__init__(self)
self.component = weakref.ref(ccd)
def start_generate(self):
try:
self.component().start_acquire(self.notify)
except ReferenceError:
pass
def stop_generate(self):
try:
self.component().stop_acquire()
except ReferenceError:
pass
if __name__ == '__main__':
# suite = unittest.TestLoader().loadTestsFromTestCase(TestAlignment)
# unittest.TextTestRunner(verbosity=2).run(suite)
unittest.main()
|
gstiebler/odemis
|
src/odemis/acq/test/spot_alignment_test.py
|
Python
|
gpl-2.0
| 10,396
| 0.002982
|
#!/usr/bin/env python3
"""
Created on 15 Aug 2016
@author: Bruno Beloff (bruno.beloff@southcoastscience.com)
Note: this script uses the Pt1000 temp sensor for temperature compensation.
"""
import time
from scs_core.data.json import JSONify
from scs_core.gas.afe_baseline import AFEBaseline
from scs_core.gas.afe_calib import AFECalib
from scs_core.gas.afe.pt1000_calib import Pt1000Calib
from scs_dfe.gas.afe.afe import AFE
from scs_dfe.gas.afe.pt1000 import Pt1000
from scs_dfe.interface.interface_conf import InterfaceConf
from scs_host.bus.i2c import I2C
from scs_host.sys.host import Host
# --------------------------------------------------------------------------------------------------------------------
try:
I2C.Sensors.open()
interface_conf = InterfaceConf.load(Host)
print(interface_conf)
print("-")
interface = interface_conf.interface()
print(interface)
print("-")
pt1000_calib = Pt1000Calib.load(Host)
print(pt1000_calib)
print("-")
pt1000 = Pt1000(pt1000_calib)
print(pt1000)
print("-")
afe_calib = AFECalib.load(Host)
print(afe_calib)
print("-")
afe_baseline = AFEBaseline.load(Host)
print(afe_baseline)
print("-")
sensors = afe_calib.sensors(afe_baseline)
print('\n\n'.join(str(sensor) for sensor in sensors))
print("-")
# ----------------------------------------------------------------------------------------------------------------
afe = AFE(interface, pt1000, sensors)
print(afe)
print("-")
start_time = time.time()
temp = afe.sample_pt1000()
elapsed = time.time() - start_time
print(temp)
print("elapsed:%0.3f" % elapsed)
print("-")
start_time = time.time()
sample = afe.sample_station(1)
elapsed = time.time() - start_time
print("SN1: %s" % sample)
print("elapsed:%0.3f" % elapsed)
print("-")
start_time = time.time()
sample = afe.sample_station(4)
elapsed = time.time() - start_time
print("SN4: %s" % sample)
print("elapsed:%0.3f" % elapsed)
print("=")
start_time = time.time()
samples = afe.sample()
elapsed = time.time() - start_time
print(samples)
print("elapsed:%0.3f" % elapsed)
print("-")
jstr = JSONify.dumps(samples)
print(jstr)
print("-")
finally:
I2C.Sensors.close()
|
south-coast-science/scs_dfe_eng
|
tests/gas/afe/afe_test.py
|
Python
|
mit
| 2,350
| 0.000426
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-08-20 01:22
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('app', '0002_auto_20170819_2342'),
]
operations = [
migrations.RemoveField(
model_name='tag',
name='articles',
),
]
|
rcatlin/ryancatlin-info
|
Api/app/migrations/0003_remove_tag_articles.py
|
Python
|
mit
| 388
| 0
|
from django.db import models
from django.core.validators import validate_email, validate_slug, validate_ipv46_address
from django.core.exceptions import ValidationError
from django.core.urlresolvers import reverse
from ava.core.models import TimeStampedModel
from ava.core_group.models import Group
from ava.core_identity.validators import validate_skype, validate_twitter
class Identity(TimeStampedModel):
# An identity is an online persona that can map to a single person, a group
# of people, or an automated service.
GROUP = 'GROUP'
PERSON = 'PERSON'
IDENTITY_TYPE_CHOICES = (
(GROUP, 'Group'),
(PERSON, 'Person'),
)
name = models.CharField(max_length=100, verbose_name='Name', null=True, blank=True)
description = models.TextField(max_length=500, verbose_name='Description', null=True, blank=True)
identity_type = models.CharField(max_length=10,
choices=IDENTITY_TYPE_CHOICES,
default=PERSON,
verbose_name='Identity Type')
groups = models.ManyToManyField(Group,
blank=True,
related_name='identities')
def __str__(self):
return self.name or ''
def get_absolute_url(self):
return reverse('identity-detail', kwargs={'pk': self.id})
class Meta:
verbose_name = 'identity'
verbose_name_plural = 'identities'
ordering = ['name']
class Person(TimeStampedModel):
first_name = models.CharField(max_length=75, validators=[validate_slug])
surname = models.CharField(max_length=75, validators=[validate_slug])
identity = models.ManyToManyField('Identity', blank=True)
def __str__(self):
return (self.first_name + " " + self.surname).strip() or ''
def get_absolute_url(self):
return reverse('person-detail', kwargs={'pk': self.id})
class Meta:
verbose_name = 'person'
verbose_name_plural = 'people'
ordering = ['surname', 'first_name']
class Identifier(TimeStampedModel):
"""
TODO: DocString
"""
EMAIL = 'EMAIL'
SKYPE = 'SKYPE'
IP = 'IPADD'
UNAME = 'UNAME'
TWITTER = 'TWITTER'
NAME = 'NAME'
IDENTIFIER_TYPE_CHOICES = (
(EMAIL, 'Email Address'),
(SKYPE, 'Skype ID'),
(IP, 'IP Address'),
(UNAME, 'Username'),
(TWITTER, 'Twitter ID'),
(NAME, 'Other name'),
)
identifier = models.CharField(max_length=100)
identifier_type = models.CharField(max_length=10,
choices=IDENTIFIER_TYPE_CHOICES,
default=EMAIL,
verbose_name='Identifier Type')
identity = models.ForeignKey('Identity', related_name='identifiers')
def __str__(self):
return self.identifier or ''
def get_absolute_url(self):
return reverse('identifier-detail', kwargs={'pk': self.id})
def clean(self):
        if self.identifier_type == 'EMAIL':
try:
validate_email(self.identifier)
except ValidationError:
raise ValidationError('Identifier is not a valid email address')
        if self.identifier_type == 'IPADD':
try:
validate_ipv46_address(self.identifier)
except ValidationError:
raise ValidationError('Identifier is not a valid IPv4/IPv6 address')
        if self.identifier_type == 'UNAME' or self.identifier_type == 'NAME':
try:
validate_slug(self.identifier)
except ValidationError:
raise ValidationError('Identifier is not a valid username or name')
        if self.identifier_type == 'SKYPE':
try:
validate_skype(self.identifier)
except ValidationError:
raise ValidationError('Identifier is not a valid Skype user name')
        if self.identifier_type == 'TWITTER':
try:
validate_twitter(self.identifier)
except ValidationError:
raise ValidationError('Identifier is not a valid Twitter user name')
class Meta:
unique_together = ("identifier", "identifier_type", "identity")
ordering = ['identifier', 'identifier_type']
|
cnbird1999/ava
|
ava/core_identity/models.py
|
Python
|
gpl-2.0
| 4,406
| 0.001816
|
# Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
from cinderclient.v1 import client as cinder_client_v1
from cinderclient.v2 import client as cinder_client_v2
from requests_mock.contrib import fixture
from testtools import matchers
import nova.conf
from nova import context
from nova import exception
from nova import test
from nova.volume import cinder
CONF = nova.conf.CONF
_image_metadata = {
'kernel_id': 'fake',
'ramdisk_id': 'fake'
}
_volume_id = "6edbc2f4-1507-44f8-ac0d-eed1d2608d38"
_instance_uuid = "f4fda93b-06e0-4743-8117-bc8bcecd651b"
_instance_uuid_2 = "f4fda93b-06e0-4743-8117-bc8bcecd651c"
_attachment_id = "3b4db356-253d-4fab-bfa0-e3626c0b8405"
_attachment_id_2 = "3b4db356-253d-4fab-bfa0-e3626c0b8406"
_device = "/dev/vdb"
_device_2 = "/dev/vdc"
_volume_attachment = \
[{"server_id": _instance_uuid,
"attachment_id": _attachment_id,
"host_name": "",
"volume_id": _volume_id,
"device": _device,
"id": _volume_id
}]
_volume_attachment_2 = _volume_attachment
_volume_attachment_2.append({"server_id": _instance_uuid_2,
"attachment_id": _attachment_id_2,
"host_name": "",
"volume_id": _volume_id,
"device": _device_2,
"id": _volume_id})
exp_volume_attachment = collections.OrderedDict()
exp_volume_attachment[_instance_uuid] = {'attachment_id': _attachment_id,
'mountpoint': _device}
exp_volume_attachment_2 = exp_volume_attachment
exp_volume_attachment_2[_instance_uuid_2] = {'attachment_id': _attachment_id_2,
'mountpoint': _device_2}
class BaseCinderTestCase(object):
def setUp(self):
super(BaseCinderTestCase, self).setUp()
cinder.reset_globals()
self.requests = self.useFixture(fixture.Fixture())
self.api = cinder.API()
self.context = context.RequestContext('username',
'project_id',
auth_token='token',
service_catalog=self.CATALOG)
def flags(self, *args, **kwargs):
super(BaseCinderTestCase, self).flags(*args, **kwargs)
cinder.reset_globals()
def create_client(self):
return cinder.cinderclient(self.context)
def test_context_with_catalog(self):
self.assertEqual(self.URL, self.create_client().client.get_endpoint())
def test_cinder_http_retries(self):
retries = 42
self.flags(http_retries=retries, group='cinder')
self.assertEqual(retries, self.create_client().client.connect_retries)
def test_cinder_api_insecure(self):
# The True/False negation is awkward, but better for the client
# to pass us insecure=True and we check verify_cert == False
self.flags(insecure=True, group='cinder')
self.assertFalse(self.create_client().client.session.verify)
def test_cinder_http_timeout(self):
timeout = 123
self.flags(timeout=timeout, group='cinder')
self.assertEqual(timeout, self.create_client().client.session.timeout)
def test_cinder_api_cacert_file(self):
cacert = "/etc/ssl/certs/ca-certificates.crt"
self.flags(cafile=cacert, group='cinder')
self.assertEqual(cacert, self.create_client().client.session.verify)
class CinderTestCase(BaseCinderTestCase, test.NoDBTestCase):
"""Test case for cinder volume v1 api."""
URL = "http://localhost:8776/v1/project_id"
CATALOG = [{
"type": "volumev2",
"name": "cinderv2",
"endpoints": [{"publicURL": URL}]
}]
def create_client(self):
c = super(CinderTestCase, self).create_client()
self.assertIsInstance(c, cinder_client_v1.Client)
return c
def stub_volume(self, **kwargs):
volume = {
'display_name': None,
'display_description': None,
"attachments": [],
"availability_zone": "cinder",
"created_at": "2012-09-10T00:00:00.000000",
"id": _volume_id,
"metadata": {},
"size": 1,
"snapshot_id": None,
"status": "available",
"volume_type": "None",
"bootable": "true",
"multiattach": "true"
}
volume.update(kwargs)
return volume
def test_cinder_endpoint_template(self):
endpoint = 'http://other_host:8776/v1/%(project_id)s'
self.flags(endpoint_template=endpoint, group='cinder')
self.assertEqual('http://other_host:8776/v1/project_id',
self.create_client().client.endpoint_override)
def test_get_non_existing_volume(self):
self.requests.get(self.URL + '/volumes/nonexisting',
status_code=404)
self.assertRaises(exception.VolumeNotFound, self.api.get, self.context,
'nonexisting')
def test_volume_with_image_metadata(self):
v = self.stub_volume(id='1234', volume_image_metadata=_image_metadata)
m = self.requests.get(self.URL + '/volumes/5678', json={'volume': v})
volume = self.api.get(self.context, '5678')
self.assertThat(m.last_request.path,
matchers.EndsWith('/volumes/5678'))
self.assertIn('volume_image_metadata', volume)
self.assertEqual(_image_metadata, volume['volume_image_metadata'])
class CinderV2TestCase(BaseCinderTestCase, test.NoDBTestCase):
"""Test case for cinder volume v2 api."""
URL = "http://localhost:8776/v2/project_id"
CATALOG = [{
"type": "volumev2",
"name": "cinder",
"endpoints": [{"publicURL": URL}]
}]
def setUp(self):
super(CinderV2TestCase, self).setUp()
CONF.set_override('catalog_info',
'volumev2:cinder:publicURL', group='cinder')
self.addCleanup(CONF.reset)
def create_client(self):
c = super(CinderV2TestCase, self).create_client()
self.assertIsInstance(c, cinder_client_v2.Client)
return c
def stub_volume(self, **kwargs):
volume = {
'name': None,
'description': None,
"attachments": [],
"availability_zone": "cinderv2",
"created_at": "2013-08-10T00:00:00.000000",
"id": _volume_id,
"metadata": {},
"size": 1,
"snapshot_id": None,
"status": "available",
"volume_type": "None",
"bootable": "true",
"multiattach": "true"
}
volume.update(kwargs)
return volume
def test_cinder_endpoint_template(self):
endpoint = 'http://other_host:8776/v2/%(project_id)s'
self.flags(endpoint_template=endpoint, group='cinder')
self.assertEqual('http://other_host:8776/v2/project_id',
self.create_client().client.endpoint_override)
def test_get_non_existing_volume(self):
self.requests.get(self.URL + '/volumes/nonexisting',
status_code=404)
self.assertRaises(exception.VolumeNotFound, self.api.get, self.context,
'nonexisting')
def test_volume_with_image_metadata(self):
v = self.stub_volume(id='1234', volume_image_metadata=_image_metadata)
self.requests.get(self.URL + '/volumes/5678', json={'volume': v})
volume = self.api.get(self.context, '5678')
self.assertIn('volume_image_metadata', volume)
self.assertEqual(_image_metadata, volume['volume_image_metadata'])
def test_volume_without_attachment(self):
v = self.stub_volume(id='1234')
self.requests.get(self.URL + '/volumes/5678', json={'volume': v})
volume = self.api.get(self.context, '5678')
self.assertIsNone(volume.get('attachments'))
def test_volume_with_one_attachment(self):
v = self.stub_volume(id='1234', attachments=_volume_attachment)
self.requests.get(self.URL + '/volumes/5678', json={'volume': v})
volume = self.api.get(self.context, '5678')
self.assertIn('attachments', volume)
self.assertEqual(exp_volume_attachment, volume['attachments'])
def test_volume_with_two_attachments(self):
v = self.stub_volume(id='1234', attachments=_volume_attachment_2)
self.requests.get(self.URL + '/volumes/5678', json={'volume': v})
volume = self.api.get(self.context, '5678')
self.assertIn('attachments', volume)
self.assertEqual(exp_volume_attachment_2, volume['attachments'])
|
sebrandon1/nova
|
nova/tests/unit/test_cinder.py
|
Python
|
apache-2.0
| 9,333
| 0.000107
|
# SPDX-License-Identifier: LGPL-3.0-only
"""Base classes and decorators for the doorstop.core package."""
import abc
import functools
import os
from typing import Dict
import yaml
from doorstop import common, settings
from doorstop.common import DoorstopError, DoorstopInfo, DoorstopWarning
log = common.logger(__name__)
def add_item(func):
"""Add and cache the returned item."""
@functools.wraps(func)
def wrapped(self, *args, **kwargs):
item = func(self, *args, **kwargs) or self
if settings.ADDREMOVE_FILES and item.tree:
item.tree.vcs.add(item.path)
# pylint: disable=W0212
if item not in item.document._items:
item.document._items.append(item)
if settings.CACHE_ITEMS and item.tree:
item.tree._item_cache[item.uid] = item
log.trace("cached item: {}".format(item)) # type: ignore
return item
return wrapped
def edit_item(func):
"""Mark the returned item as modified."""
@functools.wraps(func)
def wrapped(self, *args, **kwargs):
item = func(self, *args, **kwargs) or self
if settings.ADDREMOVE_FILES and item.tree:
item.tree.vcs.edit(item.path)
return item
return wrapped
def delete_item(func):
"""Remove and expunge the returned item."""
@functools.wraps(func)
def wrapped(self, *args, **kwargs):
item = func(self, *args, **kwargs) or self
if settings.ADDREMOVE_FILES and item.tree:
item.tree.vcs.delete(item.path)
# pylint: disable=W0212
if item in item.document._items:
item.document._items.remove(item)
if settings.CACHE_ITEMS and item.tree:
item.tree._item_cache[item.uid] = None
log.trace("expunged item: {}".format(item)) # type: ignore
BaseFileObject.delete(item, item.path)
return item
return wrapped
def add_document(func):
"""Add and cache the returned document."""
@functools.wraps(func)
def wrapped(self, *args, **kwargs):
document = func(self, *args, **kwargs) or self
if settings.ADDREMOVE_FILES and document.tree:
document.tree.vcs.add(document.config)
# pylint: disable=W0212
if settings.CACHE_DOCUMENTS and document.tree:
document.tree._document_cache[document.prefix] = document
log.trace("cached document: {}".format(document)) # type: ignore
return document
return wrapped
def edit_document(func):
"""Mark the returned document as modified."""
@functools.wraps(func)
def wrapped(self, *args, **kwargs):
document = func(self, *args, **kwargs) or self
if settings.ADDREMOVE_FILES and document.tree:
document.tree.vcs.edit(document.config)
return document
return wrapped
def delete_document(func):
"""Remove and expunge the returned document."""
@functools.wraps(func)
def wrapped(self, *args, **kwargs):
document = func(self, *args, **kwargs) or self
if settings.ADDREMOVE_FILES and document.tree:
document.tree.vcs.delete(document.config)
# pylint: disable=W0212
if settings.CACHE_DOCUMENTS and document.tree:
document.tree._document_cache[document.prefix] = None
log.trace("expunged document: {}".format(document)) # type: ignore
try:
os.rmdir(document.path)
except OSError:
# Directory wasn't empty
pass
return document
return wrapped
class BaseValidatable(metaclass=abc.ABCMeta):
"""Abstract Base Class for objects that can be validated."""
def validate(self, skip=None, document_hook=None, item_hook=None):
"""Check the object for validity.
:param skip: list of document prefixes to skip
:param document_hook: function to call for custom document
validation
:param item_hook: function to call for custom item validation
:return: indication that the object is valid
"""
valid = True
# Display all issues
for issue in self.get_issues(
skip=skip, document_hook=document_hook, item_hook=item_hook
):
if isinstance(issue, DoorstopInfo) and not settings.WARN_ALL:
log.info(issue)
elif isinstance(issue, DoorstopWarning) and not settings.ERROR_ALL:
log.warning(issue)
else:
assert isinstance(issue, DoorstopError)
log.error(issue)
valid = False
# Return the result
return valid
@abc.abstractmethod
def get_issues(self, skip=None, document_hook=None, item_hook=None):
"""Yield all the objects's issues.
:param skip: list of document prefixes to skip
:param document_hook: function to call for custom document
validation
:param item_hook: function to call for custom item validation
:return: generator of :class:`~doorstop.common.DoorstopError`,
:class:`~doorstop.common.DoorstopWarning`,
:class:`~doorstop.common.DoorstopInfo`
"""
@property
def issues(self):
"""Get a list of the item's issues."""
return list(self.get_issues())
def auto_load(func):
"""Call self.load() before execution."""
@functools.wraps(func)
def wrapped(self, *args, **kwargs):
self.load()
return func(self, *args, **kwargs)
return wrapped
def auto_save(func):
"""Call self.save() after execution."""
@functools.wraps(func)
def wrapped(self, *args, **kwargs):
result = func(self, *args, **kwargs)
if self.auto:
self.save()
return result
return wrapped
class BaseFileObject(metaclass=abc.ABCMeta):
"""Abstract Base Class for objects whose attributes save to a file.
For properties that are saved to a file, decorate their getters
with :func:`auto_load` and their setters with :func:`auto_save`.
"""
auto = True # set to False to delay automatic save until explicit save
def __init__(self):
self.path = None
self.root = None
self._data: Dict[str, str] = {}
self._exists = True
self._loaded = False
def __hash__(self):
return hash(self.path)
def __eq__(self, other):
return isinstance(other, self.__class__) and self.path == other.path
def __ne__(self, other):
return not self == other
@staticmethod
def _create(path, name):
"""Create a new file for the object.
:param path: path to new file
:param name: humanized name for this file
:raises: :class:`~doorstop.common.DoorstopError` if the file
already exists
"""
if os.path.exists(path):
raise DoorstopError("{} already exists: {}".format(name, path))
common.create_dirname(path)
common.touch(path)
@abc.abstractmethod
def load(self, reload=False):
"""Load the object's properties from its file."""
# 1. Start implementations of this method with:
if self._loaded and not reload:
return
# 2. Call self._read() and update properties here
# 3. End implementations of this method with:
self._loaded = True
def _read(self, path):
"""Read text from the object's file.
:param path: path to a text file
        :return: contents of the text file
"""
if not self._exists:
msg = "cannot read from deleted: {}".format(self.path)
raise DoorstopError(msg)
return common.read_text(path)
@staticmethod
def _load(text, path, **kwargs):
"""Load YAML data from text.
:param text: text read from a file
:param path: path to the file (for displaying errors)
:return: dictionary of YAML data
"""
return common.load_yaml(text, path, **kwargs)
@abc.abstractmethod
def save(self):
"""Format and save the object's properties to its file."""
# 1. Call self._write() with the current properties here
# 2. End implementations of this method with:
self._loaded = False
self.auto = True
def _write(self, text, path):
"""Write text to the object's file.
:param text: text to write to a file
:param path: path to the file
"""
if not self._exists:
raise DoorstopError("cannot save to deleted: {}".format(self))
common.write_text(text, path)
@staticmethod
def _dump(data):
"""Dump YAML data to text.
:param data: dictionary of YAML data
:return: text to write to a file
"""
return yaml.dump(data, default_flow_style=False, allow_unicode=True)
# properties #############################################################
@property
def relpath(self):
"""Get the item's relative path string."""
assert self.path
relpath = os.path.relpath(self.path, self.root)
return "@{}{}".format(os.sep, relpath)
# extended attributes ####################################################
@property # type: ignore
@auto_load
def extended(self):
"""Get a list of all extended attribute names."""
names = []
for name in self._data:
if not hasattr(self, name):
names.append(name)
return sorted(names)
@auto_load
def get(self, name, default=None):
"""Get an extended attribute.
:param name: name of extended attribute
:param default: value to return for missing attributes
:return: value of extended attribute
"""
if hasattr(self, name):
cname = self.__class__.__name__
msg = "'{n}' can be accessed from {c}.{n}".format(n=name, c=cname)
log.trace(msg) # type: ignore
return getattr(self, name)
else:
return self._data.get(name, default)
@auto_load
@auto_save
def set(self, name, value):
"""Set an extended attribute.
:param name: name of extended attribute
:param value: value to set
"""
if hasattr(self, name):
cname = self.__class__.__name__
msg = "'{n}' can be set from {c}.{n}".format(n=name, c=cname)
log.trace(msg) # type: ignore
setattr(self, name, value)
else:
self._data[name] = value
# actions ################################################################
def delete(self, path):
"""Delete the object's file from the file system."""
if self._exists:
log.info("deleting {}...".format(path))
common.delete(path)
self._loaded = False # force the object to reload
self._exists = False # but, prevent future access
else:
log.warning("already deleted: {}".format(self))
|
jacebrowning/doorstop
|
doorstop/core/base.py
|
Python
|
lgpl-3.0
| 11,112
| 0
|
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
config = {
'description': 'ex48',
'author': 'Zhao, Li',
'url': 'URL to get it at.',
'download_url': 'Where to download it.',
'author_email': 'zhaoace@gmail.com',
'version': '0.1',
'install_requires': ['nose'],
'packages': ['ex48'],
'scripts': [],
'name': 'ex48'
}
setup(**config)
|
zhaoace/codecraft
|
python/projects/learnpythonthehardway.org/ex48/setup.py
|
Python
|
unlicense
| 434
| 0.002304
|
# -*- encoding: utf-8 -*-
#
# Copyright © 2013 IBM Corp
#
# Author: Tong Li <litong01@us.ibm.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging.handlers
import os
import tempfile
from ceilometer.dispatcher import file
from ceilometer.openstack.common.fixture import config
from ceilometer.openstack.common import test
from ceilometer.publisher import utils
class TestDispatcherFile(test.BaseTestCase):
def setUp(self):
super(TestDispatcherFile, self).setUp()
self.CONF = self.useFixture(config.Config()).conf
def test_file_dispatcher_with_all_config(self):
        # Create a temporary file to get a file name
tf = tempfile.NamedTemporaryFile('r')
filename = tf.name
tf.close()
self.CONF.dispatcher_file.file_path = filename
self.CONF.dispatcher_file.max_bytes = 50
self.CONF.dispatcher_file.backup_count = 5
dispatcher = file.FileDispatcher(self.CONF)
# The number of the handlers should be 1
self.assertEqual(1, len(dispatcher.log.handlers))
# The handler should be RotatingFileHandler
handler = dispatcher.log.handlers[0]
self.assertIsInstance(handler,
logging.handlers.RotatingFileHandler)
msg = {'counter_name': 'test',
'resource_id': self.id(),
'counter_volume': 1,
}
msg['message_signature'] = utils.compute_signature(
msg,
self.CONF.publisher.metering_secret,
)
# The record_metering_data method should exist and not produce errors.
dispatcher.record_metering_data(msg)
# After the method call above, the file should have been created.
self.assertTrue(os.path.exists(handler.baseFilename))
def test_file_dispatcher_with_path_only(self):
        # Create a temporary file to get a file name
tf = tempfile.NamedTemporaryFile('r')
filename = tf.name
tf.close()
self.CONF.dispatcher_file.file_path = filename
self.CONF.dispatcher_file.max_bytes = None
self.CONF.dispatcher_file.backup_count = None
dispatcher = file.FileDispatcher(self.CONF)
# The number of the handlers should be 1
self.assertEqual(1, len(dispatcher.log.handlers))
# The handler should be RotatingFileHandler
handler = dispatcher.log.handlers[0]
self.assertIsInstance(handler,
logging.FileHandler)
msg = {'counter_name': 'test',
'resource_id': self.id(),
'counter_volume': 1,
}
msg['message_signature'] = utils.compute_signature(
msg,
self.CONF.publisher.metering_secret,
)
# The record_metering_data method should exist and not produce errors.
dispatcher.record_metering_data(msg)
# After the method call above, the file should have been created.
self.assertTrue(os.path.exists(handler.baseFilename))
def test_file_dispatcher_with_no_path(self):
self.CONF.dispatcher_file.file_path = None
dispatcher = file.FileDispatcher(self.CONF)
# The log should be None
self.assertIsNone(dispatcher.log)
|
tanglei528/ceilometer
|
ceilometer/tests/dispatcher/test_file.py
|
Python
|
apache-2.0
| 3,762
| 0
|
#!/usr/bin/env python
import os
import sys
from optparse import OptionParser
def makeoptions():
parser = OptionParser()
parser.add_option("-v", "--verbosity",
type = int,
action="store",
dest="verbosity",
default=1,
help="Tests verbosity level, one of 0, 1, 2 or 3")
return parser
if __name__ == '__main__':
import djpcms
import sys
options, tags = makeoptions().parse_args()
verbosity = options.verbosity
p = os.path
path = p.join(p.split(p.abspath(__file__))[0],'tests')
sys.path.insert(0, path)
from testrunner import run
run(tags, verbosity = verbosity)
|
strogo/djpcms
|
runtests.py
|
Python
|
bsd-3-clause
| 722
| 0.012465
|
import tensorflow as tf
import numpy as np
import cv2
import matplotlib.pyplot as plt  # needed for plt.subplots() in the training loop below
img_original = cv2.imread('jack.jpg') #data.camera()
img = cv2.resize(img_original, (64*5,64*5))
# for positions
xs = []
# for corresponding colors
ys = []
for row_i in range(img.shape[0]):
for col_i in range(img.shape[1]):
xs.append([row_i, col_i])
ys.append(img[row_i, col_i])
# list->numpy array
xs,ys = np.array(xs),np.array(ys)
# normalising input img
xs = (xs-np.mean(xs))/np.std(xs)
# placeholders for input and output
X = tf.placeholder(tf.float32, shape=[None, 2], name='X')
Y = tf.placeholder(tf.float32, shape=[None, 3], name='Y')
#defining weights,bias,non-linearity
def linear(X, n_input, n_output, activation=None, scope=None):
with tf.variable_scope(scope or "linear"):
W = tf.get_variable(
name='W',
shape=[n_input, n_output],
initializer=tf.random_normal_initializer(mean=0.0, stddev=0.1))
b = tf.get_variable(
name='b',
shape=[n_output],
initializer=tf.constant_initializer())
h = tf.matmul(X, W) + b
if activation is not None:
h = activation(h)
return h
# building a fully-connected network: 2 inputs, 6 hidden layers of 64 units, 3 outputs
n_neurons = [2,64,64,64,64,64,64,3]
# defining the per-pixel distance used in the cost (the Adam optimizer is defined further below)
def distance(p1, p2):
return tf.abs(p1 - p2)
#building network
current_input = X
for layer_i in range(1, len(n_neurons)):
current_input = linear(
X=current_input,
n_input=n_neurons[layer_i - 1],
n_output=n_neurons[layer_i],
activation=tf.nn.relu if (layer_i+1) < len(n_neurons) else None,
scope='layer_' + str(layer_i))
Y_pred = current_input
cost = tf.reduce_mean(tf.reduce_sum(distance(Y_pred,Y),1) )
optimizer = tf.train.AdamOptimizer(0.001).minimize(cost)
#training Neural Net
n_iterations = 500
batch_size = 50
with tf.Session() as sess:
sess.run(tf.initialize_all_variables())
prev_training_cost = 0.0
for it_i in range(n_iterations):
idxs = np.random.permutation(range(len(xs)))
n_batches = len(idxs) // batch_size
for batch_i in range(n_batches):
idxs_i = idxs[batch_i * batch_size: (batch_i + 1) * batch_size]
sess.run(optimizer, feed_dict={X: xs[idxs_i], Y: ys[idxs_i] })
training_cost = sess.run(cost, feed_dict={X: xs, Y: ys})
print(it_i, training_cost)
if (it_i + 1) % 20 == 0:
ys_pred = Y_pred.eval(feed_dict={X: xs}, session=sess)
fig, ax = plt.subplots(1, 1)
            print(ys_pred.shape, img.shape)
            print(ys_pred)
img = np.clip(ys_pred.reshape(img.shape), 0, 255).astype(np.uint8)
cv2.imwrite("face____" + str(it_i) + ".jpg", img)
|
iamharshit/ML_works
|
Photo Painter/NN.py
|
Python
|
mit
| 2,722
| 0.013226
|
from django import forms
from django.core.urlresolvers import reverse
from django.forms.widgets import RadioFieldRenderer
from django.utils.encoding import force_text
from django.utils.html import format_html
from django.utils.safestring import mark_safe
class BootstrapChoiceFieldRenderer(RadioFieldRenderer):
"""
An object used by RadioSelect to enable customization of radio widgets.
"""
def render(self):
"""
Outputs a <div> for this set of choice fields.
        If an id was given to the field, it is applied to the <div> (each
item in the list will get an id of `$id_$i`).
"""
id_ = self.attrs.get('id', None)
start_tag = format_html('<div id="{0}">', id_) if id_ else '<div>'
output = [start_tag]
for widget in self:
output.append(format_html('<div class="radio">{0}</div>', force_text(widget)))
output.append('</div>')
return mark_safe('\n'.join(output))
class UseCustomRegWidget(forms.MultiWidget):
"""
This widget is for three fields on event add/edit under Registration:
* use_custom_reg_form
* reg_form
* bind_reg_form_to_conf_only
"""
def __init__(self, attrs=None, reg_form_choices=None, event_id=None):
self.attrs = attrs
self.reg_form_choices = reg_form_choices
self.event_id = event_id
if not self.attrs:
self.attrs = {'id': 'use_custom_reg'}
self.widgets = (
forms.CheckboxInput(),
forms.Select(attrs={'class': 'form-control'}),
forms.RadioSelect(renderer=BootstrapChoiceFieldRenderer)
)
super(UseCustomRegWidget, self).__init__(self.widgets, attrs)
def render(self, name, value, attrs=None):
if not isinstance(value, list):
value = self.decompress(value)
final_attrs = self.build_attrs(attrs)
id_ = final_attrs.get('id', None)
use_custom_reg_form_widget = self.widgets[0]
rendered_use_custom_reg_form = self.render_widget(
use_custom_reg_form_widget,
name, value, final_attrs,
0, id_
)
reg_form_widget = self.widgets[1]
reg_form_widget.choices = self.reg_form_choices
#reg_form_widget.attrs = {'size':'8'}
rendered_reg_form = self.render_widget(
reg_form_widget,
name, value, final_attrs,
1, id_
)
bind_reg_form_to_conf_only_widget = self.widgets[2]
choices = (
('1', mark_safe('Use one form for all pricings %s' % rendered_reg_form)),
)
bind_reg_form_to_conf_only_widget.choices = choices
rendered_bind_reg_form_to_conf_only = self.render_widget(
bind_reg_form_to_conf_only_widget,
name, value, final_attrs,
2, id_
)
rendered_bind_reg_form_to_conf_only = rendered_bind_reg_form_to_conf_only.replace(
'%s</label>' % rendered_reg_form, "</label>%s" % rendered_reg_form
)
if self.event_id:
manage_custom_reg_link = """
<div>
<a href="%s" target="_blank">Manage Custom Registration Form</a>
</div>
""" % reverse('event.event_custom_reg_form_list', args=[self.event_id])
else:
manage_custom_reg_link = ''
output_html = """
<div id="t-events-use-customreg-box">
<div id="t-events-use-customreg-checkbox" class="checkbox">
<label for="id_%s_%s">%s Use Custom Registration Form</label>
</div>
<div id="t-events-one-or-separate-form">%s</div>
%s
</div>
""" % (
name, '0',
rendered_use_custom_reg_form,
rendered_bind_reg_form_to_conf_only,
manage_custom_reg_link
)
return mark_safe(output_html)
def render_widget(self, widget, name, value, attrs, index=0, id=None):
i = index
id_ = id
if value:
try:
widget_value = value[i]
except IndexError:
self.fields['use_reg_form'].initial = None
else:
widget_value = None
if id_:
final_attrs = dict(attrs, id='%s_%s' % (id_, i))
if widget.__class__.__name__.lower() != 'select':
classes = final_attrs.get('class', None)
if classes:
classes = classes.split(' ')
classes.remove('form-control')
classes = ' '.join(classes)
final_attrs['class'] = classes
return widget.render(name+'_%s' %i, widget_value, final_attrs)
def decompress(self, value):
if value:
data_list = value.split(',')
if data_list[0] == '1':
data_list[0] = 'on'
return data_list
return None
|
alirizakeles/tendenci
|
tendenci/apps/events/widgets.py
|
Python
|
gpl-3.0
| 4,968
| 0.00161
|
# Prints exactly what the script is about to do
print "How many keys are there for the swedish alphabet?"
# Prints the amount of the top row
print "The top row has 11 letter keys"
# Assigns a value to top
top = 11.0
# Prints the amount of the middle row
print "The middle row has 11 letter keys"
# Assigns a value to middle
middle = 11
# Prints the amount of the bottom row
print "The bottom row has 7 letter keys"
# Assigns a value to bottom
bottom = 7
# Prints text, then the combined value from the three rows
print "The total number of letter keys are ", top + middle + bottom
|
seravok/LPTHW
|
StudyDrillMath.py
|
Python
|
gpl-3.0
| 590
| 0
|
import py
import re
from testing.test_interpreter import BaseTestInterpreter
from testing.test_main import TestMain
from hippy.main import entry_point
class TestOptionsMain(TestMain):
def test_version_compare(self, capfd):
output = self.run('''<?php
$versions = array(
'1',
'1.0',
'1.01',
'1.1',
'1.10',
'1.10b',
'1.10.0',
'-3.2.1',
'1rc.0.2',
'bullshit.rc.9.2beta',
);
foreach ($versions as $version) {
if (isset($last)) {
$comp = version_compare($last, $version);
echo $comp;
}
$last = $version;
}
?>''', capfd)
assert output == "-1-10-11-11-11"
def test_version_compare_with_cmp(self, capfd):
output = self.run('''<?php
$versions = array(
'1',
'1.0',
'1.01',
'1.1',
'1.10',
'1.10b',
'1.10.0',
'-3.2.1',
'1rc.0.2',
'bullshit.rc.9.2beta',
);
$co = array(
'=',
'==',
'eq',
'!=',
'<>',
'ne',
'>',
'gt',
'<',
'lt',
'>=',
'ge',
'<=',
'le',
);
foreach ($versions as $version) {
if (isset($last)) {
foreach ($co as $c) {
$comp = version_compare($last, $version, $c);
echo (int)$comp;
}
}
$last = $version;
}
?>''', capfd)
assert output == "000111001100110001110011001111100000001111000111001100110001111100110000011100110011000111110011000001110011001100011111001100"
class TestOptionsFunc(BaseTestInterpreter):
def test_get_cfg_var(self):
php_version = "6.0"
test_value = "test_value"
space = self.space
def setup_conf(interp):
interp.config.ini.update({
'php_version': space.wrap(php_version),
'test_value': space.wrap(test_value),
})
output = self.run('''
echo get_cfg_var('php_version');
echo get_cfg_var('test_value');
''', extra_func=setup_conf)
assert self.space.str_w(output[0]) == php_version
assert self.space.str_w(output[1]) == test_value
def test_get_cfg_var2(self):
output = self.run('''
echo get_cfg_var('');
echo get_cfg_var(' ');
echo get_cfg_var('non_existent_var');
echo get_cfg_var(null);
echo get_cfg_var(1);
echo get_cfg_var(1.0);
''')
assert all([o == self.space.w_False for o in output])
def test_get_cfg_var3(self):
with self.warnings() as w:
output = self.run('''
echo get_cfg_var(array(1));
class Test {};
echo get_cfg_var(new Test);
''')
assert output[0] == self.space.w_Null
assert output[1] == self.space.w_Null
assert w[0] == 'Warning: get_cfg_var() ' +\
'expects parameter 1 to be string, array given'
assert w[1] == 'Warning: get_cfg_var() ' +\
'expects parameter 1 to be string, object given'
|
xhava/hippyvm
|
testing/test_options.py
|
Python
|
mit
| 3,254
| 0.001537
|
"""
Unit tests for the base mechanism class.
"""
import pytest
from azmq.mechanisms.base import Mechanism
from azmq.errors import ProtocolError
@pytest.mark.asyncio
async def test_expect_command(reader):
reader.write(b'\x04\x09\x03FOOhello')
reader.seek(0)
result = await Mechanism._expect_command(reader=reader, name=b'FOO')
assert result == b'hello'
@pytest.mark.asyncio
async def test_expect_command_large(reader):
reader.write(b'\x06\x00\x00\x00\x00\x00\x00\x00\x09\x03FOOhello')
reader.seek(0)
result = await Mechanism._expect_command(reader=reader, name=b'FOO')
assert result == b'hello'
@pytest.mark.asyncio
async def test_expect_command_invalid_size_type(reader):
reader.write(b'\x03')
reader.seek(0)
with pytest.raises(ProtocolError):
await Mechanism._expect_command(reader=reader, name=b'FOO')
@pytest.mark.asyncio
async def test_expect_command_invalid_name_size(reader):
reader.write(b'\x04\x09\x04HELOhello')
reader.seek(0)
with pytest.raises(ProtocolError):
await Mechanism._expect_command(reader=reader, name=b'FOO')
@pytest.mark.asyncio
async def test_expect_command_invalid_name(reader):
reader.write(b'\x04\x08\x03BARhello')
reader.seek(0)
with pytest.raises(ProtocolError):
await Mechanism._expect_command(reader=reader, name=b'FOO')
@pytest.mark.asyncio
async def test_read_frame(reader):
reader.write(b'\x00\x03foo')
reader.seek(0)
async def on_command(name, data):
assert False
result = await Mechanism.read(reader=reader, on_command=on_command)
assert result == (b'foo', True)
@pytest.mark.asyncio
async def test_read_frame_large(reader):
reader.write(b'\x02\x00\x00\x00\x00\x00\x00\x00\x03foo')
reader.seek(0)
async def on_command(name, data):
assert False
result = await Mechanism.read(reader=reader, on_command=on_command)
assert result == (b'foo', True)
@pytest.mark.asyncio
async def test_read_command(reader):
reader.write(b'\x04\x09\x03BARhello\x00\x03foo')
reader.seek(0)
async def on_command(name, data):
assert name == b'BAR'
assert data == b'hello'
result = await Mechanism.read(reader=reader, on_command=on_command)
assert result == (b'foo', True)
@pytest.mark.asyncio
async def test_read_invalid_size_type(reader):
reader.write(b'\x09')
reader.seek(0)
async def on_command(name, data):
assert False
with pytest.raises(ProtocolError):
await Mechanism.read(reader=reader, on_command=on_command)
|
ereOn/azmq
|
tests/unit/test_mechanisms/test_base.py
|
Python
|
gpl-3.0
| 2,577
| 0
|
# -*- coding: utf8 -*-
from phystricks import *
def MBWHooeesXIrsz():
pspict,fig = SinglePicture("MBWHooeesXIrsz")
pspict.dilatation(0.3)
l=4
A=Point(0,0)
B=Point(l,0)
C=Point(l,l)
trig=Polygon(A,B,C)
trig.put_mark(0.2,pspict=pspict)
trig.edges[0].put_code(n=2,d=0.1,l=0.2,pspict=pspict)
trig.edges[1].put_code(n=2,d=0.1,l=0.2,pspict=pspict)
no_symbol(trig.vertices)
pspict.DrawGraphs(trig)
pspict.comment="Vérifier la longueur des codages."
fig.no_figure()
fig.conclude()
fig.write_the_file()
|
LaurentClaessens/phystricks
|
testing/demonstration/phystricksMBWHooeesXIrsz.py
|
Python
|
gpl-3.0
| 563
| 0.039146
|
#!/usr/bin/env python3
# Copyright (c) 2016 Anki, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License in the file LICENSE.txt or at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''"If This Then That" Gmail example
This example demonstrates how "If This Then That" (http://ifttt.com) can be used to
make Cozmo respond when a Gmail account receives an email. Instructions below
will lead you through setting up an applet on the IFTTT website. When the applet
trigger is called (which sends a web request received by the web server started
in this example), Cozmo will play an animation, speak the email sender's name and
show a mailbox image on his face.
Please place Cozmo on the charger for this example. When necessary, he will be
rolled off and back on.
Follow these steps to set up and run the example:
1) Provide a static IP, URL or similar that can be reached from the If This
Then That server. One easy way to do this is with ngrok, which sets up
a secure tunnel to localhost running on your machine.
To set up ngrok:
a) Follow instructions here to download and install:
https://ngrok.com/download
b) Run this command to create a secure public URL for port 8080:
./ngrok http 8080
c) Note the HTTP forwarding address shown in the terminal (e.g., http://55e57164.ngrok.io).
You will use this address in your applet, below.
WARNING: Using ngrok exposes your local web server to the internet. See the ngrok
documentation for more information: https://ngrok.com/docs
2) Set up your applet on the "If This Then That" website.
a) Sign up and sign into https://ifttt.com
b) Create an applet: https://ifttt.com/create
c) Set up your trigger.
1. Click "this".
2. Select "Gmail" as your service. If prompted, click "Connect",
select your Gmail account, and click “Allow” to provide permissions
to IFTTT for your email account. Click "Done".
3. Under "Choose a Trigger", select “Any new email in inbox".
d) Set up your action.
        1. Click "that".
        2. Select "Maker" to set it as your action channel. Connect to the Maker channel if prompted.
        3. Click "Make a web request" and fill out the fields as follows. Remember your publicly
accessible URL from above (e.g., http://55e57164.ngrok.io) and use it in the URL field,
followed by "/iftttGmail" as shown below:
URL: http://55e57164.ngrok.io/iftttGmail
Method: POST
Content Type: application/json
Body: {"FromAddress":"{{FromAddress}}"}
        5. Click "Create Action" then "Finish".
3) Test your applet.
a) Run this script at the command line: ./ifttt_gmail.py
b) On ifttt.com, on your applet page, click “Check now”. See that IFTTT confirms that the applet
was checked.
c) Send an email to the Gmail account in your recipe
d) On your IFTTT applet webpage, again click “Check now”. This should cause IFTTT to detect that
the email was received and send a web request to the ifttt_gmail.py script.
e) In response to the ifttt web request, Cozmo should roll off the charger, raise and lower
his lift, announce the email, and then show a mailbox image on his face.
'''
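# A hypothetical local test (an assumption, not part of the original instructions):
# once this script and ngrok are running, the IFTTT web request can be simulated
# from a terminal with curl; the sender address below is made up.
#
#   curl -X POST http://localhost:8080/iftttGmail \
#        -H "Content-Type: application/json" \
#        -d '{"FromAddress":"jane.doe@example.com"}'
#
# Cozmo should then respond as if IFTTT had forwarded a new email from "jane.doe".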
import asyncio
import re
import sys
try:
from aiohttp import web
except ImportError:
sys.exit("Cannot import from aiohttp. Do `pip3 install --user aiohttp` to install")
import cozmo
from common import IFTTTRobot
app = web.Application()
async def serve_gmail(request):
'''Define an HTTP POST handler for receiving requests from If This Then That.
You may modify this method to change how Cozmo reacts to the email
being received.
'''
json_object = await request.json()
# Extract the name of the email sender.
from_email_address = json_object["FromAddress"]
# Use a regular expression to break apart pieces of the email address
match_object = re.search(r'([\w.]+)@([\w.]+)', from_email_address)
email_local_part = match_object.group(1)
robot = request.app['robot']
async def read_name():
try:
async with robot.perform_off_charger():
'''If necessary, Move Cozmo's Head and Lift to make it easy to see Cozmo's face.'''
await robot.get_in_position()
# First, have Cozmo play animation "ID_pokedB", which tells
# Cozmo to raise and lower his lift. To change the animation,
# you may replace "ID_pokedB" with another animation. Run
# remote_control_cozmo.py to see a list of animations.
await robot.play_anim(name='ID_pokedB').wait_for_completed()
# Next, have Cozmo speak the name of the email sender.
await robot.say_text("Email from " + email_local_part).wait_for_completed()
# Last, have Cozmo display an email image on his face.
robot.display_image_file_on_face("../face_images/ifttt_gmail.png")
except cozmo.RobotBusy:
cozmo.logger.warning("Robot was busy so didn't read email address: "+ from_email_address)
# Perform Cozmo's task in the background so the HTTP server responds immediately.
asyncio.ensure_future(read_name())
return web.Response(text="OK")
# Attach the function as an HTTP handler.
app.router.add_post('/iftttGmail', serve_gmail)
if __name__ == '__main__':
cozmo.setup_basic_logging()
cozmo.robot.Robot.drive_off_charger_on_connect = False
# Use our custom robot class with extra helper methods
cozmo.conn.CozmoConnection.robot_factory = IFTTTRobot
try:
sdk_conn = cozmo.connect_on_loop(app.loop)
# Wait for the robot to become available and add it to the app object.
app['robot'] = app.loop.run_until_complete(sdk_conn.wait_for_robot())
except cozmo.ConnectionError as e:
sys.exit("A connection error occurred: %s" % e)
web.run_app(app)
|
manxueitp/cozmo-test
|
cozmo_sdk_examples/if_this_then_that/ifttt_gmail.py
|
Python
|
mit
| 6,646
| 0.003624
|