code (string, 3 to 1.05M chars) | repo_name (string, 5 to 104 chars) | path (string, 4 to 251 chars) | language (1 class) | license (15 classes) | size (int64, 3 to 1.05M)
|---|---|---|---|---|---|
from django.test import TestCase
from mdot.mdot_rest_client.client import MDOT, ClientResource
class MdotClientErrorTest(TestCase):
def test_get_resource_by_id(self):
"""
WILL TEST retrieval of a resource by its id.
"""
with self.settings(
RESTCLIENTS_MDOT_DAO_CLASS='Mock'
):
pass
def test_python_list_conversion_bad_id(self):
fake_list = [{u'accessible': False,
u'feature_desc': u'IT goodness for the UW',
u'title': 123,
u'image': u'http://localhost:8000/\
media/uploads/screenshot_CprR5Dk.jpg',
u'created_date': u'2015-07-31T19:18:43.771637Z',
u'campus_seattle': True,
u'campus_bothell': False,
u'responsive_web': False,
u'featured': True,
u'last_modified': u'2015-07-31T19:21:07.562924Z',
u'intended_audiences': [{u'audience': u'student'},
{u'audience': u'staff'},
{u'audience': u'faculty'},
{u'audience': u'freshman'}],
u'resource_links':
[{u'url': u'http://www.washington.edu/itconnect',
u'link_type': u'WEB'}],
u'id': 'some string not an int',
u'campus_tacoma': False}]
with self.assertRaises(TypeError):
MDOT()._python_list_to_resources_model_list(fake_list)
def test_python_list_conversion_bad_title(self):
fake_list = [{u'accessible': False,
u'feature_desc': u'IT goodness for the UW',
u'title': 234,
u'image': u'http://localhost:8000/\
media/uploads/screenshot_CprR5Dk.jpg',
u'created_date': u'2015-07-31T19:18:43.771637Z',
u'campus_seattle': True,
u'campus_bothell': False,
u'responsive_web': False,
u'featured': True,
u'last_modified': u'2015-07-31T19:21:07.562924Z',
u'intended_audiences': [{u'audience': u'student'},
{u'audience': u'staff'},
{u'audience': u'faculty'},
{u'audience': u'freshman'}],
u'resource_links':
[{u'url': u'http://www.washington.edu/itconnect',
u'link_type': u'WEB'}],
u'id': 1,
u'campus_tacoma': False}]
with self.assertRaises(TypeError):
MDOT()._python_list_to_resources_model_list(fake_list)
def test_python_list_conversion_bad_desc(self):
fake_list = [{u'accessible': False,
u'feature_desc': 1234,
u'title': u'ITConnect',
u'image': u'http://localhost:8000/media/\
uploads/screenshot_CprR5Dk.jpg',
u'created_date': u'2015-07-31T19:18:43.771637Z',
u'campus_seattle': True,
u'campus_bothell': False,
u'responsive_web': False,
u'featured': True,
u'last_modified': u'2015-07-31T19:21:07.562924Z',
u'intended_audiences': [{u'audience': u'student'},
{u'audience': u'staff'},
{u'audience': u'faculty'},
{u'audience': u'freshman'}],
u'resource_links':
[{u'url': u'http://www.washington.edu/itconnect',
u'link_type': u'WEB'}],
u'id': 1,
u'campus_tacoma': False}]
with self.assertRaises(TypeError):
MDOT()._python_list_to_resources_model_list(fake_list)
def test_python_list_conversion_bad_image(self):
fake_list = [{u'accessible': False,
u'feature_desc': u'This is a test',
u'title': u'ITConnect',
u'image': 123,
u'created_date': u'2015-07-31T19:18:43.771637Z',
u'campus_seattle': True,
u'campus_bothell': False,
u'responsive_web': False,
u'featured': True,
u'last_modified': u'2015-07-31T19:21:07.562924Z',
u'intended_audiences': [{u'audience': u'student'},
{u'audience': u'staff'},
{u'audience': u'faculty'},
{u'audience': u'freshman'}],
u'resource_links':
[{u'url': u'http://www.washington.edu/itconnect',
u'link_type': u'WEB'}],
u'id': 1,
u'campus_tacoma': False}]
with self.assertRaises(TypeError):
MDOT()._python_list_to_resources_model_list(fake_list)
def test_python_list_conversion_bad_link_url(self):
fake_list = [{u'accessible': False,
u'feature_desc': u'This is a test',
u'title': u'ITConnect',
u'image': u'http://localhost:8000/media/\
uploads/screenshot_CprR5Dk.jpg',
u'created_date': u'2015-07-31T19:18:43.771637Z',
u'campus_seattle': True,
u'campus_bothell': False,
u'responsive_web': False,
u'featured': True,
u'last_modified': u'2015-07-31T19:21:07.562924Z',
u'intended_audiences': [{u'audience': u'student'},
{u'audience': u'staff'},
{u'audience': u'faculty'},
{u'audience': u'freshman'}],
u'resource_links':
[{u'url': 123,
u'link_type': u'WEB'}],
u'id': 1,
u'campus_tacoma': False}]
with self.assertRaises(TypeError):
MDOT()._python_list_to_resources_model_list(fake_list)
def test_python_list_conversion_bad_link_type(self):
fake_list = [{u'accessible': False,
u'feature_desc': u'This is a test',
u'title': u'ITConnect',
u'image': u'http://localhost:8000/media/\
uploads/screenshot_CprR5Dk.jpg',
u'created_date': u'2015-07-31T19:18:43.771637Z',
u'campus_seattle': True,
u'campus_bothell': False,
u'responsive_web': False,
u'featured': True,
u'last_modified': u'2015-07-31T19:21:07.562924Z',
u'intended_audiences': [{u'audience': u'student'},
{u'audience': u'staff'},
{u'audience': u'faculty'},
{u'audience': u'freshman'}],
u'resource_links':
[{u'url': u'http://www.washington.edu/itconnect',
u'link_type': 123}],
u'id': 1,
u'campus_tacoma': False}]
with self.assertRaises(TypeError):
MDOT()._python_list_to_resources_model_list(fake_list)
|
charlon/mdot
|
mdot/test/client_error_catching.py
|
Python
|
apache-2.0
| 8,028
|
import patches.base
import patches.utils
from turbogears import database
from datetime import datetime
from sqlobject import AND, IN, OR, NOT
class Patch(patches.base.Patch):
description = "Changes to the architecture of MicroSites to support the concepts of Pages, list_items, and rendered objects"
def apply(self):
access_tuple = patches.utils.parseDBAccessDirective()
dburi = patches.utils.parseDBURI('dev.cfg').split('=')[1][1:-1]
database.set_db_uri(dburi)
database.run_with_transaction(setup_display_name_field)
def setup_display_name_field():
"""Create the microsites spaces object
"""
from hubspace.model import User
for user in User.select():
user.display_name
|
thehub/hubspace
|
patches/022.py
|
Python
|
gpl-2.0
| 750
|
#!/usr/bin/env python3
"""A specialised io module for binary ``.ply`` files containing XYZRGB points.
Most uses of this module should go through :py:func:`read` to iterate over
points in the file, or :py:func:`write` to save an iterable of points.
Neither function accumulates much data in memory.
:py:class:`IncrementalWriter` is useful when accumulating data in memory to
write many files is impractical. :py:func:`offset_for` and
:py:func:`read_header` provide location metadata if possible.
In all cases a "point" is tuple of (x, y, z, r, g, b). XYZ are floats denoting
spatial coordinates. RGB is the color, each an unsigned 8-bit integer.
While intentionally limited in scope, most data can be converted to this
format easily enough.
"""
# Pylint can freak out about mypy type notation; it's fine at runtime
# pylint:disable=unsubscriptable-object,invalid-sequence-index
from collections import namedtuple
import itertools
import struct
import os.path
from tempfile import SpooledTemporaryFile
from typing import Iterator, List, NamedTuple, Tuple
# User-defined types:
Point = Tuple[float, ...]
PlyHeader = NamedTuple('PlyHeader', [
('vertex_count', int), ('names', Tuple[str, ...]),
('form_str', str), ('comments', Tuple[str, ...])])
UTM_Coord = NamedTuple('UTM_Coord', [
('x', float), ('y', float), ('zone', int), ('north', bool)])
# The various struct types of .ply binary format
PLY_TYPES = {'float': 'f', 'double': 'd', 'uchar': 'B', 'char': 'b',
'ushort': 'H', 'short': 'h', 'uint': 'I', 'int': 'i'}
def offset_for(filename: str) -> Tuple[float, float, float]:
"""Return the (x, y, z) UTM offset for a Pix4D or forestutils .ply file."""
offset = filename[:-4] + '_ply_offset.xyz'
if os.path.isfile(offset):
with open(offset) as f:
x, y, z = tuple(float(n) for n in f.readline().strip().split(' '))
return x, y, z
for com in parse_ply_header(ply_header_text(filename))[3]:
if com.startswith('comment UTM x y zone north'):
return float(com.split(' ')[-4]), float(com.split(' ')[-3]), 0
return 0, 0, 0
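# The sidecar file read above ('<name>_ply_offset.xyz') is expected to hold a
# single line of three space-separated numbers, e.g. "476123.0 5429456.5 0.0".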
def _check_input(fname, ending='.ply'):
"""Checks that the file exists and has the right ending"""
if not os.path.isfile(fname):
raise FileNotFoundError('Cannot read points from a nonexistent file')
if not fname.endswith(ending):
raise ValueError('Tried to read file type {}, expected {}.'.format(
fname[-4:], ending))
def read(fname: str) -> Iterator:
"""Passes the file to a read function for that format."""
if fname.endswith('_point_cloud_part_1.ply'):
parts, p = [fname], 1
stub = fname.replace('_point_cloud_part_1.ply', '')
while True:
p += 1
part = stub + '_point_cloud_part_{}.ply'.format(p)
if os.path.isfile(part):
parts.append(part)
else:
return _read_pix4d_ply_parts(parts)
return _read_ply(fname)
def _read_pix4d_ply_parts(fname_list: List[str]) -> Iterator:
"""Yield points from a list of Pix4D ply files as if they were one file.
Pix4D usually exports point clouds in parts, with an xyz offset for the
origin. This means that we can yield the points from each, correcting
for the offset in the origin coordinate in each.
We can further move the altitude information into the points without loss
of precision (to any significant degree). However UTM XY coordinates
can't be added; we don't know the UTM zone and loss of precision may
be noticeable if we did.
"""
for f in fname_list:
_check_input(f)
f = fname_list.pop(0)
ox, oy, oz = offset_for(f)
for p in _read_ply(f):
yield p._replace(z=p.z+oz)
for f in fname_list:
dx, dy, dz = [b - a for a, b in zip([ox, oy, 0], offset_for(f))]
for p in _read_ply(f):
yield p._replace(x=p.x+dx)._replace(y=p.y+dy)._replace(z=p.z+dz)
def ply_header_text(filename: str) -> bytes:
"""Return the exact text of the header of the given .ply file, as bytes.
Using bytes to allow len(header) to give index to start of data; it's
trivial to decode in the parsing function.
"""
_check_input(filename)
header = b''
with open(filename, 'rb') as f:
while b'end_header' not in header:
header += next(f) # type: ignore
return header
def parse_ply_header(header_text: bytes) -> PlyHeader:
"""Parse the bytes of a .ply header to useful data about the vertices.
Deliberately discards the non-vertex data - this is a pointcloud module!
"""
# Get file lines, do some basic validation
lines = [l.strip() for l in header_text.decode('ascii').split('\n')]
magic_num, data_format, *lines = lines
if magic_num != 'ply':
raise ValueError('Not a valid .ply file (wrong magic number).')
if not data_format.startswith('format'):
raise ValueError(
'Unknown data format "{}" for .ply file.'.format(data_format))
if 'ascii' in data_format:
raise ValueError('ASCII format .ply files not supported at this time.')
# Extract comments from lines
comments = tuple(c for c in lines if c.startswith('comment '))
lines = [l for l in lines if not l.startswith('comment ')]
# Get vertex count
element, _, vertex_count = lines.pop(0).rpartition(' ')
if element != 'element vertex':
raise ValueError('File must begin with vertex data!')
# Get list of (type, name) pairs from the list of vertex properties
properties = [(t, n) for _, t, n in itertools.takewhile(
lambda l: l[0] == 'property', (l.split(' ') for l in lines))]
# Get Struct format from list of property types
form_str = '>' if 'binary_big_endian' in data_format else '<'
form_str += ''.join(PLY_TYPES[t] for t, n in properties)
# Get Namedtuple instance from property names
names = tuple(n for t, n in properties)
if not all(p in names for p in ('x', 'y', 'z')):
raise ValueError('Pointcloud vertices must have x, y, z attributes!')
# Finally, return our values
return PlyHeader(int(vertex_count), names, form_str, comments)
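# For example, a little-endian header declaring float x, y, z and uchar red,
# green, blue parses to PlyHeader(vertex_count=N,
# names=('x', 'y', 'z', 'red', 'green', 'blue'), form_str='<fffBBB', comments=()).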
def _read_ply(fname: str) -> Iterator:
"""Opens the specified file, and returns a point set in the format required
by attributes_from_cloud. Only handles xyzrgb point clouds, but that's
a fine subset of the format. See http://paulbourke.net/dataformats/ply/"""
header_bytes = ply_header_text(fname)
header = parse_ply_header(header_bytes)
point = namedtuple('Point', header.names) # type: ignore
fmt = struct.Struct(header.form_str)
with open(fname, 'rb') as f:
f.seek(len(header_bytes))
for _ in range(header.vertex_count):
yield point._make(fmt.unpack(f.read(fmt.size))) # type: ignore
class IncrementalWriter:
"""A streaming file writer for point clouds.
Using the IncrementalWriter with spooled temporary files, which are
only flushed to disk if they go above the given size, allows for
streaming points to disk even when the header is unknown in advance.
This allows some nice tricks, including splitting a point cloud into
multiple files in a single pass, without memory issues.
"""
# pylint:disable=too-few-public-methods
def __init__(self, filename: str, header: PlyHeader,
utm: UTM_Coord=None, buffer=2**22) -> None:
"""
Args:
filename: final place to save the file on disk.
header: PlyHeader describing the vertex layout to write.
utm: optional UTM coordinate; if given, it is written into the header
as a comment.
buffer (int): The number of bytes to hold in RAM before flushing
the temporary file to disk. Default 4MB (2**22 bytes) - enough for
most objects while still making it practical to hold thousands of
writers in memory. Set a smaller buffer for large forests.
"""
self.filename = filename
self.temp_storage = SpooledTemporaryFile(max_size=buffer, mode='w+b')
self.count = 0
self.utm = utm
self.header = header
# Always write in big-endian mode; only store type information
self.binary = struct.Struct('>' + header.form_str[1:])
def __call__(self, point) -> None:
"""Add a single point to this pointcloud, saving in binary format.
Args:
point (namedtuple): vertex attributes for the point, eg xyzrgba.
"""
self.temp_storage.write(self.binary.pack(*point))
self.count += 1
def __del__(self):
"""Flush data to disk and clean up."""
to_ply_types = {v: k for k, v in PLY_TYPES.items()}
properties = ['property {t} {n}'.format(t=t, n=n) for t, n in zip(
(to_ply_types[p] for p in self.header.form_str[1:]),
self.header.names)]
head = ['ply',
'format binary_big_endian 1.0',
'element vertex {}'.format(self.count),
'\n'.join(properties),
'end_header']
if self.utm is not None:
head.insert(-1, 'comment UTM x y zone north ' +
'{0.x} {0.y} {0.zone} {0.north}'.format(self.utm))
if not os.path.isdir(os.path.dirname(self.filename)):
os.makedirs(os.path.dirname(self.filename))
with open(self.filename, 'wb') as f:
f.write(('\n'.join(head) + '\n').encode('ascii'))
self.temp_storage.seek(0)
chunk = self.temp_storage.read(8192)
while chunk:
f.write(chunk)
chunk = self.temp_storage.read(8192)
self.temp_storage.close()
def write(cloud: Iterator, fname: str, header: PlyHeader,
utm: UTM_Coord) -> None:
"""Write the given cloud to disk."""
writer = IncrementalWriter(fname, header, utm)
for p in cloud:
writer(p)
|
borevitzlab/3D-tools
|
src/pointcloudfile.py
|
Python
|
gpl-3.0
| 9,952
|
import http_server
server = http_server.HTTPServer(8080)
server.run()
|
serpis/pynik
|
httpsrv/main.py
|
Python
|
mit
| 71
|
from __future__ import absolute_import, division, print_function
# import base64
from copy import copy
from functools import partial
import six
from google.protobuf.descriptor import FieldDescriptor
from google.protobuf.message import Message
__all__ = ('protobuf_to_dict',
'dict_to_protobuf',
'TYPE_CALLABLE_MAP',
'REVERSE_TYPE_CALLABLE_MAP')
# adapted from https://github.com/benhodgson/protobuf-to-dict
REVERSE_TYPE_CALLABLE_MAP = {
FieldDescriptor.TYPE_DOUBLE: float,
FieldDescriptor.TYPE_FLOAT: float,
FieldDescriptor.TYPE_INT32: int,
FieldDescriptor.TYPE_INT64: int if six.PY3 else six.integer_types[1],
FieldDescriptor.TYPE_UINT32: int,
FieldDescriptor.TYPE_UINT64: int if six.PY3 else six.integer_types[1],
FieldDescriptor.TYPE_SINT32: int,
FieldDescriptor.TYPE_SINT64: int if six.PY3 else six.integer_types[1],
FieldDescriptor.TYPE_FIXED32: int,
FieldDescriptor.TYPE_FIXED64: int if six.PY3 else six.integer_types[1],
FieldDescriptor.TYPE_SFIXED32: int,
FieldDescriptor.TYPE_SFIXED64: int if six.PY3 else six.integer_types[1],
FieldDescriptor.TYPE_BOOL: bool,
FieldDescriptor.TYPE_STRING: six.text_type,
FieldDescriptor.TYPE_BYTES: six.binary_type # base64.b64encode,
}
TYPE_CALLABLE_MAP = copy(REVERSE_TYPE_CALLABLE_MAP)
TYPE_CALLABLE_MAP[FieldDescriptor.TYPE_ENUM] = int
CONTAINER_MAP = []
def enum_to_label(field, value):
return field.enum_type.values_by_number[int(value)].name
def label_to_enum(field, value):
enum_dict = field.enum_type.values_by_name
return enum_dict[value].number
def message_to_container(message, containers):
for msg, cnt in containers:
if isinstance(msg, type): # class definition used
if isinstance(message, msg):
return cnt()
elif isinstance(message, msg.__class__): # object definition used
if all([getattr(msg, field.name) == getattr(message, field.name)
for field, value in msg.ListFields()]):
return cnt()
return dict() # fallback to plain dictionary
def container_to_message(container, containers):
for msg, cnt in containers:
if isinstance(container, cnt):
if isinstance(msg, type):
return msg()
else:
return copy(msg)
def protobuf_to_dict(pb, containers=CONTAINER_MAP, converters=TYPE_CALLABLE_MAP):
result = message_to_container(pb, containers)
# for field, value in pb.ListFields(): # only non-empty fields
for field in pb.DESCRIPTOR.fields: # empty fields too
value = getattr(pb, field.name)
if (field.message_type and field.message_type.has_options and
field.message_type.GetOptions().map_entry):
converter = dict
elif field.type == FieldDescriptor.TYPE_MESSAGE:
# recursively encode protobuf sub-message
converter = partial(protobuf_to_dict, containers=containers,
converters=converters)
elif field.type == FieldDescriptor.TYPE_ENUM:
converter = partial(enum_to_label, field)
else:
converter = converters[field.type]
if field.label == FieldDescriptor.LABEL_REPEATED:
result[field.name] = list(map(converter, value))
else:
result[field.name] = converter(value)
return result
def dict_to_protobuf(dct, pb=None, containers=CONTAINER_MAP,
converters=REVERSE_TYPE_CALLABLE_MAP, strict=True):
default = container_to_message(dct, containers)
if pb:
if default:
pb.MergeFrom(default)
else:
pb = default
pb = pb if isinstance(pb, Message) else pb()
for k, v in dct.items():
try:
# TODO: silently skip undefined fields
field = pb.DESCRIPTOR.fields_by_name[k]
except KeyError:
if not strict:
continue
else:
raise
pb_value = getattr(pb, k, None)
if field.label == FieldDescriptor.LABEL_REPEATED:
for item in v:
if field.type == FieldDescriptor.TYPE_MESSAGE:
dict_to_protobuf(item, pb_value.add(),
containers, converters)
elif field.type == FieldDescriptor.TYPE_ENUM:
pb_value.append(label_to_enum(field, item))
else:
pb_value.append(item)
elif field.type == FieldDescriptor.TYPE_MESSAGE:
dict_to_protobuf(v, pb_value, containers, converters)
else:
if field.type in converters:
v = converters[field.type](v)
elif field.type == FieldDescriptor.TYPE_ENUM:
v = label_to_enum(field, v)
setattr(pb, field.name, v)
return pb
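# A minimal round-trip sketch (assumes some compiled protobuf message class,
# here called `Person`; not part of this module):
#
#     msg = Person(name='Ada', id=1)
#     d = protobuf_to_dict(msg)              # plain dict, enums become labels
#     clone = dict_to_protobuf(d, Person())  # rebuilds an equivalent message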
|
kszucs/proxo
|
proxo/protobuf.py
|
Python
|
apache-2.0
| 4,894
|
import os
import random
from itertools import chain, product
from collections import defaultdict, namedtuple
import numpy as np
from sequence import reverse_complement
SHAPE_PARAM_TYPE = 'float32'
def iter_fivemers(seq):
for start in xrange(len(seq) - 5 + 1):
yield seq[start:start+5]
return
ShapeData = namedtuple(
'ShapeData', ['ProT', 'MGW', 'LHelT', 'RHelT', 'LRoll', 'RRoll'])
################################################################################
# Build 'shape_data' lookup table mapping fivemers to their shape parameters
#
#
fivemer_to_index_map = dict((''.join(fivemer), i)
for (i, fivemer)
in enumerate(sorted(product('ACGT', repeat=5))))
def fivemer_to_index(fivemer):
return fivemer_to_index_map[fivemer.upper()]
def load_shape_data(center=True):
prefix = os.path.join(os.path.dirname(__file__), './shape_data/')
fivemer_fnames = ["all_fivemers.ProT", "all_fivemers.MGW"]
fourmer_fnames = ["all_fivemers.HelT", "all_fivemers.Roll"]
# load shape data for all of the fivemers
shape_params = np.zeros((4**5, 6))
pos = 0
for fname in chain(fivemer_fnames, fourmer_fnames):
shape_param_name = fname.split(".")[-1]
with open(os.path.join(prefix, fname)) as fp:
for data in fp.read().strip().split(">")[1:]:
seq, params = data.split()
param = params.split(";")
if len(param) == 5:
shape_params[fivemer_to_index(seq), pos] = float(param[2])
elif len(param) == 4:
shape_params[fivemer_to_index(seq), pos] = float(param[1])
shape_params[fivemer_to_index(seq), pos+1] = float(param[2])
if fname in fivemer_fnames: pos += 1
if fname in fourmer_fnames: pos += 2
if center:
shape_params = shape_params - shape_params.mean(0)
return shape_params
shape_data = load_shape_data()
# END build shape data
################################################################################
def est_shape_params_for_subseq(subseq):
"""Est shape params for a subsequence.
Assumes that the flanking sequence is included, so it returns
a vector of length len(subseq) - 2 (because the encoding is done with
fivemers)
"""
res = np.zeros((len(subseq)-4, 6), dtype=SHAPE_PARAM_TYPE)
for i, fivemer in enumerate(iter_fivemers(subseq)):
fivemer = fivemer.upper()
if 'AAAAA' == fivemer:
res[i,:] = 0
elif 'N' in fivemer:
res[i,:] = 0
else:
res[i,:] = shape_data[fivemer_to_index(fivemer)]
return res
def code_sequence_shape(seq, left_flank_dimer="NN", right_flank_dimer="NN"):
full_seq = left_flank_dimer + seq + right_flank_dimer
return est_shape_params_for_subseq(full_seq)
def code_seqs_shape_features(seqs, seq_len, n_seqs):
shape_features = np.zeros(
(n_seqs, seq_len, 6), dtype=SHAPE_PARAM_TYPE)
RC_shape_features = np.zeros(
(n_seqs, seq_len, 6), dtype=SHAPE_PARAM_TYPE)
for i, seq in enumerate(seqs):
shape_features[i, :, :] = code_sequence_shape(seq)
RC_shape_features[i, :, :] = code_sequence_shape(
reverse_complement(seq))
return shape_features, RC_shape_features
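# Usage sketch (hypothetical sequence; the flanking 'NN' dimers are added by
# code_sequence_shape itself):
#
#     feats = code_sequence_shape('ACGTACGT')  # -> (8, 6) array of shape params
#     fwd, rc = code_seqs_shape_features(['ACGTACGT'], seq_len=8, n_seqs=1)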
|
nboley/pyDNAbinding
|
pyDNAbinding/shape.py
|
Python
|
gpl-2.0
| 3,357
|
import logging
from math import floor
import pygame
from .core import King, BOARD_SIZE
logger = logging.getLogger(__name__)
# COLORS
# R G B
WHITE = (255, 255, 255)
BLUE = ( 0, 0, 255)
RED = (255, 0, 0)
BLACK = ( 0, 0, 0)
GOLD = (255, 215, 0)
YELLOW = (255, 255, 0)
GREEN = ( 0, 255, 0)
HIGH = (160, 190, 255)
class SimpleGraphics:
"""Class representing the pygame graphics for a game of draughts."""
def __init__(self):
self.window_size = 600
self.screen = pygame.display.set_mode((self.window_size, self.window_size))
self.background = pygame.image.load('draughts/resources/board.png')
self.fps = 60
self.clock = pygame.time.Clock()
self.square_size = int(self.window_size / BOARD_SIZE)
self.piece_radius = int(self.square_size / 3)
self.message = None
@staticmethod
def setup_window():
logger.debug("Setting up window")
pygame.init()
pygame.display.set_caption('Draughts')
def update_display(self, board, legal_moves, selected_square, show_help):
"""Renders the board state"""
self.screen.blit(self.background, (0, 0))
self.draw_pieces(board)
self.highlight_squares(selected_square, legal_moves, show_help)
if self.message:
self.draw_message()
pygame.display.flip()
self.clock.tick(self.fps)
def draw_piece(self, square, piece):
"""Draws a piece in the middle of a square."""
coords = self.pixel_coords(square, middle=True)
colour = self.map_colour(piece.colour)
pygame.draw.circle(self.screen, colour, coords, self.piece_radius)
if isinstance(piece, King):
pygame.draw.circle(self.screen, GOLD, coords, int(self.piece_radius * 0.6))
@staticmethod
def map_colour(colour):
if colour == 'red':
return RED
if colour == 'white':
return WHITE
def pixel_coords(self, square, middle=False):
"""Returns top left or middle pixel coordinates for a given x, y square."""
x, y = square
if middle:
x, y = x + 0.5, y + 0.5
return int(x * self.square_size), int(y * self.square_size)
def square_coords(self, left, down):
"""Returns x, y coordinates of a given pixel position"""
return floor(left / self.square_size), floor(down / self.square_size)
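# Coordinate round-trip sketch (assuming BOARD_SIZE == 8, so square_size == 75
# for the default 600px window):
#     self.pixel_coords((2, 3))               # -> (150, 225), top-left corner
#     self.pixel_coords((2, 3), middle=True)  # -> (187, 262), square centre
#     self.square_coords(160, 300)            # -> (2, 4)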
def draw_pieces(self, board):
for square, piece in board.items():
if piece:
self.draw_piece(square, piece)
def highlight_squares(self, selected_square, legal_moves, show_help):
"""Highlights the currently selected piece and possible moves.
If show help is false, we only show the selected piece
"""
for move in legal_moves.keys():
start, end = move
if selected_square:
if start == selected_square and show_help:
self.highlight_square(end, GREEN)
self.highlight_square(selected_square, BLUE)
elif show_help:
self.highlight_square(start, HIGH)
def highlight_square(self, square, colour):
"""Highlight a square's border"""
pygame.draw.rect(self.screen, colour,
(*self.pixel_coords(square), self.square_size, self.square_size),
5)
def draw_message(self):
"""Draws message to the screen. """
font_obj = pygame.font.Font('freesansbold.ttf', 44)
text_surface_obj = font_obj.render(self.message, True, HIGH, BLACK)
text_rect_obj = text_surface_obj.get_rect()
text_rect_obj.center = (self.window_size / 2, self.window_size / 2)
self.screen.blit(text_surface_obj, text_rect_obj)
|
sdolemelipone/draughts
|
draughts/graphics.py
|
Python
|
gpl-3.0
| 3,845
|
# Hardcoding is evil but at least it can be segregated.
server="127.0.0.1"
port="7890"
|
bbulkow/MagnusFlora
|
led/ledlib/hardcode/fcserverconfig.py
|
Python
|
mit
| 89
|
"""Screen database."""
import redis_client
import control
import re
from twisted.internet import defer
class ScreenDB(object):
"""A screen database."""
def __init__(self):
"""Default constructor."""
pass
def set_mode(self, screen, mode):
redis_client.connection.set('screen:{0}:mode'.format(screen),
mode)
redis_client.connection.publish('screen:update', 'update')
def set_override(self, screen, override):
if override is not None:
redis_client.connection.set('screen:{0}:override'.format(screen),
override)
else:
redis_client.connection.delete('screen:{0}:override'.format(screen))
redis_client.connection.publish('screen:update', 'update')
@defer.inlineCallbacks
def list(self):
screens = yield redis_client.connection.keys('screen:*:mode')
entries = {}
for screen in screens:
screenID = screen.split(':')[1]
mode = yield redis_client.connection.get('screen:{0}:mode'.format(screenID))
host = yield redis_client.connection.get('screen:{0}:host'.format(screenID))
entries[screenID] = {'mode': mode,
'host': host}
defer.returnValue(entries)
screens = ScreenDB()
@control.handler('screen-list')
@defer.inlineCallbacks
def perform_screen_list(responder, options):
screen_list = yield screens.list()
for screen, settings in screen_list.iteritems():
if settings['host'] is None:
online_string = 'offline'
else:
online_string = 'online from {0} port {1}'.format(*settings['host'].split(' '))
responder('{0} - {1} ({2})'.format(screen,
settings['mode'],
online_string))
@control.handler('screen-set-mode')
def perform_screen_set_mode(responder, options):
screens.set_mode(options['<id>'], options['<mode>'])
responder('Mode set.')
@control.handler('screen-override')
def perform_screen_override(responder, options):
screens.set_override(options['<id>'], options['<message>'])
responder('Override set.')
@control.handler('screen-clear-override')
def perform_screen_clear_override(responder, options):
screens.set_override(options['<id>'], None)
responder('Override cleared.')
def got_screen(name):
control.broadcast('Screen connected: {0}'.format(name))
redis_client.add_subscribe('screen:connect', got_screen)
|
prophile/compd
|
src/screen_db.py
|
Python
|
mit
| 2,580
|
# -*- coding: utf-8 -*-
"""Tutorial on using the InfluxDB client."""
import argparse
from influxdb import InfluxDBClient
def main(host='localhost', port=8086):
"""Instantiate a connection to the InfluxDB."""
user = 'root'
password = 'root'
dbname = 'example'
dbuser = 'smly'
dbuser_password = 'my_secret_password'
query = 'select value from cpu_load_short;'
json_body = [
{
"measurement": "cpu_load_short",
"tags": {
"host": "server01",
"region": "us-west"
},
"time": "2009-11-10T23:00:00Z",
"fields": {
"Float_value": 0.64,
"Int_value": 3,
"String_value": "Text",
"Bool_value": True
}
}
]
client = InfluxDBClient(host, port, user, password, dbname)
print("Create database: " + dbname)
client.create_database(dbname)
print("Create a retention policy")
client.create_retention_policy('awesome_policy', '3d', 3, default=True)
print("Switch user: " + dbuser)
client.switch_user(dbuser, dbuser_password)
print("Write points: {0}".format(json_body))
client.write_points(json_body)
print("Querying data: " + query)
result = client.query(query)
print("Result: {0}".format(result))
print("Switch user: " + user)
client.switch_user(user, password)
print("Drop database: " + dbname)
client.drop_database(dbname)
def parse_args():
"""Parse the args."""
parser = argparse.ArgumentParser(
description='example code to play with InfluxDB')
parser.add_argument('--host', type=str, required=False,
default='localhost',
help='hostname of InfluxDB http API')
parser.add_argument('--port', type=int, required=False, default=8086,
help='port of InfluxDB http API')
return parser.parse_args()
if __name__ == '__main__':
args = parse_args()
main(host=args.host, port=args.port)
|
omki2005/influxdb-python
|
examples/tutorial.py
|
Python
|
mit
| 2,065
|
#########################################
## DennisX User-Managed MUD Server Kit ##
## room.py ##
## Room Handling ##
## Copyright 2013 PariahSoft LLC ##
#########################################
## **********
## Permission is hereby granted, free of charge, to any person obtaining a copy
## of this software and associated documentation files (the "Software"), to
## deal in the Software without restriction, including without limitation the
## rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
## sell copies of the Software, and to permit persons to whom the Software is
## furnished to do so, subject to the following conditions:
##
## The above copyright notice and this permission notice shall be included in
## all copies or substantial portions of the Software.
##
## THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
## IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
## FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
## AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
## LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
## FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
## IN THE SOFTWARE.
## **********
class Room:
"""This class represents a room."""
def __init__(self):
self.id = -1
self.name = ""
self.desc = ""
self.owner = None
self.locked = False
self.__items = []
self.__links = []
self.__players = []
def add_item(self, item):
"""Add an item to the room, where "item" is the Item class instance."""
self.__items.append(item)
def get_item(self, itemid):
"""Return the Item class instance for the item identified by
"itemid". Returns None if the item does not exist."""
for item in self.__items:
if item.id == itemid:
return item
return None
def del_item(self, itemid):
"""Remove the item identified by "itemid" from the room. Returns True
if succeeded, false if the item does not exist."""
item = None
for n, i in enumerate(self.__items):
if i.id == itemid:
inum = n
item = i
if item:
del self.__items[inum]
return True
else:
return False
def add_link(self, link):
"""Add a link to the room, where "link" is the Ltem class instance."""
self.__links.append(link)
def get_link(self, linkid):
"""Return the Link class instance for the link identified by
"linkid". Returns None if the link does not exist."""
for link in self.__links:
if link.id == linkid:
return link
return None
def del_link(self, linkid):
"""Remove the link identified by "linkid" from the room. Returns True
if succeeded, false if the link does not exist."""
link = None
for n, l in enumerate(self.__links):
if l.id == linkid:
lnum = n
link = l
if link:
del self.__links[lnum]
return True
else:
return False
def enter(self, player):
"""Enter a player into the room, where "player" is the Player class
instance."""
self.__players.append(player)
def exit(self, playerid):
"""Exit the player identified by "playerid" from the room. Returns True
if succeeded, false if the player does not exist."""
player = None
for n, p in enumerate(self.__players):
if p.id == playerid:
pnum = n
player = p
if player:
del self.__players[pnum]
return True
else:
return False
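# Usage sketch (Item, Link and Player instances come from elsewhere in DennisX;
# shown only to illustrate the Room API):
#     room = Room()
#     room.add_item(item)
#     room.get_item(item.id)   # -> item, or None if no such id
#     room.enter(player)
#     room.exit(player.id)     # -> True once removed, False if absent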
|
pariahsoft/DennisX
|
inc/room.py
|
Python
|
mit
| 3,494
|
from collections import namedtuple
from datetime import datetime, timedelta
from django.db import models
from django.urls import reverse_lazy as reverse
from django.utils.functional import cached_property
from django.utils.translation import ugettext_lazy as _
from djangocms_text_ckeditor.fields import HTMLField
from .pdfmail import PdfExportAndMailMixin
from .roles import Leader
from .startend import StartEndMixin
from .subjects import Subject, SubjectRegistrationParticipant
from .times import AbstractTime, TimesMixin
class JournalPeriod:
def __init__(self, journal, period=None):
self.journal = journal
self.period = period
@property
def all_journal_entries(self):
qs = self.journal.journal_entries.all()
if self.period:
if self.period != self.journal.subject.course.all_periods[0]:
qs = qs.filter(date__gte=self.period.start)
if self.period != self.journal.subject.course.all_periods[-1]:
qs = qs.filter(date__lte=self.period.end)
return list(qs)
@cached_property
def all_participants(self):
if self.period: # course
return [
participant
for participant in self.journal.all_participants
if (
participant.approved.date() <= self.period.end
and (participant.canceled is None or participant.canceled.date() >= self.period.start)
)
]
else: # event
return self.journal.all_participants
@cached_property
def all_alternates(self):
alternates = set()
for entry in self.all_journal_entries:
for alternate in entry.all_alternates:
alternates.add(alternate)
return list(alternates)
PresenceRecord = namedtuple("PresenceRecord", ("person", "presences"))
def get_participant_presences(self):
return [
self.PresenceRecord(
participant, [participant.id in entry.all_participants_idset for entry in self.all_journal_entries]
)
for participant in self.all_participants
]
def get_leader_presences(self):
return [
self.PresenceRecord(
leader, [entry.all_leader_entries_by_leader.get(leader, None) for entry in self.all_journal_entries]
)
for leader in self.journal.all_leaders
]
def get_alternate_presences(self):
return [
self.PresenceRecord(
alternate,
[entry.all_leader_entries_by_leader.get(alternate, None) for entry in self.all_journal_entries],
)
for alternate in self.all_alternates
]
class Journal(PdfExportAndMailMixin, TimesMixin, models.Model):
object_name = "journal"
subject = models.ForeignKey(Subject, on_delete=models.CASCADE, related_name="journals", verbose_name=_("subject"))
name = models.CharField(_("journal name"), blank=True, default="", max_length=150)
leaders = models.ManyToManyField(Leader, blank=True, related_name="journals", verbose_name=_("leaders"))
participants = models.ManyToManyField(
SubjectRegistrationParticipant, blank=True, related_name="journals", verbose_name=_("participants")
)
risks = HTMLField(_("risks"), blank=True)
plan = HTMLField(_("plan"), blank=True)
evaluation = HTMLField(_("evaluation"), blank=True)
class Meta:
app_label = "leprikon"
verbose_name = _("journal")
verbose_name_plural = _("journal")
def __str__(self):
return f"{self.subject.display_name} - {self.name}" if self.name else self.subject.display_name
@cached_property
def all_journal_entries(self):
return list(self.journal_entries.all())
@cached_property
def all_journal_periods(self):
try:
return [JournalPeriod(self, period) for period in self.subject.course.all_periods]
except AttributeError:
return [JournalPeriod(self)]
@cached_property
def all_leaders(self):
return list(self.leaders.all())
@cached_property
def all_participants(self):
return list(
self.participants.annotate(
approved=models.F("registration__approved"),
canceled=models.F("registration__canceled"),
)
)
def get_valid_participants(self, d):
return self.participants.exclude(registration__canceled__date__lt=d)
class JournalTime(AbstractTime):
journal = models.ForeignKey(Journal, on_delete=models.CASCADE, related_name="times", verbose_name=_("journal"))
class Meta:
app_label = "leprikon"
ordering = ("day_of_week", "start")
verbose_name = _("time")
verbose_name_plural = _("times")
class JournalEntry(StartEndMixin, models.Model):
journal = models.ForeignKey(
Journal, editable=False, on_delete=models.PROTECT, related_name="journal_entries", verbose_name=_("journal")
)
date = models.DateField(_("date"))
start = models.TimeField(_("start time"), blank=True, null=True)
end = models.TimeField(_("end time"), blank=True, null=True)
agenda = HTMLField(_("session agenda"), default="")
participants = models.ManyToManyField(
SubjectRegistrationParticipant, blank=True, related_name="journal_entries", verbose_name=_("participants")
)
participants_instructed = models.ManyToManyField(
SubjectRegistrationParticipant,
blank=True,
related_name="instructed",
verbose_name=_("participants instructed about safety and internal rules"),
)
class Meta:
app_label = "leprikon"
ordering = ("date", "start", "end")
verbose_name = _("journal entry")
verbose_name_plural = _("journal entries")
def __str__(self):
return f"{self.journal}, {self.date}, {self.duration}"
@cached_property
def datetime_start(self):
try:
return datetime.combine(self.date, self.start)
except TypeError:
return None
@cached_property
def datetime_end(self):
try:
return datetime.combine(self.date, self.end)
except TypeError:
return None
@cached_property
def duration(self):
try:
return self.datetime_end - self.datetime_start
except TypeError:
return timedelta()
duration.short_description = _("duration")
@cached_property
def all_participants(self):
return list(self.participants.all())
@cached_property
def all_participants_instructed(self):
return list(self.participants_instructed.all())
@cached_property
def all_participants_idset(self):
return set(r.id for r in self.all_participants)
@cached_property
def all_leader_entries(self):
return list(self.leader_entries.all())
@cached_property
def all_leader_entries_by_leader(self):
return dict((e.timesheet.leader, e) for e in self.all_leader_entries)
@cached_property
def all_leaders(self):
return list(
le.timesheet.leader for le in self.all_leader_entries if le.timesheet.leader in self.journal.all_leaders
)
@cached_property
def all_alternates(self):
return list(
le.timesheet.leader for le in self.all_leader_entries if le.timesheet.leader not in self.journal.all_leaders
)
@cached_property
def affects_submitted_timesheets(self):
return self.leader_entries.filter(timesheet__submitted=True).exists()
def save(self, *args, **kwargs):
if self.end is None:
self.end = self.start
super().save(*args, **kwargs)
def get_edit_url(self):
return reverse("leprikon:journalentry_update", args=(self.id,))
def get_delete_url(self):
return reverse("leprikon:journalentry_delete", args=(self.id,))
class JournalLeaderEntry(StartEndMixin, models.Model):
journal_entry = models.ForeignKey(
JournalEntry,
editable=False,
on_delete=models.CASCADE,
related_name="leader_entries",
verbose_name=_("journal entry"),
)
timesheet = models.ForeignKey(
"leprikon.Timesheet",
editable=False,
on_delete=models.PROTECT,
related_name="journal_entries",
verbose_name=_("timesheet"),
)
start = models.TimeField(_("start time"))
end = models.TimeField(_("end time"))
class Meta:
app_label = "leprikon"
verbose_name = _("journal leader entry")
verbose_name_plural = _("journal leader entries")
unique_together = (("journal_entry", "timesheet"),)
def __str__(self):
return f"{self.journal_entry}"
@cached_property
def date(self):
return self.journal_entry.date
date.short_description = _("date")
date.admin_order_field = "journal_entry__date"
@cached_property
def journal(self):
return self.journal_entry.journal
journal.short_description = _("journal")
@cached_property
def subject(self):
return self.journal_entry.journal.subject
subject.short_description = _("subject")
@cached_property
def datetime_start(self):
return datetime.combine(self.date, self.start)
@cached_property
def datetime_end(self):
return datetime.combine(self.date, self.end)
@cached_property
def duration(self):
return self.datetime_end - self.datetime_start
duration.short_description = _("duration")
@property
def group(self):
return self.subject
def get_edit_url(self):
return reverse("leprikon:journalleaderentry_update", args=(self.id,))
def get_delete_url(self):
return reverse("leprikon:journalleaderentry_delete", args=(self.id,))
|
leprikon-cz/leprikon
|
leprikon/models/journals.py
|
Python
|
bsd-3-clause
| 9,926
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2010 Citrix Systems, Inc.
# Copyright (c) 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Helper methods for operations related to the management of volumes,
and storage repositories
"""
import re
import string
from oslo.config import cfg
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
class StorageError(Exception):
"""To raise errors related to SR, VDI, PBD, and VBD commands."""
def __init__(self, message=None):
super(StorageError, self).__init__(message)
def _handle_sr_params(params):
if 'id' in params:
del params['id']
sr_type = params.pop('sr_type', 'iscsi')
sr_desc = params.pop('name_description', '')
return sr_type, sr_desc
def create_sr(session, label, params):
LOG.debug(_('Creating SR %s'), label)
sr_type, sr_desc = _handle_sr_params(params)
sr_ref = session.call_xenapi("SR.create",
session.get_xenapi_host(),
params,
'0', label, sr_desc, sr_type, '', False, {})
return sr_ref
def introduce_sr(session, sr_uuid, label, params):
LOG.debug(_('Introducing SR %s'), label)
sr_type, sr_desc = _handle_sr_params(params)
sr_ref = session.call_xenapi('SR.introduce', sr_uuid, label, sr_desc,
sr_type, '', False, params)
LOG.debug(_('Creating PBD for SR'))
pbd_ref = create_pbd(session, sr_ref, params)
LOG.debug(_('Plugging SR'))
session.call_xenapi("PBD.plug", pbd_ref)
session.call_xenapi("SR.scan", sr_ref)
return sr_ref
def forget_sr(session, sr_ref):
"""
Forgets the storage repository without destroying the VDIs within
"""
LOG.debug(_('Forgetting SR...'))
unplug_pbds(session, sr_ref)
session.call_xenapi("SR.forget", sr_ref)
def find_sr_by_uuid(session, sr_uuid):
"""
Return the storage repository given a uuid.
"""
for sr_ref, sr_rec in session.get_all_refs_and_recs('SR'):
if sr_rec['uuid'] == sr_uuid:
return sr_ref
return None
def find_sr_from_vbd(session, vbd_ref):
"""Find the SR reference from the VBD reference."""
try:
vdi_ref = session.call_xenapi("VBD.get_VDI", vbd_ref)
sr_ref = session.call_xenapi("VDI.get_SR", vdi_ref)
except session.XenAPI.Failure as exc:
LOG.exception(exc)
raise StorageError(_('Unable to find SR from VBD %s') % vbd_ref)
return sr_ref
def create_pbd(session, sr_ref, params):
pbd_rec = {}
pbd_rec['host'] = session.get_xenapi_host()
pbd_rec['SR'] = sr_ref
pbd_rec['device_config'] = params
pbd_ref = session.call_xenapi("PBD.create", pbd_rec)
return pbd_ref
def unplug_pbds(session, sr_ref):
try:
pbds = session.call_xenapi("SR.get_PBDs", sr_ref)
except session.XenAPI.Failure as exc:
LOG.warn(_('Ignoring exception %(exc)s when getting PBDs'
' for %(sr_ref)s'), {'exc': exc, 'sr_ref': sr_ref})
return
for pbd in pbds:
try:
session.call_xenapi("PBD.unplug", pbd)
except session.XenAPI.Failure as exc:
LOG.warn(_('Ignoring exception %(exc)s when unplugging'
' PBD %(pbd)s'), {'exc': exc, 'pbd': pbd})
def introduce_vdi(session, sr_ref, vdi_uuid=None, target_lun=None):
"""Introduce VDI in the host."""
try:
session.call_xenapi("SR.scan", sr_ref)
if vdi_uuid:
LOG.debug("vdi_uuid: %s" % vdi_uuid)
vdi_ref = session.call_xenapi("VDI.get_by_uuid", vdi_uuid)
elif target_lun:
vdi_refs = session.call_xenapi("SR.get_VDIs", sr_ref)
for curr_ref in vdi_refs:
curr_rec = session.call_xenapi("VDI.get_record", curr_ref)
if ('sm_config' in curr_rec and
'LUNid' in curr_rec['sm_config'] and
curr_rec['sm_config']['LUNid'] == str(target_lun)):
vdi_ref = curr_ref
break
else:
vdi_ref = (session.call_xenapi("SR.get_VDIs", sr_ref))[0]
except session.XenAPI.Failure as exc:
LOG.exception(exc)
raise StorageError(_('Unable to introduce VDI on SR %s') % sr_ref)
try:
vdi_rec = session.call_xenapi("VDI.get_record", vdi_ref)
LOG.debug(vdi_rec)
LOG.debug(type(vdi_rec))
except session.XenAPI.Failure as exc:
LOG.exception(exc)
raise StorageError(_('Unable to get record'
' of VDI %s on') % vdi_ref)
if vdi_rec['managed']:
# We do not need to introduce the vdi
return vdi_ref
try:
return session.call_xenapi("VDI.introduce",
vdi_rec['uuid'],
vdi_rec['name_label'],
vdi_rec['name_description'],
vdi_rec['SR'],
vdi_rec['type'],
vdi_rec['sharable'],
vdi_rec['read_only'],
vdi_rec['other_config'],
vdi_rec['location'],
vdi_rec['xenstore_data'],
vdi_rec['sm_config'])
except session.XenAPI.Failure as exc:
LOG.exception(exc)
raise StorageError(_('Unable to introduce VDI for SR %s')
% sr_ref)
def purge_sr(session, sr_ref):
# Make sure no VBDs are referencing the SR VDIs
vdi_refs = session.call_xenapi("SR.get_VDIs", sr_ref)
for vdi_ref in vdi_refs:
vbd_refs = session.call_xenapi("VDI.get_VBDs", vdi_ref)
if vbd_refs:
LOG.warn(_('Cannot purge SR with referenced VDIs'))
return
forget_sr(session, sr_ref)
def get_device_number(mountpoint):
device_number = mountpoint_to_number(mountpoint)
if device_number < 0:
raise StorageError(_('Unable to obtain target information %s') %
mountpoint)
return device_number
def parse_sr_info(connection_data, description=''):
label = connection_data.pop('name_label',
'tempSR-%s' % connection_data.get('volume_id'))
params = {}
if 'sr_uuid' not in connection_data:
params = parse_volume_info(connection_data)
# This magic label sounds a lot like 'False Disc' in leet-speak
uuid = "FA15E-D15C-" + str(params['id'])
else:
uuid = connection_data['sr_uuid']
for k in connection_data.get('introduce_sr_keys', {}):
params[k] = connection_data[k]
params['name_description'] = connection_data.get('name_description',
description)
return (uuid, label, params)
def parse_volume_info(connection_data):
"""
Parse device_path and mountpoint as they can be used by XenAPI.
In particular, the mountpoint (e.g. /dev/sdc) must be translated
into a numeric literal.
"""
volume_id = connection_data['volume_id']
target_portal = connection_data['target_portal']
target_host = _get_target_host(target_portal)
target_port = _get_target_port(target_portal)
target_iqn = connection_data['target_iqn']
log_params = {
"vol_id": volume_id,
"host": target_host,
"port": target_port,
"iqn": target_iqn
}
LOG.debug(_('(vol_id,host,port,iqn): '
'(%(vol_id)s,%(host)s,%(port)s,%(iqn)s)'), log_params)
if (volume_id is None or
target_host is None or
target_iqn is None):
raise StorageError(_('Unable to obtain target information'
' %s') % connection_data)
volume_info = {}
volume_info['id'] = volume_id
volume_info['target'] = target_host
volume_info['port'] = target_port
volume_info['targetIQN'] = target_iqn
if ('auth_method' in connection_data and
connection_data['auth_method'] == 'CHAP'):
volume_info['chapuser'] = connection_data['auth_username']
volume_info['chappassword'] = connection_data['auth_password']
return volume_info
def mountpoint_to_number(mountpoint):
"""Translate a mountpoint like /dev/sdc into a numeric."""
if mountpoint.startswith('/dev/'):
mountpoint = mountpoint[5:]
if re.match('^[hs]d[a-p]$', mountpoint):
return (ord(mountpoint[2:3]) - ord('a'))
elif re.match('^x?vd[a-p]$', mountpoint):
return (ord(mountpoint[-1]) - ord('a'))
elif re.match('^[0-9]+$', mountpoint):
return string.atoi(mountpoint, 10)
else:
LOG.warn(_('Mountpoint cannot be translated: %s'), mountpoint)
return -1
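# Examples of the translation above:
#     mountpoint_to_number('/dev/sdc')   # -> 2
#     mountpoint_to_number('/dev/xvdb')  # -> 1
#     mountpoint_to_number('3')          # -> 3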
def _get_volume_id(path_or_id):
"""Retrieve the volume id from device_path."""
# If we have the ID and not a path, just return it.
if isinstance(path_or_id, int):
return path_or_id
# n must contain at least the volume_id
# :volume- is for remote volumes
# -volume- is for local volumes
# see compute/manager->setup_compute_volume
volume_id = path_or_id[path_or_id.find(':volume-') + 1:]
if volume_id == path_or_id:
volume_id = path_or_id[path_or_id.find('-volume--') + 1:]
volume_id = volume_id.replace('volume--', '')
else:
volume_id = volume_id.replace('volume-', '')
volume_id = volume_id[0:volume_id.find('-')]
return int(volume_id)
def _get_target_host(iscsi_string):
"""Retrieve target host."""
if iscsi_string:
return iscsi_string[0:iscsi_string.find(':')]
elif iscsi_string is None or CONF.target_host:
return CONF.target_host
def _get_target_port(iscsi_string):
"""Retrieve target port."""
if iscsi_string:
return iscsi_string[iscsi_string.find(':') + 1:]
elif iscsi_string is None or CONF.target_port:
return CONF.target_port
|
plumgrid/plumgrid-nova
|
nova/virt/xenapi/volume_utils.py
|
Python
|
apache-2.0
| 10,644
|
# Copyright 2016-2017 Florian Pigorsch & Contributors. All rights reserved.
#
# Use of this source code is governed by a MIT-style
# license that can be found in the LICENSE file.
from . import utils
class TracksDrawer:
def __init__(self):
self.poster = None
def draw(self, poster, d, w, h, offset_x, offset_y):
self.poster = poster
size, (count_x, count_y) = self.__compute_grid(len(self.poster.tracks), w, h)
spacing_x = 0 if count_x <= 1 else (w-size*count_x)/(count_x - 1)
spacing_y = 0 if count_y <= 1 else (h-size*count_y)/(count_y - 1)
offset_x += (w - count_x*size - (count_x - 1)*spacing_x)/2
offset_y += (h - count_y*size - (count_y - 1)*spacing_y)/2
for (index, track) in enumerate(self.poster.tracks):
x = index % count_x
y = index // count_x
color = self.poster.colors['special'] if track.special else self.poster.colors['track']
self.__draw_track(d, track, offset_x+(0.05 + x)*size+x*spacing_x, offset_y+(0.05+y)*size+y*spacing_y, 0.9 * size, 0.9 * size, color)
def __compute_grid(self, count, width, height):
# this is somehow suboptimal O(count^2). I guess it's possible in O(count)
min_waste = -1
best_counts = None
best_size = None
for count_x in range(1, count+1):
size_x = width/count_x
for count_y in range(1, count+1):
if count_x * count_y >= count:
size_y = height/count_y
size = min(size_x, size_y)
waste = width*height - count*size*size
if waste < 0:
continue
elif best_size is None or waste < min_waste:
best_size = size
best_counts = count_x, count_y
min_waste = waste
return best_size, best_counts
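# For example, 3 tracks on a 200x100 canvas yield a 3x1 grid with square size
# ~66.7, the arrangement that fits all tracks while wasting the least area.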
def __draw_track(self, d, track, x_offset, y_offset, width, height, color):
# compute mercator projection of track segments
lines = []
for polyline in track.polylines:
lines.append([utils.latlng2xy(lat, lng) for (lat, lng) in polyline])
# compute bounds
(min_x, min_y, max_x, max_y) = utils.compute_bounds_xy(lines)
d_x = max_x - min_x
d_y = max_y - min_y
# compute scale
scale = width/d_x
if width/height > d_x/d_y:
scale = height/d_y
# compute offsets such that projected track is centered in its rect
x_offset += 0.5 * width - 0.5 * scale * d_x
y_offset += 0.5 * height - 0.5 * scale * d_y
scaled_lines = []
for line in lines:
scaled_line = []
for (x, y) in line:
scaled_x = x_offset + scale * (x - min_x)
scaled_y = y_offset + scale * (y - min_y)
scaled_line.append((scaled_x, scaled_y))
scaled_lines.append(scaled_line)
for line in scaled_lines:
d.add(d.polyline(points=line, stroke=color, fill='none', stroke_width=0.5, stroke_linejoin='round', stroke_linecap='round'))
|
lefty01/GpxTrackPoster
|
src/grid_drawer.py
|
Python
|
mit
| 3,173
|
#!/usr/bin/env python
##################################################
## DEPENDENCIES
import sys
import os
import os.path
try:
import builtins as builtin
except ImportError:
import __builtin__ as builtin
from os.path import getmtime, exists
import time
import types
from Cheetah.Version import MinCompatibleVersion as RequiredCheetahVersion
from Cheetah.Version import MinCompatibleVersionTuple as RequiredCheetahVersionTuple
from Cheetah.Template import Template
from Cheetah.DummyTransaction import *
from Cheetah.NameMapper import NotFound, valueForName, valueFromSearchList, valueFromFrameOrSearchList
from Cheetah.CacheRegion import CacheRegion
import Cheetah.Filters as Filters
import Cheetah.ErrorCatchers as ErrorCatchers
##################################################
## MODULE CONSTANTS
VFFSL=valueFromFrameOrSearchList
VFSL=valueFromSearchList
VFN=valueForName
currentTime=time.time
__CHEETAH_version__ = '2.4.4'
__CHEETAH_versionTuple__ = (2, 4, 4, 'development', 0)
__CHEETAH_genTime__ = 1447321436.287975
__CHEETAH_genTimestamp__ = 'Thu Nov 12 18:43:56 2015'
__CHEETAH_src__ = '/home/knuth/openpli-oe-core/build/tmp/work/fusionhd-oe-linux/enigma2-plugin-extensions-openwebif/1+gitAUTOINC+5837c87afc-r0/git/plugin/controllers/views/web/loadepg.tmpl'
__CHEETAH_srcLastModified__ = 'Thu Nov 12 18:43:41 2015'
__CHEETAH_docstring__ = 'Autogenerated by Cheetah: The Python-Powered Template Engine'
if __CHEETAH_versionTuple__ < RequiredCheetahVersionTuple:
raise AssertionError(
'This template was compiled with Cheetah version'
' %s. Templates compiled before version %s must be recompiled.'%(
__CHEETAH_version__, RequiredCheetahVersion))
##################################################
## CLASSES
class loadepg(Template):
##################################################
## CHEETAH GENERATED METHODS
def __init__(self, *args, **KWs):
super(loadepg, self).__init__(*args, **KWs)
if not self._CHEETAH__instanceInitialized:
cheetahKWArgs = {}
allowedKWs = 'searchList namespaces filter filtersLib errorCatcher'.split()
for k,v in KWs.items():
if k in allowedKWs: cheetahKWArgs[k] = v
self._initCheetahInstance(**cheetahKWArgs)
def respond(self, trans=None):
## CHEETAH: main method generated for this template
if (not trans and not self._CHEETAH__isBuffering and not callable(self.transaction)):
trans = self.transaction # is None unless self.awake() was called
if not trans:
trans = DummyTransaction()
_dummyTrans = True
else: _dummyTrans = False
write = trans.response().write
SL = self._CHEETAH__searchList
_filter = self._CHEETAH__currentFilter
########################################
## START - generated method body
_orig_filter_48542765 = _filter
filterName = u'WebSafe'
if self._CHEETAH__filters.has_key("WebSafe"):
_filter = self._CHEETAH__currentFilter = self._CHEETAH__filters[filterName]
else:
_filter = self._CHEETAH__currentFilter = \
self._CHEETAH__filters[filterName] = getattr(self._CHEETAH__filtersLib, filterName)(self).filter
write(u'''<?xml version="1.0" encoding="UTF-8"?>
<e2simplexmlresult>
\t<e2state>''')
_v = VFFSL(SL,"result",True) # u'$result' on line 4, col 11
if _v is not None: write(_filter(_v, rawExpr=u'$result')) # from line 4, col 11.
write(u'''</e2state>
\t<e2statetext>''')
_v = VFFSL(SL,"message",True) # u'$message' on line 5, col 15
if _v is not None: write(_filter(_v, rawExpr=u'$message')) # from line 5, col 15.
write(u'''</e2statetext>\t
</e2simplexmlresult>
''')
_filter = self._CHEETAH__currentFilter = _orig_filter_48542765
########################################
## END - generated method body
return _dummyTrans and trans.response().getvalue() or ""
##################################################
## CHEETAH GENERATED ATTRIBUTES
_CHEETAH__instanceInitialized = False
_CHEETAH_version = __CHEETAH_version__
_CHEETAH_versionTuple = __CHEETAH_versionTuple__
_CHEETAH_genTime = __CHEETAH_genTime__
_CHEETAH_genTimestamp = __CHEETAH_genTimestamp__
_CHEETAH_src = __CHEETAH_src__
_CHEETAH_srcLastModified = __CHEETAH_srcLastModified__
_mainCheetahMethod_for_loadepg= 'respond'
## END CLASS DEFINITION
if not hasattr(loadepg, '_initCheetahAttributes'):
templateAPIClass = getattr(loadepg, '_CHEETAH_templateClass', Template)
templateAPIClass._addCheetahPlumbingCodeToClass(loadepg)
# CHEETAH was developed by Tavis Rudd and Mike Orr
# with code, advice and input from many other volunteers.
# For more information visit http://www.CheetahTemplate.org/
##################################################
## if run from command line:
if __name__ == '__main__':
from Cheetah.TemplateCmdLineIface import CmdLineIface
CmdLineIface(templateObj=loadepg()).run()
|
pli3/e2-openwbif
|
plugin/controllers/views/web/loadepg.py
|
Python
|
gpl-2.0
| 5,152
|
from tests.unit.fixtures import mock_commands_po as mock_po
def test_get_value_method_delegates_to_webelement_with_correct_parameter(mock_po):
correct_parameter = 'value'
mock_po.webelement.get_attribute = lambda attr, log=True: attr
assert mock_po.get_value() == correct_parameter
|
lukas-linhart/pageobject
|
tests/unit/commands/test_get_value.py
|
Python
|
mit
| 297
|
"""Command-line user interface of igraph
The command-line interface launches a Python shell with the igraph
module automatically imported into the main namespace. This is mostly a
convenience module and it is used only by the C{igraph} command line
script which executes a suitable Python shell and automatically imports
C{igraph}'s classes and functions in the top-level namespace.
Supported Python shells are:
- IDLE shell (class L{IDLEShell})
- IPython shell (class L{IPythonShell})
- Classic Python shell (class L{ClassicPythonShell})
The shells are tried in the above mentioned preference order one by
one, unless the C{global.shells} configuration key is set which
overrides the default order. IDLE shell is only tried in Windows
unless explicitly stated by C{global.shells}, since Linux and
Mac OS X users are likely to invoke igraph from the command line.
"""
# pylint: disable-msg=W0401
# W0401: wildcard import. That's exactly what we need for the shell.
from igraph import __version__, set_progress_handler, set_status_handler
from igraph.configuration import Configuration
import sys, re
# pylint: disable-msg=C0103,R0903
# C0103: invalid name. Disabled because this is a third-party class.
# R0903: too few public methods.
class TerminalController:
"""
A class that can be used to portably generate formatted output to
a terminal.
`TerminalController` defines a set of instance variables whose
values are initialized to the control sequence necessary to
perform a given action. These can be simply included in normal
output to the terminal:
>>> term = TerminalController()
>>> print 'This is '+term.GREEN+'green'+term.NORMAL
This is green
Alternatively, the `render()` method can be used, which replaces
'${action}' with the string required to perform 'action':
>>> term = TerminalController()
>>> print term.render('This is ${GREEN}green${NORMAL}')
This is green
If the terminal doesn't support a given action, then the value of
the corresponding instance variable will be set to ''. As a
result, the above code will still work on terminals that do not
support color, except that their output will not be colored.
Also, this means that you can test whether the terminal supports a
given action by simply testing the truth value of the
corresponding instance variable:
>>> term = TerminalController()
>>> if term.CLEAR_SCREEN:
... print 'This terminal supports clearing the screen.'
...
Finally, if the width and height of the terminal are known, then
they will be stored in the `COLS` and `LINES` attributes.
@author: Edward Loper
"""
# Cursor movement:
BOL = '' #: Move the cursor to the beginning of the line
UP = '' #: Move the cursor up one line
DOWN = '' #: Move the cursor down one line
LEFT = '' #: Move the cursor left one char
RIGHT = '' #: Move the cursor right one char
# Deletion:
CLEAR_SCREEN = '' #: Clear the screen and move to home position
CLEAR_EOL = '' #: Clear to the end of the line.
CLEAR_BOL = '' #: Clear to the beginning of the line.
CLEAR_EOS = '' #: Clear to the end of the screen
# Output modes:
BOLD = '' #: Turn on bold mode
BLINK = '' #: Turn on blink mode
DIM = '' #: Turn on half-bright mode
REVERSE = '' #: Turn on reverse-video mode
NORMAL = '' #: Turn off all modes
# Cursor display:
HIDE_CURSOR = '' #: Make the cursor invisible
SHOW_CURSOR = '' #: Make the cursor visible
# Terminal size:
COLS = None #: Width of the terminal (None for unknown)
LINES = None #: Height of the terminal (None for unknown)
# Foreground colors:
BLACK = BLUE = GREEN = CYAN = RED = MAGENTA = YELLOW = WHITE = ''
# Background colors:
BG_BLACK = BG_BLUE = BG_GREEN = BG_CYAN = ''
BG_RED = BG_MAGENTA = BG_YELLOW = BG_WHITE = ''
_STRING_CAPABILITIES = """
BOL=cr UP=cuu1 DOWN=cud1 LEFT=cub1 RIGHT=cuf1
CLEAR_SCREEN=clear CLEAR_EOL=el CLEAR_BOL=el1 CLEAR_EOS=ed BOLD=bold
BLINK=blink DIM=dim REVERSE=rev UNDERLINE=smul NORMAL=sgr0
HIDE_CURSOR=cinvis SHOW_CURSOR=cnorm""".split()
_COLORS = """BLACK BLUE GREEN CYAN RED MAGENTA YELLOW WHITE""".split()
_ANSICOLORS = "BLACK RED GREEN YELLOW BLUE MAGENTA CYAN WHITE".split()
def __init__(self, term_stream=sys.stdout):
"""
Create a `TerminalController` and initialize its attributes
with appropriate values for the current terminal.
`term_stream` is the stream that will be used for terminal
output; if this stream is not a tty, then the terminal is
assumed to be a dumb terminal (i.e., have no capabilities).
"""
# Curses isn't available on all platforms
try:
import curses
except ImportError:
return
# If the stream isn't a tty, then assume it has no capabilities.
if not term_stream.isatty():
return
# Check the terminal type. If we fail, then assume that the
# terminal has no capabilities.
try:
curses.setupterm()
except StandardError:
return
# Look up numeric capabilities.
self.COLS = curses.tigetnum('cols')
self.LINES = curses.tigetnum('lines')
# Look up string capabilities.
for capability in self._STRING_CAPABILITIES:
(attrib, cap_name) = capability.split('=')
setattr(self, attrib, self._tigetstr(cap_name) or '')
# Colors
set_fg = self._tigetstr('setf')
if set_fg:
for i, color in zip(range(len(self._COLORS)), self._COLORS):
setattr(self, color, curses.tparm(set_fg, i) or '')
set_fg_ansi = self._tigetstr('setaf')
if set_fg_ansi:
for i, color in zip(range(len(self._ANSICOLORS)), self._ANSICOLORS):
setattr(self, color, curses.tparm(set_fg_ansi, i) or '')
set_bg = self._tigetstr('setb')
if set_bg:
for i, color in zip(range(len(self._COLORS)), self._COLORS):
setattr(self, 'BG_'+color, curses.tparm(set_bg, i) or '')
set_bg_ansi = self._tigetstr('setab')
if set_bg_ansi:
for i, color in zip(range(len(self._ANSICOLORS)), self._ANSICOLORS):
setattr(self, 'BG_'+color, curses.tparm(set_bg_ansi, i) or '')
@staticmethod
def _tigetstr(cap_name):
"""Rewrites string capabilities to remove "delays" which are not
required for modern terminals"""
# String capabilities can include "delays" of the form "$<2>".
# For any modern terminal, we should be able to just ignore
# these, so strip them out.
import curses
cap = curses.tigetstr(cap_name) or ''
return re.sub(r'\$<\d+>[/*]?', '', cap)
def render(self, template):
"""
Replace each $-substitution in the given template string with
the corresponding terminal control string (if it's defined) or
'' (if it's not).
"""
return re.sub(r'\$\$|\${\w+}', self._render_sub, template)
def _render_sub(self, match):
"""Helper function for L{render}"""
s = match.group()
if s == '$$':
return s
else:
return getattr(self, s[2:-1])
class ProgressBar:
"""
A 2-line progress bar, which looks like::
Header
20% [===========----------------------------------]
The progress bar is colored, if the terminal supports color
output; and adjusts to the width of the terminal.
"""
BAR = '%3d%% ${GREEN}[${BOLD}%s%s${NORMAL}${GREEN}]${NORMAL}'
HEADER = '${BOLD}${CYAN}%s${NORMAL}\n'
def __init__(self, term):
self.term = term
if not (self.term.CLEAR_EOL and self.term.UP and self.term.BOL):
raise ValueError("Terminal isn't capable enough -- you "
"should use a simpler progress display.")
self.width = self.term.COLS or 75
self.progress_bar = term.render(self.BAR)
self.header = self.term.render(self.HEADER % "".center(self.width))
self.cleared = True #: true if we haven't drawn the bar yet.
self.last_percent = 0
self.last_message = ""
def update(self, percent=None, message=None):
"""Updates the progress bar.
@param percent: the percentage to be shown. If C{None}, the previous
value will be used.
@param message: the message to be shown above the progress bar. If
C{None}, the previous message will be used.
"""
if self.cleared:
sys.stdout.write("\n"+self.header)
self.cleared = False
if message is None:
message = self.last_message
else:
self.last_message = message
if percent is None:
percent = self.last_percent
else:
self.last_percent = percent
n = int((self.width-10)*(percent/100.0))
sys.stdout.write(
self.term.BOL + self.term.UP + self.term.UP + self.term.CLEAR_EOL +
self.term.render(self.HEADER % message.center(self.width)) +
(self.progress_bar % (percent, '='*n, '-'*(self.width-10-n))) + "\n"
)
def update_message(self, message):
"""Updates the message of the progress bar.
@param message: the message to be shown above the progress bar
"""
return self.update(message=message.strip())
def clear(self):
"""Clears the progress bar (i.e. removes it from the screen)"""
if not self.cleared:
sys.stdout.write(self.term.BOL + self.term.CLEAR_EOL +
self.term.UP + self.term.CLEAR_EOL +
self.term.UP + self.term.CLEAR_EOL)
self.cleared = True
self.last_percent = 0
self.last_message = ""
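# Minimal usage sketch for TerminalController and ProgressBar above (illustrative only):
#   term = TerminalController()
#   bar = ProgressBar(term)
#   bar.update(40, "Working")
#   bar.clear()
# ProgressBar raises ValueError in its constructor when the terminal lacks the
# required capabilities; the shell mixins below rely on that to decide whether a
# progress handler can be installed.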
class Shell(object):
"""Superclass of the embeddable shells supported by igraph"""
def __init__(self):
pass
def __call__(self):
raise NotImplementedError("abstract class")
def supports_progress_bar(self):
"""Checks whether the shell supports progress bars.
This is done by checking for the existence of an attribute
called C{_progress_handler}."""
return hasattr(self, "_progress_handler")
def supports_status_messages(self):
"""Checks whether the shell supports status messages.
This is done by checking for the existence of an attribute
called C{_status_handler}."""
return hasattr(self, "_status_handler")
# pylint: disable-msg=E1101
def get_progress_handler(self):
"""Returns the progress handler (if exists) or None (if not)."""
if self.supports_progress_bar():
return self._progress_handler
return None
# pylint: disable-msg=E1101
def get_status_handler(self):
"""Returns the status handler (if exists) or None (if not)."""
if self.supports_status_messages():
return self._status_handler
return None
class IDLEShell(Shell):
"""IDLE embedded shell interface.
This class allows igraph to be embedded in IDLE (the Tk Python IDE).
@todo: no progress bar support yet. Shell/Restart Shell command should
re-import igraph again."""
def __init__(self):
"""Constructor.
Imports IDLE's embedded shell. The implementation of this method is
ripped from idlelib.PyShell.main() after removing the unnecessary
parts."""
Shell.__init__(self)
import idlelib.PyShell
idlelib.PyShell.use_subprocess = True
try:
sys.ps1
except AttributeError:
sys.ps1 = '>>> '
root = idlelib.PyShell.Tk(className="Idle")
idlelib.PyShell.fixwordbreaks(root)
root.withdraw()
flist = idlelib.PyShell.PyShellFileList(root)
if not flist.open_shell():
raise NotImplementedError
self._shell = flist.pyshell
self._root = root
def __call__(self):
"""Starts the shell"""
self._shell.interp.execsource("from igraph import *")
self._root.mainloop()
self._root.destroy()
class ConsoleProgressBarMixin(object):
"""Mixin class for console shells that support a progress bar."""
def __init__(self):
try:
self.__class__.progress_bar = ProgressBar(TerminalController())
except ValueError:
# Terminal is not capable enough, disable progress handler
del self.__class__._progress_handler
del self.__class__._status_handler
@classmethod
def _progress_handler(cls, message, percentage):
"""Progress bar handler, called when C{igraph} reports the progress
of an operation
@param message: message provided by C{igraph}
@param percentage: percentage provided by C{igraph}
"""
if percentage >= 100:
cls.progress_bar.clear()
else:
cls.progress_bar.update(percentage, message)
@classmethod
def _status_handler(cls, message):
"""Status message handler, called when C{igraph} sends a status
message to be displayed.
@param message: message provided by C{igraph}
"""
cls.progress_bar.update_message(message)
class IPythonShell(Shell, ConsoleProgressBarMixin):
"""IPython embedded shell interface.
This class allows igraph to be embedded in IPython's interactive shell."""
def __init__(self):
"""Constructor.
Imports IPython's embedded shell with separator lines removed."""
Shell.__init__(self)
ConsoleProgressBarMixin.__init__(self)
# We cannot use IPShellEmbed here because generator expressions do not
# work there (e.g., set(g.degree(x) for x in [1,2,3])) where g comes
# from an external context
import sys
from IPython import __version__ as ipython_version
self.ipython_version = ipython_version
try:
# IPython >= 0.11 supports this
from IPython.frontend.terminal.ipapp import TerminalIPythonApp
self._shell = TerminalIPythonApp.instance()
sys.argv.append("--nosep")
except ImportError:
# IPython 0.10 and earlier
import IPython.Shell
self._shell = IPython.Shell.start()
self._shell.IP.runsource("from igraph import *")
sys.argv.append("-nosep")
def __call__(self):
"""Starts the embedded shell."""
print "igraph %s running inside " % __version__,
if self._shell.__class__.__name__ == "TerminalIPythonApp":
self._shell.initialize()
self._shell.shell.ex("from igraph import *")
self._shell.start()
else:
self._shell.mainloop()
class ClassicPythonShell(Shell, ConsoleProgressBarMixin):
"""Classic Python shell interface.
This class allows igraph to be embedded in Python's shell."""
def __init__(self):
"""Constructor.
Imports Python's classic shell"""
Shell.__init__(self)
self._shell = None
try:
self.__class__.progress_bar = ProgressBar(TerminalController())
except ValueError:
# Terminal is not capable enough, disable progress handler
del self.__class__._progress_handler
def __call__(self):
"""Starts the embedded shell."""
if self._shell is None:
from code import InteractiveConsole
self._shell = InteractiveConsole()
print >> sys.stderr, "igraph %s running inside " % __version__,
self._shell.runsource("from igraph import *")
self._shell.interact()
def main():
"""The main entry point for igraph when invoked from the command
line shell"""
config = Configuration.instance()
if config.filename:
print >> sys.stderr, "Using configuration from %s" % config.filename
else:
print >> sys.stderr, "No configuration file, using defaults"
if config.has_key("shells"):
parts = [part.strip() for part in config["shells"].split(",")]
shell_classes = []
available_classes = dict([(k, v) for k, v in globals().iteritems() \
if isinstance(v, type) and issubclass(v, Shell)])
for part in parts:
klass = available_classes.get(part, None)
if klass is None:
print >> sys.stderr, "Warning: unknown shell class `%s'" % part
continue
shell_classes.append(klass)
else:
shell_classes = [IPythonShell, ClassicPythonShell]
import platform
if platform.system() == "Windows":
shell_classes.insert(0, IDLEShell)
shell = None
for shell_class in shell_classes:
# pylint: disable-msg=W0703
# W0703: catch "Exception"
try:
shell = shell_class()
break
except StandardError:
# Try the next one
pass
if isinstance(shell, Shell):
if config["verbose"]:
if shell.supports_progress_bar():
set_progress_handler(shell.get_progress_handler())
if shell.supports_status_messages():
set_status_handler(shell.get_status_handler())
shell()
else:
print >> sys.stderr, "No suitable Python shell was found."
print >> sys.stderr, "Check configuration variable `general.shells'."
if __name__ == '__main__':
sys.exit(main())
|
janschulz/igraph
|
interfaces/python/igraph/app/shell.py
|
Python
|
gpl-2.0
| 18,023
|
"""
Copyright 2014-2021 University of Illinois
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
file: populate/pop_wcag20.py
Author: Jon Gunderson
"""
from __future__ import print_function
from __future__ import absolute_import
import sys
import os
import django
from django.core.exceptions import ObjectDoesNotExist
fp = os.path.realpath(__file__)
path, filename = os.path.split(fp)
fae2_path = path.split('/populate')[0]
sys.path.append(fae2_path)
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'fae2.settings')
from django.conf import settings
django.setup()
from wcag20.models import Principle, Guideline, SuccessCriterion
"""This file is for populating the database with WCAG 2.0 References"""
# Principle.objects.all().delete()
# Guideline.objects.all().delete()
# SuccessCriterion.objects.all().delete()
import json
json_data=open(os.path.join(path,'wcag.json'))
data = json.load(json_data)
json_data.close()
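# Illustrative shape of wcag.json assumed by the functions below (keys taken from
# the accesses in wcag(); the concrete titles and URLs are hypothetical):
# {"principles": {"1": {"title": "...", "url_spec": "...",
#     "guidelines": {"1.1": {"title": "...", "url_spec": "...#slug",
#         "success_criteria": {"1.1.1": {"title": "...", "level": "A",
#             "url_spec": "...", "url_meet": "...", "url_understand": "..."}}}}}}}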
def wcag_principle(num, title, url):
try:
principle = Principle.objects.get(num=num)
print(" " + title + " (found)")
principle.title = title
principle.url = url
print(" " + title + " (updated) ")
except:
principle = Principle(num=num, title=title, url=url)
print(" " + principle.title + " (CREATED)")
principle.save()
return principle
def wcag_guideline(p_obj, num, title, url, slug):
try:
guideline = Guideline.objects.get(principle=p_obj, num=num)
print(" " + title + " (found)")
guideline.title = title
guideline.url = url
guideline.slug = 'p' + str(p_obj.num) + 'g' + str(num)
print(" " + title + " (updated)")
except:
guideline = Guideline(principle=p_obj, num=num, title=title, url=url, slug=slug)
print(" " + title + " (CREATED)")
guideline.save()
return guideline
def wcag_success_criterion(g_obj, num, title, level, url, url_meet, url_understand, slug):
try:
sc = SuccessCriterion.objects.get(guideline=g_obj, num=num)
print(" " + title + " (found)")
sc.title = title
sc.url = url
sc.url_meet = url_meet
sc.url_understand = url_understand
sc.level = level
sc.slug = g_obj.slug + 'sc' + str(num)
print(" " + title + " (updated)")
except:
sc = SuccessCriterion(guideline=g_obj, num=num, title=title, url=url, url_meet=url_meet, url_understand=url_understand, level=level, slug=slug)
print(" " + title + " (CREATED)")
sc.save()
return sc
def wcag():
for p in data['principles']:
print('[Principle]: ' + p);
p_data = data['principles'][p];
p_num = int(p)
p_title = p_data['title']
p_url = p_data['url_spec'];
p_obj = wcag_principle(p_num, p_title, p_url);
for g in p_data['guidelines']:
print(' [Guideline]: ' + g);
g_data = p_data['guidelines'][g];
g_num = int(g.split('.')[1])
g_title = g_data['title']
g_url = g_data['url_spec'];
g_slug = g_url.split('#')[1];
# currently do not support rules for Guideline 2.5
if p_num == 2 and g_num >= 5:
continue
g_obj = wcag_guideline(p_obj, g_num, g_title, g_url, g_slug);
for sc in g_data['success_criteria']:
print(' [Success Criteria]: ' + sc);
sc_data = g_data['success_criteria'][sc];
sc_num = int(sc.split('.')[2])
sc_title = sc_data['title']
sc_url = sc_data['url_spec'];
sc_url_meet = sc_data['url_meet'];
sc_url_understand = sc_data['url_understand'];
sc_slug = sc_url.split('#')[1];
sc_level = sc_data['level'];
if sc_level.find('AAA') >= 0:
sc_level = '3';
else:
if sc_level.find('AA') >= 0:
sc_level = '2';
else:
sc_level = '1';
# print(' [ num]: ' + str(sc_num))
# print(' [ title]: ' + sc_title)
# print(' [ level]: ' + sc_level)
# print(' [ url]: ' + sc_url)
# print(' [ meet]: ' + sc_url_meet)
# print(' [underst]: ' + sc_url_understand)
# print(' [ slug]: ' + sc_slug)
if len(sc_slug) > 32:
sc_slug = sc_slug[0:32]
wcag_success_criterion(g_obj, sc_num, sc_title, sc_level, sc_url, sc_url_meet, sc_url_understand, sc_slug);
wcag()
|
opena11y/fae2
|
fae2/populate/pop_wcag.py
|
Python
|
apache-2.0
| 4,765
|
# -*- coding: utf8 -*-
"""
Initialization of the graphics class.
"""
class ClsGUI(object):
"""
The main GUI class for the application. It is the attachment point for all
graphics resources.
"""
def __init__(self, root=None):
"""
Imports all available windows.
:param root:
:return:
"""
self.__root = root
# import the main window class
from pak_pc.pak_gui.pak_win_main.mod_win_main import ClsWinMain
self.win_main = ClsWinMain(root=self.__root)
# import the "About" window
from pak_pc.pak_gui.pak_win_about.mod_win_about import ClsWinAbout
self.win_about = ClsWinAbout(root=self.__root)
# import the license window
from pak_pc.pak_gui.pak_win_license.mod_win_license import ClsWinLicense
self.win_license = ClsWinLicense(root=self.__root)
# import the virtual computer screen window
from pak_pc.pak_gui.pak_win_screen.mod_win_screen import ClsWinScreen
self.win_screen = ClsWinScreen(root=self.__root)
# import the window for creating a new disk
from pak_pc.pak_gui.pak_win_create_disk.mod_win_create_disk \
import ClsWinCreateDisk
self.win_create_disk = ClsWinCreateDisk(root=self.__root)
# import the disk creation/mounting window (IDC)
from pak_pc.pak_gui.pak_win_idc.mod_win_idc import ClsWinIDC
self.win_idc = ClsWinIDC(root=self.__root)
# import the window for editing the settings of the software
# interrupt register (winBP)
from pak_pc.pak_gui.pak_win_edit_bp.mod_win_edit_bp import ClsWinEditBP
self.win_edit_bp = ClsWinEditBP(root=self.__root)
def run(self):
"""
Loads the graphics; this must be started last!!!
"""
# start the endless loop
self.win_main.begin()
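# Illustrative usage (an assumption, not part of the original file):
#   gui = ClsGUI(root=some_root_object)
#   gui.run()
# All windows are created in the constructor; run() then enters win_main's loop.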
|
prospero78/pyPC
|
pak_pc/pak_gui/mod_gui.py
|
Python
|
lgpl-3.0
| 2,287
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# Copyright © 2014 Martin Ueding <dev@martin-ueding.de>
# Licensed under The Lesser GNU Public License Version 2 (or later)
from setuptools import setup, find_packages
setup(
author="David Pine",
description="Least squares linear fit for numpy library of Python",
license="LGPL2",
name="linfit",
packages=find_packages(),
install_requires=[
'numpy',
],
url="https://github.com/djpine/linfit",
download_url="https://github.com/djpine/linfit",
version="2014.9.3",
)
|
djpine/linfit
|
setup.py
|
Python
|
lgpl-2.1
| 560
|
from __future__ import absolute_import
from .base import Filter
import re
from sentry.utils.data_filters import FilterStatKeys
EXTENSION_EXC_VALUES = re.compile(
'|'.join(
(
re.escape(x)
for x in (
# Random plugins/extensions
'top.GLOBALS',
# See: http://blog.errorception.com/2012/03/tale-of-unfindable-js-error.html
'originalCreateNotification',
'canvas.contentDocument',
'MyApp_RemoveAllHighlights',
'http://tt.epicplay.com',
'Can\'t find variable: ZiteReader',
'jigsaw is not defined',
'ComboSearch is not defined',
'http://loading.retry.widdit.com/',
'atomicFindClose',
# Facebook borked
'fb_xd_fragment',
# ISP "optimizing" proxy - `Cache-Control: no-transform` seems to
# reduce this. (thanks @acdha)
# See http://stackoverflow.com/questions/4113268
'bmi_SafeAddOnload',
'EBCallBackMessageReceived',
# See
# https://groups.google.com/a/chromium.org/forum/#!topic/chromium-discuss/7VU0_VvC7mE
'_gCrWeb',
# See http://toolbar.conduit.com/Debveloper/HtmlAndGadget/Methods/JSInjection.aspx
'conduitPage',
# Google Search app (iOS)
# See: https://github.com/getsentry/raven-js/issues/756
'null is not an object (evaluating \'elt.parentNode\')',
# Dragon Web Extension from Nuance Communications
# See: https://forum.sentry.io/t/error-in-raven-js-plugin-setsuspendstate/481/
'plugin.setSuspendState is not a function',
)
)
),
re.I
)
EXTENSION_EXC_SOURCES = re.compile(
'|'.join(
(
# Facebook flakiness
r'graph\.facebook\.com'
# Facebook blocked
r'connect\.facebook\.net\/en_US\/all\.js',
# Woopra flakiness
r'eatdifferent\.com\.woopra-ns\.com',
r'static\.woopra\.com\/js\/woopra\.js',
# Chrome extensions
r'^chrome(?:-extension)?:\/\/',
# Cacaoweb
r'127\.0\.0\.1:4001\/isrunning',
# Other
r'webappstoolbarba\.texthelp\.com\/',
r'metrics\.itunes\.apple\.com\.edgesuite\.net\/',
# Kaspersky Protection browser extension
r'kaspersky-labs\.com',
)
),
re.I
)
class BrowserExtensionsFilter(Filter):
id = FilterStatKeys.BROWSER_EXTENSION
name = 'Filter out errors known to be caused by browser extensions'
description = 'Certain browser extensions will inject inline scripts and are known to cause errors.'
def get_exception_value(self, data):
try:
return data['sentry.interfaces.Exception']['values'][0]['value']
except (LookupError, TypeError):
return ''
def get_exception_source(self, data):
try:
return data['sentry.interfaces.Exception']['values'][0]['stacktrace']['frames'
][-1]['abs_path']
except (LookupError, TypeError):
return ''
def test(self, data):
"""
Test the exception value to determine if it looks like the error is
caused by a common browser extension.
"""
if data.get('platform') != 'javascript':
return False
exc_value = self.get_exception_value(data)
if exc_value:
if EXTENSION_EXC_VALUES.search(exc_value):
return True
exc_source = self.get_exception_source(data)
if exc_source:
if EXTENSION_EXC_SOURCES.search(exc_source):
return True
return False
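# Illustrative example (an assumption, not part of the original file): an event like
#   {'platform': 'javascript',
#    'sentry.interfaces.Exception': {'values': [{'value': "Can't find variable: ZiteReader"}]}}
# matches EXTENSION_EXC_VALUES, so test() returns True and the event is filtered.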
|
ifduyue/sentry
|
src/sentry/filters/browser_extensions.py
|
Python
|
bsd-3-clause
| 3,977
|
import heapq
from .cfg import makeGraph, flattenDict
# Variables x and y can safely be merged when it is true that for any use of y (respectively x)
# that sees a definition of y, either there are no intervening definitions of x, or x was known
# to be equal to y *at the point of its most recent definition*
# Given this info, we greedily merge related variables, that is, those where one is assigned to the other
# to calculate which variables can be merged, we first have to build a CFG from the Java AST again
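# Illustrative example of the criterion above (not from the original source): in
#   x = f(); y = x; use(y); x = g(); use(y)
# the second use of y sees the definition "y = x" with an intervening definition of
# x, and x is no longer known to equal y there, so x and y must not be merged.
# Without the "x = g()" assignment the merge would be safe.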
class VarInfo(object):
__slots__ = "key", "defs", "rdefs", "extracount"
def __init__(self, key):
self.key = key
self.defs = set()
self.rdefs = set()
self.extracount = 0
def priority(self):
return (len(self.defs) + self.extracount), self.key
class EqualityData(object):
def __init__(self, d=None):
# Equal values point to a representative object instance. Singletons are not represented at all for efficiency
# None represents the top value (i.e. this point has not been visited yet)
self.d = d.copy() if d is not None else None
def _newval(self): return object()
def initialize(self): #initialize to bottom value (all variables unequal)
assert(self.d is None)
self.d = {}
def handleAssign(self, var1, var2=None):
if var1 == var2:
return
if var2 is None:
if var1 in self.d:
del self.d[var1]
else:
self.d[var1] = self.d.setdefault(var2, self._newval())
assert(self.iseq(var1, var2))
def iseq(self, var1, var2):
assert(var1 != var2)
return var1 in self.d and var2 in self.d and self.d[var1] is self.d[var2]
def merge_update(self, other):
if other.d is None:
return
elif self.d is None:
self.d = other.d.copy()
else:
d1, d2 = self.d, other.d
new = {}
todo = list(set(d1) & set(d2))
while todo:
cur = todo.pop()
matches = [k for k in todo if d1[k] is d1[cur] and d2[k] is d2[cur]]
if not matches:
continue
new[cur] = self._newval()
for k in matches:
new[k] = new[cur]
todo = [k for k in todo if k not in new]
self.d = new
def copy(self): return EqualityData(self.d)
def __eq__(self, other):
if self.d is None or other.d is None:
return self.d is other.d
if self.d == other.d:
return True
if set(self.d) != set(other.d):
return False
match = {}
for k in self.d:
if match.setdefault(self.d[k], other.d[k]) != other.d[k]:
return False
return True
def __ne__(self, other): return not self == other
def __hash__(self): raise TypeError('unhashable type')
def calcEqualityData(graph):
graph.simplify()
blocks = graph.blocks
d = {b:[EqualityData()] for b in blocks}
d[graph.entry][0].initialize()
stack = [graph.entry]
dirty = set(blocks)
while stack:
block = stack.pop()
if block not in dirty:
continue
dirty.remove(block)
cur = d[block][0].copy()
e_out = EqualityData()
del d[block][1:]
for line_t, data in block.lines:
if line_t == 'def':
cur.handleAssign(*data)
d[block].append(cur.copy())
elif line_t == 'canthrow':
e_out.merge_update(cur)
for out, successors in [(e_out, block.e_successors), (cur, block.n_successors)]:
stack += successors
for suc in successors:
old = d[suc][0].copy()
d[suc][0].merge_update(out)
if old != d[suc][0]:
dirty.add(suc)
for block in blocks:
assert(d[block][0].d is not None)
assert(not dirty)
return d
class VarMergeInfo(object):
def __init__(self, graph, methodparams, isstatic):
self.info = {}
self.final, self.unmergeable, self.external = set(), set(), set()
self.equality = None #to be calculated later
self.graph = graph
self.pending_graph_replaces = {}
self.touched_vars = set()
#initialize variables and assignment data
for var in methodparams:
self._addvar(var)
self.external.update(methodparams)
if not isstatic:
self.final.add(methodparams[0])
for block in graph.blocks:
for line_t, data in block.lines:
if line_t == 'def':
self._addassign(data[0], data[1])
for caught in block.caught_excepts:
self._addvar(caught)
self.external.add(caught)
self.unmergeable.add(caught)
#initialization helper funcs
def _addvar(self, v):
return self.info.setdefault(v, VarInfo(len(self.info)))
def _addassign(self, v1, v2):
info = self._addvar(v1)
if v2 is not None:
info.defs.add(v2)
self._addvar(v2).rdefs.add(v1)
else:
info.extracount += 1
#process helper funcs
def iseq(self, block, index, v1, v2):
return self.equality[block][index].iseq(v1, v2)
def _doGraphReplacements(self):
self.graph.replace(self.pending_graph_replaces)
self.pending_graph_replaces = {}
self.touched_vars = set()
def compat(self, v1, v2, doeq):
if v1 in self.touched_vars or v2 in self.touched_vars:
self._doGraphReplacements()
blocks = self.graph.blocks
vok = {b:3 for b in blocks} # use bitmask v1ok = 1<<0, v2ok = 1<<1
stack = [b for b in blocks if v1 in b.vars or v2 in b.vars]
while stack:
block = stack.pop()
cur = vok[block]
e_out = 3
if v1 in block.vars or v2 in block.vars:
defcount = 0
for line_t, data in block.lines:
if line_t == 'use':
if (data == v1 and not cur & 1) or (data == v2 and not cur & 2):
return False
elif line_t == 'def':
defcount += 1
if data[0] == v1 and data[1] != v1:
cur = 1
elif data[0] == v2 and data[1] != v2:
cur = 2
if doeq and self.iseq(block, defcount, v1, v2):
cur = 3
elif line_t == 'canthrow':
e_out &= cur
else:
#v1 and v2 not touched in this block, so there is nothing to do
e_out = cur
for out, successors in [(e_out, block.e_successors), (cur, block.n_successors)]:
for suc in successors:
if vok[suc] & out != vok[suc]:
stack.append(suc)
vok[suc] &= out
return True
def process(self, replace, doeq):
final, unmergeable, external = self.final, self.unmergeable, self.external
d = self.info
work_q = [(info.priority(), var) for var, info in d.items()]
heapq.heapify(work_q)
dirty = set(d) - external
while work_q:
_, cur = heapq.heappop(work_q)
if (cur in external) or cur not in dirty:
continue
dirty.remove(cur)
candidate_set = d[cur].defs - unmergeable
if len(d[cur].defs) > 1 or d[cur].extracount > 0:
candidate_set = candidate_set - final
candidates = [v for v in candidate_set if v.dtype == cur.dtype]
candidates = sorted(candidates, key=lambda v:d[v].key)
assert(cur not in candidates)
#find first candidate that is actually compatible
for parent in candidates:
if self.compat(cur, parent, doeq):
break
else:
continue #no candidates found
replace[cur] = parent
self.pending_graph_replaces[cur] = parent
self.touched_vars.add(cur)
self.touched_vars.add(parent)
infc, infp = d[cur], d[parent]
#Be careful, there could be a loop with cur in parent.defs
infc.defs.remove(parent)
infc.rdefs.discard(parent)
infp.rdefs.remove(cur)
infp.defs.discard(cur)
for var in d[cur].rdefs:
d[var].defs.remove(cur)
d[var].defs.add(parent)
heapq.heappush(work_q, (d[var].priority(), var))
for var in d[cur].defs:
d[var].rdefs.remove(cur)
d[var].rdefs.add(parent)
d[parent].defs |= d[cur].defs
d[parent].rdefs |= d[cur].rdefs
d[parent].extracount += d[cur].extracount
del d[cur]
heapq.heappush(work_q, (d[parent].priority(), parent))
dirty.add(parent)
def processMain(self, replace):
self.process(replace, False)
self._doGraphReplacements()
self.equality = calcEqualityData(self.graph)
self.process(replace, True)
###############################################################################
def mergeVariables(root, isstatic, parameters):
#first, create CFG from the Java AST
graph = makeGraph(root)
mergeinfo = VarMergeInfo(graph, parameters, isstatic)
replace = {}
mergeinfo.processMain(replace)
flattenDict(replace)
return replace
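# Illustrative use (an assumption): given the Java AST root of a method,
#   replace = mergeVariables(ast_root, isstatic=False, parameters=method_params)
# returns a dict mapping each merged variable to its surviving representative;
# flattenDict presumably collapses chains so entries point at the final target.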
|
difcareer/Krakatau
|
Krakatau/java/mergevariables.py
|
Python
|
gpl-3.0
| 9,778
|
from Components.config import config, ConfigSubsection, ConfigSubList, ConfigInteger, ConfigText, ConfigSelection, getConfigListEntry, ConfigSequence, ConfigYesNo
import TitleCutter
class ConfigFixedText(ConfigText):
def __init__(self, text, visible_width=60):
ConfigText.__init__(self, default = text, fixed_size = True, visible_width = visible_width)
def handleKey(self, key):
pass
class DVDTitle:
def __init__(self, project):
self.properties = ConfigSubsection()
self.properties.menutitle = ConfigText(fixed_size = False, visible_width = 80)
self.properties.menusubtitle = ConfigText(fixed_size = False, visible_width = 80)
self.properties.aspect = ConfigSelection(choices = [("4:3", _("4:3")), ("16:9", _("16:9"))])
self.properties.widescreen = ConfigSelection(choices = [("nopanscan", "nopanscan"), ("noletterbox", "noletterbox")])
self.properties.autochapter = ConfigInteger(default = 0, limits = (0, 60))
self.properties.audiotracks = ConfigSubList()
self.DVBname = _("Title")
self.DVBdescr = _("Description")
self.DVBchannel = _("Channel")
self.cuesheet = [ ]
self.source = None
self.filesize = 0
self.estimatedDiskspace = 0
self.inputfile = ""
self.cutlist = [ ]
self.chaptermarks = [ ]
self.timeCreate = None
self.VideoType = -1
self.project = project
self.length = 0
def addService(self, service):
from os import path
from enigma import eServiceCenter, iServiceInformation
from ServiceReference import ServiceReference
from time import localtime, time
self.source = service
serviceHandler = eServiceCenter.getInstance()
info = serviceHandler.info(service)
sDescr = info and info.getInfoString(service, iServiceInformation.sDescription) or ""
self.DVBdescr = sDescr
sTimeCreate = info.getInfo(service, iServiceInformation.sTimeCreate)
if sTimeCreate > 1:
self.timeCreate = localtime(sTimeCreate)
serviceref = ServiceReference(info.getInfoString(service, iServiceInformation.sServiceref))
name = info and info.getName(service) or "Title" + sDescr
self.DVBname = name
self.DVBchannel = serviceref.getServiceName()
self.inputfile = service.getPath()
self.filesize = path.getsize(self.inputfile)
self.estimatedDiskspace = self.filesize
self.length = info.getLength(service)
def addFile(self, filename):
from enigma import eServiceReference
ref = eServiceReference(1, 0, filename)
self.addService(ref)
self.project.session.openWithCallback(self.titleEditDone, TitleCutter.CutlistReader, self)
def titleEditDone(self, cutlist):
self.initDVDmenuText(len(self.project.titles))
self.cuesheet = cutlist
self.produceFinalCuesheet()
def initDVDmenuText(self, track):
s = self.project.menutemplate.settings
self.properties.menutitle.setValue(self.formatDVDmenuText(s.titleformat.getValue(), track))
self.properties.menusubtitle.setValue(self.formatDVDmenuText(s.subtitleformat.getValue(), track))
def formatDVDmenuText(self, template, track):
template = template.replace("$i", str(track))
template = template.replace("$t", self.DVBname)
template = template.replace("$d", self.DVBdescr)
template = template.replace("$c", str(len(self.chaptermarks)+1))
template = template.replace("$f", self.inputfile)
template = template.replace("$C", self.DVBchannel)
#if template.find("$A") >= 0:
from TitleProperties import languageChoices
audiolist = [ ]
for audiotrack in self.properties.audiotracks:
active = audiotrack.active.getValue()
if active:
trackstring = audiotrack.format.getValue()
language = audiotrack.language.getValue()
if languageChoices.langdict.has_key(language):
trackstring += ' (' + languageChoices.langdict[language] + ')'
audiolist.append(trackstring)
audiostring = ', '.join(audiolist)
template = template.replace("$A", audiostring)
if template.find("$l") >= 0:
l = self.length
lengthstring = "%d:%02d:%02d" % (l/3600, l%3600/60, l%60)
template = template.replace("$l", lengthstring)
if self.timeCreate:
template = template.replace("$Y", str(self.timeCreate[0]))
template = template.replace("$M", str(self.timeCreate[1]))
template = template.replace("$D", str(self.timeCreate[2]))
timestring = "%d:%02d" % (self.timeCreate[3], self.timeCreate[4])
template = template.replace("$T", timestring)
else:
template = template.replace("$Y", "").replace("$M", "").replace("$D", "").replace("$T", "")
return template
def produceFinalCuesheet(self):
CUT_TYPE_IN = 0
CUT_TYPE_OUT = 1
CUT_TYPE_MARK = 2
CUT_TYPE_LAST = 3
accumulated_in = 0
accumulated_at = 0
last_in = 0
self.cutlist = [ ]
self.chaptermarks = [ ]
# our demuxer expects *strictly* IN,OUT lists.
currently_in = not any(type == CUT_TYPE_IN for pts, type in self.cuesheet)
if currently_in:
self.cutlist.append(0) # emulate "in" at first
for (pts, type) in self.cuesheet:
#print "pts=", pts, "type=", type, "accumulated_in=", accumulated_in, "accumulated_at=", accumulated_at, "last_in=", last_in
if type == CUT_TYPE_IN and not currently_in:
self.cutlist.append(pts)
last_in = pts
currently_in = True
if type == CUT_TYPE_OUT and currently_in:
self.cutlist.append(pts)
# accumulate the segment
accumulated_in += pts - last_in
accumulated_at = pts
currently_in = False
if type == CUT_TYPE_MARK and currently_in:
# relocate chaptermark against "in" time. This is not 100% accurate,
# as the in/out points are not.
reloc_pts = pts - last_in + accumulated_in
self.chaptermarks.append(reloc_pts)
if len(self.cutlist) > 1:
part = accumulated_in / (self.length*90000.0)
usedsize = int ( part * self.filesize )
self.estimatedDiskspace = usedsize
self.length = accumulated_in / 90000
def getChapterMarks(self, template="$h:$m:$s.$t"):
timestamps = [ ]
chapters = [ ]
minutes = self.properties.autochapter.getValue()
if len(self.chaptermarks) < 1 and minutes > 0:
chapterpts = 0
while chapterpts < (self.length-60*minutes)*90000:
chapterpts += 90000 * 60 * minutes
chapters.append(chapterpts)
else:
chapters = self.chaptermarks
for p in chapters:
timestring = template.replace("$h", str(p / (90000 * 3600)))
timestring = timestring.replace("$m", ("%02d" % (p % (90000 * 3600) / (90000 * 60))))
timestring = timestring.replace("$s", ("%02d" % (p % (90000 * 60) / 90000)))
timestring = timestring.replace("$t", ("%03d" % ((p % 90000) / 90)))
timestamps.append(timestring)
return timestamps
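# Worked example for getChapterMarks above (illustrative): with the default
# template "$h:$m:$s.$t", a chapter mark at pts 329535000 (1 h 1 min 1.5 s at
# 90 kHz) is rendered as "1:01:01.500".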
|
XTAv2/Enigma2
|
lib/python/Plugins/Extensions/DVDBurn/DVDTitle.py
|
Python
|
gpl-2.0
| 6,505
|
import re
from collections import Counter
from optparse import OptionParser
from ..feature_extractors import vocabulary, tokenizer
from ..util import file_handling as fh, defines
def prepare_data_for_rnn(datasets, min_threshold=1, n=1):
input_filename = defines.data_normalized_text_file
responses = fh.read_json(input_filename)
print "Extracting ngram tokens:"
data = {}
for key in responses.keys():
extract_ngram_tokens(key, responses[key], data, n)
output_dir = fh.makedirs(defines.data_rnn_dir)
output_filename = fh.make_filename(output_dir, get_feature_name(n, min_threshold), 'json')
fh.write_to_json(data, output_filename)
print "Counting tokens"
token_counts = Counter()
token_doc_counts = Counter()
#exclude_questions = ['Current-Industry', 'Current-Occupation', 'Past-Industry', 'Past-Occupation',
# 'drld-synthetic']
for rid in data.keys():
parts = rid.split('_')
if parts[0] in datasets:
token_counts.update(data[rid])
token_doc_counts.update(set(data[rid]))
print "Vocabulary size before pruning:", len(token_counts)
valid_tokens = [t for (t, c) in token_doc_counts.items() if c >= min_threshold]
print "Making vocabulary"
vocab = vocabulary.Vocab('', tokens_to_add=valid_tokens)
print "Vocabulary size after pruning:", len(vocab)
print "Saving vocabulary"
output_filename = fh.make_filename(output_dir, get_feature_name(n, min_threshold) + '_vocab', 'json')
fh.write_to_json(vocab.token2index, output_filename)
print "Extracting indices"
indices = {}
for rid in data.keys():
indices[rid] = vocab.get_indices(data[rid]).tolist()
print "Saving indices"
output_filename = fh.make_filename(output_dir, get_feature_name(n, min_threshold) + '_indices', 'json')
fh.write_to_json(indices, output_filename)
def get_feature_name(n, m):
return 'ngrams_n' + str(n) + '_m' + str(m) + '_rnn'
def extract_ngram_tokens(key, text, all_tokens, n):
text = text.lstrip()
text = text.rstrip()
tokens = []
sentences = tokenizer.split_sentences(text)
for s in sentences:
sent_tokens = tokenizer.make_ngrams(s, n=n)
# remove single quotes from words
sent_tokens = [t.rstrip('\'') if re.search('[a-z]', t) else t for t in sent_tokens]
sent_tokens = [t.lstrip('\'') if re.search('[a-z]', t) else t for t in sent_tokens]
tokens = tokens + sent_tokens
all_tokens[key] = tokens
def main():
usage = "%prog"
parser = OptionParser(usage=usage)
parser.add_option('-m', dest='min_doc_thresh', default=1,
help='Minimum doc count threshold for inclusion of words; default=%default')
(options, args) = parser.parse_args()
m = int(options.min_doc_thresh)
prepare_data_for_rnn(min_threshold=m)
if __name__ == '__main__':
main()
|
dallascard/guac
|
core/rnn/extract_ngram_tokens_for_rnn.py
|
Python
|
apache-2.0
| 2,934
|
from django.contrib.auth import get_user_model
from django.views.generic import TemplateView
from rest_framework import viewsets
from rest_framework.permissions import IsAdminUser, DjangoModelPermissionsOrAnonReadOnly
from .models import Catastrophe
from .serializers import UserSerializer, CatastropheSerializer
class CatastropheViewSet(viewsets.ModelViewSet):
queryset = Catastrophe.objects.all()
serializer_class = CatastropheSerializer
permission_classes = (DjangoModelPermissionsOrAnonReadOnly, )
class UserViewSet(viewsets.ModelViewSet):
queryset = get_user_model().objects.all()
serializer_class = UserSerializer
permission_classes = (IsAdminUser, )
class ClockView(TemplateView):
"""
The view for the main page -- a clock that counts down until some catastrophe has been calculated to occur.
"""
pass
|
MCGallaspy/catastrophe-clock
|
catastrophe_clock/views.py
|
Python
|
mit
| 855
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-07-19 09:29
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('street_agitation_bot', '0011_agitationevent_agitators_limit'),
]
operations = [
migrations.AddField(
model_name='agitationevent',
name='is_canceled',
field=models.BooleanField(default=False),
),
migrations.AlterField(
model_name='agitationevent',
name='agitators_limit',
field=models.IntegerField(blank=True, null=True),
),
]
|
Kurpilyansky/street-agitation-telegram-bot
|
street_agitation_bot/migrations/0012_auto_20170719_0929.py
|
Python
|
gpl-3.0
| 671
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2011 OpenStack LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
#
import nova.context
import nova.db
import nova.flags
FLAGS = nova.flags.FLAGS
def get_test_admin_context():
return nova.context.get_admin_context()
def get_test_instance(context=None):
if not context:
context = get_test_admin_context()
test_instance = {'memory_kb': '1024000',
'basepath': '/some/path',
'bridge_name': 'br100',
'vcpus': 2,
'project_id': 'fake',
'bridge': 'br101',
'image_ref': '1',
'instance_type_id': '5'} # m1.small
instance_ref = nova.db.instance_create(context, test_instance)
return instance_ref
def get_test_network_info(count=1):
ipv6 = FLAGS.use_ipv6
fake = 'fake'
fake_ip = '0.0.0.0/0'
fake_ip_2 = '0.0.0.1/0'
fake_ip_3 = '0.0.0.1/0'
fake_vlan = 100
fake_bridge_interface = 'eth0'
network = {'bridge': fake,
'cidr': fake_ip,
'cidr_v6': fake_ip,
'vlan': fake_vlan,
'bridge_interface': fake_bridge_interface,
'injected': False}
mapping = {'mac': fake,
'dhcp_server': fake,
'gateway': fake,
'gateway6': fake,
'ips': [{'ip': fake_ip}, {'ip': fake_ip}]}
if ipv6:
mapping['ip6s'] = [{'ip': fake_ip},
{'ip': fake_ip_2},
{'ip': fake_ip_3}]
return [(network, mapping) for x in xrange(0, count)]
|
nii-cloud/dodai-compute
|
nova/tests/utils.py
|
Python
|
apache-2.0
| 2,170
|
# This file is covered by the GPL as part of Rubber.
# (c) Emmanuel Beffara, 2002--2006
"""
Metapost support for Rubber.
The module parses input files for dependencies and does some checking on
Metapost's log files after the process. Is it enough?
"""
import os, os.path
import re, string
from rubber import _
from rubber.util import *
from rubber.depend import Node
from rubber.converters.latex import LogCheck
def check (source, target, context):
return prog_available('mpost')
re_input = re.compile("input\\s+(?P<file>[^\\s;]+)")
# This is very restrictive, and so is the parsing routine. FIXME?
re_mpext = re.compile("[0-9]+|mpx|log")
re_mpxerr = re.compile("% line (?P<line>[0-9]+) (?P<file>.*)$")
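# Illustrative match (not from the original file): a line in mpxerr.tex such as
#   % line 12 fig.mp
# is matched by re_mpxerr with group("line") == "12" and group("file") == "fig.mp";
# MPLogCheck.get_errors() below uses this to map TeX errors back to the Metapost source.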
class MPLogCheck (LogCheck):
"""
This class adapts the LogCheck class from the main program to the case of
MetaPost log files, which are very similar to TeX log files.
"""
def __init__ (self, pwd):
LogCheck.__init__(self)
self.pwd = pwd
def read (self, name):
"""
The read() method in LogCheck checks that the log is produced by TeX,
here we check that it is produced by MetaPost.
"""
file = open(name)
line = file.readline()
if not line:
file.close()
return 1
if line.find("This is MetaPost,") == -1:
file.close()
return 1
self.lines = file.readlines()
file.close()
return 0
def continued (self, line):
"""
Messages in Metapost logs are broken at 79 characters per line, except
in some cases where there are lines of this length that are not
continued...
"""
if len(line) != 79:
return 0
return line[-3:] != "..."
def get_errors (self):
"""
Parse the Metapost log file for errors. The file has the same form as
a TeX log file, so the parser for TeX logs is used. The special case
is that of TeX errors in Metapost labels, which requires parsing
another TeX log file.
"""
for err in LogCheck.get_errors(self):
if (err["kind"] != "error"
or err["text"] != "Unable to make mpx file."):
yield err
continue
# a TeX error was found: parse mpxerr.log
log = LogCheck()
if log.read(os.path.join(self.pwd, "mpxerr.log")):
yield err
continue
# read mpxerr.tex to read line numbers from it
tex_file = open(os.path.join(self.pwd, "mpxerr.tex"))
tex = tex_file.readlines()
tex_file.close()
# get the name of the mpxNNN.tex source
for line in log.lines:
if line[:2] == "**":
tex_src = os.path.join(".", line[2:].strip())
break
for err in log.get_errors():
if tex_src != err["file"]:
# the error is not in a Metapost source
yield err
continue
line = int(err["line"])
for shift in range(1, line + 1):
tex_line = tex[line - shift].rstrip()
m = re_mpxerr.search(tex_line)
if m:
err["line"] = int(m.group("line")) + shift - 2
err["file"] = os.path.join(self.pwd, m.group("file"))
err["pkg"] = "TeX"
yield err
break
if shift == line:
# the error is in some verbatimtex
yield err
class Dep (Node):
"""
This class represents dependency nodes for MetaPost figures. The __init__
method simply creates one node for the figures and one leaf node for all
sources.
"""
def __init__ (self, set, target, source, context):
sources = []
self.cmd_pwd = os.path.dirname(source)
self.include(os.path.basename(source), sources)
msg.log(_("%s is made from %r") % (target, sources))
Node.__init__(self, set, [target], sources)
self.env = context['_environment']
self.base = source[:-3]
self.cmd = ["mpost", "\\batchmode;input %s" %
os.path.basename(self.base)]
if self.env.path == [""]:
self.penv = {}
else:
path = string.join(self.env.path, ":")
self.penv = {
"TEXINPUTS": "%s:%s" % (path, os.getenv("TEXINPUTS", "")),
"MPINPUTS": "%s:%s" % (path, os.getenv("MPINPUTS", "")) }
self.log = None
def include (self, source, list):
"""
This function tries to find a specified MetaPost source (currently all
in the same directory), appends its actual name to the list, and
parses it to find recursively included files.
"""
file = os.path.normpath(os.path.join(self.cmd_pwd, source))
if os.path.exists(file + ".mp"):
file = file + ".mp"
elif not os.path.exists(file):
return
list.append(file)
fd = open(file)
for line in fd.readlines():
m = re_input.search(line)
if m:
self.include(m.group("file"), list)
fd.close()
def run (self):
"""
Run Metapost from the source file's directory, so that figures are put
next to their source file.
"""
msg.progress(_("running Metapost on %s") %
msg.simplify(self.base + ".mp"))
if self.env.execute(self.cmd, self.penv, pwd=self.cmd_pwd, kpse=1) == 0:
return True
# This creates a log file that has the same aspect as TeX logs.
self.log = MPLogCheck(self.cmd_pwd)
if self.log.read(self.base + ".log"):
msg.error(_(
"I can't read MetaPost's log file, this is wrong."))
return False
return not self.log.errors()
def get_errors (self):
"""
Report the errors from the last compilation.
"""
return self.log.get_errors()
def clean (self):
"""
This method removes all the files that the Metapost compilation may
have created. It is required because the compilation may produce more
files than just the figures used by the document.
"""
base = self.base + "."
ln = len(base)
dir = os.path.dirname(base)
if dir == "":
list = os.listdir(".")
else:
list = os.listdir(dir)
for f in list:
file = os.path.join(dir, f)
if file[:ln] == base:
ext = file[ln:]
m = re_mpext.match(ext)
if m and ext[m.end():] == "":
msg.log(_("removing %s") % file)
os.unlink(file)
# The `files' dictionary associates dependency nodes to MetaPost sources. It
# is used to detect when several figures from the same source are included. It
# uses a global variable, and this is authentically BAD. Therefore it deserves
# a big fat huge FIXME. Graphics modules should probably be encapsulated in
# classes the same way as standard Rubber modules, that would help for this
# problem.
files = {}
def convert (source, target, context, set):
if source in files:
dep = files[source]
dep.add_product(target)
else:
dep = Dep(set, target, source, context)
files[source] = dep
return dep
|
sre/rubber
|
src/converters/mpost.py
|
Python
|
gpl-2.0
| 6,296
|
import json
import urllib.request
class AppURLopener(urllib.request.FancyURLopener):
version = "App/1.7"
def search_song_lyrics(artist, song_name):
try:
opener = AppURLopener()
response = opener.open(
'http://api.lyricsnmusic.com/songs?' +
'api_key=d232f509b3d2f6a11fab7fa07d38ba&artist=' +
artist + '&track=' + song_name)\
.read().\
decode('utf-8')
json_obj = json.loads(response)
if json_obj:
found_artist = json_obj[0]['artist']['name']
found_song = json_obj[0]['title']
snippet_lyrics = json_obj[0]['snippet']
full_lyrics_link = json_obj[0]['url']
song_found = "{} {}\n".format(found_artist, found_song)
return "{}\n{}\n\nSee full lyrics?".format(
song_found, snippet_lyrics), full_lyrics_link
else:
return "Lyrics not found!", ""
except:
return "Lyrics not found!", ""
|
SimeonRolev/RolevPlayerQT
|
RolevPlayer/RequestLyrics.py
|
Python
|
gpl-3.0
| 1,000
|
"""
Tests for ExtraFieldsTransformer.
"""
from django.test import override_settings
# pylint: disable=protected-access
from openedx.core.djangoapps.content.block_structure.factory import BlockStructureFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase # lint-amnesty, pylint: disable=wrong-import-order
from xmodule.modulestore.tests.factories import SampleCourseFactory # lint-amnesty, pylint: disable=wrong-import-order
from ..extra_fields import ExtraFieldsTransformer
@override_settings(COURSE_BLOCKS_API_EXTRA_FIELDS=[('course', 'other_course_settings')])
class TestExtraFieldsTransformer(ModuleStoreTestCase):
"""
Test proper behavior for ExtraFieldsTransformer
"""
shard = 4
OTHER_COURSE_SETTINGS_DEFAULT = {
'test key': 'test value',
'jackson 5': [
['a', 'b', 'c'],
'it\'s easy as',
[1, 2, 3],
'as simple as',
['do', 're', 'mi']
]
}
def setUp(self):
super().setUp()
self.course = SampleCourseFactory.create(
other_course_settings=self.OTHER_COURSE_SETTINGS_DEFAULT
)
self.course_key = self.course.id
self.course_usage_key = self.store.make_course_usage_key(self.course_key)
self.block_structure = BlockStructureFactory.create_from_modulestore(self.course_usage_key, self.store)
def test_transform(self):
# collect phase
ExtraFieldsTransformer.collect(self.block_structure)
self.block_structure._collect_requested_xblock_fields()
# transform phase
ExtraFieldsTransformer().transform(
usage_info=None,
block_structure=self.block_structure,
)
block_data = self.block_structure.get_transformer_block_data(
self.course_usage_key, ExtraFieldsTransformer,
)
assert block_data.other_course_settings == self.OTHER_COURSE_SETTINGS_DEFAULT
|
eduNEXT/edx-platform
|
lms/djangoapps/course_api/blocks/transformers/tests/test_extra_fields.py
|
Python
|
agpl-3.0
| 1,963
|
# -*- coding: utf-8 -*-
# Copyright (c) 2013 Tisserant Pierre
#
# This file is part of Dragon dice simulator.
#
# Dragon dice simulator is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Dragon dice simulator is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Dragon dice simulator. If not, see <http://www.gnu.org/licenses/>.
from business.dice.face import SpecialOnMelee
from business.effect import InflictNoRetaliationEffect
class Surprise(SpecialOnMelee):
@property
def name(self):
return 'Surprise'
def get_special(self):
return InflictNoRetaliationEffect(1)
|
TheLazyHase/dragon_dice_simulator
|
business/dice/face/special_on_melee/surprise.py
|
Python
|
gpl-3.0
| 1,092
|
#!/usr/bin/python
#
# Urwid container widget classes
# Copyright (C) 2004-2012 Ian Ward
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Urwid web site: http://excess.org/urwid/
from itertools import chain, repeat
from urwid.util import is_mouse_press
from urwid.widget import (Widget, Divider, FLOW, FIXED, PACK, BOX, WidgetWrap,
GIVEN, WEIGHT, LEFT, RIGHT, RELATIVE, TOP, BOTTOM, CLIP, RELATIVE_100)
from urwid.decoration import (Padding, Filler, calculate_left_right_padding,
calculate_top_bottom_filler, normalize_align, normalize_width,
normalize_valign, normalize_height, simplify_align, simplify_width,
simplify_valign, simplify_height)
from urwid.monitored_list import MonitoredList, MonitoredFocusList
from urwid.canvas import (CompositeCanvas, CanvasOverlay, CanvasCombine,
SolidCanvas, CanvasJoin)
class WidgetContainerMixin(object):
"""
Mixin class for widget containers implementing common container methods
"""
def __getitem__(self, position):
"""
Container short-cut for self.contents[position][0].base_widget
which means "give me the child widget at position without any
widget decorations".
This allows for concise traversal of nested container widgets
such as:
my_widget[position0][position1][position2] ...
"""
return self.contents[position][0].base_widget
def get_focus_path(self):
"""
Return the .focus_position values starting from this container
and proceeding along each child widget until reaching a leaf
(non-container) widget.
"""
out = []
w = self
while True:
try:
p = w.focus_position
except IndexError:
return out
out.append(p)
w = w.focus.base_widget
def set_focus_path(self, positions):
"""
Set the .focus_position property starting from this container
widget and proceeding along newly focused child widgets. Any
failed assignment due to incompatible position types or invalid
positions will raise an IndexError.
This method may be used to restore a particular widget to the
focus by passing in the value returned from an earlier call to
get_focus_path().
positions -- sequence of positions
"""
w = self
for p in positions:
if p != w.focus_position:
w.focus_position = p # modifies w.focus
w = w.focus.base_widget
def get_focus_widgets(self):
"""
Return the .focus values starting from this container
and proceeding along each child widget until reaching a leaf
(non-container) widget.
Note that the list does not contain the topmost container widget
(i.e., on which this method is called), but does include the
lowest leaf widget.
"""
out = []
w = self
while True:
w = w.base_widget.focus
if w is None:
return out
out.append(w)
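# Illustrative use of the focus-path API above (an assumption, not from the
# original file):
#   path = container.get_focus_path()    # e.g. [2, 0, 1]
#   # ... focus moves elsewhere ...
#   container.set_focus_path(path)       # restore the previously focused widget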
class WidgetContainerListContentsMixin(object):
"""
Mixin class for widget containers whose positions are indexes into
a list available as self.contents.
"""
def __iter__(self):
"""
Return an iterable of positions for this container from first
to last.
"""
return xrange(len(self.contents))
def __reversed__(self):
"""
Return an iterable of positions for this container from last
to first.
"""
return xrange(len(self.contents) - 1, -1, -1)
class GridFlowError(Exception):
pass
class GridFlow(WidgetWrap, WidgetContainerMixin, WidgetContainerListContentsMixin):
"""
The GridFlow widget is a flow widget that renders all the widgets it
contains the same width and it arranges them from left to right and top to
bottom.
"""
def sizing(self):
return frozenset([FLOW])
def __init__(self, cells, cell_width, h_sep, v_sep, align):
"""
:param cells: list of flow widgets to display
:param cell_width: column width for each cell
:param h_sep: blank columns between each cell horizontally
:param v_sep: blank rows between cells vertically
(if more than one row is required to display all the cells)
:param align: horizontal alignment of cells, one of\:
'left', 'center', 'right', ('relative', percentage 0=left 100=right)
"""
self._contents = MonitoredFocusList([
(w, (GIVEN, cell_width)) for w in cells])
self._contents.set_modified_callback(self._invalidate)
self._contents.set_focus_changed_callback(lambda f: self._invalidate())
self._contents.set_validate_contents_modified(self._contents_modified)
self._cell_width = cell_width
self.h_sep = h_sep
self.v_sep = v_sep
self.align = align
self._cache_maxcol = None
self.__super.__init__(None)
# set self._w to something other than None
self.get_display_widget(((h_sep+cell_width)*len(cells),))
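    # Illustrative sketch, not part of the original source: a GridFlow laying
    # out fixed-width cells; the Button widgets and numbers below are
    # placeholders.
    #
    #   >>> import urwid
    #   >>> cells = [urwid.Button(u"one"), urwid.Button(u"two"),
    #   ...          urwid.Button(u"three")]
    #   >>> gf = urwid.GridFlow(cells, cell_width=12, h_sep=2, v_sep=1,
    #   ...                     align='center')
    #   >>> gf.rows((30,))   # rows needed to show all cells at 30 columns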
def _invalidate(self):
self._cache_maxcol = None
self.__super._invalidate()
def _contents_modified(self, slc, new_items):
for item in new_items:
try:
w, (t, n) = item
if t != GIVEN:
raise ValueError
except (TypeError, ValueError):
raise GridFlowError("added content invalid %r" % (item,))
def _get_cells(self):
ml = MonitoredList(w for w, t in self.contents)
def user_modified():
self._set_cells(ml)
ml.set_modified_callback(user_modified)
return ml
def _set_cells(self, widgets):
focus_position = self.focus_position
self.contents = [
(new, (GIVEN, self._cell_width)) for new in widgets]
if focus_position < len(widgets):
self.focus_position = focus_position
cells = property(_get_cells, _set_cells, doc="""
A list of the widgets in this GridFlow
        .. note:: only for backwards compatibility. You should use the new
            standard container property :attr:`contents` to modify
GridFlow contents.
""")
def _get_cell_width(self):
return self._cell_width
def _set_cell_width(self, width):
focus_position = self.focus_position
self.contents = [
(w, (GIVEN, width)) for (w, options) in self.contents]
self.focus_position = focus_position
self._cell_width = width
cell_width = property(_get_cell_width, _set_cell_width, doc="""
The width of each cell in the GridFlow. Setting this value affects
all cells.
""")
def _get_contents(self):
return self._contents
def _set_contents(self, c):
self._contents[:] = c
contents = property(_get_contents, _set_contents, doc="""
The contents of this GridFlow as a list of (widget, options)
tuples.
        options is currently a tuple in the form `('given', number)`.
        number is the number of screen columns to allocate to this cell.
        'given' is the only type accepted at this time.
This list may be modified like a normal list and the GridFlow
widget will update automatically.
.. seealso:: Create new options tuples with the :meth:`options` method.
""")
def options(self, width_type=GIVEN, width_amount=None):
"""
Return a new options tuple for use in a GridFlow's .contents list.
width_type -- 'given' is the only value accepted
width_amount -- None to use the default cell_width for this GridFlow
"""
if width_type != GIVEN:
raise GridFlowError("invalid width_type: %r" % (width_type,))
if width_amount is None:
width_amount = self._cell_width
return (width_type, width_amount)
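    # Illustrative sketch, not part of the original source, continuing the
    # GridFlow example above: adding a cell through the standard container
    # API with the options() helper.
    #
    #   >>> gf.contents.append((urwid.Button(u"four"), gf.options()))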
def set_focus(self, cell):
"""
Set the cell in focus, for backwards compatibility.
.. note:: only for backwards compatibility. You may also use the new
            standard container property :attr:`focus_position` to set the focus.
:param cell: contained element to focus
:type cell: Widget or int
"""
if isinstance(cell, int):
return self._set_focus_position(cell)
return self._set_focus_cell(cell)
def get_focus(self):
"""
Return the widget in focus, for backwards compatibility.
.. note:: only for backwards compatibility. You may also use the new
standard container property :attr:`focus` to get the focus.
"""
if not self.contents:
return None
return self.contents[self.focus_position][0]
focus = property(get_focus,
doc="the child widget in focus or None when GridFlow is empty")
def _set_focus_cell(self, cell):
for i, (w, options) in enumerate(self.contents):
if cell == w:
self.focus_position = i
return
raise ValueError("Widget not found in GridFlow contents: %r" % (cell,))
focus_cell = property(get_focus, _set_focus_cell, doc="""
The widget in focus, for backwards compatibility.
        .. note:: only for backwards compatibility. You should use the new
            standard container property :attr:`focus` to get the
widget in focus and :attr:`focus_position` to get/set the cell in
focus by index.
""")
def _get_focus_position(self):
"""
Return the index of the widget in focus or None if this GridFlow is
empty.
"""
if not self.contents:
raise IndexError, "No focus_position, GridFlow is empty"
return self.contents.focus
def _set_focus_position(self, position):
"""
Set the widget in focus.
position -- index of child widget to be made focus
"""
try:
if position < 0 or position >= len(self.contents):
raise IndexError
except (TypeError, IndexError):
raise IndexError, "No GridFlow child widget at position %s" % (position,)
self.contents.focus = position
focus_position = property(_get_focus_position, _set_focus_position, doc="""
index of child widget in focus. Raises :exc:`IndexError` if read when
GridFlow is empty, or when set to an invalid index.
""")
def get_display_widget(self, size):
"""
Arrange the cells into columns (and possibly a pile) for
display, input or to calculate rows, and update the display
widget.
"""
(maxcol,) = size
# use cache if possible
if self._cache_maxcol == maxcol:
return self._w
self._cache_maxcol = maxcol
self._w = self.generate_display_widget(size)
return self._w
def generate_display_widget(self, size):
"""
Actually generate display widget (ignoring cache)
"""
(maxcol,) = size
divider = Divider()
if not self.contents:
return divider
if self.v_sep > 1:
# increase size of divider
divider.top = self.v_sep-1
c = None
p = Pile([])
used_space = 0
for i, (w, (width_type, width_amount)) in enumerate(self.contents):
if c is None or maxcol - used_space < width_amount:
# starting a new row
if self.v_sep:
p.contents.append((divider, p.options()))
c = Columns([], self.h_sep)
pad = Padding(c, self.align)
# extra attribute to reference contents position
pad.first_position = i
p.contents.append((pad, p.options()))
c.contents.append((w, c.options(GIVEN, width_amount)))
if i == self.focus_position:
c.focus_position = len(c.contents) - 1
p.focus_position = len(p.contents) - 1
used_space = (sum(x[1][1] for x in c.contents) +
self.h_sep * len(c.contents))
if width_amount > maxcol:
# special case: display is too narrow for the given
# width so we remove the Columns for better behaviour
# FIXME: determine why this is necessary
pad.original_widget=w
pad.width = used_space - self.h_sep
return p
def _set_focus_from_display_widget(self):
"""
Set the focus to the item in focus in the display widget.
"""
# display widget (self._w) is always built as:
#
# Pile([
# Padding(
# Columns([ # possibly
# cell, ...])),
# Divider(), # possibly
# ...])
pile_focus = self._w.focus
if not pile_focus:
return
c = pile_focus.base_widget
if c.focus:
col_focus_position = c.focus_position
else:
col_focus_position = 0
# pad.first_position was set by generate_display_widget() above
self.focus_position = pile_focus.first_position + col_focus_position
def keypress(self, size, key):
"""
Pass keypress to display widget for handling.
Captures focus changes.
"""
self.get_display_widget(size)
key = self.__super.keypress(size, key)
if key is None:
self._set_focus_from_display_widget()
return key
def rows(self, size, focus=False):
self.get_display_widget(size)
return self.__super.rows(size, focus=focus)
def render(self, size, focus=False ):
self.get_display_widget(size)
return self.__super.render(size, focus)
def get_cursor_coords(self, size):
"""Get cursor from display widget."""
self.get_display_widget(size)
return self.__super.get_cursor_coords(size)
def move_cursor_to_coords(self, size, col, row):
"""Set the widget in focus based on the col + row."""
self.get_display_widget(size)
rval = self.__super.move_cursor_to_coords(size, col, row)
self._set_focus_from_display_widget()
return rval
def mouse_event(self, size, event, button, col, row, focus):
self.get_display_widget(size)
self.__super.mouse_event(size, event, button, col, row, focus)
self._set_focus_from_display_widget()
return True # at a minimum we adjusted our focus
def get_pref_col(self, size):
"""Return pref col from display widget."""
self.get_display_widget(size)
return self.__super.get_pref_col(size)
class OverlayError(Exception):
pass
class Overlay(Widget, WidgetContainerMixin, WidgetContainerListContentsMixin):
"""
Overlay contains two box widgets and renders one on top of the other
"""
_selectable = True
_sizing = frozenset([BOX])
_DEFAULT_BOTTOM_OPTIONS = (
LEFT, None, RELATIVE, 100, None, 0, 0,
TOP, None, RELATIVE, 100, None, 0, 0)
def __init__(self, top_w, bottom_w, align, width, valign, height,
min_width=None, min_height=None, left=0, right=0, top=0, bottom=0):
"""
:param top_w: a flow, box or fixed widget to overlay "on top"
:type top_w: Widget
:param bottom_w: a box widget to appear "below" previous widget
:type bottom_w: Widget
:param align: alignment, one of ``'left'``, ``'center'``, ``'right'`` or
(``'relative'``, *percentage* 0=left 100=right)
:type align: str
:param width: width type, one of:
``'pack'``
if *top_w* is a fixed widget
*given width*
integer number of columns wide
(``'relative'``, *percentage of total width*)
make *top_w* width related to container width
:param valign: alignment mode, one of ``'top'``, ``'middle'``, ``'bottom'`` or
(``'relative'``, *percentage* 0=top 100=bottom)
:param height: one of:
``'pack'``
if *top_w* is a flow or fixed widget
*given height*
integer number of rows high
(``'relative'``, *percentage of total height*)
make *top_w* height related to container height
:param min_width: the minimum number of columns for *top_w* when width
is not fixed
:type min_width: int
:param min_height: minimum number of rows for *top_w* when height
is not fixed
:type min_height: int
:param left: a fixed number of columns to add on the left
:type left: int
:param right: a fixed number of columns to add on the right
:type right: int
:param top: a fixed number of rows to add on the top
:type top: int
:param bottom: a fixed number of rows to add on the bottom
:type bottom: int
Overlay widgets behave similarly to :class:`Padding` and :class:`Filler`
widgets when determining the size and position of *top_w*. *bottom_w* is
always rendered the full size available "below" *top_w*.
"""
self.__super.__init__()
self.top_w = top_w
self.bottom_w = bottom_w
self.set_overlay_parameters(align, width, valign, height,
min_width, min_height, left, right, top, bottom)
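    # Illustrative sketch, not part of the original source: centering a small
    # dialog over a background fill; the widgets and percentages below are
    # placeholders.
    #
    #   >>> import urwid
    #   >>> background = urwid.SolidFill(u'.')
    #   >>> dialog = urwid.Filler(urwid.Text(u"Hello"), 'top')
    #   >>> overlay = urwid.Overlay(dialog, background,
    #   ...     align='center', width=('relative', 60),
    #   ...     valign='middle', height=('relative', 40))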
def options(self, align_type, align_amount, width_type, width_amount,
valign_type, valign_amount, height_type, height_amount,
min_width=None, min_height=None, left=0, right=0, top=0, bottom=0):
"""
Return a new options tuple for use in this Overlay's .contents mapping.
This is the common container API to create options for replacing the
top widget of this Overlay. It is provided for completeness
but is not necessarily the easiest way to change the overlay parameters.
See also :meth:`.set_overlay_parameters`
"""
return (align_type, align_amount, width_type, width_amount,
min_width, left, right, valign_type, valign_amount,
height_type, height_amount, min_height, top, bottom)
def set_overlay_parameters(self, align, width, valign, height,
min_width=None, min_height=None, left=0, right=0, top=0, bottom=0):
"""
Adjust the overlay size and position parameters.
See :class:`__init__() <Overlay>` for a description of the parameters.
"""
# convert obsolete parameters 'fixed ...':
if isinstance(align, tuple):
if align[0] == 'fixed left':
left = align[1]
align = LEFT
elif align[0] == 'fixed right':
right = align[1]
align = RIGHT
if isinstance(width, tuple):
if width[0] == 'fixed left':
left = width[1]
width = RELATIVE_100
elif width[0] == 'fixed right':
right = width[1]
width = RELATIVE_100
if isinstance(valign, tuple):
if valign[0] == 'fixed top':
top = valign[1]
valign = TOP
elif valign[0] == 'fixed bottom':
bottom = valign[1]
valign = BOTTOM
if isinstance(height, tuple):
if height[0] == 'fixed bottom':
bottom = height[1]
height = RELATIVE_100
elif height[0] == 'fixed top':
top = height[1]
height = RELATIVE_100
if width is None: # more obsolete values accepted
width = PACK
if height is None:
height = PACK
align_type, align_amount = normalize_align(align, OverlayError)
width_type, width_amount = normalize_width(width, OverlayError)
valign_type, valign_amount = normalize_valign(valign, OverlayError)
height_type, height_amount = normalize_height(height, OverlayError)
if height_type in (GIVEN, PACK):
min_height = None
# use container API to set the parameters
self.contents[1] = (self.top_w, self.options(
align_type, align_amount, width_type, width_amount,
valign_type, valign_amount, height_type, height_amount,
min_width, min_height, left, right, top, bottom))
def selectable(self):
"""Return selectable from top_w."""
return self.top_w.selectable()
def keypress(self, size, key):
"""Pass keypress to top_w."""
return self.top_w.keypress(self.top_w_size(size,
*self.calculate_padding_filler(size, True)), key)
def _get_focus(self):
"""
Currently self.top_w is always the focus of an Overlay
"""
return self.top_w
focus = property(_get_focus,
doc="the top widget in this overlay is always in focus")
def _get_focus_position(self):
"""
Return the top widget position (currently always 1).
"""
return 1
def _set_focus_position(self, position):
"""
        Set the widget in focus. Currently only position 1 is accepted.
position -- index of child widget to be made focus
"""
if position != 1:
raise IndexError, ("Overlay widget focus_position currently "
"must always be set to 1, not %s" % (position,))
focus_position = property(_get_focus_position, _set_focus_position,
doc="index of child widget in focus, currently always 1")
def _contents(self):
class OverlayContents(object):
def __len__(inner_self):
return 2
__getitem__ = self._contents__getitem__
__setitem__ = self._contents__setitem__
return OverlayContents()
def _contents__getitem__(self, index):
if index == 0:
return (self.bottom_w, self._DEFAULT_BOTTOM_OPTIONS)
if index == 1:
return (self.top_w, (
self.align_type, self.align_amount,
self.width_type, self.width_amount,
self.min_width, self.left,
self.right, self.valign_type, self.valign_amount,
self.height_type, self.height_amount,
self.min_height, self.top, self.bottom))
raise IndexError("Overlay.contents has no position %r"
% (index,))
def _contents__setitem__(self, index, value):
try:
value_w, value_options = value
except (ValueError, TypeError):
raise OverlayError("added content invalid: %r" % (value,))
if index == 0:
if value_options != self._DEFAULT_BOTTOM_OPTIONS:
raise OverlayError("bottom_options must be set to "
"%r" % (self._DEFAULT_BOTTOM_OPTIONS,))
self.bottom_w = value_w
elif index == 1:
try:
(align_type, align_amount, width_type, width_amount,
min_width, left, right, valign_type, valign_amount,
height_type, height_amount, min_height, top, bottom,
) = value_options
except (ValueError, TypeError):
raise OverlayError("top_options is invalid: %r"
% (value_options,))
# normalize first, this is where errors are raised
align_type, align_amount = normalize_align(
simplify_align(align_type, align_amount), OverlayError)
width_type, width_amount = normalize_width(
simplify_width(width_type, width_amount), OverlayError)
            valign_type, valign_amount = normalize_valign(
simplify_valign(valign_type, valign_amount), OverlayError)
height_type, height_amount = normalize_height(
simplify_height(height_type, height_amount), OverlayError)
self.align_type = align_type
self.align_amount = align_amount
self.width_type = width_type
self.width_amount = width_amount
self.valign_type = valign_type
self.valign_amount = valign_amount
self.height_type = height_type
self.height_amount = height_amount
self.left = left
self.right = right
self.top = top
self.bottom = bottom
self.min_width = min_width
self.min_height = min_height
else:
raise IndexError("Overlay.contents has no position %r"
% (index,))
self._invalidate()
contents = property(_contents, doc="""
a list-like object similar to::
            [(bottom_w, bottom_options),
(top_w, top_options)]
This object may be used to read or update top and bottom widgets and
        top widget's options, but no widgets may be added or removed.
`top_options` takes the form
`(align_type, align_amount, width_type, width_amount, min_width, left,
right, valign_type, valign_amount, height_type, height_amount,
min_height, top, bottom)`
bottom_options is always
`('left', None, 'relative', 100, None, 0, 0,
'top', None, 'relative', 100, None, 0, 0)`
which means that bottom widget always covers the full area of the Overlay.
        Writing a different value for `bottom_options` raises an
:exc:`OverlayError`.
""")
def get_cursor_coords(self, size):
"""Return cursor coords from top_w, if any."""
if not hasattr(self.top_w, 'get_cursor_coords'):
return None
(maxcol, maxrow) = size
left, right, top, bottom = self.calculate_padding_filler(size,
True)
x, y = self.top_w.get_cursor_coords(
(maxcol-left-right, maxrow-top-bottom) )
if y >= maxrow: # required??
y = maxrow-1
return x+left, y+top
def calculate_padding_filler(self, size, focus):
"""Return (padding left, right, filler top, bottom)."""
(maxcol, maxrow) = size
height = None
if self.width_type == PACK:
width, height = self.top_w.pack((),focus=focus)
if not height:
raise OverlayError("fixed widget must have a height")
left, right = calculate_left_right_padding(maxcol,
self.align_type, self.align_amount, CLIP, width,
None, self.left, self.right)
else:
left, right = calculate_left_right_padding(maxcol,
self.align_type, self.align_amount,
self.width_type, self.width_amount,
self.min_width, self.left, self.right)
if height:
# top_w is a fixed widget
top, bottom = calculate_top_bottom_filler(maxrow,
self.valign_type, self.valign_amount,
GIVEN, height, None, self.top, self.bottom)
if maxrow-top-bottom < height:
bottom = maxrow-top-height
elif self.height_type == PACK:
# top_w is a flow widget
height = self.top_w.rows((maxcol,),focus=focus)
top, bottom = calculate_top_bottom_filler(maxrow,
self.valign_type, self.valign_amount,
GIVEN, height, None, self.top, self.bottom)
if height > maxrow: # flow widget rendered too large
bottom = maxrow - height
else:
top, bottom = calculate_top_bottom_filler(maxrow,
self.valign_type, self.valign_amount,
self.height_type, self.height_amount,
self.min_height, self.top, self.bottom)
return left, right, top, bottom
def top_w_size(self, size, left, right, top, bottom):
"""Return the size to pass to top_w."""
if self.width_type == PACK:
# top_w is a fixed widget
return ()
maxcol, maxrow = size
if self.width_type != PACK and self.height_type == PACK:
# top_w is a flow widget
return (maxcol-left-right,)
return (maxcol-left-right, maxrow-top-bottom)
def render(self, size, focus=False):
"""Render top_w overlayed on bottom_w."""
left, right, top, bottom = self.calculate_padding_filler(size,
focus)
bottom_c = self.bottom_w.render(size)
if not bottom_c.cols() or not bottom_c.rows():
return CompositeCanvas(bottom_c)
top_c = self.top_w.render(
self.top_w_size(size, left, right, top, bottom), focus)
top_c = CompositeCanvas(top_c)
if left < 0 or right < 0:
top_c.pad_trim_left_right(min(0, left), min(0, right))
if top < 0 or bottom < 0:
top_c.pad_trim_top_bottom(min(0, top), min(0, bottom))
return CanvasOverlay(top_c, bottom_c, left, top)
def mouse_event(self, size, event, button, col, row, focus):
"""Pass event to top_w, ignore if outside of top_w."""
if not hasattr(self.top_w, 'mouse_event'):
return False
left, right, top, bottom = self.calculate_padding_filler(size,
focus)
maxcol, maxrow = size
if ( col<left or col>=maxcol-right or
row<top or row>=maxrow-bottom ):
return False
return self.top_w.mouse_event(
self.top_w_size(size, left, right, top, bottom),
event, button, col-left, row-top, focus )
class FrameError(Exception):
pass
class Frame(Widget, WidgetContainerMixin):
"""
Frame widget is a box widget with optional header and footer
flow widgets placed above and below the box widget.
.. note:: The main difference between a Frame and a :class:`Pile` widget
defined as: `Pile([('pack', header), body, ('pack', footer)])` is that
the Frame will not automatically change focus up and down in response to
keystrokes.
"""
_selectable = True
_sizing = frozenset([BOX])
def __init__(self, body, header=None, footer=None, focus_part='body'):
"""
:param body: a box widget for the body of the frame
:type body: Widget
:param header: a flow widget for above the body (or None)
:type header: Widget
:param footer: a flow widget for below the body (or None)
:type footer: Widget
:param focus_part: 'header', 'footer' or 'body'
:type focus_part: str
"""
self.__super.__init__()
self._header = header
self._body = body
self._footer = footer
self.focus_part = focus_part
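    # Illustrative sketch, not part of the original source: a Frame wrapping a
    # ListBox body with a header and footer; the widgets are placeholders.
    #
    #   >>> import urwid
    #   >>> body = urwid.ListBox(urwid.SimpleListWalker([urwid.Text(u"line")]))
    #   >>> frame = urwid.Frame(body,
    #   ...     header=urwid.Text(u"Header"),
    #   ...     footer=urwid.Text(u"Footer"))
    #   >>> frame.focus_position
    #   'body'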
def get_header(self):
return self._header
def set_header(self, header):
self._header = header
if header is None and self.focus_part == 'header':
self.focus_part = 'body'
self._invalidate()
header = property(get_header, set_header)
def get_body(self):
return self._body
def set_body(self, body):
self._body = body
self._invalidate()
body = property(get_body, set_body)
def get_footer(self):
return self._footer
def set_footer(self, footer):
self._footer = footer
if footer is None and self.focus_part == 'footer':
self.focus_part = 'body'
self._invalidate()
footer = property(get_footer, set_footer)
def set_focus(self, part):
"""
Determine which part of the frame is in focus.
.. note:: included for backwards compatibility. You should rather use
the container property :attr:`.focus_position` to set this value.
:param part: 'header', 'footer' or 'body'
:type part: str
"""
if part not in ('header', 'footer', 'body'):
raise IndexError, 'Invalid position for Frame: %s' % (part,)
if (part == 'header' and self._header is None) or (
part == 'footer' and self._footer is None):
raise IndexError, 'This Frame has no %s' % (part,)
self.focus_part = part
self._invalidate()
def get_focus(self):
"""
        Return an indicator of which part of the frame is in focus.
.. note:: included for backwards compatibility. You should rather use
the container property :attr:`.focus_position` to get this value.
:returns: one of 'header', 'footer' or 'body'.
:rtype: str
"""
return self.focus_part
def _get_focus(self):
return {
'header': self._header,
'footer': self._footer,
'body': self._body
}[self.focus_part]
focus = property(_get_focus, doc="""
child :class:`Widget` in focus: the body, header or footer widget.
This is a read-only property.""")
focus_position = property(get_focus, set_focus, doc="""
        writeable property containing an indicator of which part of the
        frame is in focus: `'body'`, `'header'` or `'footer'`.
""")
def _contents(self):
class FrameContents(object):
def __len__(inner_self):
return len(inner_self.keys())
def items(inner_self):
return [(k, inner_self[k]) for k in inner_self.keys()]
def values(inner_self):
return [inner_self[k] for k in inner_self.keys()]
def update(inner_self, E=None, **F):
if E:
keys = getattr(E, 'keys', None)
if keys:
for k in E:
inner_self[k] = E[k]
else:
for k, v in E:
inner_self[k] = v
for k in F:
inner_self[k] = F[k]
keys = self._contents_keys
__getitem__ = self._contents__getitem__
__setitem__ = self._contents__setitem__
__delitem__ = self._contents__delitem__
return FrameContents()
def _contents_keys(self):
keys = ['body']
if self._header:
keys.append('header')
if self._footer:
keys.append('footer')
return keys
def _contents__getitem__(self, key):
if key == 'body':
return (self._body, None)
if key == 'header' and self._header:
return (self._header, None)
if key == 'footer' and self._footer:
return (self._footer, None)
raise KeyError("Frame.contents has no key: %r" % (key,))
def _contents__setitem__(self, key, value):
if key not in ('body', 'header', 'footer'):
raise KeyError("Frame.contents has no key: %r" % (key,))
try:
value_w, value_options = value
if value_options is not None:
raise ValueError
except (ValueError, TypeError):
raise FrameError("added content invalid: %r" % (value,))
if key == 'body':
self.body = value_w
elif key == 'footer':
self.footer = value_w
else:
self.header = value_w
def _contents__delitem__(self, key):
if key not in ('header', 'footer'):
raise KeyError("Frame.contents can't remove key: %r" % (key,))
if (key == 'header' and self._header is None
) or (key == 'footer' and self._footer is None):
raise KeyError("Frame.contents has no key: %r" % (key,))
if key == 'header':
self.header = None
else:
self.footer = None
contents = property(_contents, doc="""
a dict-like object similar to::
{
'body': (body_widget, None),
'header': (header_widget, None), # if frame has a header
'footer': (footer_widget, None) # if frame has a footer
}
This object may be used to read or update the contents of the Frame.
        The values are similar to the list-like .contents objects used
in other containers with (:class:`Widget`, options) tuples, but are
constrained to keys for each of the three usual parts of a Frame.
When other keys are used a :exc:`KeyError` will be raised.
Currently all options are `None`, but using the :meth:`options` method
to create the options value is recommended for forwards
compatibility.
""")
def options(self):
"""
There are currently no options for Frame contents.
Return None as a placeholder for future options.
"""
return None
def frame_top_bottom(self, size, focus):
"""
Calculate the number of rows for the header and footer.
:param size: See :meth:`Widget.render` for details
:type size: widget size
:param focus: ``True`` if this widget is in focus
:type focus: bool
:returns: `(head rows, foot rows),(orig head, orig foot)`
orig head/foot are from rows() calls.
:rtype: (int, int), (int, int)
"""
(maxcol, maxrow) = size
frows = hrows = 0
if self.header:
hrows = self.header.rows((maxcol,),
self.focus_part=='header' and focus)
if self.footer:
frows = self.footer.rows((maxcol,),
self.focus_part=='footer' and focus)
remaining = maxrow
if self.focus_part == 'footer':
if frows >= remaining:
return (0, remaining),(hrows, frows)
remaining -= frows
if hrows >= remaining:
return (remaining, frows),(hrows, frows)
elif self.focus_part == 'header':
if hrows >= maxrow:
return (remaining, 0),(hrows, frows)
remaining -= hrows
if frows >= remaining:
return (hrows, remaining),(hrows, frows)
elif hrows + frows >= remaining:
# self.focus_part == 'body'
rless1 = max(0, remaining-1)
if frows >= remaining-1:
return (0, rless1),(hrows, frows)
remaining -= frows
rless1 = max(0, remaining-1)
return (rless1,frows),(hrows, frows)
return (hrows, frows),(hrows, frows)
def render(self, size, focus=False):
(maxcol, maxrow) = size
(htrim, ftrim),(hrows, frows) = self.frame_top_bottom(
(maxcol, maxrow), focus)
combinelist = []
depends_on = []
head = None
if htrim and htrim < hrows:
head = Filler(self.header, 'top').render(
(maxcol, htrim),
focus and self.focus_part == 'header')
elif htrim:
head = self.header.render((maxcol,),
focus and self.focus_part == 'header')
assert head.rows() == hrows, "rows, render mismatch"
if head:
combinelist.append((head, 'header',
self.focus_part == 'header'))
depends_on.append(self.header)
if ftrim+htrim < maxrow:
body = self.body.render((maxcol, maxrow-ftrim-htrim),
focus and self.focus_part == 'body')
combinelist.append((body, 'body',
self.focus_part == 'body'))
depends_on.append(self.body)
foot = None
if ftrim and ftrim < frows:
foot = Filler(self.footer, 'bottom').render(
(maxcol, ftrim),
focus and self.focus_part == 'footer')
elif ftrim:
foot = self.footer.render((maxcol,),
focus and self.focus_part == 'footer')
assert foot.rows() == frows, "rows, render mismatch"
if foot:
combinelist.append((foot, 'footer',
self.focus_part == 'footer'))
depends_on.append(self.footer)
return CanvasCombine(combinelist)
def keypress(self, size, key):
"""Pass keypress to widget in focus."""
(maxcol, maxrow) = size
if self.focus_part == 'header' and self.header is not None:
if not self.header.selectable():
return key
return self.header.keypress((maxcol,),key)
if self.focus_part == 'footer' and self.footer is not None:
if not self.footer.selectable():
return key
return self.footer.keypress((maxcol,),key)
if self.focus_part != 'body':
return key
remaining = maxrow
if self.header is not None:
remaining -= self.header.rows((maxcol,))
if self.footer is not None:
remaining -= self.footer.rows((maxcol,))
if remaining <= 0: return key
if not self.body.selectable():
return key
return self.body.keypress( (maxcol, remaining), key )
def mouse_event(self, size, event, button, col, row, focus):
"""
Pass mouse event to appropriate part of frame.
Focus may be changed on button 1 press.
"""
(maxcol, maxrow) = size
(htrim, ftrim),(hrows, frows) = self.frame_top_bottom(
(maxcol, maxrow), focus)
if row < htrim: # within header
focus = focus and self.focus_part == 'header'
if is_mouse_press(event) and button==1:
if self.header.selectable():
self.set_focus('header')
if not hasattr(self.header, 'mouse_event'):
return False
return self.header.mouse_event( (maxcol,), event,
button, col, row, focus )
if row >= maxrow-ftrim: # within footer
focus = focus and self.focus_part == 'footer'
if is_mouse_press(event) and button==1:
if self.footer.selectable():
self.set_focus('footer')
if not hasattr(self.footer, 'mouse_event'):
return False
return self.footer.mouse_event( (maxcol,), event,
button, col, row-maxrow+frows, focus )
# within body
focus = focus and self.focus_part == 'body'
if is_mouse_press(event) and button==1:
if self.body.selectable():
self.set_focus('body')
if not hasattr(self.body, 'mouse_event'):
return False
return self.body.mouse_event( (maxcol, maxrow-htrim-ftrim),
event, button, col, row-htrim, focus )
def __iter__(self):
"""
Return an iterator over the positions in this Frame top to bottom.
"""
if self._header:
yield 'header'
yield 'body'
if self._footer:
yield 'footer'
def __reversed__(self):
"""
Return an iterator over the positions in this Frame bottom to top.
"""
if self._footer:
yield 'footer'
yield 'body'
if self._header:
yield 'header'
class PileError(Exception):
pass
class Pile(Widget, WidgetContainerMixin, WidgetContainerListContentsMixin):
"""
A pile of widgets stacked vertically from top to bottom
"""
_sizing = frozenset([FLOW, BOX])
def __init__(self, widget_list, focus_item=None):
"""
:param widget_list: child widgets
:type widget_list: iterable
:param focus_item: child widget that gets the focus initially.
Chooses the first selectable widget if unset.
:type focus_item: Widget or int
*widget_list* may also contain tuples such as:
(*given_height*, *widget*)
always treat *widget* as a box widget and give it *given_height* rows,
where given_height is an int
(``'pack'``, *widget*)
allow *widget* to calculate its own height by calling its :meth:`rows`
method, ie. treat it as a flow widget.
(``'weight'``, *weight*, *widget*)
if the pile is treated as a box widget then treat widget as a box
widget with a height based on its relative weight value, otherwise
treat the same as (``'pack'``, *widget*).
        Widgets not in a tuple are the same as (``'weight'``, ``1``, *widget*)
.. note:: If the Pile is treated as a box widget there must be at least
one ``'weight'`` tuple in :attr:`widget_list`.
"""
self.__super.__init__()
self._contents = MonitoredFocusList()
self._contents.set_modified_callback(self._invalidate)
self._contents.set_focus_changed_callback(lambda f: self._invalidate())
self._contents.set_validate_contents_modified(self._contents_modified)
focus_item = focus_item
for i, original in enumerate(widget_list):
w = original
if not isinstance(w, tuple):
self.contents.append((w, (WEIGHT, 1)))
elif w[0] in (FLOW, PACK):
f, w = w
self.contents.append((w, (PACK, None)))
elif len(w) == 2:
height, w = w
self.contents.append((w, (GIVEN, height)))
elif w[0] == FIXED: # backwards compatibility
_ignore, height, w = w
self.contents.append((w, (GIVEN, height)))
elif w[0] == WEIGHT:
f, height, w = w
self.contents.append((w, (f, height)))
else:
raise PileError(
"initial widget list item invalid %r" % (original,))
if focus_item is None and w.selectable():
focus_item = i
if self.contents and focus_item is not None:
self.set_focus(focus_item)
self.pref_col = 0
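    # Illustrative sketch, not part of the original source: a Pile mixing the
    # tuple forms described above; the widgets and numbers are placeholders.
    #
    #   >>> import urwid
    #   >>> pile = urwid.Pile([
    #   ...     urwid.Text(u"plain"),                  # same as ('weight', 1, w)
    #   ...     ('pack', urwid.Text(u"own height")),   # flow widget
    #   ...     (3, urwid.SolidFill(u'#')),            # box widget given 3 rows
    #   ... ])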
def _contents_modified(self, slc, new_items):
for item in new_items:
try:
w, (t, n) = item
if t not in (PACK, GIVEN, WEIGHT):
raise ValueError
except (TypeError, ValueError):
raise PileError("added content invalid: %r" % (item,))
def _get_widget_list(self):
ml = MonitoredList(w for w, t in self.contents)
def user_modified():
self._set_widget_list(ml)
ml.set_modified_callback(user_modified)
return ml
def _set_widget_list(self, widgets):
focus_position = self.focus_position
self.contents = [
(new, options) for (new, (w, options)) in zip(widgets,
# need to grow contents list if widgets is longer
chain(self.contents, repeat((None, (WEIGHT, 1)))))]
if focus_position < len(widgets):
self.focus_position = focus_position
widget_list = property(_get_widget_list, _set_widget_list, doc="""
A list of the widgets in this Pile
.. note:: only for backwards compatibility. You should use the new
standard container property :attr:`contents`.
""")
def _get_item_types(self):
ml = MonitoredList(
# return the old item type names
({GIVEN: FIXED, PACK: FLOW}.get(f, f), height)
for w, (f, height) in self.contents)
def user_modified():
self._set_item_types(ml)
ml.set_modified_callback(user_modified)
return ml
def _set_item_types(self, item_types):
focus_position = self.focus_position
self.contents = [
(w, ({FIXED: GIVEN, FLOW: PACK}.get(new_t, new_t), new_height))
for ((new_t, new_height), (w, options))
in zip(item_types, self.contents)]
if focus_position < len(item_types):
self.focus_position = focus_position
item_types = property(_get_item_types, _set_item_types, doc="""
A list of the options values for widgets in this Pile.
.. note:: only for backwards compatibility. You should use the new
standard container property :attr:`contents`.
""")
def _get_contents(self):
return self._contents
def _set_contents(self, c):
self._contents[:] = c
contents = property(_get_contents, _set_contents, doc="""
The contents of this Pile as a list of (widget, options) tuples.
options currently may be one of
(``'pack'``, ``None``)
allow widget to calculate its own height by calling its
:meth:`rows <Widget.rows>` method, i.e. treat it as a flow widget.
(``'given'``, *n*)
Always treat widget as a box widget with a given height of *n* rows.
(``'weight'``, *w*)
If the Pile itself is treated as a box widget then
the value *w* will be used as a relative weight for assigning rows
to this box widget. If the Pile is being treated as a flow
widget then this is the same as (``'pack'``, ``None``) and the *w*
value is ignored.
If the Pile itself is treated as a box widget then at least one
widget must have a (``'weight'``, *w*) options value, or the Pile will
not be able to grow to fill the required number of rows.
This list may be modified like a normal list and the Pile widget
        will update automatically.
.. seealso:: Create new options tuples with the :meth:`options` method
""")
def options(self, height_type=WEIGHT, height_amount=1):
"""
Return a new options tuple for use in a Pile's :attr:`contents` list.
:param height_type: ``'pack'``, ``'given'`` or ``'weight'``
:param height_amount: ``None`` for ``'pack'``, a number of rows for
            ``'given'`` or a weight value (number) for ``'weight'``
"""
if height_type == PACK:
return (PACK, None)
if height_type not in (GIVEN, WEIGHT):
raise PileError('invalid height_type: %r' % (height_type,))
return (height_type, height_amount)
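    # Illustrative sketch, not part of the original source, continuing the
    # Pile example above: appending a weighted child through the standard
    # container API.
    #
    #   >>> pile.contents.append((urwid.Edit(u"> "), pile.options('weight', 2)))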
def selectable(self):
"""Return True if the focus item is selectable."""
w = self.focus
return w is not None and w.selectable()
def set_focus(self, item):
"""
Set the item in focus, for backwards compatibility.
.. note:: only for backwards compatibility. You should use the new
            standard container property :attr:`focus_position`
to set the position by integer index instead.
:param item: element to focus
:type item: Widget or int
"""
if isinstance(item, int):
return self._set_focus_position(item)
for i, (w, options) in enumerate(self.contents):
if item == w:
self.focus_position = i
return
raise ValueError("Widget not found in Pile contents: %r" % (item,))
def get_focus(self):
"""
Return the widget in focus, for backwards compatibility. You may
also use the new standard container property .focus to get the
child widget in focus.
"""
if not self.contents:
return None
return self.contents[self.focus_position][0]
focus = property(get_focus,
doc="the child widget in focus or None when Pile is empty")
focus_item = property(get_focus, set_focus, doc="""
A property for reading and setting the widget in focus.
.. note::
only for backwards compatibility. You should use the new
standard container properties :attr:`focus` and
:attr:`focus_position` to get the child widget in focus or modify the
focus position.
""")
def _get_focus_position(self):
"""
Return the index of the widget in focus or None if this Pile is
empty.
"""
if not self.contents:
raise IndexError, "No focus_position, Pile is empty"
return self.contents.focus
def _set_focus_position(self, position):
"""
Set the widget in focus.
position -- index of child widget to be made focus
"""
try:
if position < 0 or position >= len(self.contents):
raise IndexError
except (TypeError, IndexError):
raise IndexError, "No Pile child widget at position %s" % (position,)
self.contents.focus = position
focus_position = property(_get_focus_position, _set_focus_position, doc="""
index of child widget in focus. Raises :exc:`IndexError` if read when
Pile is empty, or when set to an invalid index.
""")
def get_pref_col(self, size):
"""Return the preferred column for the cursor, or None."""
if not self.selectable():
return None
self._update_pref_col_from_focus(size)
return self.pref_col
def get_item_size(self, size, i, focus, item_rows=None):
"""
Return a size appropriate for passing to self.contents[i][0].render
"""
maxcol = size[0]
w, (f, height) = self.contents[i]
if f == GIVEN:
return (maxcol, height)
elif f == WEIGHT and len(size) == 2:
if not item_rows:
item_rows = self.get_item_rows(size, focus)
return (maxcol, item_rows[i])
else:
return (maxcol,)
def get_item_rows(self, size, focus):
"""
Return a list of the number of rows used by each widget
in self.contents
"""
remaining = None
maxcol = size[0]
if len(size) == 2:
remaining = size[1]
l = []
if remaining is None:
# pile is a flow widget
for w, (f, height) in self.contents:
if f == GIVEN:
l.append(height)
else:
l.append(w.rows((maxcol,),
focus=focus and self.focus_item == w))
return l
# pile is a box widget
# do an extra pass to calculate rows for each widget
wtotal = 0
for w, (f, height) in self.contents:
if f == PACK:
rows = w.rows((maxcol,), focus=focus and self.focus_item == w)
l.append(rows)
remaining -= rows
elif f == GIVEN:
l.append(height)
remaining -= height
else:
l.append(None)
wtotal += height
if wtotal == 0:
raise PileError, "No weighted widgets found for Pile treated as a box widget"
if remaining < 0:
remaining = 0
for i, (w, (f, height)) in enumerate(self.contents):
li = l[i]
if li is None:
rows = int(float(remaining) * height / wtotal + 0.5)
l[i] = rows
remaining -= rows
wtotal -= height
return l
def render(self, size, focus=False):
maxcol = size[0]
item_rows = None
combinelist = []
for i, (w, (f, height)) in enumerate(self.contents):
item_focus = self.focus_item == w
canv = None
if f == GIVEN:
canv = w.render((maxcol, height), focus=focus and item_focus)
elif f == PACK or len(size)==1:
canv = w.render((maxcol,), focus=focus and item_focus)
else:
if item_rows is None:
item_rows = self.get_item_rows(size, focus)
rows = item_rows[i]
if rows>0:
canv = w.render((maxcol, rows), focus=focus and item_focus)
if canv:
combinelist.append((canv, i, item_focus))
if not combinelist:
return SolidCanvas(" ", size[0], (size[1:]+(0,))[0])
out = CanvasCombine(combinelist)
if len(size) == 2 and size[1] != out.rows():
# flow/fixed widgets rendered too large/small
out = CompositeCanvas(out)
out.pad_trim_top_bottom(0, size[1] - out.rows())
return out
def get_cursor_coords(self, size):
"""Return the cursor coordinates of the focus widget."""
if not self.selectable():
return None
if not hasattr(self.focus_item, 'get_cursor_coords'):
return None
i = self.focus_position
w, (f, height) = self.contents[i]
item_rows = None
maxcol = size[0]
if f == GIVEN or (f == WEIGHT and len(size) == 2):
if f == GIVEN:
maxrow = height
else:
if item_rows is None:
item_rows = self.get_item_rows(size, focus=True)
maxrow = item_rows[i]
coords = self.focus_item.get_cursor_coords((maxcol, maxrow))
else:
coords = self.focus_item.get_cursor_coords((maxcol,))
if coords is None:
return None
x,y = coords
if i > 0:
if item_rows is None:
item_rows = self.get_item_rows(size, focus=True)
for r in item_rows[:i]:
y += r
return x, y
def rows(self, size, focus=False ):
return sum(self.get_item_rows(size, focus))
def keypress(self, size, key ):
"""Pass the keypress to the widget in focus.
Unhandled 'up' and 'down' keys may cause a focus change."""
if not self.contents:
return key
item_rows = None
if len(size) == 2:
item_rows = self.get_item_rows(size, focus=True)
i = self.focus_position
if self.selectable():
tsize = self.get_item_size(size, i, True, item_rows)
key = self.focus.keypress(tsize, key)
if self._command_map[key] not in ('cursor up', 'cursor down'):
return key
if self._command_map[key] == 'cursor up':
candidates = range(i-1, -1, -1) # count backwards to 0
else: # self._command_map[key] == 'cursor down'
candidates = range(i+1, len(self.contents))
if not item_rows:
item_rows = self.get_item_rows(size, focus=True)
for j in candidates:
if not self.contents[j][0].selectable():
continue
self._update_pref_col_from_focus(size)
self.focus_position = j
if not hasattr(self.focus, 'move_cursor_to_coords'):
return
rows = item_rows[j]
if self._command_map[key] == 'cursor up':
rowlist = range(rows-1, -1, -1)
else: # self._command_map[key] == 'cursor down'
rowlist = range(rows)
for row in rowlist:
tsize = self.get_item_size(size, j, True, item_rows)
if self.focus_item.move_cursor_to_coords(
tsize, self.pref_col, row):
break
return
# nothing to select
return key
def _update_pref_col_from_focus(self, size):
"""Update self.pref_col from the focus widget."""
if not hasattr(self.focus, 'get_pref_col'):
return
i = self.focus_position
tsize = self.get_item_size(size, i, True)
pref_col = self.focus.get_pref_col(tsize)
if pref_col is not None:
self.pref_col = pref_col
def move_cursor_to_coords(self, size, col, row):
"""Capture pref col and set new focus."""
self.pref_col = col
#FIXME guessing focus==True
focus=True
wrow = 0
item_rows = self.get_item_rows(size, focus)
for i, (r, w) in enumerate(zip(item_rows,
(w for (w, options) in self.contents))):
if wrow + r > row:
break
wrow += r
else:
return False
if not w.selectable():
return False
if hasattr(w, 'move_cursor_to_coords'):
tsize = self.get_item_size(size, i, focus, item_rows)
rval = w.move_cursor_to_coords(tsize, col, row-wrow)
if rval is False:
return False
self.focus_position = i
return True
def mouse_event(self, size, event, button, col, row, focus):
"""
Pass the event to the contained widget.
May change focus on button 1 press.
"""
wrow = 0
item_rows = self.get_item_rows(size, focus)
for i, (r, w) in enumerate(zip(item_rows,
(w for (w, options) in self.contents))):
if wrow + r > row:
break
wrow += r
focus = focus and self.focus_item == w
if is_mouse_press(event) and button == 1:
if w.selectable():
self.focus_position = i
if not hasattr(w, 'mouse_event'):
return False
tsize = self.get_item_size(size, i, focus, item_rows)
return w.mouse_event(tsize, event, button, col, row-wrow,
focus)
class ColumnsError(Exception):
pass
class Columns(Widget, WidgetContainerMixin, WidgetContainerListContentsMixin):
"""
Widgets arranged horizontally in columns from left to right
"""
_sizing = frozenset([FLOW, BOX])
def __init__(self, widget_list, dividechars=0, focus_column=None,
min_width=1, box_columns=None):
"""
:param widget_list: iterable of flow or box widgets
:param dividechars: number of blank characters between columns
:param focus_column: index into widget_list of column in focus,
if ``None`` the first selectable widget will be chosen.
:param min_width: minimum width for each column which is not
calling widget.pack() in *widget_list*.
:param box_columns: a list of column indexes containing box widgets
whose height is set to the maximum of the rows
required by columns not listed in *box_columns*.
*widget_list* may also contain tuples such as:
(*given_width*, *widget*)
make this column *given_width* screen columns wide, where *given_width*
is an int
(``'pack'``, *widget*)
call :meth:`pack() <Widget.pack>` to calculate the width of this column
        (``'weight'``, *weight*, *widget*)
give this column a relative *weight* (number) to calculate its width from the
screen columns remaining
Widgets not in a tuple are the same as (``'weight'``, ``1``, *widget*)
If the Columns widget is treated as a box widget then all children
are treated as box widgets, and *box_columns* is ignored.
If the Columns widget is treated as a flow widget then the rows
        are calculated as the largest rows() returned from all columns
except the ones listed in *box_columns*. The box widgets in
*box_columns* will be displayed with this calculated number of rows,
filling the full height.
"""
self.__super.__init__()
self._contents = MonitoredFocusList()
self._contents.set_modified_callback(self._invalidate)
self._contents.set_focus_changed_callback(lambda f: self._invalidate())
self._contents.set_validate_contents_modified(self._contents_modified)
box_columns = set(box_columns or ())
for i, original in enumerate(widget_list):
w = original
if not isinstance(w, tuple):
self.contents.append((w, (WEIGHT, 1, i in box_columns)))
elif w[0] in (FLOW, PACK): # 'pack' used to be called 'flow'
f = PACK
_ignored, w = w
self.contents.append((w, (f, None, i in box_columns)))
elif len(w) == 2:
width, w = w
self.contents.append((w, (GIVEN, width, i in box_columns)))
elif w[0] == FIXED: # backwards compatibility
f = GIVEN
_ignored, width, w = w
self.contents.append((w, (GIVEN, width, i in box_columns)))
elif w[0] == WEIGHT:
f, width, w = w
self.contents.append((w, (f, width, i in box_columns)))
else:
raise ColumnsError(
"initial widget list item invalid: %r" % (original,))
if focus_column is None and w.selectable():
focus_column = i
self.dividechars = dividechars
if self.contents and focus_column is not None:
self.focus_position = focus_column
if focus_column is None:
focus_column = 0
self.pref_col = None
self.min_width = min_width
self._cache_maxcol = None
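    # Illustrative sketch, not part of the original source: three columns with
    # one blank column between them, using the tuple forms described above;
    # the widgets and numbers are placeholders.
    #
    #   >>> import urwid
    #   >>> cols = urwid.Columns([
    #   ...     ('pack', urwid.Text(u"fit")),     # width from pack()
    #   ...     (10, urwid.Text(u"ten wide")),    # 'given' width of 10
    #   ...     urwid.Edit(u"rest: "),            # same as ('weight', 1, w)
    #   ... ], dividechars=1)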
def _contents_modified(self, slc, new_items):
for item in new_items:
try:
w, (t, n, b) = item
if t not in (PACK, GIVEN, WEIGHT):
raise ValueError
except (TypeError, ValueError):
raise ColumnsError("added content invalid %r" % (item,))
def _get_widget_list(self):
ml = MonitoredList(w for w, t in self.contents)
def user_modified():
self._set_widget_list(ml)
ml.set_modified_callback(user_modified)
return ml
def _set_widget_list(self, widgets):
focus_position = self.focus_position
self.contents = [
(new, options) for (new, (w, options)) in zip(widgets,
# need to grow contents list if widgets is longer
chain(self.contents, repeat((None, (WEIGHT, 1, False)))))]
if focus_position < len(widgets):
self.focus_position = focus_position
widget_list = property(_get_widget_list, _set_widget_list, doc="""
A list of the widgets in this Columns
.. note:: only for backwards compatibility. You should use the new
standard container property :attr:`contents`.
""")
def _get_column_types(self):
ml = MonitoredList(
# return the old column type names
({GIVEN: FIXED, PACK: FLOW}.get(t, t), n)
for w, (t, n, b) in self.contents)
def user_modified():
self._set_column_types(ml)
ml.set_modified_callback(user_modified)
return ml
def _set_column_types(self, column_types):
focus_position = self.focus_position
self.contents = [
(w, ({FIXED: GIVEN, FLOW: PACK}.get(new_t, new_t), new_n, b))
for ((new_t, new_n), (w, (t, n, b)))
in zip(column_types, self.contents)]
if focus_position < len(column_types):
self.focus_position = focus_position
column_types = property(_get_column_types, _set_column_types, doc="""
        A list of the old partial options values for widgets in this Columns,
        for backwards compatibility only. You should use the new standard
        container property .contents to modify Columns contents.
""")
def _get_box_columns(self):
ml = MonitoredList(
i for i, (w, (t, n, b)) in enumerate(self.contents) if b)
def user_modified():
self._set_box_columns(ml)
ml.set_modified_callback(user_modified)
return ml
def _set_box_columns(self, box_columns):
box_columns = set(box_columns)
self.contents = [
(w, (t, n, i in box_columns))
for (i, (w, (t, n, b))) in enumerate(self.contents)]
box_columns = property(_get_box_columns, _set_box_columns, doc="""
A list of the indexes of the columns that are to be treated as
box widgets when the Columns is treated as a flow widget.
.. note:: only for backwards compatibility. You should use the new
standard container property :attr:`contents`.
""")
def _get_has_pack_type(self):
import warnings
warnings.warn(".has_flow_type is deprecated, "
"read values from .contents instead.", DeprecationWarning)
return PACK in self.column_types
def _set_has_pack_type(self, value):
import warnings
warnings.warn(".has_flow_type is deprecated, "
"read values from .contents instead.", DeprecationWarning)
has_flow_type = property(_get_has_pack_type, _set_has_pack_type, doc="""
.. deprecated:: 1.0 Read values from :attr:`contents` instead.
""")
def _get_contents(self):
return self._contents
def _set_contents(self, c):
self._contents[:] = c
contents = property(_get_contents, _set_contents, doc="""
The contents of this Columns as a list of `(widget, options)` tuples.
This list may be modified like a normal list and the Columns
widget will update automatically.
.. seealso:: Create new options tuples with the :meth:`options` method
""")
def options(self, width_type=WEIGHT, width_amount=1, box_widget=False):
"""
        Return a new options tuple for use in a Columns's .contents list.
This sets an entry's width type: one of the following:
``'pack'``
Call the widget's :meth:`Widget.pack` method to determine how wide
this column should be. *width_amount* is ignored.
``'given'``
Make column exactly width_amount screen-columns wide.
``'weight'``
Allocate the remaining space to this column by using
*width_amount* as a weight value.
:param width_type: ``'pack'``, ``'given'`` or ``'weight'``
:param width_amount: ``None`` for ``'pack'``, a number of screen columns
for ``'given'`` or a weight value (number) for ``'weight'``
:param box_widget: set to `True` if this widget is to be treated as a box
widget when the Columns widget itself is treated as a flow widget.
:type box_widget: bool
"""
if width_type == PACK:
width_amount = None
if width_type not in (PACK, GIVEN, WEIGHT):
raise ColumnsError('invalid width_type: %r' % (width_type,))
return (width_type, width_amount, box_widget)
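    # Illustrative sketch, not part of the original source, continuing the
    # Columns example above: replacing one column's options through the
    # standard container API.
    #
    #   >>> w, _old = cols.contents[2]
    #   >>> cols.contents[2] = (w, cols.options('weight', 3))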
def _invalidate(self):
self._cache_maxcol = None
self.__super._invalidate()
def set_focus_column(self, num):
"""
Set the column in focus by its index in :attr:`widget_list`.
:param num: index of focus-to-be entry
:type num: int
.. note:: only for backwards compatibility. You may also use the new
standard container property :attr:`focus_position` to set the focus.
"""
self._set_focus_position(num)
def get_focus_column(self):
"""
Return the focus column index.
.. note:: only for backwards compatibility. You may also use the new
standard container property :attr:`focus_position` to get the focus.
"""
return self.focus_position
def set_focus(self, item):
"""
Set the item in focus
.. note:: only for backwards compatibility. You may also use the new
            standard container property :attr:`focus_position` to set the focus.
:param item: widget or integer index"""
if isinstance(item, int):
return self._set_focus_position(item)
for i, (w, options) in enumerate(self.contents):
if item == w:
self.focus_position = i
return
raise ValueError("Widget not found in Columns contents: %r" % (item,))
def get_focus(self):
"""
Return the widget in focus, for backwards compatibility. You may
also use the new standard container property .focus to get the
child widget in focus.
"""
if not self.contents:
return None
return self.contents[self.focus_position][0]
focus = property(get_focus,
doc="the child widget in focus or None when Columns is empty")
def _get_focus_position(self):
"""
Return the index of the widget in focus or None if this Columns is
empty.
"""
if not self.widget_list:
raise IndexError, "No focus_position, Columns is empty"
return self.contents.focus
def _set_focus_position(self, position):
"""
Set the widget in focus.
position -- index of child widget to be made focus
"""
try:
if position < 0 or position >= len(self.contents):
raise IndexError
except (TypeError, IndexError):
raise IndexError, "No Columns child widget at position %s" % (position,)
self.contents.focus = position
focus_position = property(_get_focus_position, _set_focus_position, doc="""
index of child widget in focus. Raises IndexError if read when
Columns is empty, or when set to an invalid index.
""")
focus_col = property(_get_focus_position, _set_focus_position, doc="""
A property for reading and setting the index of the column in
focus.
.. note:: only for backwards compatibility. You may also use the new
standard container property :attr:`focus_position` to get the focus.
""")
def column_widths(self, size, focus=False):
"""
Return a list of column widths.
0 values in the list mean hide corresponding column completely
"""
maxcol = size[0]
# FIXME: get rid of this check and recalculate only when
# a 'pack' widget has been modified.
if maxcol == self._cache_maxcol and not PACK in self.column_types:
return self._cache_column_widths
widths = []
weighted = []
shared = maxcol + self.dividechars
for i, (w, (t, width, b)) in enumerate(self.contents):
if t == GIVEN:
static_w = width
elif t == PACK:
# FIXME: should be able to pack with a different
# maxcol value
static_w = w.pack((maxcol,), focus)[0]
else:
static_w = self.min_width
if shared < static_w + self.dividechars and i > self.focus_position:
break
widths.append(static_w)
shared -= static_w + self.dividechars
if t not in (GIVEN, PACK):
weighted.append((width, i))
# drop columns on the left until we fit
for i, w in enumerate(widths):
if shared >= 0:
break
shared += widths[i] + self.dividechars
widths[i] = 0
if weighted and weighted[0][1] == i:
del weighted[0]
if shared:
# divide up the remaining space between weighted cols
weighted.sort()
wtotal = sum(weight for weight, i in weighted)
grow = shared + len(weighted) * self.min_width
for weight, i in weighted:
width = int(float(grow) * weight / wtotal + 0.5)
width = max(self.min_width, width)
widths[i] = width
grow -= width
wtotal -= weight
self._cache_maxcol = maxcol
self._cache_column_widths = widths
return widths
def render(self, size, focus=False):
"""
Render columns and return canvas.
:param size: see :meth:`Widget.render` for details
:param focus: ``True`` if this widget is in focus
:type focus: bool
"""
widths = self.column_widths(size, focus)
box_maxrow = None
if len(size) == 1:
box_maxrow = 1
# two-pass mode to determine maxrow for box columns
for i, (mc, (w, (t, n, b))) in enumerate(zip(widths, self.contents)):
if b:
continue
rows = w.rows((mc,),
focus = focus and self.focus_position == i)
box_maxrow = max(box_maxrow, rows)
l = []
for i, (mc, (w, (t, n, b))) in enumerate(zip(widths, self.contents)):
# if the widget has a width of 0, hide it
if mc <= 0:
continue
if box_maxrow and b:
sub_size = (mc, box_maxrow)
else:
sub_size = (mc,) + size[1:]
canv = w.render(sub_size,
focus = focus and self.focus_position == i)
if i < len(widths) - 1:
mc += self.dividechars
l.append((canv, i, self.focus_position == i, mc))
if not l:
return SolidCanvas(" ", size[0], (size[1:]+(1,))[0])
canv = CanvasJoin(l)
if canv.cols() < size[0]:
canv.pad_trim_left_right(0, size[0] - canv.cols())
return canv
def get_cursor_coords(self, size):
"""Return the cursor coordinates from the focus widget."""
w, (t, n, b) = self.contents[self.focus_position]
if not w.selectable():
return None
if not hasattr(w, 'get_cursor_coords'):
return None
widths = self.column_widths(size)
if len(widths) <= self.focus_position:
return None
colw = widths[self.focus_position]
if len(size) == 1 and b:
coords = w.get_cursor_coords((colw, self.rows(size)))
else:
coords = w.get_cursor_coords((colw,)+size[1:])
if coords is None:
return None
x, y = coords
x += sum([self.dividechars + wc
for wc in widths[:self.focus_position] if wc > 0])
return x, y
def move_cursor_to_coords(self, size, col, row):
"""
Choose a selectable column to focus based on the coords.
        see :meth:`Widget.move_cursor_to_coords` for details
"""
widths = self.column_widths(size)
best = None
x = 0
for i, (width, (w, options)) in enumerate(zip(widths, self.contents)):
end = x + width
if w.selectable():
if col != RIGHT and (col == LEFT or x > col) and best is None:
# no other choice
best = i, x, end, w, options
break
if col != RIGHT and x > col and col-best[2] < x-col:
# choose one on left
break
best = i, x, end, w, options
if col != RIGHT and col < end:
# choose this one
break
x = end + self.dividechars
if best is None:
return False
i, x, end, w, (t, n, b) = best
if hasattr(w, 'move_cursor_to_coords'):
if isinstance(col, int):
move_x = min(max(0, col - x), end - x - 1)
else:
move_x = col
if len(size) == 1 and b:
rval = w.move_cursor_to_coords((end - x, self.rows(size)),
move_x, row)
else:
rval = w.move_cursor_to_coords((end - x,) + size[1:],
move_x, row)
if rval is False:
return False
self.focus_position = i
self.pref_col = col
return True
def mouse_event(self, size, event, button, col, row, focus):
"""
Send event to appropriate column.
May change focus on button 1 press.
"""
widths = self.column_widths(size)
x = 0
for i, (width, (w, (t, n, b))) in enumerate(zip(widths, self.contents)):
if col < x:
return False
w = self.widget_list[i]
end = x + width
if col >= end:
x = end + self.dividechars
continue
focus = focus and self.focus_col == i
if is_mouse_press(event) and button == 1:
if w.selectable():
self.focus_position = i
if not hasattr(w, 'mouse_event'):
return False
if len(size) == 1 and b:
return w.mouse_event((end - x, self.rows(size)), event, button,
col - x, row, focus)
return w.mouse_event((end - x,) + size[1:], event, button,
col - x, row, focus)
return False
def get_pref_col(self, size):
"""Return the pref col from the column in focus."""
widths = self.column_widths(size)
w, (t, n, b) = self.contents[self.focus_position]
if len(widths) <= self.focus_position:
return 0
col = None
cwidth = widths[self.focus_position]
if hasattr(w, 'get_pref_col'):
if len(size) == 1 and b:
col = w.get_pref_col((cwidth, self.rows(size)))
else:
col = w.get_pref_col((cwidth,) + size[1:])
if isinstance(col, int):
col += self.focus_col * self.dividechars
col += sum(widths[:self.focus_position])
if col is None:
col = self.pref_col
if col is None and w.selectable():
col = cwidth // 2
col += self.focus_position * self.dividechars
col += sum(widths[:self.focus_position] )
return col
def rows(self, size, focus=False):
"""
Return the number of rows required by the columns.
This only makes sense if :attr:`widget_list` contains flow widgets.
see :meth:`Widget.rows` for details
"""
widths = self.column_widths(size, focus)
rows = 1
for i, (mc, (w, (t, n, b))) in enumerate(zip(widths, self.contents)):
if b:
continue
rows = max(rows,
w.rows((mc,), focus=focus and self.focus_position == i))
return rows
def keypress(self, size, key):
"""
Pass keypress to the focus column.
:param size: `(maxcol,)` if :attr:`widget_list` contains flow widgets or
`(maxcol, maxrow)` if it contains box widgets.
:type size: int, int
"""
if self.focus_position is None: return key
widths = self.column_widths(size)
if self.focus_position >= len(widths):
return key
i = self.focus_position
mc = widths[i]
w, (t, n, b) = self.contents[i]
if self._command_map[key] not in ('cursor up', 'cursor down',
'cursor page up', 'cursor page down'):
self.pref_col = None
if len(size) == 1 and b:
key = w.keypress((mc, self.rows(size, True)), key)
else:
key = w.keypress((mc,) + size[1:], key)
if self._command_map[key] not in ('cursor left', 'cursor right'):
return key
if self._command_map[key] == 'cursor left':
candidates = range(i-1, -1, -1) # count backwards to 0
else: # key == 'right'
candidates = range(i+1, len(self.contents))
for j in candidates:
if not self.contents[j][0].selectable():
continue
self.focus_position = j
return
return key
def selectable(self):
"""Return the selectable value of the focus column."""
w = self.focus
return w is not None and w.selectable()
def _test():
import doctest
doctest.testmod()
if __name__=='__main__':
_test()
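# Illustrative standalone sketch (not part of urwid): column_widths() above
# splits the available screen width between GIVEN (fixed), PACK (content-sized)
# and WEIGHT columns.  The helper below reproduces only the weighted
# distribution step, assuming the fixed widths are already known; the function
# name and defaults are invented for illustration.
def _sketch_distribute_weights(maxcol, fixed, weighted, dividechars=1, min_width=1):
    """fixed maps column index -> width, weighted maps column index -> weight."""
    n = len(fixed) + len(weighted)
    widths = [0] * n
    for i, w in fixed.items():
        widths[i] = w
    # space left for the weighted columns once fixed widths and dividers are taken
    grow = maxcol - (n - 1) * dividechars - sum(fixed.values())
    wtotal = sum(weighted.values())
    # smallest weights first, as in column_widths(), so rounding errors land on
    # the larger columns
    for i, weight in sorted(weighted.items(), key=lambda kv: kv[1]):
        width = max(min_width, int(float(grow) * weight / wtotal + 0.5))
        widths[i] = width
        grow -= width
        wtotal -= weight
    return widths

if __name__ == '__main__':
    # an 80-column screen: one fixed 20-column side bar plus two weighted panes
    print(_sketch_distribute_weights(80, {0: 20}, {1: 1, 2: 2}))  # [20, 19, 39]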
|
bk2204/urwid
|
urwid/container.py
|
Python
|
lgpl-2.1
| 84,505
|
import os
import subprocess
import psycopg2
import momoko
import tornado.httpserver
import tornado.ioloop
import tornado.options
import tornado.web
import tornado.escape
from tornado import gen
from tornado.options import define, options
from handlers import (AuthCreateUserHandler,
AuthLoginHandler, AuthLogoutHandler,
CategoryHandler, CategoryListHandler,
ProductsListHandler, ProductDetailHandler,
MessageHandler, MainHandler,
AdminHandler, UploadHandler)
from handlers import InvoicesListHandler, InvoiceDetailsHandler
define("port", default=8888, help="run on the given port", type=int)
define("psql_host", default="localhost", help="db host")
define("psql_dbname", default="marynado", help="db name")
define("psql_user", default="marynado", help="db user")
define("psql_password", default="marynado", help="db password")
define("psql_port", default="5432", help="db port")
# DSN for momoko
dsn = 'dbname={} user={} password={} ' \
'host={} port={}'.format(options.psql_dbname,
options.psql_user,
options.psql_password,
options.psql_host,
options.psql_port)
class Application(tornado.web.Application):
def __init__(self):
self.db = None
handlers = [
(r"/auth/create\/?", AuthCreateUserHandler),
(r"/auth/login\/?", AuthLoginHandler),
(r"/auth/logout\/?", AuthLogoutHandler),
(r"/api/categories\/?", CategoryListHandler),
(r"/api/categories/(\d+)\/?", CategoryHandler),
(r"/api/categories/(\d+)/products\/?", ProductsListHandler),
(r"/api/categories/(\d+)/products/(\d+)\/?", ProductDetailHandler),
(r"/api/message\/?", MessageHandler),
(r"/api/invoices\/?", InvoicesListHandler),
(r"/api/invoices/(\d+)\/?", InvoiceDetailsHandler),
(r"/admin\/?", AdminHandler),
(r"/admin/upload", UploadHandler),
(r"/.*", MainHandler),
]
settings = dict(
app_title='Marynado',
template_path=os.path.join(os.path.dirname(__file__),
"templates"),
static_path=os.path.join(os.path.dirname(__file__),
"static"),
xsrf_cookies=False, # Turn ON and add csrf tokens to forms
cookie_secret="hgqimqierg#qcieohgc*qoeihx@#qeihr",
debug=True,
login_url="/auth/login/"
)
super(Application, self).__init__(handlers, **settings)
@gen.coroutine
def maybe_create_tables(self):
try:
yield self.db.execute("SELECT COUNT(*) FROM users")
except psycopg2.ProgrammingError:
subprocess.check_call(['psql',
'--host=' + options.psql_host,
'--dbname=' + options.psql_dbname,
'--user=' + options.psql_user],
env={'PGPASSWORD': options.psql_password},
stdin=open('sql/schema.sql'))
def create_app(dsn):
application = Application()
ioloop = tornado.ioloop.IOLoop.instance()
application.db = momoko.Pool(dsn=dsn, size=1, ioloop=ioloop)
future = application.db.connect()
ioloop.add_future(future, lambda f: ioloop.stop())
ioloop.start()
future.result()
return application, ioloop
def main():
tornado.options.parse_command_line()
application, ioloop = create_app(dsn)
application.maybe_create_tables()
http_server = tornado.httpserver.HTTPServer(application)
http_server.listen(options.port)
ioloop.start()
if __name__ == "__main__":
main()
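# Illustrative standalone sketch (not part of this project): create_app() above
# blocks until the momoko pool is connected by running the IOLoop just long
# enough for the connect() future to resolve (add_future -> stop -> start).
# The helpers below show that pattern in isolation with a dummy future instead
# of a database connection; all names here are invented for illustration.
from tornado.concurrent import Future as _SketchFuture


def _sketch_wait_for(make_future):
    """Run the IOLoop until the future returned by make_future(ioloop) resolves."""
    ioloop = tornado.ioloop.IOLoop.current()
    future = make_future(ioloop)
    ioloop.add_future(future, lambda f: ioloop.stop())
    ioloop.start()
    return future.result()


def _sketch_dummy_connect(ioloop):
    future = _SketchFuture()
    ioloop.call_later(0.1, lambda: future.set_result("connected"))
    return future

# Example (run manually, e.g. from a REPL):
#   print(_sketch_wait_for(_sketch_dummy_connect))   # -> connected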
|
freundallein/marynado
|
application.py
|
Python
|
apache-2.0
| 3,900
|
"""
Format the current file with black or isort.
Available in Tools/Python/Black and Tools/Python/Isort.
"""
from __future__ import annotations
import logging
import subprocess
import traceback
from functools import partial
from pathlib import Path
from tkinter import messagebox
from porcupine import menubar, tabs, textutils, utils
from porcupine.plugins import python_venv
log = logging.getLogger(__name__)
def run_tool(tool: str, code: str, path: Path | None) -> str:
python = python_venv.find_python(None if path is None else utils.find_project_root(path))
if python is None:
messagebox.showerror(
"Can't find a Python installation", f"You need to install Python to run {tool}."
)
return code
fail_str = f"Running {tool} failed"
try:
# run in subprocess just to make sure that it can't crash porcupine
# set cwd so that black/isort finds its config in pyproject.toml
#
# FIXME: file must not be named black.py or similar
result = subprocess.run(
[str(python), "-m", tool, "-"],
check=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=(Path.home() if path is None else path.parent),
input=code.encode("utf-8"),
)
return result.stdout.decode("utf-8")
except subprocess.CalledProcessError as e:
messagebox.showerror(
fail_str,
utils.tkinter_safe_string(e.stderr.decode("utf-8"), hide_unsupported_chars=True),
)
except Exception:
log.exception(f"running {tool} failed")
messagebox.showerror(fail_str, traceback.format_exc())
return code
def format_code_in_textwidget(tool: str, tab: tabs.FileTab) -> None:
before = tab.textwidget.get("1.0", "end - 1 char")
after = run_tool(tool, before, tab.path)
if before != after:
with textutils.change_batch(tab.textwidget):
tab.textwidget.replace("1.0", "end - 1 char", after)
def setup() -> None:
menubar.add_filetab_command("Tools/Python/Black", partial(format_code_in_textwidget, "black"))
menubar.add_filetab_command("Tools/Python/Isort", partial(format_code_in_textwidget, "isort"))
|
Akuli/porcupine
|
porcupine/plugins/python_tools.py
|
Python
|
mit
| 2,238
|
# This file is part of REXT
# core.Harvester.py - super class for harvester scripts
# Author: Ján Trenčanský
# License: GNU GPL v3
import cmd
import core.globals
import interface.utils
from interface.messages import print_error, print_help
class RextHarvester(cmd.Cmd):
host = ""
port = "80"
def __init__(self):
cmd.Cmd.__init__(self)
interface.utils.change_prompt(self, core.globals.active_module_path + core.globals.active_script)
self.cmdloop()
def do_back(self, e):
return True
def do_info(self, e):
print(self.__doc__)
def do_run(self, e):
pass
def do_set(self, e):
args = e.split(' ')
try:
if args[0] == "host":
if interface.utils.validate_ipv4(args[1]):
self.host = args[1]
else:
print_error("please provide valid IPv4 address")
elif args[0] == "port":
if str.isdigit(args[1]):
self.port = args[1]
else:
print_error("port value must be integer")
except IndexError:
print_error("please specify value for variable")
def complete_set(self, text, line, begidx, endidx):
modules = ["host", "port"]
module_line = line.partition(' ')[2]
igon = len(module_line) - len(text)
return [s[igon:] for s in modules if s.startswith(module_line)]
def help_set(self):
print_help("Set value of variable: \"set host 192.168.1.1\"")
def help_back(self):
print_help("Exit script")
def help_run(self, e):
print_help("Run script")
def help_info(self, e):
print_help("Show info about loaded module")
|
j91321/rext
|
core/Harvester.py
|
Python
|
gpl-3.0
| 1,762
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Argmax matcher implementation.
This class takes a similarity matrix and matches columns to rows based on the
maximum value per column. One can specify a matched_threshold to prevent
columns from matching to rows (generally resulting in a negative training
example) and an unmatched_threshold to ignore the match (generally resulting
in neither a positive nor a negative training example).
This matcher is used in Fast(er)-RCNN.
Note: matchers are used in TargetAssigners. There is a create_target_assigner
factory function for popular implementations.
"""
import tensorflow.compat.v1 as tf
from REDACTED.mask_rcnn.object_detection import matcher
from REDACTED.mask_rcnn.object_detection import shape_utils
class ArgMaxMatcher(matcher.Matcher):
"""Matcher based on highest value.
This class computes matches from a similarity matrix. Each column is matched
to a single row.
To support object detection target assignment this class enables setting both
  matched_threshold (upper threshold) and unmatched_threshold (lower threshold)
defining three categories of similarity which define whether examples are
positive, negative, or ignored:
(1) similarity >= matched_threshold: Highest similarity. Matched/Positive!
(2) matched_threshold > similarity >= unmatched_threshold: Medium similarity.
Depending on negatives_lower_than_unmatched, this is either
Unmatched/Negative OR Ignore.
(3) unmatched_threshold > similarity: Lowest similarity. Depending on flag
negatives_lower_than_unmatched, either Unmatched/Negative OR Ignore.
For ignored matches this class sets the values in the Match object to -2.
"""
def __init__(self,
matched_threshold,
unmatched_threshold=None,
negatives_lower_than_unmatched=True,
force_match_for_each_row=False):
"""Construct ArgMaxMatcher.
Args:
matched_threshold: Threshold for positive matches. Positive if
sim >= matched_threshold, where sim is the maximum value of the
similarity matrix for a given column. Set to None for no threshold.
unmatched_threshold: Threshold for negative matches. Negative if
sim < unmatched_threshold. Defaults to matched_threshold
when set to None.
negatives_lower_than_unmatched: Boolean which defaults to True. If True
then negative matches are the ones below the unmatched_threshold,
        whereas ignored matches are in between the matched and unmatched
threshold. If False, then negative matches are in between the matched
and unmatched threshold, and everything lower than unmatched is ignored.
force_match_for_each_row: If True, ensures that each row is matched to
at least one column (which is not guaranteed otherwise if the
matched_threshold is high). Defaults to False. See
argmax_matcher_test.testMatcherForceMatch() for an example.
Raises:
ValueError: if unmatched_threshold is set but matched_threshold is not set
or if unmatched_threshold > matched_threshold.
"""
if (matched_threshold is None) and (unmatched_threshold is not None):
      raise ValueError('Need to also define matched_threshold when '
'unmatched_threshold is defined')
self._matched_threshold = matched_threshold
if unmatched_threshold is None:
self._unmatched_threshold = matched_threshold
else:
if unmatched_threshold > matched_threshold:
        raise ValueError('unmatched_threshold needs to be smaller or equal '
'to matched_threshold')
self._unmatched_threshold = unmatched_threshold
if not negatives_lower_than_unmatched:
if self._unmatched_threshold == self._matched_threshold:
raise ValueError('When negatives are in between matched and '
'unmatched thresholds, these cannot be of equal '
                         'value. matched: %s, unmatched: %s' %
                         (self._matched_threshold, self._unmatched_threshold))
self._force_match_for_each_row = force_match_for_each_row
self._negatives_lower_than_unmatched = negatives_lower_than_unmatched
def _match(self, similarity_matrix):
"""Tries to match each column of the similarity matrix to a row.
Args:
similarity_matrix: tensor of shape [N, M] representing any similarity
metric.
Returns:
Match object with corresponding matches for each of M columns.
"""
def _match_when_rows_are_empty():
"""Performs matching when the rows of similarity matrix are empty.
When the rows are empty, all detections are false positives. So we return
a tensor of -1's to indicate that the columns do not match to any rows.
Returns:
matches: int32 tensor indicating the row each column matches to.
"""
similarity_matrix_shape = shape_utils.combined_static_and_dynamic_shape(
similarity_matrix)
return -1 * tf.ones([similarity_matrix_shape[1]], dtype=tf.int32)
def _match_when_rows_are_non_empty():
"""Performs matching when the rows of similarity matrix are non empty.
Returns:
matches: int32 tensor indicating the row each column matches to.
"""
# Matches for each column
matches = tf.argmax(similarity_matrix, 0, output_type=tf.int32)
# Deal with matched and unmatched threshold
if self._matched_threshold is not None:
# Get logical indices of ignored and unmatched columns as tf.int64
matched_vals = tf.reduce_max(similarity_matrix, 0)
below_unmatched_threshold = tf.greater(self._unmatched_threshold,
matched_vals)
between_thresholds = tf.logical_and(
tf.greater_equal(matched_vals, self._unmatched_threshold),
tf.greater(self._matched_threshold, matched_vals))
if self._negatives_lower_than_unmatched:
matches = self._set_values_using_indicator(matches,
below_unmatched_threshold,
-1)
matches = self._set_values_using_indicator(matches,
between_thresholds,
-2)
else:
matches = self._set_values_using_indicator(matches,
below_unmatched_threshold,
-2)
matches = self._set_values_using_indicator(matches,
between_thresholds,
-1)
if self._force_match_for_each_row:
similarity_matrix_shape = shape_utils.combined_static_and_dynamic_shape(
similarity_matrix)
force_match_column_ids = tf.argmax(similarity_matrix, 1,
output_type=tf.int32)
force_match_column_indicators = tf.one_hot(
force_match_column_ids, depth=similarity_matrix_shape[1])
force_match_row_ids = tf.argmax(force_match_column_indicators, 0,
output_type=tf.int32)
force_match_column_mask = tf.cast(
tf.reduce_max(force_match_column_indicators, 0), tf.bool)
final_matches = tf.where(force_match_column_mask,
force_match_row_ids, matches)
return final_matches
else:
return matches
if similarity_matrix.shape.is_fully_defined():
if similarity_matrix.shape[0].value == 0:
return _match_when_rows_are_empty()
else:
return _match_when_rows_are_non_empty()
else:
return tf.cond(
tf.greater(tf.shape(similarity_matrix)[0], 0),
_match_when_rows_are_non_empty, _match_when_rows_are_empty)
def _set_values_using_indicator(self, x, indicator, val):
"""Set the indicated fields of x to val.
Args:
x: tensor.
indicator: boolean with same shape as x.
val: scalar with value to set.
Returns:
modified tensor.
"""
indicator = tf.cast(indicator, x.dtype)
return tf.add(tf.multiply(x, 1 - indicator), val * indicator)
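# Illustrative standalone sketch (not part of this module): the TensorFlow graph
# code above is easier to follow as plain NumPy.  This reproduces the matching
# rule for the default configuration (negatives_lower_than_unmatched=True, no
# forced row matches); the function name is invented.
#   sim >= matched_threshold                       -> row index (positive match)
#   unmatched_threshold <= sim < matched_threshold -> -2 (ignored)
#   sim < unmatched_threshold                      -> -1 (negative)
import numpy as np


def _sketch_argmax_match(similarity, matched_threshold, unmatched_threshold):
  """similarity: array of shape [num_rows, num_cols]; one result per column."""
  if similarity.shape[0] == 0:
    return -1 * np.ones(similarity.shape[1], dtype=np.int32)
  matches = np.argmax(similarity, axis=0).astype(np.int32)
  best = np.max(similarity, axis=0)
  matches[(best >= unmatched_threshold) & (best < matched_threshold)] = -2
  matches[best < unmatched_threshold] = -1
  return matches


if __name__ == '__main__':
  sim = np.array([[0.9, 0.4, 0.1],
                  [0.2, 0.6, 0.05]])
  print(_sketch_argmax_match(sim, 0.7, 0.3))  # -> [ 0 -2 -1]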
|
mlperf/training_results_v0.7
|
Google/benchmarks/maskrcnn/implementations/maskrcnn-research-TF-tpu-v4-512/object_detection/argmax_matcher.py
|
Python
|
apache-2.0
| 9,028
|
# -*- coding: iso-8859-1 -*-
# -----------------------------------------------------------------------
# videolibrary.py - jsonrpc interface for XBMC-compatible remotes
# -----------------------------------------------------------------------
# $Id$
#
# JSONRPC and XBMC eventserver to be used for XBMC-compatible
# remotes. Only tested with Yatse so far. If something is not working,
# do not blame the remote, blame this plugin.
#
# Not all API calls are implemented yet.
#
# -----------------------------------------------------------------------
# Freevo - A Home Theater PC framework
# Copyright (C) 2014 Dirk Meyer, et al.
#
# First Edition: Dirk Meyer <dischi@freevo.org>
# Maintainer: Dirk Meyer <dischi@freevo.org>
#
# Please see the file AUTHORS for a complete list of authors.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MER-
# CHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# ----------------------------------------------------------------------- */
# python imports
import logging
# kaa imports
import kaa
import kaa.beacon
import kaa.webmetadata
import kaa.metadata
# jsonrpc imports
import utils
# get logging object
log = logging.getLogger('freevo')
tvdb = kaa.webmetadata.tv.backends['thetvdb']
def _fill_episode_details(f, properties):
"""
Helper function to provide episode details
"""
info = {
'episodeid': f._beacon_id[1],
'label': '%s %sx%02d - %s' % (f.get('series'), f.get('season'), f.get('episode'), f.get('title'))
}
for prop in properties:
if prop == 'season':
info[prop] = f.get('season')
elif prop == 'episode':
info[prop] = f.get('episode')
elif prop == 'showtitle':
info[prop] = f.get('series')
elif prop == 'title':
info[prop] = f.get('title')
elif prop == 'thumbnail':
info[prop] = utils.register_image(f.thumbnail, f)
elif prop in ('rating',):
info[prop] = 0.0
elif prop == 'file':
info[prop] = f.url
elif prop == 'plot':
info[prop] = f.get('description')
elif prop == 'playcount':
info[prop] = 0
elif prop == 'resume':
info[prop] = False
elif prop in ('firstaired', 'dateadded', 'originaltitle'):
info[prop] = ''
elif prop in ('cast', ):
info[prop] = []
elif prop == 'streamdetails':
            # Filled in correctly later for episode details
info[prop] = {
'audio': [],
'subtitle': [],
'video': [] }
elif prop == 'tvshowid':
info[prop] = -1
else:
log.error('no support for %s' % prop)
info[prop] = ''
return info
@kaa.coroutine()
def GetTVShows(properties, limits):
"""
JsonRPC Callback VideoLibrary.GetTVShows
"""
tvshows = []
for name in sorted((yield kaa.beacon.query(attr='series', type='video'))):
series = kaa.webmetadata.tv.series(name)
if not series:
continue
info = {}
for prop in properties:
if prop == 'art':
info[prop] = {}
if series.banner:
info[prop]['banner'] = utils.register_image(series.banner)
if series.image:
info[prop]['fanart'] = utils.register_image(series.image)
if series.poster:
info[prop]['poster'] = utils.register_image(series.poster)
elif prop in ('watchedepisodes', 'playcount'):
info[prop] = 0
elif prop in ('season',):
info[prop] = -1
elif prop in ('year',):
info[prop] = series.year
elif prop in ('rating',):
info[prop] = 0.0
elif prop == 'plot':
info[prop] = series.overview
elif prop in ('genre', 'studio', 'cast', 'tag'):
info[prop] = []
elif prop in ('title', 'originaltitle', 'sorttitle'):
info[prop] = name
elif prop in ('mpaa', 'lastplayed', 'dateadded', 'imdbnumber', 'premiered', 'votes'):
info[prop] = ''
            elif prop in ('episode',):
info[prop] = len((yield kaa.beacon.query(series=series.name, type='video')))
else:
log.error('no support for %s' % prop)
info[prop] = ''
info['tvshowid'] = series.id
tvshows.append(info)
start = limits['start']
end = min(limits['end'], len(tvshows))
yield {
'limits': {'start': start, 'end': end, 'total': len(tvshows)},
'tvshows': tvshows[start:end+1] }
yield None
@kaa.coroutine()
def GetSeasons(tvshowid, properties, limits=None):
"""
JsonRPC Callback VideoLibrary.GetSeasons
"""
seasons = []
result = {'seasons': [] }
for series in tvdb.series:
if series.id == tvshowid:
for season in sorted((yield kaa.beacon.query(attr='season', series=series.name, type='video'))):
season = series.seasons[season-1]
info = { 'label': 'Season %s' % season.number, 'tvshowid': tvshowid, 'seasonid': season.number }
for prop in properties:
if prop == 'season':
info[prop] = season.number
elif prop == 'tvshowid':
info[prop] = tvshowid
elif prop == 'showtitle':
info[prop] = season.series.name
elif prop in ('watchedepisodes', 'playcount'):
info[prop] = 0
elif prop == 'thumbnail':
if season.poster:
info[prop] = utils.register_image(season.poster)
                    elif prop in ('episode',):
info[prop] = len((yield kaa.beacon.query(series=season.series.name, season=season.number, type='video')))
else:
log.error('no support for %s' % prop)
info[prop] = ''
result['seasons'].append(info)
start = 0
end = len(result['seasons'])
if limits:
start = limits['start']
end = min(end, limits['end'])
result['limits'] = {'start': start, 'end': end, 'total': len(result['seasons'])}
result['seasons'] = result['seasons'][start:end+1]
yield result
@kaa.coroutine()
def GetEpisodes(properties, limits, tvshowid=-1, season=-1):
"""
JsonRPC Callback VideoLibrary.GetEpisodes
"""
episodes = []
for name in sorted((yield kaa.beacon.query(attr='series', type='video'))):
for f in (yield kaa.beacon.query(series=name, type='video')):
episodes.append(_fill_episode_details(f, properties))
start = limits['start']
end = min(limits['end'], len(episodes))
yield {
'limits': {'start': start, 'end': end, 'total': len(episodes)},
'episodes': episodes[start:end+1] }
@kaa.coroutine()
def GetEpisodeDetails(episodeid, properties):
"""
JsonRPC Callback VideoLibrary.GetEpisodeDetails
"""
result = (yield kaa.beacon.query(id=episodeid, type='video'))
if len(result) != 1:
log.error('bad query')
yield {}
details = _fill_episode_details(result[0], properties)
if 'streamdetails' in properties:
metadata = kaa.metadata.parse(result[0].url)
value = {
'audio': [],
'subtitle': [],
'video': [] }
if metadata:
for v in metadata.video:
value['video'].append(utils.fill_video_details(v, metadata))
for a in metadata.audio:
value['audio'].append(utils.fill_audio_details(a))
for s in metadata.subtitles:
value['subtitle'].append(utils.fill_subtitle_details(s))
details['streamdetails'] = value
yield {'episodedetails': details}
###################################################################################################
# Movie
###################################################################################################
def _fill_movie_details(f, properties):
"""
    Helper function to provide movie details
"""
info = {
'movieid': f._beacon_id[1],
'label': f.get('title')
}
# it kind of sucks, but we have to ask kaa.(web)metadata again for
# each movie to get the details.
metadata = kaa.metadata.parse(f.url[7:])
webmetadata = kaa.webmetadata.parse(f.url[7:], metadata)
for prop in properties:
if prop == 'title':
info[prop] = f.get('title')
elif prop == 'thumbnail':
info[prop] = utils.register_image(f.get('poster'), f)
elif prop in ('rating',):
info[prop] = 0.0
elif prop == 'file':
info[prop] = f.url
elif prop == 'plot':
info[prop] = f.get('description')
elif prop in ('playcount', 'setid'):
info[prop] = 0
elif prop == 'resume':
info[prop] = { 'position': 0, 'total': 0 }
elif prop == 'set':
info[prop] = ''
elif prop == 'fanart':
info[prop] = utils.register_image((webmetadata and webmetadata.image) or '')
elif prop == 'imdbnumber':
info[prop] = (webmetadata and webmetadata.imdb) or ''
elif prop == 'genre':
info[prop] = (webmetadata and webmetadata.genre) or []
elif prop == 'year':
info[prop] = (webmetadata and webmetadata.year) or 0
elif prop == 'runtime':
info[prop] = f.get('length')
elif prop in ('firstaired', 'dateadded', 'originaltitle', 'sorttitle', 'trailer'):
info[prop] = ''
elif prop in ('cast', 'mpaa', 'studio', 'director', 'tag'):
info[prop] = []
elif prop == 'streamdetails':
info[prop] = {
'audio': [],
'subtitle': [],
'video': [] }
if metadata:
for v in metadata.video:
info[prop]['video'].append(utils.fill_video_details(v, metadata))
for a in metadata.audio:
info[prop]['audio'].append(utils.fill_audio_details(a))
for s in metadata.subtitles:
info[prop]['subtitle'].append(utils.fill_subtitle_details(s))
else:
log.error('no support for %s' % prop)
info[prop] = ''
return info
def GetMovieSets(properties, limits):
"""
JsonRPC Callback VideoLibrary.GetMovieSets
"""
moviesets = []
start = limits['start']
end = min(limits['end'], len(moviesets))
return {
'limits': {'start': start, 'end': end, 'total': len(moviesets)},
'moviesets': moviesets[start:end+1] }
@kaa.coroutine()
def GetMovies(properties, limits):
"""
JsonRPC Callback VideoLibrary.GetMovies
"""
movies = []
    # only provide the ones kaa.webmetadata detected as movies
for info in (yield kaa.beacon.query(type='video', movie=True)):
movies.append(_fill_movie_details(info, properties))
movies.sort(lambda x,y: cmp(x['movieid'], y['movieid']))
start = limits['start']
end = min(limits['end'], len(movies))
yield {
'limits': {'start': start, 'end': end, 'total': len(movies)},
'movies': movies[start:end+1] }
@kaa.coroutine()
def GetMovieDetails(movieid, properties):
"""
JsonRPC Callback VideoLibrary.GetMovieDetails
"""
info = (yield kaa.beacon.query(type='video', id=movieid))
yield {'moviedetails': _fill_movie_details(info[0], properties) }
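# Illustrative standalone sketch (not part of Freevo): every list callback above
# applies the same JSON-RPC "limits" convention -- clamp limits['end'] to the
# list length, report the clamped bounds back, and return the inclusive slice.
# A standalone version of that helper; the function name is invented.
def _sketch_apply_limits(items, limits):
    start = limits['start']
    end = min(limits['end'], len(items))
    return {
        'limits': {'start': start, 'end': end, 'total': len(items)},
        'items': items[start:end + 1],
    }

# Example (run manually):
#   _sketch_apply_limits(list('abcdef'), {'start': 1, 'end': 3})
#   -> {'limits': {'start': 1, 'end': 3, 'total': 6}, 'items': ['b', 'c', 'd']}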
|
pacificIT/freevo2
|
src/plugins/jsonrpc/videolibrary.py
|
Python
|
gpl-2.0
| 12,369
|
# ==================================================================================================
# Copyright 2011 Twitter, Inc.
# --------------------------------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this work except in compliance with the License.
# You may obtain a copy of the License in the LICENSE file, or at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==================================================================================================
"""
Interface to Glog-style formatter.
import twitter.common.log
if not using twitter.common.app:
for __main__:
log = twitter.common.log.init('my_binary_name')
otherwise init will be called automatically on app.main()
for library/endpoint:
from twitter.common import log
log.info('info baby')
log.debug('debug baby')
log.fatal('oops fatal!')
Will log to my_binary_name.{INFO,WARNING,ERROR,...} into log_dir using the
Google logging format.
See twitter.common.log.options for customizations.
"""
from __future__ import print_function
import getpass
import logging
import os
from socket import gethostname
import sys
import time
from twitter.common.log.formatters import glog, plain
from twitter.common.log.handlers import ScribeHandler
from twitter.common.log.options import LogOptions
from twitter.common.dirutil import safe_mkdir
class GenericFilter(logging.Filter):
def __init__(self, levelfn=lambda record_level: True):
self._levelfn = levelfn
logging.Filter.__init__(self)
def filter(self, record):
return 1 if self._levelfn(record.levelno) else 0
class ProxyFormatter(logging.Formatter):
class UnknownSchemeException(Exception): pass
_SCHEME_TO_FORMATTER = {
glog.GlogFormatter.SCHEME: glog.GlogFormatter(),
plain.PlainFormatter.SCHEME: plain.PlainFormatter()
}
def __init__(self, scheme_fn):
logging.Formatter.__init__(self)
self._scheme_fn = scheme_fn
def preamble(self):
scheme = self._scheme_fn()
if scheme not in ProxyFormatter._SCHEME_TO_FORMATTER:
raise ProxyFormatter.UnknownSchemeException("Unknown logging scheme: %s" % scheme)
formatter = ProxyFormatter._SCHEME_TO_FORMATTER[scheme]
if hasattr(formatter, 'logfile_preamble') and callable(formatter.logfile_preamble):
return formatter.logfile_preamble()
def format(self, record):
scheme = self._scheme_fn()
if scheme not in ProxyFormatter._SCHEME_TO_FORMATTER:
raise ProxyFormatter.UnknownSchemeException("Unknown logging scheme: %s" % scheme)
return ProxyFormatter._SCHEME_TO_FORMATTER[scheme].format(record)
_FILTER_TYPES = {
logging.DEBUG: 'DEBUG',
logging.INFO: 'INFO',
logging.WARN: 'WARNING',
logging.ERROR: 'ERROR',
logging.FATAL: 'FATAL' # strangely python logging translates this to CRITICAL
}
def print_stderr(message):
"""Emit a message on standard error if logging to stderr is permitted."""
if LogOptions.stderr_log_level() != LogOptions.LOG_LEVEL_NONE:
print(message, file=sys.stderr)
def _safe_setup_link(link_filename, real_filename):
"""
Create a symlink from link_filename to real_filename.
"""
real_filename = os.path.relpath(real_filename, os.path.dirname(link_filename))
if os.path.exists(link_filename):
try:
os.unlink(link_filename)
except OSError:
pass
try:
os.symlink(real_filename, link_filename)
except OSError as e:
# Typically permission denied.
pass
class PreambleFileHandler(logging.FileHandler):
def __init__(self, filename, preamble=None):
self._preamble = preamble
logging.FileHandler.__init__(self, filename)
def _open(self):
stream = logging.FileHandler._open(self)
if self._preamble:
stream.write(self._preamble)
return stream
def _initialize_disk_logging():
safe_mkdir(LogOptions.log_dir())
def _setup_aggregated_disk_logging(filebase):
filename = os.path.join(LogOptions.log_dir(), filebase + '.log')
formatter = ProxyFormatter(LogOptions.disk_log_scheme)
file_handler = PreambleFileHandler(filename, formatter.preamble())
file_handler.setFormatter(formatter)
file_handler.addFilter(GenericFilter(lambda level: level >= LogOptions.disk_log_level()))
return [file_handler]
def _setup_disk_logging(filebase):
handlers = []
def gen_filter(level):
return GenericFilter(
lambda record_level: record_level == level and level >= LogOptions.disk_log_level())
def gen_link_filename(filebase, level):
return '%(filebase)s.%(level)s' % {
'filebase': filebase,
'level': level,
}
hostname = gethostname()
username = getpass.getuser()
pid = os.getpid()
datestring = time.strftime('%Y%m%d-%H%M%S', time.localtime())
def gen_verbose_filename(filebase, level):
return '%(filebase)s.%(hostname)s.%(user)s.log.%(level)s.%(date)s.%(pid)s' % {
'filebase': filebase,
'hostname': hostname,
'user': username,
'level': level,
'date': datestring,
'pid': pid
}
logroot = LogOptions.log_dir()
for filter_type, filter_name in _FILTER_TYPES.items():
formatter = ProxyFormatter(LogOptions.disk_log_scheme)
filter = gen_filter(filter_type)
full_filebase = os.path.join(logroot, filebase)
logfile_link = gen_link_filename(full_filebase, filter_name)
logfile_full = gen_verbose_filename(full_filebase, filter_name)
file_handler = PreambleFileHandler(logfile_full, formatter.preamble())
file_handler.setFormatter(formatter)
file_handler.addFilter(filter)
handlers.append(file_handler)
_safe_setup_link(logfile_link, logfile_full)
return handlers
def _setup_scribe_logging():
filter = GenericFilter(lambda r_l: r_l >= LogOptions.scribe_log_level())
formatter = ProxyFormatter(LogOptions.scribe_log_scheme)
scribe_handler = ScribeHandler(buffer=LogOptions.scribe_buffer(),
category=LogOptions.scribe_category(),
host=LogOptions.scribe_host(),
port=LogOptions.scribe_port())
scribe_handler.setFormatter(formatter)
scribe_handler.addFilter(filter)
return [scribe_handler]
def _setup_stderr_logging():
filter = GenericFilter(lambda r_l: r_l >= LogOptions.stderr_log_level())
formatter = ProxyFormatter(LogOptions.stderr_log_scheme)
stderr_handler = logging.StreamHandler(sys.stderr)
stderr_handler.setFormatter(formatter)
stderr_handler.addFilter(filter)
return [stderr_handler]
def teardown_disk_logging():
root_logger = logging.getLogger()
global _DISK_LOGGERS
for handler in _DISK_LOGGERS:
root_logger.removeHandler(handler)
_DISK_LOGGERS = []
def teardown_scribe_logging():
root_logger = logging.getLogger()
global _SCRIBE_LOGGERS
for handler in _SCRIBE_LOGGERS:
root_logger.removeHandler(handler)
_SCRIBE_LOGGERS = []
def teardown_stderr_logging():
root_logger = logging.getLogger()
global _STDERR_LOGGERS
for handler in _STDERR_LOGGERS:
root_logger.removeHandler(handler)
_STDERR_LOGGERS = []
_SCRIBE_LOGGERS = []
_STDERR_LOGGERS = []
_DISK_LOGGERS = []
def init(filebase=None):
"""
Sets up default stderr logging and, if filebase is supplied, sets up disk logging using:
{--log_dir}/filebase.{INFO,WARNING,...}
If '--log_simple' is specified, logs are written into a single file:
{--log_dir}/filebase.log
"""
logging._acquireLock()
# set up permissive logger
root_logger = logging.getLogger()
root_logger.setLevel(logging.DEBUG)
# clear existing handlers
teardown_scribe_logging()
teardown_stderr_logging()
teardown_disk_logging()
for handler in root_logger.handlers:
root_logger.removeHandler(handler)
# setup INFO...FATAL handlers
if filebase:
_initialize_disk_logging()
initializer = _setup_aggregated_disk_logging if LogOptions.simple() else _setup_disk_logging
for handler in initializer(filebase):
root_logger.addHandler(handler)
_DISK_LOGGERS.append(handler)
if LogOptions._is_scribe_logging_required():
try:
for handler in _setup_scribe_logging():
root_logger.addHandler(handler)
_SCRIBE_LOGGERS.append(handler)
except ScribeHandler.ScribeHandlerException as err:
print_stderr(err)
for handler in _setup_stderr_logging():
root_logger.addHandler(handler)
_STDERR_LOGGERS.append(handler)
logging._releaseLock()
if len(_DISK_LOGGERS) > 0:
print_stderr('Writing log files to disk in %s' % LogOptions.log_dir())
if len(_SCRIBE_LOGGERS) > 0:
print_stderr('Sending log messages to scribe host=%s:%d category=%s'
% (LogOptions.scribe_host(), LogOptions.scribe_port(), LogOptions.scribe_category()))
return root_logger
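# Illustrative standalone sketch (not part of twitter.common): _setup_disk_logging()
# above creates one file handler per severity and attaches a GenericFilter so that
# each file only receives records of exactly that level.  The helper below shows
# the same idea reusing GenericFilter; file names and the function name are
# invented.
def _sketch_per_level_files(filebase):
  root_logger = logging.getLogger()
  root_logger.setLevel(logging.DEBUG)
  for level, name in [(logging.INFO, 'INFO'),
                      (logging.WARNING, 'WARNING'),
                      (logging.ERROR, 'ERROR')]:
    handler = logging.FileHandler('%s.%s' % (filebase, name))
    # keep only records of exactly this level, like GenericFilter does above
    handler.addFilter(GenericFilter(lambda record_level, lvl=level: record_level == lvl))
    root_logger.addHandler(handler)

# Example (run manually):
#   _sketch_per_level_files('sketch_log')
#   logging.info('lands in sketch_log.INFO')
#   logging.error('lands in sketch_log.ERROR')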
|
WCCCEDU/twitter-commons
|
src/python/twitter/common/log/initialize.py
|
Python
|
apache-2.0
| 9,100
|
# -*- coding: utf-8 -*-
from ckan_sdk import (Packages,
Groups,
Tags,
Resource)
import pprint
s = Packages()
pprint.pprint(s.get())
pprint.pprint(s.help)
pprint.pprint(s.resp)
# search
pprint.pprint(s.search(q='spending'))
pprint.pprint(s.help)
s = Groups()
pprint.pprint(s.get())
s = Tags()
pprint.pprint(s.get())
s = Resource()
pprint.pprint(s.get(q='District Names'))
"""
Authorized requests
-------------------
Will look like, for requests that need them, eventually
"""
s = Packages(api_key='XXX')
pprint.pprint(s.get())
|
rosscdh/ckan-parliament-uk
|
examples.py
|
Python
|
mit
| 603
|
#! /usr/bin/python
import argparse
import logging
import os
import re
import subprocess
import sys
import xdg.BaseDirectory as xdgbase
import time
APP_NAME = 'nag'
def main():
App().Run()
class App(object):
def __init__(self):
self.key_sequence = KeySequence()
self.nag_interval = 300
self.nag_header = '\nReminders:'
self.use_git = True
os.environ.pop('GIT_DIR', None)
# for path in xdgbase.load_config_paths(APP_NAME, 'config.py'):
# with file(path) as stream:
# exec stream in self.__dict__
self.nag_home = xdgbase.save_config_path(APP_NAME)
self.nag_file_dir = xdgbase.save_config_path(APP_NAME, 'files')
self.runtime = os.path.join(xdgbase.get_runtime_dir(), APP_NAME)
self.timestamp = os.path.join(self.runtime, 'timestamp')
def Run(self):
parser = argparse.ArgumentParser(description='')
parser.add_argument(
'--debug', action='store_true',
help=argparse.SUPPRESS)
subparsers = parser.add_subparsers()
for name in ['push', 'me']:
parser_push = subparsers.add_parser(
name, description='Adds a new reminder.')
parser_push.add_argument('message', nargs=argparse.REMAINDER)
parser_push.set_defaults(func=self.DoPush)
for name in ['show', 'list']:
parser_show = subparsers.add_parser(
name, description='Show existing reminders.')
parser_show.add_argument('key', nargs='*')
parser_show.set_defaults(func=self.DoShow)
for name in ['maybe', 'ps1']:
parser_maybe = subparsers.add_parser(
name, description='Show reminders (maybe).')
parser_maybe.add_argument('key', nargs='?')
parser_maybe.set_defaults(func=self.DoMaybe)
parser_rm = subparsers.add_parser(
'rm', description='Delete a reminder.')
parser_rm.add_argument('key', nargs='+')
parser_rm.set_defaults(func=self.DoRm)
parser_log = subparsers.add_parser(
'log', description='Show a change log.')
parser_log.set_defaults(func=self.DoLog)
parser_git = subparsers.add_parser(
'git', description='Run a git command.')
parser_git.add_argument('command', nargs=argparse.REMAINDER)
parser_git.set_defaults(func=self.DoGit)
parser_help = subparsers.add_parser(
'help', description='Show help.')
parser_help.set_defaults(func=self.DoHelp)
args = parser.parse_args()
if args.debug:
logging.basicConfig(level=logging.DEBUG)
logging.debug('debug logging enabled')
args.func(args)
def DoPush(self, args):
keys = self._SortedNagKeys()
if not keys:
next_key = self.key_sequence.First()
else:
last_key = keys[-1]
next_key = self.key_sequence.Next(keys[-1])
self._SetNag(next_key, ' '.join(args.message))
self._GitCommit('added reminder: ' + next_key)
print 'reminder saved as', next_key
def DoShow(self, args):
if args.key:
map(self._ShowNag, args.key)
else:
self._ShowAllNags()
def DoMaybe(self, args):
if os.path.isfile(self.timestamp):
now = time.time()
then = os.path.getmtime(self.timestamp)
if (now - then) < self.nag_interval:
return
keys = self._SortedNagKeys()
if keys:
print self.nag_header
for key in keys:
self._ShowNag(key)
self._UpdateTimestamp()
def DoRm(self, args):
for key in args.key:
filename = self._NagFile(key)
if os.path.isfile(filename):
os.unlink(filename)
self._GitCommit('deleted reminders: ' + ' '.join(args.key))
def DoHelp(self, args):
print """\
Add the following to your .bashrc:
PROMPT_COMMAND='nag ps1'
Create a new reminder:
nag me My free text reminder.
Clear the reminder with
nag rm X"""
def DoLog(self, args):
self._GitPopen('log').wait()
def DoGit(self, args):
p = self._GitPopen(*args.command)
p.wait()
def _GitCheckCall(self, *command):
self._Git(subprocess.check_call, command)
def _GitCheckOutput(self, *command):
        return self._Git(subprocess.check_output, command)
def _GitPopen(self, *command, **kwargs):
return self._Git(subprocess.Popen, command, **kwargs)
def _Git(self, func, command, **kwargs):
if not self.use_git: return None
return func(('git',) + command, cwd=self.nag_home, **kwargs)
def _GitCommit(self, message):
if not os.path.isdir(os.path.join(self.nag_home, '.git')):
self._GitCheckCall('init')
self._GitCheckCall('add', '--all')
self._GitCheckCall('commit', '-q', '-m', message)
def _SetNag(self, key, content):
filename = self._NagFile(key)
with open(filename, 'w') as stream:
stream.write(content)
def _ShowNag(self, key):
nag_file = self._NagFile(key)
if os.path.isfile(nag_file):
print '%s: %s' % (key, open(nag_file).read().rstrip())
def _ShowAllNags(self):
keys = self._SortedNagKeys()
for key in keys:
self._ShowNag(key)
self._UpdateTimestamp()
if not keys:
print 'no reminders set'
def _NagFile(self, key):
return os.path.join(self.nag_file_dir, key)
def _SortedNagKeys(self):
names = os.listdir(self.nag_file_dir)
self.key_sequence.Sort(names)
return names
def _UpdateTimestamp(self):
open(self.timestamp, 'w').close()
class KeySequence(object):
def SortKey(self, name):
assert self.IsValid(name)
if IsNumberString(name):
return (1, int(name))
else:
return (0, name)
def Sort(self, seq):
seq.sort(key=self.SortKey)
def IsValid(self, name):
return bool(re.match(r'^(?:[a-z]|[1-9]\d*)$', name))
def First(self):
return 'a'
def Next(self, name):
assert self.IsValid(name)
if name == 'z':
return '1'
elif IsNumberString(name):
return str(int(name) + 1)
else:
return chr(ord(name) + 1)
def IsNumberString(s):
return bool(re.match(r'\d+', s))
if __name__ == '__main__': main()
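# Illustrative standalone sketch (not part of nag): DoMaybe() above throttles
# the PROMPT_COMMAND output by comparing the mtime of a timestamp file against
# nag_interval and touching the file once reminders have been printed.  The
# helper below shows that throttle on its own; the path, interval and function
# name are invented.
def _sketch_should_nag(stamp='/tmp/sketch_nag_stamp', interval=300):
    if os.path.isfile(stamp) and time.time() - os.path.getmtime(stamp) < interval:
        return False
    open(stamp, 'w').close()  # touch, like _UpdateTimestamp()
    return True

# Example (run manually):
#   _sketch_should_nag()   # -> True now, False again within the next 5 minutes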
|
johnw42/nag
|
nag.py
|
Python
|
gpl-2.0
| 5,919
|
import sublime
import sublime_plugin
class CopyPathToClipboard(sublime_plugin.TextCommand):
def run(self, edit):
line_number, column = self.view.rowcol(self.view.sel()[0].begin())
line_number += 1
sublime.set_clipboard(self.view.file_name() + ':' + str(line_number))
|
ice3/shellrc
|
config/sublime-text-2/Packages/User/copy_path_to_clipboard.py
|
Python
|
gpl-3.0
| 297
|
from django.core.exceptions import ValidationError
from django.db import connection
from django.db.models import (
CharField, ForeignKey, ManyToManyField, PROTECT, BooleanField)
from django.urls import reverse
from django.utils.html import strip_tags
from django.utils.safestring import mark_safe
from django.utils.translation import (
pgettext_lazy, ugettext, ugettext_lazy as _)
from tinymce.models import HTMLField
from common.utils.abbreviate import abbreviate
from common.utils.html import href, sc, hlp
from common.utils.text import str_list, str_list_w_last, ex
from .base import (
CommonModel, AutoriteModel, UniqueSlugModel, TypeDeParente,
PublishedManager, PublishedQuerySet, AncrageSpatioTemporel,
slugify_unicode, ISNI_VALIDATORS)
from .evenement import Evenement
__all__ = ('TypeDeParenteDIndividus', 'ParenteDIndividus', 'Individu')
class TypeDeParenteDIndividus(TypeDeParente):
class Meta(object):
unique_together = ('nom', 'nom_relatif')
verbose_name = _('type de parenté d’individus')
verbose_name_plural = _('types de parenté d’individus')
ordering = ('classement',)
@staticmethod
def invalidated_relations_when_saved(all_relations=False):
if all_relations:
return ('parentes',)
return ()
class ParenteDIndividus(CommonModel):
type = ForeignKey('TypeDeParenteDIndividus', related_name='parentes',
verbose_name=_('type'), on_delete=PROTECT)
parent = ForeignKey('Individu', related_name='enfances',
verbose_name=_('individu parent'), on_delete=PROTECT)
enfant = ForeignKey('Individu', related_name='parentes',
verbose_name=_('individu enfant'), on_delete=PROTECT)
class Meta(object):
verbose_name = _('parenté d’individus')
verbose_name_plural = _('parentés d’individus')
ordering = ('type', 'parent', 'enfant')
@staticmethod
def invalidated_relations_when_saved(all_relations=False):
if all_relations:
return ('parent', 'enfant')
return ()
def clean(self):
try:
parent, enfant = self.parent, self.enfant
except Individu.DoesNotExist:
return
if parent and enfant and parent == enfant:
raise ValidationError(_('Un individu ne peut avoir une '
'parenté avec lui-même.'))
def __str__(self):
return ugettext('%(parent)s, %(type)s de %(enfant)s') % {
'parent': self.parent, 'type': self.type.nom,
'enfant': self.enfant}
class IndividuQuerySet(PublishedQuerySet):
def are_feminins(self):
return all(i.is_feminin() for i in self)
class IndividuManager(PublishedManager):
def get_queryset(self):
return IndividuQuerySet(self.model, using=self._db)
def are_feminins(self):
return self.get_queryset().are_feminins()
class Individu(AutoriteModel, UniqueSlugModel):
particule_nom = CharField(
_('particule du nom d’usage'), max_length=10, blank=True,
db_index=True)
    # TODO: make the 'nom' field accept blank values
nom = CharField(_('nom d’usage'), max_length=200, db_index=True)
particule_nom_naissance = CharField(
_('particule du nom de naissance'), max_length=10, blank=True,
db_index=True)
nom_naissance = CharField(
_('nom de naissance'), max_length=200, blank=True, db_index=True,
help_text=_('Ne remplir que s’il est différent du nom d’usage.'))
prenoms = CharField(_('prénoms'), max_length=50, blank=True,
db_index=True, help_text=ex('Antonio'))
prenoms_complets = CharField(
_('prénoms complets'), max_length=100, blank=True, db_index=True,
help_text=
ex('Antonio Lucio',
post=' Ne remplir que s’il existe un ou des prénoms '
'peu usités pour cet individu.'))
pseudonyme = CharField(_('pseudonyme'), max_length=200, blank=True,
db_index=True)
DESIGNATIONS = (
('S', _('Standard (nom, prénoms et pseudonyme)')),
('P', _('Pseudonyme (uniquement)')),
('L', _('Nom d’usage (uniquement)')), # L pour Last name
('B', _('Nom de naissance (standard)')), # B pour Birth name
('F', _('Prénom(s) (uniquement)')), # F pour First name
)
designation = CharField(_('affichage'), max_length=1,
choices=DESIGNATIONS, default='S')
TITRES = (
('M', _('M.')),
('J', _('Mlle')), # J pour Jouvencelle
('F', _('Mme')),
)
titre = CharField(pgettext_lazy('individu', 'titre'), max_length=1,
choices=TITRES, blank=True, db_index=True)
naissance = AncrageSpatioTemporel(has_heure=False,
verbose_name=_('naissance'))
deces = AncrageSpatioTemporel(has_heure=False,
verbose_name=_('décès'))
professions = ManyToManyField(
'Profession', related_name='individus', blank=True,
verbose_name=_('professions'))
enfants = ManyToManyField(
'self', through='ParenteDIndividus', related_name='parents',
symmetrical=False, verbose_name=_('enfants'))
biographie = HTMLField(_('biographie'), blank=True)
isni = CharField(
_('Identifiant ISNI'), max_length=16, blank=True,
validators=ISNI_VALIDATORS,
help_text=_('Exemple : « 0000000121269154 » pour Mozart.'))
sans_isni = BooleanField(_('sans ISNI'), default=False)
objects = IndividuManager()
class Meta(object):
verbose_name = _('individu')
verbose_name_plural = _('individus')
ordering = ('nom',)
permissions = (('can_change_status', _('Peut changer l’état')),)
@staticmethod
def invalidated_relations_when_saved(all_relations=False):
relations = ('auteurs', 'elements_de_distribution',)
if all_relations:
relations += ('enfants', 'dossiers',)
return relations
def get_slug(self):
parent = super(Individu, self).get_slug()
return slugify_unicode(self.nom) or parent
def get_absolute_url(self):
return reverse('individu_detail', args=(self.slug,))
def permalien(self):
return reverse('individu_permanent_detail', args=(self.pk,))
def link(self):
return self.html()
link.short_description = _('lien')
def oeuvres(self):
oeuvres = self.auteurs.oeuvres()
return oeuvres.exclude(extrait_de__in=oeuvres)
def oeuvres_with_descendants(self):
return self.auteurs.oeuvres()
def publications(self):
return self.auteurs.sources()
def apparitions(self):
        # FIXME: Handle the activity period of group members.
sql = """
SELECT DISTINCT COALESCE(distribution.evenement_id, programme.evenement_id)
FROM libretto_elementdedistribution AS distribution
LEFT JOIN libretto_elementdeprogramme AS programme
ON (programme.id = distribution.element_de_programme_id)
WHERE distribution.individu_id = %s
"""
with connection.cursor() as cursor:
cursor.execute(sql, (self.pk,))
evenement_ids = [t[0] for t in cursor.fetchall()]
return Evenement.objects.filter(id__in=evenement_ids)
def evenements_referents(self):
return Evenement.objects.filter(
programme__oeuvre__auteurs__individu=self).distinct()
def membre_de(self):
return self.membres.order_by('-debut', 'instrument', 'classement')
def calc_titre(self, tags=False):
titre = self.titre
if not titre:
return ''
if tags:
if titre == 'M':
return hlp(ugettext('M.'), 'Monsieur')
elif titre == 'J':
return hlp(ugettext('M<sup>lle</sup>'), 'Mademoiselle')
elif titre == 'F':
return hlp(ugettext('M<sup>me</sup>'), 'Madame')
if titre == 'M':
return ugettext('Monsieur')
elif titre == 'J':
return ugettext('Mademoiselle')
elif titre == 'F':
return ugettext('Madame')
raise ValueError('Type de titre inconnu, il devrait être M, J, ou F.')
def is_feminin(self):
return self.titre in ('J', 'F',)
def get_particule(self, naissance=False, lon=True):
particule = (self.particule_nom_naissance if naissance
else self.particule_nom)
if lon and particule and particule[-1] not in "'’":
return f'{particule} '
return particule
def calc_professions(self, tags=True):
if not self.pk:
return ''
return mark_safe(
str_list_w_last(
p.html(feminin=self.is_feminin(), tags=tags, caps=i == 0)
for i, p in enumerate(self.professions.all())
)
)
calc_professions.short_description = _('professions')
calc_professions.admin_order_field = 'professions__nom'
def html(self, tags=True, lon=False,
show_prenoms=True, designation=None, abbr=True, links=True):
if designation is None:
designation = self.designation
titre = self.calc_titre(tags)
prenoms = (self.prenoms_complets if lon and self.prenoms_complets
else self.prenoms)
nom = self.nom
if lon:
nom = f'{self.get_particule()}{nom}'
pseudonyme = self.pseudonyme
def standard(main, prenoms):
particule = self.get_particule(naissance=(designation == 'B'),
lon=lon)
l = []
if nom and not prenoms:
l.append(titre)
l.append(main)
if show_prenoms and (prenoms or particule and not lon):
if lon:
l.insert(max(len(l) - 1, 0), prenoms)
else:
if prenoms:
prenoms = abbreviate(prenoms, tags=tags, enabled=abbr)
if particule:
particule = sc(particule, tags)
prenom_and_particule = (f'{prenoms} {particule}'
if prenoms and particule
else (prenoms or particule))
l.append(f'({prenom_and_particule})')
out = str_list(l, ' ')
if pseudonyme:
alias = (ugettext('dite') if self.is_feminin()
else ugettext('dit'))
out += f' {alias}\u00A0{pseudonyme}'
return out
if designation in 'SL':
main = nom
elif designation == 'F':
main = prenoms
elif designation == 'P':
main = pseudonyme
elif designation == 'B':
nom_naissance = self.nom_naissance
if lon:
nom_naissance = f'{self.get_particule(True)}{nom_naissance}'
main = nom_naissance
main = sc(main, tags)
out = standard(main, prenoms) if designation in 'SB' else main
if tags:
return href(self.get_absolute_url(), out, links)
return out
html.short_description = _('rendu HTML')
def nom_seul(self, tags=False, abbr=False, links=False):
return self.html(tags=tags, lon=False, show_prenoms=False,
abbr=abbr, links=links)
def nom_complet(self, tags=True, designation='S',
abbr=False, links=True):
return self.html(tags=tags, lon=True,
designation=designation, abbr=abbr, links=links)
def related_label(self, tags=False):
return self.html(tags=tags, abbr=False)
related_label.short_description = _('individu')
def related_label_html(self):
return self.related_label(tags=True)
def clean(self):
naissance = self.naissance.date
deces = self.deces.date
if naissance and deces and deces < naissance:
message = _('Le décès ne peut précéder la naissance.')
raise ValidationError({'naissance_date': message,
'deces_date': message})
if self.isni and self.sans_isni:
message = _('« ISNI » ne peut être rempli '
'lorsque « Sans ISNI » est coché.')
raise ValidationError({'isni': message, 'sans_isni': message})
def __str__(self):
return strip_tags(self.html(tags=False))
@staticmethod
def autocomplete_search_fields():
return (
'nom__unaccent__icontains',
'nom_naissance__unaccent__icontains',
'pseudonyme__unaccent__icontains',
'prenoms__unaccent__icontains',
)
|
dezede/dezede
|
libretto/models/individu.py
|
Python
|
bsd-3-clause
| 12,969
|
# -*- coding: utf-8 -*-
# Copyright (C) 2015 Renato Lima - Akretion
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html
from . import account_invoice
from . import l10n_br_account
from . import l10n_br_account_service
from . import product
from . import res_company
from . import res_partner
|
thinkopensolutions/l10n-brazil
|
l10n_br_account_service/models/__init__.py
|
Python
|
agpl-3.0
| 307
|
""" Basic fields """
import pytz
import inspect
import datetime
import decimal
from pyramid.compat import NativeIO
from . import iso8601
from . import vocabulary
from .field import InputField
from .fieldset import Fieldset
from .directives import field
from .composite import CompositeField
from .interfaces import _, null, Invalid, IVocabulary
def takes_one_arg(fn, name):
try:
argspec = inspect.getargspec(fn)
args = argspec[0]
if len(args) == 1 and name in args:
return True
except TypeError:
pass
return False
def voc_factory_mapper(factory):
if takes_one_arg(factory, 'request'):
def _wrapper_request(form):
return factory(getattr(form, 'request', None))
return _wrapper_request
elif takes_one_arg(factory, 'context'):
def _wrapper_context(form):
return factory(getattr(form, 'context', None))
return _wrapper_context
elif takes_one_arg(factory, 'content'):
def _wrapper_content(form):
return factory(getattr(form, 'content', None))
return _wrapper_content
else:
def _wrapper(form):
return factory(form)
return _wrapper
class VocabularyField(InputField):
vocabulary = None
voc_factory = None
no_value_token = '--NOVALUE--'
def __init__(self, *args, **kw):
super(VocabularyField, self).__init__(*args, **kw)
if self.voc_factory is None and self.vocabulary is None:
raise ValueError("Vocabulary or vocabulary factory is required.")
if self.voc_factory is not None and self.vocabulary is not None:
raise ValueError("Vocabulary and vocabulary factory are defined.")
# convert vocabulary
voc = self.vocabulary
if (voc is not None and not IVocabulary.providedBy(voc)):
self.vocabulary = vocabulary.Vocabulary(*voc)
if self.voc_factory is not None:
self.voc_factory = voc_factory_mapper(self.voc_factory)
def bind(self, request, prefix, value, params, context=None):
clone = super(VocabularyField, self).bind(
request, prefix, value, params, context)
if clone.vocabulary is None:
clone.vocabulary = self.voc_factory(context)
return clone
def is_checked(self, term):
raise NotImplementedError()
def update_items(self):
self.items = []
for count, term in enumerate(self.vocabulary):
label = term.title if term.title is not None else term.token
self.items.append(
{'id': '%s-%i' % (self.id, count), 'name': self.name,
'value': term.token, 'label': label,
'description': term.description,
'checked': self.is_checked(term)})
class BaseChoiceField(VocabularyField):
""" base choice field """
error_msg = _('"${val}" is not in vocabulary')
def to_form(self, value):
try:
return self.vocabulary.get_term(value).token
except LookupError:
raise Invalid(self.error_msg, self, {'val': value})
def to_field(self, value):
if not value:
return null
try:
return self.vocabulary.get_term_bytoken(value).value
except LookupError:
raise Invalid(self.error_msg, self, {'val': value})
def is_checked(self, term):
return 'checked' if term.token == self.form_value else None
def update(self):
super(BaseChoiceField, self).update()
self.update_items()
def extract(self):
value = super(BaseChoiceField, self).extract()
if not value or value == self.no_value_token:
return null
return value
class BaseMultiChoiceField(VocabularyField):
""" multi choice field """
missing = []
error_msg = _('"${val}" is not in vocabulary')
def to_form(self, value):
val = value
try:
res = []
for val in value:
res.append(self.vocabulary.get_term(val).token)
return res
except Exception:
raise Invalid(self.error_msg, self, {'val': val})
def to_field(self, value):
if not value:
return null
val = value
try:
res = []
for val in value:
res.append(self.vocabulary.get_term_bytoken(val).value)
return res
except Exception:
raise Invalid(self.error_msg, self, {'val': val})
def extract(self):
if self.name not in self.params:
return null
value = []
tokens = self.params.getall(self.name)
for token in tokens:
if token == self.no_value_token:
continue
value.append(token)
return value
def is_checked(self, term):
return 'checked' if term.token in self.form_value else None
def update(self):
super(BaseMultiChoiceField, self).update()
if self.form_value in (null, None):
self.form_value = []
self.update_items()
@field('text')
class TextField(InputField):
"""HTML Text input widget. Field name is ``text``."""
klass = 'form-control text-widget'
value = ''
missing = ''
class Number(object):
error_msg = _('"${val}" is not a number')
def to_form(self, value):
try:
return str(self.typ(value))
except Exception:
raise Invalid(self.error_msg, self)
def to_field(self, value):
if not value:
return null
try:
return self.typ(value)
except Exception:
raise Invalid(self.error_msg, self, mapping={'val': value})
@field('int')
class IntegerField(Number, TextField):
"""Integer input widget. Field name is ``int``."""
typ = int
value = 0
klass = 'form-control int-widget'
@field('float')
class FloatField(Number, TextField):
"""Float input widget. Field name is ``float``."""
typ = float
klass = 'form-control float-widget'
@field('decimal')
class DecimalField(Number, TextField):
"""Decimal input widget. Field name is ``decimal``."""
typ = decimal.Decimal
klass = 'form-control decimal-widget'
@field('textarea')
class TextAreaField(TextField):
"""HTML Text Area input widget. Field name is ``textarea``."""
klass = 'form-control textarea-widget'
html_attrs = TextField.html_attrs + ('rows', 'cols')
rows = 5
cols = 40
value = ''
default = ''
tmpl_input = 'form:textarea'
@field('file')
class FileField(InputField):
"""HTML File input widget. Field name is ``file``."""
klass = 'input-file'
html_type = 'file'
max_size = 0
allowed_types = ()
error_max_size = "Maximum file size exceeded."
error_unknown_type = "Unknown file type."
tmpl_input = 'form:input-file'
def validate(self, value):
if value is null and self.form_value:
value = self.form_value
super(FileField, self).validate(value)
if value is null:
return
if self.max_size:
value['fp'].seek(0, 2)
size = value['fp'].tell()
value['fp'].seek(0)
if size > self.max_size:
raise Invalid(self.error_max_size, self)
if self.allowed_types and value['mimetype'] not in self.allowed_types:
raise Invalid(self.error_unknown_type, self)
def extract(self):
value = self.params.get(self.name, null)
if hasattr(value, 'file'):
value.file.seek(0)
return {
'fp': value.file,
'filename': value.filename,
'mimetype': value.type,
'size': value.length}
elif value:
fp = NativeIO(value)
fp.filename = self.params.get('%s-filename'%self.name, '')
return {
'fp': fp,
'filename': self.params.get('%s-filename'%self.name, ''),
'mimetype': self.params.get('%s-mimetype'%self.name, ''),
'size': len(value)}
return null
@field('lines')
class LinesField(TextAreaField):
"""Text area based widget, each line is treated as sequence element.
Field name is ``lines``."""
klass = 'form-control textlines-widget'
missing = []
error_msg = _('"${val}" is not a list')
def to_form(self, value):
try:
return '\n'.join(value)
except Exception:
raise Invalid(self.error_msg, self, {'val': value})
def to_field(self, value):
if not value:
return null
try:
return list(filter(None, [s.strip() for s in value.split('\n')]))
except Exception:
raise Invalid(self.error_msg, self, {'val': value})
@field('password')
class PasswordField(TextField):
"""HTML Password input widget. Field name is ``password``."""
klass = 'form-control password-widget'
html_type = 'password'
@field('multichoice')
class MultiChoiceField(BaseMultiChoiceField):
"""HTML Checkboxs input based widget. Field name is ``multichoice``."""
klass = 'multichoice-widget'
html_type = 'checkbox'
tmpl_input = 'form:multichoice'
class DateField(TextField):
"""Simple date input field."""
missing = None
error_msg = _('"${val}" is not a date object')
error_invalid_date = _('Invalid date')
def to_form(self, value):
if value is null:
return null
if isinstance(value, datetime.datetime):
value = value.date()
if not isinstance(value, datetime.date):
raise Invalid(self.error_msg, self, {'val': value})
return value.isoformat()
def to_field(self, value):
if not value:
return null
try:
result = iso8601.parse_date(value)
result = result.date()
except (iso8601.ParseError, TypeError):
try:
year, month, day = map(int, value.split('-', 2))
result = datetime.date(year, month, day)
except Exception:
raise Invalid(self.error_invalid_date, self)
return result
class DateTimeField(TextField):
default_tzinfo = iso8601.Utc()
missing = None
error_msg = _('"${val}" is not a datetime object')
error_invalid_date = _('Invalid date')
def to_form(self, value):
if value is null or value is None or not value:
return null
if type(value) is datetime.date: # cannot use isinstance; dt subs date
value = datetime.datetime.combine(value, datetime.time())
if not isinstance(value, datetime.datetime):
raise Invalid(self.error_msg, self, {'val': value})
if value.tzinfo is None:
value = value.replace(tzinfo=self.default_tzinfo)
return value.isoformat()
def to_field(self, value):
if not value:
return null
try:
result = iso8601.parse_date(
value, default_timezone=self.default_tzinfo)
except (iso8601.ParseError, TypeError):
try:
year, month, day = map(int, value.split('-', 2))
result = datetime.datetime(year, month, day,
tzinfo=self.default_tzinfo)
except Exception:
raise Invalid(self.error_invalid_date, self)
return result
@field('radio')
class RadioField(BaseChoiceField):
"""HTML Radio input widget. Field name is ``radio``."""
klass = 'radio-widget'
inline = False
html_type = 'radio'
html_attrs = BaseChoiceField.html_attrs + ('checked',)
tmpl_input = 'form:radio'
@field('bool')
class BoolField(RadioField):
"""Boolean input widget. Field name is ``bool``."""
vocabulary = vocabulary.Vocabulary(
(True, 'true', 'yes'),
(False, 'false', 'no'))
inline = True
@field('choice')
class ChoiceField(BaseChoiceField):
"""HTML Select input widget. Field name is ``choice``."""
size = 1
klass = 'form-control select-widget'
multiple = None
prompt_message = _('select a value ...')
tmpl_input = 'form:select'
def update_items(self):
super(ChoiceField, self).update_items()
if not self.required:
self.items.insert(0, {
'id': self.id + '-novalue',
'name': self.name,
'value': self.no_value_token,
'label': self.prompt_message,
'checked': 'checked' if self.form_value is null else None,
'description': '',
})
@field('multiselect')
class MultiSelectField(BaseMultiChoiceField):
"""HTML Multi Select input widget. Field name is ``multiselect``.
Extra params:
:param size: Size of multiselect field, default is ``5``
"""
size = 5
klass = 'form-control select-widget'
multiple = 'multiple'
tmpl_input = 'form:select'
@field('timezone')
class TimezoneField(ChoiceField):
""" Timezone field. Field name is ``timezone``."""
error_msg = _('Invalid timezone "${val}"')
_tzs = dict((str(tz).lower(), str(tz)) for tz in pytz.all_timezones)
vocabulary = vocabulary.Vocabulary(
*[(str(tz).lower(), str(tz).lower(), str(tz))
for tz in pytz.all_timezones])
def to_form(self, value):
if value is null:
return null
return str(value).lower()
def to_field(self, value):
if value is null or not value:
return null
try:
v = str(value).lower()
if v.startswith('gmt'):
v = 'etc/%s' % v
try:
return pytz.timezone(v)
except Exception:
return pytz.timezone(self._tzs[v])
except Exception:
raise Invalid(self.error_msg, self, {'val': value})
class OptionsField(CompositeField):
""" Options field
``key``: Name of group key name
``defaults``: Build defaults for unselected groups
``extract_all``: Extract values for all groups
"""
key = ''
defaults = False
extract_all = False
tmpl_input = 'form:options'
def __init__(self, *args, **kw):
super(OptionsField, self).__init__(*args, **kw)
voc = vocabulary.Vocabulary(
*[vocabulary.Term(fname, fname, field.title)
for fname, field in self.fields.items()])
if not self.key:
self.key = self.name
self.fields = Fieldset(
RadioField(
self.key,
missing = voc[0].value,
default = voc[0].value,
required = False,
vocabulary = voc)) + self.fields
def to_field(self, value):
value = super(OptionsField, self).to_field(value)
if self.defaults:
for name, f in self.fields.items():
if name not in value:
value[name] = (f.default
if f.default is not null else f.missing)
return value
def validate(self, value):
key = value.get(self.key)
if key not in self.fields:
key = self.fields[self.key].default
super(OptionsField, self).validate(
{key: value.get(key, self.fields[key].missing)})
def extract(self):
value = super(OptionsField, self).extract()
if not self.extract_all:
option = value[self.key]
if option in value:
return {self.key: option, option: value[option]}
else:
return {}
return value
|
djedproject/djed.form
|
djed/form/fields.py
|
Python
|
isc
| 15,778
|
#!/usr/bin/python3
import os, os.path
import sys
sys.path=[os.path.dirname(__file__)]+sys.path
from subprocess import call, Popen, PIPE, STDOUT
from tm import imgWidthFilter, labelFilter, svgFilter
from MarkdownPP import MarkdownPP
from MarkdownPP.Modules import modules
# just for translations
from PyQt5 import QtCore, QtWidgets
_translate = QtCore.QCoreApplication.translate
def includeMd(path, outFile):
"""
Includes files from a subdirectory, possibly taking groups and some
sorting into account. This early version does no grouping.
@param path the subdirectory where .md files are searched
@param outFile the .mdpp file to write into
"""
for root, dirs, files in os.walk(path):
for f in files:
if f.endswith(".md"):
outFile.write("!INCLUDE \"%s\", 1\n" %os.path.join(root,f))
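# Illustrative sketch (assumption, not part of the original script): includeMd walks a
# directory and writes one "!INCLUDE" preprocessor directive per .md file, so writing
# into an in-memory buffer shows exactly what ends up in manual.mdpp. The "_apps"
# directory is the one used by makeMdpp() below and must exist for this to return output.
def _example_includeMd_usage():
    import io
    buf = io.StringIO()
    includeMd("_apps", buf)
    return buf.getvalue()  # e.g. '!INCLUDE "_apps/foo.md", 1\n...'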
def makeMdpp(outdir="."):
"""
make the file manual.mdpp to use with the preprocessor
which will include multiple .md files to feed later pandoc.
@param outdir output directory
"""
with open(os.path.join(outdir,"manual.mdpp"),"w") as mdppFile :
mdppFile.write("""\
# {toc}
!TOC
# {intro}
!INCLUDE "index.md", 1
# {apps}
""".format(
toc=_translate("textmanual","Table of contents"),
intro=_translate("textmanual","Introduction"),
apps=_translate("textmanual","Applications"),
))
includeMd("_apps", mdppFile)
def makeTex(outdir="."):
"""
use pandoc to create TEX file from MD file, and fixes some
drawbacks of pandoc
@param outdir output directory
"""
mdFile=os.path.join(outdir,"manual.md")
texFile=os.path.join(outdir,"manual.tex")
call("pandoc --template=pandoc.latex -t latex -o %s %s" %(texFile, mdFile),
shell=True)
# fix width syntax for images, fix hyperlink discrepancies, fix SVG includes
data=open(texFile,"r").read()
for m in (imgWidthFilter, labelFilter, svgFilter):
data=m.run(data)
open(texFile,"w").write(data)
def PDFnotYetIndexed(fname):
"""
@param fname a file name, without any suffix
@return True if pdflatex did not correctly index the PDF output
"""
f=fname+".log"
return not os.path.exists(f) or\
call("grep -iq 'Rerun to get cross-references ' "+f,
shell=True)==0 or\
call("grep -iq 'Rerun to get outlines ' "+f,
shell=True)==0
def translators(langDir, lang=None):
"""
create a list of translators
@param langDir a path containing .qm translation
@param lang the preferred locale, like en_IN.UTF-8, fr_FR.UTF-8, etc.
@result a list of QtCore.QTranslator instances
"""
if lang==None:
lang=QtCore.QLocale.system().name()
result=[]
qtTranslator=QtCore.QTranslator()
qtTranslator.load("qt_" + lang,
QtCore.QLibraryInfo.location(QtCore.QLibraryInfo.TranslationsPath))
result.append(qtTranslator)
# path to the translation files (.qm files)
sparkTranslator=QtCore.QTranslator()
sparkTranslator.load(lang, langDir);
result.append(sparkTranslator)
return result
def firstExistingPath(l):
"""
Returns the first existing path taken from a list of
possible paths.
@param l a list of paths
@return the first path which exists in the filesystem, or None
"""
for p in l:
if os.path.exists(p):
return p
return None
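# Illustrative sketch (assumption, not part of the original script): firstExistingPath is
# a simple priority lookup, for instance preferring a local "lang" directory over the
# system-wide expeyes17 location referenced in common_paths() below.
def _example_translation_dir():
    return firstExistingPath(["lang", "/usr/share/expeyes17/lang"])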
def common_paths():
"""
Finds common paths
@result a dictionary of common paths
"""
path={}
curPath = os.path.dirname(os.path.realpath(__file__))
path["current"] = curPath
sharedPath = "/usr/share/expeyes17"
path["translation"] = firstExistingPath(
[os.path.join(p, "lang") for p in
(curPath, "..",)])
return path
if __name__=="__main__":
app = QtWidgets.QApplication(sys.argv)
path = common_paths()
for t in translators(path["translation"]):
print("GRRR",t)
app.installTranslator(t)
outdir="manual"
if len(sys.argv) >1:
outdir=sys.argv[1]
os.makedirs(outdir, exist_ok=True)
print("making %s/manual.mdpp to feed the Markdown Preprocessor" %outdir)
makeMdpp(outdir=outdir)
print("making %s/manual.md with markdown-pp" %outdir)
MarkdownPP(input=open("%s/manual.mdpp" %outdir,"r"),
output=open("%s/manual.md" %outdir,"w"),
modules=list(modules))
print("making %s/manual.tex with pandoc" %outdir)
makeTex(outdir=outdir)
print("making %s/manual.odt with pandoc" %outdir)
call("pandoc -o %s/manual.odt %s/manual.md" %(outdir,outdir), shell=True)
print("making %s/manual.pdf with pdfLaTeX" %outdir)
if os.path.exists("%s/manual.log" %outdir):
os.unlink("%s/manual.log" %outdir)
while PDFnotYetIndexed("%s/manual" %outdir):
call("cd %s; pdflatex -interaction=nonstopmode manual.tex" %outdir,
shell=True)
|
csparkresearch/ExpEYES17-Qt
|
SPARK17/textManual/textManual.py
|
Python
|
mit
| 4,975
|
# -*- coding: utf-8 -*-
# Copyright 2008 Jaap Karssenberg <jaap.karssenberg@gmail.com>
'''Test cases for the zim.fs module.'''
from __future__ import with_statement
import tests
import os
import time
import zim.fs
from zim.fs import *
from zim.errors import Error
def modify_file_mtime(path, func):
'''Helper function to modify a file in such a way that mtime
changed.
'''
mtime = os.stat(path).st_mtime
m = mtime
i = 0
while m == mtime:
time.sleep(1)
func(path)
m = os.stat(path).st_mtime
i += 1
assert i < 5
#~ print '>>>', m, mtime
class FilterOverWriteWarning(tests.LoggingFilter):
def __init__(self):
tests.LoggingFilter.__init__(self, 'zim.fs', 'mtime check failed')
class FilterFileMissingWarning(tests.LoggingFilter):
def __init__(self):
tests.LoggingFilter.__init__(self, 'zim.fs', 'File missing:')
class TestFS(tests.TestCase):
def testFunctions(self):
smb_urls = (
('smb://MyHost.local/share/My%20Documents', r'\\MyHost.local\share\My Documents'),
)
for url, share in smb_urls:
if os.name == 'nt':
self.assertEqual(normalize_win32_share(share), share)
self.assertEqual(normalize_win32_share(url), share)
else:
self.assertEqual(normalize_win32_share(share), url)
self.assertEqual(normalize_win32_share(url), url)
self.assertTrue(isabs('/foo/bar'))
self.assertTrue(isabs('~/foo/bar'))
self.assertFalse(isabs('./bar'))
self.assertEqual(joinpath('foo', 'bar'), os.sep.join(('foo', 'bar')))
self.assertEqual(cleanup_filename('foo&bar:dus\\foo.txt'), 'foo&bardusfoo.txt')
def testFilePath(self):
'''Test Path object'''
path = FilePath(['foo', 'bar'])
test = os.path.abspath(os.path.join('foo', 'bar'))
self.assertEqual(path.path, test)
path = FilePath('/foo/bar')
uri = 'file:///' + os.path.abspath('/foo/bar').replace('\\', '/').strip('/')
self.assertEqual(path.uri, uri)
self.assertEqual(FilePath('file:///foo/bar'), FilePath('/foo/bar'))
self.assertEqual(FilePath('file:/foo/bar'), FilePath('/foo/bar'))
self.assertEqual(FilePath('file://localhost/foo/bar'), FilePath('/foo/bar'))
self.assertEqual(FilePath('file:///C:/foo/bar'), FilePath('/C:/foo/bar'))
if os.name == 'nt':
self.assertEqual(FilePath('file:///C:/foo/bar'), FilePath(r'C:\foo\bar'))
path = FilePath('/foo//bar/baz/')
drive, p = os.path.splitdrive(path.path)
self.assertEqual(path.split(), [drive + os.sep + 'foo', 'bar', 'baz'])
dirs = []
for d in path:
dirs.append(d)
wanted = map(lambda p: Dir(os.path.abspath(drive + p)),
['/foo', '/foo/bar', '/foo/bar/baz'])
self.assertEqual(dirs, wanted)
for path1, path2, common in (
('/foo/bar/baz/', '/foo/dus', '/foo'),
('/foo/bar', '/dus/ja', '/'),
):
self.assertEqual(FilePath(path1).commonparent(FilePath(path2)), Dir(common))
if os.name == 'nt':
path1 = r'C:\foo\bar'
path2 = r'D:\foo\bar\baz'
self.assertEqual(FilePath(path1).commonparent(FilePath(path2)), None)
for path1, path2, relpath in (
('/foo/bar/baz', '/foo', 'bar/baz'),
):
self.assertEqual(FilePath(path1).relpath(FilePath(path2)), relpath)
self.assertRaises(AssertionError, FilePath('/foo/bar').relpath, FilePath('/dus/ja'))
for path1, path2, relpath in (
('/foo/bar', '/dus/ja/', '../../foo/bar'),
('/source/dir/foo/bar/dus.pdf', '/source/dir/foo', 'bar/dus.pdf'),
('/source/dir/foo/dus.pdf', '/source/dir/foo', 'dus.pdf'),
('/source/dir/dus.pdf', '/source/dir/foo', '../dus.pdf'),
):
self.assertEqual(FilePath(path1).relpath(FilePath(path2), allowupward=True), relpath)
if os.name == 'nt':
path1 = r'C:\foo\bar'
path2 = r'D:\foo\bar\baz'
self.assertEqual(FilePath(path1).relpath(FilePath(path2), allowupward=True), None)
self.assertEqual(FilePath('/foo') + 'bar', FilePath('/foo/bar'))
path = FilePath('~/foo')
self.assertNotEqual(path.path, '~/foo')
self.assertEqual(path.user_path, '~/foo')
self.assertEqual(path.serialize_zim_config(), '~/foo')
path = FilePath('/foo')
self.assertIsNotNone(path.path)
self.assertIsNone(path.user_path)
self.assertIsNotNone(path.serialize_zim_config())
# Test unicode compat
string = u'\u0421\u0430\u0439\u0442\u043e\u0432\u044b\u0439'
path = FilePath(string)
self.assertTrue(path.path.endswith(string))
#~ self.assertRaises(Error, Path, string.encode('utf-8'))
path = FilePath((string, 'foo'))
self.assertTrue(path.path.endswith(os.sep.join((string, 'foo'))))
#~ self.assertRaises(Error, Path, (string.encode('utf-8'), 'foo'))
def testFileHandle(self):
'''Test FileHandle object'''
self.on_close_called = False
tmpdir = self.create_tmp_dir('testFileHandle')
fh = FileHandle(
tmpdir + '/foo.txt', mode='w', on_close=self.on_close)
fh.write('duss')
fh.close()
self.assertTrue(self.on_close_called)
def on_close(self):
self.on_close_called = True
def testFile(self):
'''Test File object'''
tmpdir = self.create_tmp_dir('testFile')
file = File(tmpdir + '/foo/bar/baz.txt')
assert not file.exists()
file.touch()
self.assertTrue(os.path.isfile(tmpdir + '/foo/bar/baz.txt'))
File(tmpdir + '/anotherfile.txt').touch()
file.cleanup()
self.assertTrue(os.path.isfile(tmpdir + '/anotherfile.txt'))
self.assertTrue(os.path.isdir(tmpdir))
self.assertFalse(os.path.isfile(tmpdir + '/foo/bar/baz.txt'))
self.assertFalse(os.path.isdir(tmpdir + '/foo'))
file = File(tmpdir + '/bar.txt')
file.writelines(['c\n', 'd\n'])
self.assertEqual(file.readlines(), ['c\n', 'd\n'])
# with error
try:
fh = file.open('w')
fh.write('foo')
raise IOError
except IOError:
del fh
self.assertEqual(file.readlines(), ['c\n', 'd\n'])
self.assertTrue(os.path.isfile(file.encodedpath + '.zim-new~'))
# test recovery on windows
if os.name == 'nt':
new = file.encodedpath + '.zim-new~'
orig = file.encodedpath + '.zim-orig~'
bak = file.encodedpath + '.bak~'
os.remove(file.encodedpath) # don't clean up folder
open(new, 'w').write('NEW\n')
open(orig, 'w').write('ORIG\n')
self.assertTrue(file.exists())
with tests.LoggingFilter('zim.fs', 'Left over file found:'):
self.assertEqual(file.read(), 'NEW\n')
self.assertFalse(os.path.isfile(new))
self.assertFalse(os.path.isfile(orig))
self.assertTrue(os.path.isfile(file.encodedpath))
self.assertTrue(os.path.isfile(bak))
bak1 = file.encodedpath + '.bak1~'
os.remove(file.encodedpath) # don't clean up folder
open(orig, 'w').write('ORIG 1\n')
self.assertFalse(file.exists())
with tests.LoggingFilter('zim.fs', ''):
self.assertRaises(FileNotFoundError, file.read)
self.assertFalse(os.path.isfile(orig))
self.assertTrue(os.path.isfile(bak))
self.assertTrue(os.path.isfile(bak1))
# test read-only
path = tmpdir + '/read-only-file.txt'
open(path, 'w').write('test 123')
os.chmod(path, 0o444)
file = File(path)
self.assertRaises(FileWriteError, file.write, 'Overwritten!')
os.chmod(path, 0o644) # make it removable again
# with windows line-ends
file = open(tmpdir + '/newlines.txt', 'wb')
# binary mode means no automatic newline conversions
file.write('Some lines\r\nWith win32 newlines\r\n')
file = File(tmpdir + '/newlines.txt')
self.assertEqual(file.read(), 'Some lines\nWith win32 newlines\n')
# test encoding error
non_utf8_file = File('tests/data/non-utf8.txt')
self.assertRaises(FileUnicodeError, non_utf8_file.read)
# test byte order mark
file = File('tests/data/byteordermark.txt')
self.assertEqual(file.raw(), '\xef\xbb\xbffoobar\n')
self.assertEqual(file.read(), 'foobar\n')
self.assertEqual(file.readlines(), ['foobar\n'])
# test compare & copyto
file1 = File(tmpdir + '/foo.txt')
file2 = File(tmpdir + '/bar.txt')
file1.write('foo\nbar\n')
file2.write('foo\nbar\n')
self.assertTrue(file1.compare(file2))
file2.write('foo\nbar\nbaz\n')
self.assertFalse(file1.compare(file2))
file2.copyto(file1)
self.assertTrue(file1.compare(file2))
# rename is being used when testing Dir
# test mimetype
file = File('test.txt')
self.assertFalse(file.isimage())
file = File('test.jpg')
self.assertTrue(file.isimage())
file = File(tmpdir + '/foo/')
self.assertFalse(file.isdir())
dir = Dir(tmpdir + '/foo/')
dir.touch()
self.assertTrue(file.isdir())
def testTmpFile(self):
'''Test TmpFile object'''
dir = get_tmpdir()
file = TmpFile('foo.txt')
self.assertTrue(file.ischild(dir))
# What else to test here ?
def testDir(self):
'''Test Dir object'''
tmpdir = self.create_tmp_dir('testDir')
dir = Dir(tmpdir + '/foo/bar')
assert not dir.exists()
file1 = dir.file('unique.txt')
file1.touch()
file2 = dir.new_file('unique.txt')
file2.touch()
file3 = dir.new_file('unique.txt')
self.assertEqual(file1.basename, 'unique.txt')
self.assertEqual(file2.basename, 'unique001.txt')
self.assertEqual(file3.basename, 'unique002.txt')
self.assertEqual(dir.list(), ['unique.txt', 'unique001.txt'])
# we did not touch unique002.txt, so don't want to see it show up here
file1.rename(dir.file('foo.txt'))
self.assertEqual(file1.basename, 'unique.txt') # don't update the object !
self.assertEqual(dir.list(), ['foo.txt', 'unique001.txt'])
file1 = dir.file('foo.txt')
file1.rename(dir.subdir('foo').file('bar.txt'))
self.assertEqual(dir.list(), ['foo', 'unique001.txt'])
self.assertEqual(dir.subdir('foo').list(), ['bar.txt'])
fdir = FilteredDir(dir)
fdir.ignore('*.txt')
self.assertEqual(fdir.list(), ['foo'])
self.assertEqual(File((dir, 'foo.txt')), dir.file('foo.txt'))
self.assertEqual(dir.file(File((dir, 'foo.txt'))), dir.file('foo.txt'))
self.assertEqual(dir.file(FilePath((dir, 'foo.txt'))), dir.file('foo.txt'))
self.assertEqual(dir.file(('foo.txt',)), dir.file('foo.txt'))
self.assertRaises(PathLookupError, dir.file, File('/foo/bar.txt')) # not below dir
self.assertEqual(dir.resolve_file('../foo.txt'), dir.dir.file('foo.txt'))
self.assertEqual(dir.resolve_file(File('/foo/bar.txt')), File('/foo/bar.txt'))
self.assertEqual(Dir((dir, 'bar')), dir.subdir('bar'))
self.assertEqual(dir.subdir(Dir((dir, 'bar'))), dir.subdir('bar'))
self.assertEqual(dir.subdir(FilePath((dir, 'bar'))), dir.subdir('bar'))
self.assertEqual(dir.subdir(('bar',)), dir.subdir('bar'))
self.assertRaises(PathLookupError, dir.subdir, Dir('/foo/bar')) # not below dir
self.assertEqual(dir.resolve_dir('../bar'), dir.dir.subdir('bar'))
self.assertEqual(dir.resolve_dir(Dir('/foo/bar')), Dir('/foo/bar'))
self.assertRaises(OSError, dir.remove) # dir not empty
self.assertTrue(dir.exists())
dir.cleanup()
self.assertTrue(dir.exists())
dir.remove_children()
self.assertEqual(dir.list(), [])
self.assertTrue(dir.exists())
dir.remove()
self.assertFalse(dir.exists())
self.assertEqual(dir.list(), []) # list non-existing dir
# TODO skip if no gio available
# TODO slow test
#~ def testMonitor(self):
#~ tmpdir = Dir(self.create_tmp_dir('testMonitor'))
#~ # Monitor file
#~ events = []
#~ def monitor(*args):
#~ events.append(args)
#~ file = tmpdir.file('foo')
#~ file.connect('changed', monitor)
#~ file.touch()
#~ file.write('Foo')
#~ # timeout ?
#~ print '>>', events
#~ # Monitor dir
#~ tmpdir.connect('changed', monitor)
#~ tmpdir.file('bar').touch()
#~ # timeout ?
#~ print '>>', events
@tests.slowTest
class TestFileOverwrite(tests.TestCase):
def setUp(self):
self.path = self.create_tmp_dir() + '/file.txt'
def modify(self, func):
modify_file_mtime(self.path, func)
def runTest(self):
'''Test file overwrite check'''
# Check we can write without reading
file = File(self.path, checkoverwrite=True)
file.write('bar')
self.assertEquals(file.read(), 'bar')
# Check edge case where file goes missing after read or write
os.remove(file.encodedpath)
self.assertFalse(file.exists())
self.assertTrue(file.check_has_changed_on_disk())
with FilterFileMissingWarning():
file.write('bar')
self.assertEquals(file.read(), 'bar')
self.assertFalse(file.check_has_changed_on_disk())
# Check overwrite error when content changed
self.modify(lambda p: open(p, 'w').write('XXX'))
# modify mtime and content
with FilterOverWriteWarning():
self.assertRaises(FileWriteError, file.write, 'foo')
self.assertTrue(file.check_has_changed_on_disk())
self.assertEquals(file.read(), 'XXX')
# Check md5 check passes
file = File(self.path, checkoverwrite=True)
file.write('bar')
self.modify(lambda p: open(p, 'w').write('bar'))
# modify mtime but keep content the same
with FilterOverWriteWarning():
file.write('foo')
self.assertEquals(file.read(), 'foo')
@tests.slowTest
@tests.skipUnless(hasattr(os, 'symlink'), 'OS does not support symlinks')
class TestSymlinks(tests.TestCase):
def runTest(self):
'''Test file operations are safe for symlinks'''
# Set up a file structue with a symlink
tmpdir = self.create_tmp_dir()
targetdir = Dir(tmpdir + '/target')
targetdir.file('foo.txt').touch()
targetfile = File(tmpdir + '/target.txt')
targetfile.write('foo\n')
dir = Dir(tmpdir + '/data')
file = dir.file('bar.txt')
file.touch()
os.symlink(targetdir.encodedpath, dir.encodedpath + '/link')
os.symlink(targetfile.encodedpath, dir.encodedpath + '/link.txt')
# Test transparent access to the linked data
linkedfile = dir.file('link.txt')
self.assertEqual(linkedfile.read(), 'foo\n')
self.assertEqual(dir.list(), ['bar.txt', 'link', 'link.txt'])
linkeddir = dir.subdir('link')
self.assertEqual(linkeddir.list(), ['foo.txt'])
# Test modifying a linked file
linkedfile.write('bar\n')
self.assertEqual(linkedfile.read(), 'bar\n')
self.assertEqual(targetfile.read(), 'bar\n')
linkedfile.rename(dir.file('renamed_link.txt'))
self.assertEqual(dir.list(), ['bar.txt', 'link', 'renamed_link.txt'])
linkedfile = dir.file('renamed_link.txt')
linkedfile.write('foobar\n')
self.assertEqual(linkedfile.read(), 'foobar\n')
self.assertEqual(targetfile.read(), 'foobar\n')
# Test removing the links (but not the data)
linkedfile.remove()
self.assertFalse(linkedfile.exists())
self.assertTrue(targetfile.exists())
self.assertEqual(targetfile.read(), 'foobar\n')
dir.remove_children()
self.assertEqual(dir.list(), [])
self.assertTrue(targetdir.exists())
self.assertEqual(targetdir.list(), ['foo.txt'])
@tests.slowTest
@tests.skipUnless(zim.fs.gio, 'Trashing not supported, \'gio\' is missing')
class TestTrash(tests.TestCase):
def runTest(self):
'''Test trashing files and folders'''
root = Dir(self.create_tmp_dir())
file = root.file('test.txt')
file.touch()
self.assertTrue(file.exists())
self.assertTrue(file.trash())
self.assertFalse(file.exists())
dir = root.subdir('test')
dir.touch()
self.assertTrue(dir.exists())
self.assertTrue(dir.trash())
self.assertFalse(dir.exists())
# fails silent if file does not exist
self.assertFalse(file.trash())
self.assertFalse(dir.trash())
# How can we cause gio to give an error and test that case ??
from utils import FunctionThread
@tests.slowTest
class TestIOFunctionThread(tests.TestCase):
def runTest(self):
dir = Dir(self.create_tmp_dir())
file = dir.file('test.txt')
func = FunctionThread(file.write, ('fooo\n',))
func.start()
func.join()
self.assertTrue(func.done)
self.assertFalse(func.error)
self.assertEqual(file.read(), 'fooo\n')
|
Osndok/zim-desktop-wiki
|
tests/fs.py
|
Python
|
gpl-2.0
| 15,370
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import MySQLdb, sys
# for ide
if False:
from gluon import *
def clumusuario(email):
"""consulto usuario tabla clave unificada"""
dbmysql = MySQLdb.connect(
host=myconf.take('datos.clum_srv'),
port=int(myconf.take('datos.clum_port')),
user=myconf.take('datos.clum_user'),
passwd=myconf.take('datos.clum_pass'),
db=myconf.take('datos.clum_db'))
cursor = dbmysql.cursor()
cursor.execute("""select username from auth_user where email='%s';"""%(email))
registro=cursor.fetchall()
log("usuario: "+str(registro))
dbmysql.close()
if not registro:
salida='no configurado'
elif registro[0][0]=='':
salida='no configurado'
else:
salida=str(registro[0][0])
return salida
def consulta_id(usuario):
"""consulto email tabla clave unificada"""
dbmysql = MySQLdb.connect(
host=myconf.take('datos.clum_srv'),
port=int(myconf.take('datos.clum_port')),
user=myconf.take('datos.clum_user'),
passwd=myconf.take('datos.clum_pass'),
db=myconf.take('datos.clum_db'))
cursor = dbmysql.cursor()
cursor.execute("""select id from auth_user where username='%s';"""%(usuario))
registro=cursor.fetchall()
log("id: "+str(registro))
dbmysql.close()
if not registro:
salida='no creado'
elif registro[0][0]=='':
salida='no configurado'
else:
salida=int(registro[0][0])
return salida
def consulta_emailalt(usuario):
"""consulto email tabla clave unificada"""
try:
dbmysql = MySQLdb.connect(
host=myconf.take('datos.clum_srv'),
port=int(myconf.take('datos.clum_port')),
user=myconf.take('datos.clum_user'),
passwd=myconf.take('datos.clum_pass'),
db=myconf.take('datos.clum_db'))
cursor = dbmysql.cursor()
cursor.execute("""select email from auth_user where username='%s';"""%(usuario))
registro=cursor.fetchall()
#log("mailalt: "+str(registro))
dbmysql.close()
except Exception as e:
return ['error',e.args]
if not registro:
salida=['error','no creado']
elif registro[0][0]=='':
salida=['error','no configurado']
else:
salida=['ok',str(registro[0][0])]
return salida
def consulta_autogestion(usuario):
"""consulto email tabla clave unificada"""
try:
dbmysql = MySQLdb.connect(
host=myconf.take('datos.clum_srv'),
port=int(myconf.take('datos.clum_port')),
user=myconf.take('datos.clum_user'),
passwd=myconf.take('datos.clum_pass'),
db=myconf.take('datos.clum_db'))
cursor = dbmysql.cursor()
cursor.execute("""select * from auth_user where username='%s';"""%(usuario))
registro=cursor.fetchall()
#log("mailalt: "+str(registro))
dbmysql.close()
except Exception as e:
return ['error',e.args]
if not registro:
salida=['error','no creado']
elif registro[0][0]=='':
salida=['error','no configurado']
else:
#return registro
aux={
"id":registro[0][0],
"first_name":registro[0][1],
"last_name":registro[0][2],
"email":registro[0][3],
"username":registro[0][4],
"password":registro[0][5],
"registration_key":registro[0][6],
"reset_password_key":registro[0][7],
"registration_id":registro[0][8],
"is_active":registro[0][9],
"created_on":registro[0][10],
"created_by":registro[0][11],
"modified_on":registro[0][12],
"modified_by":registro[0][13]
}
salida=['ok', aux]
return salida
def todos_autogestion():
"""consulto email tabla clave unificada"""
try:
dbmysql = MySQLdb.connect(
host=myconf.take('datos.clum_srv'),
port=int(myconf.take('datos.clum_port')),
user=myconf.take('datos.clum_user'),
passwd=myconf.take('datos.clum_pass'),
db=myconf.take('datos.clum_db'))
cursor = dbmysql.cursor()
cursor.execute("""select first_name,last_name,email,username,created_on,modified_on from auth_user;""")
registro=cursor.fetchall()
#log("mailalt: "+str(registro))
dbmysql.close()
except Exception as e:
return ['error',e.args]
#return registro
resultado={}
for i in registro:
resultado[i[3]]={
'nombre':i[0],
'apellido':i[1],
'mailalt':i[2],
'usuario':i[3],
'fcreado':i[4],
'fmodificado':i[5]
}
return ['ok',resultado]
def agrego_autogestion(username,nombre,apellido,correo,creadopor):
"""agrego usuario a autogestion"""
#consulto que el usuario exista en seguusua
log("intento agregar a clum: "+str(username)+" "+str(nombre)+" "+str(apellido)+" "+str(correo)+" "+str(creadopor))
consulta=seguusua(username)
if consulta[0]=='error':
log('no existe en seguusua')
return ['error',consulta[1]]
email=str(correo)
usuarioclum=clumusuario(email)
if usuarioclum!='no configurado':
return ['error',str(correo)+" utilizado por "+str(usuarioclum)+". No se agrega "+str(username)+" en autogestion."]
#make sure it does not already exist in the database; if it exists but is not configured, delete it to avoid duplicating the record
usuario_clum=consulta_emailalt(username)[1]
if usuario_clum=='no configurado':
elimino_autogestion(username)
creador=consulta_id(creadopor)
#only create it if it does not exist
if usuario_clum=='no creado':
dbmysql = MySQLdb.connect(
host=myconf.take('datos.clum_srv'),
port=int(myconf.take('datos.clum_port')),
user=myconf.take('datos.clum_user_insert'),
passwd=myconf.take('datos.clum_pass_insert'),
db=myconf.take('datos.clum_db'))
cursor = dbmysql.cursor()
modeloclum="(first_name,last_name,email,username,registration_id,is_active,created_on,created_by,respuesta,pregunta,tyc)"
valores="""'%s','%s','%s','%s','%s','T','%s','%s','Ninguna','Ninguna','T'"""%(nombre,apellido,correo,username,username,datetime.datetime.now(),creador)
#log("valores"+str(valores))
sqladd="""insert into auth_user %s values (%s);"""%(modeloclum,valores)
cursor.execute(sqladd)
dbmysql.commit()
registro=cursor.fetchall()
log("fetch: "+str(registro))
dbmysql.close()
log("agregado a clum: "+str(valores))
retorno="agregado ("+str(valores)+") fetch: "+str(registro)
return ['ok',retorno]
else:
return ['error','usuario ya existe en autogestion']
def elimino_autogestion(username):
log("intento borrar de clum: "+str(username))
consulta=seguusua(username)
if consulta[0]=='error':
log('no existe en seguusua')
return ['error',consulta[1]]
usuario_clum=consulta_emailalt(username)[1]
if usuario_clum!='no creado':
dbmysql = MySQLdb.connect(
host=myconf.take('datos.clum_srv'),
port=int(myconf.take('datos.clum_port')),
user=myconf.take('datos.clum_user_insert'),
passwd=myconf.take('datos.clum_pass_insert'),
db=myconf.take('datos.clum_db'))
cursor = dbmysql.cursor()
sqldelete="""delete from auth_user where username='%s';"""%(username)
log(sqldelete)
cursor.execute(sqldelete)
dbmysql.commit()
usuario_clum=consulta_emailalt(username)[1]
if usuario_clum=='no creado':
return['ok','borrado']
else:
return['error','no borrado']
def clave_unificada(usuario, id_clave, **kwargs):
# the password is stored temporarily in memory (redis).
# check that all services are available; the user must exist in kerberos and have the mr branch, sw is optional
# store the password with
# redis_server.setex("new"+session.sesiong,base64.b64encode(session.sesiong+request.vars.newpass),tiemposession+10)
# kerberos
log('cambio clave ' + str(usuario))
if hasattr(auth.user, 'username'):
datamap = dni_datos_map(seguusua(auth.user.username)[1]['DNI'])
if datamap[0]=='ok':
usuarioadmin=datamap[1]['NOMBRE']+' '+datamap[1]['APELLIDO']+' ('+auth.user.username+')'
else:
usuarioadmin=auth.user.username
else:
usuarioadmin='admin'
fechacambio=datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
if ('solomr' in kwargs):
solomr = kwargs['solomr']
else:
solomr = False
existe_krb=principal_krb(usuario)
if existe_krb[0]=='error':
if existe_krb[1]=='no existe en kerberos':
#try to create it
resultado_krb=crear_princ_kerberos(usuario)
if resultado_krb[0]=='error':
#could not create it; bail out and return the error
return resultado_krb
else:
#bail out if it is any other kind of error
return existe_krb
# at this point the user exists in kerberos
# fetch the password
try:
clave = base64.b64decode(redis_server.get(id_clave)).replace(id_clave,'')
except Exception as e:
return ['error','no pude obtener la clave '+str(e.args)]
# the kerberos change is the only one that cannot be undone; try it first and return an error if it fails
rtdo_krb = cambiar_kerberos(usuario,clave)
if rtdo_krb[0]=='error':
return rtdo_krb
# ldap
ramasldap=ramas(usuario)
if ramasldap[0] == 'ok':
log('intento reparar rama mr')
# it exists in ldap, try to update its data against map
consultadni=seguusua(usuario)
if consultadni[0] == 'ok':
dni=consultadni[1]['DNI']
consultamap=dni_datos_map(dni)
if consultamap[0]=='ok':
# we have everything needed to update ldap against map
apellido=consultamap[1]['APELLIDO']
nombre=consultamap[1]['NOMBRE']
rama='mr'
reparticion=consultamap[1]['REPARTICION']
elimino_ldap(usuario,'mr')
mr_result=agrego_ldap(usuario,rama,nombre,apellido,reparticion,clave=clave)
else:
mr_result=reparo_rama_mr(usuario,clave)
if mr_result[0]=='ok':
# seguridadweb
# move this!
existe_sw=sw_datos(usuario)
if existe_sw[0]=='ok':
rtdo_sw=sw_cambio_pass(usuario, clave)
else:
rtdo_sw=['ok','no tiene sw']
if solomr==True:
# already changed when the mr branch was repaired
rtdo_ldap = mr_result
else:
# change the password in the remaining branches
rtdo_ldap=ldap_cambio_pass(usuario,clave)
avisocorreo=envio_aviso_cambio(usuario,usuarioadmin,fechacambio)
log(str(usuario)+str(' krb: '+rtdo_krb[1])+' sw: '+str(rtdo_sw[1])+' ldap: '+str(rtdo_ldap[1])+' aviso: '+str(avisocorreo))
if rtdo_krb[0]=='ok' and rtdo_sw[0]=='ok' and rtdo_ldap[0]=='ok':
return ['ok', 'clave cambiada con exito']
else:
return ['ok', 'clave cambiada con errores: krb '+str(rtdo_krb[1])+' sw '+str(rtdo_sw[1])+' ldap '+str(rtdo_ldap[1])]
else:
log('algo fallo en reparar mr: '+str(mr_result))
return mr_result
else:
log('algo fallo en la consulta en seguusua: '+str(consultadni))
return consultadni
else:
log('algo fallo en la consulta a ldap :'+str(ramasldap))
return ramasldap
def test_clave_unificada():
#required variables
session.last_login='prueba1'
session.clave_id='aaa'
request.vars.fnewpass='Qq9'+str(id_generator(size=12))
redis_server.setex(session.clave_id,base64.b64encode(session.clave_id+request.vars.fnewpass),tiemposession+100)
return ['clave: '+str(request.vars.fnewpass),clave_unificada(session.last_login,session.clave_id,solomr=True)]
def envio_aviso_cambio(usuario,usuarioadmin,fechacambio):
anio_correo = str(datetime.datetime.now().year)
#look up the autogestion email
mailalternativo=consulta_emailalt(usuario)
#check that the user has a webmail branch
casilla=consulta_casilla(usuario)
correo=[]
if mailalternativo[0]=='ok':
correo.append(mailalternativo[1])
if casilla[0]=='ok':
correo.append(str(usuario)+'@rosario.gov.ar')
if correo==[]:
mensaje='no hay direcciones disponibles donde enviar la notificacion '+ str([usuario,usuarioadmin,fechacambio])
log(mensaje)
return ['error',mensaje]
#build the email
mensaje_text = """Estimado Usuario %s:
Su clave de acceso fue cambiada por %s el %s
SI UD NO HA SOLICITADO DICHO CAMBIO, LE ROGAMOS INFORME ESTE INCIDENTE A MESA DE AYUDA
Para poder utilizar los servicios informáticos, ahora deberá cambiarla Ud. personalmente,
de la siguiente manera:
* Ingrese a https://www.rosario.gov.ar/unificada
* Ingresar Usuario y Contraseña y Aceptar.
* Ingresar Nueva Contraseña, repetirla para confirmación y Aceptar.
Recuerde respetar las siguientes directivas:
* Mantenga su contraseña en secreto
* Cambie su contraseña cuando sospeche que alguien más la conoce
* Seleccione una contraseña 'de calidad':
o Que sea fácil de recordar.
o Que no esté basada en algún dato que otra persona pueda adivinar u obtener fácilmente mediante información relacionada con usted, por ejemplo
nombres, números de teléfono, patente de su vehículo, fecha de nacimiento, etc.
o Que no tenga caracteres idénticos consecutivos o grupos
totalmente numéricos o totalmente alfabéticos
o Que contengan diferentes clases de caracteres. Son diferentes clases de caracteres: letras mayúsculas, letras minúsculas, números y
signos de puntuación
o Evite reutilizar o reciclar viejas contraseñas
* Notifique cualquier incidente de seguridad relacionado con sus contraseñas: pérdida, robo o indicio de pérdida de confidencialidad.
"""%(usuario, usuarioadmin, fechacambio)
anio_correo = str(datetime.datetime.now().year)
open_mensaje_html = open("applications/useradm/static/correos/correo3.html", "r")
mensaje_html = open_mensaje_html.read()
mensaje_html = mensaje_html.replace("anio_correo", anio_correo)
mensaje_html = mensaje_html.replace("mensaje_correo", mensaje_text.replace("\n", '<BR>').replace('https://www.rosario.gov.ar/unificada',"<a href='https://www.rosario.gov.ar/unificada'>Clave unificada</a>"))
#session.exito = "Correo enviado a "+str(correo)[:5]+"...@..."+str(correo)[-8:]
#mail.send(to=[correo],subject='Usuario para Recibo Digital MR',reply_to='seguridad@rosario.gov.ar',message=(mensaje_text, mensaje_html))
mensajemail="Aviso a "+str(usuario)+" al correo "+str(correo)+" en "+str(URL('index',host=True))
asunto='Aviso de Cambio de clave desde soporte'
mail.send(to=correo,subject=asunto,reply_to='auditoria-sec@rosario.gov.ar', message=(mensaje_text, mensaje_html))
mail.send(to=['mredond1@rosario.gov.ar'],subject='Cambio clave useradm',reply_to='auditoria-sec@rosario.gov.ar', message=mensajemail)
log("Correo enviado a " + str(correo) + " usuario: " + str(usuario))
return ['ok',"Correo enviado a "+str(correo)+" usuario: "+str(usuario)+ " cambiado por: "+str(usuarioadmin)]
# sent to the @rosario.gov.ar address (if webmail exists) and to the alternative address
def test_envio_aviso_cambio():
resultado=envio_aviso_cambio('mredond1','test','1/1/2018')
esperado=['ok', "Correo enviado a ['redondomarco@gmail.com', 'mredond1@rosario.gov.ar'] usuario: mredond1 cambiado por: test"]
if resultado==esperado:
return True
def desbloquear(usuario):
"""
Try to unlock the user's password
"""
try:
# unlock in seguridadweb
unlocksw = sw_desbloquear(usuario)
# unlock in ldap
unlockldap = ldap_desbloquear(usuario)
return['ok', unlocksw[1], unlockldap[1]]
except Exception as e:
return ['error', str(e)]
|
redondomarco/useradm
|
src/models/unificada.py
|
Python
|
gpl-3.0
| 16,421
|
# ------------------------------------------------------------------------------
# Security Central
# ------------------------------------------------------------------------------
from .models import User
from pyramid.security import Allow, Everyone, Authenticated, ALL_PERMISSIONS
from pyramid.authentication import SessionAuthenticationPolicy
from pyramid.authorization import ACLAuthorizationPolicy
from .utils.gauth import getSecret, verifyOneTimePassword
import logging
log = logging.getLogger(__name__)
# ------------------------------------------------------------------------------
# Configuration
# ------------------------------------------------------------------------------
def includeme(config):
"""Set up the authentication and authorization policies"""
authnPolicy = SessionAuthenticationPolicy(callback=getGroups)
authzPolicy = ACLAuthorizationPolicy()
config.set_authentication_policy(authnPolicy)
config.set_authorization_policy(authzPolicy)
config.set_root_factory(Root)
# Custom predicates
config.add_view_predicate("userNeedsVerification",
UserNeedsVerificationPredicate)
log.info("security set up")
# ------------------------------------------------------------------------------
# Authentication
# ------------------------------------------------------------------------------
def getGroups(name, request):
user = request.user
if user is None:
log.info("getGroups called for non-existent user %s" % name)
return None
if user.usesGauth and not user.gauthVerified:
log.debug("getGroups called for non-verified user %s" % name)
return None
return getGroupsForUser(user)
def getGroupsForUser(user):
groups = []
return groups
class Authentication:
TO_VERIFY, OK, FAILED, LOCKED_OUT = range(4)
def checkAuthentication(name, givenPass):
"""Check the given login and password matches an active user"""
result = Authentication.FAILED
name = name.replace(':', ';')
user = User.getByLogin(name)
if user:
if user.failed_logins < 99:
if givenPass and user.verifyPassword(givenPass):
log.info("User %s password OK" % name)
if user.usesGauth:
user.gauthVerified = False
result = Authentication.TO_VERIFY
else:
result = Authentication.OK
user.failed_logins = 0
else:
log.info("User %s authentication FAILED" % name)
user.failed_logins += 1
else:
log.warning("User %s locked out" % name)
result = Authentication.LOCKED_OUT
else:
log.info("User %s does not exist" % name)
return result, user
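# Illustrative sketch (assumption, not part of the original module): a login view would
# typically branch on the Authentication constants returned by checkAuthentication; the
# return strings here are placeholders, not part of the real application.
def _example_handle_login(name, password):
    result, user = checkAuthentication(name, password)
    if result == Authentication.OK:
        return "logged in"
    if result == Authentication.TO_VERIFY:
        return "ask for one-time password"  # then call checkVerification(user, otp)
    if result == Authentication.LOCKED_OUT:
        return "account locked"
    return "bad credentials"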
def checkVerification(user, givenOtp):
"""Verify the given one-time-password of users who use gauth"""
result = Authentication.FAILED
if user.usesGauth:
if user.failed_logins < 3:
secret = getSecret(user.gauth_key, user.id)
if givenOtp and verifyOneTimePassword(givenOtp, secret):
log.info("User %s verification OK" % user.login)
result = Authentication.OK
user.failed_logins = 0
else:
log.info("User %s verification FAILED" % user.login)
user.failed_logins += 1
else:
log.warning("User %s locked out" % user.login)
result = Authentication.LOCKED_OUT
else:
log.error("User %s does not use gauth!!!" % user.login)
return result
# ------------------------------------------------------------------------------
# View Predicates
# ------------------------------------------------------------------------------
class UserNeedsVerificationPredicate(object):
def __init__(self, flag, config):
self.flag = flag
def text(self):
if self.flag:
return "User does need verification"
else:
return "User does not need verification"
phash = text
def __call__(self, context, request):
user = request.user
needsVerification = user and user.usesGauth and not user.gauthVerified
return self.flag == needsVerification
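# Illustrative note (assumption, not part of the original module): once registered in
# includeme(), the predicate can gate views, e.g.
#   @view_config(route_name='verify', userNeedsVerification=True)
# so only users who still owe a one-time password reach that view; 'verify' is a
# hypothetical route name.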
# ------------------------------------------------------------------------------
# Security Domains
# ------------------------------------------------------------------------------
class Root(dict):
"""The root security domain"""
__acl__ = [(Allow, Everyone, ()),
(Allow, Authenticated, ('view', 'edit', 'play')),
(Allow, 'role:admin', ALL_PERMISSIONS) ]
def __init__(self, request):
pass
class UserSettings(object):
"""The security domain for user settings"""
def __init__(self, request):
self.request = request
@property
def __acl__(self):
# just delegate acl handling to the current user
if self.request.user:
return self.request.user.__acl__
|
linuxsoftware/dominoes
|
davezdominoes/gamecoordinator/security.py
|
Python
|
agpl-3.0
| 5,040
|
from django.http import Http404
from django.shortcuts import get_object_or_404, render, redirect
from django.utils.encoding import force_text
from django.utils.text import capfirst
from django.contrib.contenttypes.models import ContentType
from django.contrib import messages
from django.contrib.auth.decorators import permission_required
from django.core.exceptions import PermissionDenied
from django.utils.translation import ugettext as _
from django.core.urlresolvers import reverse
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from wagtail.wagtailadmin.edit_handlers import ObjectList, extract_panel_definitions_from_model_class
from wagtail.wagtailsnippets.models import get_snippet_content_types
from wagtail.wagtailsnippets.permissions import user_can_edit_snippet_type
# == Helper functions ==
def get_snippet_type_name(content_type):
""" e.g. given the 'advert' content type, return ('Advert', 'Adverts') """
# why oh why is this so convoluted?
opts = content_type.model_class()._meta
return (
force_text(opts.verbose_name),
force_text(opts.verbose_name_plural)
)
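# Illustrative sketch (hypothetical helper, not part of the original module): resolving a
# snippet model class to its ContentType and back to the (singular, plural) name pair,
# e.g. (u'advert', u'adverts') for the 'advert' example in the docstring above.
def _example_snippet_type_name(model):
    return get_snippet_type_name(ContentType.objects.get_for_model(model))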
def get_snippet_type_description(content_type):
""" return the meta description of the class associated with the given content type """
opts = content_type.model_class()._meta
try:
return force_text(opts.description)
except Exception:
return ''
def get_content_type_from_url_params(app_name, model_name):
"""
retrieve a content type from an app_name / model_name combo.
Throw Http404 if not a valid snippet type
"""
try:
content_type = ContentType.objects.get_by_natural_key(app_name, model_name)
except ContentType.DoesNotExist:
raise Http404
if content_type not in get_snippet_content_types():
# don't allow people to hack the URL to edit content types that aren't registered as snippets
raise Http404
return content_type
SNIPPET_EDIT_HANDLERS = {}
def get_snippet_edit_handler(model):
if model not in SNIPPET_EDIT_HANDLERS:
panels = extract_panel_definitions_from_model_class(model)
edit_handler = ObjectList(panels)
SNIPPET_EDIT_HANDLERS[model] = edit_handler
return SNIPPET_EDIT_HANDLERS[model]
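# Illustrative sketch (assumption, not part of the original module): because of the
# module-level SNIPPET_EDIT_HANDLERS cache, panel extraction runs only once per model
# class and repeated calls return the very same ObjectList instance.
def _example_edit_handler_is_cached(model):
    return get_snippet_edit_handler(model) is get_snippet_edit_handler(model)  # always True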
# == Views ==
@permission_required('wagtailadmin.access_admin')
def index(request):
snippet_types = [
(
get_snippet_type_name(content_type)[1],
get_snippet_type_description(content_type),
content_type
)
for content_type in get_snippet_content_types()
if user_can_edit_snippet_type(request.user, content_type)
]
return render(request, 'wagtailsnippets/snippets/index.html', {
'snippet_types': snippet_types,
})
@permission_required('wagtailadmin.access_admin') # further permissions are enforced within the view
def list(request, content_type_app_name, content_type_model_name):
content_type = get_content_type_from_url_params(content_type_app_name, content_type_model_name)
if not user_can_edit_snippet_type(request.user, content_type):
raise PermissionDenied
model = content_type.model_class()
snippet_type_name, snippet_type_name_plural = get_snippet_type_name(content_type)
items = model.objects.all()
return render(request, 'wagtailsnippets/snippets/type_index.html', {
'content_type': content_type,
'snippet_type_name': snippet_type_name,
'snippet_type_name_plural': snippet_type_name_plural,
'items': items,
})
@permission_required('wagtailadmin.access_admin') # further permissions are enforced within the view
def create(request, content_type_app_name, content_type_model_name):
content_type = get_content_type_from_url_params(content_type_app_name, content_type_model_name)
if not user_can_edit_snippet_type(request.user, content_type):
raise PermissionDenied
model = content_type.model_class()
snippet_type_name = get_snippet_type_name(content_type)[0]
instance = model()
edit_handler_class = get_snippet_edit_handler(model)
form_class = edit_handler_class.get_form_class(model)
if request.POST:
form = form_class(request.POST, request.FILES, instance=instance)
if form.is_valid():
form.save()
messages.success(
request,
_("{snippet_type} '{instance}' created.").format(
snippet_type=capfirst(get_snippet_type_name(content_type)[0]),
instance=instance
)
)
return redirect('wagtailsnippets_list', content_type.app_label, content_type.model)
else:
messages.error(request, _("The snippet could not be created due to errors."))
edit_handler = edit_handler_class(instance=instance, form=form)
else:
form = form_class(instance=instance)
edit_handler = edit_handler_class(instance=instance, form=form)
return render(request, 'wagtailsnippets/snippets/create.html', {
'content_type': content_type,
'snippet_type_name': snippet_type_name,
'edit_handler': edit_handler,
})
@permission_required('wagtailadmin.access_admin') # further permissions are enforced within the view
def edit(request, content_type_app_name, content_type_model_name, id):
content_type = get_content_type_from_url_params(content_type_app_name, content_type_model_name)
if not user_can_edit_snippet_type(request.user, content_type):
raise PermissionDenied
model = content_type.model_class()
snippet_type_name = get_snippet_type_name(content_type)[0]
instance = get_object_or_404(model, id=id)
edit_handler_class = get_snippet_edit_handler(model)
form_class = edit_handler_class.get_form_class(model)
if request.POST:
form = form_class(request.POST, request.FILES, instance=instance)
if form.is_valid():
form.save()
messages.success(
request,
_("{snippet_type} '{instance}' updated.").format(
snippet_type=capfirst(snippet_type_name),
instance=instance
)
)
return redirect('wagtailsnippets_list', content_type.app_label, content_type.model)
else:
messages.error(request, _("The snippet could not be saved due to errors."))
edit_handler = edit_handler_class(instance=instance, form=form)
else:
form = form_class(instance=instance)
edit_handler = edit_handler_class(instance=instance, form=form)
return render(request, 'wagtailsnippets/snippets/edit.html', {
'content_type': content_type,
'snippet_type_name': snippet_type_name,
'instance': instance,
'edit_handler': edit_handler
})
@permission_required('wagtailadmin.access_admin') # further permissions are enforced within the view
def delete(request, content_type_app_name, content_type_model_name, id):
content_type = get_content_type_from_url_params(content_type_app_name, content_type_model_name)
if not user_can_edit_snippet_type(request.user, content_type):
raise PermissionDenied
model = content_type.model_class()
snippet_type_name = get_snippet_type_name(content_type)[0]
instance = get_object_or_404(model, id=id)
if request.POST:
instance.delete()
messages.success(
request,
_("{snippet_type} '{instance}' deleted.").format(
snippet_type=capfirst(snippet_type_name),
instance=instance
)
)
return redirect('wagtailsnippets_list', content_type.app_label, content_type.model)
return render(request, 'wagtailsnippets/snippets/confirm_delete.html', {
'content_type': content_type,
'snippet_type_name': snippet_type_name,
'instance': instance,
})
@permission_required('wagtailadmin.access_admin')
def usage(request, content_type_app_name, content_type_model_name, id):
content_type = get_content_type_from_url_params(content_type_app_name, content_type_model_name)
model = content_type.model_class()
instance = get_object_or_404(model, id=id)
# Pagination
p = request.GET.get('p', 1)
paginator = Paginator(instance.get_usage(), 20)
try:
used_by = paginator.page(p)
except PageNotAnInteger:
used_by = paginator.page(1)
except EmptyPage:
used_by = paginator.page(paginator.num_pages)
return render(request, "wagtailsnippets/snippets/usage.html", {
'instance': instance,
'used_by': used_by
})
|
benemery/wagtail
|
wagtail/wagtailsnippets/views/snippets.py
|
Python
|
bsd-3-clause
| 8,696
|
import os
import re
import sys
import warnings
import numpy as np
import pandas as pd
from PySide import *
from PySide import QtGui
from PySide.QtCore import *
from PySide.QtCore import QUrl
from PySide.QtGui import *
from PySide.QtWebKit import QWebView
import math
# TODO: CLEAN OUTLIERS ON A PER 50m basis! Clone the dataframe and step through them. over a range! Interpolate
# TODO: MULTI WELL PAD SETUP
# TODO: All of the trig and extrap methods.
# TODO: GEOGRAPHIC COORDINATES: Take Surface and surveys, convert to Lat/Long and plot each discrete point?
# TODO: MAKE SURE SUBSEA ON TVD PLOTS IS CENTERED
# TODO: Fix TVD miniplots where only change of inc is between two survey points
# TODO: When done testing, uncomment file dialogs and confirm KB function
warnings.simplefilter(action="ignore", category=RuntimeWarning)
class MainWindow(QtGui.QMainWindow):
def __init__(self):
QtGui.QMainWindow.__init__(self)
# title of the window
self.setWindowTitle('Well Plotter v0.5 Alpha')
# fix window size
self.resize(800, 300)
# status bar with initial message
self.statusBar().showMessage('Ready to plot')
self.setWindowIcon(QtGui.QIcon('oilrig.png'))
# Layout
cWidget = QtGui.QWidget(self)
grid = QtGui.QGridLayout(cWidget)
grid.setSpacing(2)
quitAction = QtGui.QAction("Exit", self)
quitAction.setShortcut("Ctrl+Q")
quitAction.setStatusTip('Quit the Application')
quitAction.triggered.connect(self.close_application)
quickTVD = QtGui.QAction("Extrapolate", self)
quickTVD.setShortcut("Ctrl+E")
quickTVD.setStatusTip('Extrapolate Wellpath holding INC')
quickTVD.triggered.connect(self.quickTVD)
self.statusBar()
mainMenu = self.menuBar()
fileMenu = mainMenu.addMenu('&File')
toolsMenu = mainMenu.addMenu('&Tools')
aboutMenu = mainMenu.addMenu('&About')
fileMenu.addAction(quitAction)
toolsMenu.addAction(quickTVD)
# aboutMenu.addAction(self.testWindow)
Button_input1 = QtGui.QPushButton("Import Surveys")
grid.addWidget(Button_input1, 0, 0)
Button_input1.clicked.connect(self.winsurvImport)
surveysPathBox = self.survTextBox = QtGui.QLineEdit(self)
self.survTextBox.setReadOnly(True)
Button_input2 = QtGui.QPushButton("Import Gamma")
grid.addWidget(Button_input2, 1, 0)
Button_input2.clicked.connect(self.importGamma)
gammaPathBox = self.gammaTextBox = QtGui.QLineEdit(self)
self.gammaTextBox.setReadOnly(True)
Button_input3 = QtGui.QPushButton("Import ROP and Gas")
grid.addWidget(Button_input3, 2, 0)
Button_input3.clicked.connect(self.importROPandGas)
ropPathBox = self.ropTextBox = QtGui.QLineEdit(self)
self.ropTextBox.setReadOnly(True)
Button_input4 = QtGui.QPushButton("Generate WellPlot")
grid.addWidget(Button_input4, 3, 0)
Button_input4.clicked.connect(self.plotWell)
Button_input5 = QtGui.QPushButton("Plot Vert. TVD Slices")
grid.addWidget(Button_input5, 4, 0)
Button_input5.clicked.connect(self.plotInflections)
Button_input6 = QtGui.QPushButton("Plot to Google Maps")
grid.addWidget(Button_input6, 5, 0)
Button_input6.clicked.connect(self.googlePlot)
Button_input7 = QtGui.QPushButton("Write to Excel")
grid.addWidget(Button_input7, 6, 0)
Button_input7.clicked.connect(self.writeSpreadsheet)
# Button_input8 = QtGui.QPushButton("Quick Extrapolations")
# grid.addWidget(Button_input8, 7, 0)
# Button_input8.clicked.connect(self.quickTVD)
grid.addWidget(surveysPathBox, 0, 1)
grid.addWidget(gammaPathBox, 1, 1)
grid.addWidget(ropPathBox, 2, 1)
self.setCentralWidget(cWidget)
def winsurvImport(self):
surveyFileName = QtGui.QFileDialog.getOpenFileName(self, "Open Survey File (.txt)", '',
"TXT Files (*.txt *.TXT)")
surveyFileName = surveyFileName[0]
print(surveyFileName)
self.survTextBox.setText(surveyFileName)
surveyFile = open(surveyFileName, 'r')
surveySkipRows = 0
with open(surveyFileName) as myFile:
for num, line in enumerate(myFile, 1):
if 'Measured' in line:
surveySkipRows = int(num)
break
surveyText = surveyFile.read()
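        # The two regexes below pull the RKB elevation (the decimal number after
        # "RKB: ") and the lease/well name (a fixed-width field after "Lease/Well: ")
        # out of the survey file header text.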
rkbRegex = re.compile(r'(RKB:\s)((\d)?(\d)?(\d)?(\d)?\d.\d(\d)?)')
wellnameRegex = re.compile(r'(Lease/Well:\s)(................................)')
wellnameRegexResult = wellnameRegex.search(surveyText)
rkbRegexResult = rkbRegex.search(surveyText)
self.wellname = wellnameRegexResult.group(2)
self.KB = float(rkbRegexResult.group(2))
self.surveyfilename = surveyFileName
# go back to the start of the file for our pandas import after our regex searches
surveyFile.seek(0)
# Make a pandas dataframe for Survey file
readSurveyData = pd.read_csv(surveyFile, sep='\s+', error_bad_lines=False, tupleize_cols=True,
skiprows=surveySkipRows,
header=[0, 1])
# --Close the Survey File--
surveyFile.close()
readSurveyData = readSurveyData.apply(pd.to_numeric, errors='coerce')
readSurveyData = readSurveyData.dropna()
readSurveyData = readSurveyData.reset_index(drop=True)
# print(list(readSurveyData.columns.values))
columnlist = list(readSurveyData.columns.values)
# print(len(readSurveyData.columns))
columnIndexCounter = 0
for column in columnlist:
while columnIndexCounter < len(readSurveyData.columns):
print(columnlist[columnIndexCounter])
if (columnlist[columnIndexCounter]) == ('Depth', 'Meters'):
readSurveyData.rename(columns={(r'Depth', 'Meters'): ('Measured_Depth_Metres')}, inplace=True)
if (columnlist[columnIndexCounter]) == ('Angle', 'Deg'):
readSurveyData.rename(columns={(r'Angle', 'Deg'): ('Inclination_Angle_Deg')}, inplace=True)
if (columnlist[columnIndexCounter]) == ('Direction', 'Deg'):
readSurveyData.rename(columns={(r'Direction', 'Deg'): ('Drift_Direction_Deg')}, inplace=True)
if (columnlist[columnIndexCounter]) == ('Vertical', 'Depth'):
readSurveyData.rename(columns={(r'Vertical', 'Depth'): ('True_Vertical_Depth')}, inplace=True)
if (columnlist[columnIndexCounter]) == ('TVD', 'Meters'):
readSurveyData.rename(columns={(r'TVD', 'Meters'): ('Subsea_TVD_Meters')}, inplace=True)
if (columnlist[columnIndexCounter]) == ('N-S', 'Meters'):
readSurveyData.rename(columns={(r'N-S', 'Meters'): ('N-S_Meters')}, inplace=True)
if (columnlist[columnIndexCounter]) == ('E-W', 'Meters'):
readSurveyData.rename(columns={(r'E-W', 'Meters'): ('E-W_Meters')}, inplace=True)
if (columnlist[columnIndexCounter]) == ('Section', 'Meters'):
readSurveyData.rename(columns={(r'Section', 'Meters'): ('Vertical_Section_Meters')},
inplace=True)
if (columnlist[columnIndexCounter]) == ('Distance', 'Meters'):
readSurveyData.rename(columns={(r'Distance', 'Meters'): ('Closure_Distance_Meters')},
inplace=True)
if (columnlist[columnIndexCounter]) == ('Severity', 'Deg/30m'):
readSurveyData.rename(columns={(r'Severity', 'Deg/30m'): ('Dogleg_Severity_Deg/30m')},
inplace=True)
if (columnlist[columnIndexCounter]) == ('Severity', 'Deg/30'):
readSurveyData.rename(columns={(r'Severity', 'Deg/30'): ('Dogleg_Severity_Deg/30m')},
inplace=True)
if (columnlist[columnIndexCounter]) == """('Direction', 'Deg').1""":
readSurveyData.rename(columns={("""('Direction', 'Deg').1"""): ('Closure_Direction_Degrees')},
inplace=True)
columnIndexCounter = columnIndexCounter + 1
readSurveyData[('Depth_Subsea_Meters')] = float(self.KB) - readSurveyData[('True_Vertical_Depth')]
self.surveydata = readSurveyData
# def importSurveys(self):
# global fileName2
#
# fileName2tup = QtGui.QFileDialog.getOpenFileName(self, "Open Survey File (.txt)", '',
# "TXT Files (*.txt *.TXT)")
# fileName2 = fileName2tup[0]
#
# # fileName2 = 'C:/Users/Brett/Desktop/Python Projects/Wellplotting/TD Surveys.TXT'
#
# self.survTextBox.setText(fileName2)
#
# surveyFile = open(fileName2, 'r')
#
# os.chdir((os.path.dirname(fileName2)))
#
# # Find the header line number in survey file
# lookup = 'Measured'
# with open(fileName2) as myFile:
# for num, line in enumerate(myFile, 1):
# if lookup in line:
# surveySkipRows = int(num)
# break
#
# surveyText = surveyFile.read()
# rkbRegex = re.compile(r'(RKB:\s)((\d)?(\d)?(\d)?(\d)?\d.\d(\d)?)')
# rkbRegexResult = rkbRegex.search(surveyText)
# global KB
# KB = float(rkbRegexResult.group(2))
# surveyFile.seek(0)
# # Make a pandas dataframe for Survey file
# global readSurveyData
# readSurveyData = pd.read_csv(surveyFile, sep='\s+', error_bad_lines=False, tupleize_cols=True,
# skiprows=surveySkipRows,
# header=[0, 1])
# # --Close the Survey File--
# surveyFile.close()
# readSurveyData = readSurveyData.apply(pd.to_numeric, errors='coerce')
# readSurveyData = readSurveyData.dropna()
# readSurveyData = readSurveyData.reset_index(drop=True)
#
# # def confirmSurvey(self):
# # global KB
# # choice = QtGui.QMessageBox.question(self, 'Confirm KB',
# # "Is " + str(KB) + "m the correct KB for this well?",
# # QtGui.QMessageBox.Yes | QtGui.QMessageBox.No)
# # if choice == QtGui.QMessageBox.Yes:
# # msg = QtGui.QMessageBox()
# # msg.setIcon(QtGui.QMessageBox.Information)
# #
# # msg.setText('The KB has been set to: ' + str(KB) + "m")
# # msg.setWindowTitle("KB Updated.")
# # msg.setStandardButtons(QtGui.QMessageBox.Ok | QtGui.QMessageBox.Cancel)
# #
# #
# #
# # else:
# # uKB = QtGui.QInputDialog.getDouble(self, 'Update KB', 'Please enter the correct KB for this well.')
# # KB = float(uKB[0])
# #
# # confirmSurvey(self)
#
# readSurveyData['Depth_Subsea_Meters'] = float(KB) - readSurveyData[('Vertical', 'Depth')]
#
# self.statusBar().showMessage('Surveys Imported')
def importGamma(self):
# --GAMMA FILE--
# Initialize QT dialog for gamma file
global fileName
fileNametup = QtGui.QFileDialog.getOpenFileName(self, "Open Gamma File (.las)", '',
"LAS Files (*.las *.LAS)")
fileName = fileNametup[0]
# fileName = 'C:/Users/Brett/Desktop/Python Projects/Wellplotting/TD Gamma.las'
self.gammaTextBox.setText(fileName)
# Find the header line number in gamma file
lookup = '~A'
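        # '~A' marks the start of the ASCII data section in a LAS file; the actual
        # log data begins two lines below it, hence the +2 when skipping rows.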
with open(fileName) as myFile:
for num, line in enumerate(myFile, 1):
if lookup in line:
gammaSkipRows = int(num + 2)
# Make a pandas Dataframe for Gamma
gammaFile = open(fileName, 'r')
global readGammaData
readGammaData = pd.read_csv(gammaFile, sep='\s+', skiprows=gammaSkipRows, na_values=['-999.25'],
names=['MD', 'Gamma', 'TVD'])
# --Close the Gamma File.--
gammaFile.close()
self.statusBar().showMessage('Gamma Imported')
def importROPandGas(self):
global fileName3
# Initialize Qt dialog for Gas/ROP file PASON!
fileName3tup = QtGui.QFileDialog.getOpenFileName(self, "Import Pason Gas/ROP File (.txt)", '',
"Pason Gas and ROP Files (*.txt *.TXT)")
fileName3 = fileName3tup[0]
# fileName3 = 'C:/Users/Brett/Desktop/Python Projects/Wellplotting/TD Gas and ROP.txt'
gasandropFile = open(fileName3, 'r')
self.ropTextBox.setText(fileName3)
# Make a pandas dataframe for Gas/ROP file
global readGasData
readGasData = pd.read_csv(gasandropFile, usecols=range(3), skiprows=20, sep='\s+', na_values=['-999.25', '--'],
names=['MD', 'ROP', 'Gas'])
gasandropFile.close()
# --Close the Gas/ROP File--
self.statusBar().showMessage('Gas and ROP Imported')
def plotWell(self):
from bokeh.models import LinearAxis, Range1d, HoverTool, CrosshairTool
from bokeh.plotting import figure, output_file, vplot
from bokeh.io import show
# Print the KB, Standard Dev, Median
print('The KB is: ' + str(self.KB))
print('ROP Standard dev is ' + str(readGasData['ROP'].std()))
print('ROP median is ' + str(readGasData['ROP'].median()))
# TODO: Look at a better way to do this, rolling median or something...
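        # Crude outlier filter: any ROP sample more than three medians away from the
        # median is replaced with the median before plotting.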
mask = (abs(readGasData['ROP'] - readGasData['ROP'].median()) > readGasData['ROP'].median() * 3)
readGasROPCol = readGasData['ROP']
readGasROPCol[mask] = readGasROPCol.median()
ropMean = np.nanmean(readGasData['ROP'])
# print('ROP mean is ' + str(ropMean))
# Get Gamma and ROP Standard dev
gammaSTD = np.nanstd(readGammaData['Gamma'])
gasplotYRangeEnd = readGasData['Gas'].max()
# print(gasplotYRangeEnd)
        # Set End of Y Range as TD Subsea +10
plotYRangeEnd = ((self.surveydata['Depth_Subsea_Meters'].iloc[-1]) + 10)
# Set Start of Y Range as TD Subsea -10
plotYRangeStart = ((self.surveydata['Depth_Subsea_Meters'].iloc[-1]) - 10)
        # Find the first survey where the well builds past 70 degrees; use it as the start of the X Range
        plotXRangeStartCalc = (self.surveydata['Inclination_Angle_Deg'].where(self.surveydata['Inclination_Angle_Deg'] > 70))
        # Grab the index of the first value past 70 degrees
        plotXRangeStartIndex = plotXRangeStartCalc.first_valid_index()
        # Set the plotXRangeStart variable as the first MD value past 70 degrees
plotXRangeStart = (self.surveydata['Measured_Depth_Metres'].ix[plotXRangeStartIndex])
# Set End of X Range as TD MD + 20
plotXRangeEnd = ((self.surveydata['Measured_Depth_Metres'].iloc[-1]) + 20)
plotx1 = self.surveydata['Measured_Depth_Metres']
ploty1 = self.surveydata['Depth_Subsea_Meters']
# set the second series: Gamma MD Depths on X, Gamma counts on Y
plotx2 = readGammaData['MD']
ploty2 = readGammaData['Gamma']
hover = HoverTool(tooltips=[("(MD, Value)", "(@x{1.11}, @y{1.11})"), ], mode='vline')
hover2 = HoverTool(tooltips=[("(MD, Value)", "(@x{1.11}, @y{1.11})"), ], mode='vline')
# crosshair = CrosshairTool(dimensions=(['height', 'y']))
# crosshair2 = CrosshairTool(dimensions=(['height', 'y']))
# Wellpath and gamma plot
wellPlot = figure(width=1280, height=420, x_range=(plotXRangeStart, plotXRangeEnd),
y_range=(plotYRangeStart, plotYRangeEnd), min_border=80,
tools=[hover, "pan,wheel_zoom,box_zoom,reset,previewsave"])
wellPlot.line(plotx1, ploty1, line_color="black", legend='Well Path')
wellPlot.circle(plotx1, ploty1, fill_color='red', size=5, legend='Well Path')
wellPlot.xaxis.axis_label = "Measured Depth (m)"
wellPlot.yaxis.axis_label = "Subsea (m)"
wellPlot.xaxis.axis_label_text_font_size = '12pt'
wellPlot.yaxis.axis_label_text_font_size = '12pt'
wellPlot.ygrid.minor_grid_line_color = 'grey'
wellPlot.ygrid.grid_line_color = 'grey'
wellPlot.extra_y_ranges['foo'] = Range1d(0, 250)
wellPlot.line(plotx2, ploty2, line_color="blue", y_range_name="foo", legend='Gamma')
wellPlot.add_layout(LinearAxis(y_range_name="foo"), 'right')
plotx3 = readGasData['MD']
ploty3 = readGasData['Gas']
ploty4 = readGasROPCol
wellPlotGasROP = figure(width=1280, height=300, x_range=(plotXRangeStart, plotXRangeEnd),
y_range=(0, gasplotYRangeEnd),
min_border=80,
tools=[hover2, "pan,wheel_zoom,box_zoom,reset, previewsave"])
# wellPlotGasROP.toolbar_location = None
wellPlotGasROP.line(plotx3, ploty3, line_color="red", legend='Total Gas')
wellPlotGasROP.extra_y_ranges['foo'] = Range1d(0, 5)
wellPlotGasROP.line(plotx3, ploty4, line_color="blue", y_range_name="foo", legend='ROP')
wellPlotGasROP.add_layout(LinearAxis(y_range_name="foo"), 'right')
wellPlotGasROP.xaxis.axis_label = "Measured Depth (m)"
        wellPlotGasROP.yaxis.axis_label = "Total Gas"
wellPlotGasROP.xaxis.axis_label_text_font_size = '12pt'
wellPlotGasROP.yaxis.axis_label_text_font_size = '12pt'
wellPlotGasROP.ygrid.grid_line_color = 'grey'
wellPlots = vplot(wellPlot, wellPlotGasROP)
output_file("Wellplot.html", title="Wellplot")
# self.win = QWebView()
# self.win.showMaximized()
# self.win.load(QUrl('Wellplot.html'))
# self.win.show()
#
show(wellPlots)
self.statusBar().showMessage('Well Plotted')
def googlePlot(self):
import gmplot
import webbrowser
new = 2
mymap = gmplot.GoogleMapPlotter(51.19705, -108.5576, 16)
# mymap = GoogleMapPlotter.from_geocode("Stanford University")
# mymap.grid(37.42, 37.43, 0.001, -122.15, -122.14, 0.001)
# mymap.marker(37.427, -122.145, "yellow")
# mymap.marker(37.428, -122.146, "cornflowerblue")
# mymap.marker(37.429, -122.144, "k")
# lat, lng = mymap.geocode("Stanford University")
# mymap.marker(lat, lng, "red")
# mymap.circle(37.429, -122.145, 100, "#FF0000", ew=2)
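        # path[0] holds the latitudes and path[1] the longitudes of the points to
        # join with a polyline on the map.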
path = [(51.19705, 51.199623, 51.204336), (-108.5576, -108.554936, -108.554927)]
# scatter_path = [(51.19705, 51.199623, 51.204336), (-108.5576, -108.554936, -108.554927)]
# path2 = [[i+.01 for i in path[0]], [i+.02 for i in path[1]]]
# path3 = [(37.433302 , 37.431257 , 37.427644 , 37.430303), (-122.14488, -122.133121, -122.137799, -122.148743)]
# path4 = [(37.423074, 37.422700, 37.422410, 37.422188, 37.422274, 37.422495, 37.422962, 37.423552, 37.424387, 37.425920, 37.425937),
# (-122.150288, -122.149794, -122.148936, -122.148142, -122.146747, -122.14561, -122.144773, -122.143936, -122.142992, -122.147863, -122.145953)]
mymap.plot(path[0], path[1], "red", edge_width=2)
# mymap.plot(path2[0], path2[1], "red")
# mymap.polygon(path3[0], path3[1], edge_color="cyan", edge_width=5, face_color="blue", face_alpha=0.1)
# mymap.heatmap(path[0], path[1], threshold=10, radius=40)
# mymap.heatmap(path3[0], path3[1], threshold=10, radius=40, dissipating=False, gradient=[(30,30,30,0), (30,30,30,1), (50, 50, 50, 1)])
# mymap.scatter(path[0], path[1], c='r', marker=True)
# mymap.scatter(path[0], path[1], s=90, marker=False, alpha=0.1)
# mymap.marker(51.19705, -108.5576, color='FF0000')
# Get more points with:
# http://www.findlatitudeandlongitude.com/click-lat-lng-list/
# scatter_path = ([37.424435, 37.424417, 37.424417, 37.424554, 37.424775, 37.425099, 37.425235, 37.425082, 37.424656, 37.423957, 37.422952, 37.421759, 37.420447, 37.419135, 37.417822, 37.417209],
# [-122.142048, -122.141275, -122.140503, -122.139688, -122.138872, -122.138078, -122.137241, -122.136405, -122.135568, -122.134731, -122.133894, -122.133057, -122.13222, -122.131383, -122.130557, -122.129999])
# mymap.scatter(scatter_path[0], scatter_path[1], c='r', marker=True)
mymap.draw('mymap.html')
webbrowser.open('mymap.html', new=new)
def writeSpreadsheet(self):
writer = pd.ExcelWriter('WellSheet.xlsx', engine='xlsxwriter')
self.surveydata.to_excel(writer, 'Survey')
readGasData.to_excel(writer, 'GasROP')
readGammaData.to_excel(writer, 'Gamma')
writer.save()
def plotInflections(self):
from itertools import groupby
from operator import itemgetter
from bokeh.plotting import figure, show, output_file, gridplot
# Create a copy of the survey dataframe
inflectionFrame = self.surveydata.copy(deep=True)
print(inflectionFrame)
# Normalize the inclination to + is up - is down
inflectionFrame['Inclination_Angle_Deg'] = inflectionFrame['Inclination_Angle_Deg'] - 90
# fetch the indexes where inclination is going up
posinflectionFrame = (inflectionFrame.where(inflectionFrame['Inclination_Angle_Deg'] > 0))
posinflectionFrame = posinflectionFrame.dropna()
posindexesList = posinflectionFrame.index.tolist()
# fetch the indexes where the inclination is going down.
neginflectionFrame = (inflectionFrame.where(inflectionFrame['Inclination_Angle_Deg'] < 0))
neginflectionFrame = neginflectionFrame.dropna()
negindexesList = neginflectionFrame.index.tolist()
poschunksList = []
for k, g in groupby(enumerate(posindexesList), lambda ix: ix[0] - ix[1]):
poschunksList.append(list(map(itemgetter(1), g)))
negchunksList = []
for k, g in groupby(enumerate(negindexesList), lambda ix: ix[0] - ix[1]):
negchunksList.append(list(map(itemgetter(1), g)))
# print(posinflectionFrame)
# print(posinflectionFrame.iloc[1])
posrangeList = []
for item in poschunksList:
if inflectionFrame['Measured_Depth_Metres'].iloc[item[0]] != inflectionFrame['Measured_Depth_Metres'].iloc[
item[-1]]:
posrangeList.append(((int(inflectionFrame['Measured_Depth_Metres'].iloc[item[0]])),
(int(inflectionFrame['Measured_Depth_Metres'].iloc[item[-1]]))))
negrangeList = []
for item in negchunksList:
if inflectionFrame['Measured_Depth_Metres'].iloc[item[0]] != inflectionFrame['Measured_Depth_Metres'].iloc[
item[-1]]:
negrangeList.append(((int(inflectionFrame['Measured_Depth_Metres'].iloc[item[0]])),
(int(inflectionFrame['Measured_Depth_Metres'].iloc[item[-1]]))))
# Set end of Y Range as TD Subsea +2
plotYRangeInflecStart = ((self.surveydata[('Depth_Subsea_Meters')].iloc[-1]) + 2)
# Set Start of Y Range as TD Subsea -2
plotYRangeInflecEnd = ((self.surveydata[('Depth_Subsea_Meters')].iloc[-1]) - 2)
# Create list vars for finding and iterating through climbing TVD Gamma Slices
posPlots = []
posPlotsGamma = []
posPlotsSubSea = []
# Create list vars for finding and iterating through dropping TVD Gamma Slices
negPlots = []
negPlotsGamma = []
negPlotsSubSea = []
# Lists for combined/allin one plot
# bothPlotsGamma = []
# bothPlotsSubSea = []
# Create list vars for creating MD miniplots climbing TVD Slices
posPlotsMD = []
posPlotsMDSlicesMD = []
posPlotsMDSlicesSS = []
# Create list vars for creating MD miniplots diving TVD Slices
negPlotsMD = []
negPlotsMDSlicesMD = []
negPlotsMDSlicesSS = []
# Positive MD Slice Code
for k, v in posrangeList:
posMDSlices = self.surveydata[
(self.surveydata['Measured_Depth_Metres'] > k) & (self.surveydata['Measured_Depth_Metres'] < v)]
posMDSlices = posMDSlices.dropna()
p = {
"plotx2": posMDSlices['Measured_Depth_Metres'],
"ploty2": posMDSlices['Depth_Subsea_Meters']
}
posPlotsMD.append(p)
posPlotsMDSlicesMD.append(posMDSlices['Measured_Depth_Metres'])
posPlotsMDSlicesSS.append(posMDSlices['Depth_Subsea_Meters'])
# Negative MD Slice Code
for k, v in negrangeList:
negMDSlices = self.surveydata[
(self.surveydata['Measured_Depth_Metres'] > k) & (self.surveydata['Measured_Depth_Metres'] < v)]
negMDSlices = negMDSlices.dropna()
p = {
"plotx2": negMDSlices['Measured_Depth_Metres'],
"ploty2": negMDSlices['Depth_Subsea_Meters']
}
negPlotsMD.append(p)
negPlotsMDSlicesMD.append(negMDSlices['Measured_Depth_Metres'])
negPlotsMDSlicesSS.append(negMDSlices['Depth_Subsea_Meters'])
# Positive (Climbing) TVD Gamma Slices code
for k, v in posrangeList:
posGammaFrame = readGammaData[(readGammaData.MD > k) & (readGammaData.MD < v)]
print(posGammaFrame)
posGammaFrame = posGammaFrame.dropna()
posGammaFrame['SubSea'] = float(self.KB) - posGammaFrame['TVD']
p = {
"plotx1": posGammaFrame['Gamma'],
"ploty1": posGammaFrame['SubSea']
}
posPlots.append(p)
posPlotsGamma.append(posGammaFrame['Gamma'])
posPlotsSubSea.append(posGammaFrame['SubSea'])
# Code for combined plots
# bothPlotsGamma.append(posGammaFrame['Gamma'])
# bothPlotsSubSea.append(posGammaFrame['SubSea'])
print(posGammaFrame)
# Negative (Dropping) TVD Gamma Slices code
for k, v in negrangeList:
negGammaFrame = readGammaData[(readGammaData.MD > k) & (readGammaData.MD < v)]
negGammaFrame = negGammaFrame.dropna()
negGammaFrame['SubSea'] = float(self.KB) - negGammaFrame['TVD']
p = {
"plotx1": negGammaFrame['Gamma'],
"ploty1": negGammaFrame['SubSea']
}
negPlots.append(p)
negPlotsGamma.append(negGammaFrame['Gamma'])
negPlotsSubSea.append(negGammaFrame['SubSea'])
# Code for combined plots
# bothPlotsGamma.append(negGammaFrame['Gamma'])
# bothPlotsSubSea.append(negGammaFrame['SubSea'])
        # Set End of Y Range as TD Subsea +10
plotYRangeEnd = ((self.surveydata['Depth_Subsea_Meters'].iloc[-1]) + 10)
# Set Start of Y Range as TD Subsea -10
plotYRangeStart = ((self.surveydata['Depth_Subsea_Meters'].iloc[-1]) - 10)
        # Find the first survey where the well builds past 70 degrees; use it as the start of the X Range
        plotXRangeStartCalc = (self.surveydata['Inclination_Angle_Deg'].where(self.surveydata['Inclination_Angle_Deg'] > 70))
        # Grab the index of the first value past 70 degrees
        plotXRangeStartIndex = plotXRangeStartCalc.first_valid_index()
        # Set the plotXRangeStart variable as the first MD value past 70 degrees
plotXRangeStart = (self.surveydata['Measured_Depth_Metres'].ix[plotXRangeStartIndex])
# Set End of X Range as TD MD + 20
plotXRangeEnd = ((self.surveydata['Measured_Depth_Metres'].iloc[-1]) + 20)
# Create x and Y axes for under TVD Gamma miniplots
plotx3 = self.surveydata['Measured_Depth_Metres']
ploty3 = self.surveydata['Depth_Subsea_Meters']
# Positive Grid Plot Code
# Create and plot positive TVD slices
postvdPlotter = []
for i, plot in enumerate(posPlots):
plotTitle = (str(posrangeList[i][0]) + 'mMD' + ' to ' + str(posrangeList[i][1]) + 'mMD')
postvdPlot = figure(width=400, height=400, x_range=(0, 150),
y_range=(plotYRangeInflecEnd, plotYRangeInflecStart),
title=plotTitle, min_border=10, tools="pan,wheel_zoom,box_zoom,reset,previewsave")
plotx1 = posPlots[i]['plotx1']
ploty1 = posPlots[i]['ploty1']
postvdPlot.xaxis.axis_label = "Gamma (CPS)"
postvdPlot.yaxis.axis_label = "Subsea (m)"
postvdPlot.title_text_font_size = '16pt'
postvdPlot.xaxis.axis_label_text_font_size = '12pt'
postvdPlot.yaxis.axis_label_text_font_size = '12pt'
postvdPlot.line(plotx1, ploty1, line_width=1, line_color="green")
postvdPlotter.append(postvdPlot)
# Create and plot Positive MD miniplot slices.
posMDPlotter = []
for i, plot in enumerate(posPlotsMD):
posMDPlot = figure(width=400, height=225, x_range=(plotXRangeStart, plotXRangeEnd),
y_range=(plotYRangeStart, plotYRangeEnd),
min_border=10, tools="pan,wheel_zoom,box_zoom,reset,previewsave")
plotx2 = posPlotsMD[i]['plotx2']
ploty2 = posPlotsMD[i]['ploty2']
posMDPlot.xaxis.axis_label = "Measured Depth (m)"
posMDPlot.yaxis.axis_label = "Subsea (m)"
posMDPlot.xaxis.axis_label_text_font_size = '10pt'
posMDPlot.yaxis.axis_label_text_font_size = '10pt'
posMDPlot.line(plotx3, ploty3, line_dash=[4, 4], line_width=1, line_color="grey")
posMDPlot.line(plotx2, ploty2, line_width=3, line_color="red")
posMDPlotter.append(posMDPlot)
plotfilename = ("TVD_Up.html")
output_file(plotfilename, title="TVD Plot Climbing")
grid = gridplot([postvdPlotter, posMDPlotter])
# self.win = QWebView()
# self.win.showMaximized()
# self.win.load(QUrl('TVD_Up.html'))
# self.win.show()
show(grid)
# Negative Grid Plot Code
# Create and plot Negative TVD slices.
negtvdPlotter = []
for i, plot in enumerate(negPlots):
plotTitle = (str(negrangeList[i][0]) + 'mMD' + ' to ' + str(negrangeList[i][1]) + 'mMD')
negtvdPlot = figure(width=400, height=400, x_range=(0, 150),
y_range=(plotYRangeInflecEnd, plotYRangeInflecStart),
title=plotTitle, min_border=10,
tools="pan,wheel_zoom,box_zoom,reset,previewsave")
plotx1 = negPlots[i]['plotx1']
ploty1 = negPlots[i]['ploty1']
negtvdPlot.xaxis.axis_label = "Gamma (CPS)"
negtvdPlot.yaxis.axis_label = "Subsea (m)"
negtvdPlot.title_text_font_size = '16pt'
negtvdPlot.xaxis.axis_label_text_font_size = '12pt'
negtvdPlot.yaxis.axis_label_text_font_size = '12pt'
negtvdPlot.line(plotx1, ploty1, line_width=1, line_color="green")
negtvdPlotter.append(negtvdPlot)
# Create and plot Negative MD miniplot slices.
negMDPlotter = []
for i, plot in enumerate(negPlotsMD):
negMDPlot = figure(width=400, height=225, x_range=(plotXRangeStart, plotXRangeEnd),
y_range=(plotYRangeStart, plotYRangeEnd),
min_border=10, tools="pan,wheel_zoom,box_zoom,reset,previewsave")
plotx2 = negPlotsMD[i]['plotx2']
ploty2 = negPlotsMD[i]['ploty2']
negMDPlot.line(plotx3, ploty3, line_dash=[4, 4], line_width=1, line_color="grey")
negMDPlot.line(plotx2, ploty2, line_width=3, line_color="red")
negMDPlot.xaxis.axis_label = "Measured Depth (m)"
negMDPlot.yaxis.axis_label = "Subsea (m)"
negMDPlot.xaxis.axis_label_text_font_size = '10pt'
negMDPlot.yaxis.axis_label_text_font_size = '10pt'
negMDPlotter.append(negMDPlot)
plotfilename = ("TVD_Down.html")
output_file(plotfilename, title="TVD Plot Diving")
grid2 = gridplot([negtvdPlotter, negMDPlotter])
# self.win2 = QWebView()
# self.win2.showMaximized()
# self.win2.load(QUrl('TVD_Down.html'))
# self.win2.show()
show(grid2)
# #MULTI LINE CODE
# print(len(bothPlotsGamma))
# print(len(bothPlotsSubSea))
# tvdPlot = figure(width=500, height=500, x_range=(0, 150), y_range=(plotYRangeEnd, plotYRangeStart), min_border=80)
# #tvdPlot.multi_line(xs=posPlotsGamma, ys=posPlotsSubSea, color=['red','green','blue','aqua', 'brown', 'crimson'])
# tvdPlot.multi_line(xs=bothPlotsGamma, ys=bothPlotsSubSea, color=['red','green','blue','aqua', 'brown', 'crimson', 'DarkSalmon', 'DarkViolet', 'DarkTurquoise'])
# plotfilename = ("TVDMultiplot.html")
# output_file(plotfilename, title="TVD Plot")
# show(tvdPlot)
def close_application(self):
sys.exit()
def quickTVD(self):
# TVDNum = 0
def extrapToTVD():
            # Prompt for the target TVD and extrapolate along the current inclination.
knownTVD, ok = QInputDialog.getDouble(self.wid, "Input", "Enter the TVD to extrapolate to", decimals=2)
initTVD = float(le.text())
initMD = float(le2.text())
initINC = float(le3.text())
extrapKB = float(le5.text())
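            # Holding the current inclination, the extra along-hole distance is the
            # remaining TVD divided by cos(inc):
            #   MD = MD0 + (TVD_target - TVD0) / cos(inc)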
extrapMD = (((knownTVD - initTVD) / (math.cos(math.radians(initINC)))) + (initMD))
le4.setText(
'Held at an inclination of ' + str(initINC) + '°, ' + str(knownTVD) + 'mTVD will be reached at ' + str(
round(extrapMD, 2)) + ' mMD.')
self.wid.raise_()
def extrapToMD():
            # Prompt for the target MD and extrapolate along the current inclination.
knownMD, ok = QInputDialog.getDouble(self.wid, "Input", "Enter the MD to extrapolate to", decimals=2)
initTVD = float(le.text())
initMD = float(le2.text())
initINC = float(le3.text())
extrapKB = float(le5.text())
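            # Holding the current inclination, the TVD gained over the extra
            # along-hole distance is its projection onto vertical:
            #   TVD = TVD0 + (MD_target - MD0) * cos(inc)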
extrapTVD = ((math.cos(math.radians(initINC)) * (knownMD - initMD)) + initTVD)
# ((COS(RADIANS(C4))*(C14-C6))+(C5))
le6.setText(
'Held at an inclination of ' + str(initINC) + '°, ' + str(knownMD) + 'mMD will be reached at ' + str(
round(extrapTVD, 2)) + ' mTVD.')
# self.wid.raise_()
def getTVDint():
TVDNum, ok = QInputDialog.getDouble(self.wid, "Input", "Enter the current TVD", decimals=2)
if ok:
le.setText(str(TVDNum))
# self.wid.raise_()
def kbSet():
            extrapKB, ok = QInputDialog.getDouble(self.wid, "Input", "Enter the current KB", decimals=2)
if ok:
le5.setText(str(extrapKB))
# self.wid.raise_()
def getMDint():
num, ok = QInputDialog.getDouble(self.wid, "Input", "Enter the current MD", decimals=2)
if ok:
le2.setText(str(num))
# self.wid.raise_()
def getIncint():
num, ok = QInputDialog.getDouble(self.wid, "Input", "Enter the current Inclination", decimals=2)
if ok:
le3.setText(str(num))
# self.wid.raise_()
self.wid = QtGui.QWidget()
self.wid.resize(800, 400)
self.wid.setWindowTitle('Quick Extrapolation')
self.wid.setWindowIcon(QtGui.QIcon('oilrig.png'))
self.wid.show()
grid = QtGui.QGridLayout(self.wid)
le = QLineEdit()
le.setReadOnly(True)
le2 = QLineEdit()
le2.setReadOnly(True)
le3 = QLineEdit()
le3.setReadOnly(True)
le4 = QLineEdit()
le4.setReadOnly(True)
le5 = QLineEdit()
le5.setReadOnly(True)
le6 = QLineEdit()
le6.setReadOnly(True)
        # Pre-fill the KB from the imported survey file, if one has been loaded.
        if getattr(self, 'KB', 0) > 0:
            extrapKB = self.KB
            le5.setText(str(extrapKB))
Button_input1 = QtGui.QPushButton("Enter Known TVD")
grid.addWidget(Button_input1, 1, 0)
grid.addWidget(le, 1, 1)
Button_input1.clicked.connect(getTVDint)
Button_input2 = QtGui.QPushButton("Enter Known MD")
grid.addWidget(Button_input2, 2, 0)
grid.addWidget(le2, 2, 1)
Button_input2.clicked.connect(getMDint)
Button_input3 = QtGui.QPushButton("Enter Known INC")
grid.addWidget(Button_input3, 3, 0)
grid.addWidget(le3, 3, 1)
Button_input3.clicked.connect(getIncint)
Button_input4 = QtGui.QPushButton("Extrapolate to specified TVD")
grid.addWidget(Button_input4, 5, 0)
grid.addWidget(le4, 5, 1)
Button_input4.clicked.connect(extrapToTVD)
Button_input5 = QtGui.QPushButton("KB")
grid.addWidget(Button_input5, 4, 0)
grid.addWidget(le5, 4, 1)
Button_input5.clicked.connect(kbSet)
Button_input6 = QtGui.QPushButton("Extrapolate to specified MD")
grid.addWidget(Button_input6, 6, 0)
grid.addWidget(le6, 6, 1)
Button_input6.clicked.connect(extrapToMD)
def main():
app = QtGui.QApplication(sys.argv)
main = MainWindow()
main.show()
sys.exit(app.exec_())
main()
|
BrettMontague/Wellplotting
|
Wellplotting v06.py
|
Python
|
gpl-3.0
| 39,201
|
# -*- coding: utf-8 -*-
# Copyright(C) 2012 Gilles-Alexandre Quenot
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from weboob.tools.browser import BaseBrowser, BrowserIncorrectPassword
from .pages.login import LoginPage
from .pages.accounts_list import AccountsList, AccountHistoryPage
__all__ = ['Fortuneo']
class Fortuneo(BaseBrowser):
DOMAIN_LOGIN = 'www.fortuneo.fr'
DOMAIN = 'www.fortuneo.fr'
PROTOCOL = 'https'
CERTHASH = 'f71bd27994f395963c4a500d9d330cb50cef37ee5946146f9ca2492c2552b2ba'
ENCODING = None # refer to the HTML encoding
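    # Map URL regexes to the Page classes the browser instantiates when a fetched
    # location matches the pattern.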
PAGES = {
'.*identification.jsp.*':
LoginPage,
'.*/prive/mes-comptes/synthese-tous-comptes.jsp':
AccountsList,
'.*/prive/mes-comptes/synthese-mes-comptes\.jsp':
AccountsList,
'.*/prive/mes-comptes/livret/consulter-situation/consulter-solde\.jsp\?COMPTE_ACTIF=.*':
AccountHistoryPage,
'.*/prive/mes-comptes/compte-courant/consulter-situation/consulter-solde\.jsp\?COMPTE_ACTIF=.*':
AccountHistoryPage,
'.*/prive/default\.jsp.*':
AccountsList
}
def __init__(self, *args, **kwargs):
BaseBrowser.__init__(self, *args, **kwargs)
def home(self):
"""main page (login)"""
self.location('https://' + self.DOMAIN_LOGIN + '/fr/prive/identification.jsp')
def is_logged(self):
"""Return True if we are logged on website"""
if self.is_on_page(AccountHistoryPage) or self.is_on_page(AccountsList):
return True
else:
return False
def login(self):
"""Login to the website.
This function is called when is_logged() returns False and the
password attribute is not None."""
assert isinstance(self.username, basestring)
assert isinstance(self.password, basestring)
if not self.is_on_page(LoginPage):
self.location('https://' + self.DOMAIN_LOGIN + '/fr/identification.jsp', no_login=True)
self.page.login(self.username, self.password)
if self.is_on_page(LoginPage):
raise BrowserIncorrectPassword()
self.location('https://' + self.DOMAIN_LOGIN + '/fr/prive/mes-comptes/synthese-mes-comptes.jsp')
def get_history(self, account):
if not self.is_on_page(AccountHistoryPage):
self.location(account._link_id)
return self.page.get_operations(account)
def get_accounts_list(self):
"""accounts list"""
if not self.is_on_page(AccountsList):
self.location('https://' + self.DOMAIN_LOGIN + '/fr/prive/mes-comptes/synthese-mes-comptes.jsp')
return self.page.get_list()
def get_account(self, id):
"""Get an account from its ID"""
assert isinstance(id, basestring)
l = self.get_accounts_list()
for a in l:
if a.id == id:
return a
return None
# vim:ts=4:sw=4
|
eirmag/weboob
|
modules/fortuneo/browser.py
|
Python
|
agpl-3.0
| 3,664
|
"""Tests for classes defined in fields.py."""
import datetime
import unittest
from pytz import UTC
from xmodule.fields import Date, Timedelta, RelativeTime
from xmodule.timeinfo import TimeInfo
class DateTest(unittest.TestCase):
date = Date()
def compare_dates(self, dt1, dt2, expected_delta):
self.assertEqual(
dt1 - dt2,
expected_delta,
str(dt1) + "-" + str(dt2) + "!=" + str(expected_delta)
)
def test_from_json(self):
"""Test conversion from iso compatible date strings to struct_time"""
self.compare_dates(
DateTest.date.from_json("2013-01-01"),
DateTest.date.from_json("2012-12-31"),
datetime.timedelta(days=1)
)
self.compare_dates(
DateTest.date.from_json("2013-01-01T00"),
DateTest.date.from_json("2012-12-31T23"),
datetime.timedelta(hours=1)
)
self.compare_dates(
DateTest.date.from_json("2013-01-01T00:00"),
DateTest.date.from_json("2012-12-31T23:59"),
datetime.timedelta(minutes=1)
)
self.compare_dates(
DateTest.date.from_json("2013-01-01T00:00:00"),
DateTest.date.from_json("2012-12-31T23:59:59"),
datetime.timedelta(seconds=1)
)
self.compare_dates(
DateTest.date.from_json("2013-01-01T00:00:00Z"),
DateTest.date.from_json("2012-12-31T23:59:59Z"),
datetime.timedelta(seconds=1)
)
self.compare_dates(
DateTest.date.from_json("2012-12-31T23:00:01-01:00"),
DateTest.date.from_json("2013-01-01T00:00:00+01:00"),
datetime.timedelta(hours=1, seconds=1)
)
def test_enforce_type(self):
self.assertEqual(DateTest.date.enforce_type(None), None)
self.assertEqual(DateTest.date.enforce_type(""), None)
self.assertEqual(
DateTest.date.enforce_type("2012-12-31T23:00:01"),
datetime.datetime(2012, 12, 31, 23, 0, 1, tzinfo=UTC)
)
self.assertEqual(
DateTest.date.enforce_type(1234567890000),
datetime.datetime(2009, 2, 13, 23, 31, 30, tzinfo=UTC)
)
self.assertEqual(
DateTest.date.enforce_type(datetime.datetime(2014, 5, 9, 21, 1, 27, tzinfo=UTC)),
datetime.datetime(2014, 5, 9, 21, 1, 27, tzinfo=UTC)
)
with self.assertRaises(TypeError):
DateTest.date.enforce_type([1])
def test_return_None(self):
self.assertIsNone(DateTest.date.from_json(""))
self.assertIsNone(DateTest.date.from_json(None))
with self.assertRaises(TypeError):
DateTest.date.from_json(['unknown value'])
def test_old_due_date_format(self):
current = datetime.datetime.today()
self.assertEqual(
datetime.datetime(current.year, 3, 12, 12, tzinfo=UTC),
DateTest.date.from_json("March 12 12:00")
)
self.assertEqual(
datetime.datetime(current.year, 12, 4, 16, 30, tzinfo=UTC),
DateTest.date.from_json("December 4 16:30")
)
self.assertIsNone(DateTest.date.from_json("12 12:00"))
def test_non_std_from_json(self):
"""
Test the non-standard args being passed to from_json
"""
now = datetime.datetime.now(UTC)
delta = now - datetime.datetime.fromtimestamp(0, UTC)
self.assertEqual(
DateTest.date.from_json(delta.total_seconds() * 1000),
now
)
yesterday = datetime.datetime.now(UTC) - datetime.timedelta(days=-1)
self.assertEqual(DateTest.date.from_json(yesterday), yesterday)
def test_to_json(self):
"""
Test converting time reprs to iso dates
"""
self.assertEqual(
DateTest.date.to_json(datetime.datetime.strptime("2012-12-31T23:59:59Z", "%Y-%m-%dT%H:%M:%SZ")),
"2012-12-31T23:59:59Z"
)
self.assertEqual(
DateTest.date.to_json(DateTest.date.from_json("2012-12-31T23:59:59Z")),
"2012-12-31T23:59:59Z"
)
self.assertEqual(
DateTest.date.to_json(DateTest.date.from_json("2012-12-31T23:00:01-01:00")),
"2012-12-31T23:00:01-01:00"
)
with self.assertRaises(TypeError):
DateTest.date.to_json('2012-12-31T23:00:01-01:00')
class TimedeltaTest(unittest.TestCase):
delta = Timedelta()
def test_from_json(self):
self.assertEqual(
TimedeltaTest.delta.from_json('1 day 12 hours 59 minutes 59 seconds'),
datetime.timedelta(days=1, hours=12, minutes=59, seconds=59)
)
self.assertEqual(
TimedeltaTest.delta.from_json('1 day 46799 seconds'),
datetime.timedelta(days=1, seconds=46799)
)
def test_enforce_type(self):
self.assertEqual(TimedeltaTest.delta.enforce_type(None), None)
self.assertEqual(
TimedeltaTest.delta.enforce_type(datetime.timedelta(days=1, seconds=46799)),
datetime.timedelta(days=1, seconds=46799)
)
self.assertEqual(
TimedeltaTest.delta.enforce_type('1 day 46799 seconds'),
datetime.timedelta(days=1, seconds=46799)
)
with self.assertRaises(TypeError):
TimedeltaTest.delta.enforce_type([1])
def test_to_json(self):
self.assertEqual(
'1 days 46799 seconds',
TimedeltaTest.delta.to_json(datetime.timedelta(days=1, hours=12, minutes=59, seconds=59))
)
class TimeInfoTest(unittest.TestCase):
def test_time_info(self):
due_date = datetime.datetime(2000, 4, 14, 10, tzinfo=UTC)
grace_pd_string = '1 day 12 hours 59 minutes 59 seconds'
timeinfo = TimeInfo(due_date, grace_pd_string)
self.assertEqual(
timeinfo.close_date,
due_date + Timedelta().from_json(grace_pd_string)
)
class RelativeTimeTest(unittest.TestCase):
delta = RelativeTime()
def test_from_json(self):
self.assertEqual(
RelativeTimeTest.delta.from_json('0:05:07'),
datetime.timedelta(seconds=307)
)
self.assertEqual(
RelativeTimeTest.delta.from_json(100.0),
datetime.timedelta(seconds=100)
)
self.assertEqual(
RelativeTimeTest.delta.from_json(None),
datetime.timedelta(seconds=0)
)
with self.assertRaises(TypeError):
RelativeTimeTest.delta.from_json(1234) # int
with self.assertRaises(ValueError):
RelativeTimeTest.delta.from_json("77:77:77")
def test_enforce_type(self):
self.assertEqual(RelativeTimeTest.delta.enforce_type(None), None)
self.assertEqual(
RelativeTimeTest.delta.enforce_type(datetime.timedelta(days=1, seconds=46799)),
datetime.timedelta(days=1, seconds=46799)
)
self.assertEqual(
RelativeTimeTest.delta.enforce_type('0:05:07'),
datetime.timedelta(seconds=307)
)
with self.assertRaises(TypeError):
RelativeTimeTest.delta.enforce_type([1])
def test_to_json(self):
self.assertEqual(
"01:02:03",
RelativeTimeTest.delta.to_json(datetime.timedelta(seconds=3723))
)
self.assertEqual(
"00:00:00",
RelativeTimeTest.delta.to_json(None)
)
self.assertEqual(
"00:01:40",
RelativeTimeTest.delta.to_json(100.0)
)
error_msg = "RelativeTime max value is 23:59:59=86400.0 seconds, but 90000.0 seconds is passed"
with self.assertRaisesRegexp(ValueError, error_msg):
RelativeTimeTest.delta.to_json(datetime.timedelta(seconds=90000))
with self.assertRaises(TypeError):
RelativeTimeTest.delta.to_json("123")
def test_str(self):
self.assertEqual(
"01:02:03",
RelativeTimeTest.delta.to_json(datetime.timedelta(seconds=3723))
)
self.assertEqual(
"11:02:03",
RelativeTimeTest.delta.to_json(datetime.timedelta(seconds=39723))
)
|
lduarte1991/edx-platform
|
common/lib/xmodule/xmodule/tests/test_fields.py
|
Python
|
agpl-3.0
| 8,266
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2013 NTT MCL, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.utils.translation import ugettext_lazy as _ # noqa
import horizon
from openstack_dashboard.dashboards.project import dashboard
class NetworkTopology(horizon.Panel):
name = _("Network Topology")
slug = 'network_topology'
permissions = ('openstack.services.network', )
dashboard.Project.register(NetworkTopology)
|
Havate/havate-openstack
|
proto-build/gui/horizon/Horizon_GUI/openstack_dashboard/dashboards/project/network_topology/panel.py
|
Python
|
apache-2.0
| 1,152
|
#
# boji.py - mock koji XML-RPC and bodhi RESTful interface
#
# Copyright 2011, Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Author: Tim Flink <tflink@redhat.com>
from __future__ import with_statement
from flask import Flask, request, url_for, render_template, flash, redirect
from flaskext.xmlrpc import XMLRPCHandler, Fault
import json
from datetime import datetime
import fedora.client
import mock_fedorainfra.koji.mock_koji as mock_koji
from sqlalchemy import desc
from mock_fedorainfra.database import db_session, init_db
from mock_fedorainfra.models import BodhiComment
from mock_fedorainfra import util
# configuration
DATABASE = '/tmp/boji.db'
DEBUG = True
SECRET_KEY = 'test key'
NUM_PAGE = 20
default_response = util.make_default_update()
koji_getbuild_resp = {'owner_name': 'cebbert', 'package_name': 'kernel', 'task_id': 3085371, 'creation_event_id': 3729725, 'creation_time': '2011-05-21 17:16:58.584573', 'epoch': None, 'nvr': 'kernel-2.6.35.13-92.fc14', 'name': 'kernel', 'completion_time': '2011-05-21 18:37:45.561815', 'state': 1, 'version': '2.6.35.13', 'release': '92.fc14', 'creation_ts': 1305998218.58457, 'package_id': 8, 'id': 244715, 'completion_ts': 1306003065.56182, 'owner_id': 417}
app = Flask(__name__)
#app.debug = True
app.config.from_object(__name__)
init_db()
handler = XMLRPCHandler('mockkoji')
handler.connect(app, '/mockkoji')
@app.teardown_request
def shutdown_session(exception=None):
db_session.remove()
def get_bodhi_connection():
return fedora.client.bodhi.BodhiClient()
@handler.register
def getBuild(nvr):
if nvr is None:
raise Fault("no_build", "there has to be some build passed in!")
k = mock_koji.MockKoji()
return k.get_build(nvr)
@handler.register
def listTagged(args):
print args
params, opts = decode_args(*args)
k = mock_koji.MockKoji()
return k.get_tags(*params, **opts)
@handler.register
def tagHistory(args):
print args
print type(args)
if type(args) != str:
params, opts = decode_args(*args)
else:
params = (args,)
opts = {}
print params
print opts
k = mock_koji.MockKoji()
return k.tag_history(*params, **opts)
@handler.register
def listBuilds(args):
if type(args) != dict:
params, opts = decode_args(*args)
else:
params = ()
opts = args
del opts['__starstar']
print params
print opts
k = mock_koji.MockKoji()
return k.list_builds(*params, **opts)
@app.route('/bodhi/comment', methods=['POST'])
def bodhi_comment():
current_time = str(datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S'))
db_session.add(BodhiComment(current_time, str( request.form['title']),
str(request.form['text']), str(request.form['user_name']),
int(request.form['karma']), request.form['email'] in ['false', 'False']))
db_session.commit()
return json.dumps(default_response)
@app.route('/bodhi/list', methods=['POST','GET'])
def bodhi_list():
# we need username, release, package, request, status, type_, bugs, mine
    user = release = package = bodhirequest = status = update_type = bugs = mine = None
limit = 10
if 'username' in request.form.keys():
user = str(request.form['username'])
if 'release' in request.form.keys():
release = str(request.form['release'])
if 'package' in request.form.keys():
package = str(request.form['package'])
if 'request' in request.form.keys():
bodhirequest = str(request.form['request'])
if 'status' in request.form.keys():
status = str(request.form['status'])
if 'type_' in request.form.keys():
update_type = str(request.form['type_'])
if 'bugs' in request.form.keys():
bugs = request.form['bugs']
if 'mine' in request.form.keys():
mine = str(request.form['mine'])
if 'tg_paginate_limit' in request.form.keys():
limit = int(request.form['tg_paginate_limit'])
bodhi = get_bodhi_connection()
result = bodhi.query(package=package, limit=limit, username=user,
release=release, request=bodhirequest, status=status,
type_=update_type, bugs=bugs, mine=mine).toDict()
for update in result['updates']:
raw_comments = search_comments(update['title'])
comments = [dict(timestamp=row['date'], update=row['update'] ,text=row['text'], author=row['user'],
karma=row['karma'], anonymous=False, group=None) for row in raw_comments]
update['comments'] = comments
return json.dumps(result)
def search_comments(update):
c = db_session.query(BodhiComment).filter(BodhiComment.title.like('%%%s%%' % update)).order_by(BodhiComment.id)
comments = [dict(date=str(row.date), update=row.title, text=row.text, user=row.username,
karma=row.karma, send_email=row.send_email, id=row.id) for row in c]
return comments
def get_comments(start=0, num_comments=NUM_PAGE):
c = db_session.query(BodhiComment).order_by(desc(BodhiComment.id)).slice(start, start+num_comments)
comments = [dict(date=str(row.date), update=row.title, text=row.text, user=row.username,
karma=row.karma, send_email=row.send_email, id=row.id ) for row in c]
return comments
@app.route('/boji/comments', methods=['GET'])
def default_boji_comments():
return redirect('/boji/comments/0')
@app.route('/boji/comments/<int:start_comment>', methods=['GET'])
def boji_comments(start_comment):
comments = get_comments(start=start_comment)
for c in comments:
c['url'] = url_for('get_boji_comment', comment_id=c['id'])
next_start = (start_comment + NUM_PAGE)
prev_start = (start_comment - NUM_PAGE)
if prev_start < 0:
prev_start = 0
return render_template('view_comments.html', bodhi_comments=comments, next_start= next_start, prev_start= prev_start)
@app.route('/boji/comments/search', methods=['GET', 'POST'])
def boji_search_comments():
if request.method == 'GET':
return render_template('search_comments.html')
elif request.method == 'POST':
if 'title' in request.form.keys():
comments = search_comments(request.form['title'])
return render_template('view_comments.html', bodhi_comments=comments, next_start= 0, prev_start=0)
@app.route('/boji/comment/<int:comment_id>', methods = ['GET', 'POST', 'DELETE'])
def get_boji_comment(comment_id):
c = db_session.query(BodhiComment).filter(BodhiComment.id == comment_id).first()
if request.method == 'GET':
comment = dict(date=str(c.date), update=c.title, text=c.text, user=c.username, karma=c.karma, send_email=c.send_email, id=c.id)
return render_template('comment_detail.html', comment = comment)
# stupid browsers not supporting HTTP delete calls ...
elif request.method == 'POST':
if request.form['request'] == 'DELETE':
db_session.delete(c)
db_session.commit()
flash('Comment %d was deleted' % comment_id)
return redirect(url_for('default_boji_comments'))
elif request.method == 'DELETE':
db_session.delete(c)
db_session.commit()
return (url_for('default_boji_comments'))
@app.route('/boji/', methods = ['GET'])
def boji_main():
return render_template('boji_main.html')
@app.route('/util/cleardb', methods=['POST'])
def clear_db():
db_session.execute('delete from comments')
db_session.commit()
flash('Database was cleared')
return redirect(url_for('default_boji_comments'))
def decode_args(*args):
"""Decodes optional arguments from a flat argument list
Complementary to encode_args
Returns a tuple (args,opts) where args is a tuple and opts is a dict
"""
print args
opts = {}
if len(args) > 0:
if type(args) == dict:
return (),args
last = args[-1]
if type(last) == dict and last.get('__starstar',False):
del last['__starstar']
opts = last
args = args[:-1]
# this is a bit of a dirty hack, didn't want to enable
# allow_none right now
if args[0] == '__none':
args = ()
return args,opts
if __name__ == '__main__':
app.run(host='localhost')
|
tflink/mock_fedorainfra
|
mock_fedorainfra/boji.py
|
Python
|
gpl-2.0
| 8,965
|
from .diagram_structures import Node, Connection
from .action_def import ACTION_TYPES
import PyQt5.QtCore as QtCore
class Diagram():
def __init__(self, **kwargs):
self.nodes = []
"""List of diagram nodes"""
self.connections = []
"""List of diagram connections"""
self.file = ""
"""serialize to file"""
def save(self):
"""serialize to file"""
pass
def load(self):
"""serialize from file"""
pass
def add_node(self, x, y, template_num):
"""Add new action to diagram data scheme"""
pos = QtCore.QPoint(x, y)
node = Node(template_num, pos)
self.nodes.append(node)
self.recount_node_uniqueness(node.action_def.name)
return node
def delete_node(self, node):
"""Delete action and all its connections from
diagram data scheme"""
del_conn = []
for conn in self.connections:
if conn.input is node or conn.output is node:
del_conn.append(conn)
for conn in del_conn:
self.delete_connection(conn)
aname = node.action_def.name
self.nodes.remove(node)
self.recount_node_uniqueness(aname)
def recount_node_uniqueness(self, aname):
"""repair nodes unique numbering"""
first = None
i = 1
for node in self.nodes:
if node.action_def.name==aname:
if first is None:
first = node
node.unique=i
i += 1
if i==2:
first.unique=0
def add_connection(self, in_node, out_node):
"""Add new connection to diagram data scheme"""
conn = Connection(in_node, out_node)
self.connections.append(conn)
return conn
    def delete_connection(self, conn):
        """Remove a connection from the diagram data scheme"""
        self.connections.remove(conn)
def mark_invalid_connections(self):
"""Mark connection to moved nodes for repaint"""
n = []
for node in self.nodes:
if node.repaint_conn:
n.append(node)
node.repaint_conn = False
for conn in self.connections:
if conn.input in n or conn.output in n:
conn.repaint = True
def get_conn(self, node, pos):
"""
return connection for set node and possition
"""
for conn in self.connections:
if conn.is_conn_point(node, pos):
return conn
return None
def get_action_dict(self):
"""return dict of action grouped to dictionary accoding group"""
dict = {}
for i in range(0, len(ACTION_TYPES)):
if ACTION_TYPES[i].group not in dict:
dict[ACTION_TYPES[i].group] = []
dict[ACTION_TYPES[i].group].append(i)
return dict
@staticmethod
def action_template(i):
"""return action template i"""
return ACTION_TYPES[i]
|
GeoMop/GeoMop
|
src/Analysis/ui/data/diagram.py
|
Python
|
gpl-3.0
| 3,096
|
# -*- coding: utf-8 -*-
"""
File name: __init__
Reference:
Introduction:
Date: 2016-05-20
Last modified: 2016-05-22
Author: enihsyou
"""
import algorithm.bubble_sort
import algorithm.build_in
import algorithm.cocktail_shaker_sort
import algorithm.heap_sort
import algorithm.insertion_sort
import algorithm.merge_sort
import algorithm.quick_sort
import algorithm.selection_sort
__all__ = ['bubble_sort', 'cocktail_shaker_sort', 'selection_sort',
'insertion_sort', 'heap_sort', 'merge_sort', 'quick_sort', 'build_in']
n2 = ['bubble_sort', 'cocktail_shaker_sort', 'selection_sort',
'insertion_sort']
nlogn = ['heap_sort', 'merge_sort', 'quick_sort', 'build_in']
|
enihsyou/Sorting-algorithm
|
algorithm_Python/__init__.py
|
Python
|
mit
| 677
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2012 ~ 2013 Deepin, Inc.
# 2012 ~ 2013 Hailong Qiu
#
# Author: Hailong Qiu <356752238@qq.com>
# Maintainer: Hailong Qiu <356752238@qq.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from dtk.ui.theme import ui_theme
from dtk.ui.scrolled_window import ScrolledWindow
from dtk.ui.utils import propagate_expose
from dtk.ui.draw import draw_vlinear
from skin import app_theme
from listview import ListView
from listview_base import Text
from treeview_base import TreeViewBase
from net_search import Search
from notebook import NoteBook
from color import alpha_color_hex_to_cairo
from utils import get_text_size
from draw import draw_text, draw_pixbuf
import gtk
class PlayListView(object):
def __init__(self):
self.one_close = app_theme.get_pixbuf("treeview/1-close.png")
self.one_open = app_theme.get_pixbuf("treeview/1-open.png")
self.two_close = app_theme.get_pixbuf("treeview/2-close.png")
self.two_open = app_theme.get_pixbuf("treeview/2-open.png")
self.three_close = app_theme.get_pixbuf("treeview/3-close.png")
self.three_open = app_theme.get_pixbuf("treeview/3-open.png")
#
self.tree_view_open = app_theme.get_pixbuf("treeview/open.png")
self.tree_view_close = app_theme.get_pixbuf("treeview/close.png")
self.tree_view_right = app_theme.get_pixbuf("treeview/right.png")
self.tree_view_bottom = app_theme.get_pixbuf("treeview/bottom.png")
#
self.listview_color = ui_theme.get_color("scrolledbar")
self.play_list_vbox = gtk.VBox()
#
self.list_view_vbox = gtk.VBox()
self.list_scroll_win = ScrolledWindow(0, 0)
self.list_scroll_win.set_policy(gtk.POLICY_NEVER, gtk.POLICY_ALWAYS)
self.list_view = ListView()
#
self.play_list_con = PlayListControl()
#
self.list_view_vbox.pack_start(self.list_scroll_win, True, True)
self.list_view_vbox.pack_start(self.play_list_con, False, False)
        # Network list and search box.
self.tree_scroll_win = ScrolledWindow(0, 0)
self.tree_scroll_win.set_policy(gtk.POLICY_NEVER, gtk.POLICY_ALWAYS)
self.tree_view_vbox = gtk.VBox()
self.tree_view = TreeViewBase()
self.search_ali = gtk.Alignment(0, 0, 1, 1)
self.search = Search()
self.search_ali.add(self.search)
#
self.search_ali.set_padding(7, 5, 12, 12)
self.tree_view_vbox.pack_start(self.search_ali, False, False)
self.tree_view_vbox.pack_start(self.tree_scroll_win, True, True)
self.search_ali.connect("expose-event", self.search_ali_expose_event)
#
self.note_book = NoteBook()
#
self.list_view.on_draw_sub_item = self.__listview_on_draw_sub_item
self.list_view.columns.add_range(["filename", "time"])
self.list_view.columns[0].width = 120
self.list_view.columns[1].width = 95
#
self.note_book.hide_title()
self.tree_view.paint_nodes_event = self.__treeview_paint_nodes_event
#
self.list_scroll_win.add_with_viewport(self.list_view)
self.tree_scroll_win.add_with_viewport(self.tree_view)
#self.note_book.add_layout1(self.list_scroll_win)
self.note_book.add_layout1(self.list_view_vbox)
self.note_book.add_layout2(self.tree_view_vbox)
#self.play_list_vbox.pack_start(self.scroll_win, True, True)
self.play_list_vbox.pack_start(self.note_book, True, True)
def __listview_on_draw_sub_item(self, e):
color = self.listview_color.get_color()
if e.double_items == e.item:
e.text_color = "#000000"
text_size=9
color_info = [(0, (color, 0.8)), (1, (color, 0.8))]
draw_vlinear(e.cr,
e.x, e.y, e.w, e.h,
color_info
)
elif e.item in e.single_items:
e.text_color = "#FFFFFF"
text_size=9
color_info = [(0, (color, 0.5)), (1, (color, 0.5))]
draw_vlinear(e.cr,
e.x, e.y, e.w, e.h,
color_info
)
elif e.motion_items == e.item:
e.text_color = "#FFFFFF"
text_size=9
color_info = [(0, (color, 0.2)), (1, (color, 0.2))]
draw_vlinear(e.cr,
e.x, e.y, e.w, e.h,
color_info
)
else:
e.text_color = "#FFFFFF"
text_size=9
#
text = e.text.decode("utf-8")
one_width = self.list_view.columns[0].width
two_width = self.list_view.columns[1].width
        #if e.w == one_width: # first column, which shows the item's display name.
if e.column_index == 0:
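            # Truncate the title so it fits the first column, appending "..." once
            # the accumulated pixel width exceeds the column width.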
#
t_width = 0
t_index = 0
add_point = False
for t in text:
t_width += get_text_size(t, text_size=text_size)[0]
if t_width > one_width - 20:
add_point = True
break
t_index += 1
if add_point:
text = text[:t_index] + "..."
#
alignment = Text.LEFT
x = e.x + 15
elif e.w == two_width:
alignment = Text.RIGHT
x = e.x - 15
e.draw_text(e.cr,
str(text),
x, e.y, e.w, e.h,
text_color=e.text_color,
text_size=text_size,
alignment=alignment)
def __treeview_paint_nodes_event(self, node_event):
color = self.listview_color.get_color()
text_color = "#FFFFFF"
        # Highlight states: single click, hover, and double click.
if node_event.node in node_event.single_items:
color_info = [(0, (color, 0.45)), (1, (color, 0.45))]
draw_vlinear(node_event.cr,
node_event.x, node_event.y, node_event.w, node_event.h,
color_info
)
#text_color = "#000000"
elif node_event.node in node_event.motion_items:
color_info = [(0, (color, 0.75)), (1, (color, 0.75))]
draw_vlinear(node_event.cr,
node_event.x, node_event.y, node_event.w, node_event.h,
color_info
)
#
        x_padding = 12  # keep alignment with the search box.
        if 0 == node_event.node.leave: # root node, e.g. "Watched", "Youku", "PPS".
if node_event.node.is_expanded:
pixbuf = self.one_open.get_pixbuf()
else:
pixbuf = self.one_close.get_pixbuf()
elif 1 == node_event.node.leave: #
if node_event.node.is_expanded:
pixbuf = self.two_open.get_pixbuf()
else:
pixbuf = self.two_close.get_pixbuf()
else:
if node_event.node.is_expanded:
pixbuf = self.three_open.get_pixbuf()
else:
pixbuf = self.three_close.get_pixbuf()
#
icon_x = node_event.x + x_padding
icon_y = node_event.y + node_event.h/2 - pixbuf.get_height()/2 + 1
if node_event.node.leave > 1:
icon_x += (node_event.node.leave - 1) * pixbuf.get_width()
if node_event.node.leave > 0:
text_color = "#a8a8a8"
##########
        # Draw the icon.
if node_event.node.nodes != []:
draw_pixbuf(node_event.cr,
pixbuf,
icon_x,
icon_y)
        # Draw the text.
text_x_padding = 15
text_size = 9
draw_text(node_event.cr,
node_event.node.text,
icon_x + text_x_padding,
node_event.y + node_event.h/2 - get_text_size(node_event.node.text, text_size=9)[1]/2,
text_color=text_color,
text_size=text_size
)
def search_ali_expose_event(self, widget, event):
cr = widget.window.cairo_create()
rect = widget.allocation
#
bg_color = "#272727"
cr.set_source_rgba(*alpha_color_hex_to_cairo((bg_color,1.0)))
cr.rectangle(rect.x, rect.y, rect.width + 1, rect.height)
cr.fill()
#
propagate_expose(widget, event)
return True
class PlayListControl(gtk.HBox):
def __init__(self):
gtk.HBox.__init__(self)
self.del_btn = gtk.Button("del")
self.add_btn = gtk.Button("add")
self.empty_btn = gtk.Button('')
height = 22
self.del_btn.set_size_request(-1, height)
self.add_btn.set_size_request(-1, height)
self.empty_btn.set_size_request(-1, height)
# init pixbuf.
self.del_pixbuf = app_theme.get_pixbuf("bottom_buttons/play_list_del_file.png").get_pixbuf()
self.add_pixbuf = app_theme.get_pixbuf("bottom_buttons/play_list_add_file.png").get_pixbuf()
#
self.del_btn.connect("expose-event", self.del_btn_expose_event)
self.add_btn.connect("expose-event", self.add_btn_expose_event)
self.empty_btn.connect("expose-event", self.empty_btn_expose_event)
#
self.pack_start(self.empty_btn, True, True)
self.pack_start(self.add_btn, False, False)
self.pack_start(self.del_btn, False, False)
def del_btn_expose_event(self, widget, event):
cr = widget.window.cairo_create()
rect = widget.allocation
self.paint_bg(cr, rect)
x = rect.x + rect.width/2 - self.del_pixbuf.get_width()/2
y = rect.y + rect.height/2 - self.del_pixbuf.get_height()/2
if widget.state == gtk.STATE_ACTIVE:
x += 1
y += 1
draw_pixbuf(cr, self.del_pixbuf, x, y)
return True
def add_btn_expose_event(self, widget, event):
cr = widget.window.cairo_create()
rect = widget.allocation
self.paint_bg(cr, rect)
x = rect.x + rect.width/2 - self.add_pixbuf.get_width()/2
y = rect.y + rect.height/2 - self.add_pixbuf.get_height()/2
if widget.state == gtk.STATE_ACTIVE:
x += 1
y += 1
draw_pixbuf(cr, self.add_pixbuf, x, y)
return True
def empty_btn_expose_event(self, widget, event):
cr = widget.window.cairo_create()
rect = widget.allocation
self.paint_bg(cr, rect)
return True
def paint_bg(self, cr, rect):
cr.set_source_rgba(*alpha_color_hex_to_cairo(("#202020", 1.0)))
cr.rectangle(*rect)
cr.fill()
| linuxdeepin/deepin-media-player | src/widget/playlistview.py | Python | gpl-3.0 | 11,382 |
# -*- test-case-name: twisted.test.test_nmea -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""NMEA 0183 implementation
Maintainer: Bob Ippolito
The following NMEA 0183 sentences are currently understood::
GPGGA (fix)
GPGLL (position)
GPRMC (position and time)
GPGSA (active satellites)
The following NMEA 0183 sentences require implementation::
None really, the others aren't generally useful or implemented in most devices anyhow
Other desired features::
- A NMEA 0183 producer to emulate GPS devices (?)
"""
import operator
from twisted.protocols import basic
from twisted.python.compat import reduce
POSFIX_INVALID, POSFIX_SPS, POSFIX_DGPS, POSFIX_PPS = 0, 1, 2, 3
MODE_AUTO, MODE_FORCED = 'A', 'M'
MODE_NOFIX, MODE_2D, MODE_3D = 1, 2, 3
class InvalidSentence(Exception):
pass
class InvalidChecksum(Exception):
pass
class NMEAReceiver(basic.LineReceiver):
"""This parses most common NMEA-0183 messages, presumably from a serial GPS device at 4800 bps
"""
delimiter = '\r\n'
dispatch = {
'GPGGA': 'fix',
'GPGLL': 'position',
'GPGSA': 'activesatellites',
'GPRMC': 'positiontime',
'GPGSV': 'viewsatellites', # not implemented
'GPVTG': 'course', # not implemented
'GPALM': 'almanac', # not implemented
'GPGRS': 'range', # not implemented
'GPGST': 'noise', # not implemented
'GPMSS': 'beacon', # not implemented
'GPZDA': 'time', # not implemented
}
# generally you may miss the beginning of the first message
ignore_invalid_sentence = 1
# checksums shouldn't be invalid
ignore_checksum_mismatch = 0
# ignore unknown sentence types
ignore_unknown_sentencetypes = 0
# do we want to even bother checking to see if it's from the 20th century?
convert_dates_before_y2k = 1
def lineReceived(self, line):
if not line.startswith('$'):
if self.ignore_invalid_sentence:
return
raise InvalidSentence("%r does not begin with $" % (line,))
# message is everything between $ and *, checksum is xor of all ASCII values of the message
strmessage, checksum = line[1:].strip().split('*')
message = strmessage.split(',')
sentencetype, message = message[0], message[1:]
dispatch = self.dispatch.get(sentencetype, None)
if (not dispatch) and (not self.ignore_unknown_sentencetypes):
raise InvalidSentence("sentencetype %r" % (sentencetype,))
if not self.ignore_checksum_mismatch:
checksum, calculated_checksum = int(checksum, 16), reduce(operator.xor, map(ord, strmessage))
if checksum != calculated_checksum:
raise InvalidChecksum("Given 0x%02X != 0x%02X" % (checksum, calculated_checksum))
handler = getattr(self, "handle_%s" % dispatch, None)
decoder = getattr(self, "decode_%s" % dispatch, None)
if not (dispatch and handler and decoder):
# missing dispatch, handler, or decoder
return
# return handler(*decoder(*message))
try:
decoded = decoder(*message)
except Exception, e:
raise InvalidSentence("%r is not a valid %s (%s) sentence" % (line, sentencetype, dispatch))
return handler(*decoded)
def decode_position(self, latitude, ns, longitude, ew, utc, status):
latitude, longitude = self._decode_latlon(latitude, ns, longitude, ew)
utc = self._decode_utc(utc)
if status == 'A':
status = 1
else:
status = 0
return (
latitude,
longitude,
utc,
status,
)
def decode_positiontime(self, utc, status, latitude, ns, longitude, ew, speed, course, utcdate, magvar, magdir):
utc = self._decode_utc(utc)
latitude, longitude = self._decode_latlon(latitude, ns, longitude, ew)
if speed != '':
speed = float(speed)
else:
speed = None
if course != '':
course = float(course)
else:
course = None
utcdate = 2000+int(utcdate[4:6]), int(utcdate[2:4]), int(utcdate[0:2])
if self.convert_dates_before_y2k and utcdate[0] > 2073:
# GPS was invented by the US DoD in 1973, but NMEA uses 2 digit year.
# Highly unlikely that we'll be using NMEA or this twisted module in 70 years,
# but remotely possible that you'll be using it to play back data from the 20th century.
utcdate = (utcdate[0] - 100, utcdate[1], utcdate[2])
if magvar != '':
magvar = float(magvar)
if magdir == 'W':
magvar = -magvar
else:
magvar = None
return (
latitude,
longitude,
speed,
course,
# UTC seconds past utcdate
utc,
# UTC (year, month, day)
utcdate,
# None or magnetic variation in degrees (west is negative)
magvar,
)
def _decode_utc(self, utc):
utc_hh, utc_mm, utc_ss = map(float, (utc[:2], utc[2:4], utc[4:]))
return utc_hh * 3600.0 + utc_mm * 60.0 + utc_ss
def _decode_latlon(self, latitude, ns, longitude, ew):
latitude = float(latitude[:2]) + float(latitude[2:])/60.0
if ns == 'S':
latitude = -latitude
longitude = float(longitude[:3]) + float(longitude[3:])/60.0
if ew == 'W':
longitude = -longitude
return (latitude, longitude)
def decode_activesatellites(self, mode1, mode2, *args):
satellites, (pdop, hdop, vdop) = args[:12], map(float, args[12:])
satlist = []
for n in satellites:
if n:
satlist.append(int(n))
else:
satlist.append(None)
mode = (mode1, int(mode2))
return (
# satellite list by channel
tuple(satlist),
# (MODE_AUTO/MODE_FORCED, MODE_NOFIX/MODE_2DFIX/MODE_3DFIX)
mode,
# position dilution of precision
pdop,
# horizontal dilution of precision
hdop,
# vertical dilution of precision
vdop,
)
def decode_fix(self, utc, latitude, ns, longitude, ew, posfix, satellites, hdop, altitude, altitude_units, geoid_separation, geoid_separation_units, dgps_age, dgps_station_id):
latitude, longitude = self._decode_latlon(latitude, ns, longitude, ew)
utc = self._decode_utc(utc)
posfix = int(posfix)
satellites = int(satellites)
hdop = float(hdop)
altitude = (float(altitude), altitude_units)
if geoid_separation != '':
geoid = (float(geoid_separation), geoid_separation_units)
else:
geoid = None
if dgps_age != '':
dgps = (float(dgps_age), dgps_station_id)
else:
dgps = None
return (
# seconds since 00:00 UTC
utc,
# latitude (degrees)
latitude,
# longitude (degrees)
longitude,
# position fix status (POSFIX_INVALID, POSFIX_SPS, POSFIX_DGPS, POSFIX_PPS)
posfix,
# number of satellites used for fix 0 <= satellites <= 12
satellites,
# horizontal dilution of precision
hdop,
# None or (altitude according to WGS-84 ellipsoid, units (typically 'M' for meters))
altitude,
# None or (geoid separation according to WGS-84 ellipsoid, units (typically 'M' for meters))
geoid,
# (age of dgps data in seconds, dgps station id)
dgps,
)
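# --- Editor's note: minimal illustrative sketch appended here; it is not part of the
# original Twisted module. It demonstrates the checksum rule described in lineReceived():
# the checksum is the XOR of the ASCII values of everything between '$' and '*', written
# as two hex digits after the '*'. The GPGLL body below is a made-up example, and running
# this block requires the module's own imports (Twisted, under Python 2).
if __name__ == '__main__':
    body = 'GPGLL,4916.45,N,12311.12,W,225444,A'
    checksum = reduce(operator.xor, map(ord, body))
    print('$%s*%02X' % (body, checksum))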
| ecolitan/fatics | venv/lib/python2.7/site-packages/twisted/protocols/gps/nmea.py | Python | agpl-3.0 | 7,960 |
from south.db import db
from django.db import models
from channelguide.channels.models import *
class Migration:
def forwards(self, orm):
# Adding model 'AddedChannel'
db.create_table('cg_channel_added', (
('timestamp', orm['channels.AddedChannel:timestamp']),
('channel', orm['channels.AddedChannel:channel']),
('user', orm['channels.AddedChannel:user']),
))
db.send_create_signal('channels', ['AddedChannel'])
# Adding model 'LastApproved'
db.create_table('cg_channel_last_approved', (
('timestamp', orm['channels.LastApproved:timestamp']),
))
db.send_create_signal('channels', ['LastApproved'])
# Adding model 'Item'
db.create_table('cg_channel_item', (
('guid', orm['channels.Item:guid']),
('description', orm['channels.Item:description']),
('url', orm['channels.Item:url']),
('thumbnail_extension', orm['channels.Item:thumbnail_extension']),
('thumbnail_url', orm['channels.Item:thumbnail_url']),
('mime_type', orm['channels.Item:mime_type']),
('date', orm['channels.Item:date']),
('size', orm['channels.Item:size']),
('id', orm['channels.Item:id']),
('channel', orm['channels.Item:channel']),
('name', orm['channels.Item:name']),
))
db.send_create_signal('channels', ['Item'])
# Adding model 'Channel'
db.create_table('cg_channel', (
('featured_by', orm['channels.Channel:featured_by']),
('last_moderated_by', orm['channels.Channel:last_moderated_by']),
('moderator_shared_by', orm['channels.Channel:moderator_shared_by']),
('creation_time', orm['channels.Channel:creation_time']),
('modified', orm['channels.Channel:modified']),
('featured', orm['channels.Channel:featured']),
('postal_code', orm['channels.Channel:postal_code']),
('owner', orm['channels.Channel:owner']),
('waiting_for_reply_date', orm['channels.Channel:waiting_for_reply_date']),
('id', orm['channels.Channel:id']),
('license', orm['channels.Channel:license']),
('archived', orm['channels.Channel:archived']),
('hi_def', orm['channels.Channel:hi_def']),
('state', orm['channels.Channel:state']),
('website_url', orm['channels.Channel:website_url']),
('description', orm['channels.Channel:description']),
('featured_at', orm['channels.Channel:featured_at']),
('moderator_shared_at', orm['channels.Channel:moderator_shared_at']),
('adult', orm['channels.Channel:adult']),
('feed_modified', orm['channels.Channel:feed_modified']),
('was_featured', orm['channels.Channel:was_featured']),
('publisher', orm['channels.Channel:publisher']),
('name', orm['channels.Channel:name']),
('language', orm['channels.Channel:language']),
('url', orm['channels.Channel:url']),
('geoip', orm['channels.Channel:geoip']),
('feed_etag', orm['channels.Channel:feed_etag']),
('thumbnail_extension', orm['channels.Channel:thumbnail_extension']),
('approved_at', orm['channels.Channel:approved_at']),
))
db.send_create_signal('channels', ['Channel'])
# Adding ManyToManyField 'Channel.categories'
db.create_table('cg_category_map', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('channel', models.ForeignKey(orm.Channel, null=False)),
('category', models.ForeignKey(orm['labels.Category'], null=False))
))
# Creating unique_together for [channel, user] on AddedChannel.
db.create_unique('cg_channel_added', ['channel_id', 'user_id'])
def backwards(self, orm):
# Deleting model 'AddedChannel'
db.delete_table('cg_channel_added')
# Deleting model 'LastApproved'
db.delete_table('cg_channel_last_approved')
# Deleting model 'Item'
db.delete_table('cg_channel_item')
# Deleting model 'Channel'
db.delete_table('cg_channel')
# Dropping ManyToManyField 'Channel.categories'
db.delete_table('cg_category_map')
# Deleting unique_together for [channel, user] on AddedChannel.
db.delete_unique('cg_channel_added', ['channel_id', 'user_id'])
models = {
'labels.language': {
'Meta': {'db_table': "'cg_channel_language'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'auth.user': {
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2009, 7, 22, 16, 3, 13, 807825)'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2009, 7, 22, 16, 3, 13, 807695)'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'labels.tag': {
'Meta': {'db_table': "'cg_tag'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'auth.permission': {
'Meta': {'unique_together': "(('content_type', 'codename'),)"},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'channels.item': {
'Meta': {'db_table': "'cg_channel_item'"},
'channel': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'items'", 'to': "orm['channels.Channel']"}),
'date': ('django.db.models.fields.DateTimeField', [], {}),
'description': ('django.db.models.fields.TextField', [], {}),
'guid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mime_type': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'size': ('django.db.models.fields.IntegerField', [], {}),
'thumbnail_extension': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '8', 'null': 'True'}),
'thumbnail_url': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '255'})
},
'channels.channel': {
'Meta': {'db_table': "'cg_channel'"},
'adult': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'approved_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'archived': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'categories': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['labels.Category']"}),
'creation_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {}),
'featured': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'featured_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'featured_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'featured_set'", 'null': 'True', 'to': "orm['auth.User']"}),
'feed_etag': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'feed_modified': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'geoip': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100'}),
'hi_def': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'channels'", 'db_column': "'primary_language_id'", 'to': "orm['labels.Language']"}),
'last_moderated_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'last_moderated_set'", 'null': 'True', 'to': "orm['auth.User']"}),
'license': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '40'}),
'moderator_shared_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'moderator_shared_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'moderator_shared_set'", 'null': 'True', 'to': "orm['auth.User']"}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'channels'", 'to': "orm['auth.User']"}),
'postal_code': ('django.db.models.fields.CharField', [], {'max_length': '15'}),
'publisher': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'state': ('django.db.models.fields.CharField', [], {'default': "'N'", 'max_length': '1'}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['labels.Tag']"}),
'thumbnail_extension': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '8', 'null': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'waiting_for_reply_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'was_featured': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'website_url': ('django.db.models.fields.URLField', [], {'max_length': '255'})
},
'labels.category': {
'Meta': {'db_table': "'cg_category'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'on_frontpage': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'})
},
'auth.group': {
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'})
},
'channels.addedchannel': {
'Meta': {'unique_together': "[('channel', 'user')]", 'db_table': "'cg_channel_added'"},
'channel': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'added_channels'", 'to': "orm['channels.Channel']"}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'added_channels'", 'to': "orm['auth.User']"})
},
'channels.lastapproved': {
'Meta': {'db_table': "'cg_channel_last_approved'"},
'timestamp': ('django.db.models.fields.DateTimeField', [], {'primary_key': 'True'})
}
}
complete_apps = ['channels']
| kmshi/miroguide | channelguide/channels/migrations/0001_initial.py | Python | agpl-3.0 | 14,056 |
# Copyright 2012 Nebula, Inc.
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.tests.unit.integrated.v3 import test_servers
class DeferredDeleteSampleJsonTests(test_servers.ServersSampleBase):
extension_name = "os-deferred-delete"
def setUp(self):
super(DeferredDeleteSampleJsonTests, self).setUp()
self.flags(reclaim_instance_interval=1)
def test_restore(self):
uuid = self._post_server()
response = self._do_delete('servers/%s' % uuid)
response = self._do_post('servers/%s/action' % uuid,
'restore-post-req', {})
self.assertEqual(response.status_code, 202)
self.assertEqual(response.content, '')
def test_force_delete(self):
uuid = self._post_server()
response = self._do_delete('servers/%s' % uuid)
response = self._do_post('servers/%s/action' % uuid,
'force-delete-post-req', {})
self.assertEqual(response.status_code, 202)
self.assertEqual(response.content, '')
| luzheqi1987/nova-annotation | nova/tests/unit/integrated/v3/test_deferred_delete.py | Python | apache-2.0 | 1,615 |
# -*- coding: utf-8 -*-
from PyQt5 import QtCore, QtBluetooth
from PIL import Image,ImageDraw,ImageFont
import binascii
import time
from functools import reduce
class MetawatchThread(QtCore.QObject):
connected = QtCore.pyqtSignal()
disconnected = QtCore.pyqtSignal()
readyRead = QtCore.pyqtSignal()
error = QtCore.pyqtSignal(str)
def __init__(self, device, postman, parent=None):
super(MetawatchThread, self).__init__(parent)
self.postman = postman
self.device = device
self.dev = QtBluetooth.QBluetoothSocket(QtBluetooth.QBluetoothServiceInfo.RfcommProtocol)
self.dev.readyRead.connect(self.readyRead.emit)
self.CRC = CRC_CCITT()
self.loop = QtCore.QEventLoop()
self.timer = QtCore.QTimer()
self.timer.setSingleShot(True)
self.timer.timeout.connect(self.loop.quit)
def connect(self):
addr = QtBluetooth.QBluetoothAddress(self.device.address)
print('connecting to', addr)
self.dev.connected.connect(self.connected)
self.dev.disconnected.connect(self.disconnected)
self.dev.error.connect(self.onError)
self.dev.connectToService(addr, QtBluetooth.QBluetoothUuid.SerialPort)
def disconnect(self):
self.dev.disconnectFromService()
def onError(self, E):
self.error.emit(self.dev.errorString())
def buzz(self):
msg = self.pack(b'\x23\x00\x01\xf4\x01\xf4\x01\x01')
self.write(msg)
self.postman.Post.emit(
"MetawatchThread",
self.device.getName(),
str(binascii.hexlify(msg), 'utf-8'))
def setButtonEnabled(self, button, value = True):
# documentation says: first the button, then the mode
msg = self.pack(b'\x46\x00\x00' + button + b'\x00\x34' + button)
self.write(msg)
self.postman.Post.emit(
"MetawatchThread",
self.device.getName(),
str(binascii.hexlify(msg), 'utf-8'))
def setButtonsEnabled(self, value = True):
self.timer.start(100)
self.loop.exec()
self.setButtonEnabled(b'\x00')
self.setButtonEnabled(b'\x01')
self.setButtonEnabled(b'\x02')
self.setButtonEnabled(b'\x03')
self.setButtonEnabled(b'\x04')
self.setButtonEnabled(b'\x05')
self.setButtonEnabled(b'\x06')
def pack(self, message):
text=b'\x01'+bytes([len(message)+4])+message;
crc=self.CRC.checksum(text);
text=text+(crc&0xFF).to_bytes(1, byteorder='little')+(crc>>8).to_bytes(1, byteorder='little') #Little Endian
return text
def write(self, text):
self.dev.write(text)
self.timer.start(10)
self.loop.exec()
def writebuffer(self, mode, row1, data1, row2=None, data2=None):
"""Writes image data to the Draw Buffer.
You'll need to send activatedisplay() to swap things over when done."""
option=mode; #idle screen, single row.
if row2:
option = option | 0x10;
packet=b'\x40' + bytes([option]) + bytes([row1]) + data1[0:12] + b'\x00'
if row2:
packet= packet + bytes([row2]) + data2[0:11]
self.write(self.pack(packet))
def writeImage(self, mode=0, image="dalek.bmp", live=False, from_y=0, to_y=96, from_x=0, to_x=96):
"""Write a 1bpp BMP file to the watch in the given mode."""
im=Image.open(image);
pix=im.load();
for y in range(max(from_y, 0), min(to_y, 96)):
rowdat=b'';
for x in range(max(from_x, 0), min(to_x, 96),8):
byte=0;
for pindex in range(0,8):
pixel=pix[x+pindex,y];
if (pixel > 0):
pixel = 1
byte =((byte>>1)|(pixel<<7))
rowdat = rowdat + bytes([byte])
self.writebuffer(mode, y, rowdat)
self.write(self.pack(b'\x43\x00')) #swap DisplayBuffer
def writeText(self,mode=0,text='', pos_x=0, pos_y=0):
image = Image.new("L",(96,96), 'black')
(zeilen, spalten) = self.draw_word_wrap(image,text,pos_x,pos_y)
image.save('tmp.bmp','BMP')
image.close()
self.write(self.pack(b'\x42\x00\x01')) #remove Clock (Fullscreen)
self.writeImage(mode,"tmp.bmp",live=False, from_y=pos_y, to_y=pos_y + (zeilen * 11))
def draw_word_wrap(self, img, text, xpos=0, ypos=0, max_width=95):
font=ImageFont.load_default()
draw = ImageDraw.Draw(img)
(text_size_x, text_size_y) = draw.textsize(text, font=font)
remaining = max_width
space_width, space_height = draw.textsize(' ', font=font)
output_text = []
for word in text.split(None):
word_width, word_height = draw.textsize(word, font=font)
if word_width + space_width > remaining:
output_text.append(word)
remaining = max_width - word_width
else:
if not output_text:
output_text.append(word)
else:
output = output_text.pop()
output += ' %s' % word
output_text.append(output)
remaining = remaining - (word_width + space_width)
for text in output_text:
draw.text((xpos, ypos), text, font=font, fill='white')
ypos += text_size_y
return (len(output_text), reduce(max, [len(s) for s in output_text]))
def readAll(self):
return self.dev.readAll()
class CRC_CCITT:
def __init__(self, inverted=True):
self.inverted=inverted
self.tab=256*[[]]
for i in range(256):
crc=0
c = i << 8
for j in range(8):
if (crc ^ c) & 0x8000:
crc = ( crc << 1) ^ 0x1021
else:
crc = crc << 1
c = c << 1
crc = crc & 0xffff
self.tab[i]=crc
def update_crc(self, crc, c):
c=0x00ff & (c % 256)
if self.inverted: c=self.flip(c)
tmp = ((crc >> 8) ^ c) & 0xffff
crc = (((crc << 8) ^ self.tab[tmp])) & 0xffff
return crc
def checksum(self,str):
crcval=0xFFFF
for c in str:
crcval=self.update_crc(crcval, c)
return crcval
def flip(self,c):
l=[0, 8, 4, 12, 2, 10, 6, 14, 1, 9, 5, 13, 3, 11, 7, 15]
return ((l[c&0x0F]) << 4) + l[(c & 0xF0) >> 4]
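# --- Editor's note: illustrative sketch appended by the editor; it is not part of the
# original module. It mirrors the frame layout built by MetawatchThread.pack(): 0x01,
# total length, the message payload, then the CRC-CCITT appended little-endian. The
# payload is the buzz command used in buzz() above; no Bluetooth connection is needed,
# but running this file directly still requires PyQt5 and PIL for the top-level imports.
if __name__ == '__main__':
    crc = CRC_CCITT()
    message = b'\x23\x00\x01\xf4\x01\xf4\x01\x01'
    frame = b'\x01' + bytes([len(message) + 4]) + message
    checksum = crc.checksum(frame)
    frame += (checksum & 0xFF).to_bytes(1, 'little') + (checksum >> 8).to_bytes(1, 'little')
    print(frame.hex())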
| GFEeV/SUNHand | Devices/MetaWatch/metawatchThread.py | Python | gpl-3.0 | 5,970 |
# postgresql/pypostgresql.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: postgresql+pypostgresql
:name: py-postgresql
:dbapi: pypostgresql
:connectstring: postgresql+pypostgresql://user:password@host:port/dbname[?key=value&key=value...]
:url: http://python.projects.pgfoundry.org/
"""
from ... import util
from ... import types as sqltypes
from .base import PGDialect, PGExecutionContext
from ... import processors
class PGNumeric(sqltypes.Numeric):
def bind_processor(self, dialect):
return processors.to_str
def result_processor(self, dialect, coltype):
if self.asdecimal:
return None
else:
return processors.to_float
class PGExecutionContext_pypostgresql(PGExecutionContext):
pass
class PGDialect_pypostgresql(PGDialect):
driver = 'pypostgresql'
supports_unicode_statements = True
supports_unicode_binds = True
description_encoding = None
default_paramstyle = 'pyformat'
# requires trunk version to support sane rowcounts
# TODO: use dbapi version information to set this flag appropriately
supports_sane_rowcount = True
supports_sane_multi_rowcount = False
execution_ctx_cls = PGExecutionContext_pypostgresql
colspecs = util.update_copy(
PGDialect.colspecs,
{
sqltypes.Numeric: PGNumeric,
# prevents PGNumeric from being used
sqltypes.Float: sqltypes.Float,
}
)
@classmethod
def dbapi(cls):
from postgresql.driver import dbapi20
return dbapi20
def create_connect_args(self, url):
opts = url.translate_connect_args(username='user')
if 'port' in opts:
opts['port'] = int(opts['port'])
else:
opts['port'] = 5432
opts.update(url.query)
return ([], opts)
def is_disconnect(self, e, connection, cursor):
return "connection is closed" in str(e)
dialect = PGDialect_pypostgresql
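# --- Editor's note: illustrative sketch appended by the editor; it is not part of the
# original SQLAlchemy dialect. It shows the connect-string form documented in the module
# docstring. Host, credentials and database name are hypothetical, and the py-postgresql
# DBAPI must be importable for create_engine() to succeed. In practice this snippet
# belongs in application code; this module itself is not runnable as a script.
if __name__ == '__main__':
    from sqlalchemy import create_engine
    engine = create_engine(
        'postgresql+pypostgresql://scott:tiger@localhost:5432/mydb')
    print(engine.dialect.driver)  # expected: 'pypostgresql'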
| jessekl/flixr | venv/lib/python2.7/site-packages/sqlalchemy/dialects/postgresql/pypostgresql.py | Python | mit | 2,169 |
#-*- coding: utf-8 -*-
from django.db.models import Manager
from django.shortcuts import get_object_or_404
from django.db.models import Q
class TopicManager(Manager):
def _for_all(self):
return self.filter(Q(category__parent=None) | Q(category__parent__is_removed=False),
category__is_removed=False,
is_removed=False)
def for_public(self):
return self._for_all()\
.filter(category__is_private=False)
def for_public_open(self):
return self.for_public()\
.filter(is_closed=False)
def for_category(self, category):
if category.is_subcategory:
return self.filter(category=category,
is_removed=False)
else:
return self.filter(Q(category=category) | Q(category__parent=category),
category__is_removed=False,
is_removed=False)
def get_public_or_404(self, pk, user):
if user.is_authenticated() and user.is_moderator:
return get_object_or_404(self
.select_related('category__parent'),
pk=pk,
category__is_private=False)
else:
return get_object_or_404(self.for_public()
.select_related('category__parent'),
pk=pk)
def for_update_or_404(self, pk, user):
if user.is_moderator:
return get_object_or_404(self,
pk=pk,
category__is_private=False)
else:
return get_object_or_404(self.for_public_open(),
pk=pk,
user=user)
def for_access(self, user):
return self._for_all()\
.filter(Q(category__is_private=False) | Q(topics_private__user=user))
def for_access_open(self, user):
return self.for_access(user)\
.filter(is_closed=False)
| Si-elegans/Web-based_GUI_Tools | spirit/managers/topic.py | Python | apache-2.0 | 2,144 |
import pytest
from saleor.graphql.core.utils.reordering import perform_reordering
from saleor.product import models
SortedModel = models.AttributeValue
def _sorted_by_order(items):
return sorted(items, key=lambda o: o[1])
def _get_sorted_map():
return list(
SortedModel.objects.values_list("pk", "sort_order").order_by("sort_order")
)
@pytest.fixture
def dummy_attribute():
return models.Attribute.objects.create(name="Dummy")
@pytest.fixture
def sorted_entries_seq(dummy_attribute):
attribute = dummy_attribute
values = SortedModel.objects.bulk_create(
[
SortedModel(
attribute=attribute, slug=f"value-{i}", name=f"Value-{i}", sort_order=i
)
for i in range(6)
]
)
return list(values)
@pytest.fixture
def sorted_entries_gaps(dummy_attribute):
attribute = dummy_attribute
values = SortedModel.objects.bulk_create(
[
SortedModel(
attribute=attribute, slug=f"value-{i}", name=f"Value-{i}", sort_order=i
)
for i in range(0, 12, 2)
]
)
return list(values)
def test_reordering_sequential(sorted_entries_seq):
"""
Ensures the reordering logic works as expected. This test simply provides
sequential sort order values and tries to reorder them.
"""
qs = SortedModel.objects
nodes = sorted_entries_seq
operations = {nodes[5].pk: -1, nodes[2].pk: +3}
expected = _sorted_by_order(
[
(nodes[0].pk, 0),
(nodes[1].pk, 1),
(nodes[2].pk, 2 + 3),
(nodes[3].pk, 3 - 1),
(nodes[4].pk, 4 + 1 - 1),
(nodes[5].pk, 5 - 1 - 1),
]
)
perform_reordering(qs, operations)
actual = _get_sorted_map()
assert actual == expected
def test_reordering_non_sequential(sorted_entries_gaps):
"""
Ensures that reordering non-sequential sort order values is properly
handled. This case happens when an item gets deleted, creating gaps between values.
"""
qs = SortedModel.objects
nodes = sorted_entries_gaps
operations = {nodes[5].pk: -1, nodes[2].pk: +3}
expected = _sorted_by_order(
[
(nodes[0].pk, 0),
(nodes[1].pk, 2),
(nodes[2].pk, 4 + (3 * 2) - 1),
(nodes[3].pk, 6 - 1),
(nodes[4].pk, 8 + 1 - 1),
(nodes[5].pk, 10 - (1 * 2) - 1),
]
)
perform_reordering(qs, operations)
actual = _get_sorted_map()
assert actual == expected
@pytest.mark.parametrize(
"operation, expected_operations",
[((0, +5), (+5, -1, -1, -1, -1, -1)), ((5, -5), (+1, +1, +1, +1, +1, -5))],
)
def test_inserting_at_the_edges(sorted_entries_seq, operation, expected_operations):
"""
Ensures it is possible to move an item to the top and to the bottom of the list.
"""
qs = SortedModel.objects
nodes = sorted_entries_seq
target_node_pos, new_rel_sort_order = operation
operations = {nodes[target_node_pos].pk: new_rel_sort_order}
expected = _sorted_by_order(
[
(node.pk, node.sort_order + op)
for node, op in zip(nodes, expected_operations)
]
)
perform_reordering(qs, operations)
actual = _get_sorted_map()
assert actual == expected
def test_reordering_out_of_bound(sorted_entries_seq):
"""
Ensures users cannot manually create gaps or insert items anywhere they
want, e.g. -1000, which could make a mess in the database.
"""
qs = SortedModel.objects
nodes = sorted_entries_seq
operations = {nodes[5].pk: -100, nodes[0].pk: +100}
expected = _sorted_by_order(
[
(nodes[0].pk, 0 + 5),
(nodes[1].pk, 1),
(nodes[2].pk, 2),
(nodes[3].pk, 3),
(nodes[4].pk, 4),
(nodes[5].pk, 5 - 5),
]
)
perform_reordering(qs, operations)
actual = _get_sorted_map()
assert actual == expected
def test_reordering_null_sort_orders(dummy_attribute):
"""
Ensures null sort order values are properly ordered (by ID sorting).
"""
attribute = dummy_attribute
qs = SortedModel.objects
non_null_sorted_entries = list(
qs.bulk_create(
[
SortedModel(
pk=1, attribute=attribute, slug="1", name="1", sort_order=1
),
SortedModel(
pk=2, attribute=attribute, slug="2", name="2", sort_order=0
),
]
)
)
null_sorted_entries = list(
qs.bulk_create(
[
SortedModel(
pk=5, attribute=attribute, slug="5", name="5", sort_order=None
),
SortedModel(
pk=4, attribute=attribute, slug="4", name="4", sort_order=None
),
SortedModel(
pk=3, attribute=attribute, slug="3", name="3", sort_order=None
),
]
)
)
operations = {null_sorted_entries[0].pk: -2}
expected = [
(non_null_sorted_entries[1].pk, 0),
(non_null_sorted_entries[0].pk, 1),
(null_sorted_entries[0].pk, 2),
(null_sorted_entries[2].pk, 3),
(null_sorted_entries[1].pk, 4),
]
perform_reordering(qs, operations)
actual = _get_sorted_map()
assert actual == expected
def test_reordering_nothing(sorted_entries_seq, assert_num_queries):
"""
Ensures operations that do nothing are skipped. Thus only one query should
be made: fetching the nodes.
"""
qs = SortedModel.objects
pk = sorted_entries_seq[0].pk
operations = {pk: 0}
with assert_num_queries(1) as ctx:
perform_reordering(qs, operations)
assert ctx[0]["sql"].startswith("SELECT "), "Should only have done a SELECT"
def test_giving_no_operation_does_no_query(sorted_entries_seq, assert_num_queries):
"""Ensures giving no operations runs no queries at all."""
qs = SortedModel.objects
with assert_num_queries(0):
perform_reordering(qs, {})
def test_reordering_concurrently(dummy_attribute, assert_num_queries):
"""
Ensures users cannot reorder concurrently; they need to wait for the other
one to finish.
Acquiring the lock must be the very first thing done, so we ensure
the first SQL query acquires it.
"""
qs = SortedModel.objects
attribute = dummy_attribute
entries = list(
qs.bulk_create(
[
SortedModel(
pk=1, attribute=attribute, slug="1", name="1", sort_order=0
),
SortedModel(
pk=2, attribute=attribute, slug="2", name="2", sort_order=1
),
]
)
)
operations = {entries[0].pk: +1}
with assert_num_queries(2) as ctx:
perform_reordering(qs, operations)
assert ctx[0]["sql"] == (
'SELECT "product_attributevalue"."id", "product_attributevalue"."sort_order" '
'FROM "product_attributevalue" '
"ORDER BY "
'"product_attributevalue"."sort_order" ASC NULLS LAST, '
'"product_attributevalue"."id" ASC FOR UPDATE'
)
assert ctx[1]["sql"] == (
'UPDATE "product_attributevalue" '
'SET "sort_order" = (CASE WHEN ("product_attributevalue"."id" = 1) '
'THEN 1 WHEN ("product_attributevalue"."id" = 2) '
"THEN 0 ELSE NULL END)::integer "
'WHERE "product_attributevalue"."id" IN (1, 2)'
)
def test_reordering_deleted_node_from_concurrent(dummy_attribute, assert_num_queries):
"""
Ensures that if a node was deleted before locking, it is simply skipped
instead of raising an error.
"""
qs = SortedModel.objects
attribute = dummy_attribute
entries = list(
qs.bulk_create(
[
SortedModel(
pk=1, attribute=attribute, slug="1", name="1", sort_order=0
),
SortedModel(
pk=2, attribute=attribute, slug="2", name="2", sort_order=1
),
]
)
)
operations = {-1: +1, entries[0].pk: +1}
with assert_num_queries(2) as ctx:
perform_reordering(qs, operations)
assert ctx[1]["sql"] == (
'UPDATE "product_attributevalue" '
'SET "sort_order" = (CASE WHEN ("product_attributevalue"."id" = 1) '
'THEN 1 WHEN ("product_attributevalue"."id" = 2) '
"THEN 0 ELSE NULL END)::integer "
'WHERE "product_attributevalue"."id" IN (1, 2)'
)
| maferelo/saleor | tests/api/test_core_reordering.py | Python | bsd-3-clause | 8,739 |
# Under MIT License, see LICENSE.txt
from Model.DataObject.BaseDataObject import catch_format_error
from Model.DataObject.DrawingData.BaseDataDraw import BaseDataDraw
__author__ = 'RoboCupULaval'
class DrawRectDataIn(BaseDataDraw):
def __init__(self, data_in):
super().__init__(data_in)
self._format_data()
@catch_format_error
def _check_obligatory_data(self):
""" Vérifie les données obligatoires """
assert isinstance(self.data, dict),\
"data: {} n'est pas un dictionnaire.".format(type(self.data))
keys = self.data.keys()
assert 'top_left' in keys, \
"data['top_left'] n'existe pas."
assert self._point_is_valid(self.data['top_left']), \
"data['top_left']: {} n'est pas un point valide.".format(self.data['top_left'])
assert 'bottom_right' in keys, \
"data['bottom_right'] n'existe pas."
assert self._point_is_valid(self.data['bottom_right']), \
"data['bottom_right']: {} n'est pas un point valide.".format(self.data['bottom_right'])
@catch_format_error
def _check_optional_data(self):
""" Vérifie les données optionnelles """
keys = self.data.keys()
if 'color' in keys:
assert self._colorRGB_is_valid(self.data['color']), \
"data['color']: {} n'est pas une couleur valide.".format(self.data['color'])
else:
self.data['color'] = (0, 0, 0)
if 'is_fill' in keys:
assert isinstance(self.data['is_fill'], bool), \
"data['is_fill']: {} n'est pas du bon type (bool)".format(type(self.data['is_fill']))
else:
self.data['is_fill'] = False
if 'width' in keys:
assert isinstance(self.data['width'], int), \
"data['width']: {} n'est pas du bon type (int)".format(type(self.data['width']))
else:
self.data['width'] = 2
if 'style' in keys:
assert self.data['style'] in self.line_style_allowed, \
"data['style']: {} n'est pas une style valide".format(self.data['style'])
else:
self.data['style'] = 'SolidLine'
if 'timeout' in keys:
assert self.data['timeout'] >= 0, \
"data['timeout']: {} n'est pas valide.".format(self.data['timeout'])
else:
self.data['timeout'] = 0
@staticmethod
def get_default_data_dict():
return dict(zip(["top_left", 'bottom_right'],
[(-250, -250), (0, -500)]))
@staticmethod
def get_type():
return 3006
| RoboCupULaval/UI-Debug | Model/DataObject/DrawingData/DrawRectDataIn.py | Python | mit | 2,638 |
"""Reusable cryptopals module."""
from base64 import b64encode
from binascii import hexlify, unhexlify
def hex_to_base64(s):
"""Converts a hex string to base64."""
return b64encode(unhexlify(s))
def fixed_xor(s1, s2):
"""XORs two hex strings."""
return hexlify(''.join(chr(ord(c1) ^ ord(c2)) for c1, c2 in zip(unhexlify(s1[-len(s2):]), unhexlify(s2))))
def single_char_xor(s, c):
"""XORs a hex string with a single character."""
return ''.join(chr(ord(c1) ^ ord(c)) for c1 in unhexlify(s))
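# --- Editor's note: illustrative usage sketch appended by the editor; it is not part of
# the original module. The hex inputs are arbitrary demo values, not challenge data.
if __name__ == '__main__':
    print(hex_to_base64('49276d'))        # base64 of the bytes "I'm"
    print(fixed_xor('1c0111', '686974'))  # XOR of two equal-length hex strings
    print(single_char_xor('1b3736', 'X')) # XOR every byte with the character 'X'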
| dougludlow/cryptopals | sets/1/challenges/cryptopals.py | Python | mit | 520 |
import extractCsvData as CD
import config
class Singleton:
"""
A non-thread-safe helper class to ease implementing singletons.
This should be used as a decorator -- not a metaclass -- to the
class that should be a singleton.
The decorated class can define one `__init__` function that
takes only the `self` argument. Other than that, there are
no restrictions that apply to the decorated class.
To get the singleton instance, use the `Instance` method. Trying
to use `__call__` will result in a `TypeError` being raised.
Limitations: The decorated class cannot be inherited from.
"""
def __init__(self, decorated):
self._decorated = decorated
def Instance(self):
"""
Returns the singleton instance. Upon its first call, it creates a
new instance of the decorated class and calls its `__init__` method.
On all subsequent calls, the already created instance is returned.
"""
try:
return self._instance
except AttributeError:
self._instance = self._decorated()
return self._instance
def __call__(self):
raise TypeError('Singletons must be accessed through `Instance()`.')
def __instancecheck__(self, inst):
return isinstance(inst, self._decorated)
@Singleton
class SamplingDatabase:
"""
This class reads each csv file used for the model, converts it into an array using the relevant method, and then stores each array in
a dictionary [key: array]. This means each csv file is only read once; from then on the model just uses the data in the array, as this
never changes.
The two methods called to create the arrays are:
def createInputArray: creates a 2D array in the form [[x1,y1],[x2,y2]...] --> this is used for most csv files
def createInputArrayWithDummy: is used when each x axis label represents a range e.g. 1-2, but an x axis array of numbers (float or int)
is needed. The method creates a separate array for y values, x values and x dummy values.
"""
__instance = None
__database = {}
__database["RESIDENTIAL"]={}
__database["COMMERCIAL"]={}
def __init__(self):
self.__database["RESIDENTIAL"]["DIURNAL_PATTERN"]= CD.createInputArray(config.RESIDENTIAL_DIURNAL_PATTERN_DATABASE)
self.__database["RESIDENTIAL"]["RESIDENTIAL_UNIT_SIZE_DATABASE"] = CD.createInputArray(config.RESIDENTIAL_UNIT_SIZE_DATABASE)
self.__database["RESIDENTIAL"]["TOILET_FLUSH_FREQ_DATABASE"] = CD.createInputArray(config.RESIDENTIAL_TOILET_FLUSH_FREQ_DATABASE)
self.__database["RESIDENTIAL"]["SHOWER_FREQUENCY_DATABASE"] = CD.createInputArray(config.RESIDENTIAL_SHOWER_FREQUENCY_DATABASE)
self.__database["RESIDENTIAL"]["SHOWER_DURATION_DATABASE"] = CD.createInputArrayWithDummy(config.RESIDENTIAL_SHOWER_DURATION_DATABASE)
self.__database["RESIDENTIAL"]["BATH_FREQUENCY_DATABASE"] = CD.createInputArray(config.RESIDENTIAL_BATH_FREQUENCY_DATABASE)
self.__database["RESIDENTIAL"]["WASH_MACHINE_FREQ_DATABASE"] = CD.createInputArray(config.RESIDENTIAL_WASH_MACHINE_FREQ_DATABASE)
self.__database["COMMERCIAL"]["DIURNAL_PATTERN"] = CD.createInputArray(config.COMMERCIAL_DIURNAL_PATTERN_DATABASE)
self.__database["COMMERCIAL"]["BUILDING_SIZE"] = CD.createInputArray(config.COMMERCIAL_BUILDING_NLA)
self.__database["COMMERCIAL"]["M^2_PER_OCCUPIED_WORKPOINT"] = CD.extractCsvData(config.COMMERCIAL_BUILDING_OWP)
self.__database["COMMERCIAL"]["TOILET_FLUSH_FREQ_DATABASE"] = CD.createInputArray(config.COMMERCIAL_TOILET_FLUSH_FREQ_DATABASE)
self.__database["COMMERCIAL"]["SHOWER_FREQUENCY_DATABASE"] = CD.createInputArray(config.COMMERCIAL_SHOWER_FREQUENCY_DATABASE)
self.__database["COMMERCIAL"]["SHOWER_DURATION_DATABASE"] = CD.createInputArrayWithDummy(config.COMMERCIAL_SHOWER_DURATION_DATABASE)
self.__database["COMMERCIAL"]["BATH_FREQUENCY_DATABASE"] = CD.createInputArray(config.COMMERCIAL_BATH_FREQUENCY_DATABASE)
self.__database["TAP_USE_DURATION_DATABASE"] = CD.createInputArray(config.TAP_USE_DURATION_DATABASE)
self.__database["TAP_FLOWRATE_DATABASE"] = CD.createInputArray(config.TAP_FLOWRATE_DATABASE)
self.__database["FRACTION_TOP_LOADERS_IN_AUST"] = config.FRACTION_TOP_LOADERS_IN_AUST
self.__database["DISTRIBUTIONS"] = config.DISTRIBUTIONS
self.__database["TOILET_FLUSH_VOLUME_DATABASE"] = CD.createInputArray(config.TOILET_FLUSH_VOLUME_DATABASE)
self.__database["SHOWER_FLOWRATE_DATABASE"] = CD.createInputArray(config.SHOWER_FLOWRATE_DATABASE)
self.__database["TOP_LOADER_WM_VOLUME_DATABASE"] = CD.createInputArray(config.TOP_LOADER_WM_VOLUME_DATABASE)
self.__database["FRONT_LOADER_WM_VOLUME_DATABASE"] = CD.createInputArray(config.FRONT_LOADER_WM_VOLUME_DATABASE)
self.__database["DISH_WASHER_VOLUME_DATABASE"] = CD.createInputArray(config.DISH_WASHER_VOLUME_DATABASE)
def getDB(self, data):
"""
Return simulation based on the current session id
location = where the demand is being generated e.g. house, commercial building
data = data required e.g. diurnal pattern, shower frequency etc for that particular location
"""
if not self.__database.has_key(data):
print "error - this database does not exist"
return []
return self.__database[data]
def getDBTest(self, location,data):
"""
Return simulation based on the current session id
location = where the demand is being generated e.g. house, commercial building
data = data required e.g. diurnal pattern, shower frequency etc for that particular location
"""
if not self.__database.has_key(location):
print "error - this database does not exist"
return []
return self.__database[location][data]
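# --- Editor's note: illustrative sketch appended by the editor; it is not part of the
# original module. It demonstrates the Singleton decorator usage described in its
# docstring with a hypothetical Example class (Python 2, matching this file). Running
# the file directly still needs the extractCsvData and config modules to be importable.
if __name__ == '__main__':
    @Singleton
    class Example:
        def __init__(self):
            self.value = 42
    first = Example.Instance()
    second = Example.Instance()
    print first is second            # True: the same instance is returned
    try:
        Example()                    # direct instantiation is rejected
    except TypeError, err:
        print err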
| iut-ibk/DynaMind-ToolBox | DynaMind-Performance-Assessment/3rdparty/CD3Waterbalance/WaterDemandModel/sampling_db.py | Python | gpl-2.0 | 5,979 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# =============================================================================
import ROOT
from ostap.core.core import cpp, hID, VE
import ostap.histos.histos
import ostap.histos.graphs
import ostap.histos.param
import ostap.histos.compare
# =============================================================================
# The END
# =============================================================================
| OstapHEP/ostap | ostap/histos/__init__.py | Python | bsd-3-clause | 472 |
''' Provide a base class for all Bokeh Server Protocol message types.
Bokeh messages are composed of a sequence of JSON fragments. Specified as
Python JSON-like data, messages have the general form:
.. code-block:: python
[
# these are required
b'{header}', # serialized header dict
b'{metadata}', # serialized metadata dict
b'{content}', # serialized content dict
# these are optional, and come in pairs; header contains num_buffers
b'{buf_header}', # serialized buffer header dict
b'array' # raw buffer payload data
...
]
The ``header`` fragment will have the form:
.. code-block:: python
header = {
# these are required
'msgid' : <str> # a unique id for the message
'msgtype' : <str> # a message type, e.g. 'ACK', 'PATCH-DOC', etc
# these are optional
'num_buffers' : <int> # the number of additional buffers, if any
}
The ``metadata`` fragment may contain any arbitrary information. It is not
processed by Bokeh for any purpose, but may be useful for external
monitoring or instrumentation tools.
The ``content`` fragment is defined by the specific message type.
'''
from __future__ import absolute_import, print_function
from tornado.escape import json_decode, json_encode
from tornado import gen
import bokeh.util.serialization as bkserial
from .exceptions import MessageError, ProtocolError
class Message(object):
''' The Message base class encapsulates creating, assembling, and
validating the integrity of Bokeh Server messages. Additionally, it
provide hooks
'''
def __init__(self, header, metadata, content):
''' Initialize a new message from header, metadata, and content
dictionaries.
To assemble a message from existing JSON fragments, use the
``assemble`` method.
To create new messages with automatically generated headers,
use subclass ``create`` methods.
Args:
header (JSON-like) :
metadata (JSON-like) :
content (JSON-like) :
'''
self.header = header
self.metadata = metadata
self.content = content
self._buffers = []
def __repr__(self):
return "Message %r (revision %d) content: %r" % (self.msgtype, self.revision, self.content)
@classmethod
def assemble(cls, header_json, metadata_json, content_json):
''' Creates a new message, assembled from JSON fragments.
Args:
header_json (``JSON``) :
metadata_json (``JSON``) :
content_json (``JSON``) :
Returns:
Message subclass
Raises:
MessageError
'''
try:
header = json_decode(header_json)
except ValueError:
raise MessageError("header could not be decoded")
try:
metadata = json_decode(metadata_json)
except ValueError:
raise MessageError("metadata could not be decoded")
try:
content = json_decode(content_json)
except ValueError:
raise MessageError("content could not be decoded")
msg = cls(header, metadata, content)
msg._header_json = header_json
msg._metadata_json = metadata_json
msg._content_json = content_json
return msg
def add_buffer(self, buf_header, buf_payload):
''' Associate a buffer header and payload with this message.
Args:
buf_header (``JSON``) : a buffer header
buf_payload (``JSON`` or bytes) : a buffer payload
Returns:
None
Raises:
MessageError
'''
if 'num_buffers' in self._header:
self._header['num_buffers'] += 1
else:
self._header['num_buffers'] = 1
self._header_json = None
self._buffers.append((buf_header, buf_payload))
def assemble_buffer(self, buf_header, buf_payload):
''' Add a buffer header and payload that we read from the socket.
This differs from add_buffer() because we're validating vs.
the header's num_buffers, instead of filling in the header.
Args:
buf_header (``JSON``) : a buffer header
buf_payload (``JSON`` or bytes) : a buffer payload
Returns:
None
Raises:
ProtocolError
'''
if self.header.get('num_buffers', 0) <= len(self._buffers):
raise ProtocolError("too many buffers received expecting " + str(self.header['num_buffers']))
self._buffers.append((buf_header, buf_payload))
@gen.coroutine
def write_buffers(self, conn, locked=True):
''' Write any buffer headers and payloads to the given connection.
Args:
conn (object) :
May be any object with a ``write_message`` method. Typically,
a Tornado ``WSHandler`` or ``WebSocketClientConnection``
locked (bool) :
Returns:
int : number of bytes sent
'''
if conn is None:
raise ValueError("Cannot write_buffers to connection None")
sent = 0
for header, payload in self._buffers:
yield conn.write_message(header, locked=locked)
yield conn.write_message(payload, binary=True, locked=locked)
sent += (len(header) + len(payload))
raise gen.Return(sent)
@classmethod
def create_header(cls, request_id=None):
''' Return a message header fragment dict.
Args:
request_id (str or None) :
Message ID of the message this message replies to
Returns:
dict : a message header
'''
header = {
'msgid' : bkserial.make_id(),
'msgtype' : cls.msgtype
}
if request_id is not None:
header['reqid'] = request_id
return header
@gen.coroutine
def send(self, conn):
''' Send the message on the given connection.
Args:
conn (WebSocketHandler) : a WebSocketHandler to send messages
Returns:
int : number of bytes sent
'''
if conn is None:
raise ValueError("Cannot send to connection None")
with (yield conn.write_lock.acquire()):
sent = 0
yield conn.write_message(self.header_json, locked=False)
sent += len(self.header_json)
# uncomment this to make it a lot easier to reproduce lock-related bugs
#yield gen.sleep(0.1)
yield conn.write_message(self.metadata_json, locked=False)
sent += len(self.metadata_json)
# uncomment this to make it a lot easier to reproduce lock-related bugs
#yield gen.sleep(0.1)
yield conn.write_message(self.content_json, locked=False)
sent += len(self.content_json)
sent += yield self.write_buffers(conn, locked=False)
raise gen.Return(sent)
@property
def complete(self):
''' Returns whether all required parts of a message are present.
Returns:
bool : True if the message is complete, False otherwise
'''
return self.header is not None and \
self.metadata is not None and \
self.content is not None and \
self.header.get('num_buffers', 0) == len(self._buffers)
# header fragment properties
@property
def header(self):
return self._header
@header.setter
def header(self, value):
self._header = value
self._header_json = None
@property
def header_json(self):
if not self._header_json:
self._header_json = json_encode(self.header)
return self._header_json
# content fragment properties
@property
def content(self):
return self._content
@content.setter
def content(self, value):
self._content = value
self._content_json = None
@property
def content_json(self):
if not self._content_json:
self._content_json = json_encode(self.content)
return self._content_json
# metadata fragment properties
@property
def metadata(self):
return self._metadata
@metadata.setter
def metadata(self, value):
self._metadata = value
self._metadata_json = None
@property
def metadata_json(self):
if not self._metadata_json:
self._metadata_json = json_encode(self.metadata)
return self._metadata_json
# buffer properties
@property
def buffers(self):
return self._buffers
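# --- Editor's note: illustrative sketch appended by the editor; it is not part of the
# original Bokeh module. It builds the JSON fragment sequence described in the module
# docstring for a hypothetical bufferless message, using only the standard library.
# Because this module uses relative imports it is not runnable as a script, so treat
# this purely as a reading aid; the 'DUMMY' msgtype is made up.
if __name__ == '__main__':
    import json
    import uuid
    header = {'msgid': uuid.uuid4().hex, 'msgtype': 'DUMMY'}
    metadata = {}
    content = {'text': 'hello'}
    fragments = [json.dumps(header), json.dumps(metadata), json.dumps(content)]
    print(fragments)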
| mindriot101/bokeh | bokeh/protocol/message.py | Python | bsd-3-clause | 8,808 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from osv import osv, fields
from tools.translate import _
class hr_si_so_ask(osv.osv_memory):
_name = 'hr.sign.in.out.ask'
_description = 'Ask for Sign In Out'
_columns = {
'name': fields.char('Employees name', size=32, required=True, readonly=True),
'last_time': fields.datetime('Your last sign out', required=True),
'emp_id': fields.many2one('hr.employee', 'Employee ID', readonly=True),
}
def _get_empname(self, cr, uid, context=None):
emp_id = context.get('emp_id', self.pool.get('hr.employee').search(cr, uid, [('user_id', '=', uid)], context=context))
if emp_id:
employee = self.pool.get('hr.employee').browse(cr, uid, emp_id, context=context)[0].name
return employee
return ''
def _get_empid(self, cr, uid, context=None):
emp_id = context.get('emp_id', self.pool.get('hr.employee').search(cr, uid, [('user_id', '=', uid)], context=context))
if emp_id:
return emp_id[0]
return False
_defaults = {
'name': _get_empname,
'emp_id': _get_empid,
}
def sign_in(self, cr, uid, ids, context=None):
data = self.read(cr, uid, ids, [], context=context)[0]
data['emp_id'] = data['emp_id'] and data['emp_id'][0]
return self.pool.get('hr.sign.in.out').sign_in(cr, uid, data, context)
def sign_out(self, cr, uid, ids, context=None):
data = self.read(cr, uid, ids, [], context=context)[0]
data['emp_id'] = data['emp_id'] and data['emp_id'][0]
return self.pool.get('hr.sign.in.out').sign_out(cr, uid, data, context)
hr_si_so_ask()
class hr_sign_in_out(osv.osv_memory):
_name = 'hr.sign.in.out'
_description = 'Sign In Sign Out'
_columns = {
'name': fields.char('Employees name', size=32, required=True, readonly=True),
'state': fields.char('Current state', size=32, required=True, readonly=True),
'emp_id': fields.many2one('hr.employee', 'Employee ID', readonly=True),
}
def _get_empid(self, cr, uid, context=None):
emp_id = context.get('emp_id', self.pool.get('hr.employee').search(cr, uid, [('user_id', '=', uid)], context=context))
if emp_id:
employee = self.pool.get('hr.employee').browse(cr, uid, emp_id, context=context)[0]
return {'name': employee.name, 'state': employee.state, 'emp_id': emp_id[0]}
return {}
def default_get(self, cr, uid, fields_list, context=None):
res = super(hr_sign_in_out, self).default_get(cr, uid, fields_list, context=context)
res_emp = self._get_empid(cr, uid, context=context)
res.update(res_emp)
return res
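    # si_check: decide whether the employee can be signed in directly.
    # If the most recent attendance record is a sign-out (or no record exists),
    # sign in straight away; otherwise open the 'hr.sign.in.out.ask' wizard so
    # the user can provide the missing sign-out time first.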
def si_check(self, cr, uid, ids, context=None):
obj_model = self.pool.get('ir.model.data')
att_obj = self.pool.get('hr.attendance')
data = self.read(cr, uid, ids, [], context=context)[0]
data['emp_id'] = data['emp_id'] and data['emp_id'][0]
emp_id = data['emp_id']
att_id = att_obj.search(cr, uid, [('employee_id', '=', emp_id)], limit=1, order='name desc')
last_att = att_obj.browse(cr, uid, att_id, context=context)
if last_att:
last_att = last_att[0]
cond = not last_att or last_att.action == 'sign_out'
if cond:
return self.sign_in(cr, uid, data, context)
else:
model_data_ids = obj_model.search(cr,uid,[('model','=','ir.ui.view'),('name','=','view_hr_attendance_so_ask')], context=context)
resource_id = obj_model.read(cr, uid, model_data_ids, fields=['res_id'], context=context)[0]['res_id']
return {
'name': _('Sign in / Sign out'),
'view_type': 'form',
'view_mode': 'tree,form',
'res_model': 'hr.sign.in.out.ask',
'views': [(resource_id,'form')],
'type': 'ir.actions.act_window',
'context': context,
'target': 'new',
}
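    # so_check: the sign-out counterpart of si_check. If no attendance record
    # exists at all, show an informational message; if the last action was a
    # sign-in, sign out directly; otherwise open the wizard to ask for the
    # missing sign-in time.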
def so_check(self, cr, uid, ids, context=None):
obj_model = self.pool.get('ir.model.data')
att_obj = self.pool.get('hr.attendance')
data = self.read(cr, uid, ids, [], context=context)[0]
data['emp_id'] = data['emp_id'] and data['emp_id'][0]
emp_id = data['emp_id']
att_id = att_obj.search(cr, uid, [('employee_id', '=', emp_id),('action', '!=', 'action')], limit=1, order='name desc')
last_att = att_obj.browse(cr, uid, att_id, context=context)
if last_att:
last_att = last_att[0]
if not att_id and not last_att:
model_data_ids = obj_model.search(cr, uid, [('model','=','ir.ui.view'),('name','=','view_hr_attendance_message')], context=context)
resource_id = obj_model.read(cr, uid, model_data_ids, fields=['res_id'], context=context)[0]['res_id']
return {
'name': _('Sign in / Sign out'),
'view_type': 'form',
'view_mode': 'tree,form',
'res_model': 'hr.sign.in.out',
'views': [(resource_id,'form')],
'type': 'ir.actions.act_window',
'context': context,
'target': 'new',
}
cond = last_att and last_att['action'] == 'sign_in'
if cond:
return self.sign_out(cr, uid, data, context)
else:
model_data_ids = obj_model.search(cr, uid, [('model','=','ir.ui.view'),('name','=','view_hr_attendance_si_ask')], context=context)
resource_id = obj_model.read(cr, uid, model_data_ids, fields=['res_id'], context=context)[0]['res_id']
return {
'name': _('Sign in / Sign out'),
'view_type': 'form',
'view_mode': 'tree,form',
'res_model': 'hr.sign.in.out.ask',
'views': [(resource_id,'form')],
'type': 'ir.actions.act_window',
'target': 'new',
}
def sign_in(self, cr, uid, data, context=None):
if context is None:
context = {}
emp_id = data['emp_id']
if 'last_time' in data:
if data['last_time'] > time.strftime('%Y-%m-%d %H:%M:%S'):
raise osv.except_osv(_('UserError'), _('The sign-out date must be in the past'))
self.pool.get('hr.attendance').create(cr, uid, {'name': data['last_time'], 'action': 'sign_out',
'employee_id': emp_id}, context=context)
try:
self.pool.get('hr.employee').attendance_action_change(cr, uid, [emp_id], 'sign_in')
except:
raise osv.except_osv(_('UserError'), _('A sign-in must be right after a sign-out !'))
return {'type': 'ir.actions.act_window_close'} # To do: Return Success message
def sign_out(self, cr, uid, data, context=None):
emp_id = data['emp_id']
if 'last_time' in data:
if data['last_time'] > time.strftime('%Y-%m-%d %H:%M:%S'):
raise osv.except_osv(_('UserError'), _('The Sign-in date must be in the past'))
self.pool.get('hr.attendance').create(cr, uid, {'name':data['last_time'], 'action':'sign_in', 'employee_id':emp_id}, context=context)
try:
self.pool.get('hr.employee').attendance_action_change(cr, uid, [emp_id], 'sign_out')
except:
raise osv.except_osv(_('UserError'), _('A sign-out must be right after a sign-in !'))
return {'type': 'ir.actions.act_window_close'} # To do: Return Success message
hr_sign_in_out()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
ksrajkumar/openerp-6.1
|
openerp/addons/hr_attendance/wizard/hr_attendance_sign_in_out.py
|
Python
|
agpl-3.0
| 8,656
|
"""
Course API serializers, representing course catalog data.
"""
import urllib
from django.urls import reverse
from edx_django_utils import monitoring as monitoring_utils
from rest_framework import serializers
from openedx.core.djangoapps.content.course_overviews.models import \
CourseOverview # lint-amnesty, pylint: disable=unused-import
from openedx.core.djangoapps.models.course_details import CourseDetails
from openedx.core.lib.api.fields import AbsoluteURLField
class _MediaSerializer(serializers.Serializer): # pylint: disable=abstract-method
"""
Nested serializer to represent a media object.
"""
def __init__(self, uri_attribute, *args, **kwargs):
super().__init__(*args, **kwargs)
self.uri_attribute = uri_attribute
uri = serializers.SerializerMethodField(source='*')
def get_uri(self, course_overview):
"""
Get the representation for the media resource's URI
"""
return getattr(course_overview, self.uri_attribute)
class _AbsolutMediaSerializer(_MediaSerializer): # pylint: disable=abstract-method
"""
Nested serializer to represent a media object and its absolute path.
"""
requires_context = True
def __call__(self, serializer_field):
self.context = serializer_field.context
        return super().__call__(serializer_field)
uri_absolute = serializers.SerializerMethodField(source="*")
def get_uri_absolute(self, course_overview):
"""
Convert the media resource's URI to an absolute URI.
"""
uri = getattr(course_overview, self.uri_attribute)
if not uri:
# Return empty string here, to keep the same
# response type in case uri is empty as well.
return ""
cdn_applied_uri = course_overview.apply_cdn_to_url(uri)
field = AbsoluteURLField()
        # In order for AbsoluteURLField to behave the same way as it does in
        # ImageSerializer, we need to set the request on the field
field._context = {"request": self.context.get("request")} # lint-amnesty, pylint: disable=protected-access
return field.to_representation(cdn_applied_uri)
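    # Illustrative sketch (not part of the original module): given a course
    # overview whose banner_image_url is '/asset-v1:edX+Demo+2024+type@asset+block@banner.jpg'
    # and a request for 'https://lms.example.com', get_uri_absolute would apply the
    # CDN rewrite (if one is configured) and then return something like
    # 'https://lms.example.com/asset-v1:edX+Demo+2024+type@asset+block@banner.jpg'.
    # The hostname and asset path above are made up purely for illustration.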
class ImageSerializer(serializers.Serializer): # pylint: disable=abstract-method
"""
Collection of URLs pointing to images of various sizes.
The URLs will be absolute URLs with the host set to the host of the current request. If the values to be
serialized are already absolute URLs, they will be unchanged.
"""
raw = AbsoluteURLField()
small = AbsoluteURLField()
large = AbsoluteURLField()
class _CourseApiMediaCollectionSerializer(serializers.Serializer): # pylint: disable=abstract-method
"""
Nested serializer to represent a collection of media objects
"""
banner_image = _AbsolutMediaSerializer(source='*', uri_attribute='banner_image_url')
course_image = _MediaSerializer(source='*', uri_attribute='course_image_url')
course_video = _MediaSerializer(source='*', uri_attribute='course_video_url')
image = ImageSerializer(source='image_urls')
class CourseSerializer(serializers.Serializer): # pylint: disable=abstract-method
"""
Serializer for Course objects providing minimal data about the course.
Compare this with CourseDetailSerializer.
"""
blocks_url = serializers.SerializerMethodField()
effort = serializers.CharField()
end = serializers.DateTimeField()
enrollment_start = serializers.DateTimeField()
enrollment_end = serializers.DateTimeField()
id = serializers.CharField() # pylint: disable=invalid-name
media = _CourseApiMediaCollectionSerializer(source='*')
name = serializers.CharField(source='display_name_with_default_escaped')
number = serializers.CharField(source='display_number_with_default')
org = serializers.CharField(source='display_org_with_default')
short_description = serializers.CharField()
start = serializers.DateTimeField()
start_display = serializers.CharField()
start_type = serializers.CharField()
pacing = serializers.CharField()
mobile_available = serializers.BooleanField()
hidden = serializers.SerializerMethodField()
invitation_only = serializers.BooleanField()
# 'course_id' is a deprecated field, please use 'id' instead.
course_id = serializers.CharField(source='id', read_only=True)
def get_hidden(self, course_overview):
"""
Get the representation for SerializerMethodField `hidden`
Represents whether course is hidden in LMS
"""
catalog_visibility = course_overview.catalog_visibility
return catalog_visibility in ['about', 'none'] or course_overview.id.deprecated # Old Mongo should be hidden
def get_blocks_url(self, course_overview):
"""
Get the representation for SerializerMethodField `blocks_url`
"""
base_url = '?'.join([
reverse('blocks_in_course'),
urllib.parse.urlencode({'course_id': course_overview.id}),
])
return self.context['request'].build_absolute_uri(base_url)
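    # Illustrative sketch (not part of the original module): for a hypothetical
    # course id 'course-v1:edX+Demo+2024' and a request for 'https://lms.example.com',
    # the value produced here would look roughly like
    # 'https://lms.example.com/.../blocks/?course_id=course-v1%3AedX%2BDemo%2B2024',
    # with the exact path depending on how 'blocks_in_course' is routed in the URLconf.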
class CourseDetailSerializer(CourseSerializer): # pylint: disable=abstract-method
"""
Serializer for Course objects providing additional details about the
course.
This serializer makes additional database accesses (to the modulestore) and
returns more data (including 'overview' text). Therefore, for performance
and bandwidth reasons, it is expected that this serializer is used only
when serializing a single course, and not for serializing a list of
courses.
"""
overview = serializers.SerializerMethodField()
def get_overview(self, course_overview):
"""
Get the representation for SerializerMethodField `overview`
"""
# Note: This makes a call to the modulestore, unlike the other
# fields from CourseSerializer, which get their data
# from the CourseOverview object in SQL.
return CourseDetails.fetch_about_attribute(course_overview.id, 'overview')
class CourseKeySerializer(serializers.BaseSerializer): # pylint:disable=abstract-method
"""
Serializer that takes a CourseKey and serializes it to a string course_id.
"""
@monitoring_utils.function_trace('course_key_serializer_to_representation')
def to_representation(self, instance):
# The function trace should be counting calls to this function, but I
# couldn't find it when I looked in any of the NR transaction traces,
# so I'm manually counting them using a custom metric:
monitoring_utils.increment('course_key_serializer_to_representation_call_count')
return str(instance)
|
eduNEXT/edx-platform
|
lms/djangoapps/course_api/serializers.py
|
Python
|
agpl-3.0
| 6,807
|
#! /usr/bin/env python
"""
Run tests.
"""
import sys
import os
import argparse
import inspect
import subprocess
import re
import dendropy
from dendropy import treecalc
from dendropy.interop import paup
class AnsiColorMeta(type):
##############################################################################
## Color infrastructure modified from:
##
## http://dev.pocoo.org/hg/pygments-main/file/b2deea5b5030/pygments/console.py
##
## pygments.console
## ~~~~~~~~~~~~~~~~
## Format colored console output.
## :copyright: Copyright 2006-2009 by the Pygments team, see AUTHORS.
## :license: BSD, see LICENSE for details.
##
ansiesc = "\x1b["
@staticmethod
def get_ansicodes():
ansicodes = {}
ansicodes[""] = ""
ansicodes["reset"] = AnsiColorMeta.ansiesc + "39;49;00m"
ansicodes["bold"] = AnsiColorMeta.ansiesc + "01m"
ansicodes["faint"] = AnsiColorMeta.ansiesc + "02m"
ansicodes["standout"] = AnsiColorMeta.ansiesc + "03m"
ansicodes["underline"] = AnsiColorMeta.ansiesc + "04m"
ansicodes["blink"] = AnsiColorMeta.ansiesc + "05m"
ansicodes["overline"] = AnsiColorMeta.ansiesc + "06m"
dark_colors = ["black", "darkred", "darkgreen", "brown", "darkblue",
"purple", "teal", "lightgray"]
light_colors = ["darkgray", "red", "green", "yellow", "blue",
"fuchsia", "turquoise", "white"]
x = 30
for d, l in zip(dark_colors, light_colors):
ansicodes[d] = AnsiColorMeta.ansiesc + "%im" % x
ansicodes[l] = AnsiColorMeta.ansiesc + "%i;01m" % x
x += 1
ansicodes["darkteal"] = ansicodes["turquoise"]
ansicodes["darkyellow"] = ansicodes["brown"]
ansicodes["fuscia"] = ansicodes["fuchsia"]
# ansicodes["white"] = ansicodes["bold"]
return ansicodes
def reset_color(cls):
return cls.ansicodes["reset"]
def colorize(cls, color_key, text):
return cls.ansicodes[color_key] + text + cls.ansicodes["reset"]
def ansiformat(cls, attr, text):
"""
Format ``text`` with a color and/or some attributes::
color normal color
*color* bold color
_color_ underlined color
+color+ blinking color
"""
result = []
if attr[:1] == attr[-1:] == '+':
result.append(cls.ansicodes['blink'])
attr = attr[1:-1]
if attr[:1] == attr[-1:] == '*':
result.append(cls.ansicodes['bold'])
attr = attr[1:-1]
if attr[:1] == attr[-1:] == '_':
result.append(cls.ansicodes['underline'])
attr = attr[1:-1]
result.append(cls.ansicodes[attr])
result.append(text)
result.append(cls.ansicodes['reset'])
return ''.join(result)
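    # Illustrative sketch (not part of the original script): with the markup
    # described above,
    #   AnsiColor.ansiformat("*red*", "FAIL")   -> bold red "FAIL"
    #   AnsiColor.ansiformat("_green_", "ok")   -> underlined green "ok"
    #   AnsiColor.ansiformat("+blue+", "blink") -> blinking blue "blink"
    # i.e. the surrounding *, _ or + characters select bold, underline or blink
    # before the named colour code is applied.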
def __new__(cls, name, bases, dct):
return type.__new__(cls, name, bases, dct)
def __init__(cls, name, bases, dct):
super(AnsiColorMeta, cls).__init__(name, bases, dct)
# setattr(cls, "ansicodes", AnsiColorMeta.get_ansicodes())
# setattr(cls, "ansiesc", AnsiColorMeta.ansiesc)
cls.ansicodes = AnsiColorMeta.get_ansicodes()
cls.ansiesc = AnsiColorMeta.ansiesc
class AnsiColor(object):
__metaclass__ = AnsiColorMeta
def __init__(self, stream=sys.stdout, colorize=True):
self.stream = stream
self.colorize = colorize
self.color_pattern = re.compile(r"@(\w+)@<<(.*?)>>")
def format_color(self, message):
if not self.colorize or not self.color_pattern.findall(message):
return message
else:
output = []
cur_pos = 0
for match in self.color_pattern.finditer(message):
start, end = match.span()
output.append(message[cur_pos:start])
output.append(AnsiColor.ansiformat(match.group(1), match.group(2)))
cur_pos = end
output.append(message[cur_pos:])
output = "".join(output)
return output
def write(self, message):
self.stream.write(self.format_color(message))
def __call__(self, message):
self.write(message)
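# Illustrative sketch (not part of the original script): AnsiColor parses inline
# markup of the form @colour@<<text>> in messages written through it, e.g.
#   cout = AnsiColor(sys.stdout)
#   cout("@green@<<PASS>>\n")      # prints "PASS" in green
#   cout("@fuchsia@<<FAIL>>\n")    # prints "FAIL" in fuchsia
# which is exactly how TestRunner.run() reports results below.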
class TestRunner(object):
PASS = 0
FAIL = 1
ERROR = 2
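    # Map each IUPAC nucleotide code to its partial-likelihood vector over
    # (A, C, G, T): unambiguous bases get a single 1.0, ambiguity codes get a
    # 1.0 for every base they could represent (e.g. 'R' = A or G -> [1,0,1,0]),
    # and gaps/unknowns ('-', '?', 'N', 'X') are compatible with all four bases.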
dna_to_partials = {
'A' : [1.0, 0.0, 0.0, 0.0],
'C' : [0.0, 1.0, 0.0, 0.0],
'G' : [0.0, 0.0, 1.0, 0.0],
'T' : [0.0, 0.0, 0.0, 1.0],
'U' : [0.0, 0.0, 0.0, 1.0],
'N' : [1.0, 1.0, 1.0, 1.0],
'X' : [1.0, 1.0, 1.0, 1.0],
'-' : [1.0, 1.0, 1.0, 1.0],
'?' : [1.0, 1.0, 1.0, 1.0],
'R' : [1.0, 0.0, 1.0, 0.0],
'Y' : [0.0, 1.0, 0.0, 1.0],
'M' : [1.0, 1.0, 0.0, 0.0],
'W' : [1.0, 0.0, 0.0, 1.0],
'S' : [0.0, 1.0, 1.0, 0.0],
'K' : [0.0, 0.0, 1.0, 1.0],
'V' : [1.0, 1.0, 1.0, 0.0],
'H' : [1.0, 1.0, 0.0, 1.0],
'D' : [1.0, 0.0, 1.0, 1.0],
'B' : [0.0, 1.0, 1.0, 1.0]
}
@staticmethod
def get_node_tag(nd):
if nd.taxon:
return nd.taxon.label
else:
return nd.label
def __init__(self, opts):
self.script_dir = os.path.dirname(os.path.abspath(__file__))
self.data_dir = os.path.join(self.script_dir, "data")
self.verbosity = opts.verbosity
self.break_on_fail = opts.break_on_fail
self.test_command = None
self.test_retcode = None
self.test_stdout = None
self.test_stderr = None
self.test_result = None
self.test_fail_message = None
self.test_pass_message = None
self.cout = AnsiColor(sys.stdout, colorize=True)
def execute_test(self, test_program, args=None):
self.test_command = None
self.test_stdout = None
self.test_stderr = None
self.test_retcode = None
self.test_fail_message = None
cmd = [os.path.abspath(os.path.join(self.script_dir, test_program))]
if args:
cmd.extend(args)
self.test_command = " ".join(cmd)
p = subprocess.Popen(cmd,
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
cwd=self.script_dir)
self.test_stdout, self.test_stderr = p.communicate()
self.test_retcode = p.returncode
return self.test_retcode
def fail(self, message):
self.test_fail_message = message
return TestRunner.FAIL
def is_almost_equal(self, v1, v2, prec=1e-4):
if v1 is None and v2 is None:
return True
if v1 is None:
return self.is_almost_equal(v2, 0.0)
if v2 is None:
return self.is_almost_equal(v1, 0.0)
if abs(v1-v2) > prec:
return False
return True
def compare_trees(self, tree1, tree2):
status = TestRunner.PASS
tree1.update_splits()
tree2.update_splits()
        splits = set(tree1.split_edges) | set(tree2.split_edges)
for split in splits:
if split not in tree2.split_edges:
return self.fail("Split {} not found on tree 2: {}".format(split, tree1.taxon_set.split_as_newick_string(split)))
if split not in tree1.split_edges:
return self.fail("Split {} not found on tree 1: {}".format(split, tree2.taxon_set.split_as_newick_string(split)))
edge1_len = tree1.split_edges[split].length
edge2_len = tree2.split_edges[split].length
if not self.is_almost_equal(edge1_len, edge2_len):
return self.fail("Unequal edge length for split {} {}: {} vs. {}".format(
split,
tree1.taxon_set.split_as_newick_string(split),
edge1_len,
edge2_len))
if len(tree1.split_edges) != len(tree2.split_edges):
return self.fail("Different number of splits on trees: {} vs. {}".format(len(tree1.split_edges), len(tree2.split_edges)))
return status
def compare_tree_traversal(self, tree1, tree2, traverse_func):
status = TestRunner.PASS
tree1_nodes = [nd for nd in getattr(tree1, traverse_func)()]
tree2_nodes = [nd for nd in getattr(tree2, traverse_func)()]
if len(tree1_nodes) != len(tree2_nodes):
return self.fail("Trees have different number of nodes: {} vs. {}".format(len(tree1_nodes), len(tree2_nodes)))
for nd_idx, node1 in enumerate(tree1_nodes):
node2 = tree2_nodes[nd_idx]
if node1.taxon is not node2.taxon:
return self.fail("Different taxa found during postorder traversal of nodes: {} vs. {}".format(node1.taxon, node2.taxon))
            if node1.label != node2.label:
return self.fail("Different labels found during postorder traversal of nodes: {} vs. {}".format(node1.label, node2.label))
if not self.is_almost_equal(node1.edge.length, node2.edge.length):
return self.fail("Different edge lengths found during postorder traversal of nodes: {} vs. {}".format(node1.edge.length, node2.edge.length))
return status
def test_tree_postorder_iter(self):
treefile = os.path.join(self.data_dir, "basic", "pythonidae.postorder.newick")
self.execute_test("tree_postorder_iter",
[treefile, "newick"])
if self.test_retcode != 0:
return TestRunner.ERROR
test_visits = []
test_edge_lens = []
for item in self.test_stdout.split("\n"):
if not item:
continue
node, edge_len = item.split("\t")
test_visits.append(node)
test_edge_lens.append(float(edge_len))
taxa = dendropy.TaxonSet()
check_tree = dendropy.Tree.get_from_path(treefile, "newick", taxon_set=taxa)
check_visits = []
check_edge_lens = []
for check_node in check_tree.postorder_node_iter():
label = self.get_node_tag(check_node)
edge_len = check_node.edge.length if check_node.edge.length else 0.0
check_visits.append(label)
check_edge_lens.append(edge_len)
for idx in range(len(check_visits)):
            if idx >= len(test_visits):
return self.fail("Insufficient visits: expecting {} but found {}".format(len(check_visits), len(test_visits)))
n1 = check_visits[idx]
n2 = test_visits[idx]
if n1 != n2:
return self.fail("Incorrect visit {}: '{}' vs. '{}'".format(idx+1, n1, n2))
e1 = check_edge_lens[idx]
e2 = test_edge_lens[idx]
if not self.is_almost_equal(e1, e2):
return self.fail("Incorrect node edge length on visit {}: {} vs. {}".format(idx+1, e1, e2))
return TestRunner.PASS
def test_tree_leaf_iter(self):
treefile = os.path.join(self.data_dir, "basic", "pythonidae.postorder.newick")
self.execute_test("tree_leaf_iter",
[treefile, "newick"])
if self.test_retcode != 0:
return TestRunner.ERROR
test_leaves = []
for item in self.test_stdout.split("\n"):
if not item:
continue
idx, label = item.split("\t")
test_leaves.append(label)
taxa = dendropy.TaxonSet()
check_tree = dendropy.Tree.get_from_path(treefile, "newick", taxon_set=taxa)
check_leaves = [ self.get_node_tag(nd) for nd in check_tree.leaf_iter() ]
test_leaves.sort()
check_leaves.sort()
if set(check_leaves) != set(test_leaves):
return self.fail("Unequal leaf set: {} vs. {}".format(set(check_leaves), set(test_leaves)))
if len(check_leaves) != len(test_leaves):
return self.fail("Duplicate leaves: {}".format(set(test_leaves)))
return TestRunner.PASS
def test_tree_child_iter(self):
treefile = os.path.join(self.data_dir, "basic", "pythonidae.postorder.newick")
self.execute_test("tree_child_iter",
[treefile, "newick"])
if self.test_retcode != 0:
return TestRunner.ERROR
test_node_children = {}
for item in self.test_stdout.split("\n"):
if not item:
continue
nodes = item.split("\t")
test_node_children[nodes[0]] = nodes[1:]
taxa = dendropy.TaxonSet()
check_tree = dendropy.Tree.get_from_path(treefile, "newick", taxon_set=taxa)
check_node_children = {}
for check_node in check_tree.postorder_node_iter():
check_node_children[self.get_node_tag(check_node)] = [self.get_node_tag(child) for child in check_node.child_nodes()]
for check_node, check_children in check_node_children.items():
if check_node not in test_node_children:
return self.fail("Node not visited: '{}'".format(check_node))
if test_node_children[check_node] != check_children:
return self.fail("Incorrect children: '{}' vs. '{}'".format(check_children, test_node_children[check_node]))
return TestRunner.PASS
def compare_tree_scores(self, tree_filename, data_filename):
full_tree_filepath = os.path.join(self.data_dir, "basic", tree_filename)
full_data_filepath = os.path.join(self.data_dir, "basic", data_filename)
self.execute_test("score_phylogenetic_tree",
[full_tree_filepath, full_data_filepath])
if self.test_retcode != 0:
return TestRunner.ERROR
test_ln_like = float(self.test_stdout)
taxa = dendropy.TaxonSet()
tree = dendropy.Tree.get_from_path(full_tree_filepath, "newick", taxon_set=taxa)
data = dendropy.DnaCharacterMatrix.get_from_path(full_data_filepath, "fasta", taxon_set=taxa)
paup_tree, results = paup.estimate_model(
char_matrix=data,
tree_model=tree,
num_states=1,
unequal_base_freqs=False,
gamma_rates=False,
prop_invar=False,
tree_est_criterion="likelihood",
tree_user_brlens=True,
paup_path="paup")
check_ln_like = float(results['likelihood'])
if not self.is_almost_equal(check_ln_like, test_ln_like):
return self.fail("Unequal log-likelihoods (tree: '{}', data: '{}'): {} vs. {}".format(tree_filename, data_filename, check_ln_like, test_ln_like))
return TestRunner.PASS
def test_tree_score1(self):
return self.compare_tree_scores("primates.beast.mcct.medianh.newick.tre", "primates.chars.fasta")
def test_tree_score2(self):
return self.compare_tree_scores("pythonidae.tree.newick", "pythonidae.chars.fasta")
def test_tree_read_from_file(self):
treefile = os.path.join(self.data_dir, "basic", "bird_orders.nex")
self.execute_test("read_tree",
[treefile, "nexus"])
if self.test_retcode != 0:
return TestRunner.ERROR
taxa = dendropy.TaxonSet()
check_tree = dendropy.Tree.get_from_path(treefile, "nexus", taxon_set=taxa)
test_tree = dendropy.Tree.get_from_string(self.test_stdout, "newick", taxon_set=taxa)
# return self.compare_tree_traversal(check_tree, test_tree, "postorder_node_iter")
return self.compare_trees(check_tree, test_tree)
def test_read_dna_sequences(self):
datafile = os.path.join(self.data_dir, "basic", "pythonidae.chars.fasta")
self.execute_test("read_dna_sequences",
[datafile, "fasta"])
if self.test_retcode != 0:
return TestRunner.ERROR
dna_matrix = dendropy.DnaCharacterMatrix.get_from_path(datafile, "fasta", row_type="STR")
expected_partials = {}
for taxon in dna_matrix:
# label = taxon.label.replace(" ", "_")
label = taxon.label
expected_partials[label] = []
states = dna_matrix[taxon]
for state in states:
sub_partials = self.dna_to_partials[state]
expected_partials[label].extend(list(sub_partials))
rows = self.test_stdout.split("\n")
observed_partials = {}
for row in rows:
if not row:
continue
label, partials = row.split(":")
partials = [float(v) for v in partials.split(";") if v]
observed_partials[label] = list(partials)
for label in expected_partials:
if label not in observed_partials:
return self.fail("Sequence '{}' not found: {}".format(label,
",".join(["'{}'".format(t) for t in observed_partials])))
p1 = expected_partials[label]
p2 = observed_partials[label]
if len(p1) != len(p2):
return self.fail("Sequence '{}': expected {} elements but found {}".format(label,
len(p1), len(p2)))
for idx, i1 in enumerate(p1):
i2 = p2[idx]
if not self.is_almost_equal(i1, i2):
return self.fail("Sequence '{}': character {}: expected {} but found {}".format(label,
idx, i1, i2))
return TestRunner.PASS
def run(self):
tests_to_run = []
for name, value in inspect.getmembers(self, callable):
if name.startswith("test"):
tests_to_run.append((name, value))
passes = []
fails = []
for test_idx, (test_name, test_call) in enumerate(tests_to_run):
self.cout("@turquoise@<<{: 4d}/{:<4d}>>: {}: ".format(test_idx+1, len(tests_to_run), test_name))
result = test_call()
if result == TestRunner.PASS:
self.cout("@green@<<PASS>>\n")
passes.append(test_name)
if self.test_pass_message and self.verbosity > 3:
self.cout(" : - {}\n".format(self.test_pass_message))
elif result == TestRunner.FAIL:
self.cout("@fuchsia@<<FAIL>>\n")
fails.append(test_name)
if self.test_fail_message:
self.cout(" : - {}\n".format(self.test_fail_message))
if self.break_on_fail:
self.summarize(passes, fails)
return
else:
self.cout("@red@<<ERROR>>\n")
self.cout(" @red@<<: Executed:>> {}\n".format(self.test_command))
self.cout(" @red@<<: Return Code:>> {}\n".format(self.test_retcode))
self.cout(" @red@<<: Standard Output:>> {}\n".format(self.test_stdout))
self.cout(" @red@<<: Standard Error:>> {}\n".format(self.test_stderr))
if self.break_on_fail:
self.summarize(passes, fails)
return
self.summarize(passes, fails)
def summarize(self, passes, fails):
self.cout("\n--\nTests completed.\n")
self.cout("@turquoise@<<{}>> tests run with @green@<<{}>> successes and @fuchsia@<<{}>> failures.\n".format(len(passes)+len(fails), len(passes), len(fails)))
def main():
"""
Main CLI handler.
"""
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("-v", "--verbosity",
default=1,
help="control messaging level")
parser.add_argument("-b", "--break-on-fail",
action="store_true",
default=False,
help="terminate tests after first failure")
args = parser.parse_args()
test_runner = TestRunner(args)
test_runner.run()
if __name__ == '__main__':
main()
|
jeetsukumaran/treeshrew
|
test/scripts/run-tests.py
|
Python
|
gpl-2.0
| 19,904
|
import datetime
from dateutil import rrule
from django.conf import settings as django_settings
from django.contrib.contenttypes import fields
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.db.models import Q
from django.template.defaultfilters import date
from django.urls import reverse
from django.utils import timezone
from django.utils.translation import gettext, gettext_lazy as _
from schedule.models.calendars import Calendar
from schedule.models.rules import Rule
from schedule.utils import OccurrenceReplacer
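# Rank rrule frequencies and BY* parameters from coarsest to finest granularity.
# Event._event_params uses these orderings to decide which BY* parameters from
# the rule should be narrowed down by the event's start date.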
freq_dict_order = {
"YEARLY": 0,
"MONTHLY": 1,
"WEEKLY": 2,
"DAILY": 3,
"HOURLY": 4,
"MINUTELY": 5,
"SECONDLY": 6,
}
param_dict_order = {
"byyearday": 1,
"bymonth": 1,
"bymonthday": 2,
"byweekno": 2,
"byweekday": 3,
"byhour": 4,
"byminute": 5,
"bysecond": 6,
}
class EventManager(models.Manager):
def get_for_object(self, content_object, distinction="", inherit=True):
return EventRelation.objects.get_events_for_object(
content_object, distinction, inherit
)
class Event(models.Model):
"""
    This model stores metadata for a date. You can relate this data to many
    other models.
"""
start = models.DateTimeField(_("start"), db_index=True)
end = models.DateTimeField(
_("end"),
db_index=True,
help_text=_("The end time must be later than the start time."),
)
title = models.CharField(_("title"), max_length=255)
description = models.TextField(_("description"), blank=True)
creator = models.ForeignKey(
django_settings.AUTH_USER_MODEL,
on_delete=models.SET_NULL,
null=True,
blank=True,
verbose_name=_("creator"),
related_name="creator",
)
created_on = models.DateTimeField(_("created on"), auto_now_add=True)
updated_on = models.DateTimeField(_("updated on"), auto_now=True)
rule = models.ForeignKey(
Rule,
on_delete=models.SET_NULL,
null=True,
blank=True,
verbose_name=_("rule"),
help_text=_("Select '----' for a one time only event."),
)
end_recurring_period = models.DateTimeField(
_("end recurring period"),
null=True,
blank=True,
db_index=True,
help_text=_("This date is ignored for one time only events."),
)
calendar = models.ForeignKey(
Calendar, on_delete=models.CASCADE, verbose_name=_("calendar")
)
color_event = models.CharField(_("Color event"), blank=True, max_length=10)
objects = EventManager()
class Meta:
verbose_name = _("event")
verbose_name_plural = _("events")
index_together = (("start", "end"),)
def __str__(self):
return gettext("%(title)s: %(start)s - %(end)s") % {
"title": self.title,
"start": date(self.start, django_settings.DATE_FORMAT),
"end": date(self.end, django_settings.DATE_FORMAT),
}
@property
def seconds(self):
return (self.end - self.start).total_seconds()
@property
def minutes(self):
return float(self.seconds) / 60
@property
def hours(self):
return float(self.seconds) / 3600
def get_absolute_url(self):
return reverse("event", args=[self.id])
def get_occurrences(self, start, end, clear_prefetch=True):
"""
>>> rule = Rule(frequency = "MONTHLY", name = "Monthly")
>>> rule.save()
>>> event = Event(rule=rule, start=datetime.datetime(2008,1,1,tzinfo=pytz.utc), end=datetime.datetime(2008,1,2))
>>> event.rule
<Rule: Monthly>
>>> occurrences = event.get_occurrences(datetime.datetime(2008,1,24), datetime.datetime(2008,3,2))
>>> ["%s to %s" %(o.start, o.end) for o in occurrences]
['2008-02-01 00:00:00+00:00 to 2008-02-02 00:00:00+00:00', '2008-03-01 00:00:00+00:00 to 2008-03-02 00:00:00+00:00']
        Ensure that if an event has no rule, it appears only once.
>>> event = Event(start=datetime.datetime(2008,1,1,8,0), end=datetime.datetime(2008,1,1,9,0))
>>> occurrences = event.get_occurrences(datetime.datetime(2008,1,24), datetime.datetime(2008,3,2))
>>> ["%s to %s" %(o.start, o.end) for o in occurrences]
[]
"""
# Explanation of clear_prefetch:
#
# Periods, and their subclasses like Week, call
# prefetch_related('occurrence_set') on all events in their
# purview. This reduces the database queries they make from
# len()+1 to 2. However, having a cached occurrence_set on the
# Event model instance can sometimes cause Events to have a
# different view of the state of occurrences than the Period
# managing them.
#
# E.g., if you create an unsaved occurrence, move it to a
# different time [which saves the event], keep a reference to
# the moved occurrence, & refetch all occurrences from the
# Period without clearing the prefetch cache, you'll end up
# with two Occurrences for the same event but different moved
# states. It's a complicated scenario, but can happen. (See
# tests/test_occurrence.py#test_moved_occurrences, which caught
# this bug in the first place.)
#
# To prevent this, we clear the select_related cache by default
# before we call an event's get_occurrences, but allow Period
# to override this cache clear since it already fetches all
# occurrence_sets via prefetch_related in its get_occurrences.
if clear_prefetch:
self.occurrence_set._remove_prefetched_objects()
persisted_occurrences = self.occurrence_set.all()
occ_replacer = OccurrenceReplacer(persisted_occurrences)
occurrences = self._get_occurrence_list(start, end)
final_occurrences = []
for occ in occurrences:
# replace occurrences with their persisted counterparts
if occ_replacer.has_occurrence(occ):
p_occ = occ_replacer.get_occurrence(occ)
# ...but only if they are within this period
if p_occ.start < end and p_occ.end >= start:
final_occurrences.append(p_occ)
else:
final_occurrences.append(occ)
# then add persisted occurrences which originated outside of this period but now
# fall within it
final_occurrences += occ_replacer.get_additional_occurrences(start, end)
return final_occurrences
def get_rrule_object(self, tzinfo):
if self.rule is None:
return
params = self._event_params()
frequency = self.rule.rrule_frequency()
if timezone.is_naive(self.start):
dtstart = self.start
else:
dtstart = tzinfo.normalize(self.start).replace(tzinfo=None)
if self.end_recurring_period is None:
until = None
elif timezone.is_naive(self.end_recurring_period):
until = self.end_recurring_period
else:
until = tzinfo.normalize(
self.end_recurring_period.astimezone(tzinfo)
).replace(tzinfo=None)
return rrule.rrule(frequency, dtstart=dtstart, until=until, **params)
def _create_occurrence(self, start, end=None):
if end is None:
end = start + (self.end - self.start)
return Occurrence(
event=self, start=start, end=end, original_start=start, original_end=end
)
def get_occurrence(self, date):
use_naive = timezone.is_naive(date)
tzinfo = timezone.utc
if timezone.is_naive(date):
date = timezone.make_aware(date, timezone.utc)
if date.tzinfo:
tzinfo = date.tzinfo
rule = self.get_rrule_object(tzinfo)
if rule:
next_occurrence = rule.after(
tzinfo.normalize(date).replace(tzinfo=None), inc=True
)
next_occurrence = tzinfo.localize(next_occurrence)
else:
next_occurrence = self.start
if next_occurrence == date:
try:
return Occurrence.objects.get(event=self, original_start=date)
except Occurrence.DoesNotExist:
if use_naive:
next_occurrence = timezone.make_naive(next_occurrence, tzinfo)
return self._create_occurrence(next_occurrence)
def _get_occurrence_list(self, start, end):
"""
Returns a list of occurrences that fall completely or partially inside
the timespan defined by start (inclusive) and end (exclusive)
"""
if self.rule is not None:
duration = self.end - self.start
use_naive = timezone.is_naive(start)
# Use the timezone from the start date
tzinfo = timezone.utc
if start.tzinfo:
tzinfo = start.tzinfo
# Limit timespan to recurring period
occurrences = []
if self.end_recurring_period and self.end_recurring_period < end:
end = self.end_recurring_period
start_rule = self.get_rrule_object(tzinfo)
start = start.replace(tzinfo=None)
if timezone.is_aware(end):
end = tzinfo.normalize(end).replace(tzinfo=None)
o_starts = []
            # Occurrences that start before the timespan but end inside or after it
closest_start = start_rule.before(start, inc=False)
if closest_start is not None and closest_start + duration > start:
o_starts.append(closest_start)
            # Occurrence starts that fall inside the timespan (end-inclusive)
occs = start_rule.between(start, end, inc=True)
            # An occurrence that starts exactly at the end of the timespan is
            # potentially included above; let's remove it if that's the case.
if len(occs) > 0:
if occs[-1] == end:
occs.pop()
# Add the occurrences found inside timespan
o_starts.extend(occs)
# Create the Occurrence objects for the found start dates
for o_start in o_starts:
o_start = tzinfo.localize(o_start)
if use_naive:
o_start = timezone.make_naive(o_start, tzinfo)
o_end = o_start + duration
occurrence = self._create_occurrence(o_start, o_end)
if occurrence not in occurrences:
occurrences.append(occurrence)
return occurrences
else:
# check if event is in the period
if self.start < end and self.end > start:
return [self._create_occurrence(self.start)]
else:
return []
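    # Illustrative sketch (not part of the original module): for a daily event
    # that starts 2020-01-01 09:00 and lasts one hour, the window
    # [2020-01-06 09:30, 2020-01-08 09:00) yields the occurrence starting
    # 2020-01-06 09:00 (it began before the window but ends inside it) and the
    # one starting 2020-01-07 09:00, but not the one starting exactly at
    # 2020-01-08 09:00, because the end of the timespan is exclusive.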
def _occurrences_after_generator(self, after=None):
"""
        Returns a generator that produces unpersisted occurrences after the
        datetime ``after``. The generator is bounded by ``self.end_recurring_period``
        (via the recurrence rule), if one is set.
"""
tzinfo = timezone.utc
if after is None:
after = timezone.now()
elif not timezone.is_naive(after):
tzinfo = after.tzinfo
rule = self.get_rrule_object(tzinfo)
if rule is None:
if self.end > after:
yield self._create_occurrence(self.start, self.end)
return
date_iter = iter(rule)
difference = self.end - self.start
loop_counter = 0
for o_start in date_iter:
o_start = tzinfo.localize(o_start)
o_end = o_start + difference
if o_end > after:
yield self._create_occurrence(o_start, o_end)
loop_counter += 1
def occurrences_after(self, after=None, max_occurrences=None):
"""
        Returns a generator that produces occurrences after the datetime
        ``after``, including all of the persisted Occurrences. Optionally, the
        generator stops after ``max_occurrences`` occurrences or once
        ``self.end_recurring_period`` has been reached, whichever comes first.
"""
if after is None:
after = timezone.now()
occ_replacer = OccurrenceReplacer(self.occurrence_set.all())
generator = self._occurrences_after_generator(after)
trickies = list(
self.occurrence_set.filter(
original_start__lte=after, start__gte=after
).order_by("start")
)
for index, nxt in enumerate(generator):
if max_occurrences and index > max_occurrences - 1:
break
if len(trickies) > 0 and (nxt is None or nxt.start > trickies[0].start):
yield trickies.pop(0)
yield occ_replacer.get_occurrence(nxt)
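    # Illustrative sketch (not part of the original module): a typical caller
    # only wants the next few upcoming occurrences, e.g.
    #   upcoming = list(event.occurrences_after(timezone.now(), max_occurrences=5))
    # Persisted occurrences (for example ones that have been moved) are
    # substituted for their generated counterparts via the OccurrenceReplacer above.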
@property
def event_start_params(self):
start = self.start
params = {
"byyearday": start.timetuple().tm_yday,
"bymonth": start.month,
"bymonthday": start.day,
"byweekno": start.isocalendar()[1],
"byweekday": start.weekday(),
"byhour": start.hour,
"byminute": start.minute,
"bysecond": start.second,
}
return params
@property
def event_rule_params(self):
return self.rule.get_params()
def _event_params(self):
freq_order = freq_dict_order[self.rule.frequency]
rule_params = self.event_rule_params
start_params = self.event_start_params
event_params = {}
if len(rule_params) == 0:
return event_params
for param in rule_params:
# start date influences rule params
if (
param in param_dict_order
and param_dict_order[param] > freq_order
and param in start_params
):
sp = start_params[param]
if sp == rule_params[param] or (
hasattr(rule_params[param], "__iter__") and sp in rule_params[param]
):
event_params[param] = [sp]
else:
event_params[param] = rule_params[param]
else:
event_params[param] = rule_params[param]
return event_params
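    # Illustrative sketch (not part of the original module): for a WEEKLY rule
    # whose params include byhour=[9, 14], freq_dict_order['WEEKLY'] is 2 and
    # param_dict_order['byhour'] is 4, so byhour is finer than the frequency.
    # If the event starts at 09:00, byhour is narrowed to [9]; a coarser
    # parameter such as bymonth would be passed through unchanged.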
@property
def event_params(self):
event_params = self._event_params()
start = self.effective_start
empty = False
if not start:
empty = True
elif self.end_recurring_period and start > self.end_recurring_period:
empty = True
return event_params, empty
@property
def effective_start(self):
if self.pk and self.end_recurring_period:
occ_generator = self._occurrences_after_generator(self.start)
try:
return next(occ_generator).start
except StopIteration:
pass
elif self.pk:
return self.start
return None
@property
def effective_end(self):
if self.pk and self.end_recurring_period:
params, empty = self.event_params
if empty or not self.effective_start:
return None
elif self.end_recurring_period:
occ = None
occ_generator = self._occurrences_after_generator(self.start)
for occ in occ_generator:
pass
return occ.end
elif self.pk:
return datetime.datetime.max
return None
class EventRelationManager(models.Manager):
"""
>>> import datetime
>>> EventRelation.objects.all().delete()
>>> CalendarRelation.objects.all().delete()
>>> data = {
... 'title': 'Test1',
... 'start': datetime.datetime(2008, 1, 1),
... 'end': datetime.datetime(2008, 1, 11)
... }
>>> Event.objects.all().delete()
>>> event1 = Event(**data)
>>> event1.save()
>>> data['title'] = 'Test2'
>>> event2 = Event(**data)
>>> event2.save()
>>> user1 = User(username='alice')
>>> user1.save()
>>> user2 = User(username='bob')
>>> user2.save()
>>> event1.create_relation(user1, 'owner')
>>> event1.create_relation(user2, 'viewer')
>>> event2.create_relation(user1, 'viewer')
"""
# Currently not supported
    # Multiple-level reverse lookups of generic relations appear to be
# unsupported in Django, which makes sense.
#
# def get_objects_for_event(self, event, model, distinction=None):
# '''
# returns a queryset full of instances of model, if it has an EventRelation
# with event, and distinction
# >>> event = Event.objects.get(title='Test1')
# >>> EventRelation.objects.get_objects_for_event(event, User, 'owner')
# [<User: alice>]
# >>> EventRelation.objects.get_objects_for_event(event, User)
# [<User: alice>, <User: bob>]
# '''
# if distinction:
# dist_q = Q(eventrelation__distinction = distinction)
# else:
# dist_q = Q()
# ct = ContentType.objects.get_for_model(model)
# return model.objects.filter(
# dist_q,
# eventrelation__content_type = ct,
# eventrelation__event = event
# )
def get_events_for_object(self, content_object, distinction="", inherit=True):
"""
        Returns a queryset of events that relate to the object through the
        given distinction.
        If inherit is False it will not consider the calendars that the events
        belong to. If inherit is True it will inherit all of the relations and
        distinctions of any calendar the event belongs to, as long as the
        relation has inheritable set to True. (See Calendar)
>>> event = Event.objects.get(title='Test1')
>>> user = User.objects.get(username = 'alice')
>>> EventRelation.objects.get_events_for_object(user, 'owner', inherit=False)
[<Event: Test1: Tuesday, Jan. 1, 2008-Friday, Jan. 11, 2008>]
        If a distinction is not given, the relations are not filtered by
        distinction.
>>> EventRelation.objects.get_events_for_object(user, inherit=False)
[<Event: Test1: Tuesday, Jan. 1, 2008-Friday, Jan. 11, 2008>, <Event: Test2: Tuesday, Jan. 1, 2008-Friday, Jan. 11, 2008>]
Now if there is a Calendar
>>> calendar = Calendar(name = 'MyProject')
>>> calendar.save()
And an event that belongs to that calendar
>>> event = Event.objects.get(title='Test2')
>>> calendar.events.add(event)
If we relate this calendar to some object with inheritable set to true,
that relation will be inherited
>>> user = User.objects.get(username='bob')
>>> cr = calendar.create_relation(user, 'viewer', True)
>>> EventRelation.objects.get_events_for_object(user, 'viewer')
[<Event: Test1: Tuesday, Jan. 1, 2008-Friday, Jan. 11, 2008>, <Event: Test2: Tuesday, Jan. 1, 2008-Friday, Jan. 11, 2008>]
"""
ct = ContentType.objects.get_for_model(type(content_object))
if distinction:
dist_q = Q(eventrelation__distinction=distinction)
cal_dist_q = Q(calendar__calendarrelation__distinction=distinction)
else:
dist_q = Q()
cal_dist_q = Q()
if inherit:
inherit_q = Q(
cal_dist_q,
calendar__calendarrelation__content_type=ct,
calendar__calendarrelation__object_id=content_object.id,
calendar__calendarrelation__inheritable=True,
)
else:
inherit_q = Q()
event_q = Q(
dist_q,
eventrelation__content_type=ct,
eventrelation__object_id=content_object.id,
)
return Event.objects.filter(inherit_q | event_q)
def create_relation(self, event, content_object, distinction=""):
"""
Creates a relation between event and content_object.
See EventRelation for help on distinction.
"""
return EventRelation.objects.create(
event=event, distinction=distinction, content_object=content_object
)
class EventRelation(models.Model):
"""
    This model relates data to an Event. There is also a distinction, so that
    data can be related in different ways. A good example: if you have events
    that are only visible to certain users, you could create a relation between
    events and users with a distinction of 'visibility' or 'ownership'.
    event: a foreign key relation to an Event model.
    content_type: a foreign key relation to ContentType of the generic object
    object_id: the id of the generic object
    content_object: the generic foreign key to the generic object
    distinction: a string representing a distinction of the relation; a User
        could have a 'viewer' relation and an 'owner' relation, for example.
    DISCLAIMER: while this model is a nice out-of-the-box feature to have, it
    may not scale well. If you use it, keep that in mind.
"""
event = models.ForeignKey(Event, on_delete=models.CASCADE, verbose_name=_("event"))
content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE)
object_id = models.IntegerField(db_index=True)
content_object = fields.GenericForeignKey("content_type", "object_id")
distinction = models.CharField(_("distinction"), max_length=20)
objects = EventRelationManager()
class Meta:
verbose_name = _("event relation")
verbose_name_plural = _("event relations")
index_together = [("content_type", "object_id")]
def __str__(self):
return "{}({})-{}".format(
self.event.title, self.distinction, self.content_object
)
class Occurrence(models.Model):
event = models.ForeignKey(Event, on_delete=models.CASCADE, verbose_name=_("event"))
title = models.CharField(_("title"), max_length=255, blank=True)
description = models.TextField(_("description"), blank=True)
start = models.DateTimeField(_("start"), db_index=True)
end = models.DateTimeField(_("end"), db_index=True)
cancelled = models.BooleanField(_("cancelled"), default=False)
original_start = models.DateTimeField(_("original start"))
original_end = models.DateTimeField(_("original end"))
created_on = models.DateTimeField(_("created on"), auto_now_add=True)
updated_on = models.DateTimeField(_("updated on"), auto_now=True)
class Meta:
verbose_name = _("occurrence")
verbose_name_plural = _("occurrences")
index_together = (("start", "end"),)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if not self.title and self.event_id:
self.title = self.event.title
if not self.description and self.event_id:
self.description = self.event.description
def moved(self):
return self.original_start != self.start or self.original_end != self.end
moved = property(moved)
def move(self, new_start, new_end):
self.start = new_start
self.end = new_end
self.save()
def cancel(self):
self.cancelled = True
self.save()
def uncancel(self):
self.cancelled = False
self.save()
@property
def seconds(self):
return (self.end - self.start).total_seconds()
@property
def minutes(self):
return float(self.seconds) / 60
@property
def hours(self):
return float(self.seconds) / 3600
def get_absolute_url(self):
if self.pk is not None:
return reverse(
"occurrence",
kwargs={"occurrence_id": self.pk, "event_id": self.event.id},
)
return reverse(
"occurrence_by_date",
kwargs={
"event_id": self.event.id,
"year": self.start.year,
"month": self.start.month,
"day": self.start.day,
"hour": self.start.hour,
"minute": self.start.minute,
"second": self.start.second,
},
)
def get_cancel_url(self):
if self.pk is not None:
return reverse(
"cancel_occurrence",
kwargs={"occurrence_id": self.pk, "event_id": self.event.id},
)
return reverse(
"cancel_occurrence_by_date",
kwargs={
"event_id": self.event.id,
"year": self.start.year,
"month": self.start.month,
"day": self.start.day,
"hour": self.start.hour,
"minute": self.start.minute,
"second": self.start.second,
},
)
def get_edit_url(self):
if self.pk is not None:
return reverse(
"edit_occurrence",
kwargs={"occurrence_id": self.pk, "event_id": self.event.id},
)
return reverse(
"edit_occurrence_by_date",
kwargs={
"event_id": self.event.id,
"year": self.start.year,
"month": self.start.month,
"day": self.start.day,
"hour": self.start.hour,
"minute": self.start.minute,
"second": self.start.second,
},
)
def __str__(self):
return gettext("%(start)s to %(end)s") % {
"start": date(self.start, django_settings.DATE_FORMAT),
"end": date(self.end, django_settings.DATE_FORMAT),
}
def __lt__(self, other):
return self.end < other.end
def __hash__(self):
if not self.pk:
raise TypeError("Model instances without primary key value are unhashable")
return hash(self.pk)
def __eq__(self, other):
return (
isinstance(other, Occurrence)
and self.original_start == other.original_start
and self.original_end == other.original_end
)
|
llazzaro/django-scheduler
|
schedule/models/events.py
|
Python
|
bsd-3-clause
| 26,473
|
import itertools
import string
import unicodedata
from functools import partial
from random import choice, randrange
from unittest.mock import Mock
import pytest
from orderedset import OrderedSet
from notifications_utils import SMS_CHAR_COUNT_LIMIT
from notifications_utils.countries import Country
from notifications_utils.recipients import (
Cell,
RecipientCSV,
Row,
first_column_headings,
)
from notifications_utils.template import (
EmailPreviewTemplate,
LetterImageTemplate,
SMSMessageTemplate,
)
def _sample_template(template_type, content='foo'):
return {
'email': EmailPreviewTemplate(
{'content': content, 'subject': 'bar', 'template_type': 'email'}
),
'sms': SMSMessageTemplate(
{'content': content, 'template_type': 'sms'}
),
'letter': LetterImageTemplate(
{'content': content, 'subject': 'bar', 'template_type': 'letter'},
image_url='https://example.com',
page_count=1,
),
}.get(template_type)
def _index_rows(rows):
return set(row.index for row in rows)
@pytest.mark.parametrize('template_type, expected', (
('email', ['email address']),
('sms', ['phone number']),
('letter', [
'address line 1',
'address line 2',
'address line 3',
'address line 4',
'address line 5',
'address line 6',
'postcode',
'address line 7',
]),
))
def test_recipient_column_headers(template_type, expected):
recipients = RecipientCSV("", template=_sample_template(template_type))
assert (
recipients.recipient_column_headers
) == (
first_column_headings[template_type]
) == (
expected
)
@pytest.mark.parametrize(
"file_contents,template_type,expected",
[
(
"",
"sms",
[],
),
(
"phone number",
"sms",
[],
),
(
"""
phone number,name
+44 123, test1
+44 456,test2
""",
"sms",
[
[('phone number', '+44 123'), ('name', 'test1')],
[('phone number', '+44 456'), ('name', 'test2')]
]
),
(
"""
phone number,name
+44 123,
+44 456
""",
"sms",
[
[('phone number', '+44 123'), ('name', None)],
[('phone number', '+44 456'), ('name', None)]
]
),
(
"""
email address,name
test@example.com,test1
test2@example.com, test2
""",
"email",
[
[('email address', 'test@example.com'), ('name', 'test1')],
[('email address', 'test2@example.com'), ('name', 'test2')]
]
),
(
"""
email address
test@example.com,test1,red
test2@example.com, test2,blue
""",
"email",
[
[('email address', 'test@example.com'), (None, ['test1', 'red'])],
[('email address', 'test2@example.com'), (None, ['test2', 'blue'])]
]
),
(
"""
email address,name
test@example.com,"test1"
test2@example.com," test2 "
test3@example.com," test3"
""",
"email",
[
[('email address', 'test@example.com'), ('name', 'test1')],
[('email address', 'test2@example.com'), ('name', 'test2')],
[('email address', 'test3@example.com'), ('name', 'test3')]
]
),
(
"""
email address,date,name
test@example.com,"Nov 28, 2016",test1
test2@example.com,"Nov 29, 2016",test2
""",
"email",
[
[('email address', 'test@example.com'), ('date', 'Nov 28, 2016'), ('name', 'test1')],
[('email address', 'test2@example.com'), ('date', 'Nov 29, 2016'), ('name', 'test2')]
]
),
(
"""
address_line_1
Alice
Bob
""",
"letter",
[
[('address_line_1', 'Alice')],
[('address_line_1', 'Bob')]
]
),
(
"""
address line 1,address line 2,address line 5,address line 6,postcode,name,thing
A. Name,,,,XM4 5HQ,example,example
""",
"letter",
[[
('addressline1', 'A. Name'),
('addressline2', None),
# optional address rows 3 and 4 not in file
('addressline5', None),
                ('addressline6', None),
('postcode', 'XM4 5HQ'),
('name', 'example'),
('thing', 'example'),
]]
),
(
"""
phone number, list, list, list
07900900001, cat, rat, gnat
07900900002, dog, hog, frog
07900900003, elephant
""",
"sms",
[
[
('phone number', '07900900001'),
('list', ['cat', 'rat', 'gnat'])
],
[
('phone number', '07900900002'),
('list', ['dog', 'hog', 'frog'])
],
[
('phone number', '07900900003'),
('list', ['elephant', None, None])
],
]
)
]
)
def test_get_rows(file_contents, template_type, expected):
rows = list(RecipientCSV(file_contents, template=_sample_template(template_type)).rows)
if not expected:
assert rows == expected
for index, row in enumerate(expected):
assert len(rows[index].items()) == len(row)
for key, value in row:
assert rows[index].get(key).data == value
def test_get_rows_does_no_error_checking_of_rows_or_cells(mocker):
has_error_mock = mocker.patch.object(Row, 'has_error')
has_bad_recipient_mock = mocker.patch.object(Row, 'has_bad_recipient')
has_missing_data_mock = mocker.patch.object(Row, 'has_missing_data')
cell_recipient_error_mock = mocker.patch.object(Cell, 'recipient_error')
recipients = RecipientCSV(
"""
email address, name
a@b.com,
a@b.com, My Name
a@b.com,
""",
template=_sample_template('email', 'hello ((name))'),
max_errors_shown=3
)
rows = recipients.get_rows()
for _ in range(3):
assert next(rows).recipient == 'a@b.com'
assert has_error_mock.called is False
assert has_bad_recipient_mock.called is False
assert has_missing_data_mock.called is False
assert cell_recipient_error_mock.called is False
def test_get_rows_only_iterates_over_file_once(mocker):
row_mock = mocker.patch('notifications_utils.recipients.Row')
recipients = RecipientCSV(
"""
email address, name
a@b.com,
a@b.com, My Name
a@b.com,
""",
template=_sample_template('email', 'hello ((name))'),
)
rows = recipients.get_rows()
for _ in range(3):
next(rows)
assert row_mock.call_count == 3
assert recipients.rows_as_list is None
@pytest.mark.parametrize(
"file_contents,template_type,expected",
[
(
"""
phone number,name
07700900460, test1
+447700 900 460,test2
,
""",
'sms',
[
{
'index': 0,
'message_too_long': False
},
{
'index': 1,
'message_too_long': False
},
]
),
(
"""
email address,name,colour
test@example.com,test1,blue
test2@example.com, test2,red
""",
'email',
[
{
'index': 0,
'message_too_long': False
},
{
'index': 1,
'message_too_long': False
},
]
)
]
)
def test_get_annotated_rows(file_contents, template_type, expected):
recipients = RecipientCSV(
file_contents,
template=_sample_template(template_type, 'hello ((name))'),
max_initial_rows_shown=1
)
for index, expected_row in enumerate(expected):
annotated_row = list(recipients.rows)[index]
assert annotated_row.index == expected_row['index']
assert annotated_row.message_too_long == expected_row['message_too_long']
assert len(list(recipients.rows)) == 2
assert len(list(recipients.initial_rows)) == 1
assert not recipients.has_errors
def test_get_rows_with_errors():
recipients = RecipientCSV(
"""
email address, name
a@b.com,
a@b.com,
a@b.com,
a@b.com,
a@b.com,
a@b.com,
""",
template=_sample_template('email', 'hello ((name))'),
max_errors_shown=3
)
assert len(list(recipients.rows_with_errors)) == 6
assert len(list(recipients.initial_rows_with_errors)) == 3
assert recipients.has_errors
@pytest.mark.parametrize('template_type, row_count, header, filler, row_with_error', [
('email', 500, "email address\n", "test@example.com\n", "test at example dot com"),
('sms', 500, "phone number\n", "07900900123\n", "12345"),
])
def test_big_list_validates_right_through(template_type, row_count, header, filler, row_with_error):
big_csv = RecipientCSV(
header + (filler * (row_count - 1) + row_with_error),
template=_sample_template(template_type),
max_errors_shown=100,
max_initial_rows_shown=3
)
assert len(list(big_csv.rows)) == row_count
assert _index_rows(big_csv.rows_with_bad_recipients) == {row_count - 1} # 0 indexed
assert _index_rows(big_csv.rows_with_errors) == {row_count - 1}
assert len(list(big_csv.initial_rows_with_errors)) == 1
assert big_csv.has_errors
@pytest.mark.parametrize('template_type, row_count, header, filler', [
('email', 50, "email address\n", "test@example.com\n"),
('sms', 50, "phone number\n", "07900900123\n"),
])
def test_check_if_message_too_long_for_sms_but_not_email_in_CSV(
mocker, template_type, row_count, header, filler
):
# we do not validate email size for CSVs to avoid performance issues
    is_message_too_long = mocker.patch(
        'notifications_utils.template.Template.is_message_too_long',
        return_value=False,
    )
    recipients = RecipientCSV(
        header + filler * row_count,
        template=_sample_template(template_type),
        max_errors_shown=100,
        max_initial_rows_shown=3
    )
    for row in recipients.rows:
        row.message_too_long
    if template_type == 'email':
        is_message_too_long.assert_not_called()
    else:
        assert is_message_too_long.called
def test_overly_big_list_stops_processing_rows_beyond_max(mocker):
mock_strip_and_remove_obscure_whitespace = mocker.patch(
'notifications_utils.recipients.strip_and_remove_obscure_whitespace'
)
mock_insert_or_append_to_dict = mocker.patch(
'notifications_utils.recipients.insert_or_append_to_dict'
)
big_csv = RecipientCSV(
"phonenumber,name\n" + ("07700900123,example\n" * 123),
template=_sample_template('sms', content='hello ((name))'),
)
big_csv.max_rows = 10
# Our CSV has lots of rows…
assert big_csv.too_many_rows
assert len(big_csv) == 123
# …but we’ve only called the expensive whitespace function on each
# of the 2 cells in the first 10 rows
assert len(
mock_strip_and_remove_obscure_whitespace.call_args_list
) == 20
# …and we’ve only called the function which builds the internal data
# structure once for each of the first 10 rows
assert len(
mock_insert_or_append_to_dict.call_args_list
) == 10
def test_file_with_lots_of_empty_columns():
process = Mock()
lots_of_commas = ',' * 10_000
for row in RecipientCSV(
f'phone_number{lots_of_commas}\n' + (
f'07900900900{lots_of_commas}\n' * 100
),
template=_sample_template('sms'),
):
assert [
(key, cell.data) for key, cell in row.items()
] == [
# Note that we haven’t stored any of the empty cells
('phonenumber', '07900900900')
]
process()
assert process.call_count == 100
def test_empty_column_names():
recipient_csv = RecipientCSV(
"""
phone_number,,,name
07900900123,foo,bar,baz
""",
template=_sample_template('sms'),
)
assert recipient_csv[0]['phone_number'].data == '07900900123'
assert recipient_csv[0][''].data == ['foo', 'bar']
assert recipient_csv[0]['name'].data == 'baz'
@pytest.mark.parametrize(
"file_contents,template,expected_recipients,expected_personalisation",
[
(
"""
phone number,name, date
+44 123,test1,today
+44456, ,tomorrow
,,
, ,
""",
_sample_template('sms', 'hello ((name))'),
['+44 123', '+44456'],
[{'name': 'test1'}, {'name': None}]
),
(
"""
email address,name,colour
test@example.com,test1,red
testatexampledotcom,test2,blue
""",
_sample_template('email', '((colour))'),
['test@example.com', 'testatexampledotcom'],
[
{'colour': 'red'},
{'colour': 'blue'}
]
),
(
"""
email address
test@example.com,test1,red
testatexampledotcom,test2,blue
""",
_sample_template('email'),
['test@example.com', 'testatexampledotcom'],
[]
)
]
)
def test_get_recipient(file_contents, template, expected_recipients, expected_personalisation):
recipients = RecipientCSV(file_contents, template=template)
for index, row in enumerate(expected_personalisation):
for key, value in row.items():
assert recipients[index].recipient == expected_recipients[index]
assert recipients[index].personalisation.get(key) == value
@pytest.mark.parametrize(
"file_contents,template,expected_recipients,expected_personalisation",
[
(
"""
email address,test
test@example.com,test1,red
testatexampledotcom,test2,blue
""",
_sample_template('email', '((test))'),
[
(0, 'test@example.com'),
(1, 'testatexampledotcom')
],
[
{'emailaddress': 'test@example.com', 'test': 'test1'},
{'emailaddress': 'testatexampledotcom', 'test': 'test2'},
],
)
]
)
def test_get_recipient_respects_order(file_contents,
template,
expected_recipients,
expected_personalisation):
recipients = RecipientCSV(file_contents, template=template)
for row, email in expected_recipients:
assert (
recipients[row].index,
recipients[row].recipient,
recipients[row].personalisation,
) == (
row,
email,
expected_personalisation[row],
)
@pytest.mark.parametrize(
"file_contents,template_type,expected,expected_missing",
[
(
"", 'sms', [], set(['phone number', 'name'])
),
(
"""
phone number,name
07700900460,test1
07700900460,test1
07700900460,test1
""",
'sms',
['phone number', 'name'],
set()
),
(
"""
email address,name,colour
""",
'email',
['email address', 'name', 'colour'],
set()
),
(
"""
address_line_1, address_line_2, postcode, name
""",
'letter',
['address_line_1', 'address_line_2', 'postcode', 'name'],
set()
),
(
"""
email address,colour
""",
'email',
['email address', 'colour'],
set(['name'])
),
(
"""
address_line_1, address_line_2, name
""",
'letter',
['address_line_1', 'address_line_2', 'name'],
set()
),
(
"""
phone number,list,list,name,list
""",
'sms',
['phone number', 'list', 'name'],
set()
),
]
)
def test_column_headers(file_contents, template_type, expected, expected_missing):
recipients = RecipientCSV(file_contents, template=_sample_template(template_type, '((name))'))
assert recipients.column_headers == expected
assert recipients.missing_column_headers == expected_missing
assert recipients.has_errors == bool(expected_missing)
@pytest.mark.parametrize(
'content',
[
'hello',
'hello ((name))',
]
)
@pytest.mark.parametrize(
'file_contents,template_type',
[
pytest.param('', 'sms', marks=pytest.mark.xfail),
pytest.param('name', 'sms', marks=pytest.mark.xfail),
pytest.param('email address', 'sms', marks=pytest.mark.xfail),
pytest.param(
'address_line_1',
'letter',
marks=pytest.mark.xfail,
),
pytest.param(
'address_line_1, address_line_2',
'letter',
marks=pytest.mark.xfail,
),
pytest.param(
'address_line_6, postcode',
'letter',
marks=pytest.mark.xfail,
),
pytest.param(
'address_line_1, postcode, address_line_7',
'letter',
marks=pytest.mark.xfail,
),
('phone number', 'sms'),
('phone number,name', 'sms'),
('email address', 'email'),
('email address,name', 'email'),
('PHONENUMBER', 'sms'),
('email_address', 'email'),
(
'address_line_1, address_line_2, postcode',
'letter'
),
(
'address_line_1, address_line_2, address_line_7',
'letter'
),
(
'address_line_1, address_line_2, address_line_3',
'letter'
),
(
'address_line_4, address_line_5, address_line_6',
'letter'
),
(
'address_line_1, address_line_2, address_line_3, address_line_4, address_line_5, address_line_6, postcode',
'letter'
),
]
)
def test_recipient_column(content, file_contents, template_type):
assert RecipientCSV(file_contents, template=_sample_template(template_type, content)).has_recipient_columns
@pytest.mark.parametrize(
"file_contents,template_type,rows_with_bad_recipients,rows_with_missing_data",
[
(
"""
phone number,name,date
07700900460,test1,test1
07700900460,test1
+44 123,test1,test1
07700900460,test1,test1
07700900460,test1
+1644000000,test1,test1
,test1,test1
""",
'sms',
{2, 5}, {1, 4, 6}
),
(
"""
phone number,name
07700900460,test1,test2
""",
'sms',
set(), set()
),
(
"""
""",
'sms',
set(), set()
),
(
# missing postcode
"""
address_line_1,address_line_2,address_line_3,address_line_4,address_line_5,postcode,date
name, building, street, town, county, SE1 7LS,today
name, building, street, town, county, , today
""",
'letter',
{1}, set()
),
(
# not enough address fields
"""
address_line_1, postcode, date
name, SE1 7LS, today
""",
'letter',
{0}, set()
),
(
# optional address fields not filled in
"""
address_line_1,address_line_2,address_line_3,address_line_4,address_line_5,postcode,date
name ,123 fake st. , , , ,SE1 7LS,today
name , , , , ,SE1 7LS,today
""",
'letter',
{1}, set()
),
(
# Can use any address columns
"""
address_line_3, address_line_4, address_line_7, date
name , 123 fake st., SE1 7LS, today
""",
'letter',
set(), set()
),
]
)
@pytest.mark.parametrize('partial_instance', [
partial(RecipientCSV),
partial(RecipientCSV, allow_international_sms=False),
])
def test_bad_or_missing_data(
file_contents, template_type, rows_with_bad_recipients, rows_with_missing_data, partial_instance
):
recipients = partial_instance(file_contents, template=_sample_template(template_type, '((date))'))
assert _index_rows(recipients.rows_with_bad_recipients) == rows_with_bad_recipients
assert _index_rows(recipients.rows_with_missing_data) == rows_with_missing_data
if rows_with_bad_recipients or rows_with_missing_data:
assert recipients.has_errors is True
@pytest.mark.parametrize("file_contents,rows_with_bad_recipients", [
(
"""
phone number
800000000000
1234
+447900123
""",
{0, 1, 2},
),
(
"""
phone number, country
1-202-555-0104, USA
+12025550104, USA
23051234567, Mauritius
""",
set(),
),
])
def test_international_recipients(file_contents, rows_with_bad_recipients):
recipients = RecipientCSV(
file_contents,
template=_sample_template('sms'),
allow_international_sms=True,
)
assert _index_rows(recipients.rows_with_bad_recipients) == rows_with_bad_recipients
def test_errors_when_too_many_rows():
recipients = RecipientCSV(
"email address\n" + ("a@b.com\n" * 101),
template=_sample_template('email'),
)
# Confirm the normal max_row limit
assert recipients.max_rows == 100_000
# Override to make this test faster
recipients.max_rows = 100
assert recipients.too_many_rows is True
assert recipients.has_errors is True
assert recipients.rows[99]['email_address'].data == 'a@b.com'
# We stop processing subsequent rows
assert recipients.rows[100] is None
@pytest.mark.parametrize(
"file_contents,template_type,guestlist,count_of_rows_with_errors",
[
(
"""
phone number
07700900460
07700900461
07700900462
07700900463
""",
'sms',
['+447700900460'], # Same as first phone number but in different format
3
),
(
"""
phone number
7700900460
447700900461
07700900462
""",
'sms',
['07700900460', '07700900461', '07700900462', '07700900463', 'test@example.com'],
0
),
(
"""
email address
IN_GUESTLIST@EXAMPLE.COM
not_in_guestlist@example.com
""",
'email',
['in_guestlist@example.com', '07700900460'], # Email case differs to the one in the CSV
1
)
]
)
def test_recipient_guestlist(file_contents, template_type, guestlist, count_of_rows_with_errors):
recipients = RecipientCSV(
file_contents,
template=_sample_template(template_type),
guestlist=guestlist
)
if count_of_rows_with_errors:
assert not recipients.allowed_to_send_to
else:
assert recipients.allowed_to_send_to
# Make sure the guestlist isn’t emptied by reading it. If it’s an iterator then
# there’s a risk that it gets emptied after being read once
recipients.guestlist = (str(fake_number) for fake_number in range(7700900888, 7700900898))
list(recipients.guestlist)
assert not recipients.allowed_to_send_to
assert recipients.has_errors
# An empty guestlist is treated as no guestlist at all
recipients.guestlist = []
assert recipients.allowed_to_send_to
recipients.guestlist = itertools.chain()
assert recipients.allowed_to_send_to
def test_detects_rows_which_result_in_overly_long_messages():
template = SMSMessageTemplate(
{'content': '((placeholder))', 'template_type': 'sms'},
sender=None,
prefix=None,
)
recipients = RecipientCSV(
"""
phone number,placeholder
07700900460,1
07700900461,{one_under}
07700900462,{exactly}
07700900463,{one_over}
""".format(
one_under='a' * (SMS_CHAR_COUNT_LIMIT - 1),
exactly='a' * SMS_CHAR_COUNT_LIMIT,
one_over='a' * (SMS_CHAR_COUNT_LIMIT + 1),
),
template=template
)
assert _index_rows(recipients.rows_with_errors) == {3}
assert _index_rows(recipients.rows_with_message_too_long) == {3}
assert recipients.has_errors
assert recipients[0].has_error_spanning_multiple_cells is False
assert recipients[1].has_error_spanning_multiple_cells is False
assert recipients[2].has_error_spanning_multiple_cells is False
assert recipients[3].has_error_spanning_multiple_cells is True
def test_detects_rows_which_result_in_empty_messages():
template = SMSMessageTemplate(
{'content': '((show??content))', 'template_type': 'sms'},
sender=None,
prefix=None,
)
recipients = RecipientCSV(
"""
phone number,show
07700900460,yes
07700900462,no
07700900463,yes
""",
template=template
)
assert _index_rows(recipients.rows_with_errors) == {1}
assert _index_rows(recipients.rows_with_empty_message) == {1}
assert recipients.has_errors
assert recipients[0].has_error_spanning_multiple_cells is False
assert recipients[1].has_error_spanning_multiple_cells is True
assert recipients[2].has_error_spanning_multiple_cells is False
@pytest.mark.parametrize(
"key, expected",
sum([
[(key, expected) for key in group] for expected, group in [
('07700900460', (
'phone number',
' PHONENUMBER',
'phone_number',
'phone-number',
'phoneNumber'
)),
('Jo', (
'FIRSTNAME',
'first name',
'first_name ',
'first-name',
'firstName'
)),
('Bloggs', (
'Last Name',
'LASTNAME',
' last_name',
'last-name',
'lastName '
))
]
], [])
)
def test_ignores_spaces_and_case_in_placeholders(key, expected):
recipients = RecipientCSV(
"""
phone number,FIRSTNAME, Last Name
07700900460, Jo, Bloggs
""",
template=_sample_template('sms', content='((phone_number)) ((First Name)) ((lastname))')
)
first_row = recipients[0]
assert first_row.get(key).data == expected
assert first_row[key].data == expected
assert first_row.recipient == '07700900460'
assert len(first_row.items()) == 3
assert not recipients.has_errors
assert recipients.missing_column_headers == set()
recipients.placeholders = {'one', 'TWO', 'Thirty_Three'}
assert recipients.missing_column_headers == {'one', 'TWO', 'Thirty_Three'}
assert recipients.has_errors
@pytest.mark.parametrize('character, name', (
(' ', 'SPACE'),
# these ones don’t have unicode names
('\n', None), # newline
('\r', None), # carriage return
('\t', None), # tab
('\u180E', 'MONGOLIAN VOWEL SEPARATOR'),
('\u200B', 'ZERO WIDTH SPACE'),
('\u200C', 'ZERO WIDTH NON-JOINER'),
('\u200D', 'ZERO WIDTH JOINER'),
('\u2060', 'WORD JOINER'),
('\uFEFF', 'ZERO WIDTH NO-BREAK SPACE'),
# all the things
(' \n\r\t\u000A\u000D\u180E\u200B\u200C\u200D\u2060\uFEFF', None)
))
def test_ignores_leading_whitespace_in_file(character, name):
if name is not None:
assert unicodedata.name(character) == name
recipients = RecipientCSV(
'{}emailaddress\ntest@example.com'.format(character),
template=_sample_template('email'),
)
first_row = recipients[0]
assert recipients.column_headers == ['emailaddress']
assert recipients.recipient_column_headers == ['email address']
assert recipients.missing_column_headers == set()
assert recipients.placeholders == ['email address']
assert first_row.get('email address').data == 'test@example.com'
assert first_row['email address'].data == 'test@example.com'
assert first_row.recipient == 'test@example.com'
assert not recipients.has_errors
def test_error_if_too_many_recipients():
recipients = RecipientCSV(
'phone number,\n07700900460,\n07700900460,\n07700900460,',
template=_sample_template('sms'),
remaining_messages=2
)
assert recipients.has_errors
assert recipients.more_rows_than_can_send
def test_dont_error_if_too_many_recipients_not_specified():
recipients = RecipientCSV(
'phone number,\n07700900460,\n07700900460,\n07700900460,',
template=_sample_template('sms'),
)
assert not recipients.has_errors
assert not recipients.more_rows_than_can_send
@pytest.mark.parametrize('index, expected_row', [
(
0,
{
'phone number': '07700 90000 1',
'colour': 'red',
},
),
(
1,
{
'phone_number': '07700 90000 2',
'COLOUR': 'green',
},
),
(
2,
{
'p h o n e n u m b e r': '07700 90000 3',
' colour ': 'blue'
},
),
pytest.param(
3,
{
'phone number': 'foo'
},
marks=pytest.mark.xfail(raises=IndexError),
),
(
-1,
{
'p h o n e n u m b e r': '07700 90000 3',
' colour ': 'blue'
},
),
])
def test_recipients_can_be_accessed_by_index(index, expected_row):
recipients = RecipientCSV(
"""
phone number, colour
07700 90000 1, red
07700 90000 2, green
07700 90000 3, blue
""",
template=_sample_template('sms'),
)
for key, value in expected_row.items():
assert recipients[index][key].data == value
@pytest.mark.parametrize('international_sms', (True, False))
def test_multiple_sms_recipient_columns(international_sms):
recipients = RecipientCSV(
"""
phone number, phone number, phone_number, foo
07900 900111, 07900 900222, 07900 900333, bar
""",
template=_sample_template('sms'),
allow_international_sms=international_sms,
)
assert recipients.column_headers == ['phone number', 'phone_number', 'foo']
assert recipients.column_headers_as_column_keys == dict(phonenumber='', foo='').keys()
assert recipients.rows[0].get('phone number').data == (
'07900 900333'
)
assert recipients.rows[0].get('phone_number').data == (
'07900 900333'
)
assert recipients.rows[0].get('phone number').error is None
assert recipients.duplicate_recipient_column_headers == OrderedSet([
'phone number', 'phone_number'
])
assert recipients.has_errors
@pytest.mark.parametrize('column_name', (
"phone_number", "phonenumber", "phone number", "phone-number", 'p h o n e n u m b e r'
))
def test_multiple_sms_recipient_columns_with_missing_data(column_name):
recipients = RecipientCSV(
"""
names, phone number, {}
"Joanna and Steve", 07900 900111
""".format(column_name),
template=_sample_template('sms'),
allow_international_sms=True,
)
expected_column_headers = ['names', 'phone number']
if column_name != "phone number":
expected_column_headers.append(column_name)
assert recipients.column_headers == expected_column_headers
assert recipients.column_headers_as_column_keys == dict(phonenumber='', names='').keys()
# A piece of weirdness uncovered: since rows are created before spaces in column names are normalised, when
# there are duplicate recipient columns and there is data for only one of the columns, if the columns have the same
# spacing, phone number data will be a list of this one phone number and None, while if the spacing style differs
# between two duplicate column names, the phone number data will be None. If there are no duplicate columns
# then our code finds the phone number well regardless of the spacing, so this should not affect our users.
phone_number_data = None
if column_name == "phone number":
phone_number_data = ['07900 900111', None]
assert recipients.rows[0]['phonenumber'].data == phone_number_data
assert recipients.rows[0].get('phone number').error is None
expected_duplicated_columns = ['phone number']
if column_name != "phone number":
expected_duplicated_columns.append(column_name)
assert recipients.duplicate_recipient_column_headers == OrderedSet(expected_duplicated_columns)
assert recipients.has_errors
def test_multiple_email_recipient_columns():
recipients = RecipientCSV(
"""
EMAILADDRESS, email_address, foo
one@two.com, two@three.com, bar
""",
template=_sample_template('email'),
)
assert recipients.rows[0].get('email address').data == (
'two@three.com'
)
assert recipients.rows[0].get('email address').error is None
assert recipients.has_errors
assert recipients.duplicate_recipient_column_headers == OrderedSet([
'EMAILADDRESS', 'email_address'
])
assert recipients.has_errors
def test_multiple_letter_recipient_columns():
recipients = RecipientCSV(
"""
address line 1, Address Line 2, address line 1, address_line_2
1,2,3,4
""",
template=_sample_template('letter'),
)
assert recipients.rows[0].get('addressline1').data == (
'3'
)
assert recipients.rows[0].get('addressline1').error is None
assert recipients.has_errors
assert recipients.duplicate_recipient_column_headers == OrderedSet([
'address line 1', 'Address Line 2', 'address line 1', 'address_line_2'
])
assert recipients.has_errors
def test_displayed_rows_when_some_rows_have_errors():
recipients = RecipientCSV(
"""
email address, name
a@b.com,
a@b.com,
a@b.com, My Name
a@b.com,
a@b.com,
""",
template=_sample_template('email', '((name))'),
max_errors_shown=3
)
assert len(list(recipients.displayed_rows)) == 3
def test_displayed_rows_when_there_are_no_rows_with_errors():
recipients = RecipientCSV(
"""
email address, name
a@b.com, My Name
a@b.com, My Name
a@b.com, My Name
a@b.com, My Name
""",
template=_sample_template('email', '((name))'),
max_errors_shown=3
)
assert len(list(recipients.displayed_rows)) == 4
def test_multi_line_placeholders_work():
recipients = RecipientCSV(
"""
email address, data
a@b.com, "a\nb\n\nc"
""",
template=_sample_template('email', '((data))'),
)
assert recipients.rows[0].personalisation['data'] == 'a\nb\n\nc'
@pytest.mark.parametrize('extra_args, expected_errors, expected_bad_rows', (
({}, True, {0}),
({'allow_international_letters': False}, True, {0}),
({'allow_international_letters': True}, False, set()),
))
def test_accepts_international_addresses_when_allowed(
extra_args, expected_errors, expected_bad_rows
):
recipients = RecipientCSV(
"""
address line 1, address line 2, address line 3
First Lastname, 123 Example St, Fiji
First Lastname, 123 Example St, SW1A 1AA
""",
template=_sample_template('letter'),
**extra_args
)
assert recipients.has_errors is expected_errors
assert _index_rows(recipients.rows_with_bad_recipients) == expected_bad_rows
# Prove that the error isn’t because the given country is unknown
assert recipients[0].as_postal_address.country == Country('Fiji')
def test_address_validation_speed():
# We should be able to validate 1000 lines of address data in about
# a second – if it starts to get slow, something is inefficient
number_of_lines = 1000
uk_addresses_with_valid_postcodes = '\n'.join((
'{n} Example Street, London, {a}{b} {c}{d}{e}'.format(
n=randrange(1000),
a=choice(['n', 'e', 'sw', 'se', 'w']),
b=choice(range(1, 10)),
c=choice(range(1, 10)),
d=choice('ABDefgHJLNPqrstUWxyZ'),
e=choice('ABDefgHJLNPqrstUWxyZ'),
)
for i in range(number_of_lines)
))
recipients = RecipientCSV(
'address line 1, address line 2, address line 3\n' + (
uk_addresses_with_valid_postcodes
),
template=_sample_template('letter'),
allow_international_letters=False,
)
for row in recipients:
assert not row.has_bad_postal_address
def test_email_validation_speed():
email_addresses = '\n'.join((
'{a}{b}@example-{n}.com,Example,Thursday'.format(
n=randrange(1000),
a=choice(string.ascii_letters),
b=choice(string.ascii_letters),
)
for i in range(1000)
))
recipients = RecipientCSV(
'email address,name,day\n' + email_addresses,
template=_sample_template(
'email',
content=f'''
hello ((name)) today is ((day))
here’s the letter ‘a’ 1000 times:
{'a' * 1000}
'''
),
)
for row in recipients:
assert not row.has_error
@pytest.mark.parametrize('should_validate', [True, False])
def test_recipient_csv_checks_should_validate_flag(should_validate):
template = _sample_template('sms')
template.is_message_empty = Mock(return_value=False)
recipients = RecipientCSV(
"""phone number,name
07700900460, test1
+447700 900 460,test2""",
template=template,
should_validate=should_validate
)
recipients._get_error_for_field = Mock(return_value=None)
list(recipients.get_rows())
assert template.is_message_empty.called is should_validate
assert recipients._get_error_for_field.called is should_validate
|
alphagov/notifications-utils
|
tests/test_recipient_csv.py
|
Python
|
mit
| 40,659
|
#!/usr/bin/env python
# This file is part of OMG-tools.
#
# OMG-tools -- Optimal Motion Generation-tools
# Copyright (C) 2016 Ruben Van Parys & Tim Mercy, KU Leuven.
# All rights reserved.
#
# OMG-tools is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3 of the License, or (at your option) any later version.
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
import roslib; roslib.load_manifest('p3dx_motionplanner')
import omgtools as omg
import rospy
from geometry_msgs.msg import Twist, Pose
from nav_msgs.msg import Odometry
from gazebo_msgs.msg import ModelState, ModelStates
from gazebo_msgs.srv import SetModelState
import numpy as np
import tf
from std_msgs.msg import Bool
from p3dx_motionplanner.msg import Trigger, FleetTrajectories, P3DXPose, Obstacle, Room, Settings
class Controller(object):
_cmd_twist = Twist()
_trigger = Trigger()
def __init__(self, sample_time, update_time, n_robots, obst_traj=[]):
rospy.init_node('p3dx_controller')
self._sample_time = sample_time
self._update_time = update_time
self._mp_status = False
self._n_robots = n_robots
self._obst_traj = obst_traj
self._robobst = list(obst_traj.keys())
self._robot_est_pose = [[0., 0., 0.] for k in range(n_robots)]
self._robot_real_pose = [[0., 0., 0.] for k in range(n_robots)]
self._robobst_est_pose = [[0., 0.] for k in range(len(self._robobst))]
self._robobst_est_velocity = [[0., 0.] for k in range(len(self._robobst))]
self._vel_traj = [{'v': [], 'w': []} for k in range(n_robots)]
self._vel_traj_applied = [{'v': [], 'w': []} for k in range(n_robots)]
self._cmd_vel_topic = [rospy.Publisher('robot'+str(k)+'/p3dx/cmd_vel', Twist, queue_size=1) for k in range(n_robots)]
self._cmd_vel_robobst_topic = [rospy.Publisher('robobst'+str(l)+'/p3dx/cmd_vel', Twist, queue_size=1) for l in range(len(self._robobst))]
self._mp_trigger_topic = rospy.Publisher('mp_trigger', Trigger, queue_size=1)
self._mp_configure_topic = rospy.Publisher('mp_configure', Settings, queue_size=1)
rospy.Subscriber('mp_result', FleetTrajectories, self.get_mp_result)
rospy.Subscriber('mp_feedback', Bool, self.get_mp_feedback)
# for k in range(n_robots):
# rospy.Subscriber('robot'+str(k)+'/p3dx/base_pose_ground_truth', Odometry, callback=self.get_est_pose, callback_args=k)
rospy.Subscriber('/gazebo/model_states', ModelStates, callback=self.get_model_states)
# for l in range(len(self._robobst)):
# rospy.Subscriber('robobst'+str(l)+'/p3dx/base_pose_ground_truth', Odometry, callback=self.get_est_pose_robobst, callback_args=l)
def get_model_states(self, data):
for k in range(self._n_robots):
index = data.name.index('p3dx'+str(k))
self._robot_est_pose[k][0] = data.pose[index].position.x
self._robot_est_pose[k][1] = data.pose[index].position.y
qt = data.pose[index].orientation
r, p, y = tf.transformations.euler_from_quaternion([qt.x, qt.y, qt.z, qt.w])
self._robot_est_pose[k][2] = y
for k in range(len(self._robobst)):
index = data.name.index('p3dx_obs'+str(k))
self._robobst_est_pose[k] = [data.pose[index].position.x, data.pose[index].position.y]
self._robobst_est_velocity[k] = [data.twist[index].linear.x, data.twist[index].linear.y]
# def get_est_pose(self, data, k):
# self._robot_est_pose[k][0] = data.pose.pose.position.x
# self._robot_est_pose[k][1] = data.pose.pose.position.y
# qt = data.pose.pose.orientation
# r, p, y = tf.transformations.euler_from_quaternion([qt.x, qt.y, qt.z, qt.w])
# self._robot_est_pose[k][2] = y
# def get_est_pose_robobst(self, data, k):
# self._robobst_est_pose[k][0] = data.pose.pose.position.x
# self._robobst_est_pose[k][1] = data.pose.pose.position.y
# self._robobst_est_velocity[k][0] = data.twist.twist.linear.x
# self._robobst_est_velocity[k][1] = data.twist.twist.linear.y
# def get_real_pose(self, data, k):
# self._robot_real_pose[k][0] = data.pose[1].position.x
# self._robot_real_pose[k][1] = data.pose[1].position.y
# qt = data.pose[1].orientation
# r, p, y = tf.transformations.euler_from_quaternion([qt.x, qt.y, qt.z, qt.w])
# self._robot_real_pose[k][2] = y
def get_mp_feedback(self, data):
        self._mp_status = data.data
def get_mp_result(self, data):
print('got result!')
v_traj = [data.trajectories[k].v_traj for k in range(self._n_robots)]
w_traj = [data.trajectories[k].w_traj for k in range(self._n_robots)]
self.store_trajectories(v_traj, w_traj)
def update(self):
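        # Called once per control sample: every `update_time` seconds a fresh
        # trajectory is loaded (when available) and the motion planner is
        # re-triggered; in between, the stored velocity samples are published.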
pose0 = [self._robot_est_pose[k][:] for k in range(self._n_robots)]
if self._init:
if not self._new_trajectories:
return
self._index = int(self._update_time/self._sample_time)
self._init = False
if self._index >= int(self._update_time/self._sample_time):
if self._new_trajectories:
# load fresh trajectories
self.load_trajectories()
self._new_trajectories = False
self._time += self._index*self._sample_time
self._index = 0
# trigger motion planner
self.fire_motionplanner(self._time, pose0)
else:
print('overtime!')
# send velocity sample
for k in range(self._n_robots):
self._cmd_twist.linear.x = self._vel_traj[k]['v'][self._index]
self._cmd_twist.angular.z = self._vel_traj[k]['w'][self._index]
self._cmd_vel_topic[k].publish(self._cmd_twist)
self._vel_traj_applied[k]['v'].append(self._cmd_twist.linear.x)
self._vel_traj_applied[k]['w'].append(self._cmd_twist.angular.z)
for l, k in enumerate(self._robobst):
if (self._time) >= self._obst_traj[k]['t']:
cmd_twist = Twist()
cmd_twist.linear.x = self._obst_traj[k]['v']
cmd_twist.angular.z = self._obst_traj[k]['w']
self._cmd_vel_robobst_topic[l].publish(cmd_twist)
self._index += 1
def load_trajectories(self):
for k in range(self._n_robots):
self._vel_traj[k]['v'] = self._vel_traj_strg[k]['v'][:]
self._vel_traj[k]['w'] = self._vel_traj_strg[k]['w'][:]
def store_trajectories(self, v_traj, w_traj):
self._vel_traj_strg = [{} for _ in range(self._n_robots)]
for k in range(self._n_robots):
self._vel_traj_strg[k] = {'v': v_traj[k], 'w': w_traj[k]}
self._new_trajectories = True
def proceed(self):
if len(self._vel_traj_applied[0]['v']) == 0:
return True
stop = True
for k in range(self._n_robots):
pos_nrm = np.linalg.norm(np.array(self._robot_est_pose[k]) - np.array(self._goal[k].pose))
vel_nrm = np.linalg.norm([self._vel_traj_applied[k]['v'][-1], self._vel_traj_applied[k]['w'][-1]])
stop *= (pos_nrm < 0.1 and vel_nrm < 0.1)
return not stop
def set_goal(self, goal):
self._goal = goal
self._time = 0.
pose0 = self._robot_est_pose[:]
self._new_trajectories = False
self.fire_motionplanner(self._time, pose0)
self._init = True
def fire_motionplanner(self, time, pose0):
print('firing!')
self._trigger.goal = self._goal
self._trigger.state = [P3DXPose(pose0[k][:]) for k in range(self._n_robots)]
self._trigger.obstacles = [Obstacle(pose=self._robobst_est_pose[k], velocity=self._robobst_est_velocity[k]) for k in range(len(self._robobst))]
self._trigger.current_time = time
self._mp_trigger_topic.publish(self._trigger)
def start(self):
rate = rospy.Rate(1./self._sample_time)
proceed = True
while (not self._mp_status):
rate.sleep()
print('controller started!')
self.set_goal(self._settings.terminal_pose)
k = 0
while (proceed):
k += 1
            self.update()
            proceed = self.proceed()
rate.sleep()
for k in range(self._n_robots):
self._cmd_twist.linear.x = 0.
self._cmd_twist.angular.z = 0.
self._cmd_vel_topic[k].publish(self._cmd_twist)
print('target reached!')
def init_gazebo(self, st):
rospy.set_param('gazebo/use_sim_time', True)
try:
ssm = rospy.ServiceProxy('/gazebo/set_model_state', SetModelState)
except rospy.ServiceException as e:
print('Service call failed: %s' % (e))
for k in range(self._n_robots):
pose0, twist0 = Pose(), Twist()
pose0.position.x = st.init_pose[k].pose[0]
pose0.position.y = st.init_pose[k].pose[1]
x, y, z, w = tf.transformations.quaternion_from_euler(0, 0, st.init_pose[k].pose[2])
pose0.orientation.x = x
pose0.orientation.y = y
pose0.orientation.z = z
pose0.orientation.w = w
twist0.linear.x = 0.
twist0.angular.z = 0.
mod0 = ModelState('p3dx'+str(k), pose0, twist0, 'world')
ssm(mod0)
for l, k in enumerate(st.robobst):
pose0, twist0 = Pose(), Twist()
pose0.position.x = st.obstacles[k].pose[0]
pose0.position.y = st.obstacles[k].pose[1]
x, y, z, w = tf.transformations.quaternion_from_euler(0, 0, st.obstacles[k].pose[2])
pose0.orientation.x = x
pose0.orientation.y = y
pose0.orientation.z = z
pose0.orientation.w = w
twist0.linear.x = 0.
twist0.angular.z = 0.
mod0 = ModelState('p3dx_obs'+str(l), pose0, twist0, 'world')
ssm(mod0)
def configure(self):
print('configure controller')
st = Settings()
# timing
st.sample_time = self._sample_time
st.update_time = self._update_time
# robots
configuration = omg.RegularPolyhedron(0.5, self._n_robots, np.pi).vertices.T
configurationT = omg.RegularPolyhedron(0.5, self._n_robots, np.pi/2.).vertices.T
init_ctr = [-3.5, -1.]
terminal_ctr = [3.5, 1.]
st.fleet_config = [P3DXPose(pose=[c[0], c[1], 0.]) for c in configuration]
st.init_pose = [P3DXPose(pose=[init_ctr[0]+c[0], init_ctr[1]+c[1], np.pi/2.]) for c in configuration]
st.terminal_pose = [P3DXPose(pose=[terminal_ctr[0]+c[0], terminal_ctr[1]+c[1], 0.]) for c in configurationT]
# environment
st.room = Room(position=[0., 0.], shape=[10., 5.])
obstacles = []
obstacles.append(Obstacle(pose=[-2., -2.3, np.pi/2.], shape=[4., 0.1]))
obstacles.append(Obstacle(pose=[0.5, -1.5, np.pi/2.], shape=[0.35]))
st.obstacles = obstacles
st.robobst = self._robobst
# set motionplanner
self._mp_configure_topic.publish(st)
# init gazebo
self.init_gazebo(st)
self._settings = st
obst_traj = {1: {'t': 10., 'v': 0.3, 'w': 0.}}
if __name__ == '__main__':
sample_time = 0.01
update_time = 0.5
controller = Controller(sample_time, update_time, n_robots=3, obst_traj=obst_traj)
rospy.sleep(0.5)
controller.configure()
controller.start()
|
meco-group/omg-tools
|
examples/ros_example/src/p3dx_motionplanner/src/controller.py
|
Python
|
lgpl-3.0
| 12,112
|
# -----------------------------------------------------------
# basic implementation of a queue for multiprocessing
#
# (C) 2015-2017 Frank Hofmann, Berlin, Germany
# Released under GNU Public License (GPL)
# email frank.hofmann@efho.de
# -----------------------------------------------------------
# define basic module
import multiprocessing
from time import sleep
# define worker function
def calculate(processName, tasks, results):
# output: evaluation started
print("[%s] evaluation routine starts" % processName)
while True:
newValue = tasks.get()
if newValue < 0:
# output: evaluation is finished
print("[%s] evaluation routine quits" % processName)
# indicate: finished
results.put(-1)
break
else:
# compute result
compute = newValue * newValue
sleep(0.02*newValue)
# output received value, and calculation result
print("[%s] received value: %i" % (processName, newValue))
print("[%s] calculated value: %i" % (processName, compute))
# add result to the queue
results.put(compute)
return
if __name__ == "__main__":
# define ipc manager
manager = multiprocessing.Manager()
# define queue for tasks, and for the computation results
tasks = manager.Queue()
results = manager.Queue()
# define process pool with four processes
numberOfProcesses = 4
pool = multiprocessing.Pool(processes=numberOfProcesses)
processes = []
# initiate the worker processes
for i in range(numberOfProcesses):
# set process name
processName = "P%i" % i
# create process, and connect to function, and task queue
newProcess = multiprocessing.Process(target=calculate, args=(processName,tasks,results))
# add new process to the list of processes
processes.append(newProcess)
# start the process
newProcess.start()
# fill task queue
taskList = [43, 1, 780, 256, 142, 68, 183, 334, 325, 3]
for singleTask in taskList:
tasks.put(singleTask)
# meanwhile, do sth. else ...
sleep(5)
# quit the worker processes
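    # (one -1 sentinel per worker; each worker puts -1 back on the results
    # queue, so the consumer loop below knows when every process has finished)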
for i in range(numberOfProcesses):
tasks.put(-1)
# read calculation results
processesThatFinished = 0
while True:
# read result
newResult = results.get()
# have a look at the results
if newResult == -1:
# one process has finished
processesThatFinished += 1
if processesThatFinished == numberOfProcesses:
break
else:
# output result
print("result:", newResult)
|
hofmannedv/training-python
|
queue/queue-multiprocessing.py
|
Python
|
gpl-2.0
| 2,388
|
"""empty message
Revision ID: 40f48d69b68
Revises: 1265912a75
Create Date: 2016-03-30 14:12:17.280281
"""
# revision identifiers, used by Alembic.
revision = '40f48d69b68'
down_revision = '1265912a75'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
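    # Replaces the misspelled reference_assemlby_id foreign key column with a
    # correctly spelled reference_assembly_id column and adds a uniqueness
    # constraint on (name, reference_assembly_id)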
op.add_column('gene', sa.Column('reference_assembly_id', sa.Integer(), nullable=True))
op.create_unique_constraint('gene_within_assembly_unique', 'gene', ['name', 'reference_assembly_id'])
op.drop_constraint('gene_reference_assemlby_id_fkey', 'gene', type_='foreignkey')
op.create_foreign_key(None, 'gene', 'reference_assembly', ['reference_assembly_id'], ['id'])
op.drop_column('gene', 'reference_assemlby_id')
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('gene', sa.Column('reference_assemlby_id', sa.INTEGER(), autoincrement=False, nullable=True))
op.drop_constraint(None, 'gene', type_='foreignkey')
op.create_foreign_key('gene_reference_assemlby_id_fkey', 'gene', 'reference_assembly', ['reference_assemlby_id'], ['id'])
op.drop_constraint('gene_within_assembly_unique', 'gene', type_='unique')
op.drop_column('gene', 'reference_assembly_id')
### end Alembic commands ###
|
EnvGen/BARM_web_server
|
migrations/versions/40f48d69b68_.py
|
Python
|
gpl-2.0
| 1,339
|
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2009,2010,2011,2012,2013,2014,2015,2016,2017 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains the logic for `aq bind client --cluster`."""
from aquilon.exceptions_ import ArgumentError
from aquilon.aqdb.model import Cluster, Service, ServiceInstance
from aquilon.worker.broker import BrokerCommand
from aquilon.worker.services import Chooser, ChooserCache
from aquilon.worker.dbwrappers.change_management import ChangeManagement
class CommandBindClientCluster(BrokerCommand):
requires_plenaries = True
required_parameters = ["cluster", "service"]
def render(self, session, logger, plenaries, cluster, service, instance, user,
justification, reason, force=False, **arguments):
dbcluster = Cluster.get_unique(session, cluster, compel=True)
# Validate ChangeManagement
cm = ChangeManagement(session, user, justification, reason, logger, self.command, **arguments)
cm.consider(dbcluster)
cm.validate()
dbservice = Service.get_unique(session, service, compel=True)
if instance:
dbinstance = ServiceInstance.get_unique(session, service=dbservice,
name=instance, compel=True)
else:
dbinstance = None
chooser_cache = ChooserCache()
failed = []
# FIXME: this logic should be in the chooser
for dbobj in dbcluster.all_objects():
# Always add the binding on the cluster we were called on
if dbobj == dbcluster or dbservice in dbobj.required_services:
chooser = Chooser(dbobj, plenaries, logger=logger,
required_only=False, cache=chooser_cache)
try:
chooser.set_single(dbservice, dbinstance, force=force)
except ArgumentError as err:
failed.append(str(err))
if failed:
raise ArgumentError("The following objects failed service "
"binding:\n%s" % "\n".join(failed))
session.flush()
plenaries.flatten()
plenaries.write()
return
|
quattor/aquilon
|
lib/aquilon/worker/commands/bind_client_cluster.py
|
Python
|
apache-2.0
| 2,797
|
# Topydo - A todo.txt client written in Python.
# Copyright (C) 2014 - 2015 Bram Schoenmakers <me@bramschoenmakers.nl>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
A list of todo items.
"""
from datetime import date
import re
from six import text_type
from topydo.lib.Config import config
from topydo.lib import Filter
from topydo.lib.HashListValues import hash_list_values
from topydo.lib.PrettyPrinter import PrettyPrinter
from topydo.lib.Todo import Todo
from topydo.lib.View import View
class InvalidTodoException(Exception):
pass
class TodoListBase(object):
"""
Provides operations for a todo list, such as adding items, removing them,
etc.
The list is usually a complete list found in the program's input (e.g. a
todo.txt file), not an arbitrary set of todo items.
"""
def __init__(self, p_todostrings):
"""
Should be given a list of strings, each element a single todo string.
The string will be parsed.
"""
self._todos = []
self._todo_id_map = {}
self._id_todo_map = {}
self.add_list(p_todostrings)
self.dirty = False
def todo(self, p_identifier):
"""
The _todos list has the same order as in the backend store (usually
        a todo.txt file). The user refers to the first task as number 1, so use
index 0, etc.
Alternative ways to identify a todo is using a hashed version based on
the todo's text, or a regexp that matches the todo's source. The regexp
match is a fallback.
Returns None when the todo couldn't be found.
"""
result = None
def todo_by_uid(p_identifier):
""" Returns the todo that corresponds to the unique ID. """
result = None
if config().identifiers() == 'text':
try:
result = self._id_todo_map[p_identifier]
except KeyError:
pass # we'll try something else
return result
def todo_by_linenumber(p_identifier):
"""
Attempts to find the todo on the given line number.
When the identifier is a number but has leading zeroes, the result
will be None.
"""
result = None
if config().identifiers() != 'text':
try:
if re.match('[1-9]\d*', p_identifier):
# the expression is a string and no leading zeroes,
# treat it as an integer
raise TypeError
except TypeError:
try:
result = self._todos[int(p_identifier) - 1]
except IndexError:
raise InvalidTodoException
return result
def todo_by_regexp(p_identifier):
"""
Returns the todo that is (uniquely) identified by the given regexp.
If the regexp matches more than one item, no result is returned.
"""
result = None
candidates = Filter.GrepFilter(p_identifier).filter(self._todos)
if len(candidates) == 1:
result = candidates[0]
else:
raise InvalidTodoException
return result
result = todo_by_uid(p_identifier)
if not result:
result = todo_by_linenumber(p_identifier)
if not result:
# convert integer to text so we pass on a valid regex
result = todo_by_regexp(text_type(p_identifier))
return result
def add(self, p_src):
""" Given a todo string, parse it and put it to the end of the list. """
todos = self.add_list([p_src])
return todos[0] if len(todos) else None
def add_list(self, p_srcs):
todos = [Todo(src) for src in p_srcs if re.search(r'\S', src)]
self.add_todos(todos)
return todos
def add_todo(self, p_todo):
""" Add an Todo object to the list. """
self.add_todos([p_todo])
def add_todos(self, p_todos):
for todo in p_todos:
self._todos.append(todo)
self._update_todo_ids()
self.dirty = True
def delete(self, p_todo):
""" Deletes a todo item from the list. """
try:
number = self._todos.index(p_todo)
del self._todos[number]
self._update_todo_ids()
self.dirty = True
except ValueError:
# todo item couldn't be found, ignore
pass
def erase(self):
""" Erases all todos from the list. """
self._todos = []
self.dirty = True
def count(self):
""" Returns the number of todos on this list. """
return len(self._todos)
def append(self, p_todo, p_string):
"""
Appends a text to the todo, specified by its number.
        The todo will be parsed again, such that tags and projects in the
appended string are processed.
"""
if len(p_string) > 0:
new_text = p_todo.source() + ' ' + p_string
p_todo.set_source_text(new_text)
self._update_todo_ids()
self.dirty = True
def projects(self):
""" Returns a set of all projects in this list. """
result = set()
for todo in self._todos:
projects = todo.projects()
result = result.union(projects)
return result
def contexts(self):
""" Returns a set of all contexts in this list. """
result = set()
for todo in self._todos:
contexts = todo.contexts()
result = result.union(contexts)
return result
def view(self, p_sorter, p_filters):
"""
Constructs a view of the todo list.
A view is a sorted and filtered todo list, where the properties are
        defined by the end user. Todos in this list should not be modified;
modifications should occur through this class.
"""
return View(p_sorter, p_filters, self)
def is_dirty(self):
return self.dirty
def set_dirty(self):
self.dirty = True
def todos(self):
return self._todos
def set_todo_completed(self, p_todo, p_completion_date=date.today()):
p_todo.set_completed(p_completion_date)
self.dirty = True
def set_priority(self, p_todo, p_priority):
if p_todo.priority() != p_priority:
p_todo.set_priority(p_priority)
self.dirty = True
def number(self, p_todo):
"""
Returns the line number or text ID of a todo (depends on the
        configuration).
"""
try:
if config().identifiers() == 'text':
return self._todo_id_map[p_todo]
else:
return self._todos.index(p_todo) + 1
except (ValueError, KeyError):
raise InvalidTodoException
def _update_todo_ids(self):
# the idea is to have a hash that is independent of the position of the
# todo. Use the text (without tags) of the todo to keep the id as stable
# as possible (not influenced by priorities or due dates, etc.)
self._todo_id_map = {}
self._id_todo_map = {}
uids = hash_list_values(self._todos, lambda t: t.text())
for (todo, uid) in uids:
self._todo_id_map[todo] = uid
self._id_todo_map[uid] = todo
def print_todos(self):
"""
Returns a pretty-printed string (without colors) of the todo items in
this list.
"""
printer = PrettyPrinter()
return printer.print_list(self._todos)
|
MinchinWeb/topydo
|
topydo/lib/TodoListBase.py
|
Python
|
gpl-3.0
| 8,337
|
import sys
tests = [("testExecs/main.exe", "", {}), ]
longTests = []
if __name__ == '__main__':
import sys
from rdkit import TestRunner
failed, tests = TestRunner.RunScript('test_list.py', 0, 1)
sys.exit(len(failed))
|
rvianello/rdkit
|
Code/Numerics/EigenSolvers/test_list.py
|
Python
|
bsd-3-clause
| 228
|
"""Check that raise ... from .. uses a proper exception context """
# pylint: disable=unreachable, import-error
import socket, unknown
__revision__ = 0
class ExceptionSubclass(Exception):
""" subclass """
def test():
""" docstring """
raise IndexError from 1
raise IndexError from None
raise IndexError from ZeroDivisionError
raise IndexError from object()
raise IndexError from ExceptionSubclass
raise IndexError from socket.error
raise IndexError() from None
raise IndexError() from ZeroDivisionError
raise IndexError() from ZeroDivisionError()
raise IndexError() from object()
raise IndexError() from unknown
|
GbalsaC/bitnamiP
|
venv/lib/python2.7/site-packages/pylint/test/input/func_bad_exception_context_py30.py
|
Python
|
agpl-3.0
| 669
|
# -*- coding: utf-8 -*-
u"""Suddenly.
---
layout: post
source: Reference for Writers
source_url: http://bit.ly/1E94vyD
title: suddenly
date: 2014-06-10 12:31:19
categories: writing
---
“Sudden” means quickly and without warning, but using the word “suddenly” both
slows down the action and warns your reader. Do you know what’s more effective
for creating the sense of the sudden? Just saying what happens.
When using “suddenly,” you communicate through the narrator that the action
seemed sudden. By jumping directly into the action, you allow the reader to
experience that suddenness first hand. “Suddenly” also suffers from being
nondescript, failing to communicate the nature of the action itself; providing
no sensory experience or concrete fact to hold on to. Just … suddenly.
Feel free to employ “suddenly” in situations where the suddenness is not
apparent in the action itself. For example, in “Suddenly, I don’t hate you
anymore,” the “suddenly” substantially changes the way we think about the
shift in emotional calibration.
"""
from proselint.tools import memoize, existence_check
@memoize
def check_ellipsis(text):
"""Use an ellipsis instead of three dots."""
err = "palahniuk.suddenly"
msg = u"Suddenly is nondescript, slows the action, and warns your reader."
regex = "Suddenly,"
return existence_check(text, [regex], err, msg, max_errors=3,
require_padding=False, offset=-1, ignore_case=False)
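# --- Hypothetical usage sketch (not part of the original module) ---
# A minimal demonstration of the check above, assuming only what is defined
# in this file: `existence_check` is expected to return a (possibly empty)
# list of matches, so text containing "Suddenly," should yield at least one
# result while clean text yields none. The sample sentences are made up.
if __name__ == "__main__":
    flagged = check_ellipsis(u"Suddenly, the lights went out.")
    clean = check_ellipsis(u"The lights went out.")
    print(str(len(flagged)) + " flagged, " + str(len(clean)) + " clean")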
|
jstewmon/proselint
|
proselint/checks/palahniuk/suddenly.py
|
Python
|
bsd-3-clause
| 1,518
|
#!/usr/bin/python -OO
# This file is part of Archivematica.
#
# Copyright 2010-2012 Artefactual Systems Inc. <http://artefactual.com>
#
# Archivematica is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Archivematica is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Archivematica. If not, see <http://www.gnu.org/licenses/>.
# @package Archivematica
# @subpackage archivematicaClientScript
# @author Joseph Perry <joseph@artefactual.com>
# @version svn: $Id$
import sys
sys.path.append("/usr/lib/archivematica/archivematicaCommon")
from executeOrRunSubProcess import executeOrRun
from restructureForCompliance import restructureBagForComplianceFileUUIDsAssigned
from databaseFunctions import insertIntoEvents
import databaseInterface
printSubProcessOutput=False
exitCode = 0
verificationCommands = []
verificationCommandsOutputs = []
def verifyBag(bag):
    global exitCode, verificationCommands
verificationCommands = [
"/usr/share/bagit/bin/bag verifyvalid \"" + bag + "\"",
"/usr/share/bagit/bin/bag checkpayloadoxum \"" + bag + "\"",
"/usr/share/bagit/bin/bag verifycomplete \"" + bag + "\"",
"/usr/share/bagit/bin/bag verifypayloadmanifests \"" + bag + "\"",
"/usr/share/bagit/bin/bag verifytagmanifests \"" + bag + "\"" ]
for command in verificationCommands:
ret = executeOrRun("command", command, printing=printSubProcessOutput)
verificationCommandsOutputs.append(ret)
exit, stdOut, stdErr = ret
if exit != 0:
print >>sys.stderr, "Failed test: ", command
print >>sys.stderr, stdErr
print >>sys.stderr
exitCode += 1
else:
print "Passed test: ", command
if __name__ == '__main__':
target = sys.argv[1]
transferUUID = sys.argv[2]
verifyBag(target)
if exitCode != 0:
print >>sys.stderr, "Failed bagit compliance. Not restructuring."
exit(exitCode)
restructureBagForComplianceFileUUIDsAssigned(target, transferUUID, "transferUUID")
for i in range(len(verificationCommands)):
print verificationCommands[i]
print verificationCommandsOutputs[i]
print
sql = "SELECT Files.fileUUID FROM Files WHERE removedTime = 0 AND Files.currentLocation LIKE '\%transferDirectory\%objects/%' AND transferUUID = '" + transferUUID + "';"
rows = databaseInterface.queryAllSQL(sql)
for row in rows:
insertIntoEvents(fileUUID=row[0], \
eventType="fixity check", \
eventDetail="Bagit - verifypayloadmanifests", \
eventOutcome="Pass")
exit(exitCode)
|
artefactual/archivematica-history
|
src/MCPClient/lib/clientScripts/verifyAndRestructureTransferBag.py
|
Python
|
agpl-3.0
| 3,088
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Handler for data backup operation.
The generic datastore admin console transfers control to ConfirmBackupHandler
after entities have been selected. ConfirmBackupHandler asks the user to
confirm the selection and enter a backup name, then transfers control to
DoBackupHandler. DoBackupHandler starts the backup mappers and displays a
confirmation page.
This module also contains the actual mapper code for backing up the data.
"""
from __future__ import with_statement
import cStringIO
import datetime
import itertools
import logging
import os
import random
import re
import time
import urllib
import xml.dom.minidom
from google.appengine.datastore import entity_pb
from google.appengine.api import apiproxy_stub_map
from google.appengine.api import app_identity
from google.appengine.api import blobstore as blobstore_api
from google.appengine.api import capabilities
from google.appengine.api import datastore
from google.appengine.api import datastore_types
from google.appengine.api import files
from google.appengine.api import taskqueue
from google.appengine.api import urlfetch
from google.appengine.api.files import records
from google.appengine.api.taskqueue import taskqueue_service_pb
from google.appengine.datastore import datastore_query
from google.appengine.datastore import datastore_rpc
from google.appengine.ext import blobstore
from google.appengine.ext import db
from google.appengine.ext import deferred
from google.appengine.ext import webapp
from google.appengine.ext.datastore_admin import backup_pb2
from google.appengine.ext.datastore_admin import config
from google.appengine.ext.datastore_admin import utils
from google.appengine.ext.mapreduce import context
from google.appengine.ext.mapreduce import datastore_range_iterators as db_iters
from google.appengine.ext.mapreduce import input_readers
from google.appengine.ext.mapreduce import json_util
from google.appengine.ext.mapreduce import operation as op
from google.appengine.ext.mapreduce import output_writers
from google.appengine.runtime import apiproxy_errors
try:
from google.appengine.ext.datastore_admin import services_client
except ImportError:
pass
XSRF_ACTION = 'backup'
BUCKET_PATTERN = (r'^([a-zA-Z0-9]+([\-_]+[a-zA-Z0-9]+)*)'
r'(\.([a-zA-Z0-9]+([\-_]+[a-zA-Z0-9]+)*))*$')
MAX_BUCKET_LEN = 222
MIN_BUCKET_LEN = 3
MAX_BUCKET_SEGMENT_LEN = 63
NUM_KINDS_DEFERRED_THRESHOLD = 10
MAX_BLOBS_PER_DELETE = 500
TEST_WRITE_FILENAME_PREFIX = 'datastore_backup_write_test'
MAX_KEYS_LIST_SIZE = 100
MAX_TEST_FILENAME_TRIES = 10
MEANING_TO_PRIMITIVE_TYPE = {
entity_pb.Property.GD_WHEN: backup_pb2.EntitySchema.DATE_TIME,
entity_pb.Property.GD_RATING: backup_pb2.EntitySchema.RATING,
entity_pb.Property.ATOM_LINK: backup_pb2.EntitySchema.LINK,
entity_pb.Property.ATOM_CATEGORY: backup_pb2.EntitySchema.CATEGORY,
entity_pb.Property.GD_PHONENUMBER: backup_pb2.EntitySchema.PHONE_NUMBER,
entity_pb.Property.GD_POSTALADDRESS: backup_pb2.EntitySchema.POSTAL_ADDRESS,
entity_pb.Property.GD_EMAIL: backup_pb2.EntitySchema.EMAIL,
entity_pb.Property.GD_IM: backup_pb2.EntitySchema.IM_HANDLE,
entity_pb.Property.BLOBKEY: backup_pb2.EntitySchema.BLOB_KEY,
entity_pb.Property.TEXT: backup_pb2.EntitySchema.TEXT,
entity_pb.Property.BLOB: backup_pb2.EntitySchema.BLOB,
entity_pb.Property.BYTESTRING: backup_pb2.EntitySchema.SHORT_BLOB
}
class ConfirmBackupHandler(webapp.RequestHandler):
"""Handler to deal with requests from the admin console to backup data."""
SUFFIX = 'confirm_backup'
@classmethod
def Render(cls, handler):
"""Rendering method that can be called by main.py.
Args:
handler: the webapp.RequestHandler invoking the method
"""
kinds = handler.request.get_all('kind')
sizes_known, size_total, remainder = utils.ParseKindsAndSizes(kinds)
notreadonly_warning = capabilities.CapabilitySet(
'datastore_v3', capabilities=['write']).is_enabled()
blob_warning = bool(blobstore.BlobInfo.all().count(1))
template_params = {
'run_as_a_service': handler.request.get('run_as_a_service'),
'form_target': DoBackupHandler.SUFFIX,
'kind_list': kinds,
'remainder': remainder,
'sizes_known': sizes_known,
'size_total': size_total,
'queues': None,
'datastore_admin_home': utils.GenerateHomeUrl(handler.request),
'namespaces': get_namespaces(handler.request.get('namespace', None)),
'xsrf_token': utils.CreateXsrfToken(XSRF_ACTION),
'notreadonly_warning': notreadonly_warning,
'blob_warning': blob_warning,
'backup_name': 'datastore_backup_%s' % time.strftime('%Y_%m_%d')
}
utils.RenderToResponse(handler, 'confirm_backup.html', template_params)
def get_namespaces(selected_namespace):
namespaces = [('--All--', '*', selected_namespace is None)]
for ns in datastore.Query('__namespace__', keys_only=True).Run():
ns_name = ns.name() or ''
namespaces.append((ns_name or '--Default--',
ns_name,
ns_name == selected_namespace))
return namespaces
class ConfirmDeleteBackupHandler(webapp.RequestHandler):
"""Handler to confirm admin console requests to delete a backup copy."""
SUFFIX = 'confirm_delete_backup'
@classmethod
def Render(cls, handler):
"""Rendering method that can be called by main.py.
Args:
handler: the webapp.RequestHandler invoking the method
"""
requested_backup_ids = handler.request.get_all('backup_id')
backups = []
gs_warning = False
if requested_backup_ids:
for backup in db.get(requested_backup_ids):
if backup:
backups.append(backup)
gs_warning |= backup.filesystem == files.GS_FILESYSTEM
template_params = {
'form_target': DoBackupDeleteHandler.SUFFIX,
'datastore_admin_home': utils.GenerateHomeUrl(handler.request),
'backups': backups,
'xsrf_token': utils.CreateXsrfToken(XSRF_ACTION),
'gs_warning': gs_warning,
'run_as_a_service': handler.request.get('run_as_a_service'),
}
utils.RenderToResponse(handler, 'confirm_delete_backup.html',
template_params)
class ConfirmAbortBackupHandler(webapp.RequestHandler):
"""Handler to confirm admin console requests to abort a backup copy."""
SUFFIX = 'confirm_abort_backup'
@classmethod
def Render(cls, handler):
"""Rendering method that can be called by main.py.
Args:
handler: the webapp.RequestHandler invoking the method
"""
requested_backup_ids = handler.request.get_all('backup_id')
backups = []
if requested_backup_ids:
for backup in db.get(requested_backup_ids):
if backup:
backups.append(backup)
template_params = {
'form_target': DoBackupAbortHandler.SUFFIX,
'datastore_admin_home': utils.GenerateHomeUrl(handler.request),
'backups': backups,
'xsrf_token': utils.CreateXsrfToken(XSRF_ACTION),
'run_as_a_service': handler.request.get('run_as_a_service'),
}
utils.RenderToResponse(handler, 'confirm_abort_backup.html',
template_params)
class ConfirmRestoreFromBackupHandler(webapp.RequestHandler):
"""Handler to confirm admin console requests to restore from backup."""
SUFFIX = 'confirm_restore_from_backup'
@classmethod
def Render(cls, handler, default_backup_id=None,
default_delete_backup_after_restore=False):
"""Rendering method that can be called by main.py.
Args:
handler: the webapp.RequestHandler invoking the method
default_backup_id: default value for handler.request
default_delete_backup_after_restore: default value for handler.request
"""
backup_id = handler.request.get('backup_id', default_backup_id)
backup = db.get(backup_id) if backup_id else None
notreadonly_warning = capabilities.CapabilitySet(
'datastore_v3', capabilities=['write']).is_enabled()
original_app_warning = backup.original_app
if os.getenv('APPLICATION_ID') == original_app_warning:
original_app_warning = None
template_params = {
'form_target': DoBackupRestoreHandler.SUFFIX,
'queues': None,
'datastore_admin_home': utils.GenerateHomeUrl(handler.request),
'backup': backup,
'delete_backup_after_restore': handler.request.get(
'delete_backup_after_restore', default_delete_backup_after_restore),
'xsrf_token': utils.CreateXsrfToken(XSRF_ACTION),
'notreadonly_warning': notreadonly_warning,
'original_app_warning': original_app_warning,
'run_as_a_service': handler.request.get('run_as_a_service'),
}
utils.RenderToResponse(handler, 'confirm_restore_from_backup.html',
template_params)
class ConfirmBackupImportHandler(webapp.RequestHandler):
"""Handler to import backup information."""
SUFFIX = 'backup_information'
@classmethod
def Render(cls, handler):
"""Rendering method that can be called by main.py.
Args:
handler: the webapp.RequestHandler invoking the method
"""
gs_handle = handler.request.get('gs_handle')
error = None if gs_handle else 'Google Cloud Storage path is missing'
other_backup_info_files = []
selected_backup_info_file = None
backup_info_specified = False
if not error:
try:
gs_handle = gs_handle.rstrip()
bucket_name, prefix = parse_gs_handle(gs_handle)
validate_gs_bucket_name(bucket_name)
if not is_accessible_bucket_name(bucket_name):
raise BackupValidationError(
'Bucket "%s" is not accessible' % bucket_name)
if prefix.endswith('.backup_info'):
prefix = prefix[0:prefix.rfind('/')]
backup_info_specified = True
elif prefix and not prefix.endswith('/'):
prefix += '/'
for backup_info_file in list_bucket_files(bucket_name, prefix):
backup_info_path = '/gs/%s/%s' % (bucket_name, backup_info_file)
if backup_info_specified and backup_info_path == gs_handle:
selected_backup_info_file = backup_info_path
elif (backup_info_file.endswith('.backup_info')
and backup_info_file.count('.') == 1):
other_backup_info_files.append(backup_info_path)
except Exception, ex:
error = 'Failed to read bucket: %s' % ex.message
logging.exception(ex.message)
template_params = {
'error': error,
'form_target': DoBackupImportHandler.SUFFIX,
'datastore_admin_home': utils.GenerateHomeUrl(handler.request),
'selected_backup_info_file': selected_backup_info_file,
'other_backup_info_files': other_backup_info_files,
'backup_info_specified': backup_info_specified,
'xsrf_token': utils.CreateXsrfToken(XSRF_ACTION),
'run_as_a_service': handler.request.get('run_as_a_service'),
}
utils.RenderToResponse(handler, 'confirm_backup_import.html',
template_params)
class BackupInformationHandler(webapp.RequestHandler):
"""Handler to display backup information."""
SUFFIX = 'backup_information'
@classmethod
def Render(cls, handler):
"""Rendering method that can be called by main.py.
Args:
handler: the webapp.RequestHandler invoking the method
"""
backup_ids = handler.request.get_all('backup_id')
template_params = {
'backups': db.get(backup_ids),
'datastore_admin_home': utils.GenerateHomeUrl(handler.request),
'run_as_a_service': handler.request.get('run_as_a_service'),
}
utils.RenderToResponse(handler, 'backup_information.html', template_params)
class BaseDoHandler(webapp.RequestHandler):
"""Base class for all Do*Handlers."""
MAPREDUCE_DETAIL = config.MAPREDUCE_PATH + '/detail?mapreduce_id='
def get(self):
"""Handler for get requests to datastore_admin backup operations.
Status of executed jobs is displayed.
"""
jobs = self.request.get_all('job')
remote_job = self.request.get('remote_job')
tasks = self.request.get_all('task')
error = self.request.get('error', '')
xsrf_error = self.request.get('xsrf_error', '')
template_params = {
'job_list': jobs,
'remote_job': remote_job,
'task_list': tasks,
'mapreduce_detail': self.MAPREDUCE_DETAIL,
'error': error,
'xsrf_error': xsrf_error,
'datastore_admin_home': utils.GenerateHomeUrl(self.request),
}
utils.RenderToResponse(self, self._get_html_page, template_params)
@property
def _get_html_page(self):
"""Return the name of the HTML page for HTTP/GET requests."""
raise NotImplementedError
@property
def _get_post_html_page(self):
"""Return the name of the HTML page for HTTP/POST requests."""
raise NotImplementedError
def _ProcessPostRequest(self):
"""Process the HTTP/POST request and return the result as parametrs."""
raise NotImplementedError
def _GetBasicMapperParams(self):
namespace = self.request.get('namespace', None)
if namespace == '*':
namespace = None
return {'namespace': namespace}
def SendRedirect(self, path=None, params=()):
"""Send a redirect response."""
run_as_a_service = self.request.get('run_as_a_service')
if run_as_a_service:
params = list(params)
params.append(('run_as_a_service', True))
dest = config.BASE_PATH
if path:
dest = '%s/%s' % (dest, path)
if params:
dest = '%s?%s' % (dest, urllib.urlencode(params))
self.redirect(dest)
def post(self):
"""Handler for post requests to datastore_admin/backup.do.
Redirects to the get handler after processing the request.
"""
token = self.request.get('xsrf_token')
if not utils.ValidateXsrfToken(token, XSRF_ACTION):
parameters = [('xsrf_error', '1')]
else:
try:
parameters = self._ProcessPostRequest()
except Exception, e:
error = self._HandleException(e)
parameters = [('error', error)]
self.SendRedirect(self._get_post_html_page, parameters)
def _HandleException(self, e):
"""Make exception handling overridable by tests.
Args:
e: The exception to handle.
Returns:
The exception error string.
"""
logging.exception(e.message)
return '%s: %s' % (type(e), e.message)
class BackupValidationError(utils.Error):
"""Raised upon backup request validation."""
def _perform_backup(run_as_a_service, kinds, selected_namespace,
filesystem, gs_bucket_name, backup,
queue, mapper_params, max_jobs):
"""Triggers backup mapper jobs.
Args:
run_as_a_service: True if backup should be done via admin-jobs
kinds: a sequence of kind names
selected_namespace: The selected namespace or None for all
filesystem: files.BLOBSTORE_FILESYSTEM or files.GS_FILESYSTEM
or None to default to blobstore
gs_bucket_name: the GS file system bucket in which to store the backup
when using the GS file system, and otherwise ignored
backup: the backup name
queue: the task queue for the backup task
mapper_params: the mapper parameters
max_jobs: if backup needs more jobs than this, defer them
Returns:
The job or task ids.
Raises:
BackupValidationError: On validation error.
Exception: On other error.
"""
BACKUP_COMPLETE_HANDLER = __name__ + '.BackupCompleteHandler'
BACKUP_HANDLER = __name__ + '.BackupEntity.map'
INPUT_READER = __name__ + '.DatastoreEntityProtoInputReader'
OUTPUT_WRITER = output_writers.__name__ + '.FileRecordsOutputWriter'
if run_as_a_service:
if not gs_bucket_name:
raise BackupValidationError('Bucket name missing.')
gs_bucket_name = validate_and_canonicalize_gs_bucket(gs_bucket_name)
datastore_admin_service = services_client.DatastoreAdminClient()
description = 'Remote backup job: %s' % backup
remote_job_id = datastore_admin_service.create_backup(
description, backup, gs_bucket_name, selected_namespace, kinds)
return [('remote_job', remote_job_id)]
queue = queue or os.environ.get('HTTP_X_APPENGINE_QUEUENAME', 'default')
if queue[0] == '_':
queue = 'default'
if not filesystem:
filesystem = files.BLOBSTORE_FILESYSTEM
if filesystem == files.GS_FILESYSTEM:
if not gs_bucket_name:
raise BackupValidationError('Bucket name missing.')
gs_bucket_name = validate_and_canonicalize_gs_bucket(gs_bucket_name)
elif filesystem == files.BLOBSTORE_FILESYSTEM:
pass
else:
raise BackupValidationError('Unknown filesystem "%s".' % filesystem)
job_name = 'datastore_backup_%s_%%(kind)s' % re.sub(r'[^\w]', '_', backup)
job_operation = None
backup_info = None
try:
job_operation = utils.StartOperation('Backup: %s' % backup)
backup_info = BackupInformation(parent=job_operation)
backup_info.filesystem = filesystem
backup_info.name = backup
backup_info.kinds = kinds
if selected_namespace is not None:
backup_info.namespaces = [selected_namespace]
backup_info.put(force_writes=True)
mapreduce_params = {
'done_callback_handler': BACKUP_COMPLETE_HANDLER,
'backup_info_pk': str(backup_info.key()),
'force_ops_writes': True,
}
mapper_params = dict(mapper_params)
mapper_params['filesystem'] = filesystem
if filesystem == files.GS_FILESYSTEM:
mapper_params['gs_bucket_name'] = gs_bucket_name
if len(kinds) <= max_jobs:
return [('job', job) for job in _run_map_jobs(
job_operation.key(), backup_info.key(), kinds, job_name,
BACKUP_HANDLER, INPUT_READER, OUTPUT_WRITER,
mapper_params, mapreduce_params, queue)]
else:
retry_options = taskqueue.TaskRetryOptions(task_retry_limit=1)
deferred_task = deferred.defer(_run_map_jobs_deferred,
backup, job_operation.key(),
backup_info.key(), kinds, job_name,
BACKUP_HANDLER, INPUT_READER,
OUTPUT_WRITER, mapper_params,
mapreduce_params, queue, _queue=queue,
_url=config.DEFERRED_PATH,
_retry_options=retry_options)
return [('task', deferred_task.name)]
except Exception:
logging.exception('Failed to start a datastore backup job[s] for "%s".',
backup)
if backup_info:
delete_backup_info(backup_info)
if job_operation:
job_operation.status = utils.DatastoreAdminOperation.STATUS_FAILED
job_operation.put(force_writes=True)
raise
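# Hedged usage sketch (not part of the original module; the bucket, queue and
# kind names below are placeholders):
#
#   results = _perform_backup(
#       run_as_a_service=False,
#       kinds=['Greeting'],
#       selected_namespace=None,
#       filesystem=files.GS_FILESYSTEM,
#       gs_bucket_name='example-bucket',
#       backup='nightly',
#       queue='backup-queue',
#       mapper_params={'namespace': None},
#       max_jobs=10)
#
# 'results' is a list of ('job', id), ('task', name) or ('remote_job', id)
# pairs, which BaseDoHandler.SendRedirect turns into query parameters.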
class BackupLinkHandler(webapp.RequestHandler):
"""Handler to deal with requests to the backup link to backup data."""
SUFFIX = 'backup.create'
def get(self):
"""Handler for get requests to datastore_admin/backup.create."""
self.post()
def post(self):
"""Handler for post requests to datastore_admin/backup.create."""
try:
if ('X-AppEngine-TaskName' not in self.request.headers and
'X-AppEngine-Cron' not in self.request.headers):
logging.critical('Scheduled backups must be started via task queue or '
'cron.')
self.response.set_status(403)
return
backup_prefix = self.request.get('name')
if not backup_prefix:
if self.request.headers.get('X-AppEngine-Cron'):
backup_prefix = 'cron-'
else:
backup_prefix = 'link-'
backup_prefix_with_date = backup_prefix + time.strftime('%Y_%m_%d')
backup_name = backup_prefix_with_date
backup_suffix_counter = 1
while BackupInformation.name_exists(backup_name):
backup_suffix_counter += 1
backup_name = backup_prefix_with_date + '-' + str(backup_suffix_counter)
kinds = self.request.get_all('kind')
if not kinds:
self.errorResponse('Backup must include at least one kind.')
return
for kind in kinds:
if not utils.IsKindNameVisible(kind):
self.errorResponse('Invalid kind %s.' % kind)
return
namespace = self.request.get('namespace', None)
if namespace == '*':
namespace = None
mapper_params = {'namespace': namespace}
_perform_backup(self.request.get('run_as_a_service', False),
kinds,
namespace,
self.request.get('filesystem'),
self.request.get('gs_bucket_name'),
backup_name,
self.request.get('queue'),
mapper_params,
1000000)
except Exception, e:
self.errorResponse(e.message)
def errorResponse(self, message):
logging.error('Could not create backup via link: %s', message)
self.response.set_status(400, message)
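# Hedged illustration (not from this file): scheduled backups typically reach
# BackupLinkHandler via a cron.yaml entry along these lines; the exact URL
# prefix depends on how the datastore_admin handlers are mounted, and the kind
# and bucket names are placeholders.
#
#   cron:
#   - description: nightly datastore backup
#     url: /_ah/datastore_admin/backup.create?name=nightly&kind=Greeting&filesystem=gs&gs_bucket_name=example-bucket
#     schedule: every 24 hours
#     target: ah-builtin-python-bundle
#
# Requests lacking the X-AppEngine-Cron or X-AppEngine-TaskName header are
# rejected with HTTP 403 by post() above.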
class DatastoreEntityProtoInputReader(input_readers.RawDatastoreInputReader):
"""An input reader which yields datastore entity proto for a kind."""
_KEY_RANGE_ITER_CLS = db_iters.KeyRangeEntityProtoIterator
class DoBackupHandler(BaseDoHandler):
"""Handler to deal with requests from the admin console to backup data."""
SUFFIX = 'backup.do'
_get_html_page = 'do_backup.html'
_get_post_html_page = SUFFIX
def _ProcessPostRequest(self):
"""Triggers backup mapper jobs and returns their ids."""
try:
backup = self.request.get('backup_name').strip()
if not backup:
raise BackupValidationError('Unspecified backup name.')
if BackupInformation.name_exists(backup):
raise BackupValidationError('Backup "%s" already exists.' % backup)
mapper_params = self._GetBasicMapperParams()
backup_result = _perform_backup(self.request.get('run_as_a_service',
False),
self.request.get_all('kind'),
mapper_params.get('namespace'),
self.request.get('filesystem'),
self.request.get('gs_bucket_name'),
backup,
self.request.get('queue'),
mapper_params,
10)
return backup_result
except Exception, e:
logging.exception(e.message)
return [('error', e.message)]
def _run_map_jobs_deferred(backup_name, job_operation_key, backup_info_key,
kinds, job_name, backup_handler, input_reader,
output_writer, mapper_params, mapreduce_params,
queue):
backup_info = BackupInformation.get(backup_info_key)
if backup_info:
try:
_run_map_jobs(job_operation_key, backup_info_key, kinds, job_name,
backup_handler, input_reader, output_writer, mapper_params,
mapreduce_params, queue)
except BaseException:
logging.exception('Failed to start a datastore backup job[s] for "%s".',
backup_name)
delete_backup_info(backup_info)
else:
logging.info('Missing backup info, can not start backup jobs for "%s"',
backup_name)
def _run_map_jobs(job_operation_key, backup_info_key, kinds, job_name,
backup_handler, input_reader, output_writer, mapper_params,
mapreduce_params, queue):
"""Creates backup/restore MR jobs for the given operation.
Args:
job_operation_key: a key of utils.DatastoreAdminOperation entity.
backup_info_key: a key of BackupInformation entity.
kinds: a list of kinds to run the M/R for.
job_name: the M/R job name prefix.
backup_handler: M/R job completion handler.
input_reader: M/R input reader.
output_writer: M/R output writer.
mapper_params: custom parameters to pass to mapper.
mapreduce_params: dictionary parameters relevant to the whole job.
queue: the name of the queue that will be used by the M/R.
Returns:
Ids of all started mapper jobs as list of strings.
"""
backup_info = BackupInformation.get(backup_info_key)
if not backup_info:
return []
jobs = utils.RunMapForKinds(
job_operation_key,
kinds,
job_name,
backup_handler,
input_reader,
output_writer,
mapper_params,
mapreduce_params,
queue_name=queue)
backup_info.active_jobs = jobs
backup_info.put(force_writes=True)
return jobs
def get_backup_files(backup_info, selected_kinds=None):
"""Returns the backup filenames for selected kinds or all if None/Empty."""
if backup_info.blob_files:
return backup_info.blob_files
else:
kinds_backup_files = backup_info.get_kind_backup_files(selected_kinds)
return list(itertools.chain(*(
kind_backup_files.files for kind_backup_files in kinds_backup_files)))
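# Illustrative note (kind and file names are placeholders): for a backup whose
# kind files are {'Greeting': ['f1', 'f2'], 'Guestbook': ['f3']} and whose
# blob_files list is empty, get_backup_files(backup_info) flattens the per-kind
# lists into ['f1', 'f2', 'f3'], while get_backup_files(backup_info,
# ['Guestbook']) returns only ['f3'].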
def delete_backup_files(filesystem, backup_files):
if backup_files:
if filesystem == files.BLOBSTORE_FILESYSTEM:
blob_keys = []
for fname in backup_files:
blob_key = files.blobstore.get_blob_key(fname)
if blob_key:
blob_keys.append(blob_key)
if len(blob_keys) == MAX_BLOBS_PER_DELETE:
blobstore_api.delete(blob_keys)
blob_keys = []
if blob_keys:
blobstore_api.delete(blob_keys)
def delete_backup_info(backup_info, delete_files=True):
"""Deletes a backup including its associated files and other metadata."""
if backup_info.blob_files:
delete_backup_files(backup_info.filesystem, backup_info.blob_files)
backup_info.delete(force_writes=True)
else:
kinds_backup_files = tuple(backup_info.get_kind_backup_files())
if delete_files:
delete_backup_files(backup_info.filesystem, itertools.chain(*(
kind_backup_files.files for kind_backup_files in kinds_backup_files)))
db.delete(kinds_backup_files + (backup_info,), force_writes=True)
class DoBackupDeleteHandler(BaseDoHandler):
"""Handler to deal with datastore admin requests to delete backup data."""
SUFFIX = 'backup_delete.do'
def get(self):
self.post()
def post(self):
"""Handler for post requests to datastore_admin/backup_delete.do.
Deletes are executed and user is redirected to the base-path handler.
"""
backup_ids = self.request.get_all('backup_id')
token = self.request.get('xsrf_token')
params = ()
if backup_ids and utils.ValidateXsrfToken(token, XSRF_ACTION):
try:
for backup_info in db.get(backup_ids):
if backup_info:
delete_backup_info(backup_info)
except Exception, e:
logging.exception('Failed to delete datastore backup.')
params = [('error', e.message)]
self.SendRedirect(params=params)
class DoBackupAbortHandler(BaseDoHandler):
"""Handler to deal with datastore admin requests to abort pending backups."""
SUFFIX = 'backup_abort.do'
def get(self):
self.post()
def post(self):
"""Handler for post requests to datastore_admin/backup_abort.do.
Abort is executed and user is redirected to the base-path handler.
"""
backup_ids = self.request.get_all('backup_id')
token = self.request.get('xsrf_token')
params = ()
if backup_ids and utils.ValidateXsrfToken(token, XSRF_ACTION):
try:
for backup_info in db.get(backup_ids):
if backup_info:
operation = backup_info.parent()
if operation.parent_key():
job_id = str(operation.parent_key())
datastore_admin_service = services_client.DatastoreAdminClient()
datastore_admin_service.abort_backup(job_id)
else:
utils.AbortAdminOperation(operation.key())
delete_backup_info(backup_info)
except Exception, e:
logging.exception('Failed to abort pending datastore backup.')
params = [('error', e.message)]
self.SendRedirect(params=params)
class DoBackupRestoreHandler(BaseDoHandler):
"""Handler to restore backup data.
Deals with requests from the admin console.
"""
SUFFIX = 'backup_restore.do'
BACKUP_RESTORE_HANDLER = __name__ + '.RestoreEntity.map'
RESTORE_COMPLETE_HANDLER = __name__ + '.RestoreCompleteHandler'
INPUT_READER = input_readers.__name__ + '.RecordsReader'
_get_html_page = 'do_restore_from_backup.html'
_get_post_html_page = SUFFIX
def _ProcessPostRequest(self):
"""Triggers backup restore mapper jobs and returns their ids."""
backup_id = self.request.get('backup_id')
if not backup_id:
return [('error', 'Unspecified Backup.')]
backup = db.get(db.Key(backup_id))
if not backup:
return [('error', 'Invalid Backup id.')]
if backup.gs_handle:
if not is_readable_gs_handle(backup.gs_handle):
return [('error', 'Backup not readable')]
kinds = set(self.request.get_all('kind'))
if not (backup.blob_files or kinds):
return [('error', 'No kinds were selected')]
backup_kinds = set(backup.kinds)
difference = kinds.difference(backup_kinds)
if difference:
return [('error', 'Backup does not have kind[s] %s' %
', '.join(difference))]
if self.request.get('run_as_a_service', False):
if backup.filesystem != files.GS_FILESYSTEM:
return [('error',
'Restore as a service is only available for GS backups')]
datastore_admin_service = services_client.DatastoreAdminClient()
description = 'Remote restore job: %s' % backup.name
remote_job_id = datastore_admin_service.restore_from_backup(
description, backup_id, list(kinds))
return [('remote_job', remote_job_id)]
queue = self.request.get('queue')
job_name = 'datastore_backup_restore_%s' % re.sub(r'[^\w]', '_',
backup.name)
job_operation = None
try:
operation_name = 'Restoring %s from backup: %s' % (
', '.join(kinds) if kinds else 'all', backup.name)
job_operation = utils.StartOperation(operation_name)
mapper_params = self._GetBasicMapperParams()
kinds = list(kinds) if len(backup_kinds) != len(kinds) else []
mapper_params['files'] = get_backup_files(backup, kinds)
mapper_params['kind_filter'] = kinds
mapper_params['original_app'] = backup.original_app
mapreduce_params = {
'backup_name': backup.name,
'force_ops_writes': True,
}
shard_count = min(max(utils.MAPREDUCE_MIN_SHARDS,
len(mapper_params['files'])),
utils.MAPREDUCE_MAX_SHARDS)
job = utils.StartMap(job_operation.key(), job_name,
self.BACKUP_RESTORE_HANDLER, self.INPUT_READER, None,
mapper_params, mapreduce_params, queue_name=queue,
shard_count=shard_count)
return [('job', job)]
except Exception:
logging.exception('Failed to start a restore from backup job "%s".',
job_name)
if job_operation:
job_operation.status = utils.DatastoreAdminOperation.STATUS_FAILED
job_operation.put(force_writes=True)
raise
class DoBackupImportHandler(BaseDoHandler):
"""Handler to deal with datastore admin requests to import backup info."""
SUFFIX = 'import_backup.do'
def get(self):
self.post()
def post(self):
"""Handler for post requests to datastore_admin/import_backup.do.
Import is executed and user is redirected to the base-path handler.
"""
gs_handle = self.request.get('gs_handle')
token = self.request.get('xsrf_token')
error = None
if gs_handle and utils.ValidateXsrfToken(token, XSRF_ACTION):
try:
bucket_name, path = parse_gs_handle(gs_handle)
file_content = get_gs_object(bucket_name, path)
entities = parse_backup_info_file(file_content)
original_backup_info = entities.next()
entity = datastore.Entity(BackupInformation.kind())
entity.update(original_backup_info)
backup_info = BackupInformation.from_entity(entity)
if original_backup_info.key().app() != os.getenv('APPLICATION_ID'):
backup_info.original_app = original_backup_info.key().app()
def tx():
backup_info.put(force_writes=True)
kind_files_models = []
for entity in entities:
kind_files = backup_info.create_kind_backup_files(
entity.key().name(), entity['files'])
kind_files_models.append(kind_files)
db.put(kind_files_models, force_writes=True)
db.run_in_transaction(tx)
backup_id = str(backup_info.key())
except Exception, e:
logging.exception('Failed to Import datastore backup information.')
error = e.message
if error:
self.SendRedirect(params=[('error', error)])
elif self.request.get('Restore'):
ConfirmRestoreFromBackupHandler.Render(
self, default_backup_id=backup_id,
default_delete_backup_after_restore=True)
else:
self.SendRedirect()
class BackupInformation(db.Model):
"""An entity to keep information on a datastore backup."""
name = db.StringProperty()
kinds = db.StringListProperty()
namespaces = db.StringListProperty()
filesystem = db.StringProperty(default=files.BLOBSTORE_FILESYSTEM)
start_time = db.DateTimeProperty(auto_now_add=True)
active_jobs = db.StringListProperty()
completed_jobs = db.StringListProperty()
complete_time = db.DateTimeProperty(default=None)
blob_files = db.StringListProperty()
original_app = db.StringProperty(default=None)
gs_handle = db.TextProperty(default=None)
@classmethod
def kind(cls):
return utils.BACKUP_INFORMATION_KIND
@classmethod
def name_exists(cls, backup_name):
query = BackupInformation.all(keys_only=True)
query.filter('name =', backup_name)
return query.get() is not None
def create_kind_backup_files_key(self, kind):
return db.Key.from_path(KindBackupFiles.kind(), kind, parent=self.key())
def create_kind_backup_files(self, kind, kind_files):
return KindBackupFiles(key=self.create_kind_backup_files_key(kind),
files=kind_files)
def get_kind_backup_files(self, kinds=None):
if kinds:
return db.get([self.create_kind_backup_files_key(kind) for kind in kinds])
else:
return KindBackupFiles.all().ancestor(self).run()
class KindBackupFiles(db.Model):
"""An entity to keep files information per kind for a backup.
A key for this model should be created using the kind as its name and the
associated BackupInformation entity as its parent.
"""
files = db.StringListProperty(indexed=False)
@property
def backup_kind(self):
return self.key().name()
@classmethod
def kind(cls):
return utils.BACKUP_INFORMATION_FILES_KIND
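# Hedged sketch of the entity hierarchy (the kind name is a placeholder): each
# KindBackupFiles entity is keyed by the backed-up kind name and parented by
# its BackupInformation entity, which is what create_kind_backup_files_key
# above constructs, e.g.
#   db.Key.from_path(KindBackupFiles.kind(), 'Greeting',
#                    parent=backup_info.key())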
def BackupCompleteHandler(operation, job_id, mapreduce_state):
"""Updates BackupInformation record for a completed mapper job."""
mapreduce_spec = mapreduce_state.mapreduce_spec
filenames = mapreduce_spec.mapper.output_writer_class().get_filenames(
mapreduce_state)
_perform_backup_complete(operation,
job_id,
mapreduce_spec.mapper.params['entity_kind'],
mapreduce_spec.params['backup_info_pk'],
mapreduce_spec.mapper.params.get('gs_bucket_name'),
filenames,
mapreduce_spec.params.get('done_callback_queue'))
@db.transactional
def _perform_backup_complete(
operation, job_id, kind, backup_info_pk, gs_bucket_name, filenames, queue):
backup_info = BackupInformation.get(backup_info_pk)
if backup_info:
if job_id in backup_info.active_jobs:
backup_info.active_jobs.remove(job_id)
backup_info.completed_jobs = list(
set(backup_info.completed_jobs + [job_id]))
if backup_info.filesystem == files.BLOBSTORE_FILESYSTEM:
filenames = drop_empty_files(filenames)
kind_backup_files = backup_info.get_kind_backup_files([kind])[0]
if kind_backup_files:
kind_backup_files.files = list(set(kind_backup_files.files + filenames))
else:
kind_backup_files = backup_info.create_kind_backup_files(kind, filenames)
db.put((backup_info, kind_backup_files), force_writes=True)
if operation.status == utils.DatastoreAdminOperation.STATUS_COMPLETED:
deferred.defer(finalize_backup_info, backup_info.key(),
gs_bucket_name,
_url=config.DEFERRED_PATH,
_queue=queue,
_transactional=True)
else:
logging.warn('BackupInfo was not found for %s', backup_info_pk)
def finalize_backup_info(backup_info_pk, gs_bucket):
"""Finalize the state of BackupInformation and creates info file for GS."""
def get_backup_info():
return BackupInformation.get(backup_info_pk)
backup_info = db.run_in_transaction(get_backup_info)
if backup_info:
complete_time = datetime.datetime.now()
backup_info.complete_time = complete_time
gs_handle = None
if backup_info.filesystem == files.GS_FILESYSTEM:
gs_handle = BackupInfoWriter(gs_bucket).write(backup_info)[0]
def set_backup_info_with_finalize_info():
backup_info = get_backup_info()
backup_info.complete_time = complete_time
backup_info.gs_handle = gs_handle
backup_info.put(force_writes=True)
db.run_in_transaction(set_backup_info_with_finalize_info)
logging.info('Backup %s completed', backup_info.name)
else:
logging.warn('Backup %s could not be found', backup_info_pk)
def parse_backup_info_file(content):
"""Returns entities iterator from a backup_info file content."""
reader = records.RecordsReader(cStringIO.StringIO(content))
version = reader.read()
if version != '1':
raise IOError('Unsupported version')
return (datastore.Entity.FromPb(record) for record in reader)
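# Layout note (derived from BackupInfoWriter._write_backup_info below, not a
# separate spec): a backup_info file is a records stream whose first record is
# the version string '1', followed by one serialized BackupInformation
# protobuf and then one serialized KindBackupFiles protobuf per backed-up kind.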
@db.non_transactional
def drop_empty_files(filenames):
"""Deletes empty files and returns filenames minus the deleted ones."""
non_empty_filenames = []
empty_file_keys = []
blobs_info = blobstore.BlobInfo.get(
[files.blobstore.get_blob_key(fn) for fn in filenames])
for filename, blob_info in itertools.izip(filenames, blobs_info):
if blob_info:
if blob_info.size > 0:
non_empty_filenames.append(filename)
else:
empty_file_keys.append(blob_info.key())
blobstore_api.delete(empty_file_keys)
return non_empty_filenames
class BackupInfoWriter(object):
"""A class for writing Datastore backup metadata files."""
def __init__(self, gs_bucket):
"""Construct a BackupInfoWriter.
Args:
gs_bucket: Required string for the target GS bucket.
"""
self.__gs_bucket = gs_bucket
def write(self, backup_info):
"""Write the metadata files for the given backup_info.
As a side effect, updates the backup_info in-memory entity object with the
gs_handle to the Backup info filename. This is not saved to the datastore.
Args:
backup_info: Required BackupInformation.
Returns:
A list with Backup info filename followed by Kind info filenames.
"""
fn = self._write_backup_info(backup_info)
return [fn] + self._write_kind_info(backup_info)
def _generate_filename(self, backup_info, suffix):
key_str = str(backup_info.key()).replace('/', '_')
return '/gs/%s/%s%s' % (self.__gs_bucket, key_str, suffix)
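# Hedged example (bucket and key strings are placeholders): for gs_bucket
# 'example-bucket' and a BackupInformation key whose string form is 'ag9...QM',
# this produces names such as
#   /gs/example-bucket/ag9...QM.backup_info
#   /gs/example-bucket/ag9...QM.Greeting.backup_info
# Slashes in the key string are replaced with '_' so the object name stays flat.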
def _write_backup_info(self, backup_info):
"""Writes a backup_info_file.
Args:
backup_info: Required BackupInformation.
Returns:
Backup info filename.
"""
filename = self._generate_filename(backup_info, '.backup_info')
backup_info.gs_handle = filename
info_file = files.open(files.gs.create(filename), 'a', exclusive_lock=True)
try:
with records.RecordsWriter(info_file) as writer:
writer.write('1')
writer.write(db.model_to_protobuf(backup_info).SerializeToString())
for kind_files in backup_info.get_kind_backup_files():
writer.write(db.model_to_protobuf(kind_files).SerializeToString())
finally:
info_file.close(finalize=True)
return filename
def _write_kind_info(self, backup_info):
"""Writes type information schema for each kind in backup_info.
Args:
backup_info: Required BackupInformation.
Returns:
A list with all created filenames.
"""
def get_backup_files_tx():
kind_backup_files_list = []
for kind_backup_files in backup_info.get_kind_backup_files():
kind_backup_files_list.append(kind_backup_files)
return kind_backup_files_list
kind_backup_files_list = db.run_in_transaction(get_backup_files_tx)
filenames = []
for kind_backup_files in kind_backup_files_list:
backup = self._create_kind_backup(backup_info, kind_backup_files)
filename = self._generate_filename(
backup_info, '.%s.backup_info' % kind_backup_files.backup_kind)
self._write_kind_backup_info_file(filename, backup)
filenames.append(filename)
return filenames
def _create_kind_backup(self, backup_info, kind_backup_files):
"""Creates and populate a backup_pb2.Backup."""
backup = backup_pb2.Backup()
backup.backup_info.backup_name = backup_info.name
backup.backup_info.start_timestamp = datastore_types.DatetimeToTimestamp(
backup_info.start_time)
backup.backup_info.end_timestamp = datastore_types.DatetimeToTimestamp(
backup_info.complete_time)
kind = kind_backup_files.backup_kind
kind_info = backup.kind_info.add()
kind_info.kind = kind
kind_info.entity_schema.kind = kind
kind_info.file.extend(kind_backup_files.files)
entity_type_info = EntityTypeInfo(kind=kind)
for sharded_aggregation in SchemaAggregationResult.load(
backup_info.key(), kind):
if sharded_aggregation.is_partial:
kind_info.is_partial = True
if sharded_aggregation.entity_type_info:
entity_type_info.merge(sharded_aggregation.entity_type_info)
entity_type_info.populate_entity_schema(kind_info.entity_schema)
return backup
@classmethod
def _write_kind_backup_info_file(cls, filename, backup):
"""Writes a kind backup_info.
Args:
filename: The name of the file to be created as string.
backup: apphosting.ext.datastore_admin.Backup proto.
"""
f = files.open(files.gs.create(filename), 'a', exclusive_lock=True)
try:
f.write(backup.SerializeToString())
finally:
f.close(finalize=True)
class PropertyTypeInfo(json_util.JsonMixin):
"""Type information for an entity property."""
def __init__(self, name, is_repeated=False, primitive_types=None,
embedded_entities=None):
"""Construct a PropertyTypeInfo instance.
Args:
name: The name of the property as a string.
is_repeated: A boolean that indicates if the property is repeated.
primitive_types: Optional list of PrimitiveType integer values.
embedded_entities: Optional list of EntityTypeInfo.
"""
self.__name = name
self.__is_repeated = is_repeated
self.__primitive_types = set(primitive_types) if primitive_types else set()
self.__embedded_entities = {}
for entity in embedded_entities or ():
if entity.kind in self.__embedded_entities:
self.__embedded_entities[entity.kind].merge(entity)
else:
self.__embedded_entities[entity.kind] = entity
@property
def name(self):
return self.__name
@property
def is_repeated(self):
return self.__is_repeated
@property
def primitive_types(self):
return self.__primitive_types
def embedded_entities_kind_iter(self):
return self.__embedded_entities.iterkeys()
def get_embedded_entity(self, kind):
return self.__embedded_entities.get(kind)
def merge(self, other):
"""Merge a PropertyTypeInfo with this instance.
Args:
other: Required PropertyTypeInfo to merge.
Returns:
True if anything was changed. False otherwise.
Raises:
ValueError: if property names do not match.
TypeError: if other is not instance of PropertyTypeInfo.
"""
if not isinstance(other, PropertyTypeInfo):
raise TypeError('Expected PropertyTypeInfo, was %r' % (other,))
if other.__name != self.__name:
raise ValueError('Property names mismatch (%s, %s)' %
(self.__name, other.__name))
changed = False
if other.__is_repeated and not self.__is_repeated:
self.__is_repeated = True
changed = True
if not other.__primitive_types.issubset(self.__primitive_types):
self.__primitive_types = self.__primitive_types.union(
other.__primitive_types)
changed = True
for kind, other_embedded_entity in other.__embedded_entities.iteritems():
embedded_entity = self.__embedded_entities.get(kind)
if embedded_entity:
changed = embedded_entity.merge(other_embedded_entity) or changed
else:
self.__embedded_entities[kind] = other_embedded_entity
changed = True
return changed
def populate_entity_schema_field(self, entity_schema):
"""Add an populate a Field to the given entity_schema.
Args:
entity_schema: apphosting.ext.datastore_admin.EntitySchema proto.
"""
if not (self.__primitive_types or self.__embedded_entities):
return
field = entity_schema.field.add()
field.name = self.__name
field_type = field.type.add()
field_type.is_list = self.__is_repeated
field_type.primitive_type.extend(self.__primitive_types)
for embedded_entity in self.__embedded_entities.itervalues():
embedded_entity_schema = field_type.embedded_schema.add()
embedded_entity.populate_entity_schema(embedded_entity_schema)
def to_json(self):
json = dict()
json['name'] = self.__name
json['is_repeated'] = self.__is_repeated
json['primitive_types'] = list(self.__primitive_types)
json['embedded_entities'] = [e.to_json() for e in
self.__embedded_entities.itervalues()]
return json
@classmethod
def from_json(cls, json):
return cls(json['name'], json['is_repeated'], json.get('primitive_types'),
[EntityTypeInfo.from_json(entity_json) for entity_json
in json.get('embedded_entities')])
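# Hedged illustration of the JSON shape produced by to_json() and consumed by
# from_json() above (the property name and values are placeholders):
#   {'name': 'tags',
#    'is_repeated': True,
#    'primitive_types': [backup_pb2.EntitySchema.STRING],
#    'embedded_entities': []}  # nested EntityTypeInfo.to_json() dicts, if any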
class EntityTypeInfo(json_util.JsonMixin):
"""Type information for an entity."""
def __init__(self, kind=None, properties=None):
"""Construct an EntityTypeInfo instance.
Args:
kind: An optional kind name as string.
properties: An optional list of PropertyTypeInfo.
"""
self.__kind = kind
self.__properties = {}
for property_type_info in properties or ():
if property_type_info.name in self.__properties:
self.__properties[property_type_info.name].merge(property_type_info)
else:
self.__properties[property_type_info.name] = property_type_info
@property
def kind(self):
return self.__kind
def properties_name_iter(self):
return self.__properties.iterkeys()
def get_property(self, name):
return self.__properties.get(name)
def merge(self, other):
"""Merge an EntityTypeInfo with this instance.
Args:
other: Required EntityTypeInfo to merge.
Returns:
True if anything was changed. False otherwise.
Raises:
ValueError: if kinds do not match.
TypeError: if other is not instance of EntityTypeInfo.
"""
if not isinstance(other, EntityTypeInfo):
raise TypeError('Expected EntityTypeInfo, was %r' % (other,))
if other.__kind != self.__kind:
raise ValueError('Kinds mismatch (%s, %s)' % (self.__kind, other.__kind))
changed = False
for name, other_property in other.__properties.iteritems():
self_property = self.__properties.get(name)
if self_property:
changed = self_property.merge(other_property) or changed
else:
self.__properties[name] = other_property
changed = True
return changed
def populate_entity_schema(self, entity_schema):
"""Populates the given entity_schema with values from this instance.
Args:
entity_schema: apphosting.ext.datastore_admin.EntitySchema proto.
"""
if self.__kind:
entity_schema.kind = self.__kind
for property_type_info in self.__properties.itervalues():
property_type_info.populate_entity_schema_field(entity_schema)
def to_json(self):
return {
'kind': self.__kind,
'properties': [p.to_json() for p in self.__properties.itervalues()]
}
@classmethod
def from_json(cls, json):
kind = json.get('kind')
properties_json = json.get('properties')
if properties_json:
return cls(kind, [PropertyTypeInfo.from_json(p) for p in properties_json])
else:
return cls(kind)
@classmethod
def create_from_entity_proto(cls, entity_proto):
"""Creates and populates an EntityTypeInfo from an EntityProto."""
properties = [cls.__get_property_type_info(property_proto) for
property_proto in itertools.chain(
entity_proto.property_list(),
entity_proto.raw_property_list())]
kind = utils.get_kind_from_entity_pb(entity_proto)
return cls(kind, properties)
@classmethod
def __get_property_type_info(cls, property_proto):
"""Returns the type mapping for the provided property."""
name = property_proto.name()
is_repeated = bool(property_proto.multiple())
primitive_type = None
entity_type = None
if property_proto.has_meaning():
primitive_type = MEANING_TO_PRIMITIVE_TYPE.get(property_proto.meaning())
if primitive_type is None:
value = property_proto.value()
if value.has_int64value():
primitive_type = backup_pb2.EntitySchema.INTEGER
elif value.has_booleanvalue():
primitive_type = backup_pb2.EntitySchema.BOOLEAN
elif value.has_stringvalue():
if property_proto.meaning() == entity_pb.Property.ENTITY_PROTO:
entity_proto = entity_pb.EntityProto()
try:
entity_proto.ParsePartialFromString(value.stringvalue())
except Exception:
pass
else:
entity_type = EntityTypeInfo.create_from_entity_proto(entity_proto)
else:
primitive_type = backup_pb2.EntitySchema.STRING
elif value.has_doublevalue():
primitive_type = backup_pb2.EntitySchema.FLOAT
elif value.has_pointvalue():
primitive_type = backup_pb2.EntitySchema.GEO_POINT
elif value.has_uservalue():
primitive_type = backup_pb2.EntitySchema.USER
elif value.has_referencevalue():
primitive_type = backup_pb2.EntitySchema.REFERENCE
return PropertyTypeInfo(
name, is_repeated,
(primitive_type,) if primitive_type is not None else None,
(entity_type,) if entity_type else None)
class SchemaAggregationResult(db.Model):
"""Persistent aggregated type information for a kind.
An instance can be retrieved via the load method or created
using the create method. An instance aggregates all type information
for all seen embedded_entities via the merge method and is persisted when
needed using the model's put method.
"""
entity_type_info = json_util.JsonProperty(
EntityTypeInfo, default=EntityTypeInfo(), indexed=False)
is_partial = db.BooleanProperty(default=False)
def merge(self, other):
"""Merge a SchemaAggregationResult or an EntityTypeInfo with this instance.
Args:
other: Required SchemaAggregationResult or EntityTypeInfo to merge.
Returns:
True if anything was changed. False otherwise.
"""
if self.is_partial:
return False
if isinstance(other, SchemaAggregationResult):
other = other.entity_type_info
return self.entity_type_info.merge(other)
@classmethod
def _get_parent_key(cls, backup_id, kind_name):
return datastore_types.Key.from_path('Kind', kind_name, parent=backup_id)
@classmethod
def create(cls, backup_id, kind_name, shard_id):
"""Create SchemaAggregationResult instance.
Args:
backup_id: Required BackupInformation Key.
kind_name: Required kind name as string.
shard_id: Required shard id as string.
Returns:
A new SchemaAggregationResult instance.
"""
parent = cls._get_parent_key(backup_id, kind_name)
return SchemaAggregationResult(
key_name=shard_id, parent=parent,
entity_type_info=EntityTypeInfo(kind=kind_name))
@classmethod
def load(cls, backup_id, kind_name, shard_id=None):
"""Retrieve SchemaAggregationResult from the Datastore.
Args:
backup_id: Required BackupInformation Key.
kind_name: Required kind name as string.
shard_id: Optional shard id as string.
Returns:
A SchemaAggregationResult iterator, or a single entity if shard_id is not None.
"""
parent = cls._get_parent_key(backup_id, kind_name)
if shard_id:
key = datastore_types.Key.from_path(cls.kind(), shard_id, parent=parent)
return SchemaAggregationResult.get(key)
else:
return db.Query(cls).ancestor(parent).run()
@classmethod
def kind(cls):
return utils.BACKUP_INFORMATION_KIND_TYPE_INFO
class SchemaAggregationPool(object):
"""An MR pool to aggregation type information per kind."""
def __init__(self, backup_id, kind, shard_id):
"""Construct SchemaAggregationPool instance.
Args:
backup_id: Required BackupInformation Key.
kind: Required kind name as string.
shard_id: Required shard id as string.
"""
self.__backup_id = backup_id
self.__kind = kind
self.__shard_id = shard_id
self.__aggregation = SchemaAggregationResult.load(backup_id, kind, shard_id)
if not self.__aggregation:
self.__aggregation = SchemaAggregationResult.create(backup_id, kind,
shard_id)
self.__needs_save = True
else:
self.__needs_save = False
def merge(self, entity_type_info):
"""Merge EntityTypeInfo into aggregated type information."""
if self.__aggregation.merge(entity_type_info):
self.__needs_save = True
def flush(self):
"""Save aggregated type information to the datastore if changed."""
if self.__needs_save:
def update_aggregation_tx():
aggregation = SchemaAggregationResult.load(
self.__backup_id, self.__kind, self.__shard_id)
if aggregation:
if aggregation.merge(self.__aggregation):
aggregation.put(force_writes=True)
self.__aggregation = aggregation
else:
self.__aggregation.put(force_writes=True)
def mark_aggregation_as_partial_tx():
aggregation = SchemaAggregationResult.load(
self.__backup_id, self.__kind, self.__shard_id)
if aggregation is None:
aggregation = SchemaAggregationResult.create(
self.__backup_id, self.__kind, self.__shard_id)
aggregation.is_partial = True
aggregation.put(force_writes=True)
self.__aggregation = aggregation
try:
db.run_in_transaction(update_aggregation_tx)
except apiproxy_errors.RequestTooLargeError:
db.run_in_transaction(mark_aggregation_as_partial_tx)
self.__needs_save = False
class AggregateSchema(op.Operation):
"""An MR Operation to aggregation type information for a kind.
This operation will register an MR pool, SchemaAggregationPool, if
one is not already registered and will invoke the pool's merge operation
per entity. The pool is responsible for keeping a persistent state of
type aggregation using the sharded db model, SchemaAggregationResult.
"""
def __init__(self, entity_proto):
self.__entity_info = EntityTypeInfo.create_from_entity_proto(entity_proto)
def __call__(self, ctx):
pool = ctx.get_pool('schema_aggregation_pool')
if not pool:
backup_id = datastore_types.Key(
context.get().mapreduce_spec.params['backup_info_pk'])
pool = SchemaAggregationPool(
backup_id, self.__entity_info.kind, ctx.shard_id)
ctx.register_pool('schema_aggregation_pool', pool)
pool.merge(self.__entity_info)
class BackupEntity(object):
"""A class which dumps the entity to the writer."""
def map(self, entity_proto):
"""Backup entity map handler.
Args:
entity_proto: An instance of entity_pb.EntityProto.
Yields:
A serialized entity_pb.EntityProto as a string
"""
yield entity_proto.SerializeToString()
yield AggregateSchema(entity_proto)
class RestoreEntity(object):
"""A class which restore the entity to datastore."""
def __init__(self):
self.initialized = False
self.kind_filter = None
self.app_id = None
def initialize(self):
if self.initialized:
return
mapper_params = context.get().mapreduce_spec.mapper.params
kind_filter = mapper_params.get('kind_filter')
self.kind_filter = set(kind_filter) if kind_filter else None
original_app = mapper_params.get('original_app')
if original_app and os.getenv('APPLICATION_ID') != original_app:
self.app_id = os.getenv('APPLICATION_ID')
self.initialized = True
def map(self, record):
"""Restore entity map handler.
Args:
record: A serialized entity_pb.EntityProto.
Yields:
An operation.db.Put for the mapped entity
"""
self.initialize()
pb = entity_pb.EntityProto(contents=record)
if self.app_id:
utils.FixKeys(pb, self.app_id)
entity = datastore.Entity.FromPb(pb)
if not self.kind_filter or entity.kind() in self.kind_filter:
yield op.db.Put(entity)
if self.app_id:
yield utils.ReserveKey(entity.key())
def validate_gs_bucket_name(bucket_name):
"""Validate the format of the given bucket_name.
Validation rules are based on:
https://developers.google.com/storage/docs/bucketnaming#requirements
Args:
bucket_name: The bucket name to validate.
Raises:
BackupValidationError: If the bucket name is invalid.
"""
if len(bucket_name) > MAX_BUCKET_LEN:
raise BackupValidationError(
'Bucket name length should not be longer than %d' % MAX_BUCKET_LEN)
if len(bucket_name) < MIN_BUCKET_LEN:
raise BackupValidationError(
'Bucket name length should be longer than %d' % MIN_BUCKET_LEN)
if bucket_name.lower().startswith('goog'):
raise BackupValidationError(
'Bucket name should not start with a "goog" prefix')
bucket_elements = bucket_name.split('.')
for bucket_element in bucket_elements:
if len(bucket_element) > MAX_BUCKET_SEGMENT_LEN:
raise BackupValidationError(
'Segment length of bucket name should not be longer than %d' %
MAX_BUCKET_SEGMENT_LEN)
if not re.match(BUCKET_PATTERN, bucket_name):
raise BackupValidationError('Invalid bucket name "%s"' % bucket_name)
def is_accessible_bucket_name(bucket_name):
"""Returns True if the application has access to the specified bucket."""
scope = config.GoogleApiScope('devstorage.read_write')
bucket_url = config.GsBucketURL(bucket_name)
auth_token, _ = app_identity.get_access_token(scope)
result = urlfetch.fetch(bucket_url, method=urlfetch.HEAD, headers={
'Authorization': 'OAuth %s' % auth_token,
'x-goog-api-version': '2'})
return result and result.status_code == 200
def verify_bucket_writable(bucket_name):
"""Verify the application can write to the specified bucket.
Args:
bucket_name: The bucket to verify.
Raises:
BackupValidationError: If the bucket is not writable.
"""
path = '/gs/%s' % bucket_name
try:
file_names = files.gs.listdir(path,
{'prefix': TEST_WRITE_FILENAME_PREFIX,
'max_keys': MAX_KEYS_LIST_SIZE})
except (files.InvalidParameterError, files.PermissionDeniedError):
raise BackupValidationError('Bucket "%s" not accessible' % bucket_name)
except files.InvalidFileNameError:
raise BackupValidationError('Bucket "%s" does not exist' % bucket_name)
file_name = '%s/%s.tmp' % (path, TEST_WRITE_FILENAME_PREFIX)
file_name_try = 0
while True:
if file_name_try >= MAX_TEST_FILENAME_TRIES:
return
if file_name not in file_names:
break
gen = random.randint(0, 9999)
file_name = '%s/%s_%s.tmp' % (path, TEST_WRITE_FILENAME_PREFIX, gen)
file_name_try += 1
try:
test_file = files.open(files.gs.create(file_name), 'a', exclusive_lock=True)
try:
test_file.write('test')
finally:
test_file.close(finalize=True)
except files.PermissionDeniedError:
raise BackupValidationError('Bucket "%s" is not writable' % bucket_name)
try:
files.delete(file_name)
except (files.InvalidArgumentError, files.InvalidFileNameError, IOError):
logging.warn('Failed to delete test file %s', file_name)
def is_readable_gs_handle(gs_handle):
"""Return True if the application can read the specified gs_handle."""
try:
with files.open(gs_handle) as bak_file:
bak_file.read(1)
except files.PermissionDeniedError:
return False
return True
def parse_gs_handle(gs_handle):
"""Splits [/gs/]?bucket_name[/folder]*[/file]? to (bucket_name, path | '')."""
if gs_handle.startswith('/'):
filesystem = gs_handle[1:].split('/', 1)[0]
if filesystem == 'gs':
gs_handle = gs_handle[4:]
else:
raise BackupValidationError('Unsupported filesystem: %s' % filesystem)
tokens = gs_handle.split('/', 1)
return (tokens[0], '') if len(tokens) == 1 else tuple(tokens)
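# Behaviour sketch (bucket and path values are illustrative):
#   parse_gs_handle('/gs/example-bucket/backups/x.backup_info')
#       -> ('example-bucket', 'backups/x.backup_info')
#   parse_gs_handle('example-bucket/backups') -> ('example-bucket', 'backups')
#   parse_gs_handle('example-bucket')         -> ('example-bucket', '')
# A leading '/<fs>/' prefix other than '/gs/' raises BackupValidationError.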
def validate_and_canonicalize_gs_bucket(gs_bucket_name):
bucket_name, path = parse_gs_handle(gs_bucket_name)
gs_bucket_name = ('%s/%s' % (bucket_name, path)).rstrip('/')
validate_gs_bucket_name(bucket_name)
verify_bucket_writable(bucket_name)
return gs_bucket_name
def list_bucket_files(bucket_name, prefix, max_keys=1000):
"""Returns a listing of of a bucket that matches the given prefix."""
scope = config.GoogleApiScope('devstorage.read_only')
bucket_url = config.GsBucketURL(bucket_name)
url = bucket_url + '?'
query = [('max-keys', max_keys)]
if prefix:
query.append(('prefix', prefix))
url += urllib.urlencode(query)
auth_token, _ = app_identity.get_access_token(scope)
result = urlfetch.fetch(url, method=urlfetch.GET, headers={
'Authorization': 'OAuth %s' % auth_token,
'x-goog-api-version': '2'})
if result and result.status_code == 200:
doc = xml.dom.minidom.parseString(result.content)
return [node.childNodes[0].data for node in doc.getElementsByTagName('Key')]
raise BackupValidationError('Request to Google Cloud Storage failed')
def get_gs_object(bucket_name, path):
"""Returns a listing of of a bucket that matches the given prefix."""
scope = config.GoogleApiScope('devstorage.read_only')
bucket_url = config.GsBucketURL(bucket_name)
url = bucket_url + path
auth_token, _ = app_identity.get_access_token(scope)
result = urlfetch.fetch(url, method=urlfetch.GET, headers={
'Authorization': 'OAuth %s' % auth_token,
'x-goog-api-version': '2'})
if result and result.status_code == 200:
return result.content
if result and result.status_code == 403:
raise BackupValidationError(
'Requested path %s is not accessible/access denied' % url)
if result and result.status_code == 404:
raise BackupValidationError('Requested path %s was not found' % url)
raise BackupValidationError('Error encountered accessing requested path %s' %
url)
def get_queue_names(app_id=None, max_rows=100):
"""Returns a list with all non-special queue names for app_id."""
rpc = apiproxy_stub_map.UserRPC('taskqueue')
request = taskqueue_service_pb.TaskQueueFetchQueuesRequest()
response = taskqueue_service_pb.TaskQueueFetchQueuesResponse()
if app_id:
request.set_app_id(app_id)
request.set_max_rows(max_rows)
queues = ['default']
try:
rpc.make_call('FetchQueues', request, response)
rpc.check_success()
for queue in response.queue_list():
if (queue.mode() == taskqueue_service_pb.TaskQueueMode.PUSH and
not queue.queue_name().startswith('__') and
queue.queue_name() != 'default'):
queues.append(queue.queue_name())
except Exception:
logging.exception('Failed to get queue names.')
return queues
def handlers_list(base_path):
return [
(r'%s/%s' % (base_path, BackupLinkHandler.SUFFIX),
BackupLinkHandler),
(r'%s/%s' % (base_path, ConfirmBackupHandler.SUFFIX),
ConfirmBackupHandler),
(r'%s/%s' % (base_path, DoBackupHandler.SUFFIX), DoBackupHandler),
(r'%s/%s' % (base_path, DoBackupRestoreHandler.SUFFIX),
DoBackupRestoreHandler),
(r'%s/%s' % (base_path, DoBackupDeleteHandler.SUFFIX),
DoBackupDeleteHandler),
(r'%s/%s' % (base_path, DoBackupAbortHandler.SUFFIX),
DoBackupAbortHandler),
(r'%s/%s' % (base_path, DoBackupImportHandler.SUFFIX),
DoBackupImportHandler),
]
|
pigeonflight/strider-plone
|
docker/appengine/google/appengine/ext/datastore_admin/backup_handler.py
|
Python
|
mit
| 66,025
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
dataobject.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
import re
from qgis.core import QGis, QgsProject, QgsVectorFileWriter, QgsMapLayer, QgsRasterLayer, QgsVectorLayer, QgsMapLayerRegistry, QgsCoordinateReferenceSystem
from qgis.gui import QgsSublayersDialog
from PyQt4.QtCore import QSettings
from qgis.utils import iface
from processing.core.ProcessingConfig import ProcessingConfig
from processing.algs.gdal.GdalUtils import GdalUtils
from processing.tools.system import getTempFilenameInTempFolder, getTempFilename, isWindows
ALL_TYPES = [-1]
_loadedLayers = {}
def resetLoadedLayers():
global _loadedLayers
_loadedLayers = {}
def getSupportedOutputVectorLayerExtensions():
formats = QgsVectorFileWriter.supportedFiltersAndFormats()
exts = ['shp'] # shp is the default, should be the first
for extension in formats.keys():
extension = unicode(extension)
extension = extension[extension.find('*.') + 2:]
extension = extension[:extension.find(' ')]
if extension.lower() != 'shp':
exts.append(extension)
return exts
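# Illustrative note (filter strings are examples, not guaranteed by the API):
# keys of supportedFiltersAndFormats() look like 'ESRI Shapefile (*.shp *.SHP)'
# or 'GeoJSON (*.geojson)', and the slicing above pulls out the first
# extension after '*.', yielding e.g. ['shp', 'geojson', ...] with 'shp'
# forced to the front.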
def getSupportedOutputRasterLayerExtensions():
allexts = ['tif']
for exts in GdalUtils.getSupportedRasters().values():
for ext in exts:
if ext not in allexts:
allexts.append(ext)
return allexts
def getSupportedOutputTableExtensions():
exts = ['csv']
return exts
def getRasterLayers(sorting=True):
layers = QgsProject.instance().layerTreeRoot().findLayers()
raster = []
for layer in layers:
mapLayer = layer.layer()
if mapLayer.type() == QgsMapLayer.RasterLayer:
if mapLayer.providerType() == 'gdal': # only gdal file-based layers
raster.append(mapLayer)
if sorting:
return sorted(raster, key=lambda layer: layer.name().lower())
else:
return raster
def getVectorLayers(shapetype=[-1], sorting=True):
layers = QgsProject.instance().layerTreeRoot().findLayers()
vector = []
for layer in layers:
mapLayer = layer.layer()
if mapLayer.type() == QgsMapLayer.VectorLayer and mapLayer.dataProvider().name() != "grass":
if (mapLayer.hasGeometryType() and
(shapetype == ALL_TYPES or mapLayer.geometryType() in shapetype)):
vector.append(mapLayer)
if sorting:
return sorted(vector, key=lambda layer: layer.name().lower())
else:
return vector
def getAllLayers():
layers = []
layers += getRasterLayers()
layers += getVectorLayers()
return sorted(layers, key=lambda layer: layer.name().lower())
def getTables(sorting=True):
layers = QgsProject.instance().layerTreeRoot().findLayers()
tables = []
for layer in layers:
mapLayer = layer.layer()
if mapLayer.type() == QgsMapLayer.VectorLayer:
tables.append(mapLayer)
if sorting:
return sorted(tables, key=lambda table: table.name().lower())
else:
return tables
def extent(layers):
first = True
for layer in layers:
if not isinstance(layer, (QgsRasterLayer, QgsVectorLayer)):
layer = getObjectFromUri(layer)
if layer is None:
continue
if first:
xmin = layer.extent().xMinimum()
xmax = layer.extent().xMaximum()
ymin = layer.extent().yMinimum()
ymax = layer.extent().yMaximum()
else:
xmin = min(xmin, layer.extent().xMinimum())
xmax = max(xmax, layer.extent().xMaximum())
ymin = min(ymin, layer.extent().yMinimum())
ymax = max(ymax, layer.extent().yMaximum())
first = False
if first:
return '0,0,0,0'
else:
return unicode(xmin) + ',' + unicode(xmax) + ',' + unicode(ymin) + ',' + unicode(ymax)
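# Hedged usage note: the combined bounding box is returned as a single string
# 'xmin,xmax,ymin,ymax' (or '0,0,0,0' if no layer could be resolved), e.g.
# '1000.0,2000.0,500.0,900.0' for illustrative coordinates.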
def loadList(layers):
for layer in layers:
load(layer)
def load(fileName, name=None, crs=None, style=None):
"""Loads a layer/table into the current project, given its file.
"""
if fileName is None:
return
prjSetting = None
settings = QSettings()
if crs is not None:
prjSetting = settings.value('/Projections/defaultBehaviour')
settings.setValue('/Projections/defaultBehaviour', '')
if name is None:
name = os.path.split(fileName)[1]
qgslayer = QgsVectorLayer(fileName, name, 'ogr')
if qgslayer.isValid():
if crs is not None and qgslayer.crs() is None:
qgslayer.setCrs(crs, False)
if style is None:
if qgslayer.geometryType() == QGis.Point:
style = ProcessingConfig.getSetting(ProcessingConfig.VECTOR_POINT_STYLE)
elif qgslayer.geometryType() == QGis.Line:
style = ProcessingConfig.getSetting(ProcessingConfig.VECTOR_LINE_STYLE)
else:
style = ProcessingConfig.getSetting(ProcessingConfig.VECTOR_POLYGON_STYLE)
qgslayer.loadNamedStyle(style)
QgsMapLayerRegistry.instance().addMapLayers([qgslayer])
else:
qgslayer = QgsRasterLayer(fileName, name)
if qgslayer.isValid():
if crs is not None and qgslayer.crs() is None:
qgslayer.setCrs(crs, False)
if style is None:
style = ProcessingConfig.getSetting(ProcessingConfig.RASTER_STYLE)
qgslayer.loadNamedStyle(style)
QgsMapLayerRegistry.instance().addMapLayers([qgslayer])
iface.legendInterface().refreshLayerSymbology(qgslayer)
else:
if prjSetting:
settings.setValue('/Projections/defaultBehaviour', prjSetting)
raise RuntimeError('Could not load layer: ' + unicode(fileName)
+ '\nCheck the processing framework log to look for errors')
if prjSetting:
settings.setValue('/Projections/defaultBehaviour', prjSetting)
return qgslayer
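# Hedged usage sketch (the file name and CRS below are placeholders, not
# defined in this module):
#   layer = load('/tmp/buffer_result.shp', name='Buffered',
#                crs=QgsCoordinateReferenceSystem('EPSG:4326'))
# This adds the layer to the current project with the configured default style
# and returns the QgsVectorLayer (or QgsRasterLayer) instance.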
def getObjectFromName(name):
layers = getAllLayers()
for layer in layers:
if layer.name() == name:
return layer
def getObject(uriorname):
ret = getObjectFromName(uriorname)
if ret is None:
ret = getObjectFromUri(uriorname)
return ret
def normalizeLayerSource(source):
if isWindows():
source = source.replace('\\', '/')
source = source.replace('"', "'")
return source
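# Behaviour sketch (paths are illustrative):
#   normalizeLayerSource('C:\\data\\roads.shp')      -> 'C:/data/roads.shp' (on Windows)
#   normalizeLayerSource('dbname="foo" table="bar"') -> "dbname='foo' table='bar'"
# so that differently slashed or quoted URIs compare equal in getObjectFromUri.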
def getObjectFromUri(uri, forceLoad=True):
"""Returns an object (layer/table) given a source definition.
If forceLoad is true, it tries to load it if it is not currently open.
Otherwise, it will return the object only if it is loaded in QGIS.
"""
if uri is None:
return None
if uri in _loadedLayers:
return _loadedLayers[uri]
layers = getRasterLayers()
for layer in layers:
if normalizeLayerSource(layer.source()) == normalizeLayerSource(uri):
return layer
layers = getVectorLayers()
for layer in layers:
if normalizeLayerSource(layer.source()) == normalizeLayerSource(uri):
return layer
tables = getTables()
for table in tables:
if normalizeLayerSource(table.source()) == normalizeLayerSource(uri):
return table
if forceLoad:
settings = QSettings()
prjSetting = settings.value('/Projections/defaultBehaviour')
settings.setValue('/Projections/defaultBehaviour', '')
# If it is not opened, we open it
layer = QgsVectorLayer(uri, uri, 'ogr')
if layer.isValid():
if prjSetting:
settings.setValue('/Projections/defaultBehaviour', prjSetting)
_loadedLayers[normalizeLayerSource(layer.source())] = layer
return layer
layer = QgsVectorLayer(uri, uri, 'postgres')
if layer.isValid():
if prjSetting:
settings.setValue('/Projections/defaultBehaviour', prjSetting)
_loadedLayers[normalizeLayerSource(layer.source())] = layer
return layer
layer = QgsRasterLayer(uri, uri)
if layer.isValid():
if prjSetting:
settings.setValue('/Projections/defaultBehaviour', prjSetting)
_loadedLayers[normalizeLayerSource(layer.source())] = layer
return layer
if prjSetting:
settings.setValue('/Projections/defaultBehaviour', prjSetting)
else:
return None
def exportVectorLayer(layer):
"""Takes a QgsVectorLayer and returns the filename to refer to it,
which allows external apps which support only file-based layers to
use it. It performs the necessary export when the input layer is
not in a standard format suitable for most applications, when it is
a remote or db-based (non file-based) one, or when there is a
selection that should be used, in which case only the selected
features are exported.
Currently, the output is restricted to shapefiles, so anything
that is not in a shapefile will get exported. It also exports to
a new file if the original one contains non-ASCII characters.
"""
settings = QSettings()
systemEncoding = settings.value('/UI/encoding', 'System')
filename = os.path.basename(unicode(layer.source()))
idx = filename.rfind('.')
if idx != -1:
filename = filename[:idx]
filename = unicode(layer.name())
validChars = \
'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789:'
filename = ''.join(c for c in filename if c in validChars)
if len(filename) == 0:
filename = 'layer'
output = getTempFilenameInTempFolder(filename + '.shp')
provider = layer.dataProvider()
useSelection = ProcessingConfig.getSetting(ProcessingConfig.USE_SELECTED)
if useSelection and layer.selectedFeatureCount() != 0:
writer = QgsVectorFileWriter(output, systemEncoding,
layer.pendingFields(),
provider.geometryType(), layer.crs())
selection = layer.selectedFeatures()
for feat in selection:
writer.addFeature(feat)
del writer
return output
else:
isASCII = True
try:
unicode(layer.source()).decode('ascii')
except UnicodeEncodeError:
isASCII = False
if not unicode(layer.source()).endswith('shp') or not isASCII:
writer = QgsVectorFileWriter(
output, systemEncoding,
layer.pendingFields(), provider.geometryType(),
layer.crs()
)
for feat in layer.getFeatures():
writer.addFeature(feat)
del writer
return output
else:
return unicode(layer.source())
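# Illustrative usage (a sketch, not part of the original module):
#   path = exportVectorLayer(some_vector_layer)
# returns the layer's own source if it is already an ASCII-named shapefile and
# no selection has to be honoured; otherwise it returns the path of a
# temporary shapefile written with QgsVectorFileWriter.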
def exportRasterLayer(layer):
"""Takes a QgsRasterLayer and returns the filename to refer to it,
which allows external apps which support only file-based layers to
use it. It performs the necessary export in case the input layer
is not in a standard format suitable for most applications, it is
a remote one or db-based (non-file based) one.
Currently, the output is restricted to GeoTIFF, but not every other
format is exported: only formats not supported by GDAL are converted,
since it is assumed that the external app uses GDAL to read the layer.
"""
# TODO: Do the conversion here
return unicode(layer.source())
def exportTable(table):
"""Takes a QgsVectorLayer and returns the filename to refer to its
attributes table, which allows external apps which support only
file-based layers to use it.
It performs the necessary export when the input layer is not in
a standard format suitable for most applications, or when it is a
remote or db-based (non file-based) one.
Currently, the output is restricted to DBF. It also exports to a new
file if the original one contains non-ASCII characters.
"""
settings = QSettings()
systemEncoding = settings.value('/UI/encoding', 'System')
output = getTempFilename()
provider = table.dataProvider()
isASCII = True
try:
unicode(table.source()).decode('ascii')
except UnicodeEncodeError:
isASCII = False
isDbf = unicode(table.source()).endswith('dbf') \
or unicode(table.source()).endswith('shp')
if not isDbf or not isASCII:
writer = QgsVectorFileWriter(output, systemEncoding,
provider.fields(), QGis.WKBNoGeometry,
QgsCoordinateReferenceSystem('4326'))
for feat in table.getFeatures():
writer.addFeature(feat)
del writer
return output + '.dbf'
else:
filename = unicode(table.source())
if unicode(table.source()).endswith('shp'):
return filename[:-3] + 'dbf'
else:
return filename
def getRasterSublayer(path, param):
layer = QgsRasterLayer(path)
try:
# If the layer is a raster layer and has multiple sublayers, let the user choose one.
# Based on QgisApp::askUserForGDALSublayers
if layer and param.showSublayersDialog and layer.dataProvider().name() == "gdal" and len(layer.subLayers()) > 1:
layers = []
subLayerNum = 0
# simplify raster sublayer name
for subLayer in layer.subLayers():
# if netcdf/hdf use all text after filename
if bool(re.match('netcdf', subLayer, re.I)) or bool(re.match('hdf', subLayer, re.I)):
subLayer = subLayer.split(path)[1]
subLayer = subLayer[1:]
else:
# remove driver name and file name (str.replace returns a new string,
# so the result must be reassigned)
subLayer = subLayer.replace(subLayer.split(":")[0], "")
subLayer = subLayer.replace(path, "")
# remove any : or " left over
if subLayer.startswith(":"):
subLayer = subLayer[1:]
if subLayer.startswith("\""):
subLayer = subLayer[1:]
if subLayer.endswith(":"):
subLayer = subLayer[:-1]
if subLayer.endswith("\""):
subLayer = subLayer[:-1]
layers.append(unicode(subLayerNum) + "|" + subLayer)
subLayerNum = subLayerNum + 1
# Use QgsSublayersDialog
# Would be good if QgsSublayersDialog had an option to allow only one sublayer to be selected
chooseSublayersDialog = QgsSublayersDialog(QgsSublayersDialog.Gdal, "gdal")
chooseSublayersDialog.populateLayerTable(layers, "|")
if chooseSublayersDialog.exec_():
return layer.subLayers()[chooseSublayersDialog.selectionIndexes()[0]]
else:
# If user pressed cancel then just return the input path
return path
else:
# If the sublayers selection dialog is not to be shown then just return the input path
return path
except:
# If the layer is not a raster layer, then just return the input path
return path
|
jarped/QGIS
|
python/plugins/processing/tools/dataobjects.py
|
Python
|
gpl-2.0
| 16,227
|
import cv2
import numpy as np
def fold_line(i, line):
x1, y1, x2, y2 = line
dx, dy = (x2 - x1, y2 - y1)
angle = np.arctan2(dy, dx) + (-1) ** i * np.pi / 4
leg = np.cos(np.pi / 4) * np.sqrt(dx ** 2 + dy ** 2)
x3 = x1 + np.cos(angle) * leg
y3 = y1 + np.sin(angle) * leg
line1 = [x1, y1, x3, y3]
line2 = [x3, y3, x2, y2]
return map(int, line1), map(int, line2)
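# Worked example (illustrative, not part of the original script): folding the
# horizontal segment (0, 0) -> (100, 0) with i = 0 gives angle = pi/4 and
# leg = cos(pi/4) * 100 ~= 70.7, so the new vertex is (~50, ~50) and the
# segment becomes [0, 0, 50, 50] followed by [50, 50, 100, 0]; with i = 1 the
# fold flips the other way, placing the vertex near (50, -50).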
def fold_lines(lines):
for i, line in enumerate(lines):
line1, line2 = fold_line(i, line)
yield line1
yield line2
def draw_lines(lines):
im = np.zeros((1000, 1000))
map(lambda line: cv2.line(im, (line[0], line[1]), (line[2], line[3]), 255), lines)
cv2.imshow('Dragon', im)
cv2.waitKey(100)
lines = [[250, 500, 750, 500]]
for a in xrange(16):
draw_lines(lines)
lines = [line for line in fold_lines(lines)]
print 'Complete'
|
Billtholomew/Fractals
|
dragon.py
|
Python
|
mit
| 876
|
# coding: utf-8
"""
OpenAPI spec version:
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import os
import sys
import unittest
import lib_openshift
from lib_openshift.rest import ApiException
from lib_openshift.models.v1beta1_deployment_rollback import V1beta1DeploymentRollback
class TestV1beta1DeploymentRollback(unittest.TestCase):
""" V1beta1DeploymentRollback unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testV1beta1DeploymentRollback(self):
"""
Test V1beta1DeploymentRollback
"""
model = lib_openshift.models.v1beta1_deployment_rollback.V1beta1DeploymentRollback()
if __name__ == '__main__':
unittest.main()
|
detiber/lib_openshift
|
test/test_v1beta1_deployment_rollback.py
|
Python
|
apache-2.0
| 1,366
|
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.18 (https://github.com/warner/python-versioneer)
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
def get_keywords():
"""Get the keywords needed to look up the version information."""
# these strings will be replaced by git during git-archive.
# setup.py/versioneer.py will grep for the variable names, so they must
# each be defined on a line of their own. _version.py will just call
# get_keywords().
git_refnames = "$Format:%d$"
git_full = "$Format:%H$"
git_date = "$Format:%ci$"
keywords = {"refnames": git_refnames, "full": git_full, "date": git_date}
return keywords
class VersioneerConfig:
"""Container for Versioneer configuration parameters."""
def get_config():
"""Create, populate and return the VersioneerConfig() object."""
# these strings are filled in when 'setup.py versioneer' creates
# _version.py
cfg = VersioneerConfig()
cfg.VCS = "git"
cfg.style = "pep440"
cfg.tag_prefix = ""
cfg.parentdir_prefix = "None"
cfg.versionfile_source = "physical_validation/_version.py"
cfg.verbose = False
return cfg
class NotThisMethod(Exception):
"""Exception raised if a method is not valid for the current scenario."""
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
"""Decorator to mark a method as the handler for a particular VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, env=None):
"""Call the given command(s)."""
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen(
[c] + args,
cwd=cwd,
env=env,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr else None),
)
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %s" % dispcmd)
print(e)
return None, None
else:
if verbose:
print("unable to find command, tried %s" % (commands,))
return None, None
stdout = p.communicate()[0].strip()
if sys.version_info[0] >= 3:
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %s (error)" % dispcmd)
print("stdout was %s" % stdout)
return None, p.returncode
return stdout, p.returncode
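# Illustrative call (a sketch, not part of the original file):
#   out, rc = run_command(["git"], ["rev-parse", "HEAD"], cwd=".")
# returns the commit hash and the return code on success, (None, None) when no
# usable executable is found, and (None, rc) when the command exits non-zero.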
def versions_from_parentdir(parentdir_prefix, root, verbose):
"""Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes both
the project name and a version string. We will also support searching up
two directory levels for an appropriately named parent directory
"""
rootdirs = []
for i in range(3):
dirname = os.path.basename(root)
if dirname.startswith(parentdir_prefix):
return {
"version": dirname[len(parentdir_prefix) :],
"full-revisionid": None,
"dirty": False,
"error": None,
"date": None,
}
else:
rootdirs.append(root)
root = os.path.dirname(root) # up a level
if verbose:
print(
"Tried directories %s but none started with prefix %s"
% (str(rootdirs), parentdir_prefix)
)
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if not keywords:
raise NotThisMethod("no keywords at all, weird")
date = keywords.get("date")
if date is not None:
# git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
# datestamp. However we prefer "%ci" (which expands to an "ISO-8601
# -like" string, which we must then edit to make compliant), because
# it's been around since git-1.5.3, and it's too difficult to
# discover which version we're using, or to work around using an
# older one.
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG) :] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r"\d", r)])
if verbose:
print("discarding '%s', no digits" % ",".join(refs - tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix) :]
if verbose:
print("picking %s" % r)
return {
"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False,
"error": None,
"date": date,
}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {
"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False,
"error": "no suitable tags",
"date": None,
}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root, hide_stderr=True)
if rc != 0:
if verbose:
print("Directory %s not under git control" % root)
raise NotThisMethod("'git rev-parse --git-dir' returned error")
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out, rc = run_command(
GITS,
[
"describe",
"--tags",
"--dirty",
"--always",
"--long",
"--match",
"%s*" % tag_prefix,
],
cwd=root,
)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[: git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r"^(.+)-(\d+)-g([0-9a-f]+)$", git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = "unable to parse git-describe output: '%s'" % describe_out
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%s' doesn't start with prefix '%s'"
print(fmt % (full_tag, tag_prefix))
pieces["error"] = "tag '%s' doesn't start with prefix '%s'" % (
full_tag,
tag_prefix,
)
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix) :]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"], cwd=root)
pieces["distance"] = int(count_out) # total number of commits
# commit date: see ISO-8601 comment in git_versions_from_keywords()
date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"], cwd=root)[
0
].strip()
pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
return pieces
def plus_or_dot(pieces):
"""Return a + if we don't already have one, else return a ."""
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
"""Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
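# Example (illustrative, not part of the original file):
#   render_pep440({"closest-tag": "1.2.0", "distance": 3,
#                  "short": "abc1234", "dirty": True})
# returns "1.2.0+3.gabc1234.dirty"; with distance 0 and a clean tree the
# result is just "1.2.0".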
def render_pep440_pre(pieces):
"""TAG[.post.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post.devDISTANCE
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post.dev%d" % pieces["distance"]
else:
# exception #1
rendered = "0.post.dev%d" % pieces["distance"]
return rendered
def render_pep440_post(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX] .
The ".dev0" means dirty. Note that .dev0 sorts backwards
(a dirty tree will appear "older" than the corresponding clean one),
but you shouldn't be releasing software with -dirty anyways.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%s" % pieces["short"]
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%s" % pieces["short"]
return rendered
def render_pep440_old(pieces):
"""TAG[.postDISTANCE[.dev0]] .
The ".dev0" means dirty.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
"""TAG[-DISTANCE-gHEX][-dirty].
Like 'git describe --tags --dirty --always'.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
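# Example (illustrative, not part of the original file):
#   render_git_describe({"closest-tag": "1.2.0", "distance": 3,
#                        "short": "abc1234", "dirty": False})
# returns "1.2.0-3-gabc1234", mirroring `git describe --tags`; with no tag at
# all the bare short hash is returned instead.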
def render_git_describe_long(pieces):
"""TAG-DISTANCE-gHEX[-dirty].
Like 'git describe --tags --dirty --always --long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {
"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"],
"date": None,
}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%s'" % style)
return {
"version": rendered,
"full-revisionid": pieces["long"],
"dirty": pieces["dirty"],
"error": None,
"date": pieces.get("date"),
}
def get_versions():
"""Get version information or return default if unable to do so."""
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
cfg = get_config()
verbose = cfg.verbose
try:
return git_versions_from_keywords(get_keywords(), cfg.tag_prefix, verbose)
except NotThisMethod:
pass
try:
root = os.path.realpath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for i in cfg.versionfile_source.split("/"):
root = os.path.dirname(root)
except NameError:
return {
"version": "0+unknown",
"full-revisionid": None,
"dirty": None,
"error": "unable to find root of source tree",
"date": None,
}
try:
pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
return render(pieces, cfg.style)
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
except NotThisMethod:
pass
return {
"version": "0+unknown",
"full-revisionid": None,
"dirty": None,
"error": "unable to compute version",
"date": None,
}
|
shirtsgroup/physical-validation
|
physical_validation/_version.py
|
Python
|
lgpl-2.1
| 18,480
|
"""
Updates a SiteConfiguration to include new DOT-specific OAUTH2 settings.
"""
import logging
from django.contrib.sites.models import Site
from django.core.management import BaseCommand
from oscar.core.loading import get_model
from ecommerce.core.models import SiteConfiguration
logger = logging.getLogger(__name__)
Partner = get_model('partner', 'Partner')
class Command(BaseCommand):
help = 'Create or update Site, Partner, and SiteConfiguration'
def add_arguments(self, parser):
parser.add_argument('--site-id',
action='store',
dest='site_id',
required=True,
type=int,
help='ID of the Site to update.')
parser.add_argument('--sso-client-id',
action='store',
dest='sso_client_id',
required=True,
type=str,
help='SSO client ID for individual user auth')
parser.add_argument('--sso-client-secret',
action='store',
dest='sso_client_secret',
required=True,
type=str,
help='SSO client secret for individual user auth')
parser.add_argument('--backend-service-client-id',
action='store',
dest='backend_service_client_id',
required=True,
type=str,
help='Backend-service client ID for IDA-to-IDA auth')
parser.add_argument('--backend-service-client-secret',
action='store',
dest='backend_service_client_secret',
required=True,
type=str,
help='Backend-service client secret for IDA-to-IDA auth')
def handle(self, *args, **options):
site_id = options.get('site_id')
sso_client_id = options.get('sso_client_id')
sso_client_secret = options.get('sso_client_secret')
backend_service_client_id = options.get('backend_service_client_id')
backend_service_client_secret = options.get('backend_service_client_secret')
site = Site.objects.get(id=site_id)
site_configuration = SiteConfiguration.objects.get(site=site)
oauth_settings = site_configuration.oauth_settings
lms_url_root = site_configuration.lms_url_root
oauth_settings.update({
'SOCIAL_AUTH_EDX_OAUTH2_URL_ROOT': lms_url_root,
'SOCIAL_AUTH_EDX_OAUTH2_LOGOUT_URL': '{lms_url_root}/logout'.format(lms_url_root=lms_url_root),
'SOCIAL_AUTH_EDX_OAUTH2_ISSUERS': [lms_url_root],
'SOCIAL_AUTH_EDX_OAUTH2_KEY': sso_client_id,
'SOCIAL_AUTH_EDX_OAUTH2_SECRET': sso_client_secret,
'BACKEND_SERVICE_EDX_OAUTH2_KEY': backend_service_client_id,
'BACKEND_SERVICE_EDX_OAUTH2_SECRET': backend_service_client_secret,
})
site_configuration.save()
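# Illustrative invocation (values are placeholders, not part of the original file):
#   python manage.py update_site_oauth_settings --site-id=1 \
#       --sso-client-id=sso-key --sso-client-secret=sso-secret \
#       --backend-service-client-id=svc-key --backend-service-client-secret=svc-secret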
|
eduNEXT/edunext-ecommerce
|
ecommerce/core/management/commands/update_site_oauth_settings.py
|
Python
|
agpl-3.0
| 3,215
|
from logpy.unification import unify, reify, _unify, _reify
from logpy import var
def test_reify():
x, y, z = var(), var(), var()
s = {x: 1, y: 2, z: (x, y)}
assert reify(x, s) == 1
assert reify(10, s) == 10
assert reify((1, y), s) == (1, 2)
assert reify((1, (x, (y, 2))), s) == (1, (1, (2, 2)))
assert reify(z, s) == (1, 2)
def test_reify_dict():
x, y = var(), var()
s = {x: 2, y: 4}
e = {1: x, 3: {5: y}}
assert reify(e, s) == {1: 2, 3: {5: 4}}
def test_reify_list():
x, y = var(), var()
s = {x: 2, y: 4}
e = [1, [x, 3], y]
assert reify(e, s) == [1, [2, 3], 4]
def test_reify_complex():
x, y = var(), var()
s = {x: 2, y: 4}
e = {1: [x], 3: (y, 5)}
assert reify(e, s) == {1: [2], 3: (4, 5)}
def test_unify():
assert unify(1, 1, {}) == {}
assert unify(1, 2, {}) == False
assert unify(var(1), 2, {}) == {var(1): 2}
assert unify(2, var(1), {}) == {var(1): 2}
def test_unify_seq():
assert unify((1, 2), (1, 2), {}) == {}
assert unify([1, 2], [1, 2], {}) == {}
assert unify((1, 2), (1, 2, 3), {}) == False
assert unify((1, var(1)), (1, 2), {}) == {var(1): 2}
assert unify((1, var(1)), (1, 2), {var(1): 3}) == False
def test_unify_dict():
assert unify({1: 2}, {1: 2}, {}) == {}
assert unify({1: 2}, {1: 3}, {}) == False
assert unify({2: 2}, {1: 2}, {}) == False
assert unify({1: var(5)}, {1: 2}, {}) == {var(5): 2}
def test_unify_complex():
assert unify((1, {2: 3}), (1, {2: 3}), {}) == {}
assert unify((1, {2: 3}), (1, {2: 4}), {}) == False
assert unify((1, {2: var(5)}), (1, {2: 4}), {}) == {var(5): 4}
assert unify({1: (2, 3)}, {1: (2, var(5))}, {}) == {var(5): 3}
assert unify({1: [2, 3]}, {1: [2, var(5)]}, {}) == {var(5): 3}
|
cpcloud/logpy
|
logpy/tests/test_unification.py
|
Python
|
bsd-3-clause
| 1,783
|
import threading
import time
class Thread(threading.Thread):
def run(self):
print("{} inicio".format(self.getName()))
time.sleep(1)
print("{} terminado".format(self.getName()))
if __name__ == "__main__":
for i in range(4):
thread = Thread(name="Thread {}".format(i+1))
thread.start()
time.sleep(.5)
|
andresmtz98/GoogleNews_Scraper_Django
|
news/thread.py
|
Python
|
mit
| 356
|
# -*- Mode: Python -*-
# GObject-Introspection - a framework for introspecting GObject libraries
# Copyright (C) 2008 Johan Dahlin
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
#
"""odict - an ordered dictionary"""
try:
# Starting with Python 2.7 we can use collections.OrderedDict
from collections import OrderedDict as odict
except ImportError:
# But we still support Python 2.5 and 2.6
from UserDict import DictMixin
class odict(DictMixin):
def __init__(self):
self._items = {}
self._keys = []
def __setitem__(self, key, value):
if key not in self._items:
self._keys.append(key)
self._items[key] = value
def __getitem__(self, key):
return self._items[key]
def __delitem__(self, key):
del self._items[key]
self._keys.remove(key)
def keys(self):
return self._keys[:]
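# Illustrative usage (not part of the original module): keys are returned in
# insertion order with either implementation.
#   d = odict()
#   d['b'] = 1
#   d['a'] = 2
#   assert list(d.keys()) == ['b', 'a']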
|
jackjansen/gobject-introspection
|
giscanner/odict.py
|
Python
|
gpl-2.0
| 1,630
|
# Copyright 2014-2015 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you
# may not use this file except in compliance with the License. You
# may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
"""Represent MongoClient's configuration."""
import threading
from pymongo import monitor, pool
from pymongo.common import LOCAL_THRESHOLD_MS, SERVER_SELECTION_TIMEOUT
from pymongo.topology_description import TOPOLOGY_TYPE
from pymongo.pool import PoolOptions
from pymongo.server_description import ServerDescription
class TopologySettings(object):
def __init__(self,
seeds=None,
replica_set_name=None,
pool_class=None,
pool_options=None,
monitor_class=None,
condition_class=None,
local_threshold_ms=LOCAL_THRESHOLD_MS,
server_selection_timeout=SERVER_SELECTION_TIMEOUT):
"""Represent MongoClient's configuration.
Take a list of (host, port) pairs and optional replica set name.
"""
self._seeds = seeds or [('localhost', 27017)]
self._replica_set_name = replica_set_name
self._pool_class = pool_class or pool.Pool
self._pool_options = pool_options or PoolOptions()
self._monitor_class = monitor_class or monitor.Monitor
self._condition_class = condition_class or threading.Condition
self._local_threshold_ms = local_threshold_ms
self._server_selection_timeout = server_selection_timeout
self._direct = (len(self._seeds) == 1 and not replica_set_name)
@property
def seeds(self):
"""List of server addresses."""
return self._seeds
@property
def replica_set_name(self):
return self._replica_set_name
@property
def pool_class(self):
return self._pool_class
@property
def pool_options(self):
return self._pool_options
@property
def monitor_class(self):
return self._monitor_class
@property
def condition_class(self):
return self._condition_class
@property
def local_threshold_ms(self):
return self._local_threshold_ms
@property
def server_selection_timeout(self):
return self._server_selection_timeout
@property
def direct(self):
"""Connect directly to a single server, or use a set of servers?
True if there is one seed and no replica_set_name.
"""
return self._direct
def get_topology_type(self):
if self.direct:
return TOPOLOGY_TYPE.Single
elif self.replica_set_name is not None:
return TOPOLOGY_TYPE.ReplicaSetNoPrimary
else:
return TOPOLOGY_TYPE.Unknown
def get_server_descriptions(self):
"""Initial dict of (address, ServerDescription) for all seeds."""
return dict([
(address, ServerDescription(address))
for address in self.seeds])
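# Illustrative usage (a sketch, not part of the original module):
#   settings = TopologySettings(seeds=[('db1', 27017), ('db2', 27017)],
#                               replica_set_name='rs0')
#   settings.direct               # False: several seeds / named replica set
#   settings.get_topology_type()  # TOPOLOGY_TYPE.ReplicaSetNoPrimary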
|
elioth010/lugama
|
venv/lib/python2.7/site-packages/pymongo/settings.py
|
Python
|
gpl-2.0
| 3,353
|
import install
|
oleiade/Elevator
|
fabfile/__init__.py
|
Python
|
mit
| 15
|
# -*- coding: utf-8 -*-
from .generator import *
|
nk113/django-ficuspumila
|
ficuspumila/core/common/fixtures/__init__.py
|
Python
|
bsd-3-clause
| 49
|
# ----------------------------------------------------------------------------
# Copyright 2015-2016 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
import sys
from neon import logger as neon_logger
try:
from aeon import DataLoader as AeonDataLoader # noqa
except ImportError:
neon_logger.error('Unable to load aeon dataloading module.')
neon_logger.error('Please follow installation instructions at:')
neon_logger.error('https://github.com/NervanaSystems/aeon')
sys.exit(1)
|
matthijsvk/multimodalSR
|
code/Experiments/neon-master/neon/data/aeon_shim.py
|
Python
|
mit
| 1,096
|
"""
This is some of the code behind 'cobbler sync'.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301 USA
"""
import os.path, traceback, errno
import re
import clogger
import pxegen
import shutil
import glob
import utils
from cexceptions import *
import templar
from utils import _
def register():
"""
The mandatory cobbler module registration hook.
"""
return "manage"
class InTftpdManager:
def what(self):
return "tftpd"
def __init__(self,config,logger):
"""
Constructor
"""
self.logger = logger
if self.logger is None:
self.logger = clogger.Logger()
self.config = config
self.templar = templar.Templar(config)
self.settings_file = "/etc/xinetd.d/tftp"
self.pxegen = pxegen.PXEGen(config, self.logger)
self.systems = config.systems()
self.bootloc = utils.tftpboot_location()
def regen_hosts(self):
pass # not used
def write_dns_files(self):
pass # not used
def write_boot_files_distro(self,distro):
# collapse the object down to a rendered datastructure
# the second argument set to false means we don't collapse
# hashes/arrays into a flat string
target = utils.blender(self.config.api, False, distro)
# Create metadata for the templar function
# Right now, just using img_path, but adding more
# cobbler variables here would probably be good
metadata = {}
metadata["img_path"] = os.path.join(
utils.tftpboot_location(),
"images",distro.name)
# Create the templar instance. Used to template the target directory
templater = templar.Templar(self.config)
# Loop through the hash of boot files,
# executing a cp for each one
for file in target["boot_files"].keys():
file_dst = templater.render(file,metadata,None)
try:
for f in glob.glob(target["boot_files"][file]):
rawpath,rawfile=os.path.split(f)
filedst = file_dst+rawfile
if not os.path.isfile(filedst) :
shutil.copyfile(f, filedst)
self.config.api.log("copied file %s to %s for %s" % (
target["boot_files"][file],
filedst,
distro.name))
except:
self.logger.error("failed to copy file %s to %s for %s" % (
target["boot_files"][file],
filedst,
distro.name))
# Continue on to sync what you can
return 0
def write_boot_files(self):
"""
Copy files in profile["boot_files"] into /tftpboot. Used for vmware
currently.
"""
for distro in self.config.distros():
self.write_boot_files_distro(distro)
return 0
def write_tftpd_files(self):
"""
xinetd files are written when manage_tftp is set in
/var/lib/cobbler/settings.
"""
template_file = "/etc/cobbler/tftpd.template"
try:
f = open(template_file,"r")
except:
raise CX(_("error reading template %s") % template_file)
template_data = ""
template_data = f.read()
f.close()
metadata = {
"user" : "root",
"binary" : "/usr/sbin/in.tftpd",
"args" : "%s" % self.bootloc
}
self.logger.info("generating %s" % self.settings_file)
self.templar.render(template_data, metadata, self.settings_file, None)
def update_netboot(self,name):
"""
Write out new pxelinux.cfg files to /tftpboot
"""
system = self.systems.find(name=name)
if system is None:
utils.die(self.logger,"error in system lookup for %s" % name)
self.pxegen.write_all_system_files(system)
# generate any templates listed in the system
self.pxegen.write_templates(system)
def add_single_system(self,system):
"""
Write out new pxelinux.cfg files to /tftpboot
"""
# write the PXE files for the system
self.pxegen.write_all_system_files(system)
# generate any templates listed in the distro
self.pxegen.write_templates(system)
def add_single_distro(self,distro):
self.pxegen.copy_single_distro_files(distro,self.bootloc,False)
self.write_boot_files_distro(distro)
def sync(self,verbose=True):
"""
Write out all files to /tftpboot
"""
self.pxegen.verbose = verbose
self.logger.info("copying bootloaders")
self.pxegen.copy_bootloaders()
self.logger.info("copying distros to tftpboot")
# Adding in the exception handling to not blow up if files have
# been moved (or the path references an NFS directory that's no longer
# mounted)
for d in self.config.distros():
try:
self.logger.info("copying files for distro: %s" % d.name)
self.pxegen.copy_single_distro_files(d,self.bootloc,False)
except CX, e:
self.logger.error(e.value)
self.logger.info("copying images")
self.pxegen.copy_images()
# the actual pxelinux.cfg files, for each interface
self.logger.info("generating PXE configuration files")
for x in self.systems:
self.pxegen.write_all_system_files(x)
self.logger.info("generating PXE menu structure")
self.pxegen.make_pxe_menu()
def get_manager(config,logger):
return InTftpdManager(config,logger)
|
nacc/cobbler
|
cobbler/modules/manage_in_tftpd.py
|
Python
|
gpl-2.0
| 6,507
|
# encoding: utf-8
# Copyright 2011 Tree.io Limited
# This file is part of Treeio.
# License www.tree.io/license
"""
Custom storage for Documents to allow dynamic MEDIA_ROOT paths
"""
from django.core.files.storage import FileSystemStorage
from django.core.exceptions import SuspiciousOperation
from django.utils._os import safe_join
from treeio.core.conf import settings
import os
class FileStorage(FileSystemStorage):
def path(self, name):
try:
path = safe_join(getattr(settings, 'MEDIA_ROOT'), name)
except ValueError:
raise SuspiciousOperation("Attempted access to '%s' denied." % name)
return os.path.normpath(path)
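# Illustrative usage (an assumption, not part of the original file): point
# Django's DEFAULT_FILE_STORAGE at this class so uploads resolve against the
# MEDIA_ROOT read from treeio.core.conf.settings, e.g.
#   DEFAULT_FILE_STORAGE = 'treeio.documents.files.FileStorage'
# (the exact dotted path depends on how the documents app is installed).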
|
rogeriofalcone/treeio
|
documents/files.py
|
Python
|
mit
| 679
|
#! /usr/bin/env python
""" detect arrow and direction printed on the side box
author: ren ye
changelog:
(2017-01-29) init
"""
import cv2
import numpy as np  # np is used below (np.degrees), so import it explicitly
from cv_utils import *
# ## main ##
# picture folder
image_path = "image/arrow.png"
# image_path = "image/blue_right.png"
# #### load picture ####
img = cv2.imread(image_path, 1)
rotated_angle = 180
# rotate image
rows, cols, _ = img.shape
M = cv2.getRotationMatrix2D((cols / 2, rows / 2), rotated_angle, 1)
dst = cv2.warpAffine(img, M, (cols, rows))
# cv2.imshow("rotated", dst)
threshold1 = 100
threshold2 = 200
thresholdHough = 40
minLineLength = 6
maxLineGap = 5
hsv_mask = hsv_masking(dst, is_dynamic=False)
# mask, _, _ = canny_masking(hsv_mask, is_dynamic=True)
# hough_line_detection(img, mask, hsv_mask,
# is_dynamic=True, is_probabilistic=True)
contour_param, max_idx = find_contour(dst, hsv_mask, is_max=True)
# ret, hsv_mask_inv = cv2.threshold(hsv_mask, 127, 255, cv2.THRESH_BINARY_INV)
gd_center, kpts = find_good_keypoint(dst, hsv_mask, maxCorners=7,
is_dynamic=True)
# print kpt
# orb keypoint detection
# orb_kpt = find_orb_keypoint(dst, hsv_mask)
circle_center = contour_param['circle_center'][max_idx]
print np.degrees(cal_direction(circle_center, gd_center))
cv2.waitKey(0)
cv2.destroyAllWindows()
|
reinaldomaslim/Project_Bixi
|
bixi_vision/src/bixi_vision/arrow_detector.py
|
Python
|
gpl-3.0
| 1,344
|
#!/usr/bin/env python3
from os.path import basename
import apt
import glob
import json
import os
import subprocess as cmd
devices = []
for name in glob.glob('/sys/block/*'):
name = basename(name)
if name.startswith('sd'):
devices.append(name)
elif name.startswith('md'):
devices.append(name)
elif name.startswith('nvme'):
devices.append(name)
elif name.startswith('pmem'):
devices.append(name)
root = cmd.check_output("findmnt -M / -o source -v | tail -n1", shell=True)
root = basename(root.decode().strip())
root_device = ''
cdrom = cmd.check_output("findmnt -M /cdrom -o source -v | tail -n1",
shell=True)
cdrom = basename(cdrom.decode().strip())
cdrom_device = ''
disks = json.loads(cmd.check_output("lsblk -fs -J", shell=True).decode())
disks = disks['blockdevices']
for disk in disks:
if disk['name'] == root:
data = json.dumps(disk)
for device in devices:
if device in data:
root_device = device
break
elif disk['name'] == cdrom:
data = json.dumps(disk)
for device in devices:
if device in data:
cdrom_device = device
break
if root_device == cdrom_device:
exit(0)
print('No recovery partition is detected.')
def check_depends(pkg_name, depends):
if pkg_name not in cache:
return
pkg = cache[pkg_name]
if not pkg.has_versions:
return
depends_list = pkg.version_list[0].depends_list
if not depends_list:
return
if 'Depends' in depends_list:
for dep in depends_list['Depends']:
pkg_name = dep[0].all_targets()[0].parent_pkg.name
pkg = cache[pkg_name]
if pkg_name not in depends \
and not pkg.current_ver \
and pkg.has_versions \
and pkg.version_list[0].downloadable:
depends.append(pkg_name)
cache = apt.apt_pkg.Cache()
langs = ("ca", "cs", "da", "de", "en", "en_US", "es", "eu", "fr", "gl", "it",
"hu", "nl", "pl", "pt", "pt_BR", "sl", "fi", "sv", "el", "bg", "ru",
"ko", "zh-hans", "zh-hant", "ja")
depends = []
for lang in langs:
pkgs = cmd.check_output('check-language-support'
+ ' --show-installed'
+ ' -l ' + lang,
shell=True)
pkgs = pkgs.decode('utf-8').strip().split(' ')
for pkg_name in pkgs:
pkg = cache[pkg_name]
if pkg_name not in depends \
and not pkg.current_ver \
and pkg.has_versions \
and pkg.version_list[0].downloadable:
depends.append(pkg_name)
pre_len = len(depends)
while True:
for dep in depends.copy():
check_depends(dep, depends)
if len(depends) != pre_len:
pre_len = len(depends)
else:
break
os.makedirs('/dell/debs')
os.chdir('/dell/debs')
cmd.check_output('apt-get download --allow-unauthenticated '
+ ' '.join(depends), shell=True)
|
fourdollars/dell-recovery
|
late/chroot_scripts/60-detect-no-recovery-patition.py
|
Python
|
gpl-2.0
| 3,106
|