# -*- coding: utf-8 -*-
"""
Modbus Utilities
-----------------
A collection of utilities for packing data, unpacking
data, computing checksums, and decoding checksums.
"""
from collections.abc import Callable  # collections.Callable was removed in Python 3.10
import struct
# region Helpers
def default(value):
"""
Given a python object, return the default value
of that object.
:param value: The value to get the default of
:returns: The default value
"""
return type(value)()
def dict_property(store, index):
""" Helper to create class properties from a dictionary.
Basically this allows you to remove a lot of possible
boilerplate code.
    :param store: The store to pull from
:param index: The index into the store to close over
:returns: An initialized property set
"""
if isinstance(store, Callable):
getter = lambda self: store(self)[index]
setter = lambda self, value: store(self).__setitem__(index, value)
elif isinstance(store, str):
getter = lambda self: self.__getattribute__(store)[index]
setter = lambda self, value:\
self.__getattribute__(store).__setitem__(index, value)
else:
getter = lambda self: store[index]
setter = lambda self, value: store.__setitem__(index, value)
return property(getter, setter)
# endregion
# region Bit Packing Functions
def pack_bitstring(bits):
""" Creates a string out of an array of bits
:param bits: A bit array
example::
bits = [False, True, False, True]
result = pack_bitstring(bits)
"""
ret = b''
i = packed = 0
for bit in bits:
if bit:
packed += 128
i += 1
if i == 8:
ret += struct.pack('B', packed)
i = packed = 0
else:
packed >>= 1
if 0 < i < 8:
packed >>= (7 - i)
ret += struct.pack('B', packed)
return ret
def unpack_bitstring(string):
""" Creates bit array out of a string
:param string: The modbus data packet to decode
example::
bytes = 'bytes to decode'
result = unpack_bitstring(bytes)
"""
byte_count = len(string)
bits = []
for byte in range(byte_count):
value = string[byte]
for _ in range(8):
bits.append((value & 1) == 1)
value >>= 1
return bits
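# An illustrative round-trip of the two helpers above (added as a sketch; the
# values were worked out by hand and are not taken from the original module):
#
#     pack_bitstring([True, False, True, False]) == b'\x05'
#     unpack_bitstring(b'\x05')[:4] == [True, False, True, False]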
# endregion
# region Error Detection Functions
def __generate_crc16_table():
""" Generates a crc16 lookup table
.. note:: This will only be generated once
"""
result = []
for byte in range(256):
crc = 0x0000
for _ in range(8):
if (byte ^ crc) & 0x0001:
crc = (crc >> 1) ^ 0xa001
else:
crc >>= 1
byte >>= 1
result.append(crc)
return result
__crc16_table = __generate_crc16_table()
def compute_crc(data):
""" Computes a crc16 on the passed in string. For modbus,
this is only used on the binary serial protocols (in this
case RTU).
The difference between modbus's crc16 and a normal crc16
is that modbus starts the crc value out at 0xffff.
:param data: The data to create a crc16 of
:returns: The calculated CRC
"""
crc = 0xffff
for a in data:
idx = __crc16_table[(crc ^ a) & 0xff]
crc = ((crc >> 8) & 0xff) ^ idx
swapped = ((crc << 8) & 0xff00) | ((crc >> 8) & 0x00ff)
return swapped
def check_crc(data, check):
""" Checks if the data matches the passed in CRC
:param data: The data to create a crc16 of
:param check: The CRC to validate
:returns: True if matched, False otherwise
"""
return compute_crc(data) == check
def compute_lrc(data):
""" Used to compute the longitudinal redundancy check
against a string. This is only used on the serial ASCII
modbus protocol. A full description of this implementation
    can be found in appendix B of the serial line modbus description.
:param data: The data to apply a lrc to
:returns: The calculated LRC
"""
lrc = sum(a for a in data) & 0xff
lrc = (lrc ^ 0xff) + 1
return lrc & 0xff
def check_lrc(data, check):
""" Checks if the passed in data matches the LRC
:param data: The data to calculate
:param check: The LRC to validate
:returns: True if matched, False otherwise
"""
return compute_lrc(data) == check
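# Example usage of the checksum helpers (an illustrative sketch; the frame
# bytes below are made up for demonstration):
#
#     frame = b'\x01\x03\x00\x00\x00\x0a'
#     assert check_crc(frame, compute_crc(frame))
#     assert check_lrc(frame, compute_lrc(frame))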
def rtu_frame_size(data, byte_count_pos):
""" Calculates the size of the frame based on the byte count.
:param data: The buffer containing the frame.
:param byte_count_pos: The index of the byte count in the buffer.
:returns: The size of the frame.
The structure of frames with a byte count field is always the
same:
- first, there are some header fields
- then the byte count field
- then as many data bytes as indicated by the byte count,
- finally the CRC (two bytes).
To calculate the frame size, it is therefore sufficient to extract
the contents of the byte count field, add the position of this
field, and finally increment the sum by three (one byte for the
byte count field, two for the CRC).
"""
return data[byte_count_pos] + byte_count_pos + 3
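# For example (a hand-worked sketch): if the byte count field sits at index 2
# and holds the value 2, the frame spans 2 + 2 + 3 == 7 bytes in total:
#
#     rtu_frame_size(b'\x01\x03\x02\x12\x34\x00\x00', byte_count_pos=2) == 7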
# endregion
# Exported symbols
__all__ = [
'pack_bitstring',
'unpack_bitstring',
'default',
'compute_crc',
'check_crc',
'compute_lrc',
'check_lrc',
'rtu_frame_size'
]
# -*- coding: utf-8 -*-
"""
click.parser
~~~~~~~~~~~~
This module started out as largely a copy paste from the stdlib's
optparse module with the features removed that we do not need from
optparse because we implement them in Click on a higher level (for
instance type handling, help formatting and a lot more).
The plan is to remove more and more from here over time.
The reason this is a different module and not optparse from the stdlib
is that there are differences in 2.x and 3.x about the error messages
generated and optparse in the stdlib uses gettext for no good reason
and might cause us issues.
"""
import re
from collections import deque
from .exceptions import UsageError, NoSuchOption, BadOptionUsage, \
BadArgumentUsage
def _unpack_args(args, nargs_spec):
"""Given an iterable of arguments and an iterable of nargs specifications,
it returns a tuple with all the unpacked arguments at the first index
and all remaining arguments as the second.
The nargs specification is the number of arguments that should be consumed
or `-1` to indicate that this position should eat up all the remainders.
Missing items are filled with `None`.
"""
args = deque(args)
nargs_spec = deque(nargs_spec)
rv = []
spos = None
def _fetch(c):
try:
if spos is None:
return c.popleft()
else:
return c.pop()
except IndexError:
return None
while nargs_spec:
nargs = _fetch(nargs_spec)
if nargs == 1:
rv.append(_fetch(args))
elif nargs > 1:
x = [_fetch(args) for _ in range(nargs)]
# If we're reversed, we're pulling in the arguments in reverse,
# so we need to turn them around.
if spos is not None:
x.reverse()
rv.append(tuple(x))
elif nargs < 0:
if spos is not None:
raise TypeError('Cannot have two nargs < 0')
spos = len(rv)
rv.append(None)
# spos is the position of the wildcard (star). If it's not `None`,
# we fill it with the remainder.
if spos is not None:
rv[spos] = tuple(args)
args = []
rv[spos + 1:] = reversed(rv[spos + 1:])
return tuple(rv), list(args)
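# An illustrative example (not part of the original module): a `-1` entry in
# the nargs spec greedily captures whatever the fixed positions leave over.
#
#     _unpack_args(['a', 'b', 'c', 'd'], [1, -1, 1])
#     # -> (('a', ('b', 'c'), 'd'), [])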
def _error_opt_args(nargs, opt):
if nargs == 1:
raise BadOptionUsage(opt, '%s option requires an argument' % opt)
raise BadOptionUsage(opt, '%s option requires %d arguments' % (opt, nargs))
def split_opt(opt):
first = opt[:1]
if first.isalnum():
return '', opt
if opt[1:2] == first:
return opt[:2], opt[2:]
return first, opt[1:]
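# For example (sketch): split_opt('--foo') == ('--', 'foo'),
# split_opt('-f') == ('-', 'f'), and split_opt('foo') == ('', 'foo').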
def normalize_opt(opt, ctx):
if ctx is None or ctx.token_normalize_func is None:
return opt
prefix, opt = split_opt(opt)
return prefix + ctx.token_normalize_func(opt)
def split_arg_string(string):
"""Given an argument string this attempts to split it into small parts."""
rv = []
for match in re.finditer(r"('([^'\\]*(?:\\.[^'\\]*)*)'"
r'|"([^"\\]*(?:\\.[^"\\]*)*)"'
r'|\S+)\s*', string, re.S):
arg = match.group().strip()
if arg[:1] == arg[-1:] and arg[:1] in '"\'':
arg = arg[1:-1].encode('ascii', 'backslashreplace') \
.decode('unicode-escape')
try:
arg = type(string)(arg)
except UnicodeError:
pass
rv.append(arg)
return rv
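# For example (sketch): split_arg_string("run --name 'hello world'") returns
# ['run', '--name', 'hello world'].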
class Option(object):
def __init__(self, opts, dest, action=None, nargs=1, const=None, obj=None):
self._short_opts = []
self._long_opts = []
self.prefixes = set()
for opt in opts:
prefix, value = split_opt(opt)
if not prefix:
raise ValueError('Invalid start character for option (%s)'
% opt)
self.prefixes.add(prefix[0])
if len(prefix) == 1 and len(value) == 1:
self._short_opts.append(opt)
else:
self._long_opts.append(opt)
self.prefixes.add(prefix)
if action is None:
action = 'store'
self.dest = dest
self.action = action
self.nargs = nargs
self.const = const
self.obj = obj
@property
def takes_value(self):
return self.action in ('store', 'append')
def process(self, value, state):
if self.action == 'store':
state.opts[self.dest] = value
elif self.action == 'store_const':
state.opts[self.dest] = self.const
elif self.action == 'append':
state.opts.setdefault(self.dest, []).append(value)
elif self.action == 'append_const':
state.opts.setdefault(self.dest, []).append(self.const)
elif self.action == 'count':
state.opts[self.dest] = state.opts.get(self.dest, 0) + 1
else:
raise ValueError('unknown action %r' % self.action)
state.order.append(self.obj)
class Argument(object):
def __init__(self, dest, nargs=1, obj=None):
self.dest = dest
self.nargs = nargs
self.obj = obj
def process(self, value, state):
if self.nargs > 1:
holes = sum(1 for x in value if x is None)
if holes == len(value):
value = None
elif holes != 0:
raise BadArgumentUsage('argument %s takes %d values'
% (self.dest, self.nargs))
state.opts[self.dest] = value
state.order.append(self.obj)
class ParsingState(object):
def __init__(self, rargs):
self.opts = {}
self.largs = []
self.rargs = rargs
self.order = []
class OptionParser(object):
"""The option parser is an internal class that is ultimately used to
parse options and arguments. It's modelled after optparse and brings
a similar but vastly simplified API. It should generally not be used
directly as the high level Click classes wrap it for you.
It's not nearly as extensible as optparse or argparse as it does not
implement features that are implemented on a higher level (such as
types or defaults).
    :param ctx: optionally the :class:`~click.Context` that this parser
                should be associated with.
"""
def __init__(self, ctx=None):
#: The :class:`~click.Context` for this parser. This might be
#: `None` for some advanced use cases.
self.ctx = ctx
#: This controls how the parser deals with interspersed arguments.
#: If this is set to `False`, the parser will stop on the first
#: non-option. Click uses this to implement nested subcommands
#: safely.
self.allow_interspersed_args = True
#: This tells the parser how to deal with unknown options. By
#: default it will error out (which is sensible), but there is a
#: second mode where it will ignore it and continue processing
#: after shifting all the unknown options into the resulting args.
self.ignore_unknown_options = False
if ctx is not None:
self.allow_interspersed_args = ctx.allow_interspersed_args
self.ignore_unknown_options = ctx.ignore_unknown_options
self._short_opt = {}
self._long_opt = {}
self._opt_prefixes = set(['-', '--'])
self._args = []
def add_option(self, opts, dest, action=None, nargs=1, const=None,
obj=None):
"""Adds a new option named `dest` to the parser. The destination
is not inferred (unlike with optparse) and needs to be explicitly
        provided. Action can be any of ``store``, ``store_const``,
        ``append``, ``append_const`` or ``count``.
The `obj` can be used to identify the option in the order list
that is returned from the parser.
"""
if obj is None:
obj = dest
opts = [normalize_opt(opt, self.ctx) for opt in opts]
option = Option(opts, dest, action=action, nargs=nargs,
const=const, obj=obj)
self._opt_prefixes.update(option.prefixes)
for opt in option._short_opts:
self._short_opt[opt] = option
for opt in option._long_opts:
self._long_opt[opt] = option
def add_argument(self, dest, nargs=1, obj=None):
"""Adds a positional argument named `dest` to the parser.
The `obj` can be used to identify the option in the order list
that is returned from the parser.
"""
if obj is None:
obj = dest
self._args.append(Argument(dest=dest, nargs=nargs, obj=obj))
def parse_args(self, args):
"""Parses positional arguments and returns ``(values, args, order)``
for the parsed options and arguments as well as the leftover
arguments if there are any. The order is a list of objects as they
appear on the command line. If arguments appear multiple times they
will be memorized multiple times as well.
"""
state = ParsingState(args)
try:
self._process_args_for_options(state)
self._process_args_for_args(state)
except UsageError:
if self.ctx is None or not self.ctx.resilient_parsing:
raise
return state.opts, state.largs, state.order
def _process_args_for_args(self, state):
pargs, args = _unpack_args(state.largs + state.rargs,
[x.nargs for x in self._args])
for idx, arg in enumerate(self._args):
arg.process(pargs[idx], state)
state.largs = args
state.rargs = []
def _process_args_for_options(self, state):
while state.rargs:
arg = state.rargs.pop(0)
arglen = len(arg)
# Double dashes always handled explicitly regardless of what
# prefixes are valid.
if arg == '--':
return
elif arg[:1] in self._opt_prefixes and arglen > 1:
self._process_opts(arg, state)
elif self.allow_interspersed_args:
state.largs.append(arg)
else:
state.rargs.insert(0, arg)
return
# Say this is the original argument list:
# [arg0, arg1, ..., arg(i-1), arg(i), arg(i+1), ..., arg(N-1)]
# ^
# (we are about to process arg(i)).
#
# Then rargs is [arg(i), ..., arg(N-1)] and largs is a *subset* of
# [arg0, ..., arg(i-1)] (any options and their arguments will have
# been removed from largs).
#
# The while loop will usually consume 1 or more arguments per pass.
# If it consumes 1 (eg. arg is an option that takes no arguments),
# then after _process_arg() is done the situation is:
#
# largs = subset of [arg0, ..., arg(i)]
# rargs = [arg(i+1), ..., arg(N-1)]
#
# If allow_interspersed_args is false, largs will always be
# *empty* -- still a subset of [arg0, ..., arg(i-1)], but
# not a very interesting subset!
def _match_long_opt(self, opt, explicit_value, state):
if opt not in self._long_opt:
possibilities = [word for word in self._long_opt
if word.startswith(opt)]
raise NoSuchOption(opt, possibilities=possibilities, ctx=self.ctx)
option = self._long_opt[opt]
if option.takes_value:
# At this point it's safe to modify rargs by injecting the
# explicit value, because no exception is raised in this
# branch. This means that the inserted value will be fully
# consumed.
if explicit_value is not None:
state.rargs.insert(0, explicit_value)
nargs = option.nargs
if len(state.rargs) < nargs:
_error_opt_args(nargs, opt)
elif nargs == 1:
value = state.rargs.pop(0)
else:
value = tuple(state.rargs[:nargs])
del state.rargs[:nargs]
elif explicit_value is not None:
raise BadOptionUsage(opt, '%s option does not take a value' % opt)
else:
value = None
option.process(value, state)
def _match_short_opt(self, arg, state):
stop = False
i = 1
prefix = arg[0]
unknown_options = []
for ch in arg[1:]:
opt = normalize_opt(prefix + ch, self.ctx)
option = self._short_opt.get(opt)
i += 1
if not option:
if self.ignore_unknown_options:
unknown_options.append(ch)
continue
raise NoSuchOption(opt, ctx=self.ctx)
if option.takes_value:
# Any characters left in arg? Pretend they're the
# next arg, and stop consuming characters of arg.
if i < len(arg):
state.rargs.insert(0, arg[i:])
stop = True
nargs = option.nargs
if len(state.rargs) < nargs:
_error_opt_args(nargs, opt)
elif nargs == 1:
value = state.rargs.pop(0)
else:
value = tuple(state.rargs[:nargs])
del state.rargs[:nargs]
else:
value = None
option.process(value, state)
if stop:
break
        # If we got any unknown options, we recombine the string of the
        # remaining options and re-attach the prefix, then report that
        # to the state as a new larg. This way basic short-option
        # combining still works while unknown arguments are ignored.
if self.ignore_unknown_options and unknown_options:
state.largs.append(prefix + ''.join(unknown_options))
def _process_opts(self, arg, state):
explicit_value = None
# Long option handling happens in two parts. The first part is
# supporting explicitly attached values. In any case, we will try
# to long match the option first.
if '=' in arg:
long_opt, explicit_value = arg.split('=', 1)
else:
long_opt = arg
norm_long_opt = normalize_opt(long_opt, self.ctx)
# At this point we will match the (assumed) long option through
# the long option matching code. Note that this allows options
# like "-foo" to be matched as long options.
try:
self._match_long_opt(norm_long_opt, explicit_value, state)
except NoSuchOption:
# At this point the long option matching failed, and we need
# to try with short options. However there is a special rule
# which says, that if we have a two character options prefix
# (applies to "--foo" for instance), we do not dispatch to the
# short option code and will instead raise the no option
# error.
if arg[:2] not in self._opt_prefixes:
return self._match_short_opt(arg, state)
if not self.ignore_unknown_options:
raise
state.largs.append(arg)
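# Illustrative standalone use of the internal parser (a sketch, not part of
# click's public API; real applications go through the higher-level classes):
#
#     parser = OptionParser()
#     parser.add_option(['-n', '--name'], dest='name')
#     parser.add_argument('src')
#     opts, largs, order = parser.parse_args(['-n', 'demo', 'input.txt'])
#     # opts == {'name': 'demo', 'src': 'input.txt'}, largs == []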
#!/usr/bin/env python
"""
CSV Utility Module
"""
import csv
import re
__version__ = '0.0.0'
#=============================================================================
class reader( object ):
    """
    Implements functionality similar to the built-in CSV reader, but maps
    each row into an object (rather than a list or dict), and also converts
    values to more useful types suitable for data analysis.
In this implementation, it is assumed that the first row of a CSV file
_always_ contains column names.
"""
#=========================================================================
def __init__( self, csvfile, dialect = 'excel', **fmtparams ):
"""
Initializes a reader object.
"""
        # csv.reader is a factory function (it cannot be subclassed), so keep
        # a reference to an underlying reader object instead
        self._reader = csv.reader( csvfile, dialect, **fmtparams )
        # load column name list
        self._columns = []
        columns = self._reader.next()
for column in columns:
self._columns.append( wordify( column ) )
# create an initial record to lazily pass data back to the user
self.record = record( self._columns )
#=========================================================================
def keys( self ):
"""
Returns the list of column names.
"""
return self._columns
#=========================================================================
def next( self ):
"""
        Returns the next row as a record object (rather than a list).
"""
# get the next row out of the CSV file
        row = self._reader.next()
# update the internal record object
self.record.load( row )
        # return the record instance
        return self.record
    #=========================================================================
    def __iter__( self ):
        """
        Returns the reader itself to support the iterator protocol.
        """
        return self
#=============================================================================
class record( object ):
"""
Manage a record of CSV data (one per row).
"""
#=========================================================================
def __init__( self, columns, values = None ):
"""
Initializes a record object.
@param columns List of column names (keys) for use as attribute names
Note: It is assumed that each string is safe for use
as an attribute name.
@param values Optional initial list of data values for this record
"""
# make a copy of the columns list
self._columns = list( columns )
# create attributes for each column
for index in range( len( self._columns ) ):
# load data into object
setattr( self, self._columns[ index ], None )
# see if any data was specified
if values is not None:
# load the values into the object's state
self.load( values )
#=========================================================================
def __getitem__( self, index ):
"""
Retrieves a value from numeric index using list notation.
@param index The numeric index for which to fetch a requested value
@return The requested value at the given index
"""
# return the attribute at the given numeric index
return getattr( self, self._columns[ index ] )
#=========================================================================
def __iter__( self ):
"""
        Returns an iterator over a copy of the data to support the iterator
        protocol.
        @return An iterator over the values in the object's state
        """
        return iter( self.values() )
#=========================================================================
def __len__( self ):
"""
Return length of record to support the sequence protocol.
@return The number of values in this record
"""
return len( self._columns )
#=========================================================================
def keys( self ):
"""
Returns the list of attribute names.
"""
return self._columns
#=========================================================================
def load( self, values ):
"""
Loads values into the object.
@param values List of data values (strings) to load into the object
"""
# count number of columns
num_columns = len( self._columns )
# count the number of passed values
num_values = len( values )
# even it out, if necessary
if num_values < num_columns:
values.extend( [ None ] * ( num_columns - num_values ) )
# load data into each attribute
for index in range( num_columns ):
# load data into object
setattr(
self,
self._columns[ index ],
type_convert( values[ index ] )
)
#=========================================================================
def values( self ):
"""
Constructs a list of values in the object.
@return A list of values from the object's state
"""
# count expected number of values
num_columns = len( self._columns )
# always return enough data for a full record
values = [ None ] * num_columns
# load data from object into list
for index in range( num_columns ):
values[ index ] = self.__getitem__( index )
# return the list of values in this record
return values
#=============================================================================
def type_convert( value ):
"""
Performs pattern-style type conversion for CSV-originating data.
"""
    # pass through values that are not strings (e.g. None padding)
    if not isinstance( value, basestring ):
        return value
    # looks like a normal integer
    if re.match( r'^-?\d+$', value ) is not None:
return int( value )
# looks like an integer in hexadecimal notation
elif re.match( r'^0(x|X)[a-fA-F0-9]+$', value ) is not None:
return int( value, 16 )
# looks like a fractional number
elif re.match(
r'^-?((\d+\.\d*)|(\d*\.\d+))((e|E)-?\d+)?$',
value
) is not None:
return float( value )
# do not attempt type conversion
return value
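# For example (sketch): type_convert( '-5' ) == -5, type_convert( '0x15' ) == 21,
# and type_convert( '3.14' ) == 3.14. Values that match none of the patterns
# (including exponent-only forms like '1e6', since the fractional pattern
# requires a decimal point) are returned unchanged.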
#=============================================================================
def wordify( string ):
"""
Attempts to check/convert any string into a word suitable for use in
a programming language.
"""
# trim the string
    string = string.strip()
# check for internal whitespace
string = re.sub( r'[ \t\r\n]+', '_', string )
# sanitize for allowed characters
string = re.sub( r'[^a-zA-Z0-9_]', '', string )
# make sure string begins with a valid alphabetic character
string = re.sub( r'^\d+', '', string )
# return the wordified string
return string
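# For example (sketch): wordify( 'd 4' ) == 'd_4' and wordify( 'c-3' ) == 'c3'.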
#=============================================================================
def run_tests():
"""
Execute built-in unit tests.
"""
import cStringIO
example = """a,b_2,c-3,d 4,e
1,2,3,4,5
5,4,3,2,1
-5,3.14,1e6,1.2e-2,0x15
hello,world,"other, stuff",4th column,fifth column"""
    csv_file_handle = cStringIO.StringIO( example )
    csv_reader = reader( csv_file_handle )
    print csv_reader.keys()
    for rec in csv_reader:
        print rec.values()
    return 0
#=============================================================================
def main( argv ):
"""
Script execution entry point
@param argv Arguments passed to the script
@return Exit code (0 = success)
"""
# imports when using this as a script
import argparse
# create and configure an argument parser
parser = argparse.ArgumentParser(
description = 'CSV Utility Module',
add_help = False
)
parser.add_argument(
'-h',
'--help',
default = False,
help = 'Display this help message and exit.',
action = 'help'
)
parser.add_argument(
'-v',
'--version',
default = False,
help = 'Display script version and exit.',
action = 'version',
version = __version__
)
# parse the arguments
args = parser.parse_args( argv[ 1 : ] )
# execute built-in unit tests
return run_tests()
#=============================================================================
if __name__ == "__main__":
import sys
sys.exit( main( sys.argv ) )
import numpy as np
import pytest
import pandas as pd
from pandas import Index, MultiIndex, Series
import pandas._testing as tm
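# Note (added for context; an assumption about the surrounding test package):
# the ``idx`` and ``sort`` arguments used throughout are pytest fixtures
# expected to be defined in the package's conftest.py, with ``idx`` a small
# two-level MultiIndex and ``sort`` parametrized over None and False.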
@pytest.mark.parametrize("case", [0.5, "xxx"])
@pytest.mark.parametrize(
"method", ["intersection", "union", "difference", "symmetric_difference"]
)
def test_set_ops_error_cases(idx, case, sort, method):
# non-iterable input
msg = "Input must be Index or array-like"
with pytest.raises(TypeError, match=msg):
getattr(idx, method)(case, sort=sort)
@pytest.mark.parametrize("klass", [MultiIndex, np.array, Series, list])
def test_intersection_base(idx, sort, klass):
first = idx[2::-1] # first 3 elements reversed
second = idx[:5]
if klass is not MultiIndex:
second = klass(second.values)
intersect = first.intersection(second, sort=sort)
if sort is None:
expected = first.sort_values()
else:
expected = first
tm.assert_index_equal(intersect, expected)
msg = "other must be a MultiIndex or a list of tuples"
with pytest.raises(TypeError, match=msg):
first.intersection([1, 2, 3], sort=sort)
@pytest.mark.arm_slow
@pytest.mark.parametrize("klass", [MultiIndex, np.array, Series, list])
def test_union_base(idx, sort, klass):
first = idx[::-1]
second = idx[:5]
if klass is not MultiIndex:
second = klass(second.values)
union = first.union(second, sort=sort)
if sort is None:
expected = first.sort_values()
else:
expected = first
tm.assert_index_equal(union, expected)
msg = "other must be a MultiIndex or a list of tuples"
with pytest.raises(TypeError, match=msg):
first.union([1, 2, 3], sort=sort)
def test_difference_base(idx, sort):
second = idx[4:]
answer = idx[:4]
result = idx.difference(second, sort=sort)
if sort is None:
answer = answer.sort_values()
assert result.equals(answer)
tm.assert_index_equal(result, answer)
# GH 10149
cases = [klass(second.values) for klass in [np.array, Series, list]]
for case in cases:
result = idx.difference(case, sort=sort)
tm.assert_index_equal(result, answer)
msg = "other must be a MultiIndex or a list of tuples"
with pytest.raises(TypeError, match=msg):
idx.difference([1, 2, 3], sort=sort)
def test_symmetric_difference(idx, sort):
first = idx[1:]
second = idx[:-1]
answer = idx[[-1, 0]]
result = first.symmetric_difference(second, sort=sort)
if sort is None:
answer = answer.sort_values()
tm.assert_index_equal(result, answer)
# GH 10149
cases = [klass(second.values) for klass in [np.array, Series, list]]
for case in cases:
result = first.symmetric_difference(case, sort=sort)
tm.assert_index_equal(result, answer)
msg = "other must be a MultiIndex or a list of tuples"
with pytest.raises(TypeError, match=msg):
first.symmetric_difference([1, 2, 3], sort=sort)
def test_multiindex_symmetric_difference():
# GH 13490
idx = MultiIndex.from_product([["a", "b"], ["A", "B"]], names=["a", "b"])
with tm.assert_produces_warning(FutureWarning):
result = idx ^ idx
assert result.names == idx.names
idx2 = idx.copy().rename(["A", "B"])
with tm.assert_produces_warning(FutureWarning):
result = idx ^ idx2
assert result.names == [None, None]
def test_empty(idx):
# GH 15270
assert not idx.empty
assert idx[:0].empty
def test_difference(idx, sort):
first = idx
result = first.difference(idx[-3:], sort=sort)
vals = idx[:-3].values
if sort is None:
vals = sorted(vals)
expected = MultiIndex.from_tuples(vals, sortorder=0, names=idx.names)
assert isinstance(result, MultiIndex)
assert result.equals(expected)
assert result.names == idx.names
tm.assert_index_equal(result, expected)
# empty difference: reflexive
result = idx.difference(idx, sort=sort)
expected = idx[:0]
assert result.equals(expected)
assert result.names == idx.names
# empty difference: superset
result = idx[-3:].difference(idx, sort=sort)
expected = idx[:0]
assert result.equals(expected)
assert result.names == idx.names
# empty difference: degenerate
result = idx[:0].difference(idx, sort=sort)
expected = idx[:0]
assert result.equals(expected)
assert result.names == idx.names
# names not the same
chunklet = idx[-3:]
chunklet.names = ["foo", "baz"]
result = first.difference(chunklet, sort=sort)
assert result.names == (None, None)
# empty, but non-equal
result = idx.difference(idx.sortlevel(1)[0], sort=sort)
assert len(result) == 0
    # does not raise when called with non-MultiIndex values (an array of tuples)
result = first.difference(first.values, sort=sort)
assert result.equals(first[:0])
# name from empty array
result = first.difference([], sort=sort)
assert first.equals(result)
assert first.names == result.names
# name from non-empty array
result = first.difference([("foo", "one")], sort=sort)
expected = pd.MultiIndex.from_tuples(
[("bar", "one"), ("baz", "two"), ("foo", "two"), ("qux", "one"), ("qux", "two")]
)
expected.names = first.names
assert first.names == result.names
msg = "other must be a MultiIndex or a list of tuples"
with pytest.raises(TypeError, match=msg):
first.difference([1, 2, 3, 4, 5], sort=sort)
def test_difference_sort_special():
# GH-24959
idx = pd.MultiIndex.from_product([[1, 0], ["a", "b"]])
# sort=None, the default
result = idx.difference([])
tm.assert_index_equal(result, idx)
@pytest.mark.xfail(reason="Not implemented.")
def test_difference_sort_special_true():
# TODO decide on True behaviour
idx = pd.MultiIndex.from_product([[1, 0], ["a", "b"]])
result = idx.difference([], sort=True)
expected = pd.MultiIndex.from_product([[0, 1], ["a", "b"]])
tm.assert_index_equal(result, expected)
def test_difference_sort_incomparable():
# GH-24959
idx = pd.MultiIndex.from_product([[1, pd.Timestamp("2000"), 2], ["a", "b"]])
other = pd.MultiIndex.from_product([[3, pd.Timestamp("2000"), 4], ["c", "d"]])
# sort=None, the default
# MultiIndex.difference deviates here from other difference
# implementations in not catching the TypeError
msg = "'<' not supported between instances of 'Timestamp' and 'int'"
with pytest.raises(TypeError, match=msg):
result = idx.difference(other)
# sort=False
result = idx.difference(other, sort=False)
tm.assert_index_equal(result, idx)
def test_difference_sort_incomparable_true():
idx = pd.MultiIndex.from_product([[1, pd.Timestamp("2000"), 2], ["a", "b"]])
other = pd.MultiIndex.from_product([[3, pd.Timestamp("2000"), 4], ["c", "d"]])
msg = "The 'sort' keyword only takes the values of None or False; True was passed."
with pytest.raises(ValueError, match=msg):
idx.difference(other, sort=True)
def test_union(idx, sort):
piece1 = idx[:5][::-1]
piece2 = idx[3:]
the_union = piece1.union(piece2, sort=sort)
if sort is None:
tm.assert_index_equal(the_union, idx.sort_values())
assert tm.equalContents(the_union, idx)
# corner case, pass self or empty thing:
the_union = idx.union(idx, sort=sort)
tm.assert_index_equal(the_union, idx)
the_union = idx.union(idx[:0], sort=sort)
tm.assert_index_equal(the_union, idx)
    # FIXME: don't leave commented-out
# won't work in python 3
# tuples = _index.values
# result = _index[:4] | tuples[4:]
# assert result.equals(tuples)
# not valid for python 3
# def test_union_with_regular_index(self):
# other = Index(['A', 'B', 'C'])
# result = other.union(idx)
# assert ('foo', 'one') in result
# assert 'B' in result
# result2 = _index.union(other)
# assert result.equals(result2)
def test_intersection(idx, sort):
piece1 = idx[:5][::-1]
piece2 = idx[3:]
the_int = piece1.intersection(piece2, sort=sort)
if sort is None:
tm.assert_index_equal(the_int, idx[3:5])
assert tm.equalContents(the_int, idx[3:5])
# corner case, pass self
the_int = idx.intersection(idx, sort=sort)
tm.assert_index_equal(the_int, idx)
# empty intersection: disjoint
empty = idx[:2].intersection(idx[2:], sort=sort)
expected = idx[:0]
assert empty.equals(expected)
    # FIXME: don't leave commented-out
# can't do in python 3
# tuples = _index.values
# result = _index & tuples
# assert result.equals(tuples)
@pytest.mark.parametrize(
"method", ["intersection", "union", "difference", "symmetric_difference"]
)
def test_setop_with_categorical(idx, sort, method):
other = idx.to_flat_index().astype("category")
res_names = [None] * idx.nlevels
result = getattr(idx, method)(other, sort=sort)
expected = getattr(idx, method)(idx, sort=sort).rename(res_names)
tm.assert_index_equal(result, expected)
result = getattr(idx, method)(other[:5], sort=sort)
expected = getattr(idx, method)(idx[:5], sort=sort).rename(res_names)
tm.assert_index_equal(result, expected)
def test_intersection_non_object(idx, sort):
other = Index(range(3), name="foo")
result = idx.intersection(other, sort=sort)
expected = MultiIndex(levels=idx.levels, codes=[[]] * idx.nlevels, names=None)
tm.assert_index_equal(result, expected, exact=True)
    # if we pass a length-0 ndarray (i.e. no name), we retain our idx.name
result = idx.intersection(np.asarray(other)[:0], sort=sort)
expected = MultiIndex(levels=idx.levels, codes=[[]] * idx.nlevels, names=idx.names)
tm.assert_index_equal(result, expected, exact=True)
msg = "other must be a MultiIndex or a list of tuples"
with pytest.raises(TypeError, match=msg):
# With non-zero length non-index, we try and fail to convert to tuples
idx.intersection(np.asarray(other), sort=sort)
def test_intersect_equal_sort():
# GH-24959
idx = pd.MultiIndex.from_product([[1, 0], ["a", "b"]])
tm.assert_index_equal(idx.intersection(idx, sort=False), idx)
tm.assert_index_equal(idx.intersection(idx, sort=None), idx)
@pytest.mark.xfail(reason="Not implemented.")
def test_intersect_equal_sort_true():
# TODO decide on True behaviour
idx = pd.MultiIndex.from_product([[1, 0], ["a", "b"]])
sorted_ = pd.MultiIndex.from_product([[0, 1], ["a", "b"]])
tm.assert_index_equal(idx.intersection(idx, sort=True), sorted_)
@pytest.mark.parametrize("slice_", [slice(None), slice(0)])
def test_union_sort_other_empty(slice_):
# https://github.com/pandas-dev/pandas/issues/24959
idx = pd.MultiIndex.from_product([[1, 0], ["a", "b"]])
# default, sort=None
other = idx[slice_]
tm.assert_index_equal(idx.union(other), idx)
# MultiIndex does not special case empty.union(idx)
# tm.assert_index_equal(other.union(idx), idx)
# sort=False
tm.assert_index_equal(idx.union(other, sort=False), idx)
@pytest.mark.xfail(reason="Not implemented.")
def test_union_sort_other_empty_sort(slice_):
# TODO decide on True behaviour
# # sort=True
idx = pd.MultiIndex.from_product([[1, 0], ["a", "b"]])
other = idx[:0]
result = idx.union(other, sort=True)
expected = pd.MultiIndex.from_product([[0, 1], ["a", "b"]])
tm.assert_index_equal(result, expected)
def test_union_sort_other_incomparable():
# https://github.com/pandas-dev/pandas/issues/24959
idx = pd.MultiIndex.from_product([[1, pd.Timestamp("2000")], ["a", "b"]])
# default, sort=None
with tm.assert_produces_warning(RuntimeWarning):
result = idx.union(idx[:1])
tm.assert_index_equal(result, idx)
# sort=False
result = idx.union(idx[:1], sort=False)
tm.assert_index_equal(result, idx)
@pytest.mark.xfail(reason="Not implemented.")
def test_union_sort_other_incomparable_sort():
# TODO decide on True behaviour
# # sort=True
idx = pd.MultiIndex.from_product([[1, pd.Timestamp("2000")], ["a", "b"]])
with pytest.raises(TypeError, match="Cannot compare"):
idx.union(idx[:1], sort=True)
def test_union_non_object_dtype_raises():
# GH#32646 raise NotImplementedError instead of less-informative error
mi = pd.MultiIndex.from_product([["a", "b"], [1, 2]])
idx = mi.levels[1]
msg = "Can only union MultiIndex with MultiIndex or Index of tuples"
with pytest.raises(NotImplementedError, match=msg):
mi.union(idx)
def test_union_empty_self_different_names():
# GH#38423
mi = MultiIndex.from_arrays([[]])
mi2 = MultiIndex.from_arrays([[1, 2], [3, 4]], names=["a", "b"])
result = mi.union(mi2)
expected = MultiIndex.from_arrays([[1, 2], [3, 4]])
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize(
"method", ["union", "intersection", "difference", "symmetric_difference"]
)
def test_setops_disallow_true(method):
idx1 = pd.MultiIndex.from_product([["a", "b"], [1, 2]])
idx2 = pd.MultiIndex.from_product([["b", "c"], [1, 2]])
with pytest.raises(ValueError, match="The 'sort' keyword only takes"):
getattr(idx1, method)(idx2, sort=True)
@pytest.mark.parametrize(
("tuples", "exp_tuples"),
[
([("val1", "test1")], [("val1", "test1")]),
([("val1", "test1"), ("val1", "test1")], [("val1", "test1")]),
(
[("val2", "test2"), ("val1", "test1")],
[("val2", "test2"), ("val1", "test1")],
),
],
)
def test_intersect_with_duplicates(tuples, exp_tuples):
# GH#36915
left = MultiIndex.from_tuples(tuples, names=["first", "second"])
right = MultiIndex.from_tuples(
[("val1", "test1"), ("val1", "test1"), ("val2", "test2")],
names=["first", "second"],
)
result = left.intersection(right)
expected = MultiIndex.from_tuples(exp_tuples, names=["first", "second"])
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize(
"data, names, expected",
[
((1,), None, [None, None]),
((1,), ["a"], [None, None]),
((1,), ["b"], [None, None]),
((1, 2), ["c", "d"], [None, None]),
((1, 2), ["b", "a"], [None, None]),
((1, 2, 3), ["a", "b", "c"], [None, None]),
((1, 2), ["a", "c"], ["a", None]),
((1, 2), ["c", "b"], [None, "b"]),
((1, 2), ["a", "b"], ["a", "b"]),
((1, 2), [None, "b"], [None, "b"]),
],
)
def test_maybe_match_names(data, names, expected):
# GH#38323
mi = pd.MultiIndex.from_tuples([], names=["a", "b"])
mi2 = pd.MultiIndex.from_tuples([data], names=names)
result = mi._maybe_match_names(mi2)
assert result == expected
def test_intersection_equal_different_names():
# GH#30302
mi1 = MultiIndex.from_arrays([[1, 2], [3, 4]], names=["c", "b"])
mi2 = MultiIndex.from_arrays([[1, 2], [3, 4]], names=["a", "b"])
result = mi1.intersection(mi2)
expected = MultiIndex.from_arrays([[1, 2], [3, 4]], names=[None, "b"])
tm.assert_index_equal(result, expected)
def test_intersection_different_names():
# GH#38323
mi = MultiIndex.from_arrays([[1], [3]], names=["c", "b"])
mi2 = MultiIndex.from_arrays([[1], [3]])
result = mi.intersection(mi2)
tm.assert_index_equal(result, mi2)
""" command line options, ini-file and conftest.py processing. """
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import copy
import inspect
import os
import shlex
import sys
import types
import warnings
from distutils.version import LooseVersion
import py
import six
from pluggy import HookimplMarker
from pluggy import HookspecMarker
from pluggy import PluginManager
import _pytest._code
import _pytest.assertion
import _pytest.hookspec # the extension point definitions
from .exceptions import PrintHelp
from .exceptions import UsageError
from .findpaths import determine_setup
from .findpaths import exists
from _pytest._code import ExceptionInfo
from _pytest._code import filter_traceback
from _pytest.compat import lru_cache
from _pytest.compat import safe_str
from _pytest.outcomes import Skipped
hookimpl = HookimplMarker("pytest")
hookspec = HookspecMarker("pytest")
class ConftestImportFailure(Exception):
def __init__(self, path, excinfo):
Exception.__init__(self, path, excinfo)
self.path = path
self.excinfo = excinfo
def main(args=None, plugins=None):
""" return exit code, after performing an in-process test run.
:arg args: list of command line arguments.
:arg plugins: list of plugin objects to be auto-registered during
initialization.
"""
from _pytest.main import EXIT_USAGEERROR
try:
try:
config = _prepareconfig(args, plugins)
except ConftestImportFailure as e:
exc_info = ExceptionInfo(e.excinfo)
tw = py.io.TerminalWriter(sys.stderr)
tw.line(
"ImportError while loading conftest '{e.path}'.".format(e=e), red=True
)
exc_info.traceback = exc_info.traceback.filter(filter_traceback)
exc_repr = (
exc_info.getrepr(style="short", chain=False)
if exc_info.traceback
else exc_info.exconly()
)
formatted_tb = safe_str(exc_repr)
for line in formatted_tb.splitlines():
tw.line(line.rstrip(), red=True)
return 4
else:
try:
return config.hook.pytest_cmdline_main(config=config)
finally:
config._ensure_unconfigure()
except UsageError as e:
tw = py.io.TerminalWriter(sys.stderr)
for msg in e.args:
tw.line("ERROR: {}\n".format(msg), red=True)
return EXIT_USAGEERROR
class cmdline(object): # compatibility namespace
main = staticmethod(main)
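# Illustrative usage (a sketch; ``main`` here is what pytest ultimately
# exposes as ``pytest.main``, and the test path below is hypothetical):
#
#     exit_code = main(["-q", "tests/"])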
def filename_arg(path, optname):
""" Argparse type validator for filename arguments.
:path: path of filename
:optname: name of the option
"""
if os.path.isdir(path):
raise UsageError("{} must be a filename, given: {}".format(optname, path))
return path
def directory_arg(path, optname):
"""Argparse type validator for directory arguments.
:path: path of directory
:optname: name of the option
"""
if not os.path.isdir(path):
raise UsageError("{} must be a directory, given: {}".format(optname, path))
return path
default_plugins = (
"mark",
"main",
"terminal",
"runner",
"python",
"fixtures",
"debugging",
"unittest",
"capture",
"skipping",
"tmpdir",
"monkeypatch",
"recwarn",
"pastebin",
"helpconfig",
"nose",
"assertion",
"junitxml",
"resultlog",
"doctest",
"cacheprovider",
"freeze_support",
"setuponly",
"setupplan",
"stepwise",
"warnings",
"logging",
)
builtin_plugins = set(default_plugins)
builtin_plugins.add("pytester")
def get_config():
# subsequent calls to main will create a fresh instance
pluginmanager = PytestPluginManager()
config = Config(pluginmanager)
for spec in default_plugins:
pluginmanager.import_plugin(spec)
return config
def get_plugin_manager():
"""
Obtain a new instance of the
:py:class:`_pytest.config.PytestPluginManager`, with default plugins
already loaded.
    This function can be used by integrations with other tools, like hooking
    into pytest to run tests inside an IDE.
"""
return get_config().pluginmanager
def _prepareconfig(args=None, plugins=None):
warning = None
if args is None:
args = sys.argv[1:]
elif isinstance(args, py.path.local):
args = [str(args)]
elif not isinstance(args, (tuple, list)):
if not isinstance(args, str):
raise ValueError("not a string or argument list: %r" % (args,))
args = shlex.split(args, posix=sys.platform != "win32")
from _pytest import deprecated
warning = deprecated.MAIN_STR_ARGS
config = get_config()
pluginmanager = config.pluginmanager
try:
if plugins:
for plugin in plugins:
if isinstance(plugin, six.string_types):
pluginmanager.consider_pluginarg(plugin)
else:
pluginmanager.register(plugin)
if warning:
from _pytest.warnings import _issue_config_warning
_issue_config_warning(warning, config=config)
return pluginmanager.hook.pytest_cmdline_parse(
pluginmanager=pluginmanager, args=args
)
except BaseException:
config._ensure_unconfigure()
raise
class PytestPluginManager(PluginManager):
"""
Overwrites :py:class:`pluggy.PluginManager <pluggy.PluginManager>` to add pytest-specific
functionality:
* loading plugins from the command line, ``PYTEST_PLUGINS`` env variable and
``pytest_plugins`` global variables found in plugins being loaded;
* ``conftest.py`` loading during start-up;
"""
def __init__(self):
super(PytestPluginManager, self).__init__("pytest")
self._conftest_plugins = set()
# state related to local conftest plugins
self._dirpath2confmods = {}
self._conftestpath2mod = {}
self._confcutdir = None
self._noconftest = False
self._duplicatepaths = set()
self.add_hookspecs(_pytest.hookspec)
self.register(self)
if os.environ.get("PYTEST_DEBUG"):
err = sys.stderr
encoding = getattr(err, "encoding", "utf8")
try:
err = py.io.dupfile(err, encoding=encoding)
except Exception:
pass
self.trace.root.setwriter(err.write)
self.enable_tracing()
# Config._consider_importhook will set a real object if required.
self.rewrite_hook = _pytest.assertion.DummyRewriteHook()
# Used to know when we are importing conftests after the pytest_configure stage
self._configured = False
def addhooks(self, module_or_class):
"""
.. deprecated:: 2.8
Use :py:meth:`pluggy.PluginManager.add_hookspecs <PluginManager.add_hookspecs>`
instead.
"""
warning = dict(
code="I2",
fslocation=_pytest._code.getfslineno(sys._getframe(1)),
nodeid=None,
message="use pluginmanager.add_hookspecs instead of "
"deprecated addhooks() method.",
)
self._warn(warning)
return self.add_hookspecs(module_or_class)
def parse_hookimpl_opts(self, plugin, name):
# pytest hooks are always prefixed with pytest_
# so we avoid accessing possibly non-readable attributes
# (see issue #1073)
if not name.startswith("pytest_"):
return
# ignore some historic special names which can not be hooks anyway
if name == "pytest_plugins" or name.startswith("pytest_funcarg__"):
return
method = getattr(plugin, name)
opts = super(PytestPluginManager, self).parse_hookimpl_opts(plugin, name)
# consider only actual functions for hooks (#3775)
if not inspect.isroutine(method):
return
# collect unmarked hooks as long as they have the `pytest_' prefix
if opts is None and name.startswith("pytest_"):
opts = {}
if opts is not None:
for name in ("tryfirst", "trylast", "optionalhook", "hookwrapper"):
opts.setdefault(name, hasattr(method, name))
return opts
def parse_hookspec_opts(self, module_or_class, name):
opts = super(PytestPluginManager, self).parse_hookspec_opts(
module_or_class, name
)
if opts is None:
method = getattr(module_or_class, name)
if name.startswith("pytest_"):
opts = {
"firstresult": hasattr(method, "firstresult"),
"historic": hasattr(method, "historic"),
}
return opts
def register(self, plugin, name=None):
if name in ["pytest_catchlog", "pytest_capturelog"]:
self._warn(
"{} plugin has been merged into the core, "
"please remove it from your requirements.".format(
name.replace("_", "-")
)
)
return
ret = super(PytestPluginManager, self).register(plugin, name)
if ret:
self.hook.pytest_plugin_registered.call_historic(
kwargs=dict(plugin=plugin, manager=self)
)
if isinstance(plugin, types.ModuleType):
self.consider_module(plugin)
return ret
def getplugin(self, name):
        # support the deprecated naming because plugins (e.g. xdist) use it
return self.get_plugin(name)
def hasplugin(self, name):
"""Return True if the plugin with the given name is registered."""
return bool(self.get_plugin(name))
def pytest_configure(self, config):
# XXX now that the pluginmanager exposes hookimpl(tryfirst...)
# we should remove tryfirst/trylast as markers
config.addinivalue_line(
"markers",
"tryfirst: mark a hook implementation function such that the "
"plugin machinery will try to call it first/as early as possible.",
)
config.addinivalue_line(
"markers",
"trylast: mark a hook implementation function such that the "
"plugin machinery will try to call it last/as late as possible.",
)
self._configured = True
def _warn(self, message):
kwargs = (
message
if isinstance(message, dict)
else {"code": "I1", "message": message, "fslocation": None, "nodeid": None}
)
self.hook.pytest_logwarning.call_historic(kwargs=kwargs)
#
# internal API for local conftest plugin handling
#
def _set_initial_conftests(self, namespace):
""" load initial conftest files given a preparsed "namespace".
As conftest files may add their own command line options
which have arguments ('--my-opt somepath') we might get some
false positives. All builtin and 3rd party plugins will have
been loaded, however, so common options will not confuse our logic
here.
"""
current = py.path.local()
self._confcutdir = (
current.join(namespace.confcutdir, abs=True)
if namespace.confcutdir
else None
)
self._noconftest = namespace.noconftest
self._using_pyargs = namespace.pyargs
testpaths = namespace.file_or_dir
foundanchor = False
for path in testpaths:
path = str(path)
# remove node-id syntax
i = path.find("::")
if i != -1:
path = path[:i]
anchor = current.join(path, abs=1)
if exists(anchor): # we found some file object
self._try_load_conftest(anchor)
foundanchor = True
if not foundanchor:
self._try_load_conftest(current)
def _try_load_conftest(self, anchor):
self._getconftestmodules(anchor)
# let's also consider test* subdirs
if anchor.check(dir=1):
for x in anchor.listdir("test*"):
if x.check(dir=1):
self._getconftestmodules(x)
@lru_cache(maxsize=128)
def _getconftestmodules(self, path):
if self._noconftest:
return []
if path.isfile():
directory = path.dirpath()
else:
directory = path
if six.PY2: # py2 is not using lru_cache.
try:
return self._dirpath2confmods[directory]
except KeyError:
pass
# XXX these days we may rather want to use config.rootdir
# and allow users to opt into looking into the rootdir parent
# directories instead of requiring to specify confcutdir
clist = []
for parent in directory.realpath().parts():
if self._confcutdir and self._confcutdir.relto(parent):
continue
conftestpath = parent.join("conftest.py")
if conftestpath.isfile():
mod = self._importconftest(conftestpath)
clist.append(mod)
self._dirpath2confmods[directory] = clist
return clist
def _rget_with_confmod(self, name, path):
modules = self._getconftestmodules(path)
for mod in reversed(modules):
try:
return mod, getattr(mod, name)
except AttributeError:
continue
raise KeyError(name)
def _importconftest(self, conftestpath):
try:
return self._conftestpath2mod[conftestpath]
except KeyError:
pkgpath = conftestpath.pypkgpath()
if pkgpath is None:
_ensure_removed_sysmodule(conftestpath.purebasename)
try:
mod = conftestpath.pyimport()
if (
hasattr(mod, "pytest_plugins")
and self._configured
and not self._using_pyargs
):
from _pytest.deprecated import (
PYTEST_PLUGINS_FROM_NON_TOP_LEVEL_CONFTEST
)
warnings.warn_explicit(
PYTEST_PLUGINS_FROM_NON_TOP_LEVEL_CONFTEST,
category=None,
filename=str(conftestpath),
lineno=0,
)
except Exception:
raise ConftestImportFailure(conftestpath, sys.exc_info())
self._conftest_plugins.add(mod)
self._conftestpath2mod[conftestpath] = mod
dirpath = conftestpath.dirpath()
if dirpath in self._dirpath2confmods:
for path, mods in self._dirpath2confmods.items():
if path and path.relto(dirpath) or path == dirpath:
assert mod not in mods
mods.append(mod)
self.trace("loaded conftestmodule %r" % (mod))
self.consider_conftest(mod)
return mod
#
# API for bootstrapping plugin loading
#
#
def consider_preparse(self, args):
for opt1, opt2 in zip(args, args[1:]):
if opt1 == "-p":
self.consider_pluginarg(opt2)
def consider_pluginarg(self, arg):
if arg.startswith("no:"):
name = arg[3:]
# PR #4304 : remove stepwise if cacheprovider is blocked
if name == "cacheprovider":
self.set_blocked("stepwise")
self.set_blocked("pytest_stepwise")
self.set_blocked(name)
if not name.startswith("pytest_"):
self.set_blocked("pytest_" + name)
else:
self.import_plugin(arg)
def consider_conftest(self, conftestmodule):
self.register(conftestmodule, name=conftestmodule.__file__)
def consider_env(self):
self._import_plugin_specs(os.environ.get("PYTEST_PLUGINS"))
def consider_module(self, mod):
self._import_plugin_specs(getattr(mod, "pytest_plugins", []))
def _import_plugin_specs(self, spec):
plugins = _get_plugin_specs_as_list(spec)
for import_spec in plugins:
self.import_plugin(import_spec)
def import_plugin(self, modname):
# most often modname refers to builtin modules, e.g. "pytester",
# "terminal" or "capture". Those plugins are registered under their
# basename for historic purposes but must be imported with the
# _pytest prefix.
assert isinstance(modname, (six.text_type, str)), (
"module name as text required, got %r" % modname
)
modname = str(modname)
if self.is_blocked(modname) or self.get_plugin(modname) is not None:
return
if modname in builtin_plugins:
importspec = "_pytest." + modname
else:
importspec = modname
self.rewrite_hook.mark_rewrite(importspec)
try:
__import__(importspec)
except ImportError as e:
new_exc_type = ImportError
new_exc_message = 'Error importing plugin "%s": %s' % (
modname,
safe_str(e.args[0]),
)
new_exc = new_exc_type(new_exc_message)
six.reraise(new_exc_type, new_exc, sys.exc_info()[2])
except Skipped as e:
self._warn("skipped plugin %r: %s" % ((modname, e.msg)))
else:
mod = sys.modules[importspec]
self.register(mod, modname)
def _get_plugin_specs_as_list(specs):
"""
Parses a list of "plugin specs" and returns a list of plugin names.
    Plugin specs can be given as a string of names separated by "," or already as a
    list/tuple, in which case it is returned as a list. Specs can also be `None`,
    in which case an empty list is returned.
"""
if specs is not None:
if isinstance(specs, str):
specs = specs.split(",") if specs else []
if not isinstance(specs, (list, tuple)):
raise UsageError(
"Plugin specs must be a ','-separated string or a "
"list/tuple of strings for plugin names. Given: %r" % specs
)
return list(specs)
return []
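# For example (sketch):
#     _get_plugin_specs_as_list("myplugin,other") == ["myplugin", "other"]
#     _get_plugin_specs_as_list(None) == []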
def _ensure_removed_sysmodule(modname):
try:
del sys.modules[modname]
except KeyError:
pass
class Notset(object):
def __repr__(self):
return "<NOTSET>"
notset = Notset()
def _iter_rewritable_modules(package_files):
for fn in package_files:
is_simple_module = "/" not in fn and fn.endswith(".py")
is_package = fn.count("/") == 1 and fn.endswith("__init__.py")
if is_simple_module:
module_name, _ = os.path.splitext(fn)
yield module_name
elif is_package:
package_name = os.path.dirname(fn)
yield package_name
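# For example (sketch), given package_files of
# ["foo.py", "bar/__init__.py", "baz/mod.py"], this yields "foo" (a top-level
# module) and "bar" (a package); nested modules like "baz/mod.py" are skipped.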
class Config(object):
""" access to configuration values, pluginmanager and plugin hooks. """
def __init__(self, pluginmanager):
#: access to command line option as attributes.
#: (deprecated), use :py:func:`getoption() <_pytest.config.Config.getoption>` instead
self.option = argparse.Namespace()
from .argparsing import Parser, FILE_OR_DIR
_a = FILE_OR_DIR
self._parser = Parser(
usage="%%(prog)s [options] [%s] [%s] [...]" % (_a, _a),
processopt=self._processopt,
)
#: a pluginmanager instance
self.pluginmanager = pluginmanager
self.trace = self.pluginmanager.trace.root.get("config")
self.hook = self.pluginmanager.hook
self._inicache = {}
self._override_ini = ()
self._opt2dest = {}
self._cleanup = []
self._warn = self.pluginmanager._warn
self.pluginmanager.register(self, "pytestconfig")
self._configured = False
def do_setns(dic):
import pytest
setns(pytest, dic)
self.hook.pytest_namespace.call_historic(do_setns, {})
self.hook.pytest_addoption.call_historic(kwargs=dict(parser=self._parser))
def add_cleanup(self, func):
""" Add a function to be called when the config object gets out of
use (usually coninciding with pytest_unconfigure)."""
self._cleanup.append(func)
def _do_configure(self):
assert not self._configured
self._configured = True
self.hook.pytest_configure.call_historic(kwargs=dict(config=self))
def _ensure_unconfigure(self):
if self._configured:
self._configured = False
self.hook.pytest_unconfigure(config=self)
self.hook.pytest_configure._call_history = []
while self._cleanup:
fin = self._cleanup.pop()
fin()
def warn(self, code, message, fslocation=None, nodeid=None):
"""
.. deprecated:: 3.8
Use :py:func:`warnings.warn` or :py:func:`warnings.warn_explicit` directly instead.
Generate a warning for this test session.
"""
from _pytest.warning_types import RemovedInPytest4Warning
if isinstance(fslocation, (tuple, list)) and len(fslocation) > 2:
filename, lineno = fslocation[:2]
else:
filename = "unknown file"
lineno = 0
msg = "config.warn has been deprecated, use warnings.warn instead"
if nodeid:
msg = "{}: {}".format(nodeid, msg)
warnings.warn_explicit(
RemovedInPytest4Warning(msg),
category=None,
filename=filename,
lineno=lineno,
)
self.hook.pytest_logwarning.call_historic(
kwargs=dict(
code=code, message=message, fslocation=fslocation, nodeid=nodeid
)
)
def get_terminal_writer(self):
return self.pluginmanager.get_plugin("terminalreporter")._tw
def pytest_cmdline_parse(self, pluginmanager, args):
# REF1 assert self == pluginmanager.config, (self, pluginmanager.config)
self.parse(args)
return self
def notify_exception(self, excinfo, option=None):
if option and option.fulltrace:
style = "long"
else:
style = "native"
excrepr = excinfo.getrepr(
funcargs=True, showlocals=getattr(option, "showlocals", False), style=style
)
res = self.hook.pytest_internalerror(excrepr=excrepr, excinfo=excinfo)
if not any(res):
for line in str(excrepr).split("\n"):
sys.stderr.write("INTERNALERROR> %s\n" % line)
sys.stderr.flush()
def cwd_relative_nodeid(self, nodeid):
# nodeid's are relative to the rootpath, compute relative to cwd
if self.invocation_dir != self.rootdir:
fullpath = self.rootdir.join(nodeid)
nodeid = self.invocation_dir.bestrelpath(fullpath)
return nodeid
@classmethod
def fromdictargs(cls, option_dict, args):
""" constructor useable for subprocesses. """
config = get_config()
config.option.__dict__.update(option_dict)
config.parse(args, addopts=False)
for x in config.option.plugins:
config.pluginmanager.consider_pluginarg(x)
return config
def _processopt(self, opt):
for name in opt._short_opts + opt._long_opts:
self._opt2dest[name] = opt.dest
if hasattr(opt, "default") and opt.dest:
if not hasattr(self.option, opt.dest):
setattr(self.option, opt.dest, opt.default)
@hookimpl(trylast=True)
def pytest_load_initial_conftests(self, early_config):
self.pluginmanager._set_initial_conftests(early_config.known_args_namespace)
def _initini(self, args):
ns, unknown_args = self._parser.parse_known_and_unknown_args(
args, namespace=copy.copy(self.option)
)
r = determine_setup(
ns.inifilename,
ns.file_or_dir + unknown_args,
rootdir_cmd_arg=ns.rootdir or None,
config=self,
)
self.rootdir, self.inifile, self.inicfg = r
self._parser.extra_info["rootdir"] = self.rootdir
self._parser.extra_info["inifile"] = self.inifile
self.invocation_dir = py.path.local()
self._parser.addini("addopts", "extra command line options", "args")
self._parser.addini("minversion", "minimally required pytest version")
self._override_ini = ns.override_ini or ()
def _consider_importhook(self, args):
"""Install the PEP 302 import hook if using assertion rewriting.
Needs to parse the --assert=<mode> option from the commandline
and find all the installed plugins to mark them for rewriting
by the importhook.
"""
ns, unknown_args = self._parser.parse_known_and_unknown_args(args)
mode = ns.assertmode
if mode == "rewrite":
try:
hook = _pytest.assertion.install_importhook(self)
except SystemError:
mode = "plain"
else:
self._mark_plugins_for_rewrite(hook)
_warn_about_missing_assertion(mode)
def _mark_plugins_for_rewrite(self, hook):
"""
Given an importhook, mark for rewrite any top-level
modules or packages in the distribution package for
all pytest plugins.
"""
import pkg_resources
self.pluginmanager.rewrite_hook = hook
if os.environ.get("PYTEST_DISABLE_PLUGIN_AUTOLOAD"):
# We don't autoload from setuptools entry points, no need to continue.
return
# 'RECORD' available for plugins installed normally (pip install)
# 'SOURCES.txt' available for plugins installed in dev mode (pip install -e)
# for installed plugins 'SOURCES.txt' returns an empty list, and vice-versa
# so it shouldn't be an issue
metadata_files = "RECORD", "SOURCES.txt"
package_files = (
entry.split(",")[0]
for entrypoint in pkg_resources.iter_entry_points("pytest11")
for metadata in metadata_files
for entry in entrypoint.dist._get_metadata(metadata)
)
for name in _iter_rewritable_modules(package_files):
hook.mark_rewrite(name)
def _preparse(self, args, addopts=True):
if addopts:
args[:] = shlex.split(os.environ.get("PYTEST_ADDOPTS", "")) + args
self._initini(args)
if addopts:
args[:] = self.getini("addopts") + args
self._checkversion()
self._consider_importhook(args)
self.pluginmanager.consider_preparse(args)
if not os.environ.get("PYTEST_DISABLE_PLUGIN_AUTOLOAD"):
# Don't autoload from setuptools entry point. Only explicitly specified
# plugins are going to be loaded.
self.pluginmanager.load_setuptools_entrypoints("pytest11")
self.pluginmanager.consider_env()
self.known_args_namespace = ns = self._parser.parse_known_args(
args, namespace=copy.copy(self.option)
)
if self.known_args_namespace.confcutdir is None and self.inifile:
confcutdir = py.path.local(self.inifile).dirname
self.known_args_namespace.confcutdir = confcutdir
try:
self.hook.pytest_load_initial_conftests(
early_config=self, args=args, parser=self._parser
)
except ConftestImportFailure:
e = sys.exc_info()[1]
if ns.help or ns.version:
# we don't want to prevent --help/--version from working,
# so just let it pass and print a warning at the end
self._warn("could not load initial conftests (%s)\n" % e.path)
else:
raise
def _checkversion(self):
import pytest
minver = self.inicfg.get("minversion", None)
if minver:
if LooseVersion(minver) > LooseVersion(pytest.__version__):
raise pytest.UsageError(
"%s:%d: requires pytest-%s, actual pytest-%s'"
% (
self.inicfg.config.path,
self.inicfg.lineof("minversion"),
minver,
pytest.__version__,
)
)
def parse(self, args, addopts=True):
# parse given cmdline arguments into this config object.
assert not hasattr(
self, "args"
), "can only parse cmdline args at most once per Config object"
self._origargs = args
self.hook.pytest_addhooks.call_historic(
kwargs=dict(pluginmanager=self.pluginmanager)
)
self._preparse(args, addopts=addopts)
# XXX deprecated hook:
self.hook.pytest_cmdline_preparse(config=self, args=args)
self._parser.after_preparse = True
try:
args = self._parser.parse_setoption(
args, self.option, namespace=self.option
)
if not args:
if self.invocation_dir == self.rootdir:
args = self.getini("testpaths")
if not args:
args = [str(self.invocation_dir)]
self.args = args
except PrintHelp:
pass
def addinivalue_line(self, name, line):
""" add a line to an ini-file option. The option must have been
declared but might not yet be set in which case the line becomes the
the first line in its value. """
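# Hedged usage sketch: a plugin's pytest_configure hook commonly calls
#     config.addinivalue_line("markers", "slow: marks tests as slow")
# which appends a line to the already-declared "markers" linelist option.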
x = self.getini(name)
assert isinstance(x, list)
x.append(line) # modifies the cached list inline
def getini(self, name):
""" return configuration value from an :ref:`ini file <inifiles>`. If the
specified name hasn't been registered through a prior
:py:func:`parser.addini <_pytest.config.Parser.addini>`
call (usually from a plugin), a ValueError is raised. """
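# Hedged usage sketch (option name is illustrative): assuming a conftest.py
# declared the option via
#     parser.addini("api_url", "service endpoint used by the tests", default="")
# a plugin or fixture can later read it with
#     url = config.getini("api_url")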
try:
return self._inicache[name]
except KeyError:
self._inicache[name] = val = self._getini(name)
return val
def _getini(self, name):
try:
description, type, default = self._parser._inidict[name]
except KeyError:
raise ValueError("unknown configuration value: %r" % (name,))
value = self._get_override_ini_value(name)
if value is None:
try:
value = self.inicfg[name]
except KeyError:
if default is not None:
return default
if type is None:
return ""
return []
if type == "pathlist":
dp = py.path.local(self.inicfg.config.path).dirpath()
values = []
for relpath in shlex.split(value):
values.append(dp.join(relpath, abs=True))
return values
elif type == "args":
return shlex.split(value)
elif type == "linelist":
return [t for t in map(lambda x: x.strip(), value.split("\n")) if t]
elif type == "bool":
return bool(_strtobool(value.strip()))
else:
assert type is None
return value
def _getconftest_pathlist(self, name, path):
try:
mod, relroots = self.pluginmanager._rget_with_confmod(name, path)
except KeyError:
return None
modpath = py.path.local(mod.__file__).dirpath()
values = []
for relroot in relroots:
if not isinstance(relroot, py.path.local):
relroot = relroot.replace("/", py.path.local.sep)
relroot = modpath.join(relroot, abs=True)
values.append(relroot)
return values
def _get_override_ini_value(self, name):
value = None
# override_ini is a list of "ini=value" options
# always use the last item if multiple values are set for same ini-name,
# e.g. -o foo=bar1 -o foo=bar2 will set foo to bar2
for ini_config in self._override_ini:
try:
key, user_ini_value = ini_config.split("=", 1)
except ValueError:
raise UsageError("-o/--override-ini expects option=value style.")
else:
if key == name:
value = user_ini_value
return value
def getoption(self, name, default=notset, skip=False):
""" return command line option value.
:arg name: name of the option. You may also specify
the literal ``--OPT`` option instead of the "dest" option name.
:arg default: default value if no option of that name exists.
:arg skip: if True, raise pytest.skip if the option does not exist
or has a None value.
"""
name = self._opt2dest.get(name, name)
try:
val = getattr(self.option, name)
if val is None and skip:
raise AttributeError(name)
return val
except AttributeError:
if default is not notset:
return default
if skip:
import pytest
pytest.skip("no %r option found" % (name,))
raise ValueError("no option named %r" % (name,))
def getvalue(self, name, path=None):
""" (deprecated, use getoption()) """
return self.getoption(name)
def getvalueorskip(self, name, path=None):
""" (deprecated, use getoption(skip=True)) """
return self.getoption(name, skip=True)
def _assertion_supported():
try:
assert False
except AssertionError:
return True
else:
return False
def _warn_about_missing_assertion(mode):
if not _assertion_supported():
if mode == "plain":
sys.stderr.write(
"WARNING: ASSERTIONS ARE NOT EXECUTED"
" and FAILING TESTS WILL PASS. Are you"
" using python -O?"
)
else:
sys.stderr.write(
"WARNING: assertions not in test modules or"
" plugins will be ignored"
" because assert statements are not executed "
"by the underlying Python interpreter "
"(are you using python -O?)\n"
)
def setns(obj, dic):
import pytest
for name, value in dic.items():
if isinstance(value, dict):
mod = getattr(obj, name, None)
if mod is None:
modname = "pytest.%s" % name
mod = types.ModuleType(modname)
sys.modules[modname] = mod
mod.__all__ = []
setattr(obj, name, mod)
obj.__all__.append(name)
setns(mod, value)
else:
setattr(obj, name, value)
obj.__all__.append(name)
# if obj != pytest:
# pytest.__all__.append(name)
setattr(pytest, name, value)
def create_terminal_writer(config, *args, **kwargs):
"""Create a TerminalWriter instance configured according to the options
in the config object. Every piece of code that requires a TerminalWriter
object and has access to a config object should use this function.
"""
tw = py.io.TerminalWriter(*args, **kwargs)
if config.option.color == "yes":
tw.hasmarkup = True
if config.option.color == "no":
tw.hasmarkup = False
return tw
def _strtobool(val):
"""Convert a string representation of truth to true (1) or false (0).
True values are 'y', 'yes', 't', 'true', 'on', and '1'; false values
are 'n', 'no', 'f', 'false', 'off', and '0'. Raises ValueError if
'val' is anything else.
.. note:: copied from distutils.util
"""
val = val.lower()
if val in ("y", "yes", "t", "true", "on", "1"):
return 1
elif val in ("n", "no", "f", "false", "off", "0"):
return 0
else:
raise ValueError("invalid truth value %r" % (val,))
|
|
# Copyright (c) 2006-2007, 2009-2014 LOGILAB S.A. (Paris, FRANCE) <contact@logilab.fr>
# Copyright (c) 2013-2016 Claudiu Popa <pcmanticore@gmail.com>
# Copyright (c) 2014 Google, Inc.
# Copyright (c) 2015 Florian Bruhin <me@the-compiler.org>
# Copyright (c) 2015-2016 Cara Vinson <ceridwenv@gmail.com>
# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
# For details: https://github.com/PyCQA/astroid/blob/master/COPYING.LESSER
"""tests for specific behaviour of astroid nodes
"""
import os
import sys
import textwrap
import unittest
import warnings
import six
import astroid
from astroid import bases
from astroid import builder
from astroid import context as contextmod
from astroid import exceptions
from astroid import node_classes
from astroid import nodes
from astroid import parse
from astroid import util
from astroid import test_utils
from astroid import transforms
from astroid.tests import resources
abuilder = builder.AstroidBuilder()
BUILTINS = six.moves.builtins.__name__
class AsStringTest(resources.SysPathSetup, unittest.TestCase):
def test_tuple_as_string(self):
def build(string):
return abuilder.string_build(string).body[0].value
self.assertEqual(build('1,').as_string(), '(1, )')
self.assertEqual(build('1, 2, 3').as_string(), '(1, 2, 3)')
self.assertEqual(build('(1, )').as_string(), '(1, )')
self.assertEqual(build('1, 2, 3').as_string(), '(1, 2, 3)')
@test_utils.require_version(minver='3.0')
def test_func_signature_issue_185(self):
code = textwrap.dedent('''
def test(a, b, c=42, *, x=42, **kwargs):
print(a, b, c, args)
''')
node = parse(code)
self.assertEqual(node.as_string().strip(), code.strip())
def test_as_string_for_list_containing_uninferable(self):
node = builder.extract_node('''
def foo():
bar = [arg] * 1
''')
binop = node.body[0].value
inferred = next(binop.infer())
self.assertEqual(inferred.as_string(), '[Uninferable]')
self.assertEqual(binop.as_string(), '([arg]) * (1)')
def test_frozenset_as_string(self):
ast_nodes = builder.extract_node('''
frozenset((1, 2, 3)) #@
frozenset({1, 2, 3}) #@
frozenset([1, 2, 3,]) #@
frozenset(None) #@
frozenset(1) #@
''')
ast_nodes = [next(node.infer()) for node in ast_nodes]
self.assertEqual(ast_nodes[0].as_string(), 'frozenset((1, 2, 3))')
self.assertEqual(ast_nodes[1].as_string(), 'frozenset({1, 2, 3})')
self.assertEqual(ast_nodes[2].as_string(), 'frozenset([1, 2, 3])')
self.assertNotEqual(ast_nodes[3].as_string(), 'frozenset(None)')
self.assertNotEqual(ast_nodes[4].as_string(), 'frozenset(1)')
def test_varargs_kwargs_as_string(self):
ast = abuilder.string_build('raise_string(*args, **kwargs)').body[0]
self.assertEqual(ast.as_string(), 'raise_string(*args, **kwargs)')
def test_module_as_string(self):
"""check as_string on a whole module prepared to be returned identically
"""
module = resources.build_file('data/module.py', 'data.module')
with open(resources.find('data/module.py'), 'r') as fobj:
self.assertMultiLineEqual(module.as_string(), fobj.read())
def test_module2_as_string(self):
"""check as_string on a whole module prepared to be returned identically
"""
module2 = resources.build_file('data/module2.py', 'data.module2')
with open(resources.find('data/module2.py'), 'r') as fobj:
self.assertMultiLineEqual(module2.as_string(), fobj.read())
def test_as_string(self):
"""check as_string for python syntax >= 2.7"""
code = '''one_two = {1, 2}
b = {v: k for (k, v) in enumerate('string')}
cdd = {k for k in b}\n\n'''
ast = abuilder.string_build(code)
self.assertMultiLineEqual(ast.as_string(), code)
@test_utils.require_version('3.0')
def test_3k_as_string(self):
"""check as_string for python 3k syntax"""
code = '''print()
def function(var):
nonlocal counter
try:
hello
except NameError as nexc:
(*hell, o) = b'hello'
raise AttributeError from nexc
\n'''
ast = abuilder.string_build(code)
self.assertEqual(ast.as_string(), code)
@test_utils.require_version('3.0')
@unittest.expectedFailure
def test_3k_annotations_and_metaclass(self):
code_annotations = textwrap.dedent('''
def function(var:int):
nonlocal counter
class Language(metaclass=Natural):
"""natural language"""
''')
ast = abuilder.string_build(code_annotations)
self.assertEqual(ast.as_string(), code_annotations)
def test_ellipsis(self):
ast = abuilder.string_build('a[...]').body[0]
self.assertEqual(ast.as_string(), 'a[...]')
def test_slices(self):
for code in ('a[0]', 'a[1:3]', 'a[:-1:step]', 'a[:,newaxis]',
'a[newaxis,:]', 'del L[::2]', 'del A[1]', 'del Br[:]'):
ast = abuilder.string_build(code).body[0]
self.assertEqual(ast.as_string(), code)
def test_slice_and_subscripts(self):
code = """a[:1] = bord[2:]
a[:1] = bord[2:]
del bree[3:d]
bord[2:]
del av[d::f], a[df:]
a[:1] = bord[2:]
del SRC[::1,newaxis,1:]
tous[vals] = 1010
del thousand[key]
del a[::2], a[:-1:step]
del Fee.form[left:]
aout.vals = miles.of_stuff
del (ccok, (name.thing, foo.attrib.value)), Fee.form[left:]
if all[1] == bord[0:]:
pass\n\n"""
ast = abuilder.string_build(code)
self.assertEqual(ast.as_string(), code)
class _NodeTest(unittest.TestCase):
"""test transformation of If Node"""
CODE = None
@property
def astroid(self):
try:
return self.__class__.__dict__['CODE_Astroid']
except KeyError:
module = builder.parse(self.CODE)
self.__class__.CODE_Astroid = module
return module
class IfNodeTest(_NodeTest):
"""test transformation of If Node"""
CODE = """
if 0:
print()
if True:
print()
else:
pass
if "":
print()
elif []:
raise
if 1:
print()
elif True:
print()
elif func():
pass
else:
raise
"""
def test_if_elif_else_node(self):
"""test transformation for If node"""
self.assertEqual(len(self.astroid.body), 4)
for stmt in self.astroid.body:
self.assertIsInstance(stmt, nodes.If)
self.assertFalse(self.astroid.body[0].orelse) # simple If
self.assertIsInstance(self.astroid.body[1].orelse[0], nodes.Pass) # If / else
self.assertIsInstance(self.astroid.body[2].orelse[0], nodes.If) # If / elif
self.assertIsInstance(self.astroid.body[3].orelse[0].orelse[0], nodes.If)
def test_block_range(self):
# XXX ensure expected values
self.assertEqual(self.astroid.block_range(1), (0, 22))
self.assertEqual(self.astroid.block_range(10), (0, 22)) # XXX (10, 22) ?
self.assertEqual(self.astroid.body[1].block_range(5), (5, 6))
self.assertEqual(self.astroid.body[1].block_range(6), (6, 6))
self.assertEqual(self.astroid.body[1].orelse[0].block_range(7), (7, 8))
self.assertEqual(self.astroid.body[1].orelse[0].block_range(8), (8, 8))
class TryExceptNodeTest(_NodeTest):
CODE = """
try:
print ('pouet')
except IOError:
pass
except UnicodeError:
print()
else:
print()
"""
def test_block_range(self):
# XXX ensure expected values
self.assertEqual(self.astroid.body[0].block_range(1), (1, 8))
self.assertEqual(self.astroid.body[0].block_range(2), (2, 2))
self.assertEqual(self.astroid.body[0].block_range(3), (3, 8))
self.assertEqual(self.astroid.body[0].block_range(4), (4, 4))
self.assertEqual(self.astroid.body[0].block_range(5), (5, 5))
self.assertEqual(self.astroid.body[0].block_range(6), (6, 6))
self.assertEqual(self.astroid.body[0].block_range(7), (7, 7))
self.assertEqual(self.astroid.body[0].block_range(8), (8, 8))
class TryFinallyNodeTest(_NodeTest):
CODE = """
try:
print ('pouet')
finally:
print ('pouet')
"""
def test_block_range(self):
# XXX ensure expected values
self.assertEqual(self.astroid.body[0].block_range(1), (1, 4))
self.assertEqual(self.astroid.body[0].block_range(2), (2, 2))
self.assertEqual(self.astroid.body[0].block_range(3), (3, 4))
self.assertEqual(self.astroid.body[0].block_range(4), (4, 4))
class TryExceptFinallyNodeTest(_NodeTest):
CODE = """
try:
print('pouet')
except Exception:
print ('oops')
finally:
print ('pouet')
"""
def test_block_range(self):
# XXX ensure expected values
self.assertEqual(self.astroid.body[0].block_range(1), (1, 6))
self.assertEqual(self.astroid.body[0].block_range(2), (2, 2))
self.assertEqual(self.astroid.body[0].block_range(3), (3, 4))
self.assertEqual(self.astroid.body[0].block_range(4), (4, 4))
self.assertEqual(self.astroid.body[0].block_range(5), (5, 5))
self.assertEqual(self.astroid.body[0].block_range(6), (6, 6))
@unittest.skipIf(six.PY3, "Python 2 specific test.")
class TryExcept2xNodeTest(_NodeTest):
CODE = """
try:
hello
except AttributeError, (retval, desc):
pass
"""
def test_tuple_attribute(self):
handler = self.astroid.body[0].handlers[0]
self.assertIsInstance(handler.name, nodes.Tuple)
class ImportNodeTest(resources.SysPathSetup, unittest.TestCase):
def setUp(self):
super(ImportNodeTest, self).setUp()
self.module = resources.build_file('data/module.py', 'data.module')
self.module2 = resources.build_file('data/module2.py', 'data.module2')
def test_import_self_resolve(self):
myos = next(self.module2.igetattr('myos'))
self.assertTrue(isinstance(myos, nodes.Module), myos)
self.assertEqual(myos.name, 'os')
self.assertEqual(myos.qname(), 'os')
self.assertEqual(myos.pytype(), '%s.module' % BUILTINS)
def test_from_self_resolve(self):
namenode = next(self.module.igetattr('NameNode'))
self.assertTrue(isinstance(namenode, nodes.ClassDef), namenode)
self.assertEqual(namenode.root().name, 'astroid.node_classes')
self.assertEqual(namenode.qname(), 'astroid.node_classes.Name')
self.assertEqual(namenode.pytype(), '%s.type' % BUILTINS)
abspath = next(self.module2.igetattr('abspath'))
self.assertTrue(isinstance(abspath, nodes.FunctionDef), abspath)
self.assertEqual(abspath.root().name, 'os.path')
self.assertEqual(abspath.qname(), 'os.path.abspath')
self.assertEqual(abspath.pytype(), '%s.function' % BUILTINS)
def test_real_name(self):
from_ = self.module['NameNode']
self.assertEqual(from_.real_name('NameNode'), 'Name')
imp_ = self.module['os']
self.assertEqual(imp_.real_name('os'), 'os')
self.assertRaises(exceptions.AttributeInferenceError, imp_.real_name, 'os.path')
imp_ = self.module['NameNode']
self.assertEqual(imp_.real_name('NameNode'), 'Name')
self.assertRaises(exceptions.AttributeInferenceError, imp_.real_name, 'Name')
imp_ = self.module2['YO']
self.assertEqual(imp_.real_name('YO'), 'YO')
self.assertRaises(exceptions.AttributeInferenceError, imp_.real_name, 'data')
def test_as_string(self):
ast = self.module['modutils']
self.assertEqual(ast.as_string(), "from astroid import modutils")
ast = self.module['NameNode']
self.assertEqual(ast.as_string(), "from astroid.node_classes import Name as NameNode")
ast = self.module['os']
self.assertEqual(ast.as_string(), "import os.path")
code = """from . import here
from .. import door
from .store import bread
from ..cave import wine\n\n"""
ast = abuilder.string_build(code)
self.assertMultiLineEqual(ast.as_string(), code)
def test_bad_import_inference(self):
# Explanation of the bug
'''When we import PickleError from nonexistent, a call to the infer
method of this From node will be made by unpack_infer.
inference.infer_from will try to import this module, which will fail and
raise an InferenceException (by mixins.do_import_module). The infer_name
will catch this exception and yield an Uninferable instead.
'''
code = '''
try:
from pickle import PickleError
except ImportError:
from nonexistent import PickleError
try:
pass
except PickleError:
pass
'''
module = builder.parse(code)
handler_type = module.body[1].handlers[0].type
excs = list(node_classes.unpack_infer(handler_type))
# The number of returned object can differ on Python 2
# and Python 3. In one version, an additional item will
# be returned, from the _pickle module, which is not
# present in the other version.
self.assertIsInstance(excs[0], nodes.ClassDef)
self.assertEqual(excs[0].name, 'PickleError')
self.assertIs(excs[-1], util.Uninferable)
def test_absolute_import(self):
module = resources.build_file('data/absimport.py')
ctx = contextmod.InferenceContext()
# will fail if absolute import failed
ctx.lookupname = 'message'
next(module['message'].infer(ctx))
ctx.lookupname = 'email'
m = next(module['email'].infer(ctx))
self.assertFalse(m.file.startswith(os.path.join('data', 'email.py')))
def test_more_absolute_import(self):
module = resources.build_file('data/module1abs/__init__.py', 'data.module1abs')
self.assertIn('sys', module.locals)
class CmpNodeTest(unittest.TestCase):
def test_as_string(self):
ast = abuilder.string_build("a == 2").body[0]
self.assertEqual(ast.as_string(), "a == 2")
class ConstNodeTest(unittest.TestCase):
def _test(self, value):
# pylint: disable=no-member; union type in const_factory, this shouldn't happen
node = nodes.const_factory(value)
self.assertIsInstance(node._proxied, nodes.ClassDef)
self.assertEqual(node._proxied.name, value.__class__.__name__)
self.assertIs(node.value, value)
self.assertTrue(node._proxied.parent)
self.assertEqual(node._proxied.root().name, value.__class__.__module__)
def test_none(self):
self._test(None)
def test_bool(self):
self._test(True)
def test_int(self):
self._test(1)
def test_float(self):
self._test(1.0)
def test_complex(self):
self._test(1.0j)
def test_str(self):
self._test('a')
def test_unicode(self):
self._test(u'a')
class NameNodeTest(unittest.TestCase):
def test_assign_to_True(self):
"""test that True and False assignments don't crash"""
code = """
True = False
def hello(False):
pass
del True
"""
if sys.version_info >= (3, 0):
with self.assertRaises(exceptions.AstroidBuildingError):
builder.parse(code)
else:
ast = builder.parse(code)
assign_true = ast['True']
self.assertIsInstance(assign_true, nodes.AssignName)
self.assertEqual(assign_true.name, "True")
del_true = ast.body[2].targets[0]
self.assertIsInstance(del_true, nodes.DelName)
self.assertEqual(del_true.name, "True")
class AnnAssignNodeTest(unittest.TestCase):
@test_utils.require_version(minver='3.6')
def test_primitive(self):
code = textwrap.dedent("""
test: int = 5
""")
assign = builder.extract_node(code)
self.assertIsInstance(assign, nodes.AnnAssign)
self.assertEqual(assign.target.name, "test")
self.assertEqual(assign.annotation.name, "int")
self.assertEqual(assign.value.value, 5)
self.assertEqual(assign.simple, 1)
@test_utils.require_version(minver='3.6')
def test_primitive_without_initial_value(self):
code = textwrap.dedent("""
test: str
""")
assign = builder.extract_node(code)
self.assertIsInstance(assign, nodes.AnnAssign)
self.assertEqual(assign.target.name, "test")
self.assertEqual(assign.annotation.name, "str")
self.assertEqual(assign.value, None)
@test_utils.require_version(minver='3.6')
def test_complex(self):
code = textwrap.dedent("""
test: Dict[List[str]] = {}
""")
assign = builder.extract_node(code)
self.assertIsInstance(assign, nodes.AnnAssign)
self.assertEqual(assign.target.name, "test")
self.assertIsInstance(assign.annotation, astroid.Subscript)
self.assertIsInstance(assign.value, astroid.Dict)
@test_utils.require_version(minver='3.6')
def test_as_string(self):
code = textwrap.dedent("""
print()
test: int = 5
test2: str
test3: List[Dict[(str, str)]] = []
""")
ast = abuilder.string_build(code)
self.assertEqual(ast.as_string().strip(), code.strip())
class ArgumentsNodeTC(unittest.TestCase):
def test_linenumbering(self):
ast = builder.parse('''
def func(a,
b): pass
x = lambda x: None
''')
self.assertEqual(ast['func'].args.fromlineno, 2)
self.assertFalse(ast['func'].args.is_statement)
xlambda = next(ast['x'].infer())
self.assertEqual(xlambda.args.fromlineno, 4)
self.assertEqual(xlambda.args.tolineno, 4)
self.assertFalse(xlambda.args.is_statement)
if sys.version_info < (3, 0):
self.assertEqual(ast['func'].args.tolineno, 3)
else:
self.skipTest('FIXME http://bugs.python.org/issue10445 '
'(no line number on function args)')
@test_utils.require_version(minver='3.0')
def test_kwoargs(self):
ast = builder.parse('''
def func(*, x):
pass
''')
args = ast['func'].args
self.assertTrue(args.is_argument('x'))
class UnboundMethodNodeTest(unittest.TestCase):
def test_no_super_getattr(self):
# This is a test for issue
# https://bitbucket.org/logilab/astroid/issue/91, which tests
# that UnboundMethod doesn't call super when doing .getattr.
ast = builder.parse('''
class A(object):
def test(self):
pass
meth = A.test
''')
node = next(ast['meth'].infer())
with self.assertRaises(exceptions.AttributeInferenceError):
node.getattr('__missssing__')
name = node.getattr('__name__')[0]
self.assertIsInstance(name, nodes.Const)
self.assertEqual(name.value, 'test')
class BoundMethodNodeTest(unittest.TestCase):
def test_is_property(self):
ast = builder.parse('''
import abc
def cached_property():
# Not a real decorator, but we don't care
pass
def reify():
# Same as cached_property
pass
def lazy_property():
pass
def lazyproperty():
pass
def lazy(): pass
class A(object):
@property
def builtin_property(self):
return 42
@abc.abstractproperty
def abc_property(self):
return 42
@cached_property
def cached_property(self): return 42
@reify
def reified(self): return 42
@lazy_property
def lazy_prop(self): return 42
@lazyproperty
def lazyprop(self): return 42
def not_prop(self): pass
@lazy
def decorated_with_lazy(self): return 42
cls = A()
builtin_property = cls.builtin_property
abc_property = cls.abc_property
cached_p = cls.cached_property
reified = cls.reified
not_prop = cls.not_prop
lazy_prop = cls.lazy_prop
lazyprop = cls.lazyprop
decorated_with_lazy = cls.decorated_with_lazy
''')
for prop in ('builtin_property', 'abc_property', 'cached_p', 'reified',
'lazy_prop', 'lazyprop', 'decorated_with_lazy'):
inferred = next(ast[prop].infer())
self.assertIsInstance(inferred, nodes.Const, prop)
self.assertEqual(inferred.value, 42, prop)
inferred = next(ast['not_prop'].infer())
self.assertIsInstance(inferred, bases.BoundMethod)
class AliasesTest(unittest.TestCase):
def setUp(self):
self.transformer = transforms.TransformVisitor()
def parse_transform(self, code):
module = parse(code, apply_transforms=False)
return self.transformer.visit(module)
def test_aliases(self):
def test_from(node):
node.names = node.names + [('absolute_import', None)]
return node
def test_class(node):
node.name = 'Bar'
return node
def test_function(node):
node.name = 'another_test'
return node
def test_callfunc(node):
if node.func.name == 'Foo':
node.func.name = 'Bar'
return node
return None
def test_assname(node):
if node.name == 'foo':
return nodes.AssignName('bar', node.lineno, node.col_offset,
node.parent)
return None
def test_assattr(node):
if node.attrname == 'a':
node.attrname = 'b'
return node
return None
def test_getattr(node):
if node.attrname == 'a':
node.attrname = 'b'
return node
return None
def test_genexpr(node):
if node.elt.value == 1:
node.elt = nodes.Const(2, node.lineno, node.col_offset,
node.parent)
return node
return None
self.transformer.register_transform(nodes.From, test_from)
self.transformer.register_transform(nodes.Class, test_class)
self.transformer.register_transform(nodes.Function, test_function)
self.transformer.register_transform(nodes.CallFunc, test_callfunc)
self.transformer.register_transform(nodes.AssName, test_assname)
self.transformer.register_transform(nodes.AssAttr, test_assattr)
self.transformer.register_transform(nodes.Getattr, test_getattr)
self.transformer.register_transform(nodes.GenExpr, test_genexpr)
string = '''
from __future__ import print_function
class Foo: pass
def test(a): return a
foo = Foo()
foo.a = test(42)
foo.a
(1 for _ in range(0, 42))
'''
module = self.parse_transform(string)
self.assertEqual(len(module.body[0].names), 2)
self.assertIsInstance(module.body[0], nodes.ImportFrom)
self.assertEqual(module.body[1].name, 'Bar')
self.assertIsInstance(module.body[1], nodes.ClassDef)
self.assertEqual(module.body[2].name, 'another_test')
self.assertIsInstance(module.body[2], nodes.FunctionDef)
self.assertEqual(module.body[3].targets[0].name, 'bar')
self.assertIsInstance(module.body[3].targets[0], nodes.AssignName)
self.assertEqual(module.body[3].value.func.name, 'Bar')
self.assertIsInstance(module.body[3].value, nodes.Call)
self.assertEqual(module.body[4].targets[0].attrname, 'b')
self.assertIsInstance(module.body[4].targets[0], nodes.AssignAttr)
self.assertIsInstance(module.body[5], nodes.Expr)
self.assertEqual(module.body[5].value.attrname, 'b')
self.assertIsInstance(module.body[5].value, nodes.Attribute)
self.assertEqual(module.body[6].value.elt.value, 2)
self.assertIsInstance(module.body[6].value, nodes.GeneratorExp)
@unittest.skipIf(six.PY3, "Python 3 doesn't have Repr nodes.")
def test_repr(self):
def test_backquote(node):
node.value.name = 'bar'
return node
self.transformer.register_transform(nodes.Backquote, test_backquote)
module = self.parse_transform('`foo`')
self.assertEqual(module.body[0].value.value.name, 'bar')
self.assertIsInstance(module.body[0].value, nodes.Repr)
class DeprecationWarningsTest(unittest.TestCase):
def test_asstype_warnings(self):
string = '''
class C: pass
c = C()
with warnings.catch_warnings(record=True) as w:
pass
'''
module = parse(string)
filter_stmts_mixin = module.body[0]
assign_type_mixin = module.body[1].targets[0]
parent_assign_type_mixin = module.body[2]
with warnings.catch_warnings(record=True) as w:
with test_utils.enable_warning(PendingDeprecationWarning):
filter_stmts_mixin.ass_type()
self.assertIsInstance(w[0].message, PendingDeprecationWarning)
with warnings.catch_warnings(record=True) as w:
with test_utils.enable_warning(PendingDeprecationWarning):
assign_type_mixin.ass_type()
self.assertIsInstance(w[0].message, PendingDeprecationWarning)
with warnings.catch_warnings(record=True) as w:
with test_utils.enable_warning(PendingDeprecationWarning):
parent_assign_type_mixin.ass_type()
self.assertIsInstance(w[0].message, PendingDeprecationWarning)
def test_isinstance_warnings(self):
msg_format = ("%r is deprecated and slated for removal in astroid "
"2.0, use %r instead")
for cls in (nodes.Discard, nodes.Backquote, nodes.AssName,
nodes.AssAttr, nodes.Getattr, nodes.CallFunc, nodes.From):
with warnings.catch_warnings(record=True) as w:
with test_utils.enable_warning(PendingDeprecationWarning):
isinstance(42, cls)
self.assertIsInstance(w[0].message, PendingDeprecationWarning)
actual_msg = msg_format % (cls.__class__.__name__, cls.__wrapped__.__name__)
self.assertEqual(str(w[0].message), actual_msg)
@test_utils.require_version('3.5')
class Python35AsyncTest(unittest.TestCase):
def test_async_await_keywords(self):
async_def, async_for, async_with, await_node = builder.extract_node('''
async def func(): #@
async for i in range(10): #@
f = __(await i)
async with test(): #@
pass
''')
self.assertIsInstance(async_def, nodes.AsyncFunctionDef)
self.assertIsInstance(async_for, nodes.AsyncFor)
self.assertIsInstance(async_with, nodes.AsyncWith)
self.assertIsInstance(await_node, nodes.Await)
self.assertIsInstance(await_node.value, nodes.Name)
def _test_await_async_as_string(self, code):
ast_node = parse(code)
self.assertEqual(ast_node.as_string().strip(), code.strip())
def test_await_as_string(self):
code = textwrap.dedent('''
async def function():
await 42
''')
self._test_await_async_as_string(code)
def test_asyncwith_as_string(self):
code = textwrap.dedent('''
async def function():
async with (42):
pass
''')
self._test_await_async_as_string(code)
def test_asyncfor_as_string(self):
code = textwrap.dedent('''
async def function():
async for i in range(10):
await 42
''')
self._test_await_async_as_string(code)
class ContextTest(unittest.TestCase):
def test_subscript_load(self):
node = builder.extract_node('f[1]')
self.assertIs(node.ctx, astroid.Load)
def test_subscript_del(self):
node = builder.extract_node('del f[1]')
self.assertIs(node.targets[0].ctx, astroid.Del)
def test_subscript_store(self):
node = builder.extract_node('f[1] = 2')
subscript = node.targets[0]
self.assertIs(subscript.ctx, astroid.Store)
def test_list_load(self):
node = builder.extract_node('[]')
self.assertIs(node.ctx, astroid.Load)
def test_list_del(self):
node = builder.extract_node('del []')
self.assertIs(node.targets[0].ctx, astroid.Del)
def test_list_store(self):
with self.assertRaises(exceptions.AstroidSyntaxError):
builder.extract_node('[0] = 2')
def test_tuple_load(self):
node = builder.extract_node('(1, )')
self.assertIs(node.ctx, astroid.Load)
def test_tuple_store(self):
with self.assertRaises(exceptions.AstroidSyntaxError):
builder.extract_node('(1, ) = 3')
@test_utils.require_version(minver='3.5')
def test_starred_load(self):
node = builder.extract_node('a = *b')
starred = node.value
self.assertIs(starred.ctx, astroid.Load)
@test_utils.require_version(minver='3.0')
def test_starred_store(self):
node = builder.extract_node('a, *b = 1, 2')
starred = node.targets[0].elts[1]
self.assertIs(starred.ctx, astroid.Store)
if __name__ == '__main__':
unittest.main()
|
|
#!/usr/bin/env python
# http://gerrit-documentation.googlecode.com/svn/Documentation/2.2.2/cmd-
# query.html
import pkg_resources
import subprocess
from datetime import datetime
import simplejson as json
import time
import tempfile
import textwrap
import pydoc
import os
VALID_SCORES = ['-2', '-1', '-0', '0', '+0', '+1', '+2']
def normalize_score(score):
if score not in VALID_SCORES:
raise Exception('Invalid score %r' % score)
if score in ('-0', '+0'):
score = '0'
return score
def arg_encode(arg):
# We're going to end up with the original value enclosed in single quotes,
# except for any single quotes in the original value; those get wrapped in
# double quotes instead. Yes, it's awkward to explain.
# test = 'test'
# "test" = '"test"'
# 'test' = "'"'test'"'"
# "it's" = '"it'"'"'s"'
arg = "'" + arg.replace("'", "'\"'\"'") + "'"
if arg.startswith("''"):
arg = arg[2:]
if arg.endswith("''"):
arg = arg[:-2]
return arg
def get_message(message):
if not message:
editor = os.environ.get(
'FGERRIT_EDITOR', os.environ.get('EDITOR', 'vi'))
with tempfile.NamedTemporaryFile() as fp:
p = subprocess.Popen('%s %s' % (editor, fp.name), shell=True)
retval = p.wait()
if retval != 0:
raise Exception('Error on editor exit code %d' % retval)
message = fp.read().strip()
if not message:
raise Exception('Abort, no message')
if message == '-':
message = ''
return message
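# Hedged behaviour sketch: get_message('lgtm') returns 'lgtm' unchanged;
# get_message('') opens $FGERRIT_EDITOR (or $EDITOR, defaulting to vi) on a
# temporary file and returns its stripped contents, with a lone '-' meaning
# "post no message".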
class FGerrit(object):
def __init__(self, ssh_user, ssh_host, project, ssh_port=29418,
status="open"):
self.ssh_user = ssh_user
self.ssh_host = ssh_host
self.ssh_port = ssh_port
self.project = project
self.status = status
term_info = os.popen('stty size', 'r').read().split()
self.term_rows = int(term_info[0])
self.full_width = int(term_info[1])
def _cprint(self, output):
"""either print output or invoke pager"""
if self.term_rows < sum([len(i.split('\n')) for i in output]):
pydoc.pager('\n'.join(output).decode('utf8').encode('utf8'))
else:
print '\n'.join(output).decode('utf8').encode('utf8')
def _conv_ts(self, timestamp, terse=False):
if terse:
when = time.time() - timestamp
if when < 60:
return '%4.1fs' % when
elif when < 3600:
return '%4.1fm' % (when / 60)
elif when < 86400:
return '%4.1fh' % (when / 3600)
else:
return '%4.1fd' % (when / 86400)
else:
return datetime.fromtimestamp(int(timestamp))
def _run_query(self, qargs, plain=False):
if not plain:
sshcmd = 'ssh -p %d %s@%s "gerrit query --format=JSON %s"' % \
(self.ssh_port, self.ssh_user, self.ssh_host, qargs)
else:
sshcmd = 'ssh -p %d %s@%s "gerrit query --format=TEXT %s"' % \
(self.ssh_port, self.ssh_user, self.ssh_host, qargs)
tmp = tempfile.TemporaryFile()
p = subprocess.Popen(sshcmd, shell=True, stdout=tmp,
stderr=subprocess.STDOUT)
retval = p.wait()
tmp.seek(0)
if retval != 0:
raise Exception('Error on ssh to gerrit %s' % tmp.readlines())
if not plain:
result = []
for line in tmp.readlines():
result.append(json.loads(line))
retval = p.wait()
return [x for x in result if 'status' in x]
else:
return " ".join(tmp.readlines())
def _run_cmd(self, cargs):
sshcmd = "ssh -p %d %s@%s %s" % (
self.ssh_port, self.ssh_user, self.ssh_host,
arg_encode('gerrit ' + cargs))
tmp = tempfile.TemporaryFile()
p = subprocess.Popen(sshcmd, shell=True, stdout=tmp,
stderr=subprocess.STDOUT)
retval = p.wait()
tmp.seek(0)
if retval != 0:
raise Exception('Error on ssh to gerrit %s' % tmp.readlines())
return " ".join(tmp.readlines())
def list_reviews(self):
return self._run_query(
'status:%s project:%s --current-patch-set' % (self.status,
self.project))
def _parse_approvals(self, review):
retval = [' ', ' ', ' ']
for i in review.get('currentPatchSet', {}).get('approvals', []):
typ = i['type']
idx = {'VRIF': 0, 'CRVW': 1, 'APRV': 2}.get(typ)
if idx is not None:
val = int(i['value'])
if val < 0:
retval[idx] = '-'
elif typ == 'CRVW':
if val > 1 and retval[idx] == ' ':
retval[idx] = '+'
elif val > 0 and retval[idx] == ' ':
retval[idx] = '+'
return retval
def get_review(self, review_id, comments=False, text=False):
"""Either a short id (5264) or long hash"""
if comments:
return self._run_query('commit:%s --current-patch-set --comments '
'--commit-message' % review_id, plain=text)
else:
return self._run_query(review_id, plain=text)
def delete_change(self, patchset):
payload = "review %s --delete" % patchset
return self._run_cmd(payload)
def abandon_change(self, patchset):
payload = "review %s --abandon" % patchset
return self._run_cmd(payload)
def restore_change(self, patchset):
payload = "review %s --restore" % patchset
return self._run_cmd(payload)
def post_message(self, review_id, message):
payload = "review %s --message=%s" % (review_id, arg_encode(message))
return self._run_cmd(payload)
def code_review(self, review_id, score, message=None):
score = normalize_score(score)
payload = 'review %s --code-review %s' % (review_id, score)
if message:
payload += ' --message=%s' % arg_encode(message)
return self._run_cmd(payload)
def approve_review(self, review_id, score):
score = normalize_score(score)
if score not in ('0', '+1'):
raise Exception('Approval score should be 0 or +1.')
payload = 'approve %s --approved %s' % (review_id, score)
return self._run_cmd(payload)
def print_reviews_list(self, reviews, show_wip=False, branches=['master']):
try:
mark = os.path.getmtime('.fgerrit-mark')
except OSError:
mark = 0
title = "Open Reviews for %s" % self.project
if mark:
title += " since " + time.asctime(time.localtime(mark))
tlen = len(title)
sep = "=" * (self.full_width - 1)
output = []
output.append(sep)
output.append(title + " " * (self.full_width - tlen - 1))
output.append(sep)
header_printed = False
for r in reviews:
if r['lastUpdated'] < mark:
continue
if r['status'] == 'WORKINPROGRESS' and not show_wip:
continue
if r['branch'] not in branches:
continue
if not header_printed:
output.append('ID When VCA Submitter: Description')
sep = "-" * (self.full_width - 1)
output.append(sep)
header_printed = True
s = ''
if r['status'] == 'WORKINPROGRESS':
s += '[WIP] '
if r['branch'] != 'master':
s += '[%s] ' % r['branch']
v, c, a = self._parse_approvals(r)
output.append('%s %s %s%s%s %s' % (
r['currentPatchSet']['revision'][:6],
self._conv_ts(r['lastUpdated'], terse=True),
v, c, a,
self.rewrap('%s <%s>: %s' % (
s + r['owner']['name'],
r['owner']['username'],
r['subject']), 20)))
output.append(sep)
self._cprint(output)
def rewrap(self, text, indent):
text_width = self.full_width - indent - 1
indention = '\n' + ' ' * indent
return indention.join(
indention.join(textwrap.wrap(v, width=text_width))
for v in text.split('\n')
)
def print_review(self, review_id):
data = self.get_review(review_id, comments=True)[0]
output = []
out = [
('Owner',
'%s <%s>' % (data['owner']['name'], data['owner']['username']))]
if data['branch'] != 'master':
out.append(('TARGETED BRANCH', data['branch']))
out.extend([
('Patch Set Number', data['currentPatchSet']['number']),
('Patch Set Date', time.asctime(time.localtime(int(
data['currentPatchSet']['createdOn'])))),
('Patch Set Id', data['currentPatchSet']['revision']),
('Patch Ref', data['currentPatchSet']['ref'])])
approvals = []
for approval in data['currentPatchSet'].get('approvals', []):
approvals.append('%+d %s' % (int(approval['value']),
approval['by']['username']))
out.extend([
('Status', ', '.join(sorted(approvals))),
('Commit Message', data['commitMessage'].strip())])
for comment in data.get('comments', []):
out.extend([
('Reviewer',
'%s <%s>' % (comment['reviewer']['name'],
comment['reviewer'].get('username', 'unknown'))),
('Date',
time.asctime(time.localtime(int(comment['timestamp'])))),
('Comment', comment['message'].strip())])
tlen = max(len(t) for t, v in out)
sep = '-' * (self.full_width - 1)
output.append(sep)
for title, value in out:
if title == 'Reviewer':
output.append(sep)
output.append(('%%0%ds %%s' % tlen) %
(title, self.rewrap(value, tlen + 2).encode('utf8')))
output.append(sep)
self._cprint(output)
def show(self, change_id):
data = self.get_review(change_id, comments=True)[0]
cmd = ['git', 'fetch', 'gerrit', data['currentPatchSet']['ref']]
error_code = subprocess.Popen(cmd).wait()
if error_code != 0:
raise Exception('Error code %d from %s' % (error_code, cmd))
cmd = ['git', 'show', 'FETCH_HEAD']
error_code = subprocess.Popen(cmd).wait()
if error_code != 0:
raise Exception('Error code %d from %s' % (error_code, cmd))
def checkout(self, change_id, patchset_number=None):
data = self.get_review(change_id, comments=True)[0]
ref = data['currentPatchSet']['ref']
if patchset_number:
ref = ref.rsplit('/', 1)[0] + '/' + patchset_number
else:
patchset_number = ref.rsplit('/', 1)[1]
cmd = ['git', 'fetch', 'gerrit', ref]
error_code = subprocess.Popen(cmd).wait()
if error_code != 0:
raise Exception('Error code %d from %s' % (error_code, cmd))
cmd = ['git', 'checkout', '-b',
'review-' + data.get('topic', change_id) +
'-ps' + patchset_number,
'FETCH_HEAD']
error_code = subprocess.Popen(cmd).wait()
if error_code != 0:
raise Exception('Error code %d from %s' % (error_code, cmd))
def diffsince(self, change_id, patchset_number=None):
data = self.get_review(change_id, comments=True)[0]
ref = data['currentPatchSet']['ref']
if patchset_number:
ref = ref.rsplit('/', 1)[0] + '/' + patchset_number
else:
patchset_number = ref.rsplit('/', 1)[1]
cmd = ['git', 'fetch', 'gerrit', ref]
error_code = subprocess.Popen(cmd).wait()
if error_code != 0:
raise Exception('Error code %d from %s' % (error_code, cmd))
cmd = ['git', 'diff', 'FETCH_HEAD..HEAD']
error_code = subprocess.Popen(cmd).wait()
if error_code != 0:
raise Exception('Error code %d from %s' % (error_code, cmd))
def submit(self):
if 'git-review' not in pkg_resources.working_set.by_key:
raise Exception('git-review is not installed')
cmd = ['git', 'review']
error_code = subprocess.Popen(cmd).wait()
if error_code != 0:
raise Exception('Error code %d from %s' % (error_code, cmd))
def draft(self):
if 'git-review' not in pkg_resources.working_set.by_key:
raise Exception('git-review is not installed')
cmd = ['git', 'review', '--draft']
error_code = subprocess.Popen(cmd).wait()
if error_code != 0:
raise Exception('Error code %d from %s' % (error_code, cmd))
|
|
# Copyright 2017,2018 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Handler for the image of the sdk API."""
import six
import json
import hashlib
import os
import uuid
from zvmsdk import config
from zvmsdk import constants as const
from zvmsdk import exception
from zvmsdk import log
from zvmsdk import returncode
from zvmsdk import utils
from zvmsdk.sdkwsgi.handlers import tokens
from zvmsdk.sdkwsgi import util
_FILEACTION = None
CONF = config.CONF
LOG = log.LOG
CHUNKSIZE = 4096
INVALID_CONTENT_TYPE = {
'overallRC': returncode.errors['RESTAPI'][0]['overallRC'],
'modID': returncode.errors['RESTAPI'][0]['modID'],
'rc': returncode.errors['RESTAPI'][0]['overallRC'],
'rs': 1,
'errmsg': '',
'output': ''}
FILE_OPERATION_ERROR = {
'overallRC': returncode.errors['file'][0]['overallRC'],
'modID': returncode.errors['file'][0]['modID'],
'rc': returncode.errors['file'][0]['overallRC'],
'rs': 1,
'errmsg': '',
'output': ''}
class FileAction(object):
def __init__(self):
self._pathutils = utils.PathUtils()
def file_import(self, fileobj):
try:
importDir = self._pathutils.create_file_repository(
const.FILE_TYPE['IMPORT'])
fname = str(uuid.uuid1())
target_fpath = '/'.join([importDir, fname])
# The following steps save the imported file into sdkserver
checksum = hashlib.md5()
bytes_written = 0
with open(target_fpath, 'wb') as f:
for buf in fileChunkReadable(fileobj, CHUNKSIZE):
bytes_written += len(buf)
checksum.update(buf)
f.write(buf)
checksum_hex = checksum.hexdigest()
LOG.debug("Wrote %(bytes_written)d bytes to %(target_image)s"
" with checksum %(checksum_hex)s" %
{'bytes_written': bytes_written,
'target_image': target_fpath,
'checksum_hex': checksum_hex})
return_data = {'filesize_in_bytes': bytes_written,
'dest_url': 'file://' + target_fpath,
'md5sum': checksum_hex}
results = {'overallRC': 0, 'modID': None,
'rc': 0, 'rs': 0,
'errmsg': '',
'output': return_data}
except OSError as err:
msg = ("File import error: %s, please check access right to "
"specified file or folder" % six.text_type(err))
LOG.error(msg)
results = FILE_OPERATION_ERROR
results.update({'rs': 1, 'errmsg': msg, 'output': ''})
except Exception as err:
# Cleanup the file from file repository
self._pathutils.clean_temp_folder(target_fpath)
msg = ("Exception happened during file import: %s" %
six.text_type(err))
LOG.error(msg)
results = FILE_OPERATION_ERROR
results.update({'rs': 1, 'errmsg': msg, 'output': ''})
return results
def file_export(self, fpath):
try:
if not os.path.exists(fpath):
msg = ("The specific file %s for export does not exist" %
fpath)
LOG.error(msg)
results = FILE_OPERATION_ERROR
results.update({'rs': 2,
'errmsg': msg, 'output': ''})
return results
offset = 0
file_size = os.path.getsize(fpath)
# file_size here is the file size in bytes
file_iter = iter(get_data(fpath,
offset=offset,
file_size=file_size))
return file_iter
except Exception as err:
msg = ("Exception happened during file export with error %s " %
six.text_type(err))
LOG.error(msg)
results = FILE_OPERATION_ERROR
results.update({'rs': 2, 'errmsg': msg, 'output': ''})
return results
def get_action():
global _FILEACTION
if _FILEACTION is None:
_FILEACTION = FileAction()
return _FILEACTION
@util.SdkWsgify
@tokens.validate
def file_import(request):
def _import(file_obj):
action = get_action()
return action.file_import(file_obj)
# Check if the request content type is valid
content_type = request.content_type
info = _content_type_validation(content_type)
if not info:
file_obj = request.body_file
info = _import(file_obj)
info_json = json.dumps(info)
request.response.body = utils.to_utf8(info_json)
request.response.status = util.get_http_code_from_sdk_return(info)
request.response.content_type = 'application/json'
return request.response
def _content_type_validation(content_type):
results = {}
if content_type not in ['application/octet-stream']:
msg = ('Invalid content type %s found for file import/export, the '
'supported content type is application/octet-stream' %
content_type)
LOG.error(msg)
results = INVALID_CONTENT_TYPE
results.update({'errmsg': msg})
return results
@util.SdkWsgify
@tokens.validate
def file_export(request):
def _export(fpath):
action = get_action()
return action.file_export(fpath)
body = util.extract_json(request.body)
fpath = body['source_file']
results = _export(fpath)
# if results is dict, means error happened.
if isinstance(results, dict):
info_json = json.dumps(results)
request.response.body = utils.to_utf8(info_json)
request.response.status = util.get_http_code_from_sdk_return(
results)
request.response.content_type = 'application/json'
return request.response
# Otherwise results is a file data iterator to stream back to the client.
else:
request.response.headers['Content-Type'] = 'application/octet-stream'
request.response.app_iter = results
request.response.status_int = 200
return request.response
def fileChunkReadable(file_obj, chunk_size=65536):
"""
Return an iterator that yields chunks of a preferred size when the object
is readable; otherwise return the file object unchanged.
:param file_obj: an iter which may be readable
:param chunk_size: maximum size of chunk
"""
if hasattr(file_obj, 'read'):
return fileChunkIter(file_obj, chunk_size)
else:
return file_obj
def fileChunkIter(file_object, file_chunk_size=65536):
"""
Return an iterator to a file-like object that yields fixed size chunks
:param file_object: a file-like object
:param file_chunk_size: maximum size of chunk
"""
while True:
chunk = file_object.read(file_chunk_size)
if chunk:
yield chunk
else:
break
def get_data(file_path, offset=0, file_size=None):
data = chunkedFile(file_path,
file_offset=offset,
file_chunk_size=CHUNKSIZE,
file_partial_length=file_size)
return get_chunk_data_iterator(data)
def get_chunk_data_iterator(data):
for chunk in data:
yield chunk
class chunkedFile(object):
"""
Send iterator to wsgi server so that it can iterate over a large file
"""
def __init__(self, file_path, file_offset=0, file_chunk_size=4096,
file_partial_length=None):
self.file_path = file_path
self.file_chunk_size = file_chunk_size
self.file_partial_length = file_partial_length
self.file_partial = self.file_partial_length is not None
self.file_object = open(self.file_path, 'rb')
if file_offset:
self.file_object.seek(file_offset)
def __iter__(self):
"""Return an iterator over the large file."""
try:
if self.file_object:
while True:
if self.file_partial:
size = min(self.file_chunk_size,
self.file_partial_length)
else:
size = self.file_chunk_size
chunk = self.file_object.read(size)
if chunk:
yield chunk
if self.file_partial:
self.file_partial_length -= len(chunk)
if self.file_partial_length <= 0:
break
else:
break
finally:
self.close()
def close(self):
"""Close the internal file pointer"""
if self.file_object:
self.file_object.close()
self.file_object = None
|
|
# Standard library imports
import argparse
import importlib
import json
import os
import sys
import textwrap
import time
from shutil import rmtree
# TODO:
# catch and log exceptions in examples files that fail to open
DIRECTORIES = {
'plotting-file' : '../../examples/plotting/file',
'plotting-notebook': '../../examples/plotting/notebook',
'server' : '../../examples/plotting/server',
'webgl' : '../../examples/webgl',
'models-file' : '../../examples/models/file',
'models-server' : '../../examples/models/server',
}
DEFAULT_TEST_FILES = [
'../../examples/plotting/file/stocks.py',
'../../examples/plotting/file/glucose.py',
'../../examples/plotting/server/hover.py',
]
SESSION_FILE = os.path.abspath("INTERACTIVE_TESTER_SESSION.json")
def get_parser():
"""Create the parser that will be used to add arguments to the script.
"""
parser = argparse.ArgumentParser(description=textwrap.dedent("""
Tests a selection of .py or .ipynb bokeh example files.
The --location option allows you to select a specific examples subdirectory to test all files in,
ignoring __init__.py
Location arguments can be any valid path to a folder with the examples, like:
-l /path/to/my/examples
or any of the pre-built keywords that point to the related examples:
- plotting-file
- plotting-notebook
- server
- models-file
- models-server
"""), formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('--no-log', action='store_true', dest='nolog', default=False,
help="don't save a log of any errors discovered")
parser.add_argument('-l', '--location', action='store', default=False,
help="example directory in which you wish to test")
parser.add_argument('--reuse-session', action='store_true', dest='reuseSession', default=False,
help="do not clean last session log and start from where you left")
parser.add_argument('--notebook-options', action='store', dest='notebookOptions', default="",
help="options to be forwarded to ipython notebook to customize it's behaviour")
return parser
def depend_check(dependency):
"""
Make sure a given dependency is installed
"""
try:
importlib.import_module(dependency)
found = True
except ImportError as e:
print("%s\nPlease use conda or pip to install the necessary dependency." % (e))
found = False
return found
def save_session(session):
"""
Save the session object to the SESSION_FILE
Args:
session(dict): dict with all the example files and results of each run
"""
with open(SESSION_FILE, 'w') as res_file:
json.dump(session, res_file)
def get_session():
"""
Return last stored session
"""
try:
with open(SESSION_FILE, 'r') as res_file:
return json.load(res_file)
except IOError:
return {}
def clean_session():
"""
Removes previous session file
"""
if os.path.exists(SESSION_FILE):
os.remove(SESSION_FILE)
def main(testing_ground=None, notebook_options=""):
"""
Collect and run .py or .ipynb examples from a set list or a given examples directory, ignoring __init__.py.
User input is collected to determine whether each page displayed properly or improperly.
"""
# Create a testing directory if one does not exist, then cd into it
testing_directory = 'tmp_test'
if not os.path.exists(testing_directory):
os.mkdir(testing_directory)
os.chdir(testing_directory)
if testing_ground:
log_name = results.location
TestFiles = [
fileName for fileName in os.listdir('%s/.' % testing_ground)
if fileName.endswith(('.py', '.ipynb')) and fileName != '__init__.py'
]
else:
log_name = "fast"
TestFiles = DEFAULT_TEST_FILES
Log = []
lastSession = get_session()
for index, fileName in enumerate(TestFiles):
if testing_ground:
fileName = "%s/%s" % (testing_ground, fileName)
try:
if not fileName in lastSession:
lastSession[fileName] = "TESTING..."
save_session(lastSession)
command = get_cmd(fileName, notebook_options)
opener(fileName, command)
if results.nolog:
# Don't display 'next file' message after opening final file in a dir
if index != len(TestFiles)-1:
input("\nPress enter to open next file ") # lgtm [py/use-of-input]
else:
ErrorReport = test_status()
if ErrorReport:
Log.append("\n\n%s: \n %s" % (fileName, ErrorReport))
lastSession[fileName] = ErrorReport
save_session(lastSession)
else:
prevRes = lastSession[fileName]
if prevRes == "TESTING...":
print("RESULT OF %s LAST RUN NOT REGISTERED!!" % fileName)
ErrorReport = test_status()
lastSession[fileName] = ErrorReport
save_session(lastSession)
else:
print("%s detected in last session: SKIPPING" % fileName)
except (KeyboardInterrupt, EOFError):
break
# exit the testing directory and delete it
os.chdir('../')
rmtree(testing_directory)
if Log:
logger(Log, log_name)
def get_cmd(some_file, notebook_options=""):
"""Determines how to open a file depending
on whether it is a .py or a .ipynb file
"""
if some_file.endswith('.py'):
command = "python"
elif some_file.endswith('.ipynb'):
command = "ipython notebook %s" % notebook_options
return command
def opener(some_file, command):
"""Print to screen what file is being opened and then open the file using
the command method provided.
"""
print("\nOpening %s\n" % some_file.strip('../'))
os.system("%s %s" % (command, some_file))
def test_status():
"""Collect user input to determine if a file displayed correctly or incorrectly.
In the case of incorrectly displayed plots, an 'ErrorReport' string is returned.
"""
status = input("Did the plot(s) display correctly? (y/n) ")
while not status.startswith(('y', 'n')):
print("")
status = input("Unexpected answer. Please type y or n. ") # lgtm [py/use-of-input]
if status.startswith('n'):
ErrorReport = input("Please describe the problem: ") # lgtm [py/use-of-input]
return ErrorReport
def logger(error_array, name):
"""
Log errors by appending to a .txt file. The file's name is derived from the
name arg; it is written to the current working directory.
"""
logfile = "%s_examples_testlog.txt" % name
if os.path.exists(logfile):
os.remove(logfile)
with open(logfile, 'a') as f:
print("")
print("\nWriting error log to %s" % logfile)
for error in error_array:
f.write("%s\n" % error)
if __name__ == '__main__':
if not depend_check('bokeh'):
sys.exit(1)
parser = get_parser()
results = parser.parse_args()
if results.location:
if results.location and results.location in DIRECTORIES:
target = results.location
test_dir = DIRECTORIES[target]
elif os.path.exists(results.location):
# in case target is not one of the recognized keys and is a
# valid path we can run the examples in that folder
test_dir = results.location
print("Running examples in custom location:", test_dir)
else:
print("Test location '%s' not recognized.\nPlease type 'python interactive_tester.py -h' for a list of valid test directories."
% results.location)
sys.exit(1)
else:
test_dir = None
if results.location == 'server' or test_dir is None:
print("Server examples require the bokeh server. Make sure you've typed 'bokeh serve' in another terminal tab.")
time.sleep(4)
if test_dir is None or 'notebook' in results.location:
print("Notebook examples require ipython-notebook. Make sure you have conda installed ipython-notebook")
time.sleep(4)
if not results.reuseSession:
print("cleaning previous session file...",)
clean_session()
print("OK")
main(test_dir, notebook_options=results.notebookOptions)
|
|
"""
The feed is an assembly of items of different content types.
For ease of querying, each different content type is housed in the FeedItem
model, which also houses metadata indicating the conditions under which it
should be included. So a feed is actually just a listing of FeedItem instances
that match the user's region and carrier.
Current content types able to be attached to FeedItem:
- `FeedApp` (via the `app` field)
- `FeedBrand` (via the `brand` field)
- `FeedCollection` (via the `collection` field)
"""
import os
from django.conf import settings
from django.core.exceptions import ValidationError
from django.db import models
from django.db.models.signals import post_delete
from django.dispatch import receiver
import mkt
import mkt.carriers
import mkt.regions
from mkt.constants.categories import CATEGORY_CHOICES
from mkt.feed import indexers
from mkt.ratings.validators import validate_rating
from mkt.site.decorators import use_master
from mkt.site.fields import ColorField
from mkt.site.models import ManagerBase, ModelBase
from mkt.translations.fields import PurifiedField, TranslatedField, save_signal
from mkt.webapps.models import clean_slug, Preview, Webapp
from mkt.webapps.tasks import index_webapps
from .constants import (BRAND_LAYOUT_CHOICES, BRAND_TYPE_CHOICES,
COLLECTION_TYPE_CHOICES,
FEEDAPP_TYPE_CHOICES)
class BaseFeedCollection(ModelBase):
"""
On the feed, there are a number of types of feed items that share a similar
structure: a slug, one or more member apps with a maintained sort order,
and a number of methods and common views for operating on those apps. This
is a base class for those feed items, including:
- Editorial Brands: `FeedBrand`
- Collections: `FeedCollection`
- Operator Shelves: `FeedShelf`
A series of base classes wraps the common code for these:
- BaseFeedCollection
- BaseFeedCollectionMembership
- BaseFeedCollectionSerializer
- BaseFeedCollectionViewSet
Subclasses of BaseFeedCollection must do a few things:
- Define an M2M field named `_apps` with a custom through model that
inherits from `BaseFeedCollectionMembership`.
- Set the `membership_class` class property to the custom through model
used by `_apps`.
- Set the `membership_relation` class property to the name of the relation
on the model.
"""
_apps = None
slug = models.CharField(blank=True, max_length=30, unique=True,
help_text='Used in collection URLs.')
membership_class = None
membership_relation = None
objects = ManagerBase()
class Meta:
abstract = True
ordering = ('-id',)
def save(self, **kw):
self.clean_slug()
return super(BaseFeedCollection, self).save(**kw)
@use_master
def clean_slug(self):
clean_slug(self, 'slug')
def apps(self):
"""
Public apps on the collection, ordered by their position in the
CollectionMembership model.
        Use this method every time you want to display apps for a collection
        to a user.
"""
filters = {
'disabled_by_user': False,
'status': mkt.STATUS_PUBLIC
}
return self._apps.order_by(self.membership_relation).filter(**filters)
def add_app(self, app, order=None):
"""
        Add an app to this collection. If `order` is specified, the membership
        is created with that order; if not, the app is added to the end of the
        collection.
"""
qs = self.membership_class.objects.filter(obj=self)
if order is None:
aggregate = qs.aggregate(models.Max('order'))['order__max']
order = aggregate + 1 if aggregate is not None else 0
rval = self.membership_class.objects.create(obj=self, app=app,
order=order)
        # Help django-cache-machine: it doesn't handle many-to-many relations
        # well; the cache is never invalidated properly when adding an object.
self.membership_class.objects.invalidate(*qs)
index_webapps.delay([app.pk])
return rval
def remove_app(self, app):
"""
Remove the passed app from this collection, returning a boolean
indicating whether a successful deletion took place.
"""
try:
membership = self.membership_class.objects.get(obj=self, app=app)
except self.membership_class.DoesNotExist:
return False
else:
membership.delete()
index_webapps.delay([app.pk])
return True
def remove_apps(self):
"""Remove all apps from collection."""
self.membership_class.objects.filter(obj=self).delete()
def set_apps(self, new_apps):
"""
Passed a list of app IDs, will remove all existing members on the
collection and create new ones for each of the passed apps, in order.
"""
self.remove_apps()
for app_id in new_apps:
self.add_app(Webapp.objects.get(pk=app_id))
index_webapps.delay(new_apps)
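# A minimal usage sketch (not part of the original module) illustrating the
# ordering behaviour implemented by BaseFeedCollection.add_app() and apps().
# It assumes a configured Django environment; `collection`, `app_a` and
# `app_b` are hypothetical, pre-existing model instances.
def _example_collection_ordering(collection, app_a, app_b):
    collection.add_app(app_a)   # first member gets order 0
    collection.add_app(app_b)   # next member gets max(order) + 1 == 1
    # apps() only returns public, non-disabled apps, sorted by membership
    # order via the `membership_relation` set on the concrete subclass.
    return list(collection.apps())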
class BaseFeedImage(models.Model):
image_hash = models.CharField(default=None, max_length=8, null=True,
blank=True)
class Meta:
abstract = True
class GroupedAppsMixin(object):
"""
An app's membership to a `FeedShelf` class, used as the through model for
`FeedShelf._apps`.
"""
def add_app_grouped(self, app, group, order=None):
"""
Add an app to this collection, as a member of the passed `group`.
        If `order` is specified, the membership is created with that order; if
        not, the app is added to the end of the collection.
"""
qs = self.membership_class.objects.filter(obj=self)
if order is None:
aggregate = qs.aggregate(models.Max('order'))['order__max']
order = aggregate + 1 if aggregate is not None else 0
rval = self.membership_class.objects.create(obj_id=self.id, app_id=app,
group=group, order=order)
        # Help django-cache-machine: it doesn't handle many-to-many relations
        # well; the cache is never invalidated properly when adding an object.
self.membership_class.objects.invalidate(*qs)
index_webapps.delay([app])
return rval
def set_apps_grouped(self, new_apps):
self.remove_apps()
for group in new_apps:
for app in group['apps']:
self.add_app_grouped(app, group['name'])
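# A minimal sketch (not part of the original module) of the payload shape
# accepted by GroupedAppsMixin.set_apps_grouped(): a list of groups, each
# with a name and the ordered app primary keys it contains. The shelf and
# the pk values are hypothetical.
def _example_set_apps_grouped(shelf):
    shelf.set_apps_grouped([
        {'name': 'Featured games', 'apps': [101, 102]},
        {'name': 'Utilities', 'apps': [103]},
    ])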
class BaseFeedCollectionMembership(ModelBase):
"""
A custom `through` model is required for the M2M field `_apps` on
subclasses of `BaseFeedCollection`. This model houses an `order` field that
maintains the order of apps in the collection. This model serves as an
abstract base class for the custom `through` models.
Subclasses must:
- Define a `ForeignKey` named `obj` that relates the app to the instance
being put on the feed.
"""
app = models.ForeignKey(Webapp)
order = models.SmallIntegerField(null=True)
obj = None
class Meta:
abstract = True
ordering = ('order',)
unique_together = ('obj', 'app',)
class FeedBrandMembership(BaseFeedCollectionMembership):
"""
An app's membership to a `FeedBrand` class, used as the through model for
`FeedBrand._apps`.
"""
obj = models.ForeignKey('FeedBrand')
class Meta(BaseFeedCollectionMembership.Meta):
abstract = False
db_table = 'mkt_feed_brand_membership'
class FeedBrand(BaseFeedCollection):
"""
Model for "Editorial Brands", a special type of collection that allows
editors to quickly create content without involving localizers by choosing
from one of a number of predefined, prelocalized titles.
"""
_apps = models.ManyToManyField(Webapp, through=FeedBrandMembership,
related_name='app_feed_brands')
layout = models.CharField(choices=BRAND_LAYOUT_CHOICES, max_length=30)
type = models.CharField(choices=BRAND_TYPE_CHOICES, max_length=30)
membership_class = FeedBrandMembership
membership_relation = 'feedbrandmembership'
class Meta(BaseFeedCollection.Meta):
abstract = False
db_table = 'mkt_feed_brand'
@classmethod
    def get_indexer(cls):
return indexers.FeedBrandIndexer
class FeedCollectionMembership(BaseFeedCollectionMembership):
"""
    An app's membership to a `FeedCollection` class, used as the through model
    for `FeedCollection._apps`.
"""
obj = models.ForeignKey('FeedCollection')
group = PurifiedField(blank=True, null=True)
class Meta(BaseFeedCollectionMembership.Meta):
abstract = False
db_table = 'mkt_feed_collection_membership'
class FeedCollection(GroupedAppsMixin, BaseFeedCollection,
BaseFeedImage):
"""
Model for "Collections", a type of curated collection that allows more
complex grouping of apps than an Editorial Brand.
"""
_apps = models.ManyToManyField(Webapp, through=FeedCollectionMembership,
related_name='app_feed_collections')
color = models.CharField(max_length=20, null=True, blank=True)
name = TranslatedField()
description = PurifiedField(blank=True, null=True)
type = models.CharField(choices=COLLECTION_TYPE_CHOICES, max_length=30,
null=True)
# Deprecated.
background_color = models.CharField(max_length=7, null=True, blank=True)
membership_class = FeedCollectionMembership
membership_relation = 'feedcollectionmembership'
class Meta(BaseFeedCollection.Meta):
abstract = False
db_table = 'mkt_feed_collection'
@classmethod
    def get_indexer(cls):
return indexers.FeedCollectionIndexer
def image_path(self, suffix=''):
return os.path.join(settings.FEED_COLLECTION_BG_PATH,
str(self.pk / 1000),
'feed_collection{suffix}_{pk}.png'.format(
suffix=suffix, pk=self.pk))
class FeedShelfMembership(BaseFeedCollectionMembership):
"""
An app's membership to a `FeedShelf` class, used as the through model for
`FeedShelf._apps`.
"""
group = PurifiedField(blank=True, null=True)
obj = models.ForeignKey('FeedShelf')
class Meta(BaseFeedCollectionMembership.Meta):
abstract = False
db_table = 'mkt_feed_shelf_membership'
class FeedShelf(GroupedAppsMixin, BaseFeedCollection, BaseFeedImage):
"""
Model for "Operator Shelves", a special type of collection that gives
operators a place to centralize content they wish to feature.
"""
_apps = models.ManyToManyField(Webapp, through=FeedShelfMembership,
related_name='app_shelves')
carrier = models.IntegerField(choices=mkt.carriers.CARRIER_CHOICES)
description = PurifiedField(null=True)
name = TranslatedField()
region = models.PositiveIntegerField(
choices=mkt.regions.REGIONS_CHOICES_ID)
# Shelf landing image.
image_landing_hash = models.CharField(default=None, max_length=8,
null=True, blank=True)
membership_class = FeedShelfMembership
membership_relation = 'feedshelfmembership'
class Meta(BaseFeedCollection.Meta):
abstract = False
db_table = 'mkt_feed_shelf'
@classmethod
    def get_indexer(cls):
return indexers.FeedShelfIndexer
def image_path(self, suffix=''):
return os.path.join(settings.FEED_SHELF_BG_PATH,
str(self.pk / 1000),
'feed_shelf{suffix}_{pk}.png'.format(
suffix=suffix, pk=self.pk))
@property
def is_published(self):
return self.feeditem_set.exists()
class FeedApp(BaseFeedImage, ModelBase):
"""
Model for "Custom Featured Apps", a feed item highlighting a single app
and some additional metadata (e.g. a review or a screenshot).
"""
app = models.ForeignKey(Webapp)
description = PurifiedField()
slug = models.CharField(max_length=30, unique=True)
color = models.CharField(max_length=20, null=True, blank=True)
type = models.CharField(choices=FEEDAPP_TYPE_CHOICES, max_length=30)
# Optionally linked to a Preview (screenshot or video).
preview = models.ForeignKey(Preview, null=True, blank=True)
# Optionally linked to a pull quote.
pullquote_attribution = models.CharField(max_length=50, null=True,
blank=True)
pullquote_rating = models.PositiveSmallIntegerField(
null=True, blank=True, validators=[validate_rating])
pullquote_text = PurifiedField(null=True)
# Deprecated.
background_color = ColorField(null=True)
class Meta:
db_table = 'mkt_feed_app'
@classmethod
    def get_indexer(cls):
return indexers.FeedAppIndexer
def clean(self):
"""
Require `pullquote_text` if `pullquote_rating` or
`pullquote_attribution` are set.
"""
if not self.pullquote_text and (self.pullquote_rating or
self.pullquote_attribution):
raise ValidationError('Pullquote text required if rating or '
'attribution is defined.')
super(FeedApp, self).clean()
def image_path(self, suffix=''):
return os.path.join(settings.FEATURED_APP_BG_PATH,
str(self.pk / 1000),
'featured_app{suffix}_{pk}.png'.format(
suffix=suffix, pk=self.pk))
class FeedItem(ModelBase):
"""
    A thin wrapper for all items that live on the feed, including metadata
    describing the conditions under which the feed item should be included
    in a user's feed.
"""
category = models.CharField(null=True, blank=True, max_length=30,
choices=CATEGORY_CHOICES)
region = models.PositiveIntegerField(
default=None, null=True, blank=True, db_index=True,
choices=mkt.regions.REGIONS_CHOICES_ID)
carrier = models.IntegerField(default=None, null=True, blank=True,
choices=mkt.carriers.CARRIER_CHOICES,
db_index=True)
order = models.SmallIntegerField(null=True)
item_type = models.CharField(max_length=30)
# Types of objects that may be contained by a feed item.
app = models.ForeignKey(FeedApp, blank=True, null=True)
brand = models.ForeignKey(FeedBrand, blank=True, null=True)
collection = models.ForeignKey(FeedCollection, blank=True, null=True)
shelf = models.ForeignKey(FeedShelf, blank=True, null=True)
class Meta:
db_table = 'mkt_feed_item'
ordering = ('order',)
index_together = (('region', 'carrier'),
('category', 'region', 'carrier'))
@classmethod
def get_indexer(cls):
return indexers.FeedItemIndexer
# Maintain ElasticSearch index.
@receiver(models.signals.post_save, sender=FeedApp,
dispatch_uid='feedapp.search.index')
@receiver(models.signals.post_save, sender=FeedBrand,
dispatch_uid='feedbrand.search.index')
@receiver(models.signals.post_save, sender=FeedCollection,
dispatch_uid='feedcollection.search.index')
@receiver(models.signals.post_save, sender=FeedShelf,
dispatch_uid='feedshelf.search.index')
@receiver(models.signals.post_save, sender=FeedItem,
dispatch_uid='feeditem.search.index')
def update_search_index(sender, instance, **kw):
instance.get_indexer().index_ids([instance.id])
# Delete ElasticSearch index on delete.
@receiver(models.signals.post_delete, sender=FeedApp,
dispatch_uid='feedapp.search.unindex')
@receiver(models.signals.post_delete, sender=FeedBrand,
dispatch_uid='feedbrand.search.unindex')
@receiver(models.signals.post_delete, sender=FeedCollection,
dispatch_uid='feedcollection.search.unindex')
@receiver(models.signals.post_delete, sender=FeedShelf,
dispatch_uid='feedshelf.search.unindex')
@receiver(models.signals.post_delete, sender=FeedItem,
dispatch_uid='feeditem.search.unindex')
def delete_search_index(sender, instance, **kw):
instance.get_indexer().unindex(instance.id)
# Save translations when saving instance with translated fields.
models.signals.pre_save.connect(
save_signal, sender=FeedApp,
dispatch_uid='feedapp_translations')
models.signals.pre_save.connect(
save_signal, sender=FeedCollection,
dispatch_uid='feedcollection_translations')
models.signals.pre_save.connect(
save_signal, sender=FeedCollectionMembership,
dispatch_uid='feedcollectionmembership_translations')
models.signals.pre_save.connect(
save_signal, sender=FeedShelf,
dispatch_uid='feedshelf_translations')
models.signals.pre_save.connect(
save_signal, sender=FeedShelfMembership,
dispatch_uid='feedshelfmembership_translations')
# Delete membership instances when their apps are deleted.
def remove_memberships(*args, **kwargs):
instance = kwargs.get('instance')
for cls in [FeedBrandMembership, FeedCollectionMembership,
FeedShelfMembership]:
cls.objects.filter(app_id=instance.pk).delete()
post_delete.connect(remove_memberships, sender=Webapp, weak=False,
dispatch_uid='cleanup_feed_membership')
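# A minimal sketch (not part of the original module) of how an app reaches
# the feed: it is wrapped in a FeedApp, which is then referenced by a
# FeedItem restricted to a region/carrier. The slug, description, type and
# region/carrier ids below are hypothetical placeholders and assume a
# configured Django environment.
def _example_publish_feed_app(webapp, region_id, carrier_id):
    feed_app = FeedApp.objects.create(
        app=webapp,
        description='Editorial blurb',
        slug='example-feed-app',
        type=FEEDAPP_TYPE_CHOICES[0][0])
    return FeedItem.objects.create(
        item_type='app', app=feed_app,
        region=region_id, carrier=carrier_id)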
|
|
# Copyright (c) 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
from murano.dsl import dsl
from murano.dsl import exceptions
from murano.tests.unit.dsl.foundation import object_model as om
from murano.tests.unit.dsl.foundation import test_case
class TestContracts(test_case.DslTestCase):
def setUp(self):
super(TestContracts, self).setUp()
self._runner = self.new_runner(
om.Object(
'ContractExamples',
ordinaryProperty='PROPERTY',
sampleClass=om.Object(
'SampleClass1',
stringProperty='string1',
classProperty=om.Object(
'SampleClass2',
class2Property='string2'))))
def test_string_contract(self):
result = self._runner.testStringContract('qwerty')
self.assertIsInstance(result, six.string_types)
self.assertEqual('qwerty', result)
def test_string_from_number_contract(self):
result = self._runner.testStringContract(123)
self.assertIsInstance(result, six.string_types)
self.assertEqual('123', result)
def test_string_null_contract(self):
self.assertIsNone(self._runner.testStringContract(None))
def test_int_contract(self):
result = self._runner.testIntContract(123)
self.assertIsInstance(result, int)
self.assertEqual(123, result)
def test_int_from_string_contract(self):
result = self._runner.testIntContract('456')
self.assertIsInstance(result, int)
self.assertEqual(456, result)
def test_int_from_string_contract_failure(self):
self.assertRaises(exceptions.ContractViolationException,
self._runner.testIntContract, 'nan')
def test_int_null_contract(self):
self.assertIsNone(self._runner.testIntContract(None))
def test_bool_contract(self):
result = self._runner.testBoolContract(True)
self.assertIsInstance(result, bool)
self.assertIs(result, True)
result = self._runner.testBoolContract(False)
self.assertIsInstance(result, bool)
self.assertIs(result, False)
def test_bool_from_int_contract(self):
result = self._runner.testBoolContract(10)
self.assertIsInstance(result, bool)
self.assertIs(result, True)
result = self._runner.testBoolContract(0)
self.assertIsInstance(result, bool)
self.assertIs(result, False)
def test_bool_from_string_contract(self):
result = self._runner.testBoolContract('something')
self.assertIsInstance(result, bool)
self.assertIs(result, True)
result = self._runner.testBoolContract('')
self.assertIsInstance(result, bool)
self.assertIs(result, False)
def test_bool_null_contract(self):
        self.assertIsNone(self._runner.testBoolContract(None))
def test_class_contract(self):
arg = om.Object('SampleClass2', class2Property='qwerty')
result = self._runner.testClassContract(arg)
self.assertIsInstance(result, dsl.MuranoObjectInterface)
def test_class_contract_by_ref(self):
arg = om.Object('SampleClass2', class2Property='qwerty')
result = self._runner.testClassContract(arg)
self.assertEqual(arg.id, result.id)
def test_class_contract_failure(self):
self.assertRaises(
exceptions.ContractViolationException,
self._runner.testClassContract, ['invalid type'])
def test_class_contract_by_ref_failure(self):
self.assertRaises(
exceptions.NoObjectFoundError,
self._runner.testClassContract, 'NoSuchIdExists')
def test_class_contract_from_dict(self):
self.assertEqual(
'SampleClass2',
self._runner.testClassContract({
'class2Property': 'str'}).type.name)
def test_class_from_id_contract(self):
object_id = self._runner.root.get_property('sampleClass').object_id
result = self._runner.testClassFromIdContract(object_id)
self.assertIsInstance(result, dsl.MuranoObjectInterface)
self.assertEqual(object_id, result.id)
def test_check_contract(self):
arg = om.Object('SampleClass2', class2Property='qwerty')
self.assertIsNone(self._runner.testCheckContract(arg, 100))
def test_check_contract_failure(self):
invalid_arg = om.Object('SampleClass2', class2Property='not qwerty')
self.assertRaises(exceptions.ContractViolationException,
self._runner.testCheckContract, invalid_arg, 100)
def test_owned_contract(self):
arg1 = self._runner.root.get_property('sampleClass')
arg2 = arg1.get_property('classProperty')
self.assertIsNone(self._runner.testOwnedContract(arg1, arg2))
def test_owned_contract_on_null(self):
self.assertIsNone(self._runner.testOwnedContract(None, None))
def test_owned_contract_failure(self):
arg1 = self._runner.root.get_property('sampleClass')
arg2 = arg1.get_property('classProperty')
invalid_arg2 = om.Object('SampleClass2', class2Property='string2')
invalid_arg1 = om.Object(
'SampleClass1',
stringProperty='string1',
classProperty=invalid_arg2)
self.assertRaises(exceptions.ContractViolationException,
self._runner.testOwnedContract, invalid_arg1, arg2)
self.assertRaises(exceptions.ContractViolationException,
self._runner.testOwnedContract, invalid_arg2, arg1)
def test_not_owned_contract(self):
arg2 = om.Object('SampleClass2', class2Property='string2')
arg1 = om.Object(
'SampleClass1',
stringProperty='string1',
classProperty=arg2)
self.assertIsNone(self._runner.testNotOwnedContract(arg1, arg2))
def test_not_owned_contract_on_null(self):
self.assertIsNone(self._runner.testNotOwnedContract(None, None))
def test_not_owned_contract_failure(self):
invalid_arg1 = self._runner.root.get_property('sampleClass')
invalid_arg2 = invalid_arg1.get_property('classProperty')
arg2 = om.Object('SampleClass2', class2Property='string2')
arg1 = om.Object(
'SampleClass1',
stringProperty='string1',
classProperty=arg2)
self.assertRaises(
exceptions.ContractViolationException,
self._runner.testNotOwnedContract, invalid_arg1, arg2)
self.assertRaises(
exceptions.ContractViolationException,
self._runner.testNotOwnedContract, invalid_arg2, arg1)
def test_scalar_contract(self):
self.assertEqual('fixed', self._runner.testScalarContract(
'fixed', 456, True))
def test_scalar_contract_failure(self):
self.assertRaises(
exceptions.ContractViolationException,
self._runner.testScalarContract,
'wrong', 456, True)
self.assertRaises(
exceptions.ContractViolationException,
self._runner.testScalarContract,
'fixed', 123, True)
self.assertRaises(
exceptions.ContractViolationException,
self._runner.testScalarContract,
'fixed', 456, False)
def test_list_contract(self):
self.assertEqual([3, 2, 1], self._runner.testListContract(
['3', 2, '1']))
def test_list_contract_from_scalar(self):
self.assertEqual([99], self._runner.testListContract('99'))
def test_list_contract_from_null(self):
self.assertEqual([], self._runner.testListContract(None))
def test_list_with_min_length_contract(self):
self.assertEqual(
[1, 2, 3],
self._runner.testListWithMinLengthContract([1, 2, 3]))
self.assertEqual(
[1, 2, 3, 4],
self._runner.testListWithMinLengthContract([1, 2, 3, 4]))
def test_list_with_min_length_contract_failure(self):
self.assertRaises(
exceptions.ContractViolationException,
self._runner.testListWithMinLengthContract, None)
self.assertRaises(
exceptions.ContractViolationException,
self._runner.testListWithMinLengthContract, [1, 2])
def test_list_with_min_max_length_contract(self):
self.assertEqual(
[1, 2],
self._runner.testListWithMinMaxLengthContract([1, 2]))
self.assertEqual(
[1, 2, 3, 4],
self._runner.testListWithMinMaxLengthContract([1, 2, 3, 4]))
def test_list_with_min_max_length_contract_failure(self):
self.assertRaises(
exceptions.ContractViolationException,
self._runner.testListWithMinMaxLengthContract, [1])
self.assertRaises(
exceptions.ContractViolationException,
self._runner.testListWithMinMaxLengthContract, [1, 2, 3, 4, 5])
def test_dict_contract(self):
self.assertEqual(
{'A': '123', 'B': 456},
self._runner.testDictContract({'A': '123', 'B': '456'}))
self.assertEqual(
{'A': '123', 'B': 456},
self._runner.testDictContract({'A': '123', 'B': '456', 'C': 'qq'}))
self.assertEqual(
{'A': '123', 'B': None},
self._runner.testDictContract({'A': '123'}))
def test_dict_contract_failure(self):
self.assertRaises(
exceptions.ContractViolationException,
self._runner.testDictContract, 'str')
def test_dict_expressions_contract(self):
self.assertEqual(
{321: 'qwerty', 99: 'val', 'B': 456},
self._runner.testDictExprContract({
'321': 'qwerty', '99': 'val', 'B': 456}))
def test_dict_expressions_contract_failure(self):
self.assertRaises(
exceptions.ContractViolationException,
self._runner.testDictExprContract,
{'321': 'qwerty', 'str': 'val', 'B': 456})
def test_invalid_dict_expr_contract(self):
self.assertRaises(
exceptions.DslContractSyntaxError,
self._runner.testDictMultiExprContract,
{'321': 'qwerty', 'str': 'val', 'B': 456})
def test_not_null_contract(self):
self.assertEqual('value', self._runner.testNotNullContract('value'))
def test_not_null_contract_failure(self):
self.assertRaises(
exceptions.ContractViolationException,
self._runner.testNotNullContract, None)
def test_default(self):
self.assertEqual('value', self._runner.testDefault('value'))
self.assertEqual('DEFAULT', self._runner.testDefault())
def test_default_expression(self):
self.assertEqual('PROPERTY', self._runner.testDefaultExpression())
self.assertEqual('value', self._runner.testDefaultExpression('value'))
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import dateutil.parser
import posixpath
from tornado.web import HTTPError
from IPython.html.services.notebooks.nbmanager import NotebookManager
from IPython.nbformat import current
from IPython.utils.traitlets import Dict, TraitError, Unicode
from IPython.utils.tz import utcnow
import uuid
from swiftclient import Connection, ClientException
class SwiftNotebookManager(NotebookManager):
"""A notebook manager that uses OpenStack Swift object storage"""
    # FIXME: IPython assumes this class attribute exists.
notebook_dir = ''
connection_args = Dict(
config=True,
help='OpenStack swift Connection parameters'
)
container = Unicode(
'notebooks', config=True,
help='Container name for notebooks.'
)
    # connection is implemented as a lazy property because the
    # connection needs to be established after __init__ has finished,
    # so we can finish the configuration in a subclass.
    # It also makes startup a great deal faster.
_connection = None
@property
def connection(self):
if self._connection:
return self._connection
try:
redacted_args = self.connection_args.copy()
redacted_args['key'] = 'XXX'
self.log.debug(redacted_args)
self._connection = Connection(**self.connection_args)
self._connection.put_container(self.container)
except ClientException as e:
if e.http_status == 404:
raise TraitError(
"Couldn't authenticate against the object store service: "
+ str(e))
else:
raise TraitError(
"Couldn't connect to notebook storage: " + str(e))
return self._connection
def path_exists(self, path):
self.log.debug(u"path_exists('{}')".format(path))
return True
def is_hidden(self, path):
self.log.debug(u"is_hidden('{}')".format(path))
return False
def _copy_object(self, old_path, new_path):
old_path = old_path.strip('/')
new_path = new_path.strip('/')
headers = {
u'X-Copy-From': posixpath.join(self.container, old_path)
}
self.connection.put_object(self.container, new_path,
contents=None, headers=headers)
def _move_object(self, old_path, new_path):
old_path = old_path.strip('/')
new_path = new_path.strip('/')
self._copy_object(old_path, new_path)
hdrs, conts = self.connection.get_object(self.container, old_path)
self.log.debug("before delete_object: {}".format(old_path))
self.log.debug(
"before delete_object:\nhdrs = {}\nconts = {}".format(hdrs, conts))
self.connection.delete_object(self.container, old_path)
def notebook_exists(self, name, path=''):
"""Returns a True if the notebook exists. Else, returns False."""
path = path.strip('/')
self.log.debug(u"notebook_exists('{}','{}')".format(name, path))
full_path = posixpath.join(path, name)
try:
self.connection.head_object(self.container, full_path)
return True
except ClientException as e:
if e.http_status == 404:
return False
else:
raise
# The method list_dirs is called by the server to identify
# the subdirectories in a given path.
def list_dirs(self, path):
"""List the directory models for a given API style path."""
self.log.debug(u"list_dirs('{}')".format(path))
return []
def list_notebooks(self, path=''):
"""Return a list of notebook dicts without content."""
path = path.strip('/')
self.log.debug(u"list_notebooks('{}')".format(path))
_, conts = self.connection.get_container(
self.container, prefix=path, delimiter='/')
notebooks = [{
'name': posixpath.basename(obj['name']),
'path': obj['name'],
'last_modified': dateutil.parser.parse(obj['last_modified']),
'created': dateutil.parser.parse(obj['last_modified']),
'type': 'notebook'}
for obj in conts if 'name' in obj]
notebooks = sorted(notebooks, key=lambda item: item['name'])
return notebooks
def get_notebook(self, name, path='', content=True):
"""Get the notebook model with or without content."""
path = path.strip('/')
self.log.debug(
u"get_notebook('{}','{}','{}')".format(name, path, content))
try:
full_path = posixpath.join(path, name)
if content:
hdrs, conts = self.connection.get_object(
self.container, full_path)
nb = current.reads(conts.decode('utf-8'), 'json')
self.mark_trusted_cells(nb, path, name)
last_modified = dateutil.parser.parse(hdrs['last-modified'])
created = dateutil.parser.parse(hdrs['last-modified'])
model = {
'name': name,
'path': path,
'last_modified': last_modified,
'created': created,
'type': 'notebook',
'content': nb
}
return model
else:
hdrs = self.connection.head_object(self.container, full_path)
last_modified = dateutil.parser.parse(hdrs['last-modified'])
created = dateutil.parser.parse(hdrs['last-modified'])
model = {
'name': name,
'path': path,
'last_modified': last_modified,
'created': created,
'type': 'notebook',
}
return model
except ClientException as e:
if e.http_status == 404:
raise HTTPError(
404, u'Notebook not found: %s' % full_path)
else:
raise
def save_notebook(self, model, name='', path=''):
"""Save the notebook model and return the model with no content."""
path = path.strip('/')
self.log.debug(
u"save_notebook('{}','{}','{}')".format(model, name, path))
if 'content' not in model:
raise HTTPError(400, u'No notebook JSON data provided')
# One checkpoint should always exist
# if (self.notebook_exists(name, path) and
# not self.list_checkpoints(name, path)):
# self.create_checkpoint(name, path)
new_name = model.get('name', name)
new_path = model.get('path', path).strip('/')
full_path = posixpath.join(new_path, new_name)
if path != new_path or name != new_name:
            self._rename_notebook(name, path, new_name, new_path)
nb = current.to_notebook_json(model['content'])
self.check_and_sign(nb, new_name, new_path)
if 'name' in nb['metadata']:
nb['metadata']['name'] = u''
data = current.writes(nb, u'json').encode('utf-8')
self.connection.put_object(self.container, full_path, data,
content_type='application/json')
# Return model
model = self.get_notebook(new_name, new_path, content=False)
return model
def update_notebook(self, model, name, path=''):
"""Update the notebook's path and/or name"""
path = path.strip('/')
self.log.debug(
u"update_notebook('{}','{}','{}')".format(model, name, path))
new_name = model.get('name', name)
new_path = model.get('path', path).strip('/')
if path != new_path or name != new_name:
self._rename_notebook(name, path, new_name, new_path)
model = self.get_notebook(new_name, new_path, content=False)
return model
def delete_notebook(self, name, path=''):
"""Delete notebook by name and path."""
path = path.strip('/')
self.log.debug(u"delete_notebook('{}','{}')".format(name, path))
checkpoints = self.list_checkpoints(name, path)
for checkpoint in checkpoints:
self.delete_checkpoint(checkpoint['id'], name, path)
try:
full_path = posixpath.join(path, name)
self.connection.delete_object(self.container, full_path)
except ClientException as e:
if e.http_status == 404:
raise HTTPError(
404, u'Notebook not found: %s' % full_path)
else:
raise
def _rename_notebook(self, old_name, old_path, new_name, new_path):
"""Rename a notebook."""
old_path = old_path.strip('/')
new_path = new_path.strip('/')
self.log.debug(u"_rename_notebook('{}','{}','{}','{}')".format(
old_name, old_path, new_name, new_path))
if new_name == old_name and new_path == old_path:
return
# Should we proceed with the move?
if self.notebook_exists(new_name, new_path):
raise HTTPError(
409, u'Notebook with name already exists: %s' % new_path)
# Move the checkpoints
checkpoints = self.list_checkpoints(old_name, old_path)
for checkpoint in checkpoints:
old_checkpoint_path = self._checkpoint_path(
checkpoint['id'], old_name, old_path)
new_checkpoint_path = self._checkpoint_path(
checkpoint['id'], new_name, new_path)
self._move_object(old_checkpoint_path, new_checkpoint_path)
new_full_path = posixpath.join(new_path, new_name)
old_full_path = posixpath.join(old_path, old_name)
self._move_object(old_full_path, new_full_path)
def _checkpoint_path(self, checkpoint_id, name, path):
return posixpath.join(path, name, checkpoint_id)
def create_checkpoint(self, name, path=''):
"""Create a checkpoint of the current state of a notebook"""
path = path.strip('/')
self.log.debug(u"create_checkpoint('{}','{}')".format(name, path))
checkpoint_id = unicode(uuid.uuid4())
full_path = posixpath.join(path, name)
checkpoint_path = self._checkpoint_path(checkpoint_id, name, path)
self._copy_object(full_path, checkpoint_path)
last_modified = utcnow()
return {'id': checkpoint_id, 'last_modified': last_modified}
def list_checkpoints(self, name, path=''):
"""Return a list of checkpoints for a given notebook"""
path = path.strip('/')
self.log.debug(u"list_checkpoints('{}','{}')".format(name, path))
full_path = posixpath.join(path, name)
_, data = \
self.connection.get_container(self.container,
prefix=full_path + '/',
delimiter='/')
self.log.debug(u"prefix={}".format(full_path + '/'))
self.log.debug(u"{}".format(data))
checkpoints = [{
'id': posixpath.basename(obj['name']),
'last_modified': dateutil.parser.parse(obj['last_modified'])
} for obj in data]
checkpoints = sorted(
checkpoints, key=lambda item: item['last_modified'])
self.log.debug(u"Checkpoints to list: {}".format(checkpoints))
return checkpoints
def restore_checkpoint(self, checkpoint_id, name, path=''):
"""Restore a notebook from one of its checkpoints"""
path = path.strip('/')
self.log.debug(u"restore_checkpoint('{}','{}','{}')"
.format(checkpoint_id, name, path))
assert name.endswith(self.filename_ext)
assert self.notebook_exists(name, path)
full_path = posixpath.join(path, name)
checkpoint_path = self._checkpoint_path(checkpoint_id, name, path)
self._copy_object(checkpoint_path, full_path)
def delete_checkpoint(self, checkpoint_id, name, path=''):
"""Delete a checkpoint for a notebook"""
path = path.strip('/')
self.log.debug(u"delete_checkpoint('{}','{}','{}')"
.format(checkpoint_id, name, path))
try:
checkpoint_path = self._checkpoint_path(checkpoint_id, name, path)
self.connection.delete_object(self.container, checkpoint_path)
except ClientException as e:
if e.http_status == 404:
raise HTTPError(
404, u'Checkpoint not found: %s' % checkpoint_path)
else:
raise
def info_string(self):
info = (u"Serving notebooks from OpenStack Swift "
"storage container: {}")
return info.format(self.container)
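# A minimal configuration sketch (not part of the original module) showing
# how this manager might be enabled from ipython_notebook_config.py. The
# module path, auth URL and credentials are hypothetical placeholders; the
# connection_args dict is passed straight through to swiftclient.Connection().
def _example_notebook_config(c):
    # `c` is the config object returned by get_config() inside the IPython
    # notebook configuration file.
    c.NotebookApp.notebook_manager_class = (
        'swift_notebook_manager.SwiftNotebookManager')
    c.SwiftNotebookManager.container = 'notebooks'
    c.SwiftNotebookManager.connection_args = {
        'authurl': 'https://keystone.example.com/v2.0/',
        'user': 'tenant:user',
        'key': 'secret',
        'auth_version': '2',
    }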
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 Nicira Networks, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# @author: Somik Behera, Nicira Networks, Inc.
# @author: Brad Hall, Nicira Networks, Inc.
# @author: Dan Wendlandt, Nicira Networks, Inc.
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker, exc, joinedload
from quantum.common import exceptions as q_exc
from quantum.plugins.cisco.db import models
_ENGINE = None
_MAKER = None
BASE = models.BASE
def configure_db(options):
"""
Establish the database, create an engine if needed, and
register the models.
:param options: Mapping of configuration options
"""
global _ENGINE
if not _ENGINE:
_ENGINE = create_engine(options['sql_connection'],
echo=False,
echo_pool=True,
pool_recycle=3600)
register_models()
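# A minimal usage sketch (not part of the original module): the options
# mapping only needs a 'sql_connection' entry. The in-memory SQLite URL is
# a hypothetical example suitable for local testing.
def _example_configure_db():
    configure_db({'sql_connection': 'sqlite://'})
    return get_session()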
def clear_db():
global _ENGINE
assert _ENGINE
for table in reversed(BASE.metadata.sorted_tables):
_ENGINE.execute(table.delete())
def get_session(autocommit=True, expire_on_commit=False):
"""Helper method to grab session"""
global _MAKER, _ENGINE
if not _MAKER:
assert _ENGINE
_MAKER = sessionmaker(bind=_ENGINE,
autocommit=autocommit,
expire_on_commit=expire_on_commit)
return _MAKER()
def register_models():
"""Register Models and create properties"""
global _ENGINE
assert _ENGINE
BASE.metadata.create_all(_ENGINE)
def unregister_models():
"""Unregister Models, useful clearing out data before testing"""
global _ENGINE
assert _ENGINE
BASE.metadata.drop_all(_ENGINE)
def _check_duplicate_net_name(tenant_id, net_name):
session = get_session()
try:
net = session.query(models.Network).\
filter_by(tenant_id=tenant_id, name=net_name).\
one()
raise q_exc.NetworkNameExists(tenant_id=tenant_id,
net_name=net_name, net_id=net.uuid)
except exc.NoResultFound:
# this is the "normal" path, as API spec specifies
# that net-names are unique within a tenant
pass
def network_create(tenant_id, name):
session = get_session()
_check_duplicate_net_name(tenant_id, name)
with session.begin():
net = models.Network(tenant_id, name)
session.add(net)
session.flush()
return net
def network_list(tenant_id):
session = get_session()
return session.query(models.Network).\
options(joinedload(models.Network.ports)). \
filter_by(tenant_id=tenant_id).\
all()
def network_get(net_id):
session = get_session()
try:
return session.query(models.Network).\
options(joinedload(models.Network.ports)). \
filter_by(uuid=net_id).\
one()
    except exc.NoResultFound:
raise q_exc.NetworkNotFound(net_id=net_id)
def network_rename(tenant_id, net_id, new_name):
session = get_session()
net = network_get(net_id)
_check_duplicate_net_name(tenant_id, new_name)
net.name = new_name
session.merge(net)
session.flush()
return net
def network_destroy(net_id):
session = get_session()
try:
net = session.query(models.Network).\
filter_by(uuid=net_id).\
one()
session.delete(net)
session.flush()
return net
except exc.NoResultFound:
raise q_exc.NetworkNotFound(net_id=net_id)
def port_create(net_id, state=None):
# confirm network exists
network_get(net_id)
session = get_session()
with session.begin():
port = models.Port(net_id)
port['state'] = state or 'DOWN'
session.add(port)
session.flush()
return port
def port_list(net_id):
session = get_session()
return session.query(models.Port).\
options(joinedload(models.Port.network)). \
filter_by(network_id=net_id).\
all()
def port_get(net_id, port_id):
# confirm network exists
network_get(net_id)
session = get_session()
try:
return session.query(models.Port).\
filter_by(uuid=port_id).\
filter_by(network_id=net_id).\
one()
except exc.NoResultFound:
raise q_exc.PortNotFound(net_id=net_id, port_id=port_id)
def port_set_state(net_id, port_id, new_state):
if new_state not in ('ACTIVE', 'DOWN'):
raise q_exc.StateInvalid(port_state=new_state)
# confirm network exists
network_get(net_id)
port = port_get(net_id, port_id)
session = get_session()
port.state = new_state
session.merge(port)
session.flush()
return port
def port_set_attachment(net_id, port_id, new_interface_id):
# confirm network exists
network_get(net_id)
session = get_session()
port = port_get(net_id, port_id)
if new_interface_id != "":
# We are setting, not clearing, the attachment-id
if port['interface_id']:
raise q_exc.PortInUse(net_id=net_id, port_id=port_id,
att_id=port['interface_id'])
try:
port = session.query(models.Port).\
filter_by(interface_id=new_interface_id).\
one()
raise q_exc.AlreadyAttached(net_id=net_id,
port_id=port_id,
att_id=new_interface_id,
att_port_id=port['uuid'])
except exc.NoResultFound:
# this is what should happen
pass
port.interface_id = new_interface_id
session.merge(port)
session.flush()
return port
def port_unset_attachment(net_id, port_id):
# confirm network exists
network_get(net_id)
session = get_session()
port = port_get(net_id, port_id)
port.interface_id = None
session.merge(port)
session.flush()
return port
def port_destroy(net_id, port_id):
# confirm network exists
network_get(net_id)
session = get_session()
try:
port = session.query(models.Port).\
filter_by(uuid=port_id).\
filter_by(network_id=net_id).\
one()
if port['interface_id']:
raise q_exc.PortInUse(net_id=net_id, port_id=port_id,
att_id=port['interface_id'])
session.delete(port)
session.flush()
return port
except exc.NoResultFound:
raise q_exc.PortNotFound(port_id=port_id)
# Methods using just port_id.
def port_get_by_id(port_id):
session = get_session()
try:
return session.query(models.Port).\
filter_by(uuid=port_id).one()
except exc.NoResultFound:
raise q_exc.PortNotFound(port_id=port_id)
def port_set_attachment_by_id(port_id, new_interface_id):
session = get_session()
port = port_get_by_id(port_id)
if new_interface_id != "":
if port['interface_id']:
raise q_exc.PortInUse(port_id=port_id,
att_id=port['interface_id'])
try:
port = session.query(models.Port).\
filter_by(interface_id=new_interface_id).\
one()
raise q_exc.AlreadyAttached(port_id=port_id,
att_id=new_interface_id,
att_port_id=port['uuid'])
except exc.NoResultFound:
pass
port.interface_id = new_interface_id
session.merge(port)
session.flush()
return port
def port_unset_attachment_by_id(port_id):
session = get_session()
port = port_get_by_id(port_id)
port.interface_id = None
session.merge(port)
session.flush()
return port
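# A minimal end-to-end sketch (not part of the original module) of the CRUD
# helpers above; it assumes configure_db() has already been called and the
# tenant/interface names are hypothetical placeholders.
def _example_network_port_workflow():
    net = network_create('tenant-1', 'net-1')
    port = port_create(net.uuid, state='ACTIVE')
    port_set_attachment(net.uuid, port.uuid, 'interface-1')
    port_unset_attachment(net.uuid, port.uuid)
    port_destroy(net.uuid, port.uuid)
    network_destroy(net.uuid)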
|
|
#!/usr/bin/env python
#
# Copyright (c) 2016, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import os
import sys
import time
if sys.platform != 'win32':
import node_cli
else:
import node_api
import unittest
import config
class Node:
def __init__(self, nodeid, is_mtd=False):
if sys.platform != 'win32':
self.interface = node_cli.otCli(nodeid, is_mtd)
else:
self.interface = node_api.otApi(nodeid)
self.interface.clear_whitelist()
self.interface.disable_whitelist()
self.interface.set_timeout(100)
def __del__(self):
del self.interface
def set_mode(self, mode):
self.interface.set_mode(mode)
def debug(self, level):
self.interface.debug(level)
def interface_up(self):
self.interface.interface_up()
def interface_down(self):
self.interface.interface_down()
def thread_start(self):
self.interface.thread_start()
def thread_stop(self):
self.interface.thread_stop()
def commissioner_start(self):
self.interface.commissioner_start()
def commissioner_add_joiner(self, addr, psk):
self.interface.commissioner_add_joiner(addr, psk)
def joiner_start(self, pskd='', provisioning_url=''):
self.interface.joiner_start(pskd, provisioning_url)
def start(self):
self.interface.interface_up()
self.interface.thread_start()
def stop(self):
self.interface.thread_stop()
self.interface.interface_down()
def clear_whitelist(self):
self.interface.clear_whitelist()
def enable_whitelist(self):
self.interface.enable_whitelist()
def disable_whitelist(self):
self.interface.disable_whitelist()
def add_whitelist(self, addr, rssi=None):
self.interface.add_whitelist(addr, rssi)
def remove_whitelist(self, addr):
self.interface.remove_whitelist(addr)
def get_addr16(self):
return self.interface.get_addr16()
def get_router_id(self):
return self.interface.get_router_id()
def get_addr64(self):
return self.interface.get_addr64()
def get_eui64(self):
return self.interface.get_eui64()
def get_joiner_id(self):
return self.interface.get_joiner_id()
def get_channel(self):
return self.interface.get_channel()
def set_channel(self, channel):
self.interface.set_channel(channel)
def get_masterkey(self):
return self.interface.get_masterkey()
def set_masterkey(self, masterkey):
self.interface.set_masterkey(masterkey)
def get_key_sequence_counter(self):
return self.interface.get_key_sequence_counter()
def set_key_sequence_counter(self, key_sequence_counter):
self.interface.set_key_sequence_counter(key_sequence_counter)
def set_key_switch_guardtime(self, key_switch_guardtime):
self.interface.set_key_switch_guardtime(key_switch_guardtime)
def set_network_id_timeout(self, network_id_timeout):
self.interface.set_network_id_timeout(network_id_timeout)
def get_network_name(self):
return self.interface.get_network_name()
def set_network_name(self, network_name):
self.interface.set_network_name(network_name)
def get_panid(self):
return self.interface.get_panid()
    def set_panid(self, panid=config.PANID):
self.interface.set_panid(panid)
def get_partition_id(self):
return self.interface.get_partition_id()
def set_partition_id(self, partition_id):
self.interface.set_partition_id(partition_id)
def set_router_upgrade_threshold(self, threshold):
self.interface.set_router_upgrade_threshold(threshold)
def set_router_downgrade_threshold(self, threshold):
self.interface.set_router_downgrade_threshold(threshold)
def release_router_id(self, router_id):
self.interface.release_router_id(router_id)
def get_state(self):
return self.interface.get_state()
def set_state(self, state):
self.interface.set_state(state)
def get_timeout(self):
return self.interface.get_timeout()
def set_timeout(self, timeout):
self.interface.set_timeout(timeout)
def set_max_children(self, number):
self.interface.set_max_children(number)
def get_weight(self):
return self.interface.get_weight()
def set_weight(self, weight):
self.interface.set_weight(weight)
def add_ipaddr(self, ipaddr):
self.interface.add_ipaddr(ipaddr)
def get_addrs(self):
return self.interface.get_addrs()
def add_service(self, enterpriseNumber, serviceData, serverData):
self.interface.add_service(enterpriseNumber, serviceData, serverData)
def remove_service(self, enterpriseNumber, serviceData):
self.interface.remove_service(enterpriseNumber, serviceData)
def get_ip6_address(self, address_type):
return self.interface.get_ip6_address(address_type)
def get_context_reuse_delay(self):
return self.interface.get_context_reuse_delay()
def set_context_reuse_delay(self, delay):
self.interface.set_context_reuse_delay(delay)
    def add_prefix(self, prefix, flags, prf='med'):
self.interface.add_prefix(prefix, flags, prf)
def remove_prefix(self, prefix):
self.interface.remove_prefix(prefix)
    def add_route(self, prefix, prf='med'):
self.interface.add_route(prefix, prf)
def remove_route(self, prefix):
self.interface.remove_route(prefix)
def register_netdata(self):
self.interface.register_netdata()
def energy_scan(self, mask, count, period, scan_duration, ipaddr):
self.interface.energy_scan(mask, count, period, scan_duration, ipaddr)
def panid_query(self, panid, mask, ipaddr):
self.interface.panid_query(panid, mask, ipaddr)
def scan(self):
return self.interface.scan()
def ping(self, ipaddr, num_responses=1, size=None, timeout=5000):
return self.interface.ping(ipaddr, num_responses, size, timeout)
def reset(self):
return self.interface.reset()
def set_router_selection_jitter(self, jitter):
self.interface.set_router_selection_jitter(jitter)
def set_active_dataset(self, timestamp, panid=None, channel=None, channel_mask=None, master_key=None):
self.interface.set_active_dataset(timestamp, panid, channel, channel_mask, master_key)
def set_pending_dataset(self, pendingtimestamp, activetimestamp, panid=None, channel=None):
self.interface.set_pending_dataset(pendingtimestamp, activetimestamp, panid, channel)
def announce_begin(self, mask, count, period, ipaddr):
self.interface.announce_begin(mask, count, period, ipaddr)
def send_mgmt_active_set(self, active_timestamp=None, channel=None, channel_mask=None, extended_panid=None,
panid=None, master_key=None, mesh_local=None, network_name=None, binary=None):
self.interface.send_mgmt_active_set(active_timestamp, channel, channel_mask, extended_panid, panid,
master_key, mesh_local, network_name, binary)
def send_mgmt_pending_set(self, pending_timestamp=None, active_timestamp=None, delay_timer=None, channel=None,
panid=None, master_key=None, mesh_local=None, network_name=None):
self.interface.send_mgmt_pending_set(pending_timestamp, active_timestamp, delay_timer, channel, panid,
master_key, mesh_local, network_name)
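# A minimal sketch (not part of the original module) of bringing up a
# two-node Thread network with the wrapper above. The node ids, panid and
# the 'rsdn' mode string are hypothetical values following the conventions
# of the OpenThread test scripts; the sleep timings may need tuning.
def _example_two_node_network():
    leader, router = Node(1), Node(2)
    for node in (leader, router):
        node.set_panid(0xface)
        node.set_mode('rsdn')
    leader.add_whitelist(router.get_addr64())
    router.add_whitelist(leader.get_addr64())
    leader.enable_whitelist()
    router.enable_whitelist()
    leader.start()
    time.sleep(5)
    router.start()
    time.sleep(5)
    return leader.get_state(), router.get_state()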
if __name__ == '__main__':
unittest.main()
|
|
import pyblish.api
class IntegrateAvalonAsset(pyblish.api.InstancePlugin):
"""Write to files and metadata
This plug-in exposes your data to others by encapsulating it
into a new version.
"""
label = "Asset"
order = pyblish.api.IntegratorOrder
families = [
"mindbender.model",
"mindbender.rig",
"mindbender.animation",
"mindbender.lookdev",
"mindbender.historyLookdev",
"mindbender.group",
"mindbender.imagesequence",
]
def process(self, instance):
import os
import errno
import shutil
from pprint import pformat
from avalon import api, io
from avalon.vendor import filelink
# Required environment variables
PROJECT = api.Session["AVALON_PROJECT"]
ASSET = instance.data.get("asset") or api.Session["AVALON_ASSET"]
SILO = api.Session["AVALON_SILO"]
LOCATION = api.Session["AVALON_LOCATION"]
context = instance.context
# Atomicity
#
# Guarantee atomic publishes - each asset contains
# an identical set of members.
# __
# / o
# / \
# | o |
# \ /
# o __/
#
assert all(result["success"] for result in context.data["results"]), (
"Atomicity not held, aborting.")
# Assemble
#
# |
# v
# ---> <----
# ^
# |
#
stagingdir = instance.data.get("stagingDir")
assert stagingdir, ("Incomplete instance \"%s\": "
"Missing reference to staging area." % instance)
self.log.debug("Establishing staging directory @ %s" % stagingdir)
project = io.find_one({"type": "project"})
asset = io.find_one({"name": ASSET})
assert all([project, asset]), ("Could not find current project or "
"asset '%s'" % ASSET)
subset = io.find_one({"type": "subset",
"parent": asset["_id"],
"name": instance.data["subset"]})
if subset is None:
subset_name = instance.data["subset"]
self.log.info("Subset '%s' not found, creating.." % subset_name)
_id = io.insert_one({
"schema": "avalon-core:subset-2.0",
"type": "subset",
"name": subset_name,
"data": {},
"parent": asset["_id"]
}).inserted_id
subset = io.find_one({"_id": _id})
latest_version = io.find_one({"type": "version",
"parent": subset["_id"]},
{"name": True},
sort=[("name", -1)])
next_version = 1
if latest_version is not None:
next_version += latest_version["name"]
self.log.debug("Next version: %i" % next_version)
version = {
"schema": "avalon-core:version-2.0",
"type": "version",
"parent": subset["_id"],
"name": next_version,
"locations": [LOCATION] if LOCATION else [],
"data": {
"families": (
instance.data.get("families", list()) +
[instance.data["family"]]
),
# Enable overriding with current information from instance
"time": instance.data.get("time", context.data["time"]),
"author": instance.data.get("user", context.data["user"]),
"source": instance.data.get(
"source", context.data["currentFile"]).replace(
api.registered_root(), "{root}"
).replace("\\", "/"),
"comment": context.data.get("comment")
}
}
self.log.debug("Creating version: %s" % pformat(version))
version_id = io.insert_one(version).inserted_id
# Write to disk
# _
# | |
# _| |_
# ____\ /
# |\ \ / \
# \ \ v \
# \ \________.
# \|________|
#
template_data = {
"root": api.registered_root(),
"project": PROJECT,
"silo": SILO,
"asset": ASSET,
"subset": subset["name"],
"version": version["name"],
}
template_publish = project["config"]["template"]["publish"]
if "output" not in instance.data:
instance.data["output"] = list()
def copy(src, dst):
dirname = os.path.dirname(dst)
try:
os.makedirs(dirname)
except OSError as e:
if e.errno == errno.EEXIST:
pass
else:
self.log.critical("An unexpected error occurred.")
raise
try:
filelink.create(src, dst)
self.log.info("Linking %s -> %s" % (src, dst))
except Exception:
# Revert to a normal copy
# TODO(marcus): Once filelink is proven stable,
# improve upon or remove this fallback.
shutil.copy(src, dst)
self.log.info("Linking failed, copying %s -> %s"
% (src, dst))
for _ in instance.data["files"]:
# Collection
# _______
# |______|\
# | |\|
# | ||
# | ||
# | ||
# |_______|
#
if isinstance(_, list):
collection = _
# Assert that each member has identical suffix
_, ext = os.path.splitext(collection[0])
assert all(ext == os.path.splitext(name)[1]
for name in collection), (
"Files had varying suffixes, this is a bug"
)
template_data["representation"] = ext[1:]
for fname in collection:
src = os.path.join(stagingdir, fname)
dst = os.path.join(
template_publish.format(**template_data),
fname
)
copy(src, dst)
instance.data["output"].append(dst)
else:
# Single file
# _______
# | |\
# | |
# | |
# | |
# |_______|
#
fname = _
_, ext = os.path.splitext(fname)
template_data["representation"] = ext[1:]
src = os.path.join(stagingdir, fname)
dst = template_publish.format(**template_data)
copy(src, dst)
instance.data["output"].append(dst)
representation = {
"schema": "avalon-core:representation-2.0",
"type": "representation",
"parent": version_id,
"name": template_data["representation"],
"data": {},
"dependencies": instance.data.get("dependencies", "").split(),
# Imprint shortcut to context for performance reasons.
"context": {
"project": PROJECT,
"asset": ASSET,
"silo": SILO,
"subset": subset["name"],
"version": version["name"],
"representation": template_data["representation"]
}
}
io.insert_one(representation)
context.data["published_version"] = str(version_id)
self.log.info("Successfully integrated \"%s\" to \"%s\"" % (
instance, dst))
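# A minimal sketch (not part of the original plug-in) of the instance data
# this integrator expects to have been filled in by earlier collectors and
# extractors; the family, subset and file names are hypothetical. Note that
# "files" may mix single file names and collections (lists of frames).
EXAMPLE_INSTANCE_DATA = {
    "family": "mindbender.model",
    "subset": "modelDefault",
    "stagingDir": "/tmp/publish_staging",
    "files": [
        "modelDefault.ma",
        ["render.0001.exr", "render.0002.exr"],
    ],
}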
|
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import print_function
from __future__ import with_statement
import glob
import os
import hmac
import hashlib
import shutil
import socket
import subprocess
import struct
from twisted.internet import defer
from twisted.internet.interfaces import IProtocolFactory
from twisted.internet.endpoints import serverFromString
from zope.interface import implementer
try:
import GeoIP as _GeoIP
GeoIP = _GeoIP
except ImportError:
GeoIP = None
city = None
country = None
asn = None
# XXX probably better to depend on and use "six" for py2/3 stuff?
try:
unicode
except NameError:
py3k = True
basestring = str
else:
py3k = False
basestring = basestring
def create_geoip(fname):
# It's more "pythonic" to just wait for the exception,
# but GeoIP prints out "Can't open..." messages for you,
# which isn't desired here
if not os.path.isfile(fname):
raise IOError("Can't find %s" % fname)
if GeoIP is None:
return None
# just letting any errors make it out
return GeoIP.open(fname, GeoIP.GEOIP_STANDARD)
def maybe_create_db(path):
try:
return create_geoip(path)
except IOError:
return None
city, asn, country = list(map(maybe_create_db,
("/usr/share/GeoIP/GeoLiteCity.dat",
"/usr/share/GeoIP/GeoIPASNum.dat",
"/usr/share/GeoIP/GeoIP.dat")))
try:
import ipaddr as _ipaddr
ipaddr = _ipaddr
except ImportError:
ipaddr = None
def is_executable(path):
"""Checks if the given path points to an existing, executable file"""
return os.path.isfile(path) and os.access(path, os.X_OK)
def find_tor_binary(globs=('/usr/sbin/', '/usr/bin/',
'/Applications/TorBrowser_*.app/Contents/MacOS/'),
system_tor=True):
"""
Tries to find the tor executable using the shell first or in in the
paths whose glob-patterns is in the given 'globs'-tuple.
:param globs:
A tuple of shell-style globs of directories to use to find tor
(TODO consider making that globs to actual tor binary?)
:param system_tor:
        This controls whether the shell is used to search for 'tor' or
        not. If False, we skip that check and use only the 'globs'
        tuple.
"""
# Try to find the tor executable using the shell
if system_tor:
try:
proc = subprocess.Popen(
('which tor'),
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
shell=True
)
except OSError:
pass
else:
stdout, _ = proc.communicate()
if proc.poll() == 0 and stdout != '':
return stdout.strip()
    # the shell lookup may fail, and tor is usually not on PATH when using
    # the browser-bundle, so look in the specific places given by 'globs'
for pattern in globs:
for path in glob.glob(pattern):
torbin = os.path.join(path, 'tor')
if is_executable(torbin):
return torbin
return None
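# Illustrative sketch (not part of the original module): typical use of
# find_tor_binary, preferring a system tor and falling back to the
# Tor Browser bundle globs above.
def _example_locate_tor():
    tor = find_tor_binary()
    if tor is None:
        raise RuntimeError("no usable tor binary found")
    return tor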
def maybe_ip_addr(addr):
"""
Tries to return an IPAddress, otherwise returns a string.
TODO consider explicitly checking for .exit or .onion at the end?
"""
if ipaddr is not None:
try:
return ipaddr.IPAddress(addr)
except ValueError:
pass
return str(addr)
def find_keywords(args, key_filter=lambda x: not x.startswith("$")):
"""
    This splits up strings like name=value, foo=bar into a dict. Does NOT deal
    with quotes in values (e.g. key="value with space" will not work).
By default, note that it takes OUT any key which starts with $ (i.e. a
single dollar sign) since for many use-cases the way Tor encodes nodes
with "$hash=name" looks like a keyword argument (but it isn't). If you
don't want this, override the "key_filter" argument to this method.
:return:
a dict of key->value (both strings) of all name=value type
keywords found in args.
"""
filtered = [x for x in args if '=' in x and key_filter(x.split('=')[0])]
return dict(x.split('=', 1) for x in filtered)
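# Illustrative sketch (not part of the original module): parsing the
# name=value keywords out of a (made-up) Tor control-port event line.
def _example_parse_event_keywords():
    args = "650 STREAM 42 SUCCEEDED 0 example.com:80 PURPOSE=USER".split()
    return find_keywords(args)
    # -> {'PURPOSE': 'USER'}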
def delete_file_or_tree(*args):
"""
For every path in args, try to delete it as a file or a directory
tree. Ignores deletion errors.
"""
for f in args:
try:
os.unlink(f)
except OSError:
shutil.rmtree(f, ignore_errors=True)
def ip_from_int(ip):
""" Convert long int back to dotted quad string """
return socket.inet_ntoa(struct.pack('>I', ip))
def process_from_address(addr, port, torstate=None):
"""
Determines the PID from the address/port provided by using lsof
and returns it as an int (or None if it couldn't be
determined). In the special case the addr is '(Tor_internal)' then
the PID of the Tor process (as gotten from the torstate object) is
returned (or 0 if unavailable, e.g. a Tor which doesn't implement
'GETINFO process/pid'). In this case if no TorState instance is
given, None is returned.
"""
if addr is None:
return None
if "(tor_internal)" == str(addr).lower():
if torstate is None:
return None
return int(torstate.tor_pid)
proc = subprocess.Popen(['lsof', '-i', '4tcp@%s:%s' % (addr, port)],
stdout=subprocess.PIPE)
(stdout, stderr) = proc.communicate()
lines = stdout.split('\n')
if len(lines) > 1:
return int(lines[1].split()[1])
def hmac_sha256(key, msg):
"""
Adapted from rransom's tor-utils git repository. Returns the
digest (binary) of an HMAC with SHA256 over msg with key.
"""
return hmac.new(key, msg, hashlib.sha256).digest()
CRYPTOVARIABLE_EQUALITY_COMPARISON_NONCE = os.urandom(32)
def compare_via_hash(x, y):
"""
Taken from rransom's tor-utils git repository, to compare two
hashes in something resembling constant time (or at least, not
leaking timing info?)
"""
return (hmac_sha256(CRYPTOVARIABLE_EQUALITY_COMPARISON_NONCE, x) ==
hmac_sha256(CRYPTOVARIABLE_EQUALITY_COMPARISON_NONCE, y))
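# Illustrative sketch (not part of the original module): using compare_via_hash
# to check a caller-supplied secret without an early-exit byte comparison.
def _example_check_secret(supplied, expected):
    if not compare_via_hash(supplied, expected):
        raise ValueError("secret mismatch")
    return True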
class NetLocation:
"""
    Represents the location of an IP address, at either city- or country-level
    resolution depending on which GeoIP database was loaded. If the ASN
    database is available you get that as well.
"""
def __init__(self, ipaddr):
"ipaddr should be a dotted-quad"
self.ip = ipaddr
self.latlng = (None, None)
self.countrycode = None
self.city = None
self.asn = None
if self.ip is None or self.ip == 'unknown':
return
if city:
try:
r = city.record_by_addr(self.ip)
except:
r = None
if r is not None:
self.countrycode = r['country_code']
self.latlng = (r['latitude'], r['longitude'])
try:
self.city = (r['city'], r['region_code'])
except KeyError:
self.city = (r['city'], r['region_name'])
elif country:
self.countrycode = country.country_code_by_addr(ipaddr)
else:
self.countrycode = ''
if asn:
try:
self.asn = asn.org_by_addr(self.ip)
except:
self.asn = None
@implementer(IProtocolFactory)
class NoOpProtocolFactory:
"""
This is an IProtocolFactory that does nothing. Used for testing,
    and for :func:`available_tcp_port`
"""
def noop(self, *args, **kw):
pass
buildProtocol = noop
doStart = noop
doStop = noop
@defer.inlineCallbacks
def available_tcp_port(reactor):
"""
Returns a Deferred firing an available TCP port on localhost.
    It does so by listening on port 0, then stops listening and fires the
    assigned port number.
"""
endpoint = serverFromString(reactor, 'tcp:0:interface=127.0.0.1')
port = yield endpoint.listen(NoOpProtocolFactory())
address = port.getHost()
yield port.stopListening()
defer.returnValue(address.port)
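# Illustrative sketch (not part of the original module): picking a free
# localhost port and turning it into an endpoint description string.
@defer.inlineCallbacks
def _example_free_endpoint(reactor):
    port = yield available_tcp_port(reactor)
    defer.returnValue('tcp:%d:interface=127.0.0.1' % port)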
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A decoder that performs beam search."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from tensorflow.contrib.seq2seq.python.ops import beam_search_ops
from tensorflow.contrib.seq2seq.python.ops import decoder
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.layers import base as layers_base
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import rnn_cell_impl
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.util import nest
__all__ = [
"BeamSearchDecoderOutput",
"BeamSearchDecoderState",
"BeamSearchDecoder",
"FinalBeamSearchDecoderOutput",
"tile_batch",
]
class BeamSearchDecoderState(
collections.namedtuple("BeamSearchDecoderState", ("cell_state", "log_probs",
"finished", "lengths"))):
pass
class BeamSearchDecoderOutput(
collections.namedtuple("BeamSearchDecoderOutput",
("scores", "predicted_ids", "parent_ids"))):
pass
class FinalBeamSearchDecoderOutput(
collections.namedtuple("FinalBeamDecoderOutput",
["predicted_ids", "beam_search_decoder_output"])):
"""Final outputs returned by the beam search after all decoding is finished.
Args:
predicted_ids: The final prediction. A tensor of shape
`[T, batch_size, beam_width]`.
    beam_search_decoder_output: An instance of `BeamSearchDecoderOutput` that
      describes the state of the beam search.
"""
pass
def _tile_batch(t, multiplier):
"""Core single-tensor implementation of tile_batch."""
t = ops.convert_to_tensor(t, name="t")
shape_t = array_ops.shape(t)
if t.shape.ndims is None or t.shape.ndims < 1:
raise ValueError("t must have statically known rank")
tiling = [1] * (t.shape.ndims + 1)
tiling[1] = multiplier
tiled_static_batch_size = (
t.shape[0].value * multiplier if t.shape[0].value is not None else None)
tiled = array_ops.tile(array_ops.expand_dims(t, 1), tiling)
tiled = array_ops.reshape(
tiled, array_ops.concat(([shape_t[0] * multiplier], shape_t[1:]), 0))
tiled.set_shape(
tensor_shape.TensorShape(
[tiled_static_batch_size]).concatenate(t.shape[1:]))
return tiled
def tile_batch(t, multiplier, name=None):
"""Tile the batch dimension of a (possibly nested structure of) tensor(s) t.
For each tensor t in a (possibly nested structure) of tensors,
this function takes a tensor t shaped `[batch_size, s0, s1, ...]` composed of
minibatch entries `t[0], ..., t[batch_size - 1]` and tiles it to have a shape
`[batch_size * multiplier, s0, s1, ...]` composed of minibatch entries
`t[0], t[0], ..., t[1], t[1], ...` where each minibatch entry is repeated
`multiplier` times.
Args:
t: `Tensor` shaped `[batch_size, ...]`.
multiplier: Python int.
name: Name scope for any created operations.
Returns:
A (possibly nested structure of) `Tensor` shaped
`[batch_size * multiplier, ...]`.
Raises:
ValueError: if tensor(s) `t` do not have a statically known rank or
the rank is < 1.
"""
flat_t = nest.flatten(t)
with ops.name_scope(name, "tile_batch", flat_t + [multiplier]):
return nest.map_structure(lambda t_: _tile_batch(t_, multiplier), t)
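# Illustrative sketch (not part of the original module): tiling encoder outputs
# and the encoder final state so their batch dimension matches the
# beam_width-times-larger decoder batch. tile_batch already handles nested
# structures, so a state tuple can be passed directly.
def _example_tile_for_beam_search(encoder_outputs, encoder_final_state,
                                  beam_width):
  tiled_outputs = tile_batch(encoder_outputs, multiplier=beam_width)
  tiled_final_state = tile_batch(encoder_final_state, multiplier=beam_width)
  return tiled_outputs, tiled_final_state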
def _check_maybe(t):
if isinstance(t, tensor_array_ops.TensorArray):
raise TypeError(
"TensorArray state is not supported by BeamSearchDecoder: %s" % t.name)
if t.shape.ndims is None:
raise ValueError(
"Expected tensor (%s) to have known rank, but ndims == None." % t)
class BeamSearchDecoder(decoder.Decoder):
"""BeamSearch sampling decoder."""
def __init__(self,
cell,
embedding,
start_tokens,
end_token,
initial_state,
beam_width,
output_layer=None,
length_penalty_weight=0.0):
"""Initialize BeamSearchDecoder.
Args:
cell: An `RNNCell` instance.
embedding: A callable that takes a vector tensor of `ids` (argmax ids),
or the `params` argument for `embedding_lookup`.
start_tokens: `int32` vector shaped `[batch_size]`, the start tokens.
end_token: `int32` scalar, the token that marks end of decoding.
initial_state: A (possibly nested tuple of...) tensors and TensorArrays.
beam_width: Python integer, the number of beams.
output_layer: (Optional) An instance of `tf.layers.Layer`, i.e.,
`tf.layers.Dense`. Optional layer to apply to the RNN output prior
to storing the result or sampling.
length_penalty_weight: Float weight to penalize length. Disabled with 0.0.
Raises:
TypeError: if `cell` is not an instance of `RNNCell`,
or `output_layer` is not an instance of `tf.layers.Layer`.
ValueError: If `start_tokens` is not a vector or
`end_token` is not a scalar.
"""
if not rnn_cell_impl._like_rnncell(cell): # pylint: disable=protected-access
raise TypeError("cell must be an RNNCell, received: %s" % type(cell))
if (output_layer is not None
and not isinstance(output_layer, layers_base.Layer)):
raise TypeError(
"output_layer must be a Layer, received: %s" % type(output_layer))
self._cell = cell
self._output_layer = output_layer
if callable(embedding):
self._embedding_fn = embedding
else:
self._embedding_fn = (
lambda ids: embedding_ops.embedding_lookup(embedding, ids))
self._start_tokens = ops.convert_to_tensor(
start_tokens, dtype=dtypes.int32, name="start_tokens")
if self._start_tokens.get_shape().ndims != 1:
raise ValueError("start_tokens must be a vector")
self._end_token = ops.convert_to_tensor(
end_token, dtype=dtypes.int32, name="end_token")
if self._end_token.get_shape().ndims != 0:
raise ValueError("end_token must be a scalar")
self._batch_size = array_ops.size(start_tokens)
self._beam_width = beam_width
self._length_penalty_weight = length_penalty_weight
self._initial_cell_state = nest.map_structure(
self._maybe_split_batch_beams,
initial_state, self._cell.state_size)
self._start_tokens = array_ops.tile(
array_ops.expand_dims(self._start_tokens, 1), [1, self._beam_width])
self._start_inputs = self._embedding_fn(self._start_tokens)
self._finished = array_ops.zeros(
[self._batch_size, self._beam_width], dtype=dtypes.bool)
@property
def batch_size(self):
return self._batch_size
def _rnn_output_size(self):
size = self._cell.output_size
if self._output_layer is None:
return size
else:
# To use layer's compute_output_shape, we need to convert the
# RNNCell's output_size entries into shapes with an unknown
# batch size. We then pass this through the layer's
# compute_output_shape and read off all but the first (batch)
# dimensions to get the output size of the rnn with the layer
# applied to the top.
output_shape_with_unknown_batch = nest.map_structure(
lambda s: tensor_shape.TensorShape([None]).concatenate(s),
size)
layer_output_shape = self._output_layer._compute_output_shape( # pylint: disable=protected-access
output_shape_with_unknown_batch)
return nest.map_structure(lambda s: s[1:], layer_output_shape)
@property
def output_size(self):
# Return the cell output and the id
return BeamSearchDecoderOutput(
scores=tensor_shape.TensorShape([self._beam_width]),
predicted_ids=tensor_shape.TensorShape([self._beam_width]),
parent_ids=tensor_shape.TensorShape([self._beam_width]))
@property
def output_dtype(self):
# Assume the dtype of the cell is the output_size structure
# containing the input_state's first component's dtype.
# Return that structure and int32 (the id)
dtype = nest.flatten(self._initial_cell_state)[0].dtype
return BeamSearchDecoderOutput(
scores=nest.map_structure(lambda _: dtype, self._rnn_output_size()),
predicted_ids=dtypes.int32,
parent_ids=dtypes.int32)
def initialize(self, name=None):
"""Initialize the decoder.
Args:
name: Name scope for any created operations.
Returns:
`(finished, start_inputs, initial_state)`.
"""
finished, start_inputs = self._finished, self._start_inputs
initial_state = BeamSearchDecoderState(
cell_state=self._initial_cell_state,
log_probs=array_ops.zeros(
[self._batch_size, self._beam_width],
dtype=nest.flatten(self._initial_cell_state)[0].dtype),
finished=finished,
lengths=array_ops.zeros(
[self._batch_size, self._beam_width], dtype=dtypes.int32))
return (finished, start_inputs, initial_state)
def finalize(self, outputs, final_state, sequence_lengths):
"""Finalize and return the predicted_ids.
Args:
outputs: An instance of BeamSearchDecoderOutput.
final_state: An instance of BeamSearchDecoderState. Passed through to the
output.
sequence_lengths: An `int32` tensor shaped `[batch_size, beam_width]`.
The sequence lengths determined for each beam during decode.
Returns:
outputs: An instance of FinalBeamSearchDecoderOutput where the
predicted_ids are the result of calling _gather_tree.
final_state: The same input instance of BeamSearchDecoderState.
"""
predicted_ids = beam_search_ops.gather_tree(
outputs.predicted_ids, outputs.parent_ids,
sequence_length=sequence_lengths)
outputs = FinalBeamSearchDecoderOutput(
beam_search_decoder_output=outputs, predicted_ids=predicted_ids)
return outputs, final_state
def _merge_batch_beams(self, t, s=None):
"""Merges the tensor from a batch of beams into a batch by beams.
More exactly, t is a tensor of dimension [batch_size, beam_width, s]. We
reshape this into [batch_size*beam_width, s]
Args:
t: Tensor of dimension [batch_size, beam_width, s]
s: (Possibly known) depth shape.
Returns:
A reshaped version of t with dimension [batch_size * beam_width, s].
"""
if isinstance(s, ops.Tensor):
s = tensor_shape.as_shape(tensor_util.constant_value(s))
else:
s = tensor_shape.TensorShape(s)
t_shape = array_ops.shape(t)
static_batch_size = tensor_util.constant_value(self._batch_size)
batch_size_beam_width = (
None if static_batch_size is None
else static_batch_size * self._beam_width)
reshaped_t = array_ops.reshape(
t, array_ops.concat(
([self._batch_size * self._beam_width], t_shape[2:]), 0))
reshaped_t.set_shape(
(tensor_shape.TensorShape([batch_size_beam_width]).concatenate(s)))
return reshaped_t
def _split_batch_beams(self, t, s=None):
"""Splits the tensor from a batch by beams into a batch of beams.
More exactly, t is a tensor of dimension [batch_size*beam_width, s]. We
reshape this into [batch_size, beam_width, s]
Args:
t: Tensor of dimension [batch_size*beam_width, s].
s: (Possibly known) depth shape.
Returns:
A reshaped version of t with dimension [batch_size, beam_width, s].
Raises:
ValueError: If, after reshaping, the new tensor is not shaped
`[batch_size, beam_width, s]` (assuming batch_size and beam_width
are known statically).
"""
if isinstance(s, ops.Tensor):
s = tensor_shape.TensorShape(tensor_util.constant_value(s))
else:
s = tensor_shape.TensorShape(s)
t_shape = array_ops.shape(t)
reshaped_t = array_ops.reshape(
t, array_ops.concat(
([self._batch_size, self._beam_width], t_shape[1:]), 0))
static_batch_size = tensor_util.constant_value(self._batch_size)
expected_reshaped_shape = tensor_shape.TensorShape(
[static_batch_size, self._beam_width]).concatenate(s)
if not reshaped_t.shape.is_compatible_with(expected_reshaped_shape):
raise ValueError("Unexpected behavior when reshaping between beam width "
"and batch size. The reshaped tensor has shape: %s. "
"We expected it to have shape "
"(batch_size, beam_width, depth) == %s. Perhaps you "
"forgot to create a zero_state with "
"batch_size=encoder_batch_size * beam_width?"
% (reshaped_t.shape, expected_reshaped_shape))
reshaped_t.set_shape(expected_reshaped_shape)
return reshaped_t
def _maybe_split_batch_beams(self, t, s):
"""Maybe splits the tensor from a batch by beams into a batch of beams.
We do this so that we can use nest and not run into problems with shapes.
Args:
t: Tensor of dimension [batch_size*beam_width, s]
s: Tensor, Python int, or TensorShape.
Returns:
Either a reshaped version of t with dimension
[batch_size, beam_width, s] if t's first dimension is of size
batch_size*beam_width or t if not.
Raises:
TypeError: If t is an instance of TensorArray.
ValueError: If the rank of t is not statically known.
"""
_check_maybe(t)
if t.shape.ndims >= 1:
return self._split_batch_beams(t, s)
else:
return t
def _maybe_merge_batch_beams(self, t, s):
"""Splits the tensor from a batch by beams into a batch of beams.
More exactly, t is a tensor of dimension [batch_size*beam_width, s]. We
reshape this into [batch_size, beam_width, s]
Args:
t: Tensor of dimension [batch_size*beam_width, s]
s: Tensor, Python int, or TensorShape.
Returns:
A reshaped version of t with dimension [batch_size, beam_width, s].
Raises:
TypeError: If t is an instance of TensorArray.
ValueError: If the rank of t is not statically known.
"""
_check_maybe(t)
if t.shape.ndims >= 2:
return self._merge_batch_beams(t, s)
else:
return t
def step(self, time, inputs, state, name=None):
"""Perform a decoding step.
Args:
time: scalar `int32` tensor.
inputs: A (structure of) input tensors.
state: A (structure of) state tensors and TensorArrays.
name: Name scope for any created operations.
Returns:
`(outputs, next_state, next_inputs, finished)`.
"""
batch_size = self._batch_size
beam_width = self._beam_width
end_token = self._end_token
length_penalty_weight = self._length_penalty_weight
with ops.name_scope(name, "BeamSearchDecoderStep", (time, inputs, state)):
cell_state = state.cell_state
inputs = nest.map_structure(
lambda inp: self._merge_batch_beams(inp, s=inp.shape[2:]), inputs)
cell_state = nest.map_structure(
self._maybe_merge_batch_beams,
cell_state, self._cell.state_size)
cell_outputs, next_cell_state = self._cell(inputs, cell_state)
cell_outputs = nest.map_structure(
lambda out: self._split_batch_beams(out, out.shape[1:]), cell_outputs)
next_cell_state = nest.map_structure(
self._maybe_split_batch_beams,
next_cell_state, self._cell.state_size)
if self._output_layer is not None:
cell_outputs = self._output_layer(cell_outputs)
beam_search_output, beam_search_state = _beam_search_step(
time=time,
logits=cell_outputs,
next_cell_state=next_cell_state,
beam_state=state,
batch_size=batch_size,
beam_width=beam_width,
end_token=end_token,
length_penalty_weight=length_penalty_weight)
finished = beam_search_state.finished
sample_ids = beam_search_output.predicted_ids
next_inputs = control_flow_ops.cond(
math_ops.reduce_all(finished), lambda: self._start_inputs,
lambda: self._embedding_fn(sample_ids))
return (beam_search_output, beam_search_state, next_inputs, finished)
def _beam_search_step(time, logits, next_cell_state, beam_state, batch_size,
beam_width, end_token, length_penalty_weight):
"""Performs a single step of Beam Search Decoding.
Args:
time: Beam search time step, should start at 0. At time 0 we assume
that all beams are equal and consider only the first beam for
continuations.
logits: Logits at the current time step. A tensor of shape
`[batch_size, beam_width, vocab_size]`
next_cell_state: The next state from the cell, e.g. an instance of
AttentionWrapperState if the cell is attentional.
beam_state: Current state of the beam search.
An instance of `BeamSearchDecoderState`.
batch_size: The batch size for this input.
beam_width: Python int. The size of the beams.
end_token: The int32 end token.
length_penalty_weight: Float weight to penalize length. Disabled with 0.0.
Returns:
    A (BeamSearchDecoderOutput, BeamSearchDecoderState) pair for this step.
"""
static_batch_size = tensor_util.constant_value(batch_size)
# Calculate the current lengths of the predictions
prediction_lengths = beam_state.lengths
previously_finished = beam_state.finished
# Calculate the total log probs for the new hypotheses
# Final Shape: [batch_size, beam_width, vocab_size]
step_log_probs = nn_ops.log_softmax(logits)
step_log_probs = _mask_probs(step_log_probs, end_token, previously_finished)
total_probs = array_ops.expand_dims(beam_state.log_probs, 2) + step_log_probs
# Calculate the continuation lengths by adding to all continuing beams.
vocab_size = logits.shape[-1].value
lengths_to_add = array_ops.one_hot(
indices=array_ops.tile(
array_ops.reshape(end_token, [1, 1]), [batch_size, beam_width]),
depth=vocab_size,
on_value=0,
off_value=1)
add_mask = (1 - math_ops.to_int32(previously_finished))
lengths_to_add = array_ops.expand_dims(add_mask, 2) * lengths_to_add
new_prediction_lengths = (
lengths_to_add + array_ops.expand_dims(prediction_lengths, 2))
# Calculate the scores for each beam
scores = _get_scores(
log_probs=total_probs,
sequence_lengths=new_prediction_lengths,
length_penalty_weight=length_penalty_weight)
time = ops.convert_to_tensor(time, name="time")
# During the first time step we only consider the initial beam
scores_shape = array_ops.shape(scores)
scores_flat = control_flow_ops.cond(
time > 0,
lambda: array_ops.reshape(scores, [batch_size, -1]),
lambda: scores[:, 0])
num_available_beam = control_flow_ops.cond(
time > 0,
lambda: math_ops.reduce_prod(scores_shape[1:]),
lambda: math_ops.reduce_prod(scores_shape[2:]))
# Pick the next beams according to the specified successors function
next_beam_size = math_ops.minimum(
ops.convert_to_tensor(
beam_width, dtype=dtypes.int32, name="beam_width"),
num_available_beam)
next_beam_scores, word_indices = nn_ops.top_k(scores_flat, k=next_beam_size)
next_beam_scores.set_shape([static_batch_size, beam_width])
word_indices.set_shape([static_batch_size, beam_width])
# Pick out the probs, beam_ids, and states according to the chosen predictions
next_beam_probs = _tensor_gather_helper(
gather_indices=word_indices,
gather_from=total_probs,
batch_size=batch_size,
range_size=beam_width * vocab_size,
gather_shape=[-1])
next_word_ids = math_ops.to_int32(word_indices % vocab_size)
next_beam_ids = math_ops.to_int32(word_indices / vocab_size)
# Append new ids to current predictions
previously_finished = _tensor_gather_helper(
gather_indices=next_beam_ids,
gather_from=previously_finished,
batch_size=batch_size,
range_size=beam_width,
gather_shape=[-1])
next_finished = math_ops.logical_or(previously_finished,
math_ops.equal(next_word_ids, end_token))
# Calculate the length of the next predictions.
# 1. Finished beams remain unchanged
# 2. Beams that are now finished (EOS predicted) remain unchanged
# 3. Beams that are not yet finished have their length increased by 1
lengths_to_add = math_ops.to_int32(
math_ops.not_equal(next_word_ids, end_token))
lengths_to_add = (1 - math_ops.to_int32(next_finished)) * lengths_to_add
next_prediction_len = _tensor_gather_helper(
gather_indices=next_beam_ids,
gather_from=beam_state.lengths,
batch_size=batch_size,
range_size=beam_width,
gather_shape=[-1])
next_prediction_len += lengths_to_add
# Pick out the cell_states according to the next_beam_ids. We use a
# different gather_shape here because the cell_state tensors, i.e.
# the tensors that would be gathered from, all have dimension
# greater than two and we need to preserve those dimensions.
# pylint: disable=g-long-lambda
next_cell_state = nest.map_structure(
lambda gather_from: _maybe_tensor_gather_helper(
gather_indices=next_beam_ids,
gather_from=gather_from,
batch_size=batch_size,
range_size=beam_width,
gather_shape=[batch_size * beam_width, -1]),
next_cell_state)
# pylint: enable=g-long-lambda
next_state = BeamSearchDecoderState(
cell_state=next_cell_state,
log_probs=next_beam_probs,
lengths=next_prediction_len,
finished=next_finished)
output = BeamSearchDecoderOutput(
scores=next_beam_scores,
predicted_ids=next_word_ids,
parent_ids=next_beam_ids)
return output, next_state
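# Illustrative sketch (not part of the original module): how a flat top-k index
# over `beam_width * vocab_size` entries decomposes into a (parent beam id,
# word id) pair, assuming beam_width=3 and vocab_size=5.
def _example_decompose_flat_index(flat_index=13, vocab_size=5):
  next_word_id = flat_index % vocab_size   # 13 % 5 == 3
  next_beam_id = flat_index // vocab_size  # 13 // 5 == 2
  return next_beam_id, next_word_id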
def _get_scores(log_probs, sequence_lengths, length_penalty_weight):
"""Calculates scores for beam search hypotheses.
Args:
log_probs: The log probabilities with shape
`[batch_size, beam_width, vocab_size]`.
sequence_lengths: The array of sequence lengths.
length_penalty_weight: Float weight to penalize length. Disabled with 0.0.
Returns:
The scores normalized by the length_penalty.
"""
  length_penalty_ = _length_penalty(
      sequence_lengths=sequence_lengths, penalty_factor=length_penalty_weight)
  return log_probs / length_penalty_
def _length_penalty(sequence_lengths, penalty_factor):
"""Calculates the length penalty. See https://arxiv.org/abs/1609.08144.
Args:
sequence_lengths: The sequence length of all hypotheses, a tensor
of shape [beam_size, vocab_size].
penalty_factor: A scalar that weights the length penalty.
Returns:
    The length penalty factor, a tensor of shape [beam_size].
"""
penalty_factor = ops.convert_to_tensor(penalty_factor, name="penalty_factor")
penalty_factor.set_shape(()) # penalty should be a scalar.
static_penalty = tensor_util.constant_value(penalty_factor)
if static_penalty is not None and static_penalty == 0:
return 1.0
return math_ops.div((5. + math_ops.to_float(sequence_lengths))
**penalty_factor, (5. + 1.)**penalty_factor)
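# Illustrative sketch (not part of the original module): the same Google NMT
# penalty with plain Python floats, i.e. ((5 + length) / 6) ** alpha, so
# alpha == 0 leaves the scores untouched.
def _example_length_penalty(sequence_length, alpha):
  return ((5. + float(sequence_length)) ** alpha) / ((5. + 1.) ** alpha)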
def _mask_probs(probs, eos_token, finished):
"""Masks log probabilities.
The result is that finished beams allocate all probability mass to eos and
unfinished beams remain unchanged.
Args:
    probs: Log probabilities of shape `[batch_size, beam_width, vocab_size]`
eos_token: An int32 id corresponding to the EOS token to allocate
probability to.
    finished: A boolean tensor of shape `[batch_size, beam_width]` that
      specifies which elements in the beam are already finished.
Returns:
A tensor of shape `[batch_size, beam_width, vocab_size]`, where unfinished
beams stay unchanged and finished beams are replaced with a tensor with all
probability on the EOS token.
"""
vocab_size = array_ops.shape(probs)[2]
finished_mask = array_ops.expand_dims(
math_ops.to_float(1. - math_ops.to_float(finished)), 2)
# These examples are not finished and we leave them
non_finished_examples = finished_mask * probs
# All finished examples are replaced with a vector that has all
# probability on EOS
finished_row = array_ops.one_hot(
eos_token,
vocab_size,
dtype=probs.dtype,
on_value=0.,
off_value=probs.dtype.min)
finished_examples = (1. - finished_mask) * finished_row
return finished_examples + non_finished_examples
def _maybe_tensor_gather_helper(gather_indices, gather_from, batch_size,
range_size, gather_shape):
"""Maybe applies _tensor_gather_helper.
This applies _tensor_gather_helper when the gather_from dims is at least as
big as the length of gather_shape. This is used in conjunction with nest so
that we don't apply _tensor_gather_helper to inapplicable values like scalars.
Args:
gather_indices: The tensor indices that we use to gather.
gather_from: The tensor that we are gathering from.
batch_size: The batch size.
range_size: The number of values in each range. Likely equal to beam_width.
gather_shape: What we should reshape gather_from to in order to preserve the
correct values. An example is when gather_from is the attention from an
AttentionWrapperState with shape [batch_size, beam_width, attention_size].
There, we want to preserve the attention_size elements, so gather_shape is
[batch_size * beam_width, -1]. Then, upon reshape, we still have the
attention_size as desired.
Returns:
output: Gathered tensor of shape tf.shape(gather_from)[:1+len(gather_shape)]
or the original tensor if its dimensions are too small.
"""
_check_maybe(gather_from)
if gather_from.shape.ndims >= len(gather_shape):
return _tensor_gather_helper(
gather_indices=gather_indices,
gather_from=gather_from,
batch_size=batch_size,
range_size=range_size,
gather_shape=gather_shape)
else:
return gather_from
def _tensor_gather_helper(gather_indices, gather_from, batch_size,
range_size, gather_shape):
"""Helper for gathering the right indices from the tensor.
This works by reshaping gather_from to gather_shape (e.g. [-1]) and then
gathering from that according to the gather_indices, which are offset by
the right amounts in order to preserve the batch order.
Args:
gather_indices: The tensor indices that we use to gather.
gather_from: The tensor that we are gathering from.
batch_size: The input batch size.
range_size: The number of values in each range. Likely equal to beam_width.
gather_shape: What we should reshape gather_from to in order to preserve the
correct values. An example is when gather_from is the attention from an
AttentionWrapperState with shape [batch_size, beam_width, attention_size].
There, we want to preserve the attention_size elements, so gather_shape is
[batch_size * beam_width, -1]. Then, upon reshape, we still have the
attention_size as desired.
Returns:
output: Gathered tensor of shape tf.shape(gather_from)[:1+len(gather_shape)]
"""
range_ = array_ops.expand_dims(math_ops.range(batch_size) * range_size, 1)
gather_indices = array_ops.reshape(gather_indices + range_, [-1])
output = array_ops.gather(
array_ops.reshape(gather_from, gather_shape), gather_indices)
final_shape = array_ops.shape(gather_from)[:1 + len(gather_shape)]
static_batch_size = tensor_util.constant_value(batch_size)
final_static_shape = (tensor_shape.TensorShape([static_batch_size])
.concatenate(
gather_from.shape[1:1 + len(gather_shape)]))
output = array_ops.reshape(output, final_shape)
output.set_shape(final_static_shape)
return output
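# Illustrative sketch (not part of the original module): the index arithmetic
# used above, with plain Python ints. Assuming batch_size=2 and
# range_size=6 (beam_width * vocab_size), per-batch indices [[4, 1], [3, 5]]
# become flat indices [4, 1, 9, 11] into the reshaped tensor.
def _example_gather_offsets():
  batch_size, range_size = 2, 6
  gather_indices = [[4, 1], [3, 5]]
  offsets = [b * range_size for b in range(batch_size)]  # [0, 6]
  return [i + offset
          for offset, row in zip(offsets, gather_indices)
          for i in row]  # [4, 1, 9, 11]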
|
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Custom Training Loop correctness test.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.python import keras
from tensorflow.python.compat import v2_compat
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import reduce_util
from tensorflow.python.distribute import strategy_combinations
from tensorflow.python.eager import backprop
from tensorflow.python.eager import def_function
from tensorflow.python.eager import test
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
_NUM_SAMPLES = 66
_BATCH_SIZE = 32
_RANDOM_SEED = 1337
_NUM_EPOCHS = 2
_STEPS_PER_EPOCH = 2
class MaybeStrategyScope(object):
"""Provides a context allowing no distribution strategy."""
def __init__(self, strategy):
self._strategy = strategy
self._scope = None
def __enter__(self):
if self._strategy:
self._scope = self._strategy.scope()
self._scope.__enter__()
def __exit__(self, exc_type, value, traceback):
if self._strategy:
self._scope.__exit__(exc_type, value, traceback)
self._scope = None
def get_model(sync_batchnorm=False):
model = keras.Sequential()
model.add(keras.layers.Dense(10, activation='relu', input_shape=(1,)))
model.add(keras.layers.Dense(
10, activation='relu',
kernel_regularizer=keras.regularizers.l2(1e-4)))
if sync_batchnorm:
model.add(keras.layers.SyncBatchNormalization())
else:
model.add(keras.layers.BatchNormalization())
model.add(keras.layers.Dense(10, activation='relu'))
model.add(keras.layers.Dense(1))
return model
def get_data():
x_train = np.random.rand(_NUM_SAMPLES, 1)
y_train = 3 * x_train
x_train = x_train.astype('float32')
y_train = y_train.astype('float32')
train_dataset = dataset_ops.DatasetV2.from_tensor_slices((x_train, y_train))
train_dataset = train_dataset.batch(_BATCH_SIZE)
return train_dataset
def compute_loss(labels, logits, reg_losses):
pred_loss = keras.losses.mean_squared_error(labels, logits)
scaled_loss = nn.compute_average_loss(
pred_loss, global_batch_size=_BATCH_SIZE)
l2_loss = nn.scale_regularization_loss(reg_losses)
return scaled_loss + l2_loss
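# Minimal sketch (not part of the original test): with no regularization losses
# and _BATCH_SIZE = 32, four perfectly wrong (error 1.0) examples give a scaled
# loss of 4 / 32 = 0.125, because compute_average_loss divides by the global
# batch size rather than the per-replica one.
def _example_compute_loss_value():
  labels = np.ones((4, 1), dtype='float32')
  logits = np.zeros((4, 1), dtype='float32')
  return compute_loss(labels, logits, reg_losses=[])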
def iteration_inside_func(initial_weights, dataset, optimizer_fn,
iteration_type, strategy=None, sync_batchnorm=None):
"""Helper function to test iterating over data inside a tf.function."""
with MaybeStrategyScope(strategy):
if strategy and sync_batchnorm:
model = get_model(sync_batchnorm)
else:
model = get_model()
model.set_weights(initial_weights)
optimizer = optimizer_fn()
training_accuracy = keras.metrics.CategoricalAccuracy(
'training_accuracy', dtype=dtypes.float32)
@def_function.function
def train_epoch(dist_input):
"""Training StepFn."""
def step_fn(inputs):
samples, labels = inputs
with backprop.GradientTape() as tape:
logits = model(samples)
loss = compute_loss(labels, logits, model.losses)
grads = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(zip(grads, model.trainable_variables))
training_accuracy.update_state(labels, logits)
return loss
total_loss = 0.0
num_batches = 0
if iteration_type == 'dataset':
for x in dist_input:
if strategy:
per_replica_losses = strategy.run(step_fn, args=(x,))
total_loss += strategy.reduce(reduce_util.ReduceOp.SUM,
per_replica_losses,
axis=None)
else:
total_loss += step_fn(x)
num_batches += 1
else:
iterator = iter(dist_input)
for _ in range(_STEPS_PER_EPOCH):
if strategy:
per_replica_losses = strategy.run(step_fn, args=(next(iterator),))
total_loss += strategy.reduce(reduce_util.ReduceOp.SUM,
per_replica_losses,
axis=None)
else:
total_loss += step_fn(next(iterator))
num_batches += 1
return total_loss / math_ops.cast(num_batches, dtype=dtypes.float32)
if strategy:
dataset = strategy.experimental_distribute_dataset(dataset)
for _ in range(_NUM_EPOCHS):
loss = train_epoch(dataset)
return (model.get_weights(),
loss,
training_accuracy.result())
def iteration_outside_func(initial_weights, dataset, optimizer_fn,
iteration_type, strategy=None, sync_batchnorm=None):
"""Helper function to test iterating over data outside a tf.function."""
with MaybeStrategyScope(strategy):
model = get_model(sync_batchnorm=sync_batchnorm)
model.set_weights(initial_weights)
optimizer = optimizer_fn()
training_accuracy = keras.metrics.CategoricalAccuracy(
'training_accuracy', dtype=dtypes.float32)
@def_function.function
def train_step(dist_inputs):
"""Training StepFn."""
def step_fn(inputs):
samples, labels = inputs
with backprop.GradientTape() as tape:
logits = model(samples)
loss = compute_loss(labels, logits, model.losses)
grads = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(zip(grads, model.trainable_variables))
training_accuracy.update_state(labels, logits)
return loss
if strategy:
per_replica_losses = strategy.run(step_fn, args=(dist_inputs,))
return strategy.reduce(reduce_util.ReduceOp.SUM,
per_replica_losses,
axis=None)
else:
return step_fn(dist_inputs)
if strategy:
dataset = strategy.experimental_distribute_dataset(dataset)
total_loss = 0.0
num_batches = 0
if iteration_type == 'dataset':
for _ in range(_NUM_EPOCHS):
for x in dataset:
total_loss += train_step(x)
num_batches += 1
else:
for _ in range(_NUM_EPOCHS):
iterator = iter(dataset)
for _ in range(_STEPS_PER_EPOCH):
total_loss += train_step(next(iterator))
num_batches += 1
return (model.get_weights(),
total_loss / math_ops.cast(num_batches, dtype=dtypes.float32),
training_accuracy.result())
class TestDistributionStrategyDnnCorrectness(test.TestCase,
parameterized.TestCase):
"""Test custom training loop correctness with a simple DNN model."""
def setUp(self):
super(TestDistributionStrategyDnnCorrectness, self).setUp()
v2_compat.enable_v2_behavior()
np.random.seed(_RANDOM_SEED)
random_seed.set_random_seed(_RANDOM_SEED)
@combinations.generate(
combinations.combine(
distribution=strategy_combinations.all_strategies,
optimizer_fn=strategy_combinations.optimizers_v1_and_v2,
mode=['eager'],
iteration_type=['iterator', 'dataset'],
inside_func=[False, True],
sync_batchnorm=[True, False]
))
def test_dnn_correctness_minus_tpus(self, distribution, optimizer_fn,
iteration_type, inside_func,
sync_batchnorm):
# TODO(anjs): Identify why this particular V1 optimizer needs a higher tol.
if 'FtrlV1' in optimizer_fn._name and 'TPU' in type(distribution).__name__:
self.skipTest('Reduced tolerance of the order of 1e-1 required.')
self.dnn_correctness(distribution, optimizer_fn, iteration_type,
inside_func, sync_batchnorm)
def dnn_correctness(self, distribution, optimizer_fn, iteration_type,
inside_func, sync_batchnorm=None):
model = get_model(sync_batchnorm)
initial_weights = model.get_weights()
dataset = get_data()
if inside_func:
iteration_func = iteration_inside_func
else:
iteration_func = iteration_outside_func
wts_with_ds, loss_with_ds, acc_with_ds = iteration_func(
initial_weights, dataset, optimizer_fn, iteration_type,
strategy=distribution, sync_batchnorm=sync_batchnorm)
wts, loss, acc = iteration_func(initial_weights, dataset, optimizer_fn,
iteration_type,
sync_batchnorm=sync_batchnorm)
self.assertAllClose(wts, wts_with_ds, atol=1e-3, rtol=1e-3)
self.assertAllClose(loss, loss_with_ds, atol=1e-3, rtol=1e-3)
self.assertAllClose(acc, acc_with_ds, atol=1e-3, rtol=1e-3)
if __name__ == '__main__':
test.main()
|
|
"""
====================================================================================
"""
from rdflib import (
BNode,
Literal,
Namespace,
RDF,
URIRef,
Variable,
)
from rdflib.store import Store
from rdflib.graph import QuotedGraph, Graph
from rdflib.namespace import NamespaceManager
from rdflib import py3compat
from .BuiltinPredicates import FILTERS
try:
from functools import reduce
except ImportError:
pass
LOG = Namespace("http://www.w3.org/2000/10/swap/log#")
Any = None
RULE_LHS = 0
RULE_RHS = 1
class N3Builtin(object):
"""
    A binary N3 filter: a built-in predicate which evaluates to a boolean
"""
def __init__(self, uri, func, argument, result):
self.uri = uri
self.argument = argument
self.result = result
self.func = func
self.variables = [arg for arg in
[self.argument, self.result] if isinstance(arg, Variable)]
def isSecondOrder(self):
return False
def ground(self, varMapping):
appliedKeys = set([self.argument, self.result]
).intersection(list(varMapping.keys()))
self.argument = varMapping.get(self.argument, self.argument)
self.result = varMapping.get(self.result, self.result)
return appliedKeys
def isGround(self):
for term in [self.result, self.argument]:
if isinstance(term, Variable):
return False
return True
def renameVariables(self, varMapping):
if varMapping:
self.argument = varMapping.get(self.argument, self.argument)
self.result = varMapping.get(self.result, self.result)
def binds(self, var):
return True
def toRDFTuple(self):
return (self.argument, self.uri, self.result)
def render(self, argument, result):
return "<%s>(%s, %s)" % (self.uri, argument, result)
def __iter__(self):
for f in [self.uri, self.argument, self.result]:
yield f
def __repr__(self):
return "<%s>(%s, %s)" % (
self.uri,
isinstance(self.argument, Variable) and '?%s' % self.argument or self.argument,
isinstance(self.result, Variable) and '?%s' % self.result or self.result)
class Formula(object):
"""
An N3 Formula. Consists of an (internal) identifier
and a *list* of triples
"""
def __init__(self, identifier):
self.identifier = identifier
self.triples = []
def __len__(self):
return len(self.triples)
def __repr__(self):
return "{%s}" % (repr(self.triples))
def __getitem__(self, key):
return self.triples[key]
def __iter__(self):
for item in self.triples:
yield item
def extend(self, other):
self.triples.extend(other)
def append(self, other):
self.triples.append(other)
class Rule(object):
"""
    An N3 Rule. Consists of two formulae associated via log:implies
"""
def __init__(self, LHS, RHS):
self.lhs = LHS
self.rhs = RHS
def __repr__(self):
return "{%s} => {%s}" % (self.lhs, self.rhs)
def SetupRuleStore(n3Stream=None, additionalBuiltins=None, makeNetwork=False):
"""
    Sets up an N3RuleStore and a Graph that uses it as its store; optionally
    also builds a ReteNetwork over the parsed rules when makeNetwork is True.
"""
ruleStore = N3RuleStore(additionalBuiltins=additionalBuiltins)
nsMgr = NamespaceManager(Graph(ruleStore))
ruleGraph = Graph(ruleStore, namespace_manager=nsMgr)
if n3Stream:
ruleGraph.parse(n3Stream, format='n3')
if makeNetwork:
from .Network import ReteNetwork
closureDeltaGraph = Graph()
network = ReteNetwork(ruleStore, inferredTarget=closureDeltaGraph)
return ruleStore, ruleGraph, network
return ruleStore, ruleGraph
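# Illustrative sketch (not part of the original module): loading rules from an
# N3 document (hypothetical path) and building a Rete network over them.
def _example_setup_network(n3_path="rules.n3"):
    with open(n3_path) as n3_stream:
        rule_store, rule_graph, network = SetupRuleStore(
            n3_stream, makeNetwork=True)
    return rule_store, rule_graph, network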
class N3RuleStore(Store):
doc = """
    A specialized Store which maintains the order of statements
    and creates N3 builtin filters, Rules, Formula objects, and other facts.
    Ensures builtin filters only refer to variables that have already appeared.
>>> s = N3RuleStore()
>>> g = Graph(s)
>>> src = \"\"\"
... @prefix : <http://metacognition.info/FuXi/test#>.
... @prefix str: <http://www.w3.org/2000/10/swap/string#>.
... @prefix math: <http://www.w3.org/2000/10/swap/math#>.
... @prefix log: <http://www.w3.org/2000/10/swap/log#>.
... @prefix m: <http://metacognition.info/FuXi/test#>.
... @prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#>.
... @prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>.
... @prefix owl: <http://www.w3.org/2002/07/owl#>.
... m:a a rdfs:Class;
... m:prop1 1;
... m:prop2 4.
... m:b a owl:Class;
... m:prop1 2;
... m:prop2 4, 1, 5.
... (1 2) :relatedTo (3 4).
... { ?X a owl:Class. ?X :prop1 ?M. ?X :prop2 ?N. ?N math:equalTo 3 } => { [] :selected (?M ?N) }.\"\"\"
>>> g = g.parse(data=src, format='n3')
>>> s._finalize()
>>> len([pred for subj, pred, obj in s.facts if pred == %(u)s'http://metacognition.info/FuXi/test#relatedTo']) #doctest: +SKIP
1
>>> len(s.rules)
1
>>> print(len(s.rules[0][RULE_LHS]))
4
>>> print(len(s.rules[0][RULE_RHS]))
5
>>> print(s.rules[0][RULE_LHS][1])
(?X, rdflib.term.URIRef(%(u)s'http://metacognition.info/FuXi/test#prop1'), ?M)
>>> print(s.rules[0][RULE_LHS][-1])
<http://www.w3.org/2000/10/swap/math#equalTo>(?N, 3)
Description Rule Patterns Compilation
>>> s = N3RuleStore()
>>> g = Graph(s)
>>> src = \"\"\"
... @prefix math: <http://www.w3.org/2000/10/swap/math#>.
... @prefix : <http://metacognition.info/FuXi/test#>.
... @prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#>.
... @prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>.
... @prefix owl: <http://www.w3.org/2002/07/owl#>.
... { ?S a [ rdfs:subClassOf ?C ] } => { ?S a ?C }.\"\"\"
>>> g = g.parse(data=src, format='n3')
>>> s._finalize()
>>> assert s.rules
>>> assert [pattern for pattern in s.rules[0][RULE_LHS] if isinstance(pattern, tuple) and [term for term in pattern if isinstance(term, BNode) ]], repr(s.rules[0][RULE_LHS])
Test single fact with collection
>>> s = N3RuleStore()
>>> g = Graph(s)
>>> src = \"\"\"
... @prefix math: <http://www.w3.org/2000/10/swap/math#>.
... @prefix : <http://metacognition.info/FuXi/test#>.
... @prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#>.
... @prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>.
... @prefix owl: <http://www.w3.org/2002/07/owl#>.
... (1 2) :relatedTo owl:Class.\"\"\"
>>> g = g.parse(data=src, format='n3')
>>> s._finalize()
>>> print(len(s.facts))
5
RHS can only include RDF triples
>>> s = N3RuleStore()
>>> g = Graph(s)
>>> src = \"\"\"
... @prefix math: <http://www.w3.org/2000/10/swap/math#>.
... @prefix : <http://metacognition.info/FuXi/test#>.
... @prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#>.
... @prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>.
... @prefix owl: <http://www.w3.org/2002/07/owl#>.
... {} => { 3 math:lessThan 2}.\"\"\"
>>> g = g.parse(data=src, format='n3')
>>> try:
... s._finalize()
... except Exception as e:
... print(e)
Rule RHS must only include RDF triples (<http://www.w3.org/2000/10/swap/math#lessThan>(3, 2))
BuiltIn used out of order
>>> s = N3RuleStore()
>>> g = Graph(s)
>>> src = \"\"\"
... @prefix math: <http://www.w3.org/2000/10/swap/math#>.
... @prefix : <http://metacognition.info/FuXi/test#>.
... { ?M math:lessThan ?Z. ?R :value ?M; :value2 ?Z} => { ?R a :Selected. }.\"\"\"
>>> try:
... g = g.parse(data=src, format='n3')
... except Exception as e:
... print(e) #doctest: +SKIP
Builtin refers to variables without previous reference (<http://www.w3.org/2000/10/swap/math#lessThan>(?M, ?Z))
Empty LHS & RHS
>>> s = N3RuleStore()
>>> g = Graph(s)
>>> src = \"\"\"
... @prefix math: <http://www.w3.org/2000/10/swap/math#>.
... @prefix : <http://metacognition.info/FuXi/test#>.
... @prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#>.
... @prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>.
... @prefix owl: <http://www.w3.org/2002/07/owl#>.
... {} => {rdf:nil :allClasses ?C}.
... {?C owl:oneOf ?L. ?X a ?C. ?L :notItem ?X} => {}.\"\"\"
>>> g = g.parse(data=src, format='n3')
>>> len(s.formulae)
2
>>> s._finalize()
>>> len(s.rules[0][0])
0
>>> len(s.rules[1][-1])
0
"""
__doc__ = py3compat.format_doctest_out(doc)
context_aware = True
formula_aware = True
def __init__(self, identifier=None, additionalBuiltins=None):
self.formulae = {}
self.facts = []
self.rootFormula = None
self._lists = {}
self.currentList = None
self._listBuffer = []
self.rules = []
self.referencedVariables = set()
self.nsMgr = {u'skolem': URIRef('http://code.google.com/p/python-dlp/wiki/SkolemTerm#')}
self.filters = {}
self.filters.update(FILTERS)
if additionalBuiltins:
self.filters.update(additionalBuiltins)
def namespace(self, prefix):
return self.nsMgr.get(prefix)
def bind(self, prefix, namespace, override=True):
if override or prefix not in self.nsMgr:
self.nsMgr[prefix] = namespace
def prefix(self, namespace):
return dict([(v, k) for
k, v in list(self.nsMgr.items())]).get(namespace)
def _unrollList(self, l, listName):
listTriples = []
lastItemName = None
for linkItem in l:
linkName = l.index(linkItem) == 0 and listName or BNode()
if lastItemName:
listTriples.append((lastItemName, RDF.rest, linkName))
listTriples.append((linkName, RDF.first, linkItem))
lastItemName = linkName
listTriples.append((lastItemName, RDF.rest, RDF.nil))
return listTriples
def _finalize(self):
def unrollFunc(left, right):
leftListsToUnroll = []
rightListsToUnroll = []
if isinstance(left, tuple):
s, p, o = left
leftListsToUnroll = [term for term in [s, o] if term in self._lists]
if leftListsToUnroll:
leftListsToUnroll = reduce(lambda x, y: x + y, [
self._unrollList(self._lists[l], l) for l in leftListsToUnroll])
left = [left]
elif isinstance(left, N3Builtin):
left = [left]
if isinstance(right, tuple):
s, p, o = right
rightListsToUnroll = [term for term in [s, o] if term in self._lists]
if rightListsToUnroll:
rightListsToUnroll = reduce(lambda x, y: x + y, [
self._unrollList(self._lists[l], l) for l in rightListsToUnroll])
right = [right]
elif isinstance(right, N3Builtin):
right = [right]
return left + leftListsToUnroll + right + rightListsToUnroll
if len(self.facts) == 1:
s, p, o = self.facts[0]
listsToUnroll = [term for term in [s, o] if term in self._lists]
if listsToUnroll:
self.facts.extend(reduce(lambda x, y: x + y, [
self._unrollList(self._lists[l], l) for l in listsToUnroll]))
elif self.facts:
self.facts = reduce(unrollFunc, self.facts)
for formula in list(self.formulae.values()):
if len(formula) == 1:
if isinstance(formula[0], tuple):
s, p, o = formula[0]
listsToUnroll = [term for term in [s, o] if term in self._lists]
if listsToUnroll:
listTriples = reduce(lambda x, y: x + y, [
self._unrollList(self._lists[l], l) for l in listsToUnroll])
formula.extend(listTriples)
elif len(formula):
formula.triples = reduce(unrollFunc, [i for i in formula])
for lhs, rhs in self.rules:
for item in self.formulae.get(rhs, []):
assert isinstance(item, tuple), \
"Rule RHS must only include RDF triples (%s)" % item
self.rules = [(self.formulae.get(lhs, Formula(lhs)),
self.formulae.get(rhs, Formula(rhs)))
for lhs, rhs in self.rules]
def _checkVariableReferences(self, referencedVariables, terms, funcObj):
for term in [i for i in terms if isinstance(i, Variable)]:
if term not in referencedVariables:
raise Exception("Builtin refers to variables without previous reference (%s)" % funcObj)
def add(self, triple, context=None, quoted=False):
(subject, predicate, obj) = triple
        if predicate == RDF.first and not isinstance(subject, Variable) and not isinstance(obj, Variable):
if not self.currentList:
self._listBuffer.append(obj)
self.currentList = subject
else:
self._listBuffer.append(obj)
        elif predicate == RDF.rest and not isinstance(subject, Variable) and not isinstance(obj, Variable):
if obj == RDF.nil:
self._lists[self.currentList] = [item for item in self._listBuffer]
self._listBuffer = []
self.currentList = None
elif not isinstance(context, QuotedGraph):
if not self.rootFormula:
self.rootFormula = context.identifier
if predicate == LOG.implies:
self.rules.append(
(isinstance(subject, URIRef) and subject or subject.identifier,
isinstance(obj, (URIRef, Literal)) and obj or obj.identifier))
else:
self.facts.append((subject, predicate, obj))
else:
formula = self.formulae.get(context.identifier, Formula(context.identifier))
if predicate in self.filters:
newFilter = N3Builtin(predicate, self.filters[predicate](subject, obj), subject, obj)
#@attention: The non-deterministic parse order of an RDF graph makes this
#check hard to enforce
#self._checkVariableReferences(self.referencedVariables, [subject, obj], newFilter)
formula.append(newFilter)
else:
#print("(%s, %s, %s) pattern in %s"%(subject, predicate, obj, context.identifier))
variables = [arg for arg in [subject, predicate, obj] if isinstance(arg, Variable)]
self.referencedVariables.update(variables)
formula.append((subject, predicate, obj))
self.formulae[context.identifier] = formula
def __repr__(self):
return ""
def __len__(self, context=None):
return 0
def optimizeRules(self):
patternDict = {}
for lhs, rhs in self.rules:
for pattern in lhs:
if not isinstance(pattern, N3Builtin):
_hashList = [isinstance(term, (Variable, BNode)) and '\t' or term for term in pattern]
patternDict.setdefault(
reduce(lambda x, y: x + y, _hashList), set()).add(pattern)
for key, vals in list(patternDict.items()):
if len(vals) > 1:
print("###### Similar Patterns ######")
for val in vals:
print(val)
print("##############################")
def test():
import doctest
doctest.testmod()
def test2():
s = N3RuleStore()
g = Graph(s)
src = """
@prefix math: <http://www.w3.org/2000/10/swap/math#>.
@prefix : <http://metacognition.info/FuXi/test#>.
@prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#>.
@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>.
@prefix owl: <http://www.w3.org/2002/07/owl#>.
:subj :pred obj.
{} => { 3 math:lessThan 2}."""
g = g.parse(data=src, format='n3')
s._finalize()
if __name__ == '__main__':
test()
#test2()
# from FuXi.Rete.RuleStore import Formula
# from FuXi.Rete.RuleStore import N3Builtin
# from FuXi.Rete.RuleStore import N3RuleStore
# from FuXi.Rete.RuleStore import Rule
# from FuXi.Rete.RuleStore import SetupRuleStore
|
|
#!/usr/bin/env python
# Copyright (c) 2006,2007,2008 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import getopt
import sys
import os
import boto
from boto.compat import six
try:
# multipart portions copyright Fabian Topfstedt
# https://gist.github.com/924094
import math
import mimetypes
from multiprocessing import Pool
from boto.s3.connection import S3Connection
from filechunkio import FileChunkIO
multipart_capable = True
usage_flag_multipart_capable = """ [--multipart]"""
usage_string_multipart_capable = """
multipart - Upload files as multiple parts. This needs filechunkio.
Requires ListBucket, ListMultipartUploadParts,
ListBucketMultipartUploads and PutObject permissions."""
except ImportError as err:
multipart_capable = False
usage_flag_multipart_capable = ""
if six.PY2:
attribute = 'message'
else:
attribute = 'msg'
usage_string_multipart_capable = '\n\n "' + \
getattr(err, attribute)[len('No module named '):] + \
'" is missing for multipart support '
DEFAULT_REGION = 'us-east-1'
usage_string = """
SYNOPSIS
s3put [-a/--access_key <access_key>] [-s/--secret_key <secret_key>]
-b/--bucket <bucket_name> [-c/--callback <num_cb>]
[-d/--debug <debug_level>] [-i/--ignore <ignore_dirs>]
[-n/--no_op] [-p/--prefix <prefix>] [-k/--key_prefix <key_prefix>]
[-q/--quiet] [-g/--grant grant] [-w/--no_overwrite] [-r/--reduced]
[--header] [--region <name>] [--host <s3_host>]""" + \
usage_flag_multipart_capable + """ path [path...]
Where
access_key - Your AWS Access Key ID. If not supplied, boto will
use the value of the environment variable
AWS_ACCESS_KEY_ID
secret_key - Your AWS Secret Access Key. If not supplied, boto
will use the value of the environment variable
AWS_SECRET_ACCESS_KEY
bucket_name - The name of the S3 bucket the file(s) should be
copied to.
path - A path to a directory or file that represents the items
to be uploaded. If the path points to an individual file,
that file will be uploaded to the specified bucket. If the
path points to a directory, it will recursively traverse
the directory and upload all files to the specified bucket.
debug_level - 0 means no debug output (default), 1 means normal
debug output from boto, and 2 means boto debug output
plus request/response output from httplib
ignore_dirs - a comma-separated list of directory names that will
be ignored and not uploaded to S3.
num_cb - The number of progress callbacks to display. The default
is zero which means no callbacks. If you supplied a value
of "-c 10" for example, the progress callback would be
called 10 times for each file transferred.
prefix - A file path prefix that will be stripped from the full
path of the file when determining the key name in S3.
For example, if the full path of a file is:
/home/foo/bar/fie.baz
and the prefix is specified as "-p /home/foo/" the
resulting key name in S3 will be:
/bar/fie.baz
The prefix must end in a trailing separator and if it
does not then one will be added.
key_prefix - A prefix to be added to the S3 key name, after any
stripping of the file path is done based on the
"-p/--prefix" option.
reduced - Use Reduced Redundancy storage
grant - A canned ACL policy that will be granted on each file
            transferred to S3. The value provided must be one
of the "canned" ACL policies supported by S3:
private|public-read|public-read-write|authenticated-read
    no_overwrite - No files will be overwritten on S3; if the file/key
                   already exists on S3 it will be kept. This is useful
                   for resuming interrupted transfers. Note this is not
                   a sync: even if the file has been updated locally,
                   the copy on S3 will not be updated as long as the
                   key already exists.
header - key=value pairs of extra header(s) to pass along in the
request
region - Manually set a region for buckets that are not in the US
classic region. Normally the region is autodetected, but
setting this yourself is more efficient.
        host - Hostname override, for using an endpoint other than AWS S3
""" + usage_string_multipart_capable + """
If the -n option is provided, no files will be transferred to S3 but
informational messages will be printed about what would happen.
"""
def usage(status=1):
print(usage_string)
sys.exit(status)
def submit_cb(bytes_so_far, total_bytes):
print('%d bytes transferred / %d bytes total' % (bytes_so_far, total_bytes))
def get_key_name(fullpath, prefix, key_prefix):
if fullpath.startswith(prefix):
key_name = fullpath[len(prefix):]
else:
key_name = fullpath
l = key_name.split(os.sep)
return key_prefix + '/'.join(l)
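# Worked example of the prefix/key_prefix handling described in the usage
# text (hypothetical paths): get_key_name('/home/foo/bar/fie.baz',
# '/home/foo/', 'backups/') returns 'backups/bar/fie.baz'; a path that does
# not start with the prefix is used as-is, with os.sep replaced by '/'.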
def _upload_part(bucketname, aws_key, aws_secret, multipart_id, part_num,
source_path, offset, bytes, debug, cb, num_cb,
amount_of_retries=10):
"""
Uploads a part with retries.
"""
if debug == 1:
print("_upload_part(%s, %s, %s)" % (source_path, offset, bytes))
def _upload(retries_left=amount_of_retries):
try:
if debug == 1:
print('Start uploading part #%d ...' % part_num)
conn = S3Connection(aws_key, aws_secret)
conn.debug = debug
bucket = conn.get_bucket(bucketname)
for mp in bucket.get_all_multipart_uploads():
if mp.id == multipart_id:
with FileChunkIO(source_path, 'r', offset=offset,
bytes=bytes) as fp:
mp.upload_part_from_file(fp=fp, part_num=part_num,
cb=cb, num_cb=num_cb)
break
except Exception as exc:
if retries_left:
_upload(retries_left=retries_left - 1)
else:
print('Failed uploading part #%d' % part_num)
raise exc
else:
if debug == 1:
print('... Uploaded part #%d' % part_num)
_upload()
def check_valid_region(conn, region):
if conn is None:
print('Invalid region (%s)' % region)
sys.exit(1)
def multipart_upload(bucketname, aws_key, aws_secret, source_path, keyname,
reduced, debug, cb, num_cb, acl='private', headers={},
guess_mimetype=True, parallel_processes=4,
region=DEFAULT_REGION):
"""
Parallel multipart upload.
"""
conn = boto.s3.connect_to_region(region, aws_access_key_id=aws_key,
aws_secret_access_key=aws_secret)
check_valid_region(conn, region)
conn.debug = debug
bucket = conn.get_bucket(bucketname)
if guess_mimetype:
mtype = mimetypes.guess_type(keyname)[0] or 'application/octet-stream'
headers.update({'Content-Type': mtype})
mp = bucket.initiate_multipart_upload(keyname, headers=headers,
reduced_redundancy=reduced)
source_size = os.stat(source_path).st_size
bytes_per_chunk = max(int(math.sqrt(5242880) * math.sqrt(source_size)),
5242880)
chunk_amount = int(math.ceil(source_size / float(bytes_per_chunk)))
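    # Worked example (illustrative): for a 1 GiB file this gives
    # sqrt(5 MiB) * sqrt(1 GiB) ~= 72 MiB per part, i.e. about 15 parts.
    # Files of 5 MiB or less hit the 5 MiB floor and go up as a single part
    # (S3 rejects non-final multipart parts smaller than 5 MiB).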
pool = Pool(processes=parallel_processes)
for i in range(chunk_amount):
offset = i * bytes_per_chunk
remaining_bytes = source_size - offset
bytes = min([bytes_per_chunk, remaining_bytes])
part_num = i + 1
pool.apply_async(_upload_part, [bucketname, aws_key, aws_secret, mp.id,
part_num, source_path, offset, bytes,
debug, cb, num_cb])
pool.close()
pool.join()
if len(mp.get_all_parts()) == chunk_amount:
mp.complete_upload()
key = bucket.get_key(keyname)
key.set_acl(acl)
else:
mp.cancel_upload()
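# Minimal sketch of calling multipart_upload() directly, mirroring how main()
# invokes it further below; bucket name, credentials and paths are
# placeholders:
#
#   multipart_upload('my-bucket', access_key, secret_key,
#                    '/tmp/large.bin', 'backups/large.bin',
#                    reduced=False, debug=0, cb=None, num_cb=0,
#                    region='us-east-1')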
def singlepart_upload(bucket, key_name, fullpath, *kargs, **kwargs):
"""
Single upload.
"""
k = bucket.new_key(key_name)
k.set_contents_from_filename(fullpath, *kargs, **kwargs)
def expand_path(path):
path = os.path.expanduser(path)
path = os.path.expandvars(path)
return os.path.abspath(path)
def main():
# default values
aws_access_key_id = None
aws_secret_access_key = None
bucket_name = ''
ignore_dirs = []
debug = 0
cb = None
num_cb = 0
quiet = False
no_op = False
prefix = '/'
key_prefix = ''
grant = None
no_overwrite = False
reduced = False
headers = {}
host = None
multipart_requested = False
region = None
try:
opts, args = getopt.getopt(
sys.argv[1:], 'a:b:c::d:g:hi:k:np:qs:wr',
['access_key=', 'bucket=', 'callback=', 'debug=', 'help', 'grant=',
'ignore=', 'key_prefix=', 'no_op', 'prefix=', 'quiet',
'secret_key=', 'no_overwrite', 'reduced', 'header=', 'multipart',
'host=', 'region='])
    except getopt.GetoptError:
usage(1)
# parse opts
for o, a in opts:
if o in ('-h', '--help'):
usage(0)
if o in ('-a', '--access_key'):
aws_access_key_id = a
if o in ('-b', '--bucket'):
bucket_name = a
if o in ('-c', '--callback'):
num_cb = int(a)
cb = submit_cb
if o in ('-d', '--debug'):
debug = int(a)
if o in ('-g', '--grant'):
grant = a
if o in ('-i', '--ignore'):
ignore_dirs = a.split(',')
if o in ('-n', '--no_op'):
no_op = True
if o in ('-w', '--no_overwrite'):
no_overwrite = True
if o in ('-p', '--prefix'):
prefix = a
if prefix[-1] != os.sep:
prefix = prefix + os.sep
prefix = expand_path(prefix)
if o in ('-k', '--key_prefix'):
key_prefix = a
if o in ('-q', '--quiet'):
quiet = True
if o in ('-s', '--secret_key'):
aws_secret_access_key = a
if o in ('-r', '--reduced'):
reduced = True
if o == '--header':
(k, v) = a.split("=", 1)
headers[k] = v
if o == '--host':
host = a
if o == '--multipart':
if multipart_capable:
multipart_requested = True
else:
print("multipart upload requested but not capable")
sys.exit(4)
if o == '--region':
regions = boto.s3.regions()
for region_info in regions:
if region_info.name == a:
region = a
break
else:
raise ValueError('Invalid region %s specified' % a)
if len(args) < 1:
usage(2)
if not bucket_name:
print("bucket name is required!")
usage(3)
connect_args = {
'aws_access_key_id': aws_access_key_id,
'aws_secret_access_key': aws_secret_access_key
}
if host:
connect_args['host'] = host
c = boto.s3.connect_to_region(region or DEFAULT_REGION, **connect_args)
check_valid_region(c, region or DEFAULT_REGION)
c.debug = debug
b = c.get_bucket(bucket_name, validate=False)
# Attempt to determine location and warn if no --host or --region
# arguments were passed. Then try to automagically figure out
# what should have been passed and fix it.
if host is None and region is None:
try:
location = b.get_location()
# Classic region will be '', any other will have a name
if location:
print('Bucket exists in %s but no host or region given!' % location)
# Override for EU, which is really Ireland according to the docs
if location == 'EU':
location = 'eu-west-1'
print('Automatically setting region to %s' % location)
# Here we create a new connection, and then take the existing
# bucket and set it to use the new connection
c = boto.s3.connect_to_region(location, **connect_args)
c.debug = debug
b.connection = c
except Exception as e:
if debug > 0:
print(e)
print('Could not get bucket region info, skipping...')
existing_keys_to_check_against = []
files_to_check_for_upload = []
for path in args:
path = expand_path(path)
# upload a directory of files recursively
if os.path.isdir(path):
if no_overwrite:
if not quiet:
print('Getting list of existing keys to check against')
for key in b.list(get_key_name(path, prefix, key_prefix)):
existing_keys_to_check_against.append(key.name)
for root, dirs, files in os.walk(path):
for ignore in ignore_dirs:
if ignore in dirs:
dirs.remove(ignore)
for path in files:
if path.startswith("."):
continue
files_to_check_for_upload.append(os.path.join(root, path))
# upload a single file
elif os.path.isfile(path):
fullpath = os.path.abspath(path)
key_name = get_key_name(fullpath, prefix, key_prefix)
files_to_check_for_upload.append(fullpath)
existing_keys_to_check_against.append(key_name)
# we are trying to upload something unknown
else:
print("I don't know what %s is, so i can't upload it" % path)
for fullpath in files_to_check_for_upload:
key_name = get_key_name(fullpath, prefix, key_prefix)
if no_overwrite and key_name in existing_keys_to_check_against:
if b.get_key(key_name):
if not quiet:
print('Skipping %s as it exists in s3' % fullpath)
continue
if not quiet:
print('Copying %s to %s/%s' % (fullpath, bucket_name, key_name))
if not no_op:
# 0-byte files don't work and also don't need multipart upload
if os.stat(fullpath).st_size != 0 and multipart_capable and \
multipart_requested:
multipart_upload(bucket_name, aws_access_key_id,
aws_secret_access_key, fullpath, key_name,
reduced, debug, cb, num_cb,
grant or 'private', headers,
region=region or DEFAULT_REGION)
else:
singlepart_upload(b, key_name, fullpath, cb=cb, num_cb=num_cb,
policy=grant, reduced_redundancy=reduced,
headers=headers)
if __name__ == "__main__":
main()
|
|
# Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
from selenium.common.exceptions import WebDriverException
from selenium.webdriver.common.by import By
def testShouldReturnNullWhenGettingTheValueOfAnAttributeThatIsNotListed(driver, pages):
pages.load("simpleTest.html")
head = driver.find_element(By.XPATH, "/html")
attribute = head.get_attribute("cheese")
assert attribute is None
def testShouldReturnNullWhenGettingSrcAttributeOfInvalidImgTag(driver, pages):
pages.load("simpleTest.html")
img = driver.find_element(By.ID, "invalidImgTag")
img_attr = img.get_attribute("src")
assert img_attr is None
def testShouldReturnAnAbsoluteUrlWhenGettingSrcAttributeOfAValidImgTag(driver, pages):
pages.load("simpleTest.html")
img = driver.find_element(By.ID, "validImgTag")
img_attr = img.get_attribute("src")
assert "icon.gif" in img_attr
def testShouldReturnAnAbsoluteUrlWhenGettingHrefAttributeOfAValidAnchorTag(driver, pages):
pages.load("simpleTest.html")
img = driver.find_element(By.ID, "validAnchorTag")
img_attr = img.get_attribute("href")
assert "icon.gif" in img_attr
def testShouldReturnEmptyAttributeValuesWhenPresentAndTheValueIsActuallyEmpty(driver, pages):
pages.load("simpleTest.html")
body = driver.find_element(By.XPATH, "//body")
assert "" == body.get_attribute("style")
def testShouldReturnTheValueOfTheDisabledAttributeAsFalseIfNotSet(driver, pages):
pages.load("formPage.html")
inputElement = driver.find_element(By.XPATH, "//input[@id='working']")
assert inputElement.get_attribute("disabled") is None
assert inputElement.is_enabled()
pElement = driver.find_element(By.ID, "peas")
assert pElement.get_attribute("disabled") is None
assert pElement.is_enabled()
def testShouldReturnTheValueOfTheIndexAttributeEvenIfItIsMissing(driver, pages):
pages.load("formPage.html")
multiSelect = driver.find_element(By.ID, "multi")
options = multiSelect.find_elements(By.TAG_NAME, "option")
assert "1" == options[1].get_attribute("index")
def testShouldIndicateTheElementsThatAreDisabledAreNotIs_enabled(driver, pages):
pages.load("formPage.html")
inputElement = driver.find_element(By.XPATH, "//input[@id='notWorking']")
assert not inputElement.is_enabled()
inputElement = driver.find_element(By.XPATH, "//input[@id='working']")
assert inputElement.is_enabled()
def testElementsShouldBeDisabledIfTheyAreDisabledUsingRandomDisabledStrings(driver, pages):
pages.load("formPage.html")
disabledTextElement1 = driver.find_element(By.ID, "disabledTextElement1")
assert not disabledTextElement1.is_enabled()
disabledTextElement2 = driver.find_element(By.ID, "disabledTextElement2")
assert not disabledTextElement2.is_enabled()
disabledSubmitElement = driver.find_element(By.ID, "disabledSubmitElement")
assert not disabledSubmitElement.is_enabled()
def testShouldIndicateWhenATextAreaIsDisabled(driver, pages):
pages.load("formPage.html")
textArea = driver.find_element(By.XPATH, "//textarea[@id='notWorkingArea']")
assert not textArea.is_enabled()
@pytest.mark.xfail_safari
def testShouldThrowExceptionIfSendingKeysToElementDisabledUsingRandomDisabledStrings(driver, pages):
pages.load("formPage.html")
disabledTextElement1 = driver.find_element(By.ID, "disabledTextElement1")
with pytest.raises(WebDriverException):
disabledTextElement1.send_keys("foo")
assert "" == disabledTextElement1.text
disabledTextElement2 = driver.find_element(By.ID, "disabledTextElement2")
with pytest.raises(WebDriverException):
disabledTextElement2.send_keys("bar")
assert "" == disabledTextElement2.text
def testShouldIndicateWhenASelectIsDisabled(driver, pages):
pages.load("formPage.html")
enabled = driver.find_element(By.NAME, "selectomatic")
disabled = driver.find_element(By.NAME, "no-select")
assert enabled.is_enabled()
assert not disabled.is_enabled()
def testShouldReturnTheValueOfCheckedForACheckboxEvenIfItLacksThatAttribute(driver, pages):
pages.load("formPage.html")
checkbox = driver.find_element(By.XPATH, "//input[@id='checky']")
assert checkbox.get_attribute("checked") is None
checkbox.click()
assert "true" == checkbox.get_attribute("checked")
def testShouldReturnTheValueOfSelectedForRadioButtonsEvenIfTheyLackThatAttribute(driver, pages):
pages.load("formPage.html")
neverSelected = driver.find_element(By.ID, "cheese")
initiallyNotSelected = driver.find_element(By.ID, "peas")
initiallySelected = driver.find_element(By.ID, "cheese_and_peas")
assert neverSelected.get_attribute("checked") is None
assert initiallyNotSelected.get_attribute("checked") is None
assert "true" == initiallySelected.get_attribute("checked")
initiallyNotSelected.click()
assert neverSelected.get_attribute("selected") is None
assert "true" == initiallyNotSelected.get_attribute("checked")
assert initiallySelected.get_attribute("checked") is None
def testShouldReturnTheValueOfSelectedForOptionsInSelectsEvenIfTheyLackThatAttribute(driver, pages):
pages.load("formPage.html")
selectBox = driver.find_element(By.XPATH, "//select[@name='selectomatic']")
options = selectBox.find_elements(By.TAG_NAME, "option")
one = options[0]
two = options[1]
assert one.is_selected()
assert not two.is_selected()
assert "true" == one.get_attribute("selected")
assert two.get_attribute("selected") is None
def testShouldReturnValueOfClassAttributeOfAnElement(driver, pages):
pages.load("xhtmlTest.html")
heading = driver.find_element(By.XPATH, "//h1")
classname = heading.get_attribute("class")
assert "header" == classname
# Disabled due to issues with Frames
# def testShouldReturnValueOfClassAttributeOfAnElementAfterSwitchingIFrame(driver, pages):
# pages.load("iframes.html")
# driver.switch_to.frame("iframe1")
#
# wallace = driver.find_element(By.XPATH, "//div[@id='wallace']")
# classname = wallace.get_attribute("class")
# assert "gromit" == classname
def testShouldReturnTheContentsOfATextAreaAsItsValue(driver, pages):
pages.load("formPage.html")
value = driver.find_element(By.ID, "withText").get_attribute("value")
assert "Example text" == value
def testShouldReturnTheContentsOfATextAreaAsItsValueWhenSetToNonNorminalTrue(driver, pages):
pages.load("formPage.html")
e = driver.find_element(By.ID, "withText")
driver.execute_script("arguments[0].value = 'tRuE'", e)
value = e.get_attribute("value")
assert "tRuE" == value
def testShouldTreatReadonlyAsAValue(driver, pages):
pages.load("formPage.html")
element = driver.find_element(By.NAME, "readonly")
readOnlyAttribute = element.get_attribute("readonly")
textInput = driver.find_element(By.NAME, "x")
notReadOnly = textInput.get_attribute("readonly")
assert readOnlyAttribute != notReadOnly
def testShouldGetNumericAtribute(driver, pages):
pages.load("formPage.html")
element = driver.find_element(By.ID, "withText")
assert "5" == element.get_attribute("rows")
def testCanReturnATextApproximationOfTheStyleAttribute(driver, pages):
pages.load("javascriptPage.html")
style = driver.find_element(By.ID, "red-item").get_attribute("style")
assert "background-color" in style.lower()
def testShouldCorrectlyReportValueOfColspan(driver, pages):
pages.load("tables.html")
th1 = driver.find_element(By.ID, "th1")
td2 = driver.find_element(By.ID, "td2")
assert "th1" == th1.get_attribute("id")
assert "3" == th1.get_attribute("colspan")
assert "td2" == td2.get_attribute("id")
assert "2" == td2.get_attribute("colspan")
def testCanRetrieveTheCurrentValueOfATextFormField_textInput(driver, pages):
pages.load("formPage.html")
element = driver.find_element(By.ID, "working")
assert "" == element.get_attribute("value")
element.send_keys("hello world")
assert "hello world" == element.get_attribute("value")
def testCanRetrieveTheCurrentValueOfATextFormField_emailInput(driver, pages):
pages.load("formPage.html")
element = driver.find_element(By.ID, "email")
assert "" == element.get_attribute("value")
element.send_keys("hello@example.com")
assert "hello@example.com" == element.get_attribute("value")
def testCanRetrieveTheCurrentValueOfATextFormField_textArea(driver, pages):
pages.load("formPage.html")
element = driver.find_element(By.ID, "emptyTextArea")
assert "" == element.get_attribute("value")
element.send_keys("hello world")
assert "hello world" == element.get_attribute("value")
def testShouldReturnNullForNonPresentBooleanAttributes(driver, pages):
pages.load("booleanAttributes.html")
element1 = driver.find_element(By.ID, "working")
assert element1.get_attribute("required") is None
@pytest.mark.xfail_ie
def testShouldReturnTrueForPresentBooleanAttributes(driver, pages):
pages.load("booleanAttributes.html")
element1 = driver.find_element(By.ID, "emailRequired")
assert "true" == element1.get_attribute("required")
element2 = driver.find_element(By.ID, "emptyTextAreaRequired")
assert "true" == element2.get_attribute("required")
element3 = driver.find_element(By.ID, "inputRequired")
assert "true" == element3.get_attribute("required")
element4 = driver.find_element(By.ID, "textAreaRequired")
assert "true" == element4.get_attribute("required")
@pytest.mark.xfail_chrome
@pytest.mark.xfail_firefox
@pytest.mark.xfail_safari
def testShouldGetUnicodeCharsFromAttribute(driver, pages):
pages.load("formPage.html")
title = driver.find_element(By.ID, "vsearchGadget").get_attribute("title")
assert 'Hvad s\xf8ger du?' == title
@pytest.mark.xfail_chrome
@pytest.mark.xfail_firefox
@pytest.mark.xfail_safari
def testShouldGetValuesAndNotMissItems(driver, pages):
pages.load("attributes.html")
expected = "4b273a33fbbd29013nN93dy4F1A~"
result = driver.find_element(By.CSS_SELECTOR, "li").get_attribute("value")
assert expected == result
|
|
# django imports
from django.contrib.auth.models import Group
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
from django.core.urlresolvers import reverse
from django.http import Http404
from django.test import TestCase
from django.test.client import Client
# permissions imports
from permissions.models import PrincipalRoleRelation
from permissions.models import Role
import permissions.utils
# workflows imports
from workflows.models import Transition
# portlets imports
from portlets.models import Portlet
from portlets.models import PortletAssignment
from portlets.models import PortletBlocking
from portlets.models import PortletRegistration
from portlets.models import Slot
# lfc imports
import lfc.utils.registration
from lfc.models import Portal
from lfc.tests.utils import create_request
# lfc_page imports
from lfc_page.models import Page
class InheritancePermissionTestCase(TestCase):
"""
"""
fixtures = ["superuser.xml"]
class LFCPermissionTestCase2(TestCase):
"""
"""
fixtures = ["superuser.xml"]
def setUp(self):
"""
"""
Portal.objects.create()
self.editor = Role.objects.create(name="editor")
self.user = User.objects.create(username="user", is_active=True)
self.user.set_password("secret")
self.user.save()
self.group = Group.objects.create(name="group")
self.page_1 = Page.objects.create(title="Page 1", slug="page-1")
self.page_2 = Page.objects.create(title="Page 2", slug="page-2", parent=self.page_1)
self.page_3 = Page.objects.create(title="Page 3", slug="page-3", parent=self.page_2)
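        # page_1 -> page_2 -> page_3 form a parent chain; the
        # test_local_roles_* methods below rely on a role granted on an
        # ancestor being inherited by its descendants, but never the other
        # way around.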
def test_delete_user(self):
"""
"""
request = create_request()
roles = PrincipalRoleRelation.objects.all()
self.assertEqual(len(roles), 0)
permissions.utils.add_local_role(self.page_1, self.user, self.editor)
roles = PrincipalRoleRelation.objects.all()
self.assertEqual(len(roles), 1)
from lfc.manage.views import delete_user
delete_user(request, self.user.id)
roles = PrincipalRoleRelation.objects.all()
self.assertEqual(len(roles), 0)
def test_delete_group(self):
"""
"""
request = create_request()
roles = PrincipalRoleRelation.objects.all()
self.assertEqual(len(roles), 0)
permissions.utils.add_local_role(self.page_1, self.group, self.editor)
roles = PrincipalRoleRelation.objects.all()
self.assertEqual(len(roles), 1)
from lfc.manage.views import delete_group
delete_group(request, self.group.id)
roles = PrincipalRoleRelation.objects.all()
self.assertEqual(len(roles), 0)
def test_local_roles_from_parent_1(self):
"""
"""
permissions.utils.add_local_role(self.page_1, self.user, self.editor)
roles = permissions.utils.get_roles(self.user, self.page_1)
self.assertEqual(list(roles), [self.editor])
roles = permissions.utils.get_roles(self.user, self.page_2)
self.assertEqual(list(roles), [self.editor])
roles = permissions.utils.get_roles(self.user, self.page_3)
self.assertEqual(list(roles), [self.editor])
def test_local_roles_from_parent_2(self):
"""
"""
permissions.utils.add_local_role(self.page_2, self.user, self.editor)
roles = permissions.utils.get_roles(self.user, self.page_1)
self.assertEqual(list(roles), [])
roles = permissions.utils.get_roles(self.user, self.page_2)
self.assertEqual(list(roles), [self.editor])
roles = permissions.utils.get_roles(self.user, self.page_3)
self.assertEqual(list(roles), [self.editor])
def test_local_roles_from_parent_3(self):
"""
"""
permissions.utils.add_local_role(self.page_3, self.user, self.editor)
roles = permissions.utils.get_roles(self.user, self.page_1)
self.assertEqual(list(roles), [])
roles = permissions.utils.get_roles(self.user, self.page_2)
self.assertEqual(list(roles), [])
roles = permissions.utils.get_roles(self.user, self.page_3)
self.assertEqual(list(roles), [self.editor])
def test_local_roles_from_group_1(self):
"""
"""
# Add user to group
self.user.groups.add(self.group)
# Assign "editor" to group on page 3
permissions.utils.add_local_role(self.page_1, self.group, self.editor)
roles = permissions.utils.get_roles(self.user, self.page_1)
self.assertEqual(list(roles), [self.editor])
roles = permissions.utils.get_roles(self.user, self.page_2)
self.assertEqual(list(roles), [self.editor])
roles = permissions.utils.get_roles(self.user, self.page_3)
self.assertEqual(list(roles), [self.editor])
def test_local_roles_from_group_2(self):
"""
"""
# Add user to group
self.user.groups.add(self.group)
# Assign "editor" to group on page 2
permissions.utils.add_local_role(self.page_2, self.group, self.editor)
roles = permissions.utils.get_roles(self.user, self.page_1)
self.assertEqual(list(roles), [])
roles = permissions.utils.get_roles(self.user, self.page_2)
self.assertEqual(list(roles), [self.editor])
roles = permissions.utils.get_roles(self.user, self.page_3)
self.assertEqual(list(roles), [self.editor])
def test_local_roles_from_group_3(self):
"""
"""
# Add user to group
self.user.groups.add(self.group)
# Assign "editor" to group on page 3
permissions.utils.add_local_role(self.page_3, self.group, self.editor)
roles = permissions.utils.get_roles(self.user, self.page_1)
self.assertEqual(list(roles), [])
roles = permissions.utils.get_roles(self.user, self.page_2)
self.assertEqual(list(roles), [])
roles = permissions.utils.get_roles(self.user, self.page_3)
self.assertEqual(list(roles), [self.editor])
def test_local_roles_from_group_4(self):
"""
"""
# Add user to group
self.user.groups.add(self.group)
# Assign "editor" to group on page 2 and 3
permissions.utils.add_local_role(self.page_3, self.group, self.editor)
permissions.utils.add_local_role(self.page_2, self.group, self.editor)
roles = permissions.utils.get_roles(self.user, self.page_1)
self.assertEqual(list(roles), [])
roles = permissions.utils.get_roles(self.user, self.page_2)
self.assertEqual(list(roles), [self.editor])
roles = permissions.utils.get_roles(self.user, self.page_3)
self.assertEqual(list(roles), [self.editor])
class LFCPermissionTestCase(TestCase):
"""
"""
fixtures = ["superuser.xml"]
def setUp(self):
# Initialize LFC
from lfc.management.commands.lfc_init import Command
Command().handle()
# Create a slot
self.left_slot = Slot.objects.create(name="Left")
# Create a page
self.page = Page.objects.filter()[0]
self.ctype = ContentType.objects.get_for_model(self.page)
self.portal = lfc.utils.get_portal()
self.portal_ctype = ContentType.objects.get_for_model(self.portal)
def test_reviewer(self):
"""Tests access rights of a reviewer.
"""
reviewer = User.objects.create(username="reviewer", is_active=True)
reviewer.set_password("reviewer")
reviewer.save()
role = Role.objects.get(name="Reviewer")
role.add_principal(reviewer)
self.assertEqual(role.get_users(), [reviewer])
self.client = Client()
result = self.client.login(username="reviewer", password="reviewer")
self.assertEqual(result, True)
# Page is public, so anonymous should be able to view that page
result = self.client.get(reverse("lfc_base_view", kwargs={"slug": "welcome-to-lfc"}))
self.failIf(result.content.find("Welcome to LFC") == -1)
result = self.client.get(reverse("lfc_manage_users"))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
# portal
result = self.client.post(reverse("lfc_save_portal_core"))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
# applications
result = self.client.get(reverse("lfc_applications"))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
result = self.client.get(reverse("lfc_install_application", kwargs={"name": "dummy"}))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
result = self.client.get(reverse("lfc_uninstall_application", kwargs={"name": "dummy"}))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
result = self.client.get(reverse("lfc_reinstall_application", kwargs={"name": "dummy"}))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
# content types
result = self.client.get(reverse("lfc_content_types"))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
result = self.client.get(reverse("lfc_content_type", kwargs={"id": 0}))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
# manage content
        # page is public, so the reviewer can view it within the manage screens.
result = self.client.get(reverse("lfc_manage_object", kwargs={"id": self.page.id}))
self.assertEqual(result.status_code, 200)
result = self.client.get(reverse("lfc_add_object", kwargs={"id": 0}))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
result = self.client.get(reverse("lfc_add_top_object"))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
result = self.client.get(reverse("lfc_add_object", kwargs={"id": 0, "language": "en"}))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
result = self.client.get(reverse("lfc_delete_object", kwargs={"id": self.page.id}))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
result = self.client.post(reverse("lfc_save_object_core_data", kwargs={"id": self.page.id}))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
result = self.client.post(reverse("lfc_save_meta_data", kwargs={"id": self.page.id}))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
result = self.client.post(reverse("lfc_save_seo", kwargs={"id": self.page.id}))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
# object images
result = self.client.post(reverse("lfc_add_images", kwargs={"id": self.page.id}), {"sessionid": self.client.session.session_key})
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
result = self.client.post(reverse("lfc_update_images", kwargs={"id": self.page.id}))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
result = self.client.get(reverse("lfc_load_object_images", kwargs={"id": self.page.id}))
self.failIf(result.content.find("images") == -1)
# portal images
result = self.client.post(reverse("lfc_add_portal_images"), {"sessionid": self.client.session.session_key})
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
result = self.client.post(reverse("lfc_update_portal_images"))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
result = self.client.get(reverse("lfc_load_portal_images"))
self.assertEqual(result.status_code, 200)
# object files
result = self.client.post(reverse("lfc_add_files", kwargs={"id": self.page.id}), {"sessionid": self.client.session.session_key})
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
result = self.client.post(reverse("lfc_update_files", kwargs={"id": self.page.id}))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
result = self.client.get(reverse("lfc_load_object_files", kwargs={"id": self.page.id}))
self.failIf(result.content.find("files") == -1)
# portal files
result = self.client.post(reverse("lfc_add_portal_files"), {"sessionid": self.client.session.session_key})
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
result = self.client.post(reverse("lfc_update_portal_files"))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
result = self.client.get(reverse("lfc_load_portal_files"))
self.assertEqual(result.status_code, 200)
# comments
result = self.client.get(reverse("lfc_update_comments", kwargs={"id": self.page.id}))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
# children
result = self.client.get(reverse("lfc_update_object_children", kwargs={"id": self.page.id}))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
result = self.client.get(reverse("lfc_update_portal_children"))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
# permissions
result = self.client.get(reverse("lfc_update_object_permissions", kwargs={"id": self.page.id}))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
result = self.client.get(reverse("lfc_update_portal_permissions"))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
# workflows
transition = Transition.objects.get(name="Reject", workflow__name="Portal")
result = self.client.get(reverse("lfc_manage_do_transition", kwargs={"id": self.page.id}) + "?transition=" + str(transition.id))
self.assertEqual(result.status_code, 200)
result = self.client.get(reverse("lfc_manage_workflow", kwargs={"id": self.page.id}))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
result = self.client.get(reverse("lfc_manage_workflow"))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
result = self.client.get(reverse("lfc_manage_add_workflow"))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
result = self.client.get(reverse("lfc_manage_delete_workflow", kwargs={"id": 1}))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
result = self.client.get(reverse("lfc_manage_state", kwargs={"id": 1}))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
result = self.client.get(reverse("lfc_manage_transition", kwargs={"id": 1}))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
result = self.client.get(reverse("lfc_manage_save_workflow_data", kwargs={"id": 1}))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
result = self.client.get(reverse("lfc_manage_save_workflow_state", kwargs={"id": 1}))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
result = self.client.get(reverse("lfc_manage_delete_workflow_state", kwargs={"id": 1}))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
result = self.client.get(reverse("lfc_manage_add_workflow_state", kwargs={"id": 1}))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
result = self.client.get(reverse("lfc_manage_add_workflow_transition", kwargs={"id": 1}))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
result = self.client.get(reverse("lfc_manage_delete_workflow_transition", kwargs={"id": 1}))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
result = self.client.get(reverse("lfc_manage_save_workflow_transition", kwargs={"id": 1}))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
# object portlets
from lfc_portlets.models import ContentPortlet
self.portlet = ContentPortlet()
self.portlet.id = 1
        # Assign the portlet to the page
self.pa = PortletAssignment.objects.create(
slot=self.left_slot, content=self.page, portlet=self.portlet, position=1)
result = self.client.get(reverse("lfc_add_portlet", kwargs={"object_type_id": self.ctype.id, "object_id": self.page.id}))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
result = self.client.get(reverse("lfc_update_portlets_blocking", kwargs={"object_type_id": self.ctype.id, "object_id": self.page.id}))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
result = self.client.get(reverse("lfc_delete_portlet", kwargs={"portletassignment_id": self.pa.id}))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
result = self.client.get(reverse("lfc_edit_portlet", kwargs={"portletassignment_id": self.pa.id}))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
# portal portlets
        # Assign the portlet to the portal
self.pa = PortletAssignment.objects.create(
slot=self.left_slot, content=self.portal, portlet=self.portlet, position=1)
result = self.client.get(reverse("lfc_add_portlet", kwargs={"object_type_id": self.portal_ctype.id, "object_id": self.portal.id}))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
result = self.client.get(reverse("lfc_update_portlets_blocking", kwargs={"object_type_id": self.portal_ctype.id, "object_id": self.portal.id}))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
result = self.client.get(reverse("lfc_delete_portlet", kwargs={"portletassignment_id": self.pa.id}))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
result = self.client.get(reverse("lfc_edit_portlet", kwargs={"portletassignment_id": self.pa.id}))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
# review
result = self.client.get(reverse("lfc_manage_review"))
self.failIf(result.content.find("There are no objects to review") == -1)
# local roles
result = self.client.get(reverse("lfc_manage_save_local_roles", kwargs={"id": self.page.id}))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
result = self.client.get(reverse("lfc_manage_local_roles_add_form", kwargs={"id": self.page.id}))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
result = self.client.get(reverse("lfc_manage_local_roles_search", kwargs={"id": self.page.id}))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
result = self.client.get(reverse("lfc_manage_add_local_roles", kwargs={"id": self.page.id}))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
# translation
result = self.client.post(reverse("lfc_save_translation"), {"canonical_id": self.page.id})
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
result = self.client.get(reverse("lfc_translate_object", kwargs={"id": self.page.id, "language": "en"}))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
# language
        # Note: All logged-in users are allowed to change the language
result = self.client.get(reverse("lfc_set_navigation_tree_language", kwargs={"language": "en"}))
self.assertEqual(result.status_code, 200)
result = self.client.get(reverse("lfc_manage_set_language", kwargs={"language": "en"}), HTTP_REFERER = "/")
self.failIf(result._headers["location"][1].startswith("http://testserver/login"))
# template
result = self.client.post(reverse("lfc_set_template"), {"obj_id": self.page.id})
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
# users
result = self.client.get(reverse("lfc_manage_users"))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
result = self.client.get(reverse("lfc_manage_user", kwargs={"id": 1}))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
result = self.client.get(reverse("lfc_manage_user"))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
result = self.client.get(reverse("lfc_save_user_data", kwargs={"id": 1}))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
result = self.client.get(reverse("lfc_add_user"))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
result = self.client.get(reverse("lfc_delete_user", kwargs={"id": 1}))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
result = self.client.get(reverse("lfc_manage_change_users"))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
result = self.client.get(reverse("lfc_manage_change_password", kwargs={"id": 1}))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
result = self.client.get(reverse("lfc_manage_set_users_filter"))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
result = self.client.get(reverse("lfc_manage_reset_users_filter"))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
result = self.client.get(reverse("lfc_manage_set_users_page"))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
result = self.client.get(reverse("lfc_manage_set_users_filter"))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
result = self.client.get(reverse("lfc_manage_reset_users_filter"))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
result = self.client.get(reverse("lfc_manage_reset_user_filter"))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
result = self.client.get(reverse("lfc_manage_set_user_page"))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
# groups
result = self.client.get(reverse("lfc_manage_group"))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
result = self.client.get(reverse("lfc_manage_group", kwargs={"id": 1}))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
result = self.client.get(reverse("lfc_manage_add_group"))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
result = self.client.get(reverse("lfc_manage_save_group", kwargs={"id": 1}))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
result = self.client.get(reverse("lfc_manage_delete_group", kwargs={"id": 1}))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
# roles
result = self.client.get(reverse("lfc_manage_role"))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
result = self.client.get(reverse("lfc_manage_role", kwargs={"id": 1}))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
result = self.client.get(reverse("lfc_manage_add_role"))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
result = self.client.get(reverse("lfc_manage_save_role", kwargs={"id": 1}))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
result = self.client.get(reverse("lfc_manage_delete_role", kwargs={"id": 1}))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
# portal
result = self.client.get(reverse("lfc_manage_portal"))
self.assertEqual(result.status_code, 200)
def test_anonymous(self):
"""Tests access rights of an anonymous user.
"""
self.client = Client()
result = self.client.logout()
# Page is public, so anonymous should be able to view that page
result = self.client.get(reverse("lfc_base_view", kwargs={"slug": "welcome-to-lfc"}))
self.failIf(result.content.find("Welcome to LFC") == -1)
result = self.client.get(reverse("lfc_manage_users"))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
# portal
result = self.client.post(reverse("lfc_save_portal_core"))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
# applications
result = self.client.get(reverse("lfc_applications"))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
result = self.client.get(reverse("lfc_install_application", kwargs={"name": "dummy"}))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
result = self.client.get(reverse("lfc_uninstall_application", kwargs={"name": "dummy"}))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
result = self.client.get(reverse("lfc_reinstall_application", kwargs={"name": "dummy"}))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
# content types
result = self.client.get(reverse("lfc_content_types"))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
result = self.client.get(reverse("lfc_content_type", kwargs={"id": 0}))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
# manage content
result = self.client.get(reverse("lfc_manage_object", kwargs={"id": self.page.id}))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
result = self.client.get(reverse("lfc_add_object", kwargs={"id": 0}))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
result = self.client.get(reverse("lfc_add_top_object"))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
result = self.client.get(reverse("lfc_add_object", kwargs={"id": 0, "language": "en"}))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
result = self.client.get(reverse("lfc_delete_object", kwargs={"id": self.page.id}))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
result = self.client.post(reverse("lfc_save_object_core_data", kwargs={"id": self.page.id}))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
result = self.client.post(reverse("lfc_save_meta_data", kwargs={"id": self.page.id}))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
result = self.client.post(reverse("lfc_save_seo", kwargs={"id": self.page.id}))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
# object images
result = self.client.post(reverse("lfc_add_images", kwargs={"id": self.page.id}), {"sessionid": "dummy"})
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
result = self.client.post(reverse("lfc_update_images", kwargs={"id": self.page.id}))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
result = self.client.get(reverse("lfc_load_object_images", kwargs={"id": self.page.id}))
self.failIf(result.content.find("images") == -1)
# portal images
result = self.client.post(reverse("lfc_add_portal_images"))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
result = self.client.post(reverse("lfc_update_portal_images"))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
result = self.client.get(reverse("lfc_load_portal_images"))
        self.assertEqual(result.status_code, 200)
# object files
result = self.client.post(reverse("lfc_add_files", kwargs={"id": self.page.id}))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
result = self.client.post(reverse("lfc_update_files", kwargs={"id": self.page.id}))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
result = self.client.get(reverse("lfc_load_object_files", kwargs={"id": self.page.id}))
self.failIf(result.content.find("files") == -1)
# portal files
result = self.client.post(reverse("lfc_add_portal_files"))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
result = self.client.post(reverse("lfc_update_portal_files"))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
result = self.client.get(reverse("lfc_load_portal_files"))
        self.assertEqual(result.status_code, 200)
# comments
result = self.client.get(reverse("lfc_update_comments", kwargs={"id": self.page.id}))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
# children
result = self.client.get(reverse("lfc_update_object_children", kwargs={"id": self.page.id}))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
result = self.client.get(reverse("lfc_update_portal_children"))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
# permissions
result = self.client.get(reverse("lfc_update_object_permissions", kwargs={"id": self.page.id}))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
result = self.client.get(reverse("lfc_update_portal_permissions"))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
# workflows
transition = Transition.objects.get(name="Reject", workflow__name="Portal")
result = self.client.get(reverse("lfc_manage_do_transition", kwargs={"id": self.page.id}) + "?transition=" + str(transition.id))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
result = self.client.get(reverse("lfc_manage_workflow", kwargs={"id": self.page.id}))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
result = self.client.get(reverse("lfc_manage_workflow"))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
result = self.client.get(reverse("lfc_manage_add_workflow"))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
result = self.client.get(reverse("lfc_manage_delete_workflow", kwargs={"id": 1}))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
result = self.client.get(reverse("lfc_manage_state", kwargs={"id": 1}))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
result = self.client.get(reverse("lfc_manage_transition", kwargs={"id": 1}))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
result = self.client.get(reverse("lfc_manage_save_workflow_data", kwargs={"id": 1}))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
result = self.client.get(reverse("lfc_manage_save_workflow_state", kwargs={"id": 1}))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
result = self.client.get(reverse("lfc_manage_delete_workflow_state", kwargs={"id": 1}))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
result = self.client.get(reverse("lfc_manage_add_workflow_state", kwargs={"id": 1}))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
result = self.client.get(reverse("lfc_manage_add_workflow_transition", kwargs={"id": 1}))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
result = self.client.get(reverse("lfc_manage_delete_workflow_transition", kwargs={"id": 1}))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
result = self.client.get(reverse("lfc_manage_save_workflow_transition", kwargs={"id": 1}))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
# object portlets
from lfc_portlets.models import ContentPortlet
self.portlet = ContentPortlet()
self.portlet.id = 1
        # Assign the portlet to the page
self.pa = PortletAssignment.objects.create(
slot=self.left_slot, content=self.page, portlet=self.portlet, position=1)
result = self.client.get(reverse("lfc_add_portlet", kwargs={"object_type_id": self.ctype.id, "object_id": self.page.id}))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
result = self.client.get(reverse("lfc_update_portlets_blocking", kwargs={"object_type_id": self.ctype.id, "object_id": self.page.id}))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
result = self.client.get(reverse("lfc_delete_portlet", kwargs={"portletassignment_id": self.pa.id}))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
result = self.client.get(reverse("lfc_edit_portlet", kwargs={"portletassignment_id": self.pa.id}))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
# portal portlets
        # Assign the portlet to the portal
self.pa = PortletAssignment.objects.create(
slot=self.left_slot, content=self.portal, portlet=self.portlet, position=1)
result = self.client.get(reverse("lfc_add_portlet", kwargs={"object_type_id": self.portal_ctype.id, "object_id": self.portal.id}))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
result = self.client.get(reverse("lfc_update_portlets_blocking", kwargs={"object_type_id": self.portal_ctype.id, "object_id": self.portal.id}))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
result = self.client.get(reverse("lfc_delete_portlet", kwargs={"portletassignment_id": self.pa.id}))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
result = self.client.get(reverse("lfc_edit_portlet", kwargs={"portletassignment_id": self.pa.id}))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
        # review
result = self.client.get(reverse("lfc_manage_review"))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
# local roles
result = self.client.get(reverse("lfc_manage_save_local_roles", kwargs={"id": self.page.id}))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
result = self.client.get(reverse("lfc_manage_local_roles_add_form", kwargs={"id": self.page.id}))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
result = self.client.get(reverse("lfc_manage_local_roles_search", kwargs={"id": self.page.id}))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
result = self.client.get(reverse("lfc_manage_add_local_roles", kwargs={"id": self.page.id}))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
# translation
result = self.client.post(reverse("lfc_save_translation"), {"canonical_id": self.page.id})
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
result = self.client.get(reverse("lfc_translate_object", kwargs={"id": self.page.id, "language": "en"}))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
# language
result = self.client.get(reverse("lfc_set_navigation_tree_language", kwargs={"language": "en"}))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
result = self.client.get(reverse("lfc_manage_set_language", kwargs={"language": "en"}))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
# template
result = self.client.post(reverse("lfc_set_template"), {"obj_id": self.page.id})
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
# users
result = self.client.get(reverse("lfc_manage_users"))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
result = self.client.get(reverse("lfc_manage_user", kwargs={"id": 1}))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
result = self.client.get(reverse("lfc_manage_user"))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
result = self.client.get(reverse("lfc_save_user_data", kwargs={"id": 1}))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
result = self.client.get(reverse("lfc_add_user"))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
result = self.client.get(reverse("lfc_delete_user", kwargs={"id": 1}))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
result = self.client.get(reverse("lfc_manage_change_users"))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
result = self.client.get(reverse("lfc_manage_change_password", kwargs={"id": 1}))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
result = self.client.get(reverse("lfc_manage_set_users_filter"))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
result = self.client.get(reverse("lfc_manage_reset_users_filter"))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
result = self.client.get(reverse("lfc_manage_set_users_page"))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
result = self.client.get(reverse("lfc_manage_set_users_filter"))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
result = self.client.get(reverse("lfc_manage_reset_users_filter"))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
result = self.client.get(reverse("lfc_manage_reset_user_filter"))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
result = self.client.get(reverse("lfc_manage_set_user_page"))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
# groups
result = self.client.get(reverse("lfc_manage_group"))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
result = self.client.get(reverse("lfc_manage_group", kwargs={"id": 1}))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
result = self.client.get(reverse("lfc_manage_add_group"))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
result = self.client.get(reverse("lfc_manage_save_group", kwargs={"id": 1}))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
result = self.client.get(reverse("lfc_manage_delete_group", kwargs={"id": 1}))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
# roles
result = self.client.get(reverse("lfc_manage_role"))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
result = self.client.get(reverse("lfc_manage_role", kwargs={"id": 1}))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
result = self.client.get(reverse("lfc_manage_add_role"))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
result = self.client.get(reverse("lfc_manage_save_role", kwargs={"id": 1}))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
result = self.client.get(reverse("lfc_manage_delete_role", kwargs={"id": 1}))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
# portal
result = self.client.get(reverse("lfc_manage_portal"))
self.failUnless(result._headers["location"][1].startswith("http://testserver/login"))
|
|
#!/usr/bin/env python
#
# Appcelerator Titanium Module Packager
#
#
import os, subprocess, sys, glob, string
import zipfile
from datetime import date
cwd = os.path.abspath(os.path.dirname(sys._getframe(0).f_code.co_filename))
os.chdir(cwd)
required_module_keys = ['name','version','moduleid','description','copyright','license','platform','minsdk']
module_defaults = {
'description':'My module',
'author': 'Your Name',
'license' : 'Specify your license',
'copyright' : 'Copyright (c) %s by Your Company' % str(date.today().year),
}
module_license_default = "TODO: place your license here and we'll include it in the module distribution"
def find_sdk(config):
sdk = config['TITANIUM_SDK']
return os.path.expandvars(os.path.expanduser(sdk))
def replace_vars(config,token):
idx = token.find('$(')
while idx != -1:
idx2 = token.find(')',idx+2)
if idx2 == -1: break
key = token[idx+2:idx2]
if not config.has_key(key): break
token = token.replace('$(%s)' % key, config[key])
idx = token.find('$(')
return token
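# Illustrative note (not part of the build flow): given config = {'TITANIUM_SDK':
# '/Library/TitaniumSDK'}, replace_vars(config, '$(TITANIUM_SDK)/iphone') returns
# '/Library/TitaniumSDK/iphone'; tokens whose key is missing from config are left
# untouched because the loop breaks on the first unknown key.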
def read_ti_xcconfig():
contents = open(os.path.join(cwd,'titanium.xcconfig')).read()
config = {}
for line in contents.splitlines(False):
line = line.strip()
if line[0:2]=='//': continue
idx = line.find('=')
if idx > 0:
key = line[0:idx].strip()
value = line[idx+1:].strip()
config[key] = replace_vars(config,value)
return config
def generate_doc(config):
docdir = os.path.join(cwd,'documentation')
if not os.path.exists(docdir):
print "Couldn't find documentation file at: %s" % docdir
return None
try:
import markdown2 as markdown
except ImportError:
import markdown
documentation = []
for file in os.listdir(docdir):
if file in ignoreFiles or os.path.isdir(os.path.join(docdir, file)):
continue
md = open(os.path.join(docdir,file)).read()
html = markdown.markdown(md)
		documentation.append({file:html})
return documentation
def compile_js(manifest,config):
js_file = os.path.join(cwd,'assets','com.tapit.js')
if not os.path.exists(js_file): return
from compiler import Compiler
try:
import json
except:
import simplejson as json
path = os.path.basename(js_file)
compiler = Compiler(cwd, manifest['moduleid'], manifest['name'], 'commonjs')
method = compiler.compile_commonjs_file(path,js_file)
exports = open('metadata.json','w')
json.dump({'exports':compiler.exports }, exports)
exports.close()
method += '\treturn filterDataInRange([NSData dataWithBytesNoCopy:data length:sizeof(data) freeWhenDone:NO], ranges[0]);'
f = os.path.join(cwd,'Classes','ComTapitModuleAssets.m')
c = open(f).read()
templ_search = ' moduleAsset\n{\n'
idx = c.find(templ_search) + len(templ_search)
before = c[0:idx]
after = """
}
@end
"""
newc = before + method + after
if newc!=c:
x = open(f,'w')
x.write(newc)
x.close()
def die(msg):
print msg
sys.exit(1)
def warn(msg):
print "[WARN] %s" % msg
def validate_license():
c = open(os.path.join(cwd,'LICENSE')).read()
if c.find(module_license_default)!=-1:
warn('please update the LICENSE file with your license text before distributing')
def validate_manifest():
	path = os.path.join(cwd,'manifest')
	if not os.path.exists(path): die("missing %s" % path)
	f = open(path)
manifest = {}
for line in f.readlines():
line = line.strip()
if line[0:1]=='#': continue
if line.find(':') < 0: continue
		key,value = line.split(':',1)
manifest[key.strip()]=value.strip()
for key in required_module_keys:
if not manifest.has_key(key): die("missing required manifest key '%s'" % key)
if module_defaults.has_key(key):
defvalue = module_defaults[key]
curvalue = manifest[key]
if curvalue==defvalue: warn("please update the manifest key: '%s' to a non-default value" % key)
return manifest,path
ignoreFiles = ['.DS_Store','.gitignore','libTitanium.a','titanium.jar','README','com.tapit.js']
ignoreDirs = ['.DS_Store','.svn','.git','CVSROOT']
def zip_dir(zf,dir,basepath,ignore=[]):
for root, dirs, files in os.walk(dir):
for name in ignoreDirs:
if name in dirs:
dirs.remove(name) # don't visit ignored directories
for file in files:
if file in ignoreFiles: continue
e = os.path.splitext(file)
if len(e)==2 and e[1]=='.pyc':continue
from_ = os.path.join(root, file)
to_ = from_.replace(dir, basepath, 1)
zf.write(from_, to_)
def glob_libfiles():
files = []
for libfile in glob.glob('build/**/*.a'):
if libfile.find('Release-')!=-1:
files.append(libfile)
return files
def build_module(manifest,config):
from tools import ensure_dev_path
ensure_dev_path()
rc = os.system("xcodebuild -sdk iphoneos -configuration Release")
if rc != 0:
die("xcodebuild failed")
rc = os.system("xcodebuild -sdk iphonesimulator -configuration Release")
if rc != 0:
die("xcodebuild failed")
# build the merged library using lipo
moduleid = manifest['moduleid']
libpaths = ''
for libfile in glob_libfiles():
libpaths+='%s ' % libfile
os.system("lipo %s -create -output build/lib%s.a" %(libpaths,moduleid))
def package_module(manifest,mf,config):
name = manifest['name'].lower()
moduleid = manifest['moduleid'].lower()
version = manifest['version']
modulezip = '%s-iphone-%s.zip' % (moduleid,version)
if os.path.exists(modulezip): os.remove(modulezip)
zf = zipfile.ZipFile(modulezip, 'w', zipfile.ZIP_DEFLATED)
modulepath = 'modules/iphone/%s/%s' % (moduleid,version)
zf.write(mf,'%s/manifest' % modulepath)
libname = 'lib%s.a' % moduleid
zf.write('build/%s' % libname, '%s/%s' % (modulepath,libname))
docs = generate_doc(config)
if docs!=None:
for doc in docs:
for file, html in doc.iteritems():
filename = string.replace(file,'.md','.html')
zf.writestr('%s/documentation/%s'%(modulepath,filename),html)
for dn in ('assets','example','platform'):
if os.path.exists(dn):
zip_dir(zf,dn,'%s/%s' % (modulepath,dn),['README'])
zf.write('LICENSE','%s/LICENSE' % modulepath)
zf.write('module.xcconfig','%s/module.xcconfig' % modulepath)
exports_file = 'metadata.json'
if os.path.exists(exports_file):
zf.write(exports_file, '%s/%s' % (modulepath, exports_file))
zf.close()
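# Entry point below: validate the manifest and LICENSE, read titanium.xcconfig,
# locate the Titanium SDK (so its 'iphone' and 'common' tools are importable),
# compile the bundled CommonJS module, build the universal static library with
# xcodebuild/lipo, and finally zip everything into the distributable module.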
if __name__ == '__main__':
manifest,mf = validate_manifest()
validate_license()
config = read_ti_xcconfig()
sdk = find_sdk(config)
sys.path.insert(0,os.path.join(sdk,'iphone'))
sys.path.append(os.path.join(sdk, "common"))
compile_js(manifest,config)
build_module(manifest,config)
package_module(manifest,mf,config)
sys.exit(0)
|
|
# Copyright 2011 Denali Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
from lxml import etree
import webob
from cinder.api.openstack.volume import snapshots
from cinder import db
from cinder import exception
from cinder import flags
from cinder.openstack.common import log as logging
from cinder import test
from cinder import volume
from cinder.tests.api.openstack import fakes
FLAGS = flags.FLAGS
LOG = logging.getLogger(__name__)
UUID = '00000000-0000-0000-0000-000000000001'
INVALID_UUID = '00000000-0000-0000-0000-000000000002'
def _get_default_snapshot_param():
return {
'id': UUID,
'volume_id': 12,
'status': 'available',
'volume_size': 100,
'created_at': None,
'display_name': 'Default name',
'display_description': 'Default description',
}
def stub_snapshot_create(self, context, volume_id, name, description):
snapshot = _get_default_snapshot_param()
snapshot['volume_id'] = volume_id
snapshot['display_name'] = name
snapshot['display_description'] = description
return snapshot
def stub_snapshot_delete(self, context, snapshot):
if snapshot['id'] != UUID:
raise exception.NotFound
def stub_snapshot_get(self, context, snapshot_id):
if snapshot_id != UUID:
raise exception.NotFound
param = _get_default_snapshot_param()
return param
def stub_snapshot_get_all(self, context, search_opts=None):
param = _get_default_snapshot_param()
return [param]
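# The stub_* helpers above stand in for cinder.volume.api.API and the db layer,
# so the SnapshotsController can be exercised below without a real volume
# service or database.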
class SnapshotApiTest(test.TestCase):
def setUp(self):
super(SnapshotApiTest, self).setUp()
self.controller = snapshots.SnapshotsController()
self.stubs.Set(db, 'snapshot_get_all_by_project',
fakes.stub_snapshot_get_all_by_project)
self.stubs.Set(db, 'snapshot_get_all',
fakes.stub_snapshot_get_all)
def test_snapshot_create(self):
self.stubs.Set(volume.api.API, "create_snapshot", stub_snapshot_create)
self.stubs.Set(volume.api.API, 'get', fakes.stub_volume_get)
snapshot = {"volume_id": '12',
"force": False,
"display_name": "Snapshot Test Name",
"display_description": "Snapshot Test Desc"}
body = dict(snapshot=snapshot)
req = fakes.HTTPRequest.blank('/v1/snapshots')
resp_dict = self.controller.create(req, body)
self.assertTrue('snapshot' in resp_dict)
self.assertEqual(resp_dict['snapshot']['display_name'],
snapshot['display_name'])
self.assertEqual(resp_dict['snapshot']['display_description'],
snapshot['display_description'])
def test_snapshot_create_force(self):
self.stubs.Set(volume.api.API, "create_snapshot_force",
stub_snapshot_create)
self.stubs.Set(volume.api.API, 'get', fakes.stub_volume_get)
snapshot = {"volume_id": '12',
"force": True,
"display_name": "Snapshot Test Name",
"display_description": "Snapshot Test Desc"}
body = dict(snapshot=snapshot)
req = fakes.HTTPRequest.blank('/v1/snapshots')
resp_dict = self.controller.create(req, body)
self.assertTrue('snapshot' in resp_dict)
self.assertEqual(resp_dict['snapshot']['display_name'],
snapshot['display_name'])
self.assertEqual(resp_dict['snapshot']['display_description'],
snapshot['display_description'])
snapshot = {"volume_id": "12",
"force": "**&&^^%%$$##@@",
"display_name": "Snapshot Test Name",
"display_description": "Snapshot Test Desc"}
body = dict(snapshot=snapshot)
req = fakes.HTTPRequest.blank('/v1/snapshots')
self.assertRaises(exception.InvalidParameterValue,
self.controller.create,
req,
body)
def test_snapshot_update(self):
self.stubs.Set(volume.api.API, "get_snapshot", stub_snapshot_get)
self.stubs.Set(volume.api.API, "update_snapshot",
fakes.stub_snapshot_update)
updates = {
"display_name": "Updated Test Name",
}
body = {"snapshot": updates}
req = fakes.HTTPRequest.blank('/v1/snapshots/%s' % UUID)
res_dict = self.controller.update(req, UUID, body)
expected = {'snapshot': {
'id': UUID,
'volume_id': 12,
'status': 'available',
'size': 100,
'created_at': None,
'display_name': 'Updated Test Name',
'display_description': 'Default description',
}}
self.assertEquals(expected, res_dict)
def test_snapshot_update_missing_body(self):
body = {}
req = fakes.HTTPRequest.blank('/v1/snapshots/%s' % UUID)
self.assertRaises(webob.exc.HTTPUnprocessableEntity,
self.controller.update, req, UUID, body)
def test_snapshot_update_invalid_body(self):
body = {'display_name': 'missing top level snapshot key'}
req = fakes.HTTPRequest.blank('/v1/snapshots/%s' % UUID)
self.assertRaises(webob.exc.HTTPUnprocessableEntity,
self.controller.update, req, UUID, body)
def test_snapshot_update_not_found(self):
self.stubs.Set(volume.api.API, "get_snapshot", stub_snapshot_get)
updates = {
"display_name": "Updated Test Name",
}
body = {"snapshot": updates}
req = fakes.HTTPRequest.blank('/v1/snapshots/not-the-uuid')
self.assertRaises(webob.exc.HTTPNotFound, self.controller.update, req,
'not-the-uuid', body)
def test_snapshot_delete(self):
self.stubs.Set(volume.api.API, "get_snapshot", stub_snapshot_get)
self.stubs.Set(volume.api.API, "delete_snapshot", stub_snapshot_delete)
snapshot_id = UUID
req = fakes.HTTPRequest.blank('/v1/snapshots/%s' % snapshot_id)
resp = self.controller.delete(req, snapshot_id)
self.assertEqual(resp.status_int, 202)
def test_snapshot_delete_invalid_id(self):
self.stubs.Set(volume.api.API, "delete_snapshot", stub_snapshot_delete)
snapshot_id = INVALID_UUID
req = fakes.HTTPRequest.blank('/v1/snapshots/%s' % snapshot_id)
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.delete,
req,
snapshot_id)
def test_snapshot_show(self):
self.stubs.Set(volume.api.API, "get_snapshot", stub_snapshot_get)
req = fakes.HTTPRequest.blank('/v1/snapshots/%s' % UUID)
resp_dict = self.controller.show(req, UUID)
self.assertTrue('snapshot' in resp_dict)
self.assertEqual(resp_dict['snapshot']['id'], UUID)
def test_snapshot_show_invalid_id(self):
snapshot_id = INVALID_UUID
req = fakes.HTTPRequest.blank('/v1/snapshots/%s' % snapshot_id)
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.show,
req,
snapshot_id)
def test_snapshot_detail(self):
self.stubs.Set(volume.api.API, "get_all_snapshots",
stub_snapshot_get_all)
req = fakes.HTTPRequest.blank('/v1/snapshots/detail')
resp_dict = self.controller.detail(req)
self.assertTrue('snapshots' in resp_dict)
resp_snapshots = resp_dict['snapshots']
self.assertEqual(len(resp_snapshots), 1)
resp_snapshot = resp_snapshots.pop()
self.assertEqual(resp_snapshot['id'], UUID)
def test_snapshot_list_by_status(self):
def stub_snapshot_get_all_by_project(context, project_id):
return [
fakes.stub_snapshot(1, display_name='backup1',
status='available'),
fakes.stub_snapshot(2, display_name='backup2',
status='available'),
fakes.stub_snapshot(3, display_name='backup3',
status='creating'),
]
self.stubs.Set(db, 'snapshot_get_all_by_project',
stub_snapshot_get_all_by_project)
# no status filter
req = fakes.HTTPRequest.blank('/v1/snapshots')
resp = self.controller.index(req)
self.assertEqual(len(resp['snapshots']), 3)
# single match
req = fakes.HTTPRequest.blank('/v1/snapshots?status=creating')
resp = self.controller.index(req)
self.assertEqual(len(resp['snapshots']), 1)
self.assertEqual(resp['snapshots'][0]['status'], 'creating')
# multiple match
req = fakes.HTTPRequest.blank('/v1/snapshots?status=available')
resp = self.controller.index(req)
self.assertEqual(len(resp['snapshots']), 2)
for snapshot in resp['snapshots']:
self.assertEquals(snapshot['status'], 'available')
# no match
req = fakes.HTTPRequest.blank('/v1/snapshots?status=error')
resp = self.controller.index(req)
self.assertEqual(len(resp['snapshots']), 0)
def test_snapshot_list_by_volume(self):
def stub_snapshot_get_all_by_project(context, project_id):
return [
fakes.stub_snapshot(1, volume_id='vol1', status='creating'),
fakes.stub_snapshot(2, volume_id='vol1', status='available'),
fakes.stub_snapshot(3, volume_id='vol2', status='available'),
]
self.stubs.Set(db, 'snapshot_get_all_by_project',
stub_snapshot_get_all_by_project)
# single match
req = fakes.HTTPRequest.blank('/v1/snapshots?volume_id=vol2')
resp = self.controller.index(req)
self.assertEqual(len(resp['snapshots']), 1)
self.assertEqual(resp['snapshots'][0]['volume_id'], 'vol2')
# multiple match
req = fakes.HTTPRequest.blank('/v1/snapshots?volume_id=vol1')
resp = self.controller.index(req)
self.assertEqual(len(resp['snapshots']), 2)
for snapshot in resp['snapshots']:
self.assertEqual(snapshot['volume_id'], 'vol1')
# multiple filters
req = fakes.HTTPRequest.blank('/v1/snapshots?volume_id=vol1'
'&status=available')
resp = self.controller.index(req)
self.assertEqual(len(resp['snapshots']), 1)
self.assertEqual(resp['snapshots'][0]['volume_id'], 'vol1')
self.assertEqual(resp['snapshots'][0]['status'], 'available')
def test_snapshot_list_by_name(self):
def stub_snapshot_get_all_by_project(context, project_id):
return [
fakes.stub_snapshot(1, display_name='backup1'),
fakes.stub_snapshot(2, display_name='backup2'),
fakes.stub_snapshot(3, display_name='backup3'),
]
self.stubs.Set(db, 'snapshot_get_all_by_project',
stub_snapshot_get_all_by_project)
# no display_name filter
req = fakes.HTTPRequest.blank('/v1/snapshots')
resp = self.controller.index(req)
self.assertEqual(len(resp['snapshots']), 3)
# filter by one name
req = fakes.HTTPRequest.blank('/v1/snapshots?display_name=backup2')
resp = self.controller.index(req)
self.assertEqual(len(resp['snapshots']), 1)
self.assertEquals(resp['snapshots'][0]['display_name'], 'backup2')
# filter no match
req = fakes.HTTPRequest.blank('/v1/snapshots?display_name=backup4')
resp = self.controller.index(req)
self.assertEqual(len(resp['snapshots']), 0)
def test_admin_list_snapshots_limited_to_project(self):
req = fakes.HTTPRequest.blank('/v1/fake/snapshots',
use_admin_context=True)
res = self.controller.index(req)
self.assertTrue('snapshots' in res)
self.assertEqual(1, len(res['snapshots']))
def test_admin_list_snapshots_all_tenants(self):
req = fakes.HTTPRequest.blank('/v1/fake/snapshots?all_tenants=1',
use_admin_context=True)
res = self.controller.index(req)
self.assertTrue('snapshots' in res)
self.assertEqual(3, len(res['snapshots']))
def test_all_tenants_non_admin_gets_all_tenants(self):
req = fakes.HTTPRequest.blank('/v1/fake/snapshots?all_tenants=1')
res = self.controller.index(req)
self.assertTrue('snapshots' in res)
self.assertEqual(1, len(res['snapshots']))
def test_non_admin_get_by_project(self):
req = fakes.HTTPRequest.blank('/v1/fake/snapshots')
res = self.controller.index(req)
self.assertTrue('snapshots' in res)
self.assertEqual(1, len(res['snapshots']))
class SnapshotSerializerTest(test.TestCase):
def _verify_snapshot(self, snap, tree):
self.assertEqual(tree.tag, 'snapshot')
for attr in ('id', 'status', 'size', 'created_at',
'display_name', 'display_description', 'volume_id'):
self.assertEqual(str(snap[attr]), tree.get(attr))
def test_snapshot_show_create_serializer(self):
serializer = snapshots.SnapshotTemplate()
raw_snapshot = dict(
id='snap_id',
status='snap_status',
size=1024,
created_at=datetime.datetime.now(),
display_name='snap_name',
display_description='snap_desc',
volume_id='vol_id',
)
text = serializer.serialize(dict(snapshot=raw_snapshot))
print text
tree = etree.fromstring(text)
self._verify_snapshot(raw_snapshot, tree)
def test_snapshot_index_detail_serializer(self):
serializer = snapshots.SnapshotsTemplate()
raw_snapshots = [dict(
id='snap1_id',
status='snap1_status',
size=1024,
created_at=datetime.datetime.now(),
display_name='snap1_name',
display_description='snap1_desc',
volume_id='vol1_id',
),
dict(
id='snap2_id',
status='snap2_status',
size=1024,
created_at=datetime.datetime.now(),
display_name='snap2_name',
display_description='snap2_desc',
volume_id='vol2_id',
)]
text = serializer.serialize(dict(snapshots=raw_snapshots))
print text
tree = etree.fromstring(text)
self.assertEqual('snapshots', tree.tag)
self.assertEqual(len(raw_snapshots), len(tree))
for idx, child in enumerate(tree):
self._verify_snapshot(raw_snapshots[idx], child)
class SnapshotsUnprocessableEntityTestCase(test.TestCase):
"""
Tests of places we throw 422 Unprocessable Entity from
"""
def setUp(self):
super(SnapshotsUnprocessableEntityTestCase, self).setUp()
self.controller = snapshots.SnapshotsController()
def _unprocessable_snapshot_create(self, body):
req = fakes.HTTPRequest.blank('/v2/fake/snapshots')
req.method = 'POST'
self.assertRaises(webob.exc.HTTPUnprocessableEntity,
self.controller.create, req, body)
def test_create_no_body(self):
self._unprocessable_snapshot_create(body=None)
def test_create_missing_snapshot(self):
body = {'foo': {'a': 'b'}}
self._unprocessable_snapshot_create(body=body)
def test_create_malformed_entity(self):
body = {'snapshot': 'string'}
self._unprocessable_snapshot_create(body=body)
|
|
# ===============================================================================
# Copyright 2013 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from reportlab.lib import colors
from reportlab.lib.enums import TA_CENTER
from reportlab.lib.units import inch
from reportlab.platypus import Paragraph
from traits.api import Bool, Float, Str
from traitsui.api import VGroup, Tabbed, Item
from pychron.canvas.canvas2D.irradiation_canvas import IrradiationCanvas
from pychron.canvas.utils import markup_canvas, load_holder_canvas
from pychron.core.helpers.traitsui_shortcuts import okcancel_view
from pychron.core.pdf.base_table_pdf_writer import BasePDFTableWriter
from pychron.core.pdf.items import Row
from pychron.core.pdf.options import BasePDFOptions, dumpable
from pychron.dvc.meta_repo import irradiation_geometry_holes, irradiation_chronology
from pychron.loading.component_flowable import ComponentFlowable
from pychron.pychron_constants import DEFAULT_MONITOR_NAME
MATERIAL_MAP = {"GroundmassConcentrate": "GMC"}
def fontsize(x, f):
return "<font size={}>{}</font>".format(f, x)
class RotatedParagraph(Paragraph):
rotation = 0
def draw(self):
self.canv.saveState()
self.canv.rotate(self.rotation)
self.canv.translate(-self.width / 2.0 - 100, -self.height)
Paragraph.draw(self)
self.canv.restoreState()
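# RotatedParagraph rotates the ReportLab canvas before delegating to
# Paragraph.draw, so the irradiation summary title can be laid out vertically on
# landscape pages; the translate call shifts the origin so the rotated text
# stays roughly centered on the page.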
class IrradiationPDFTableOptions(BasePDFOptions):
status_width = dumpable(Float(0.25))
position_width = dumpable(Float(0.5))
identifier_width = dumpable(Float(0.6))
sample_width = dumpable(Float(1.25))
material_width = dumpable(Float(0.5))
project_width = dumpable(Float(1.25))
only_selected_level = dumpable(Bool(False))
_persistence_name = "irradiation_pdf_table_options"
def widths(self, units=inch):
return [
getattr(self, "{}_width".format(w)) * units
for w in (
"status",
"position",
"identifier",
"sample",
"material",
"project",
)
]
def traits_view(self):
layout_grp = self._get_layout_group()
layout_grp.show_border = False
width_grp = VGroup(
Item("status_width", label="Status (in)"),
Item("position_width", label="Pos. (in)"),
Item("identifier_width", label="L# (in)"),
Item("sample_width", label="Sample (in)"),
Item("material_width", label="Material (in)"),
Item("project_width", label="Project (in)"),
label="Column Widths",
)
main_grp = VGroup(
Item("only_selected_level", label="Only Selected Level"), label="Main"
)
v = okcancel_view(
Tabbed(main_grp, layout_grp, width_grp), title="PDF Save Options"
)
return v
class IrradiationPDFWriter(BasePDFTableWriter):
page_break_between_levels = Bool(True)
show_page_numbers = True
selected_level = None
_options_klass = IrradiationPDFTableOptions
monitor_name = Str
def _build(self, doc, irrad, *args, **kw):
if not self.options.only_selected_level:
self.options.page_number_format = "{} {{page:d}} - {{total:d}}".format(
irrad.name
)
flowables = self._make_levels(irrad)
return flowables, None
def _make_levels(self, irrad, progress=None):
irradname = irrad.name
flowables = []
if self.options.only_selected_level:
levels = [l for l in irrad.levels if l.name == self.selected_level]
else:
# make coversheet
summary = self._make_summary(irrad)
summary_table = self._make_summary_table(irrad)
flowables = [summary, self._vspacer(1), summary_table, self._page_break()]
levels = sorted(irrad.levels, key=lambda x: x.name)
for level in levels:
if progress is not None:
progress.change_message("Making {}{}".format(irradname, level.name))
c = self._make_canvas(level)
fs = self._make_level_table(irrad, level, c)
if c:
c = ComponentFlowable(c)
flowables.append(self._make_table_title(irrad, level))
flowables.append(c)
flowables.append(self._page_break())
flowables.extend(fs)
return flowables
def _make_summary(self, irrad):
name = irrad.name
levels = ", ".join(sorted([li.name for li in irrad.levels]))
chron = irradiation_chronology(name)
dur = chron.total_duration_seconds
date = chron.start_date
dur /= 60 * 60.0
date = "Irradiation Date: {}".format(date)
dur = "Irradiation Duration: {:0.1f} hrs".format(dur)
name = fontsize(name, 40)
txt = "<br/>".join((name, levels, date, dur))
klass = Paragraph
rotation = 0
if self.options.orientation == "landscape":
klass = RotatedParagraph
rotation = 90
p = self._new_paragraph(
txt, klass=klass, s="Title", textColor=colors.black, alignment=TA_CENTER
)
p.rotation = rotation
return p
def _make_summary_table(self, irrad):
ts = self._new_style(header_line_idx=0)
header = Row()
header.add_item(value="<b>Level</b>")
header.add_item(value="<b>Tray</b>")
header.add_item(value="<b>Project</b>")
def make_row(level):
row = Row()
row.add_item(value=level.name)
row.add_item(value=level.holder)
row.add_item(value=", ".join(level.projects))
return row
rows = [make_row(li) for li in sorted(irrad.levels, key=lambda x: x.name)]
rows.insert(0, header)
t = self._new_table(ts, rows, col_widths=[0.5 * inch, 1 * inch, 5 * inch])
return t
def _make_level_table(self, irrad, level, c):
row = Row()
row.add_item(span=-1, value=self._make_table_title(irrad, level), fontsize=18)
rows = [row]
row = Row()
for v in ("", "Pos.", "L#", "Sample", "Material", "Project", "PI", "Note"):
row.add_item(value=self._new_paragraph("<b>{}</b>".format(v)))
rows.append(row)
srows = []
spos = sorted(level.positions, key=lambda x: x.position)
for i in range(c.scene.nholes):
pos = i + 1
item = next((p for p in spos if p.position == pos), None)
if not item:
row = self._make_blank_row(pos)
else:
row = self._make_row(item, c)
spos.remove(item)
srows.append(row)
rows.extend(srows)
ts = self._new_style(header_line_idx=0, header_line_width=2)
ts.add("LINEBELOW", (0, 1), (-1, -1), 1.0, colors.black)
cw = self.options.widths()
t = self._new_table(ts, rows, colWidths=cw)
t.repeatRows = 2
flowables = [t]
if self.page_break_between_levels:
flowables.append(self._page_break())
else:
flowables.append(self._new_spacer(0, 0.25 * inch))
return flowables
def _make_table_title(self, irrad, level):
t = "{}{} {}".format(irrad.name, level.name, level.holder)
p = self._new_paragraph(t, s="Heading1", alignment=TA_CENTER)
return p
def _make_blank_row(self, pos):
r = Row()
r.add_item(value="[ ]")
r.add_item(value=pos)
for i in range(6):
r.add_item(value="")
return r
def _make_row(self, pos, canvas):
r = Row()
sample = pos.sample
project, pi, material = "", "", ""
if sample:
if sample.material:
material = sample.material.name
material = MATERIAL_MAP.get(material, material)
material = material[:15]
project = sample.project.name
pi = sample.project.principal_investigator.name
sample = sample.name
if sample == DEFAULT_MONITOR_NAME:
project, pi, material = "", "", ""
r.add_item(value="[ ]")
r.add_item(value=pos.position)
r.add_item(value=pos.identifier or "")
r.add_item(value=sample or "")
r.add_item(value=material, fontsize=8)
r.add_item(value=project, fontsize=8)
r.add_item(value=pi, fontsize=8)
r.add_item(value="")
if sample:
item = canvas.scene.get_item(pos.position)
item.fill = True
return r
def _make_canvas(self, level):
if level.holder:
holes = irradiation_geometry_holes(level.holder)
canvas = IrradiationCanvas()
load_holder_canvas(canvas, holes)
markup_canvas(canvas, level.positions, self.monitor_name)
return canvas
class LabbookPDFWriter(IrradiationPDFWriter):
title = "New Mexico Geochronology Research Laboratory"
def _build(self, doc, irrads, progress=None, *args, **kw):
flowables = []
flowables.extend(self._make_title_page(irrads))
for irrad in irrads:
self.options.page_number_format = "{} {{page:d}} - {{total:d}}".format(
irrad.name
)
fs = self._make_levels(irrad, progress)
flowables.extend(fs)
return flowables, None
def _make_title_page(self, irrads):
start = irrads[0].name
end = irrads[-1].name
l1 = self.title
l2 = "Irradiation Labbook"
if start != end:
l3 = "{} to {}".format(start, end)
else:
l3 = start
t = "<br/>".join((l1, l2, l3))
p = self._new_paragraph(t, s="Title")
return p, self._page_break()
def _make_summary(self, irrad):
name = irrad.name
levels = ", ".join(sorted([li.name for li in irrad.levels]))
chron = irradiation_chronology(name)
dur = chron.total_duration_seconds
date = chron.start_date
dur /= 60 * 60.0
date = "Irradiation Date: {}".format(date)
dur = "Irradiation Duration: {:0.1f} hrs".format(dur)
name = fontsize(name, 40)
txt = "<br/>".join((name, levels, date, dur))
p = self._new_paragraph(
txt, s="Title", textColor=colors.green, alignment=TA_CENTER
)
return p
# ============= EOF =============================================
|
|
"""
homeassistant.components.mqtt
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
MQTT component, using paho-mqtt. This component needs an MQTT broker such as
Mosquitto or Mosca. The Eclipse Foundation runs a public MQTT server at
iot.eclipse.org; if you prefer to use it, keep in mind that you should adjust
the topic/client ID and that your messages will be public.
Configuration:
To use MQTT you will need to add something like the following to your
config/configuration.yaml.
mqtt:
broker: 127.0.0.1
Or, if you want more options:
mqtt:
broker: 127.0.0.1
port: 1883
client_id: home-assistant-1
keepalive: 60
username: your_username
password: your_secret_password
Variables:
broker
*Required
This is the IP address of your MQTT broker, e.g. 192.168.1.32.
port
*Optional
The network port to connect to. Default is 1883.
client_id
*Optional
Client ID that Home Assistant will use. Has to be unique on the server.
Default is a randomly generated one.
keepalive
*Optional
The keep alive in seconds for this client. Default is 60.
"""
import logging
import socket
from homeassistant.exceptions import HomeAssistantError
import homeassistant.util as util
from homeassistant.helpers import validate_config
from homeassistant.const import (
EVENT_HOMEASSISTANT_START, EVENT_HOMEASSISTANT_STOP)
_LOGGER = logging.getLogger(__name__)
DOMAIN = "mqtt"
MQTT_CLIENT = None
DEFAULT_PORT = 1883
DEFAULT_KEEPALIVE = 60
SERVICE_PUBLISH = 'publish'
EVENT_MQTT_MESSAGE_RECEIVED = 'MQTT_MESSAGE_RECEIVED'
DEPENDENCIES = []
REQUIREMENTS = ['paho-mqtt==1.1']
CONF_BROKER = 'broker'
CONF_PORT = 'port'
CONF_CLIENT_ID = 'client_id'
CONF_KEEPALIVE = 'keepalive'
CONF_USERNAME = 'username'
CONF_PASSWORD = 'password'
ATTR_TOPIC = 'topic'
ATTR_PAYLOAD = 'payload'
ATTR_QOS = 'qos'
def publish(hass, topic, payload, qos=0):
""" Send an MQTT message. """
data = {
ATTR_TOPIC: topic,
ATTR_PAYLOAD: payload,
ATTR_QOS: qos,
}
hass.services.call(DOMAIN, SERVICE_PUBLISH, data)
def subscribe(hass, topic, callback, qos=0):
""" Subscribe to a topic. """
def mqtt_topic_subscriber(event):
""" Match subscribed MQTT topic. """
if _match_topic(topic, event.data[ATTR_TOPIC]):
callback(event.data[ATTR_TOPIC], event.data[ATTR_PAYLOAD],
event.data[ATTR_QOS])
hass.bus.listen(EVENT_MQTT_MESSAGE_RECEIVED, mqtt_topic_subscriber)
if topic not in MQTT_CLIENT.topics:
MQTT_CLIENT.subscribe(topic, qos)
def setup(hass, config):
""" Get the MQTT protocol service. """
if not validate_config(config, {DOMAIN: ['broker']}, _LOGGER):
return False
conf = config[DOMAIN]
broker = conf[CONF_BROKER]
port = util.convert(conf.get(CONF_PORT), int, DEFAULT_PORT)
client_id = util.convert(conf.get(CONF_CLIENT_ID), str)
keepalive = util.convert(conf.get(CONF_KEEPALIVE), int, DEFAULT_KEEPALIVE)
username = util.convert(conf.get(CONF_USERNAME), str)
password = util.convert(conf.get(CONF_PASSWORD), str)
global MQTT_CLIENT
try:
MQTT_CLIENT = MQTT(hass, broker, port, client_id, keepalive, username,
password)
except socket.error:
_LOGGER.exception("Can't connect to the broker. "
"Please check your settings and the broker "
"itself.")
return False
def stop_mqtt(event):
""" Stop MQTT component. """
MQTT_CLIENT.stop()
def start_mqtt(event):
""" Launch MQTT component when Home Assistant starts up. """
MQTT_CLIENT.start()
hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, stop_mqtt)
def publish_service(call):
""" Handle MQTT publish service calls. """
msg_topic = call.data.get(ATTR_TOPIC)
payload = call.data.get(ATTR_PAYLOAD)
qos = call.data.get(ATTR_QOS)
if msg_topic is None or payload is None:
return
MQTT_CLIENT.publish(msg_topic, payload, qos)
hass.bus.listen_once(EVENT_HOMEASSISTANT_START, start_mqtt)
hass.services.register(DOMAIN, SERVICE_PUBLISH, publish_service)
return True
# This is based on one of the paho-mqtt examples:
# http://git.eclipse.org/c/paho/org.eclipse.paho.mqtt.python.git/tree/examples/sub-class.py
# pylint: disable=too-many-arguments
class MQTT(object): # pragma: no cover
""" Implements messaging service for MQTT. """
def __init__(self, hass, broker, port, client_id, keepalive, username,
password):
import paho.mqtt.client as mqtt
self.hass = hass
self._progress = {}
self.topics = {}
if client_id is None:
self._mqttc = mqtt.Client()
else:
self._mqttc = mqtt.Client(client_id)
if username is not None:
self._mqttc.username_pw_set(username, password)
self._mqttc.on_subscribe = self._mqtt_on_subscribe
self._mqttc.on_unsubscribe = self._mqtt_on_unsubscribe
self._mqttc.on_connect = self._mqtt_on_connect
self._mqttc.on_message = self._mqtt_on_message
self._mqttc.connect(broker, port, keepalive)
def publish(self, topic, payload, qos):
""" Publish a MQTT message. """
self._mqttc.publish(topic, payload, qos)
def unsubscribe(self, topic):
""" Unsubscribe from topic. """
result, mid = self._mqttc.unsubscribe(topic)
_raise_on_error(result)
self._progress[mid] = topic
def start(self):
""" Run the MQTT client. """
self._mqttc.loop_start()
def stop(self):
""" Stop the MQTT client. """
self._mqttc.loop_stop()
def subscribe(self, topic, qos):
""" Subscribe to a topic. """
if topic in self.topics:
return
result, mid = self._mqttc.subscribe(topic, qos)
_raise_on_error(result)
self._progress[mid] = topic
self.topics[topic] = None
def _mqtt_on_connect(self, mqttc, obj, flags, result_code):
""" On connect, resubscribe to all topics we were subscribed to. """
old_topics = self.topics
self._progress = {}
self.topics = {}
for topic, qos in old_topics.items():
# qos is None if we were in process of subscribing
if qos is not None:
self._mqttc.subscribe(topic, qos)
def _mqtt_on_subscribe(self, mqttc, obj, mid, granted_qos):
""" Called when subscribe succesfull. """
topic = self._progress.pop(mid, None)
if topic is None:
return
self.topics[topic] = granted_qos
def _mqtt_on_unsubscribe(self, mqttc, obj, mid, granted_qos):
""" Called when subscribe succesfull. """
topic = self._progress.pop(mid, None)
if topic is None:
return
self.topics.pop(topic, None)
def _mqtt_on_message(self, mqttc, obj, msg):
""" Message callback """
self.hass.bus.fire(EVENT_MQTT_MESSAGE_RECEIVED, {
ATTR_TOPIC: msg.topic,
ATTR_QOS: msg.qos,
ATTR_PAYLOAD: msg.payload.decode('utf-8'),
})
def _raise_on_error(result): # pragma: no cover
""" Raise error if error result. """
if result != 0:
raise HomeAssistantError('Error talking to MQTT: {}'.format(result))
def _match_topic(subscription, topic):
""" Returns if topic matches subscription. """
if subscription.endswith('#'):
return (subscription[:-2] == topic or
topic.startswith(subscription[:-1]))
sub_parts = subscription.split('/')
topic_parts = topic.split('/')
return (len(sub_parts) == len(topic_parts) and
all(a == b for a, b in zip(sub_parts, topic_parts) if a != '+'))
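# Usage sketch (illustrative only; assumes a configured `hass` instance and a
# reachable broker). The module-level helpers wrap the shared MQTT_CLIENT:
#
#     def message_received(topic, payload, qos):
#         _LOGGER.info("Received %s on %s", payload, topic)
#
#     subscribe(hass, 'home/+/temperature', message_received)
#     publish(hass, 'home/livingroom/temperature', '21.5')
#
# Incoming messages are dispatched through _match_topic, so '+' matches exactly
# one topic level and a trailing '#' matches all remaining levels.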
|
|
import inspect
import types
import unittest
from test.support import import_module
asyncio = import_module("asyncio")
class AwaitException(Exception):
pass
@types.coroutine
def awaitable(*, throw=False):
if throw:
yield ('throw',)
else:
yield ('result',)
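# run_until_complete below drives a coroutine by hand, mirroring an event loop:
# it repeatedly send()s None, and when the coroutine yields the ('throw',)
# marker produced by awaitable(throw=True) it throws AwaitException back in on
# the next step, until StopIteration carries out the final result.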
def run_until_complete(coro):
exc = False
while True:
try:
if exc:
exc = False
fut = coro.throw(AwaitException)
else:
fut = coro.send(None)
except StopIteration as ex:
return ex.args[0]
if fut == ('throw',):
exc = True
def to_list(gen):
async def iterate():
res = []
async for i in gen:
res.append(i)
return res
return run_until_complete(iterate())
class AsyncGenSyntaxTest(unittest.TestCase):
def test_async_gen_syntax_01(self):
code = '''async def foo():
await abc
yield from 123
'''
with self.assertRaisesRegex(SyntaxError, 'yield from.*inside async'):
exec(code, {}, {})
def test_async_gen_syntax_02(self):
code = '''async def foo():
yield from 123
'''
with self.assertRaisesRegex(SyntaxError, 'yield from.*inside async'):
exec(code, {}, {})
def test_async_gen_syntax_03(self):
code = '''async def foo():
await abc
yield
return 123
'''
with self.assertRaisesRegex(SyntaxError, 'return.*value.*async gen'):
exec(code, {}, {})
def test_async_gen_syntax_04(self):
code = '''async def foo():
yield
return 123
'''
with self.assertRaisesRegex(SyntaxError, 'return.*value.*async gen'):
exec(code, {}, {})
def test_async_gen_syntax_05(self):
code = '''async def foo():
if 0:
yield
return 12
'''
with self.assertRaisesRegex(SyntaxError, 'return.*value.*async gen'):
exec(code, {}, {})
class AsyncGenTest(unittest.TestCase):
def compare_generators(self, sync_gen, async_gen):
def sync_iterate(g):
res = []
while True:
try:
res.append(g.__next__())
except StopIteration:
res.append('STOP')
break
except Exception as ex:
res.append(str(type(ex)))
return res
def async_iterate(g):
res = []
while True:
an = g.__anext__()
try:
while True:
try:
an.__next__()
except StopIteration as ex:
if ex.args:
res.append(ex.args[0])
break
else:
res.append('EMPTY StopIteration')
break
except StopAsyncIteration:
raise
except Exception as ex:
res.append(str(type(ex)))
break
except StopAsyncIteration:
res.append('STOP')
break
return res
sync_gen_result = sync_iterate(sync_gen)
async_gen_result = async_iterate(async_gen)
self.assertEqual(sync_gen_result, async_gen_result)
return async_gen_result
def test_async_gen_iteration_01(self):
async def gen():
await awaitable()
a = yield 123
self.assertIs(a, None)
await awaitable()
yield 456
await awaitable()
yield 789
self.assertEqual(to_list(gen()), [123, 456, 789])
def test_async_gen_iteration_02(self):
async def gen():
await awaitable()
yield 123
await awaitable()
g = gen()
ai = g.__aiter__()
an = ai.__anext__()
self.assertEqual(an.__next__(), ('result',))
try:
an.__next__()
except StopIteration as ex:
self.assertEqual(ex.args[0], 123)
else:
self.fail('StopIteration was not raised')
an = ai.__anext__()
self.assertEqual(an.__next__(), ('result',))
try:
an.__next__()
except StopAsyncIteration as ex:
self.assertFalse(ex.args)
else:
self.fail('StopAsyncIteration was not raised')
def test_async_gen_exception_03(self):
async def gen():
await awaitable()
yield 123
await awaitable(throw=True)
yield 456
with self.assertRaises(AwaitException):
to_list(gen())
def test_async_gen_exception_04(self):
async def gen():
await awaitable()
yield 123
1 / 0
g = gen()
ai = g.__aiter__()
an = ai.__anext__()
self.assertEqual(an.__next__(), ('result',))
try:
an.__next__()
except StopIteration as ex:
self.assertEqual(ex.args[0], 123)
else:
self.fail('StopIteration was not raised')
with self.assertRaises(ZeroDivisionError):
ai.__anext__().__next__()
def test_async_gen_exception_05(self):
async def gen():
yield 123
raise StopAsyncIteration
with self.assertRaisesRegex(RuntimeError,
'async generator.*StopAsyncIteration'):
to_list(gen())
def test_async_gen_exception_06(self):
async def gen():
yield 123
raise StopIteration
with self.assertRaisesRegex(RuntimeError,
'async generator.*StopIteration'):
to_list(gen())
def test_async_gen_exception_07(self):
def sync_gen():
try:
yield 1
1 / 0
finally:
yield 2
yield 3
yield 100
async def async_gen():
try:
yield 1
1 / 0
finally:
yield 2
yield 3
yield 100
self.compare_generators(sync_gen(), async_gen())
def test_async_gen_exception_08(self):
def sync_gen():
try:
yield 1
finally:
yield 2
1 / 0
yield 3
yield 100
async def async_gen():
try:
yield 1
await awaitable()
finally:
await awaitable()
yield 2
1 / 0
yield 3
yield 100
self.compare_generators(sync_gen(), async_gen())
def test_async_gen_exception_09(self):
def sync_gen():
try:
yield 1
1 / 0
finally:
yield 2
yield 3
yield 100
async def async_gen():
try:
await awaitable()
yield 1
1 / 0
finally:
yield 2
await awaitable()
yield 3
yield 100
self.compare_generators(sync_gen(), async_gen())
def test_async_gen_exception_10(self):
async def gen():
yield 123
with self.assertRaisesRegex(TypeError,
"non-None value .* async generator"):
gen().__anext__().send(100)
def test_async_gen_exception_11(self):
def sync_gen():
yield 10
yield 20
def sync_gen_wrapper():
yield 1
sg = sync_gen()
sg.send(None)
try:
sg.throw(GeneratorExit())
except GeneratorExit:
yield 2
yield 3
async def async_gen():
yield 10
yield 20
async def async_gen_wrapper():
yield 1
asg = async_gen()
await asg.asend(None)
try:
await asg.athrow(GeneratorExit())
except GeneratorExit:
yield 2
yield 3
self.compare_generators(sync_gen_wrapper(), async_gen_wrapper())
def test_async_gen_api_01(self):
async def gen():
yield 123
g = gen()
self.assertEqual(g.__name__, 'gen')
g.__name__ = '123'
self.assertEqual(g.__name__, '123')
self.assertIn('.gen', g.__qualname__)
g.__qualname__ = '123'
self.assertEqual(g.__qualname__, '123')
self.assertIsNone(g.ag_await)
self.assertIsInstance(g.ag_frame, types.FrameType)
self.assertFalse(g.ag_running)
self.assertIsInstance(g.ag_code, types.CodeType)
self.assertTrue(inspect.isawaitable(g.aclose()))
class AsyncGenAsyncioTest(unittest.TestCase):
def setUp(self):
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(None)
def tearDown(self):
self.loop.close()
self.loop = None
asyncio.set_event_loop_policy(None)
async def to_list(self, gen):
res = []
async for i in gen:
res.append(i)
return res
def test_async_gen_asyncio_01(self):
async def gen():
yield 1
await asyncio.sleep(0.01)
yield 2
await asyncio.sleep(0.01)
return
yield 3
res = self.loop.run_until_complete(self.to_list(gen()))
self.assertEqual(res, [1, 2])
def test_async_gen_asyncio_02(self):
async def gen():
yield 1
await asyncio.sleep(0.01)
yield 2
1 / 0
yield 3
with self.assertRaises(ZeroDivisionError):
self.loop.run_until_complete(self.to_list(gen()))
def test_async_gen_asyncio_03(self):
loop = self.loop
class Gen:
async def __aiter__(self):
yield 1
await asyncio.sleep(0.01)
yield 2
res = loop.run_until_complete(self.to_list(Gen()))
self.assertEqual(res, [1, 2])
def test_async_gen_asyncio_anext_04(self):
async def foo():
yield 1
await asyncio.sleep(0.01)
try:
yield 2
yield 3
except ZeroDivisionError:
yield 1000
await asyncio.sleep(0.01)
yield 4
async def run1():
it = foo().__aiter__()
self.assertEqual(await it.__anext__(), 1)
self.assertEqual(await it.__anext__(), 2)
self.assertEqual(await it.__anext__(), 3)
self.assertEqual(await it.__anext__(), 4)
with self.assertRaises(StopAsyncIteration):
await it.__anext__()
with self.assertRaises(StopAsyncIteration):
await it.__anext__()
async def run2():
it = foo().__aiter__()
self.assertEqual(await it.__anext__(), 1)
self.assertEqual(await it.__anext__(), 2)
try:
it.__anext__().throw(ZeroDivisionError)
except StopIteration as ex:
self.assertEqual(ex.args[0], 1000)
else:
self.fail('StopIteration was not raised')
self.assertEqual(await it.__anext__(), 4)
with self.assertRaises(StopAsyncIteration):
await it.__anext__()
self.loop.run_until_complete(run1())
self.loop.run_until_complete(run2())
def test_async_gen_asyncio_anext_05(self):
async def foo():
v = yield 1
v = yield v
yield v * 100
async def run():
it = foo().__aiter__()
try:
it.__anext__().send(None)
except StopIteration as ex:
self.assertEqual(ex.args[0], 1)
else:
self.fail('StopIteration was not raised')
try:
it.__anext__().send(10)
except StopIteration as ex:
self.assertEqual(ex.args[0], 10)
else:
self.fail('StopIteration was not raised')
try:
it.__anext__().send(12)
except StopIteration as ex:
self.assertEqual(ex.args[0], 1200)
else:
self.fail('StopIteration was not raised')
with self.assertRaises(StopAsyncIteration):
await it.__anext__()
self.loop.run_until_complete(run())
def test_async_gen_asyncio_anext_06(self):
DONE = 0
# test synchronous generators
def foo():
try:
yield
except:
pass
g = foo()
g.send(None)
with self.assertRaises(StopIteration):
g.send(None)
# now with asynchronous generators
async def gen():
nonlocal DONE
try:
yield
except:
pass
DONE = 1
async def run():
nonlocal DONE
g = gen()
await g.asend(None)
with self.assertRaises(StopAsyncIteration):
await g.asend(None)
DONE += 10
self.loop.run_until_complete(run())
self.assertEqual(DONE, 11)
def test_async_gen_asyncio_anext_tuple(self):
async def foo():
try:
yield (1,)
except ZeroDivisionError:
yield (2,)
async def run():
it = foo().__aiter__()
self.assertEqual(await it.__anext__(), (1,))
with self.assertRaises(StopIteration) as cm:
it.__anext__().throw(ZeroDivisionError)
self.assertEqual(cm.exception.args[0], (2,))
with self.assertRaises(StopAsyncIteration):
await it.__anext__()
self.loop.run_until_complete(run())
def test_async_gen_asyncio_anext_stopiteration(self):
async def foo():
try:
yield StopIteration(1)
except ZeroDivisionError:
yield StopIteration(3)
async def run():
it = foo().__aiter__()
v = await it.__anext__()
self.assertIsInstance(v, StopIteration)
self.assertEqual(v.value, 1)
with self.assertRaises(StopIteration) as cm:
it.__anext__().throw(ZeroDivisionError)
v = cm.exception.args[0]
self.assertIsInstance(v, StopIteration)
self.assertEqual(v.value, 3)
with self.assertRaises(StopAsyncIteration):
await it.__anext__()
self.loop.run_until_complete(run())
def test_async_gen_asyncio_aclose_06(self):
async def foo():
try:
yield 1
1 / 0
finally:
await asyncio.sleep(0.01)
yield 12
async def run():
gen = foo()
it = gen.__aiter__()
await it.__anext__()
await gen.aclose()
with self.assertRaisesRegex(
RuntimeError,
"async generator ignored GeneratorExit"):
self.loop.run_until_complete(run())
def test_async_gen_asyncio_aclose_07(self):
DONE = 0
async def foo():
nonlocal DONE
try:
yield 1
1 / 0
finally:
await asyncio.sleep(0.01)
await asyncio.sleep(0.01)
DONE += 1
DONE += 1000
async def run():
gen = foo()
it = gen.__aiter__()
await it.__anext__()
await gen.aclose()
self.loop.run_until_complete(run())
self.assertEqual(DONE, 1)
def test_async_gen_asyncio_aclose_08(self):
DONE = 0
fut = asyncio.Future(loop=self.loop)
async def foo():
nonlocal DONE
try:
yield 1
await fut
DONE += 1000
yield 2
finally:
await asyncio.sleep(0.01)
await asyncio.sleep(0.01)
DONE += 1
DONE += 1000
async def run():
gen = foo()
it = gen.__aiter__()
self.assertEqual(await it.__anext__(), 1)
await gen.aclose()
self.loop.run_until_complete(run())
self.assertEqual(DONE, 1)
# Silence ResourceWarnings
fut.cancel()
self.loop.run_until_complete(asyncio.sleep(0.01))
def test_async_gen_asyncio_gc_aclose_09(self):
DONE = 0
async def gen():
nonlocal DONE
try:
while True:
yield 1
finally:
await asyncio.sleep(0.01)
await asyncio.sleep(0.01)
DONE = 1
async def run():
g = gen()
await g.__anext__()
await g.__anext__()
del g
await asyncio.sleep(0.1)
self.loop.run_until_complete(run())
self.assertEqual(DONE, 1)
def test_async_gen_asyncio_aclose_10(self):
DONE = 0
# test synchronous generators
def foo():
try:
yield
except:
pass
g = foo()
g.send(None)
g.close()
# now with asynchronous generators
async def gen():
nonlocal DONE
try:
yield
except:
pass
DONE = 1
async def run():
nonlocal DONE
g = gen()
await g.asend(None)
await g.aclose()
DONE += 10
self.loop.run_until_complete(run())
self.assertEqual(DONE, 11)
def test_async_gen_asyncio_aclose_11(self):
DONE = 0
# test synchronous generators
def foo():
try:
yield
except:
pass
yield
g = foo()
g.send(None)
with self.assertRaisesRegex(RuntimeError, 'ignored GeneratorExit'):
g.close()
# now with asynchronous generators
async def gen():
nonlocal DONE
try:
yield
except:
pass
yield
DONE += 1
async def run():
nonlocal DONE
g = gen()
await g.asend(None)
with self.assertRaisesRegex(RuntimeError, 'ignored GeneratorExit'):
await g.aclose()
DONE += 10
self.loop.run_until_complete(run())
self.assertEqual(DONE, 10)
def test_async_gen_asyncio_asend_01(self):
DONE = 0
# Sanity check:
def sgen():
v = yield 1
yield v * 2
sg = sgen()
v = sg.send(None)
self.assertEqual(v, 1)
v = sg.send(100)
self.assertEqual(v, 200)
async def gen():
nonlocal DONE
try:
await asyncio.sleep(0.01)
v = yield 1
await asyncio.sleep(0.01)
yield v * 2
await asyncio.sleep(0.01)
return
finally:
await asyncio.sleep(0.01)
await asyncio.sleep(0.01)
DONE = 1
async def run():
g = gen()
v = await g.asend(None)
self.assertEqual(v, 1)
v = await g.asend(100)
self.assertEqual(v, 200)
with self.assertRaises(StopAsyncIteration):
await g.asend(None)
self.loop.run_until_complete(run())
self.assertEqual(DONE, 1)
def test_async_gen_asyncio_asend_02(self):
DONE = 0
async def sleep_n_crash(delay):
await asyncio.sleep(delay)
1 / 0
async def gen():
nonlocal DONE
try:
await asyncio.sleep(0.01)
v = yield 1
await sleep_n_crash(0.01)
DONE += 1000
yield v * 2
finally:
await asyncio.sleep(0.01)
await asyncio.sleep(0.01)
DONE = 1
async def run():
g = gen()
v = await g.asend(None)
self.assertEqual(v, 1)
await g.asend(100)
with self.assertRaises(ZeroDivisionError):
self.loop.run_until_complete(run())
self.assertEqual(DONE, 1)
def test_async_gen_asyncio_asend_03(self):
DONE = 0
async def sleep_n_crash(delay):
fut = asyncio.ensure_future(asyncio.sleep(delay),
loop=self.loop)
self.loop.call_later(delay / 2, lambda: fut.cancel())
return await fut
async def gen():
nonlocal DONE
try:
await asyncio.sleep(0.01)
v = yield 1
await sleep_n_crash(0.01)
DONE += 1000
yield v * 2
finally:
await asyncio.sleep(0.01)
await asyncio.sleep(0.01)
DONE = 1
async def run():
g = gen()
v = await g.asend(None)
self.assertEqual(v, 1)
await g.asend(100)
with self.assertRaises(asyncio.CancelledError):
self.loop.run_until_complete(run())
self.assertEqual(DONE, 1)
def test_async_gen_asyncio_athrow_01(self):
DONE = 0
class FooEr(Exception):
pass
# Sanity check:
def sgen():
try:
v = yield 1
except FooEr:
v = 1000
yield v * 2
sg = sgen()
v = sg.send(None)
self.assertEqual(v, 1)
v = sg.throw(FooEr)
self.assertEqual(v, 2000)
with self.assertRaises(StopIteration):
sg.send(None)
async def gen():
nonlocal DONE
try:
await asyncio.sleep(0.01)
try:
v = yield 1
except FooEr:
v = 1000
await asyncio.sleep(0.01)
yield v * 2
await asyncio.sleep(0.01)
# return
finally:
await asyncio.sleep(0.01)
await asyncio.sleep(0.01)
DONE = 1
async def run():
g = gen()
v = await g.asend(None)
self.assertEqual(v, 1)
v = await g.athrow(FooEr)
self.assertEqual(v, 2000)
with self.assertRaises(StopAsyncIteration):
await g.asend(None)
self.loop.run_until_complete(run())
self.assertEqual(DONE, 1)
def test_async_gen_asyncio_athrow_02(self):
DONE = 0
class FooEr(Exception):
pass
async def sleep_n_crash(delay):
fut = asyncio.ensure_future(asyncio.sleep(delay),
loop=self.loop)
self.loop.call_later(delay / 2, lambda: fut.cancel())
return await fut
async def gen():
nonlocal DONE
try:
await asyncio.sleep(0.01)
try:
v = yield 1
except FooEr:
await sleep_n_crash(0.01)
yield v * 2
await asyncio.sleep(0.01)
# return
finally:
await asyncio.sleep(0.01)
await asyncio.sleep(0.01)
DONE = 1
async def run():
g = gen()
v = await g.asend(None)
self.assertEqual(v, 1)
try:
await g.athrow(FooEr)
except asyncio.CancelledError:
self.assertEqual(DONE, 1)
raise
else:
self.fail('CancelledError was not raised')
with self.assertRaises(asyncio.CancelledError):
self.loop.run_until_complete(run())
self.assertEqual(DONE, 1)
def test_async_gen_asyncio_athrow_03(self):
DONE = 0
# test synchronous generators
def foo():
try:
yield
except:
pass
g = foo()
g.send(None)
with self.assertRaises(StopIteration):
g.throw(ValueError)
# now with asynchronous generators
async def gen():
nonlocal DONE
try:
yield
except:
pass
DONE = 1
async def run():
nonlocal DONE
g = gen()
await g.asend(None)
with self.assertRaises(StopAsyncIteration):
await g.athrow(ValueError)
DONE += 10
self.loop.run_until_complete(run())
self.assertEqual(DONE, 11)
def test_async_gen_asyncio_athrow_tuple(self):
async def gen():
try:
yield 1
except ZeroDivisionError:
yield (2,)
async def run():
g = gen()
v = await g.asend(None)
self.assertEqual(v, 1)
v = await g.athrow(ZeroDivisionError)
self.assertEqual(v, (2,))
with self.assertRaises(StopAsyncIteration):
await g.asend(None)
self.loop.run_until_complete(run())
def test_async_gen_asyncio_athrow_stopiteration(self):
async def gen():
try:
yield 1
except ZeroDivisionError:
yield StopIteration(2)
async def run():
g = gen()
v = await g.asend(None)
self.assertEqual(v, 1)
v = await g.athrow(ZeroDivisionError)
self.assertIsInstance(v, StopIteration)
self.assertEqual(v.value, 2)
with self.assertRaises(StopAsyncIteration):
await g.asend(None)
self.loop.run_until_complete(run())
def test_async_gen_asyncio_shutdown_01(self):
finalized = 0
async def waiter(timeout):
nonlocal finalized
try:
await asyncio.sleep(timeout)
yield 1
finally:
await asyncio.sleep(0)
finalized += 1
async def wait():
async for _ in waiter(1):
pass
t1 = self.loop.create_task(wait())
t2 = self.loop.create_task(wait())
self.loop.run_until_complete(asyncio.sleep(0.1))
# Silence warnings
t1.cancel()
t2.cancel()
with self.assertRaises(asyncio.CancelledError):
self.loop.run_until_complete(t1)
with self.assertRaises(asyncio.CancelledError):
self.loop.run_until_complete(t2)
self.loop.run_until_complete(self.loop.shutdown_asyncgens())
self.assertEqual(finalized, 2)
def test_async_gen_expression_01(self):
async def arange(n):
for i in range(n):
await asyncio.sleep(0.01)
yield i
def make_arange(n):
# This syntax is legal starting with Python 3.7
return (i * 2 async for i in arange(n))
async def run():
return [i async for i in make_arange(10)]
res = self.loop.run_until_complete(run())
self.assertEqual(res, [i * 2 for i in range(10)])
def test_async_gen_expression_02(self):
async def wrap(n):
await asyncio.sleep(0.01)
return n
def make_arange(n):
# This syntax is legal starting with Python 3.7
return (i * 2 for i in range(n) if await wrap(i))
async def run():
return [i async for i in make_arange(10)]
res = self.loop.run_until_complete(run())
self.assertEqual(res, [i * 2 for i in range(1, 10)])
def test_asyncgen_nonstarted_hooks_are_cancellable(self):
# See https://bugs.python.org/issue38013
messages = []
def exception_handler(loop, context):
messages.append(context)
async def async_iterate():
yield 1
yield 2
async def main():
loop = asyncio.get_running_loop()
loop.set_exception_handler(exception_handler)
async for i in async_iterate():
break
asyncio.run(main())
self.assertEqual([], messages)
if __name__ == "__main__":
unittest.main()
|
|
#!/usr/bin/env python
__author__ = 'waroquiers'
import unittest
import os
import json
import shutil
from pymatgen.analysis.chemenv.coordination_environments.coordination_geometry_finder import LocalGeometryFinder
from pymatgen.analysis.chemenv.coordination_environments.structure_environments import StructureEnvironments
from pymatgen.analysis.chemenv.coordination_environments.structure_environments import LightStructureEnvironments
from pymatgen.analysis.chemenv.coordination_environments.chemenv_strategies import SimplestChemenvStrategy
from pymatgen.analysis.chemenv.coordination_environments.chemenv_strategies import MultiWeightsChemenvStrategy
from pymatgen.analysis.chemenv.coordination_environments.chemenv_strategies import AngleNbSetWeight
from pymatgen.analysis.chemenv.coordination_environments.chemenv_strategies import CNBiasNbSetWeight
from pymatgen.analysis.chemenv.coordination_environments.chemenv_strategies import DeltaCSMNbSetWeight
from pymatgen.analysis.chemenv.coordination_environments.chemenv_strategies import DistanceAngleAreaNbSetWeight
from pymatgen.analysis.chemenv.coordination_environments.chemenv_strategies import NormalizedAngleDistanceNbSetWeight
from pymatgen.analysis.chemenv.coordination_environments.chemenv_strategies import SelfCSMNbSetWeight
from pymatgen.analysis.chemenv.coordination_environments.voronoi import DetailedVoronoiContainer
from pymatgen.core.structure import Structure
import numpy as np
json_files_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..", "..", "..",
'test_files', "chemenv", "json_test_files")
se_files_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..", "..", "..",
'test_files', "chemenv", "structure_environments_files")
class ReadWriteChemenvTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.lgf = LocalGeometryFinder()
cls.lgf.setup_parameters(centering_type='standard')
os.makedirs('tmp_dir')
    def test_read_write_structure_environments(self):
f = open("{}/{}".format(json_files_dir, 'test_T--4_FePO4_icsd_4266.json'), 'r')
dd = json.load(f)
f.close()
atom_indices = dd['atom_indices']
struct = Structure.from_dict(dd['structure'])
self.lgf.setup_structure(struct)
        se = self.lgf.compute_structure_environments(only_indices=atom_indices,
                                                     maximum_distance_factor=2.25)
f = open('tmp_dir/se.json', 'w')
json.dump(se.as_dict(), f)
f.close()
f = open('tmp_dir/se.json', 'r')
dd = json.load(f)
f.close()
se2 = StructureEnvironments.from_dict(dd)
self.assertEqual(se, se2)
strategy = SimplestChemenvStrategy()
lse = LightStructureEnvironments.from_structure_environments(structure_environments=se, strategy=strategy,
valences='undefined')
f = open('tmp_dir/lse.json', 'w')
json.dump(lse.as_dict(), f)
f.close()
f = open('tmp_dir/lse.json', 'r')
dd = json.load(f)
f.close()
lse2 = LightStructureEnvironments.from_dict(dd)
self.assertEqual(lse, lse2)
def test_structure_environments_neighbors_sets(self):
f = open("{}/{}".format(se_files_dir, 'se_mp-7000.json'), 'r')
dd = json.load(f)
f.close()
se = StructureEnvironments.from_dict(dd)
isite = 6
nb_set = se.neighbors_sets[isite][4][0]
nb_set_surface_points = np.array([[1.0017922780870239, 0.99301365328679292],
[1.0017922780870239, 0.0],
[2.2237615554448569, 0.0],
[2.2237615554448569, 0.0060837],
[2.25, 0.0060837],
[2.25, 0.99301365328679292]])
self.assertTrue(np.allclose(np.array(nb_set.voronoi_grid_surface_points()), nb_set_surface_points))
neighb_sites = nb_set.neighb_sites
coords = [np.array([0.2443798, 1.80409653, -1.13218359]), np.array([1.44020353, 1.11368738, 1.13218359]),
np.array([2.75513098, 2.54465207, -0.70467298]), np.array([0.82616785, 3.65833945, 0.70467298])]
np.testing.assert_array_almost_equal(coords[0], neighb_sites[0].coords)
np.testing.assert_array_almost_equal(coords[1], neighb_sites[1].coords)
np.testing.assert_array_almost_equal(coords[2], neighb_sites[2].coords)
np.testing.assert_array_almost_equal(coords[3], neighb_sites[3].coords)
neighb_coords = nb_set.coords
np.testing.assert_array_almost_equal(coords, neighb_coords[1:])
np.testing.assert_array_almost_equal(nb_set.structure[nb_set.isite].coords, neighb_coords[0])
normdist = nb_set.normalized_distances
self.assertAlmostEqual(sorted(normdist),
sorted([1.0017922783963027, 1.0017922780870239, 1.000000000503177, 1.0]))
normang = nb_set.normalized_angles
self.assertAlmostEqual(sorted(normang),
sorted([0.9999999998419052, 1.0, 0.9930136530585189, 0.9930136532867929]))
dist = nb_set.distances
self.assertAlmostEqual(sorted(dist),
sorted([1.6284399814843944, 1.6284399809816534, 1.6255265861208676, 1.6255265853029401]))
ang = nb_set.angles
self.assertAlmostEqual(sorted(ang),
sorted([3.117389876236432, 3.117389876729275, 3.095610709498583, 3.0956107102102024]))
nb_set_info = nb_set.info
self.assertAlmostEqual(nb_set_info['normalized_angles_mean'], 0.996506826547)
self.assertAlmostEqual(nb_set_info['normalized_distances_std'], 0.000896138995037)
self.assertAlmostEqual(nb_set_info['angles_std'], 0.0108895833142)
self.assertAlmostEqual(nb_set_info['distances_std'], 0.00145669776056)
self.assertAlmostEqual(nb_set_info['distances_mean'], 1.62698328347)
self.assertEqual(nb_set.__str__(), 'Neighbors Set for site #6 :\n'
' - Coordination number : 4\n'
' - Voronoi indices : 1, 4, 5, 6\n')
self.assertFalse(nb_set.__ne__(nb_set))
self.assertEqual(nb_set.__hash__(), 4)
def test_strategies(self):
simplest_strategy_1 = SimplestChemenvStrategy()
simplest_strategy_2 = SimplestChemenvStrategy(distance_cutoff=1.5, angle_cutoff=0.5)
self.assertFalse(simplest_strategy_1 == simplest_strategy_2)
simplest_strategy_1_from_dict = SimplestChemenvStrategy.from_dict(simplest_strategy_1.as_dict())
self.assertEqual(simplest_strategy_1, simplest_strategy_1_from_dict)
effective_csm_estimator = {'function': 'power2_inverse_decreasing',
'options': {'max_csm': 8.0}}
self_csm_weight = SelfCSMNbSetWeight()
surface_definition = {'type': 'standard_elliptic',
'distance_bounds': {'lower': 1.1, 'upper': 1.9},
'angle_bounds': {'lower': 0.1, 'upper': 0.9}}
surface_definition_2 = {'type': 'standard_elliptic',
'distance_bounds': {'lower': 1.1, 'upper': 1.9},
'angle_bounds': {'lower': 0.1, 'upper': 0.95}}
da_area_weight = DistanceAngleAreaNbSetWeight(weight_type='has_intersection',
surface_definition=surface_definition,
nb_sets_from_hints='fallback_to_source',
other_nb_sets='0_weight',
additional_condition=DistanceAngleAreaNbSetWeight.AC.ONLY_ACB)
da_area_weight_2 = DistanceAngleAreaNbSetWeight(weight_type='has_intersection',
surface_definition=surface_definition_2,
nb_sets_from_hints='fallback_to_source',
other_nb_sets='0_weight',
additional_condition=DistanceAngleAreaNbSetWeight.AC.ONLY_ACB)
weight_estimator = {'function': 'smootherstep',
'options': {'delta_csm_min': 0.5,
'delta_csm_max': 3.0}}
symmetry_measure_type = 'csm_wcs_ctwcc'
delta_weight = DeltaCSMNbSetWeight(effective_csm_estimator=effective_csm_estimator,
weight_estimator=weight_estimator,
symmetry_measure_type=symmetry_measure_type)
bias_weight = CNBiasNbSetWeight.linearly_equidistant(weight_cn1=1.0, weight_cn13=4.0)
bias_weight_2 = CNBiasNbSetWeight.linearly_equidistant(weight_cn1=1.0, weight_cn13=5.0)
angle_weight = AngleNbSetWeight()
nad_weight = NormalizedAngleDistanceNbSetWeight(average_type='geometric', aa=1, bb=1)
multi_weights_strategy_1 = MultiWeightsChemenvStrategy(dist_ang_area_weight=da_area_weight,
self_csm_weight=self_csm_weight,
delta_csm_weight=delta_weight,
cn_bias_weight=bias_weight,
angle_weight=angle_weight,
normalized_angle_distance_weight=nad_weight,
symmetry_measure_type=symmetry_measure_type)
multi_weights_strategy_2 = MultiWeightsChemenvStrategy(dist_ang_area_weight=da_area_weight,
self_csm_weight=self_csm_weight,
delta_csm_weight=delta_weight,
cn_bias_weight=bias_weight_2,
angle_weight=angle_weight,
normalized_angle_distance_weight=nad_weight,
symmetry_measure_type=symmetry_measure_type)
multi_weights_strategy_3 = MultiWeightsChemenvStrategy(dist_ang_area_weight=da_area_weight_2,
self_csm_weight=self_csm_weight,
delta_csm_weight=delta_weight,
cn_bias_weight=bias_weight,
angle_weight=angle_weight,
normalized_angle_distance_weight=nad_weight,
symmetry_measure_type=symmetry_measure_type)
multi_weights_strategy_1_from_dict = MultiWeightsChemenvStrategy.from_dict(multi_weights_strategy_1.as_dict())
self.assertTrue(multi_weights_strategy_1 == multi_weights_strategy_1_from_dict)
self.assertFalse(simplest_strategy_1 == multi_weights_strategy_1)
self.assertFalse(multi_weights_strategy_1 == multi_weights_strategy_2)
self.assertFalse(multi_weights_strategy_1 == multi_weights_strategy_3)
self.assertFalse(multi_weights_strategy_2 == multi_weights_strategy_3)
def test_read_write_voronoi(self):
f = open("{}/{}".format(json_files_dir, 'test_T--4_FePO4_icsd_4266.json'), 'r')
dd = json.load(f)
f.close()
struct = Structure.from_dict(dd['structure'])
valences = [site.specie.oxi_state for site in struct]
detailed_voronoi_container = DetailedVoronoiContainer(structure=struct, valences=valences)
f = open('tmp_dir/se.json', 'w')
json.dump(detailed_voronoi_container.as_dict(), f)
f.close()
f = open('tmp_dir/se.json', 'r')
dd = json.load(f)
f.close()
detailed_voronoi_container2 = DetailedVoronoiContainer.from_dict(dd)
self.assertEqual(detailed_voronoi_container, detailed_voronoi_container2)
@classmethod
def tearDownClass(cls):
#Remove the directory in which the temporary files have been created
shutil.rmtree('tmp_dir')
if __name__ == "__main__":
unittest.main()
|
|
# Reads an array and turns it into simple features for machine learning
#
# To make this work you will probably need to install the
# following packages:
# pip install Pillow
# pip install tesserocr
# pip install python-dateutil
# pip install visual-logging
import cv2
import glob
import sys
import numpy as np
from PIL import Image
from tesserocr import PyTessBaseAPI, RIL
from difflib import SequenceMatcher as SM
import re
import datetime
from time import strptime
from logging import FileHandler
import logging
from collections import deque
sys.path.insert(0, '../utilities')
from tradingDays import tradingDays, holidays
# vlogging required patch in __init__.py
# It's probably here:
# ~/.pyenv/versions/3.6.0/envs/main/lib/python3.6/site-packages/vlogging
# Added .decode() to line with base64 print
# "data": base64.b64encode(data).decode(),
# Left message on github.
from vlogging import VisualRecord
class arrayImage(object):
def __init__(self, showImages=True, printImages=True):
self.scale = 2
# Include security and date, security and timeframe
self.topMargin4Date = int(round(0*self.scale))
# Bottom of data, security, and timeframe
self.bottomMargin4Date = int(round(130*self.scale))
self.topMargin4Array = int(round(330*self.scale))
self.bottomMargin4Array = int(round(840*self.scale))
self.leftMargin = 0 * self.scale # For entire array
self.leftMargin4JustBars = int(round(210*self.scale))
self.leftMargin = self.leftMargin4JustBars
self.leftMargin = int(round(20*self.scale)) # Want left labels now
self.rightMargin = int(round(750*self.scale))
self.rightMarginWords = int(round(240*self.scale))
self.imageLoc = ""
# whiteList = list(ascii_letters + digits + -)
self.whiteList = ""
gen = (i for j in (range(ord('a'), ord('z')+1),
range(ord('A'), ord('Z')+1),
range(ord('0'), ord('9')+1)) for i in j)
for i in gen:
self.whiteList = self.whiteList+chr(i)
self.whiteList = self.whiteList+chr(ord('-'))
# [classification of line, box info of text, box info, text,
# coordinate which we use to find bar if relevant.]
self.arrayDict = {} # representation of the array
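# Illustrative entry only (hypothetical values); ocrSegment() fills this
# dict with one such list per OCR'd text line:
# self.arrayDict[7] = [self.ensembleSegment,                  # class of line
#                      'A 12 830 30 852 0\n...',              # per-letter boxes
#                      {'x': 12, 'y': 830, 'w': 210, 'h': 22},
#                      'Aggregate\n',                         # raw OCR text
#                      0]                                     # bar coordinate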
self.adClassOfLinei = 0
self.adBoxTexti = 1
self.adBoxi = 2
self.adTexti = 3
self.adCoordinatei = 4
# Used to classify array and sub-parts of the array
self.unclassified = "unclassified"
self.dateUnit = "dateUnit"
self.timeUnit = "timeUnit"
self.granularity = "granularity"
self.year = "year"
self.splitTimeUnit = "splitTimeUnit"
# One of Aggregate, Transverse..Overnight Volatility
self.ensembleSegment = "ensembleSegment"
self.unInteresting = "unInteresting"
self.aggregate = "Aggregate"
self.transverse = "Transverse"
self.empirical = "Empirical"
self.longTerm = "Long Term"
self.tradingCycle = "Trading Cycle"
self.directionChange = "Direction Change"
self.panicCycle = "Panic Cycle"
self.internalVolatility = "Internal Volatility"
self.overnightVolatility = "Overnight Volatility"
self.daily = "DAILY FORECAST"
self.weekly = "WEEKLY FORECAST"
self.monthly = "MONTHLY FORECAST"
self.quarterly = "QUARTERLY FORECAST"
self.yearly = "YEARLY FORECAST"
self.allGranularities = [self.daily, self.weekly, self.monthly,
self.quarterly, self.yearly]
self.ensemble = [self.aggregate, self.transverse, self.longTerm,
self.empirical, self.tradingCycle,
self.directionChange, self.panicCycle,
self.panicCycle, self.internalVolatility,
self.overnightVolatility]
self.yearIndicator = ['2015', '2016', '2017', '2018', '2019',
'2020', '2021', '2022', '2023', '2024',
'2025', '2026', '2027', '2028', '2029',
'2030', '2031', '2032', '2033', '2034']
self.timePeriods = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec',
'2015', '2016', '2017', '2018', '2019',
'2020', '2021', '2022', '2023', '2024',
'2025', '2026', '2027', '2028', '2029',
'2030', '2031', '2032', '2033', '2034']
def readArray(self, loc, printFile="imgReadIn.png"):
self.imageLoc = loc
self.image = cv2.imread(loc)
return self.image
def image2Gray(self, img, printFile="grayTransform.png"):
self.grayImage = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
return self.grayImage
# Will need a generalized cropping function to accommodate
# different image sources. Use this simple code for now.
def cropArray(self, img):
return img[self.topMargin4Array:self.bottomMargin4Array,
self.leftMargin:self.rightMargin]
def cropDateInfo(self, img):
return img[self.topMargin4Date:self.bottomMargin4Date,
self.leftMargin:self.rightMargin]
def cropWords(self, img):
return img[self.topMargin4Array:self.bottomMargin4Array,
self.leftMargin:self.rightMarginWords]
def segmentWithThreshold(self, img):
ret, thresh = cv2.threshold(img, 0, 255,
cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)
return thresh
def cleanImage(self, img):
# Note: didn't try bilateral filtering which might be good:
# blur = cv2.bilateralFilter(img,9,75,75)
#
# INTER_LANCZOS4 is Similar to INTER_CUBIC
# Magnify
img = cv2.resize(img, None, fx=self.scale, fy=self.scale,
interpolation=cv2.INTER_CUBIC)
# logger.debug(VisualRecord("Original Image", img, "End image"))
# b&w for better recognition
grayImg = self.image2Gray(img)
(thresh, im_bw) = cv2.threshold(grayImg, 128, 255,
cv2.THRESH_BINARY | cv2.THRESH_OTSU)
thresh = self.segmentWithThreshold(im_bw)
# Sharpen
blur = cv2.GaussianBlur(thresh, (1, 1), 0)
sharp = blur.copy()
alpha = 0.5
alpha = 1.5
gamma = 0.2
gamma = 0
cv2.addWeighted(blur, alpha, sharp, 1-alpha, gamma, sharp)
# Denoise
clean = sharp.copy()
cv2.fastNlMeansDenoising(sharp, clean, 55, 5, 21)
return(clean)
def ocrSegment(self, sharp):
print("input image sharp shape:", sharp.shape)
# Given a (hopefully) sharpened image, ocr the segment
logger.debug(VisualRecord("sharp image ***", sharp, "End image"))
img = Image.fromarray(sharp, mode='L')
# logger.debug(VisualRecord("Tesseract Input", img, "End image"))
# Read off the labels one at a time. Need a margin
# since otherwise the cropping is too close for tesseract.
with PyTessBaseAPI() as api:
# api.Init(".","eng",OEM_DEFAULT)
api.SetVariable("tessedit_char_whitelist", self.whiteList)
api.SetImage(img)
timg = api.GetThresholdedImage()
logger.debug(VisualRecord("Tesseract's image ***", timg,
"End image"))
boxes = api.GetComponentImages(RIL.TEXTLINE, True)
i = 0
granularityOfArray = ""
yearOfArray = 0
for i, (im, box, _, _) in enumerate(boxes):
margin = 5
api.SetRectangle(box['x'], box['y'],
box['w']+margin, box['h']+margin)
croppedSegment = sharp[box['y']:box['y']+box['h']+margin,
box['x']:box['x']+box['w']+margin]
ocrResult = api.GetUTF8Text()
# Mean confidences
conf = api.MeanTextConf()
# print("confidences: ", api.AllWordConfidences())
print(ocrResult)
# print (dir(api.GetBoxText(0)))
print("==>", self.classifyEntry(ocrResult, i))
# Still need to split time units and aggregate
# when necessary with date
classOfLine = self.classifyEntry(ocrResult, i)
if self.granularity in classOfLine:
granularityOfArray = classOfLine.split(' ')[1]
if self.year in classOfLine:
yearOfArray = int(classOfLine.split(' ')[1])
# [classification of line, box info of text, box info, text,
# coordinate which we use to find bar if relevant.]
self.arrayDict[i] = [classOfLine, api.GetBoxText(0), box,
ocrResult, 0]
# split, find, etc defined for this.
# print(api.GetBoxText(0)) # Letter coordinates
tailPrint = "\n"+ocrResult+"end image"
logger.debug(VisualRecord("ocrResult",
croppedSegment, tailPrint))
print(repr(box))
fixedDates = self.fixDates(granularityOfArray, yearOfArray)
self.barHeight(img)
# print(self.arrayDict)
def barHeight(self, pilimg):
# Must use the PIL image: raw image pixels don't match the OCR
# coordinates, and (0, 0) is the upper-left corner.
maxCoord = 1000000000 # largest image we can imagine getting in pixels
leftMostX = maxCoord # Number approaches 0
topMostY = maxCoord # Number approaches 0
bottomMostY = 0 # updated below from the OCR text boxes
monthXcoord = []
ensembleYcoord = {}
for line in self.arrayDict:
# Need this for where to start reading pixels for daily arrays
if self.arrayDict[line][self.adClassOfLinei] == self.timeUnit:
if (bottomMostY == 0)\
or (bottomMostY >
(self.arrayDict[line][self.adBoxi]['y'] -
self.arrayDict[line][self.adBoxi]['h'])):
bottomMostY = self.arrayDict[line][self.adBoxi]['y'] -\
self.arrayDict[line][self.adBoxi]['h']
if self.arrayDict[line][self.adClassOfLinei] == self.dateUnit:
# Get the X coordinate for the middle letter of each month
# along the bottom of the array. Store in list: monthXcoord
months = self.arrayDict[line][self.adTexti].split(' ')
# Months are 3 character abbreviations
# From the middle looking up the image pixels we
# should be able to see where the bars are.
# Then we match on the individual letters with their
# coordinates that OCR gives us.
middleMonthLetters = deque([m[1] for m in months])
positionsOfLettersQue = deque(self.arrayDict[line][self.adBoxTexti].split('\n'))
while len(middleMonthLetters) > 0:
letter = middleMonthLetters.popleft()
while len(positionsOfLettersQue) > 0:
letterBox = positionsOfLettersQue.popleft()
if letter in letterBox:
# x is first coord
monthXcoord.append(int(letterBox.split(' ')[1]))
break
if self.arrayDict[line][self.adBoxi]['x'] < leftMostX:
leftMostX = self.arrayDict[line][self.adBoxi]['x']
if self.arrayDict[line][self.adBoxi]['y'] < topMostY:
topMostY = self.arrayDict[line][self.adBoxi]['y']
print("monthXcoord:", monthXcoord)
print(pilimg.getpixel((self.arrayDict[line][self.adBoxi]['x'],
self.arrayDict[line][self.adBoxi]['y'])))
print("topMostY: ", topMostY)
print("dateTimeUnit:", self.arrayDict[line])
# The higher you go in the image the lower the Y coordinate
if (bottomMostY == 0)\
or (bottomMostY >
(self.arrayDict[line][self.adBoxi]['y'] -
self.arrayDict[line][self.adBoxi]['h'])):
bottomMostY = self.arrayDict[line][self.adBoxi]['y'] -\
self.arrayDict[line][self.adBoxi]['h']
subImg = pilimg.crop((self.arrayDict[line][self.adBoxi]['x'],
topMostY,
self.arrayDict[line][self.adBoxi]['x']
+ 100,
self.arrayDict[line][self.adBoxi]['y']))
# subImg.show()
if self.arrayDict[line][self.adClassOfLinei]\
== self.ensembleSegment:
ensembleYcoord[int(self.arrayDict[line][self.adBoxi]['y'])] = \
self.arrayDict[line][self.adTexti].strip()
if self.arrayDict[line][self.adBoxi]['x'] < leftMostX:
leftMostX = self.arrayDict[line][self.adBoxi]['x']
if self.arrayDict[line][self.adBoxi]['y'] < topMostY:
topMostY = self.arrayDict[line][self.adBoxi]['y']
print("ensembleSegment:", self.arrayDict[line])
print("bottomMostY: ", bottomMostY)
print("ensembleYcoord:", ensembleYcoord)
for monthX in monthXcoord:
pixelList = []
for p in range(topMostY, bottomMostY):
pixelList.append(pilimg.getpixel((monthX, p)))
print(pixelList)
subImg = pilimg.crop((monthX-10, topMostY, monthX+50, bottomMostY))
# subImg.show()
def fixDates(self, granularityOfArray, yearOfArray):
# TODO: There's an edge case when we are in December
# projecting out to the next year.
#
# TODO: Heuristics won't work reliably. Tesseract is too flaky.
# Create an ML model instead; a fine example of replacing code
# with ML.
#
# For now to test this out we will just take the first
# date month pair and then count out the number of slots
# we find using the nyse trading days gist.
#
# Modify self.arrayDict so that dates and times are combined
# if needed and then split into properly labeled single
# entries. Also add mid-point used to find the X axis
# coordinate we follow to get the heights of bars.
#
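# Worked illustration with hypothetical dates: for a daily array starting
# on 2017-05-01, the loop below effectively grows the window
#   fixed = list(tradingDays(datetime.datetime(2017, 5, 1),
#                            datetime.datetime(2017, 5, 1)
#                            + datetime.timedelta(days=i)))
# until exactly daysInArray (12) trading days fall inside it, which is how
# weekends and NYSE holidays get skipped.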
# Get the locations of date related data
timeUnitIndex = []
dateUnitIndex = []
self.ocrStringIndex = 3
# used to find number of days in a daily array.
# Needs to be safely more than number of days in array
# to account for weekends and holidays
daysRange = 20
daysInArray = 12
for line in self.arrayDict:
if self.arrayDict[line][0] == self.dateUnit:
dateUnitIndex.append(line)
if self.arrayDict[line][0] == self.timeUnit:
timeUnitIndex.append(line)
theTimes = []
theDates = []
if len(timeUnitIndex) > 0:
for tu in timeUnitIndex:
times = self.arrayDict[tu][self.ocrStringIndex].split()
for t in times:
try:
theTimes.append(int(re.findall('[0-9]+', t)[0]))
except IndexError as ie:
print("index out of range")
except Exception as e:
print(e.args)
if len(dateUnitIndex) > 0:
for d in dateUnitIndex:
for month in self.arrayDict[d][self.ocrStringIndex].split():
theDates.append(strptime(month, '%b').tm_mon)
print("granularityOfArray: ", granularityOfArray)
# These are the problematic ones for tesseract.
if (granularityOfArray in self.daily):
for i in range(0, daysRange):
fixed = list(tradingDays(datetime.datetime(yearOfArray,
theDates[0],
theTimes[0]),
datetime.datetime(yearOfArray,
theDates[0], theTimes[0])
+ datetime.timedelta(days=i)))
if len(fixed) == daysInArray:
break
elif (granularityOfArray in self.weekly):
fixed = list((tradingDays(datetime.datetime(yearOfArray,
theDates[0],
theTimes[0]),
datetime.datetime(yearOfArray,
theDates[0], theTimes[0]) +
datetime.timedelta(weeks=11))))
print("weeks:", fixed)
else:
print("other than daily and weekly")
fixed = theDates
return(fixed)
def classifyEntry(self, ocrResult, ocrLineNumber):
# Simple heuristic based classifier
# So we can do i,j for height of bar chart
# i = one of aggregate..overnightVol.
# j = one of the time units, possibly a compound one
# In the case of compound time, date and month are here.
if ocrLineNumber < 5: # Near top of array there could be a time stamp
for y in self.yearIndicator:
if y in ocrResult:
return(self.year+" "+y)
countDateUnits = 0
for t in self.timePeriods:
countDateUnits += ocrResult.count(t)
if countDateUnits > 10:
return(self.dateUnit)
for e in self.ensemble:
if (SM(None, e, ocrResult).ratio() > .8):
return(self.ensembleSegment)
countNumbers = 0
for n in range(1, 31):
countNumbers += ocrResult.count(str(n))
if countNumbers > 8:
return(self.timeUnit)
for g in self.allGranularities:
if g in ocrResult:
return(self.granularity+" "+g)
return(self.unclassified)
def extractData(self, sharp, orig):
pass
# Ocr output to document representation
# we need a numpy array for k-means, but the data
# associated with an entry is rather complex and regular python
# data structures would be convenient. So we treat the two structures
# like two database tables with an id as a key between the two.
# The numpy array is the height we will cluster on, id.
# The python structure is a dict indexed by id and contains a list
# of box, boxText.
#
# height of bar, color of bar, x, y, w, h, Text,
#
# classify document rows:
# (array label, array indicator, timeUnit1, timeUnit2, miscText)
# Get bar arrayLabel*(1..timeUnits)
# Extract bar height in pixels
# Build/Use bar height classifier 0..5, probably gaussian mixture model
# K-means will probably work better
# Represent array data and save
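# A minimal sketch of the k-means step described above, not part of the
# original pipeline. It assumes bar heights (in pixels) keyed by id and a
# fixed number of levels (0..5); the helper name and the heights_by_id
# layout are illustrative only.
def cluster_bar_heights(heights_by_id, n_levels=6):
    # heights_by_id: dict {id: bar height in pixels}; needs at least
    # n_levels samples for cv2.kmeans to run.
    ids = list(heights_by_id.keys())
    samples = np.float32([[heights_by_id[i]] for i in ids])
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)
    _, labels, centers = cv2.kmeans(samples, n_levels, None, criteria,
                                    10, cv2.KMEANS_RANDOM_CENTERS)
    # Relabel clusters so 0 is the shortest bar and n_levels-1 the tallest
    order = np.argsort(centers.ravel())
    rank = {int(old): new for new, old in enumerate(order)}
    return {ids[k]: rank[int(labels[k][0])] for k in range(len(ids))}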
# Produces outline of bars
def segmentWithCanny(self, img):
edges = cv2.Canny(img, 100, 200)
return edges
@property
def image(self):
return self._image
@image.setter
def image(self, value):
self._image = value
@property
def grayImage(self):
return self._grayImage
@grayImage.setter
def grayImage(self, value):
self._grayImage = value
dirWithArrays = "../../arrays/daily/Dow/array/"
dirWithArrays = "../../../Dropbox/Dow/array/"
iteration = 0
# BEGIN Set up debugging system
logger = logging.getLogger("demo")
logFile = "../../bin/test"+str(iteration)+".html"
logFile = "../bin/test"+str(iteration)+".html"
fh = FileHandler(logFile, mode="w")
logger.setLevel(logging.DEBUG)
logger.addHandler(fh)
# END Set up debugging system
# imageLoc = "../arrays/daily/Dow/array/daily-Dow-array-2017-05-30-20.27.08.png"
for imageLoc in glob.glob(dirWithArrays+"*.png"):
print("processing file: ", imageLoc)
arrayDaily = arrayImage()
# imageLoc = "../arrays/daily/Dow/array/daily-Dow-array-2017-05-24-21.37.37.png"
img = arrayDaily.readArray(imageLoc)
im_bw = arrayDaily.cleanImage(img)
arrayDaily.ocrSegment(im_bw)
iteration += 1
if iteration > 0:
exit()
|
|
#!/usr/bin/env python3
# Copyright 2010-2021 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# [START program]
"""The Stigler diet problem.
A description of the problem can be found here:
https://en.wikipedia.org/wiki/Stigler_diet.
"""
# [START import]
from ortools.linear_solver import pywraplp
# [END import]
def main():
"""Entry point of the program."""
# Instantiate the data problem.
# [START data_model]
# Nutrient minimums.
nutrients = [
['Calories (kcal)', 3],
['Protein (g)', 70],
['Calcium (g)', 0.8],
['Iron (mg)', 12],
['Vitamin A (KIU)', 5],
['Vitamin B1 (mg)', 1.8],
['Vitamin B2 (mg)', 2.7],
['Niacin (mg)', 18],
['Vitamin C (mg)', 75],
]
# Commodity, Unit, 1939 price (cents), Calories (kcal), Protein (g),
# Calcium (g), Iron (mg), Vitamin A (KIU), Vitamin B1 (mg), Vitamin B2 (mg),
# Niacin (mg), Vitamin C (mg)
data = [
[
'Wheat Flour (Enriched)', '10 lb.', 36, 44.7, 1411, 2, 365, 0, 55.4,
33.3, 441, 0
],
['Macaroni', '1 lb.', 14.1, 11.6, 418, 0.7, 54, 0, 3.2, 1.9, 68, 0],
[
'Wheat Cereal (Enriched)', '28 oz.', 24.2, 11.8, 377, 14.4, 175, 0,
14.4, 8.8, 114, 0
],
['Corn Flakes', '8 oz.', 7.1, 11.4, 252, 0.1, 56, 0, 13.5, 2.3, 68, 0],
[
'Corn Meal', '1 lb.', 4.6, 36.0, 897, 1.7, 99, 30.9, 17.4, 7.9, 106,
0
],
[
'Hominy Grits', '24 oz.', 8.5, 28.6, 680, 0.8, 80, 0, 10.6, 1.6,
110, 0
],
['Rice', '1 lb.', 7.5, 21.2, 460, 0.6, 41, 0, 2, 4.8, 60, 0],
['Rolled Oats', '1 lb.', 7.1, 25.3, 907, 5.1, 341, 0, 37.1, 8.9, 64, 0],
[
'White Bread (Enriched)', '1 lb.', 7.9, 15.0, 488, 2.5, 115, 0,
13.8, 8.5, 126, 0
],
[
'Whole Wheat Bread', '1 lb.', 9.1, 12.2, 484, 2.7, 125, 0, 13.9,
6.4, 160, 0
],
['Rye Bread', '1 lb.', 9.1, 12.4, 439, 1.1, 82, 0, 9.9, 3, 66, 0],
['Pound Cake', '1 lb.', 24.8, 8.0, 130, 0.4, 31, 18.9, 2.8, 3, 17, 0],
['Soda Crackers', '1 lb.', 15.1, 12.5, 288, 0.5, 50, 0, 0, 0, 0, 0],
['Milk', '1 qt.', 11, 6.1, 310, 10.5, 18, 16.8, 4, 16, 7, 177],
[
'Evaporated Milk (can)', '14.5 oz.', 6.7, 8.4, 422, 15.1, 9, 26, 3,
23.5, 11, 60
],
['Butter', '1 lb.', 30.8, 10.8, 9, 0.2, 3, 44.2, 0, 0.2, 2, 0],
['Oleomargarine', '1 lb.', 16.1, 20.6, 17, 0.6, 6, 55.8, 0.2, 0, 0, 0],
['Eggs', '1 doz.', 32.6, 2.9, 238, 1.0, 52, 18.6, 2.8, 6.5, 1, 0],
[
'Cheese (Cheddar)', '1 lb.', 24.2, 7.4, 448, 16.4, 19, 28.1, 0.8,
10.3, 4, 0
],
['Cream', '1/2 pt.', 14.1, 3.5, 49, 1.7, 3, 16.9, 0.6, 2.5, 0, 17],
[
'Peanut Butter', '1 lb.', 17.9, 15.7, 661, 1.0, 48, 0, 9.6, 8.1,
471, 0
],
['Mayonnaise', '1/2 pt.', 16.7, 8.6, 18, 0.2, 8, 2.7, 0.4, 0.5, 0, 0],
['Crisco', '1 lb.', 20.3, 20.1, 0, 0, 0, 0, 0, 0, 0, 0],
['Lard', '1 lb.', 9.8, 41.7, 0, 0, 0, 0.2, 0, 0.5, 5, 0],
[
'Sirloin Steak', '1 lb.', 39.6, 2.9, 166, 0.1, 34, 0.2, 2.1, 2.9,
69, 0
],
['Round Steak', '1 lb.', 36.4, 2.2, 214, 0.1, 32, 0.4, 2.5, 2.4, 87, 0],
['Rib Roast', '1 lb.', 29.2, 3.4, 213, 0.1, 33, 0, 0, 2, 0, 0],
['Chuck Roast', '1 lb.', 22.6, 3.6, 309, 0.2, 46, 0.4, 1, 4, 120, 0],
['Plate', '1 lb.', 14.6, 8.5, 404, 0.2, 62, 0, 0.9, 0, 0, 0],
[
'Liver (Beef)', '1 lb.', 26.8, 2.2, 333, 0.2, 139, 169.2, 6.4, 50.8,
316, 525
],
['Leg of Lamb', '1 lb.', 27.6, 3.1, 245, 0.1, 20, 0, 2.8, 3.9, 86, 0],
[
'Lamb Chops (Rib)', '1 lb.', 36.6, 3.3, 140, 0.1, 15, 0, 1.7, 2.7,
54, 0
],
['Pork Chops', '1 lb.', 30.7, 3.5, 196, 0.2, 30, 0, 17.4, 2.7, 60, 0],
[
'Pork Loin Roast', '1 lb.', 24.2, 4.4, 249, 0.3, 37, 0, 18.2, 3.6,
79, 0
],
['Bacon', '1 lb.', 25.6, 10.4, 152, 0.2, 23, 0, 1.8, 1.8, 71, 0],
['Ham, smoked', '1 lb.', 27.4, 6.7, 212, 0.2, 31, 0, 9.9, 3.3, 50, 0],
['Salt Pork', '1 lb.', 16, 18.8, 164, 0.1, 26, 0, 1.4, 1.8, 0, 0],
[
'Roasting Chicken', '1 lb.', 30.3, 1.8, 184, 0.1, 30, 0.1, 0.9, 1.8,
68, 46
],
['Veal Cutlets', '1 lb.', 42.3, 1.7, 156, 0.1, 24, 0, 1.4, 2.4, 57, 0],
[
'Salmon, Pink (can)', '16 oz.', 13, 5.8, 705, 6.8, 45, 3.5, 1, 4.9,
209, 0
],
['Apples', '1 lb.', 4.4, 5.8, 27, 0.5, 36, 7.3, 3.6, 2.7, 5, 544],
['Bananas', '1 lb.', 6.1, 4.9, 60, 0.4, 30, 17.4, 2.5, 3.5, 28, 498],
['Lemons', '1 doz.', 26, 1.0, 21, 0.5, 14, 0, 0.5, 0, 4, 952],
['Oranges', '1 doz.', 30.9, 2.2, 40, 1.1, 18, 11.1, 3.6, 1.3, 10, 1998],
['Green Beans', '1 lb.', 7.1, 2.4, 138, 3.7, 80, 69, 4.3, 5.8, 37, 862],
['Cabbage', '1 lb.', 3.7, 2.6, 125, 4.0, 36, 7.2, 9, 4.5, 26, 5369],
['Carrots', '1 bunch', 4.7, 2.7, 73, 2.8, 43, 188.5, 6.1, 4.3, 89, 608],
['Celery', '1 stalk', 7.3, 0.9, 51, 3.0, 23, 0.9, 1.4, 1.4, 9, 313],
['Lettuce', '1 head', 8.2, 0.4, 27, 1.1, 22, 112.4, 1.8, 3.4, 11, 449],
['Onions', '1 lb.', 3.6, 5.8, 166, 3.8, 59, 16.6, 4.7, 5.9, 21, 1184],
[
'Potatoes', '15 lb.', 34, 14.3, 336, 1.8, 118, 6.7, 29.4, 7.1, 198,
2522
],
['Spinach', '1 lb.', 8.1, 1.1, 106, 0, 138, 918.4, 5.7, 13.8, 33, 2755],
[
'Sweet Potatoes', '1 lb.', 5.1, 9.6, 138, 2.7, 54, 290.7, 8.4, 5.4,
83, 1912
],
[
'Peaches (can)', 'No. 2 1/2', 16.8, 3.7, 20, 0.4, 10, 21.5, 0.5, 1,
31, 196
],
[
'Pears (can)', 'No. 2 1/2', 20.4, 3.0, 8, 0.3, 8, 0.8, 0.8, 0.8, 5,
81
],
[
'Pineapple (can)', 'No. 2 1/2', 21.3, 2.4, 16, 0.4, 8, 2, 2.8, 0.8,
7, 399
],
[
'Asparagus (can)', 'No. 2', 27.7, 0.4, 33, 0.3, 12, 16.3, 1.4, 2.1,
17, 272
],
[
'Green Beans (can)', 'No. 2', 10, 1.0, 54, 2, 65, 53.9, 1.6, 4.3,
32, 431
],
[
'Pork and Beans (can)', '16 oz.', 7.1, 7.5, 364, 4, 134, 3.5, 8.3,
7.7, 56, 0
],
['Corn (can)', 'No. 2', 10.4, 5.2, 136, 0.2, 16, 12, 1.6, 2.7, 42, 218],
[
'Peas (can)', 'No. 2', 13.8, 2.3, 136, 0.6, 45, 34.9, 4.9, 2.5, 37,
370
],
[
'Tomatoes (can)', 'No. 2', 8.6, 1.3, 63, 0.7, 38, 53.2, 3.4, 2.5,
36, 1253
],
[
'Tomato Soup (can)', '10 1/2 oz.', 7.6, 1.6, 71, 0.6, 43, 57.9, 3.5,
2.4, 67, 862
],
[
'Peaches, Dried', '1 lb.', 15.7, 8.5, 87, 1.7, 173, 86.8, 1.2, 4.3,
55, 57
],
[
'Prunes, Dried', '1 lb.', 9, 12.8, 99, 2.5, 154, 85.7, 3.9, 4.3, 65,
257
],
[
'Raisins, Dried', '15 oz.', 9.4, 13.5, 104, 2.5, 136, 4.5, 6.3, 1.4,
24, 136
],
[
'Peas, Dried', '1 lb.', 7.9, 20.0, 1367, 4.2, 345, 2.9, 28.7, 18.4,
162, 0
],
[
'Lima Beans, Dried', '1 lb.', 8.9, 17.4, 1055, 3.7, 459, 5.1, 26.9,
38.2, 93, 0
],
[
'Navy Beans, Dried', '1 lb.', 5.9, 26.9, 1691, 11.4, 792, 0, 38.4,
24.6, 217, 0
],
['Coffee', '1 lb.', 22.4, 0, 0, 0, 0, 0, 4, 5.1, 50, 0],
['Tea', '1/4 lb.', 17.4, 0, 0, 0, 0, 0, 0, 2.3, 42, 0],
['Cocoa', '8 oz.', 8.6, 8.7, 237, 3, 72, 0, 2, 11.9, 40, 0],
['Chocolate', '8 oz.', 16.2, 8.0, 77, 1.3, 39, 0, 0.9, 3.4, 14, 0],
['Sugar', '10 lb.', 51.7, 34.9, 0, 0, 0, 0, 0, 0, 0, 0],
['Corn Syrup', '24 oz.', 13.7, 14.7, 0, 0.5, 74, 0, 0, 0, 5, 0],
['Molasses', '18 oz.', 13.6, 9.0, 0, 10.3, 244, 0, 1.9, 7.5, 146, 0],
[
'Strawberry Preserves', '1 lb.', 20.5, 6.4, 11, 0.4, 7, 0.2, 0.2,
0.4, 3, 0
],
]
# [END data_model]
# [START solver]
# Instantiate a Glop solver and name it 'StiglerDietExample'.
solver = pywraplp.Solver('StiglerDietExample',
pywraplp.Solver.GLOP_LINEAR_PROGRAMMING)
# [END solver]
# [START variables]
# Declare an array to hold our variables.
foods = [solver.NumVar(0.0, solver.infinity(), item[0]) for item in data]
print('Number of variables =', solver.NumVariables())
# [END variables]
# [START constraints]
# Create the constraints, one per nutrient.
constraints = []
for i, nutrient in enumerate(nutrients):
constraints.append(solver.Constraint(nutrient[1], solver.infinity()))
for j, item in enumerate(data):
constraints[i].SetCoefficient(foods[j], item[i + 3])
print('Number of constraints =', solver.NumConstraints())
# [END constraints]
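# In LP form, the loop above imposes, for each nutrient i with minimum b_i:
#     sum_j data[j][i + 3] * x_j >= b_i,    x_j >= 0,
# where x_j is the (price-normalized) amount of food j held in foods[j];
# the objective below simply minimizes sum_j x_j.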
# [START objective]
# Objective function: Minimize the sum of (price-normalized) foods.
objective = solver.Objective()
for food in foods:
objective.SetCoefficient(food, 1)
objective.SetMinimization()
# [END objective]
# [START solve]
status = solver.Solve()
# [END solve]
# [START print_solution]
# Check that the problem has an optimal solution.
if status != solver.OPTIMAL:
print('The problem does not have an optimal solution!')
if status == solver.FEASIBLE:
print('A potentially suboptimal solution was found.')
else:
print('The solver could not solve the problem.')
exit(1)
# Display the amounts (in dollars) to purchase of each food.
nutrients_result = [0] * len(nutrients)
print('\nAnnual Foods:')
for i, food in enumerate(foods):
if food.solution_value() > 0.0:
print('{}: ${}'.format(data[i][0], 365. * food.solution_value()))
for j, _ in enumerate(nutrients):
nutrients_result[j] += data[i][j + 3] * food.solution_value()
print('\nOptimal annual price: ${:.4f}'.format(365. * objective.Value()))
print('\nNutrients per day:')
for i, nutrient in enumerate(nutrients):
print('{}: {:.2f} (min {})'.format(nutrient[0], nutrients_result[i],
nutrient[1]))
# [END print_solution]
# [START advanced]
print('\nAdvanced usage:')
print('Problem solved in ', solver.wall_time(), ' milliseconds')
print('Problem solved in ', solver.iterations(), ' iterations')
# [END advanced]
if __name__ == '__main__':
main()
# [END program]
|
|
from __future__ import unicode_literals
import json
import six
import re
from moto.core.responses import BaseResponse
from moto.core.utils import camelcase_to_underscores
from .models import dynamodb_backend2, dynamo_json_dump
GET_SESSION_TOKEN_RESULT = """
<GetSessionTokenResponse xmlns="https://sts.amazonaws.com/doc/2011-06-15/">
<GetSessionTokenResult>
<Credentials>
<SessionToken>
AQoEXAMPLEH4aoAH0gNCAPyJxz4BlCFFxWNE1OPTgk5TthT+FvwqnKwRcOIfrRh3c/L
To6UDdyJwOOvEVPvLXCrrrUtdnniCEXAMPLE/IvU1dYUg2RVAJBanLiHb4IgRmpRV3z
rkuWJOgQs8IZZaIv2BXIa2R4OlgkBN9bkUDNCJiBeb/AXlzBBko7b15fjrBs2+cTQtp
Z3CYWFXG8C5zqx37wnOE49mRl/+OtkIKGO7fAE
</SessionToken>
<SecretAccessKey>
wJalrXUtnFEMI/K7MDENG/bPxRfiCYzEXAMPLEKEY
</SecretAccessKey>
<Expiration>2011-07-11T19:55:29.611Z</Expiration>
<AccessKeyId>AKIAIOSFODNN7EXAMPLE</AccessKeyId>
</Credentials>
</GetSessionTokenResult>
<ResponseMetadata>
<RequestId>58c5dbae-abef-11e0-8cfe-09039844ac7d</RequestId>
</ResponseMetadata>
</GetSessionTokenResponse>"""
def sts_handler():
return GET_SESSION_TOKEN_RESULT
class DynamoHandler(BaseResponse):
def get_endpoint_name(self, headers):
"""Parses request headers and extracts part od the X-Amz-Target
that corresponds to a method of DynamoHandler
ie: X-Amz-Target: DynamoDB_20111205.ListTables -> ListTables
"""
# Headers are case-insensitive. Probably a better way to do this.
match = headers.get('x-amz-target') or headers.get('X-Amz-Target')
if match:
return match.split(".")[1]
def error(self, type_, status=400):
return status, self.response_headers, dynamo_json_dump({'__type': type_})
def call_action(self):
body = self.body
if 'GetSessionToken' in body:
return 200, self.response_headers, sts_handler()
self.body = json.loads(body or '{}')
endpoint = self.get_endpoint_name(self.headers)
if endpoint:
endpoint = camelcase_to_underscores(endpoint)
response = getattr(self, endpoint)()
if isinstance(response, six.string_types):
return 200, self.response_headers, response
else:
status_code, new_headers, response_content = response
self.response_headers.update(new_headers)
return status_code, self.response_headers, response_content
else:
return 404, self.response_headers, ""
def list_tables(self):
body = self.body
limit = body.get('Limit')
if body.get("ExclusiveStartTableName"):
last = body.get("ExclusiveStartTableName")
start = list(dynamodb_backend2.tables.keys()).index(last) + 1
else:
start = 0
all_tables = list(dynamodb_backend2.tables.keys())
if limit:
tables = all_tables[start:start + limit]
else:
tables = all_tables[start:]
response = {"TableNames": tables}
if limit and len(all_tables) > start + limit:
response["LastEvaluatedTableName"] = tables[-1]
return dynamo_json_dump(response)
def create_table(self):
body = self.body
# get the table name
table_name = body['TableName']
# get the throughput
throughput = body["ProvisionedThroughput"]
# getting the schema
key_schema = body['KeySchema']
# getting attribute definition
attr = body["AttributeDefinitions"]
# getting the indexes
global_indexes = body.get("GlobalSecondaryIndexes", [])
local_secondary_indexes = body.get("LocalSecondaryIndexes", [])
table = dynamodb_backend2.create_table(table_name,
schema=key_schema,
throughput=throughput,
attr=attr,
global_indexes=global_indexes,
indexes=local_secondary_indexes)
if table is not None:
return dynamo_json_dump(table.describe())
else:
er = 'com.amazonaws.dynamodb.v20111205#ResourceInUseException'
return self.error(er)
def delete_table(self):
name = self.body['TableName']
table = dynamodb_backend2.delete_table(name)
if table is not None:
return dynamo_json_dump(table.describe())
else:
er = 'com.amazonaws.dynamodb.v20111205#ResourceNotFoundException'
return self.error(er)
def update_table(self):
name = self.body['TableName']
if 'GlobalSecondaryIndexUpdates' in self.body:
table = dynamodb_backend2.update_table_global_indexes(
name, self.body['GlobalSecondaryIndexUpdates'])
if 'ProvisionedThroughput' in self.body:
throughput = self.body["ProvisionedThroughput"]
table = dynamodb_backend2.update_table_throughput(name, throughput)
return dynamo_json_dump(table.describe())
def describe_table(self):
name = self.body['TableName']
try:
table = dynamodb_backend2.tables[name]
except KeyError:
er = 'com.amazonaws.dynamodb.v20111205#ResourceNotFoundException'
return self.error(er)
return dynamo_json_dump(table.describe(base_key='Table'))
def put_item(self):
name = self.body['TableName']
item = self.body['Item']
overwrite = 'Expected' not in self.body
if not overwrite:
expected = self.body['Expected']
else:
expected = None
# Attempt to parse simple ConditionExpressions into an Expected
# expression
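# For example (hypothetical request), a ConditionExpression of
#   "attribute_not_exists(id) AND attribute_exists(owner)"
# is rewritten by the block below into
#   expected = {'id': {'Exists': False}, 'owner': {'Exists': True}}
# while any expression containing OR is ignored and expected stays None.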
if not expected:
condition_expression = self.body.get('ConditionExpression')
if condition_expression and 'OR' not in condition_expression:
cond_items = [c.strip()
for c in condition_expression.split('AND')]
if cond_items:
expected = {}
overwrite = False
exists_re = re.compile(r'^attribute_exists\((.*)\)$')
not_exists_re = re.compile(
r'^attribute_not_exists\((.*)\)$')
for cond in cond_items:
exists_m = exists_re.match(cond)
not_exists_m = not_exists_re.match(cond)
if exists_m:
expected[exists_m.group(1)] = {'Exists': True}
elif not_exists_m:
expected[not_exists_m.group(1)] = {'Exists': False}
try:
result = dynamodb_backend2.put_item(
name, item, expected, overwrite)
except Exception:
er = 'com.amazonaws.dynamodb.v20111205#ConditionalCheckFailedException'
return self.error(er)
if result:
item_dict = result.to_json()
item_dict['ConsumedCapacityUnits'] = 1
return dynamo_json_dump(item_dict)
else:
er = 'com.amazonaws.dynamodb.v20111205#ResourceNotFoundException'
return self.error(er)
def batch_write_item(self):
table_batches = self.body['RequestItems']
for table_name, table_requests in table_batches.items():
for table_request in table_requests:
request_type = list(table_request.keys())[0]
request = list(table_request.values())[0]
if request_type == 'PutRequest':
item = request['Item']
dynamodb_backend2.put_item(table_name, item)
elif request_type == 'DeleteRequest':
keys = request['Key']
item = dynamodb_backend2.delete_item(table_name, keys)
response = {
"ConsumedCapacity": [
{
'TableName': table_name,
'CapacityUnits': 1.0,
'Table': {'CapacityUnits': 1.0}
} for table_name, table_requests in table_batches.items()
],
"ItemCollectionMetrics": {},
"UnprocessedItems": {}
}
return dynamo_json_dump(response)
def get_item(self):
name = self.body['TableName']
key = self.body['Key']
try:
item = dynamodb_backend2.get_item(name, key)
except ValueError:
er = 'com.amazon.coral.validate#ValidationException'
return self.error(er, status=400)
if item:
item_dict = item.describe_attrs(attributes=None)
item_dict['ConsumedCapacityUnits'] = 0.5
return dynamo_json_dump(item_dict)
else:
# Item not found
er = '{}'
return self.error(er, status=200)
def batch_get_item(self):
table_batches = self.body['RequestItems']
results = {
"ConsumedCapacity": [],
"Responses": {
},
"UnprocessedKeys": {
}
}
for table_name, table_request in table_batches.items():
keys = table_request['Keys']
attributes_to_get = table_request.get('AttributesToGet')
results["Responses"][table_name] = []
for key in keys:
item = dynamodb_backend2.get_item(table_name, key)
if item:
item_describe = item.describe_attrs(attributes_to_get)
results["Responses"][table_name].append(
item_describe["Item"])
results["ConsumedCapacity"].append({
"CapacityUnits": len(keys),
"TableName": table_name
})
return dynamo_json_dump(results)
def query(self):
name = self.body['TableName']
# {u'KeyConditionExpression': u'#n0 = :v0', u'ExpressionAttributeValues': {u':v0': {u'S': u'johndoe'}}, u'ExpressionAttributeNames': {u'#n0': u'username'}}
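# With a range key the body may instead look like (hypothetical values):
# {u'KeyConditionExpression': u'#n0 = :v0 AND #n1 BETWEEN :v1 AND :v2', ...}
# The parsing below splits on the first " AND ", picks the piece whose
# attribute is the index HASH key as the hash condition, and derives the
# range comparison (BETWEEN, BEGINS_WITH, or a plain operator) from the rest.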
key_condition_expression = self.body.get('KeyConditionExpression')
filter_kwargs = {}
if key_condition_expression:
value_alias_map = self.body['ExpressionAttributeValues']
table = dynamodb_backend2.get_table(name)
index_name = self.body.get('IndexName')
if index_name:
all_indexes = (table.global_indexes or []) + \
(table.indexes or [])
indexes_by_name = dict((i['IndexName'], i)
for i in all_indexes)
if index_name not in indexes_by_name:
raise ValueError('Invalid index: %s for table: %s. Available indexes are: %s' % (
index_name, name, ', '.join(indexes_by_name.keys())
))
index = indexes_by_name[index_name]['KeySchema']
else:
index = table.schema
key_map = [column for _, column in sorted(
(k, v) for k, v in self.body['ExpressionAttributeNames'].items())]
if " AND " in key_condition_expression:
expressions = key_condition_expression.split(" AND ", 1)
index_hash_key = [
key for key in index if key['KeyType'] == 'HASH'][0]
hash_key_index_in_key_map = key_map.index(
index_hash_key['AttributeName'])
hash_key_expression = expressions.pop(
hash_key_index_in_key_map).strip('()')
# TODO implement more than one range expression and OR
# operators
range_key_expression = expressions[0].strip('()')
range_key_expression_components = range_key_expression.split()
range_comparison = range_key_expression_components[1]
if 'AND' in range_key_expression:
range_comparison = 'BETWEEN'
range_values = [
value_alias_map[range_key_expression_components[2]],
value_alias_map[range_key_expression_components[4]],
]
elif 'begins_with' in range_key_expression:
range_comparison = 'BEGINS_WITH'
range_values = [
value_alias_map[range_key_expression_components[1]],
]
else:
range_values = [value_alias_map[
range_key_expression_components[2]]]
else:
hash_key_expression = key_condition_expression
range_comparison = None
range_values = []
hash_key_value_alias = hash_key_expression.split("=")[1].strip()
hash_key = value_alias_map[hash_key_value_alias]
else:
# 'KeyConditions': {u'forum_name': {u'ComparisonOperator': u'EQ', u'AttributeValueList': [{u'S': u'the-key'}]}}
key_conditions = self.body.get('KeyConditions')
if key_conditions:
hash_key_name, range_key_name = dynamodb_backend2.get_table_keys_name(
name, key_conditions.keys())
for key, value in key_conditions.items():
if key not in (hash_key_name, range_key_name):
filter_kwargs[key] = value
if hash_key_name is None:
er = "'com.amazonaws.dynamodb.v20120810#ResourceNotFoundException"
return self.error(er)
hash_key = key_conditions[hash_key_name][
'AttributeValueList'][0]
if len(key_conditions) == 1:
range_comparison = None
range_values = []
else:
if range_key_name is None and not filter_kwargs:
er = "com.amazon.coral.validate#ValidationException"
return self.error(er)
else:
range_condition = key_conditions.get(range_key_name)
if range_condition:
range_comparison = range_condition[
'ComparisonOperator']
range_values = range_condition[
'AttributeValueList']
else:
range_comparison = None
range_values = []
index_name = self.body.get('IndexName')
exclusive_start_key = self.body.get('ExclusiveStartKey')
limit = self.body.get("Limit")
scan_index_forward = self.body.get("ScanIndexForward")
items, scanned_count, last_evaluated_key = dynamodb_backend2.query(
name, hash_key, range_comparison, range_values, limit,
exclusive_start_key, scan_index_forward, index_name=index_name, **filter_kwargs)
if items is None:
er = 'com.amazonaws.dynamodb.v20111205#ResourceNotFoundException'
return self.error(er)
result = {
"Count": len(items),
"ConsumedCapacityUnits": 1,
"ScannedCount": scanned_count
}
if self.body.get('Select', '').upper() != 'COUNT':
result["Items"] = [item.attrs for item in items]
if last_evaluated_key is not None:
result["LastEvaluatedKey"] = last_evaluated_key
return dynamo_json_dump(result)
def scan(self):
name = self.body['TableName']
filters = {}
scan_filters = self.body.get('ScanFilter', {})
for attribute_name, scan_filter in scan_filters.items():
# Keys are attribute names. Values are tuples of (comparison,
# comparison_value)
comparison_operator = scan_filter["ComparisonOperator"]
comparison_values = scan_filter.get("AttributeValueList", [])
filters[attribute_name] = (comparison_operator, comparison_values)
exclusive_start_key = self.body.get('ExclusiveStartKey')
limit = self.body.get("Limit")
items, scanned_count, last_evaluated_key = dynamodb_backend2.scan(name, filters,
limit,
exclusive_start_key)
if items is None:
er = 'com.amazonaws.dynamodb.v20111205#ResourceNotFoundException'
return self.error(er)
result = {
"Count": len(items),
"Items": [item.attrs for item in items],
"ConsumedCapacityUnits": 1,
"ScannedCount": scanned_count
}
if last_evaluated_key is not None:
result["LastEvaluatedKey"] = last_evaluated_key
return dynamo_json_dump(result)
def delete_item(self):
name = self.body['TableName']
keys = self.body['Key']
return_values = self.body.get('ReturnValues', '')
item = dynamodb_backend2.delete_item(name, keys)
if item:
if return_values == 'ALL_OLD':
item_dict = item.to_json()
else:
item_dict = {'Attributes': {}}
item_dict['ConsumedCapacityUnits'] = 0.5
return dynamo_json_dump(item_dict)
else:
er = 'com.amazonaws.dynamodb.v20120810#ConditionalCheckFailedException'
return self.error(er)
def update_item(self):
name = self.body['TableName']
key = self.body['Key']
update_expression = self.body.get('UpdateExpression')
attribute_updates = self.body.get('AttributeUpdates')
expression_attribute_names = self.body.get(
'ExpressionAttributeNames', {})
expression_attribute_values = self.body.get(
'ExpressionAttributeValues', {})
existing_item = dynamodb_backend2.get_item(name, key)
# Support spaces between operators in an update expression
# E.g. `a = b + c` -> `a=b+c`
if update_expression:
update_expression = re.sub(
r'\s*([=\+-])\s*', r'\1', update_expression)
item = dynamodb_backend2.update_item(
name, key, update_expression, attribute_updates, expression_attribute_names, expression_attribute_values)
item_dict = item.to_json()
item_dict['ConsumedCapacityUnits'] = 0.5
if not existing_item:
item_dict['Attributes'] = {}
return dynamo_json_dump(item_dict)
|
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from builtins import str
from past.builtins import basestring
from collections import defaultdict
from datetime import datetime
from itertools import product
import getpass
import logging
import signal
import socket
import subprocess
import sys
from time import sleep
from sqlalchemy import Column, Integer, String, DateTime, func, Index, or_
from sqlalchemy.orm.session import make_transient
from airflow import executors, models, settings, utils
from airflow import configuration
from airflow.utils import AirflowException, State, LoggingMixin
Base = models.Base
ID_LEN = models.ID_LEN
# Setting up a statsd client if needed
statsd = None
if configuration.getboolean('scheduler', 'statsd_on'):
from statsd import StatsClient
statsd = StatsClient(
host=configuration.get('scheduler', 'statsd_host'),
port=configuration.getint('scheduler', 'statsd_port'),
prefix=configuration.get('scheduler', 'statsd_prefix'))
class BaseJob(Base, LoggingMixin):
"""
Abstract class to be derived for jobs. Jobs are processing items with state
and duration that aren't task instances. For instance a BackfillJob is
a collection of task instance runs, but should have its own state, start
and end time.
"""
__tablename__ = "job"
id = Column(Integer, primary_key=True)
dag_id = Column(String(ID_LEN),)
state = Column(String(20))
job_type = Column(String(30))
start_date = Column(DateTime())
end_date = Column(DateTime())
latest_heartbeat = Column(DateTime())
executor_class = Column(String(500))
hostname = Column(String(500))
unixname = Column(String(1000))
__mapper_args__ = {
'polymorphic_on': job_type,
'polymorphic_identity': 'BaseJob'
}
__table_args__ = (
Index('job_type_heart', job_type, latest_heartbeat),
)
def __init__(
self,
executor=executors.DEFAULT_EXECUTOR,
heartrate=configuration.getfloat('scheduler', 'JOB_HEARTBEAT_SEC'),
*args, **kwargs):
self.hostname = socket.gethostname()
self.executor = executor
self.executor_class = executor.__class__.__name__
self.start_date = datetime.now()
self.latest_heartbeat = datetime.now()
self.heartrate = heartrate
self.unixname = getpass.getuser()
super(BaseJob, self).__init__(*args, **kwargs)
def is_alive(self):
return (
(datetime.now() - self.latest_heartbeat).seconds <
(configuration.getint('scheduler', 'JOB_HEARTBEAT_SEC') * 2.1)
)
def kill(self):
session = settings.Session()
job = session.query(BaseJob).filter(BaseJob.id == self.id).first()
job.end_date = datetime.now()
try:
self.on_kill()
except:
self.logger.error('on_kill() method failed')
session.merge(job)
session.commit()
session.close()
raise AirflowException("Job shut down externally.")
def on_kill(self):
'''
Will be called when an external kill command is received
'''
pass
def heartbeat_callback(self):
pass
def heartbeat(self):
'''
Heartbeats update the job's entry in the database with a timestamp
for the latest_heartbeat and allows for the job to be killed
externally. This makes it possible, at the system level, to monitor what is
actually active.
For instance, an old heartbeat for SchedulerJob would mean something
is wrong.
This also allows for any job to be killed externally, regardless
of who is running it or on which machine it is running.
Note that if your heartbeat is set to 60 seconds and you call this
method after 10 seconds of processing since the last heartbeat, it
will sleep 50 seconds to complete the 60 seconds and keep a steady
heart rate. If you go over 60 seconds before calling it, it won't
sleep at all.
'''
session = settings.Session()
job = session.query(BaseJob).filter(BaseJob.id == self.id).first()
if job.state == State.SHUTDOWN:
self.kill()
if job.latest_heartbeat:
sleep_for = self.heartrate - (
datetime.now() - job.latest_heartbeat).total_seconds()
if sleep_for > 0:
sleep(sleep_for)
job.latest_heartbeat = datetime.now()
session.merge(job)
session.commit()
session.close()
self.heartbeat_callback()
self.logger.debug('[heart] Boom.')
def run(self):
if statsd:
statsd.incr(self.__class__.__name__.lower()+'_start', 1, 1)
# Adding an entry in the DB
session = settings.Session()
self.state = State.RUNNING
session.add(self)
session.commit()
id_ = self.id
make_transient(self)
self.id = id_
# Run
self._execute()
# Marking the success in the DB
self.end_date = datetime.now()
self.state = State.SUCCESS
session.merge(self)
session.commit()
session.close()
if statsd:
statsd.incr(self.__class__.__name__.lower()+'_end', 1, 1)
def _execute(self):
raise NotImplementedError("This method needs to be overridden")
class SchedulerJob(BaseJob):
"""
This SchedulerJob runs indefinitely and constantly schedules the jobs
that are ready to run. It figures out the latest runs for each
task and sees if the dependencies for the next schedules are met.
If so it triggers the task instance. It does this for each task
in each DAG and repeats.
:param dag_id: to run the scheduler for a single specific DAG
:type dag_id: string
:param subdir: to search for DAG under a certain folder only
:type subdir: string
:param test_mode: used for unit testing this class only, runs a single
schedule run
:type test_mode: bool
:param refresh_dags_every: force refresh the DAG definition every N
runs, as specified here
:type refresh_dags_every: int
:param do_pickle: to pickle the DAG object and send over to workers
for non-local executors
:type do_pickle: bool
"""
__mapper_args__ = {
'polymorphic_identity': 'SchedulerJob'
}
def __init__(
self,
dag_id=None,
subdir=None,
test_mode=False,
refresh_dags_every=10,
num_runs=None,
do_pickle=False,
*args, **kwargs):
self.dag_id = dag_id
self.subdir = subdir
if test_mode:
self.num_runs = 1
else:
self.num_runs = num_runs
self.refresh_dags_every = refresh_dags_every
self.do_pickle = do_pickle
super(SchedulerJob, self).__init__(*args, **kwargs)
self.heartrate = configuration.getint('scheduler', 'SCHEDULER_HEARTBEAT_SEC')
@utils.provide_session
def manage_slas(self, dag, session=None):
"""
Finding all tasks that have SLAs defined, and sending alert emails
where needed. New SLA misses are also recorded in the database.
We assume that the scheduler runs often, so we only check for
tasks that should have succeeded in the past hour.
"""
TI = models.TaskInstance
sq = (
session
.query(
TI.task_id,
func.max(TI.execution_date).label('max_ti'))
.filter(TI.dag_id == dag.dag_id)
.filter(TI.state == State.SUCCESS)
.filter(TI.task_id.in_(dag.task_ids))
.group_by(TI.task_id).subquery('sq')
)
max_tis = session.query(TI).filter(
TI.dag_id == dag.dag_id,
TI.task_id == sq.c.task_id,
TI.execution_date == sq.c.max_ti,
).all()
ts = datetime.now()
SlaMiss = models.SlaMiss
for ti in max_tis:
task = dag.get_task(ti.task_id)
dttm = ti.execution_date
if task.sla:
dttm = dag.following_schedule(dttm)
following_schedule = dag.following_schedule(dttm)
while dttm < datetime.now():
if following_schedule + task.sla < datetime.now():
session.merge(models.SlaMiss(
task_id=ti.task_id,
dag_id=ti.dag_id,
execution_date=dttm,
timestamp=ts))
dttm = dag.following_schedule(dttm)
session.commit()
slas = (
session
.query(SlaMiss)
.filter(SlaMiss.email_sent == False)
.filter(SlaMiss.dag_id == dag.dag_id)
.all()
)
if slas:
sla_dates = [sla.execution_date for sla in slas]
qry = (
session
.query(TI)
.filter(TI.state != State.SUCCESS)
.filter(TI.execution_date.in_(sla_dates))
.filter(TI.dag_id == dag.dag_id)
.all()
)
blocking_tis = []
for ti in qry:
if ti.task_id in dag.task_ids:
ti.task = dag.get_task(ti.task_id)
blocking_tis.append(ti)
else:
session.delete(ti)
session.commit()
blocking_tis = ([ti for ti in blocking_tis
if ti.are_dependencies_met(main_session=session)])
task_list = "\n".join([
sla.task_id + ' on ' + sla.execution_date.isoformat()
for sla in slas])
blocking_task_list = "\n".join([
ti.task_id + ' on ' + ti.execution_date.isoformat()
for ti in blocking_tis])
from airflow import ascii
email_content = """\
Here's a list of tasks that missed their SLAs:
<pre><code>{task_list}\n</code></pre>
Blocking tasks:
<pre><code>{blocking_task_list}\n{ascii.bug}</code></pre>
""".format(**locals())
emails = []
for t in dag.tasks:
if t.email:
if isinstance(t.email, basestring):
l = [t.email]
elif isinstance(t.email, (list, tuple)):
l = t.email
for email in l:
if email not in emails:
emails.append(email)
if emails and len(slas):
utils.send_email(
emails,
"[airflow] SLA miss on DAG=" + dag.dag_id,
email_content)
for sla in slas:
sla.email_sent = True
session.merge(sla)
session.commit()
session.close()
def import_errors(self, dagbag):
session = settings.Session()
session.query(models.ImportError).delete()
for filename, stacktrace in list(dagbag.import_errors.items()):
session.add(models.ImportError(
filename=filename, stacktrace=stacktrace))
session.commit()
def schedule_dag(self, dag):
"""
This method checks whether a new DagRun needs to be created
for a DAG based on scheduling interval
Returns DagRun if one is scheduled. Otherwise returns None.
"""
if dag.schedule_interval:
DagRun = models.DagRun
session = settings.Session()
qry = session.query(DagRun).filter(
DagRun.dag_id == dag.dag_id,
DagRun.external_trigger == False,
DagRun.state == State.RUNNING,
)
active_runs = qry.all()
if len(active_runs) >= dag.max_active_runs:
return
for dr in active_runs:
if (
dr.start_date and dag.dagrun_timeout and
dr.start_date < datetime.now() - dag.dagrun_timeout):
dr.state = State.FAILED
dr.end_date = datetime.now()
session.commit()
qry = session.query(func.max(DagRun.execution_date)).filter_by(
dag_id = dag.dag_id).filter(
or_(DagRun.external_trigger == False,
# add % as a wildcard for the like query
DagRun.run_id.like(DagRun.ID_PREFIX+'%')))
last_scheduled_run = qry.scalar()
next_run_date = None
if not last_scheduled_run:
# First run
TI = models.TaskInstance
latest_run = (
session.query(func.max(TI.execution_date))
.filter_by(dag_id=dag.dag_id)
.scalar()
)
if latest_run:
# Migrating from previous version
# make the past 5 runs active
next_run_date = dag.date_range(latest_run, -5)[0]
else:
next_run_date = min([t.start_date for t in dag.tasks])
elif dag.schedule_interval != '@once':
next_run_date = dag.following_schedule(last_scheduled_run)
elif dag.schedule_interval == '@once' and not last_scheduled_run:
next_run_date = datetime.now()
# this structure is necessary to avoid a TypeError from concatenating
# NoneType
if dag.schedule_interval == '@once':
schedule_end = next_run_date
elif next_run_date:
schedule_end = dag.following_schedule(next_run_date)
if next_run_date and schedule_end and schedule_end <= datetime.now():
next_run = DagRun(
dag_id=dag.dag_id,
run_id='scheduled__' + next_run_date.isoformat(),
execution_date=next_run_date,
state=State.RUNNING,
external_trigger=False
)
session.add(next_run)
session.commit()
return next_run
def process_dag(self, dag, executor):
"""
This method schedules a single DAG by looking at the latest
run for each task and attempting to schedule the following run.
As multiple schedulers may be running for redundancy, this
function takes a lock on the DAG and timestamps the last run
in ``last_scheduler_run``.
"""
TI = models.TaskInstance
DagModel = models.DagModel
session = settings.Session()
# picklin'
pickle_id = None
if self.do_pickle and self.executor.__class__ not in (
executors.LocalExecutor, executors.SequentialExecutor):
pickle_id = dag.pickle(session).id
db_dag = session.query(DagModel).filter_by(dag_id=dag.dag_id).first()
last_scheduler_run = db_dag.last_scheduler_run or datetime(2000, 1, 1)
secs_since_last = (
datetime.now() - last_scheduler_run).total_seconds()
# if db_dag.scheduler_lock or
if secs_since_last < self.heartrate:
session.commit()
session.close()
return None
else:
# Taking a lock
db_dag.scheduler_lock = True
db_dag.last_scheduler_run = datetime.now()
session.commit()
active_runs = dag.get_active_runs()
self.logger.info('Getting list of tasks to skip for active runs.')
skip_tis = set()
if active_runs:
qry = (
session.query(TI.task_id, TI.execution_date)
.filter(
TI.dag_id == dag.dag_id,
TI.execution_date.in_(active_runs),
TI.state.in_((State.RUNNING, State.SUCCESS, State.FAILED)),
)
)
skip_tis = {(ti[0], ti[1]) for ti in qry.all()}
descartes = [obj for obj in product(dag.tasks, active_runs)]
self.logger.info('Checking dependencies on {} task instances, minus {} '
'skippable ones'.format(len(descartes), len(skip_tis)))
for task, dttm in descartes:
if task.adhoc or (task.task_id, dttm) in skip_tis:
continue
ti = TI(task, dttm)
ti.refresh_from_db()
if ti.state in (
State.RUNNING, State.QUEUED, State.SUCCESS, State.FAILED):
continue
elif ti.is_runnable(flag_upstream_failed=True):
self.logger.debug('Firing task: {}'.format(ti))
executor.queue_task_instance(ti, pickle_id=pickle_id)
# Releasing the lock
self.logger.debug("Unlocking DAG (scheduler_lock)")
db_dag = (
session.query(DagModel)
.filter(DagModel.dag_id == dag.dag_id)
.first()
)
db_dag.scheduler_lock = False
session.merge(db_dag)
session.commit()
session.close()
@utils.provide_session
def prioritize_queued(self, session, executor, dagbag):
# Prioritizing queued task instances
pools = {p.pool: p for p in session.query(models.Pool).all()}
TI = models.TaskInstance
queued_tis = (
session.query(TI)
.filter(TI.state == State.QUEUED)
.all()
)
self.logger.info("Prioritizing {} queued jobs".format(len(queued_tis)))
session.expunge_all()
d = defaultdict(list)
for ti in queued_tis:
if ti.dag_id not in dagbag.dags:
self.logger.info("DAG not longer in dagbag, "
"deleting {}".format(ti))
session.delete(ti)
session.commit()
elif not dagbag.dags[ti.dag_id].has_task(ti.task_id):
self.logger.info("Task not longer exists, deleting {}".format(ti))
session.delete(ti)
session.commit()
else:
d[ti.pool].append(ti)
overloaded_dags = set()
for pool, tis in list(d.items()):
if not pool:
# Arbitrary:
# If queued outside of a pool, trigger no more than 128 per run
open_slots = 128
else:
open_slots = pools[pool].open_slots(session=session)
queue_size = len(tis)
self.logger.info("Pool {pool} has {open_slots} slots, {queue_size} "
"task instances in queue".format(**locals()))
if not open_slots:
continue
tis = sorted(
tis, key=lambda ti: (-ti.priority_weight, ti.start_date))
for ti in tis:
if not open_slots:
continue
task = None
try:
task = dagbag.dags[ti.dag_id].get_task(ti.task_id)
except:
self.logger.error("Queued task {} seems gone".format(ti))
session.delete(ti)
session.commit()
continue
if not task:
continue
ti.task = task
# picklin'
dag = dagbag.dags[ti.dag_id]
pickle_id = None
if self.do_pickle and self.executor.__class__ not in (
executors.LocalExecutor,
executors.SequentialExecutor):
self.logger.info("Pickling DAG {}".format(dag))
pickle_id = dag.pickle(session).id
if dag.dag_id in overloaded_dags or dag.concurrency_reached:
overloaded_dags.add(dag.dag_id)
continue
if ti.are_dependencies_met():
executor.queue_task_instance(
ti, force=True, pickle_id=pickle_id)
open_slots -= 1
else:
session.delete(ti)
continue
ti.task = task
session.commit()
def _execute(self):
dag_id = self.dag_id
def signal_handler(signum, frame):
self.logger.error("SIGINT (ctrl-c) received")
sys.exit(1)
signal.signal(signal.SIGINT, signal_handler)
utils.pessimistic_connection_handling()
logging.basicConfig(level=logging.DEBUG)
self.logger.info("Starting the scheduler")
dagbag = models.DagBag(self.subdir, sync_to_db=True)
executor = dagbag.executor
executor.start()
i = 0
while not self.num_runs or self.num_runs > i:
try:
loop_start_dttm = datetime.now()
try:
self.prioritize_queued(executor=executor, dagbag=dagbag)
except Exception as e:
self.logger.exception(e)
i += 1
try:
if i % self.refresh_dags_every == 0:
dagbag = models.DagBag(self.subdir, sync_to_db=True)
else:
dagbag.collect_dags(only_if_updated=True)
except:
self.logger.error("Failed at reloading the dagbag")
if statsd:
statsd.incr('dag_refresh_error', 1, 1)
sleep(5)
if dag_id:
dags = [dagbag.dags[dag_id]]
else:
dags = [
dag for dag in dagbag.dags.values() if not dag.parent_dag]
paused_dag_ids = dagbag.paused_dags()
for dag in dags:
self.logger.debug("Scheduling {}".format(dag.dag_id))
dag = dagbag.get_dag(dag.dag_id)
if not dag or (dag.dag_id in paused_dag_ids):
continue
try:
self.schedule_dag(dag)
self.process_dag(dag, executor)
self.manage_slas(dag)
except Exception as e:
self.logger.exception(e)
self.logger.info("Done queuing tasks, calling the executor's "
"heartbeat")
duration_sec = (datetime.now() - loop_start_dttm).total_seconds()
self.logger.info("Loop took: {} seconds".format(duration_sec))
try:
self.import_errors(dagbag)
except Exception as e:
self.logger.exception(e)
try:
dagbag.kill_zombies()
except Exception as e:
self.logger.exception(e)
try:
# We really just want the scheduler to never ever stop.
executor.heartbeat()
self.heartbeat()
except Exception as e:
self.logger.exception(e)
self.logger.error("Tachycardia!")
except Exception as deep_e:
self.logger.exception(deep_e)
executor.end()
def heartbeat_callback(self):
if statsd:
statsd.gauge('scheduler_heartbeat', 1, 1)
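# Illustrative sketch (not part of the scheduler): schedule_dag() above only creates
# the DagRun for an execution_date once the schedule period that date covers has
# ended, i.e. once following_schedule(next_run_date) lies in the past. The
# hypothetical helper below restates that check, with a plain timedelta standing in
# for dag.following_schedule().
def _example_should_create_dagrun(next_run_date, interval, now):
    # With a daily interval, the run for 2015-01-02 is only created on or
    # after 2015-01-03.
    schedule_end = next_run_date + interval
    return bool(next_run_date and schedule_end <= now)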
class BackfillJob(BaseJob):
"""
A backfill job consists of a dag or subdag for a specific time range. It
triggers a set of task instance runs, in the right order and lasts for
as long as it takes for the set of task instances to be completed.
"""
__mapper_args__ = {
'polymorphic_identity': 'BackfillJob'
}
def __init__(
self,
dag, start_date=None, end_date=None, mark_success=False,
include_adhoc=False,
donot_pickle=False,
ignore_dependencies=False,
pool=None,
*args, **kwargs):
self.dag = dag
dag.override_start_date(start_date)
self.dag_id = dag.dag_id
self.bf_start_date = start_date
self.bf_end_date = end_date
self.mark_success = mark_success
self.include_adhoc = include_adhoc
self.donot_pickle = donot_pickle
self.ignore_dependencies = ignore_dependencies
self.pool = pool
super(BackfillJob, self).__init__(*args, **kwargs)
def _execute(self):
"""
Runs a dag for a specified date range.
"""
session = settings.Session()
start_date = self.bf_start_date
end_date = self.bf_end_date
# picklin'
pickle_id = None
if not self.donot_pickle and self.executor.__class__ not in (
executors.LocalExecutor, executors.SequentialExecutor):
pickle = models.DagPickle(self.dag)
session.add(pickle)
session.commit()
pickle_id = pickle.id
executor = self.executor
executor.start()
# Build a list of all instances to run
tasks_to_run = {}
failed = []
succeeded = []
started = []
wont_run = []
for task in self.dag.tasks:
if (not self.include_adhoc) and task.adhoc:
continue
start_date = start_date or task.start_date
end_date = end_date or task.end_date or datetime.now()
for dttm in self.dag.date_range(start_date, end_date=end_date):
ti = models.TaskInstance(task, dttm)
tasks_to_run[ti.key] = ti
# Triggering what is ready to get triggered
while tasks_to_run:
for key, ti in list(tasks_to_run.items()):
ti.refresh_from_db()
if ti.state in (
State.SUCCESS, State.SKIPPED) and key in tasks_to_run:
succeeded.append(key)
tasks_to_run.pop(key)
elif ti.state in (State.RUNNING, State.QUEUED):
continue
elif ti.is_runnable(flag_upstream_failed=True):
executor.queue_task_instance(
ti,
mark_success=self.mark_success,
task_start_date=self.bf_start_date,
pickle_id=pickle_id,
ignore_dependencies=self.ignore_dependencies,
pool=self.pool)
ti.state = State.RUNNING
if key not in started:
started.append(key)
self.heartbeat()
executor.heartbeat()
# Reacting to events
for key, state in list(executor.get_event_buffer().items()):
dag_id, task_id, execution_date = key
if key not in tasks_to_run:
continue
ti = tasks_to_run[key]
ti.refresh_from_db()
if (
ti.state in (State.FAILED, State.SKIPPED) or
state == State.FAILED):
if ti.state == State.FAILED or state == State.FAILED:
failed.append(key)
self.logger.error("Task instance " + str(key) + " failed")
elif ti.state == State.SKIPPED:
wont_run.append(key)
self.logger.error("Skipping " + str(key) + " failed")
tasks_to_run.pop(key)
# Removing downstream tasks that also shouldn't run
for t in self.dag.get_task(task_id).get_flat_relatives(
upstream=False):
key = (ti.dag_id, t.task_id, execution_date)
if key in tasks_to_run:
wont_run.append(key)
tasks_to_run.pop(key)
elif ti.state == State.SUCCESS and state == State.SUCCESS:
succeeded.append(key)
tasks_to_run.pop(key)
elif (
ti.state not in (State.SUCCESS, State.QUEUED) and
state == State.SUCCESS):
self.logger.error(
"The airflow run command failed "
"at reporting an error. This should not occur "
"in normal circumstances. Task state is '{}',"
"reported state is '{}'. TI is {}"
"".format(ti.state, state, ti))
msg = (
"[backfill progress] "
"waiting: {0} | "
"succeeded: {1} | "
"kicked_off: {2} | "
"failed: {3} | "
"wont_run: {4} ").format(
len(tasks_to_run),
len(succeeded),
len(started),
len(failed),
len(wont_run))
self.logger.info(msg)
executor.end()
session.close()
if failed:
msg = (
"------------------------------------------\n"
"Some tasks instances failed, "
"here's the list:\n{}".format(failed))
raise AirflowException(msg)
self.logger.info("All done. Exiting.")
class LocalTaskJob(BaseJob):
__mapper_args__ = {
'polymorphic_identity': 'LocalTaskJob'
}
def __init__(
self,
task_instance,
ignore_dependencies=False,
force=False,
mark_success=False,
pickle_id=None,
task_start_date=None,
pool=None,
*args, **kwargs):
self.task_instance = task_instance
self.ignore_dependencies = ignore_dependencies
self.force = force
self.pool = pool
self.pickle_id = pickle_id
self.mark_success = mark_success
self.task_start_date = task_start_date
super(LocalTaskJob, self).__init__(*args, **kwargs)
def _execute(self):
command = self.task_instance.command(
raw=True,
ignore_dependencies=self.ignore_dependencies,
force=self.force,
pickle_id=self.pickle_id,
mark_success=self.mark_success,
task_start_date=self.task_start_date,
job_id=self.id,
pool=self.pool,
)
self.process = subprocess.Popen(['bash', '-c', command])
return_code = None
while return_code is None:
self.heartbeat()
return_code = self.process.poll()
def on_kill(self):
self.process.terminate()
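# Minimal sketch of the run-and-heartbeat pattern used by LocalTaskJob._execute above:
# launch the task command in a subprocess and poll it while emitting heartbeats. The
# helper name and the explicit sleep are illustrative only; the real job paces the
# loop through self.heartbeat() itself.
def _example_run_with_heartbeat(command, heartbeat, poll_interval=1.0):
    import subprocess
    import time
    process = subprocess.Popen(['bash', '-c', command])
    return_code = None
    while return_code is None:
        heartbeat()  # e.g. refresh the job's heartbeat timestamp
        time.sleep(poll_interval)
        return_code = process.poll()
    return return_code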
|
|
#!/usr/bin/env python
import json
import time
import requests
import sys
import traceback
import re
import types
from abc import abstractmethod
from bottle import request, response
from ..smcontext import SmContext, ServiceManagerException
from ..smport import PortProvider
RUN_ON_PORT = 8085
RUN_ON_HOST = "localhost"
SERVICE_START_TIMEOUT_SECONDS = 90
MAX_TEST_ID_LENGTH = 40
deprecated_release_params = {"SNAPSHOT_JAR": "SNAPSHOT", "RELEASE_JAR": "RELEASE"}
class BadRequestException(Exception):
def __init__(self, message):
Exception.__init__(self, message)
class SmResponse:
def __init__(self):
pass # Do nothing
@staticmethod
def bad_request(message):
print("Bad Request: " + str(message))
response.status = 400
return json.dumps({"statusCode": 400, "errorMessage": str(message)})
@staticmethod
def error_500(message):
response.status = 500
return json.dumps({"statusCode": 500, "errorMessage": str(message)})
class SmRequest:
def __init__(self, server, json_body, offlineMode, show_progress, verbose):
self.server = server
self.json_body = json_body
try:
test_id = self.json_body["testId"]
except Exception:
raise BadRequestException("Missing testId parameter")
SmRequest._validate_test_id(test_id)
request_specific_features = SmRequest._extract_and_validate_request_specific_features(self.json_body)
self.test_id = test_id
self.context = SmContext(
server.application,
self.test_id,
show_progress=show_progress,
request_specific_features=request_specific_features,
offline=offlineMode,
verbose=verbose,
)
@abstractmethod
def process_request(self):
pass
@staticmethod
def _extract_and_validate_request_specific_features(json_body):
if not "features" in json_body:
return None
request_specific_features = json_body["features"]
if not request_specific_features:
return None
if not isinstance(request_specific_features, list):
raise BadRequestException("'features' must be a list of strings")
for feature in request_specific_features:
if not isinstance(feature, str):
raise BadRequestException("'features' must be a list of strings")
return request_specific_features
def _bad_request_exception(self, message):
return BadRequestException("[%s] %s" % (self.test_id, message))
def _log(self, message):
self.context.log(message)
@staticmethod
def _validate_test_id(test_id):
regex = re.compile(r"^[a-zA-Z0-9\-_]+$")
if not regex.match(test_id):
raise BadRequestException(
"Invalid parameter 'testId' with value '%s', valid characters are 'a-z', 'A-Z', '0-9', '-' and '_'"
% test_id
)
if test_id.upper() == "LOCAL":
raise BadRequestException("'%s' is not a valid value for testId" % test_id)
if len(test_id) > MAX_TEST_ID_LENGTH:
raise BadRequestException(
"Test id '%s' is too long (%d characters) (maximum is %d characters)"
% (test_id, len(test_id), MAX_TEST_ID_LENGTH)
)
def _get_or_throw_bad_request(self, obj, key, message):
if key not in obj:
raise self._bad_request_exception(message)
value = obj[key]
if not value:
raise self._bad_request_exception(message)
return value
def _stop_services(self, drop_databases):
self._log("Stopping services (drop databases = %s)" % drop_databases)
errors = self.context.kill()
if drop_databases:
for service_name in self.server.service_names_for_test(self.test_id):
if self.context.service_data(service_name).get("hasMongo", False):
self.context.drop_database_for_service(service_name)
self.context.drop_database_for_test()
self.server.test_stopped(self.test_id)
return errors
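# Hypothetical helper, for illustration only: the testId rules enforced by
# SmRequest._validate_test_id above, restated as a boolean check instead of an
# exception. Allowed characters are letters, digits, '-' and '_'; the literal value
# 'local' (any case) is reserved; and the id may be at most MAX_TEST_ID_LENGTH
# characters long.
def _example_is_valid_test_id(test_id):
    return bool(
        re.match(r"^[a-zA-Z0-9\-_]+$", test_id)
        and test_id.upper() != "LOCAL"
        and len(test_id) <= MAX_TEST_ID_LENGTH
    )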
class SmStartRequest(SmRequest):
def __init__(
self, server, json_request_body, do_not_run_from_source, offlineMode, show_progress, verbose,
):
self.do_not_run_from_source = do_not_run_from_source
self.json_body = json_request_body
SmRequest.__init__(self, server, self.json_body, offlineMode, show_progress, verbose)
def process_request(self):
# START REQUEST PAYLOAD:
# {
# "testId": "blah",
# "features": ["feature1", "feature2", ...],
# "services": [
# {"serviceName" : "auth", "runFrom" : "SNAPSHOT"},
# {"serviceName" : "matching", "runFrom" : "RELEASE", "version" : "3.0.1"},
# {"serviceName" : "portal", "runFrom" : "SOURCE"},
# {"serviceName" : "nps", "runFrom" : "SOURCE"},
# ...
# ]
# }
self._log("Processing service start request")
if self.server.is_running(self.test_id):
raise self._bad_request_exception("Test '%s' is already running" % self.test_id)
self.server.starting_test(self.test_id)
services_to_start = self._get_or_throw_bad_request(
self.json_body, "services", "'services' missing from request"
)
self._log("Service(s) to start: " + str(services_to_start))
(orchestration_services, service_mapping_ports,) = self._validate_start_request_and_assign_ports(
services_to_start, self.do_not_run_from_source
)
try:
self._start_services_for_test(orchestration_services, service_mapping_ports)
sm_response = []
for service_mapping_name in service_mapping_ports:
sm_response += [
{"serviceName": service_mapping_name, "port": service_mapping_ports[service_mapping_name],}
]
self._log(
'All services started! To kill the running processes for this test, POST {"testId":"%s"} to http://%s:%s/stop'
% (self.test_id, RUN_ON_HOST, RUN_ON_PORT)
)
return json.dumps(sm_response)
except Exception as e:
traceback.print_exc(file=sys.stdout)
return self._stop_services_and_return_500("Unexpected exception: %s" % e)
# {"AUTH": {"port": 43124, "runFrom":"JAR", "serviceMapping" : "auth"}}
def _start_services(self, orchestration_services, service_mapping_ports, proxy):
for service_name in orchestration_services:
port = orchestration_services[service_name]["port"]
admin_port = orchestration_services[service_name]["adminPort"]
run_from = orchestration_services[service_name]["runFrom"]
classifier = orchestration_services[service_name]["classifier"]
version = orchestration_services[service_name]["version"]
append_args = orchestration_services[service_name]["appendArgs"] # Allows for dynamic config overriding
# Allow for deprecated run_from values
if run_from in deprecated_release_params:
run_from = deprecated_release_params[run_from]
self.context.start_service(
service_name,
run_from,
proxy,
classifier,
service_mapping_ports,
port,
admin_port,
version,
append_args,
)
def _await_service_startup(self, service_name, port, admin_port):
seconds_remaining = SERVICE_START_TIMEOUT_SECONDS
servicedata = self.context.service_data(service_name)
if "healthcheck" in servicedata:
healthcheck_url = servicedata["healthcheck"]["url"].replace("${port}", str(admin_port))
healthcheck_response_regex = self.context.service_data(service_name)["healthcheck"]["response"]
while seconds_remaining > 0:
if (seconds_remaining < 10 or seconds_remaining % 5 == 0) and seconds_remaining != 1:
self._log(
"Waiting for %s to start on port %d, %d seconds before timeout"
% (service_name, port, seconds_remaining)
)
elif seconds_remaining == 1:
self._log("Waiting for %s to start on port %d, 1 second before timeout" % (service_name, port))
try:
ping_response = requests.get(healthcheck_url)
response_text = ping_response.text
healthcheck = re.search(healthcheck_response_regex, response_text)
except requests.RequestException:
healthcheck = False
if healthcheck or (seconds_remaining == 0):
self._log("Service %s health check SUCCESSFUL" % service_name)
break
else:
seconds_remaining -= 1
time.sleep(1)
if seconds_remaining <= 0:
raise self.context.exception(
"Service %s - healthcheck did not pass within allocated time (%d seconds)"
% (service_name, SERVICE_START_TIMEOUT_SECONDS)
)
else:
self._log(
"There is no health check for '%s'. This is not really advisable we can only assume it has started correctly"
% service_name
)
def _start_services_for_test(self, orchestration_services, service_mapping_ports):
self._start_services(orchestration_services, service_mapping_ports, None)
for service_name in orchestration_services:
self.server.starting_service_for_test(self.test_id, service_name)
port = orchestration_services[service_name]["port"]
admin_port = orchestration_services[service_name]["adminPort"]
self._await_service_startup(service_name, port, admin_port)
def _stop_services_and_return_500(self, message):
self._log(message)
errors = self._stop_services(drop_databases=True)
if errors:
self._log("Errors during shutdown: %s" % str(errors))
return SmResponse.error_500(message)
def _service_mapping_for(self, service_start_request):
service_mapping_name = self._get_or_throw_bad_request(
service_start_request, "serviceName", "Missing 'serviceName' parameter in instruction to start services",
)
mapping = self._get_or_throw_bad_request(
self.context.application.service_mappings,
service_mapping_name,
"Unknown service name '%s'" % service_mapping_name,
)
need_classifier = isinstance(mapping, dict)
have_classifier = "classifier" in service_start_request and service_start_request["classifier"]
version = None
if "version" in service_start_request and service_start_request["version"]:
version = service_start_request["version"]
append_args = service_start_request.get("appendArgs", [])
if need_classifier:
valid_classifiers = "[" + (",".join(str(x) for x in list(mapping.keys()))) + "]"
if not have_classifier:
raise self._bad_request_exception(
"Service '%s' requires a classifier (one of: %s)" % (service_mapping_name, valid_classifiers)
)
classifier = service_start_request["classifier"]
if classifier not in mapping:
raise self._bad_request_exception(
"Unknown classifier '%s' for service '%s' (expected one of: %s)"
% (classifier, service_mapping_name, valid_classifiers)
)
service_name = mapping[classifier]
else:
if have_classifier:
raise self._bad_request_exception(
"Service '%s' does not take classifiers (found: '%s')"
% (service_mapping_name, service_start_request["classifier"])
)
service_name = mapping
classifier = None
return service_mapping_name, service_name, classifier, version, append_args
def _validate_start_request_and_assign_ports(self, services_to_start, dontrunfromsource):
orchestration_services = {}
service_mapping_ports = {}
for service_start_request in services_to_start:
(service_mapping_name, service_name, classifier, version, append_args,) = self._service_mapping_for(
service_start_request
)
if append_args and not isinstance(append_args, list):
raise self._bad_request_exception(
"ERROR: I was passed a non list for append args of '"
+ str(append_args)
+ "' I dont know what to do with this"
)
if service_mapping_name in service_mapping_ports:
raise self._bad_request_exception(
"Duplicate entry for service '%s' in start request" % service_mapping_name
)
run_from = self._get_or_throw_bad_request(
service_start_request,
"runFrom",
"Missing 'runFrom' parameter in instruction to start '%s'" % service_mapping_name,
)
if run_from not in ["SOURCE", "SNAPSHOT", "RELEASE"] + list(deprecated_release_params.keys()):
raise self._bad_request_exception(
"runFrom parameter has invalid value '%s' (should be 'SOURCE', 'SNAPSHOT' or 'RELEASE')" % run_from
)
if dontrunfromsource:
if run_from == "SOURCE":
raise self._bad_request_exception(
"runFrom parameter has value '%s', however --nosource was specified when smserver started"
% run_from
)
if (
append_args
and not self.context.get_service_starter(service_name, run_from, None).supports_append_args()
):
raise BadRequestException("The service type for '" + service_name + "' does not support append args")
if service_name in orchestration_services:
existing_entry = orchestration_services[service_name]
service_mapping_ports[service_mapping_name] = existing_entry["port"]
if run_from != existing_entry["runFrom"]:
raise self._bad_request_exception(
"Conflicting runFrom values (%s and %s) for underlying service '%s'"
% (run_from, existing_entry["runFrom"], service_name)
)
if classifier and existing_entry["classifier"] and classifier != existing_entry["classifier"]:
raise self._bad_request_exception(
"Conflicting classifier values (%s and %s) for underlying service '%s'"
% (classifier, existing_entry["classifier"], service_name)
)
else:
port = self.server.next_available_port()
admin_port = (
self.server.next_available_port()
if self.context.service_type(service_name) == "dropwizard"
else port
)
service_mapping_ports[service_mapping_name] = port
orchestration_services[service_name] = {
"port": port,
"adminPort": admin_port,
"runFrom": run_from,
"classifier": classifier,
"version": version,
"appendArgs": append_args,
}
return orchestration_services, service_mapping_ports
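# Sketch of a client call matching the start payload documented in
# SmStartRequest.process_request above. The "/start" path is an assumption (only the
# "/stop" URL is spelled out in this file), so treat the request below as illustrative
# rather than as the server's confirmed API.
def _example_post_start_request():
    payload = {
        "testId": "example-test-1",
        "features": ["feature1"],
        "services": [
            {"serviceName": "auth", "runFrom": "SNAPSHOT"},
            {"serviceName": "matching", "runFrom": "RELEASE", "version": "3.0.1"},
        ],
    }
    return requests.post(
        "http://%s:%s/start" % (RUN_ON_HOST, RUN_ON_PORT),  # assumed endpoint
        data=json.dumps(payload),
    )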
class SmStopRequest(SmRequest):
def __init__(self, server, json_request_body, offlineMode, show_progress, verbose):
SmRequest.__init__(self, server, json_request_body, offlineMode, show_progress, verbose)
def process_request(self):
if not self.server.is_running(self.test_id):
raise BadRequestException("Invalid test id (or already stopped): %s" % self.test_id)
self._log("Stopping test")
drop_databases = self.json_body.get("dropDatabases", True)
if type(drop_databases) is not bool:
raise self._bad_request_exception(
"dropDatabases parameter must be boolean (value was: %s)" % drop_databases
)
errors = self._stop_services(drop_databases)
if errors:
self._log("Completed stopping services - errors occurred: %s" % str(errors))
response.status = 500
return json.dumps({"statusCode": 500, "errorMessage": errors})
else:
self._log("Successfully stopped services")
response.status = 204
class SmShutdownRequest:
def __init__(self, server):
self.server = server
def process_request(self):
print("shutting down...")
for test_id in self.server.running_tests:
context = SmContext(self.server.application, test_id)
context.log("Killing everything for testId %s..." % test_id)
context.kill()
for service_name in self.server.service_names_for_test(test_id):
if context.service_data(service_name).get("hasMongo", False):
context.drop_database_for_service(service_name)
context.drop_database_for_test()
context.log("Successfully stopped all services for testId %s..." % test_id)
print("finished shutting down...")
class SmServer:
def __init__(self, application):
self.application = application
self.port_provider = PortProvider()
# Map of test_id to list of service names
self.running_tests = {}
def next_available_port(self):
return self.port_provider.next_available_port()
def service_names_for_test(self, test_id):
if self.is_running(test_id):
return self.running_tests[test_id]
else:
return []
def is_running(self, test_id):
return test_id in self.running_tests
def starting_test(self, test_id):
if self.is_running(test_id):
raise ServiceManagerException("Test '%s' is already running" % test_id)
self.running_tests[test_id] = []
def starting_service_for_test(self, test_id, service_name):
if not self.is_running(test_id):
raise ServiceManagerException("Test '%s' is not running" % test_id)
self.running_tests[test_id] += [service_name]
def test_stopped(self, test_id):
del self.running_tests[test_id]
class SmVersionRequest:
def __init__(self, server):
self.application = server.application
def process_request(self):
service = request.query["service"]
if service not in self.application.service_mappings:
raise BadRequestException(
"Service '%s' cannot be found in 'service_mappings.json', please update this file" % service
)
service_alias = self.application.service_mappings[service]
if service_alias not in self.application.services:
raise BadRequestException(
"Service '%s' cannot be found in 'services.json', please update this file" % service_alias
)
if not "versionEnv" in self.application.services[service_alias]:
raise BadRequestException(
"'versionEnv' cannot be found for service '%s', please update 'services.json'" % service_alias
)
return {"variable": self.application.services[service_alias]["versionEnv"]}
|
|
from customSearchEngine import CustomSearchEngine
from cryptoConverter import CryptoConverter
from stockInfo import StockInfo
import dataContainers
import globalSettings
import datetime
import random
import logging
_logger = logging.getLogger()
#This class is mainly a way to keep the code clean
#So any functions required purely for command execution go here
#This also will facilitate the execution of pending responses
#which don't naturally have a context in the chat parser anymore
class FunctionExecutor():
def __init__(self, broker):
self.crypto_symbols = []
self._broker = broker
def execute(self, function, request_id, response_id, message, bot, parser, web):
return getattr(self, function)(request_id, response_id, message, bot, parser, web)
async def add(self, request_id, response_id, message, bot, parser, web):
split = message.content.split(" ")
result = None
total = 0
for i in range(1, len(split)):
try:
total += int(split[i])
except ValueError:
result = "I can only add numbers, bub"
break
except Exception:
result = "I don't even know what's going on anymore"
break
if not result:
result = "I know, the answer is {}!".format(str(total))
return (result, True)
async def favorite(self, request_id, response_id, message, bot, parser, web):
requests = bot.db.get_user_requests(message.author.name)
counts = {}
fav_count = 0
fav_list = []
for r in requests:
if r.command_id in counts:
counts[r.command_id] += 1
else:
counts[r.command_id] = 1
if r.command_id in parser.commands \
and parser.commands[r.command_id].text != parser.prefix + 'createCommand' \
and parser.commands[r.command_id].text != parser.prefix + 'deleteCommand' \
and parser.commands[r.command_id].text != parser.prefix + 'deleteResponse':
if counts[r.command_id] == fav_count:
fav_list.append(r.command_id)
elif counts[r.command_id] > fav_count:
fav_list = [r.command_id]
fav_count = counts[r.command_id]
if len(fav_list) > 1:
result = message.author.mention + ", your favorite commands are: {0} ({1} calls each)"
else:
result = message.author.mention + ", your favorite command is: {0} ({1} calls)"
result = result.format(", ".join(parser.commands[cmd_id].text for cmd_id in fav_list), fav_count)
return (result, True)
async def create_command(self, request_id, response_id, message, bot, parser, web):
_logger.info("test")
_logger.info(str(message.content))
split = message.content.split(" ", 2)
result = ""
try:
type_id = parser.get_command_type_id('EQUALS')
# TODO bad hardcoded check...but i'm leaving it for now because *fast*
if len(split[2]) > 256:
raise Exception('Length must be shorter than 256 characters')
newCommand = dataContainers.Command([-1, split[1], True, False, True, type_id])
newResponse = dataContainers.Response([-1, split[2], None, None, None, -1])
if newResponse.text.startswith('!tip'):
raise Exception("I'm just a poor :ottoBot: trying to scrape together a living. No need to steal my momocoins")
parser.add_command(newCommand, newResponse)
result = "Added command: " + newCommand.text
except Exception as e:
result = "Failed to add command: " + str(e)
return (result, True)
async def create_delayed_command(self, request_id, response_id, message, bot, parser, web):
split = message.content.split(" ", 3)
result = "Roger roger"
try:
cmd_id = parser.get_response_by_id(response_id).command_id
resp_id = [x for x in parser.responses[cmd_id] if parser.responses[cmd_id][x].text == split[2]]
if len(resp_id) == 0:
resp = dataContainers.Response([-1, split[2], None, response_id, None, cmd_id])
parser.add_command(parser.commands[cmd_id], resp)
resp_id = [x for x in parser.responses[cmd_id] if parser.responses[cmd_id][x].text == split[2]][0]
else:
resp_id = resp_id[0]
delay = float(split[1])
when = datetime.datetime.now() + datetime.timedelta(seconds=delay)
new_id = bot.db.insert_pending_response(request_id, resp_id, when, message)
result += " - " + str(new_id)
except Exception as e:
result = "Failed to parse delayed response: " + str(e)
return (result, False)
async def delete_pending_response(self, request_id, response_id, message, bot, parser, web):
split = message.content.split(" ")
result = ""
if len(split) < 2:
result = "Please supply a pending response id"
else:
try:
delayed_id = int(split[1])
bot.db.delete_pending_response(delayed_id)
result = "Da-Cheated"
except Exception:
result = "Failed to parse delayed response id"
return (result, True)
async def delete_command(self, request_id, response_id, message, bot, parser, web):
split = message.content.split(" ")
result = "No matching command found"
for c in parser.commands:
if parser.is_match(parser.commands[c], split[1]):
index = 0
if len(split) > 2:
try:
index = int(split[2])
except Exception:
result = split[2] + " is not a valid index"
break
if parser.commands[c].removable:
response = parser.get_response(parser.commands[c].id, index)
if response:
parser.delete_response(response)
result = "Removed command: " + parser.commands[c].text
else:
result = "This command doesn't have that many responses"
break
else:
result = "Command not removable"
break
return (result, True)
async def delete_response(self, request_id, response_id, message, bot, parser, web):
split = message.content.split(" ")
result = "Invalid response"
try:
response_id = int(split[1])
response = parser.get_response_by_id(response_id)
if response:
if parser.commands[response.command_id].removable:
parser.delete_response(response)
result = "Response deleted"
else:
result = "That response is not editable"
else:
result = "Could not find matching response"
except Exception:
result = "Could not parse response id"
return (result, True)
async def get_crawl_link(self, request_id, response_id, message, bot, parser, web):
split = message.content.split(" ")
result = None
if len(split) == 1:
result = "You can't watch no one!"
else:
_logger.info("about to test for existence of crawl user: " + split[1])
exists = await web.doesCrawlUserExist(split[1])
_logger.info("crawl user " + split[1] + " exists: " + str(exists))
if exists:
result = "http://crawl.akrasiac.org:8080/#watch-" + split[1]
else:
result = split[1] + "?? That person doesn't even play crawl!"
return (result, True)
async def get_crawl_dump_link(self, request_id, response_id, message, bot, parser, web):
split = message.content.split(" ")
result = None
if len(split) == 1:
result = "You can't watch no one!"
else:
if await web.doesCrawlUserExist(split[1]):
result = "http://crawl.akrasiac.org/rawdata/{}/{}.txt".format(split[1], split[1])
else:
result = split[1] + "?? That person doesn't even play crawl!"
return (result, True)
async def list_commands(self, request_id, response_id, message, bot, parser, web):
output = ', '.join(parser.commands[cmd].text for cmd in sorted(parser.commands, key=lambda x:parser.commands[x].text) if parser.commands[cmd].text.startswith(parser.prefix))
return (output, True)
async def find_steam_game(self, request_id, response_id, message, bot, parser, web):
split = message.content.split(" ", 1)
result = ""
if len(split) == 1:
result = "Please specify a game"
else:
cse = CustomSearchEngine(web,
globalSettings.config.get('DEFAULT', 'cse_cx_steam'),
globalSettings.config.get('DEFAULT', 'cse_key'))
response = await cse.search(split[1])
if response.status != 200:
if response.error_message:
result = response.error_message + " "
result += "(Http status: " + str(response.status) + ")"
elif len(response.items) == 0:
result = "Found no responses for query"
else:
result = response.items[0].title + ": " + response.items[0].link
return (result, True)
async def find_xkcd_comic(self, request_id, response_id, message, bot, parser, web):
split = message.content.split(" ", 1)
result = ""
if len(split) == 1:
result = "Please specify a keyword"
else:
cse = CustomSearchEngine(web,
globalSettings.config.get('DEFAULT', 'cse_cx_xkcd'),
globalSettings.config.get('DEFAULT', 'cse_key'))
response = await cse.search(split[1])
if response.status != 200:
if response.error_message:
result = response.error_message + " "
result += "(Http status: " + str(response.status) + ")"
elif len(response.items) == 0:
result = "Found no responses for query"
else:
result = response.items[0].title + ": " + response.items[0].link
return (result, True)
async def timing_queue(self, request_id, response_id, message, bot, parser, web):
false_start = random.randint(1, 10)
if false_start <= 3:
return (message.author.mention + " TIMING!!!!!!!!!!!!\n\n\nWait no...", False)
minTime = 0
maxTime = 10520000
delay = random.randrange(minTime, maxTime, 1)
when = datetime.datetime.now() + datetime.timedelta(seconds=delay)
next_id = parser.get_response_by_id(response_id).next
bot.db.insert_pending_response(request_id, next_id, when, message)
return ("Want to know the secret to good comedy?", False)
async def timing_pop(self, request_id, response_id, message, bot, parser, web):
return (message.author.mention + " TIMING!!!!!!!!!!!", True)
async def clear_chat(self, request_id, response_id, message, bot, parser, web):
if message.server:
server_id = message.server.id
channel_id = message.channel.id
return (await bot.clear_chat(server_id, channel_id), True)
else:
return ("Couldn't find server id? I don't really support PMs", False)
async def convert_money(self, request_id, response_id, message, bot, parser, web):
split = message.content.split(" ")
result = ""
if len(split) < 4:
result = parser.prefix + "convertHelp"
else:
try:
val = float(split[1])
from_symbol = split[2].upper()
to_symbol = split[3].upper()
crypto = CryptoConverter(web)
if not self.crypto_symbols:
_logger.info('populating symbols in convert_money')
self.crypto_symbols = await crypto.get_symbols()
_logger.info('done populating')
if from_symbol not in self.crypto_symbols:
result = "I do not recognize base type: {}\n(USD not a valid base type)".format(from_symbol)
elif to_symbol not in self.crypto_symbols and to_symbol != 'USD':
result = "I do not recognize target type: {}".format(to_symbol)
if result:
return (result, True)
result = message.author.mention + ", you have "
converted = await crypto.convert(self.crypto_symbols[from_symbol], to_symbol)
if converted:
calculated = val * converted
result += "{:,f}".format(calculated) + " in " + to_symbol
else:
result = "Something went wrong :("
except ValueError as e:
_logger.error('something happened man: {}'.format(e))
result = "Could not parse value to convert. Please use decimal notation"
except Exception as e:
_logger.error("convert_money exception: " + str(e))
result = ":robot: ERROR: " + str(e)
return (result, True)
async def crypto_market_cap(self, request_id, response_id, message, bot, parser, web):
split = message.content.split(" ")
result = "Total market cap: "
coin = None
crypto = CryptoConverter(web)
if len(split) > 1:
coin = split[1].upper()
if not self.crypto_symbols:
self.crypto_symbols = await crypto.get_symbols()
if coin in self.crypto_symbols:
result = coin + " market cap: "
coin = self.crypto_symbols[coin]
else:
return ("Invalid coin name: '%s'" % coin, True)
market_cap = await crypto.market_cap(coin)
if market_cap == 0:
result = "An error occurred getting market cap. Please check the logs"
else:
result += "{:,.2f}".format(market_cap)
return (result, True)
async def broker(self, request_id, response_id, message, bot, parser, web):
return await self._broker.handle_command(request_id, response_id, message, bot, parser, web)
async def stock_data(self, request_id, response_id, message, bot, parser, web):
split = message.content.split(" ")
result = "Stock Data (%s, %s):\n"
symbol = None
symbol_data = None
error_info = None
stock_info = StockInfo(web)
if len(split) > 1:
symbol = split[1].upper()
# default to 'live' timing
timing = 'live'
duration = -1
debug = False
if len(split) > 2:
timing = split[2].lower()
if len(split) > 3:
extra = split[3:]
if 'debug' in extra:
debug = True
for s in extra:
if s.isdigit():
duration = int(s)
break
try:
symbol_data = {}
result = result % (symbol, timing)
if timing == 'live':
symbol_data = await stock_info.live(symbol, debug)
elif timing == 'daily':
symbol_data = await stock_info.daily(symbol, debug)
elif timing == 'duration':
symbol_data = await stock_info.duration(symbol, duration, debug)
elif timing == 'moving_average':
symbol_data = await stock_info.moving_average(symbol, duration, debug)
except Exception as e:
error_info = str(e)
if symbol_data is not None:
prefix_len = max([len(x) for x in symbol_data])
# left-align each key so the values line up inside the monospaced block
result += '\n'.join(["`" + str(x).ljust(prefix_len) + ": " + str(symbol_data[x]) + "`" for x in symbol_data])
else:
result = "An error occurred getting stock data."
if error_info is not None:
result = error_info
return (result, True)
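# Illustrative sketch (not part of the bot): FunctionExecutor.favorite above tallies a
# user's requests per command id and keeps every command that ties for the highest
# count. The hypothetical helper below shows the same "count, then keep the ties"
# idea with collections.Counter.
def _example_favorite_commands(command_ids):
    import collections
    counts = collections.Counter(command_ids)
    if not counts:
        return [], 0
    top = max(counts.values())
    return [cmd for cmd, n in counts.items() if n == top], top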
|
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2007-2011 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://babel.edgewall.org/wiki/License.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://babel.edgewall.org/log/.
"""Low-level locale data access.
:note: The `Locale` class, which uses this module under the hood, provides a
more convenient interface for accessing the locale data.
"""
import os
from babel.compat import pickle, DictMixin, PY3, u, threading
__all__ = ['exists', 'locale_identifiers', 'load']
__docformat__ = 'restructuredtext en'
_cache = {}
_cache_lock = threading.RLock()
_dirname = os.path.join(os.path.dirname(__file__), 'localedata')
def exists(name):
"""Check whether locale data is available for the given locale.
:param name: the locale identifier string
:return: `True` if the locale data exists, `False` otherwise
:rtype: `bool`
"""
if name in _cache:
return True
return os.path.exists(os.path.join(_dirname, '%s.dat' % name))
def locale_identifiers():
"""Return a list of all locale identifiers for which locale data is
available.
:return: a list of locale identifiers (strings)
:rtype: `list`
:since: version 0.8.1
"""
return [stem for stem, extension in [
os.path.splitext(filename) for filename in os.listdir(_dirname)
] if extension == '.dat' and stem != 'root']
def load(name, merge_inherited=True):
"""Load the locale data for the given locale.
The locale data is a dictionary that contains much of the data defined by
the Common Locale Data Repository (CLDR). This data is stored as a
collection of pickle files inside the ``babel`` package.
>>> d = load('en_US')
>>> d['languages']['sv'] == u('Swedish')
True
Note that the results are cached, and subsequent requests for the same
locale return the same dictionary:
>>> d1 = load('en_US')
>>> d2 = load('en_US')
>>> d1 is d2
True
:param name: the locale identifier string (or "root")
:param merge_inherited: whether the inherited data should be merged into
the data of the requested locale
:return: the locale data
:rtype: `dict`
:raise `IOError`: if no locale data file is found for the given locale
identifier, or one of the locales it inherits from
"""
_cache_lock.acquire()
try:
data = _cache.get(name)
if not data:
# Load inherited data
if name == 'root' or not merge_inherited:
data = {}
else:
parts = name.split('_')
if len(parts) == 1:
parent = 'root'
else:
parent = '_'.join(parts[:-1])
data = load(parent).copy()
filename = os.path.join(_dirname, '%s.dat' % name)
fileobj = open(filename, 'rb')
try:
if name != 'root' and merge_inherited:
merge(data, pickle.load(fileobj))
else:
data = pickle.load(fileobj)
_cache[name] = data
finally:
fileobj.close()
return data
finally:
_cache_lock.release()
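# Illustrative sketch (not part of this module): the inheritance chain that load()
# above walks before merging a locale's own data. The helper name is hypothetical.
def _example_parent_chain(name):
    # e.g. 'zh_Hans_CN' -> ['zh_Hans_CN', 'zh_Hans', 'zh', 'root']
    chain = [name]
    while '_' in name:
        name = '_'.join(name.split('_')[:-1])
        chain.append(name)
    if chain[-1] != 'root':
        chain.append('root')
    return chain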
def merge(dict1, dict2):
"""Merge the data from `dict2` into the `dict1` dictionary, making copies
of nested dictionaries.
>>> d = {1: 'foo', 3: 'baz'}
>>> merge(d, {1: 'Foo', 2: 'Bar'})
>>> items = sorted(d.items()); items
[(1, 'Foo'), (2, 'Bar'), (3, 'baz')]
:param dict1: the dictionary to merge into
:param dict2: the dictionary containing the data that should be merged
"""
for key, val2 in dict2.items():
if val2 is not None:
val1 = dict1.get(key)
if isinstance(val2, dict):
if val1 is None:
val1 = {}
if isinstance(val1, Alias):
val1 = (val1, val2)
elif isinstance(val1, tuple):
alias, others = val1
others = others.copy()
merge(others, val2)
val1 = (alias, others)
else:
val1 = val1.copy()
merge(val1, val2)
else:
val1 = val2
dict1[key] = val1
class Alias(object):
"""Representation of an alias in the locale data.
An alias is a value that refers to some other part of the locale data,
as specified by the `keys`.
"""
def __init__(self, keys):
self.keys = tuple(keys)
def __repr__(self):
return '<%s %r>' % (type(self).__name__, self.keys)
def resolve(self, data):
"""Resolve the alias based on the given data.
This is done recursively, so if one alias resolves to a second alias,
that second alias will also be resolved.
:param data: the locale data
:type data: `dict`
"""
base = data
for key in self.keys:
data = data[key]
if isinstance(data, Alias):
data = data.resolve(base)
elif isinstance(data, tuple):
alias, others = data
data = alias.resolve(base)
return data
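# Illustrative sketch of Alias.resolve above, using hypothetical data: the lookup of
# 'tints' lands on another alias, which is then resolved against the same base dict.
def _example_alias_resolution():
    data = {'colors': {'red': 'Red'}, 'tints': Alias(['colors'])}
    return Alias(['tints']).resolve(data)   # -> {'red': 'Red'}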
class LocaleDataDict(DictMixin, dict):
"""Dictionary wrapper that automatically resolves aliases to the actual
values.
"""
def __init__(self, data, base=None):
dict.__init__(self, data)
if PY3:
DictMixin.__init__(self, data)
if base is None:
base = data
self.base = base
def __getitem__(self, key):
orig = val = dict.__getitem__(self, key)
if isinstance(val, Alias): # resolve an alias
val = val.resolve(self.base)
if isinstance(val, tuple): # Merge a partial dict with an alias
alias, others = val
val = alias.resolve(self.base).copy()
merge(val, others)
if type(val) is dict: # Return a nested alias-resolving dict
val = LocaleDataDict(val, base=self.base)
if val is not orig:
self[key] = val
return val
def copy(self):
return LocaleDataDict(dict.copy(self), base=self.base)
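# Minimal usage sketch of LocaleDataDict with hypothetical data: aliases are resolved
# transparently on access, and nested dicts come back wrapped so resolution keeps
# working further down the tree.
def _example_locale_data_dict():
    data = {'colors': {'red': 'Red'}, 'tints': Alias(['colors'])}
    wrapped = LocaleDataDict(data)
    return wrapped['tints']['red']   # -> 'Red'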
|
|
"""
Ops for downsampling images.
Planned:
Pool, DownsampleAvg, DownsampleSoftmax.
"""
from __future__ import absolute_import, print_function, division
# This file should move along with conv.py
import warnings
import numpy
from six import integer_types
from six.moves import xrange
import six.moves.builtins as builtins
import theano
from theano import gof, OpenMPOp, tensor, Variable, Apply
def max_pool_2d_same_size(input, patch_size):
"""
Takes as input a 4-D tensor. It sets all non maximum values
of non-overlapping patches of size (patch_size[0],patch_size[1]) to zero,
keeping only the maximum values. The output has the same dimensions as
the input.
Parameters
----------
input : 4-D theano tensor of input images
Input images. Max pooling will be done over the 2 last dimensions.
patch_size : tuple of length 2
Size of the patch (patch height, patch width).
(2,2) will retain only one non-zero value per patch of 4 values.
"""
output = Pool(patch_size, True)(input)
outs = MaxPoolGrad(patch_size, True)(input, output, output)
return outs
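# Minimal usage sketch of max_pool_2d_same_size above, assuming a working Theano
# install: on a single (2, 2) patch only the maximum survives and the shape is
# unchanged. The helper name and input values are illustrative only.
def _example_max_pool_2d_same_size():
    x = tensor.tensor4('x')
    f = theano.function([x], max_pool_2d_same_size(x, (2, 2)))
    inp = numpy.array([[[[1., 2.], [3., 4.]]]], dtype=theano.config.floatX)
    return f(inp)   # last two dims become [[0., 0.], [0., 4.]]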
def pool_2d(input, ds, ignore_border=None, st=None, padding=(0, 0),
mode='max'):
"""Downscale the input by a specified factor
Takes as input a N-D tensor, where N >= 2. It downscales the input image by
the specified factor, by keeping only the maximum value of non-overlapping
patches of size (ds[0],ds[1])
Parameters
----------
input : N-D theano tensor of input images
Input images. Max pooling will be done over the 2 last dimensions.
ds : tuple of length 2
Factor by which to downscale (vertical ds, horizontal ds).
(2,2) will halve the image in each dimension.
ignore_border : bool (default None, will print a warning and set to False)
When True, (5,5) input with ds=(2,2) will generate a (2,2) output.
(3,3) otherwise.
st : tuple of two ints
Stride size, which is the number of shifts over rows/cols to get the
next pool region. If st is None, it is considered equal to ds
(no overlap on pooling regions).
padding : tuple of two ints
(pad_h, pad_w), pad zeros to extend beyond four borders of the
images, pad_h is the size of the top and bottom margins, and
pad_w is the size of the left and right margins.
mode : {'max', 'sum', 'average_inc_pad', 'average_exc_pad'}
Operation executed on each window. `max` and `sum` always exclude
the padding in the computation. `average` gives you the choice to
include or exclude it.
"""
if input.ndim < 2:
raise NotImplementedError('pool_2d requires a dimension >= 2')
if ignore_border is None:
warnings.warn(
"pool_2d() will have the parameter ignore_border"
" default value changed to True (currently"
" False). To have consistent behavior with all Theano"
" version, explicitly add the parameter ignore_border=True."
" On the GPU, using ignore_border=True is needed to use cuDNN."
" When using ignore_border=False and not using cuDNN, the only"
" GPU combination supported is when"
" `ds == st and padding == (0, 0) and mode == 'max'`."
" Otherwise, the convolution will be executed on CPU.",
stacklevel=2)
ignore_border = False
if input.ndim == 4:
op = Pool(ds, ignore_border, st=st, padding=padding,
mode=mode)
output = op(input)
return output
# extract image dimensions
img_shape = input.shape[-2:]
# count the number of "leading" dimensions, store as dmatrix
batch_size = tensor.prod(input.shape[:-2])
batch_size = tensor.shape_padright(batch_size, 1)
# store as 4D tensor with shape: (batch_size,1,height,width)
new_shape = tensor.cast(tensor.join(0, batch_size,
tensor.as_tensor([1]),
img_shape), 'int64')
input_4D = tensor.reshape(input, new_shape, ndim=4)
# downsample mini-batch of images
op = Pool(ds, ignore_border, st=st, padding=padding,
mode=mode)
output = op(input_4D)
# restore to original shape
outshp = tensor.join(0, input.shape[:-2], output.shape[-2:])
return tensor.reshape(output, outshp, ndim=input.ndim)
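# Minimal usage sketch of pool_2d above, assuming a working Theano install: max-pool a
# 4-D batch by (2, 2). With ignore_border=True a (5, 5) image yields a (2, 2) output,
# as the docstring describes. The helper name and inputs are illustrative only.
def _example_pool_2d_usage():
    x = tensor.tensor4('x')
    f = theano.function([x], pool_2d(x, ds=(2, 2), ignore_border=True))
    images = numpy.random.rand(3, 1, 5, 5).astype(theano.config.floatX)
    return f(images).shape   # -> (3, 1, 2, 2)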
class Pool(OpenMPOp):
"""
For N-dimensional tensors, consider that the last two dimensions span
images. This Op downsamples these images by taking the max, sum or average
over different patches.
The constructor takes the max, sum or average over different input patches.
Parameters
----------
ds : list or tuple of two ints
Downsample factor over rows and column.
ds indicates the pool region size.
ignore_border : bool
If ds doesn't divide imgshape, do we include an extra row/col
of partial downsampling (False) or ignore it (True).
st : list or tuple of two ints or None
Stride size, which is the number of shifts over rows/cols to get the
next pool region. If st is None, it is considered equal to ds
(no overlap on pooling regions).
padding: tuple of two ints
(pad_h, pad_w), pad zeros to extend beyond four borders of the images,
pad_h is the size of the top and bottom margins, and pad_w is the size
of the left and right margins.
mode : {'max', 'sum', 'average_inc_pad', 'average_exc_pad'}
('average_inc_pad' includes the padding in the count,
'average_exc_pad' excludes it)
"""
__props__ = ('ds', 'ignore_border', 'st', 'padding', 'mode')
@staticmethod
def out_shape(imgshape, ds, ignore_border=False, st=None, padding=(0, 0)):
"""
Return the shape of the output from this op, for input of given
shape and flags.
Parameters
----------
imgshape : tuple, list, or similar of integer or scalar Theano variable
The shape of a tensor of images. The last two elements are
interpreted as the number of rows, and the number of cols.
ds : list or tuple of two ints
Downsample factor over rows and columns; this parameter indicates
the size of the pooling region.
st : list or tuple of two ints
The stride size. This is the distance between the pooling regions.
If it's set to None, it equals ds.
ignore_border : bool
If ds doesn't divide imgshape, do we include an extra row/col of
partial downsampling (False) or ignore it (True).
padding : tuple of two ints
(pad_h, pad_w), pad zeros to extend beyond four borders
of the images, pad_h is the size of the top and bottom margins,
and pad_w is the size of the left and right margins.
Returns
-------
list
The shape of the output from this op, for input of given shape.
This will have the same length as imgshape, but with last two
elements reduced as per the downsampling & ignore_border flags.
"""
if len(imgshape) < 2:
raise TypeError('imgshape must have at least two elements '
'(rows, cols)')
if st is None:
st = ds
r, c = imgshape[-2:]
r = tensor.extract_constant(r)
c = tensor.extract_constant(c)
if padding[0]:
r += padding[0] * 2
if padding[1]:
c += padding[1] * 2
if ignore_border:
if ds[0] == st[0]:
nr = r // st[0]
else:
out_r = (r - ds[0]) // st[0] + 1
if isinstance(r, theano.Variable):
nr = tensor.maximum(out_r, 0)
else:
nr = numpy.maximum(out_r, 0)
if ds[1] == st[1]:
nc = c // st[1]
else:
out_c = (c - ds[1]) // st[1] + 1
if isinstance(c, theano.Variable):
nc = tensor.maximum(out_c, 0)
else:
nc = numpy.maximum(out_c, 0)
else:
if isinstance(r, theano.Variable):
nr = tensor.switch(tensor.ge(st[0], ds[0]),
(r - 1) // st[0] + 1,
tensor.maximum(0, (r - 1 - ds[0]) //
st[0] + 1) + 1)
elif st[0] >= ds[0]:
nr = (r - 1) // st[0] + 1
else:
nr = max(0, (r - 1 - ds[0] + st[0]) // st[0]) + 1
if isinstance(c, theano.Variable):
nc = tensor.switch(tensor.ge(st[1], ds[1]),
(c - 1) // st[1] + 1,
tensor.maximum(0, (c - 1 - ds[1]) //
st[1] + 1) + 1)
elif st[1] >= ds[1]:
nc = (c - 1) // st[1] + 1
else:
nc = max(0, (c - 1 - ds[1] + st[1]) // st[1]) + 1
rval = list(imgshape[:-2]) + [nr, nc]
return rval
def __init__(self, ds, ignore_border=False, st=None, padding=(0, 0),
mode='max', openmp=None):
super(Pool, self).__init__(openmp=openmp)
self.ds = tuple(ds)
if not all([isinstance(d, integer_types) for d in ds]):
raise ValueError(
"Pool downsample parameters must be ints."
" Got %s" % str(ds))
if st is None:
st = ds
assert isinstance(st, (tuple, list))
self.st = tuple(st)
self.ignore_border = ignore_border
self.padding = tuple(padding)
if self.padding != (0, 0) and not ignore_border:
raise NotImplementedError(
'padding works only with ignore_border=True')
if self.padding[0] >= self.ds[0] or self.padding[1] >= self.ds[1]:
raise NotImplementedError(
'padding_h and padding_w must be smaller than the pool size (ds)')
if mode not in ['max', 'average_inc_pad', 'average_exc_pad', 'sum']:
raise ValueError(
"Pool mode parameter only support 'max', 'sum',"
" 'average_inc_pad' and 'average_exc_pad'. Got %s" % mode)
self.mode = mode
def make_node(self, x):
# TODO: consider restricting the dtype?
x = tensor.as_tensor_variable(x)
if x.type.ndim != 4:
raise TypeError()
# If the input shape are broadcastable we can have 0 in the output shape
broad = x.broadcastable[:2] + (False, False)
out = tensor.TensorType(x.dtype, broad)
return gof.Apply(self, [x], [out()])
def perform(self, node, inp, out):
x, = inp
z, = out
if len(x.shape) != 4:
raise NotImplementedError(
'Pool requires 4D input for now')
z_shape = self.out_shape(x.shape, self.ds, self.ignore_border, self.st,
self.padding)
if not self.ignore_border:
assert z_shape[2] > 0
assert z_shape[3] > 0
if (z[0] is None) or (z[0].shape != z_shape):
z[0] = numpy.empty(z_shape, dtype=x.dtype)
zz = z[0]
# number of pooling output rows
pr = zz.shape[-2]
# number of pooling output cols
pc = zz.shape[-1]
ds0, ds1 = self.ds
st0, st1 = self.st
pad_h = self.padding[0]
pad_w = self.padding[1]
img_rows = x.shape[-2] + 2 * pad_h
img_cols = x.shape[-1] + 2 * pad_w
inc_pad = self.mode == 'average_inc_pad'
# pad the image
if self.padding != (0, 0):
y = numpy.zeros(
(x.shape[0], x.shape[1], img_rows, img_cols),
dtype=x.dtype)
y[:, :, pad_h:(img_rows - pad_h), pad_w:(img_cols - pad_w)] = x
else:
y = x
func = numpy.max
if self.mode == 'sum':
func = numpy.sum
elif self.mode != 'max':
func = numpy.average
for n in xrange(x.shape[0]):
for k in xrange(x.shape[1]):
for r in xrange(pr):
row_st = r * st0
row_end = builtins.min(row_st + ds0, img_rows)
if not inc_pad:
row_st = builtins.max(row_st, self.padding[0])
row_end = builtins.min(row_end, x.shape[-2] + pad_h)
for c in xrange(pc):
col_st = c * st1
col_end = builtins.min(col_st + ds1, img_cols)
if not inc_pad:
col_st = builtins.max(col_st, self.padding[1])
col_end = builtins.min(col_end,
x.shape[-1] + pad_w)
zz[n, k, r, c] = func(y[
n, k, row_st:row_end, col_st:col_end])
def infer_shape(self, node, in_shapes):
shp = self.out_shape(in_shapes[0], self.ds,
self.ignore_border, self.st, self.padding)
return [shp]
def grad(self, inp, grads):
x, = inp
gz, = grads
if self.mode == 'max':
maxout = self(x)
return [MaxPoolGrad(self.ds,
ignore_border=self.ignore_border,
st=self.st, padding=self.padding)(
x, maxout, gz)]
else:
return [AveragePoolGrad(self.ds,
ignore_border=self.ignore_border,
st=self.st, padding=self.padding,
mode=self.mode)(
x, gz)]
def c_headers(self):
headers = ['<algorithm>']
headers += super(Pool, self).c_headers()
return headers
def c_code(self, node, name, inp, out, sub):
if self.mode not in ('max', 'sum', 'average_exc_pad', 'average_inc_pad'):
raise theano.gof.utils.MethodNotDefined()
x, = inp
z, = out
fail = sub['fail']
ignore_border = int(self.ignore_border)
ds0, ds1 = self.ds
st0, st1 = self.st
pd0, pd1 = self.padding
if self.openmp:
omp_parallel = '#pragma omp parallel for private(r_st, r_end, c_st, c_end, collector) schedule(static)'
else:
omp_parallel = ''
ccode = """
int typenum = PyArray_ObjectType((PyObject*)%(x)s, 0);
int z_r, z_c; // shape of the output
int r, c; // shape of the padded_input
if(PyArray_NDIM(%(x)s)!=4)
{
PyErr_SetString(PyExc_ValueError, "x must be a 4d ndarray");
%(fail)s;
}
r = PyArray_DIMS(%(x)s)[2];
c = PyArray_DIMS(%(x)s)[3];
r += %(pd0)s * 2;
c += %(pd1)s * 2;
if (%(pd0)s != 0 && %(pd1)s != 0 && !%(ignore_border)s)
{
PyErr_SetString(PyExc_ValueError,
"padding must be (0,0) when ignore border is False");
%(fail)s;
}
if (%(ignore_border)s)
{
// '/' in C is different from '/' in python
if (r - %(ds0)s < 0)
{
z_r = 0;
}
else
{
z_r = (r - %(ds0)s) / %(st0)s + 1;
}
if (c - %(ds1)s < 0)
{
z_c = 0;
}
else
{
z_c = (c - %(ds1)s) / %(st1)s + 1;
}
}
else
{
// decide how many rows the output has
if (%(st0)s >= %(ds0)s)
{
z_r = (r - 1) / %(st0)s + 1;
}
else
{
z_r = std::max(0, (r - 1 - %(ds0)s + %(st0)s) / %(st0)s) + 1;
}
// decide how many columns the output has
if (%(st1)s >= %(ds1)s)
{
z_c = (c - 1) / %(st1)s + 1;
}
else
{
                z_c = std::max(0, (c - 1 - %(ds1)s + %(st1)s) / %(st1)s) + 1;
}
assert(z_r > 0);
assert(z_c > 0);
}
// memory allocation of z if necessary
if ((!%(z)s)
|| *PyArray_DIMS(%(z)s)!=4
||(PyArray_DIMS(%(z)s)[0] != PyArray_DIMS(%(x)s)[0])
||(PyArray_DIMS(%(z)s)[1] != PyArray_DIMS(%(x)s)[1])
||(PyArray_DIMS(%(z)s)[2] != z_r)
||(PyArray_DIMS(%(z)s)[3] != z_c)
)
{
if (%(z)s) Py_XDECREF(%(z)s);
npy_intp dims[4] = {0,0,0,0};
dims[0]=PyArray_DIMS(%(x)s)[0];
dims[1]=PyArray_DIMS(%(x)s)[1];
dims[2]=z_r;
dims[3]=z_c;
//TODO: zeros not necessary
%(z)s = (PyArrayObject*) PyArray_ZEROS(4, dims, typenum,0);
}
// used for indexing a pool region inside the input
dtype_%(x)s collector; // temp var for the value in a region
if (z_r && z_c)
{
int r_st, r_end, c_st, c_end;
%(omp_parallel)s
for(int t = 0; t < PyArray_DIMS(%(x)s)[0] * PyArray_DIMS(%(x)s)[1]; t++){
int b = t %% PyArray_DIMS(%(x)s)[0];
int k = t / PyArray_DIMS(%(x)s)[0];
for(int i=0; i < z_r; i++){
r_st = i * %(st0)s;
r_end = r_st + %(ds0)s;
// skip the padding
r_st = r_st < %(pd0)s ? %(pd0)s : r_st;
r_end = r_end > (r - %(pd0)s) ? r - %(pd0)s : r_end;
// from padded_img space to img space
r_st -= %(pd0)s;
r_end -= %(pd0)s;
// handle the case where no padding, ignore border is True
if (%(ignore_border)s)
{
r_end = r_end > r ? r : r_end;
}
for(int j=0; j<z_c; j++){
c_st = j * %(st1)s;
c_end = c_st + %(ds1)s;
// skip the padding
c_st = c_st < %(pd1)s ? %(pd1)s : c_st;
c_end = c_end > (c - %(pd1)s) ? c - %(pd1)s : c_end;
dtype_%(z)s * z = (
(dtype_%(z)s*)(PyArray_GETPTR4(%(z)s, b, k, i, j)));
// change coordinates from padding_img space into img space
c_st -= %(pd1)s;
c_end -= %(pd1)s;
// handle the case where no padding, ignore border is True
if (%(ignore_border)s)
{
c_end = c_end > c ? c : c_end;
}
"""
if self.mode == 'max':
ccode += """
// use the first element as the initial value of collector
collector = ((dtype_%(x)s*)(PyArray_GETPTR4(%(x)s,b,k,r_st,c_st)))[0];
// go through the pooled region in the unpadded input
for(int m=r_st; m<r_end; m++)
{
for(int n=c_st; n<c_end; n++)
{
dtype_%(x)s a = ((dtype_%(x)s*)(PyArray_GETPTR4(%(x)s,b,k,m,n)))[0];
collector = (a > collector) ? a : collector;
}
}
z[0] = collector;
"""
elif self.mode in ('sum', 'average_exc_pad', 'average_inc_pad'):
ccode += """
// initialize the sum at zero
collector = ((dtype_%(x)s)(0));
// go through the pooled region in the unpadded input
for(int m=r_st; m<r_end; m++)
{
for(int n=c_st; n<c_end; n++)
{
dtype_%(x)s a = ((dtype_%(x)s*)(PyArray_GETPTR4(%(x)s,b,k,m,n)))[0];
collector += a;
}
}
"""
if self.mode == "sum":
ccode += """
z[0] = collector;
"""
elif self.mode == 'average_inc_pad' and self.ignore_border:
ccode += """
z[0] = collector / (%(ds0)s * %(ds1)s);
"""
else:
ccode += """
z[0] = collector / ((r_end-r_st)*(c_end-c_st));
"""
ccode += """
}
}
}
}
"""
return ccode % locals()
def c_code_cache_version(self):
return (0, 6, 8, 4, self.openmp)
class PoolGrad(OpenMPOp):
__props__ = ('ds', 'ignore_border', 'st', 'padding', 'mode')
@staticmethod
def out_shape(imgshape, ds, ignore_border=False, st=None, padding=(0, 0)):
"""Return the shape of the output from this op, for input of given
shape and flags.
Parameters
----------
imgshape : tuple of integers or scalar Theano variables
the shape of a tensor of images. The last two elements are
interpreted as the number of rows, and the number of cols.
ds : tuple of two ints
            downsample factor over rows and columns; this parameter
            indicates the size of the pooling region.
        st : tuple of two ints
            the stride size. This is the distance between the pooling
            regions. If it is set to None, it equals ds.
ignore_border : bool
if ds doesn't divide imgshape, do we include an extra
row/col of partial downsampling (False) or ignore it
(True).
padding : tuple of two ints
(pad_h, pad_w), pad zeros to extend beyond four borders of
the images, pad_h is the size of the top and bottom
margins, and pad_w is the size of the left and right
margins.
Returns
-------
list :
the shape of the output from this op, for input of given
shape. This will have the same length as imgshape, but
with last two elements reduced as per the downsampling &
ignore_border flags.
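        Examples
        --------
        An illustrative call (values chosen for this sketch): a (3, 2, 7, 7)
        input with a 2x2 pooling region, stride 2, no padding and
        ignore_border=True yields [3, 2, 3, 3], since (7 - 2) // 2 + 1 == 3:
            PoolGrad.out_shape((3, 2, 7, 7), (2, 2), ignore_border=True)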
"""
if len(imgshape) < 2:
raise TypeError('imgshape must have at least two elements '
'(rows, cols)')
if st is None:
st = ds
r, c = imgshape[-2:]
r += padding[0] * 2
c += padding[1] * 2
if ignore_border:
out_r = (r - ds[0]) // st[0] + 1
out_c = (c - ds[1]) // st[1] + 1
if isinstance(r, theano.Variable):
nr = tensor.maximum(out_r, 0)
else:
nr = numpy.maximum(out_r, 0)
if isinstance(c, theano.Variable):
nc = tensor.maximum(out_c, 0)
else:
nc = numpy.maximum(out_c, 0)
else:
if isinstance(r, theano.Variable):
nr = tensor.switch(tensor.ge(st[0], ds[0]),
(r - 1) // st[0] + 1,
tensor.maximum(0, (r - 1 - ds[0]) //
st[0] + 1) + 1)
elif st[0] >= ds[0]:
nr = (r - 1) // st[0] + 1
else:
nr = max(0, (r - 1 - ds[0]) // st[0] + 1) + 1
if isinstance(c, theano.Variable):
nc = tensor.switch(tensor.ge(st[1], ds[1]),
(c - 1) // st[1] + 1,
tensor.maximum(0, (c - 1 - ds[1]) //
st[1] + 1) + 1)
elif st[1] >= ds[1]:
nc = (c - 1) // st[1] + 1
else:
nc = max(0, (c - 1 - ds[1]) // st[1] + 1) + 1
rval = list(imgshape[:-2]) + [nr, nc]
return rval
def __init__(self, ds, ignore_border, st=None, padding=(0, 0), mode='max', openmp=None):
self.ds = tuple(ds)
self.ignore_border = ignore_border
if st is None:
st = ds
self.st = tuple(st)
self.padding = tuple(padding)
if mode not in ['max', 'sum', 'average_inc_pad', 'average_exc_pad']:
raise ValueError(
"Pool mode parameter only support 'max', 'sum',"
" 'average_inc_pad' and 'average_exc_pad'. Got %s" % mode)
self.mode = mode
super(PoolGrad, self).__init__(openmp=openmp)
def infer_shape(self, node, in_shapes):
return [in_shapes[0]]
class MaxPoolGrad(PoolGrad):
def __init__(self, ds, ignore_border, st=None, padding=(0, 0), openmp=None):
PoolGrad.__init__(self, ds, ignore_border, st, padding, 'max', openmp)
def make_node(self, x, maxout, gz):
# make_node should only be called by the grad function of
# Pool, so these asserts should not fail.
x = tensor.as_tensor_variable(x)
maxout = tensor.as_tensor_variable(maxout)
gz = tensor.as_tensor_variable(gz)
assert isinstance(x, Variable) and x.ndim == 4
assert isinstance(maxout, Variable) and maxout.ndim == 4
assert isinstance(gz, Variable) and gz.ndim == 4
return Apply(self, [x, maxout, gz], [x.type()])
def perform(self, node, inp, out):
assert self.mode == 'max'
x, maxout, gz = inp
gx_stg, = out
# number of pooling output rows
pr = maxout.shape[-2]
# number of pooling output cols
pc = maxout.shape[-1]
ds0, ds1 = self.ds
st0, st1 = self.st
pad_h = self.padding[0]
pad_w = self.padding[1]
img_rows = x.shape[-2] + 2 * pad_h
img_cols = x.shape[-1] + 2 * pad_w
# pad the image
if self.padding != (0, 0):
y = numpy.zeros(
(x.shape[0], x.shape[1], img_rows, img_cols),
dtype=x.dtype)
y[:, :, pad_h:(img_rows - pad_h), pad_w:(img_cols - pad_w)] = x
else:
y = x
gx = numpy.zeros_like(y)
for n in xrange(x.shape[0]):
for k in xrange(x.shape[1]):
for r in xrange(pr):
row_st = builtins.max(r * st0, self.padding[0])
row_end = builtins.min(row_st + ds0, img_rows)
for c in xrange(pc):
col_st = builtins.max(c * st1, self.padding[1])
col_end = builtins.min(col_st + ds1, img_cols)
for row_ind in xrange(row_st, row_end):
for col_ind in xrange(col_st, col_end):
if (maxout[n, k, r, c] == y[n, k, row_ind, col_ind]):
gx[n, k, row_ind, col_ind] += gz[n, k, r, c]
# unpad the image
gx = gx[:, :, pad_h:(img_rows - pad_h), pad_w:(img_cols - pad_w)]
gx_stg[0] = gx
def grad(self, inp, grads):
x, maxout, gz = inp
ggx, = grads
return [theano.tensor.zeros_like(x),
theano.tensor.zeros_like(maxout),
DownsampleFactorMaxGradGrad(
self.ds, ignore_border=self.ignore_border,
st=self.st, padding=self.padding)(x, maxout, ggx)]
def c_code(self, node, name, inp, out, sub):
assert self.mode == 'max'
x, z, gz = inp
gx, = out
fail = sub['fail']
ignore_border = int(self.ignore_border)
ds0, ds1 = self.ds
st0, st1 = self.st
pd0, pd1 = self.padding
if self.openmp:
omp_parallel = '#pragma omp parallel for private(r_st, r_end, c_st, c_end, maximum) schedule(static)'
else:
omp_parallel = ''
return """
// sanity checks
int x_typenum = PyArray_ObjectType((PyObject*)%(x)s, 0);
int z_typenum = PyArray_ObjectType((PyObject*)%(z)s, 0);
int gz_typenum = PyArray_ObjectType((PyObject*)%(gz)s, 0);
if ((x_typenum != z_typenum) || (x_typenum != gz_typenum))
{
PyErr_SetString(PyExc_ValueError, "input types must all match");
%(fail)s;
}
if(PyArray_NDIM(%(x)s)!=4)
{
PyErr_SetString(PyExc_ValueError, "x must be a 4d ndarray");
%(fail)s;
}
if(PyArray_NDIM(%(z)s)!=4)
{
PyErr_SetString(PyExc_ValueError, "z must be a 4d ndarray");
%(fail)s;
}
if(PyArray_NDIM(%(gz)s)!=4)
{
PyErr_SetString(PyExc_ValueError, "gz must be a 4d ndarray");
%(fail)s;
}
int z_r, z_c;
z_r = PyArray_DIMS(%(z)s)[2];
z_c = PyArray_DIMS(%(z)s)[3];
int r, c; // shape of the padded_input
r = PyArray_DIMS(%(x)s)[2];
c = PyArray_DIMS(%(x)s)[3];
r += %(pd0)s * 2;
c += %(pd1)s * 2;
// allocating memory for gx
if ((!%(gx)s)
|| !PyArray_ISCONTIGUOUS(%(gx)s)
|| *PyArray_DIMS(%(gx)s)!=4
||(PyArray_DIMS(%(gx)s)[0] != PyArray_DIMS(%(x)s)[0])
||(PyArray_DIMS(%(gx)s)[1] != PyArray_DIMS(%(x)s)[1])
||(PyArray_DIMS(%(gx)s)[2] != PyArray_DIMS(%(x)s)[2])
||(PyArray_DIMS(%(gx)s)[3] != PyArray_DIMS(%(x)s)[3])
)
{
Py_XDECREF(%(gx)s);
%(gx)s = (PyArrayObject*) PyArray_ZEROS(4, PyArray_DIMS(%(x)s), x_typenum,0);
}
else {
PyArray_FILLWBYTE(%(gx)s, 0);
}
dtype_%(z)s maximum; // temp var for maximum value in a region
if (z_r && z_c)
{
int r_st, r_end, c_st, c_end;
%(omp_parallel)s
for(int t = 0; t < PyArray_DIMS(%(x)s)[0] * PyArray_DIMS(%(x)s)[1]; t++){
int b = t %% PyArray_DIMS(%(x)s)[0];
int k = t / PyArray_DIMS(%(x)s)[0];
for(int i=0; i < z_r; i++){
r_st = i * %(st0)s;
r_end = r_st + %(ds0)s;
// skip the padding
r_st = r_st < %(pd0)s ? %(pd0)s : r_st;
r_end = r_end > (r - %(pd0)s) ? r - %(pd0)s : r_end;
// from padded_img space to img space
r_st -= %(pd0)s;
r_end -= %(pd0)s;
for(int j=0; j<z_c; j++){
c_st = j * %(st1)s;
c_end = c_st + %(ds1)s;
// skip the padding
c_st = c_st < %(pd1)s ? %(pd1)s : c_st;
c_end = c_end > (c - %(pd1)s) ? c - %(pd1)s : c_end;
// change coordinates from padding_img space into img space
c_st -= %(pd1)s;
c_end -= %(pd1)s;
// the maximum value
maximum = ((dtype_%(z)s*)(PyArray_GETPTR4(%(z)s,b,k,i,j)))[0];
// the gradient corresponding to this maximum value in z
dtype_%(gz)s * gz = (
(dtype_%(gz)s*)(PyArray_GETPTR4(%(gz)s, b, k, i, j)));
// go through the pooled region in the unpadded input
for(int m=r_st; m<r_end; m++)
{
for(int n=c_st; n<c_end; n++)
{
dtype_%(x)s a = ((dtype_%(x)s*)(PyArray_GETPTR4(%(x)s,b,k,m,n)))[0];
dtype_%(gx)s * gx = (
(dtype_%(gx)s*)(PyArray_GETPTR4(%(gx)s, b, k, m, n)));
if (a == maximum){
gx[0] = gx[0] + gz[0];
}
}
}
}
}
}
}
""" % locals()
def c_code_cache_version(self):
return (0, 7, self.openmp)
class AveragePoolGrad(PoolGrad):
def __init__(self, ds, ignore_border, st=None, padding=(0, 0),
mode='average_inc_pad'):
assert mode in ['sum', 'average_inc_pad', 'average_exc_pad']
PoolGrad.__init__(self, ds, ignore_border, st, padding, mode)
# There is an extra dummy parameter to match the parameter count
# of MaxPoolGrad. They have to keep the same interface because of
# the DownsampleFactorMaxGrad trick to keep old scripts working
# (see downsample.py for details on this).
def make_node(self, x, gz, dummy=None):
# make_node should only be called by the grad function of
# Pool, so these asserts should not fail.
x = tensor.as_tensor_variable(x)
gz = tensor.as_tensor_variable(gz)
assert isinstance(x, Variable) and x.ndim == 4
assert isinstance(gz, Variable) and gz.ndim == 4
return Apply(self, [x, gz], [x.type()])
def perform(self, node, inp, out):
if self.mode == 'average_exc_pad' and self.padding != (0, 0):
raise NotImplementedError()
x, gz = inp
gx_stg, = out
z_shape = self.out_shape(x.shape, self.ds, self.ignore_border, self.st,
self.padding)
if (gx_stg[0] is None) or (gx_stg[0].shape != z_shape):
gx_stg[0] = numpy.empty(z_shape, dtype=x.dtype)
zz = gx_stg[0]
# number of pooling output rows
pr = zz.shape[-2]
# number of pooling output cols
pc = zz.shape[-1]
ds0, ds1 = self.ds
st0, st1 = self.st
pad_h = self.padding[0]
pad_w = self.padding[1]
img_rows = x.shape[-2] + 2 * pad_h
img_cols = x.shape[-1] + 2 * pad_w
inc_pad = self.mode == 'average_inc_pad'
sum_mode = self.mode == 'sum'
# pad the image
if self.padding != (0, 0):
y = numpy.zeros(
(x.shape[0], x.shape[1], img_rows, img_cols),
dtype=x.dtype)
y[:, :, pad_h:(img_rows - pad_h), pad_w:(img_cols - pad_w)] = x
else:
y = x
gx = numpy.zeros_like(y)
for n in xrange(x.shape[0]):
for k in xrange(x.shape[1]):
for r in xrange(pr):
if sum_mode or inc_pad:
row_st = r * st0
else:
row_st = builtins.max(r * st0, self.padding[0])
row_end = builtins.min(row_st + ds0, img_rows)
for c in xrange(pc):
if sum_mode or inc_pad:
col_st = c * st1
else:
col_st = builtins.max(c * st1,
self.padding[1])
col_end = builtins.min(col_st + ds1, img_cols)
if sum_mode:
val = gz[n, k, r, c]
else:
val = gz[n, k, r, c] / ((row_end - row_st) *
(col_end - col_st))
gx[n, k, row_st:row_end, col_st:col_end] += val
# unpad the image
gx = gx[:, :, pad_h:(img_rows - pad_h), pad_w:(img_cols - pad_w)]
gx_stg[0] = gx
def grad(self, inp, grads):
x, gz = inp
ggx, = grads
return [theano.tensor.zeros_like(x),
Pool(self.ds, ignore_border=self.ignore_border,
st=self.st, padding=self.padding, mode=self.mode)(ggx)]
class DownsampleFactorMaxGradGrad(OpenMPOp):
__props__ = ('ds', 'ignore_border', 'st', 'padding', 'mode')
def __init__(self, ds, ignore_border, st=None, padding=(0, 0), mode='max', openmp=None):
self.ds = tuple(ds)
if not all([isinstance(d, integer_types) for d in ds]):
raise ValueError(
"Pool downsample parameters must be ints."
" Got %s" % str(ds))
if st is None:
st = ds
assert isinstance(st, (tuple, list))
self.st = tuple(st)
self.ignore_border = ignore_border
self.padding = tuple(padding)
if self.padding != (0, 0) and not ignore_border:
raise NotImplementedError(
'padding works only with ignore_border=True')
if self.padding[0] >= self.ds[0] or self.padding[1] >= self.ds[1]:
raise NotImplementedError(
                'padding_h and padding_w must be smaller than ds (the pooling region size)')
self.mode = mode
super(DownsampleFactorMaxGradGrad, self).__init__(openmp=openmp)
assert self.mode == 'max'
def make_node(self, x, maxout, gz):
# make_node should only be called by the grad function of
# MaxPoolGrad, so these asserts should not fail.
x = tensor.as_tensor_variable(x)
maxout = tensor.as_tensor_variable(maxout)
gz = tensor.as_tensor_variable(gz)
assert x.ndim == 4
assert maxout.ndim == 4
assert gz.ndim == 4
return Apply(self, [x, maxout, gz], [x.type()])
def perform(self, node, inp, out):
x, maxout, ggx = inp
z, = out
if len(x.shape) != 4:
raise NotImplementedError(
'DownsampleFactorMaxGradGrad requires 4D input for now')
if (z[0] is None) or (z[0].shape != maxout.shape):
z[0] = numpy.zeros(maxout.shape, dtype=x.dtype)
ggz = z[0] # grad wrt maxout_grad has the same shape as maxout
# number of pooling output rows
pr = ggz.shape[-2]
# number of pooling output cols
pc = ggz.shape[-1]
ds0, ds1 = self.ds
st0, st1 = self.st
pd0, pd1 = self.padding
img_rows = x.shape[-2] + 2 * pd0
img_cols = x.shape[-1] + 2 * pd1
# pad the image and its gradients
if self.padding != (0, 0):
y_padded = numpy.zeros(
(x.shape[0], x.shape[1], img_rows, img_cols),
dtype=x.dtype) + x.min() - 1
y_padded[:, :, pd0:(img_rows - pd0), pd1:(img_cols - pd1)] = x
ggx_padded = numpy.zeros(
(x.shape[0], x.shape[1], img_rows, img_cols),
dtype=x.dtype)
ggx_padded[:, :, pd0:(img_rows - pd0), pd1:(img_cols - pd1)] = ggx
else:
y_padded = x
ggx_padded = ggx
for n in xrange(x.shape[0]):
for k in xrange(x.shape[1]):
for r in xrange(pr):
row_st = r * st0
row_end = builtins.min(row_st + ds0, img_rows)
for c in xrange(pc):
col_st = c * st1
col_end = builtins.min(col_st + ds1, img_cols)
for row_ind in xrange(row_st, row_end):
for col_ind in xrange(col_st, col_end):
if (maxout[n, k, r, c] == y_padded[n, k, row_ind, col_ind]):
ggz[n, k, r, c] = ggx_padded[n, k, row_ind, col_ind]
def infer_shape(self, node, in_shapes):
return [in_shapes[1]]
def c_code(self, node, name, inp, out, sub):
if self.mode != 'max':
raise theano.gof.utils.MethodNotDefined()
x, maxout, ggx = inp
z, = out # the grad of grad
fail = sub['fail']
ignore_border = int(self.ignore_border)
ds0, ds1 = self.ds
st0, st1 = self.st
pd0, pd1 = self.padding
if self.openmp:
omp_parallel = '#pragma omp parallel for private(r_st, r_end, c_st, c_end, maximum) schedule(static)'
else:
omp_parallel = ''
return """
int z_typenum = PyArray_ObjectType((PyObject*)%(maxout)s, 0);
int z_r, z_c;
z_r = PyArray_DIMS(%(maxout)s)[2];
z_c = PyArray_DIMS(%(maxout)s)[3];
int r, c; // shape of the padded_input
r = PyArray_DIMS(%(x)s)[2];
c = PyArray_DIMS(%(x)s)[3];
r += %(pd0)s * 2;
c += %(pd1)s * 2;
// allocating memory for output
if ((!%(z)s)
|| !PyArray_ISCONTIGUOUS(%(z)s)
|| *PyArray_DIMS(%(z)s)!=4
||(PyArray_DIMS(%(z)s)[0] != PyArray_DIMS(%(maxout)s)[0])
||(PyArray_DIMS(%(z)s)[1] != PyArray_DIMS(%(maxout)s)[1])
||(PyArray_DIMS(%(z)s)[2] != PyArray_DIMS(%(maxout)s)[2])
||(PyArray_DIMS(%(z)s)[3] != PyArray_DIMS(%(maxout)s)[3])
)
{
Py_XDECREF(%(z)s);
%(z)s = (PyArrayObject*) PyArray_ZEROS(4, PyArray_DIMS(%(maxout)s), z_typenum,0);
}
else {
PyArray_FILLWBYTE(%(z)s, 0);
}
dtype_%(maxout)s maximum; // temp var for maximum value in a region
int r_st, r_end, c_st, c_end;
%(omp_parallel)s
for(int t = 0; t < PyArray_DIMS(%(x)s)[0] * PyArray_DIMS(%(x)s)[1]; t++){
int b = t %% PyArray_DIMS(%(x)s)[0];
int k = t / PyArray_DIMS(%(x)s)[0];
for(int i=0; i < z_r; i++){
r_st = i * %(st0)s;
r_end = r_st + %(ds0)s;
// skip the padding
r_st = r_st < %(pd0)s ? %(pd0)s : r_st;
r_end = r_end > (r - %(pd0)s) ? r - %(pd0)s : r_end;
// from padded_img space to img space
r_st -= %(pd0)s;
r_end -= %(pd0)s;
for(int j=0; j<z_c; j++){
c_st = j * %(st1)s;
c_end = c_st + %(ds1)s;
// skip the padding
c_st = c_st < %(pd1)s ? %(pd1)s : c_st;
c_end = c_end > (c - %(pd1)s) ? c - %(pd1)s : c_end;
// from padding_img space into img space
c_st -= %(pd1)s;
c_end -= %(pd1)s;
// the maximum value
maximum = ((dtype_%(maxout)s*)(PyArray_GETPTR4(%(maxout)s,b,k,i,j)))[0];
// z at this position
dtype_%(z)s * z = ((dtype_%(z)s*)(PyArray_GETPTR4(%(z)s, b, k, i, j)));
// go through the pooled region in the unpadded input
for(int m=r_st; m<r_end; m++)
{
for(int n=c_st; n<c_end; n++)
{
dtype_%(x)s a = ((dtype_%(x)s*)(PyArray_GETPTR4(%(x)s,b,k,m,n)))[0];
dtype_%(ggx)s * ggx = (
(dtype_%(ggx)s*)(PyArray_GETPTR4(%(ggx)s, b, k, m, n)));
if (a == maximum){
z[0] += ggx[0];
}
}
}
}
}
}
""" % locals()
def c_code_cache_version(self):
return (0, 1, self.openmp)
# ===============================================================================
# Copyright 2013 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from enable.component_editor import ComponentEditor
from pyface.action.menu_manager import MenuManager
from pyface.tasks.traits_dock_pane import TraitsDockPane
from pyface.tasks.traits_task_pane import TraitsTaskPane
from traits.api import Int, Property, Instance
from traitsui.api import (
View,
UItem,
Item,
VGroup,
TabularEditor,
HGroup,
spring,
EnumEditor,
Tabbed,
Handler,
CheckListEditor,
)
from traitsui.menu import Action
from traitsui.tabular_adapter import TabularAdapter
# ============= standard library imports ========================
# ============= local library imports ==========================
from pychron.core.configurable_tabular_adapter import ConfigurableMixin
from pychron.core.helpers.traitsui_shortcuts import okcancel_view, VFold
from pychron.core.ui.qt.tabular_editors import FilterTabularEditor
from pychron.core.ui.table_configurer import TableConfigurer, TableConfigurerHandler
from pychron.envisage.icon_button_editor import icon_button_editor
class PositionsAdapter(TabularAdapter, ConfigurableMixin):
columns = [
("Identifier", "identifier"),
("Irradiation", "irradiation_str"),
("Sample", "sample"),
("Material", "material"),
("Position", "position"),
("Weight", "weight"),
("N. Xtals", "nxtals"),
("Note", "note"),
]
all_columns = [
("Identifier", "identifier"),
("Packet", "packet"),
("Irradiation", "irradiation_str"),
("Sample", "sample"),
("Material", "material"),
("Position", "position"),
("Weight", "weight"),
("N. Xtals", "nxtals"),
("Note", "note"),
]
font = "arial 12"
def get_menu(self, obj, trait, row, column):
actions = [
Action(name="Configure", action="configure_position_table"),
]
mm = MenuManager(*actions)
return mm
class GroupedPositionsAdapter(TabularAdapter, ConfigurableMixin):
columns = [
("Identifier", "identifier"),
("Irradiation", "irradiation_str"),
("Sample", "sample"),
("Material", "material"),
("Positions", "position_str"),
]
all_columns = [
("Identifier", "identifier"),
("Packet", "packet"),
("Irradiation", "irradiation_str"),
("Sample", "sample"),
("Material", "material"),
("Positions", "position_str"),
]
font = "arial 12"
identifier_width = Int(80)
irradiation_str_width = Int(80)
sample_width = Int(80)
position_str_width = Int(80)
def get_menu(self, obj, trait, row, column):
actions = [
Action(name="Configure", action="configure_grouped_position_table"),
]
mm = MenuManager(*actions)
return mm
def get_bg_color(self, obj, trait, row, column=0):
item = getattr(obj, trait)[row]
c = item.color
if hasattr(c, "__iter__"):
c = [x * 255 for x in c]
return c
def get_text_color(self, obj, trait, row, column=0):
item = getattr(obj, trait)[row]
color = "black"
if hasattr(item.color, "__iter__"):
if sum(item.color[:3]) < 1.5:
color = "white"
return color
class BaseLoadPane(TraitsDockPane):
display_load_name = Property(depends_on="model.load_name")
# display_tray_name = Property(depends_on='model.tray')
def _get_display_load_name(self):
if self.model.load_instance:
ret = '<font size=12 color="blue"><b>{} ({}) {}</b></font>'.format(
self.model.load_instance.name,
self.model.tray,
self.model.load_instance.create_date,
)
else:
ret = ""
return ret
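# For reference, the property above renders markup along these lines (the
# load name, tray and create date shown are illustrative values only):
#     '<font size=12 color="blue"><b>Load A (221-hole) 2019-01-01</b></font>'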
class PositionTableConfigurer(TableConfigurer):
id = "position_table"
def traits_view(self):
v = VGroup(
UItem(
"columns",
style="custom",
editor=CheckListEditor(name="available_columns", cols=3),
),
Item("font", enabled_when="fontsize_enabled"),
)
return okcancel_view(
v,
# kind='modal',
title="Configure Position Table",
handler=TableConfigurerHandler(),
)
class GroupedPositionTableConfigurer(TableConfigurer):
id = "grouped_position_table"
def traits_view(self):
v = VGroup(
UItem(
"columns",
style="custom",
editor=CheckListEditor(name="available_columns", cols=3),
),
Item("font", enabled_when="fontsize_enabled"),
)
return okcancel_view(
v,
# kind='modal',
title="Configure Grouped Position Table",
handler=TableConfigurerHandler(),
)
class LoadTableHandler(Handler):
def configure_position_table(self, info, obj):
pane = info.ui.context["pane"]
tb = pane.position_configurer
tb.edit_traits()
def configure_grouped_position_table(self, info, obj):
pane = info.ui.context["pane"]
tb = pane.grouped_position_configurer
tb.edit_traits()
class LoadTablePane(BaseLoadPane):
name = "Positions"
id = "pychron.loading.positions"
position_configurer = Instance(PositionTableConfigurer)
grouped_position_configurer = Instance(GroupedPositionTableConfigurer)
position_adapter = Instance(PositionsAdapter)
grouped_position_adapter = Instance(GroupedPositionsAdapter)
def __init__(self, *args, **kw):
super(LoadTablePane, self).__init__(*args, **kw)
self.position_configurer.load()
self.grouped_position_configurer.load()
def _position_configurer_default(self):
c = PositionTableConfigurer()
c.set_adapter(self.position_adapter)
return c
def _grouped_position_configurer_default(self):
c = GroupedPositionTableConfigurer()
c.set_adapter(self.grouped_position_adapter)
return c
def _position_adapter_default(self):
return PositionsAdapter()
def _grouped_position_adapter_default(self):
return GroupedPositionsAdapter()
def traits_view(self):
a = HGroup(spring, UItem("pane.display_load_name", style="readonly"), spring)
b = UItem(
"positions",
editor=TabularEditor(adapter=self.position_adapter, multi_select=True),
)
c = UItem(
"grouped_positions",
label="Grouped Positions",
editor=TabularEditor(adapter=self.grouped_position_adapter),
)
v = View(VGroup(spring, a, Tabbed(b, c)), handler=LoadTableHandler())
return v
class LoadInstanceAdapter(TabularAdapter):
columns = [("Load", "name"), ("Create Date", "create_date")]
font = "modern 10"
class LoadPane(TraitsTaskPane):
def traits_view(self):
v = View(VGroup(UItem("canvas", style="custom", editor=ComponentEditor())))
return v
class LoadDockPane(BaseLoadPane):
name = "Load"
id = "pychron.loading.load"
def traits_view(self):
a = HGroup(
Item("pane.display_load_name", style="readonly", label="Load"), spring
)
b = UItem("canvas", style="custom", editor=ComponentEditor())
v = View(VGroup(a, b))
return v
class LoadControlPane(TraitsDockPane):
name = "Load"
id = "pychron.loading.controls"
def traits_view(self):
notegrp = VGroup(
Item(
"retain_note", tooltip="Retain the Note for the next hole", label="Lock"
),
Item("note", style="custom", show_label=False),
show_border=True,
label="Note",
)
viewgrp = VGroup(
HGroup(
Item("use_cmap", label="Color Map"),
UItem("cmap_name", enabled_when="use_cmap"),
),
HGroup(
Item("show_hole_numbers"), Item("show_identifiers", label="Identifiers")
),
HGroup(Item("show_weights"), Item("show_nxtals", label="N. Xtals")),
Item("show_samples"),
# Item('show_spans'),
show_border=True,
label="View",
)
load_grp = VGroup(
HGroup(
Item(
"username",
label="User",
editor=EnumEditor(name="available_user_names"),
),
icon_button_editor("add_button", "add", tooltip="Add a load"),
icon_button_editor(
"delete_button", "delete", tooltip="Delete selected load"
),
icon_button_editor(
"archive_button",
"application-x-archive",
tooltip="Archive a set of loads",
),
icon_button_editor(
"unarchive_button",
"application-x-archive",
tooltip="Unarchive a set of loads",
),
),
UItem(
"loads",
editor=FilterTabularEditor(
adapter=LoadInstanceAdapter(),
use_fuzzy=True,
editable=False,
multi_select=True,
selected="selected_instances",
stretch_last_section=False,
),
height=250,
),
label="Load",
show_border=True,
)
samplegrp = VGroup(
HGroup(
UItem("irradiation", editor=EnumEditor(name="irradiations")),
UItem("level", editor=EnumEditor(name="levels")),
UItem("identifier", editor=EnumEditor(name="identifiers")),
),
Item("sample_info", style="readonly"),
Item("packet", style="readonly"),
HGroup(
Item("weight", label="Weight (mg)", springy=True),
Item(
"retain_weight",
label="Lock",
tooltip="Retain the Weight for the next hole",
),
),
HGroup(
Item("nxtals", label="N. Xtals", springy=True),
Item(
"retain_nxtals",
label="Lock",
tooltip="Retain the N. Xtals for the next hole",
),
),
HGroup(
Item("npositions", label="NPositions", springy=True),
Item("auto_increment"),
),
enabled_when="load_name",
show_border=True,
label="Sample",
)
v = View(VFold(load_grp, samplegrp, notegrp, viewgrp))
return v
# ============= EOF =============================================
#import functools
import ssl
import sys
import re
import json
import pytz
from itertools import chain
from django.shortcuts import render, redirect
from django.http import Http404
from django.views.generic import ListView
from django.db.models import Q
from django.views.decorators.csrf import csrf_protect, csrf_exempt
from django.contrib.auth.decorators import login_required
from django.http import HttpResponseRedirect, HttpResponse
from django.template import RequestContext
from django.contrib.auth import authenticate, login
from django.conf import settings
from django.views.generic import ListView, UpdateView
from django.views.generic.detail import DetailView
from django.contrib.auth.mixins import PermissionRequiredMixin
from django.core.exceptions import ImproperlyConfigured
from .models import *
from rest_framework import viewsets, generics
from .serializers import TransmissionSerializer, TalkGroupSerializer, ScanListSerializer, MenuScanListSerializer, MenuTalkGroupListSerializer, MessageSerializer
from datetime import datetime, timedelta
from django.utils import timezone
from django.contrib.auth.decorators import login_required
from django.core.exceptions import ObjectDoesNotExist
from django.core.mail import mail_admins, send_mail
from allauth.account.models import EmailAddress as allauth_emailaddress
from pprint import pprint
from django.contrib import messages
import logging
from .forms import *
logger = logging.getLogger(__name__)
def check_anonymous(decorator):
"""
    Decorator used to see if we allow anonymous access
"""
anonymous = getattr(settings, 'ALLOW_ANONYMOUS', True)
return decorator if not anonymous else lambda x: x
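# A minimal usage sketch (the view name is hypothetical): wrap another
# decorator so it is only applied when ALLOW_ANONYMOUS is False in settings:
#
#     @check_anonymous(login_required)
#     def some_public_view(request):
#         ...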
@login_required
def userScanList(request):
template = 'radio/userscanlist.html'
if request.method == "POST":
form = UserScanForm(request.POST)
if form.is_valid():
print('Form Valid')
name = form.cleaned_data['name']
tgs = form.cleaned_data['talkgroups']
print('Form Data [{}] [{}]'.format(name, tgs))
sl = ScanList()
sl.created_by = request.user
sl.name = name
sl.description = name
sl.save()
sl.talkgroups.add(*tgs)
return redirect('user_profile')
else:
print('Form not Valid')
else:
form = UserScanForm()
return render(request, template, {'form': form})
@login_required
def userProfile(request):
template = 'radio/profile.html'
if request.method == "POST":
form = UserForm(request.POST, instance=request.user)
if form.is_valid():
form.save()
return redirect('user_profile')
else:
profile_form = UserForm(instance=request.user)
profile = Profile.objects.get(user=request.user)
scan_lists = ScanList.objects.filter(created_by=request.user)
return render(request, template, {'profile_form': profile_form, 'profile': profile, 'scan_lists': scan_lists} )
def agencyList(request):
template = 'radio/agency_list.html'
query_data = Agency.objects.exclude(short='_DEF_').order_by('name')
return render(request, template, {'agency': query_data})
def cityListView(request):
template = 'radio/city_list.html'
query_data = City.objects.filter(visible=True)
return render(request, template, {'cities': query_data})
def cityDetailView(request, slug):
template = 'radio/city_detail.html'
query_data = City.objects.get(slug=slug)
return render(request, template, {'object': query_data})
def TransDetailView(request, slug):
template = 'radio/transmission_detail.html'
status = 'Good'
try:
query_data = Transmission.objects.filter(slug=slug)
if not query_data:
raise Http404
except Transmission.DoesNotExist:
raise Http404
query_data2 = limit_transmission_history(request, query_data)
if not query_data2 and not query_data[0].incident_set.filter(public=True):
query_data[0].audio_file = None
status = 'Expired'
restricted, new_query = restrict_talkgroups(request, query_data)
if not new_query:
raise Http404
return render(request, template, {'object': query_data[0], 'status': status})
def transDownloadView(request, slug):
import requests
try:
query_data = Transmission.objects.filter(slug=slug)
if not query_data:
raise Http404
except Transmission.DoesNotExist:
raise Http404
query_data2 = limit_transmission_history(request, query_data)
    if not query_data2: raise Http404 # Just raise 404 if it's too old
restricted, new_query = restrict_talkgroups(request, query_data)
if not new_query: raise Http404
trans = new_query[0]
if trans.audio_file_type == 'm4a':
audio_type = 'audio/m4a'
else:
audio_type = 'audio/mp3'
response = HttpResponse(content_type=audio_type)
start_time = timezone.localtime(trans.start_datetime).strftime('%Y%m%d_%H%M%S')
filename = '{}_{}.{}'.format(start_time, trans.talkgroup_info.slug, trans.audio_file_type)
response['Content-Disposition'] = 'attachment; filename="{}"'.format(filename)
url = 'https:{}{}.{}'.format(trans.audio_url, trans.audio_file, trans.audio_file_type)
if trans.audio_url[:2] != '//':
url = 'http:'
if request.is_secure():
url = 'https:'
url += '//{}/{}{}.{}'.format(request.get_host(), trans.audio_url, trans.audio_file, trans.audio_file_type)
data = requests.get(url, verify=False)
response.write(data.content)
return response
class TransmissionViewSet(viewsets.ModelViewSet):
"""
    API endpoint that allows transmissions to be viewed or edited.
"""
queryset = Transmission.objects.none()
serializer_class = TransmissionSerializer
def get_serializer_context(self):
return {'request': self.request}
class ScanListViewSet(viewsets.ModelViewSet):
queryset = ScanList.objects.all().prefetch_related('talkgroups')
serializer_class = ScanListSerializer
class TalkGroupViewSet(viewsets.ModelViewSet):
"""
    API endpoint that allows talk groups to be viewed or edited.
"""
# queryset = TalkGroup.objects.filter(public=True)
serializer_class = TalkGroupSerializer
base_name = 'TalkGroup'
def get_queryset(self):
if settings.ACCESS_TG_RESTRICT:
tg = allowed_tg_list(self.request.user)
else:
tg = TalkGroup.objects.filter(public=True)
return tg
class TransmissionView(ListView):
model = Transmission
paginate_by = 50
def ScanListFilter(request, filter_val):
template = 'radio/transmission.html'
return render(request, template, {'filter_data': filter_val, 'api_url': '/api_v1/ScanList'})
def TalkGroupFilterNew(request, filter_val):
template = 'radio/transmission_play.html'
return render(request, template, {'filter_data': filter_val})
def TalkGroupFilterjq(request, filter_val):
template = 'radio/transmission_list_jq.html'
return TalkGroupFilterBase(request, filter_val, template)
def TalkGroupFilter(request, filter_val):
template = 'radio/transmission_list.html'
return TalkGroupFilterBase(request, filter_val, template)
# Open to anyone
def Generic(request, page_name):
template = 'radio/generic.html'
query_data = WebHtml.objects.get(name=page_name)
return render(request, template, {'html_object': query_data})
def get_user_profile(user):
if user.is_authenticated:
user_profile = Profile.objects.get(user=user)
else:
try:
anon_user = User.objects.get(username='ANONYMOUS_USER')
except User.DoesNotExist:
            raise ImproperlyConfigured('ANONYMOUS_USER is missing from the User table; was "./manage.py migrate" not run?')
user_profile = Profile.objects.get(user=anon_user)
return user_profile
def get_history_allow(user):
user_profile = get_user_profile(user)
if user_profile:
history_minutes = user_profile.plan.history
else:
history_minutes = settings.ANONYMOUS_TIME
return history_minutes
def limit_transmission_history(request, query_data):
history_minutes = get_history_allow(request.user)
if history_minutes > 0:
time_threshold = timezone.now() - timedelta(minutes=history_minutes)
query_data = query_data.filter(start_datetime__gt=time_threshold)
return query_data
def limit_transmission_history_six_months(request, query_data):
history_minutes = 259200
time_threshold = timezone.now() - timedelta(minutes=history_minutes)
query_data = query_data.filter(start_datetime__gt=time_threshold)
return query_data
def allowed_tg_list(user):
user_profile = get_user_profile(user)
tg_list = None
for group in user_profile.talkgroup_access.all():
if tg_list is None:
tg_list = group.talkgroups.all()
else:
tg_list = tg_list | group.talkgroups.all()
if tg_list:
tg_list = tg_list.distinct()
else:
# Set blank talkgroup queryset
tg_list = TalkGroup.objects.none()
return tg_list
def restrict_talkgroups(request, query_data):
    ''' Check that the user may view each of the talkgroups in query_data.
        Returns (was_restricted, new query_data).
    '''
if not settings.ACCESS_TG_RESTRICT:
return False, query_data
tg_list = allowed_tg_list(request.user)
query_data = query_data.filter(talkgroup_info__in=tg_list)
    return True, query_data
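# A minimal sketch of the access-control pattern the views below follow
# (function names are from this module; the starting queryset is illustrative):
#
#     qs = Transmission.objects.all()
#     qs = limit_transmission_history(request, qs)
#     restricted, qs = restrict_talkgroups(request, qs)
#     if not qs:
#         raise Http404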
def TalkGroupFilterBase(request, filter_val, template):
try:
tg = TalkGroup.objects.get(alpha_tag__startswith=filter_val)
except TalkGroup.DoesNotExist:
raise Http404
try:
query_data = Transmission.objects.filter(talkgroup_info=tg).prefetch_related('units')
        #query_data = limit_transmission_history(request, query_data)
        query_data = limit_transmission_history_six_months(request, query_data)
        restricted, query_data = restrict_talkgroups(request, query_data)
except Transmission.DoesNotExist:
raise Http404
return render(request, template, {'object_list': query_data, 'filter_data': filter_val})
class ScanViewSet(generics.ListAPIView):
serializer_class = TransmissionSerializer
def get_queryset(self):
scanlist = self.kwargs['filter_val']
try:
sl = ScanList.objects.get(slug__iexact=scanlist)
except ScanList.DoesNotExist:
if scanlist == 'default':
tg = TalkGroup.objects.all()
else:
print("Scan list does not match")
raise
else:
tg = sl.talkgroups.all()
rc_data = Transmission.objects.filter(talkgroup_info__in=tg).prefetch_related('units').prefetch_related('talkgroup_info')
#rc_data = limit_transmission_history(self.request, rc_data)
rc_data = limit_transmission_history_six_months(self.request, rc_data)
restricted, rc_data = restrict_talkgroups(self.request, rc_data)
return rc_data
class IncViewSet(generics.ListAPIView):
serializer_class = TransmissionSerializer
def get_queryset(self):
inc = self.kwargs['filter_val']
try:
if self.request.user.is_staff:
rc_data = Incident.objects.get(slug__iexact=inc).transmissions.all()
else:
rc_data = Incident.objects.get(slug__iexact=inc, public=True).transmissions.all()
except Incident.DoesNotExist:
print("Incident does not exist")
raise
restricted, rc_data = restrict_talkgroups(self.request, rc_data)
return rc_data
class MessagePopUpViewSet(generics.ListAPIView):
serializer_class = MessageSerializer
def get_queryset(self):
return MessagePopUp.objects.filter(active=True)
class TalkGroupFilterViewSet(generics.ListAPIView):
serializer_class = TransmissionSerializer
def get_queryset(self):
tg_var = self.kwargs['filter_val']
        search_tgs = re.split(r'[+]', tg_var)
q = Q()
for stg in search_tgs:
q |= Q(common_name__iexact=stg)
q |= Q(slug__iexact=stg)
tg = TalkGroup.objects.filter(q)
rc_data = Transmission.objects.filter(talkgroup_info__in=tg).prefetch_related('units')
#rc_data = limit_transmission_history(self.request, rc_data)
rc_data = limit_transmission_history_six_months(self.request, rc_data)
restricted, rc_data = restrict_talkgroups(self.request, rc_data)
return rc_data
class UnitFilterViewSet(generics.ListAPIView):
serializer_class = TransmissionSerializer
def get_queryset(self):
unit_var = self.kwargs['filter_val']
        search_unit = re.split(r'[+]', unit_var)
q = Q()
for s_unit in search_unit:
q |= Q(slug__iexact=s_unit)
units = Unit.objects.filter(q)
rc_data = Transmission.objects.filter(units__in=units).filter(talkgroup_info__public=True).prefetch_related('units').distinct()
#rc_data = limit_transmission_history(self.request, rc_data)
rc_data = limit_transmission_history_six_months(self.request, rc_data)
restricted, rc_data = restrict_talkgroups(self.request, rc_data)
return rc_data
class TalkGroupList(ListView):
model = TalkGroup
context_object_name = 'talkgroups'
template_name = 'radio/talkgroup_list.html'
#queryset = TalkGroup.objects.filter(public=True)
def get_queryset(self):
if settings.ACCESS_TG_RESTRICT:
tg = allowed_tg_list(self.request.user)
else:
tg = TalkGroup.objects.filter(public=True)
if self.request.GET.get('recent', None):
tg = tg.order_by('-recent_usage', '-last_transmission')
return tg
@login_required
@csrf_protect
def upgrade(request):
if request.method == 'POST':
form = PaymentForm(request.POST)
if not form.is_valid():
return render(
request,
'registration/upgrade.html',
{'form': form},
)
try:
plan = form.cleaned_data.get('plan_type')
card_name = form.cleaned_data.get('cardholder_name')
stripe_cust = None
logger.error('Change plan to {} for customer {} Card Name {}'.format(plan, stripe_cust, card_name))
stripe_info = None
except stripe.InvalidRequestError as e:
messages.error(request, "Error with stripe {}".format(e))
logger.error("Error with stripe {}".format(e))
return render(
request,
'registration/upgrade.html',
{'form': form},
)
except stripe.CardError as e:
messages.error(request, "<b>Error</b> Sorry there was an error with processing your card:<br>{}".format(e))
logger.error("Error with stripe user card{}".format(e))
return render(
request,
'registration/upgrade.html',
{'form': form},
)
print('------ STRIPE DEBUG -----')
pprint(stripe_info, sys.stderr)
return render(
request,
'registration/upgrade_complete.html',
)
else:
form = PaymentForm()
return render(
request,
'registration/upgrade.html',
{'form': form, },
)
@csrf_protect
def register(request):
if request.method == 'POST':
form = RegistrationForm(request.POST)
if form.is_valid():
user = User.objects.create_user(
username=form.cleaned_data['username'],
password=form.cleaned_data['password1'],
email=form.cleaned_data['email']
)
username = form.cleaned_data['username']
password = form.cleaned_data['password1']
new_user = authenticate(username=username, password=password)
if new_user is not None:
if new_user.is_active:
#stripe_actions.customers.create(user=new_user)
login(request, new_user)
return HttpResponseRedirect('/scan/default/')
else:
# this would be weird to get here
return HttpResponseRedirect('/register/success/')
else:
return HttpResponseRedirect('/register/success/')
else:
form = RegistrationForm()
return render(
request,
'registration/register.html',
{ 'form': form },
)
def register_success(request):
return render(
request,
'registration/success.html', {},
)
class MenuScanListViewSet(viewsets.ModelViewSet):
serializer_class = MenuScanListSerializer
queryset = MenuScanList.objects.all()
class MenuTalkGroupListViewSet(viewsets.ModelViewSet):
serializer_class = MenuTalkGroupListSerializer
queryset = MenuTalkGroupList.objects.all()
class UnitUpdateView(PermissionRequiredMixin, UpdateView):
model = Unit
form_class = UnitEditForm
success_url = '/unitupdategood/'
permission_required = ('radio.change_unit')
def form_valid(self, form):
try:
update_unit_email = SiteOption.objects.get(name='SEND_ADMIN_EMAIL_ON_UNIT_NAME')
if update_unit_email.value_boolean_or_string() == True:
                unit = form.save()
                send_mail(
                    'Unit ID Change',
                    'User {} updated unit ID {} Now {}'.format(self.request.user, unit.dec_id, unit.description),
settings.SERVER_EMAIL,
[ mail for name, mail in settings.ADMINS],
fail_silently=False,
)
except SiteOption.DoesNotExist:
pass
return super().form_valid(form)
def ScanDetailsList(request, name):
template = 'radio/scandetaillist.html'
scanlist = None
try:
scanlist = ScanList.objects.get(name=name)
except ScanList.DoesNotExist:
if name == 'default':
query_data = TalkGroup.objects.all()
else:
raise Http404
if scanlist:
query_data = scanlist.talkgroups.all()
return render(request, template, {'object_list': query_data, 'scanlist': scanlist, 'request': request})
@login_required
@csrf_protect
def cancel_plan(request):
template = 'radio/cancel.html'
if request.method == 'POST':
msg = 'User {} ({}) wants to cancel'.format(request.user.username, request.user.pk)
mail_admins('Cancel Subscription', msg )
return render(request, template, {'complete': True})
else:
return render(request, template, {'complete': False})
@csrf_protect
def plans(request):
token = None
has_verified_email = False
plans = None
default_plan = None
if request.method == 'POST':
template = 'radio/subscribed.html'
token = request.POST.get('stripeToken')
plan = request.POST.get('plan')
# See if this user already has a stripe account
try:
stripe_cust = None
except ObjectDoesNotExist:
#stripe_actions.customers.create(user=request.user)
stripe_cust = None
try:
stripe_info = None #stripe_actions.subscriptions.create(customer=stripe_cust, plan=plan, token=request.POST.get('stripeToken'))
except Exception as e: #stripe.CardError as e:
template = 'radio/charge_failed.html'
logger.error("Error with stripe user card{}".format(e))
return render(request, template, {'error_msg': e })
for t in request.POST:
logger.error("{} {}".format(t, request.POST[t]))
else:
template = 'radio/plans.html'
plans = StripePlanMatrix.objects.filter(order__lt=99).filter(active=True)
default_plan = Plan.objects.get(pk=Plan.DEFAULT_PK)
# Check if users email address is verified
if request.user.is_authenticated:
verified_email = allauth_emailaddress.objects.filter(user=request.user, primary=True, verified=True)
if verified_email:
has_verified_email = True
return render(request, template, {'token': token, 'verified_email': has_verified_email, 'plans': plans, 'default_plan': default_plan} )
def incident(request, inc_slug):
template = 'radio/player_main.html'
try:
if request.user.is_staff:
inc = Incident.objects.get(slug=inc_slug)
else:
inc = Incident.objects.get(slug=inc_slug, public=True)
except Incident.DoesNotExist:
raise Http404
return render(request, template, {'inc':inc})
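# A minimal sketch of the JSON body import_transmission() below expects; the
# field names come from the request_data lookups in the view, the values are
# illustrative only (optional keys: "audio_file_play_length", "has_audio"):
#
#     {
#         "auth_token": "<ADD_TRANS_AUTH_TOKEN from settings>",
#         "system": "Example System",
#         "source": "site-1",
#         "talkgroup": 1234,
#         "start_time": 1500000000,
#         "stop_time": 1500000030,
#         "audio_filename": "1500000000_1234",
#         "audio_file_url_path": "/audio/",
#         "audio_file_type": "m4a",
#         "freq": 853237500,
#         "srcList": [{"src": 5678}]
#     }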
@csrf_exempt
def import_transmission(request):
if request.method == "POST":
settings_auth_token = getattr(settings, 'ADD_TRANS_AUTH_TOKEN', None)
        if settings_auth_token == '7cf5857c61284': # Check if the default is still set
return HttpResponse('Unauthorized, default ADD_TRANS_AUTH_TOKEN still set.', status=401)
body_unicode = request.body.decode('utf-8')
request_data = json.loads(body_unicode)
auth_token = request_data.get('auth_token')
if auth_token != settings_auth_token:
return HttpResponse('Unauthorized, check auth_token', status=401)
# System
system_name = request_data.get('system')
if system_name is None:
return HttpResponse('system is missing', status=400)
system, created = System.objects.get_or_create(name=system_name)
# Source
source_name = request_data.get('source')
if source_name is None:
return HttpResponse('source is missing', status=400)
source, created = Source.objects.get_or_create(description=source_name)
# TalkGroup
tg_dec = request_data.get('talkgroup')
if tg_dec is None:
return HttpResponse('talkgroup is missing', status=400)
try:
tg = TalkGroup.objects.get(dec_id=tg_dec, system=system)
except TalkGroup.DoesNotExist:
name = '#{}'.format(tg_dec)
tg = TalkGroup.objects.create(dec_id=tg_dec, system=system, alpha_tag=name, description='TalkGroup {}'.format(name))
# Transmission start
epoc_ts = request_data.get('start_time')
start_dt = datetime.fromtimestamp(int(epoc_ts), pytz.UTC)
epoc_end_ts = request_data.get('stop_time')
end_dt = datetime.fromtimestamp(int(epoc_end_ts), pytz.UTC)
play_length = epoc_end_ts - epoc_ts
audio_filename = request_data.get('audio_filename')
audio_file_url_path = request_data.get('audio_file_url_path')
        freq = request_data.get('freq') # This should be deprecated
audio_file_type = request_data.get('audio_file_type')
audio_file_play_length = request_data.get('audio_file_play_length', play_length)
has_audio = request_data.get('has_audio', True)
t = Transmission( start_datetime = start_dt,
end_datetime = end_dt,
audio_file = audio_filename,
talkgroup = tg_dec,
talkgroup_info = tg,
freq = int(float(freq)),
emergency = False,
source = source,
system = system,
audio_file_url_path = audio_file_url_path,
audio_file_type = audio_file_type,
play_length = audio_file_play_length,
has_audio = has_audio,
)
t.save()
# Units
count = 0
for unit in request_data.get('srcList'):
try:
trans_unit = unit['src']
except TypeError:
trans_unit = unit
u,created = Unit.objects.get_or_create(dec_id=trans_unit,system=t.system)
tu = TranmissionUnit.objects.create(transmission=t, unit=u, order=count)
count=count+1
return HttpResponse("Transmission added [{}]".format(t.pk))
else:
return HttpResponse(status=405)
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Used render templates for datastore admin."""
import base64
import collections
import datetime
import logging
import os
import random
from google.appengine.datastore import entity_pb
from google.appengine.api import datastore
from google.appengine.api import lib_config
from google.appengine.api import memcache
from google.appengine.api import users
from google.appengine.datastore import datastore_rpc
from google.appengine.ext import db
from google.appengine.ext import webapp
from google.appengine.ext.db import stats
from google.appengine.ext.mapreduce import control
from google.appengine.ext.mapreduce import model
from google.appengine.ext.mapreduce import operation
from google.appengine.ext.mapreduce import util
from google.appengine.ext.webapp import _template
MEMCACHE_NAMESPACE = '_ah-datastore_admin'
XSRF_VALIDITY_TIME = 600
KINDS_AND_SIZES_VAR = 'kinds_and_sizes'
MAPREDUCE_MIN_SHARDS = 8
MAPREDUCE_DEFAULT_SHARDS = 32
MAPREDUCE_MAX_SHARDS = 256
DATASTORE_ADMIN_OPERATION_KIND = '_AE_DatastoreAdmin_Operation'
BACKUP_INFORMATION_KIND = '_AE_Backup_Information'
BACKUP_INFORMATION_FILES_KIND = '_AE_Backup_Information_Kind_Files'
DATASTORE_ADMIN_KINDS = (DATASTORE_ADMIN_OPERATION_KIND,
BACKUP_INFORMATION_KIND,
BACKUP_INFORMATION_FILES_KIND)
class ConfigDefaults(object):
"""Configurable constants.
To override datastore_admin configuration values, define values like this
in your appengine_config.py file (in the root of your app):
    datastore_admin_MAPREDUCE_PATH = '/_ah/mapreduce'
"""
BASE_PATH = '/_ah/datastore_admin'
MAPREDUCE_PATH = '/_ah/mapreduce'
DEFERRED_PATH = BASE_PATH + '/queue/deferred'
CLEANUP_MAPREDUCE_STATE = True
config = lib_config.register('datastore_admin', ConfigDefaults.__dict__)
config.BASE_PATH
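# lib_config resolves overrides by prefixing the attribute name with the
# registered prefix, so an appengine_config.py entry like the following
# (the path shown is hypothetical) replaces ConfigDefaults.MAPREDUCE_PATH:
#
#     datastore_admin_MAPREDUCE_PATH = '/custom/mapreduce'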
def IsKindNameVisible(kind_name):
return not (kind_name.startswith('__') or kind_name in DATASTORE_ADMIN_KINDS)
def RenderToResponse(handler, template_file, template_params):
"""Render the given template_file using template_vals and write to response.
Args:
handler: the handler whose response we should render to
template_file: the file name only of the template file we are using
template_params: the parameters used to render the given template
"""
template_params = _GetDefaultParams(template_params)
rendered = _template.render(_GetTemplatePath(template_file), template_params)
handler.response.out.write(rendered)
def _GetTemplatePath(template_file):
"""Return the expected path for the template to render.
Args:
template_file: simple file name of template to render.
Returns:
path of template to render.
"""
return os.path.join(
os.path.dirname(__file__), 'templates', template_file)
def _GetDefaultParams(template_params):
"""Update template_params to always contain necessary paths and never None."""
if not template_params:
template_params = {}
template_params.update({
'base_path': config.BASE_PATH,
'mapreduce_path': config.MAPREDUCE_PATH,
})
return template_params
def CreateXsrfToken(action):
"""Generate a token to be passed with a form for XSRF protection.
Args:
action: action to restrict token to
Returns:
suitably random token which is only valid for ten minutes and, if the user
is authenticated, is only valid for the user that generated it.
"""
user_str = _MakeUserStr()
token = base64.b64encode(
''.join(chr(int(random.random()*255)) for _ in range(0, 64)))
memcache.set(token,
(user_str, action),
time=XSRF_VALIDITY_TIME,
namespace=MEMCACHE_NAMESPACE)
return token
def ValidateXsrfToken(token, action):
"""Validate a given XSRF token by retrieving it from memcache.
If the token has not been evicted from memcache (past ten minutes) and the
user strings are equal, then this is a valid token.
Args:
token: token to validate from memcache.
action: action that token should correspond to
Returns:
True if the token exists in memcache and the user strings are equal,
False otherwise.
"""
user_str = _MakeUserStr()
token_obj = memcache.get(token, namespace=MEMCACHE_NAMESPACE)
if not token_obj:
return False
token_str, token_action = token_obj
if user_str != token_str or action != token_action:
return False
return True
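# A minimal sketch of the round trip (the action string and error handling
# are illustrative): a handler embeds the token in its form and the POST
# handler rejects the request unless the token validates:
#
#     token = CreateXsrfToken('backup')
#     ...
#     if not ValidateXsrfToken(token, 'backup'):
#         raise Exception('invalid XSRF token')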
def CacheStats(formatted_results):
"""Cache last retrieved kind size values in memcache.
Args:
        formatted_results: list of dictionaries of the form returned by
main._PresentableKindStats.
"""
kinds_and_sizes = dict((kind['kind_name'], kind['total_bytes'])
for kind in formatted_results)
memcache.set(KINDS_AND_SIZES_VAR,
kinds_and_sizes,
namespace=MEMCACHE_NAMESPACE)
def RetrieveCachedStats():
"""Retrieve cached kind sizes from last datastore stats call.
Returns:
Dictionary mapping kind names to total bytes.
"""
return memcache.get(KINDS_AND_SIZES_VAR, namespace=MEMCACHE_NAMESPACE)
def _MakeUserStr():
"""Make a user string to use to represent the user. 'noauth' by default."""
user = users.get_current_user()
return user.nickname() if user else 'noauth'
def GetPrettyBytes(bytes_num, significant_digits=0):
"""Get a pretty print view of the given number of bytes.
This will give a string like 'X MBytes'.
Args:
bytes_num: the original number of bytes to pretty print.
significant_digits: number of digits to display after the decimal point.
Returns:
A string that has the pretty print version of the given bytes.
        If bytes_num is too big, the string 'Alot' will be returned.
"""
byte_prefixes = ['', 'K', 'M', 'G', 'T', 'P', 'E']
for i in range(0, 7):
exp = i * 10
if bytes_num < 1<<(exp + 10):
if i == 0:
formatted_bytes = str(bytes_num)
else:
formatted_bytes = '%.*f' % (significant_digits,
(bytes_num * 1.0 / (1<<exp)))
if formatted_bytes != '1':
plural = 's'
else:
plural = ''
return '%s %sByte%s' % (formatted_bytes, byte_prefixes[i], plural)
logging.error('Number too high to convert: %d', bytes_num)
return 'Alot'
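# Illustrative outputs of GetPrettyBytes (worked out from the code above):
#
#     GetPrettyBytes(1)        # -> '1 Byte'
#     GetPrettyBytes(2048)     # -> '2 KBytes'
#     GetPrettyBytes(1536, 1)  # -> '1.5 KBytes'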
def FormatThousands(value):
"""Format a numerical value, inserting commas as thousands separators.
Args:
value: An integer, float, or string representation thereof.
If the argument is a float, it is converted to a string using '%.2f'.
Returns:
A string with groups of 3 digits before the decimal point (if any)
separated by commas.
NOTE: We don't deal with whitespace, and we don't insert
commas into long strings of digits after the decimal point.
"""
if isinstance(value, float):
value = '%.2f' % value
else:
value = str(value)
if '.' in value:
head, tail = value.split('.', 1)
tail = '.' + tail
elif 'e' in value:
head, tail = value.split('e', 1)
tail = 'e' + tail
else:
head = value
tail = ''
sign = ''
if head.startswith('-'):
sign = '-'
head = head[1:]
while len(head) > 3:
tail = ',' + head[-3:] + tail
head = head[:-3]
return sign + head + tail
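# Illustrative outputs of FormatThousands (worked out from the code above):
#
#     FormatThousands(1234567)   # -> '1,234,567'
#     FormatThousands(-1234567)  # -> '-1,234,567'
#     FormatThousands(1234.5)    # -> '1,234.50'  (floats go through '%.2f')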
def TruncDelta(delta):
"""Strips microseconds from a timedelta."""
return datetime.timedelta(days=delta.days, seconds=delta.seconds)
def GetPrintableStrs(namespace, kinds):
"""Returns tuples describing affected kinds and namespace.
Args:
namespace: namespace being targeted.
kinds: list of kinds being targeted.
Returns:
(namespace_str, kind_str) tuple used for display to user.
"""
namespace_str = namespace or ''
if kinds:
kind_str = 'all %s entities' % ', '.join(kinds)
else:
kind_str = ''
return (namespace_str, kind_str)
def ParseKindsAndSizes(kinds):
"""Parses kind|size list and returns template parameters.
Args:
kinds: list of kinds to process.
Returns:
sizes_known: whether or not all kind objects have known sizes.
size_total: total size of objects with known sizes.
len(kinds) - 2: for template rendering of greater than 3 kinds.
"""
sizes_known = True
size_total = 0
kinds_and_sizes = RetrieveCachedStats()
if kinds_and_sizes:
for kind in kinds:
if kind in kinds_and_sizes:
size_total += kinds_and_sizes[kind]
else:
sizes_known = False
else:
sizes_known = False
if size_total:
size_total = GetPrettyBytes(size_total)
return sizes_known, size_total, len(kinds) - 2
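# A worked example of ParseKindsAndSizes, assuming the cached stats are
# {'Foo': 2048, 'Bar': 1024} (illustrative kind names and sizes):
#
#     ParseKindsAndSizes(['Foo', 'Bar', 'Baz'])
#     # -> (False, '3 KBytes', 1): 'Baz' has no cached size, the two known
#     #    kinds total 3072 bytes, and len(kinds) - 2 == 1.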
def _CreateDatastoreConfig():
"""Create datastore config for use during datastore admin operations."""
return datastore_rpc.Configuration(force_writes=True)
class MapreduceDoneHandler(webapp.RequestHandler):
"""Handler to delete data associated with successful MapReduce jobs."""
SUFFIX = 'mapreduce_done'
def post(self):
"""Mapreduce done callback to delete job data if it was successful."""
if 'Mapreduce-Id' in self.request.headers:
mapreduce_id = self.request.headers['Mapreduce-Id']
mapreduce_state = model.MapreduceState.get_by_job_id(mapreduce_id)
mapreduce_params = mapreduce_state.mapreduce_spec.params
keys = []
job_success = True
shard_states = model.ShardState.find_by_mapreduce_state(mapreduce_state)
for shard_state in shard_states:
keys.append(shard_state.key())
if not shard_state.result_status == 'success':
job_success = False
db_config = _CreateDatastoreConfig()
if job_success:
operation_key = mapreduce_params.get(
DatastoreAdminOperation.PARAM_DATASTORE_ADMIN_OPERATION)
if operation_key is None:
logging.error('Done callback for job %s without operation key.',
mapreduce_id)
else:
def tx():
operation = DatastoreAdminOperation.get(operation_key)
if mapreduce_id in operation.active_job_ids:
operation.active_jobs -= 1
operation.completed_jobs += 1
operation.active_job_ids.remove(mapreduce_id)
if not operation.active_jobs:
if operation.status == DatastoreAdminOperation.STATUS_ACTIVE:
operation.status = DatastoreAdminOperation.STATUS_COMPLETED
db.delete(DatastoreAdminOperationJob.all().ancestor(operation),
config=db_config)
operation.put(config=db_config)
if 'done_callback_handler' in mapreduce_params:
done_callback_handler = util.for_name(
mapreduce_params['done_callback_handler'])
if done_callback_handler:
done_callback_handler(operation, mapreduce_id, mapreduce_state)
else:
logging.error('done_callback_handler %s was not found',
mapreduce_params['done_callback_handler'])
db.run_in_transaction(tx)
if config.CLEANUP_MAPREDUCE_STATE:
keys.append(mapreduce_state.key())
keys.append(model.MapreduceControl.get_key_by_job_id(mapreduce_id))
db.delete(keys, config=db_config)
logging.info('State for successful job %s was deleted.', mapreduce_id)
else:
logging.info('Job %s was not successful so no state was deleted.',
mapreduce_id)
else:
logging.error('Done callback called without Mapreduce Id.')
class DatastoreAdminOperation(db.Model):
"""An entity to keep progress and status of datastore admin operation."""
STATUS_CREATED = 'Created'
STATUS_ACTIVE = 'Active'
STATUS_COMPLETED = 'Completed'
STATUS_FAILED = 'Failed'
STATUS_ABORTED = 'Aborted'
PARAM_DATASTORE_ADMIN_OPERATION = 'datastore_admin_operation'
DEFAULT_LAST_UPDATED_VALUE = datetime.datetime(1970, 1, 1)
description = db.TextProperty()
status = db.StringProperty(default=STATUS_CREATED)
active_jobs = db.IntegerProperty(default=0)
active_job_ids = db.StringListProperty()
completed_jobs = db.IntegerProperty(default=0)
last_updated = db.DateTimeProperty(default=DEFAULT_LAST_UPDATED_VALUE,
auto_now=True)
@classmethod
def kind(cls):
return DATASTORE_ADMIN_OPERATION_KIND
class DatastoreAdminOperationJob(db.Model):
"""An entity to keep track of started jobs to ensure idempotency.
This entity can be used during spawning additional jobs. It is
always stored as a child entity of DatastoreAdminOperation.
Entity key name is job unique id.
"""
pass
def StartOperation(description):
"""Start datastore admin operation.
Args:
description: operation description to be displayed to user.
Returns:
an instance of DatastoreAdminOperation.
"""
operation = DatastoreAdminOperation(
description=description,
id=db.allocate_ids(
db.Key.from_path(DatastoreAdminOperation.kind(), 1), 1)[0])
operation.put(config=_CreateDatastoreConfig())
return operation
def StartMap(operation_key,
job_name,
handler_spec,
reader_spec,
writer_spec,
mapper_params,
mapreduce_params=None,
start_transaction=True,
queue_name=None,
shard_count=MAPREDUCE_DEFAULT_SHARDS):
"""Start map as part of datastore admin operation.
Will increase number of active jobs inside the operation and start new map.
Args:
operation_key: Key of the DatastoreAdminOperation for current operation.
job_name: Map job name.
handler_spec: Map handler specification.
reader_spec: Input reader specification.
writer_spec: Output writer specification.
mapper_params: Custom mapper parameters.
mapreduce_params: Custom mapreduce parameters.
start_transaction: Specify if a new transaction should be started.
queue_name: the name of the queue that will be used by the M/R.
shard_count: the number of shards the M/R will try to use.
Returns:
resulting map job id as string.
"""
if not mapreduce_params:
mapreduce_params = {}
mapreduce_params[DatastoreAdminOperation.PARAM_DATASTORE_ADMIN_OPERATION] = (
str(operation_key))
mapreduce_params['done_callback'] = '%s/%s' % (config.BASE_PATH,
MapreduceDoneHandler.SUFFIX)
mapreduce_params['force_writes'] = 'True'
def tx():
operation = DatastoreAdminOperation.get(operation_key)
job_id = control.start_map(
job_name, handler_spec, reader_spec,
mapper_params,
output_writer_spec=writer_spec,
mapreduce_parameters=mapreduce_params,
base_path=config.MAPREDUCE_PATH,
shard_count=shard_count,
transactional=True,
queue_name=queue_name,
transactional_parent=operation)
operation.status = DatastoreAdminOperation.STATUS_ACTIVE
operation.active_jobs += 1
operation.active_job_ids = list(set(operation.active_job_ids + [job_id]))
operation.put(config=_CreateDatastoreConfig())
return job_id
if start_transaction:
return db.run_in_transaction(tx)
else:
return tx()
def RunMapForKinds(operation_key,
kinds,
job_name_template,
handler_spec,
reader_spec,
writer_spec,
mapper_params,
mapreduce_params=None,
queue_name=None):
"""Run mapper job for all entities in specified kinds.
Args:
operation_key: The key of the DatastoreAdminOperation to record all jobs.
kinds: list of entity kinds as strings.
job_name_template: template for naming individual mapper jobs. Can
reference %(kind)s and %(namespace)s formatting variables.
handler_spec: mapper handler specification.
reader_spec: reader specification.
writer_spec: writer specification.
mapper_params: custom parameters to pass to mapper.
mapreduce_params: dictionary parameters relevant to the whole job.
queue_name: the name of the queue that will be used by the M/R.
Returns:
Ids of all started mapper jobs as list of strings.
"""
jobs = []
try:
for kind in kinds:
mapper_params['entity_kind'] = kind
job_name = job_name_template % {'kind': kind, 'namespace':
mapper_params.get('namespaces', '')}
shard_count = GetShardCount(kind)
jobs.append(StartMap(operation_key, job_name, handler_spec, reader_spec,
writer_spec, mapper_params, mapreduce_params,
queue_name=queue_name, shard_count=shard_count))
return jobs
except BaseException:
AbortAdminOperation(operation_key,
_status=DatastoreAdminOperation.STATUS_FAILED)
raise
def GetShardCount(kind):
stat = stats.KindStat.all().filter('kind_name =', kind).get()
if stat:
return min(max(MAPREDUCE_MIN_SHARDS, stat.bytes // (32 * 1024 * 1024)),
MAPREDUCE_MAX_SHARDS)
return MAPREDUCE_DEFAULT_SHARDS
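# Illustrative GetShardCount behaviour (comment only, not from the original source; the
# MAPREDUCE_MIN_SHARDS / MAPREDUCE_MAX_SHARDS constants are defined elsewhere in this
# module): a kind whose KindStat reports roughly 320 MB of data yields 320MB // 32MB = 10
# shards, clamped into the [MAPREDUCE_MIN_SHARDS, MAPREDUCE_MAX_SHARDS] range; kinds with
# no cached statistics fall back to MAPREDUCE_DEFAULT_SHARDS.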
def AbortAdminOperation(operation_key,
_status=DatastoreAdminOperation.STATUS_ABORTED):
"""Aborts active jobs."""
operation = DatastoreAdminOperation.get(operation_key)
operation.status = _status
operation.put(config=_CreateDatastoreConfig())
for job in operation.active_job_ids:
logging.info('Aborting Job %s', job)
model.MapreduceControl.abort(job, config=_CreateDatastoreConfig())
def FixKeys(entity_proto, app_id):
"""Go over keys in the given entity and update the application id.
Args:
entity_proto: An EntityProto to be fixed up. All identifiable keys in the
proto will have the 'app' field reset to match app_id.
app_id: The desired application id, typically os.getenv('APPLICATION_ID').
"""
def FixKey(mutable_key):
mutable_key.set_app(app_id)
def FixPropertyList(property_list):
for prop in property_list:
prop_value = prop.mutable_value()
if prop_value.has_referencevalue():
FixKey(prop_value.mutable_referencevalue())
elif prop.meaning() == entity_pb.Property.ENTITY_PROTO:
embeded_entity_proto = entity_pb.EntityProto()
try:
embeded_entity_proto.ParsePartialFromString(prop_value.stringvalue())
except Exception:
logging.exception('Failed to fix-keys for property %s of %s',
prop.name(),
entity_proto.key())
else:
FixKeys(embeded_entity_proto, app_id)
prop_value.set_stringvalue(
embeded_entity_proto.SerializePartialToString())
if entity_proto.has_key() and entity_proto.key().path().element_size():
FixKey(entity_proto.mutable_key())
FixPropertyList(entity_proto.property_list())
FixPropertyList(entity_proto.raw_property_list())
class AllocateMaxIdPool(object):
"""Mapper pool to keep track of all allocated ids.
Runs allocate_ids rpcs when flushed.
This code relies on knowledge of the allocate_ids implementation details.
Though we don't plan to change the allocate_ids logic, we don't really
want to depend on it either. We use these details here to implement
batch-style remote allocate_ids.
"""
def __init__(self, app_id):
self.app_id = app_id
self.ns_to_path_to_max_id = collections.defaultdict(dict)
def allocate_max_id(self, key):
"""Record the key to allocate max id.
Args:
key: Datastore key.
"""
path = key.to_path()
if len(path) == 2:
path_tuple = ('Foo', 1)
key_id = path[-1]
else:
path_tuple = (path[0], path[1], 'Foo', 1)
key_id = None
for path_element in path[2:]:
if isinstance(path_element, (int, long)):
key_id = max(key_id, path_element)
if not isinstance(key_id, (int, long)):
return
path_to_max_id = self.ns_to_path_to_max_id[key.namespace()]
path_to_max_id[path_tuple] = max(key_id, path_to_max_id.get(path_tuple, 0))
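# Illustrative example (added comment, not from the original source): for a key path
# like ['Parent', 5, 'Child', 7] the pool records path_tuple ('Parent', 5, 'Foo', 1)
# with key_id 7 (the largest numeric id found past the root pair); for a root-level
# key path like ['Kind', 42] it records ('Foo', 1) with key_id 42. flush() then
# issues one AllocateIds call per recorded path to advance the id sequences.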
def flush(self):
for namespace, path_to_max_id in self.ns_to_path_to_max_id.iteritems():
for path, max_id in path_to_max_id.iteritems():
datastore.AllocateIds(db.Key.from_path(namespace=namespace,
_app=self.app_id,
*list(path)),
max=max_id)
self.ns_to_path_to_max_id = collections.defaultdict(dict)
class AllocateMaxId(operation.Operation):
"""Mapper operation to allocate max id."""
def __init__(self, key, app_id):
self.key = key
self.app_id = app_id
self.pool_id = 'allocate_max_id_%s_pool' % self.app_id
def __call__(self, ctx):
pool = ctx.get_pool(self.pool_id)
if not pool:
pool = AllocateMaxIdPool(self.app_id)
ctx.register_pool(self.pool_id, pool)
pool.allocate_max_id(self.key)
|
|
# pylint: skip-file
# flake8: noqa
class RouterException(Exception):
''' Router exception'''
pass
class RouterConfig(OpenShiftCLIConfig):
''' RouterConfig is a DTO for the router. '''
def __init__(self, rname, namespace, kubeconfig, router_options):
super(RouterConfig, self).__init__(rname, namespace, kubeconfig, router_options)
class Router(OpenShiftCLI):
''' Class to wrap the oc command line tools '''
def __init__(self,
router_config,
verbose=False):
''' Constructor for Router
a router consists of several parts:
- dc/router
- svc/router
- sa/router
- secret/router-certs
- clusterrolebinding/router-router-role
'''
super(Router, self).__init__(router_config.namespace, router_config.kubeconfig, verbose)
self.config = router_config
self.verbose = verbose
self.router_parts = [{'kind': 'dc', 'name': self.config.name},
{'kind': 'svc', 'name': self.config.name},
{'kind': 'sa', 'name': self.config.config_options['service_account']['value']},
{'kind': 'secret', 'name': self.config.name + '-certs'},
{'kind': 'clusterrolebinding', 'name': 'router-' + self.config.name + '-role'},
]
self.__prepared_router = None
self.dconfig = None
self.svc = None
self._secret = None
self._serviceaccount = None
self._rolebinding = None
@property
def prepared_router(self):
''' property for the prepared router'''
if self.__prepared_router is None:
results = self._prepare_router()
if not results or 'returncode' in results and results['returncode'] != 0:
if 'stderr' in results:
raise RouterException('Could not perform router preparation: %s' % results['stderr'])
raise RouterException('Could not perform router preparation.')
self.__prepared_router = results
return self.__prepared_router
@prepared_router.setter
def prepared_router(self, obj):
'''setter for the prepared_router'''
self.__prepared_router = obj
@property
def deploymentconfig(self):
''' property deploymentconfig'''
return self.dconfig
@deploymentconfig.setter
def deploymentconfig(self, config):
''' setter for property deploymentconfig '''
self.dconfig = config
@property
def service(self):
''' property for service '''
return self.svc
@service.setter
def service(self, config):
''' setter for property service '''
self.svc = config
@property
def secret(self):
''' property secret '''
return self._secret
@secret.setter
def secret(self, config):
''' setter for property secret '''
self._secret = config
@property
def serviceaccount(self):
''' property for serviceaccount '''
return self._serviceaccount
@serviceaccount.setter
def serviceaccount(self, config):
''' setter for property serviceaccount '''
self._serviceaccount = config
@property
def rolebinding(self):
''' property rolebinding '''
return self._rolebinding
@rolebinding.setter
def rolebinding(self, config):
''' setter for property rolebinding '''
self._rolebinding = config
def get_object_by_kind(self, kind):
'''return the stored object for the given kind'''
if re.match("^(dc|deploymentconfig)$", kind, flags=re.IGNORECASE):
return self.deploymentconfig
elif re.match("^(svc|service)$", kind, flags=re.IGNORECASE):
return self.service
elif re.match("^(sa|serviceaccount)$", kind, flags=re.IGNORECASE):
return self.serviceaccount
elif re.match("secret", kind, flags=re.IGNORECASE):
return self.secret
elif re.match("clusterrolebinding", kind, flags=re.IGNORECASE):
return self.rolebinding
return None
def get(self):
''' fetch the current state of all router parts and return them as a dict '''
self.service = None
self.deploymentconfig = None
self.serviceaccount = None
self.secret = None
self.rolebinding = None
for part in self.router_parts:
result = self._get(part['kind'], name=part['name'])
if result['returncode'] == 0 and part['kind'] == 'dc':
self.deploymentconfig = DeploymentConfig(result['results'][0])
elif result['returncode'] == 0 and part['kind'] == 'svc':
self.service = Service(content=result['results'][0])
elif result['returncode'] == 0 and part['kind'] == 'sa':
self.serviceaccount = ServiceAccount(content=result['results'][0])
elif result['returncode'] == 0 and part['kind'] == 'secret':
self.secret = Secret(content=result['results'][0])
elif result['returncode'] == 0 and part['kind'] == 'clusterrolebinding':
self.rolebinding = RoleBinding(content=result['results'][0])
return {'deploymentconfig': self.deploymentconfig,
'service': self.service,
'serviceaccount': self.serviceaccount,
'secret': self.secret,
'clusterrolebinding': self.rolebinding,
}
def exists(self):
'''return whether the dc, svc, secret, and sa parts all exist '''
if self.deploymentconfig and self.service and self.secret and self.serviceaccount:
return True
return False
def delete(self):
'''delete the router parts and return the results '''
parts = []
for part in self.router_parts:
parts.append(self._delete(part['kind'], part['name']))
rval = 0
for part in parts:
if part['returncode'] != 0 and 'already exist' not in part['stderr']:
rval = part['returncode']
return {'returncode': rval, 'results': parts}
def add_modifications(self, deploymentconfig):
'''modify the deployment config'''
# We want modifications in the form of edits coming in from the module.
# Let's apply these here
edit_results = []
for edit in self.config.config_options['edits'].get('value', []):
if edit['action'] == 'put':
edit_results.append(deploymentconfig.put(edit['key'],
edit['value']))
if edit['action'] == 'update':
edit_results.append(deploymentconfig.update(edit['key'],
edit['value'],
edit.get('index', None),
edit.get('curr_value', None)))
if edit['action'] == 'append':
edit_results.append(deploymentconfig.append(edit['key'],
edit['value']))
if edit_results and not any([res[0] for res in edit_results]):
return None
return deploymentconfig
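# The 'edits' option consumed above is expected to be a list of dicts; an
# illustrative shape (not taken from the original source, keys are hypothetical):
#   [{'action': 'put', 'key': 'spec.replicas', 'value': 2},
#    {'action': 'append', 'key': 'spec.template.spec.containers[0].env',
#     'value': {'name': 'ROUTER_LOG_LEVEL', 'value': 'debug'}}]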
# pylint: disable=too-many-branches
def _prepare_router(self):
'''prepare router for instantiation'''
# if cacert, key, and cert were passed, combine them into a pem file
if (self.config.config_options['cacert_file']['value'] and
self.config.config_options['cert_file']['value'] and
self.config.config_options['key_file']['value']):
router_pem = '/tmp/router.pem'
with open(router_pem, 'w') as rfd:
rfd.write(open(self.config.config_options['cert_file']['value']).read())
rfd.write(open(self.config.config_options['key_file']['value']).read())
if self.config.config_options['cacert_file']['value'] and \
os.path.exists(self.config.config_options['cacert_file']['value']):
rfd.write(open(self.config.config_options['cacert_file']['value']).read())
atexit.register(Utils.cleanup, [router_pem])
self.config.config_options['default_cert']['value'] = router_pem
elif self.config.config_options['default_cert']['value'] is None:
# No certificate was passed to us. do not pass one to oc adm router
self.config.config_options['default_cert']['include'] = False
options = self.config.to_option_list(ascommalist='labels')
cmd = ['router', self.config.name]
cmd.extend(options)
cmd.extend(['--dry-run=True', '-o', 'json'])
results = self.openshift_cmd(cmd, oadm=True, output=True, output_type='json')
# pylint: disable=maybe-no-member
if results['returncode'] != 0 or 'items' not in results['results']:
return results
oc_objects = {'DeploymentConfig': {'obj': None, 'path': None, 'update': False},
'Secret': {'obj': None, 'path': None, 'update': False},
'ServiceAccount': {'obj': None, 'path': None, 'update': False},
'ClusterRoleBinding': {'obj': None, 'path': None, 'update': False},
'Service': {'obj': None, 'path': None, 'update': False},
}
# pylint: disable=invalid-sequence-index
for res in results['results']['items']:
if res['kind'] == 'DeploymentConfig':
oc_objects['DeploymentConfig']['obj'] = DeploymentConfig(res)
elif res['kind'] == 'Service':
oc_objects['Service']['obj'] = Service(res)
elif res['kind'] == 'ServiceAccount':
oc_objects['ServiceAccount']['obj'] = ServiceAccount(res)
elif res['kind'] == 'Secret':
oc_objects['Secret']['obj'] = Secret(res)
elif res['kind'] == 'ClusterRoleBinding':
oc_objects['ClusterRoleBinding']['obj'] = RoleBinding(res)
# Currently only deploymentconfig needs updating
# Verify we got a deploymentconfig
if not oc_objects['DeploymentConfig']['obj']:
return results
# add modifications added
oc_objects['DeploymentConfig']['obj'] = self.add_modifications(oc_objects['DeploymentConfig']['obj'])
for oc_type, oc_data in oc_objects.items():
if oc_data['obj'] is not None:
oc_data['path'] = Utils.create_tmp_file_from_contents(oc_type, oc_data['obj'].yaml_dict)
return oc_objects
def create(self):
'''Create a router
This includes the different parts:
- deploymentconfig
- service
- serviceaccount
- secrets
- clusterrolebinding
'''
results = []
self.needs_update()
# pylint: disable=maybe-no-member
for kind, oc_data in self.prepared_router.items():
if oc_data['obj'] is not None:
time.sleep(1)
if self.get_object_by_kind(kind) is None:
results.append(self._create(oc_data['path']))
elif oc_data['update']:
results.append(self._replace(oc_data['path']))
rval = 0
for result in results:
if result['returncode'] != 0 and 'already exist' not in result['stderr']:
rval = result['returncode']
return {'returncode': rval, 'results': results}
def update(self):
'''run update for the router. This performs a replace'''
results = []
# pylint: disable=maybe-no-member
for _, oc_data in self.prepared_router.items():
if oc_data['update']:
results.append(self._replace(oc_data['path']))
rval = 0
for result in results:
if result['returncode'] != 0:
rval = result['returncode']
return {'returncode': rval, 'results': results}
# pylint: disable=too-many-return-statements,too-many-branches
def needs_update(self):
''' check to see if we need to update '''
# ServiceAccount:
# Need to determine changes from the pregenerated ones from the original
# Since these are auto generated, we can skip
skip = ['secrets', 'imagePullSecrets']
if self.serviceaccount is None or \
not Utils.check_def_equal(self.prepared_router['ServiceAccount']['obj'].yaml_dict,
self.serviceaccount.yaml_dict,
skip_keys=skip,
debug=self.verbose):
self.prepared_router['ServiceAccount']['update'] = True
# Secret:
# See if one was generated from our dry-run and verify it if needed
if self.prepared_router['Secret']['obj']:
if not self.secret:
self.prepared_router['Secret']['update'] = True
if self.secret is None or \
not Utils.check_def_equal(self.prepared_router['Secret']['obj'].yaml_dict,
self.secret.yaml_dict,
skip_keys=skip,
debug=self.verbose):
self.prepared_router['Secret']['update'] = True
# Service:
# Fix the ports to have protocol=TCP
for port in self.prepared_router['Service']['obj'].get('spec.ports'):
port['protocol'] = 'TCP'
skip = ['portalIP', 'clusterIP', 'sessionAffinity', 'type']
if self.service is None or \
not Utils.check_def_equal(self.prepared_router['Service']['obj'].yaml_dict,
self.service.yaml_dict,
skip_keys=skip,
debug=self.verbose):
self.prepared_router['Service']['update'] = True
# DeploymentConfig:
# Router needs some exceptions.
# We do not want to check the autogenerated password for stats admin
if self.deploymentconfig is not None:
if not self.config.config_options['stats_password']['value']:
for idx, env_var in enumerate(self.prepared_router['DeploymentConfig']['obj'].get(\
'spec.template.spec.containers[0].env') or []):
if env_var['name'] == 'STATS_PASSWORD':
env_var['value'] = \
self.deploymentconfig.get('spec.template.spec.containers[0].env[%s].value' % idx)
break
# dry-run doesn't add the protocol to the ports section. We will manually do that.
for idx, port in enumerate(self.prepared_router['DeploymentConfig']['obj'].get(\
'spec.template.spec.containers[0].ports') or []):
if 'protocol' not in port:
port['protocol'] = 'TCP'
# These are different when generating
skip = ['dnsPolicy',
'terminationGracePeriodSeconds',
'restartPolicy', 'timeoutSeconds',
'livenessProbe', 'readinessProbe',
'terminationMessagePath', 'hostPort',
'defaultMode',
]
if self.deploymentconfig is None or \
not Utils.check_def_equal(self.prepared_router['DeploymentConfig']['obj'].yaml_dict,
self.deploymentconfig.yaml_dict,
skip_keys=skip,
debug=self.verbose):
self.prepared_router['DeploymentConfig']['update'] = True
# Check if any of the parts need updating, if so, return True
# else, no need to update
# pylint: disable=no-member
return any([self.prepared_router[oc_type]['update'] for oc_type in self.prepared_router.keys()])
@staticmethod
def run_ansible(params, check_mode):
'''run ansible idempotent code'''
rconfig = RouterConfig(params['name'],
params['namespace'],
params['kubeconfig'],
{'default_cert': {'value': params['default_cert'], 'include': True},
'cert_file': {'value': params['cert_file'], 'include': False},
'key_file': {'value': params['key_file'], 'include': False},
'images': {'value': params['images'], 'include': True},
'latest_images': {'value': params['latest_images'], 'include': True},
'labels': {'value': params['labels'], 'include': True},
'ports': {'value': ','.join(params['ports']), 'include': True},
'replicas': {'value': params['replicas'], 'include': True},
'selector': {'value': params['selector'], 'include': True},
'service_account': {'value': params['service_account'], 'include': True},
'router_type': {'value': params['router_type'], 'include': False},
'host_network': {'value': params['host_network'], 'include': True},
'external_host': {'value': params['external_host'], 'include': True},
'external_host_vserver': {'value': params['external_host_vserver'],
'include': True},
'external_host_insecure': {'value': params['external_host_insecure'],
'include': True},
'external_host_partition_path': {'value': params['external_host_partition_path'],
'include': True},
'external_host_username': {'value': params['external_host_username'],
'include': True},
'external_host_password': {'value': params['external_host_password'],
'include': True},
'external_host_private_key': {'value': params['external_host_private_key'],
'include': True},
'expose_metrics': {'value': params['expose_metrics'], 'include': True},
'metrics_image': {'value': params['metrics_image'], 'include': True},
'stats_user': {'value': params['stats_user'], 'include': True},
'stats_password': {'value': params['stats_password'], 'include': True},
'stats_port': {'value': params['stats_port'], 'include': True},
# extra
'cacert_file': {'value': params['cacert_file'], 'include': False},
# edits
'edits': {'value': params['edits'], 'include': False},
})
state = params['state']
ocrouter = Router(rconfig, verbose=params['debug'])
api_rval = ocrouter.get()
########
# get
########
if state == 'list':
return {'changed': False, 'results': api_rval, 'state': state}
########
# Delete
########
if state == 'absent':
if not ocrouter.exists():
return {'changed': False, 'state': state}
if check_mode:
return {'changed': True, 'msg': 'CHECK_MODE: Would have performed a delete.'}
# In case of delete we return a list of each object
# that represents a router and its result in a list
# pylint: disable=redefined-variable-type
api_rval = ocrouter.delete()
return {'changed': True, 'results': api_rval, 'state': state}
if state == 'present':
########
# Create
########
if not ocrouter.exists():
if check_mode:
return {'changed': True, 'msg': 'CHECK_MODE: Would have performed a create.'}
api_rval = ocrouter.create()
if api_rval['returncode'] != 0:
return {'failed': True, 'msg': api_rval}
return {'changed': True, 'results': api_rval, 'state': state}
########
# Update
########
if not ocrouter.needs_update():
return {'changed': False, 'state': state}
if check_mode:
return {'changed': False, 'msg': 'CHECK_MODE: Would have performed an update.'}
api_rval = ocrouter.update()
if api_rval['returncode'] != 0:
return {'failed': True, 'msg': api_rval}
return {'changed': True, 'results': api_rval, 'state': state}
|
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe
import frappe.model.meta
from frappe.model.dynamic_links import get_dynamic_link_map
import frappe.defaults
from frappe.utils.file_manager import remove_all
from frappe.utils.password import delete_all_passwords_for
from frappe import _
from frappe.model.naming import revert_series_if_last
from frappe.utils.global_search import delete_for_document
from six import string_types, integer_types
def delete_doc(doctype=None, name=None, force=0, ignore_doctypes=None, for_reload=False,
ignore_permissions=False, flags=None, ignore_on_trash=False, ignore_missing=True):
"""
Deletes a doc (dt, dn) and validates that it is not submitted and not linked in a live record
"""
if not ignore_doctypes: ignore_doctypes = []
# get from form
if not doctype:
doctype = frappe.form_dict.get('dt')
name = frappe.form_dict.get('dn')
names = name
if isinstance(name, string_types) or isinstance(name, integer_types):
names = [name]
for name in names or []:
# already deleted..?
if not frappe.db.exists(doctype, name):
if not ignore_missing:
raise frappe.DoesNotExistError
else:
return False
# delete passwords
delete_all_passwords_for(doctype, name)
doc = None
if doctype=="DocType":
if for_reload:
try:
doc = frappe.get_doc(doctype, name)
except frappe.DoesNotExistError:
pass
else:
doc.run_method("before_reload")
else:
doc = frappe.get_doc(doctype, name)
update_flags(doc, flags, ignore_permissions)
check_permission_and_not_submitted(doc)
frappe.db.sql("delete from `tabCustom Field` where dt = %s", name)
frappe.db.sql("delete from `tabCustom Script` where dt = %s", name)
frappe.db.sql("delete from `tabProperty Setter` where doc_type = %s", name)
frappe.db.sql("delete from `tabReport` where ref_doctype=%s", name)
frappe.db.sql("delete from `tabCustom DocPerm` where parent=%s", name)
delete_from_table(doctype, name, ignore_doctypes, None)
else:
doc = frappe.get_doc(doctype, name)
if not for_reload:
update_flags(doc, flags, ignore_permissions)
check_permission_and_not_submitted(doc)
if not ignore_on_trash:
doc.run_method("on_trash")
doc.flags.in_delete = True
doc.run_method('on_change')
frappe.enqueue('frappe.model.delete_doc.delete_dynamic_links', doctype=doc.doctype, name=doc.name,
is_async=False if frappe.flags.in_test else True)
# check if links exist
if not force:
check_if_doc_is_linked(doc)
check_if_doc_is_dynamically_linked(doc)
update_naming_series(doc)
delete_from_table(doctype, name, ignore_doctypes, doc)
doc.run_method("after_delete")
# delete attachments
remove_all(doctype, name, from_delete=True)
# delete global search entry
delete_for_document(doc)
if doc and not for_reload:
add_to_deleted_document(doc)
if not frappe.flags.in_patch:
try:
doc.notify_update()
insert_feed(doc)
except ImportError:
pass
# delete user_permissions
frappe.defaults.clear_default(parenttype="User Permission", key=doctype, value=name)
def add_to_deleted_document(doc):
'''Add this document to Deleted Document table. Called after delete'''
if doc.doctype != 'Deleted Document' and frappe.flags.in_install != 'frappe':
frappe.get_doc(dict(
doctype='Deleted Document',
deleted_doctype=doc.doctype,
deleted_name=doc.name,
data=doc.as_json(),
owner=frappe.session.user
)).db_insert()
def update_naming_series(doc):
if doc.meta.autoname:
if doc.meta.autoname.startswith("naming_series:") \
and getattr(doc, "naming_series", None):
revert_series_if_last(doc.naming_series, doc.name)
elif doc.meta.autoname.split(":")[0] not in ("Prompt", "field", "hash"):
revert_series_if_last(doc.meta.autoname, doc.name)
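# Illustrative comment (not from the original source): for a doctype whose autoname is
# "naming_series:" and a document named "SINV-00042" with naming_series "SINV-",
# revert_series_if_last("SINV-", "SINV-00042") rolls the series counter back by one
# if this was the most recently issued name in that series.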
def delete_from_table(doctype, name, ignore_doctypes, doc):
if doctype!="DocType" and doctype==name:
frappe.db.sql("delete from `tabSingles` where doctype=%s", name)
else:
frappe.db.sql("delete from `tab{0}` where name=%s".format(doctype), name)
# get child tables
if doc:
tables = [d.options for d in doc.meta.get_table_fields()]
else:
def get_table_fields(field_doctype):
return frappe.db.sql_list("""select options from `tab{}` where fieldtype='Table'
and parent=%s""".format(field_doctype), doctype)
tables = get_table_fields("DocField")
if not frappe.flags.in_install=="frappe":
tables += get_table_fields("Custom Field")
# delete from child tables
for t in list(set(tables)):
if t not in ignore_doctypes:
frappe.db.sql("delete from `tab%s` where parenttype=%s and parent = %s" % (t, '%s', '%s'), (doctype, name))
def update_flags(doc, flags=None, ignore_permissions=False):
if ignore_permissions:
if not flags: flags = {}
flags["ignore_permissions"] = ignore_permissions
if flags:
doc.flags.update(flags)
def check_permission_and_not_submitted(doc):
# permission
if (not doc.flags.ignore_permissions
and frappe.session.user!="Administrator"
and (
not doc.has_permission("delete")
or (doc.doctype=="DocType" and not doc.custom))):
frappe.msgprint(_("User not allowed to delete {0}: {1}")
.format(doc.doctype, doc.name), raise_exception=frappe.PermissionError)
# check if submitted
if doc.docstatus == 1:
frappe.msgprint(_("{0} {1}: Submitted Record cannot be deleted.").format(_(doc.doctype), doc.name),
raise_exception=True)
def check_if_doc_is_linked(doc, method="Delete"):
"""
Raises an exception if the given doc (dt, dn) is linked in another record.
"""
from frappe.model.rename_doc import get_link_fields
link_fields = get_link_fields(doc.doctype)
link_fields = [[lf['parent'], lf['fieldname'], lf['issingle']] for lf in link_fields]
for link_dt, link_field, issingle in link_fields:
if not issingle:
for item in frappe.db.get_values(link_dt, {link_field:doc.name},
["name", "parent", "parenttype", "docstatus"], as_dict=True):
linked_doctype = item.parenttype if item.parent else link_dt
if linked_doctype in ("Communication", "ToDo", "DocShare", "Email Unsubscribe", 'File', 'Version', "Activity Log"):
# don't check for communication and todo!
continue
if not item:
continue
elif (method != "Delete" or item.docstatus == 2) and (method != "Cancel" or item.docstatus != 1):
# don't raise exception if not
# linked to a non-cancelled doc when deleting or to a submitted doc when cancelling
continue
elif link_dt == doc.doctype and (item.parent or item.name) == doc.name:
# don't raise exception if not
# linked to same item or doc having same name as the item
continue
else:
reference_docname = item.parent or item.name
raise_link_exists_exception(doc, linked_doctype, reference_docname)
else:
if frappe.db.get_value(link_dt, None, link_field) == doc.name:
raise_link_exists_exception(doc, link_dt, link_dt)
def check_if_doc_is_dynamically_linked(doc, method="Delete"):
'''Raise `frappe.LinkExistsError` if the document is dynamically linked'''
for df in get_dynamic_link_map().get(doc.doctype, []):
if df.parent in ("Communication", "ToDo", "DocShare", "Email Unsubscribe", "Activity Log", 'File', 'Version'):
# don't check for communication and todo!
continue
meta = frappe.get_meta(df.parent)
if meta.issingle:
# dynamic link in single doc
refdoc = frappe.db.get_singles_dict(df.parent)
if (refdoc.get(df.options)==doc.doctype
and refdoc.get(df.fieldname)==doc.name
and ((method=="Delete" and refdoc.docstatus < 2)
or (method=="Cancel" and refdoc.docstatus==1))
):
# raise exception only if
# linked to a non-cancelled doc when deleting
# or linked to a submitted doc when cancelling
raise_link_exists_exception(doc, df.parent, df.parent)
else:
# dynamic link in table
df["table"] = ", parent, parenttype, idx" if meta.istable else ""
for refdoc in frappe.db.sql("""select name, docstatus{table} from `tab{parent}` where
{options}=%s and {fieldname}=%s""".format(**df), (doc.doctype, doc.name), as_dict=True):
if ((method=="Delete" and refdoc.docstatus < 2) or (method=="Cancel" and refdoc.docstatus==1)):
# raise exception only if
# linked to a non-cancelled doc when deleting
# or linked to a submitted doc when cancelling
reference_doctype = refdoc.parenttype if meta.istable else df.parent
reference_docname = refdoc.parent if meta.istable else refdoc.name
at_position = "at Row: {0}".format(refdoc.idx) if meta.istable else ""
raise_link_exists_exception(doc, reference_doctype, reference_docname, at_position)
def raise_link_exists_exception(doc, reference_doctype, reference_docname, row=''):
doc_link = '<a href="#Form/{0}/{1}">{1}</a>'.format(doc.doctype, doc.name)
reference_link = '<a href="#Form/{0}/{1}">{1}</a>'.format(reference_doctype, reference_docname)
#hack to display Single doctype only once in message
if reference_doctype == reference_docname:
reference_doctype = ''
frappe.throw(_('Cannot delete or cancel because {0} {1} is linked with {2} {3} {4}')
.format(doc.doctype, doc_link, reference_doctype, reference_link, row), frappe.LinkExistsError)
def delete_dynamic_links(doctype, name):
delete_doc("ToDo", frappe.db.sql_list("""select name from `tabToDo`
where reference_type=%s and reference_name=%s""", (doctype, name)),
ignore_permissions=True, force=True)
frappe.db.sql('''delete from `tabEmail Unsubscribe`
where reference_doctype=%s and reference_name=%s''', (doctype, name))
# delete shares
frappe.db.sql("""delete from `tabDocShare`
where share_doctype=%s and share_name=%s""", (doctype, name))
# delete versions
frappe.db.sql('delete from tabVersion where ref_doctype=%s and docname=%s', (doctype, name))
# delete comments
frappe.db.sql("""delete from `tabCommunication`
where
communication_type = 'Comment'
and reference_doctype=%s and reference_name=%s""", (doctype, name))
# unlink communications
frappe.db.sql("""update `tabCommunication`
set reference_doctype=null, reference_name=null
where
communication_type = 'Communication'
and reference_doctype=%s
and reference_name=%s""", (doctype, name))
# unlink secondary references
frappe.db.sql("""update `tabCommunication`
set link_doctype=null, link_name=null
where link_doctype=%s and link_name=%s""", (doctype, name))
# unlink feed
frappe.db.sql("""update `tabCommunication`
set timeline_doctype=null, timeline_name=null
where timeline_doctype=%s and timeline_name=%s""", (doctype, name))
# unlink activity_log reference_doctype
frappe.db.sql("""update `tabActivity Log`
set reference_doctype=null, reference_name=null
where
reference_doctype=%s
and reference_name=%s""", (doctype, name))
# unlink activity_log timeline_doctype
frappe.db.sql("""update `tabActivity Log`
set timeline_doctype=null, timeline_name=null
where timeline_doctype=%s and timeline_name=%s""", (doctype, name))
def insert_feed(doc):
from frappe.utils import get_fullname
if frappe.flags.in_install or frappe.flags.in_import or getattr(doc, "no_feed_on_delete", False):
return
frappe.get_doc({
"doctype": "Communication",
"communication_type": "Comment",
"comment_type": "Deleted",
"reference_doctype": doc.doctype,
"subject": "{0} {1}".format(_(doc.doctype), doc.name),
"full_name": get_fullname(doc.owner)
}).insert(ignore_permissions=True)
|
|
# -*- coding: utf-8 -*-
from keras.engine import Layer, InputSpec
from keras import initializers, regularizers
from keras import backend as K
from keras.utils.generic_utils import get_custom_objects
import numpy as np
class BatchRenormalization(Layer):
"""Batch renormalization layer (Sergey Ioffe, 2017). Source code original
link: https://github.com/titu1994/BatchRenormalization
Normalize the activations of the previous layer at each batch,
i.e. applies a transformation that maintains the mean activation
close to 0 and the activation standard deviation close to 1.
# Arguments
epsilon: small float > 0. Fuzz parameter.
Theano expects epsilon >= 1e-5.
mode: integer, 0, 1 or 2.
- 0: feature-wise normalization.
Each feature map in the input will
be normalized separately. The axis on which
to normalize is specified by the `axis` argument.
Note that if the input is a 4D image tensor
using Theano conventions (samples, channels, rows, cols)
then you should set `axis` to `1` to normalize along
the channels axis.
During training and testing we use running averages
computed during the training phase to normalize the data
- 1: sample-wise normalization. This mode assumes a 2D input.
- 2: feature-wise normalization, like mode 0, but
using per-batch statistics to normalize the data during both
testing and training.
axis: integer, axis along which to normalize in mode 0. For instance,
if your input tensor has shape (samples, channels, rows, cols),
set axis to 1 to normalize per feature map (channels axis).
momentum: momentum in the computation of the
exponential average of the mean and standard deviation
of the data, for feature-wise normalization.
r_max_value: Upper limit of the value of r_max.
d_max_value: Upper limit of the value of d_max.
t_delta: At each iteration, increment the value of t by t_delta.
weights: Initialization weights.
List of 2 Numpy arrays, with shapes:
`[(input_shape,), (input_shape,)]`
Note that the order of this list is [gamma, beta, mean, std]
beta_init: name of initialization function for shift parameter
(see [initializers](../initializers.md)), or alternatively,
Theano/TensorFlow function to use for weights initialization.
This parameter is only relevant if you don't pass a `weights` argument.
gamma_init: name of initialization function for scale parameter (see
[initializers](../initializers.md)), or alternatively,
Theano/TensorFlow function to use for weights initialization.
This parameter is only relevant if you don't pass a `weights` argument.
gamma_regularizer: instance of [WeightRegularizer](../regularizers.md)
(eg. L1 or L2 regularization), applied to the gamma vector.
beta_regularizer: instance of [WeightRegularizer](../regularizers.md),
applied to the beta vector.
# Input shape
Arbitrary. Use the keyword argument `input_shape`
(tuple of integers, does not include the samples axis)
when using this layer as the first layer in a model.
# Output shape
Same shape as input.
# References
- [Batch Renormalization: Towards Reducing Minibatch Dependence in Batch-Normalized Models](https://arxiv.org/abs/1702.03275)
- [Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift](https://arxiv.org/abs/1502.03167)
"""
def __init__(self, epsilon=1e-3, mode=0, axis=-1, momentum=0.99,
r_max_value=3., d_max_value=5., t_delta=1., weights=None, beta_init='zero',
gamma_init='one', gamma_regularizer=None, beta_regularizer=None,
**kwargs):
self.supports_masking = True
self.beta_init = initializers.get(beta_init)
self.gamma_init = initializers.get(gamma_init)
self.epsilon = epsilon
self.mode = mode
self.axis = axis
self.momentum = momentum
self.gamma_regularizer = regularizers.get(gamma_regularizer)
self.beta_regularizer = regularizers.get(beta_regularizer)
self.initial_weights = weights
self.r_max_value = r_max_value
self.d_max_value = d_max_value
self.t_delta = t_delta
if self.mode == 0:
self.uses_learning_phase = True
super(BatchRenormalization, self).__init__(**kwargs)
def build(self, input_shape):
self.input_spec = [InputSpec(shape=input_shape)]
shape = (input_shape[self.axis],)
self.gamma = self.add_weight(shape,
initializer=self.gamma_init,
regularizer=self.gamma_regularizer,
name='{}_gamma'.format(self.name))
self.beta = self.add_weight(shape,
initializer=self.beta_init,
regularizer=self.beta_regularizer,
name='{}_beta'.format(self.name))
self.running_mean = self.add_weight(shape, initializer='zero',
name='{}_running_mean'.format(self.name),
trainable=False)
# Note: running_std actually holds the running variance, not the running std.
self.running_std = self.add_weight(shape, initializer='one',
name='{}_running_std'.format(self.name),
trainable=False)
self.r_max = K.variable(np.ones((1,)), name='{}_r_max'.format(self.name))
self.d_max = K.variable(np.zeros((1,)), name='{}_d_max'.format(self.name))
self.t = K.variable(np.zeros((1,)), name='{}_t'.format(self.name))
if self.initial_weights is not None:
self.set_weights(self.initial_weights)
del self.initial_weights
self.built = True
def call(self, x, mask=None):
if self.mode == 0 or self.mode == 2:
assert self.built, 'Layer must be built before being called'
input_shape = K.int_shape(x)
reduction_axes = list(range(len(input_shape)))
del reduction_axes[self.axis]
broadcast_shape = [1] * len(input_shape)
broadcast_shape[self.axis] = input_shape[self.axis]
# mean_batch, var_batch = K.moments(x, reduction_axes, shift=None, keep_dims=False)
normed, mean_batch, var_batch = K.normalize_batch_in_training(
x, self.gamma, self.beta, reduction_axes,
epsilon=self.epsilon)
std_batch = (K.sqrt(var_batch + self.epsilon))
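# Batch Renormalization correction terms: r rescales by the ratio of the batch std
# to the running std, and d shifts by the difference of the batch mean from the
# running mean (normalized by the running std); both are clipped to their maxima
# and excluded from the gradient via stop_gradient.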
r_max_value = K.get_value(self.r_max)
r = std_batch / (K.sqrt(self.running_std + self.epsilon))
r = K.stop_gradient(K.clip(r, 1 / r_max_value, r_max_value))
d_max_value = K.get_value(self.d_max)
d = (mean_batch - self.running_mean) / K.sqrt(self.running_std + self.epsilon)
d = K.stop_gradient(K.clip(d, -d_max_value, d_max_value))
if sorted(reduction_axes) == list(range(K.ndim(x)))[:-1]:
x_normed_batch = (x - mean_batch) / std_batch
x_normed = (x_normed_batch * r + d) * self.gamma + self.beta
else:
# need broadcasting
broadcast_mean = K.reshape(mean_batch, broadcast_shape)
broadcast_std = K.reshape(std_batch, broadcast_shape)
broadcast_r = K.reshape(r, broadcast_shape)
broadcast_d = K.reshape(d, broadcast_shape)
broadcast_beta = K.reshape(self.beta, broadcast_shape)
broadcast_gamma = K.reshape(self.gamma, broadcast_shape)
x_normed_batch = (x - broadcast_mean) / broadcast_std
x_normed = (x_normed_batch * broadcast_r + broadcast_d) * broadcast_gamma + broadcast_beta
# explicit update to moving mean and standard deviation
self.add_update([K.moving_average_update(self.running_mean, mean_batch, self.momentum),
K.moving_average_update(self.running_std, std_batch ** 2, self.momentum)], x)
# update r_max and d_max
t_val = K.get_value(self.t)
r_val = self.r_max_value / (1 + (self.r_max_value - 1) * np.exp(-t_val))
d_val = self.d_max_value / (1 + ((self.d_max_value / 1e-3) - 1) * np.exp(-(2 * t_val)))
t_val += float(self.t_delta)
self.add_update([K.update(self.r_max, r_val),
K.update(self.d_max, d_val),
K.update(self.t, t_val)], x)
if self.mode == 0:
if sorted(reduction_axes) == list(range(K.ndim(x)))[:-1]:
x_normed_running = K.batch_normalization(
x, self.running_mean, self.running_std,
self.beta, self.gamma,
epsilon=self.epsilon)
else:
# need broadcasting
broadcast_running_mean = K.reshape(self.running_mean, broadcast_shape)
broadcast_running_std = K.reshape(self.running_std, broadcast_shape)
broadcast_beta = K.reshape(self.beta, broadcast_shape)
broadcast_gamma = K.reshape(self.gamma, broadcast_shape)
x_normed_running = K.batch_normalization(
x, broadcast_running_mean, broadcast_running_std,
broadcast_beta, broadcast_gamma,
epsilon=self.epsilon)
# pick the normalized form of x corresponding to the training phase
# for batch renormalization, inference time remains same as batchnorm
x_normed = K.in_train_phase(x_normed, x_normed_running)
elif self.mode == 1:
# sample-wise normalization
m = K.mean(x, axis=self.axis, keepdims=True)
std = K.sqrt(K.var(x, axis=self.axis, keepdims=True) + self.epsilon)
x_normed_batch = (x - m) / (std + self.epsilon)
r_max_value = K.get_value(self.r_max)
r = std / (self.running_std + self.epsilon)
r = K.stop_gradient(K.clip(r, 1 / r_max_value, r_max_value))
d_max_value = K.get_value(self.d_max)
d = (m - self.running_mean) / (self.running_std + self.epsilon)
d = K.stop_gradient(K.clip(d, -d_max_value, d_max_value))
x_normed = ((x_normed_batch * r) + d) * self.gamma + self.beta
# update r_max and d_max
t_val = K.get_value(self.t)
r_val = self.r_max_value / (1 + (self.r_max_value - 1) * np.exp(-t_val))
d_val = self.d_max_value / (1 + ((self.d_max_value / 1e-3) - 1) * np.exp(-(2 * t_val)))
t_val += float(self.t_delta)
self.add_update([K.update(self.r_max, r_val),
K.update(self.d_max, d_val),
K.update(self.t, t_val)], x)
return x_normed
def get_config(self):
config = {'epsilon': self.epsilon,
'mode': self.mode,
'axis': self.axis,
'gamma_regularizer': regularizers.serialize(self.gamma_regularizer),
'beta_regularizer': regularizers.serialize(self.beta_regularizer),
'momentum': self.momentum,
'r_max_value': self.r_max_value,
'd_max_value': self.d_max_value,
't_delta': self.t_delta}
base_config = super(BatchRenormalization, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
get_custom_objects().update({'BatchRenormalization': BatchRenormalization})
|
|
#
# pymatgen documentation build configuration file, created by
# sphinx-quickstart on Tue Nov 15 00:13:52 2011.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath("."))
sys.path.insert(0, os.path.dirname(".."))
sys.path.insert(0, os.path.dirname("../pymatgen"))
sys.path.insert(0, os.path.dirname("../.."))
from pymatgen.core import __author__, __file__, __version__
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ["sphinx.ext.autodoc", "sphinx.ext.napoleon", "sphinx.ext.linkcode", "sphinx.ext.mathjax"]
exclude_patterns = ["../**/tests*"]
exclude_dirnames = ["../**/tests*"]
autoclass_content = "both"
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix of source filenames.
source_suffix = ".rst"
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "pymatgen"
copyright = "2011, " + __author__
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = __version__
# The full version, including alpha/beta/rc tags.
release = __version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["_build"]
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = False
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "sphinx_rtd_theme"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ["."]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = "favicon.ico"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
html_css_files = [
"css/custom.css",
]
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = "pymatgendoc"
html_theme_options = {
"canonical_url": "https://pymatgen.org",
"logo_only": True,
"display_version": True,
"prev_next_buttons_location": None,
"style_external_links": True,
"style_nav_header_background": "linear-gradient(0deg, rgba(23,63,162,1) 0%, rgba(0,70,192,1) 100%)",
"collapse_navigation": True,
"sticky_navigation": True,
"navigation_depth": 4,
"includehidden": True,
"titles_only": False,
}
html_context = {
"display_github": True,
"github_user": "materialsproject",
"github_repo": "pymatgen",
"github_version": "master",
"conf_py_path": "/docs_rst/",
}
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
("index", "pymatgen.tex", "pymatgen Documentation", __author__, "manual"),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [("index", "pymatgen", "pymatgen Documentation", [__author__], 1)]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
"index",
"pymatgen",
"pymatgen Documentation",
__author__,
"pymatgen",
"One line description of project.",
"Miscellaneous",
),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# -- Options for Epub output ---------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = "pymatgen"
epub_author = __author__
epub_publisher = "Pymatgen Development Team"
epub_copyright = copyright
# The language of the text. It defaults to the language option
# or en if the language is not set.
# epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
# epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
# epub_identifier = ''
# A unique identification for the text.
# epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
# epub_cover = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
# epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
# epub_post_files = []
# A list of files that should not be packed into the epub file.
# epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
# epub_tocdepth = 3
# Allow duplicate toc entries.
# epub_tocdup = True
# Resolve function for the linkcode extension.
# Thanks to https://github.com/Lasagne/Lasagne/blob/master/docs/conf.py
def linkcode_resolve(domain, info):
def find_source():
# try to find the file and line number, based on code from numpy:
# https://github.com/numpy/numpy/blob/master/doc/source/conf.py#L286
obj = sys.modules[info["module"]]
for part in info["fullname"].split("."):
obj = getattr(obj, part)
import inspect
import os
fn = inspect.getsourcefile(obj)
fn = os.path.relpath(fn, start=os.path.dirname(__file__))
source, lineno = inspect.getsourcelines(obj)
return fn, lineno, lineno + len(source) - 1
if domain != "py" or not info["module"]:
return None
try:
rel_path, line_start, line_end = find_source()
# __file__ is imported from pymatgen.core
filename = f"pymatgen/core/{rel_path}#L{line_start}-L{line_end}"
except:
# no need to be relative to core here as module includes full path.
filename = info["module"].replace(".", "/") + ".py"
tag = "v" + __version__
return f"https://github.com/materialsproject/pymatgen/blob/{tag}/{filename}"
|
|
"""
sentry.models.groupassignee
~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import logging
import six
from collections import defaultdict
from django.conf import settings
from django.db import models
from django.utils import timezone
from sentry.db.models import FlexibleForeignKey, Model, sane_repr, \
BaseManager
from sentry.models.activity import Activity
from sentry.signals import issue_assigned
def get_user_project_ids(users):
"""
Given a list of users, return a dict where keys are user_ids
and values are a set of the project_ids the user is a member of
"""
from sentry.models import OrganizationMemberTeam, ProjectTeam
user_teams = list(OrganizationMemberTeam.objects.filter(
organizationmember__user__in=users,
is_active=True,
).values('organizationmember__user', 'team'))
# team_id to list of projects
projects_by_team = defaultdict(set)
for tp in ProjectTeam.objects.filter(team__in=[ut['team'] for ut in user_teams]):
projects_by_team[tp.team_id].add(tp.project_id)
# user_id to projects
projects_by_user = defaultdict(set)
for ut in user_teams:
projects_by_user[ut['organizationmember__user']].update(projects_by_team[ut['team']])
return projects_by_user
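# Illustrative sketch (not part of sentry.models.groupassignee): a dependency-free
# rendering of the grouping above, using hypothetical (user_id, team_id) memberships
# and (team_id, project_id) rows instead of the ORM queries.
def _example_projects_by_user():
    user_teams = [(1, 100), (1, 101), (2, 101)]          # hypothetical memberships
    team_projects = [(100, 10), (101, 11), (101, 12)]    # hypothetical project rows
    # team_id -> set of project_ids
    projects_by_team = defaultdict(set)
    for team_id, project_id in team_projects:
        projects_by_team[team_id].add(project_id)
    # user_id -> set of project_ids reachable through any of the user's teams
    projects_by_user = defaultdict(set)
    for user_id, team_id in user_teams:
        projects_by_user[user_id].update(projects_by_team[team_id])
    return projects_by_user    # {1: {10, 11, 12}, 2: {11, 12}}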
def sync_group_assignee_inbound(integration, email, external_issue_key, assign=True):
"""
Given an integration, user email address and an external issue key,
assign linked groups to matching users. Checks project membership.
Returns a list of groups that were successfully assigned.
"""
from sentry.models import Group, UserEmail, User
logger = logging.getLogger('sentry.integrations.%s' % integration.provider)
orgs_with_sync_enabled = []
for org_id in integration.organizations.values_list('id', flat=True):
installation = integration.get_installation(org_id)
if installation.should_sync('inbound_assignee'):
orgs_with_sync_enabled.append(org_id)
affected_groups = list(
Group.objects.get_groups_by_external_issue(
integration, external_issue_key,
).filter(project__organization_id__in=orgs_with_sync_enabled),
)
if not affected_groups:
return []
if not assign:
for group in affected_groups:
GroupAssignee.objects.deassign(group)
return affected_groups
users = {u.id: u for u in User.objects.filter(
id__in=UserEmail.objects.filter(
is_verified=True,
email=email,
).values_list('user_id', flat=True),
)}
projects_by_user = get_user_project_ids(users.values())
groups_assigned = []
for group in affected_groups:
try:
user_id = [
user_id for user_id, projects in projects_by_user.items()
if group.project_id in projects
][0]
except IndexError:
logger.info(
'assignee-not-found-inbound',
extra={
'integration_id': integration.id,
'email': email,
'issue_key': external_issue_key,
}
)
else:
user = users[user_id]
GroupAssignee.objects.assign(group, user)
groups_assigned.append(group)
return groups_assigned
def sync_group_assignee_outbound(group, user_id, assign=True):
from sentry.tasks.integrations import sync_assignee_outbound
from sentry.models import GroupLink
external_issue_ids = GroupLink.objects.filter(
project_id=group.project_id,
group_id=group.id,
linked_type=GroupLink.LinkedType.issue,
).values_list('linked_id', flat=True)
for external_issue_id in external_issue_ids:
sync_assignee_outbound.apply_async(
kwargs={
'external_issue_id': external_issue_id,
'user_id': user_id,
'assign': assign,
}
)
class GroupAssigneeManager(BaseManager):
def assign(self, group, assigned_to, acting_user=None):
from sentry import features
from sentry.models import User, Team, GroupSubscription, GroupSubscriptionReason
GroupSubscription.objects.subscribe_actor(
group=group,
actor=assigned_to,
reason=GroupSubscriptionReason.assigned,
)
if isinstance(assigned_to, User):
assignee_type = 'user'
other_type = 'team'
elif isinstance(assigned_to, Team):
assignee_type = 'team'
other_type = 'user'
else:
raise AssertionError('Invalid type to assign to: %r' % type(assigned_to))
now = timezone.now()
assignee, created = GroupAssignee.objects.get_or_create(
group=group,
defaults={
'project': group.project,
assignee_type: assigned_to,
'date_added': now,
}
)
if not created:
affected = GroupAssignee.objects.filter(
group=group,
).exclude(**{
assignee_type: assigned_to,
}).update(**{
assignee_type: assigned_to,
other_type: None,
'date_added': now,
})
else:
affected = True
issue_assigned.send(project=group.project, group=group, sender=acting_user)
if affected:
activity = Activity.objects.create(
project=group.project,
group=group,
type=Activity.ASSIGNED,
user=acting_user,
data={
'assignee': six.text_type(assigned_to.id),
'assigneeEmail': getattr(assigned_to, 'email', None),
'assigneeType': assignee_type,
},
)
activity.send_notification()
# sync Sentry assignee to external issues
if assignee_type == 'user' and features.has(
'organizations:internal-catchall', group.organization, actor=acting_user):
sync_group_assignee_outbound(group, assigned_to.id, assign=True)
def deassign(self, group, acting_user=None):
from sentry import features
affected = GroupAssignee.objects.filter(
group=group,
)[:1].count()
GroupAssignee.objects.filter(
group=group,
).delete()
if affected > 0:
activity = Activity.objects.create(
project=group.project,
group=group,
type=Activity.UNASSIGNED,
user=acting_user,
)
activity.send_notification()
# sync Sentry assignee to external issues
if features.has('organizations:internal-catchall',
group.organization, actor=acting_user):
sync_group_assignee_outbound(group, None, assign=False)
class GroupAssignee(Model):
"""
Identifies an assignment relationship between a user/team and an
aggregated event (Group).
"""
__core__ = False
objects = GroupAssigneeManager()
project = FlexibleForeignKey('sentry.Project', related_name="assignee_set")
group = FlexibleForeignKey('sentry.Group', related_name="assignee_set", unique=True)
user = FlexibleForeignKey(
settings.AUTH_USER_MODEL,
related_name="sentry_assignee_set",
null=True)
team = FlexibleForeignKey(
'sentry.Team',
related_name="sentry_assignee_set",
null=True)
date_added = models.DateTimeField(default=timezone.now)
class Meta:
app_label = 'sentry'
db_table = 'sentry_groupasignee'
__repr__ = sane_repr('group_id', 'user_id', 'team_id')
def save(self, *args, **kwargs):
assert (
not (self.user_id is not None and self.team_id is not None) and
not (self.user_id is None and self.team_id is None)
        ), 'Must have exactly one of Team or User'
super(GroupAssignee, self).save(*args, **kwargs)
def assigned_actor_id(self):
if self.user:
return u"user:{}".format(self.user_id)
if self.team:
return u"team:{}".format(self.team_id)
        raise NotImplementedError("Unknown Assignee")
def assigned_actor(self):
from sentry.api.fields.actor import Actor
return Actor.from_actor_id(self.assigned_actor_id())
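# Illustrative sketch (not part of the original model): assigned_actor_id builds
# "<type>:<id>" strings such as "user:42" or "team:7"; Actor.from_actor_id is the
# real parser, but a minimal, dependency-free version of that convention looks like:
def _example_parse_actor_id(actor_id):
    # e.g. "user:42" -> ("user", 42)
    actor_type, _, actor_pk = actor_id.partition(':')
    return actor_type, int(actor_pk)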
|
|
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
import mock
from oslo_config import cfg
from oslo_serialization import jsonutils
from oslo_utils import timeutils
import six
import webob
from jacket.api.compute.openstack.compute.legacy_v2 import server_metadata \
as server_metadata_v2
from jacket.api.compute.openstack.compute import server_metadata \
as server_metadata_v21
from jacket.compute.cloud import rpcapi as compute_rpcapi
from jacket.compute.cloud import vm_states
import jacket.db.compute
from jacket.compute import exception
from jacket.objects import compute
from jacket.compute import test
from jacket.tests.compute.unit.api.openstack import fakes
from jacket.tests.compute.unit import fake_instance
CONF = cfg.CONF
def return_create_instance_metadata_max(context, server_id, metadata, delete):
return stub_max_server_metadata()
def return_create_instance_metadata(context, server_id, metadata, delete):
return stub_server_metadata()
def fake_instance_save(inst, **kwargs):
inst.metadata = stub_server_metadata()
inst.obj_reset_changes()
def return_server_metadata(context, server_id):
if not isinstance(server_id, six.string_types) or not len(server_id) == 36:
msg = 'id %s must be a uuid in return server metadata' % server_id
raise Exception(msg)
return stub_server_metadata()
def return_empty_server_metadata(context, server_id):
return {}
def delete_server_metadata(context, server_id, key):
pass
def stub_server_metadata():
metadata = {
"key1": "value1",
"key2": "value2",
"key3": "value3",
}
return metadata
def stub_max_server_metadata():
metadata = {"metadata": {}}
for num in range(CONF.quota_metadata_items):
metadata['metadata']['key%i' % num] = "blah"
return metadata
def return_server(context, server_id, columns_to_join=None):
return fake_instance.fake_db_instance(
**{'id': server_id,
'uuid': '0cc3346e-9fef-4445-abe6-5d2b2690ec64',
'name': 'fake',
'locked': False,
'launched_at': timeutils.utcnow(),
'vm_state': vm_states.ACTIVE})
def return_server_by_uuid(context, server_uuid,
columns_to_join=None, use_slave=False):
return fake_instance.fake_db_instance(
**{'id': 1,
'uuid': '0cc3346e-9fef-4445-abe6-5d2b2690ec64',
'name': 'fake',
'locked': False,
'launched_at': timeutils.utcnow(),
'metadata': stub_server_metadata(),
'vm_state': vm_states.ACTIVE})
def return_server_nonexistent(context, server_id,
columns_to_join=None, use_slave=False):
raise exception.InstanceNotFound(instance_id=server_id)
def fake_change_instance_metadata(self, context, instance, diff):
pass
class ServerMetaDataTestV21(test.TestCase):
validation_ex = exception.ValidationError
validation_ex_large = validation_ex
def setUp(self):
super(ServerMetaDataTestV21, self).setUp()
fakes.stub_out_key_pair_funcs(self.stubs)
self.stub_out('jacket.compute.instance_get', return_server)
self.stub_out('jacket.compute.instance_get_by_uuid',
return_server_by_uuid)
self.stub_out('jacket.compute.instance_metadata_get',
return_server_metadata)
self.stubs.Set(compute_rpcapi.JacketAPI, 'change_instance_metadata',
fake_change_instance_metadata)
self._set_up_resources()
def _set_up_resources(self):
self.controller = server_metadata_v21.ServerMetadataController()
self.uuid = str(uuid.uuid4())
self.url = '/fake/servers/%s/metadata' % self.uuid
def _get_request(self, param_url=''):
return fakes.HTTPRequestV21.blank(self.url + param_url)
def test_index(self):
req = self._get_request()
res_dict = self.controller.index(req, self.uuid)
expected = {
'metadata': {
'key1': 'value1',
'key2': 'value2',
'key3': 'value3',
},
}
self.assertEqual(expected, res_dict)
def test_index_nonexistent_server(self):
self.stub_out('jacket.compute.instance_metadata_get',
return_server_nonexistent)
req = self._get_request()
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.index, req, self.url)
def test_index_no_data(self):
self.stub_out('compute.db.instance_metadata_get',
return_empty_server_metadata)
req = self._get_request()
res_dict = self.controller.index(req, self.uuid)
expected = {'metadata': {}}
self.assertEqual(expected, res_dict)
def test_show(self):
req = self._get_request('/key2')
res_dict = self.controller.show(req, self.uuid, 'key2')
expected = {"meta": {'key2': 'value2'}}
self.assertEqual(expected, res_dict)
def test_show_nonexistent_server(self):
self.stub_out('jacket.compute.instance_metadata_get',
return_server_nonexistent)
req = self._get_request('/key2')
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.show, req, self.uuid, 'key2')
def test_show_meta_not_found(self):
self.stub_out('compute.db.instance_metadata_get',
return_empty_server_metadata)
req = self._get_request('/key6')
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.show, req, self.uuid, 'key6')
def test_delete(self):
self.stub_out('jacket.compute.instance_metadata_get',
return_server_metadata)
self.stub_out('compute.db.instance_metadata_delete',
delete_server_metadata)
req = self._get_request('/key2')
req.method = 'DELETE'
res = self.controller.delete(req, self.uuid, 'key2')
self.assertIsNone(res)
def test_delete_nonexistent_server(self):
self.stub_out('compute.db.instance_get_by_uuid',
return_server_nonexistent)
req = self._get_request('/key1')
req.method = 'DELETE'
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.delete, req, self.uuid, 'key1')
def test_delete_meta_not_found(self):
self.stub_out('jacket.compute.instance_metadata_get',
return_empty_server_metadata)
req = self._get_request('/key6')
req.method = 'DELETE'
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.delete, req, self.uuid, 'key6')
def test_create(self):
self.stubs.Set(compute.Instance, 'save', fake_instance_save)
req = self._get_request()
req.method = 'POST'
req.content_type = "application/json"
body = {"metadata": {"key9": "value9"}}
req.body = jsonutils.dump_as_bytes(body)
res_dict = self.controller.create(req, self.uuid, body=body)
body['metadata'].update({
"key1": "value1",
"key2": "value2",
"key3": "value3",
})
self.assertEqual(body, res_dict)
def test_create_empty_body(self):
self.stub_out('compute.db.instance_metadata_update',
return_create_instance_metadata)
req = self._get_request()
req.method = 'POST'
req.headers["content-type"] = "application/json"
self.assertRaises(self.validation_ex,
self.controller.create, req, self.uuid, body=None)
def test_create_item_empty_key(self):
self.stub_out('jacket.compute.instance_metadata_update',
return_create_instance_metadata)
req = self._get_request('/key1')
req.method = 'PUT'
body = {"metadata": {"": "value1"}}
req.body = jsonutils.dump_as_bytes(body)
req.headers["content-type"] = "application/json"
self.assertRaises(self.validation_ex,
self.controller.create, req, self.uuid, body=body)
def test_create_item_non_dict(self):
self.stub_out('compute.db.instance_metadata_update',
return_create_instance_metadata)
req = self._get_request('/key1')
req.method = 'PUT'
body = {"metadata": None}
req.body = jsonutils.dump_as_bytes(body)
req.headers["content-type"] = "application/json"
self.assertRaises(self.validation_ex,
self.controller.create, req, self.uuid, body=body)
def test_create_item_key_too_long(self):
self.stub_out('compute.db.instance_metadata_update',
return_create_instance_metadata)
req = self._get_request('/key1')
req.method = 'PUT'
body = {"metadata": {("a" * 260): "value1"}}
req.body = jsonutils.dump_as_bytes(body)
req.headers["content-type"] = "application/json"
self.assertRaises(self.validation_ex_large,
self.controller.create,
req, self.uuid, body=body)
def test_create_malformed_container(self):
self.stub_out('compute.db.instance_metadata_update',
return_create_instance_metadata)
req = fakes.HTTPRequest.blank(self.url + '/key1')
req.method = 'PUT'
body = {"meta": {}}
req.body = jsonutils.dump_as_bytes(body)
req.headers["content-type"] = "application/json"
self.assertRaises(self.validation_ex,
self.controller.create, req, self.uuid, body=body)
def test_create_malformed_data(self):
self.stub_out('jacket.compute.instance_metadata_update',
return_create_instance_metadata)
req = fakes.HTTPRequest.blank(self.url + '/key1')
req.method = 'PUT'
body = {"metadata": ['asdf']}
req.body = jsonutils.dump_as_bytes(body)
req.headers["content-type"] = "application/json"
self.assertRaises(self.validation_ex,
self.controller.create, req, self.uuid, body=body)
def test_create_nonexistent_server(self):
self.stub_out('jacket.compute.instance_get_by_uuid',
return_server_nonexistent)
req = self._get_request()
req.method = 'POST'
body = {"metadata": {"key1": "value1"}}
req.body = jsonutils.dump_as_bytes(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.create, req, self.uuid, body=body)
def test_update_metadata(self):
self.stubs.Set(compute.Instance, 'save', fake_instance_save)
req = self._get_request()
req.method = 'POST'
req.content_type = 'application/json'
expected = {
'metadata': {
'key1': 'updatedvalue',
'key29': 'newkey',
}
}
req.body = jsonutils.dump_as_bytes(expected)
response = self.controller.update_all(req, self.uuid, body=expected)
self.assertEqual(expected, response)
def test_update_all(self):
self.stubs.Set(compute.Instance, 'save', fake_instance_save)
req = self._get_request()
req.method = 'PUT'
req.content_type = "application/json"
expected = {
'metadata': {
'key10': 'value10',
'key99': 'value99',
},
}
req.body = jsonutils.dump_as_bytes(expected)
res_dict = self.controller.update_all(req, self.uuid, body=expected)
self.assertEqual(expected, res_dict)
def test_update_all_empty_container(self):
self.stubs.Set(compute.Instance, 'save', fake_instance_save)
req = self._get_request()
req.method = 'PUT'
req.content_type = "application/json"
expected = {'metadata': {}}
req.body = jsonutils.dump_as_bytes(expected)
res_dict = self.controller.update_all(req, self.uuid, body=expected)
self.assertEqual(expected, res_dict)
def test_update_all_empty_body_item(self):
self.stub_out('jacket.compute.instance_metadata_update',
return_create_instance_metadata)
req = fakes.HTTPRequest.blank(self.url + '/key1')
req.method = 'PUT'
req.headers["content-type"] = "application/json"
self.assertRaises(self.validation_ex,
self.controller.update_all, req, self.uuid,
body=None)
def test_update_all_with_non_dict_item(self):
self.stub_out('jacket.compute.instance_metadata_update',
return_create_instance_metadata)
req = fakes.HTTPRequest.blank(self.url + '/bad')
req.method = 'PUT'
body = {"metadata": None}
req.body = jsonutils.dump_as_bytes(body)
req.headers["content-type"] = "application/json"
self.assertRaises(self.validation_ex,
self.controller.update_all, req, self.uuid,
body=body)
def test_update_all_malformed_container(self):
self.stub_out('jacket.compute.instance_metadata_update',
return_create_instance_metadata)
req = self._get_request()
req.method = 'PUT'
req.content_type = "application/json"
expected = {'meta': {}}
req.body = jsonutils.dump_as_bytes(expected)
self.assertRaises(self.validation_ex,
self.controller.update_all, req, self.uuid,
body=expected)
def test_update_all_malformed_data(self):
self.stub_out('compute.db.instance_metadata_update',
return_create_instance_metadata)
req = self._get_request()
req.method = 'PUT'
req.content_type = "application/json"
expected = {'metadata': ['asdf']}
req.body = jsonutils.dump_as_bytes(expected)
self.assertRaises(self.validation_ex,
self.controller.update_all, req, self.uuid,
body=expected)
def test_update_all_nonexistent_server(self):
self.stub_out('compute.db.instance_get', return_server_nonexistent)
req = self._get_request()
req.method = 'PUT'
req.content_type = "application/json"
body = {'metadata': {'key10': 'value10'}}
req.body = jsonutils.dump_as_bytes(body)
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.update_all, req, '100', body=body)
def test_update_all_non_dict(self):
self.stub_out('compute.db.instance_metadata_update',
return_create_instance_metadata)
req = self._get_request()
req.method = 'PUT'
body = {"metadata": None}
req.body = jsonutils.dump_as_bytes(body)
req.headers["content-type"] = "application/json"
self.assertRaises(self.validation_ex, self.controller.update_all,
req, self.uuid, body=body)
def test_update_item(self):
self.stubs.Set(compute.Instance, 'save', fake_instance_save)
req = self._get_request('/key1')
req.method = 'PUT'
body = {"meta": {"key1": "value1"}}
req.body = jsonutils.dump_as_bytes(body)
req.headers["content-type"] = "application/json"
res_dict = self.controller.update(req, self.uuid, 'key1', body=body)
expected = {"meta": {'key1': 'value1'}}
self.assertEqual(expected, res_dict)
def test_update_item_nonexistent_server(self):
self.stub_out('jacket.compute.instance_get_by_uuid',
return_server_nonexistent)
req = self._get_request('/key1')
req.method = 'PUT'
body = {"meta": {"key1": "value1"}}
req.body = jsonutils.dump_as_bytes(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.update, req, self.uuid, 'key1',
body=body)
def test_update_item_empty_body(self):
self.stub_out('jacket.compute.instance_metadata_update',
return_create_instance_metadata)
req = self._get_request('/key1')
req.method = 'PUT'
req.headers["content-type"] = "application/json"
self.assertRaises(self.validation_ex,
self.controller.update, req, self.uuid, 'key1',
body=None)
def test_update_malformed_container(self):
self.stub_out('jacket.compute.instance_metadata_update',
return_create_instance_metadata)
req = fakes.HTTPRequest.blank(self.url)
req.method = 'PUT'
expected = {'meta': {}}
req.body = jsonutils.dump_as_bytes(expected)
req.headers["content-type"] = "application/json"
self.assertRaises(self.validation_ex,
self.controller.update, req, self.uuid, 'key1',
body=expected)
def test_update_malformed_data(self):
self.stub_out('compute.db.instance_metadata_update',
return_create_instance_metadata)
req = fakes.HTTPRequest.blank(self.url)
req.method = 'PUT'
expected = {'metadata': ['asdf']}
req.body = jsonutils.dump_as_bytes(expected)
req.headers["content-type"] = "application/json"
self.assertRaises(self.validation_ex,
self.controller.update, req, self.uuid, 'key1',
body=expected)
def test_update_item_empty_key(self):
self.stub_out('compute.db.instance_metadata_update',
return_create_instance_metadata)
req = self._get_request('/key1')
req.method = 'PUT'
body = {"meta": {"": "value1"}}
req.body = jsonutils.dump_as_bytes(body)
req.headers["content-type"] = "application/json"
self.assertRaises(self.validation_ex,
self.controller.update, req, self.uuid, '',
body=body)
def test_update_item_key_too_long(self):
self.stub_out('compute.db.instance_metadata_update',
return_create_instance_metadata)
req = self._get_request('/key1')
req.method = 'PUT'
body = {"meta": {("a" * 260): "value1"}}
req.body = jsonutils.dump_as_bytes(body)
req.headers["content-type"] = "application/json"
self.assertRaises(self.validation_ex_large,
self.controller.update,
req, self.uuid, ("a" * 260), body=body)
def test_update_item_value_too_long(self):
self.stub_out('jacket.compute.instance_metadata_update',
return_create_instance_metadata)
req = self._get_request('/key1')
req.method = 'PUT'
body = {"meta": {"key1": ("a" * 260)}}
req.body = jsonutils.dump_as_bytes(body)
req.headers["content-type"] = "application/json"
self.assertRaises(self.validation_ex_large,
self.controller.update,
req, self.uuid, "key1", body=body)
def test_update_item_too_many_keys(self):
self.stub_out('jacket.compute.instance_metadata_update',
return_create_instance_metadata)
req = self._get_request('/key1')
req.method = 'PUT'
body = {"meta": {"key1": "value1", "key2": "value2"}}
req.body = jsonutils.dump_as_bytes(body)
req.headers["content-type"] = "application/json"
self.assertRaises(self.validation_ex,
self.controller.update, req, self.uuid, 'key1',
body=body)
def test_update_item_body_uri_mismatch(self):
self.stub_out('compute.db.instance_metadata_update',
return_create_instance_metadata)
req = self._get_request('/bad')
req.method = 'PUT'
body = {"meta": {"key1": "value1"}}
req.body = jsonutils.dump_as_bytes(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.update, req, self.uuid, 'bad',
body=body)
def test_update_item_non_dict(self):
self.stub_out('compute.db.instance_metadata_update',
return_create_instance_metadata)
req = self._get_request('/bad')
req.method = 'PUT'
body = {"meta": None}
req.body = jsonutils.dump_as_bytes(body)
req.headers["content-type"] = "application/json"
self.assertRaises(self.validation_ex,
self.controller.update, req, self.uuid, 'bad',
body=body)
def test_update_empty_container(self):
self.stub_out('jacket.compute.instance_metadata_update',
return_create_instance_metadata)
req = fakes.HTTPRequest.blank(self.url)
req.method = 'PUT'
expected = {'metadata': {}}
req.body = jsonutils.dump_as_bytes(expected)
req.headers["content-type"] = "application/json"
self.assertRaises(self.validation_ex,
self.controller.update, req, self.uuid, 'bad',
body=expected)
def test_too_many_metadata_items_on_create(self):
self.stub_out('jacket.compute.instance_metadata_update',
return_create_instance_metadata)
data = {"metadata": {}}
for num in range(CONF.quota_metadata_items + 1):
data['metadata']['key%i' % num] = "blah"
req = self._get_request()
req.method = 'POST'
req.body = jsonutils.dump_as_bytes(data)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPForbidden,
self.controller.create, req, self.uuid, body=data)
def test_invalid_metadata_items_on_create(self):
self.stub_out('compute.db.instance_metadata_update',
return_create_instance_metadata)
req = self._get_request()
req.method = 'POST'
req.headers["content-type"] = "application/json"
# test for long key
data = {"metadata": {"a" * 260: "value1"}}
req.body = jsonutils.dump_as_bytes(data)
self.assertRaises(self.validation_ex_large,
self.controller.create, req, self.uuid, body=data)
# test for long value
data = {"metadata": {"key": "v" * 260}}
req.body = jsonutils.dump_as_bytes(data)
self.assertRaises(self.validation_ex_large,
self.controller.create, req, self.uuid, body=data)
# test for empty key.
data = {"metadata": {"": "value1"}}
req.body = jsonutils.dump_as_bytes(data)
self.assertRaises(self.validation_ex,
self.controller.create, req, self.uuid, body=data)
def test_too_many_metadata_items_on_update_item(self):
self.stub_out('jacket.compute.instance_metadata_update',
return_create_instance_metadata)
data = {"metadata": {}}
for num in range(CONF.quota_metadata_items + 1):
data['metadata']['key%i' % num] = "blah"
req = self._get_request()
req.method = 'PUT'
req.body = jsonutils.dump_as_bytes(data)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPForbidden, self.controller.update_all,
req, self.uuid, body=data)
def test_invalid_metadata_items_on_update_item(self):
self.stub_out('jacket.compute.instance_metadata_update',
return_create_instance_metadata)
data = {"metadata": {}}
for num in range(CONF.quota_metadata_items + 1):
data['metadata']['key%i' % num] = "blah"
req = self._get_request()
req.method = 'PUT'
req.body = jsonutils.dump_as_bytes(data)
req.headers["content-type"] = "application/json"
# test for long key
data = {"metadata": {"a" * 260: "value1"}}
req.body = jsonutils.dump_as_bytes(data)
self.assertRaises(self.validation_ex_large,
self.controller.update_all, req, self.uuid,
body=data)
# test for long value
data = {"metadata": {"key": "v" * 260}}
req.body = jsonutils.dump_as_bytes(data)
self.assertRaises(self.validation_ex_large,
self.controller.update_all, req, self.uuid,
body=data)
# test for empty key.
data = {"metadata": {"": "value1"}}
req.body = jsonutils.dump_as_bytes(data)
self.assertRaises(self.validation_ex,
self.controller.update_all, req, self.uuid,
body=data)
class ServerMetaDataTestV2(ServerMetaDataTestV21):
validation_ex = webob.exc.HTTPBadRequest
validation_ex_large = webob.exc.HTTPRequestEntityTooLarge
def _set_up_resources(self):
self.controller = server_metadata_v2.Controller()
self.uuid = str(uuid.uuid4())
self.url = '/v1.1/fake/servers/%s/metadata' % self.uuid
def _get_request(self, param_url=''):
return fakes.HTTPRequest.blank(self.url + param_url)
class BadStateServerMetaDataTestV21(test.TestCase):
def setUp(self):
super(BadStateServerMetaDataTestV21, self).setUp()
fakes.stub_out_key_pair_funcs(self.stubs)
self.stub_out('jacket.compute.instance_metadata_get',
return_server_metadata)
self.stubs.Set(compute_rpcapi.JacketAPI, 'change_instance_metadata',
fake_change_instance_metadata)
self.stub_out('jacket.compute.instance_get', self._return_server_in_build)
self.stub_out('compute.db.instance_get_by_uuid',
self._return_server_in_build_by_uuid)
self.stub_out('compute.db.instance_metadata_delete',
delete_server_metadata)
self._set_up_resources()
def _set_up_resources(self):
self.controller = server_metadata_v21.ServerMetadataController()
self.uuid = str(uuid.uuid4())
self.url = '/fake/servers/%s/metadata' % self.uuid
def _get_request(self, param_url=''):
return fakes.HTTPRequestV21.blank(self.url + param_url)
def test_invalid_state_on_delete(self):
req = self._get_request('/key2')
req.method = 'DELETE'
self.assertRaises(webob.exc.HTTPConflict, self.controller.delete,
req, self.uuid, 'key2')
def test_invalid_state_on_update_metadata(self):
self.stub_out('compute.db.instance_metadata_update',
return_create_instance_metadata)
req = self._get_request()
req.method = 'POST'
req.content_type = 'application/json'
expected = {
'metadata': {
'key1': 'updatedvalue',
'key29': 'newkey',
}
}
req.body = jsonutils.dump_as_bytes(expected)
self.assertRaises(webob.exc.HTTPConflict, self.controller.update_all,
req, self.uuid, body=expected)
def _return_server_in_build(self, context, server_id,
columns_to_join=None):
return fake_instance.fake_db_instance(
**{'id': server_id,
'uuid': '0cc3346e-9fef-4445-abe6-5d2b2690ec64',
'name': 'fake',
'locked': False,
'vm_state': vm_states.BUILDING})
def _return_server_in_build_by_uuid(self, context, server_uuid,
columns_to_join=None, use_slave=False):
return fake_instance.fake_db_instance(
**{'id': 1,
'uuid': '0cc3346e-9fef-4445-abe6-5d2b2690ec64',
'name': 'fake',
'locked': False,
'vm_state': vm_states.BUILDING})
@mock.patch.object(jacket.compute.compute.api.API, 'update_instance_metadata',
side_effect=exception.InstanceIsLocked(instance_uuid=0))
def test_instance_lock_update_metadata(self, mock_update):
req = self._get_request()
req.method = 'POST'
req.content_type = 'application/json'
expected = {
'metadata': {
'keydummy': 'newkey',
}
}
req.body = jsonutils.dump_as_bytes(expected)
self.assertRaises(webob.exc.HTTPConflict, self.controller.update_all,
req, self.uuid, body=expected)
class BadStateServerMetaDataTestV2(BadStateServerMetaDataTestV21):
def _set_up_resources(self):
self.controller = server_metadata_v2.Controller()
self.uuid = str(uuid.uuid4())
self.url = '/v1.1/fake/servers/%s/metadata' % self.uuid
def _get_request(self, param_url=''):
return fakes.HTTPRequest.blank(self.url + param_url)
class ServerMetaPolicyEnforcementV21(test.NoDBTestCase):
def setUp(self):
super(ServerMetaPolicyEnforcementV21, self).setUp()
self.controller = server_metadata_v21.ServerMetadataController()
self.req = fakes.HTTPRequest.blank('')
def test_create_policy_failed(self):
rule_name = "os_compute_api:server-metadata:create"
self.policy.set_rules({rule_name: "project:non_fake"})
exc = self.assertRaises(
exception.PolicyNotAuthorized,
self.controller.create, self.req, fakes.FAKE_UUID,
body={'metadata': {}})
self.assertEqual(
"Policy doesn't allow %s to be performed." % rule_name,
exc.format_message())
def test_index_policy_failed(self):
rule_name = "os_compute_api:server-metadata:index"
self.policy.set_rules({rule_name: "project:non_fake"})
exc = self.assertRaises(
exception.PolicyNotAuthorized,
self.controller.index, self.req, fakes.FAKE_UUID)
self.assertEqual(
"Policy doesn't allow %s to be performed." % rule_name,
exc.format_message())
def test_update_policy_failed(self):
rule_name = "os_compute_api:server-metadata:update"
self.policy.set_rules({rule_name: "project:non_fake"})
exc = self.assertRaises(
exception.PolicyNotAuthorized,
self.controller.update, self.req, fakes.FAKE_UUID, fakes.FAKE_UUID,
body={'meta': {'fake_meta': 'fake_meta'}})
self.assertEqual(
"Policy doesn't allow %s to be performed." % rule_name,
exc.format_message())
def test_update_all_policy_failed(self):
rule_name = "os_compute_api:server-metadata:update_all"
self.policy.set_rules({rule_name: "project:non_fake"})
exc = self.assertRaises(
exception.PolicyNotAuthorized,
self.controller.update_all, self.req, fakes.FAKE_UUID,
body={'metadata': {}})
self.assertEqual(
"Policy doesn't allow %s to be performed." % rule_name,
exc.format_message())
def test_delete_policy_failed(self):
rule_name = "os_compute_api:server-metadata:delete"
self.policy.set_rules({rule_name: "project:non_fake"})
exc = self.assertRaises(
exception.PolicyNotAuthorized,
self.controller.delete, self.req, fakes.FAKE_UUID, fakes.FAKE_UUID)
self.assertEqual(
"Policy doesn't allow %s to be performed." % rule_name,
exc.format_message())
def test_show_policy_failed(self):
rule_name = "os_compute_api:server-metadata:show"
self.policy.set_rules({rule_name: "project:non_fake"})
exc = self.assertRaises(
exception.PolicyNotAuthorized,
self.controller.show, self.req, fakes.FAKE_UUID, fakes.FAKE_UUID)
self.assertEqual(
"Policy doesn't allow %s to be performed." % rule_name,
exc.format_message())
|
|
from django.db.models.fields import Field
from django.db.models.sql.expressions import SQLEvaluator
from django.utils.translation import ugettext_lazy as _
from django.contrib.gis import forms
from django.contrib.gis.db.models.proxy import GeometryProxy
from django.contrib.gis.geometry.backend import Geometry, GeometryException
from django.utils import six
# Local cache of the spatial_ref_sys table, which holds SRID data for each
# spatial database alias. This cache exists so that the database isn't queried
# for SRID info each time a distance query is constructed.
_srid_cache = {}
def get_srid_info(srid, connection):
"""
Returns the units, unit name, and spheroid WKT associated with the
given SRID from the `spatial_ref_sys` (or equivalent) spatial database
table for the given database connection. These results are cached.
"""
global _srid_cache
try:
# The SpatialRefSys model for the spatial backend.
SpatialRefSys = connection.ops.spatial_ref_sys()
except NotImplementedError:
# No `spatial_ref_sys` table in spatial backend (e.g., MySQL).
return None, None, None
    if connection.alias not in _srid_cache:
# Initialize SRID dictionary for database if it doesn't exist.
_srid_cache[connection.alias] = {}
    if srid not in _srid_cache[connection.alias]:
# Use `SpatialRefSys` model to query for spatial reference info.
sr = SpatialRefSys.objects.using(connection.alias).get(srid=srid)
units, units_name = sr.units
spheroid = SpatialRefSys.get_spheroid(sr.wkt)
_srid_cache[connection.alias][srid] = (units, units_name, spheroid)
return _srid_cache[connection.alias][srid]
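# Illustrative sketch (not part of Django): the cache above is a plain nested dict
# keyed first by database alias and then by SRID; the same memoization pattern,
# written without the ORM, looks like this (`compute` is a hypothetical callable
# that produces the (units, units_name, spheroid) triple).
def _example_srid_cache_lookup(cache, alias, srid, compute):
    cache.setdefault(alias, {})
    if srid not in cache[alias]:
        cache[alias][srid] = compute(srid)   # e.g. (units, units_name, spheroid)
    return cache[alias][srid]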
class GeometryField(Field):
"The base GIS field -- maps to the OpenGIS Specification Geometry type."
# The OpenGIS Geometry name.
geom_type = 'GEOMETRY'
# Geodetic units.
geodetic_units = ('Decimal Degree', 'degree')
description = _("The base GIS field -- maps to the OpenGIS Specification Geometry type.")
def __init__(self, verbose_name=None, srid=4326, spatial_index=True, dim=2,
geography=False, **kwargs):
"""
The initialization function for geometry fields. Takes the following
as keyword arguments:
srid:
The spatial reference system identifier, an OGC standard.
Defaults to 4326 (WGS84).
spatial_index:
Indicates whether to create a spatial index. Defaults to True.
Set this instead of 'db_index' for geographic fields since index
creation is different for geometry columns.
dim:
The number of dimensions for this geometry. Defaults to 2.
extent:
Customize the extent, in a 4-tuple of WGS 84 coordinates, for the
geometry field entry in the `USER_SDO_GEOM_METADATA` table. Defaults
to (-180.0, -90.0, 180.0, 90.0).
tolerance:
Define the tolerance, in meters, to use for the geometry field
entry in the `USER_SDO_GEOM_METADATA` table. Defaults to 0.05.
"""
# Setting the index flag with the value of the `spatial_index` keyword.
self.spatial_index = spatial_index
# Setting the SRID and getting the units. Unit information must be
# easily available in the field instance for distance queries.
self.srid = srid
# Setting the dimension of the geometry field.
self.dim = dim
# Setting the verbose_name keyword argument with the positional
# first parameter, so this works like normal fields.
kwargs['verbose_name'] = verbose_name
# Is this a geography rather than a geometry column?
self.geography = geography
# Oracle-specific private attributes for creating the entry in
# `USER_SDO_GEOM_METADATA`
self._extent = kwargs.pop('extent', (-180.0, -90.0, 180.0, 90.0))
self._tolerance = kwargs.pop('tolerance', 0.05)
super(GeometryField, self).__init__(**kwargs)
# The following functions are used to get the units, their name, and
# the spheroid corresponding to the SRID of the GeometryField.
def _get_srid_info(self, connection):
# Get attributes from `get_srid_info`.
self._units, self._units_name, self._spheroid = get_srid_info(self.srid, connection)
def spheroid(self, connection):
if not hasattr(self, '_spheroid'):
self._get_srid_info(connection)
return self._spheroid
def units(self, connection):
if not hasattr(self, '_units'):
self._get_srid_info(connection)
return self._units
def units_name(self, connection):
if not hasattr(self, '_units_name'):
self._get_srid_info(connection)
return self._units_name
### Routines specific to GeometryField ###
def geodetic(self, connection):
"""
Returns true if this field's SRID corresponds with a coordinate
system that uses non-projected units (e.g., latitude/longitude).
"""
return self.units_name(connection) in self.geodetic_units
def get_distance(self, value, lookup_type, connection):
"""
Returns a distance number in units of the field. For example, if
`D(km=1)` was passed in and the units of the field were in meters,
then 1000 would be returned.
"""
return connection.ops.get_distance(self, value, lookup_type)
def get_prep_value(self, value):
"""
Spatial lookup values are either a parameter that is (or may be
converted to) a geometry, or a sequence of lookup values that
        begins with a geometry. This routine will set up the geometry
value properly, and preserve any other lookup parameters before
returning to the caller.
"""
if isinstance(value, SQLEvaluator):
return value
elif isinstance(value, (tuple, list)):
geom = value[0]
seq_value = True
else:
geom = value
seq_value = False
# When the input is not a GEOS geometry, attempt to construct one
# from the given string input.
if isinstance(geom, Geometry):
pass
elif isinstance(geom, (bytes, six.string_types)) or hasattr(geom, '__geo_interface__'):
try:
geom = Geometry(geom)
except GeometryException:
raise ValueError('Could not create geometry from lookup value.')
else:
raise ValueError('Cannot use object with type %s for a geometry lookup parameter.' % type(geom).__name__)
# Assigning the SRID value.
geom.srid = self.get_srid(geom)
if seq_value:
lookup_val = [geom]
lookup_val.extend(value[1:])
return tuple(lookup_val)
else:
return geom
def get_srid(self, geom):
"""
Returns the default SRID for the given geometry, taking into account
the SRID set for the field. For example, if the input geometry
has no SRID, then that of the field will be returned.
"""
gsrid = geom.srid # SRID of given geometry.
if gsrid is None or self.srid == -1 or (gsrid == -1 and self.srid != -1):
return self.srid
else:
return gsrid
### Routines overloaded from Field ###
def contribute_to_class(self, cls, name):
super(GeometryField, self).contribute_to_class(cls, name)
# Setup for lazy-instantiated Geometry object.
setattr(cls, self.attname, GeometryProxy(Geometry, self))
def db_type(self, connection):
return connection.ops.geo_db_type(self)
def formfield(self, **kwargs):
defaults = {'form_class' : forms.GeometryField,
'geom_type' : self.geom_type,
'srid' : self.srid,
}
defaults.update(kwargs)
return super(GeometryField, self).formfield(**defaults)
def get_db_prep_lookup(self, lookup_type, value, connection, prepared=False):
"""
Prepare for the database lookup, and return any spatial parameters
necessary for the query. This includes wrapping any geometry
parameters with a backend-specific adapter and formatting any distance
parameters into the correct units for the coordinate system of the
field.
"""
if lookup_type in connection.ops.gis_terms:
# special case for isnull lookup
if lookup_type == 'isnull':
return []
# Populating the parameters list, and wrapping the Geometry
# with the Adapter of the spatial backend.
if isinstance(value, (tuple, list)):
params = [connection.ops.Adapter(value[0])]
if lookup_type in connection.ops.distance_functions:
# Getting the distance parameter in the units of the field.
params += self.get_distance(value[1:], lookup_type, connection)
elif lookup_type in connection.ops.truncate_params:
# Lookup is one where SQL parameters aren't needed from the
# given lookup value.
pass
else:
params += value[1:]
elif isinstance(value, SQLEvaluator):
params = []
else:
params = [connection.ops.Adapter(value)]
return params
else:
raise ValueError('%s is not a valid spatial lookup for %s.' %
(lookup_type, self.__class__.__name__))
def get_prep_lookup(self, lookup_type, value):
if lookup_type == 'isnull':
return bool(value)
else:
return self.get_prep_value(value)
def get_db_prep_save(self, value, connection):
"Prepares the value for saving in the database."
if value is None:
return None
else:
return connection.ops.Adapter(self.get_prep_value(value))
def get_placeholder(self, value, connection):
"""
Returns the placeholder for the geometry column for the
given value.
"""
return connection.ops.get_geom_placeholder(self, value)
# The OpenGIS Geometry Type Fields
class PointField(GeometryField):
geom_type = 'POINT'
description = _("Point")
class LineStringField(GeometryField):
geom_type = 'LINESTRING'
description = _("Line string")
class PolygonField(GeometryField):
geom_type = 'POLYGON'
description = _("Polygon")
class MultiPointField(GeometryField):
geom_type = 'MULTIPOINT'
description = _("Multi-point")
class MultiLineStringField(GeometryField):
geom_type = 'MULTILINESTRING'
description = _("Multi-line string")
class MultiPolygonField(GeometryField):
geom_type = 'MULTIPOLYGON'
description = _("Multi polygon")
class GeometryCollectionField(GeometryField):
geom_type = 'GEOMETRYCOLLECTION'
description = _("Geometry collection")
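# Illustrative sketch (not part of django.contrib.gis): a hypothetical model using
# one of the concrete fields above. Declaring a model requires a configured Django
# application, so the usage is shown as a comment only:
#
#     from django.contrib.gis.db import models
#
#     class City(models.Model):
#         name = models.CharField(max_length=50)
#         point = models.PointField(srid=4326, spatial_index=True)
#         objects = models.GeoManager()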
|
|
'''
Methods which sonify annotations for "evaluation by ear".
All functions return a raw signal at the specified sampling rate.
'''
import numpy as np
from numpy.lib.stride_tricks import as_strided
from scipy.interpolate import interp1d
from . import util
from . import chord
def clicks(times, fs, click=None, length=None):
"""Returns a signal with the signal 'click' placed at each specified time
Parameters
----------
times : np.ndarray
times to place clicks, in seconds
fs : int
desired sampling rate of the output signal
click : np.ndarray
click signal, defaults to a 1 kHz blip
length : int
desired number of samples in the output signal,
defaults to ``times.max()*fs + click.shape[0] + 1``
Returns
-------
click_signal : np.ndarray
Synthesized click signal
"""
# Create default click signal
if click is None:
# 1 kHz tone, 100ms
click = np.sin(2*np.pi*np.arange(fs*.1)*1000/(1.*fs))
# Exponential decay
click *= np.exp(-np.arange(fs*.1)/(fs*.01))
# Set default length
if length is None:
length = int(times.max()*fs + click.shape[0] + 1)
# Pre-allocate click signal
click_signal = np.zeros(length)
# Place clicks
for time in times:
# Compute the boundaries of the click
start = int(time*fs)
end = start + click.shape[0]
# Make sure we don't try to output past the end of the signal
if start >= length:
break
if end >= length:
click_signal[start:] = click[:length - start]
break
# Normally, just add a click here
click_signal[start:end] = click
return click_signal
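# Illustrative sketch (not part of mir_eval): render a click track for a set of
# hypothetical beat times at 8 kHz, using the default 1 kHz decaying click.
def _example_click_track(fs=8000):
    beat_times = np.array([0.5, 1.0, 1.5, 2.0])   # hypothetical beat locations, seconds
    return clicks(beat_times, fs)                 # 1-D float array, roughly 2.1 s of audio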
def time_frequency(gram, frequencies, times, fs, function=np.sin, length=None):
"""Reverse synthesis of a time-frequency representation of a signal
Parameters
----------
gram : np.ndarray
``gram[n, m]`` is the magnitude of ``frequencies[n]``
from ``times[m]`` to ``times[m + 1]``
Non-positive magnitudes are interpreted as silence.
frequencies : np.ndarray
array of size ``gram.shape[0]`` denoting the frequency of
each row of gram
times : np.ndarray, shape= ``(gram.shape[1],)`` or ``(gram.shape[1], 2)``
Either the start time of each column in the gram,
or the time interval corresponding to each column.
fs : int
desired sampling rate of the output signal
function : function
function to use to synthesize notes, should be :math:`2\pi`-periodic
length : int
desired number of samples in the output signal,
defaults to ``times[-1]*fs``
Returns
-------
output : np.ndarray
synthesized version of the piano roll
"""
# Default value for length
if times.ndim == 1:
# Convert to intervals
times = util.boundaries_to_intervals(times)
if length is None:
length = int(times[-1, 1] * fs)
times, _ = util.adjust_intervals(times, t_max=length)
# Truncate times so that the shape matches gram
times = times[:gram.shape[1]]
def _fast_synthesize(frequency):
"""A faster way to synthesize a signal.
        Generate a short buffer of several periods at this frequency, and
        simulate arbitrary repetitions using array indexing tricks.
"""
frequency = float(frequency)
# Generate ten periods at this frequency
n_samples = int(10.0 * fs / frequency)
short_signal = function(2.0 * np.pi * np.arange(n_samples) *
frequency / fs)
# Calculate the number of loops we need to fill the duration
n_repeats = int(np.ceil(length/float(short_signal.shape[0])))
# Simulate tiling the short buffer by using stride tricks
long_signal = as_strided(short_signal,
shape=(n_repeats, len(short_signal)),
strides=(0, short_signal.itemsize))
# Use a flatiter to simulate a long 1D buffer
return long_signal.flat
# Threshold the tfgram to remove non-positive values
gram = np.maximum(gram, 0)
# Pre-allocate output signal
output = np.zeros(length)
for n, frequency in enumerate(frequencies):
# Get a waveform of length samples at this frequency
wave = _fast_synthesize(frequency)
# Scale each time interval by the piano roll magnitude
for m, (start, end) in enumerate((times * fs).astype(int)):
# Clip the timings to make sure the indices are valid
start, end = max(start, 0), min(end, length)
# Sum into the aggregate output waveform
output[start:end] += wave[start:end] * gram[n, m]
    # Normalize, but only if there are non-zero values
norm = np.abs(output).max()
if norm >= np.finfo(output.dtype).tiny:
output /= norm
return output
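# Illustrative sketch (not part of mir_eval): sonify a toy two-frequency magnitude
# matrix over three half-second frames; all values here are hypothetical.
def _example_time_frequency(fs=8000):
    gram = np.array([[1.0, 0.0, 0.5],
                     [0.0, 1.0, 0.5]])                          # 2 frequencies x 3 frames
    frequencies = np.array([220.0, 440.0])                      # Hz, one per row of gram
    intervals = np.array([[0.0, 0.5], [0.5, 1.0], [1.0, 1.5]])  # one interval per column
    return time_frequency(gram, frequencies, intervals, fs)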
def pitch_contour(times, frequencies, fs, function=np.sin, length=None,
kind='linear'):
'''Sonify a pitch contour.
Parameters
----------
times : np.ndarray
time indices for each frequency measurement, in seconds
frequencies : np.ndarray
frequency measurements, in Hz.
Non-positive measurements will be interpreted as un-voiced samples.
fs : int
desired sampling rate of the output signal
function : function
function to use to synthesize notes, should be :math:`2\pi`-periodic
length : int
desired number of samples in the output signal,
defaults to ``max(times)*fs``
kind : str
Interpolation mode for the frequency estimator.
See: ``scipy.interpolate.interp1d`` for valid settings.
Returns
-------
output : np.ndarray
synthesized version of the pitch contour
'''
fs = float(fs)
if length is None:
length = int(times.max() * fs)
# Squash the negative frequencies.
# wave(0) = 0, so clipping here will un-voice the corresponding instants
frequencies = np.maximum(frequencies, 0.0)
# Build a frequency interpolator
f_interp = interp1d(times * fs, 2 * np.pi * frequencies / fs, kind=kind,
fill_value=0.0, bounds_error=False, copy=False)
# Estimate frequency at sample points
f_est = f_interp(np.arange(length))
# Sonify the waveform
return function(np.cumsum(f_est))
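# Illustrative sketch (not part of mir_eval): sonify a hypothetical pitch glide
# from 220 Hz up to 440 Hz over one second.
def _example_pitch_glide(fs=8000):
    times = np.linspace(0.0, 1.0, num=20)
    frequencies = np.linspace(220.0, 440.0, num=20)
    return pitch_contour(times, frequencies, fs)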
def chroma(chromagram, times, fs, **kwargs):
"""Reverse synthesis of a chromagram (semitone matrix)
Parameters
----------
chromagram : np.ndarray, shape=(12, times.shape[0])
Chromagram matrix, where each row represents a semitone [C->Bb]
i.e., ``chromagram[3, j]`` is the magnitude of D# from ``times[j]`` to
``times[j + 1]``
    times : np.ndarray, shape=(chromagram.shape[1],) or (chromagram.shape[1], 2)
Either the start time of each column in the chromagram,
or the time interval corresponding to each column.
fs : int
Sampling rate to synthesize audio data at
kwargs
Additional keyword arguments to pass to
:func:`mir_eval.sonify.time_frequency`
Returns
-------
output : np.ndarray
Synthesized chromagram
"""
# We'll just use time_frequency with a Shepard tone-gram
# To create the Shepard tone-gram, we copy the chromagram across 7 octaves
n_octaves = 7
# starting from C2
base_note = 24
# and weight each octave by a normal distribution
# The normal distribution has mean 72 (one octave above middle C)
# and std 6 (one half octave)
mean = 72
std = 6
notes = np.arange(12*n_octaves) + base_note
shepard_weight = np.exp(-(notes - mean)**2./(2.*std**2.))
# Copy the chromagram matrix vertically n_octaves times
gram = np.tile(chromagram.T, n_octaves).T
# This fixes issues if the supplied chromagram is int type
gram = gram.astype(float)
    # Apply Shepard weighting
gram *= shepard_weight.reshape(-1, 1)
# Compute frequencies
frequencies = 440.0*(2.0**((notes - 69)/12.0))
return time_frequency(gram, frequencies, times, fs, **kwargs)
def chords(chord_labels, intervals, fs, **kwargs):
"""Synthesizes chord labels
Parameters
----------
chord_labels : list of str
List of chord label strings.
intervals : np.ndarray, shape=(len(chord_labels), 2)
Start and end times of each chord label
fs : int
Sampling rate to synthesize at
kwargs
Additional keyword arguments to pass to
:func:`mir_eval.sonify.time_frequency`
Returns
-------
output : np.ndarray
Synthesized chord labels
"""
util.validate_intervals(intervals)
# Convert from labels to chroma
roots, interval_bitmaps, _ = chord.encode_many(chord_labels)
chromagram = np.array([np.roll(interval_bitmap, root)
for (interval_bitmap, root)
in zip(interval_bitmaps, roots)]).T
return chroma(chromagram, intervals, fs, **kwargs)
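# Illustrative sketch (not part of mir_eval): sonify two hypothetical chord labels
# over back-to-back one-second intervals.
def _example_chords(fs=8000):
    labels = ['C:maj', 'G:maj']                     # hypothetical chord annotations
    intervals = np.array([[0.0, 1.0], [1.0, 2.0]])  # start/end times, in seconds
    return chords(labels, intervals, fs)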
|
|
""" Non-negative matrix factorization
"""
# Author: Vlad Niculae
# Lars Buitinck <L.J.Buitinck@uva.nl>
# Author: Chih-Jen Lin, National Taiwan University (original projected gradient
# NMF implementation)
# Author: Anthony Di Franco (original Python and NumPy port)
# License: BSD 3 clause
from __future__ import division
from math import sqrt
import warnings
import numpy as np
import scipy.sparse as sp
from scipy.optimize import nnls
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_random_state, check_array
from ..utils.extmath import randomized_svd, safe_sparse_dot, squared_norm
from ..utils.validation import check_is_fitted, check_non_negative
def safe_vstack(Xs):
if any(sp.issparse(X) for X in Xs):
return sp.vstack(Xs)
else:
return np.vstack(Xs)
def norm(x):
"""Dot product-based Euclidean norm implementation
See: http://fseoane.net/blog/2011/computing-the-vector-norm/
"""
return sqrt(squared_norm(x))
def trace_dot(X, Y):
"""Trace of np.dot(X, Y.T)."""
return np.dot(X.ravel(), Y.ravel())
def _sparseness(x):
"""Hoyer's measure of sparsity for a vector"""
sqrt_n = np.sqrt(len(x))
return (sqrt_n - np.linalg.norm(x, 1) / norm(x)) / (sqrt_n - 1)
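# Illustrative sketch (not part of scikit-learn): Hoyer's measure is 1.0 for a
# maximally sparse vector and 0.0 for a constant one.
def _example_sparseness_values():
    peaked = _sparseness(np.array([0., 0., 3.]))   # -> 1.0
    flat = _sparseness(np.array([1., 1., 1.]))     # -> 0.0 (up to rounding)
    return peaked, flat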
def _initialize_nmf(X, n_components, variant=None, eps=1e-6,
random_state=None):
"""NNDSVD algorithm for NMF initialization.
Computes a good initial guess for the non-negative
rank k matrix approximation for X: X = WH
Parameters
----------
X : array, [n_samples, n_features]
The data matrix to be decomposed.
    n_components : int
The number of components desired in the approximation.
variant : None | 'a' | 'ar'
The variant of the NNDSVD algorithm.
Accepts None, 'a', 'ar'
None: leaves the zero entries as zero
'a': Fills the zero entries with the average of X
'ar': Fills the zero entries with standard normal random variates.
Default: None
eps: float
        Truncate all values less than this in output to zero.
random_state : numpy.RandomState | int, optional
The generator used to fill in the zeros, when using variant='ar'
Default: numpy.random
Returns
-------
(W, H) :
Initial guesses for solving X ~= WH such that
the number of columns in W is n_components.
References
----------
C. Boutsidis, E. Gallopoulos: SVD based initialization: A head start for
nonnegative matrix factorization - Pattern Recognition, 2008
http://tinyurl.com/nndsvd
"""
check_non_negative(X, "NMF initialization")
if variant not in (None, 'a', 'ar'):
raise ValueError("Invalid variant name")
random_state = check_random_state(random_state)
U, S, V = randomized_svd(X, n_components, random_state=random_state)
W, H = np.zeros(U.shape), np.zeros(V.shape)
# The leading singular triplet is non-negative
# so it can be used as is for initialization.
W[:, 0] = np.sqrt(S[0]) * np.abs(U[:, 0])
H[0, :] = np.sqrt(S[0]) * np.abs(V[0, :])
for j in range(1, n_components):
x, y = U[:, j], V[j, :]
# extract positive and negative parts of column vectors
x_p, y_p = np.maximum(x, 0), np.maximum(y, 0)
x_n, y_n = np.abs(np.minimum(x, 0)), np.abs(np.minimum(y, 0))
# and their norms
x_p_nrm, y_p_nrm = norm(x_p), norm(y_p)
x_n_nrm, y_n_nrm = norm(x_n), norm(y_n)
m_p, m_n = x_p_nrm * y_p_nrm, x_n_nrm * y_n_nrm
# choose update
if m_p > m_n:
u = x_p / x_p_nrm
v = y_p / y_p_nrm
sigma = m_p
else:
u = x_n / x_n_nrm
v = y_n / y_n_nrm
sigma = m_n
lbd = np.sqrt(S[j] * sigma)
W[:, j] = lbd * u
H[j, :] = lbd * v
W[W < eps] = 0
H[H < eps] = 0
if variant == "a":
avg = X.mean()
W[W == 0] = avg
H[H == 0] = avg
elif variant == "ar":
avg = X.mean()
W[W == 0] = abs(avg * random_state.randn(len(W[W == 0])) / 100)
H[H == 0] = abs(avg * random_state.randn(len(H[H == 0])) / 100)
return W, H
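# Illustrative sketch (not part of scikit-learn): NNDSVD initialization of a small
# hypothetical non-negative matrix; W is (n_samples, n_components) and H is
# (n_components, n_features).
def _example_nndsvd_init(n_components=2):
    rng = check_random_state(0)
    X = np.abs(rng.randn(6, 4))                   # hypothetical non-negative data
    W, H = _initialize_nmf(X, n_components, variant='a', random_state=rng)
    return W.shape, H.shape                       # ((6, 2), (2, 4))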
def _nls_subproblem(V, W, H, tol, max_iter, sigma=0.01, beta=0.1):
"""Non-negative least square solver
Solves a non-negative least squares subproblem using the
projected gradient descent algorithm.
    min_H || WH - V ||_2,  subject to H >= 0
Parameters
----------
V, W : array-like
Constant matrices.
H : array-like
Initial guess for the solution.
tol : float
Tolerance of the stopping condition.
max_iter : int
Maximum number of iterations before timing out.
sigma : float
Constant used in the sufficient decrease condition checked by the line
search. Smaller values lead to a looser sufficient decrease condition,
thus reducing the time taken by the line search, but potentially
increasing the number of iterations of the projected gradient
procedure. 0.01 is a commonly used value in the optimization
literature.
beta : float
Factor by which the step size is decreased (resp. increased) until
(resp. as long as) the sufficient decrease condition is satisfied.
        Larger values make it possible to find a better step size but lead to a
        longer line search. 0.1 is a commonly used value in the optimization
        literature.
Returns
-------
H : array-like
Solution to the non-negative least squares problem.
grad : array-like
The gradient.
n_iter : int
The number of iterations done by the algorithm.
References
----------
C.-J. Lin. Projected gradient methods for non-negative matrix factorization.
Neural Computation, 19(2007), 2756-2779.
http://www.csie.ntu.edu.tw/~cjlin/nmf/
"""
WtV = safe_sparse_dot(W.T, V)
WtW = np.dot(W.T, W)
# values justified in the paper
alpha = 1
for n_iter in range(1, max_iter + 1):
grad = np.dot(WtW, H) - WtV
# The following multiplication with a boolean array is more than twice
# as fast as indexing into grad.
if norm(grad * np.logical_or(grad < 0, H > 0)) < tol:
break
Hp = H
for inner_iter in range(19):
# Gradient step.
Hn = H - alpha * grad
# Projection step.
Hn *= Hn > 0
d = Hn - H
gradd = np.dot(grad.ravel(), d.ravel())
dQd = np.dot(np.dot(WtW, d).ravel(), d.ravel())
suff_decr = (1 - sigma) * gradd + 0.5 * dQd < 0
if inner_iter == 0:
decr_alpha = not suff_decr
if decr_alpha:
if suff_decr:
H = Hn
break
else:
alpha *= beta
elif not suff_decr or (Hp == Hn).all():
H = Hp
break
else:
alpha /= beta
Hp = Hn
if n_iter == max_iter:
warnings.warn("Iteration limit reached in nls subproblem.")
return H, grad, n_iter
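# Illustrative sketch (not part of scikit-learn): solve a tiny non-negative least
# squares subproblem min_H ||WH - V||_2 starting from a hypothetical all-ones guess.
def _example_nls_subproblem():
    rng = check_random_state(0)
    W = np.abs(rng.randn(5, 2))
    V = np.dot(W, np.abs(rng.randn(2, 3)))        # V is exactly representable as W @ H
    H, grad, n_iter = _nls_subproblem(V, W, np.ones((2, 3)), tol=1e-4, max_iter=200)
    return H, n_iter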
class ProjectedGradientNMF(BaseEstimator, TransformerMixin):
"""Non-Negative matrix factorization by Projected Gradient (NMF)
Read more in the :ref:`User Guide <NMF>`.
Parameters
----------
n_components : int or None
Number of components, if n_components is not set all components
are kept
init : 'nndsvd' | 'nndsvda' | 'nndsvdar' | 'random'
Method used to initialize the procedure.
Default: 'nndsvd' if n_components < n_features, otherwise random.
Valid options::
'nndsvd': Nonnegative Double Singular Value Decomposition (NNDSVD)
initialization (better for sparseness)
'nndsvda': NNDSVD with zeros filled with the average of X
(better when sparsity is not desired)
'nndsvdar': NNDSVD with zeros filled with small random values
(generally faster, less accurate alternative to NNDSVDa
for when sparsity is not desired)
'random': non-negative random matrices
sparseness : 'data' | 'components' | None, default: None
Where to enforce sparsity in the model.
beta : double, default: 1
Degree of sparseness, if sparseness is not None. Larger values mean
more sparseness.
eta : double, default: 0.1
        Degree of correctness to maintain, if sparseness is not None. Smaller
values mean larger error.
tol : double, default: 1e-4
Tolerance value used in stopping conditions.
max_iter : int, default: 200
Number of iterations to compute.
nls_max_iter : int, default: 2000
Number of iterations in NLS subproblem.
random_state : int or RandomState
Random number generator seed control.
Attributes
----------
components_ : array, [n_components, n_features]
Non-negative components of the data.
reconstruction_err_ : number
Frobenius norm of the matrix difference between
the training data and the reconstructed data from
the fit produced by the model. ``|| X - WH ||_2``
n_iter_ : int
Number of iterations run.
Examples
--------
>>> import numpy as np
>>> X = np.array([[1,1], [2, 1], [3, 1.2], [4, 1], [5, 0.8], [6, 1]])
>>> from sklearn.decomposition import ProjectedGradientNMF
>>> model = ProjectedGradientNMF(n_components=2, init='random',
... random_state=0)
>>> model.fit(X) #doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
ProjectedGradientNMF(beta=1, eta=0.1, init='random', max_iter=200,
n_components=2, nls_max_iter=2000, random_state=0, sparseness=None,
tol=0.0001)
>>> model.components_
array([[ 0.77032744, 0.11118662],
[ 0.38526873, 0.38228063]])
>>> model.reconstruction_err_ #doctest: +ELLIPSIS
0.00746...
>>> model = ProjectedGradientNMF(n_components=2,
... sparseness='components', init='random', random_state=0)
>>> model.fit(X) #doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
ProjectedGradientNMF(beta=1, eta=0.1, init='random', max_iter=200,
n_components=2, nls_max_iter=2000, random_state=0,
sparseness='components', tol=0.0001)
>>> model.components_
array([[ 1.67481991, 0.29614922],
[ 0. , 0.4681982 ]])
>>> model.reconstruction_err_ #doctest: +ELLIPSIS
0.513...
References
----------
This implements
C.-J. Lin. Projected gradient methods
for non-negative matrix factorization. Neural
Computation, 19(2007), 2756-2779.
http://www.csie.ntu.edu.tw/~cjlin/nmf/
P. Hoyer. Non-negative Matrix Factorization with
Sparseness Constraints. Journal of Machine Learning
Research 2004.
NNDSVD is introduced in
C. Boutsidis, E. Gallopoulos: SVD based
initialization: A head start for nonnegative
matrix factorization - Pattern Recognition, 2008
http://tinyurl.com/nndsvd
"""
def __init__(self, n_components=None, init=None, sparseness=None, beta=1,
eta=0.1, tol=1e-4, max_iter=200, nls_max_iter=2000,
random_state=None):
self.n_components = n_components
self.init = init
self.tol = tol
if sparseness not in (None, 'data', 'components'):
raise ValueError(
'Invalid sparseness parameter: got %r instead of one of %r' %
(sparseness, (None, 'data', 'components')))
self.sparseness = sparseness
self.beta = beta
self.eta = eta
self.max_iter = max_iter
self.nls_max_iter = nls_max_iter
self.random_state = random_state
def _init(self, X):
n_samples, n_features = X.shape
init = self.init
if init is None:
if self.n_components_ < n_features:
init = 'nndsvd'
else:
init = 'random'
rng = check_random_state(self.random_state)
if init == 'nndsvd':
W, H = _initialize_nmf(X, self.n_components_, random_state=rng)
elif init == 'nndsvda':
W, H = _initialize_nmf(X, self.n_components_, variant='a',
random_state=rng)
elif init == 'nndsvdar':
W, H = _initialize_nmf(X, self.n_components_, variant='ar',
random_state=rng)
elif init == "random":
W = rng.randn(n_samples, self.n_components_)
# we do not write np.abs(W, out=W) to stay compatible with
# numpy 1.5 and earlier where the 'out' keyword is not
# supported as a kwarg on ufuncs
np.abs(W, W)
H = rng.randn(self.n_components_, n_features)
np.abs(H, H)
else:
raise ValueError(
'Invalid init parameter: got %r instead of one of %r' %
(init, (None, 'nndsvd', 'nndsvda', 'nndsvdar', 'random')))
return W, H
def _update_W(self, X, H, W, tolW):
n_samples, n_features = X.shape
if self.sparseness is None:
W, gradW, iterW = _nls_subproblem(X.T, H.T, W.T, tolW,
self.nls_max_iter)
elif self.sparseness == 'data':
W, gradW, iterW = _nls_subproblem(
safe_vstack([X.T, np.zeros((1, n_samples))]),
safe_vstack([H.T, np.sqrt(self.beta) * np.ones((1,
self.n_components_))]),
W.T, tolW, self.nls_max_iter)
elif self.sparseness == 'components':
W, gradW, iterW = _nls_subproblem(
safe_vstack([X.T,
np.zeros((self.n_components_, n_samples))]),
safe_vstack([H.T,
np.sqrt(self.eta) * np.eye(self.n_components_)]),
W.T, tolW, self.nls_max_iter)
return W.T, gradW.T, iterW
def _update_H(self, X, H, W, tolH):
n_samples, n_features = X.shape
if self.sparseness is None:
H, gradH, iterH = _nls_subproblem(X, W, H, tolH,
self.nls_max_iter)
elif self.sparseness == 'data':
H, gradH, iterH = _nls_subproblem(
safe_vstack([X, np.zeros((self.n_components_, n_features))]),
safe_vstack([W,
np.sqrt(self.eta) * np.eye(self.n_components_)]),
H, tolH, self.nls_max_iter)
elif self.sparseness == 'components':
H, gradH, iterH = _nls_subproblem(
safe_vstack([X, np.zeros((1, n_features))]),
safe_vstack([W,
np.sqrt(self.beta)
* np.ones((1, self.n_components_))]),
H, tolH, self.nls_max_iter)
return H, gradH, iterH
def fit_transform(self, X, y=None):
"""Learn a NMF model for the data X and returns the transformed data.
This is more efficient than calling fit followed by transform.
Parameters
----------
X: {array-like, sparse matrix}, shape = [n_samples, n_features]
Data matrix to be decomposed
Returns
-------
data: array, [n_samples, n_components]
Transformed data
"""
X = check_array(X, accept_sparse='csr')
check_non_negative(X, "NMF.fit")
n_samples, n_features = X.shape
if not self.n_components:
self.n_components_ = n_features
else:
self.n_components_ = self.n_components
W, H = self._init(X)
gradW = (np.dot(W, np.dot(H, H.T))
- safe_sparse_dot(X, H.T, dense_output=True))
gradH = (np.dot(np.dot(W.T, W), H)
- safe_sparse_dot(W.T, X, dense_output=True))
init_grad = norm(np.r_[gradW, gradH.T])
tolW = max(0.001, self.tol) * init_grad # why max?
tolH = tolW
tol = self.tol * init_grad
for n_iter in range(1, self.max_iter + 1):
# stopping condition
# as discussed in paper
proj_norm = norm(np.r_[gradW[np.logical_or(gradW < 0, W > 0)],
gradH[np.logical_or(gradH < 0, H > 0)]])
if proj_norm < tol:
break
# update W
W, gradW, iterW = self._update_W(X, H, W, tolW)
if iterW == 1:
tolW = 0.1 * tolW
# update H
H, gradH, iterH = self._update_H(X, H, W, tolH)
if iterH == 1:
tolH = 0.1 * tolH
if not sp.issparse(X):
error = norm(X - np.dot(W, H))
else:
sqnorm_X = np.dot(X.data, X.data)
norm_WHT = trace_dot(np.dot(np.dot(W.T, W), H), H)
cross_prod = trace_dot((X * H.T), W)
error = sqrt(sqnorm_X + norm_WHT - 2. * cross_prod)
self.reconstruction_err_ = error
self.comp_sparseness_ = _sparseness(H.ravel())
self.data_sparseness_ = _sparseness(W.ravel())
H[H == 0] = 0 # fix up negative zeros
self.components_ = H
if n_iter == self.max_iter:
warnings.warn("Iteration limit reached during fit. Solving for W exactly.")
return self.transform(X)
self.n_iter_ = n_iter
return W
def fit(self, X, y=None, **params):
"""Learn a NMF model for the data X.
Parameters
----------
X: {array-like, sparse matrix}, shape = [n_samples, n_features]
Data matrix to be decomposed
Returns
-------
self
"""
self.fit_transform(X, **params)
return self
def transform(self, X):
"""Transform the data X according to the fitted NMF model
Parameters
----------
X: {array-like, sparse matrix}, shape = [n_samples, n_features]
Data matrix to be transformed by the model
Returns
-------
data: array, [n_samples, n_components]
Transformed data
"""
check_is_fitted(self, 'n_components_')
X = check_array(X, accept_sparse='csc')
Wt = np.zeros((self.n_components_, X.shape[0]))
check_non_negative(X, "ProjectedGradientNMF.transform")
if sp.issparse(X):
Wt, _, _ = _nls_subproblem(X.T, self.components_.T, Wt,
tol=self.tol,
max_iter=self.nls_max_iter)
else:
for j in range(0, X.shape[0]):
Wt[:, j], _ = nnls(self.components_.T, X[j, :])
return Wt.T
class NMF(ProjectedGradientNMF):
__doc__ = ProjectedGradientNMF.__doc__
pass
|
|
"""
raven.contrib.django.models
~~~~~~~~~~~~~~~~~~~~~~~~~~~
Acts as an implicit hook for Django installs.
:copyright: (c) 2010-2012 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
from __future__ import unicode_literals
from hashlib import md5
import logging
import sys
import warnings
from raven.utils import six
from django.conf import settings as django_settings
logger = logging.getLogger('sentry.errors.client')
def get_installed_apps():
"""
Modules in settings.INSTALLED_APPS as a set.
"""
return set(django_settings.INSTALLED_APPS)
_client = (None, None)
class ProxyClient(object):
"""
A proxy which represents the current client at all times.
"""
# introspection support:
__members__ = property(lambda x: x.__dir__())
# Need to pretend to be the wrapped class, for the sake of objects that care
# about this (especially in equality tests)
__class__ = property(lambda x: get_client().__class__)
__dict__ = property(lambda o: get_client().__dict__)
__repr__ = lambda x: repr(get_client())
__getattr__ = lambda x, o: getattr(get_client(), o)
__setattr__ = lambda x, o, v: setattr(get_client(), o, v)
__delattr__ = lambda x, o: delattr(get_client(), o)
__lt__ = lambda x, o: get_client() < o
__le__ = lambda x, o: get_client() <= o
__eq__ = lambda x, o: get_client() == o
__ne__ = lambda x, o: get_client() != o
__gt__ = lambda x, o: get_client() > o
__ge__ = lambda x, o: get_client() >= o
if not six.PY3:
__cmp__ = lambda x, o: cmp(get_client(), o) # NOQA
__hash__ = lambda x: hash(get_client())
# attributes are currently not callable
# __call__ = lambda x, *a, **kw: get_client()(*a, **kw)
__nonzero__ = lambda x: bool(get_client())
__len__ = lambda x: len(get_client())
__getitem__ = lambda x, i: get_client()[i]
__iter__ = lambda x: iter(get_client())
__contains__ = lambda x, i: i in get_client()
__getslice__ = lambda x, i, j: get_client()[i:j]
__add__ = lambda x, o: get_client() + o
__sub__ = lambda x, o: get_client() - o
__mul__ = lambda x, o: get_client() * o
__floordiv__ = lambda x, o: get_client() // o
__mod__ = lambda x, o: get_client() % o
__divmod__ = lambda x, o: get_client().__divmod__(o)
__pow__ = lambda x, o: get_client() ** o
__lshift__ = lambda x, o: get_client() << o
__rshift__ = lambda x, o: get_client() >> o
__and__ = lambda x, o: get_client() & o
__xor__ = lambda x, o: get_client() ^ o
__or__ = lambda x, o: get_client() | o
__div__ = lambda x, o: get_client().__div__(o)
__truediv__ = lambda x, o: get_client().__truediv__(o)
__neg__ = lambda x: -(get_client())
__pos__ = lambda x: +(get_client())
__abs__ = lambda x: abs(get_client())
__invert__ = lambda x: ~(get_client())
__complex__ = lambda x: complex(get_client())
__int__ = lambda x: int(get_client())
if not six.PY3:
__long__ = lambda x: long(get_client()) # NOQA
__float__ = lambda x: float(get_client())
__str__ = lambda x: six.binary_type(get_client())
__unicode__ = lambda x: six.text_type(get_client())
__oct__ = lambda x: oct(get_client())
__hex__ = lambda x: hex(get_client())
__index__ = lambda x: get_client().__index__()
__coerce__ = lambda x, o: x.__coerce__(x, o)
__enter__ = lambda x: x.__enter__()
__exit__ = lambda x, *a, **kw: x.__exit__(*a, **kw)
client = ProxyClient()
def get_option(x, d=None):
options = getattr(django_settings, 'RAVEN_CONFIG', {})
return getattr(django_settings, 'SENTRY_%s' % x, options.get(x, d))
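# Usage sketch for get_option (illustrative; the DSN value is hypothetical and
# settings are only configured here when this module is run directly, never on
# normal import):
if __name__ == "__main__":
    django_settings.configure(
        RAVEN_CONFIG={'DSN': 'https://public:secret@sentry.example.invalid/1'})
    print(get_option('DSN'))  # no SENTRY_DSN setting, so RAVEN_CONFIG wins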
def get_client(client=None):
global _client
tmp_client = client is not None
if not tmp_client:
client = getattr(django_settings, 'SENTRY_CLIENT', 'raven.contrib.django.DjangoClient')
if _client[0] != client:
module, class_name = client.rsplit('.', 1)
ga = lambda x, d=None: getattr(django_settings, 'SENTRY_%s' % x, d)
options = getattr(django_settings, 'RAVEN_CONFIG', {})
options.setdefault('servers', ga('SERVERS'))
options.setdefault('include_paths', ga('INCLUDE_PATHS', []))
options['include_paths'] = set(options['include_paths']) | get_installed_apps()
options.setdefault('exclude_paths', ga('EXCLUDE_PATHS'))
options.setdefault('timeout', ga('TIMEOUT'))
options.setdefault('name', ga('NAME'))
options.setdefault('auto_log_stacks', ga('AUTO_LOG_STACKS'))
options.setdefault('key', ga('KEY', md5(django_settings.SECRET_KEY.encode('utf8')).hexdigest()))
options.setdefault('string_max_length', ga('MAX_LENGTH_STRING'))
options.setdefault('list_max_length', ga('MAX_LENGTH_LIST'))
options.setdefault('site', ga('SITE'))
options.setdefault('public_key', ga('PUBLIC_KEY'))
options.setdefault('secret_key', ga('SECRET_KEY'))
options.setdefault('project', ga('PROJECT'))
options.setdefault('processors', ga('PROCESSORS'))
options.setdefault('dsn', ga('DSN'))
options.setdefault('context', ga('CONTEXT'))
class_name = str(class_name)
instance = getattr(__import__(module, {}, {}, class_name), class_name)(**options)
if not tmp_client:
_client = (client, instance)
return instance
return _client[1]
def sentry_exception_handler(request=None, **kwargs):
exc_type = sys.exc_info()[0]
exclusions = set(get_option('IGNORE_EXCEPTIONS', ()))
exc_name = '%s.%s' % (exc_type.__module__, exc_type.__name__)
if exc_type.__name__ in exclusions or exc_name in exclusions or any(exc_name.startswith(e[:-1]) for e in exclusions if e.endswith('*')):
logger.info(
'Not capturing exception due to filters: %s', exc_type,
exc_info=sys.exc_info())
return
try:
client.captureException(exc_info=sys.exc_info(), request=request)
except Exception as exc:
try:
logger.exception('Unable to process log entry: %s' % (exc,))
except Exception as exc:
warnings.warn('Unable to process log entry: %s' % (exc,))
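# Standalone sketch of the IGNORE_EXCEPTIONS matching used above (the
# exclusion patterns and exception name are hypothetical):
if __name__ == "__main__":
    exclusions = {'KeyError', 'myapp.errors.*'}
    exc_name = 'myapp.errors.IgnorableError'
    ignored = ('IgnorableError' in exclusions or exc_name in exclusions or
               any(exc_name.startswith(e[:-1])
                   for e in exclusions if e.endswith('*')))
    print(ignored)  # True: matched by the 'myapp.errors.*' wildcard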
def register_handlers():
from django.core.signals import got_request_exception
# HACK: support Sentry's internal communication
if 'sentry' in django_settings.INSTALLED_APPS:
from django.db import transaction
@transaction.commit_on_success
def wrap_sentry(request, **kwargs):
if transaction.is_dirty():
transaction.rollback()
return sentry_exception_handler(request, **kwargs)
exception_handler = wrap_sentry
else:
exception_handler = sentry_exception_handler
# Connect to Django's internal signal handler
got_request_exception.connect(exception_handler, weak=False)
# If Celery is installed, register a signal handler
if 'djcelery' in django_settings.INSTALLED_APPS:
try:
# Celery < 2.5 is not supported
from raven.contrib.celery import (
register_signal, register_logger_signal)
except ImportError:
logger.exception('Failed to install Celery error handler')
else:
try:
register_signal(client)
except Exception:
logger.exception('Failed to install Celery error handler')
try:
register_logger_signal(client)
except Exception:
logger.exception('Failed to install Celery error handler')
def register_serializers():
# force import so serializers can call register
import raven.contrib.django.serializers # NOQA
if ('raven.contrib.django' in django_settings.INSTALLED_APPS
or 'raven.contrib.django.raven_compat' in django_settings.INSTALLED_APPS):
register_handlers()
register_serializers()
|
|
"""Test suite for abdi_processrepo."""
from __future__ import absolute_import
import contextlib
import types
import unittest
import phlcon_differential
import phlmail_mocksender
import phlsys_pluginmanager
import abdmail_mailer
import abdt_arcydreporter
import abdt_branchmock
import abdt_conduitmock
import abdt_exception
import abdt_repooptions
import abdt_reporeporter
import abdt_shareddictoutput
import abdi_processrepo
# =============================================================================
# TEST PLAN
# -----------------------------------------------------------------------------
# Here we detail the things we are concerned to test and specify which tests
# cover those concerns.
#
# Concerns:
# [ A] processUpdateRepo can handle the case of no branches
# [ B] processUpdateRepo can create, update and land an uncomplicated review
# [ C] processUpdateRepo can handle a review without test plan
# [ D] processUpdateRepo can handle a review being closed unexpectedly
# [ E] processUpdateRepo can handle a review without initial valid base
# [ F] processUpdateRepo can handle a review without initial author
# [ G] processUpdateRepo can handle a review without commits on branch
# [ H] processUpdateRepo can abandon a review when the branch disappears
# [ I] processUpdateRepo can handle a review with merge conflicts
# [ J] processUpdateRepo can handle a diff that's too big
# [ K] processUpdateRepo can report an exception during branch processing
# [ B] processUpdateRepo doesn't leave the current branch set after processing
# [ L] processUpdateRepo can handle a branch with only empty commits
# [ M] processUpdateRepo won't emit errors in a cycle when landing w/o author
# [ ] processUpdateRepo can handle a review without commits in repo
# [ ] processUpdateRepo will comment on a bad branch if the error has changed
# -----------------------------------------------------------------------------
# Tests:
# [ A] test_A_Breathing
# [ B] test_B_Uncomplicated
# [ C] test_C_NoTestPlan
# [ D] test_D_UnexpectedClose
# [ E] test_E_InvalidBaseBranch
# [ F] test_F_NoInitialAuthor
# [ G] test_G_NoCommitsOnBranch
# [ H] test_H_AbandonRemovedBranch
# [ I] test_I_MergeConflicts
# [ J] test_J_DiffTooBig
# [ K] test_K_ExceptionDuringProcessing
# [ L] test_L_EmptyDiff
# [ M] test_M_NoLandingAuthor
# =============================================================================
class Test(unittest.TestCase):
def __init__(self, data):
super(Test, self).__init__(data)
self.conduit_data = None
self.conduit = None
self.mock_sender = None
self.mailer = None
self.plugin_manager = None
self.arcyd_reporter_data = None
self.arcyd_reporter = None
self.reporter = None
self.reporter_try = None
self.reporter_ok = None
def setUp(self):
self.conduit_data = abdt_conduitmock.ConduitMockData()
self.conduit = abdt_conduitmock.ConduitMock(self.conduit_data)
self.mock_sender = phlmail_mocksender.MailSender()
self.mailer = abdmail_mailer.Mailer(
self.mock_sender,
["admin@server.test"],
"http://server.fake/testrepo.git",
"http://phabricator.server.fake/")
self.plugin_manager = phlsys_pluginmanager.PluginManager([], [])
self.arcyd_reporter_data = {}
self.arcyd_reporter = abdt_arcydreporter.ArcydReporter(
abdt_shareddictoutput.ToDict(self.arcyd_reporter_data),
"arcyd@localhost")
def tearDown(self):
pass
def _process_branches(self, branches):
self.reporter_try = {}
self.reporter_ok = {}
config = abdt_repooptions.Data()
config.branch_url_format = (
'http://my.git/gitweb?p=r.git;a=log;h=refs/heads/{branch}')
config.review_url_format = 'http://my.phabricator/{review}'
self.reporter = abdt_reporeporter.RepoReporter(
self.arcyd_reporter,
'abdi_processrepo__t:Test repo:machine name',
'abdi_processrepo__t:Test repo',
'org/repo',
abdt_shareddictoutput.ToDict(self.reporter_try),
abdt_shareddictoutput.ToDict(self.reporter_ok))
self.reporter.set_config(config)
with contextlib.closing(self.reporter):
abdi_processrepo.process_branches(
branches,
self.conduit,
self.mailer,
self.plugin_manager,
self.reporter)
def test_A_Breathing(self):
self._process_branches([])
self.assertTrue(self.mock_sender.is_empty())
self.assertTrue(self.conduit_data.is_unchanged())
def test_B_Uncomplicated(self):
branch, branch_data = abdt_branchmock.create_simple_new_review()
self._process_branches([branch])
self.assertFalse(branch.is_status_bad())
self.assertTrue(self.mock_sender.is_empty())
self.assertFalse(self.conduit_data.is_unchanged())
self.assertEqual(len(self.conduit_data.revisions), 1)
self.assertFalse(
self.reporter_try[abdt_reporeporter.REPO_ATTRIB_STATUS_BRANCH])
self.conduit_data.accept_the_only_review()
self.conduit_data.set_unchanged()
self._process_branches([branch])
self.assertEqual(len(self.conduit_data.revisions), 1)
self.assertTrue(self.conduit_data.revisions[0].is_closed())
self.assertTrue(self.mock_sender.is_empty())
self.assertFalse(self.conduit_data.is_unchanged())
self.assertTrue(branch.is_new())
self.assertFalse(
self.reporter_try[abdt_reporeporter.REPO_ATTRIB_STATUS_BRANCH])
def test_C_NoTestPlan(self):
branch, branch_data = abdt_branchmock.create_simple_new_review()
def error_parse_commit_message(self, unused_message):
return phlcon_differential.ParseCommitMessageResponse(
fields=None, errors=["FAKE ERROR"])
regular_parse = self.conduit.parse_commit_message
self.conduit.parse_commit_message = types.MethodType(
error_parse_commit_message, self.conduit)
self._process_branches([branch])
self.assertEqual(len(self.conduit_data.revisions), 1)
self.assertFalse(self.conduit_data.revisions[0].is_closed())
self.assertTrue(self.mock_sender.is_empty())
self.assertFalse(self.conduit_data.is_unchanged())
self.assertTrue(branch.is_status_bad())
self.conduit.parse_commit_message = regular_parse
self.conduit_data.set_unchanged()
branch_data.has_new_commits = True
self.conduit_data.accept_the_only_review()
self._process_branches([branch])
self.assertEqual(len(self.conduit_data.revisions), 1)
self.assertTrue(self.conduit_data.revisions[0].is_closed())
self.assertTrue(self.mock_sender.is_empty())
self.assertFalse(self.conduit_data.is_unchanged())
def test_D_UnexpectedClose(self):
branch, branch_data = abdt_branchmock.create_simple_new_review()
self._process_branches([branch])
revision = self.conduit_data.get_revision(branch_data.revision_id)
revision.set_closed()
branch_data.has_new_commits = True
self._process_branches([branch])
self.assertTrue(branch.is_status_bad())
def test_E_InvalidBaseBranch(self):
# set base to invalid
branch, branch_data = abdt_branchmock.create_new_review_invalid_base()
self._process_branches([branch])
self.assertTrue(branch.is_status_bad())
# set base ok again
branch_data.is_base_ok = True
self._process_branches([branch])
self.assertFalse(branch.is_status_bad())
# set base bad again
branch_data.is_base_ok = False
branch_data.has_new_commits = True
self._process_branches([branch])
self.assertTrue(branch.is_status_bad())
def test_F_NoInitialAuthor(self):
branch, branch_data = abdt_branchmock.create_review_no_initial_author()
self._process_branches([branch])
self.assertTrue(branch.is_status_bad_pre_review())
# we must have sent a message to warn about the user
self.assertFalse(self.mock_sender.is_empty())
# no review will have been created
self.assertTrue(self.conduit_data.is_unchanged())
# fix the user details and process
branch_data.names_emails = abdt_branchmock.create_ok_names_emails()
branch_data.has_new_commits = True
self._process_branches([branch])
self.assertFalse(branch.is_status_bad())
# check that a review was created
self.assertFalse(self.conduit_data.is_unchanged())
self.assertEqual(len(self.conduit_data.revisions), 1)
def test_G_NoCommitsOnBranch(self):
branch, branch_data = abdt_branchmock.create_review_no_commits()
self._process_branches([branch])
self.assertTrue(branch.is_status_bad())
def test_H_AbandonRemovedBranch(self):
branch, branch_data = abdt_branchmock.create_review_removed()
self._process_branches([branch])
self.assertTrue(branch.is_null())
# TODO: should probably abandon the review too, if the branch goes
# self.assertTrue(
# self.conduit_data.get_the_only_revision().is_abandoned())
def test_I_MergeConflicts(self):
def error_land(self, unused_name, unused_email, unused_message):
raise abdt_exception.LandingException(
'landing exception',
'<review branch name>',
'<base branch name>')
# create review ok
branch, branch_data = abdt_branchmock.create_simple_new_review()
self._process_branches([branch])
# fail to land
old_land = branch.land
branch.land = types.MethodType(error_land, branch)
self.conduit_data.accept_the_only_review()
self._process_branches([branch])
self.assertTrue(branch.is_status_bad_land())
# fix the landing error
branch.land = old_land
branch_data.has_new_commits = True
# land ok
self.conduit_data.accept_the_only_review()
self._process_branches([branch])
self.assertTrue(branch.is_null())
def test_J_DiffTooBig(self):
def error_diff(self):
raise abdt_exception.LargeDiffException("diff too big", 100, 10)
# fail to create review
branch, branch_data = abdt_branchmock.create_simple_new_review()
old_diff = branch.make_raw_diff
branch.make_raw_diff = types.MethodType(error_diff, branch)
self._process_branches([branch])
self.assertFalse(branch.is_status_bad_pre_review())
self.assertFalse(branch.is_status_bad_land())
self.assertTrue(branch.is_status_bad())
# fix the large diff
branch.make_raw_diff = old_diff
branch_data.has_new_commits = True
# update the review ok
self._process_branches([branch])
self.assertFalse(branch.is_status_bad())
# land ok
self.conduit_data.accept_the_only_review()
self._process_branches([branch])
self.assertTrue(branch.is_null())
def test_K_ExceptionDuringProcessing(self):
class test_K_ExceptionDuringProcessing_Exception(Exception):
pass
def error_diff(self):
raise test_K_ExceptionDuringProcessing_Exception()
# fail to create review
branch, branch_data = abdt_branchmock.create_simple_new_review()
branch.make_raw_diff = types.MethodType(error_diff, branch)
# make sure it raises our exception
self.assertRaises(
test_K_ExceptionDuringProcessing_Exception,
self._process_branches,
[branch])
# make sure the current branch is set in the report
self.assertEqual(
self.reporter_try[abdt_reporeporter.REPO_ATTRIB_STATUS_BRANCH],
branch.review_branch_name())
def test_L_EmptyDiff(self):
# fail to create review with empty diff
branch, branch_data = abdt_branchmock.create_simple_new_review()
branch_data.raw_diff = ""
self._process_branches([branch])
self.assertFalse(branch.is_status_bad_pre_review())
self.assertFalse(branch.is_status_bad_land())
self.assertTrue(branch.is_status_bad())
# fix the empty diff
branch_data.raw_diff = "raw diff"
branch_data.has_new_commits = True
self._process_branches([branch])
self.assertFalse(branch.is_status_bad())
# empty diff again
branch_data.raw_diff = ""
branch_data.has_new_commits = True
self._process_branches([branch])
self.assertFalse(branch.is_status_bad_pre_review())
self.assertFalse(branch.is_status_bad_land())
self.assertTrue(branch.is_status_bad())
# fix the empty diff
branch_data.raw_diff = "raw diff2"
branch_data.has_new_commits = True
self._process_branches([branch])
self.assertFalse(branch.is_status_bad())
# land ok
self.conduit_data.accept_the_only_review()
self._process_branches([branch])
self.assertTrue(branch.is_null())
def test_M_NoLandingAuthor(self):
branch, branch_data = abdt_branchmock.create_simple_new_review()
self._process_branches([branch])
self.assertFalse(branch.is_status_bad())
# set bad email addresses and accept the review
branch_data.names_emails = abdt_branchmock.create_bad_names_emails()
branch_data.has_new_commits = True
self.conduit_data.accept_the_only_review()
self._process_branches([branch])
# ensure that the review is landed
self.assertTrue(branch.is_null())
# factors affecting a review:
# age of the revisions
# editing the review page
# new revisions on the review branch
# rewriting history on the review branch
# author names
# author accounts
# base branch
# availability of the git repo
# availability of the phabricator instance
# -----------------------------------------------------------------------------
# Copyright (C) 2013-2014 Bloomberg Finance L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------ END-OF-FILE ----------------------------------
|
|
from __future__ import unicode_literals
from collections import OrderedDict
import keyword
import re
from django.core.management.base import BaseCommand, CommandError
from django.db import connections, DEFAULT_DB_ALIAS
class Command(BaseCommand):
help = "Introspects the database tables in the given database and outputs a Django model module."
requires_system_checks = False
db_module = 'django.db'
def add_arguments(self, parser):
parser.add_argument('--database', action='store', dest='database',
default=DEFAULT_DB_ALIAS, help='Nominates a database to '
'introspect. Defaults to using the "default" database.')
def handle(self, **options):
try:
for line in self.handle_inspection(options):
self.stdout.write("%s\n" % line)
except NotImplementedError:
raise CommandError("Database inspection isn't supported for the currently selected database backend.")
def handle_inspection(self, options):
connection = connections[options['database']]
# 'table_name_filter' is a stealth option
table_name_filter = options.get('table_name_filter')
table2model = lambda table_name: re.sub(r'[^a-zA-Z0-9]', '', table_name.title())
strip_prefix = lambda s: s[1:] if s.startswith("u'") else s
with connection.cursor() as cursor:
yield "# This is an auto-generated Django model module."
yield "# You'll have to do the following manually to clean this up:"
yield "# * Rearrange models' order"
yield "# * Make sure each model has one field with primary_key=True"
yield "# * Remove `managed = False` lines if you wish to allow Django to create, modify, and delete the table"
yield "# Feel free to rename the models, but don't rename db_table values or field names."
yield "#"
yield "# Also note: You'll have to insert the output of 'django-admin sqlcustom [app_label]'"
yield "# into your database."
yield "from __future__ import unicode_literals"
yield ''
yield 'from %s import models' % self.db_module
known_models = []
for table_name in connection.introspection.table_names(cursor):
if table_name_filter is not None and callable(table_name_filter):
if not table_name_filter(table_name):
continue
yield ''
yield ''
yield 'class %s(models.Model):' % table2model(table_name)
known_models.append(table2model(table_name))
try:
relations = connection.introspection.get_relations(cursor, table_name)
except NotImplementedError:
relations = {}
try:
indexes = connection.introspection.get_indexes(cursor, table_name)
except NotImplementedError:
indexes = {}
try:
constraints = connection.introspection.get_constraints(cursor, table_name)
except NotImplementedError:
constraints = {}
used_column_names = [] # Holds column names used in the table so far
for i, row in enumerate(connection.introspection.get_table_description(cursor, table_name)):
comment_notes = [] # Holds Field notes, to be displayed in a Python comment.
extra_params = OrderedDict() # Holds Field parameters such as 'db_column'.
column_name = row[0]
is_relation = i in relations
att_name, params, notes = self.normalize_col_name(
column_name, used_column_names, is_relation)
extra_params.update(params)
comment_notes.extend(notes)
used_column_names.append(att_name)
# Add primary_key and unique, if necessary.
if column_name in indexes:
if indexes[column_name]['primary_key']:
extra_params['primary_key'] = True
elif indexes[column_name]['unique']:
extra_params['unique'] = True
if is_relation:
rel_to = "self" if relations[i][1] == table_name else table2model(relations[i][1])
if rel_to in known_models:
field_type = 'ForeignKey(%s' % rel_to
else:
field_type = "ForeignKey('%s'" % rel_to
else:
# Calling `get_field_type` to get the field type string and any
# additional parameters and notes.
field_type, field_params, field_notes = self.get_field_type(connection, table_name, row)
extra_params.update(field_params)
comment_notes.extend(field_notes)
field_type += '('
# Don't output 'id = meta.AutoField(primary_key=True)', because
# that's assumed if it doesn't exist.
if att_name == 'id' and extra_params == {'primary_key': True}:
if field_type == 'AutoField(':
continue
elif field_type == 'IntegerField(' and not connection.features.can_introspect_autofield:
comment_notes.append('AutoField?')
# Add 'null' and 'blank', if the 'null_ok' flag was present in the
# table description.
if row[6]: # If it's NULL...
if field_type == 'BooleanField(':
field_type = 'NullBooleanField('
else:
extra_params['blank'] = True
if field_type not in ('TextField(', 'CharField('):
extra_params['null'] = True
field_desc = '%s = %s%s' % (
att_name,
# Custom fields will have a dotted path
'' if '.' in field_type else 'models.',
field_type,
)
if extra_params:
if not field_desc.endswith('('):
field_desc += ', '
field_desc += ', '.join([
'%s=%s' % (k, strip_prefix(repr(v)))
for k, v in extra_params.items()])
field_desc += ')'
if comment_notes:
field_desc += ' # ' + ' '.join(comment_notes)
yield ' %s' % field_desc
for meta_line in self.get_meta(table_name, constraints):
yield meta_line
def normalize_col_name(self, col_name, used_column_names, is_relation):
"""
Modify the column name to make it Python-compatible as a field name
"""
field_params = {}
field_notes = []
new_name = col_name.lower()
if new_name != col_name:
field_notes.append('Field name made lowercase.')
if is_relation:
if new_name.endswith('_id'):
new_name = new_name[:-3]
else:
field_params['db_column'] = col_name
new_name, num_repl = re.subn(r'\W', '_', new_name)
if num_repl > 0:
field_notes.append('Field renamed to remove unsuitable characters.')
if new_name.find('__') >= 0:
while new_name.find('__') >= 0:
new_name = new_name.replace('__', '_')
if col_name.lower().find('__') >= 0:
# Only add the comment if the double underscore was in the original name
field_notes.append("Field renamed because it contained more than one '_' in a row.")
if new_name.startswith('_'):
new_name = 'field%s' % new_name
field_notes.append("Field renamed because it started with '_'.")
if new_name.endswith('_'):
new_name = '%sfield' % new_name
field_notes.append("Field renamed because it ended with '_'.")
if keyword.iskeyword(new_name):
new_name += '_field'
field_notes.append('Field renamed because it was a Python reserved word.')
if new_name[0].isdigit():
new_name = 'number_%s' % new_name
field_notes.append("Field renamed because it wasn't a valid Python identifier.")
if new_name in used_column_names:
num = 0
while '%s_%d' % (new_name, num) in used_column_names:
num += 1
new_name = '%s_%d' % (new_name, num)
field_notes.append('Field renamed because of name conflict.')
if col_name != new_name and field_notes:
field_params['db_column'] = col_name
return new_name, field_params, field_notes
def get_field_type(self, connection, table_name, row):
"""
Given the database connection, the table name, and the cursor row
description, this routine will return the given field type name, as
well as any additional keyword parameters and notes for the field.
"""
field_params = OrderedDict()
field_notes = []
try:
field_type = connection.introspection.get_field_type(row[1], row)
except KeyError:
field_type = 'TextField'
field_notes.append('This field type is a guess.')
# This is a hook for data_types_reverse to return a tuple of
# (field_type, field_params_dict).
if type(field_type) is tuple:
field_type, new_params = field_type
field_params.update(new_params)
# Add max_length for all CharFields.
if field_type == 'CharField' and row[3]:
field_params['max_length'] = int(row[3])
if field_type == 'DecimalField':
if row[4] is None or row[5] is None:
field_notes.append(
'max_digits and decimal_places have been guessed, as this '
'database handles decimal fields as float')
field_params['max_digits'] = row[4] if row[4] is not None else 10
field_params['decimal_places'] = row[5] if row[5] is not None else 5
else:
field_params['max_digits'] = row[4]
field_params['decimal_places'] = row[5]
return field_type, field_params, field_notes
def get_meta(self, table_name, constraints):
"""
Return a sequence comprising the lines of code necessary
to construct the inner Meta class for the model corresponding
to the given database table name.
"""
unique_together = []
for index, params in constraints.items():
if params['unique']:
columns = params['columns']
if len(columns) > 1:
# we do not want to include the u"" or u'' prefix
# so we build the string rather than interpolate the tuple
tup = '(' + ', '.join("'%s'" % c for c in columns) + ')'
unique_together.append(tup)
meta = ["",
" class Meta:",
" managed = False",
" db_table = '%s'" % table_name]
if unique_together:
tup = '(' + ', '.join(unique_together) + ',)'
meta += [" unique_together = %s" % tup]
return meta
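# Illustrative sketch of the helpers above (column and table names are
# hypothetical):
if __name__ == "__main__":
    cmd = Command()
    # Reserved words get a '_field' suffix and keep the original db_column.
    print(cmd.normalize_col_name('class', [], False))
    # Characters that are invalid in identifiers are replaced with '_'.
    print(cmd.normalize_col_name('user id', [], False))
    # Multi-column unique constraints become a unique_together Meta option.
    print('\n'.join(cmd.get_meta('person', {
        'person_name_uniq': {'unique': True,
                             'columns': ['first_name', 'last_name']}})))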
|
|
import keras
from keras.models import Model
from keras.models import Sequential
from keras.layers import Flatten
from keras.layers import Dense
from keras.layers import Input
from keras.layers import Conv2D
from keras.layers import Conv2DTranspose
from keras.layers import Cropping2D
from keras.layers import MaxPooling2D
from keras.layers import GlobalMaxPooling2D
from keras.layers import Activation
from keras.layers import GlobalAveragePooling2D
from keras.layers import Dropout
from keras.layers import BatchNormalization
from keras.layers.advanced_activations import LeakyReLU
from keras import regularizers
from keras import backend as K
def basic_model(iw=500, # Input width
ih=500, # Input height
ic=3,
ow=100, # Output width
oh=100, # Output height
dropout=0.9,
alpha=0.0):
input_image = Input((iw, ih, ic))
x = Conv2D(16, (3, 3), activation="linear", padding="same", name="block_1_layer_1")(input_image)
x = BatchNormalization()(x)
x = LeakyReLU(alpha)(x)
x = Conv2D(16, (3, 3), activation="linear", padding="same", name="block_1_layer_2")(x)
x = BatchNormalization()(x)
x = LeakyReLU(alpha)(x)
x = MaxPooling2D((2, 2), strides=(2, 2), name="block_1_pooling")(x)
x = Conv2D(32, (3, 3), activation="linear", padding="same", name="block_2_layer_1")(x)
x = BatchNormalization()(x)
x = LeakyReLU(alpha)(x)
x = Conv2D(32, (3, 3), activation="linear", padding="same", name="block_2_layer_2")(x)
x = BatchNormalization()(x)
x = LeakyReLU(alpha)(x)
x = MaxPooling2D((2, 2), strides=(2, 2), name="block_2_pooling")(x)
x = Conv2D(64, (3, 3), activation="linear", padding="same", name="block_3_layer_1")(x)
x = BatchNormalization()(x)
x = LeakyReLU(alpha)(x)
x = Conv2D(64, (3, 3), activation="linear", padding="same", name="block_3_layer_2")(x)
x = BatchNormalization()(x)
x = LeakyReLU(alpha)(x)
x = Conv2D(64, (3, 3), activation="linear", padding="same", name="block_3_layer_3")(x)
x = BatchNormalization()(x)
x = LeakyReLU(alpha)(x)
x = MaxPooling2D((2, 2), strides=(2, 2), name="block_3_pooling")(x)
x = Conv2D(128, (3, 3), activation="linear", padding="same", name="block_4_layer_1")(x)
x = LeakyReLU(alpha)(x)
x = Conv2D(128, (3, 3), activation="linear", padding="same", name="block_4_layer_2")(x)
x = LeakyReLU(alpha)(x)
x = Conv2D(128, (3, 3), activation="linear", padding="same", name="block_4_layer_3")(x)
x = LeakyReLU(alpha)(x)
x = MaxPooling2D((2, 2), strides=(2, 2), name="block_4_pooling")(x)
x = Conv2D(128, (3, 3), activation="linear", padding="same", name="block_5_layer_1")(x)
x = BatchNormalization()(x)
x = LeakyReLU(alpha)(x)
x = Conv2D(128, (3, 3), activation="linear", padding="same", name="block_5_layer_2")(x)
x = BatchNormalization()(x)
x = LeakyReLU(alpha)(x)
x = Conv2D(128, (3, 3), activation="linear", padding="same", name="block_5_layer_3")(x)
x = BatchNormalization()(x)
x = LeakyReLU(alpha)(x)
x = MaxPooling2D((2, 2), strides=(2, 2), name="block_5_pooling")(x)
x = Conv2D(256, (3, 3), activation="linear", padding="same", name="block_7_layer_1")(x)
x = BatchNormalization()(x)
x = LeakyReLU(alpha)(x)
x = Conv2D(256, (3, 3), activation="linear", padding="same", name="block_7_layer_2")(x)
x = BatchNormalization()(x)
x = LeakyReLU(alpha)(x)
x = Conv2D(256, (3, 3), activation="linear", padding="same", name="block_7_layer_3")(x)
x = BatchNormalization()(x)
x = LeakyReLU(alpha)(x)
x = MaxPooling2D((2, 2), strides=(2, 2), name="block_7_pooling")(x)
x = Flatten(name="flatten")(x)
x = Dense(4096, activation="linear", name="full_connected_1")(x)
x = BatchNormalization()(x)
x = LeakyReLU(alpha)(x)
x = Dropout(dropout)(x)
x = Dense(4096, activation="linear", name="full_connected_2")(x)
x = BatchNormalization()(x)
x = LeakyReLU(alpha)(x)
x = Dropout(dropout)(x)
x = Dense(ow*oh, activation="sigmoid", name="predictions")(x)
model = Model(input_image, x, name="vgg16_based")
model.compile(loss="binary_crossentropy",
optimizer="adadelta")
print("\n ---> Model summary <--- \n")
model.summary()
return model
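# Minimal usage sketch (assumes a working Keras backend; the 128x128 input and
# 32x32 output sizes are arbitrary choices for illustration):
if __name__ == "__main__":
    demo = basic_model(iw=128, ih=128, ic=3, ow=32, oh=32)
    print(demo.output_shape)  # (None, 1024): one sigmoid unit per output pixel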
def basic_model_pooling(iw=500, # Input width
ih=500, # Input height
ic=3,
ow=100, # Output width
oh=100, # Output height
dropout=0.9,
alpha=0.001):
input_image = Input((iw, ih, ic))
x = Conv2D(16, (3, 3), activation="linear", padding="same", name="block_1_layer_1")(input_image)
x = BatchNormalization()(x)
x = LeakyReLU(alpha)(x)
x = Conv2D(16, (3, 3), activation="linear", padding="same", name="block_1_layer_2")(x)
x = BatchNormalization()(x)
x = LeakyReLU(alpha)(x)
x = MaxPooling2D((2, 2), strides=(2, 2), name="block_1_pooling")(x)
x = Conv2D(32, (3, 3), activation="linear", padding="same", name="block_2_layer_1")(x)
x = BatchNormalization()(x)
x = LeakyReLU(alpha)(x)
x = Conv2D(32, (3, 3), activation="linear", padding="same", name="block_2_layer_2")(x)
x = BatchNormalization()(x)
x = LeakyReLU(alpha)(x)
x = MaxPooling2D((2, 2), strides=(2, 2), name="block_2_pooling")(x)
x = Conv2D(64, (3, 3), activation="linear", padding="same", name="block_3_layer_1")(x)
x = BatchNormalization()(x)
x = LeakyReLU(alpha)(x)
x = Conv2D(64, (3, 3), activation="linear", padding="same", name="block_3_layer_2")(x)
x = BatchNormalization()(x)
x = LeakyReLU(alpha)(x)
x = Conv2D(64, (3, 3), activation="linear", padding="same", name="block_3_layer_3")(x)
x = BatchNormalization()(x)
x = LeakyReLU(alpha)(x)
x = MaxPooling2D((2, 2), strides=(2, 2), name="block_3_pooling")(x)
x = Conv2D(128, (3, 3), activation="linear", padding="same", name="block_4_layer_1")(x)
x = LeakyReLU(alpha)(x)
x = Conv2D(128, (3, 3), activation="linear", padding="same", name="block_4_layer_2")(x)
x = LeakyReLU(alpha)(x)
x = Conv2D(128, (3, 3), activation="linear", padding="same", name="block_4_layer_3")(x)
x = LeakyReLU(alpha)(x)
x = MaxPooling2D((2, 2), strides=(2, 2), name="block_4_pooling")(x)
x = Conv2D(128, (3, 3), activation="linear", padding="same", name="block_5_layer_1")(x)
x = BatchNormalization()(x)
x = LeakyReLU(alpha)(x)
x = Conv2D(128, (3, 3), activation="linear", padding="same", name="block_5_layer_2")(x)
x = BatchNormalization()(x)
x = LeakyReLU(alpha)(x)
x = Conv2D(128, (3, 3), activation="linear", padding="same", name="block_5_layer_3")(x)
x = BatchNormalization()(x)
x = LeakyReLU(alpha)(x)
x = MaxPooling2D((2, 2), strides=(2, 2), name="block_5_pooling")(x)
x = Conv2D(256, (3, 3), activation="linear", padding="same", name="block_6_layer_1")(x)
x = BatchNormalization()(x)
x = LeakyReLU(alpha)(x)
x = Conv2D(512, (3, 3), activation="linear", padding="same", name="block_6_layer_2")(x)
x = BatchNormalization()(x)
x = LeakyReLU(alpha)(x)
x = Conv2D(ow*oh, (3, 3), activation="linear", padding="same", name="block_6_layer_3")(x)
x = BatchNormalization()(x)
x = LeakyReLU(alpha)(x)
x = GlobalAveragePooling2D()(x)
model = Model(input_image, x, name="vgg16_based")
model.compile(loss="binary_crossentropy",
optimizer="adadelta")
print("\n ---> Model summary <--- \n")
model.summary()
return model
def vgg16_32s_fcn(iw=500, # Input width
ih=500, # Input height
ic=3,
dropout=0.5,
alpha=0.001,
classes=2):
# Based on:
# Fully Convolutional Models for Semantic Segmentation
# Evan Shelhamer*, Jonathan Long*, Trevor Darrell
# PAMI 2016
# arXiv:1605.06211
reg_fun = regularizers.l2(alpha)
input_image = Input((iw, ih, ic))
# Conv 1
x = Conv2D(64, (3, 3), activation='relu', padding='same', kernel_regularizer=regularizers.l2(alpha), bias_regularizer=regularizers.l2(alpha), name='block1_conv1')(input_image)
x = Conv2D(64, (3, 3), activation='relu', padding='same', kernel_regularizer=regularizers.l2(alpha), bias_regularizer=regularizers.l2(alpha), name='block1_conv2')(x)
pool1 = MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool')(x)
# Conv 2
x = Conv2D(128, (3, 3), activation='relu', padding='same', kernel_regularizer=regularizers.l2(alpha), bias_regularizer=regularizers.l2(alpha), name='block2_conv1')(pool1)
x = Conv2D(128, (3, 3), activation='relu', padding='same', kernel_regularizer=regularizers.l2(alpha), bias_regularizer=regularizers.l2(alpha), name='block2_conv2')(x)
pool2 = MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool')(x)
# Conv 3
x = Conv2D(256, (3, 3), activation='relu', padding='same', kernel_regularizer=regularizers.l2(alpha), bias_regularizer=regularizers.l2(alpha), name='block3_conv1')(pool2)
x = Conv2D(256, (3, 3), activation='relu', padding='same', kernel_regularizer=regularizers.l2(alpha), bias_regularizer=regularizers.l2(alpha), name='block3_conv2')(x)
x = Conv2D(256, (3, 3), activation='relu', padding='same', kernel_regularizer=regularizers.l2(alpha), bias_regularizer=regularizers.l2(alpha), name='block3_conv3')(x)
pool3 = MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool')(x)
# Conv 4
x = Conv2D(512, (3, 3), activation='relu', padding='same', kernel_regularizer=regularizers.l2(alpha), bias_regularizer=regularizers.l2(alpha), name='block4_conv1')(pool3)
x = Conv2D(512, (3, 3), activation='relu', padding='same', kernel_regularizer=regularizers.l2(alpha), bias_regularizer=regularizers.l2(alpha), name='block4_conv2')(x)
x = Conv2D(512, (3, 3), activation='relu', padding='same', kernel_regularizer=regularizers.l2(alpha), bias_regularizer=regularizers.l2(alpha), name='block4_conv3')(x)
pool4 = MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool')(x)
# Conv 5
x = Conv2D(512, (3, 3), activation='relu', padding='same', kernel_regularizer=regularizers.l2(alpha), bias_regularizer=regularizers.l2(alpha), name='block5_conv1')(pool4)
x = Conv2D(512, (3, 3), activation='relu', padding='same', kernel_regularizer=regularizers.l2(alpha), bias_regularizer=regularizers.l2(alpha), name='block5_conv2')(x)
x = Conv2D(512, (3, 3), activation='relu', padding='same', kernel_regularizer=regularizers.l2(alpha), bias_regularizer=regularizers.l2(alpha), name='block5_conv3')(x)
pool5 = MaxPooling2D((2, 2), strides=(2, 2), name='block5_pool')(x)
# Fully Conv fc6
fc6 = Conv2D(4096, (7, 7), activation='relu', padding='same', kernel_regularizer=regularizers.l2(alpha), bias_regularizer=regularizers.l2(alpha), name='fc6')(pool5)
drop6 = Dropout(rate=dropout)(fc6)
# Fully Conv fc7
fc7 = Conv2D(4096, (1, 1), activation='relu', padding='same', kernel_regularizer=regularizers.l2(alpha), bias_regularizer=regularizers.l2(alpha), name='fc7')(drop6)
drop7 = Dropout(rate=dropout)(fc7)
score_fr = Conv2D(classes, (1, 1), activation='relu', padding='same', kernel_regularizer=regularizers.l2(alpha), bias_regularizer=regularizers.l2(alpha), name='score_fr')(drop7)
upscore = Conv2DTranspose(classes, kernel_size=(64, 64), strides=(32, 32), kernel_regularizer=regularizers.l2(alpha), bias_regularizer=regularizers.l2(alpha), name='upscore')(score_fr)
_, uw, uh, uc = upscore._keras_shape
cw = (uw - iw)//2
ch = (uh - ih)//2
print("cw: " + str(cw))
print("ch: " + str(ch))
score = Cropping2D(cropping=(cw, ch))(upscore)
output = Activation('softmax')(score)
model = Model(input_image, output, name="vgg16_based")
return model
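# Worked example of the cropping arithmetic above for the default 500x500
# input, assuming 'valid' padding in the pooling and transposed convolution:
if __name__ == "__main__":
    size = 500
    for _ in range(5):                # five 2x2, stride-2 'valid' max poolings
        size = (size - 2) // 2 + 1    # 250, 125, 62, 31, 15
    up = (size - 1) * 32 + 64         # Conv2DTranspose: kernel 64, stride 32
    print("%d %d %d" % (size, up, (up - 500) // 2))  # 15 512 6 -> crop (6, 6)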
def vgg16_16s_fcn(iw=500, # Input width
ih=500, # Input height
ic=3,
dropout=0.5,
alpha=0.001,
classes=2):
# Based on:
# Fully Convolutional Models for Semantic Segmentation
# Evan Shelhamer*, Jonathan Long*, Trevor Darrell
# PAMI 2016
# arXiv:1605.06211
reg_fun = regularizers.l2(alpha)
input_image = Input((iw, ih, ic))
# Conv 1
x = Conv2D(64, (3, 3), activation='relu', padding='same', kernel_regularizer=regularizers.l2(alpha), bias_regularizer=regularizers.l2(alpha), name='block1_conv1')(input_image)
x = Conv2D(64, (3, 3), activation='relu', padding='same', kernel_regularizer=regularizers.l2(alpha), bias_regularizer=regularizers.l2(alpha), name='block1_conv2')(x)
pool1 = MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool')(x)
# Conv 2
x = Conv2D(128, (3, 3), activation='relu', padding='same', kernel_regularizer=regularizers.l2(alpha), bias_regularizer=regularizers.l2(alpha), name='block2_conv1')(pool1)
x = Conv2D(128, (3, 3), activation='relu', padding='same', kernel_regularizer=regularizers.l2(alpha), bias_regularizer=regularizers.l2(alpha), name='block2_conv2')(x)
pool2 = MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool')(x)
# Conv 3
x = Conv2D(256, (3, 3), activation='relu', padding='same', kernel_regularizer=regularizers.l2(alpha), bias_regularizer=regularizers.l2(alpha), name='block3_conv1')(pool2)
x = Conv2D(256, (3, 3), activation='relu', padding='same', kernel_regularizer=regularizers.l2(alpha), bias_regularizer=regularizers.l2(alpha), name='block3_conv2')(x)
x = Conv2D(256, (3, 3), activation='relu', padding='same', kernel_regularizer=regularizers.l2(alpha), bias_regularizer=regularizers.l2(alpha), name='block3_conv3')(x)
pool3 = MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool')(x)
# Conv 4
x = Conv2D(512, (3, 3), activation='relu', padding='same', kernel_regularizer=regularizers.l2(alpha), bias_regularizer=regularizers.l2(alpha), name='block4_conv1')(pool3)
x = Conv2D(512, (3, 3), activation='relu', padding='same', kernel_regularizer=regularizers.l2(alpha), bias_regularizer=regularizers.l2(alpha), name='block4_conv2')(x)
x = Conv2D(512, (3, 3), activation='relu', padding='same', kernel_regularizer=regularizers.l2(alpha), bias_regularizer=regularizers.l2(alpha), name='block4_conv3')(x)
pool4 = MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool')(x)
# Conv 5
x = Conv2D(512, (3, 3), activation='relu', padding='same', kernel_regularizer=regularizers.l2(alpha), bias_regularizer=regularizers.l2(alpha), name='block5_conv1')(pool4)
x = Conv2D(512, (3, 3), activation='relu', padding='same', kernel_regularizer=regularizers.l2(alpha), bias_regularizer=regularizers.l2(alpha), name='block5_conv2')(x)
x = Conv2D(512, (3, 3), activation='relu', padding='same', kernel_regularizer=regularizers.l2(alpha), bias_regularizer=regularizers.l2(alpha), name='block5_conv3')(x)
pool5 = MaxPooling2D((2, 2), strides=(2, 2), name='block5_pool')(x)
# Fully Conv fc6
fc6 = Conv2D(4096, (7, 7), activation='relu', padding='same', kernel_regularizer=regularizers.l2(alpha), bias_regularizer=regularizers.l2(alpha), name='fc6')(pool5)
drop6 = Dropout(rate=dropout)(fc6)
# Fully Conv fc7
fc7 = Conv2D(4096, (1, 1), activation='relu', padding='same', kernel_regularizer=regularizers.l2(alpha), bias_regularizer=regularizers.l2(alpha), name='fc7')(drop6)
drop7 = Dropout(rate=dropout)(fc7)
score_fr = Conv2D(classes, (1, 1), activation='relu', padding='same', kernel_regularizer=regularizers.l2(alpha), bias_regularizer=regularizers.l2(alpha), name='score_fr')(drop7)
upscore2 = Conv2DTranspose(classes, kernel_size=(4, 4), strides=(2, 2), kernel_regularizer=regularizers.l2(alpha), bias_regularizer=regularizers.l2(alpha), name='upscore2')(score_fr)
score_pool4 = Conv2D(classes, (1, 1), activation='relu', padding='same', kernel_regularizer=regularizers.l2(alpha), bias_regularizer=regularizers.l2(alpha), name='score_pool4')(pool4)
_, uw, uh, uc = upscore2._keras_shape
_, sw, sh, sc = score_pool4._keras_shape
if ((uw - sw) == 1) or ((uh - sh) == 1):
cw1 = 1
ch1 = 1
cropping = ((cw1, 0),(ch1, 0))
else:
cw1 = (uw - sw)//2
ch1 = (uh - sh)//2
cropping = (cw1, ch1)
print("cw1: " + str(cw1))
print("ch1: " + str(ch1))
print("upscore2._keras_shape " + str(upscore2._keras_shape))
print("score_pool4._keras_shape " + str(score_pool4._keras_shape))
# Technically score_pool4 should have a larger size than upscore2.
# At least that is what follows from crop(n.score_pool4, n.upscore2).
# This is, however, not the case and we need to crop upscore2.
score_pool4c = Cropping2D(cropping=cropping)(upscore2)
fuse_pool4 = keras.layers.Add()([score_pool4c, score_pool4])
upscore16 = Conv2DTranspose(classes, kernel_size=(32, 32), strides=(16, 16), kernel_regularizer=regularizers.l2(alpha), bias_regularizer=regularizers.l2(alpha), name='upscore16')(fuse_pool4)
_, uw, uh, uc = upscore16._keras_shape
cw2 = (uw - iw)//2
ch2 = (uh - ih)//2
#print("cw2: " + str(cw2))
#print("ch2: " + str(ch2))
score = Cropping2D(cropping=(cw2, ch2))(upscore16)
output = Activation('softmax')(score)
model = Model(input_image, output, name="vgg16_based")
return model
|
|
# Copyright 2018 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import yaml
from osc_lib.command import command
from osc_lib import utils
from tackerclient.common import exceptions
from tackerclient.i18n import _
from tackerclient.osc import sdk_utils
from tackerclient.osc import utils as tacker_osc_utils
from tackerclient.tacker import v1_0 as tackerV10
_attr_map = (
('id', 'ID', tacker_osc_utils.LIST_BOTH),
('name', 'Name', tacker_osc_utils.LIST_BOTH),
('template_source', 'Template_Source',
tacker_osc_utils.LIST_BOTH),
('description', 'Description', tacker_osc_utils.LIST_BOTH),
)
_NSD = 'nsd'
_formatters = {
'attributes': tacker_osc_utils.format_dict_with_indention,
}
def _get_columns(item):
column_map = {
'tenant_id': 'project_id',
}
return sdk_utils.get_osc_show_columns_for_sdk_resource(item, column_map)
class CreateNSD(command.ShowOne):
_description = _("Create a new NSD.")
def get_parser(self, prog_name):
parser = super(CreateNSD, self).get_parser(prog_name)
parser.add_argument(
'name', metavar='NAME',
help=_('Name for NSD'))
parser.add_argument(
'--tenant-id', metavar='TENANT_ID',
help=_('The owner tenant ID or project ID'))
parser.add_argument(
'--nsd-file',
required=True,
help=_('YAML file with NSD parameters'))
parser.add_argument(
'--description',
help=_('Set a description for the NSD'))
return parser
def args2body(self, parsed_args):
body = {_NSD: {}}
nsd = None
if not parsed_args.nsd_file:
raise exceptions.InvalidInput(reason="Invalid input for nsd file")
with open(parsed_args.nsd_file) as f:
nsd = f.read()
try:
nsd = yaml.load(nsd, Loader=yaml.SafeLoader)
except yaml.YAMLError as e:
raise exceptions.InvalidInput(reason=e)
if not nsd:
raise exceptions.InvalidInput(reason="nsd file is empty")
body[_NSD]['attributes'] = {'nsd': nsd}
tackerV10.update_dict(parsed_args, body[_NSD],
['tenant_id', 'name', 'description'])
return body
def take_action(self, parsed_args):
client = self.app.client_manager.tackerclient
nsd = client.create_nsd(self.args2body(parsed_args))
display_columns, columns = _get_columns(nsd[_NSD])
nsd[_NSD]['attributes']['nsd'] = yaml.load(
nsd[_NSD]['attributes']['nsd'])
data = utils.get_item_properties(
sdk_utils.DictModel(nsd[_NSD]),
columns, formatters=_formatters)
return (display_columns, data)
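# Sketch of the request body that args2body assembles (the template content
# and names below are hypothetical, not a real NSD):
if __name__ == "__main__":
    sample_nsd = yaml.load('{topology_template: {node_templates: {}}}',
                           Loader=yaml.SafeLoader)
    body = {_NSD: {'attributes': {'nsd': sample_nsd},
                   'name': 'sample-nsd', 'description': 'demo NSD'}}
    print(body)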
class DeleteNSD(command.Command):
_description = _("Delete NSD(s).")
def get_parser(self, prog_name):
parser = super(DeleteNSD, self).get_parser(prog_name)
parser.add_argument(
_NSD,
metavar="<NSD>",
nargs="+",
help=_("NSD(s) to delete (name or ID)")
)
return parser
def take_action(self, parsed_args):
client = self.app.client_manager.tackerclient
failure = False
deleted_ids = []
failed_items = {}
for resource_id in parsed_args.nsd:
try:
obj = tackerV10.find_resourceid_by_name_or_id(
client, _NSD, resource_id)
client.delete_nsd(obj)
deleted_ids.append(resource_id)
except Exception as e:
failure = True
failed_items[resource_id] = e
if failure:
msg = ''
if deleted_ids:
msg = (_('Successfully deleted %(resource)s(s):'
' %(deleted_list)s') % {'deleted_list':
', '.join(deleted_ids),
'resource': _NSD})
err_msg = _("\n\nUnable to delete the below"
" %s(s):") % _NSD
for failed_id, error in failed_items.items():
err_msg += (_('\n Cannot delete %(failed_id)s: %(error)s')
% {'failed_id': failed_id,
'error': error})
msg += err_msg
raise exceptions.CommandError(message=msg)
else:
print((_('All specified %(resource)s(s) deleted successfully')
% {'resource': _NSD}))
return
class ListNSD(command.Lister):
_description = _("List (NSD)s that belong to a given tenant.")
def get_parser(self, prog_name):
parser = super(ListNSD, self).get_parser(prog_name)
parser.add_argument(
'--template-source',
help=_("List NSD with specified template source. Available \
options are 'onboarded' (default), 'inline' or 'all'"),
action='store',
default='onboarded')
return parser
def take_action(self, parsed_args):
client = self.app.client_manager.tackerclient
data = client.list_nsds()
headers, columns = tacker_osc_utils.get_column_definitions(
_attr_map, long_listing=None)
return (headers,
(utils.get_dict_properties(
s, columns,
) for s in data[_NSD + 's']))
class ShowNSD(command.ShowOne):
_description = _("Display NSD details")
def get_parser(self, prog_name):
parser = super(ShowNSD, self).get_parser(prog_name)
parser.add_argument(
_NSD,
metavar="<NSD>",
help=_("NSD to display (name or ID)")
)
return parser
def take_action(self, parsed_args):
client = self.app.client_manager.tackerclient
obj_id = tackerV10.find_resourceid_by_name_or_id(
client, _NSD, parsed_args.nsd)
obj = client.show_nsd(obj_id)
obj[_NSD]['attributes']['nsd'] = yaml.load(
obj[_NSD]['attributes']['nsd'])
display_columns, columns = _get_columns(obj[_NSD])
data = utils.get_item_properties(
sdk_utils.DictModel(obj[_NSD]),
columns,
formatters=_formatters)
return (display_columns, data)
class ShowTemplateNSD(command.ShowOne):
_description = _("Display NSD Template details")
def get_parser(self, prog_name):
parser = super(ShowTemplateNSD, self).get_parser(prog_name)
parser.add_argument(
_NSD,
metavar="<NSD>",
help=_("NSD to display (name or ID)")
)
return parser
def take_action(self, parsed_args):
client = self.app.client_manager.tackerclient
obj_id = tackerV10.find_resourceid_by_name_or_id(
client, _NSD, parsed_args.nsd)
obj = client.show_nsd(obj_id)
        obj[_NSD]['attributes']['nsd'] = yaml.safe_load(
            obj[_NSD]['attributes']['nsd'])
data = utils.get_item_properties(
sdk_utils.DictModel(obj[_NSD]),
('attributes',),
formatters=_formatters)
data = (data or _('Unable to display NSD template!'))
return (('attributes',), data)
|
|
"""
Pure Python GeoIP API. The API is based off of U{MaxMind's C-based Python API<http://www.maxmind.com/app/python>},
but the code itself is based on the U{pure PHP5 API<http://pear.php.net/package/Net_GeoIP/>}
by Jim Winstead and Hans Lellelid.
It is mostly a drop-in replacement, except the
C{new} and C{open} methods are gone. You should instantiate the L{GeoIP} class yourself:
C{gi = GeoIP('/path/to/GeoIP.dat', pygeoip.MEMORY_CACHE)}
@author: Jennifer Ennis <zaylea at gmail dot com>
@license:
Copyright(C) 2004 MaxMind LLC
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with this program. If not, see <http://www.gnu.org/licenses/lgpl.txt>.
"""
from __future__ import with_statement, absolute_import, division
import os
import math
import socket
import mmap
import gzip
import codecs
from StringIO import StringIO
from . import const
from .util import ip2long
from .timezone import time_zone_by_country_and_region
from . import six
MMAP_CACHE = const.MMAP_CACHE
MEMORY_CACHE = const.MEMORY_CACHE
STANDARD = const.STANDARD
class GeoIPError(Exception):
pass
class GeoIPMetaclass(type):
def __new__(cls, *args, **kwargs):
"""
        Singleton method to get an instance without reparsing the db. Unique
instances are instantiated based on the filename of the db. Flags are
ignored for this, i.e. if you initialize one with STANDARD flag (default)
and then try later to initialize with MEMORY_CACHE, it will still
return the STANDARD one.
"""
if not hasattr(cls, '_instances'):
cls._instances = {}
if len(args) > 0:
filename = args[0]
elif 'filename' in kwargs:
filename = kwargs['filename']
if not filename in cls._instances:
cls._instances[filename] = type.__new__(cls, *args, **kwargs)
return cls._instances[filename]
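# A hedged illustration of the singleton behaviour above (not executed here):
# constructing GeoIP twice for the same database file returns the same
# instance, and the flags passed on the second call are ignored.
#
#     a = GeoIP('/path/to/GeoIP.dat')                # parses the database
#     b = GeoIP('/path/to/GeoIP.dat', MEMORY_CACHE)  # same object as `a`
#     assert a is b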
GeoIPBase = GeoIPMetaclass('GeoIPBase', (object,), {})
class GeoIP(GeoIPBase):
def __init__(self, filename, flags=0):
"""
Initialize the class.
@param filename: path to a geoip database. If MEMORY_CACHE is used,
the file can be gzipped.
@type filename: str
@param flags: flags that affect how the database is processed.
Currently the only supported flags are STANDARD (the default),
MEMORY_CACHE (preload the whole file into memory), and
MMAP_CACHE (access the file via mmap).
@type flags: int
"""
self._filename = filename
self._flags = flags
if self._flags & const.MMAP_CACHE:
with open(filename, 'rb') as f:
self._filehandle = mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ)
elif self._flags & const.MEMORY_CACHE:
if filename.endswith('.gz'):
opener = gzip.open
else:
opener = open
with opener(filename, 'rb') as f:
self._memoryBuffer = f.read()
self._filehandle = StringIO(self._memoryBuffer)
else:
self._filehandle = codecs.open(filename, 'rb','latin_1')
self._setup_segments()
def _setup_segments(self):
"""
        Parses the database file to determine what kind of database is being
        used and sets up the segment sizes and start points that will be used
        by the seek*() methods later.
"""
self._databaseType = const.COUNTRY_EDITION
self._recordLength = const.STANDARD_RECORD_LENGTH
filepos = self._filehandle.tell()
self._filehandle.seek(-3, os.SEEK_END)
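        # The structure info block sits at the end of the file and is preceded
        # by a three-byte 0xFF 0xFF 0xFF delimiter, so scan backwards from EOF.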
for i in range(const.STRUCTURE_INFO_MAX_SIZE):
delim = self._filehandle.read(3)
if delim == six.u(chr(255) * 3):
self._databaseType = ord(self._filehandle.read(1))
if (self._databaseType >= 106):
# backwards compatibility with databases from April 2003 and earlier
self._databaseType -= 105
if self._databaseType == const.REGION_EDITION_REV0:
self._databaseSegments = const.STATE_BEGIN_REV0
elif self._databaseType == const.REGION_EDITION_REV1:
self._databaseSegments = const.STATE_BEGIN_REV1
elif self._databaseType in (const.CITY_EDITION_REV0,
const.CITY_EDITION_REV1,
const.ORG_EDITION,
const.ISP_EDITION,
const.ASNUM_EDITION):
self._databaseSegments = 0
buf = self._filehandle.read(const.SEGMENT_RECORD_LENGTH)
for j in range(const.SEGMENT_RECORD_LENGTH):
self._databaseSegments += (ord(buf[j]) << (j * 8))
if self._databaseType in (const.ORG_EDITION, const.ISP_EDITION):
self._recordLength = const.ORG_RECORD_LENGTH
break
else:
self._filehandle.seek(-4, os.SEEK_CUR)
if self._databaseType == const.COUNTRY_EDITION:
self._databaseSegments = const.COUNTRY_BEGIN
self._filehandle.seek(filepos, os.SEEK_SET)
def _lookup_country_id(self, addr):
"""
Get the country index.
        This method is called by the country_code_by_addr and
        country_name_by_addr methods. It looks up the index ('id') for the
        country, which is the key for the code and name.
@param addr: The IP address
@type addr: str
@return: network byte order 32-bit integer
@rtype: int
"""
ipnum = ip2long(addr)
if not ipnum:
raise ValueError("Invalid IP address: %s" % addr)
if self._databaseType != const.COUNTRY_EDITION:
raise GeoIPError('Invalid database type; country_* methods expect '\
'Country database')
return self._seek_country(ipnum) - const.COUNTRY_BEGIN
def _seek_country(self, ipnum):
"""
Using the record length and appropriate start points, seek to the
country that corresponds to the converted IP address integer.
@param ipnum: result of ip2long conversion
@type ipnum: int
@return: offset of start of record
@rtype: int
"""
offset = 0
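        # Walk the binary trie from the most significant address bit down: each
        # node holds two children of _recordLength bytes (for bit 0 and bit 1),
        # and a child value >= _databaseSegments marks a leaf record offset.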
for depth in range(31, -1, -1):
if self._flags & const.MEMORY_CACHE:
startIndex = 2 * self._recordLength * offset
length = 2 * self._recordLength
endIndex = startIndex + length
buf = self._memoryBuffer[startIndex:endIndex]
else:
self._filehandle.seek(2 * self._recordLength * offset, os.SEEK_SET)
buf = self._filehandle.read(2 * self._recordLength)
x = [0,0]
for i in range(2):
for j in range(self._recordLength):
x[i] += ord(buf[self._recordLength * i + j]) << (j * 8)
if ipnum & (1 << depth):
if x[1] >= self._databaseSegments:
return x[1]
offset = x[1]
else:
if x[0] >= self._databaseSegments:
return x[0]
offset = x[0]
raise Exception('Error traversing database - perhaps it is corrupt?')
def _get_org(self, ipnum):
"""
Seek and return organization (or ISP) name for converted IP addr.
@param ipnum: Converted IP address
@type ipnum: int
@return: org/isp name
@rtype: str
"""
seek_org = self._seek_country(ipnum)
if seek_org == self._databaseSegments:
return None
record_pointer = seek_org + (2 * self._recordLength - 1) * self._databaseSegments
self._filehandle.seek(record_pointer, os.SEEK_SET)
org_buf = self._filehandle.read(const.MAX_ORG_RECORD_LENGTH)
return org_buf[:org_buf.index(chr(0))]
def _get_region(self, ipnum):
"""
Seek and return the region info (dict containing country_code and region_name).
@param ipnum: converted IP address
@type ipnum: int
@return: dict containing country_code and region_name
@rtype: dict
"""
country_code = ''
region = ''
if self._databaseType == const.REGION_EDITION_REV0:
seek_country = self._seek_country(ipnum)
seek_region = seek_country - const.STATE_BEGIN_REV0
if seek_region >= 1000:
country_code = 'US'
                # US states are packed as 1000 + (letter1 * 26 + letter2);
                # decode relative to that base, matching the MaxMind C
                # implementation (the previous `// 1000` always produced 'AB').
                region = ''.join([chr((seek_region - 1000) // 26 + 65),
                                  chr((seek_region - 1000) % 26 + 65)])
else:
country_code = const.COUNTRY_CODES[seek_region]
region = ''
elif self._databaseType == const.REGION_EDITION_REV1:
seek_country = self._seek_country(ipnum)
seek_region = seek_country - const.STATE_BEGIN_REV1
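            # REV1 packs US states, Canadian provinces and other countries into
            # disjoint offset ranges; two A-Z letters are decoded for US/CA.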
if seek_region < const.US_OFFSET:
                country_code = ''
region = ''
elif seek_region < const.CANADA_OFFSET:
country_code = 'US'
region = ''.join([chr((seek_region - const.US_OFFSET) // 26 + 65), chr((seek_region - const.US_OFFSET) % 26 + 65)])
elif seek_region < const.WORLD_OFFSET:
country_code = 'CA'
region = ''.join([chr((seek_region - const.CANADA_OFFSET) // 26 + 65), chr((seek_region - const.CANADA_OFFSET) % 26 + 65)])
else:
i = (seek_region - const.WORLD_OFFSET) // const.FIPS_RANGE
if i < len(const.COUNTRY_CODES):
#country_code = const.COUNTRY_CODES[(seek_region - const.WORLD_OFFSET) // const.FIPS_RANGE]
country_code = const.COUNTRY_CODES[i]
else:
country_code = ''
region = ''
elif self._databaseType in (const.CITY_EDITION_REV0, const.CITY_EDITION_REV1):
rec = self._get_record(ipnum)
country_code = rec['country_code'] if 'country_code' in rec else ''
region = rec['region_name'] if 'region_name' in rec else ''
return {'country_code' : country_code, 'region_name' : region }
def _get_record(self, ipnum):
"""
Populate location dict for converted IP.
@param ipnum: converted IP address
@type ipnum: int
@return: dict with country_code, country_code3, country_name,
region, city, postal_code, latitude, longitude,
dma_code, metro_code, area_code, region_name, time_zone
@rtype: dict
"""
seek_country = self._seek_country(ipnum)
if seek_country == self._databaseSegments:
return None
record_pointer = seek_country + (2 * self._recordLength - 1) * self._databaseSegments
self._filehandle.seek(record_pointer, os.SEEK_SET)
record_buf = self._filehandle.read(const.FULL_RECORD_LENGTH)
record = {}
record_buf_pos = 0
char = ord(record_buf[record_buf_pos])
#char = record_buf[record_buf_pos] if six.PY3 else ord(record_buf[record_buf_pos])
record['country_code'] = const.COUNTRY_CODES[char]
record['country_code3'] = const.COUNTRY_CODES3[char]
record['country_name'] = const.COUNTRY_NAMES[char]
record_buf_pos += 1
str_length = 0
# get region
char = ord(record_buf[record_buf_pos+str_length])
while (char != 0):
str_length += 1
char = ord(record_buf[record_buf_pos+str_length])
if str_length > 0:
record['region_name'] = record_buf[record_buf_pos:record_buf_pos+str_length]
record_buf_pos += str_length + 1
str_length = 0
# get city
char = ord(record_buf[record_buf_pos+str_length])
while (char != 0):
str_length += 1
char = ord(record_buf[record_buf_pos+str_length])
if str_length > 0:
record['city'] = record_buf[record_buf_pos:record_buf_pos+str_length]
else:
record['city'] = ''
record_buf_pos += str_length + 1
str_length = 0
# get the postal code
char = ord(record_buf[record_buf_pos+str_length])
while (char != 0):
str_length += 1
char = ord(record_buf[record_buf_pos+str_length])
if str_length > 0:
record['postal_code'] = record_buf[record_buf_pos:record_buf_pos+str_length]
else:
record['postal_code'] = None
record_buf_pos += str_length + 1
str_length = 0
latitude = 0
longitude = 0
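        # Latitude and longitude are stored as 3-byte little-endian integers,
        # scaled by 10000 and offset by +180 degrees.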
for j in range(3):
char = ord(record_buf[record_buf_pos])
record_buf_pos += 1
latitude += (char << (j * 8))
record['latitude'] = (latitude/10000.0) - 180.0
for j in range(3):
char = ord(record_buf[record_buf_pos])
record_buf_pos += 1
longitude += (char << (j * 8))
record['longitude'] = (longitude/10000.0) - 180.0
if self._databaseType == const.CITY_EDITION_REV1:
dmaarea_combo = 0
if record['country_code'] == 'US':
for j in range(3):
char = ord(record_buf[record_buf_pos])
record_buf_pos += 1
dmaarea_combo += (char << (j*8))
record['dma_code'] = int(math.floor(dmaarea_combo/1000))
record['area_code'] = dmaarea_combo%1000
else:
record['dma_code'] = 0
record['area_code'] = 0
if 'dma_code' in record and record['dma_code'] in const.DMA_MAP:
record['metro_code'] = const.DMA_MAP[record['dma_code']]
else:
record['metro_code'] = ''
if 'country_code' in record:
record['time_zone'] = time_zone_by_country_and_region(
record['country_code'], record.get('region_name')) or ''
else:
record['time_zone'] = ''
return record
def country_code_by_addr(self, addr):
"""
Returns 2-letter country code (e.g. 'US') for specified IP address.
Use this method if you have a Country, Region, or City database.
@param addr: IP address
@type addr: str
@return: 2-letter country code
@rtype: str
"""
try:
if self._databaseType == const.COUNTRY_EDITION:
country_id = self._lookup_country_id(addr)
return const.COUNTRY_CODES[country_id]
elif self._databaseType in (const.REGION_EDITION_REV0, const.REGION_EDITION_REV1,
const.CITY_EDITION_REV0, const.CITY_EDITION_REV1):
return self.region_by_addr(addr)['country_code']
else:
raise GeoIPError('Invalid database type; country_* methods expect '\
'Country, City, or Region database')
except ValueError:
raise GeoIPError('*_by_addr methods only accept IP addresses. Use *_by_name for hostnames. (Address: %s)' % addr)
def country_code_by_name(self, hostname):
"""
Returns 2-letter country code (e.g. 'US') for specified hostname.
Use this method if you have a Country, Region, or City database.
@param hostname: host name
@type hostname: str
@return: 2-letter country code
@rtype: str
"""
addr = socket.gethostbyname(hostname)
return self.country_code_by_addr(addr)
def country_name_by_addr(self, addr):
"""
Returns full country name for specified IP address.
Use this method if you have a Country or City database.
@param addr: IP address
@type addr: str
@return: country name
@rtype: str
"""
try:
if self._databaseType == const.COUNTRY_EDITION:
country_id = self._lookup_country_id(addr)
return const.COUNTRY_NAMES[country_id]
elif self._databaseType in (const.CITY_EDITION_REV0, const.CITY_EDITION_REV1):
return self.record_by_addr(addr)['country_name']
else:
raise GeoIPError('Invalid database type; country_* methods expect '\
'Country or City database')
except ValueError:
raise GeoIPError('*_by_addr methods only accept IP addresses. Use *_by_name for hostnames. (Address: %s)' % addr)
def country_name_by_name(self, hostname):
"""
Returns full country name for specified hostname.
Use this method if you have a Country database.
@param hostname: host name
@type hostname: str
@return: country name
@rtype: str
"""
addr = socket.gethostbyname(hostname)
return self.country_name_by_addr(addr)
def org_by_addr(self, addr):
"""
Lookup the organization (or ISP) for given IP address.
Use this method if you have an Organization/ISP database.
@param addr: IP address
@type addr: str
@return: organization or ISP name
@rtype: str
"""
try:
ipnum = ip2long(addr)
if not ipnum:
raise ValueError("Invalid IP address: %s" % addr)
if self._databaseType not in (const.ORG_EDITION, const.ISP_EDITION, const.ASNUM_EDITION):
raise GeoIPError('Invalid database type; org_* methods expect '\
'Org/ISP database')
return self._get_org(ipnum)
except ValueError:
raise GeoIPError('*_by_addr methods only accept IP addresses. Use *_by_name for hostnames. (Address: %s)' % addr)
def org_by_name(self, hostname):
"""
Lookup the organization (or ISP) for hostname.
Use this method if you have an Organization/ISP database.
@param hostname: host name
@type hostname: str
@return: organization or ISP name
@rtype: str
"""
addr = socket.gethostbyname(hostname)
return self.org_by_addr(addr)
def record_by_addr(self, addr):
"""
Look up the record for a given IP address.
Use this method if you have a City database.
@param addr: IP address
@type addr: str
@return: dict with country_code, country_code3, country_name,
region, city, postal_code, latitude, longitude,
dma_code, metro_code, area_code, region_name, time_zone
@rtype: dict
"""
try:
ipnum = ip2long(addr)
if not ipnum:
raise ValueError("Invalid IP address: %s" % addr)
if not self._databaseType in (const.CITY_EDITION_REV0, const.CITY_EDITION_REV1):
raise GeoIPError('Invalid database type; record_* methods expect City database')
return self._get_record(ipnum)
except ValueError:
raise GeoIPError('*_by_addr methods only accept IP addresses. Use *_by_name for hostnames. (Address: %s)' % addr)
def record_by_name(self, hostname):
"""
Look up the record for a given hostname.
Use this method if you have a City database.
@param hostname: host name
@type hostname: str
@return: dict with country_code, country_code3, country_name,
region, city, postal_code, latitude, longitude,
dma_code, metro_code, area_code, region_name, time_zone
@rtype: dict
"""
addr = socket.gethostbyname(hostname)
return self.record_by_addr(addr)
def region_by_addr(self, addr):
"""
Lookup the region for given IP address.
Use this method if you have a Region database.
@param addr: IP address
@type addr: str
@return: dict containing country_code, region,
and region_name
@rtype: dict
"""
try:
ipnum = ip2long(addr)
if not ipnum:
raise ValueError("Invalid IP address: %s" % addr)
if not self._databaseType in (const.REGION_EDITION_REV0, const.REGION_EDITION_REV1,
const.CITY_EDITION_REV0, const.CITY_EDITION_REV1):
raise GeoIPError('Invalid database type; region_* methods expect '\
'Region or City database')
return self._get_region(ipnum)
except ValueError:
raise GeoIPError('*_by_addr methods only accept IP addresses. Use *_by_name for hostnames. (Address: %s)' % addr)
def region_by_name(self, hostname):
"""
Lookup the region for given hostname.
Use this method if you have a Region database.
@param hostname: host name
@type hostname: str
@return: dict containing country_code, region,
and region_name
@rtype: dict
"""
addr = socket.gethostbyname(hostname)
return self.region_by_addr(addr)
def time_zone_by_addr(self, addr):
"""
Look up the time zone for a given IP address.
Use this method if you have a Region or City database.
        @param addr: IP address
        @type addr: str
@return: Time zone
@rtype: str
"""
try:
ipnum = ip2long(addr)
if not ipnum:
raise ValueError("Invalid IP address: %s" % addr)
if not self._databaseType in (const.REGION_EDITION_REV0, const.REGION_EDITION_REV1,
const.CITY_EDITION_REV0, const.CITY_EDITION_REV1):
raise GeoIPError('Invalid database type; region_* methods expect '\
'Region or City database')
return self._get_record(ipnum)['time_zone']
except ValueError:
raise GeoIPError('*_by_addr methods only accept IP addresses. Use *_by_name for hostnames. (Address: %s)' % addr)
def time_zone_by_name(self, hostname):
"""
Look up the time zone for a given hostname.
Use this method if you have a Region or City database.
@param hostname: host name
@type hostname: str
@return: Time zone
@rtype: str
"""
addr = socket.gethostbyname(hostname)
return self.time_zone_by_addr(addr)
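# A minimal usage sketch, kept out of the library API and only run when this
# module is executed directly. The database path and the sample address are
# placeholders; point it at a real MaxMind legacy Country database to try it.
if __name__ == '__main__':
    import sys
    db_path = sys.argv[1] if len(sys.argv) > 1 else '/usr/share/GeoIP/GeoIP.dat'
    gi = GeoIP(db_path, MEMORY_CACHE)
    # A Country edition database supports the country_* lookups used here.
    print(gi.country_code_by_addr('8.8.8.8'))
    print(gi.country_name_by_addr('8.8.8.8'))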
|
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import unittest
import os
from pymatgen.io.qchem.sets import *
from pymatgen.util.testing import PymatgenTest
from pymatgen.io.qchem.inputs import QCInput
__author__ = "Samuel Blau, Brandon Wood, Shyam Dwaraknath"
__copyright__ = "Copyright 2018, The Materials Project"
__version__ = "0.1"
test_dir = os.path.join(
os.path.dirname(__file__), "..", "..", "..", "..", 'test_files',
"molecules")
class QChemDictSetTest(PymatgenTest):
def test_init(self):
test_molecule = QCInput.from_file(
os.path.join(test_dir, "new_qchem_files/pcm.qin")).molecule
test_DictSet = QChemDictSet(
molecule=test_molecule,
job_type='opt',
basis_set='6-31G*',
scf_algorithm='diis')
self.assertEqual(
test_DictSet.rem, {
'job_type': 'opt',
'gen_scfman': 'true',
'basis': '6-31g*',
'max_scf_cycles': 200,
'method': 'wb97xv',
'scf_algorithm': 'diis',
'xc_grid': '3',
'geom_opt_max_cycles': 200,
'symmetry': 'false',
'sym_ignore': 'true',
'resp_charges': 'true'
})
self.assertEqual(test_DictSet.pcm, {})
self.assertEqual(test_DictSet.solvent, {})
self.assertEqual(test_DictSet.smx, {})
self.assertEqual(test_DictSet.molecule, test_molecule)
def test_full_init(self):
test_molecule = QCInput.from_file(
os.path.join(test_dir, "new_qchem_files/pcm.qin")).molecule
test_DictSet = QChemDictSet(
molecule=test_molecule,
job_type='opt',
basis_set='6-31g*',
scf_algorithm='diis',
dft_rung=1,
pcm_dielectric=10.0,
max_scf_cycles=35)
self.assertEqual(
test_DictSet.rem, {
'job_type': 'opt',
'gen_scfman': 'true',
'basis': '6-31g*',
'max_scf_cycles': 35,
'method': 'b3lyp',
'geom_opt_max_cycles': 200,
'scf_algorithm': 'diis',
'xc_grid': '3',
'solvent_method': 'pcm',
'symmetry': 'false',
'sym_ignore': 'true',
'resp_charges': 'true'
})
self.assertEqual(
test_DictSet.pcm, {
'heavypoints': '194',
'hpoints': '194',
'radii': 'uff',
'theory': 'cpcm',
'vdwscale': '1.1'
})
self.assertEqual(test_DictSet.solvent, {'dielectric': 10.0})
self.assertEqual(test_DictSet.molecule, test_molecule)
test_DictSet = QChemDictSet(
molecule=test_molecule,
job_type='opt',
basis_set='6-31g*',
scf_algorithm='diis',
dft_rung=1,
smd_solvent='water',
max_scf_cycles=35)
self.assertEqual(
test_DictSet.rem, {
'job_type': 'opt',
'gen_scfman': 'true',
'basis': '6-31g*',
'max_scf_cycles': 35,
'method': 'b3lyp',
'geom_opt_max_cycles': 200,
'scf_algorithm': 'diis',
'xc_grid': '3',
'solvent_method': 'smd',
'ideriv': '1',
'symmetry': 'false',
'sym_ignore': 'true',
'resp_charges': 'true'
})
self.assertEqual(test_DictSet.smx, {'solvent': 'water'})
def test_overwrite_input(self):
test_molecule = QCInput.from_file(
os.path.join(test_dir, "new_qchem_files/pcm.qin")).molecule
overwrite_inputs = {
"rem": {
'method': 'b3lyp',
'basis': '6-31g*',
'thresh': 10,
"xc_grid": "000150000302"
}
}
test_OptSet = OptSet(
molecule=test_molecule, overwrite_inputs=overwrite_inputs)
act_rem = {
'job_type': 'opt',
'gen_scfman': 'true',
'basis': '6-31g*',
'max_scf_cycles': 200,
'method': 'b3lyp',
'scf_algorithm': 'diis',
'xc_grid': '000150000302',
'geom_opt_max_cycles': 200,
'thresh': 10,
'symmetry': 'false',
'sym_ignore': 'true',
'resp_charges': 'true'
}
self.assertDictEqual(act_rem, test_OptSet.rem)
def test_double_solvation(self):
test_molecule = QCInput.from_file(
os.path.join(test_dir, "new_qchem_files/pcm.qin")).molecule
raised_error = False
dict_set = None
try:
dict_set = QChemDictSet(molecule=test_molecule,
job_type='opt',
basis_set='6-31g*',
scf_algorithm='diis',
dft_rung=1,
pcm_dielectric=10.0,
smd_solvent="water",
max_scf_cycles=35)
except ValueError:
raised_error = True
self.assertTrue(raised_error)
self.assertEqual(dict_set, None)
def test_pcm_write(self):
test_molecule = QCInput.from_file(
os.path.join(test_dir, "new_qchem_files/pcm.qin")).molecule
dict_set = QChemDictSet(molecule=test_molecule,
job_type='opt',
basis_set='6-31g*',
scf_algorithm='diis',
dft_rung=5,
pcm_dielectric=10.0,
max_scf_cycles=35)
dict_set.write("mol.qin")
test_dict = QCInput.from_file("mol.qin").as_dict()
rem = {
"job_type": "opt",
"basis": "6-31G*",
"max_scf_cycles": '35',
"method": "wb97mv",
"geom_opt_max_cycles": '200',
"gen_scfman": 'true',
"scf_algorithm": "diis",
"xc_grid": '3',
"solvent_method": "pcm",
'symmetry': 'false',
'sym_ignore': 'true',
'resp_charges': 'true'
}
pcm = {
"heavypoints": "194",
"hpoints": "194",
"radii": "uff",
"theory": "cpcm",
"vdwscale": "1.1"
}
qc_input = QCInput(molecule=test_molecule, rem=rem, pcm=pcm, solvent={"dielectric": "10.0"})
for k, v in qc_input.as_dict().items():
self.assertEqual(v, test_dict[k])
os.remove("mol.qin")
def test_smd_write(self):
test_molecule = QCInput.from_file(
os.path.join(test_dir, "new_qchem_files/pcm.qin")).molecule
dict_set = QChemDictSet(molecule=test_molecule,
job_type='opt',
basis_set='6-31g*',
scf_algorithm='diis',
dft_rung=5,
smd_solvent="water",
max_scf_cycles=35)
dict_set.write("mol.qin")
test_dict = QCInput.from_file("mol.qin").as_dict()
rem = {
"job_type": "opt",
"basis": "6-31G*",
"max_scf_cycles": '35',
"method": "wb97mv",
"geom_opt_max_cycles": '200',
"gen_scfman": 'true',
"scf_algorithm": "diis",
"xc_grid": '3',
"solvent_method": "smd",
"ideriv": "1",
'symmetry': 'false',
'sym_ignore': 'true',
'resp_charges': 'true'
}
qc_input = QCInput(molecule=test_molecule, rem=rem, smx={"solvent": "water"})
for k, v in qc_input.as_dict().items():
self.assertEqual(v, test_dict[k])
os.remove("mol.qin")
def test_custom_smd_write(self):
test_molecule = QCInput.from_file(
os.path.join(test_dir, "new_qchem_files/pcm.qin")).molecule
dict_set = QChemDictSet(molecule=test_molecule,
job_type='opt',
basis_set='6-31g*',
scf_algorithm='diis',
dft_rung=5,
smd_solvent="custom",
custom_smd="90.00,1.415,0.00,0.735,20.2,0.00,0.00",
max_scf_cycles=35)
dict_set.write("mol.qin")
test_dict = QCInput.from_file("mol.qin").as_dict()
rem = {
"job_type": "opt",
"basis": "6-31G*",
"max_scf_cycles": '35',
"method": "wb97mv",
"geom_opt_max_cycles": '200',
"gen_scfman": 'true',
"scf_algorithm": "diis",
"xc_grid": '3',
"solvent_method": "smd",
"ideriv": "1",
'symmetry': 'false',
'sym_ignore': 'true',
'resp_charges': 'true'
}
qc_input = QCInput(molecule=test_molecule, rem=rem, smx={"solvent": "other"})
for k, v in qc_input.as_dict().items():
self.assertEqual(v, test_dict[k])
os.remove("mol.qin")
with open("solvent_data") as sd:
lines = sd.readlines()
self.assertEqual(lines[0], "90.00,1.415,0.00,0.735,20.2,0.00,0.00")
os.remove("solvent_data")
class OptSetTest(PymatgenTest):
def test_init(self):
test_molecule = QCInput.from_file(
os.path.join(test_dir, "new_qchem_files/pcm.qin")).molecule
test_OptSet = OptSet(molecule=test_molecule)
self.assertEqual(
test_OptSet.rem, {
'job_type': 'opt',
'gen_scfman': 'true',
'basis': 'def2-tzvppd',
'max_scf_cycles': 200,
'method': 'wb97xd',
'scf_algorithm': 'diis',
'xc_grid': '3',
'geom_opt_max_cycles': 200,
'symmetry': 'false',
'sym_ignore': 'true',
'resp_charges': 'true'
})
self.assertEqual(test_OptSet.pcm, {})
self.assertEqual(test_OptSet.solvent, {})
self.assertEqual(test_OptSet.smx, {})
self.assertEqual(test_OptSet.molecule, test_molecule)
def test_pcm_init(self):
test_molecule = QCInput.from_file(
os.path.join(test_dir, "new_qchem_files/pcm.qin")).molecule
test_OptSet = OptSet(molecule=test_molecule, pcm_dielectric=10.0)
self.assertEqual(
test_OptSet.rem, {
'job_type': 'opt',
'gen_scfman': 'true',
'basis': 'def2-tzvppd',
'max_scf_cycles': 200,
'method': 'wb97xd',
'geom_opt_max_cycles': 200,
'scf_algorithm': 'diis',
'xc_grid': '3',
'solvent_method': 'pcm',
'symmetry': 'false',
'sym_ignore': 'true',
'resp_charges': 'true'
})
self.assertEqual(
test_OptSet.pcm, {
'heavypoints': '194',
'hpoints': '194',
'radii': 'uff',
'theory': 'cpcm',
'vdwscale': '1.1'
})
self.assertEqual(test_OptSet.solvent, {'dielectric': 10.0})
self.assertEqual(test_OptSet.molecule, test_molecule)
def test_smd_init(self):
test_molecule = QCInput.from_file(
os.path.join(test_dir, "new_qchem_files/pcm.qin")).molecule
test_OptSet = OptSet(molecule=test_molecule, smd_solvent='water')
self.assertEqual(
test_OptSet.rem, {
'job_type': 'opt',
'gen_scfman': 'true',
'basis': 'def2-tzvppd',
'max_scf_cycles': 200,
'method': 'wb97xd',
'geom_opt_max_cycles': 200,
'scf_algorithm': 'diis',
'xc_grid': '3',
'solvent_method': 'smd',
'ideriv': '1',
'symmetry': 'false',
'sym_ignore': 'true',
'resp_charges': 'true'
})
self.assertEqual(test_OptSet.smx, {'solvent': 'water'})
self.assertEqual(test_OptSet.molecule, test_molecule)
class SinglePointSetTest(PymatgenTest):
def test_init(self):
test_molecule = QCInput.from_file(
os.path.join(test_dir, "new_qchem_files/pcm.qin")).molecule
test_SPSet = SinglePointSet(molecule=test_molecule)
self.assertEqual(
test_SPSet.rem, {
'job_type': 'sp',
'gen_scfman': 'true',
'basis': 'def2-tzvppd',
'max_scf_cycles': 200,
'method': 'wb97xd',
'scf_algorithm': 'diis',
'xc_grid': '3',
'symmetry': 'false',
'sym_ignore': 'true',
'resp_charges': 'true'
})
self.assertEqual(test_SPSet.pcm, {})
self.assertEqual(test_SPSet.solvent, {})
self.assertEqual(test_SPSet.molecule, test_molecule)
def test_pcm_init(self):
test_molecule = QCInput.from_file(
os.path.join(test_dir, "new_qchem_files/pcm.qin")).molecule
test_SPSet = SinglePointSet(
molecule=test_molecule, pcm_dielectric=10.0)
self.assertEqual(
test_SPSet.rem, {
'job_type': 'sp',
'gen_scfman': 'true',
'basis': 'def2-tzvppd',
'max_scf_cycles': 200,
'method': 'wb97xd',
'scf_algorithm': 'diis',
'xc_grid': '3',
'solvent_method': 'pcm',
'symmetry': 'false',
'sym_ignore': 'true',
'resp_charges': 'true'
})
self.assertEqual(
test_SPSet.pcm, {
'heavypoints': '194',
'hpoints': '194',
'radii': 'uff',
'theory': 'cpcm',
'vdwscale': '1.1'
})
self.assertEqual(test_SPSet.solvent, {'dielectric': 10.0})
self.assertEqual(test_SPSet.molecule, test_molecule)
def test_smd_init(self):
test_molecule = QCInput.from_file(
os.path.join(test_dir, "new_qchem_files/pcm.qin")).molecule
test_SPSet = SinglePointSet(molecule=test_molecule, smd_solvent='water')
self.assertEqual(
test_SPSet.rem, {
'job_type': 'sp',
'gen_scfman': 'true',
'basis': 'def2-tzvppd',
'max_scf_cycles': 200,
'method': 'wb97xd',
'scf_algorithm': 'diis',
'xc_grid': '3',
'solvent_method': 'smd',
'ideriv': '1',
'symmetry': 'false',
'sym_ignore': 'true',
'resp_charges': 'true'
})
self.assertEqual(test_SPSet.smx, {'solvent': 'water'})
self.assertEqual(test_SPSet.molecule, test_molecule)
class FreqSetTest(PymatgenTest):
def test_init(self):
test_molecule = QCInput.from_file(
os.path.join(test_dir, "new_qchem_files/pcm.qin")).molecule
test_FreqSet = FreqSet(molecule=test_molecule)
self.assertEqual(
test_FreqSet.rem, {
'job_type': 'freq',
'gen_scfman': 'true',
'basis': 'def2-tzvppd',
'max_scf_cycles': 200,
'method': 'wb97xd',
'scf_algorithm': 'diis',
'xc_grid': '3',
'symmetry': 'false',
'sym_ignore': 'true',
'resp_charges': 'true'
})
self.assertEqual(test_FreqSet.pcm, {})
self.assertEqual(test_FreqSet.solvent, {})
self.assertEqual(test_FreqSet.molecule, test_molecule)
def test_pcm_init(self):
test_molecule = QCInput.from_file(
os.path.join(test_dir, "new_qchem_files/pcm.qin")).molecule
test_FreqSet = FreqSet(molecule=test_molecule, pcm_dielectric=10.0)
self.assertEqual(
test_FreqSet.rem, {
'job_type': 'freq',
'gen_scfman': 'true',
'basis': 'def2-tzvppd',
'max_scf_cycles': 200,
'method': 'wb97xd',
'scf_algorithm': 'diis',
'xc_grid': '3',
'solvent_method': 'pcm',
'symmetry': 'false',
'sym_ignore': 'true',
'resp_charges': 'true'
})
self.assertEqual(
test_FreqSet.pcm, {
'heavypoints': '194',
'hpoints': '194',
'radii': 'uff',
'theory': 'cpcm',
'vdwscale': '1.1'
})
self.assertEqual(test_FreqSet.solvent, {'dielectric': 10.0})
self.assertEqual(test_FreqSet.molecule, test_molecule)
def test_smd_init(self):
test_molecule = QCInput.from_file(
os.path.join(test_dir, "new_qchem_files/pcm.qin")).molecule
test_FreqSet = FreqSet(molecule=test_molecule, smd_solvent='water')
self.assertEqual(
test_FreqSet.rem, {
'job_type': 'freq',
'gen_scfman': 'true',
'basis': 'def2-tzvppd',
'max_scf_cycles': 200,
'method': 'wb97xd',
'scf_algorithm': 'diis',
'xc_grid': '3',
'solvent_method': 'smd',
'ideriv': '1',
'symmetry': 'false',
'sym_ignore': 'true',
'resp_charges': 'true'
})
self.assertEqual(test_FreqSet.smx, {'solvent': 'water'})
self.assertEqual(test_FreqSet.molecule, test_molecule)
if __name__ == '__main__':
unittest.main()
|
|
# ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
"""Wrapper for http://oss.sgi.com/projects/ogl-sample/ABI/glxext.h
Generated by tools/gengl.py.
Do not modify this file.
"""
import ctypes
from ctypes import *
from pyglet.gl.lib import link_GLX as _link_function
from pyglet.gl.lib import c_ptrdiff_t
if not hasattr(ctypes, 'c_int64'):
    # Fall back to the long long types, which are guaranteed to be 64-bit,
    # unlike c_long (which is only 32-bit on many platforms).
    c_int64 = c_longlong
    c_uint64 = c_ulonglong
# BEGIN GENERATED CONTENT (do not edit below this line)
# This content is generated by tools/gengl.py.
# Wrapper for http://www.opengl.org/registry/api/glxext.h
import pyglet.libs.x11.xlib
import pyglet.gl.glx
# H (/usr/include/GL/glx.h:26)
# ARB_get_proc_address (/usr/include/GL/glx.h:317)
# GLXEXT_LEGACY (/usr/include/GL/glx.h:334)
GLX_GLXEXT_VERSION = 32 # GL/glxext.h:53
# VERSION_1_3 (GL/glxext.h:55)
# VERSION_1_4 (GL/glxext.h:114)
# ARB_get_proc_address (GL/glxext.h:119)
# ARB_multisample (GL/glxext.h:122)
GLX_SAMPLE_BUFFERS_ARB = 100000 # GL/glxext.h:123
GLX_SAMPLES_ARB = 100001 # GL/glxext.h:124
# ARB_vertex_buffer_object (GL/glxext.h:127)
GLX_CONTEXT_ALLOW_BUFFER_BYTE_ORDER_MISMATCH_ARB = 8341 # GL/glxext.h:128
# ARB_fbconfig_float (GL/glxext.h:131)
GLX_RGBA_FLOAT_TYPE_ARB = 8377 # GL/glxext.h:132
GLX_RGBA_FLOAT_BIT_ARB = 4 # GL/glxext.h:133
# ARB_framebuffer_sRGB (GL/glxext.h:136)
GLX_FRAMEBUFFER_SRGB_CAPABLE_ARB = 8370 # GL/glxext.h:137
# ARB_create_context (GL/glxext.h:140)
GLX_CONTEXT_DEBUG_BIT_ARB = 1 # GL/glxext.h:141
GLX_CONTEXT_FORWARD_COMPATIBLE_BIT_ARB = 2 # GL/glxext.h:142
GLX_CONTEXT_MAJOR_VERSION_ARB = 8337 # GL/glxext.h:143
GLX_CONTEXT_MINOR_VERSION_ARB = 8338 # GL/glxext.h:144
GLX_CONTEXT_FLAGS_ARB = 8340 # GL/glxext.h:145
# ARB_create_context_profile (GL/glxext.h:148)
GLX_CONTEXT_CORE_PROFILE_BIT_ARB = 1 # GL/glxext.h:149
GLX_CONTEXT_COMPATIBILITY_PROFILE_BIT_ARB = 2 # GL/glxext.h:150
GLX_CONTEXT_PROFILE_MASK_ARB = 37158 # GL/glxext.h:151
# ARB_create_context_robustness (GL/glxext.h:154)
GLX_CONTEXT_ROBUST_ACCESS_BIT_ARB = 4 # GL/glxext.h:155
GLX_LOSE_CONTEXT_ON_RESET_ARB = 33362 # GL/glxext.h:156
GLX_CONTEXT_RESET_NOTIFICATION_STRATEGY_ARB = 33366 # GL/glxext.h:157
GLX_NO_RESET_NOTIFICATION_ARB = 33377 # GL/glxext.h:158
# SGIS_multisample (GL/glxext.h:161)
GLX_SAMPLE_BUFFERS_SGIS = 100000 # GL/glxext.h:162
GLX_SAMPLES_SGIS = 100001 # GL/glxext.h:163
# EXT_visual_info (GL/glxext.h:166)
GLX_X_VISUAL_TYPE_EXT = 34 # GL/glxext.h:167
GLX_TRANSPARENT_TYPE_EXT = 35 # GL/glxext.h:168
GLX_TRANSPARENT_INDEX_VALUE_EXT = 36 # GL/glxext.h:169
GLX_TRANSPARENT_RED_VALUE_EXT = 37 # GL/glxext.h:170
GLX_TRANSPARENT_GREEN_VALUE_EXT = 38 # GL/glxext.h:171
GLX_TRANSPARENT_BLUE_VALUE_EXT = 39 # GL/glxext.h:172
GLX_TRANSPARENT_ALPHA_VALUE_EXT = 40 # GL/glxext.h:173
GLX_NONE_EXT = 32768 # GL/glxext.h:174
GLX_TRUE_COLOR_EXT = 32770 # GL/glxext.h:175
GLX_DIRECT_COLOR_EXT = 32771 # GL/glxext.h:176
GLX_PSEUDO_COLOR_EXT = 32772 # GL/glxext.h:177
GLX_STATIC_COLOR_EXT = 32773 # GL/glxext.h:178
GLX_GRAY_SCALE_EXT = 32774 # GL/glxext.h:179
GLX_STATIC_GRAY_EXT = 32775 # GL/glxext.h:180
GLX_TRANSPARENT_RGB_EXT = 32776 # GL/glxext.h:181
GLX_TRANSPARENT_INDEX_EXT = 32777 # GL/glxext.h:182
# SGI_swap_control (GL/glxext.h:185)
# SGI_video_sync (GL/glxext.h:188)
# SGI_make_current_read (GL/glxext.h:191)
# SGIX_video_source (GL/glxext.h:194)
# EXT_visual_rating (GL/glxext.h:197)
GLX_VISUAL_CAVEAT_EXT = 32 # GL/glxext.h:198
GLX_SLOW_VISUAL_EXT = 32769 # GL/glxext.h:199
GLX_NON_CONFORMANT_VISUAL_EXT = 32781 # GL/glxext.h:200
# EXT_import_context (GL/glxext.h:204)
GLX_SHARE_CONTEXT_EXT = 32778 # GL/glxext.h:205
GLX_VISUAL_ID_EXT = 32779 # GL/glxext.h:206
GLX_SCREEN_EXT = 32780 # GL/glxext.h:207
# SGIX_fbconfig (GL/glxext.h:210)
GLX_WINDOW_BIT_SGIX = 1 # GL/glxext.h:211
GLX_PIXMAP_BIT_SGIX = 2 # GL/glxext.h:212
GLX_RGBA_BIT_SGIX = 1 # GL/glxext.h:213
GLX_COLOR_INDEX_BIT_SGIX = 2 # GL/glxext.h:214
GLX_DRAWABLE_TYPE_SGIX = 32784 # GL/glxext.h:215
GLX_RENDER_TYPE_SGIX = 32785 # GL/glxext.h:216
GLX_X_RENDERABLE_SGIX = 32786 # GL/glxext.h:217
GLX_FBCONFIG_ID_SGIX = 32787 # GL/glxext.h:218
GLX_RGBA_TYPE_SGIX = 32788 # GL/glxext.h:219
GLX_COLOR_INDEX_TYPE_SGIX = 32789 # GL/glxext.h:220
# SGIX_pbuffer (GL/glxext.h:224)
GLX_PBUFFER_BIT_SGIX = 4 # GL/glxext.h:225
GLX_BUFFER_CLOBBER_MASK_SGIX = 134217728 # GL/glxext.h:226
GLX_FRONT_LEFT_BUFFER_BIT_SGIX = 1 # GL/glxext.h:227
GLX_FRONT_RIGHT_BUFFER_BIT_SGIX = 2 # GL/glxext.h:228
GLX_BACK_LEFT_BUFFER_BIT_SGIX = 4 # GL/glxext.h:229
GLX_BACK_RIGHT_BUFFER_BIT_SGIX = 8 # GL/glxext.h:230
GLX_AUX_BUFFERS_BIT_SGIX = 16 # GL/glxext.h:231
GLX_DEPTH_BUFFER_BIT_SGIX = 32 # GL/glxext.h:232
GLX_STENCIL_BUFFER_BIT_SGIX = 64 # GL/glxext.h:233
GLX_ACCUM_BUFFER_BIT_SGIX = 128 # GL/glxext.h:234
GLX_SAMPLE_BUFFERS_BIT_SGIX = 256 # GL/glxext.h:235
GLX_MAX_PBUFFER_WIDTH_SGIX = 32790 # GL/glxext.h:236
GLX_MAX_PBUFFER_HEIGHT_SGIX = 32791 # GL/glxext.h:237
GLX_MAX_PBUFFER_PIXELS_SGIX = 32792 # GL/glxext.h:238
GLX_OPTIMAL_PBUFFER_WIDTH_SGIX = 32793 # GL/glxext.h:239
GLX_OPTIMAL_PBUFFER_HEIGHT_SGIX = 32794 # GL/glxext.h:240
GLX_PRESERVED_CONTENTS_SGIX = 32795 # GL/glxext.h:241
GLX_LARGEST_PBUFFER_SGIX = 32796 # GL/glxext.h:242
GLX_WIDTH_SGIX = 32797 # GL/glxext.h:243
GLX_HEIGHT_SGIX = 32798 # GL/glxext.h:244
GLX_EVENT_MASK_SGIX = 32799 # GL/glxext.h:245
GLX_DAMAGED_SGIX = 32800 # GL/glxext.h:246
GLX_SAVED_SGIX = 32801 # GL/glxext.h:247
GLX_WINDOW_SGIX = 32802 # GL/glxext.h:248
GLX_PBUFFER_SGIX = 32803 # GL/glxext.h:249
# SGI_cushion (GL/glxext.h:252)
# SGIX_video_resize (GL/glxext.h:255)
GLX_SYNC_FRAME_SGIX = 0 # GL/glxext.h:256
GLX_SYNC_SWAP_SGIX = 1 # GL/glxext.h:257
# SGIX_dmbuffer (GL/glxext.h:260)
GLX_DIGITAL_MEDIA_PBUFFER_SGIX = 32804 # GL/glxext.h:261
# SGIX_swap_group (GL/glxext.h:264)
# SGIX_swap_barrier (GL/glxext.h:267)
# SGIS_blended_overlay (GL/glxext.h:270)
GLX_BLENDED_RGBA_SGIS = 32805 # GL/glxext.h:271
# SGIS_shared_multisample (GL/glxext.h:274)
GLX_MULTISAMPLE_SUB_RECT_WIDTH_SGIS = 32806 # GL/glxext.h:275
GLX_MULTISAMPLE_SUB_RECT_HEIGHT_SGIS = 32807 # GL/glxext.h:276
# SUN_get_transparent_index (GL/glxext.h:279)
# 3DFX_multisample (GL/glxext.h:282)
GLX_SAMPLE_BUFFERS_3DFX = 32848 # GL/glxext.h:283
GLX_SAMPLES_3DFX = 32849 # GL/glxext.h:284
# MESA_copy_sub_buffer (GL/glxext.h:287)
# MESA_pixmap_colormap (GL/glxext.h:290)
# MESA_release_buffers (GL/glxext.h:293)
# MESA_set_3dfx_mode (GL/glxext.h:296)
GLX_3DFX_WINDOW_MODE_MESA = 1 # GL/glxext.h:297
GLX_3DFX_FULLSCREEN_MODE_MESA = 2 # GL/glxext.h:298
# SGIX_visual_select_group (GL/glxext.h:301)
GLX_VISUAL_SELECT_GROUP_SGIX = 32808 # GL/glxext.h:302
# OML_swap_method (GL/glxext.h:305)
GLX_SWAP_METHOD_OML = 32864 # GL/glxext.h:306
GLX_SWAP_EXCHANGE_OML = 32865 # GL/glxext.h:307
GLX_SWAP_COPY_OML = 32866 # GL/glxext.h:308
GLX_SWAP_UNDEFINED_OML = 32867 # GL/glxext.h:309
# OML_sync_control (GL/glxext.h:312)
# NV_float_buffer (GL/glxext.h:315)
GLX_FLOAT_COMPONENTS_NV = 8368 # GL/glxext.h:316
# SGIX_hyperpipe (GL/glxext.h:319)
GLX_HYPERPIPE_PIPE_NAME_LENGTH_SGIX = 80 # GL/glxext.h:320
GLX_BAD_HYPERPIPE_CONFIG_SGIX = 91 # GL/glxext.h:321
GLX_BAD_HYPERPIPE_SGIX = 92 # GL/glxext.h:322
GLX_HYPERPIPE_DISPLAY_PIPE_SGIX = 1 # GL/glxext.h:323
GLX_HYPERPIPE_RENDER_PIPE_SGIX = 2 # GL/glxext.h:324
GLX_PIPE_RECT_SGIX = 1 # GL/glxext.h:325
GLX_PIPE_RECT_LIMITS_SGIX = 2 # GL/glxext.h:326
GLX_HYPERPIPE_STEREO_SGIX = 3 # GL/glxext.h:327
GLX_HYPERPIPE_PIXEL_AVERAGE_SGIX = 4 # GL/glxext.h:328
GLX_HYPERPIPE_ID_SGIX = 32816 # GL/glxext.h:329
# MESA_agp_offset (GL/glxext.h:332)
# EXT_fbconfig_packed_float (GL/glxext.h:335)
GLX_RGBA_UNSIGNED_FLOAT_TYPE_EXT = 8369 # GL/glxext.h:336
GLX_RGBA_UNSIGNED_FLOAT_BIT_EXT = 8 # GL/glxext.h:337
# EXT_framebuffer_sRGB (GL/glxext.h:340)
GLX_FRAMEBUFFER_SRGB_CAPABLE_EXT = 8370 # GL/glxext.h:341
# EXT_texture_from_pixmap (GL/glxext.h:344)
GLX_TEXTURE_1D_BIT_EXT = 1 # GL/glxext.h:345
GLX_TEXTURE_2D_BIT_EXT = 2 # GL/glxext.h:346
GLX_TEXTURE_RECTANGLE_BIT_EXT = 4 # GL/glxext.h:347
GLX_BIND_TO_TEXTURE_RGB_EXT = 8400 # GL/glxext.h:348
GLX_BIND_TO_TEXTURE_RGBA_EXT = 8401 # GL/glxext.h:349
GLX_BIND_TO_MIPMAP_TEXTURE_EXT = 8402 # GL/glxext.h:350
GLX_BIND_TO_TEXTURE_TARGETS_EXT = 8403 # GL/glxext.h:351
GLX_Y_INVERTED_EXT = 8404 # GL/glxext.h:352
GLX_TEXTURE_FORMAT_EXT = 8405 # GL/glxext.h:353
GLX_TEXTURE_TARGET_EXT = 8406 # GL/glxext.h:354
GLX_MIPMAP_TEXTURE_EXT = 8407 # GL/glxext.h:355
GLX_TEXTURE_FORMAT_NONE_EXT = 8408 # GL/glxext.h:356
GLX_TEXTURE_FORMAT_RGB_EXT = 8409 # GL/glxext.h:357
GLX_TEXTURE_FORMAT_RGBA_EXT = 8410 # GL/glxext.h:358
GLX_TEXTURE_1D_EXT = 8411 # GL/glxext.h:359
GLX_TEXTURE_2D_EXT = 8412 # GL/glxext.h:360
GLX_TEXTURE_RECTANGLE_EXT = 8413 # GL/glxext.h:361
GLX_FRONT_LEFT_EXT = 8414 # GL/glxext.h:362
GLX_FRONT_RIGHT_EXT = 8415 # GL/glxext.h:363
GLX_BACK_LEFT_EXT = 8416 # GL/glxext.h:364
GLX_BACK_RIGHT_EXT = 8417 # GL/glxext.h:365
GLX_FRONT_EXT = 8414 # GL/glxext.h:366
GLX_BACK_EXT = 8416 # GL/glxext.h:367
GLX_AUX0_EXT = 8418 # GL/glxext.h:368
GLX_AUX1_EXT = 8419 # GL/glxext.h:369
GLX_AUX2_EXT = 8420 # GL/glxext.h:370
GLX_AUX3_EXT = 8421 # GL/glxext.h:371
GLX_AUX4_EXT = 8422 # GL/glxext.h:372
GLX_AUX5_EXT = 8423 # GL/glxext.h:373
GLX_AUX6_EXT = 8424 # GL/glxext.h:374
GLX_AUX7_EXT = 8425 # GL/glxext.h:375
GLX_AUX8_EXT = 8426 # GL/glxext.h:376
GLX_AUX9_EXT = 8427 # GL/glxext.h:377
# NV_present_video (GL/glxext.h:380)
GLX_NUM_VIDEO_SLOTS_NV = 8432 # GL/glxext.h:381
# NV_video_out (GL/glxext.h:384)
GLX_VIDEO_OUT_COLOR_NV = 8387 # GL/glxext.h:385
GLX_VIDEO_OUT_ALPHA_NV = 8388 # GL/glxext.h:386
GLX_VIDEO_OUT_DEPTH_NV = 8389 # GL/glxext.h:387
GLX_VIDEO_OUT_COLOR_AND_ALPHA_NV = 8390 # GL/glxext.h:388
GLX_VIDEO_OUT_COLOR_AND_DEPTH_NV = 8391 # GL/glxext.h:389
GLX_VIDEO_OUT_FRAME_NV = 8392 # GL/glxext.h:390
GLX_VIDEO_OUT_FIELD_1_NV = 8393 # GL/glxext.h:391
GLX_VIDEO_OUT_FIELD_2_NV = 8394 # GL/glxext.h:392
GLX_VIDEO_OUT_STACKED_FIELDS_1_2_NV = 8395 # GL/glxext.h:393
GLX_VIDEO_OUT_STACKED_FIELDS_2_1_NV = 8396 # GL/glxext.h:394
# NV_swap_group (GL/glxext.h:397)
# NV_video_capture (GL/glxext.h:400)
GLX_DEVICE_ID_NV = 8397 # GL/glxext.h:401
GLX_UNIQUE_ID_NV = 8398 # GL/glxext.h:402
GLX_NUM_VIDEO_CAPTURE_SLOTS_NV = 8399 # GL/glxext.h:403
# EXT_swap_control (GL/glxext.h:406)
GLX_SWAP_INTERVAL_EXT = 8433 # GL/glxext.h:407
GLX_MAX_SWAP_INTERVAL_EXT = 8434 # GL/glxext.h:408
# NV_copy_image (GL/glxext.h:411)
# INTEL_swap_event (GL/glxext.h:414)
GLX_BUFFER_SWAP_COMPLETE_INTEL_MASK = 67108864 # GL/glxext.h:415
GLX_EXCHANGE_COMPLETE_INTEL = 33152 # GL/glxext.h:416
GLX_COPY_COMPLETE_INTEL = 33153 # GL/glxext.h:417
GLX_FLIP_COMPLETE_INTEL = 33154 # GL/glxext.h:418
# NV_multisample_coverage (GL/glxext.h:421)
GLX_COVERAGE_SAMPLES_NV = 100001 # GL/glxext.h:422
GLX_COLOR_SAMPLES_NV = 8371 # GL/glxext.h:423
# AMD_gpu_association (GL/glxext.h:426)
GLX_GPU_VENDOR_AMD = 7936 # GL/glxext.h:427
GLX_GPU_RENDERER_STRING_AMD = 7937 # GL/glxext.h:428
GLX_GPU_OPENGL_VERSION_STRING_AMD = 7938 # GL/glxext.h:429
GLX_GPU_FASTEST_TARGET_GPUS_AMD = 8610 # GL/glxext.h:430
GLX_GPU_RAM_AMD = 8611 # GL/glxext.h:431
GLX_GPU_CLOCK_AMD = 8612 # GL/glxext.h:432
GLX_GPU_NUM_PIPES_AMD = 8613 # GL/glxext.h:433
GLX_GPU_NUM_SIMD_AMD = 8614 # GL/glxext.h:434
GLX_GPU_NUM_RB_AMD = 8615 # GL/glxext.h:435
GLX_GPU_NUM_SPI_AMD = 8616 # GL/glxext.h:436
# EXT_create_context_es2_profile (GL/glxext.h:439)
GLX_CONTEXT_ES2_PROFILE_BIT_EXT = 4 # GL/glxext.h:440
# ARB_get_proc_address (GL/glxext.h:446)
# SGIX_video_source (GL/glxext.h:450)
XID = pyglet.libs.x11.xlib.XID
GLXVideoSourceSGIX = XID # GL/glxext.h:451
# SGIX_fbconfig (GL/glxext.h:454)
GLXFBConfigIDSGIX = XID # GL/glxext.h:455
class struct___GLXFBConfigRec(Structure):
__slots__ = [
]
struct___GLXFBConfigRec._fields_ = [
('_opaque_struct', c_int)
]
class struct___GLXFBConfigRec(Structure):
__slots__ = [
]
struct___GLXFBConfigRec._fields_ = [
('_opaque_struct', c_int)
]
GLXFBConfigSGIX = POINTER(struct___GLXFBConfigRec) # GL/glxext.h:456
# SGIX_pbuffer (GL/glxext.h:459)
GLXPbufferSGIX = XID # GL/glxext.h:460
class struct_anon_106(Structure):
__slots__ = [
'type',
'serial',
'send_event',
'display',
'drawable',
'event_type',
'draw_type',
'mask',
'x',
'y',
'width',
'height',
'count',
]
Display = pyglet.libs.x11.xlib.Display
GLXDrawable = pyglet.gl.glx.GLXDrawable
struct_anon_106._fields_ = [
('type', c_int),
('serial', c_ulong),
('send_event', c_int),
('display', POINTER(Display)),
('drawable', GLXDrawable),
('event_type', c_int),
('draw_type', c_int),
('mask', c_uint),
('x', c_int),
('y', c_int),
('width', c_int),
('height', c_int),
('count', c_int),
]
GLXBufferClobberEventSGIX = struct_anon_106 # GL/glxext.h:473
# NV_video_output (GL/glxext.h:476)
GLXVideoDeviceNV = c_uint # GL/glxext.h:477
# NV_video_capture (GL/glxext.h:480)
GLXVideoCaptureDeviceNV = XID # GL/glxext.h:481
# VERSION_1_3 (GL/glxext.h:521)
# VERSION_1_4 (GL/glxext.h:563)
# ARB_get_proc_address (GL/glxext.h:571)
# ARB_multisample (GL/glxext.h:579)
GLX_ARB_multisample = 1 # GL/glxext.h:580
# ARB_fbconfig_float (GL/glxext.h:583)
GLX_ARB_fbconfig_float = 1 # GL/glxext.h:584
# ARB_framebuffer_sRGB (GL/glxext.h:587)
GLX_ARB_framebuffer_sRGB = 1 # GL/glxext.h:588
# ARB_create_context (GL/glxext.h:591)
GLX_ARB_create_context = 1 # GL/glxext.h:592
GLXContext = pyglet.gl.glx.GLXContext
GLXFBConfig = pyglet.gl.glx.GLXFBConfig
# GL/glxext.h:594
glXCreateContextAttribsARB = _link_function('glXCreateContextAttribsARB', GLXContext, [
POINTER(Display), GLXFBConfig, GLXContext, c_int, POINTER(c_int)], 'ARB_create_context')
PFNGLXCREATECONTEXTATTRIBSARBPROC = CFUNCTYPE(GLXContext, POINTER(
Display), GLXFBConfig, GLXContext, c_int, POINTER(c_int)) # GL/glxext.h:596
# ARB_create_context_profile (GL/glxext.h:599)
GLX_ARB_create_context_profile = 1 # GL/glxext.h:600
# ARB_create_context_robustness (GL/glxext.h:603)
GLX_ARB_create_context_robustness = 1 # GL/glxext.h:604
# SGIS_multisample (GL/glxext.h:607)
GLX_SGIS_multisample = 1 # GL/glxext.h:608
# EXT_visual_info (GL/glxext.h:611)
GLX_EXT_visual_info = 1 # GL/glxext.h:612
# SGI_swap_control (GL/glxext.h:615)
GLX_SGI_swap_control = 1 # GL/glxext.h:616
# GL/glxext.h:618
glXSwapIntervalSGI = _link_function(
'glXSwapIntervalSGI', c_int, [c_int], 'SGI_swap_control')
PFNGLXSWAPINTERVALSGIPROC = CFUNCTYPE(c_int, c_int) # GL/glxext.h:620
# SGI_video_sync (GL/glxext.h:623)
GLX_SGI_video_sync = 1 # GL/glxext.h:624
# GL/glxext.h:626
glXGetVideoSyncSGI = _link_function(
'glXGetVideoSyncSGI', c_int, [POINTER(c_uint)], 'SGI_video_sync')
# GL/glxext.h:627
glXWaitVideoSyncSGI = _link_function(
'glXWaitVideoSyncSGI', c_int, [c_int, c_int, POINTER(c_uint)], 'SGI_video_sync')
PFNGLXGETVIDEOSYNCSGIPROC = CFUNCTYPE(
c_int, POINTER(c_uint)) # GL/glxext.h:629
PFNGLXWAITVIDEOSYNCSGIPROC = CFUNCTYPE(
c_int, c_int, c_int, POINTER(c_uint)) # GL/glxext.h:630
# SGI_make_current_read (GL/glxext.h:633)
GLX_SGI_make_current_read = 1 # GL/glxext.h:634
# GL/glxext.h:636
glXMakeCurrentReadSGI = _link_function('glXMakeCurrentReadSGI', c_int, [POINTER(
Display), GLXDrawable, GLXDrawable, GLXContext], 'SGI_make_current_read')
# GL/glxext.h:637
glXGetCurrentReadDrawableSGI = _link_function(
'glXGetCurrentReadDrawableSGI', GLXDrawable, list(), 'SGI_make_current_read')
PFNGLXMAKECURRENTREADSGIPROC = CFUNCTYPE(
c_int, POINTER(Display), GLXDrawable, GLXDrawable, GLXContext) # GL/glxext.h:639
PFNGLXGETCURRENTREADDRAWABLESGIPROC = CFUNCTYPE(GLXDrawable) # GL/glxext.h:640
# SGIX_video_source (GL/glxext.h:643)
GLX_SGIX_video_source = 1 # GL/glxext.h:644
# EXT_visual_rating (GL/glxext.h:655)
GLX_EXT_visual_rating = 1 # GL/glxext.h:656
# EXT_import_context (GL/glxext.h:659)
GLX_EXT_import_context = 1 # GL/glxext.h:660
# GL/glxext.h:662
glXGetCurrentDisplayEXT = _link_function(
'glXGetCurrentDisplayEXT', POINTER(Display), list(), 'EXT_import_context')
# GL/glxext.h:663
glXQueryContextInfoEXT = _link_function('glXQueryContextInfoEXT', c_int, [
POINTER(Display), GLXContext, c_int, POINTER(c_int)], 'EXT_import_context')
GLXContextID = pyglet.gl.glx.GLXContextID
# GL/glxext.h:664
glXGetContextIDEXT = _link_function(
'glXGetContextIDEXT', GLXContextID, [GLXContext], 'EXT_import_context')
# GL/glxext.h:665
glXImportContextEXT = _link_function('glXImportContextEXT', GLXContext, [
POINTER(Display), GLXContextID], 'EXT_import_context')
# GL/glxext.h:666
glXFreeContextEXT = _link_function(
'glXFreeContextEXT', None, [POINTER(Display), GLXContext], 'EXT_import_context')
PFNGLXGETCURRENTDISPLAYEXTPROC = CFUNCTYPE(POINTER(Display)) # GL/glxext.h:668
PFNGLXQUERYCONTEXTINFOEXTPROC = CFUNCTYPE(
c_int, POINTER(Display), GLXContext, c_int, POINTER(c_int)) # GL/glxext.h:669
PFNGLXGETCONTEXTIDEXTPROC = CFUNCTYPE(
GLXContextID, GLXContext) # GL/glxext.h:670
PFNGLXIMPORTCONTEXTEXTPROC = CFUNCTYPE(
GLXContext, POINTER(Display), GLXContextID) # GL/glxext.h:671
PFNGLXFREECONTEXTEXTPROC = CFUNCTYPE(
None, POINTER(Display), GLXContext) # GL/glxext.h:672
# SGIX_fbconfig (GL/glxext.h:675)
GLX_SGIX_fbconfig = 1 # GL/glxext.h:676
# GL/glxext.h:678
glXGetFBConfigAttribSGIX = _link_function('glXGetFBConfigAttribSGIX', c_int, [
POINTER(Display), GLXFBConfigSGIX, c_int, POINTER(c_int)], 'SGIX_fbconfig')
# GL/glxext.h:679
glXChooseFBConfigSGIX = _link_function('glXChooseFBConfigSGIX', POINTER(
GLXFBConfigSGIX), [POINTER(Display), c_int, POINTER(c_int), POINTER(c_int)], 'SGIX_fbconfig')
GLXPixmap = pyglet.gl.glx.GLXPixmap
Pixmap = pyglet.libs.x11.xlib.Pixmap
# GL/glxext.h:680
glXCreateGLXPixmapWithConfigSGIX = _link_function('glXCreateGLXPixmapWithConfigSGIX', GLXPixmap, [
POINTER(Display), GLXFBConfigSGIX, Pixmap], 'SGIX_fbconfig')
# GL/glxext.h:681
glXCreateContextWithConfigSGIX = _link_function('glXCreateContextWithConfigSGIX', GLXContext, [
POINTER(Display), GLXFBConfigSGIX, c_int, GLXContext, c_int], 'SGIX_fbconfig')
XVisualInfo = pyglet.libs.x11.xlib.XVisualInfo
# GL/glxext.h:682
glXGetVisualFromFBConfigSGIX = _link_function('glXGetVisualFromFBConfigSGIX', POINTER(
XVisualInfo), [POINTER(Display), GLXFBConfigSGIX], 'SGIX_fbconfig')
# GL/glxext.h:683
glXGetFBConfigFromVisualSGIX = _link_function('glXGetFBConfigFromVisualSGIX', GLXFBConfigSGIX, [
POINTER(Display), POINTER(XVisualInfo)], 'SGIX_fbconfig')
PFNGLXGETFBCONFIGATTRIBSGIXPROC = CFUNCTYPE(c_int, POINTER(
Display), GLXFBConfigSGIX, c_int, POINTER(c_int)) # GL/glxext.h:685
PFNGLXCHOOSEFBCONFIGSGIXPROC = CFUNCTYPE(POINTER(GLXFBConfigSGIX), POINTER(
Display), c_int, POINTER(c_int), POINTER(c_int)) # GL/glxext.h:686
PFNGLXCREATEGLXPIXMAPWITHCONFIGSGIXPROC = CFUNCTYPE(
GLXPixmap, POINTER(Display), GLXFBConfigSGIX, Pixmap) # GL/glxext.h:687
PFNGLXCREATECONTEXTWITHCONFIGSGIXPROC = CFUNCTYPE(GLXContext, POINTER(
Display), GLXFBConfigSGIX, c_int, GLXContext, c_int) # GL/glxext.h:688
PFNGLXGETVISUALFROMFBCONFIGSGIXPROC = CFUNCTYPE(
POINTER(XVisualInfo), POINTER(Display), GLXFBConfigSGIX) # GL/glxext.h:689
PFNGLXGETFBCONFIGFROMVISUALSGIXPROC = CFUNCTYPE(
GLXFBConfigSGIX, POINTER(Display), POINTER(XVisualInfo)) # GL/glxext.h:690
# SGIX_pbuffer (GL/glxext.h:693)
GLX_SGIX_pbuffer = 1 # GL/glxext.h:694
# GL/glxext.h:696
glXCreateGLXPbufferSGIX = _link_function('glXCreateGLXPbufferSGIX', GLXPbufferSGIX, [
POINTER(Display), GLXFBConfigSGIX, c_uint, c_uint, POINTER(c_int)], 'SGIX_pbuffer')
# GL/glxext.h:697
glXDestroyGLXPbufferSGIX = _link_function(
'glXDestroyGLXPbufferSGIX', None, [POINTER(Display), GLXPbufferSGIX], 'SGIX_pbuffer')
# GL/glxext.h:698
glXQueryGLXPbufferSGIX = _link_function('glXQueryGLXPbufferSGIX', c_int, [
POINTER(Display), GLXPbufferSGIX, c_int, POINTER(c_uint)], 'SGIX_pbuffer')
# GL/glxext.h:699
glXSelectEventSGIX = _link_function(
'glXSelectEventSGIX', None, [POINTER(Display), GLXDrawable, c_ulong], 'SGIX_pbuffer')
# GL/glxext.h:700
glXGetSelectedEventSGIX = _link_function('glXGetSelectedEventSGIX', None, [
POINTER(Display), GLXDrawable, POINTER(c_ulong)], 'SGIX_pbuffer')
PFNGLXCREATEGLXPBUFFERSGIXPROC = CFUNCTYPE(GLXPbufferSGIX, POINTER(
Display), GLXFBConfigSGIX, c_uint, c_uint, POINTER(c_int)) # GL/glxext.h:702
PFNGLXDESTROYGLXPBUFFERSGIXPROC = CFUNCTYPE(
None, POINTER(Display), GLXPbufferSGIX) # GL/glxext.h:703
PFNGLXQUERYGLXPBUFFERSGIXPROC = CFUNCTYPE(c_int, POINTER(
Display), GLXPbufferSGIX, c_int, POINTER(c_uint)) # GL/glxext.h:704
PFNGLXSELECTEVENTSGIXPROC = CFUNCTYPE(
None, POINTER(Display), GLXDrawable, c_ulong) # GL/glxext.h:705
PFNGLXGETSELECTEDEVENTSGIXPROC = CFUNCTYPE(
None, POINTER(Display), GLXDrawable, POINTER(c_ulong)) # GL/glxext.h:706
# SGI_cushion (GL/glxext.h:709)
GLX_SGI_cushion = 1 # GL/glxext.h:710
Window = pyglet.libs.x11.xlib.Window
# GL/glxext.h:712
glXCushionSGI = _link_function(
'glXCushionSGI', None, [POINTER(Display), Window, c_float], 'SGI_cushion')
PFNGLXCUSHIONSGIPROC = CFUNCTYPE(
None, POINTER(Display), Window, c_float) # GL/glxext.h:714
# SGIX_video_resize (GL/glxext.h:717)
GLX_SGIX_video_resize = 1 # GL/glxext.h:718
# GL/glxext.h:720
glXBindChannelToWindowSGIX = _link_function('glXBindChannelToWindowSGIX', c_int, [
POINTER(Display), c_int, c_int, Window], 'SGIX_video_resize')
# GL/glxext.h:721
glXChannelRectSGIX = _link_function('glXChannelRectSGIX', c_int, [POINTER(
Display), c_int, c_int, c_int, c_int, c_int, c_int], 'SGIX_video_resize')
# GL/glxext.h:722
glXQueryChannelRectSGIX = _link_function('glXQueryChannelRectSGIX', c_int, [POINTER(
Display), c_int, c_int, POINTER(c_int), POINTER(c_int), POINTER(c_int), POINTER(c_int)], 'SGIX_video_resize')
# GL/glxext.h:723
glXQueryChannelDeltasSGIX = _link_function('glXQueryChannelDeltasSGIX', c_int, [POINTER(
Display), c_int, c_int, POINTER(c_int), POINTER(c_int), POINTER(c_int), POINTER(c_int)], 'SGIX_video_resize')
GLenum = c_uint # /usr/include/GL/gl.h:153
# GL/glxext.h:724
glXChannelRectSyncSGIX = _link_function('glXChannelRectSyncSGIX', c_int, [
POINTER(Display), c_int, c_int, GLenum], 'SGIX_video_resize')
PFNGLXBINDCHANNELTOWINDOWSGIXPROC = CFUNCTYPE(
c_int, POINTER(Display), c_int, c_int, Window) # GL/glxext.h:726
PFNGLXCHANNELRECTSGIXPROC = CFUNCTYPE(c_int, POINTER(
Display), c_int, c_int, c_int, c_int, c_int, c_int) # GL/glxext.h:727
PFNGLXQUERYCHANNELRECTSGIXPROC = CFUNCTYPE(c_int, POINTER(Display), c_int, c_int, POINTER(
c_int), POINTER(c_int), POINTER(c_int), POINTER(c_int)) # GL/glxext.h:728
PFNGLXQUERYCHANNELDELTASSGIXPROC = CFUNCTYPE(c_int, POINTER(Display), c_int, c_int, POINTER(
c_int), POINTER(c_int), POINTER(c_int), POINTER(c_int)) # GL/glxext.h:729
PFNGLXCHANNELRECTSYNCSGIXPROC = CFUNCTYPE(
c_int, POINTER(Display), c_int, c_int, GLenum) # GL/glxext.h:730
# SGIX_dmbuffer (GL/glxext.h:733)
GLX_SGIX_dmbuffer = 1 # GL/glxext.h:734
# SGIX_swap_group (GL/glxext.h:743)
GLX_SGIX_swap_group = 1 # GL/glxext.h:744
# GL/glxext.h:746
glXJoinSwapGroupSGIX = _link_function('glXJoinSwapGroupSGIX', None, [
POINTER(Display), GLXDrawable, GLXDrawable], 'SGIX_swap_group')
PFNGLXJOINSWAPGROUPSGIXPROC = CFUNCTYPE(
None, POINTER(Display), GLXDrawable, GLXDrawable) # GL/glxext.h:748
# SGIX_swap_barrier (GL/glxext.h:751)
GLX_SGIX_swap_barrier = 1 # GL/glxext.h:752
# GL/glxext.h:754
glXBindSwapBarrierSGIX = _link_function('glXBindSwapBarrierSGIX', None, [
POINTER(Display), GLXDrawable, c_int], 'SGIX_swap_barrier')
# GL/glxext.h:755
glXQueryMaxSwapBarriersSGIX = _link_function('glXQueryMaxSwapBarriersSGIX', c_int, [
POINTER(Display), c_int, POINTER(c_int)], 'SGIX_swap_barrier')
PFNGLXBINDSWAPBARRIERSGIXPROC = CFUNCTYPE(
None, POINTER(Display), GLXDrawable, c_int) # GL/glxext.h:757
PFNGLXQUERYMAXSWAPBARRIERSSGIXPROC = CFUNCTYPE(
c_int, POINTER(Display), c_int, POINTER(c_int)) # GL/glxext.h:758
# SUN_get_transparent_index (GL/glxext.h:761)
GLX_SUN_get_transparent_index = 1 # GL/glxext.h:762
# GL/glxext.h:764
glXGetTransparentIndexSUN = _link_function('glXGetTransparentIndexSUN', c_int, [
POINTER(Display), Window, Window, POINTER(c_long)], 'SUN_get_transparent_index')
PFNGLXGETTRANSPARENTINDEXSUNPROC = CFUNCTYPE(
c_int, POINTER(Display), Window, Window, POINTER(c_long)) # GL/glxext.h:766
# MESA_copy_sub_buffer (GL/glxext.h:769)
GLX_MESA_copy_sub_buffer = 1 # GL/glxext.h:770
# GL/glxext.h:772
glXCopySubBufferMESA = _link_function('glXCopySubBufferMESA', None, [POINTER(
Display), GLXDrawable, c_int, c_int, c_int, c_int], 'MESA_copy_sub_buffer')
PFNGLXCOPYSUBBUFFERMESAPROC = CFUNCTYPE(None, POINTER(
Display), GLXDrawable, c_int, c_int, c_int, c_int) # GL/glxext.h:774
# MESA_pixmap_colormap (GL/glxext.h:777)
GLX_MESA_pixmap_colormap = 1 # GL/glxext.h:778
Colormap = pyglet.libs.x11.xlib.Colormap
# GL/glxext.h:780
glXCreateGLXPixmapMESA = _link_function('glXCreateGLXPixmapMESA', GLXPixmap, [
POINTER(Display), POINTER(XVisualInfo), Pixmap, Colormap], 'MESA_pixmap_colormap')
PFNGLXCREATEGLXPIXMAPMESAPROC = CFUNCTYPE(GLXPixmap, POINTER(
Display), POINTER(XVisualInfo), Pixmap, Colormap) # GL/glxext.h:782
# MESA_release_buffers (GL/glxext.h:785)
GLX_MESA_release_buffers = 1 # GL/glxext.h:786
# GL/glxext.h:788
glXReleaseBuffersMESA = _link_function('glXReleaseBuffersMESA', c_int, [
POINTER(Display), GLXDrawable], 'MESA_release_buffers')
PFNGLXRELEASEBUFFERSMESAPROC = CFUNCTYPE(
c_int, POINTER(Display), GLXDrawable) # GL/glxext.h:790
# MESA_set_3dfx_mode (GL/glxext.h:793)
GLX_MESA_set_3dfx_mode = 1 # GL/glxext.h:794
# GL/glxext.h:796
glXSet3DfxModeMESA = _link_function(
'glXSet3DfxModeMESA', c_int, [c_int], 'MESA_set_3dfx_mode')
PFNGLXSET3DFXMODEMESAPROC = CFUNCTYPE(c_int, c_int) # GL/glxext.h:798
# SGIX_visual_select_group (GL/glxext.h:801)
GLX_SGIX_visual_select_group = 1 # GL/glxext.h:802
# OML_swap_method (GL/glxext.h:805)
GLX_OML_swap_method = 1 # GL/glxext.h:806
# OML_sync_control (GL/glxext.h:809)
GLX_OML_sync_control = 1 # GL/glxext.h:810
# GL/glxext.h:812
glXGetSyncValuesOML = _link_function('glXGetSyncValuesOML', c_int, [POINTER(
Display), GLXDrawable, POINTER(c_int64), POINTER(c_int64), POINTER(c_int64)], 'OML_sync_control')
# GL/glxext.h:813
glXGetMscRateOML = _link_function('glXGetMscRateOML', c_int, [POINTER(
Display), GLXDrawable, POINTER(c_int32), POINTER(c_int32)], 'OML_sync_control')
# GL/glxext.h:814
glXSwapBuffersMscOML = _link_function('glXSwapBuffersMscOML', c_int64, [
POINTER(Display), GLXDrawable, c_int64, c_int64, c_int64], 'OML_sync_control')
# GL/glxext.h:815
glXWaitForMscOML = _link_function('glXWaitForMscOML', c_int, [POINTER(
Display), GLXDrawable, c_int64, c_int64, c_int64, POINTER(c_int64), POINTER(c_int64), POINTER(c_int64)], 'OML_sync_control')
# GL/glxext.h:816
glXWaitForSbcOML = _link_function('glXWaitForSbcOML', c_int, [POINTER(
Display), GLXDrawable, c_int64, POINTER(c_int64), POINTER(c_int64), POINTER(c_int64)], 'OML_sync_control')
PFNGLXGETSYNCVALUESOMLPROC = CFUNCTYPE(c_int, POINTER(Display), GLXDrawable, POINTER(
c_int64), POINTER(c_int64), POINTER(c_int64)) # GL/glxext.h:818
PFNGLXGETMSCRATEOMLPROC = CFUNCTYPE(c_int, POINTER(
Display), GLXDrawable, POINTER(c_int32), POINTER(c_int32)) # GL/glxext.h:819
PFNGLXSWAPBUFFERSMSCOMLPROC = CFUNCTYPE(c_int64, POINTER(
Display), GLXDrawable, c_int64, c_int64, c_int64) # GL/glxext.h:820
PFNGLXWAITFORMSCOMLPROC = CFUNCTYPE(c_int, POINTER(Display), GLXDrawable, c_int64, c_int64, c_int64, POINTER(
c_int64), POINTER(c_int64), POINTER(c_int64)) # GL/glxext.h:821
PFNGLXWAITFORSBCOMLPROC = CFUNCTYPE(c_int, POINTER(Display), GLXDrawable, c_int64, POINTER(
c_int64), POINTER(c_int64), POINTER(c_int64)) # GL/glxext.h:822
# NV_float_buffer (GL/glxext.h:825)
GLX_NV_float_buffer = 1 # GL/glxext.h:826
# SGIX_hyperpipe (GL/glxext.h:829)
GLX_SGIX_hyperpipe = 1 # GL/glxext.h:830
class struct_anon_107(Structure):
__slots__ = [
'pipeName',
'networkId',
]
struct_anon_107._fields_ = [
('pipeName', c_char * 80),
('networkId', c_int),
]
GLXHyperpipeNetworkSGIX = struct_anon_107 # GL/glxext.h:835
class struct_anon_108(Structure):
__slots__ = [
'pipeName',
'channel',
'participationType',
'timeSlice',
]
struct_anon_108._fields_ = [
('pipeName', c_char * 80),
('channel', c_int),
('participationType', c_uint),
('timeSlice', c_int),
]
GLXHyperpipeConfigSGIX = struct_anon_108 # GL/glxext.h:843
class struct_anon_109(Structure):
__slots__ = [
'pipeName',
'srcXOrigin',
'srcYOrigin',
'srcWidth',
'srcHeight',
'destXOrigin',
'destYOrigin',
'destWidth',
'destHeight',
]
struct_anon_109._fields_ = [
('pipeName', c_char * 80),
('srcXOrigin', c_int),
('srcYOrigin', c_int),
('srcWidth', c_int),
('srcHeight', c_int),
('destXOrigin', c_int),
('destYOrigin', c_int),
('destWidth', c_int),
('destHeight', c_int),
]
GLXPipeRect = struct_anon_109 # GL/glxext.h:849
class struct_anon_110(Structure):
__slots__ = [
'pipeName',
'XOrigin',
'YOrigin',
'maxHeight',
'maxWidth',
]
struct_anon_110._fields_ = [
('pipeName', c_char * 80),
('XOrigin', c_int),
('YOrigin', c_int),
('maxHeight', c_int),
('maxWidth', c_int),
]
GLXPipeRectLimits = struct_anon_110 # GL/glxext.h:854
# GL/glxext.h:857
glXQueryHyperpipeNetworkSGIX = _link_function('glXQueryHyperpipeNetworkSGIX', POINTER(
GLXHyperpipeNetworkSGIX), [POINTER(Display), POINTER(c_int)], 'SGIX_hyperpipe')
# GL/glxext.h:858
glXHyperpipeConfigSGIX = _link_function('glXHyperpipeConfigSGIX', c_int, [POINTER(
Display), c_int, c_int, POINTER(GLXHyperpipeConfigSGIX), POINTER(c_int)], 'SGIX_hyperpipe')
# GL/glxext.h:859
glXQueryHyperpipeConfigSGIX = _link_function('glXQueryHyperpipeConfigSGIX', POINTER(
GLXHyperpipeConfigSGIX), [POINTER(Display), c_int, POINTER(c_int)], 'SGIX_hyperpipe')
# GL/glxext.h:860
glXDestroyHyperpipeConfigSGIX = _link_function(
'glXDestroyHyperpipeConfigSGIX', c_int, [POINTER(Display), c_int], 'SGIX_hyperpipe')
# GL/glxext.h:861
glXBindHyperpipeSGIX = _link_function(
'glXBindHyperpipeSGIX', c_int, [POINTER(Display), c_int], 'SGIX_hyperpipe')
# GL/glxext.h:862
glXQueryHyperpipeBestAttribSGIX = _link_function('glXQueryHyperpipeBestAttribSGIX', c_int, [
POINTER(Display), c_int, c_int, c_int, POINTER(None), POINTER(None)], 'SGIX_hyperpipe')
# GL/glxext.h:863
glXHyperpipeAttribSGIX = _link_function('glXHyperpipeAttribSGIX', c_int, [
POINTER(Display), c_int, c_int, c_int, POINTER(None)], 'SGIX_hyperpipe')
# GL/glxext.h:864
glXQueryHyperpipeAttribSGIX = _link_function('glXQueryHyperpipeAttribSGIX', c_int, [
POINTER(Display), c_int, c_int, c_int, POINTER(None)], 'SGIX_hyperpipe')
PFNGLXQUERYHYPERPIPENETWORKSGIXPROC = CFUNCTYPE(POINTER(
GLXHyperpipeNetworkSGIX), POINTER(Display), POINTER(c_int)) # GL/glxext.h:866
PFNGLXHYPERPIPECONFIGSGIXPROC = CFUNCTYPE(c_int, POINTER(Display), c_int, c_int, POINTER(
GLXHyperpipeConfigSGIX), POINTER(c_int)) # GL/glxext.h:867
PFNGLXQUERYHYPERPIPECONFIGSGIXPROC = CFUNCTYPE(POINTER(
GLXHyperpipeConfigSGIX), POINTER(Display), c_int, POINTER(c_int)) # GL/glxext.h:868
PFNGLXDESTROYHYPERPIPECONFIGSGIXPROC = CFUNCTYPE(
c_int, POINTER(Display), c_int) # GL/glxext.h:869
PFNGLXBINDHYPERPIPESGIXPROC = CFUNCTYPE(
c_int, POINTER(Display), c_int) # GL/glxext.h:870
PFNGLXQUERYHYPERPIPEBESTATTRIBSGIXPROC = CFUNCTYPE(c_int, POINTER(
Display), c_int, c_int, c_int, POINTER(None), POINTER(None)) # GL/glxext.h:871
PFNGLXHYPERPIPEATTRIBSGIXPROC = CFUNCTYPE(
c_int, POINTER(Display), c_int, c_int, c_int, POINTER(None)) # GL/glxext.h:872
PFNGLXQUERYHYPERPIPEATTRIBSGIXPROC = CFUNCTYPE(
c_int, POINTER(Display), c_int, c_int, c_int, POINTER(None)) # GL/glxext.h:873
# MESA_agp_offset (GL/glxext.h:876)
GLX_MESA_agp_offset = 1 # GL/glxext.h:877
# GL/glxext.h:879
glXGetAGPOffsetMESA = _link_function(
'glXGetAGPOffsetMESA', c_uint, [POINTER(None)], 'MESA_agp_offset')
PFNGLXGETAGPOFFSETMESAPROC = CFUNCTYPE(
c_uint, POINTER(None)) # GL/glxext.h:881
# EXT_fbconfig_packed_float (GL/glxext.h:884)
GLX_EXT_fbconfig_packed_float = 1 # GL/glxext.h:885
# EXT_framebuffer_sRGB (GL/glxext.h:888)
GLX_EXT_framebuffer_sRGB = 1 # GL/glxext.h:889
# EXT_texture_from_pixmap (GL/glxext.h:892)
GLX_EXT_texture_from_pixmap = 1 # GL/glxext.h:893
# GL/glxext.h:895
glXBindTexImageEXT = _link_function('glXBindTexImageEXT', None, [POINTER(
Display), GLXDrawable, c_int, POINTER(c_int)], 'EXT_texture_from_pixmap')
# GL/glxext.h:896
glXReleaseTexImageEXT = _link_function('glXReleaseTexImageEXT', None, [
POINTER(Display), GLXDrawable, c_int], 'EXT_texture_from_pixmap')
PFNGLXBINDTEXIMAGEEXTPROC = CFUNCTYPE(
None, POINTER(Display), GLXDrawable, c_int, POINTER(c_int)) # GL/glxext.h:898
PFNGLXRELEASETEXIMAGEEXTPROC = CFUNCTYPE(
None, POINTER(Display), GLXDrawable, c_int) # GL/glxext.h:899
# NV_present_video (GL/glxext.h:902)
GLX_NV_present_video = 1 # GL/glxext.h:903
# GL/glxext.h:905
glXEnumerateVideoDevicesNV = _link_function('glXEnumerateVideoDevicesNV', POINTER(
c_uint), [POINTER(Display), c_int, POINTER(c_int)], 'NV_present_video')
# GL/glxext.h:906
glXBindVideoDeviceNV = _link_function('glXBindVideoDeviceNV', c_int, [POINTER(
Display), c_uint, c_uint, POINTER(c_int)], 'NV_present_video')
PFNGLXENUMERATEVIDEODEVICESNVPROC = CFUNCTYPE(
POINTER(c_uint), POINTER(Display), c_int, POINTER(c_int)) # GL/glxext.h:908
PFNGLXBINDVIDEODEVICENVPROC = CFUNCTYPE(
c_int, POINTER(Display), c_uint, c_uint, POINTER(c_int)) # GL/glxext.h:909
# NV_video_output (GL/glxext.h:912)
GLX_NV_video_output = 1 # GL/glxext.h:913
# GL/glxext.h:915
glXGetVideoDeviceNV = _link_function('glXGetVideoDeviceNV', c_int, [POINTER(
Display), c_int, c_int, POINTER(GLXVideoDeviceNV)], 'NV_video_output')
# GL/glxext.h:916
glXReleaseVideoDeviceNV = _link_function('glXReleaseVideoDeviceNV', c_int, [
POINTER(Display), c_int, GLXVideoDeviceNV], 'NV_video_output')
GLXPbuffer = pyglet.gl.glx.GLXPbuffer
# GL/glxext.h:917
glXBindVideoImageNV = _link_function('glXBindVideoImageNV', c_int, [POINTER(
Display), GLXVideoDeviceNV, GLXPbuffer, c_int], 'NV_video_output')
# GL/glxext.h:918
glXReleaseVideoImageNV = _link_function(
'glXReleaseVideoImageNV', c_int, [POINTER(Display), GLXPbuffer], 'NV_video_output')
GLboolean = c_ubyte # /usr/include/GL/gl.h:154
# GL/glxext.h:919
glXSendPbufferToVideoNV = _link_function('glXSendPbufferToVideoNV', c_int, [POINTER(
Display), GLXPbuffer, c_int, POINTER(c_ulong), GLboolean], 'NV_video_output')
# GL/glxext.h:920
glXGetVideoInfoNV = _link_function('glXGetVideoInfoNV', c_int, [POINTER(
Display), c_int, GLXVideoDeviceNV, POINTER(c_ulong), POINTER(c_ulong)], 'NV_video_output')
PFNGLXGETVIDEODEVICENVPROC = CFUNCTYPE(c_int, POINTER(
Display), c_int, c_int, POINTER(GLXVideoDeviceNV)) # GL/glxext.h:922
PFNGLXRELEASEVIDEODEVICENVPROC = CFUNCTYPE(
c_int, POINTER(Display), c_int, GLXVideoDeviceNV) # GL/glxext.h:923
PFNGLXBINDVIDEOIMAGENVPROC = CFUNCTYPE(
c_int, POINTER(Display), GLXVideoDeviceNV, GLXPbuffer, c_int) # GL/glxext.h:924
PFNGLXRELEASEVIDEOIMAGENVPROC = CFUNCTYPE(
c_int, POINTER(Display), GLXPbuffer) # GL/glxext.h:925
PFNGLXSENDPBUFFERTOVIDEONVPROC = CFUNCTYPE(c_int, POINTER(
Display), GLXPbuffer, c_int, POINTER(c_ulong), GLboolean) # GL/glxext.h:926
PFNGLXGETVIDEOINFONVPROC = CFUNCTYPE(c_int, POINTER(
Display), c_int, GLXVideoDeviceNV, POINTER(c_ulong), POINTER(c_ulong)) # GL/glxext.h:927
# NV_swap_group (GL/glxext.h:930)
GLX_NV_swap_group = 1 # GL/glxext.h:931
GLuint = c_uint # /usr/include/GL/gl.h:162
# GL/glxext.h:933
glXJoinSwapGroupNV = _link_function('glXJoinSwapGroupNV', c_int, [
POINTER(Display), GLXDrawable, GLuint], 'NV_swap_group')
# GL/glxext.h:934
glXBindSwapBarrierNV = _link_function(
'glXBindSwapBarrierNV', c_int, [POINTER(Display), GLuint, GLuint], 'NV_swap_group')
# GL/glxext.h:935
glXQuerySwapGroupNV = _link_function('glXQuerySwapGroupNV', c_int, [POINTER(
Display), GLXDrawable, POINTER(GLuint), POINTER(GLuint)], 'NV_swap_group')
# GL/glxext.h:936
glXQueryMaxSwapGroupsNV = _link_function('glXQueryMaxSwapGroupsNV', c_int, [
POINTER(Display), c_int, POINTER(GLuint), POINTER(GLuint)], 'NV_swap_group')
# GL/glxext.h:937
glXQueryFrameCountNV = _link_function('glXQueryFrameCountNV', c_int, [
POINTER(Display), c_int, POINTER(GLuint)], 'NV_swap_group')
# GL/glxext.h:938
glXResetFrameCountNV = _link_function(
'glXResetFrameCountNV', c_int, [POINTER(Display), c_int], 'NV_swap_group')
PFNGLXJOINSWAPGROUPNVPROC = CFUNCTYPE(
c_int, POINTER(Display), GLXDrawable, GLuint) # GL/glxext.h:940
PFNGLXBINDSWAPBARRIERNVPROC = CFUNCTYPE(
c_int, POINTER(Display), GLuint, GLuint) # GL/glxext.h:941
PFNGLXQUERYSWAPGROUPNVPROC = CFUNCTYPE(c_int, POINTER(
Display), GLXDrawable, POINTER(GLuint), POINTER(GLuint)) # GL/glxext.h:942
PFNGLXQUERYMAXSWAPGROUPSNVPROC = CFUNCTYPE(c_int, POINTER(
Display), c_int, POINTER(GLuint), POINTER(GLuint)) # GL/glxext.h:943
PFNGLXQUERYFRAMECOUNTNVPROC = CFUNCTYPE(
c_int, POINTER(Display), c_int, POINTER(GLuint)) # GL/glxext.h:944
PFNGLXRESETFRAMECOUNTNVPROC = CFUNCTYPE(
c_int, POINTER(Display), c_int) # GL/glxext.h:945
# NV_video_capture (GL/glxext.h:948)
GLX_NV_video_capture = 1 # GL/glxext.h:949
# GL/glxext.h:951
glXBindVideoCaptureDeviceNV = _link_function('glXBindVideoCaptureDeviceNV', c_int, [
POINTER(Display), c_uint, GLXVideoCaptureDeviceNV], 'NV_video_capture')
# GL/glxext.h:952
glXEnumerateVideoCaptureDevicesNV = _link_function('glXEnumerateVideoCaptureDevicesNV', POINTER(
GLXVideoCaptureDeviceNV), [POINTER(Display), c_int, POINTER(c_int)], 'NV_video_capture')
# GL/glxext.h:953
glXLockVideoCaptureDeviceNV = _link_function('glXLockVideoCaptureDeviceNV', None, [
POINTER(Display), GLXVideoCaptureDeviceNV], 'NV_video_capture')
# GL/glxext.h:954
glXQueryVideoCaptureDeviceNV = _link_function('glXQueryVideoCaptureDeviceNV', c_int, [
POINTER(Display), GLXVideoCaptureDeviceNV, c_int, POINTER(c_int)], 'NV_video_capture')
# GL/glxext.h:955
glXReleaseVideoCaptureDeviceNV = _link_function('glXReleaseVideoCaptureDeviceNV', None, [
POINTER(Display), GLXVideoCaptureDeviceNV], 'NV_video_capture')
PFNGLXBINDVIDEOCAPTUREDEVICENVPROC = CFUNCTYPE(
c_int, POINTER(Display), c_uint, GLXVideoCaptureDeviceNV) # GL/glxext.h:957
PFNGLXENUMERATEVIDEOCAPTUREDEVICESNVPROC = CFUNCTYPE(POINTER(
GLXVideoCaptureDeviceNV), POINTER(Display), c_int, POINTER(c_int)) # GL/glxext.h:958
PFNGLXLOCKVIDEOCAPTUREDEVICENVPROC = CFUNCTYPE(
None, POINTER(Display), GLXVideoCaptureDeviceNV) # GL/glxext.h:959
PFNGLXQUERYVIDEOCAPTUREDEVICENVPROC = CFUNCTYPE(c_int, POINTER(
Display), GLXVideoCaptureDeviceNV, c_int, POINTER(c_int)) # GL/glxext.h:960
PFNGLXRELEASEVIDEOCAPTUREDEVICENVPROC = CFUNCTYPE(
None, POINTER(Display), GLXVideoCaptureDeviceNV) # GL/glxext.h:961
# EXT_swap_control (GL/glxext.h:964)
GLX_EXT_swap_control = 1 # GL/glxext.h:965
# GL/glxext.h:967
glXSwapIntervalEXT = _link_function('glXSwapIntervalEXT', c_int, [
POINTER(Display), GLXDrawable, c_int], 'EXT_swap_control')
PFNGLXSWAPINTERVALEXTPROC = CFUNCTYPE(
c_int, POINTER(Display), GLXDrawable, c_int) # GL/glxext.h:969
# NV_copy_image (GL/glxext.h:972)
GLX_NV_copy_image = 1 # GL/glxext.h:973
GLint = c_int # /usr/include/GL/gl.h:159
GLsizei = c_int # /usr/include/GL/gl.h:163
# GL/glxext.h:975
glXCopyImageSubDataNV = _link_function('glXCopyImageSubDataNV', None, [POINTER(
Display), GLXContext, GLuint, GLenum, GLint, GLint, GLint, GLint, GLXContext, GLuint, GLenum, GLint, GLint, GLint, GLint, GLsizei, GLsizei, GLsizei], 'NV_copy_image')
PFNGLXCOPYIMAGESUBDATANVPROC = CFUNCTYPE(None, POINTER(Display), GLXContext, GLuint, GLenum, GLint, GLint, GLint,
GLint, GLXContext, GLuint, GLenum, GLint, GLint, GLint, GLint, GLsizei, GLsizei, GLsizei) # GL/glxext.h:977
# INTEL_swap_event (GL/glxext.h:980)
GLX_INTEL_swap_event = 1 # GL/glxext.h:981
# NV_multisample_coverage (GL/glxext.h:984)
GLX_NV_multisample_coverage = 1 # GL/glxext.h:985
# NV_vertex_array_range (/usr/include/GL/glx.h:349)
# MESA_allocate_memory (/usr/include/GL/glx.h:363)
# ARB_render_texture (/usr/include/GL/glx.h:380)
# NV_float_buffer (/usr/include/GL/glx.h:393)
# MESA_swap_frame_usage (/usr/include/GL/glx.h:405)
# MESA_swap_control (/usr/include/GL/glx.h:425)
# EXT_texture_from_pixmap (/usr/include/GL/glx.h:442)
__all__ = ['GLX_GLXEXT_VERSION', 'GLX_SAMPLE_BUFFERS_ARB', 'GLX_SAMPLES_ARB',
'GLX_CONTEXT_ALLOW_BUFFER_BYTE_ORDER_MISMATCH_ARB', 'GLX_RGBA_FLOAT_TYPE_ARB',
'GLX_RGBA_FLOAT_BIT_ARB', 'GLX_FRAMEBUFFER_SRGB_CAPABLE_ARB',
'GLX_CONTEXT_DEBUG_BIT_ARB', 'GLX_CONTEXT_FORWARD_COMPATIBLE_BIT_ARB',
'GLX_CONTEXT_MAJOR_VERSION_ARB', 'GLX_CONTEXT_MINOR_VERSION_ARB',
'GLX_CONTEXT_FLAGS_ARB', 'GLX_CONTEXT_CORE_PROFILE_BIT_ARB',
'GLX_CONTEXT_COMPATIBILITY_PROFILE_BIT_ARB', 'GLX_CONTEXT_PROFILE_MASK_ARB',
'GLX_CONTEXT_ROBUST_ACCESS_BIT_ARB', 'GLX_LOSE_CONTEXT_ON_RESET_ARB',
'GLX_CONTEXT_RESET_NOTIFICATION_STRATEGY_ARB',
'GLX_NO_RESET_NOTIFICATION_ARB', 'GLX_SAMPLE_BUFFERS_SGIS',
'GLX_SAMPLES_SGIS', 'GLX_X_VISUAL_TYPE_EXT', 'GLX_TRANSPARENT_TYPE_EXT',
'GLX_TRANSPARENT_INDEX_VALUE_EXT', 'GLX_TRANSPARENT_RED_VALUE_EXT',
'GLX_TRANSPARENT_GREEN_VALUE_EXT', 'GLX_TRANSPARENT_BLUE_VALUE_EXT',
'GLX_TRANSPARENT_ALPHA_VALUE_EXT', 'GLX_NONE_EXT', 'GLX_TRUE_COLOR_EXT',
'GLX_DIRECT_COLOR_EXT', 'GLX_PSEUDO_COLOR_EXT', 'GLX_STATIC_COLOR_EXT',
'GLX_GRAY_SCALE_EXT', 'GLX_STATIC_GRAY_EXT', 'GLX_TRANSPARENT_RGB_EXT',
'GLX_TRANSPARENT_INDEX_EXT', 'GLX_VISUAL_CAVEAT_EXT', 'GLX_SLOW_VISUAL_EXT',
'GLX_NON_CONFORMANT_VISUAL_EXT', 'GLX_SHARE_CONTEXT_EXT', 'GLX_VISUAL_ID_EXT',
'GLX_SCREEN_EXT', 'GLX_WINDOW_BIT_SGIX', 'GLX_PIXMAP_BIT_SGIX',
'GLX_RGBA_BIT_SGIX', 'GLX_COLOR_INDEX_BIT_SGIX', 'GLX_DRAWABLE_TYPE_SGIX',
'GLX_RENDER_TYPE_SGIX', 'GLX_X_RENDERABLE_SGIX', 'GLX_FBCONFIG_ID_SGIX',
'GLX_RGBA_TYPE_SGIX', 'GLX_COLOR_INDEX_TYPE_SGIX', 'GLX_PBUFFER_BIT_SGIX',
'GLX_BUFFER_CLOBBER_MASK_SGIX', 'GLX_FRONT_LEFT_BUFFER_BIT_SGIX',
'GLX_FRONT_RIGHT_BUFFER_BIT_SGIX', 'GLX_BACK_LEFT_BUFFER_BIT_SGIX',
'GLX_BACK_RIGHT_BUFFER_BIT_SGIX', 'GLX_AUX_BUFFERS_BIT_SGIX',
'GLX_DEPTH_BUFFER_BIT_SGIX', 'GLX_STENCIL_BUFFER_BIT_SGIX',
'GLX_ACCUM_BUFFER_BIT_SGIX', 'GLX_SAMPLE_BUFFERS_BIT_SGIX',
'GLX_MAX_PBUFFER_WIDTH_SGIX', 'GLX_MAX_PBUFFER_HEIGHT_SGIX',
'GLX_MAX_PBUFFER_PIXELS_SGIX', 'GLX_OPTIMAL_PBUFFER_WIDTH_SGIX',
'GLX_OPTIMAL_PBUFFER_HEIGHT_SGIX', 'GLX_PRESERVED_CONTENTS_SGIX',
'GLX_LARGEST_PBUFFER_SGIX', 'GLX_WIDTH_SGIX', 'GLX_HEIGHT_SGIX',
'GLX_EVENT_MASK_SGIX', 'GLX_DAMAGED_SGIX', 'GLX_SAVED_SGIX',
'GLX_WINDOW_SGIX', 'GLX_PBUFFER_SGIX', 'GLX_SYNC_FRAME_SGIX',
'GLX_SYNC_SWAP_SGIX', 'GLX_DIGITAL_MEDIA_PBUFFER_SGIX',
'GLX_BLENDED_RGBA_SGIS', 'GLX_MULTISAMPLE_SUB_RECT_WIDTH_SGIS',
'GLX_MULTISAMPLE_SUB_RECT_HEIGHT_SGIS', 'GLX_SAMPLE_BUFFERS_3DFX',
'GLX_SAMPLES_3DFX', 'GLX_3DFX_WINDOW_MODE_MESA',
'GLX_3DFX_FULLSCREEN_MODE_MESA', 'GLX_VISUAL_SELECT_GROUP_SGIX',
'GLX_SWAP_METHOD_OML', 'GLX_SWAP_EXCHANGE_OML', 'GLX_SWAP_COPY_OML',
'GLX_SWAP_UNDEFINED_OML', 'GLX_FLOAT_COMPONENTS_NV',
'GLX_HYPERPIPE_PIPE_NAME_LENGTH_SGIX', 'GLX_BAD_HYPERPIPE_CONFIG_SGIX',
'GLX_BAD_HYPERPIPE_SGIX', 'GLX_HYPERPIPE_DISPLAY_PIPE_SGIX',
'GLX_HYPERPIPE_RENDER_PIPE_SGIX', 'GLX_PIPE_RECT_SGIX',
'GLX_PIPE_RECT_LIMITS_SGIX', 'GLX_HYPERPIPE_STEREO_SGIX',
'GLX_HYPERPIPE_PIXEL_AVERAGE_SGIX', 'GLX_HYPERPIPE_ID_SGIX',
'GLX_RGBA_UNSIGNED_FLOAT_TYPE_EXT', 'GLX_RGBA_UNSIGNED_FLOAT_BIT_EXT',
'GLX_FRAMEBUFFER_SRGB_CAPABLE_EXT', 'GLX_TEXTURE_1D_BIT_EXT',
'GLX_TEXTURE_2D_BIT_EXT', 'GLX_TEXTURE_RECTANGLE_BIT_EXT',
'GLX_BIND_TO_TEXTURE_RGB_EXT', 'GLX_BIND_TO_TEXTURE_RGBA_EXT',
'GLX_BIND_TO_MIPMAP_TEXTURE_EXT', 'GLX_BIND_TO_TEXTURE_TARGETS_EXT',
'GLX_Y_INVERTED_EXT', 'GLX_TEXTURE_FORMAT_EXT', 'GLX_TEXTURE_TARGET_EXT',
'GLX_MIPMAP_TEXTURE_EXT', 'GLX_TEXTURE_FORMAT_NONE_EXT',
'GLX_TEXTURE_FORMAT_RGB_EXT', 'GLX_TEXTURE_FORMAT_RGBA_EXT',
'GLX_TEXTURE_1D_EXT', 'GLX_TEXTURE_2D_EXT', 'GLX_TEXTURE_RECTANGLE_EXT',
'GLX_FRONT_LEFT_EXT', 'GLX_FRONT_RIGHT_EXT', 'GLX_BACK_LEFT_EXT',
'GLX_BACK_RIGHT_EXT', 'GLX_FRONT_EXT', 'GLX_BACK_EXT', 'GLX_AUX0_EXT',
'GLX_AUX1_EXT', 'GLX_AUX2_EXT', 'GLX_AUX3_EXT', 'GLX_AUX4_EXT',
'GLX_AUX5_EXT', 'GLX_AUX6_EXT', 'GLX_AUX7_EXT', 'GLX_AUX8_EXT',
'GLX_AUX9_EXT', 'GLX_NUM_VIDEO_SLOTS_NV', 'GLX_VIDEO_OUT_COLOR_NV',
'GLX_VIDEO_OUT_ALPHA_NV', 'GLX_VIDEO_OUT_DEPTH_NV',
'GLX_VIDEO_OUT_COLOR_AND_ALPHA_NV', 'GLX_VIDEO_OUT_COLOR_AND_DEPTH_NV',
'GLX_VIDEO_OUT_FRAME_NV', 'GLX_VIDEO_OUT_FIELD_1_NV',
'GLX_VIDEO_OUT_FIELD_2_NV', 'GLX_VIDEO_OUT_STACKED_FIELDS_1_2_NV',
'GLX_VIDEO_OUT_STACKED_FIELDS_2_1_NV', 'GLX_DEVICE_ID_NV', 'GLX_UNIQUE_ID_NV',
'GLX_NUM_VIDEO_CAPTURE_SLOTS_NV', 'GLX_SWAP_INTERVAL_EXT',
'GLX_MAX_SWAP_INTERVAL_EXT', 'GLX_BUFFER_SWAP_COMPLETE_INTEL_MASK',
'GLX_EXCHANGE_COMPLETE_INTEL', 'GLX_COPY_COMPLETE_INTEL',
'GLX_FLIP_COMPLETE_INTEL', 'GLX_COVERAGE_SAMPLES_NV', 'GLX_COLOR_SAMPLES_NV',
'GLX_GPU_VENDOR_AMD', 'GLX_GPU_RENDERER_STRING_AMD',
'GLX_GPU_OPENGL_VERSION_STRING_AMD', 'GLX_GPU_FASTEST_TARGET_GPUS_AMD',
'GLX_GPU_RAM_AMD', 'GLX_GPU_CLOCK_AMD', 'GLX_GPU_NUM_PIPES_AMD',
'GLX_GPU_NUM_SIMD_AMD', 'GLX_GPU_NUM_RB_AMD', 'GLX_GPU_NUM_SPI_AMD',
'GLX_CONTEXT_ES2_PROFILE_BIT_EXT', 'GLXVideoSourceSGIX', 'GLXFBConfigIDSGIX',
'GLXFBConfigSGIX', 'GLXPbufferSGIX', 'GLXBufferClobberEventSGIX',
'GLXVideoDeviceNV', 'GLXVideoCaptureDeviceNV', 'GLX_ARB_multisample',
'GLX_ARB_fbconfig_float', 'GLX_ARB_framebuffer_sRGB',
'GLX_ARB_create_context', 'glXCreateContextAttribsARB',
'PFNGLXCREATECONTEXTATTRIBSARBPROC', 'GLX_ARB_create_context_profile',
'GLX_ARB_create_context_robustness', 'GLX_SGIS_multisample',
'GLX_EXT_visual_info', 'GLX_SGI_swap_control', 'glXSwapIntervalSGI',
'PFNGLXSWAPINTERVALSGIPROC', 'GLX_SGI_video_sync', 'glXGetVideoSyncSGI',
'glXWaitVideoSyncSGI', 'PFNGLXGETVIDEOSYNCSGIPROC',
'PFNGLXWAITVIDEOSYNCSGIPROC', 'GLX_SGI_make_current_read',
'glXMakeCurrentReadSGI', 'glXGetCurrentReadDrawableSGI',
'PFNGLXMAKECURRENTREADSGIPROC', 'PFNGLXGETCURRENTREADDRAWABLESGIPROC',
'GLX_SGIX_video_source', 'GLX_EXT_visual_rating', 'GLX_EXT_import_context',
'glXGetCurrentDisplayEXT', 'glXQueryContextInfoEXT', 'glXGetContextIDEXT',
'glXImportContextEXT', 'glXFreeContextEXT', 'PFNGLXGETCURRENTDISPLAYEXTPROC',
'PFNGLXQUERYCONTEXTINFOEXTPROC', 'PFNGLXGETCONTEXTIDEXTPROC',
'PFNGLXIMPORTCONTEXTEXTPROC', 'PFNGLXFREECONTEXTEXTPROC', 'GLX_SGIX_fbconfig',
'glXGetFBConfigAttribSGIX', 'glXChooseFBConfigSGIX',
'glXCreateGLXPixmapWithConfigSGIX', 'glXCreateContextWithConfigSGIX',
'glXGetVisualFromFBConfigSGIX', 'glXGetFBConfigFromVisualSGIX',
'PFNGLXGETFBCONFIGATTRIBSGIXPROC', 'PFNGLXCHOOSEFBCONFIGSGIXPROC',
'PFNGLXCREATEGLXPIXMAPWITHCONFIGSGIXPROC',
'PFNGLXCREATECONTEXTWITHCONFIGSGIXPROC',
'PFNGLXGETVISUALFROMFBCONFIGSGIXPROC', 'PFNGLXGETFBCONFIGFROMVISUALSGIXPROC',
'GLX_SGIX_pbuffer', 'glXCreateGLXPbufferSGIX', 'glXDestroyGLXPbufferSGIX',
'glXQueryGLXPbufferSGIX', 'glXSelectEventSGIX', 'glXGetSelectedEventSGIX',
'PFNGLXCREATEGLXPBUFFERSGIXPROC', 'PFNGLXDESTROYGLXPBUFFERSGIXPROC',
'PFNGLXQUERYGLXPBUFFERSGIXPROC', 'PFNGLXSELECTEVENTSGIXPROC',
'PFNGLXGETSELECTEDEVENTSGIXPROC', 'GLX_SGI_cushion', 'glXCushionSGI',
'PFNGLXCUSHIONSGIPROC', 'GLX_SGIX_video_resize', 'glXBindChannelToWindowSGIX',
'glXChannelRectSGIX', 'glXQueryChannelRectSGIX', 'glXQueryChannelDeltasSGIX',
'glXChannelRectSyncSGIX', 'PFNGLXBINDCHANNELTOWINDOWSGIXPROC',
'PFNGLXCHANNELRECTSGIXPROC', 'PFNGLXQUERYCHANNELRECTSGIXPROC',
'PFNGLXQUERYCHANNELDELTASSGIXPROC', 'PFNGLXCHANNELRECTSYNCSGIXPROC',
'GLX_SGIX_dmbuffer', 'GLX_SGIX_swap_group', 'glXJoinSwapGroupSGIX',
'PFNGLXJOINSWAPGROUPSGIXPROC', 'GLX_SGIX_swap_barrier',
'glXBindSwapBarrierSGIX', 'glXQueryMaxSwapBarriersSGIX',
'PFNGLXBINDSWAPBARRIERSGIXPROC', 'PFNGLXQUERYMAXSWAPBARRIERSSGIXPROC',
'GLX_SUN_get_transparent_index', 'glXGetTransparentIndexSUN',
'PFNGLXGETTRANSPARENTINDEXSUNPROC', 'GLX_MESA_copy_sub_buffer',
'glXCopySubBufferMESA', 'PFNGLXCOPYSUBBUFFERMESAPROC',
'GLX_MESA_pixmap_colormap', 'glXCreateGLXPixmapMESA',
'PFNGLXCREATEGLXPIXMAPMESAPROC', 'GLX_MESA_release_buffers',
'glXReleaseBuffersMESA', 'PFNGLXRELEASEBUFFERSMESAPROC',
'GLX_MESA_set_3dfx_mode', 'glXSet3DfxModeMESA', 'PFNGLXSET3DFXMODEMESAPROC',
'GLX_SGIX_visual_select_group', 'GLX_OML_swap_method', 'GLX_OML_sync_control',
'glXGetSyncValuesOML', 'glXGetMscRateOML', 'glXSwapBuffersMscOML',
'glXWaitForMscOML', 'glXWaitForSbcOML', 'PFNGLXGETSYNCVALUESOMLPROC',
'PFNGLXGETMSCRATEOMLPROC', 'PFNGLXSWAPBUFFERSMSCOMLPROC',
'PFNGLXWAITFORMSCOMLPROC', 'PFNGLXWAITFORSBCOMLPROC', 'GLX_NV_float_buffer',
'GLX_SGIX_hyperpipe', 'GLXHyperpipeNetworkSGIX', 'GLXHyperpipeConfigSGIX',
'GLXPipeRect', 'GLXPipeRectLimits', 'glXQueryHyperpipeNetworkSGIX',
'glXHyperpipeConfigSGIX', 'glXQueryHyperpipeConfigSGIX',
'glXDestroyHyperpipeConfigSGIX', 'glXBindHyperpipeSGIX',
'glXQueryHyperpipeBestAttribSGIX', 'glXHyperpipeAttribSGIX',
'glXQueryHyperpipeAttribSGIX', 'PFNGLXQUERYHYPERPIPENETWORKSGIXPROC',
'PFNGLXHYPERPIPECONFIGSGIXPROC', 'PFNGLXQUERYHYPERPIPECONFIGSGIXPROC',
'PFNGLXDESTROYHYPERPIPECONFIGSGIXPROC', 'PFNGLXBINDHYPERPIPESGIXPROC',
'PFNGLXQUERYHYPERPIPEBESTATTRIBSGIXPROC', 'PFNGLXHYPERPIPEATTRIBSGIXPROC',
'PFNGLXQUERYHYPERPIPEATTRIBSGIXPROC', 'GLX_MESA_agp_offset',
'glXGetAGPOffsetMESA', 'PFNGLXGETAGPOFFSETMESAPROC',
'GLX_EXT_fbconfig_packed_float', 'GLX_EXT_framebuffer_sRGB',
'GLX_EXT_texture_from_pixmap', 'glXBindTexImageEXT', 'glXReleaseTexImageEXT',
'PFNGLXBINDTEXIMAGEEXTPROC', 'PFNGLXRELEASETEXIMAGEEXTPROC',
'GLX_NV_present_video', 'glXEnumerateVideoDevicesNV', 'glXBindVideoDeviceNV',
'PFNGLXENUMERATEVIDEODEVICESNVPROC', 'PFNGLXBINDVIDEODEVICENVPROC',
'GLX_NV_video_output', 'glXGetVideoDeviceNV', 'glXReleaseVideoDeviceNV',
'glXBindVideoImageNV', 'glXReleaseVideoImageNV', 'glXSendPbufferToVideoNV',
'glXGetVideoInfoNV', 'PFNGLXGETVIDEODEVICENVPROC',
'PFNGLXRELEASEVIDEODEVICENVPROC', 'PFNGLXBINDVIDEOIMAGENVPROC',
'PFNGLXRELEASEVIDEOIMAGENVPROC', 'PFNGLXSENDPBUFFERTOVIDEONVPROC',
'PFNGLXGETVIDEOINFONVPROC', 'GLX_NV_swap_group', 'glXJoinSwapGroupNV',
'glXBindSwapBarrierNV', 'glXQuerySwapGroupNV', 'glXQueryMaxSwapGroupsNV',
'glXQueryFrameCountNV', 'glXResetFrameCountNV', 'PFNGLXJOINSWAPGROUPNVPROC',
'PFNGLXBINDSWAPBARRIERNVPROC', 'PFNGLXQUERYSWAPGROUPNVPROC',
'PFNGLXQUERYMAXSWAPGROUPSNVPROC', 'PFNGLXQUERYFRAMECOUNTNVPROC',
'PFNGLXRESETFRAMECOUNTNVPROC', 'GLX_NV_video_capture',
'glXBindVideoCaptureDeviceNV', 'glXEnumerateVideoCaptureDevicesNV',
'glXLockVideoCaptureDeviceNV', 'glXQueryVideoCaptureDeviceNV',
'glXReleaseVideoCaptureDeviceNV', 'PFNGLXBINDVIDEOCAPTUREDEVICENVPROC',
'PFNGLXENUMERATEVIDEOCAPTUREDEVICESNVPROC',
'PFNGLXLOCKVIDEOCAPTUREDEVICENVPROC', 'PFNGLXQUERYVIDEOCAPTUREDEVICENVPROC',
'PFNGLXRELEASEVIDEOCAPTUREDEVICENVPROC', 'GLX_EXT_swap_control',
'glXSwapIntervalEXT', 'PFNGLXSWAPINTERVALEXTPROC', 'GLX_NV_copy_image',
'glXCopyImageSubDataNV', 'PFNGLXCOPYIMAGESUBDATANVPROC',
'GLX_INTEL_swap_event', 'GLX_NV_multisample_coverage']
# END GENERATED CONTENT (do not edit above this line)
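# Illustrative note (hand-written; not part of the generated bindings above).
# The wrappers are plain ctypes entry points, so driving one only requires the
# raw X11 handles. A minimal, hypothetical sketch for the EXT_swap_control
# wrapper -- `display_ptr` (a POINTER(Display)) and `drawable` (a GLXDrawable)
# are assumed to come from an active pyglet GLX context:
#
#     glXSwapIntervalEXT(display_ptr, drawable, 1)   # request a swap interval of 1 (vsync)
#
# If the running GLX implementation does not advertise GLX_EXT_swap_control,
# the stub created by _link_function raises an exception at call time.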
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import os
import sys
from py4j.java_gateway import java_import, JavaObject
from pyspark import RDD, SparkConf
from pyspark.serializers import NoOpSerializer, UTF8Deserializer, CloudPickleSerializer
from pyspark.context import SparkContext
from pyspark.storagelevel import StorageLevel
from pyspark.streaming.dstream import DStream
from pyspark.streaming.util import TransformFunction, TransformFunctionSerializer
__all__ = ["StreamingContext"]
def _daemonize_callback_server():
"""
    Hack Py4J to daemonize the callback server.
    The callback server's thread has daemon=False, so it will block the
    driver from exiting if it is not shut down. The following code replaces
    `start()` of CallbackServer with a new version that sets daemon=True for
    this thread.
    It also updates the port number (0) with the real port.
"""
# TODO: create a patch for Py4J
import socket
import py4j.java_gateway
logger = py4j.java_gateway.logger
from py4j.java_gateway import Py4JNetworkError
from threading import Thread
def start(self):
"""Starts the CallbackServer. This method should be called by the
client instead of run()."""
self.server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR,
1)
try:
self.server_socket.bind((self.address, self.port))
if not self.port:
# update port with real port
self.port = self.server_socket.getsockname()[1]
except Exception as e:
msg = 'An error occurred while trying to start the callback server: %s' % e
logger.exception(msg)
raise Py4JNetworkError(msg)
        # Maybe the thread needs to be cleaned up?
self.thread = Thread(target=self.run)
self.thread.daemon = True
self.thread.start()
py4j.java_gateway.CallbackServer.start = start
class StreamingContext(object):
"""
Main entry point for Spark Streaming functionality. A StreamingContext
represents the connection to a Spark cluster, and can be used to create
    L{DStream}s from various input sources. It can be created from an existing L{SparkContext}.
After creating and transforming DStreams, the streaming computation can
be started and stopped using `context.start()` and `context.stop()`,
respectively. `context.awaitTermination()` allows the current thread
to wait for the termination of the context by `stop()` or by an exception.
"""
_transformerSerializer = None
def __init__(self, sparkContext, batchDuration=None, jssc=None):
"""
Create a new StreamingContext.
@param sparkContext: L{SparkContext} object.
@param batchDuration: the time interval (in seconds) at which streaming
data will be divided into batches
"""
self._sc = sparkContext
self._jvm = self._sc._jvm
self._jssc = jssc or self._initialize_context(self._sc, batchDuration)
def _initialize_context(self, sc, duration):
self._ensure_initialized()
return self._jvm.JavaStreamingContext(sc._jsc, self._jduration(duration))
def _jduration(self, seconds):
"""
Create Duration object given number of seconds
"""
return self._jvm.Duration(int(seconds * 1000))
@classmethod
def _ensure_initialized(cls):
SparkContext._ensure_initialized()
gw = SparkContext._gateway
java_import(gw.jvm, "org.apache.spark.streaming.*")
java_import(gw.jvm, "org.apache.spark.streaming.api.java.*")
java_import(gw.jvm, "org.apache.spark.streaming.api.python.*")
# start callback server
# getattr will fallback to JVM, so we cannot test by hasattr()
if "_callback_server" not in gw.__dict__:
_daemonize_callback_server()
# use random port
gw._start_callback_server(0)
# gateway with real port
gw._python_proxy_port = gw._callback_server.port
# get the GatewayServer object in JVM by ID
jgws = JavaObject("GATEWAY_SERVER", gw._gateway_client)
# update the port of CallbackClient with real port
gw.jvm.PythonDStream.updatePythonGatewayPort(jgws, gw._python_proxy_port)
# register serializer for TransformFunction
# it happens before creating SparkContext when loading from checkpointing
cls._transformerSerializer = TransformFunctionSerializer(
SparkContext._active_spark_context, CloudPickleSerializer(), gw)
@classmethod
def getOrCreate(cls, checkpointPath, setupFunc):
"""
Either recreate a StreamingContext from checkpoint data or create a new StreamingContext.
If checkpoint data exists in the provided `checkpointPath`, then StreamingContext will be
recreated from the checkpoint data. If the data does not exist, then the provided setupFunc
will be used to create a JavaStreamingContext.
@param checkpointPath: Checkpoint directory used in an earlier JavaStreamingContext program
@param setupFunc: Function to create a new JavaStreamingContext and setup DStreams
"""
# TODO: support checkpoint in HDFS
if not os.path.exists(checkpointPath) or not os.listdir(checkpointPath):
ssc = setupFunc()
ssc.checkpoint(checkpointPath)
return ssc
cls._ensure_initialized()
gw = SparkContext._gateway
try:
jssc = gw.jvm.JavaStreamingContext(checkpointPath)
except Exception:
print("failed to load StreamingContext from checkpoint", file=sys.stderr)
raise
jsc = jssc.sparkContext()
conf = SparkConf(_jconf=jsc.getConf())
sc = SparkContext(conf=conf, gateway=gw, jsc=jsc)
# update ctx in serializer
SparkContext._active_spark_context = sc
cls._transformerSerializer.ctx = sc
return StreamingContext(sc, None, jssc)
@property
def sparkContext(self):
"""
Return SparkContext which is associated with this StreamingContext.
"""
return self._sc
def start(self):
"""
Start the execution of the streams.
"""
self._jssc.start()
def awaitTermination(self, timeout=None):
"""
Wait for the execution to stop.
@param timeout: time to wait in seconds
"""
if timeout is None:
self._jssc.awaitTermination()
else:
self._jssc.awaitTerminationOrTimeout(int(timeout * 1000))
def awaitTerminationOrTimeout(self, timeout):
"""
Wait for the execution to stop. Return `true` if it's stopped; or
throw the reported error during the execution; or `false` if the
waiting time elapsed before returning from the method.
@param timeout: time to wait in seconds
"""
        return self._jssc.awaitTerminationOrTimeout(int(timeout * 1000))
def stop(self, stopSparkContext=True, stopGraceFully=False):
"""
Stop the execution of the streams, with option of ensuring all
received data has been processed.
@param stopSparkContext: Stop the associated SparkContext or not
        @param stopGraceFully: Stop gracefully by waiting for the processing
of all received data to be completed
"""
self._jssc.stop(stopSparkContext, stopGraceFully)
if stopSparkContext:
self._sc.stop()
def remember(self, duration):
"""
        Set each DStream in this context to remember the RDDs it generated
        in the last given duration. DStreams remember RDDs only for a
        limited duration of time and release them for garbage collection.
        This method allows the developer to specify how long to remember
        the RDDs (if the developer wishes to query old data outside the
        DStream computation).
@param duration: Minimum duration (in seconds) that each DStream
should remember its RDDs
"""
self._jssc.remember(self._jduration(duration))
def checkpoint(self, directory):
"""
Sets the context to periodically checkpoint the DStream operations for master
fault-tolerance. The graph will be checkpointed every batch interval.
@param directory: HDFS-compatible directory where the checkpoint data
will be reliably stored
"""
self._jssc.checkpoint(directory)
def socketTextStream(self, hostname, port, storageLevel=StorageLevel.MEMORY_AND_DISK_SER_2):
"""
        Create an input stream from a TCP source hostname:port. Data is
        received using a TCP socket and the received bytes are interpreted
        as UTF8-encoded, ``\\n``-delimited lines.
@param hostname: Hostname to connect to for receiving data
@param port: Port to connect to for receiving data
@param storageLevel: Storage level to use for storing the received objects
"""
jlevel = self._sc._getJavaStorageLevel(storageLevel)
return DStream(self._jssc.socketTextStream(hostname, port, jlevel), self,
UTF8Deserializer())
def textFileStream(self, directory):
"""
Create an input stream that monitors a Hadoop-compatible file system
        for new files and reads them as text files. Files must be written to the
monitored directory by "moving" them from another location within the same
file system. File names starting with . are ignored.
"""
return DStream(self._jssc.textFileStream(directory), self, UTF8Deserializer())
def binaryRecordsStream(self, directory, recordLength):
"""
Create an input stream that monitors a Hadoop-compatible file system
for new files and reads them as flat binary files with records of
fixed length. Files must be written to the monitored directory by "moving"
them from another location within the same file system.
File names starting with . are ignored.
@param directory: Directory to load data from
@param recordLength: Length of each record in bytes
"""
return DStream(self._jssc.binaryRecordsStream(directory, recordLength), self,
NoOpSerializer())
def _check_serializers(self, rdds):
# make sure they have same serializer
if len(set(rdd._jrdd_deserializer for rdd in rdds)) > 1:
for i in range(len(rdds)):
# reset them to sc.serializer
rdds[i] = rdds[i]._reserialize()
def queueStream(self, rdds, oneAtATime=True, default=None):
"""
        Create an input stream from a queue of RDDs or lists. In each batch,
it will process either one or all of the RDDs returned by the queue.
NOTE: changes to the queue after the stream is created will not be recognized.
@param rdds: Queue of RDDs
        @param oneAtATime: pick one RDD each time, or pick all of them once.
        @param default: The default RDD to use if the queue runs out of RDDs
"""
if default and not isinstance(default, RDD):
default = self._sc.parallelize(default)
if not rdds and default:
            rdds = [default]
if rdds and not isinstance(rdds[0], RDD):
rdds = [self._sc.parallelize(input) for input in rdds]
self._check_serializers(rdds)
queue = self._jvm.PythonDStream.toRDDQueue([r._jrdd for r in rdds])
if default:
default = default._reserialize(rdds[0]._jrdd_deserializer)
jdstream = self._jssc.queueStream(queue, oneAtATime, default._jrdd)
else:
jdstream = self._jssc.queueStream(queue, oneAtATime)
return DStream(jdstream, self, rdds[0]._jrdd_deserializer)
def transform(self, dstreams, transformFunc):
"""
Create a new DStream in which each RDD is generated by applying
a function on RDDs of the DStreams. The order of the JavaRDDs in
the transform function parameter will be the same as the order
of corresponding DStreams in the list.
"""
jdstreams = [d._jdstream for d in dstreams]
# change the final serializer to sc.serializer
func = TransformFunction(self._sc,
lambda t, *rdds: transformFunc(rdds).map(lambda x: x),
*[d._jrdd_deserializer for d in dstreams])
jfunc = self._jvm.TransformFunction(func)
jdstream = self._jssc.transform(jdstreams, jfunc)
return DStream(jdstream, self, self._sc.serializer)
def union(self, *dstreams):
"""
Create a unified DStream from multiple DStreams of the same
type and same slide duration.
"""
if not dstreams:
raise ValueError("should have at least one DStream to union")
if len(dstreams) == 1:
return dstreams[0]
if len(set(s._jrdd_deserializer for s in dstreams)) > 1:
raise ValueError("All DStreams should have same serializer")
if len(set(s._slideDuration for s in dstreams)) > 1:
raise ValueError("All DStreams should have same slide duration")
first = dstreams[0]
jrest = [d._jdstream for d in dstreams[1:]]
return DStream(self._jssc.union(first._jdstream, jrest), self, first._jrdd_deserializer)
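# Illustrative usage sketch (hand-written; not part of PySpark itself). It
# shows the typical StreamingContext lifecycle: create, build a DStream from a
# source, start, and block until termination. The hostname, port, app name and
# batch interval below are placeholder assumptions.
def _example_network_wordcount():
    """Hypothetical sketch of a streaming word count over a TCP text source."""
    sc = SparkContext(appName="NetworkWordCount")
    ssc = StreamingContext(sc, batchDuration=1)        # 1-second batches
    lines = ssc.socketTextStream("localhost", 9999)    # UTF8, newline-delimited lines
    counts = (lines.flatMap(lambda line: line.split(" "))
                   .map(lambda word: (word, 1))
                   .reduceByKey(lambda a, b: a + b))
    counts.pprint()          # print a few counts for every batch
    ssc.start()              # start the streaming computation
    ssc.awaitTermination()   # block until stop() is called or an error occurs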
|
|
# -*- coding: utf-8 -*-
"""
flaskbb.forum.models
~~~~~~~~~~~~~~~~~~~~
It provides the models for the forum
:copyright: (c) 2014 by the FlaskBB Team.
:license: BSD, see LICENSE for more details.
"""
from datetime import datetime, timedelta
from flask import url_for, abort
from sqlalchemy.orm import aliased
from flaskbb.extensions import db
from flaskbb.utils.decorators import can_access_forum, can_access_topic
from flaskbb.utils.helpers import slugify, get_categories_and_forums, \
get_forums
from flaskbb.utils.database import CRUDMixin
from flaskbb.utils.settings import flaskbb_config
moderators = db.Table(
'moderators',
db.Column('user_id', db.Integer(), db.ForeignKey('users.id'),
nullable=False),
db.Column('forum_id', db.Integer(),
db.ForeignKey('forums.id', use_alter=True, name="fk_forum_id"),
nullable=False))
topictracker = db.Table(
'topictracker',
db.Column('user_id', db.Integer(), db.ForeignKey('users.id'),
nullable=False),
db.Column('topic_id', db.Integer(),
db.ForeignKey('topics.id',
use_alter=True, name="fk_tracker_topic_id"),
nullable=False))
# m2m table for group-forum permission mapping
forumgroups = db.Table(
'forumgroups',
db.Column(
'group_id',
db.Integer(),
db.ForeignKey('groups.id'),
nullable=False
),
db.Column(
'forum_id',
db.Integer(),
db.ForeignKey('forums.id', use_alter=True, name="fk_forum_id"),
nullable=False
)
)
class TopicsRead(db.Model, CRUDMixin):
__tablename__ = "topicsread"
user_id = db.Column(db.Integer, db.ForeignKey("users.id"),
primary_key=True)
topic_id = db.Column(db.Integer,
db.ForeignKey("topics.id", use_alter=True,
name="fk_tr_topic_id"),
primary_key=True)
forum_id = db.Column(db.Integer,
db.ForeignKey("forums.id", use_alter=True,
name="fk_tr_forum_id"),
primary_key=True)
    last_read = db.Column(db.DateTime, default=datetime.utcnow)  # pass the callable so the default is evaluated per row
class ForumsRead(db.Model, CRUDMixin):
__tablename__ = "forumsread"
user_id = db.Column(db.Integer, db.ForeignKey("users.id"),
primary_key=True)
forum_id = db.Column(db.Integer,
db.ForeignKey("forums.id", use_alter=True,
name="fk_fr_forum_id"),
primary_key=True)
    last_read = db.Column(db.DateTime, default=datetime.utcnow)
cleared = db.Column(db.DateTime)
class Report(db.Model, CRUDMixin):
__tablename__ = "reports"
id = db.Column(db.Integer, primary_key=True)
reporter_id = db.Column(db.Integer, db.ForeignKey("users.id"),
nullable=False)
    reported = db.Column(db.DateTime, default=datetime.utcnow)
post_id = db.Column(db.Integer, db.ForeignKey("posts.id"), nullable=False)
zapped = db.Column(db.DateTime)
zapped_by = db.Column(db.Integer, db.ForeignKey("users.id"))
reason = db.Column(db.Text)
post = db.relationship("Post", backref="report", lazy="joined")
reporter = db.relationship("User", lazy="joined",
foreign_keys=[reporter_id])
zapper = db.relationship("User", lazy="joined", foreign_keys=[zapped_by])
def __repr__(self):
return "<{} {}>".format(self.__class__.__name__, self.id)
def save(self, post=None, user=None):
"""Saves a report.
:param post: The post that should be reported
:param user: The user who has reported the post
"""
if self.id:
db.session.add(self)
db.session.commit()
return self
if post and user:
self.reporter_id = user.id
self.reported = datetime.utcnow()
self.post_id = post.id
db.session.add(self)
db.session.commit()
return self
class Post(db.Model, CRUDMixin):
__tablename__ = "posts"
__searchable__ = ['content', 'username']
id = db.Column(db.Integer, primary_key=True)
topic_id = db.Column(db.Integer,
db.ForeignKey("topics.id",
use_alter=True,
name="fk_post_topic_id",
ondelete="CASCADE"))
user_id = db.Column(db.Integer, db.ForeignKey("users.id"), nullable=True)
username = db.Column(db.String(200), nullable=False)
content = db.Column(db.Text, nullable=False)
    date_created = db.Column(db.DateTime, default=datetime.utcnow)
date_modified = db.Column(db.DateTime)
modified_by = db.Column(db.String(200))
# Properties
@property
def url(self):
"""Returns the url for the post"""
return url_for("forum.view_post", post_id=self.id)
# Methods
def __init__(self, content=None):
if content:
self.content = content
def __repr__(self):
"""
Set to a unique key specific to the object in the database.
Required for cache.memoize() to work across requests.
"""
return "<{} {}>".format(self.__class__.__name__, self.id)
def save(self, user=None, topic=None):
"""Saves a new post. If no parameters are passed we assume that
you will just update an existing post. It returns the object after the
operation was successful.
:param user: The user who has created the post
:param topic: The topic in which the post was created
"""
# update/edit the post
if self.id:
db.session.add(self)
db.session.commit()
return self
# Adding a new post
if user and topic:
created = datetime.utcnow()
self.user_id = user.id
self.username = user.username
self.topic_id = topic.id
self.date_created = created
topic.last_updated = created
# This needs to be done before the last_post_id gets updated.
db.session.add(self)
db.session.commit()
            # Now let's update the last post id
topic.last_post_id = self.id
# Update the last post info for the forum
topic.forum.last_post_id = self.id
topic.forum.last_post_title = topic.title
topic.forum.last_post_user_id = user.id
topic.forum.last_post_username = user.username
topic.forum.last_post_created = created
# Update the post counts
user.post_count += 1
topic.post_count += 1
topic.forum.post_count += 1
# And commit it!
db.session.add(topic)
db.session.commit()
return self
def delete(self):
"""Deletes a post and returns self."""
# This will delete the whole topic
if self.topic.first_post_id == self.id:
self.topic.delete()
return self
# Delete the last post
if self.topic.last_post_id == self.id:
# update the last post in the forum
if self.topic.last_post_id == self.topic.forum.last_post_id:
# We need the second last post in the forum here,
# because the last post will be deleted
second_last_post = Post.query.\
filter(Post.topic_id == Topic.id,
Topic.forum_id == self.topic.forum.id).\
order_by(Post.id.desc()).limit(2).offset(0).\
all()
second_last_post = second_last_post[1]
self.topic.forum.last_post_id = second_last_post.id
# check if there is a second last post, else it is the first post
if self.topic.second_last_post:
# Now the second last post will be the last post
self.topic.last_post_id = self.topic.second_last_post
# there is no second last post, now the last post is also the
# first post
else:
self.topic.last_post_id = self.topic.first_post_id
# Update the post counts
self.user.post_count -= 1
self.topic.post_count -= 1
self.topic.forum.post_count -= 1
db.session.commit()
db.session.delete(self)
db.session.commit()
return self
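# Illustrative sketch (hand-written; not part of FlaskBB). It shows how
# Post.save() is meant to be driven for a reply: `user` and `topic` are assumed
# to be already-persisted User and Topic instances obtained elsewhere.
def _example_reply_to_topic(user, topic, content):
    """Hypothetical helper: attach a new post to an existing topic."""
    post = Post(content=content)
    # save() fills in the author, bumps the post counters and updates the
    # topic's and forum's last-post columns in one go.
    return post.save(user=user, topic=topic)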
class Topic(db.Model, CRUDMixin):
__tablename__ = "topics"
__searchable__ = ['title', 'username']
id = db.Column(db.Integer, primary_key=True)
forum_id = db.Column(db.Integer,
db.ForeignKey("forums.id",
use_alter=True,
name="fk_topic_forum_id"),
nullable=False)
title = db.Column(db.String(255), nullable=False)
user_id = db.Column(db.Integer, db.ForeignKey("users.id"))
username = db.Column(db.String(200), nullable=False)
    date_created = db.Column(db.DateTime, default=datetime.utcnow)
    last_updated = db.Column(db.DateTime, default=datetime.utcnow)
locked = db.Column(db.Boolean, default=False)
important = db.Column(db.Boolean, default=False)
views = db.Column(db.Integer, default=0)
post_count = db.Column(db.Integer, default=0)
# One-to-one (uselist=False) relationship between first_post and topic
first_post_id = db.Column(db.Integer, db.ForeignKey("posts.id",
ondelete="CASCADE"))
first_post = db.relationship("Post", backref="first_post", uselist=False,
foreign_keys=[first_post_id])
# One-to-one
last_post_id = db.Column(db.Integer, db.ForeignKey("posts.id"))
last_post = db.relationship("Post", backref="last_post", uselist=False,
foreign_keys=[last_post_id])
# One-to-many
posts = db.relationship("Post", backref="topic", lazy="dynamic",
primaryjoin="Post.topic_id == Topic.id",
cascade="all, delete-orphan", post_update=True)
# Properties
@property
def second_last_post(self):
"""Returns the second last post."""
return self.posts[-2].id
@property
def slug(self):
"""Returns a slugified version from the topic title"""
return slugify(self.title)
@property
def url(self):
"""Returns the slugified url for the topic"""
return url_for("forum.view_topic", topic_id=self.id, slug=self.slug)
# Methods
def __init__(self, title=None):
if title:
self.title = title
def __repr__(self):
"""
Set to a unique key specific to the object in the database.
Required for cache.memoize() to work across requests.
"""
return "<{} {}>".format(self.__class__.__name__, self.id)
@classmethod
@can_access_topic
def get_topic(cls, topic_id, user):
topic = Topic.query.filter_by(id=topic_id).first_or_404()
return topic
def tracker_needs_update(self, forumsread, topicsread):
"""Returns True if the topicsread tracker needs an update.
Also, if the ``TRACKER_LENGTH`` is configured, it will just recognize
topics that are newer than the ``TRACKER_LENGTH`` (in days) as unread.
:param forumsread: The ForumsRead object is needed because we also
need to check if the forum has been cleared
sometime ago.
:param topicsread: The topicsread object is used to check if there is
a new post in the topic.
"""
read_cutoff = None
if flaskbb_config['TRACKER_LENGTH'] > 0:
read_cutoff = datetime.utcnow() - timedelta(
days=flaskbb_config['TRACKER_LENGTH'])
# The tracker is disabled - abort
if read_cutoff is None:
return False
# Else the topic is still below the read_cutoff
elif read_cutoff > self.last_post.date_created:
return False
# Can be None (cleared) if the user has never marked the forum as read.
# If this condition is false - we need to update the tracker
if forumsread and forumsread.cleared is not None and \
forumsread.cleared >= self.last_post.date_created:
return False
if topicsread and topicsread.last_read >= self.last_post.date_created:
return False
return True
def update_read(self, user, forum, forumsread):
"""Updates the topicsread and forumsread tracker for a specified user,
if the topic contains new posts or the user hasn't read the topic.
Returns True if the tracker has been updated.
:param user: The user for whom the readstracker should be updated.
:param forum: The forum in which the topic is.
:param forumsread: The forumsread object. It is used to check if there
is a new post since the forum has been marked as
read.
"""
# User is not logged in - abort
if not user.is_authenticated():
return False
topicsread = TopicsRead.query.\
filter(TopicsRead.user_id == user.id,
TopicsRead.topic_id == self.id).first()
if not self.tracker_needs_update(forumsread, topicsread):
return False
# Because we return True/False if the trackers have been
# updated, we need to store the status in a temporary variable
updated = False
# A new post has been submitted that the user hasn't read.
# Updating...
if topicsread:
topicsread.last_read = datetime.utcnow()
topicsread.save()
updated = True
# The user has not visited the topic before. Inserting him in
# the TopicsRead model.
elif not topicsread:
topicsread = TopicsRead()
topicsread.user_id = user.id
topicsread.topic_id = self.id
topicsread.forum_id = self.forum_id
topicsread.last_read = datetime.utcnow()
topicsread.save()
updated = True
# No unread posts
else:
updated = False
# Save True/False if the forums tracker has been updated.
updated = forum.update_read(user, forumsread, topicsread)
return updated
def recalculate(self):
"""Recalculates the post count in the topic."""
post_count = Post.query.filter_by(topic_id=self.id).count()
self.post_count = post_count
self.save()
return self
def move(self, new_forum):
"""Moves a topic to the given forum.
Returns True if it could successfully move the topic to forum.
:param new_forum: The new forum for the topic
"""
# if the target forum is the current forum, abort
if self.forum_id == new_forum.id:
return False
old_forum = self.forum
self.forum.post_count -= self.post_count
self.forum.topic_count -= 1
self.forum_id = new_forum.id
new_forum.post_count += self.post_count
new_forum.topic_count += 1
db.session.commit()
new_forum.update_last_post()
old_forum.update_last_post()
TopicsRead.query.filter_by(topic_id=self.id).delete()
return True
def save(self, user=None, forum=None, post=None):
"""Saves a topic and returns the topic object. If no parameters are
given, it will only update the topic.
:param user: The user who has created the topic
:param forum: The forum where the topic is stored
:param post: The post object which is connected to the topic
"""
# Updates the topic
if self.id:
db.session.add(self)
db.session.commit()
return self
# Set the forum and user id
self.forum_id = forum.id
self.user_id = user.id
self.username = user.username
# Set the last_updated time. Needed for the readstracker
self.last_updated = datetime.utcnow()
self.date_created = datetime.utcnow()
# Insert and commit the topic
db.session.add(self)
db.session.commit()
# Create the topic post
post.save(user, self)
# Update the first post id
self.first_post_id = post.id
# Update the topic count
forum.topic_count += 1
db.session.commit()
return self
def delete(self, users=None):
"""Deletes a topic with the corresponding posts. If a list with
user objects is passed it will also update their post counts
:param users: A list with user objects
"""
        # Grab the second last topic in the forum + parents/children
topic = Topic.query.\
filter_by(forum_id=self.forum_id).\
order_by(Topic.last_post_id.desc()).limit(2).offset(0).all()
# do we want to delete the topic with the last post in the forum?
if topic and topic[0].id == self.id:
try:
# Now the second last post will be the last post
self.forum.last_post_id = topic[1].last_post_id
self.forum.last_post_title = topic[1].title
self.forum.last_post_user_id = topic[1].user_id
self.forum.last_post_username = topic[1].username
self.forum.last_post_created = topic[1].last_updated
# Catch an IndexError when you delete the last topic in the forum
# There is no second last post
except IndexError:
self.forum.last_post_id = None
self.forum.last_post_title = None
self.forum.last_post_user_id = None
self.forum.last_post_username = None
self.forum.last_post_created = None
# Commit the changes
db.session.commit()
        # These things need to be stored in variables before they are deleted
forum = self.forum
TopicsRead.query.filter_by(topic_id=self.id).delete()
# Delete the topic
db.session.delete(self)
db.session.commit()
# Update the post counts
if users:
for user in users:
user.post_count = Post.query.filter_by(user_id=user.id).count()
db.session.commit()
forum.topic_count = Topic.query.\
filter_by(forum_id=self.forum_id).\
count()
forum.post_count = Post.query.\
filter(Post.topic_id == Topic.id,
Topic.forum_id == self.forum_id).\
count()
db.session.commit()
return self
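# Illustrative sketch (hand-written; not part of FlaskBB). Opening a new topic
# goes through Topic.save(user, forum, post): the Post carries the opening
# message and becomes the topic's first (and last) post. `user` and `forum`
# are assumed to be already-persisted instances.
def _example_open_topic(user, forum, title, content):
    """Hypothetical helper: create a topic together with its first post."""
    topic = Topic(title=title)
    post = Post(content=content)
    # Topic.save() persists the topic, saves the post via post.save(user, topic)
    # and wires up first_post_id and the forum's topic count.
    return topic.save(user=user, forum=forum, post=post)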
class Forum(db.Model, CRUDMixin):
__tablename__ = "forums"
__searchable__ = ['title', 'description']
id = db.Column(db.Integer, primary_key=True)
category_id = db.Column(db.Integer, db.ForeignKey("categories.id"),
nullable=False)
title = db.Column(db.String(255), nullable=False)
description = db.Column(db.Text)
position = db.Column(db.Integer, default=1, nullable=False)
locked = db.Column(db.Boolean, default=False, nullable=False)
show_moderators = db.Column(db.Boolean, default=False, nullable=False)
external = db.Column(db.String(200))
post_count = db.Column(db.Integer, default=0, nullable=False)
topic_count = db.Column(db.Integer, default=0, nullable=False)
# One-to-one
last_post_id = db.Column(db.Integer, db.ForeignKey("posts.id"))
last_post = db.relationship("Post", backref="last_post_forum",
uselist=False, foreign_keys=[last_post_id])
# Not nice, but needed to improve the performance
last_post_title = db.Column(db.String(255))
last_post_user_id = db.Column(db.Integer, db.ForeignKey("users.id"))
last_post_username = db.Column(db.String(255))
    last_post_created = db.Column(db.DateTime, default=datetime.utcnow)
# One-to-many
topics = db.relationship(
"Topic",
backref="forum",
lazy="dynamic",
cascade="all, delete-orphan"
)
# Many-to-many
moderators = db.relationship(
"User",
secondary=moderators,
primaryjoin=(moderators.c.forum_id == id),
backref=db.backref("forummoderator", lazy="dynamic"),
lazy="joined"
)
groups = db.relationship(
"Group",
secondary=forumgroups,
primaryjoin=(forumgroups.c.forum_id == id),
backref="forumgroups",
lazy="joined",
)
# Properties
@property
def slug(self):
"""Returns a slugified version from the forum title"""
return slugify(self.title)
@property
def url(self):
"""Returns the slugified url for the forum"""
if self.external:
return self.external
return url_for("forum.view_forum", forum_id=self.id, slug=self.slug)
@property
def last_post_url(self):
"""Returns the url for the last post in the forum"""
return url_for("forum.view_post", post_id=self.last_post_id)
# Methods
def __repr__(self):
"""Set to a unique key specific to the object in the database.
Required for cache.memoize() to work across requests.
"""
return "<{} {}>".format(self.__class__.__name__, self.id)
def update_last_post(self):
"""Updates the last post in the forum."""
last_post = Post.query.\
filter(Post.topic_id == Topic.id,
Topic.forum_id == self.id).\
order_by(Post.date_created.desc()).\
first()
# Last post is none when there are no topics in the forum
if last_post is not None:
# a new last post was found in the forum
if not last_post.id == self.last_post_id:
self.last_post_id = last_post.id
self.last_post_title = last_post.topic.title
self.last_post_user_id = last_post.user_id
self.last_post_username = last_post.username
self.last_post_created = last_post.date_created
# No post found..
else:
self.last_post_id = None
self.last_post_title = None
self.last_post_user_id = None
self.last_post_username = None
self.last_post_created = None
db.session.commit()
def update_read(self, user, forumsread, topicsread):
"""Updates the ForumsRead status for the user. In order to work
        correctly, be sure that `topicsread` is **not** `None`.
:param user: The user for whom we should check if he has read the
forum.
:param forumsread: The forumsread object. It is needed to check if
                           the forum is unread. If `forumsread` is `None`
and the forum is unread, it will create a new entry
in the `ForumsRead` relation, else (and the forum
is still unread) we are just going to update the
entry in the `ForumsRead` relation.
        :param topicsread: The topicsread object is used in combination
                           with the forumsread object to decide whether the
                           forumsread relation should be updated, i.e.
                           whether the forum is unread.
"""
if not user.is_authenticated() or topicsread is None:
return False
read_cutoff = None
if flaskbb_config['TRACKER_LENGTH'] > 0:
read_cutoff = datetime.utcnow() - timedelta(
days=flaskbb_config['TRACKER_LENGTH'])
# fetch the unread posts in the forum
unread_count = Topic.query.\
outerjoin(TopicsRead,
db.and_(TopicsRead.topic_id == Topic.id,
TopicsRead.user_id == user.id)).\
outerjoin(ForumsRead,
db.and_(ForumsRead.forum_id == Topic.forum_id,
ForumsRead.user_id == user.id)).\
filter(Topic.forum_id == self.id,
Topic.last_updated > read_cutoff,
db.or_(TopicsRead.last_read == None,
TopicsRead.last_read < Topic.last_updated)).\
count()
# No unread topics available - trying to mark the forum as read
if unread_count == 0:
if forumsread and forumsread.last_read > topicsread.last_read:
return False
# ForumRead Entry exists - Updating it because a new topic/post
            # has been submitted and the user has read everything (obviously, else the
# unread_count would be useless).
elif forumsread:
forumsread.last_read = datetime.utcnow()
forumsread.save()
return True
# No ForumsRead entry exists - creating one.
forumsread = ForumsRead()
forumsread.user_id = user.id
forumsread.forum_id = self.id
forumsread.last_read = datetime.utcnow()
forumsread.save()
return True
# Nothing updated, because there are still unread topics in the
# forum
return False
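# A minimal usage sketch of update_read() (assuming a request handler that
# already has a ``Forum`` instance ``forum``, the current ``user`` and a
# freshly updated ``topicsread`` row; all names here are hypothetical):
#
#     forumsread = ForumsRead.query.filter_by(
#         user_id=user.id, forum_id=forum.id).first()
#     if forum.update_read(user, forumsread, topicsread):
#         # every topic in the forum has been read; the tracker row was
#         # created or refreshed
#         pass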
def recalculate(self, last_post=False):
"""Recalculates the post_count and topic_count in the forum.
Returns the forum with the recounted stats.
:param last_post: If set to ``True`` it will also try to update
the last post columns in the forum.
"""
topic_count = Topic.query.filter_by(forum_id=self.id).count()
post_count = Post.query.\
filter(Post.topic_id == Topic.id,
Topic.forum_id == self.id).\
count()
self.topic_count = topic_count
self.post_count = post_count
if last_post:
self.update_last_post()
self.save()
return self
def save(self, groups=None):
"""Saves a forum
:param moderators: If given, it will update the moderators in this
forum with the given iterable of user objects.
:param groups: A list with group objects.
"""
if self.id:
db.session.merge(self)
else:
if groups is None:
# importing here because of circular dependencies
from flaskbb.user.models import Group
self.groups = Group.query.order_by(Group.name.asc()).all()
db.session.add(self)
db.session.commit()
return self
def delete(self, users=None):
"""Deletes forum. If a list with involved user objects is passed,
it will also update their post counts
:param users: A list with user objects
"""
# Delete the forum
db.session.delete(self)
db.session.commit()
# Delete the entries for the forum in the ForumsRead and TopicsRead
# relation
ForumsRead.query.filter_by(forum_id=self.id).delete()
TopicsRead.query.filter_by(forum_id=self.id).delete()
# Update the users post count
if users:
users_list = []
for user in users:
user.post_count = Post.query.filter_by(user_id=user.id).count()
users_list.append(user)
db.session.add_all(users_list)
db.session.commit()
return self
def move_topics_to(self, topics):
"""Moves a bunch a topics to the forum. Returns ``True`` if all
topics were moved successfully to the forum.
:param topics: A iterable with topic objects.
"""
status = False
for topic in topics:
status = topic.move(self)
return status
# Classmethods
@classmethod
@can_access_forum
def get_forum(cls, forum_id, user):
"""Returns the forum and forumsread object as a tuple for the user.
:param forum_id: The forum id
:param user: The user object; needed to check whether we also have to
load their forumsread object.
"""
if user.is_authenticated():
forum, forumsread = Forum.query.\
filter(Forum.id == forum_id).\
options(db.joinedload("category")).\
outerjoin(ForumsRead,
db.and_(ForumsRead.forum_id == Forum.id,
ForumsRead.user_id == user.id)).\
add_entity(ForumsRead).\
first_or_404()
else:
forum = Forum.query.filter(Forum.id == forum_id).first_or_404()
forumsread = None
return forum, forumsread
@classmethod
def get_topics(cls, forum_id, user, page=1, per_page=20):
"""Get the topics for the forum. If the user is logged in,
it will perform an outerjoin for the topics with the topicsread
relation to check if a topic is read or unread.
:param forum_id: The forum id
:param user: The user object
:param page: The page which should be loaded
:param per_page: How many topics per page should be shown
"""
if user.is_authenticated():
topics = Topic.query.filter_by(forum_id=forum_id).\
outerjoin(TopicsRead,
db.and_(TopicsRead.topic_id == Topic.id,
TopicsRead.user_id == user.id)).\
add_entity(TopicsRead).\
order_by(Topic.important.desc(), Topic.last_updated.desc()).\
paginate(page, per_page, True)
else:
topics = Topic.query.filter_by(forum_id=forum_id).\
order_by(Topic.important.desc(), Topic.last_updated.desc()).\
paginate(page, per_page, True)
topics.items = [(topic, None) for topic in topics.items]
return topics
class Category(db.Model, CRUDMixin):
__tablename__ = "categories"
__searchable__ = ['title', 'description']
id = db.Column(db.Integer, primary_key=True)
title = db.Column(db.String(255), nullable=False)
description = db.Column(db.Text)
position = db.Column(db.Integer, default=1, nullable=False)
# One-to-many
forums = db.relationship("Forum", backref="category", lazy="dynamic",
primaryjoin='Forum.category_id == Category.id',
order_by='asc(Forum.position)',
cascade="all, delete-orphan")
# Properties
@property
def slug(self):
"""Returns a slugified version from the category title"""
return slugify(self.title)
@property
def url(self):
"""Returns the slugified url for the category"""
return url_for("forum.view_category", category_id=self.id,
slug=self.slug)
# Methods
def __repr__(self):
"""Set to a unique key specific to the object in the database.
Required for cache.memoize() to work across requests.
"""
return "<{} {}>".format(self.__class__.__name__, self.id)
def delete(self, users=None):
"""Deletes a category. If a list with involved user objects is passed,
it will also update their post counts
:param users: A list with user objects
"""
# delete the category itself
db.session.delete(self)
db.session.commit()
# Update the users post count
if users:
for user in users:
user.post_count = Post.query.filter_by(user_id=user.id).count()
db.session.commit()
return self
# Classmethods
@classmethod
def get_all(cls, user):
"""Get all categories with all associated forums.
It returns a list of tuples. Each tuple contains the category
and its associated forums (stored in a list).
For example::
[(<Category 1>, [(<Forum 2>, <ForumsRead>), (<Forum 1>, None)]),
(<Category 2>, [(<Forum 3>, None), (<Forum 4>, None)])]
:param user: The user object; needed to check whether we also have to
load their forumsread object.
"""
# import Group model locally to avoid circular imports
from flaskbb.user.models import Group
if user.is_authenticated():
# get list of user group ids
user_groups = [gr.id for gr in user.groups]
# filter forums by user groups
user_forums = Forum.query.\
filter(Forum.groups.any(Group.id.in_(user_groups))).\
subquery()
forum_alias = aliased(Forum, user_forums)
# get all categories with their forums and forumsread entries
forums = cls.query.\
join(forum_alias, cls.id == forum_alias.category_id).\
outerjoin(ForumsRead,
db.and_(ForumsRead.forum_id == forum_alias.id,
ForumsRead.user_id == user.id)).\
add_entity(forum_alias).\
add_entity(ForumsRead).\
order_by(Category.position, Category.id,
forum_alias.position).\
all()
else:
guest_group = Group.get_guest_group()
# filter forums by guest groups
guest_forums = Forum.query.\
filter(Forum.groups.any(Group.id == guest_group.id)).\
subquery()
forum_alias = aliased(Forum, guest_forums)
forums = cls.query.\
join(forum_alias, cls.id == forum_alias.category_id).\
add_entity(forum_alias).\
order_by(Category.position, Category.id,
forum_alias.position).\
all()
return get_categories_and_forums(forums, user)
@classmethod
def get_forums(cls, category_id, user):
"""Get the forums for the category.
It returns a tuple with the category and a list of the forums
together with their forumsread objects.
A return value can look like this for a category with two forums::
(<Category 1>, [(<Forum 1>, None), (<Forum 2>, None)])
:param category_id: The category id
:param user: The user object; needed to check whether we also have to
load their forumsread object.
"""
from flaskbb.user.models import Group
if user.is_authenticated():
# get list of user group ids
user_groups = [gr.id for gr in user.groups]
# filter forums by user groups
user_forums = Forum.query.\
filter(Forum.groups.any(Group.id.in_(user_groups))).\
subquery()
forum_alias = aliased(Forum, user_forums)
forums = cls.query.\
filter(cls.id == category_id).\
join(forum_alias, cls.id == forum_alias.category_id).\
outerjoin(ForumsRead,
db.and_(ForumsRead.forum_id == forum_alias.id,
ForumsRead.user_id == user.id)).\
add_entity(forum_alias).\
add_entity(ForumsRead).\
order_by(forum_alias.position).\
all()
else:
guest_group = Group.get_guest_group()
# filter forums by guest groups
guest_forums = Forum.query.\
filter(Forum.groups.any(Group.id == guest_group.id)).\
subquery()
forum_alias = aliased(Forum, guest_forums)
forums = cls.query.\
filter(cls.id == category_id).\
join(forum_alias, cls.id == forum_alias.category_id).\
add_entity(forum_alias).\
order_by(forum_alias.position).\
all()
if not forums:
abort(404)
return get_forums(forums, user)
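# A minimal usage sketch for consuming Category.get_all() /
# Category.get_forums() (assuming an authenticated ``user`` object; the
# variable names are hypothetical):
#
#     for category, forums in Category.get_all(user=user):
#         for forum, forumsread in forums:
#             print(category.title, forum.title, forumsread)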
|
|
import json
import re
import warnings
import time
from random import randint
from ..utils import gen_user_breadcrumb
from ..compatpatch import ClientCompatPatch
class MediaEndpointsMixin(object):
"""For endpoints in ``/media/``."""
def media_info(self, media_id):
"""
Get media info
:param media_id:
:return:
"""
endpoint = 'media/{media_id!s}/info/'.format(**{'media_id': media_id})
res = self._call_api(endpoint)
if self.auto_patch:
[ClientCompatPatch.media(m, drop_incompat_keys=self.drop_incompat_keys)
for m in res.get('items', [])]
return res
def medias_info(self, media_ids):
"""
Get multiple media infos
:param media_ids: list of media ids
:return:
"""
if isinstance(media_ids, str):
media_ids = [media_ids]
params = {
'_uuid': self.uuid,
'_csrftoken': self.csrftoken,
'media_ids': ','.join(media_ids),
'ranked_content': 'true',
'include_inactive_reel': 'true',
}
res = self._call_api('media/infos/', query=params)
if self.auto_patch:
[ClientCompatPatch.media(m, drop_incompat_keys=self.drop_incompat_keys)
for m in res.get('items', [])]
return res
def media_permalink(self, media_id):
"""
Get media permalink
:param media_id:
:return:
"""
endpoint = 'media/{media_id!s}/permalink/'.format(**{'media_id': media_id})
res = self._call_api(endpoint)
return res
def media_comments(self, media_id, **kwargs):
"""
Get media comments. Fixed at 20 comments returned per page.
:param media_id: Media id
:param kwargs:
**max_id**: For pagination
:return:
"""
endpoint = 'media/{media_id!s}/comments/'.format(**{'media_id': media_id})
res = self._call_api(endpoint, query=kwargs)
if self.auto_patch:
[ClientCompatPatch.comment(c, drop_incompat_keys=self.drop_incompat_keys)
for c in res.get('comments', [])]
return res
def media_n_comments(self, media_id, n=150, reverse=False, **kwargs):
"""
Helper method to retrieve at least n comments for a media id
:param media_id: Media id
:param n: Minimum number of comments to fetch
:param reverse: Reverse list of comments (ordered by created_time)
:param kwargs:
:return:
"""
endpoint = 'media/{media_id!s}/comments/'.format(**{'media_id': media_id})
comments = []
results = self._call_api(endpoint, query=kwargs)
comments.extend(results.get('comments', []))
while results.get('has_more_comments') and results.get('next_max_id') and len(comments) < n:
kwargs.update({'max_id': results.get('next_max_id')})
results = self._call_api(endpoint, query=kwargs)
comments.extend(results.get('comments', []))
if not results.get('next_max_id') or not results.get('comments'):
# bail out if no max_id or comments returned
break
if self.auto_patch:
[ClientCompatPatch.comment(c, drop_incompat_keys=self.drop_incompat_keys)
for c in comments]
return sorted(comments, key=lambda k: k['created_time'], reverse=reverse)
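# A minimal usage sketch (``api`` stands for an already authenticated client
# exposing this mixin; the media id is a placeholder):
#
#     comments = api.media_n_comments('1309763051087626108_124317', n=60,
#                                     reverse=True)
#     for comment in comments:
#         print(comment['created_time'], comment.get('text'))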
def edit_media(self, media_id, caption, usertags=None):
"""
Edit a media's caption
:param media_id: Media id
:param caption: Caption text
:param usertags: array of user_ids and positions in the format below:
.. code-block:: javascript
usertags = [
{"user_id":4292127751, "position":[0.625347,0.4384531]}
]
:return:
"""
if usertags is None:
usertags = []
endpoint = 'media/{media_id!s}/edit_media/'.format(**{'media_id': media_id})
params = {'caption_text': caption}
params.update(self.authenticated_params)
if usertags:
utags = {'in': [{'user_id': u['user_id'], 'position': u['position']} for u in usertags]}
params['usertags'] = json.dumps(utags, separators=(',', ':'))
res = self._call_api(endpoint, params=params)
if self.auto_patch:
ClientCompatPatch.media(res.get('media'))
return res
def delete_media(self, media_id):
"""
Delete a media
:param media_id: Media id
:return:
.. code-block:: javascript
{"status": "ok", "did_delete": true}
"""
endpoint = 'media/{media_id!s}/delete/'.format(**{'media_id': media_id})
params = {'media_id': media_id}
params.update(self.authenticated_params)
return self._call_api(endpoint, params=params)
def post_comment(self, media_id, comment_text):
"""
Post a comment.
Comment text validation according to https://www.instagram.com/developer/endpoints/comments/#post_media_comments
:param media_id: Media id
:param comment_text: Comment text
:return:
.. code-block:: javascript
{
"comment": {
"status": "Active",
"media_id": 123456789,
"text": ":)",
"created_at": 1479453671.0,
"user": {
"username": "x",
"has_anonymous_profile_picture": false,
"profile_pic_url": "http://scontent-sit4-1.cdninstagram.com/abc.jpg",
"full_name": "x",
"pk": 123456789,
"is_verified": false,
"is_private": false
},
"content_type": "comment",
"created_at_utc": 1479482471,
"pk": 17865505612040669,
"type": 0
},
"status": "ok"
}
"""
if len(comment_text) > 300:
raise ValueError('The total length of the comment cannot exceed 300 characters.')
if re.search(r'[a-z]+', comment_text, re.IGNORECASE) and comment_text == comment_text.upper():
raise ValueError('The comment cannot consist of all capital letters.')
if len(re.findall(r'#[^#]+\b', comment_text, re.UNICODE | re.MULTILINE)) > 4:
raise ValueError('The comment cannot contain more than 4 hashtags.')
if len(re.findall(r'\bhttps?://\S+\.\S+', comment_text)) > 1:
raise ValueError('The comment cannot contain more than 1 URL.')
endpoint = 'media/{media_id!s}/comment/'.format(**{'media_id': media_id})
params = {
'comment_text': comment_text,
'user_breadcrumb': gen_user_breadcrumb(len(comment_text)),
'idempotence_token': self.generate_uuid(),
'containermodule': 'comments_feed_timeline',
'radio_type': self.radio_type,
}
params.update(self.authenticated_params)
res = self._call_api(endpoint, params=params)
if self.auto_patch:
ClientCompatPatch.comment(res['comment'], drop_incompat_keys=self.drop_incompat_keys)
return res
def delete_comment(self, media_id, comment_id):
"""
Delete a comment
:param media_id: Media id
:param comment_id: Comment id
:return:
.. code-block:: javascript
{"status": "ok"}
"""
endpoint = 'media/{media_id!s}/comment/{comment_id!s}/delete/'.format(**{
'media_id': media_id, 'comment_id': comment_id})
params = {}
params.update(self.authenticated_params)
res = self._call_api(endpoint, params=params)
return res
def bulk_delete_comments(self, media_id, comment_ids):
"""
Bulk delete comments
:param media_id: Media id
:param comment_ids: List of comment ids
:return:
.. code-block:: javascript
{"status": "ok"}
"""
if not isinstance(comment_ids, list):
comment_ids = [comment_ids]
endpoint = 'media/{media_id!s}/comment/bulk_delete/'.format(**{
'media_id': media_id})
params = {
'comment_ids_to_delete': ','.join(
[str(comment_id) for comment_id in comment_ids])
}
params.update(self.authenticated_params)
res = self._call_api(endpoint, params=params)
return res
def media_likers(self, media_id, **kwargs):
"""
Get users who have liked a post
:param media_id:
:return:
"""
endpoint = 'media/{media_id!s}/likers/'.format(**{'media_id': media_id})
res = self._call_api(endpoint, query=kwargs)
if self.auto_patch:
[ClientCompatPatch.list_user(u, drop_incompat_keys=self.drop_incompat_keys)
for u in res.get('users', [])]
return res
def media_likers_chrono(self, media_id):
"""
EXPERIMENTAL ENDPOINT, INADVISABLE TO USE.
Get users who have liked a post in chronological order
:param media_id:
:return:
"""
warnings.warn('This endpoint is experimental. Do not use.', UserWarning)
res = self._call_api('media/{media_id!s}/likers_chrono/'.format(**{'media_id': media_id}))
if self.auto_patch:
[ClientCompatPatch.list_user(u, drop_incompat_keys=self.drop_incompat_keys)
for u in res.get('users', [])]
return res
def post_like(self, media_id, module_name='feed_timeline'):
"""
Like a post
:param media_id: Media id
:param module_name: Example: 'feed_timeline', 'video_view', 'photo_view'
:return:
.. code-block:: javascript
{"status": "ok"}
"""
endpoint = 'media/{media_id!s}/like/'.format(**{'media_id': media_id})
params = {
'media_id': media_id,
'module_name': module_name,
'radio_type': self.radio_type,
}
params.update(self.authenticated_params)
# d query param = flag for double tap
res = self._call_api(endpoint, params=params, query={'d': '1'})
return res
def delete_like(self, media_id, module_name='feed_timeline'):
"""
Unlike a post
:param media_id:
:param module_name: Example: 'feed_timeline', 'video_view', 'photo_view'
:return:
.. code-block:: javascript
{"status": "ok"}
"""
endpoint = 'media/{media_id!s}/unlike/'.format(**{'media_id': media_id})
params = {
'media_id': media_id,
'module_name': module_name,
'radio_type': self.radio_type,
}
params.update(self.authenticated_params)
res = self._call_api(endpoint, params=params)
return res
def media_seen(self, reels):
"""
Mark multiple stories as seen
:param reels: A list of reel media objects, or a dict of media_ids and timings
as defined below.
.. code-block:: javascript
{
"1309763051087626108_124317_124317": ["1470355944_1470372029"],
"1309764045355643149_124317_124317": ["1470356063_1470372039"],
"1309818450243415912_124317_124317": ["1470362548_1470372060"],
"1309764653429046112_124317_124317": ["1470356135_1470372049"],
"1309209597843679372_124317_124317": ["1470289967_1470372013"]
}
where
1309763051087626108_124317 = <media_id>,
124317 = <media.owner_id>, and
1470355944_1470372029 = <media_created_time>_<view_time>
:return:
"""
if isinstance(reels, list):
# is a list of reel media
reels_seen = {}
reels = sorted(reels, key=lambda m: m['taken_at'], reverse=True)
now = int(time.time())
for i, reel in enumerate(reels):
reel_seen_at = now - min(i + 1 + randint(0, 2), max(0, now - reel['taken_at']))
reels_seen['{0!s}_{1!s}'.format(reel['id'], reel['user']['pk'])] = [
'{0!s}_{1!s}'.format(reel['taken_at'], reel_seen_at)]
params = {'reels': reels_seen}
else:
params = {'reels': reels}
params.update(self.authenticated_params)
res = self._call_api('media/seen/', params=params, version='v2')
return res
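# A minimal sketch of the dict form of ``reels`` (the ids and timestamps are
# the placeholder values from the docstring; ``api`` is hypothetical):
#
#     view_time = int(time.time())
#     reels = {
#         '1309763051087626108_124317_124317': [
#             '1470355944_{0:d}'.format(view_time)],  # <taken_at>_<view_time>
#     }
#     api.media_seen(reels)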
def comment_like(self, comment_id):
"""
Like a comment
:param comment_id:
:return:
.. code-block:: javascript
{"status": "ok"}
"""
endpoint = 'media/{comment_id!s}/comment_like/'.format(**{'comment_id': comment_id})
params = self.authenticated_params
return self._call_api(endpoint, params=params)
def comment_likers(self, comment_id):
"""
Get users who have liked a comment
:param comment_id:
:return:
"""
endpoint = 'media/{comment_id!s}/comment_likers/'.format(**{'comment_id': comment_id})
res = self._call_api(endpoint)
if self.auto_patch:
[ClientCompatPatch.list_user(u, drop_incompat_keys=self.drop_incompat_keys)
for u in res.get('users', [])]
return res
def comment_unlike(self, comment_id):
"""
Unlike a comment
:param comment_id:
:return:
.. code-block:: javascript
{"status": "ok"}
"""
endpoint = 'media/{comment_id!s}/comment_unlike/'.format(**{'comment_id': comment_id})
params = self.authenticated_params
return self._call_api(endpoint, params=params)
def save_photo(self, media_id, added_collection_ids=None):
"""
Save a photo
:param media_id: Media id
:param added_collection_ids: optional list of collection IDs to add the media to
:return:
.. code-block:: javascript
{"status": "ok"}
"""
endpoint = 'media/{media_id!s}/save/'.format(**{'media_id': media_id})
params = {'radio_type': self.radio_type}
if added_collection_ids:
if isinstance(added_collection_ids, str):
added_collection_ids = [added_collection_ids]
params['added_collection_ids'] = json.dumps(added_collection_ids, separators=(',', ':'))
params.update(self.authenticated_params)
return self._call_api(endpoint, params=params)
def unsave_photo(self, media_id, removed_collection_ids=None):
"""
Unsave a photo
:param media_id:
:param removed_collection_ids: optional list of collection IDs to remove the media from
:return:
.. code-block:: javascript
{"status": "ok"}
"""
endpoint = 'media/{media_id!s}/unsave/'.format(**{'media_id': media_id})
params = {'radio_type': self.radio_type}
if removed_collection_ids:
if isinstance(removed_collection_ids, str):
removed_collection_ids = [removed_collection_ids]
params['removed_collection_ids'] = json.dumps(removed_collection_ids, separators=(',', ':'))
params.update(self.authenticated_params)
return self._call_api(endpoint, params=params)
def disable_comments(self, media_id):
"""
Disable comments for a media
:param media_id:
:return:
.. code-block:: javascript
{"status": "ok"}
"""
endpoint = 'media/{media_id!s}/disable_comments/'.format(**{'media_id': media_id})
params = {
'_csrftoken': self.csrftoken,
'_uuid': self.uuid,
}
res = self._call_api(endpoint, params=params, unsigned=True)
return res
def enable_comments(self, media_id):
"""
Enable comments for a media
:param media_id:
:return:
.. code-block:: javascript
{"status": "ok"}
"""
endpoint = 'media/{media_id!s}/enable_comments/'.format(**{'media_id': media_id})
params = {
'_csrftoken': self.csrftoken,
'_uuid': self.uuid,
}
res = self._call_api(endpoint, params=params, unsigned=True)
return res
|
|
"""Extract reference documentation from the NumPy source tree.
"""
# copied from numpydoc/docscrape.py
import inspect
import textwrap
import re
import pydoc
from warnings import warn
from collections import namedtuple
from collections.abc import Callable, Mapping
import copy
import sys
def strip_blank_lines(l): # noqa
"Remove leading and trailing blank lines from a list of lines"
while l and not l[0].strip():
del l[0]
while l and not l[-1].strip():
del l[-1]
return l
class Reader(object):
"""A line-based string reader.
"""
def __init__(self, data):
"""
Parameters
----------
data : str
String with lines separated by '\\n'.
"""
if isinstance(data, list):
self._str = data
else:
self._str = data.split('\n') # store string as list of lines
self.reset()
def __getitem__(self, n):
return self._str[n]
def reset(self):
self._l = 0 # current line nr
def read(self):
if not self.eof():
out = self[self._l]
self._l += 1
return out
else:
return ''
def seek_next_non_empty_line(self):
for l in self[self._l:]: # noqa
if l.strip():
break
else:
self._l += 1
def eof(self):
return self._l >= len(self._str)
def read_to_condition(self, condition_func):
start = self._l
for line in self[start:]:
if condition_func(line):
return self[start:self._l]
self._l += 1
if self.eof():
return self[start:self._l+1]
return []
def read_to_next_empty_line(self):
self.seek_next_non_empty_line()
def is_empty(line):
return not line.strip()
return self.read_to_condition(is_empty)
def read_to_next_unindented_line(self):
def is_unindented(line):
return (line.strip() and (len(line.lstrip()) == len(line)))
return self.read_to_condition(is_unindented)
def peek(self, n=0):
if self._l + n < len(self._str):
return self[self._l + n]
else:
return ''
def is_empty(self):
return not ''.join(self._str).strip()
class ParseError(Exception):
def __str__(self):
message = self.args[0]
if hasattr(self, 'docstring'):
message = "%s in %r" % (message, self.docstring)
return message
Parameter = namedtuple('Parameter', ['name', 'type', 'desc'])
class NumpyDocString(Mapping):
"""Parses a numpydoc string to an abstract representation
Instances define a mapping from section title to structured data.
"""
sections = {
'Signature': '',
'Summary': [''],
'Extended Summary': [],
'Parameters': [],
'Returns': [],
'Yields': [],
'Receives': [],
'Raises': [],
'Warns': [],
'Other Parameters': [],
'Attributes': [],
'Methods': [],
'See Also': [],
'Notes': [],
'Warnings': [],
'References': '',
'Examples': '',
'index': {}
}
def __init__(self, docstring, config={}):
orig_docstring = docstring
docstring = textwrap.dedent(docstring).split('\n')
self._doc = Reader(docstring)
self._parsed_data = copy.deepcopy(self.sections)
try:
self._parse()
except ParseError as e:
e.docstring = orig_docstring
raise
def __getitem__(self, key):
return self._parsed_data[key]
def __setitem__(self, key, val):
if key not in self._parsed_data:
self._error_location("Unknown section %s" % key, error=False)
else:
self._parsed_data[key] = val
def __iter__(self):
return iter(self._parsed_data)
def __len__(self):
return len(self._parsed_data)
def _is_at_section(self):
self._doc.seek_next_non_empty_line()
if self._doc.eof():
return False
l1 = self._doc.peek().strip() # e.g. Parameters
if l1.startswith('.. index::'):
return True
l2 = self._doc.peek(1).strip() # ---------- or ==========
return l2.startswith('-'*len(l1)) or l2.startswith('='*len(l1))
def _strip(self, doc):
i = 0
j = 0
for i, line in enumerate(doc):
if line.strip():
break
for j, line in enumerate(doc[::-1]):
if line.strip():
break
return doc[i:len(doc)-j]
def _read_to_next_section(self):
section = self._doc.read_to_next_empty_line()
while not self._is_at_section() and not self._doc.eof():
if not self._doc.peek(-1).strip(): # previous line was empty
section += ['']
section += self._doc.read_to_next_empty_line()
return section
def _read_sections(self):
while not self._doc.eof():
data = self._read_to_next_section()
name = data[0].strip()
if name.startswith('..'): # index section
yield name, data[1:]
elif len(data) < 2:
yield StopIteration
else:
yield name, self._strip(data[2:])
def _parse_param_list(self, content, single_element_is_type=False):
r = Reader(content)
params = []
while not r.eof():
header = r.read().strip()
if ' : ' in header:
arg_name, arg_type = header.split(' : ')[:2]
else:
if single_element_is_type:
arg_name, arg_type = '', header
else:
arg_name, arg_type = header, ''
desc = r.read_to_next_unindented_line()
desc = dedent_lines(desc)
desc = strip_blank_lines(desc)
params.append(Parameter(arg_name, arg_type, desc))
return params
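# A worked example of what _parse_param_list() produces for a typical
# "Parameters" block (parameter names below are made up):
#
#     x : int
#         Description of x
#     y
#         Untyped entry
#
# parses to
#
#     [Parameter(name='x', type='int', desc=['Description of x']),
#      Parameter(name='y', type='', desc=['Untyped entry'])]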
# See also supports the following formats.
#
# <FUNCNAME>
# <FUNCNAME> SPACE* COLON SPACE+ <DESC> SPACE*
# <FUNCNAME> ( COMMA SPACE+ <FUNCNAME>)+ (COMMA | PERIOD)? SPACE*
# <FUNCNAME> ( COMMA SPACE+ <FUNCNAME>)* SPACE* COLON SPACE+ <DESC> SPACE*
# <FUNCNAME> is one of
# <PLAIN_FUNCNAME>
# COLON <ROLE> COLON BACKTICK <PLAIN_FUNCNAME> BACKTICK
# where
# <PLAIN_FUNCNAME> is a legal function name, and
# <ROLE> is any nonempty sequence of word characters.
# Examples: func_f1 :meth:`func_h1` :obj:`~baz.obj_r` :class:`class_j`
# <DESC> is a string describing the function.
_role = r":(?P<role>\w+):"
_funcbacktick = r"`(?P<name>(?:~\w+\.)?[a-zA-Z0-9_\.-]+)`"
_funcplain = r"(?P<name2>[a-zA-Z0-9_\.-]+)"
_funcname = r"(" + _role + _funcbacktick + r"|" + _funcplain + r")"
_funcnamenext = _funcname.replace('role', 'rolenext')
_funcnamenext = _funcnamenext.replace('name', 'namenext')
_description = r"(?P<description>\s*:(\s+(?P<desc>\S+.*))?)?\s*$"
_func_rgx = re.compile(r"^\s*" + _funcname + r"\s*")
_line_rgx = re.compile(
r"^\s*" +
r"(?P<allfuncs>" + # group for all function names
_funcname +
r"(?P<morefuncs>([,]\s+" + _funcnamenext + r")*)" +
r")" + # end of "allfuncs"
# Some function lists have a trailing comma (or period) '\s*'
r"(?P<trailing>[,\.])?" +
_description)
# Empty <DESC> elements are replaced with '..'
empty_description = '..'
def _parse_see_also(self, content):
"""
func_name : Descriptive text
continued text
another_func_name : Descriptive text
func_name1, func_name2, :meth:`func_name`, func_name3
"""
items = []
def parse_item_name(text):
"""Match ':role:`name`' or 'name'."""
m = self._func_rgx.match(text)
if not m:
raise ParseError("%s is not an item name" % text)
role = m.group('role')
name = m.group('name') if role else m.group('name2')
return name, role, m.end()
rest = []
for line in content:
if not line.strip():
continue
line_match = self._line_rgx.match(line)
description = None
if line_match:
description = line_match.group('desc')
if line_match.group('trailing') and description:
self._error_location(
'Unexpected comma or period after function list at '
'index %d of line "%s"' % (line_match.end('trailing'),
line),
error=False)
if not description and line.startswith(' '):
rest.append(line.strip())
elif line_match:
funcs = []
text = line_match.group('allfuncs')
while True:
if not text.strip():
break
name, role, match_end = parse_item_name(text)
funcs.append((name, role))
text = text[match_end:].strip()
if text and text[0] == ',':
text = text[1:].strip()
rest = list(filter(None, [description]))
items.append((funcs, rest))
else:
raise ParseError("%s is not an item name" % line)
return items
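# A worked example of the structure _parse_see_also() builds (the function
# names below are made up):
#
#     content = ["func_a : Something about a",
#                "func_b, :meth:`func_c`"]
#
# parses to
#
#     [([('func_a', None)], ['Something about a']),
#      ([('func_b', None), ('func_c', 'meth')], [])]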
def _parse_index(self, section, content):
"""
.. index: default
:refguide: something, else, and more
"""
def strip_each_in(lst):
return [s.strip() for s in lst]
out = {}
section = section.split('::')
if len(section) > 1:
out['default'] = strip_each_in(section[1].split(','))[0]
for line in content:
line = line.split(':')
if len(line) > 2:
out[line[1]] = strip_each_in(line[2].split(','))
return out
def _parse_summary(self):
"""Grab signature (if given) and summary"""
if self._is_at_section():
return
# If several signatures present, take the last one
while True:
summary = self._doc.read_to_next_empty_line()
summary_str = " ".join([s.strip() for s in summary]).strip()
compiled = re.compile(r'^([\w., ]+=)?\s*[\w\.]+\(.*\)$')
if compiled.match(summary_str):
self['Signature'] = summary_str
if not self._is_at_section():
continue
break
if summary is not None:
self['Summary'] = summary
if not self._is_at_section():
self['Extended Summary'] = self._read_to_next_section()
def _parse(self):
self._doc.reset()
self._parse_summary()
sections = list(self._read_sections())
section_names = set([section for section, content in sections])
has_returns = 'Returns' in section_names
has_yields = 'Yields' in section_names
# We could do more tests, but we are not. Arbitrarily.
if has_returns and has_yields:
msg = 'Docstring contains both a Returns and Yields section.'
raise ValueError(msg)
if not has_yields and 'Receives' in section_names:
msg = 'Docstring contains a Receives section but not Yields.'
raise ValueError(msg)
for (section, content) in sections:
if not section.startswith('..'):
section = (s.capitalize() for s in section.split(' '))
section = ' '.join(section)
if self.get(section):
self._error_location("The section %s appears twice"
% section)
if section in ('Parameters', 'Other Parameters', 'Attributes',
'Methods'):
self[section] = self._parse_param_list(content)
elif section in ('Returns', 'Yields', 'Raises', 'Warns',
'Receives'):
self[section] = self._parse_param_list(
content, single_element_is_type=True)
elif section.startswith('.. index::'):
self['index'] = self._parse_index(section, content)
elif section == 'See Also':
self['See Also'] = self._parse_see_also(content)
else:
self[section] = content
def _error_location(self, msg, error=True):
if hasattr(self, '_obj'):
# we know where the docs came from:
try:
filename = inspect.getsourcefile(self._obj)
except TypeError:
filename = None
msg = msg + (" in the docstring of %s in %s."
% (self._obj, filename))
if error:
raise ValueError(msg)
else:
warn(msg)
# string conversion routines
def _str_header(self, name, symbol='-'):
return [name, len(name)*symbol]
def _str_indent(self, doc, indent=4):
out = []
for line in doc:
out += [' '*indent + line]
return out
def _str_signature(self):
if self['Signature']:
return [self['Signature'].replace('*', r'\*')] + ['']
else:
return ['']
def _str_summary(self):
if self['Summary']:
return self['Summary'] + ['']
else:
return []
def _str_extended_summary(self):
if self['Extended Summary']:
return self['Extended Summary'] + ['']
else:
return []
def _str_param_list(self, name):
out = []
if self[name]:
out += self._str_header(name)
for param in self[name]:
parts = []
if param.name:
parts.append(param.name)
if param.type:
parts.append(param.type)
out += [' : '.join(parts)]
if param.desc and ''.join(param.desc).strip():
out += self._str_indent(param.desc)
out += ['']
return out
def _str_section(self, name):
out = []
if self[name]:
out += self._str_header(name)
out += self[name]
out += ['']
return out
def _str_see_also(self, func_role):
if not self['See Also']:
return []
out = []
out += self._str_header("See Also")
out += ['']
last_had_desc = True
for funcs, desc in self['See Also']:
assert isinstance(funcs, list)
links = []
for func, role in funcs:
if role:
link = ':%s:`%s`' % (role, func)
elif func_role:
link = ':%s:`%s`' % (func_role, func)
else:
link = "`%s`_" % func
links.append(link)
link = ', '.join(links)
out += [link]
if desc:
out += self._str_indent([' '.join(desc)])
last_had_desc = True
else:
last_had_desc = False
out += self._str_indent([self.empty_description])
if last_had_desc:
out += ['']
out += ['']
return out
def _str_index(self):
idx = self['index']
out = []
output_index = False
default_index = idx.get('default', '')
if default_index:
output_index = True
out += ['.. index:: %s' % default_index]
for section, references in idx.items():
if section == 'default':
continue
output_index = True
out += [' :%s: %s' % (section, ', '.join(references))]
if output_index:
return out
else:
return ''
def __str__(self, func_role=''):
out = []
out += self._str_signature()
out += self._str_summary()
out += self._str_extended_summary()
for param_list in ('Parameters', 'Returns', 'Yields', 'Receives',
'Other Parameters', 'Raises', 'Warns'):
out += self._str_param_list(param_list)
out += self._str_section('Warnings')
out += self._str_see_also(func_role)
for s in ('Notes', 'References', 'Examples'):
out += self._str_section(s)
for param_list in ('Attributes', 'Methods'):
out += self._str_param_list(param_list)
out += self._str_index()
return '\n'.join(out)
def indent(str, indent=4): # noqa
indent_str = ' '*indent
if str is None:
return indent_str
lines = str.split('\n')
return '\n'.join(indent_str + l for l in lines) # noqa
def dedent_lines(lines):
"""Deindent a list of lines maximally"""
return textwrap.dedent("\n".join(lines)).split("\n")
def header(text, style='-'):
return text + '\n' + style*len(text) + '\n'
class FunctionDoc(NumpyDocString):
def __init__(self, func, role='func', doc=None, config={}):
self._f = func
self._role = role # e.g. "func" or "meth"
if doc is None:
if func is None:
raise ValueError("No function or docstring given")
doc = inspect.getdoc(func) or ''
NumpyDocString.__init__(self, doc, config)
def get_func(self):
func_name = getattr(self._f, '__name__', self.__class__.__name__)
if inspect.isclass(self._f):
func = getattr(self._f, '__call__', self._f.__init__)
else:
func = self._f
return func, func_name
def __str__(self):
out = ''
func, func_name = self.get_func()
roles = {'func': 'function',
'meth': 'method'}
if self._role:
if self._role not in roles:
print("Warning: invalid role %s" % self._role)
out += '.. %s:: %s\n \n\n' % (roles.get(self._role, ''),
func_name)
out += super(FunctionDoc, self).__str__(func_role=self._role)
return out
class ClassDoc(NumpyDocString):
extra_public_methods = ['__call__']
def __init__(self, cls, doc=None, modulename='', func_doc=FunctionDoc,
config={}):
if not inspect.isclass(cls) and cls is not None:
raise ValueError("Expected a class or None, but got %r" % cls)
self._cls = cls
if 'sphinx' in sys.modules:
from sphinx.ext.autodoc import ALL # type: ignore[import]
else:
ALL = object()
self.show_inherited_members = config.get(
'show_inherited_class_members', True)
if modulename and not modulename.endswith('.'):
modulename += '.'
self._mod = modulename
if doc is None:
if cls is None:
raise ValueError("No class or documentation string given")
doc = pydoc.getdoc(cls)
NumpyDocString.__init__(self, doc)
_members = config.get('members', [])
if _members is ALL:
_members = None
_exclude = config.get('exclude-members', [])
if config.get('show_class_members', True) and _exclude is not ALL:
def splitlines_x(s):
if not s:
return []
else:
return s.splitlines()
for field, items in [('Methods', self.methods),
('Attributes', self.properties)]:
if not self[field]:
doc_list = []
for name in sorted(items):
if (name in _exclude or
(_members and name not in _members)):
continue
try:
doc_item = pydoc.getdoc(getattr(self._cls, name))
doc_list.append(
Parameter(name, '', splitlines_x(doc_item)))
except AttributeError:
pass # method doesn't exist
self[field] = doc_list
@property
def methods(self):
if self._cls is None:
return []
return [name for name, func in inspect.getmembers(self._cls)
if ((not name.startswith('_')
or name in self.extra_public_methods)
and isinstance(func, Callable)
and self._is_show_member(name))]
@property
def properties(self):
if self._cls is None:
return []
return [name for name, func in inspect.getmembers(self._cls)
if (not name.startswith('_') and
(func is None or isinstance(func, property) or
inspect.isdatadescriptor(func))
and self._is_show_member(name))]
def _is_show_member(self, name):
if self.show_inherited_members:
return True # show all class members
if name not in self._cls.__dict__:
return False # class member is inherited, we do not show it
return True
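# A minimal usage sketch for the parser defined above (the docstring content
# is made up):
#
#     doc = NumpyDocString(
#         "Add two numbers.\n"
#         "\n"
#         "Parameters\n"
#         "----------\n"
#         "a : int\n"
#         "    First operand.\n"
#         "b : int\n"
#         "    Second operand.\n"
#     )
#     doc['Summary']                         # -> ['Add two numbers.']
#     [p.name for p in doc['Parameters']]    # -> ['a', 'b']
#     print(str(doc))                        # re-renders in numpydoc form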
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import collections
import collections.abc
import datetime
import functools
import operator
import warnings
from typing import (
TYPE_CHECKING,
Any,
ClassVar,
Collection,
Dict,
FrozenSet,
Iterable,
List,
Optional,
Sequence,
Set,
Tuple,
Type,
Union,
)
import attr
import pendulum
from sqlalchemy import func, or_
from sqlalchemy.orm.session import Session
from airflow.compat.functools import cache
from airflow.exceptions import UnmappableOperator
from airflow.models.abstractoperator import (
DEFAULT_OWNER,
DEFAULT_POOL_SLOTS,
DEFAULT_PRIORITY_WEIGHT,
DEFAULT_QUEUE,
DEFAULT_RETRIES,
DEFAULT_RETRY_DELAY,
DEFAULT_TRIGGER_RULE,
DEFAULT_WEIGHT_RULE,
AbstractOperator,
TaskStateChangeCallback,
)
from airflow.models.pool import Pool
from airflow.serialization.enums import DagAttributeTypes
from airflow.ti_deps.deps.base_ti_dep import BaseTIDep
from airflow.ti_deps.deps.mapped_task_expanded import MappedTaskIsExpanded
from airflow.typing_compat import Literal
from airflow.utils.context import Context
from airflow.utils.helpers import is_container
from airflow.utils.operator_resources import Resources
from airflow.utils.state import State, TaskInstanceState
from airflow.utils.trigger_rule import TriggerRule
from airflow.utils.types import NOTSET
if TYPE_CHECKING:
import jinja2 # Slow import.
from airflow.models.baseoperator import BaseOperator, BaseOperatorLink
from airflow.models.dag import DAG
from airflow.models.taskinstance import TaskInstance
from airflow.models.xcom_arg import XComArg
from airflow.utils.task_group import TaskGroup
# BaseOperator.expand() can be called on an XComArg, sequence, or dict (not
# any mapping since we need the value to be ordered).
Mappable = Union[XComArg, Sequence, dict]
ValidationSource = Union[Literal["map"], Literal["partial"]]
# For isinstance() check.
@cache
def get_mappable_types() -> Tuple[type, ...]:
from airflow.models.xcom_arg import XComArg
return (XComArg, dict, list)
def validate_mapping_kwargs(op: Type["BaseOperator"], func: ValidationSource, value: Dict[str, Any]) -> None:
# use a dict so order of args is same as code order
unknown_args = value.copy()
for klass in op.mro():
init = klass.__init__ # type: ignore[misc]
try:
param_names = init._BaseOperatorMeta__param_names
except AttributeError:
continue
for name in param_names:
value = unknown_args.pop(name, NOTSET)
if func != "expand":
continue
if value is NOTSET:
continue
if isinstance(value, get_mappable_types()):
continue
type_name = type(value).__name__
error = f"{op.__name__}.expand() got an unexpected type {type_name!r} for keyword argument {name}"
raise ValueError(error)
if not unknown_args:
return # If we have no args left to check: stop looking at the MRO chain.
if len(unknown_args) == 1:
error = f"an unexpected keyword argument {unknown_args.popitem()[0]!r}"
else:
names = ", ".join(repr(n) for n in unknown_args)
error = f"unexpected keyword arguments {names}"
raise TypeError(f"{op.__name__}.{func}() got {error}")
def prevent_duplicates(kwargs1: Dict[str, Any], kwargs2: Dict[str, Any], *, fail_reason: str) -> None:
duplicated_keys = set(kwargs1).intersection(kwargs2)
if not duplicated_keys:
return
if len(duplicated_keys) == 1:
raise TypeError(f"{fail_reason} argument: {duplicated_keys.pop()}")
duplicated_keys_display = ", ".join(sorted(duplicated_keys))
raise TypeError(f"{fail_reason} arguments: {duplicated_keys_display}")
def ensure_xcomarg_return_value(arg: Any) -> None:
from airflow.models.xcom_arg import XCOM_RETURN_KEY, XComArg
if isinstance(arg, XComArg):
if arg.key != XCOM_RETURN_KEY:
raise ValueError(f"cannot map over XCom with custom key {arg.key!r} from {arg.operator}")
elif not is_container(arg):
return
elif isinstance(arg, collections.abc.Mapping):
for v in arg.values():
ensure_xcomarg_return_value(v)
elif isinstance(arg, collections.abc.Iterable):
for v in arg:
ensure_xcomarg_return_value(v)
@attr.define(kw_only=True, repr=False)
class OperatorPartial:
"""An "intermediate state" returned by ``BaseOperator.partial()``.
This only exists at DAG-parsing time; the only intended usage is for the
user to call ``.expand()`` on it at some point (usually in a method chain) to
create a ``MappedOperator`` to add into the DAG.
"""
operator_class: Type["BaseOperator"]
kwargs: Dict[str, Any]
_expand_called: bool = False # Set when expand() is called to ease user debugging.
def __attrs_post_init__(self):
from airflow.operators.subdag import SubDagOperator
if issubclass(self.operator_class, SubDagOperator):
raise TypeError("Mapping over deprecated SubDagOperator is not supported")
validate_mapping_kwargs(self.operator_class, "partial", self.kwargs)
def __repr__(self) -> str:
args = ", ".join(f"{k}={v!r}" for k, v in self.kwargs.items())
return f"{self.operator_class.__name__}.partial({args})"
def __del__(self):
if not self._expand_called:
warnings.warn(f"{self!r} was never mapped!")
def expand(self, **mapped_kwargs: "Mappable") -> "MappedOperator":
from airflow.operators.dummy import DummyOperator
validate_mapping_kwargs(self.operator_class, "expand", mapped_kwargs)
prevent_duplicates(self.kwargs, mapped_kwargs, fail_reason="mapping already partial")
ensure_xcomarg_return_value(mapped_kwargs)
partial_kwargs = self.kwargs.copy()
task_id = partial_kwargs.pop("task_id")
params = partial_kwargs.pop("params")
dag = partial_kwargs.pop("dag")
task_group = partial_kwargs.pop("task_group")
start_date = partial_kwargs.pop("start_date")
end_date = partial_kwargs.pop("end_date")
op = MappedOperator(
operator_class=self.operator_class,
mapped_kwargs=mapped_kwargs,
partial_kwargs=partial_kwargs,
task_id=task_id,
params=params,
deps=MappedOperator.deps_for(self.operator_class),
operator_extra_links=self.operator_class.operator_extra_links,
template_ext=self.operator_class.template_ext,
template_fields=self.operator_class.template_fields,
ui_color=self.operator_class.ui_color,
ui_fgcolor=self.operator_class.ui_fgcolor,
is_dummy=issubclass(self.operator_class, DummyOperator),
task_module=self.operator_class.__module__,
task_type=self.operator_class.__name__,
dag=dag,
task_group=task_group,
start_date=start_date,
end_date=end_date,
)
self._expand_called = True
return op
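# A minimal sketch of the partial()/expand() flow this class supports
# (``BashOperator`` and the surrounding DAG are assumed examples, not part of
# this module):
#
#     with DAG(dag_id="example", start_date=pendulum.datetime(2022, 1, 1)):
#         BashOperator.partial(task_id="echo").expand(
#             bash_command=["echo 1", "echo 2", "echo 3"],
#         )
#
# ``.partial()`` returns an OperatorPartial; ``.expand()`` validates the
# mapped kwargs and builds the MappedOperator defined below.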
@attr.define(kw_only=True)
class MappedOperator(AbstractOperator):
"""Object representing a mapped operator in a DAG."""
operator_class: Union[Type["BaseOperator"], str]
mapped_kwargs: Dict[str, "Mappable"]
partial_kwargs: Dict[str, Any]
# Needed for serialization.
task_id: str
params: Optional[dict]
deps: FrozenSet[BaseTIDep]
operator_extra_links: Collection["BaseOperatorLink"]
template_ext: Collection[str]
template_fields: Collection[str]
ui_color: str
ui_fgcolor: str
_is_dummy: bool
_task_module: str
_task_type: str
dag: Optional["DAG"]
task_group: Optional["TaskGroup"]
start_date: Optional[pendulum.DateTime]
end_date: Optional[pendulum.DateTime]
upstream_task_ids: Set[str] = attr.ib(factory=set, init=False)
downstream_task_ids: Set[str] = attr.ib(factory=set, init=False)
is_mapped: ClassVar[bool] = True
subdag: None = None # Since we don't support SubDagOperator, this is always None.
def __repr__(self):
return f"<Mapped({self._task_type}): {self.task_id}>"
def __attrs_post_init__(self):
from airflow.models.xcom_arg import XComArg
self._validate_argument_count()
if self.task_group:
self.task_group.add(self)
if self.dag:
self.dag.add_task(self)
for k, v in self.mapped_kwargs.items():
if k in self.template_fields:
XComArg.apply_upstream_relationship(self, v)
for k, v in self.partial_kwargs.items():
if k in self.template_fields:
XComArg.apply_upstream_relationship(self, v)
@classmethod
@cache
def get_serialized_fields(cls):
# Not using 'cls' here since we only want to serialize base fields.
return frozenset(attr.fields_dict(MappedOperator)) - {
"dag",
"deps",
"is_mapped",
"subdag",
"task_group",
"upstream_task_ids",
}
@staticmethod
@cache
def deps_for(operator_class: Type["BaseOperator"]) -> FrozenSet[BaseTIDep]:
operator_deps = operator_class.deps
if not isinstance(operator_deps, collections.abc.Set):
raise UnmappableOperator(
f"'deps' must be a set defined as a class-level variable on {operator_class.__name__}, "
f"not a {type(operator_deps).__name__}"
)
return operator_deps | {MappedTaskIsExpanded()}
def _validate_argument_count(self) -> None:
"""Validate mapping arguments by unmapping with mocked values.
This ensures the user passed enough arguments in the DAG definition for
the operator to work in the task runner. This does not guarantee the
arguments are *valid* (that depends on the actual mapping values), but
makes sure there are *enough* of them.
"""
if isinstance(self.operator_class, str):
return # No need to validate deserialized operator.
self.operator_class.validate_mapped_arguments(**self._get_unmap_kwargs())
@property
def task_type(self) -> str:
"""Implementing Operator."""
return self._task_type
@property
def inherits_from_dummy_operator(self) -> bool:
"""Implementing Operator."""
return self._is_dummy
@property
def roots(self) -> Sequence[AbstractOperator]:
"""Implementing DAGNode."""
return [self]
@property
def leaves(self) -> Sequence[AbstractOperator]:
"""Implementing DAGNode."""
return [self]
@property
def owner(self) -> str: # type: ignore[override]
return self.partial_kwargs.get("owner", DEFAULT_OWNER)
@property
def email(self) -> Union[None, str, Iterable[str]]:
return self.partial_kwargs.get("email")
@property
def trigger_rule(self) -> TriggerRule:
return self.partial_kwargs.get("trigger_rule", DEFAULT_TRIGGER_RULE)
@property
def depends_on_past(self) -> bool:
return bool(self.partial_kwargs.get("depends_on_past"))
@property
def wait_for_downstream(self) -> bool:
return bool(self.partial_kwargs.get("wait_for_downstream"))
@property
def retries(self) -> Optional[int]:
return self.partial_kwargs.get("retries", DEFAULT_RETRIES)
@property
def queue(self) -> str:
return self.partial_kwargs.get("queue", DEFAULT_QUEUE)
@property
def pool(self) -> str:
return self.partial_kwargs.get("pool", Pool.DEFAULT_POOL_NAME)
@property
def pool_slots(self) -> Optional[str]:
return self.partial_kwargs.get("pool_slots", DEFAULT_POOL_SLOTS)
@property
def execution_timeout(self) -> Optional[datetime.timedelta]:
return self.partial_kwargs.get("execution_timeout")
@property
def retry_delay(self) -> datetime.timedelta:
return self.partial_kwargs.get("retry_delay", DEFAULT_RETRY_DELAY)
@property
def retry_exponential_backoff(self) -> bool:
return bool(self.partial_kwargs.get("retry_exponential_backoff"))
@property
def priority_weight(self) -> int: # type: ignore[override]
return self.partial_kwargs.get("priority_weight", DEFAULT_PRIORITY_WEIGHT)
@property
def weight_rule(self) -> int: # type: ignore[override]
return self.partial_kwargs.get("weight_rule", DEFAULT_WEIGHT_RULE)
@property
def sla(self) -> Optional[datetime.timedelta]:
return self.partial_kwargs.get("sla")
@property
def max_active_tis_per_dag(self) -> Optional[int]:
return self.partial_kwargs.get("max_active_tis_per_dag")
@property
def resources(self) -> Optional[Resources]:
return self.partial_kwargs.get("resources")
@property
def on_execute_callback(self) -> Optional[TaskStateChangeCallback]:
return self.partial_kwargs.get("on_execute_callback")
@property
def on_failure_callback(self) -> Optional[TaskStateChangeCallback]:
return self.partial_kwargs.get("on_failure_callback")
@property
def on_retry_callback(self) -> Optional[TaskStateChangeCallback]:
return self.partial_kwargs.get("on_retry_callback")
@property
def on_success_callback(self) -> Optional[TaskStateChangeCallback]:
return self.partial_kwargs.get("on_success_callback")
@property
def run_as_user(self) -> Optional[str]:
return self.partial_kwargs.get("run_as_user")
@property
def executor_config(self) -> dict:
return self.partial_kwargs.get("executor_config", {})
@property
def inlets(self) -> Optional[Any]:
return self.partial_kwargs.get("inlets", None)
@property
def outlets(self) -> Optional[Any]:
return self.partial_kwargs.get("outlets", None)
def get_dag(self) -> Optional["DAG"]:
"""Implementing Operator."""
return self.dag
def serialize_for_task_group(self) -> Tuple[DagAttributeTypes, Any]:
"""Implementing DAGNode."""
return DagAttributeTypes.OP, self.task_id
def _get_unmap_kwargs(self) -> Dict[str, Any]:
return {
"task_id": self.task_id,
"dag": self.dag,
"task_group": self.task_group,
"params": self.params,
"start_date": self.start_date,
"end_date": self.end_date,
**self.partial_kwargs,
**self.mapped_kwargs,
}
def unmap(self) -> "BaseOperator":
"""Get the "normal" Operator after applying the current mapping."""
dag = self.dag
if not dag:
raise RuntimeError("Cannot unmap a task without a DAG")
if isinstance(self.operator_class, str):
raise RuntimeError("Cannot unmap a deserialized operator")
dag._remove_task(self.task_id)
return self.operator_class(**self._get_unmap_kwargs())
def _get_expansion_kwargs(self) -> Dict[str, "Mappable"]:
"""The kwargs to calculate expansion length against.
This is ``self.mapped_kwargs`` for classic operators because kwargs to
``BaseOperator.expand()`` contribute to operator arguments.
"""
return self.mapped_kwargs
def _get_map_lengths(self, run_id: str, *, session: Session) -> Dict[str, int]:
# TODO: Find a way to cache this.
from airflow.models.taskmap import TaskMap
from airflow.models.xcom_arg import XComArg
expansion_kwargs = self._get_expansion_kwargs()
# Populate literal mapped arguments first.
map_lengths: Dict[str, int] = collections.defaultdict(int)
map_lengths.update((k, len(v)) for k, v in expansion_kwargs.items() if not isinstance(v, XComArg))
# Build a reverse mapping of what arguments each task contributes to.
dep_keys: Dict[str, Set[str]] = collections.defaultdict(set)
for k, v in expansion_kwargs.items():
if not isinstance(v, XComArg):
continue
dep_keys[v.operator.task_id].add(k)
taskmap_query = session.query(TaskMap.task_id, TaskMap.length).filter(
TaskMap.dag_id == self.dag_id,
TaskMap.run_id == run_id,
TaskMap.task_id.in_(list(dep_keys)),
)
for task_id, length in taskmap_query:
for mapped_arg_name in dep_keys[task_id]:
map_lengths[mapped_arg_name] += length
if len(map_lengths) < len(expansion_kwargs):
keys = ", ".join(repr(k) for k in sorted(set(expansion_kwargs).difference(map_lengths)))
raise RuntimeError(f"Failed to populate all mapping metadata; missing: {keys}")
return map_lengths
def expand_mapped_task(self, run_id: str, *, session: Session) -> Sequence["TaskInstance"]:
"""Create the mapped task instances for mapped task.
:return: The mapped task instances, in ascending order by map index.
"""
from airflow.models.taskinstance import TaskInstance
from airflow.settings import task_instance_mutation_hook
total_length = functools.reduce(operator.mul, self._get_map_lengths(run_id, session=session).values())
state: Optional[TaskInstanceState] = None
unmapped_ti: Optional[TaskInstance] = (
session.query(TaskInstance)
.filter(
TaskInstance.dag_id == self.dag_id,
TaskInstance.task_id == self.task_id,
TaskInstance.run_id == run_id,
TaskInstance.map_index == -1,
or_(TaskInstance.state.in_(State.unfinished), TaskInstance.state.is_(None)),
)
.one_or_none()
)
ret: List[TaskInstance] = []
if unmapped_ti:
# The unmapped task instance still exists and is unfinished, i.e. we
# haven't tried to run it before.
if total_length < 1:
# If the upstream maps this to a zero-length value, simply mark the
# unmapped task instance as SKIPPED (if needed).
self.log.info(
"Marking %s as SKIPPED since the map has %d values to expand",
unmapped_ti,
total_length,
)
unmapped_ti.state = TaskInstanceState.SKIPPED
session.flush()
return ret
# Otherwise convert this into the first mapped index, and create
# TaskInstance for other indexes.
unmapped_ti.map_index = 0
state = unmapped_ti.state
self.log.debug("Updated in place to become %s", unmapped_ti)
ret.append(unmapped_ti)
indexes_to_map = range(1, total_length)
else:
# Only create "missing" ones.
current_max_mapping = (
session.query(func.max(TaskInstance.map_index))
.filter(
TaskInstance.dag_id == self.dag_id,
TaskInstance.task_id == self.task_id,
TaskInstance.run_id == run_id,
)
.scalar()
)
indexes_to_map = range(current_max_mapping + 1, total_length)
for index in indexes_to_map:
# TODO: Make more efficient with bulk_insert_mappings/bulk_save_mappings.
# TODO: Change `TaskInstance` ctor to take Operator, not BaseOperator
ti = TaskInstance(self, run_id=run_id, map_index=index, state=state) # type: ignore
self.log.debug("Expanding TIs upserted %s", ti)
task_instance_mutation_hook(ti)
ti = session.merge(ti)
ti.task = self
ret.append(ti)
# Set to "REMOVED" any (old) TaskInstances with map indices greater
# than the current map value
session.query(TaskInstance).filter(
TaskInstance.dag_id == self.dag_id,
TaskInstance.task_id == self.task_id,
TaskInstance.run_id == run_id,
TaskInstance.map_index >= total_length,
).update({TaskInstance.state: TaskInstanceState.REMOVED})
session.flush()
return ret
def prepare_for_execution(self) -> "MappedOperator":
# Since a mapped operator cannot be used for execution, and an unmapped
# BaseOperator needs to be created later (see render_template_fields),
# we don't need to create a copy of the MappedOperator here.
return self
def render_template_fields(
self,
context: Context,
jinja_env: Optional["jinja2.Environment"] = None,
) -> Optional["BaseOperator"]:
"""Template all attributes listed in template_fields.
Different from the BaseOperator implementation, this renders the
template fields on the *unmapped* BaseOperator.
:param context: Dict with values to apply on content
:param jinja_env: Jinja environment
:return: The unmapped, populated BaseOperator
"""
if not jinja_env:
jinja_env = self.get_template_env()
unmapped_task = self.unmap()
self._do_render_template_fields(
parent=unmapped_task,
template_fields=unmapped_task.template_fields,
context=context,
jinja_env=jinja_env,
seen_oids=set(),
)
return unmapped_task
def _render_template_field(
self,
key: str,
value: Any,
context: Context,
jinja_env: Optional["jinja2.Environment"] = None,
seen_oids: Optional[Set] = None,
*,
session: Session,
) -> Any:
"""Override the ordinary template rendering to add more logic.
Specifically, if we're rendering a mapped argument, we need to "unmap"
the value as well to assign it to the unmapped operator.
"""
value = super()._render_template_field(key, value, context, jinja_env, seen_oids, session=session)
return self._expand_mapped_field(key, value, context, session=session)
def _expand_mapped_field(self, key: str, value: Any, context: Context, *, session: Session) -> Any:
map_index = context["ti"].map_index
if map_index < 0:
return value
expansion_kwargs = self._get_expansion_kwargs()
all_lengths = self._get_map_lengths(context["run_id"], session=session)
def _find_index_for_this_field(index: int) -> int:
# Need to use self.mapped_kwargs for the original argument order.
for mapped_key in reversed(list(expansion_kwargs)):
mapped_length = all_lengths[mapped_key]
if mapped_length < 1:
raise RuntimeError(f"cannot expand field mapped to length {mapped_length!r}")
if mapped_key == key:
return index % mapped_length
index //= mapped_length
return -1
found_index = _find_index_for_this_field(map_index)
if found_index < 0:
return value
if isinstance(value, collections.abc.Sequence):
return value[found_index]
if not isinstance(value, dict):
raise TypeError(f"can't map over value of type {type(value)}")
for i, (k, v) in enumerate(value.items()):
if i == found_index:
return k, v
raise IndexError(f"index {map_index} is over mapped length")
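# A worked example of the index decomposition in _find_index_for_this_field,
# assuming expansion kwargs {'x': [x0, x1, x2], 'y': [y0, y1]} so
# total_length == 3 * 2 == 6. For map_index == 4 the loop walks the keys in
# reverse insertion order ('y' first):
#
#     key 'y': 4 % 2 == 0          -> y index 0
#     key 'x': (4 // 2) % 3 == 2   -> x index 2
#
# i.e. task instance 4 is rendered with x[2] and y[0].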
|
|
"""SMA Solar Webconnect interface."""
from __future__ import annotations
import logging
from typing import Any
import pysma
import voluptuous as vol
from homeassistant.components.sensor import (
PLATFORM_SCHEMA,
STATE_CLASS_MEASUREMENT,
STATE_CLASS_TOTAL_INCREASING,
SensorEntity,
)
from homeassistant.config_entries import SOURCE_IMPORT, ConfigEntry
from homeassistant.const import (
CONF_HOST,
CONF_PASSWORD,
CONF_PATH,
CONF_SENSORS,
CONF_SSL,
CONF_VERIFY_SSL,
DEVICE_CLASS_ENERGY,
DEVICE_CLASS_POWER,
ENERGY_KILO_WATT_HOUR,
POWER_WATT,
)
from homeassistant.core import HomeAssistant
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import DeviceInfo
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.typing import ConfigType, StateType
from homeassistant.helpers.update_coordinator import (
CoordinatorEntity,
DataUpdateCoordinator,
)
from .const import (
CONF_CUSTOM,
CONF_FACTOR,
CONF_GROUP,
CONF_KEY,
CONF_UNIT,
DOMAIN,
GROUPS,
PYSMA_COORDINATOR,
PYSMA_DEVICE_INFO,
PYSMA_SENSORS,
)
_LOGGER = logging.getLogger(__name__)
def _check_sensor_schema(conf: dict[str, Any]) -> dict[str, Any]:
"""Check sensors and attributes are valid."""
try:
valid = [s.name for s in pysma.sensor.Sensors()]
valid += pysma.const.LEGACY_MAP.keys()
except (ImportError, AttributeError):
return conf
customs = list(conf[CONF_CUSTOM])
for sensor in conf[CONF_SENSORS]:
if sensor in customs:
_LOGGER.warning(
"All custom sensors will be added automatically, no need to include them in sensors: %s",
sensor,
)
elif sensor not in valid:
raise vol.Invalid(f"{sensor} does not exist")
return conf
CUSTOM_SCHEMA = vol.Any(
{
vol.Required(CONF_KEY): vol.All(cv.string, vol.Length(min=13, max=15)),
vol.Required(CONF_UNIT): cv.string,
vol.Optional(CONF_FACTOR, default=1): vol.Coerce(float),
vol.Optional(CONF_PATH): vol.All(cv.ensure_list, [cv.string]),
}
)
PLATFORM_SCHEMA = vol.All(
PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_SSL, default=False): cv.boolean,
vol.Optional(CONF_VERIFY_SSL, default=True): cv.boolean,
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(CONF_GROUP, default=GROUPS[0]): vol.In(GROUPS),
vol.Optional(CONF_SENSORS, default=[]): vol.Any(
cv.schema_with_slug_keys(cv.ensure_list), # will be deprecated
vol.All(cv.ensure_list, [str]),
),
vol.Optional(CONF_CUSTOM, default={}): cv.schema_with_slug_keys(
CUSTOM_SCHEMA
),
},
extra=vol.PREVENT_EXTRA,
),
_check_sensor_schema,
)
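# For reference, a YAML fragment that the (deprecated) platform schema above
# accepts; all values are placeholders and the custom key/sensor names are
# assumptions, not taken from pysma:
#
#   sensor:
#     - platform: sma
#       host: 192.168.1.100
#       ssl: true
#       verify_ssl: false
#       password: !secret sma_password
#       sensors:
#         - pv_power
#         - total_yield
#       custom:
#         solar_daily:
#           key: "6400_00262200"   # 13-15 characters, per CUSTOM_SCHEMA
#           unit: kWh
#           factor: 1000
#
# async_setup_platform below only forwards this data into the import flow.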
async def async_setup_platform(
hass: HomeAssistant,
    config: ConfigType,
async_add_entities: AddEntitiesCallback,
discovery_info=None,
) -> None:
"""Import the platform into a config entry."""
_LOGGER.warning(
"Loading SMA via platform setup is deprecated. "
"Please remove it from your configuration"
)
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_IMPORT}, data=config
)
)
async def async_setup_entry(
hass: HomeAssistant,
config_entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Set up SMA sensors."""
sma_data = hass.data[DOMAIN][config_entry.entry_id]
coordinator = sma_data[PYSMA_COORDINATOR]
used_sensors = sma_data[PYSMA_SENSORS]
device_info = sma_data[PYSMA_DEVICE_INFO]
entities = []
for sensor in used_sensors:
entities.append(
SMAsensor(
coordinator,
config_entry.unique_id,
device_info,
sensor,
)
)
async_add_entities(entities)
class SMAsensor(CoordinatorEntity, SensorEntity):
"""Representation of a SMA sensor."""
def __init__(
self,
coordinator: DataUpdateCoordinator,
config_entry_unique_id: str,
device_info: dict[str, Any],
pysma_sensor: pysma.sensor.Sensor,
) -> None:
"""Initialize the sensor."""
super().__init__(coordinator)
self._sensor = pysma_sensor
self._enabled_default = self._sensor.enabled
self._config_entry_unique_id = config_entry_unique_id
self._device_info = device_info
if self.unit_of_measurement == ENERGY_KILO_WATT_HOUR:
self._attr_state_class = STATE_CLASS_TOTAL_INCREASING
self._attr_device_class = DEVICE_CLASS_ENERGY
if self.unit_of_measurement == POWER_WATT:
self._attr_state_class = STATE_CLASS_MEASUREMENT
self._attr_device_class = DEVICE_CLASS_POWER
# Set sensor enabled to False.
# Will be enabled by async_added_to_hass if actually used.
self._sensor.enabled = False
@property
def name(self) -> str:
"""Return the name of the sensor."""
return self._sensor.name
@property
def native_value(self) -> StateType:
"""Return the state of the sensor."""
return self._sensor.value
@property
def native_unit_of_measurement(self) -> str | None:
"""Return the unit the value is expressed in."""
return self._sensor.unit
@property
def unique_id(self) -> str:
"""Return a unique identifier for this sensor."""
return (
f"{self._config_entry_unique_id}-{self._sensor.key}_{self._sensor.key_idx}"
)
@property
    def device_info(self) -> DeviceInfo | None:
"""Return the device information."""
if not self._device_info:
return None
return {
"identifiers": {(DOMAIN, self._config_entry_unique_id)},
"name": self._device_info["name"],
"manufacturer": self._device_info["manufacturer"],
"model": self._device_info["type"],
"sw_version": self._device_info["sw_version"],
}
@property
def entity_registry_enabled_default(self) -> bool:
"""Return if the entity should be enabled when first added to the entity registry."""
return self._enabled_default
async def async_added_to_hass(self) -> None:
"""Run when entity about to be added to hass."""
await super().async_added_to_hass()
self._sensor.enabled = True
async def async_will_remove_from_hass(self) -> None:
"""Run when entity will be removed from hass."""
await super().async_will_remove_from_hass()
self._sensor.enabled = False
|
|
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import logging as std_logging
import os
from oslo_config import cfg
from oslo_log import log as logging
from tempest.test_discover import plugins
# TODO(marun) Replace use of oslo_config's global ConfigOpts
# (cfg.CONF) instance with a local instance (cfg.ConfigOpts()) once
# the cli tests move to the clients. The cli tests rely on oslo
# incubator modules that use the global cfg.CONF.
_CONF = cfg.CONF
def register_opt_group(conf, opt_group, options):
if opt_group:
conf.register_group(opt_group)
for opt in options:
conf.register_opt(opt, group=getattr(opt_group, 'name', None))
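# Usage sketch with a hypothetical group (for illustration only; the real
# groups are declared below): register_opt_group attaches a list of options
# to a named section, or to [DEFAULT] when the group is None.
#
#   example_group = cfg.OptGroup(name='example', title='Example options')
#   ExampleOpts = [cfg.StrOpt('endpoint', help='Example endpoint URL')]
#   register_opt_group(_CONF, example_group, ExampleOpts)
#   # _CONF.example.endpoint is now available (None until set in tempest.conf)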
auth_group = cfg.OptGroup(name='auth',
title="Options for authentication and credentials")
AuthGroup = [
cfg.StrOpt('test_accounts_file',
help="Path to the yaml file that contains the list of "
"credentials to use for running tests. If used when "
"running in parallel you have to make sure sufficient "
"credentials are provided in the accounts file. For "
"example if no tests with roles are being run it requires "
"at least `2 * CONC` distinct accounts configured in "
" the `test_accounts_file`, with CONC == the "
"number of concurrent test processes."),
cfg.BoolOpt('allow_tenant_isolation',
default=True,
help="Allows test cases to create/destroy tenants and "
"users. This option requires that OpenStack Identity "
"API admin credentials are known. If false, isolated "
"test cases and parallel execution, can still be "
"achieved configuring a list of test accounts",
deprecated_opts=[cfg.DeprecatedOpt('allow_tenant_isolation',
group='compute'),
cfg.DeprecatedOpt('allow_tenant_isolation',
group='orchestration')]),
cfg.ListOpt('tempest_roles',
help="Roles to assign to all users created by tempest",
default=[]),
cfg.StrOpt('default_credentials_domain_name',
default='Default',
help="Default domain used when getting v3 credentials. "
"This is the name keystone uses for v2 compatibility.",
deprecated_opts=[cfg.DeprecatedOpt(
'tenant_isolation_domain_name',
group='auth')]),
cfg.BoolOpt('create_isolated_networks',
default=True,
help="If allow_tenant_isolation is set to True and Neutron is "
"enabled Tempest will try to create a usable network, "
"subnet, and router when needed for each tenant it "
"creates. However in some neutron configurations, like "
"with VLAN provider networks, this doesn't work. So if "
"set to False the isolated networks will not be created"),
]
identity_group = cfg.OptGroup(name='identity',
title="Keystone Configuration Options")
IdentityGroup = [
cfg.StrOpt('catalog_type',
default='identity',
help="Catalog type of the Identity service."),
cfg.BoolOpt('disable_ssl_certificate_validation',
default=False,
help="Set to True if using self-signed SSL certificates."),
cfg.StrOpt('ca_certificates_file',
default=None,
help='Specify a CA bundle file to use in verifying a '
'TLS (https) server certificate.'),
cfg.StrOpt('uri',
help="Full URI of the OpenStack Identity API (Keystone), v2"),
cfg.StrOpt('uri_v3',
help='Full URI of the OpenStack Identity API (Keystone), v3'),
cfg.StrOpt('auth_version',
default='v2',
help="Identity API version to be used for authentication "
"for API tests."),
cfg.StrOpt('region',
default='RegionOne',
help="The identity region name to use. Also used as the other "
"services' region name unless they are set explicitly. "
"If no such region is found in the service catalog, the "
"first found one is used."),
cfg.StrOpt('v2_admin_endpoint_type',
default='adminURL',
choices=['public', 'admin', 'internal',
'publicURL', 'adminURL', 'internalURL'],
help="The admin endpoint type to use for OpenStack Identity "
"(Keystone) API v2"),
cfg.StrOpt('v2_public_endpoint_type',
default='publicURL',
choices=['public', 'admin', 'internal',
'publicURL', 'adminURL', 'internalURL'],
help="The public endpoint type to use for OpenStack Identity "
"(Keystone) API v2",
deprecated_opts=[cfg.DeprecatedOpt('endpoint_type',
group='identity')]),
cfg.StrOpt('v3_endpoint_type',
default='adminURL',
choices=['public', 'admin', 'internal',
'publicURL', 'adminURL', 'internalURL'],
help="The endpoint type to use for OpenStack Identity "
"(Keystone) API v3"),
cfg.StrOpt('username',
help="Username to use for Nova API requests."),
cfg.StrOpt('tenant_name',
help="Tenant name to use for Nova API requests."),
cfg.StrOpt('admin_role',
default='admin',
help="Role required to administrate keystone."),
cfg.StrOpt('password',
help="API key to use when authenticating.",
secret=True),
cfg.StrOpt('domain_name',
help="Domain name for authentication (Keystone V3)."
"The same domain applies to user and project"),
cfg.StrOpt('alt_username',
help="Username of alternate user to use for Nova API "
"requests."),
cfg.StrOpt('alt_tenant_name',
help="Alternate user's Tenant name to use for Nova API "
"requests."),
cfg.StrOpt('alt_password',
help="API key to use when authenticating as alternate user.",
secret=True),
cfg.StrOpt('alt_domain_name',
help="Alternate domain name for authentication (Keystone V3)."
"The same domain applies to user and project"),
cfg.StrOpt('admin_username',
help="Administrative Username to use for "
"Keystone API requests."),
cfg.StrOpt('admin_tenant_name',
help="Administrative Tenant name to use for Keystone API "
"requests."),
cfg.StrOpt('admin_password',
help="API key to use when authenticating as admin.",
secret=True),
cfg.StrOpt('admin_domain_name',
help="Admin domain name for authentication (Keystone V3)."
"The same domain applies to user and project"),
cfg.StrOpt('default_domain_id',
default='default',
help="ID of the default domain"),
]
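# Illustrative tempest.conf fragment for the two groups above; every value is
# a placeholder and only the option names come from the lists above:
#
#   [auth]
#   allow_tenant_isolation = True
#   tempest_roles = Member
#
#   [identity]
#   uri = http://keystone.example.com:5000/v2.0/
#   uri_v3 = http://keystone.example.com:5000/v3/
#   auth_version = v2
#   admin_username = admin
#   admin_tenant_name = admin
#   admin_password = secret
#   admin_domain_name = Default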
identity_feature_group = cfg.OptGroup(name='identity-feature-enabled',
title='Enabled Identity Features')
IdentityFeatureGroup = [
cfg.BoolOpt('trust',
default=True,
help='Does the identity service have delegation and '
'impersonation enabled'),
cfg.BoolOpt('api_v2',
default=True,
help='Is the v2 identity API enabled'),
cfg.BoolOpt('api_v3',
default=True,
help='Is the v3 identity API enabled'),
]
compute_group = cfg.OptGroup(name='compute',
title='Compute Service Options')
ComputeGroup = [
cfg.StrOpt('image_ref',
help="Valid primary image reference to be used in tests. "
"This is a required option"),
cfg.StrOpt('image_ref_alt',
help="Valid secondary image reference to be used in tests. "
"This is a required option, but if only one image is "
"available duplicate the value of image_ref above"),
cfg.StrOpt('flavor_ref',
default="1",
help="Valid primary flavor to use in tests."),
cfg.StrOpt('flavor_ref_alt',
default="2",
help='Valid secondary flavor to be used in tests.'),
cfg.StrOpt('image_ssh_user',
default="root",
help="User name used to authenticate to an instance."),
cfg.StrOpt('image_ssh_password',
default="password",
help="Password used to authenticate to an instance."),
cfg.StrOpt('image_alt_ssh_user',
default="root",
help="User name used to authenticate to an instance using "
"the alternate image."),
cfg.IntOpt('build_interval',
default=1,
help="Time in seconds between build status checks."),
cfg.IntOpt('build_timeout',
default=300,
help="Timeout in seconds to wait for an instance to build. "
"Other services that do not define build_timeout will "
"inherit this value."),
cfg.StrOpt('ssh_shell_prologue',
default="set -eu -o pipefail; PATH=$$PATH:/sbin;",
help="Shell fragments to use before executing a command "
"when sshing to a guest."),
cfg.StrOpt('ssh_auth_method',
default='keypair',
help="Auth method used for authenticate to the instance. "
"Valid choices are: keypair, configured, adminpass "
"and disabled. "
"Keypair: start the servers with a ssh keypair. "
"Configured: use the configured user and password. "
"Adminpass: use the injected adminPass. "
"Disabled: avoid using ssh when it is an option."),
cfg.StrOpt('ssh_connect_method',
default='floating',
help="How to connect to the instance? "
"fixed: using the first ip belongs the fixed network "
"floating: creating and using a floating ip."),
cfg.StrOpt('ssh_user',
default='root',
help="User name used to authenticate to an instance."),
cfg.IntOpt('ping_timeout',
default=120,
help="Timeout in seconds to wait for ping to "
"succeed."),
cfg.IntOpt('ping_size',
default=56,
help="The packet size for ping packets originating "
"from remote linux hosts"),
cfg.IntOpt('ping_count',
default=1,
help="The number of ping packets originating from remote "
"linux hosts"),
cfg.IntOpt('ready_wait',
default=0,
help="Additional wait time for clean state, when there is "
"no OS-EXT-STS extension available"),
cfg.StrOpt('fixed_network_name',
help="Name of the fixed network that is visible to all test "
"tenants. If multiple networks are available for a tenant"
" this is the network which will be used for creating "
"servers if tempest does not create a network or a "
"network is not specified elsewhere. It may be used for "
"ssh validation only if floating IPs are disabled."),
cfg.StrOpt('network_for_ssh',
default='public',
help="Network used for SSH connections. Ignored if "
"use_floatingip_for_ssh=true or run_validation=false."),
cfg.BoolOpt('use_floatingip_for_ssh',
default=True,
help="Does SSH use Floating IPs?"),
cfg.StrOpt('catalog_type',
default='compute',
help="Catalog type of the Compute service."),
cfg.StrOpt('region',
default='',
help="The compute region name to use. If empty, the value "
"of identity.region is used instead. If no such region "
"is found in the service catalog, the first found one is "
"used."),
cfg.StrOpt('endpoint_type',
default='publicURL',
choices=['public', 'admin', 'internal',
'publicURL', 'adminURL', 'internalURL'],
help="The endpoint type to use for the compute service."),
cfg.StrOpt('volume_device_name',
default='vdb',
help="Expected device name when a volume is attached to "
"an instance"),
cfg.IntOpt('shelved_offload_time',
default=0,
help='Time in seconds before a shelved instance is eligible '
                    'for removal from a host. -1 never offload, 0 offload '
                    'when shelved. This should match the shelved_offload_time '
                    'in nova.conf, and some tests will run for as long as '
                    'that time.'),
cfg.StrOpt('floating_ip_range',
default='10.0.0.0/29',
help='Unallocated floating IP range, which will be used to '
'test the floating IP bulk feature for CRUD operation. '
'This block must not overlap an existing floating IP '
'pool.')
]
compute_features_group = cfg.OptGroup(name='compute-feature-enabled',
title="Enabled Compute Service Features")
ComputeFeaturesGroup = [
cfg.BoolOpt('disk_config',
default=True,
help="If false, skip disk config tests"),
cfg.ListOpt('api_extensions',
default=['all'],
help='A list of enabled compute extensions with a special '
'entry all which indicates every extension is enabled. '
'Each extension should be specified with alias name. '
'Empty list indicates all extensions are disabled'),
cfg.BoolOpt('change_password',
default=False,
help="Does the test environment support changing the admin "
"password?"),
cfg.BoolOpt('console_output',
default=True,
help="Does the test environment support obtaining instance "
"serial console output?"),
cfg.BoolOpt('resize',
default=False,
help="Does the test environment support resizing?"),
cfg.BoolOpt('pause',
default=True,
help="Does the test environment support pausing?"),
cfg.BoolOpt('shelve',
default=True,
help="Does the test environment support shelving/unshelving?"),
cfg.BoolOpt('suspend',
default=True,
help="Does the test environment support suspend/resume?"),
cfg.BoolOpt('live_migration',
default=True,
help="Does the test environment support live migration "
"available?"),
cfg.BoolOpt('metadata_service',
default=True,
help="Does the test environment support metadata service? "
"Ignored unless validation.run_validation=true."),
cfg.BoolOpt('block_migration_for_live_migration',
default=False,
help="Does the test environment use block devices for live "
"migration"),
cfg.BoolOpt('block_migrate_cinder_iscsi',
default=False,
help="Does the test environment block migration support "
"cinder iSCSI volumes. Note, libvirt doesn't support this, "
"see https://bugs.launchpad.net/nova/+bug/1398999"),
# TODO(gilliard): Remove live_migrate_paused_instances at juno-eol.
cfg.BoolOpt('live_migrate_paused_instances',
default=False,
help="Does the test system allow live-migration of paused "
"instances? Note, this is more than just the ANDing of "
"paused and live_migrate, but all 3 should be set to True "
"to run those tests"),
cfg.BoolOpt('vnc_console',
default=False,
help='Enable VNC console. This configuration value should '
                     'be the same as [nova.vnc]->vnc_enabled in nova.conf'),
cfg.BoolOpt('spice_console',
default=False,
help='Enable Spice console. This configuration value should '
                     'be the same as [nova.spice]->enabled in nova.conf'),
cfg.BoolOpt('rdp_console',
default=False,
help='Enable RDP console. This configuration value should '
                     'be the same as [nova.rdp]->enabled in nova.conf'),
cfg.BoolOpt('rescue',
default=True,
help='Does the test environment support instance rescue '
'mode?'),
cfg.BoolOpt('enable_instance_password',
default=True,
help='Enables returning of the instance password by the '
'relevant server API calls such as create, rebuild '
'or rescue.'),
cfg.BoolOpt('interface_attach',
default=True,
help='Does the test environment support dynamic network '
'interface attachment?'),
cfg.BoolOpt('snapshot',
default=True,
help='Does the test environment support creating snapshot '
'images of running instances?'),
cfg.BoolOpt('ec2_api',
default=True,
help='Does the test environment have the ec2 api running?'),
# TODO(mriedem): Remove preserve_ports once juno-eol happens.
cfg.BoolOpt('preserve_ports',
default=False,
help='Does Nova preserve preexisting ports from Neutron '
'when deleting an instance? This should be set to True '
'if testing Kilo+ Nova.'),
cfg.BoolOpt('attach_encrypted_volume',
default=True,
help='Does the test environment support attaching an '
'encrypted volume to a running server instance? This may '
'depend on the combination of compute_driver in nova and '
'the volume_driver(s) in cinder.'),
# TODO(mriedem): Remove allow_duplicate_networks once kilo-eol happens
# since the option was removed from nova in Liberty and is the default
# behavior starting in Liberty.
cfg.BoolOpt('allow_duplicate_networks',
default=False,
help='Does the test environment support creating instances '
'with multiple ports on the same network? This is only '
'valid when using Neutron.'),
]
image_group = cfg.OptGroup(name='image',
title="Image Service Options")
ImageGroup = [
cfg.StrOpt('catalog_type',
default='image',
help='Catalog type of the Image service.'),
cfg.StrOpt('region',
default='',
help="The image region name to use. If empty, the value "
"of identity.region is used instead. If no such region "
"is found in the service catalog, the first found one is "
"used."),
cfg.StrOpt('endpoint_type',
default='publicURL',
choices=['public', 'admin', 'internal',
'publicURL', 'adminURL', 'internalURL'],
help="The endpoint type to use for the image service."),
cfg.StrOpt('http_image',
default='http://download.cirros-cloud.net/0.3.1/'
'cirros-0.3.1-x86_64-uec.tar.gz',
help='http accessible image'),
cfg.IntOpt('build_timeout',
default=300,
help="Timeout in seconds to wait for an image to "
"become available."),
cfg.IntOpt('build_interval',
default=1,
help="Time in seconds between image operation status "
"checks.")
]
image_feature_group = cfg.OptGroup(name='image-feature-enabled',
title='Enabled image service features')
ImageFeaturesGroup = [
cfg.BoolOpt('api_v2',
default=True,
help="Is the v2 image API enabled"),
cfg.BoolOpt('api_v1',
default=True,
help="Is the v1 image API enabled"),
cfg.BoolOpt('deactivate_image',
default=False,
help="Is the deactivate-image feature enabled."
" The feature has been integrated since Kilo."),
]
network_group = cfg.OptGroup(name='network',
title='Network Service Options')
NetworkGroup = [
cfg.StrOpt('catalog_type',
default='network',
help='Catalog type of the Neutron service.'),
cfg.StrOpt('region',
default='',
help="The network region name to use. If empty, the value "
"of identity.region is used instead. If no such region "
"is found in the service catalog, the first found one is "
"used."),
cfg.StrOpt('endpoint_type',
default='publicURL',
choices=['public', 'admin', 'internal',
'publicURL', 'adminURL', 'internalURL'],
help="The endpoint type to use for the network service."),
cfg.StrOpt('tenant_network_cidr',
default="10.100.0.0/16",
help="The cidr block to allocate tenant ipv4 subnets from"),
cfg.IntOpt('tenant_network_mask_bits',
default=28,
help="The mask bits for tenant ipv4 subnets"),
cfg.StrOpt('tenant_network_v6_cidr',
default="2003::/48",
help="The cidr block to allocate tenant ipv6 subnets from"),
cfg.IntOpt('tenant_network_v6_mask_bits',
default=64,
help="The mask bits for tenant ipv6 subnets"),
cfg.BoolOpt('tenant_networks_reachable',
default=False,
help="Whether tenant networks can be reached directly from "
"the test client. This must be set to True when the "
"'fixed' ssh_connect_method is selected."),
cfg.StrOpt('public_network_id',
default="",
help="Id of the public network that provides external "
"connectivity"),
cfg.StrOpt('floating_network_name',
help="Default floating network name. Used to allocate floating "
"IPs when neutron is enabled."),
cfg.StrOpt('public_router_id',
default="",
help="Id of the public router that provides external "
"connectivity. This should only be used when Neutron's "
"'allow_overlapping_ips' is set to 'False' in "
"neutron.conf. usually not needed past 'Grizzly' release"),
cfg.IntOpt('build_timeout',
default=300,
help="Timeout in seconds to wait for network operation to "
"complete."),
cfg.IntOpt('build_interval',
default=1,
help="Time in seconds between network operation status "
"checks."),
cfg.ListOpt('dns_servers',
default=["8.8.8.8", "8.8.4.4"],
help="List of dns servers which should be used"
" for subnet creation"),
cfg.StrOpt('port_vnic_type',
choices=[None, 'normal', 'direct', 'macvtap'],
help="vnic_type to use when Launching instances"
" with pre-configured ports."
" Supported ports are:"
" ['normal','direct','macvtap']"),
]
network_feature_group = cfg.OptGroup(name='network-feature-enabled',
title='Enabled network service features')
NetworkFeaturesGroup = [
cfg.BoolOpt('ipv6',
default=True,
help="Allow the execution of IPv6 tests"),
cfg.ListOpt('api_extensions',
default=['all'],
help="A list of enabled network extensions with a special "
"entry all which indicates every extension is enabled. "
"Empty list indicates all extensions are disabled. "
"To get the list of extensions run: 'neutron ext-list'"),
cfg.BoolOpt('ipv6_subnet_attributes',
default=False,
help="Allow the execution of IPv6 subnet tests that use "
"the extended IPv6 attributes ipv6_ra_mode "
"and ipv6_address_mode"
),
cfg.BoolOpt('port_admin_state_change',
default=True,
help="Does the test environment support changing"
" port admin state"),
]
messaging_group = cfg.OptGroup(name='messaging',
title='Messaging Service')
MessagingGroup = [
cfg.StrOpt('catalog_type',
default='messaging',
help='Catalog type of the Messaging service.'),
cfg.IntOpt('max_queues_per_page',
default=20,
help='The maximum number of queue records per page when '
'listing queues'),
cfg.IntOpt('max_queue_metadata',
default=65536,
help='The maximum metadata size for a queue'),
cfg.IntOpt('max_messages_per_page',
default=20,
               help='The maximum number of queue messages per page when '
                    'listing or posting messages'),
cfg.IntOpt('max_message_size',
default=262144,
help='The maximum size of a message body'),
cfg.IntOpt('max_messages_per_claim',
default=20,
help='The maximum number of messages per claim'),
cfg.IntOpt('max_message_ttl',
default=1209600,
help='The maximum ttl for a message'),
cfg.IntOpt('max_claim_ttl',
default=43200,
help='The maximum ttl for a claim'),
cfg.IntOpt('max_claim_grace',
default=43200,
help='The maximum grace period for a claim'),
]
validation_group = cfg.OptGroup(name='validation',
title='SSH Validation options')
ValidationGroup = [
cfg.BoolOpt('run_validation',
default=False,
help='Enable ssh on created servers and creation of additional'
' validation resources to enable remote access',
deprecated_opts=[cfg.DeprecatedOpt('run_ssh',
group='compute')]),
cfg.BoolOpt('security_group',
default=True,
help='Enable/disable security groups.'),
cfg.BoolOpt('security_group_rules',
default=True,
help='Enable/disable security group rules.'),
cfg.StrOpt('connect_method',
default='floating',
choices=['fixed', 'floating'],
help='Default IP type used for validation: '
'-fixed: uses the first IP belonging to the fixed network '
'-floating: creates and uses a floating IP'),
cfg.StrOpt('auth_method',
default='keypair',
choices=['keypair'],
help='Default authentication method to the instance. '
'Only ssh via keypair is supported for now. '
'Additional methods will be handled in a separate spec.'),
cfg.IntOpt('ip_version_for_ssh',
default=4,
help='Default IP version for ssh connections.',
deprecated_opts=[cfg.DeprecatedOpt('ip_version_for_ssh',
group='compute')]),
cfg.IntOpt('ping_timeout',
default=120,
help='Timeout in seconds to wait for ping to succeed.'),
cfg.IntOpt('connect_timeout',
default=60,
help='Timeout in seconds to wait for the TCP connection to be '
'successful.',
deprecated_opts=[cfg.DeprecatedOpt('ssh_channel_timeout',
group='compute')]),
cfg.IntOpt('ssh_timeout',
default=300,
help='Timeout in seconds to wait for the ssh banner.',
deprecated_opts=[cfg.DeprecatedOpt('ssh_timeout',
group='compute')]),
]
volume_group = cfg.OptGroup(name='volume',
title='Block Storage Options')
VolumeGroup = [
cfg.IntOpt('build_interval',
default=1,
help='Time in seconds between volume availability checks.'),
cfg.IntOpt('build_timeout',
default=300,
help='Timeout in seconds to wait for a volume to become '
'available.'),
cfg.StrOpt('catalog_type',
default='volume',
help="Catalog type of the Volume Service"),
cfg.StrOpt('region',
default='',
help="The volume region name to use. If empty, the value "
"of identity.region is used instead. If no such region "
"is found in the service catalog, the first found one is "
"used."),
cfg.StrOpt('endpoint_type',
default='publicURL',
choices=['public', 'admin', 'internal',
'publicURL', 'adminURL', 'internalURL'],
help="The endpoint type to use for the volume service."),
cfg.StrOpt('backend1_name',
default='BACKEND_1',
help="Name of the backend1 (must be declared in cinder.conf)"),
cfg.StrOpt('backend2_name',
default='BACKEND_2',
help="Name of the backend2 (must be declared in cinder.conf)"),
cfg.StrOpt('storage_protocol',
default='iSCSI',
help='Backend protocol to target when creating volume types'),
cfg.StrOpt('vendor_name',
default='Open Source',
help='Backend vendor to target when creating volume types'),
cfg.StrOpt('disk_format',
default='raw',
help='Disk format to use when copying a volume to image'),
cfg.IntOpt('volume_size',
default=1,
help='Default size in GB for volumes created by volumes tests'),
]
volume_feature_group = cfg.OptGroup(name='volume-feature-enabled',
title='Enabled Cinder Features')
VolumeFeaturesGroup = [
cfg.BoolOpt('multi_backend',
default=False,
help="Runs Cinder multi-backend test (requires 2 backends)"),
cfg.BoolOpt('backup',
default=True,
help='Runs Cinder volumes backup test'),
cfg.BoolOpt('snapshot',
default=True,
help='Runs Cinder volume snapshot test'),
cfg.BoolOpt('clone',
default=True,
help='Runs Cinder volume clone test'),
cfg.ListOpt('api_extensions',
default=['all'],
help='A list of enabled volume extensions with a special '
'entry all which indicates every extension is enabled. '
'Empty list indicates all extensions are disabled'),
cfg.BoolOpt('api_v1',
default=True,
help="Is the v1 volume API enabled"),
cfg.BoolOpt('api_v2',
default=True,
help="Is the v2 volume API enabled"),
cfg.BoolOpt('bootable',
default=False,
                help='Update bootable status of a volume. '
                     'Not implemented on icehouse.')
]
object_storage_group = cfg.OptGroup(name='object-storage',
title='Object Storage Service Options')
ObjectStoreGroup = [
cfg.StrOpt('catalog_type',
default='object-store',
help="Catalog type of the Object-Storage service."),
cfg.StrOpt('region',
default='',
help="The object-storage region name to use. If empty, the "
"value of identity.region is used instead. If no such "
"region is found in the service catalog, the first found "
"one is used."),
cfg.StrOpt('endpoint_type',
default='publicURL',
choices=['public', 'admin', 'internal',
'publicURL', 'adminURL', 'internalURL'],
help="The endpoint type to use for the object-store service."),
cfg.IntOpt('container_sync_timeout',
default=600,
help="Number of seconds to time on waiting for a container "
"to container synchronization complete."),
cfg.IntOpt('container_sync_interval',
default=5,
help="Number of seconds to wait while looping to check the "
"status of a container to container synchronization"),
cfg.StrOpt('operator_role',
default='Member',
help="Role to add to users created for swift tests to "
"enable creating containers"),
cfg.StrOpt('reseller_admin_role',
default='ResellerAdmin',
help="User role that has reseller admin"),
cfg.StrOpt('realm_name',
default='realm1',
help="Name of sync realm. A sync realm is a set of clusters "
"that have agreed to allow container syncing with each "
"other. Set the same realm name as Swift's "
"container-sync-realms.conf"),
cfg.StrOpt('cluster_name',
default='name1',
help="One name of cluster which is set in the realm whose name "
"is set in 'realm_name' item in this file. Set the "
"same cluster name as Swift's container-sync-realms.conf"),
]
object_storage_feature_group = cfg.OptGroup(
name='object-storage-feature-enabled',
title='Enabled object-storage features')
ObjectStoreFeaturesGroup = [
cfg.ListOpt('discoverable_apis',
default=['all'],
help="A list of the enabled optional discoverable apis. "
"A single entry, all, indicates that all of these "
"features are expected to be enabled"),
cfg.BoolOpt('container_sync',
default=True,
help="Execute (old style) container-sync tests"),
cfg.BoolOpt('object_versioning',
default=True,
help="Execute object-versioning tests"),
cfg.BoolOpt('discoverability',
default=True,
help="Execute discoverability tests"),
]
database_group = cfg.OptGroup(name='database',
title='Database Service Options')
DatabaseGroup = [
cfg.StrOpt('catalog_type',
default='database',
help="Catalog type of the Database service."),
cfg.StrOpt('db_flavor_ref',
default="1",
help="Valid primary flavor to use in database tests."),
cfg.StrOpt('db_current_version',
default="v1.0",
help="Current database version to use in database tests."),
]
orchestration_group = cfg.OptGroup(name='orchestration',
title='Orchestration Service Options')
OrchestrationGroup = [
cfg.StrOpt('catalog_type',
default='orchestration',
help="Catalog type of the Orchestration service."),
cfg.StrOpt('region',
default='',
help="The orchestration region name to use. If empty, the "
"value of identity.region is used instead. If no such "
"region is found in the service catalog, the first found "
"one is used."),
cfg.StrOpt('endpoint_type',
default='publicURL',
choices=['public', 'admin', 'internal',
'publicURL', 'adminURL', 'internalURL'],
help="The endpoint type to use for the orchestration service."),
cfg.StrOpt('stack_owner_role', default='heat_stack_owner',
help='Role required for users to be able to manage stacks'),
cfg.IntOpt('build_interval',
default=1,
help="Time in seconds between build status checks."),
cfg.IntOpt('build_timeout',
default=1200,
help="Timeout in seconds to wait for a stack to build."),
cfg.StrOpt('instance_type',
default='m1.micro',
help="Instance type for tests. Needs to be big enough for a "
"full OS plus the test workload"),
cfg.StrOpt('keypair_name',
help="Name of existing keypair to launch servers with."),
cfg.IntOpt('max_template_size',
default=524288,
help="Value must match heat configuration of the same name."),
cfg.IntOpt('max_resources_per_stack',
default=1000,
help="Value must match heat configuration of the same name."),
]
telemetry_group = cfg.OptGroup(name='telemetry',
title='Telemetry Service Options')
TelemetryGroup = [
cfg.StrOpt('catalog_type',
default='metering',
help="Catalog type of the Telemetry service."),
cfg.StrOpt('endpoint_type',
default='publicURL',
choices=['public', 'admin', 'internal',
'publicURL', 'adminURL', 'internalURL'],
help="The endpoint type to use for the telemetry service."),
cfg.BoolOpt('too_slow_to_test',
default=True,
deprecated_for_removal=True,
help="This variable is used as flag to enable "
"notification tests")
]
telemetry_feature_group = cfg.OptGroup(name='telemetry-feature-enabled',
title='Enabled Ceilometer Features')
TelemetryFeaturesGroup = [
cfg.BoolOpt('events',
default=False,
help="Runs Ceilometer event-related tests"),
]
dashboard_group = cfg.OptGroup(name="dashboard",
title="Dashboard options")
DashboardGroup = [
cfg.StrOpt('dashboard_url',
default='http://localhost/',
help="Where the dashboard can be found"),
cfg.StrOpt('login_url',
default='http://localhost/auth/login/',
help="Login page for the dashboard",
deprecated_for_removal=True),
]
data_processing_group = cfg.OptGroup(name="data_processing",
title="Data Processing options")
DataProcessingGroup = [
cfg.StrOpt('catalog_type',
default='data_processing',
help="Catalog type of the data processing service."),
cfg.StrOpt('endpoint_type',
default='publicURL',
choices=['public', 'admin', 'internal',
'publicURL', 'adminURL', 'internalURL'],
help="The endpoint type to use for the data processing "
"service."),
]
data_processing_feature_group = cfg.OptGroup(
name="data_processing-feature-enabled",
title="Enabled Data Processing features")
DataProcessingFeaturesGroup = [
cfg.ListOpt('plugins',
default=["vanilla", "hdp"],
help="List of enabled data processing plugins")
]
boto_group = cfg.OptGroup(name='boto',
title='EC2/S3 options')
BotoGroup = [
cfg.StrOpt('ec2_url',
default="http://localhost:8773/services/Cloud",
help="EC2 URL"),
cfg.StrOpt('s3_url',
default="http://localhost:8080",
help="S3 URL"),
cfg.StrOpt('aws_secret',
help="AWS Secret Key",
secret=True),
cfg.StrOpt('aws_access',
help="AWS Access Key"),
cfg.StrOpt('aws_zone',
default="nova",
help="AWS Zone for EC2 tests"),
cfg.StrOpt('s3_materials_path',
default="/opt/stack/devstack/files/images/"
"s3-materials/cirros-0.3.0",
help="S3 Materials Path"),
cfg.StrOpt('ari_manifest',
default="cirros-0.3.0-x86_64-initrd.manifest.xml",
help="ARI Ramdisk Image manifest"),
cfg.StrOpt('ami_manifest',
default="cirros-0.3.0-x86_64-blank.img.manifest.xml",
help="AMI Machine Image manifest"),
cfg.StrOpt('aki_manifest',
default="cirros-0.3.0-x86_64-vmlinuz.manifest.xml",
help="AKI Kernel Image manifest"),
cfg.StrOpt('instance_type',
default="m1.tiny",
help="Instance type"),
cfg.IntOpt('http_socket_timeout',
default=3,
help="boto Http socket timeout"),
cfg.IntOpt('num_retries',
default=1,
help="boto num_retries on error"),
cfg.IntOpt('build_timeout',
default=60,
help="Status Change Timeout"),
cfg.IntOpt('build_interval',
default=1,
help="Status Change Test Interval"),
]
stress_group = cfg.OptGroup(name='stress', title='Stress Test Options')
StressGroup = [
cfg.StrOpt('nova_logdir',
help='Directory containing log files on the compute nodes'),
cfg.IntOpt('max_instances',
default=16,
help='Maximum number of instances to create during test.'),
cfg.StrOpt('controller',
help='Controller host.'),
# new stress options
cfg.StrOpt('target_controller',
help='Controller host.'),
cfg.StrOpt('target_ssh_user',
help='ssh user.'),
cfg.StrOpt('target_private_key_path',
help='Path to private key.'),
cfg.StrOpt('target_logfiles',
help='regexp for list of log files.'),
cfg.IntOpt('log_check_interval',
default=60,
help='time (in seconds) between log file error checks.'),
cfg.IntOpt('default_thread_number_per_action',
default=4,
               help='The number of threads created while running the stress test.'),
cfg.BoolOpt('leave_dirty_stack',
default=False,
help='Prevent the cleaning (tearDownClass()) between'
' each stress test run if an exception occurs'
' during this run.'),
cfg.BoolOpt('full_clean_stack',
default=False,
help='Allows a full cleaning process after a stress test.'
                    ' Caution: this cleanup will remove every object of'
' every tenant.')
]
scenario_group = cfg.OptGroup(name='scenario', title='Scenario Test Options')
ScenarioGroup = [
cfg.StrOpt('img_dir',
default='/opt/stack/new/devstack/files/images/'
'cirros-0.3.1-x86_64-uec',
help='Directory containing image files'),
cfg.StrOpt('img_file', deprecated_name='qcow2_img_file',
default='cirros-0.3.1-x86_64-disk.img',
help='Image file name'),
cfg.StrOpt('img_disk_format',
default='qcow2',
help='Image disk format'),
cfg.StrOpt('img_container_format',
default='bare',
help='Image container format'),
cfg.DictOpt('img_properties', help='Glance image properties. '
'Use for custom images which require them'),
cfg.StrOpt('ami_img_file',
default='cirros-0.3.1-x86_64-blank.img',
help='AMI image file name'),
cfg.StrOpt('ari_img_file',
default='cirros-0.3.1-x86_64-initrd',
help='ARI image file name'),
cfg.StrOpt('aki_img_file',
default='cirros-0.3.1-x86_64-vmlinuz',
help='AKI image file name'),
cfg.StrOpt('ssh_user',
default='cirros',
help='ssh username for the image file'),
cfg.IntOpt(
'large_ops_number',
default=0,
help="specifies how many resources to request at once. Used "
"for large operations testing."),
# TODO(yfried): add support for dhcpcd
cfg.StrOpt('dhcp_client',
default='udhcpc',
choices=["udhcpc", "dhclient"],
               help='DHCP client used by images to renew DHCP lease. '
'If left empty, update operation will be skipped. '
'Supported clients: "udhcpc", "dhclient"')
]
service_available_group = cfg.OptGroup(name="service_available",
title="Available OpenStack Services")
ServiceAvailableGroup = [
cfg.BoolOpt('cinder',
default=True,
help="Whether or not cinder is expected to be available"),
cfg.BoolOpt('neutron',
default=False,
help="Whether or not neutron is expected to be available"),
cfg.BoolOpt('glance',
default=True,
help="Whether or not glance is expected to be available"),
cfg.BoolOpt('swift',
default=True,
help="Whether or not swift is expected to be available"),
cfg.BoolOpt('nova',
default=True,
help="Whether or not nova is expected to be available"),
cfg.BoolOpt('heat',
default=False,
help="Whether or not Heat is expected to be available"),
cfg.BoolOpt('ceilometer',
default=True,
help="Whether or not Ceilometer is expected to be available"),
cfg.BoolOpt('horizon',
default=True,
help="Whether or not Horizon is expected to be available"),
cfg.BoolOpt('sahara',
default=False,
help="Whether or not Sahara is expected to be available"),
cfg.BoolOpt('ironic',
default=False,
help="Whether or not Ironic is expected to be available"),
cfg.BoolOpt('trove',
default=False,
help="Whether or not Trove is expected to be available"),
cfg.BoolOpt('zaqar',
default=False,
help="Whether or not Zaqar is expected to be available"),
]
debug_group = cfg.OptGroup(name="debug",
title="Debug System")
DebugGroup = [
cfg.StrOpt('trace_requests',
default='',
help="""A regex to determine which requests should be traced.
This is a regex to match the caller for rest client requests to be able to
selectively trace calls out of specific classes and methods. It largely
exists for test development, and is not expected to be used in a real deploy
of tempest. This will be matched against the discovered ClassName:method
in the test environment.
Expected values for this field are:
* ClassName:test_method_name - traces one test_method
* ClassName:setUp(Class) - traces specific setup functions
* ClassName:tearDown(Class) - traces specific teardown functions
* ClassName:_run_cleanups - traces the cleanup functions
If nothing is specified, this feature is not enabled. To trace everything
specify .* as the regex.
""")
]
input_scenario_group = cfg.OptGroup(name="input-scenario",
title="Filters and values for"
" input scenarios")
InputScenarioGroup = [
cfg.StrOpt('image_regex',
default='^cirros-0.3.1-x86_64-uec$',
help="Matching images become parameters for scenario tests"),
cfg.StrOpt('flavor_regex',
default='^m1.nano$',
help="Matching flavors become parameters for scenario tests"),
cfg.StrOpt('non_ssh_image_regex',
default='^.*[Ww]in.*$',
help="SSH verification in tests is skipped"
"for matching images"),
cfg.StrOpt('ssh_user_regex',
default="[[\"^.*[Cc]irros.*$\", \"cirros\"]]",
help="List of user mapped to regex "
"to matching image names."),
]
baremetal_group = cfg.OptGroup(name='baremetal',
title='Baremetal provisioning service options',
help='When enabling baremetal tests, Nova '
'must be configured to use the Ironic '
'driver. The following parameters for the '
'[compute] section must be disabled: '
'console_output, interface_attach, '
                                    'live_migration, pause, rescue, resize, '
'shelve, snapshot, and suspend')
BaremetalGroup = [
cfg.StrOpt('catalog_type',
default='baremetal',
help="Catalog type of the baremetal provisioning service"),
cfg.BoolOpt('driver_enabled',
default=False,
help="Whether the Ironic nova-compute driver is enabled"),
cfg.StrOpt('driver',
default='fake',
help="Driver name which Ironic uses"),
cfg.StrOpt('endpoint_type',
default='publicURL',
choices=['public', 'admin', 'internal',
'publicURL', 'adminURL', 'internalURL'],
help="The endpoint type to use for the baremetal provisioning "
"service"),
cfg.IntOpt('active_timeout',
default=300,
help="Timeout for Ironic node to completely provision"),
cfg.IntOpt('association_timeout',
default=30,
help="Timeout for association of Nova instance and Ironic "
"node"),
cfg.IntOpt('power_timeout',
default=60,
help="Timeout for Ironic power transitions."),
cfg.IntOpt('unprovision_timeout',
default=300,
help="Timeout for unprovisioning an Ironic node. "
"Takes longer since Kilo as Ironic performs an extra "
"step in Node cleaning.")
]
negative_group = cfg.OptGroup(name='negative', title="Negative Test Options")
NegativeGroup = [
cfg.StrOpt('test_generator',
default='tempest.common.' +
'generator.negative_generator.NegativeTestGenerator',
help="Test generator class for all negative tests"),
]
DefaultGroup = [
cfg.StrOpt('resources_prefix',
default='tempest',
help="Prefix to be added when generating the name for "
"test resources. It can be used to discover all "
"resources associated with a specific test run when "
"running tempest on a real-life cloud"),
]
_opts = [
(auth_group, AuthGroup),
(compute_group, ComputeGroup),
(compute_features_group, ComputeFeaturesGroup),
(identity_group, IdentityGroup),
(identity_feature_group, IdentityFeatureGroup),
(image_group, ImageGroup),
(image_feature_group, ImageFeaturesGroup),
(network_group, NetworkGroup),
(network_feature_group, NetworkFeaturesGroup),
(messaging_group, MessagingGroup),
(validation_group, ValidationGroup),
(volume_group, VolumeGroup),
(volume_feature_group, VolumeFeaturesGroup),
(object_storage_group, ObjectStoreGroup),
(object_storage_feature_group, ObjectStoreFeaturesGroup),
(database_group, DatabaseGroup),
(orchestration_group, OrchestrationGroup),
(telemetry_group, TelemetryGroup),
(telemetry_feature_group, TelemetryFeaturesGroup),
(dashboard_group, DashboardGroup),
(data_processing_group, DataProcessingGroup),
(data_processing_feature_group, DataProcessingFeaturesGroup),
(boto_group, BotoGroup),
(stress_group, StressGroup),
(scenario_group, ScenarioGroup),
(service_available_group, ServiceAvailableGroup),
(debug_group, DebugGroup),
(baremetal_group, BaremetalGroup),
(input_scenario_group, InputScenarioGroup),
(negative_group, NegativeGroup),
(None, DefaultGroup)
]
def register_opts():
ext_plugins = plugins.TempestTestPluginManager()
# Register in-tree tempest config options
for g, o in _opts:
register_opt_group(_CONF, g, o)
# Call external plugin config option registration
ext_plugins.register_plugin_opts(_CONF)
def list_opts():
"""Return a list of oslo.config options available.
The purpose of this is to allow tools like the Oslo sample config file
generator to discover the options exposed to users.
"""
ext_plugins = plugins.TempestTestPluginManager()
opt_list = [(getattr(g, 'name', None), o) for g, o in _opts]
opt_list.extend(ext_plugins.get_plugin_options_list())
return opt_list
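# Sketch of how list_opts is typically consumed (the entry-point wiring below
# lives in setup.cfg and is shown here as an assumption, not verbatim):
#
#   [entry_points]
#   oslo.config.opts =
#       tempest.config = tempest.config:list_opts
#
# With that namespace registered, the sample file can be regenerated with:
#
#   oslo-config-generator --namespace tempest.config > etc/tempest.conf.sample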
# this should never be called outside of this class
class TempestConfigPrivate(object):
"""Provides OpenStack configuration information."""
DEFAULT_CONFIG_DIR = os.path.join(
os.path.abspath(os.path.dirname(os.path.dirname(__file__))),
"etc")
DEFAULT_CONFIG_FILE = "tempest.conf"
def __getattr__(self, attr):
# Handles config options from the default group
return getattr(_CONF, attr)
def _set_attrs(self):
self.auth = _CONF.auth
self.compute = _CONF.compute
self.compute_feature_enabled = _CONF['compute-feature-enabled']
self.identity = _CONF.identity
self.identity_feature_enabled = _CONF['identity-feature-enabled']
self.image = _CONF.image
self.image_feature_enabled = _CONF['image-feature-enabled']
self.network = _CONF.network
self.network_feature_enabled = _CONF['network-feature-enabled']
self.validation = _CONF.validation
self.volume = _CONF.volume
self.volume_feature_enabled = _CONF['volume-feature-enabled']
self.object_storage = _CONF['object-storage']
self.object_storage_feature_enabled = _CONF[
'object-storage-feature-enabled']
self.database = _CONF.database
self.orchestration = _CONF.orchestration
self.messaging = _CONF.messaging
self.telemetry = _CONF.telemetry
self.telemetry_feature_enabled = _CONF['telemetry-feature-enabled']
self.dashboard = _CONF.dashboard
self.data_processing = _CONF.data_processing
self.data_processing_feature_enabled = _CONF[
'data_processing-feature-enabled']
self.boto = _CONF.boto
self.stress = _CONF.stress
self.scenario = _CONF.scenario
self.service_available = _CONF.service_available
self.debug = _CONF.debug
self.baremetal = _CONF.baremetal
self.input_scenario = _CONF['input-scenario']
self.negative = _CONF.negative
_CONF.set_default('domain_name',
self.auth.default_credentials_domain_name,
group='identity')
_CONF.set_default('alt_domain_name',
self.auth.default_credentials_domain_name,
group='identity')
def __init__(self, parse_conf=True, config_path=None):
"""Initialize a configuration from a conf directory and conf file."""
super(TempestConfigPrivate, self).__init__()
config_files = []
failsafe_path = "/etc/tempest/" + self.DEFAULT_CONFIG_FILE
if config_path:
path = config_path
else:
# Environment variables override defaults...
conf_dir = os.environ.get('TEMPEST_CONFIG_DIR',
self.DEFAULT_CONFIG_DIR)
conf_file = os.environ.get('TEMPEST_CONFIG',
self.DEFAULT_CONFIG_FILE)
path = os.path.join(conf_dir, conf_file)
if not os.path.isfile(path):
path = failsafe_path
# only parse the config file if we expect one to exist. This is needed
# to remove an issue with the config file up to date checker.
if parse_conf:
config_files.append(path)
logging.register_options(_CONF)
if os.path.isfile(path):
_CONF([], project='tempest', default_config_files=config_files)
else:
_CONF([], project='tempest')
logging.setup(_CONF, 'tempest')
LOG = logging.getLogger('tempest')
LOG.info("Using tempest config file %s" % path)
register_opts()
self._set_attrs()
if parse_conf:
_CONF.log_opt_values(LOG, std_logging.DEBUG)
class TempestConfigProxy(object):
_config = None
_path = None
_extra_log_defaults = [
('paramiko.transport', std_logging.INFO),
('requests.packages.urllib3.connectionpool', std_logging.WARN),
]
def _fix_log_levels(self):
"""Tweak the oslo log defaults."""
for name, level in self._extra_log_defaults:
std_logging.getLogger(name).setLevel(level)
def __getattr__(self, attr):
if not self._config:
self._fix_log_levels()
self._config = TempestConfigPrivate(config_path=self._path)
return getattr(self._config, attr)
def set_config_path(self, path):
self._path = path
CONF = TempestConfigProxy()
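# Usage sketch: importing CONF is cheap because TempestConfigProxy defers all
# parsing until the first attribute access, at which point TempestConfigPrivate
# loads tempest.conf (or the TEMPEST_CONFIG_DIR / TEMPEST_CONFIG overrides).
# The config path below is a placeholder.
#
#   from tempest import config
#   CONF = config.CONF
#   CONF.set_config_path('/opt/stack/tempest/etc/tempest.conf')  # optional
#   timeout = CONF.compute.build_timeout        # triggers the lazy load
#   if CONF.service_available.neutron:
#       ...                                     # network tests may run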
|
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import pandas as pd
import os.path
import os
import json
import utils
import numpy as np
import pprint
from model import Model, Schuetze, Satz, Event
import uuid
import plots
import gzip
class JSONModel(Model):
"""
A JSON based model. Stores everything in a JSON file.
    On start, the data is loaded from the file.
    Each new entry is appended to the file.
"""
def __init__(self, settings):
super(JSONModel, self).__init__(settings)
self.openfunction = open
self.data = []
self.schuetzen = []
self.events = []
def load(self):
"""
        Load the data from the store dir, or create the store dir
        if it does not exist.
"""
if os.path.exists(self.settings.store_dir):
self.data = self._load_data(self.settings.store_dir)
self.schuetzen = self._load_schuetzen(self.settings.store_dir)
self.events = self._load_events(self.settings.store_dir)
else:
os.makedirs(self.settings.store_dir)
def _generic_load_data(self, filename, cls):
data = []
filepath = os.path.join(self.settings.store_dir, filename)
if not os.path.exists(filepath):
return data
with self.openfunction(filepath, "r") as f:
for line in f:
instance = cls.from_json(line)
data.append(instance)
return data
def _load_data(self, dirpath):
"""
load the data from the file.
"""
return self._generic_load_data(self.settings.data_file, JSONSatz)
def _load_schuetzen(self, dirpath):
return self._generic_load_data(self.settings.schuetzen_file, JSONSchuetze)
def _load_events(self, dirpath):
return self._generic_load_data(self.settings.event_file, JSONEvent)
def _generic_add_data(self, obj, filename):
filepath = os.path.join(self.settings.store_dir, filename)
with self.openfunction(filepath, "a") as f:
string = obj.to_json()
f.write(string)
f.write("\n")
    def _generic_rewrite_data(self, datalist, filename):
filepath = os.path.join(self.settings.store_dir, filename)
with self.openfunction(filepath, "w") as f:
for obj in datalist:
f.write(obj.to_json())
f.write("\n")
def add_schuetze(self, name, surname):
"""
add the schuetze with the given name and surname.
"""
new = JSONSchuetze(name, surname)
if not self.schuetze_exists(new.get_fullname()):
self.schuetzen.append(new)
self._generic_add_data(new, self.settings.schuetzen_file)
return True
else:
return False
def add_satz(self, fullname, result, date):
s = self.get_schuetze_by_fullname(fullname)
entry = JSONSatz(
schuetze_uuid=s.uuid,
result=result,
date=utils.to_timestamp(date))
self.data.append(entry)
self._generic_add_data(entry, self.settings.data_file)
return entry
def add_event(self, date, description):
entry = JSONEvent(
date=utils.to_timestamp(date),
description=description)
self.events.append(entry)
self._generic_add_data(entry, self.settings.event_file)
def delete_schuetze(self, fullname):
"""
Delete the schuetze with this fullname.
"""
dels = self.get_schuetze_by_fullname(fullname)
if dels is not None:
remove = []
for entry in self.data:
if entry.schuetze_uuid == dels.uuid:
remove.append(entry)
for entry in remove:
self.data.remove(entry)
self.schuetzen.remove(dels)
            self._generic_rewrite_data(self.schuetzen, self.settings.schuetzen_file)
            self._generic_rewrite_data(self.data, self.settings.data_file)
def delete_satz(self, uuids):
"""
delete the given set/list of satze identified by their uuid.
"""
remove = []
for entry in self.data:
if entry.uuid in uuids:
remove.append(entry)
for entry in remove:
self.data.remove(entry)
        self._generic_rewrite_data(self.data, self.settings.data_file)
def delete_events(self, uuids):
remove = []
for entry in self.events:
if entry.uuid in uuids:
remove.append(entry)
for entry in remove:
self.events.remove(entry)
        self._generic_rewrite_data(self.events, self.settings.event_file)
def get_all_schuetzen(self):
return self.schuetzen
def get_all_satz_entries(self):
return self.data
def get_all_event_entries(self):
return self.events
class CompressedJSONModel(JSONModel):
def __init__(self, settings):
super(CompressedJSONModel, self).__init__(settings)
self.openfunction = gzip.open
class JSONSchuetze(Schuetze):
@classmethod
def from_json(cls, jsonstring):
dic = json.loads(jsonstring)
dic["uuid"] = uuid.UUID(dic["uuid"])
return cls(**dic)
def to_json(self):
dic = {
"name":self.name,
"surname": self.surname,
"uuid": str(self.uuid)
}
return json.dumps(dic)
class JSONEvent(Event):
@classmethod
def from_json(cls, jsonstring):
d = json.loads(jsonstring)
d["date"] = float(d["date"])
if "uuid" in d:
d["uuid"] = uuid.UUID(d["uuid"])
return cls(**d)
def to_json(self):
dic = {
"date": self.date,
"description": self.description,
"uuid": str(self.uuid)
}
return json.dumps(dic)
def __repr__(self):
return "JSONEvent[date=%s,uuid=%s]" % (self.date, str(self.uuid))
class JSONSatz(Satz):
@classmethod
def from_json(cls, jsonstring):
d = json.loads(jsonstring)
d["date"] = float(d["date"])
d["result"] = float(d["result"])
d["schuetze_uuid"] = uuid.UUID(d["schuetze_uuid"])
if "uuid" in d:
d["uuid"] = uuid.UUID(d["uuid"])
return cls(**d)
def to_json(self):
dic = self.to_dict()
dic["uuid"] = str(dic["uuid"])
dic["schuetze_uuid"] = str(dic["schuetze_uuid"])
return json.dumps(dic)
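# Illustrative sketch of the on-disk format (all values are made up): each
# *_file in the store dir is an append-only file with one JSON document per
# line, written by _generic_add_data and read back through the from_json
# classmethods above.
#
#   # schuetzen file
#   {"name": "Max", "surname": "Muster", "uuid": "0b9cf1e6-..."}
#   # data file
#   {"schuetze_uuid": "0b9cf1e6-...", "result": 45.0, "date": 1514764800.0, "uuid": "..."}
#
# CompressedJSONModel writes exactly the same lines, only through gzip.open.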
|
|
import io
import os
import threading
import unittest
import urllib.robotparser
from test import support
from http.server import BaseHTTPRequestHandler, HTTPServer
class BaseRobotTest:
robots_txt = ''
agent = 'test_robotparser'
good = []
bad = []
site_maps = None
def setUp(self):
lines = io.StringIO(self.robots_txt).readlines()
self.parser = urllib.robotparser.RobotFileParser()
self.parser.parse(lines)
def get_agent_and_url(self, url):
if isinstance(url, tuple):
agent, url = url
return agent, url
return self.agent, url
def test_good_urls(self):
for url in self.good:
agent, url = self.get_agent_and_url(url)
with self.subTest(url=url, agent=agent):
self.assertTrue(self.parser.can_fetch(agent, url))
def test_bad_urls(self):
for url in self.bad:
agent, url = self.get_agent_and_url(url)
with self.subTest(url=url, agent=agent):
self.assertFalse(self.parser.can_fetch(agent, url))
def test_site_maps(self):
self.assertEqual(self.parser.site_maps(), self.site_maps)
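# Outside this harness the same parser is normally driven via set_url() and
# read(); the tests below feed robots.txt text in directly instead. A minimal
# stand-alone sketch (the rules here are examples, not taken from a test case):
#
#   rp = urllib.robotparser.RobotFileParser()
#   rp.parse(io.StringIO("User-agent: *\nDisallow: /tmp/\n").readlines())
#   rp.can_fetch("test_robotparser", "/tmp/x")        # -> False
#   rp.can_fetch("test_robotparser", "/index.html")   # -> True
#   rp.site_maps()                                    # -> None (no Sitemap lines)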
class UserAgentWildcardTest(BaseRobotTest, unittest.TestCase):
robots_txt = """\
User-agent: *
Disallow: /cyberworld/map/ # This is an infinite virtual URL space
Disallow: /tmp/ # these will soon disappear
Disallow: /foo.html
"""
good = ['/', '/test.html']
bad = ['/cyberworld/map/index.html', '/tmp/xxx', '/foo.html']
class CrawlDelayAndCustomAgentTest(BaseRobotTest, unittest.TestCase):
robots_txt = """\
# robots.txt for http://www.example.com/
User-agent: *
Crawl-delay: 1
Request-rate: 3/15
Disallow: /cyberworld/map/ # This is an infinite virtual URL space
# Cybermapper knows where to go.
User-agent: cybermapper
Disallow:
"""
good = ['/', '/test.html', ('cybermapper', '/cyberworld/map/index.html')]
bad = ['/cyberworld/map/index.html']
class SitemapTest(BaseRobotTest, unittest.TestCase):
robots_txt = """\
# robots.txt for http://www.example.com/
User-agent: *
Sitemap: http://www.gstatic.com/s2/sitemaps/profiles-sitemap.xml
Sitemap: http://www.google.com/hostednews/sitemap_index.xml
Request-rate: 3/15
Disallow: /cyberworld/map/ # This is an infinite virtual URL space
"""
good = ['/', '/test.html']
bad = ['/cyberworld/map/index.html']
site_maps = ['http://www.gstatic.com/s2/sitemaps/profiles-sitemap.xml',
'http://www.google.com/hostednews/sitemap_index.xml']
class RejectAllRobotsTest(BaseRobotTest, unittest.TestCase):
robots_txt = """\
# go away
User-agent: *
Disallow: /
"""
good = []
bad = ['/cyberworld/map/index.html', '/', '/tmp/']
class BaseRequestRateTest(BaseRobotTest):
request_rate = None
crawl_delay = None
def test_request_rate(self):
parser = self.parser
for url in self.good + self.bad:
agent, url = self.get_agent_and_url(url)
with self.subTest(url=url, agent=agent):
self.assertEqual(parser.crawl_delay(agent), self.crawl_delay)
parsed_request_rate = parser.request_rate(agent)
self.assertEqual(parsed_request_rate, self.request_rate)
if self.request_rate is not None:
self.assertIsInstance(
parsed_request_rate,
urllib.robotparser.RequestRate
)
self.assertEqual(
parsed_request_rate.requests,
self.request_rate.requests
)
self.assertEqual(
parsed_request_rate.seconds,
self.request_rate.seconds
)
class EmptyFileTest(BaseRequestRateTest, unittest.TestCase):
robots_txt = ''
good = ['/foo']
class CrawlDelayAndRequestRateTest(BaseRequestRateTest, unittest.TestCase):
robots_txt = """\
User-agent: figtree
Crawl-delay: 3
Request-rate: 9/30
Disallow: /tmp
Disallow: /a%3cd.html
Disallow: /a%2fb.html
Disallow: /%7ejoe/index.html
"""
agent = 'figtree'
request_rate = urllib.robotparser.RequestRate(9, 30)
crawl_delay = 3
good = [('figtree', '/foo.html')]
bad = ['/tmp', '/tmp.html', '/tmp/a.html', '/a%3cd.html', '/a%3Cd.html',
'/a%2fb.html', '/~joe/index.html']
class DifferentAgentTest(CrawlDelayAndRequestRateTest):
agent = 'FigTree Robot libwww-perl/5.04'
class InvalidRequestRateTest(BaseRobotTest, unittest.TestCase):
robots_txt = """\
User-agent: *
Disallow: /tmp/
Disallow: /a%3Cd.html
Disallow: /a/b.html
Disallow: /%7ejoe/index.html
Crawl-delay: 3
Request-rate: 9/banana
"""
good = ['/tmp']
bad = ['/tmp/', '/tmp/a.html', '/a%3cd.html', '/a%3Cd.html', '/a/b.html',
'/%7Ejoe/index.html']
crawl_delay = 3
class InvalidCrawlDelayTest(BaseRobotTest, unittest.TestCase):
# From bug report #523041
robots_txt = """\
User-Agent: *
Disallow: /.
Crawl-delay: pears
"""
good = ['/foo.html']
# bug report says "/" should be denied, but that is not in the RFC
bad = []
class AnotherInvalidRequestRateTest(BaseRobotTest, unittest.TestCase):
# also test that Allow and Disallow work well with each other
robots_txt = """\
User-agent: Googlebot
Allow: /folder1/myfile.html
Disallow: /folder1/
Request-rate: whale/banana
"""
agent = 'Googlebot'
good = ['/folder1/myfile.html']
bad = ['/folder1/anotherfile.html']
class UserAgentOrderingTest(BaseRobotTest, unittest.TestCase):
# User-agent groups should be matched in the order they appear. Note
# that this robots.txt is technically incorrect because "Googlebot" is a
# substring of "Googlebot-Mobile"
robots_txt = """\
User-agent: Googlebot
Disallow: /
User-agent: Googlebot-Mobile
Allow: /
"""
agent = 'Googlebot'
bad = ['/something.jpg']
class UserAgentGoogleMobileTest(UserAgentOrderingTest):
agent = 'Googlebot-Mobile'
class GoogleURLOrderingTest(BaseRobotTest, unittest.TestCase):
# Google also got the order wrong. You need
# to specify the URLs from more specific to more general
robots_txt = """\
User-agent: Googlebot
Allow: /folder1/myfile.html
Disallow: /folder1/
"""
agent = 'googlebot'
good = ['/folder1/myfile.html']
bad = ['/folder1/anotherfile.html']
class DisallowQueryStringTest(BaseRobotTest, unittest.TestCase):
# see issue #6325 for details
robots_txt = """\
User-agent: *
Disallow: /some/path?name=value
"""
good = ['/some/path']
bad = ['/some/path?name=value']
class UseFirstUserAgentWildcardTest(BaseRobotTest, unittest.TestCase):
# obey first * entry (#4108)
robots_txt = """\
User-agent: *
Disallow: /some/path
User-agent: *
Disallow: /another/path
"""
good = ['/another/path']
bad = ['/some/path']
class EmptyQueryStringTest(BaseRobotTest, unittest.TestCase):
# normalize the URL first (#17403)
robots_txt = """\
User-agent: *
Allow: /some/path?
Disallow: /another/path?
"""
good = ['/some/path?']
bad = ['/another/path?']
class DefaultEntryTest(BaseRequestRateTest, unittest.TestCase):
robots_txt = """\
User-agent: *
Crawl-delay: 1
Request-rate: 3/15
Disallow: /cyberworld/map/
"""
request_rate = urllib.robotparser.RequestRate(3, 15)
crawl_delay = 1
good = ['/', '/test.html']
bad = ['/cyberworld/map/index.html']
class StringFormattingTest(BaseRobotTest, unittest.TestCase):
robots_txt = """\
User-agent: *
Crawl-delay: 1
Request-rate: 3/15
Disallow: /cyberworld/map/ # This is an infinite virtual URL space
# Cybermapper knows where to go.
User-agent: cybermapper
Disallow: /some/path
"""
expected_output = """\
User-agent: cybermapper
Disallow: /some/path
User-agent: *
Crawl-delay: 1
Request-rate: 3/15
Disallow: /cyberworld/map/\
"""
def test_string_formatting(self):
self.assertEqual(str(self.parser), self.expected_output)
class RobotHandler(BaseHTTPRequestHandler):
def do_GET(self):
self.send_error(403, "Forbidden access")
def log_message(self, format, *args):
pass
class PasswordProtectedSiteTestCase(unittest.TestCase):
def setUp(self):
self.server = HTTPServer((support.HOST, 0), RobotHandler)
self.t = threading.Thread(
name='HTTPServer serving',
target=self.server.serve_forever,
# Short poll interval to make the test finish quickly.
# Time between requests is short enough that we won't wake
# up spuriously too many times.
kwargs={'poll_interval':0.01})
self.t.daemon = True # In case this function raises.
self.t.start()
def tearDown(self):
self.server.shutdown()
self.t.join()
self.server.server_close()
@support.reap_threads
def testPasswordProtectedSite(self):
addr = self.server.server_address
url = 'http://' + support.HOST + ':' + str(addr[1])
robots_url = url + "/robots.txt"
parser = urllib.robotparser.RobotFileParser()
parser.set_url(url)
parser.read()
self.assertFalse(parser.can_fetch("*", robots_url))
class NetworkTestCase(unittest.TestCase):
base_url = 'http://www.pythontest.net/'
robots_txt = '{}elsewhere/robots.txt'.format(base_url)
@classmethod
def setUpClass(cls):
support.requires('network')
with support.transient_internet(cls.base_url):
cls.parser = urllib.robotparser.RobotFileParser(cls.robots_txt)
cls.parser.read()
def url(self, path):
return '{}{}{}'.format(
self.base_url, path, '/' if not os.path.splitext(path)[1] else ''
)
def test_basic(self):
self.assertFalse(self.parser.disallow_all)
self.assertFalse(self.parser.allow_all)
self.assertGreater(self.parser.mtime(), 0)
self.assertFalse(self.parser.crawl_delay('*'))
self.assertFalse(self.parser.request_rate('*'))
def test_can_fetch(self):
self.assertTrue(self.parser.can_fetch('*', self.url('elsewhere')))
self.assertFalse(self.parser.can_fetch('Nutch', self.base_url))
self.assertFalse(self.parser.can_fetch('Nutch', self.url('brian')))
self.assertFalse(self.parser.can_fetch('Nutch', self.url('webstats')))
self.assertFalse(self.parser.can_fetch('*', self.url('webstats')))
self.assertTrue(self.parser.can_fetch('*', self.base_url))
def test_read_404(self):
parser = urllib.robotparser.RobotFileParser(self.url('i-robot.txt'))
parser.read()
self.assertTrue(parser.allow_all)
self.assertFalse(parser.disallow_all)
self.assertEqual(parser.mtime(), 0)
self.assertIsNone(parser.crawl_delay('*'))
self.assertIsNone(parser.request_rate('*'))
if __name__=='__main__':
unittest.main()
|
|
# -*- coding: utf-8 -*-
"""Maximum flow algorithms test suite.
"""
from nose.tools import *
import networkx as nx
from networkx.algorithms.flow import build_flow_dict, build_residual_network
from networkx.algorithms.flow import (edmonds_karp, ford_fulkerson,
preflow_push, shortest_augmenting_path)
flow_funcs = [edmonds_karp, ford_fulkerson, preflow_push,
shortest_augmenting_path]
max_min_funcs = [nx.maximum_flow, nx.minimum_cut]
flow_value_funcs = [nx.maximum_flow_value, nx.minimum_cut_value]
interface_funcs = sum([max_min_funcs, flow_value_funcs], [])
all_funcs = sum([flow_funcs, interface_funcs], [])
msg = "Assertion failed in function: {0}"
msgi = "Assertion failed in function: {0} in interface {1}"
def compute_cutset(G, partition):
reachable, non_reachable = partition
cutset = set()
for u, nbrs in ((n, G[n]) for n in reachable):
cutset.update((u, v) for v in nbrs if v in non_reachable)
return cutset
def validate_flows(G, s, t, flowDict, solnValue, capacity, flow_func):
assert_equal(set(G), set(flowDict), msg=msg.format(flow_func.__name__))
for u in G:
assert_equal(set(G[u]), set(flowDict[u]),
msg=msg.format(flow_func.__name__))
excess = dict((u, 0) for u in flowDict)
for u in flowDict:
for v, flow in flowDict[u].items():
if capacity in G[u][v]:
ok_(flow <= G[u][v][capacity])
ok_(flow >= 0, msg=msg.format(flow_func.__name__))
excess[u] -= flow
excess[v] += flow
for u, exc in excess.items():
if u == s:
assert_equal(exc, -solnValue, msg=msg.format(flow_func.__name__))
elif u == t:
assert_equal(exc, solnValue, msg=msg.format(flow_func.__name__))
else:
assert_equal(exc, 0, msg=msg.format(flow_func.__name__))
def validate_cuts(G, s, t, solnValue, partition, capacity, flow_func):
assert_true(all(n in G for n in partition[0]),
msg=msg.format(flow_func.__name__))
assert_true(all(n in G for n in partition[1]),
msg=msg.format(flow_func.__name__))
cutset = compute_cutset(G, partition)
assert_true(all(G.has_edge(u, v) for (u, v) in cutset),
msg=msg.format(flow_func.__name__))
assert_equal(solnValue, sum(G[u][v][capacity] for (u, v) in cutset),
msg=msg.format(flow_func.__name__))
H = G.copy()
H.remove_edges_from(cutset)
if not G.is_directed():
assert_false(nx.is_connected(H), msg=msg.format(flow_func.__name__))
else:
assert_false(nx.is_strongly_connected(H),
msg=msg.format(flow_func.__name__))
def compare_flows_and_cuts(G, s, t, solnFlows, solnValue, capacity='capacity'):
for flow_func in flow_funcs:
R = flow_func(G, s, t, capacity)
# Test both legacy and new implementations.
legacy = R.graph.get('algorithm') == "ford_fulkerson_legacy"
flow_value = R.graph['flow_value']
if legacy:
flow_dict = R.graph['flow_dict']
else:
flow_dict = build_flow_dict(G, R)
assert_equal(flow_value, solnValue, msg=msg.format(flow_func.__name__))
if legacy:
assert_equal(flow_dict, solnFlows, msg=msg.format(flow_func.__name__))
else:
validate_flows(G, s, t, flow_dict, solnValue, capacity, flow_func)
# Minimum cut
if legacy:
cut_value, partition = nx.minimum_cut(G, s, t, capacity=capacity,
flow_func=ford_fulkerson)
else:
cut_value, partition = nx.minimum_cut(G, s, t, capacity=capacity,
flow_func=flow_func)
validate_cuts(G, s, t, solnValue, partition, capacity, flow_func)
class TestMaxflowMinCutCommon:
def test_graph1(self):
# Trivial undirected graph
G = nx.Graph()
G.add_edge(1,2, capacity = 1.0)
solnFlows = {1: {2: 1.0},
2: {1: 1.0}}
compare_flows_and_cuts(G, 1, 2, solnFlows, 1.0)
def test_graph2(self):
# A more complex undirected graph
# adapted from www.topcoder.com/tc?module=Statc&d1=tutorials&d2=maxFlow
G = nx.Graph()
G.add_edge('x','a', capacity = 3.0)
G.add_edge('x','b', capacity = 1.0)
G.add_edge('a','c', capacity = 3.0)
G.add_edge('b','c', capacity = 5.0)
G.add_edge('b','d', capacity = 4.0)
G.add_edge('d','e', capacity = 2.0)
G.add_edge('c','y', capacity = 2.0)
G.add_edge('e','y', capacity = 3.0)
H = {'x': {'a': 3, 'b': 1},
'a': {'c': 3, 'x': 3},
'b': {'c': 1, 'd': 2, 'x': 1},
'c': {'a': 3, 'b': 1, 'y': 2},
'd': {'b': 2, 'e': 2},
'e': {'d': 2, 'y': 2},
'y': {'c': 2, 'e': 2}}
compare_flows_and_cuts(G, 'x', 'y', H, 4.0)
def test_digraph1(self):
# The classic directed graph example
G = nx.DiGraph()
G.add_edge('a','b', capacity = 1000.0)
G.add_edge('a','c', capacity = 1000.0)
G.add_edge('b','c', capacity = 1.0)
G.add_edge('b','d', capacity = 1000.0)
G.add_edge('c','d', capacity = 1000.0)
H = {'a': {'b': 1000.0, 'c': 1000.0},
'b': {'c': 0, 'd': 1000.0},
'c': {'d': 1000.0},
'd': {}}
compare_flows_and_cuts(G, 'a', 'd', H, 2000.0)
def test_digraph2(self):
# An example in which some edges end up with zero flow.
G = nx.DiGraph()
G.add_edge('s', 'b', capacity = 2)
G.add_edge('s', 'c', capacity = 1)
G.add_edge('c', 'd', capacity = 1)
G.add_edge('d', 'a', capacity = 1)
G.add_edge('b', 'a', capacity = 2)
G.add_edge('a', 't', capacity = 2)
H = {'s': {'b': 2, 'c': 0},
'c': {'d': 0},
'd': {'a': 0},
'b': {'a': 2},
'a': {'t': 2},
't': {}}
compare_flows_and_cuts(G, 's', 't', H, 2)
def test_digraph3(self):
# A directed graph example from Cormen et al.
G = nx.DiGraph()
G.add_edge('s','v1', capacity = 16.0)
G.add_edge('s','v2', capacity = 13.0)
G.add_edge('v1','v2', capacity = 10.0)
G.add_edge('v2','v1', capacity = 4.0)
G.add_edge('v1','v3', capacity = 12.0)
G.add_edge('v3','v2', capacity = 9.0)
G.add_edge('v2','v4', capacity = 14.0)
G.add_edge('v4','v3', capacity = 7.0)
G.add_edge('v3','t', capacity = 20.0)
G.add_edge('v4','t', capacity = 4.0)
H = {'s': {'v1': 12.0, 'v2': 11.0},
'v2': {'v1': 0, 'v4': 11.0},
'v1': {'v2': 0, 'v3': 12.0},
'v3': {'v2': 0, 't': 19.0},
'v4': {'v3': 7.0, 't': 4.0},
't': {}}
compare_flows_and_cuts(G, 's', 't', H, 23.0)
def test_digraph4(self):
# A more complex directed graph
# from www.topcoder.com/tc?module=Statc&d1=tutorials&d2=maxFlow
G = nx.DiGraph()
G.add_edge('x','a', capacity = 3.0)
G.add_edge('x','b', capacity = 1.0)
G.add_edge('a','c', capacity = 3.0)
G.add_edge('b','c', capacity = 5.0)
G.add_edge('b','d', capacity = 4.0)
G.add_edge('d','e', capacity = 2.0)
G.add_edge('c','y', capacity = 2.0)
G.add_edge('e','y', capacity = 3.0)
H = {'x': {'a': 2.0, 'b': 1.0},
'a': {'c': 2.0},
'b': {'c': 0, 'd': 1.0},
'c': {'y': 2.0},
'd': {'e': 1.0},
'e': {'y': 1.0},
'y': {}}
compare_flows_and_cuts(G, 'x', 'y', H, 3.0)
def test_optional_capacity(self):
# Test optional capacity parameter.
G = nx.DiGraph()
G.add_edge('x','a', spam = 3.0)
G.add_edge('x','b', spam = 1.0)
G.add_edge('a','c', spam = 3.0)
G.add_edge('b','c', spam = 5.0)
G.add_edge('b','d', spam = 4.0)
G.add_edge('d','e', spam = 2.0)
G.add_edge('c','y', spam = 2.0)
G.add_edge('e','y', spam = 3.0)
solnFlows = {'x': {'a': 2.0, 'b': 1.0},
'a': {'c': 2.0},
'b': {'c': 0, 'd': 1.0},
'c': {'y': 2.0},
'd': {'e': 1.0},
'e': {'y': 1.0},
'y': {}}
solnValue = 3.0
s = 'x'
t = 'y'
compare_flows_and_cuts(G, s, t, solnFlows, solnValue, capacity = 'spam')
def test_digraph_infcap_edges(self):
# DiGraph with infinite capacity edges
G = nx.DiGraph()
G.add_edge('s', 'a')
G.add_edge('s', 'b', capacity = 30)
G.add_edge('a', 'c', capacity = 25)
G.add_edge('b', 'c', capacity = 12)
G.add_edge('a', 't', capacity = 60)
G.add_edge('c', 't')
H = {'s': {'a': 85, 'b': 12},
'a': {'c': 25, 't': 60},
'b': {'c': 12},
'c': {'t': 37},
't': {}}
compare_flows_and_cuts(G, 's', 't', H, 97)
# DiGraph with infinite capacity digon
G = nx.DiGraph()
G.add_edge('s', 'a', capacity = 85)
G.add_edge('s', 'b', capacity = 30)
G.add_edge('a', 'c')
G.add_edge('c', 'a')
G.add_edge('b', 'c', capacity = 12)
G.add_edge('a', 't', capacity = 60)
G.add_edge('c', 't', capacity = 37)
H = {'s': {'a': 85, 'b': 12},
'a': {'c': 25, 't': 60},
'c': {'a': 0, 't': 37},
'b': {'c': 12},
't': {}}
compare_flows_and_cuts(G, 's', 't', H, 97)
def test_digraph_infcap_path(self):
# Graph with infinite capacity (s, t)-path
G = nx.DiGraph()
G.add_edge('s', 'a')
G.add_edge('s', 'b', capacity = 30)
G.add_edge('a', 'c')
G.add_edge('b', 'c', capacity = 12)
G.add_edge('a', 't', capacity = 60)
G.add_edge('c', 't')
for flow_func in all_funcs:
assert_raises(nx.NetworkXUnbounded,
flow_func, G, 's', 't')
def test_graph_infcap_edges(self):
# Undirected graph with infinite capacity edges
G = nx.Graph()
G.add_edge('s', 'a')
G.add_edge('s', 'b', capacity = 30)
G.add_edge('a', 'c', capacity = 25)
G.add_edge('b', 'c', capacity = 12)
G.add_edge('a', 't', capacity = 60)
G.add_edge('c', 't')
H = {'s': {'a': 85, 'b': 12},
'a': {'c': 25, 's': 85, 't': 60},
'b': {'c': 12, 's': 12},
'c': {'a': 25, 'b': 12, 't': 37},
't': {'a': 60, 'c': 37}}
compare_flows_and_cuts(G, 's', 't', H, 97)
def test_digraph5(self):
# From ticket #429 by mfrasca. Renamed from a duplicate test_digraph4,
# which would otherwise shadow the earlier test of the same name.
G = nx.DiGraph()
G.add_edge('s', 'a', capacity = 2)
G.add_edge('s', 'b', capacity = 2)
G.add_edge('a', 'b', capacity = 5)
G.add_edge('a', 't', capacity = 1)
G.add_edge('b', 'a', capacity = 1)
G.add_edge('b', 't', capacity = 3)
flowSoln = {'a': {'b': 1, 't': 1},
'b': {'a': 0, 't': 3},
's': {'a': 2, 'b': 2},
't': {}}
compare_flows_and_cuts(G, 's', 't', flowSoln, 4)
def test_disconnected(self):
G = nx.Graph()
G.add_weighted_edges_from([(0,1,1),(1,2,1),(2,3,1)],weight='capacity')
G.remove_node(1)
assert_equal(nx.maximum_flow_value(G,0,3), 0)
flowSoln = {0: {}, 2: {3: 0}, 3: {2: 0}}
compare_flows_and_cuts(G, 0, 3, flowSoln, 0)
def test_source_target_not_in_graph(self):
G = nx.Graph()
G.add_weighted_edges_from([(0,1,1),(1,2,1),(2,3,1)],weight='capacity')
G.remove_node(0)
for flow_func in all_funcs:
assert_raises(nx.NetworkXError, flow_func, G, 0, 3)
G.add_weighted_edges_from([(0,1,1),(1,2,1),(2,3,1)],weight='capacity')
G.remove_node(3)
for flow_func in all_funcs:
assert_raises(nx.NetworkXError, flow_func, G, 0, 3)
def test_source_target_coincide(self):
G = nx.Graph()
G.add_node(0)
for flow_func in all_funcs:
assert_raises(nx.NetworkXError, flow_func, G, 0, 0)
def test_multigraphs_raise(self):
G = nx.MultiGraph()
M = nx.MultiDiGraph()
G.add_edges_from([(0, 1), (1, 0)], capacity=True)
for flow_func in all_funcs:
assert_raises(nx.NetworkXError, flow_func, G, 0, 0)
class TestMaxFlowMinCutInterface:
def setup(self):
G = nx.DiGraph()
G.add_edge('x','a', capacity = 3.0)
G.add_edge('x','b', capacity = 1.0)
G.add_edge('a','c', capacity = 3.0)
G.add_edge('b','c', capacity = 5.0)
G.add_edge('b','d', capacity = 4.0)
G.add_edge('d','e', capacity = 2.0)
G.add_edge('c','y', capacity = 2.0)
G.add_edge('e','y', capacity = 3.0)
self.G = G
H = nx.DiGraph()
H.add_edge(0, 1, capacity = 1.0)
H.add_edge(1, 2, capacity = 1.0)
self.H = H
def test_flow_func_not_callable(self):
elements = ['this_should_be_callable', 10, set([1,2,3])]
G = nx.Graph()
G.add_weighted_edges_from([(0,1,1),(1,2,1),(2,3,1)], weight='capacity')
for flow_func in interface_funcs:
for element in elements:
assert_raises(nx.NetworkXError,
flow_func, G, 0, 1, flow_func=element)
assert_raises(nx.NetworkXError,
flow_func, G, 0, 1, flow_func=element)
def test_flow_func_parameters(self):
G = self.G
fv = 3.0
for interface_func in interface_funcs:
for flow_func in flow_funcs:
result = interface_func(G, 'x', 'y', flow_func=flow_func)
if interface_func in max_min_funcs:
result = result[0]
assert_equal(fv, result, msg=msgi.format(flow_func.__name__,
interface_func.__name__))
def test_minimum_cut_no_cutoff(self):
G = self.G
for flow_func in flow_funcs:
assert_raises(nx.NetworkXError, nx.minimum_cut, G, 'x', 'y',
flow_func=flow_func, cutoff=1.0)
assert_raises(nx.NetworkXError, nx.minimum_cut_value, G, 'x', 'y',
flow_func=flow_func, cutoff=1.0)
def test_kwargs(self):
G = self.H
fv = 1.0
to_test = (
(shortest_augmenting_path, dict(two_phase=True)),
(preflow_push, dict(global_relabel_freq=5)),
)
for interface_func in interface_funcs:
for flow_func, kwargs in to_test:
result = interface_func(G, 0, 2, flow_func=flow_func, **kwargs)
if interface_func in max_min_funcs:
result = result[0]
assert_equal(fv, result, msg=msgi.format(flow_func.__name__,
interface_func.__name__))
def test_kwargs_default_flow_func(self):
G = self.H
for interface_func in interface_funcs:
assert_raises(nx.NetworkXError, interface_func,
G, 0, 1, global_relabel_freq=2)
def test_reusing_residual(self):
G = self.G
fv = 3.0
s, t = 'x', 'y'
R = build_residual_network(G, 'capacity')
for interface_func in interface_funcs:
for flow_func in flow_funcs:
for i in range(3):
result = interface_func(G, 'x', 'y', flow_func=flow_func,
residual=R)
if interface_func in max_min_funcs:
result = result[0]
assert_equal(fv, result,
msg=msgi.format(flow_func.__name__,
interface_func.__name__))
# Tests specific to one algorithm
def test_preflow_push_global_relabel_freq():
G = nx.DiGraph()
G.add_edge(1, 2, capacity=1)
R = preflow_push(G, 1, 2, global_relabel_freq=None)
assert_equal(R.graph['flow_value'], 1)
assert_raises(nx.NetworkXError, preflow_push, G, 1, 2,
global_relabel_freq=-1)
def test_shortest_augmenting_path_two_phase():
k = 5
p = 1000
G = nx.DiGraph()
for i in range(k):
G.add_edge('s', (i, 0), capacity=1)
G.add_path(((i, j) for j in range(p)), capacity=1)
G.add_edge((i, p - 1), 't', capacity=1)
R = shortest_augmenting_path(G, 's', 't', two_phase=True)
assert_equal(R.graph['flow_value'], k)
R = shortest_augmenting_path(G, 's', 't', two_phase=False)
assert_equal(R.graph['flow_value'], k)
class TestCutoff:
def test_cutoff(self):
k = 5
p = 1000
G = nx.DiGraph()
for i in range(k):
G.add_edge('s', (i, 0), capacity=2)
G.add_path(((i, j) for j in range(p)), capacity=2)
G.add_edge((i, p - 1), 't', capacity=2)
R = shortest_augmenting_path(G, 's', 't', two_phase=True, cutoff=k)
ok_(k <= R.graph['flow_value'] <= 2 * k)
R = shortest_augmenting_path(G, 's', 't', two_phase=False, cutoff=k)
ok_(k <= R.graph['flow_value'] <= 2 * k)
R = edmonds_karp(G, 's', 't', cutoff=k)
ok_(k <= R.graph['flow_value'] <= 2 * k)
def test_complete_graph_cutoff(self):
G = nx.complete_graph(5)
nx.set_edge_attributes(G, 'capacity',
dict(((u, v), 1) for u, v in G.edges()))
for flow_func in [shortest_augmenting_path, edmonds_karp]:
for cutoff in [3, 2, 1]:
result = nx.maximum_flow_value(G, 0, 4, flow_func=flow_func,
cutoff=cutoff)
assert_equal(cutoff, result,
msg="cutoff error in {0}".format(flow_func.__name__))
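# --- Illustrative usage (added; not part of the test suite) ---
# A minimal sketch of the interface functions exercised above: build a small
# capacitated DiGraph and ask for the maximum flow value between two nodes.
def _example_maximum_flow_value():
    G = nx.DiGraph()
    G.add_edge('s', 'a', capacity=3.0)
    G.add_edge('a', 't', capacity=2.0)
    return nx.maximum_flow_value(G, 's', 't')  # 2.0, limited by the a->t edge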
|
|
import sys, os, re, urllib, urllib3, httplib, time, json, hmac, hashlib, base64
from decimal import Decimal
from common.functions import console_log
from exchange.exchange_abstract import ExchangeAbstract, Order
class MtGox1(ExchangeAbstract):
"""
See:
https://en.bitcoin.it/wiki/MtGox/API
"""
_last_price = {}
_order = None
ticker_url = { "method": "GET", "url": "http://data.mtgox.com/api/1/BTCUSD/ticker_fast" }
buy_url = { "method": "POST", "url": "https://data.mtgox.com/api/1/BTCUSD/private/order/add" }
sell_url = { "method": "POST", "url": "https://data.mtgox.com/api/1/BTCUSD/private/order/add" }
order_url = { "method": "POST", "url": "https://data.mtgox.com/api/1/generic/private/order/result" }
open_orders_url = { "method": "POST", "url": "https://data.mtgox.com/api/1/generic/private/orders" }
cancel_url = { "method": "POST", "url": "https://data.mtgox.com/api/1/BTCUSD/private/order/cancel" }
key = None
secret = None
classname = None
@property
def order(self):
return self._order
@order.setter
def order(self, order):
self._order = order
def __init__(self, **kwargs):
for k, v in kwargs.iteritems():
setattr(self, k, v)
self._last_price = {}
self._order = None
def _change_currency_url(self, url, currency):
return re.sub(r'BTC\w{3}', r'BTC' + currency, url)
def _create_nonce(self):
return int(time.time() * 1000000)
def _send_request(self, url, params, extra_headers=None):
headers = { 'Content-type': 'application/x-www-form-urlencoded', 'Accept': 'application/json, text/javascript, */*; q=0.01', 'User-Agent': 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)' }
if extra_headers is not None:
for k, v in extra_headers.iteritems():
headers[k] = v
http_pool = urllib3.connection_from_url(url['url'])
response = http_pool.urlopen(url['method'], url['url'], body=urllib.urlencode(params), headers=headers)
if response.status == 200:
return json.loads(response.data)
return None
def _to_int_price(self, price, currency):
ret_price = None
if currency in ("USD", "EUR", "GBP", "PLN", "CAD", "AUD", "CHF", "CNY", "NZD", "RUB", "DKK", "HKD", "SGD", "THB"):
ret_price = int(Decimal(price) * 100000)
elif currency in ("JPY", "SEK"):
ret_price = int(Decimal(price) * 1000)
return ret_price
def _to_int_amount(self, amount):
amount = Decimal(amount)
return int(amount * 100000000)
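# Example (illustrative): with the scaling above, a price of 5.12345 in a
# five-decimal currency such as USD becomes the integer 512345, a price of
# 512.345 JPY becomes 512345, and an amount of 1.5 BTC becomes 150000000.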
def get_order(self, trade):
"""
Method gets particular order.
"""
if not self.key or self.key is None:
console_log("mtgox: key not set; check settings.py")
return
if not self.secret or self.secret is None:
console_log("mtgox: secret not set; check settings.py")
return
order_type = ""
if trade.buy_or_sell == True:
order_type = "bid"
elif trade.buy_or_sell == False:
order_type = "ask"
params = [ ("nonce", self._create_nonce()), ("order", trade.exchange_oid), ("type", order_type) ]
headers = { 'Rest-Key': self.key, 'Rest-Sign': base64.b64encode(str(hmac.new(base64.b64decode(self.secret), urllib.urlencode(params), hashlib.sha512).digest())) }
response = self._send_request(self.order_url, params, headers)
if response and u"result" in response and response[u"result"] == u"success":
order = Order()
if u"trades" in response[u"return"]:
order.trades = response[u"return"][u"trades"]
sum_price = 0
sum_amount = 0
for exchange_trade in response[u"return"]["trades"]:
if str(trade.currency) == str(exchange_trade[u"currency"]):
sum_price += Decimal(exchange_trade[u"amount"][u"value"]) * Decimal((exchange_trade[u"price"][u"value"]))
sum_amount += Decimal(exchange_trade[u"amount"][u"value"])
order.sum_price = sum_price
order.sum_amount = sum_amount
return order
elif response and u"result" in response and response[u"result"] == u"error":
return {"error": response[u"error"]}
return None
def get_orders(self):
"""
Method gets open orders.
"""
if not self.key or self.key is None:
console_log("mtgox: key not set; check settings.py")
return
if not self.secret or self.secret is None:
console_log("mtgox: secret not set; check settings.py")
return
params = [ (u"nonce", self._create_nonce()) ]
headers = { 'Rest-Key': self.key, 'Rest-Sign': base64.b64encode(str(hmac.new(base64.b64decode(self.secret), urllib.urlencode(params), hashlib.sha512).digest())) }
response = self._send_request(self.open_orders_url, params, headers)
if response and u"result" in response and response[u"result"] == u"success":
return response[u"return"]
return None
def get_last_price(self, currency):
if currency in self._last_price:
return self._last_price[currency]
self.ticker_url["url"] = self._change_currency_url(self.ticker_url["url"], currency)
response = self._send_request(self.ticker_url, {})
if response and u"result" in response and response[u"result"] == u"success" and u"return" in response and u"last_local" in response[u"return"]:
self._last_price[currency] = Decimal(response[u"return"][u"last_local"][u"value"])
return Decimal(response[u"return"][u"last_local"][u"value"])
return None
def get_balance(self):
"""
For future use.
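Note: balance_url is not defined on this class like the other endpoint
URLs above; it is expected to be supplied via the kwargs loop in
__init__ before this method can be used.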
"""
if not self.key or self.key is None:
console_log("mtgox: key not set; check settings.py")
return
if not self.secret or self.secret is None:
console_log("mtgox: secret not set; check settings.py")
return
params = [ (u"nonce", self._create_nonce()) ]
headers = { 'Rest-Key': self.key, 'Rest-Sign': base64.b64encode(str(hmac.new(base64.b64decode(self.secret), urllib.urlencode(params), hashlib.sha512).digest())) }
response = self._send_request(self.balance_url, params, headers)
if response and "result" in response and response["result"] == "success":
return response
return None
def buy(self, price, amount, currency):
"""
bid == buy
ask == sell
Returns order ID if order was placed successfully.
"""
if not self.key or self.key is None:
console_log("mtgox: key not set; check settings.py")
return None
if not self.secret or self.secret is None:
console_log("mtgox: secret not set; check settings.py")
return None
price = self._to_int_price(price, currency)
amount = self._to_int_amount(amount)
if not price or price is None:
console_log("mtgox: there is no conversion formula for currency %s" % (currency))
return None
if not amount or amount is None: return None
self.buy_url["url"] = self._change_currency_url(self.buy_url["url"], currency)
params = [ ("nonce", self._create_nonce()), ("amount_int", str(amount)), ("price_int", str(price)), ("type", "bid") ]
headers = { 'Rest-Key': self.key, 'Rest-Sign': base64.b64encode(str(hmac.new(base64.b64decode(self.secret), urllib.urlencode(params), hashlib.sha512).digest())) }
response = self._send_request(self.buy_url, params, headers)
if response and u"result" in response and response[u"result"] == u"success":
return response[u"return"]
return None
def sell(self, price, amount, currency):
"""
ask == sell
"""
if not self.key or self.key is None:
console_log("mtgox: key not set; check settings.py")
return
if not self.secret or self.secret is None:
console_log("mtgox: secret not set; check settings.py")
return
price = self._to_int_price(price, currency)
amount = self._to_int_amount(amount)
if not price or price is None:
console_log("there is no conversion formula for currency %s" % (currency))
return None
if not amount or amount is None: return None
self.sell_url["url"] = self._change_currency_url(self.sell_url["url"], currency)
params = [ ("nonce", self._create_nonce()), ("amount_int", str(amount)), ("price_int", str(price)), ("type", "ask") ]
headers = { 'Rest-Key': self.key, 'Rest-Sign': base64.b64encode(str(hmac.new(base64.b64decode(self.secret), urllib.urlencode(params), hashlib.sha512).digest())) }
response = self._send_request(self.sell_url, params, headers)
if response and u"result" in response and response[u"result"] == u"success":
return response[u"return"]
return None
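# --- Illustrative sketch (added; not part of the original module) ---
# How the Rest-Sign header used by the methods above is built: the request
# parameters are form-encoded, signed with HMAC-SHA512 using the
# base64-decoded API secret, and the digest is base64-encoded again.
def _example_rest_sign(secret, params):
    message = urllib.urlencode(params)
    digest = hmac.new(base64.b64decode(secret), message, hashlib.sha512).digest()
    return base64.b64encode(digest)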
|
|
"""
Forcefield.py
This module takes a pdblist as input and replaces the occupancy and
tempfactor fields with charge and radius fields, with values as defined
by a particular forcefield. The forcefield structure is modeled off of
the structures.py file, where each forcefield is considered a chain of
residues of atoms.
----------------------------
PDB2PQR -- An automated pipeline for the setup, execution, and analysis of
Poisson-Boltzmann electrostatics calculations
Copyright (c) 2002-2010, Jens Erik Nielsen, University College Dublin;
Nathan A. Baker, Washington University in St. Louis; Paul Czodrowski &
Gerhard Klebe, University of Marburg
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the names of University College Dublin, Washington University in
St. Louis, or University of Marburg nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
OF THE POSSIBILITY OF SUCH DAMAGE.
----------------------------
"""
__date__ = "6 November 2007"
__author__ = "Todd Dolinsky, Yong Huang"
import string
import sys
import getopt
import os
import re
from xml import sax
from utilities import *
class ForcefieldHandler(sax.ContentHandler):
def __init__(self, map, reference):
self.oldresname = None
self.oldatomname = None
self.curelement = None
self.newatomname = None
self.newresname = None
self.atommap = {}
self.map = map
self.reference = reference
def updateMap(self, toname, fromname, map):
"""
Update the given map by adding a pointer from a new
name to an object.
Parameters
toname: The new name for the object (string)
fromname: The old name for the object (string)
map: A dictionary of items (dict)
"""
fromobj = map[fromname]
if isinstance(fromobj, ForcefieldResidue):
if toname not in map:
newres = ForcefieldResidue(fromname)
map[toname] = newres
for atomname in fromobj.atoms:
map[toname].atoms[atomname] = fromobj.atoms[atomname]
elif isinstance(fromobj, ForcefieldAtom):
map[toname] = fromobj
def findMatchingNames(self, regname, map):
"""
Find a list of strings that match the given regular
expression.
Parameters
regname: The regular expression (string)
map: The dictionary to search (dict)
Returns
list: A list of regular expression objects that match
the regular expression.
"""
list = []
regname += "$"
# Find the existing items that match this string
for name in map:
regexp = re.compile(regname).match(name)
if regexp:
list.append(regexp)
return list
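# Example (illustrative): because "$" is appended, a regname of "HI." would
# match map keys such as "HID", "HIE" and "HIP" (returned as match objects),
# but not a longer key such as "HISTIDINE".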
def startElement(self, name, attributes):
"""
Override the startElement function to keep track of the current
element.
"""
if name != "name": self.curelement = name
def endElement(self, name):
"""
At the end of the element, act on the stored information.
Parameters
name: The name of the element (string)
"""
if name == "residue":
if self.oldresname != None: # Make a new residue hook
newreslist = self.findMatchingNames(self.newresname, self.reference)
if self.oldresname.find("$group") >= 0: # Multiple new residues
for resitem in newreslist:
resname = resitem.string
group = resitem.group(1)
fromname = self.oldresname.replace("$group", group)
if fromname in self.map:
self.updateMap(resname, fromname, self.map)
else: # Work with a single new residue name
oldreslist = self.findMatchingNames(self.oldresname, self.map)
for resitem in newreslist:
resname = resitem.string
self.updateMap(resname, self.oldresname, self.map)
# If this was only a residue conversion, exit
if self.atommap == {}:
self.oldresname = None
self.newresname = None
return
# Apply atom conversions for all appropriate residues
resmatchlist = self.findMatchingNames(self.newresname, self.map)
for resitem in resmatchlist:
residue = self.map[resitem.string]
for newname in self.atommap:
oldname = self.atommap[newname]
if oldname not in residue.atoms: continue
self.updateMap(newname, oldname, residue.atoms)
# Clean up
self.oldresname = None
self.newresname = None
self.atommap = {}
elif name == "atom":
self.atommap[self.newatomname] = self.oldatomname
self.oldatomname = None
self.newatomname = None
else: # Just free the current element namespace
self.curelement = ""
return self.map
def characters(self, text):
"""
Store the information in the object for future use.
Parameters
text: The text value between the XML tags
"""
if text.isspace(): return
text = str(text)
if self.curelement == "residue":
self.newresname = text
elif self.curelement == "atom":
self.newatomname = text
elif self.curelement == "useatomname":
self.oldatomname = text
elif self.curelement == "useresname":
self.oldresname = text
class Forcefield:
"""
Forcefield class
The forcefield class contains definitions for a given forcefield.
Each forcefield object contains a dictionary of residues, with each
residue containing a dictionary of atoms. Dictionaries are used
instead of lists as the ordering is not important. The forcefield
definition files are unedited, directly from the forcefield - all
transformations are done within.
"""
def __init__(self, ff, definition, userff, usernames = None):
"""
Initialize the class by parsing the definition file
Parameters
ff: The name of the forcefield (string)
definition: The definition objects
userff: A link to the file for CGI based user-defined
forcefields
"""
self.map = {}
self.name = ff
defpath = ""
if userff == None:
defpath = getFFfile(ff)
if defpath == "":
raise ValueError, "Unable to find forcefield parameter file %s!" % ff
file = open(defpath, 'rU')
else: file = userff
lines = file.readlines()
for line in lines:
if not line.startswith("#"):
fields = string.split(line)
if fields == []: continue
try:
resname = fields[0]
atomname = fields[1]
charge = float(fields[2])
radius = float(fields[3])
except ValueError:
txt = "Unable to recognize user-defined forcefield file"
if defpath != "": txt += " %s!" % defpath
else: txt += "!"
txt += " Please use a valid parameter file."
raise ValueError, txt
try:
group = fields[4]
atom = ForcefieldAtom(atomname, charge, radius, resname, group)
except:
atom = ForcefieldAtom(atomname, charge, radius, resname)
myResidue = self.getResidue(resname)
if myResidue == None:
myResidue = ForcefieldResidue(resname)
self.map[resname] = myResidue
myResidue.addAtom(atom)
file.close()
# Now parse the XML file, associating with FF objects -
# This is not necessary (if canonical names match ff names)
defpath = getNamesFile(ff)
if defpath != "":
handler = ForcefieldHandler(self.map, definition.map)
sax.make_parser()
if usernames != None:
namesfile = usernames
sax.parseString(namesfile.read(), handler)
else:
namesfile = open(defpath)
sax.parseString(namesfile.read(), handler)
namesfile.close()
# CGI based .names file handling
else:
handler = ForcefieldHandler(self.map, definition.map)
sax.make_parser()
if usernames != None:
namesfile = usernames
sax.parseString(namesfile.getvalue(), handler)
else:
raise ValueError, "Please provide a valid .names file!"
namesfile.close()
def hasResidue(self, resname):
"""
Check if the residue name is in the map or not.
Parameters
resname: The name to search for (string)
Returns
1 if the resname is in the map, 0 otherwise.
"""
if resname in self.map: return 1
else: return 0
def getResidue(self, resname):
"""
Return the residue object with the given resname
Parameters
resname: The name of the residue (string)
Returns
residue: The residue object (ForcefieldResidue)
"""
if self.hasResidue(resname): return self.map[resname]
else: return None
def getNames(self, resname, atomname):
"""
Get the actual names associated with the input fields.
The names passed in point to ForcefieldResidue and
ForcefieldAtom objects which may have different names;
grab these names and return.
Parameters
resname: The residue name (string)
atomname: The atom name (string)
Returns
rname: The forcefield's name for this residue (string)
aname: The forcefield's name for this atom (string)
"""
rname = None
aname = None
if resname in self.map:
res = self.map[resname]
if res.hasAtom(atomname):
atom = res.atoms[atomname]
aname = atom.name
rname = atom.resname
return rname, aname
def getGroup(self, resname, atomname):
"""
Get the group/type associated with the input
fields. If not found, return a null string.
Parameters:
resname: The residue name (string)
atomname: The atom name (string)
"""
group = ""
if resname in self.map:
resid = self.map[resname]
if resid.hasAtom(atomname):
atom = resid.atoms[atomname]
group = atom.group
return group
def getParams(self, resname, atomname):
"""
Get the parameters associated with the input fields.
The residue itself is needed instead of simply its name
because the forcefield may use a different residue name
than the standard amino acid name.
Parameters
resname: The residue name (string)
atomname: The atom name (string)
Returns
charge: The charge on the atom (float)
radius: The radius of the atom (float)
"""
charge = None
radius = None
#print self.map.keys()
if resname in self.map:
resid = self.map[resname]
if resid.hasAtom(atomname):
atom = resid.atoms[atomname]
charge = atom.charge
radius = atom.radius
return charge, radius
def getParams1(self, residue, name):
"""
Get the parameters associated with the input fields.
The residue itself is needed instead of simply its name
because the forcefield may use a different residue name
than the standard amino acid name.
Parameters
residue: The residue (residue)
name: The atom name (string)
Returns
charge: The charge on the atom (float)
radius: The radius of the atom (float)
"""
charge = None
radius = None
resname = ""
atomname = ""
if self.name == "amber":
resname, atomname = self.getAmberParams(residue, name)
elif self.name == "charmm":
resname, atomname = self.getCharmmParams(residue, name)
elif self.name == "parse":
resname, atomname = self.getParseParams(residue, name)
else:
resname = residue.name
atomname = name
defresidue = self.getResidue(resname)
# print "resname: %s, defresidue: %s" % (resname, defresidue)
if defresidue == None:
return charge, radius
atom = defresidue.getAtom(atomname)
if atom != None:
charge = atom.get("charge")
radius = atom.get("radius")
return charge, radius
def getAmberParams(self, residue, name):
"""
Get the forcefield definitions from the Amber database
Parameters
residue: The residue (residue)
name: The atom name (string)
Returns
resname: The name of the amber residue
atomname: The name of the amber atom
"""
atomname = name
type = residue.get("type")
if type == 4:
resname = residue.get("naname")
else:
resname = residue.get("name")
# Residue Substitutions
if residue.get("name") == "CYS" and "HG" not in residue.get("map"):
resname = "CYX"
elif residue.get("name") == "HIS":
if "HD1" in residue.get("map") and "HE2" in residue.get("map"):
resname = "HIP"
elif "HD1" in residue.get("map"):
resname = "HID"
elif "HE2" in residue.get("map"):
resname = "HIE"
else:
resname = "HID" # Default for no hydrogens
elif residue.get("name") == "HSP":
resname = "HIP"
elif residue.get("name") == "HSE":
resname = "HIE"
elif residue.get("name") == "HSD":
resname = "HID"
elif residue.get("name") == "GLU" or residue.get("name") == "GLH":
if "HE1" in residue.get("map"):
resname = "GLH"
if atomname == "HE1": atomname = "HE2"
elif atomname == "OE1": atomname = "OE2"
elif atomname == "OE2": atomname = "OE1"
elif "HE2" in residue.get("map"): resname = "GLH"
elif residue.get("name") == "ASP" or residue.get("name") == "ASH":
if "HD1" in residue.get("map"):
resname = "ASH"
if atomname == "HD1": atomname = "HD2"
elif atomname == "OD1": atomname = "OD2"
elif atomname == "OD2": atomname = "OD1"
elif "HD2" in residue.get("map"): resname = "ASH"
if residue.get("isCterm"):
resname = "C" + resname
elif residue.get("isNterm"):
resname = "N" + resname
# Atom Substitutions
if resname == "WAT":
if atomname == "O": atomname = "OW"
elif atomname == "H1": atomname = "HW"
elif atomname == "H2": atomname = "HW"
elif resname == "ILE":
if atomname == "CD": atomname = "CD1"
if resname[0] == "N" and resname != "NME": # N-terminal
if atomname == "H": atomname = "H1"
if (resname == "CCYS" or resname == "NCYS") and atomname == "HG": atomname = "HSG"
if resname == "CYM" and atomname == "H": atomname = "HN"
if residue.get("isNterm") and resname == "NPRO" and atomname == "HN2":
atomname = "H2"
if residue.get("isNterm") and resname == "NPRO" and atomname == "HN1":
atomname = "H3"
return resname, atomname
def getParseParams(self, residue, name):
"""
Get the forcefield definitions from the Parse database
Parameters
residue: The residue (residue)
name: The atom name (string)
Returns
resname: The name of the parse residue
atomname: The name of the parse atom
"""
atomname = name
resname = residue.name
# Terminal/Water Substitutions
nterm = residue.get("isNterm")
cterm = residue.get("isCterm")
if nterm and resname != "ACE":
if resname == "PRO" and nterm == 2:
resname = "PR+"
if atomname == "H2": atomname = "HN1"
elif atomname == "H3": atomname = "HN2"
elif resname == "PRO" and nterm == 1:
resname = "PRN"
if atomname == "H2" or atomname == "H3": atomname = "HN"
elif nterm == 2: # Neutral
if atomname in ["N","H","H2","H3","CA","HA","C","O"]:
resname = "BKN"
if atomname == "H":
atomname = "H1"
if atomname == 'H3':
atomname='H2'
elif nterm == 3: # Positive
if atomname in ["N","H","H2","H3","CA","HA","C","O"]:
resname = "BK+"
if atomname == "H": atomname = "H1"
elif cterm:
if atomname == "O": atomname = "O1"
elif atomname == "OXT": atomname = "O2"
if cterm == 1 and atomname in ["N","H","HA","CA","C","O1","O2"]:
resname = "BK-"
elif cterm == 2 and atomname in ["N","H","HA","CA","C","O1","O2","HO"]:
if atomname == "HO": atomname = "H2"
resname = "BKC"
#print 'Cterm resname is',resname
elif residue.get("type") == 3:
resname = "H2O"
if atomname == "O": atomname = "OH"
elif atomname == "H1": atomname = "HH1"
elif atomname == "H2": atomname = "HH2"
# Residue Substitutions
if resname == "HSD": resname = "HID"
elif resname in ["HIE","HSE"]: resname = "HIS"
elif resname in ["HIP","HSP"]: resname = "HI+"
elif resname == "ILE":
if atomname == "HG12": atomname = "HG11"
elif atomname == "HG13": atomname = "HG12"
elif atomname == "CD": atomname = "CD1"
elif resname == "CYS" and "HG" not in residue.get("map"):
resname = "CSS"
#
# Histidine
#
elif resname == "HIS":
if "HD1" in residue.get("map") and "HE2" in residue.get("map"):
resname = "HI+"
elif "HD1" in residue.get("map"):
resname = "HID"
elif "HE2" in residue.get("map"):
resname = "HIS"
elif resname == "GLU" or resname == "GLH":
if "HE1" in residue.get("map"):
resname = "GL0"
if atomname == "HE1": atomname = "HE2"
elif atomname == "OE1": atomname = "OE2"
elif atomname == "OE2": atomname = "OE1"
elif "HE2" in residue.get("map"): resname = "GL0"
elif resname == "ASP" or resname == "ASH":
if "HD1" in residue.get("map"):
resname = "AS0"
if atomname == "HD1": atomname = "HD2"
elif atomname == "OD1": atomname = "OD2"
elif atomname == "OD2": atomname = "OD1"
elif "HD2" in residue.get("map"): resname = "AS0"
elif resname == "ACE":
if atomname == "HH31": atomname = "HA1"
elif atomname == "HH32": atomname = "HA2"
elif atomname == "HH33": atomname = "HA3"
elif atomname == "CH3": atomname = "CA"
elif resname == "TYR":
if not "HH" in residue.get("map"):
resname="TYM"
elif resname == "TYM": resname = "TY-"
elif resname == "CYM": resname = "CY-"
elif resname == "LYN": resname = "LY0"
#
# Neutral LYS and neutral ARG detection based on hydrogens - added by Jens
#
elif resname == "LYS":
if not "HZ3" in residue.get("map"):
resname="LY0"
elif resname == "ARG":
if not "HE" in residue.get("map"):
resname="AR0"
elif resname == "NME":
resname = "N-M"
if atomname == "CH3": atomname = "CA"
elif atomname == "H": atomname = "H1"
elif atomname.startswith("HH"): atomname = "HA" + atomname[-1]
# Hydrogen Substitutions
if atomname == "H": atomname = "HN"
elif atomname == "HA2": atomname = "HA1"
elif atomname == "HA3": atomname = "HA2"
elif atomname == "HB2" and resname not in ["ALA"]: atomname = "HB1"
elif atomname == "HB3" and resname not in ["ALA"]: atomname = "HB2"
elif atomname == "HD2" and resname not in ["HIS","HI+","HID","AS0"]: atomname = "HD1"
elif atomname == "HD3" and resname not in ["HIS","HI+","HID"]: atomname = "HD2"
elif atomname == "HE2" and resname not in ["TRP","HIS","HI+","HID","GL0"]: atomname = "HE1"
elif atomname == "HE3" and resname not in ["TRP","HIS","HI+","HID"]: atomname = "HE2"
elif atomname == "HG2": atomname = "HG1"
elif atomname == "HG3": atomname = "HG2"
elif atomname == "HZ2" and resname == "LY0": atomname = "HZ1"
elif atomname == "HZ3" and resname == "LY0": atomname = "HZ2"
return resname, atomname
def getCharmmParams(self, residue, name):
"""
Get the forcefield definitions from the Charmm database
Parameters
residue: The residue (residue)
name: The atom name (string)
Returns
resname: The name of the Charmm residue
atomname: The name of the Charmm atom
"""
resname = residue.get("name")
atomname = name
# Nucleic Acid Substitutions
if residue.get("type") == 4:
resname = resname[0]
if resname == "A": resname = "ADE"
elif resname == "C": resname = "CYT"
elif resname == "G": resname = "GUA"
elif resname == "T":
resname = "THY"
if atomname == "C7": atomname = "C5M"
elif atomname == "H71": atomname = "H51"
elif atomname == "H72": atomname = "H52"
elif atomname == "H73": atomname = "H53"
elif resname == "U": resname = "URA"
if atomname == "H5'1": atomname = "H5'"
elif atomname == "H5'2": atomname = "H5''"
elif atomname == "H2'1": atomname = "H2'"
elif atomname in ["H2'2","HO'2"]: atomname = "H2''"
if residue.getAtom("O2'") == None:
if atomname in ["C2'","H2'","H2''"]: resname = "DEO1"
if residue.getAtom("H5T") != None:
if atomname in ["H5T","O5'","C5'"]: resname = "5TER"
if residue.getAtom("H3T") != None:
if atomname in ["H3T","O3'","C3'"]: resname = "3TER"
# Terminal/Water Substitutions
if residue.get("isNterm"):
if resname == "GLY" and atomname in ["N","H","H2","H3","CA","HA2","HA3"]:
resname = "GLYP"
if atomname == "H": atomname = "HT1"
elif atomname == "H2": atomname = "HT2"
elif atomname == "H3": atomname = "HT3"
elif resname == "PRO" and atomname in ["N","HN1","HN2","CD","CA","HD1","HD2","HA","H2","H3"]:
resname = "PROP"
if atomname == "H2": atomname = "HN1"
elif atomname == "H3": atomname = "HN2"
elif resname == "ACE":
if atomname == "CH3": atomname = "CAY"
elif atomname == "HH31": atomname = "HY1"
elif atomname == "HH32": atomname = "HY2"
elif atomname == "HH33": atomname = "HY3"
elif atomname == "C": atomname = "CY"
elif atomname == "O": atomname = "OY"
else:
if atomname in ["N","H","H2","H3","CA","HA"]:
resname = "NTER"
if atomname == "H": atomname = "HT1"
elif atomname == "H2": atomname = "HT2"
elif atomname == "H3": atomname = "HT3"
elif residue.get("isCterm"):
if atomname in ["O","OXT","C"]:
resname = "CTER"
if atomname == "O":
atomname = "OT1"
elif atomname == "OXT":
atomname = "OT2"
elif residue.get("type") == 3:
resname = "TP3M"
if atomname == "O": atomname = "OH2"
# Residue substitutions
if resname == "ILE":
if atomname == "CD1": atomname = "CD"
elif atomname == "HD11": atomname = "HD1"
elif atomname == "HD12": atomname = "HD2"
elif atomname == "HD13": atomname = "HD3"
elif atomname == "HG12": atomname = "HG11"
elif atomname == "HG13": atomname = "HG12"
elif resname == "CYS" and "HG" not in residue.get("map"):
resname = "CYS"
if atomname == "CB":
resname = "DISU"
atomname = "1CB"
elif atomname == "SG":
resname = "DISU"
atomname = "1SG"
elif resname == "HIS":
if "HD1" in residue.get("map") and "HE2" in residue.get("map"):
resname = "HSP"
elif "HD1" in residue.get("map"):
resname = "HSD"
elif "HE2" in residue.get("map"):
resname = "HSE"
elif resname == "GLU" or resname == "GLH":
if "HE1" in residue.get("map"):
if atomname == "HE1": atomname = "HE2"
elif atomname == "OE1": atomname = "OE2"
elif atomname == "OE2": atomname = "OE1"
if atomname in ["CG","HG3","HG1","HG2","CD","OE1","OE2","HE2"]: resname = "GLUP"
else: resname = "GLU"
elif "HE2" in residue.get("map"):
if atomname in ["CG","HG3","HG1","HG2","CD","OE1","OE2","HE2"]: resname = "GLUP"
else: resname = "GLU"
elif resname == "ASP" or resname == "ASH":
if "HD1" in residue.get("map"):
if atomname == "HD1": atomname = "HD2"
elif atomname == "OD1": atomname = "OD2"
elif atomname == "OD2": atomname = "OD1"
if atomname in ["CB","HB3","HB1","HB2","CG","OD1","OD2","HD2"]: resname = "ASPP"
else: resname = "ASP"
elif "HD2" in residue.get("map"):
if atomname in ["CB","HB3","HB1","HB2","CG","OD1","OD2","HD2"]: resname = "ASPP"
else: resname = "ASP"
# HETATM Substitutions
if resname == "ACE":
if atomname == "CH3": atomname = "CAY"
elif atomname == "HH31": atomname = "HY1"
elif atomname == "HH32": atomname = "HY2"
elif atomname == "HH33": atomname = "HY3"
elif atomname == "C": atomname = "CY"
elif atomname == "O": atomname = "OY"
elif resname == "ADP":
atomname = string.replace(atomname,"*","\'")
elif resname == "NME":
resname = "CT3"
if atomname == "HH31": atomname = "HT1"
elif atomname == "HH32": atomname = "HT2"
elif atomname == "HH33": atomname = "HT3"
elif atomname == "CH3": atomname = "CAT"
elif atomname == "N": atomname = "NT"
elif atomname == "H": atomname = "HNT"
# Hydrogen Substitutions
if atomname == "H": atomname = "HN"
elif atomname == "HA2": atomname = "HA1"
elif atomname == "HA3": atomname = "HA2"
elif atomname == "HB2" and resname not in ["ALA"]: atomname = "HB1"
elif atomname == "HB3" and resname not in ["ALA"]: atomname = "HB2"
elif atomname == "HD2" and resname not in ["HSP","HSE","HSD","ASPP"]: atomname = "HD1"
elif atomname == "HD3" and resname not in ["HIS","HSE","HSD"]: atomname = "HD2"
elif atomname == "HE2" and resname not in ["TRP","HSP","HSE","HSD","GLUP"]: atomname = "HE1"
elif atomname == "HE3" and resname not in ["TRP","HSP","HSE","HSD"]: atomname = "HE2"
elif atomname == "HG2": atomname = "HG1"
elif atomname == "HG3": atomname = "HG2"
elif atomname == "HG" and resname in ["SER","CYS"]: atomname = "HG1"
return resname, atomname
class ForcefieldResidue:
"""
ForcefieldResidue class
The ForceFieldResidue class contains a mapping of all atoms within
the residue for easy searching.
"""
def __init__(self, name):
"""
Initialize the ForceFieldResidue object
Parameters
name: The name of the residue (string)
"""
self.name = name
self.atoms = {}
def addAtom(self, atom):
"""
Add an atom to the ForcefieldResidue
Parameters
atom: The atom to be added (atom)
"""
atomname = atom.get("name")
self.atoms[atomname] = atom
def getAtoms(self):
"""
Return the list of atoms in this residue.
"""
return self.atoms
def hasAtom(self, atomname):
"""
Check to see if the atomname is in the current residue.
Parameters
atomname: The name of the atom to search for
Returns
1 if the atom is present in the residue, 0 otherwise
"""
if atomname in self.atoms: return 1
else: return 0
def getAtom(self, atomname):
"""
Return the atom object with the given atomname
Parameters
resname: The name of the atom (string)
Returns
residue: The atom object (ForcefieldAtom)
"""
if self.hasAtom(atomname): return self.atoms[atomname]
else: return None
class ForcefieldAtom:
"""
ForcefieldAtom class
The ForcefieldAtom object contains fields that are related to the
forcefield at the atom level
"""
def __init__(self, name, charge, radius, resname, group=""):
"""
Initialize the object
Parameters
name: The atom name (string)
charge: The charge on the atom (float)
radius: The radius of the atom (float)
resname: The residue name (string)
group: The group name (string)
"""
self.name = name
self.charge = charge
self.radius = radius
self.resname = resname
self.group = group
def get(self, name):
"""
Get a member of the ForcefieldAtom class
Parameters
name: The name of the member (string)
Possible Values
name: The atom name (string)
charge: The charge on the atom (float)
radius: The radius of the atom (float)
epsilon: The epsilon associated with the atom (float)
Returns
item: The value of the member
"""
try:
item = getattr(self, name)
return item
except AttributeError:
message = "Unable to access object \"%s\" in class ForcefieldAtom" % name
raise ValueError, message
def __str__(self):
"""
String representation of the forcefield atom.
"""
txt = "%s:\n"% self.name
txt += " Charge: %.4f\n" % self.charge
txt += " Radius: %.4f" % self.radius
return txt
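# --- Illustrative usage (added; not part of the original module) ---
# A minimal sketch of the container classes above: build a residue, attach an
# atom, and read its parameters back. The names and values are made up.
def _exampleResidueLookup():
    residue = ForcefieldResidue("ALA")
    atom = ForcefieldAtom("CA", 0.1, 1.9, "ALA")
    residue.addAtom(atom)
    ca = residue.getAtom("CA")
    return ca.get("charge"), ca.get("radius")  # (0.1, 1.9)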
|
|
# pylint: disable=bad-indentation,line-too-long,missing-function-docstring
"""
When near_policy_dataset = True, the behavior and target policies are trained in
an environment with noise_level = 0 and run_id = 1.
Otherwise, the target policy is trained in an environment with the same
noise_level as the one to be evaluated.
"""
from absl import logging
import functools
import os
from typing import Any, Dict
from pathlib import Path
from acme import specs
from acme.tf import utils as tf2_utils
from acme.tf import networks
import tensorflow as tf
import trfl
import sonnet as snt
from src.utils import load_offline_bsuite_dataset
from src.utils import load_offline_dm_control_dataset
from src.utils import acme_utils
TASK_SHARD_MAP = {
"bsuite_catch": 1,
"bsuite_mountain_car": 1,
"bsuite_cartpole": 1,
"dm_control_cartpole_swingup": 1,
"dm_control_cheetah_run": 1,
"dm_control_walker_walk": 1,
"dm_control_humanoid_run": 10,
}
TASK_VALID_SHARD_MAP = {
"bsuite_catch": 1,
"bsuite_mountain_car": 1,
"bsuite_cartpole": 1,
"dm_control_cartpole_swingup": 1,
"dm_control_cheetah_run": 1,
"dm_control_walker_walk": 1,
"dm_control_humanoid_run": 10,
}
def bsuite_near_policy_dataset_dir(bsuite_id: str,
noise_level: float,
dataset_path: str):
# Policy training config.
policy_train_env_noise_level = 0.0
run_id = 1
# Data generation config.
# Policy action noise.
policy_noise_level = 0.3
# Environment noise level.
prob_noise_level = noise_level
dataset_path = Path(dataset_path)
path = str(dataset_path.joinpath(
f"bsuite_near_policy/transitions/{bsuite_id}/"
f"policy_train_env_noise_{policy_train_env_noise_level}_run_{run_id}/"
f"policy_noise_{policy_noise_level}/"
f"env_noise_{prob_noise_level}")) + "/"
return path
def bsuite_offline_dataset_dir(bsuite_id: str,
noise_level: float,
dataset_path: str):
run_id = 0
dataset_path = Path(dataset_path)
path = str(dataset_path.joinpath(
f"bsuite/{bsuite_id}_{noise_level}/{run_id}_full"))
return path
def dm_control_offline_dataset_dir(dm_control_task_name: str,
noise_level: float,
dataset_path: str):
run_id = 0
dataset_path = Path(dataset_path)
root_path = str(dataset_path.joinpath(
"dm_control_suite_stochastic/transitions/"
f"{dm_control_task_name}_{noise_level}/"))
data_path = f"{run_id}_full"
return root_path, data_path
def load_data_and_env(task_name: str,
noise_level: float,
dataset_path: str,
batch_size: int,
near_policy_dataset: bool = False,
valid_batch_size: int = 1024,
shuffle: bool = True, # Shuffle training dataset.
repeat: bool = True, # Repeat training dataset.
max_dev_size: int = None):
"""Load train/valid dataset and environment."""
num_shards = TASK_SHARD_MAP.get(task_name, 1)
num_valid_shards = TASK_VALID_SHARD_MAP.get(task_name, 1)
if task_name.startswith("bsuite"):
# BSuite tasks.
bsuite_id = task_name[len("bsuite_"):] + "/0"
if near_policy_dataset:
# Near-policy dataset.
path = bsuite_near_policy_dataset_dir(
bsuite_id, noise_level, dataset_path)
else:
# Pure offline dataset.
path = bsuite_offline_dataset_dir(
bsuite_id, noise_level, dataset_path)
logging.info("Dataset path: %s", path)
train_dataset, valid_dataset, environment = load_offline_bsuite_dataset(
bsuite_id=bsuite_id,
random_prob=noise_level,
path=path,
batch_size=batch_size,
valid_batch_size=valid_batch_size,
num_shards=num_shards,
num_valid_shards=num_valid_shards,
shuffle=shuffle,
repeat=repeat)
elif task_name.startswith("dm_control"):
# DM Control tasks.
if near_policy_dataset:
raise ValueError(
"Near-policy dataset is not available for dm_control tasks.")
dm_control_task_name = task_name[len("dm_control_"):]
root_path, data_path = dm_control_offline_dataset_dir(
dm_control_task_name, noise_level, dataset_path)
logging.info("Dataset root path: %s", root_path)
logging.info("Dataset file path: %s", data_path)
train_dataset, valid_dataset, environment = load_offline_dm_control_dataset(
task_name=dm_control_task_name,
noise_std=noise_level,
root_path=root_path,
data_path=data_path,
batch_size=batch_size,
valid_batch_size=valid_batch_size,
num_shards=num_shards,
num_valid_shards=num_valid_shards,
shuffle=shuffle,
repeat=repeat)
else:
raise ValueError(f"task name {task_name} is unsupported.")
if max_dev_size is not None:
valid_dataset = valid_dataset.take(
(max_dev_size + valid_batch_size - 1) // valid_batch_size)
return train_dataset, valid_dataset, environment
def bsuite_policy_path(bsuite_id: str,
noise_level: float,
near_policy_dataset: bool,
dataset_path: str):
if near_policy_dataset:
env_noise_level = 0.0 # params["env_noise_level"]
run_id = 1 # params["run_id"]
else:
env_noise_level = noise_level # params["env_noise_level"]
run_id = 1 # params["run_id"]
# policy_noise_level = 0.1 # params["policy_noise_level"]
dataset_path = Path(dataset_path)
path = str(dataset_path.joinpath(
"bsuite_near_policy/snapshots/"
f"{bsuite_id}_{env_noise_level}/{run_id}_full"))
return path
def dm_control_policy_path(dm_control_task: str,
noise_level: float,
dataset_path: str):
env_noise_level = noise_level
run_id = 1
dataset_path = Path(dataset_path)
path = str(dataset_path.joinpath(
"dm_control_suite_stochastic/snapshots/"
f"{dm_control_task}_{env_noise_level}/{run_id}_full"))
return path
def load_policy_net(
task_name: str,
noise_level: float,
dataset_path: str,
environment_spec: specs.EnvironmentSpec,
near_policy_dataset: bool = False,
):
dataset_path = Path(dataset_path)
if task_name.startswith("bsuite"):
# BSuite tasks.
bsuite_id = task_name[len("bsuite_"):] + "/0"
path = bsuite_policy_path(
bsuite_id, noise_level, near_policy_dataset, dataset_path)
logging.info("Policy path: %s", path)
policy_net = tf.saved_model.load(path)
policy_noise_level = 0.1 # params["policy_noise_level"]
observation_network = tf2_utils.to_sonnet_module(functools.partial(
tf.reshape, shape=(-1,) + environment_spec.observations.shape))
policy_net = snt.Sequential([
observation_network,
policy_net,
        # The next line adds action noise to the target policy; comment it
        # out to evaluate the noise-free greedy policy.
lambda q: trfl.epsilon_greedy(q, epsilon=policy_noise_level).sample(),
])
elif task_name.startswith("dm_control"):
# DM Control tasks.
if near_policy_dataset:
raise ValueError(
"Near-policy dataset is not available for dm_control tasks.")
dm_control_task = task_name[len("dm_control_"):]
path = dm_control_policy_path(
dm_control_task, noise_level, dataset_path)
logging.info("Policy path: %s", path)
policy_net = tf.saved_model.load(path)
policy_noise_level = 0.2 # params["policy_noise_level"]
observation_network = tf2_utils.to_sonnet_module(tf2_utils.batch_concat)
policy_net = snt.Sequential([
observation_network,
policy_net,
        # The next two lines add clipped Gaussian action noise to the target
        # policy; comment them out to evaluate the noise-free policy.
acme_utils.GaussianNoise(policy_noise_level),
networks.ClipToSpec(environment_spec.actions),
])
else:
raise ValueError(f"task name {task_name} is unsupported.")
return policy_net
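# A minimal usage sketch (not part of the original module). The task name,
# noise level and dataset path below are placeholders for an actual dataset
# location; the sketch is left commented out so importing this file stays
# side-effect free.
#
# train_ds, valid_ds, env = load_data_and_env(
#     task_name="bsuite_catch", noise_level=0.1,
#     dataset_path="/path/to/datasets", batch_size=256)
# env_spec = specs.make_environment_spec(env)
# policy_net = load_policy_net(
#     task_name="bsuite_catch", noise_level=0.1,
#     dataset_path="/path/to/datasets", environment_spec=env_spec)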
|
|
import sys
import urllib
import cookielib
import re
import requests
from bs4 import BeautifulSoup
# This is a debug flag; set it to 1 to print some debug messages
isDebug = 0
# Function to take the torrent search keyword as input from user
# parameters: None
# returns: Name of the torrent in URL encoded format for searching in The Pirate Bay
def get_torrent_name():
print "\nEnter the name of torrent to be searched: "
sys.stdout.flush()
resp = ""
while(resp == ""):
resp = str(raw_input())
else:
if(len(resp)):
# Perform an URL encoding for searching TPB
torrent = urllib.pathname2url(resp)
return torrent
else:
print "Incorrect input!!!"
return
# Function to scrape the TPB with given torrent keyword and returns results pertaining to that keyword
# parameters: URL Encoded torrent keyword e.g. game%20of%20thrones
# returns: Search results of TPB's first page with given torrent query
def call_tpb(torrent):
# Make a query_string = 'https://pirateproxy.one/search/game%20of%20thrones'
query_string = 'https://pirateproxy.one/search/' + torrent
    # Take care of cookie handling
cj = cookielib.CookieJar()
# Make the GET call to TPB and obtain response
try:
response = requests.get(query_string, cookies = cj, headers = get_headers())
if(isDebug):
if(response.status_code == 200):
                debug_file = open(r"D:\Workspace\Test\Output\torrent2.html", 'a+')
                debug_file.write((response.text).encode('utf-8'))
                debug_file.close()
else:
print "Response code is not 200."
parsed_response = parse_response(response)
return parsed_response
except IOError as e:
print "Error in connecting to TPB because: "
print e
return None
# Utility to parse the HTML response from TPB and extract the torrent search results as rows
# Right now it only scrapes the first page of results, which is a pragmatic approach
# parameters: HTML response obtained by running search query on TPB
# returns: Search results of TPB's first page with given torrent query
def parse_response(response):
soup = BeautifulSoup(response.text, 'html.parser')
# TPB has a div with id 'searchResult' to show all the torrent search results
search = soup.find(id='searchResult')
rows = search.findAll('tr')
return rows
# Function to display search results on CLI in a tabular form and takes selected result from user as a number
# parameters: List 'rows' containing parsed HTML response. Each torrent found, is represented by a row.
# parameters: An integer n, representing number of results on first page, it is simply len(rows)
# returns: The row number(1 index based), which user selected for download
def show_results(rows, n):
resp, start = None, 1
end = start + 5
while(start < n):
show_header()
if(end > n):
end = n
display_list(rows, start, end)
print "\nEnter torrent no. to select or Return to see more: ",
sys.stdout.flush()
resp = str(raw_input())
# Enter is pressed for new rows
        if(resp == ""):
start, end = end, end + 5
else:
resp = int(resp)
            if(not(start <= resp < end)):
                print "Wrong Choice!!!"
                return None
            return resp
else:
print "\nNo more torrents to show!!!"
return
# Utility to print heading for torrent search display in tabular form
# parameters: None
# returns: None
def show_header():
print "\n\n"
line_new = '{:<6} {:^70.70} {:>6} {:<0} {:<6} {:>10}'.format("S.No", "Name", "S", "/", "L", "Size")
print line_new
# Utility to print all searched torrents in tabular form
# Searched torrents are presented in batch of 5 at a time in CLI for user to select e.g. display results 6-10
# parameters: rows, a List containing searched rows
# parameters: start, an integer indicating the start of the current batch for printing.
# parameters: end, an integer indicating the end of the current batch for printing.
# returns: None
def display_list(rows, start, end):
for row in range(start, end):
cols = rows[row].findAll('td')
name = (cols[1].a.string).encode('utf-8')
seed = int(cols[2].string)
leech = int(cols[3].string)
size = find_size(cols[1])
print "\n"
line_new = '{:<6} {:<70.70} {:>6} {:<0} {:<6} {:>10}'.format(row, name, seed, "/", leech, size)
print line_new
# Function to extract size of torrent, looks hacky. TODO: Should be corrected later.
def find_size(col):
b = col.font.contents
u = (b[0].string).encode('utf-8')
m = u.find('Size')
u = u[m+5:] # To skip "Size "
n = u.index(',')
size = u[:n]
return size
# Wrapper function to start a torrent selected by user from a list of rows
# parameters: List 'rows' containing parsed HTML response. Each torrent found, is represented by a row.
# parameters: row_no, indicating the user input for torrent selection
# returns: None
def start_download(rows, row_no):
row = rows[row_no]
mag_link = get_magnet_link(row)
if(mag_link is not None):
add_to_utorrent(mag_link)
else:
print "Error in Magnetic link!!!"
return
# Calls utorrent API and adds torrent to it for downloading
# parameters: mag_link, a magnetic link of the selected torrent
# returns: None
def add_to_utorrent(mag_link):
    # TODO: Hardcoded to localhost:8080 for now; make this configurable or read it from the uTorrent settings file
ip = get_ip()
port = get_port()
base_url = "http://" + ip + ":" + port + "/gui/"
token, cookie = get_token_and_cookie(base_url)
# Form add url
add_url = base_url + "?token=" + token + "&action=add-url&s=" + mag_link
auth = get_utorrent_credentials()
headers = get_headers()
if(isDebug):
print "add_url: ", add_url
try:
r = requests.get(add_url, auth = auth, cookies = cookie, headers = headers)
if(isDebug):
print r
if(r.ok):
print "Successfully added torrent"
except requests.exceptions.RequestException as e:
print "Could not add because"
print e
# HARDCODE
# Returns the IP on which uTorrent is running
# parameters: None
# returns: str(IP)
def get_ip():
return 'localhost'
# HARDCODE
# Returns the port on which uTorrent is running
# parameters: None
# returns: str(port)
def get_port():
return '8080'
# HARDCODE
# Returns utorrent login credentials
# parameters: None
# returns: ('username', 'password')
def get_utorrent_credentials():
return 'prateek', 'prateek'
# HARDCODE
# Returns User Agent Headers for GET requests
# parameters: None
# returns: dict of User-Agent
def get_headers():
headers = {'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36'}
return headers
# Calls utorrent API and fetches token number and cookie associated
# parameters: base_url for utorrent API e.g. http://localhost:8080/gui/
# returns: (token, cookie)
def get_token_and_cookie(base_url):
token_url = base_url + "token.html"
regex_token = r'<div[^>]*id=[\"\']token[\"\'][^>]*>([^<]*)</div>' # could use BeautifulSoup but this works as well
# Hardcoded again. TODO: Either use CLI to get from user or read from settings
auth = get_utorrent_credentials()
r = requests.get(token_url, auth = auth)
# We need to extract both token and cookie GUID value
token = re.search(regex_token, r.text).group(1)
guid = r.cookies['GUID']
cookie = {"GUID" : guid}
return token, cookie
# Extracts the magnet link from a parsed search-result row
# parameters: row, a single torrent's row (<tr>) from the parsed TPB results
# returns: str, the magnet URI of the selected torrent
def get_magnet_link(row):
cols = row.findAll('td')
torrent_description = cols[1]
# find all <a> tags and extract magnet link from list item at index 1
anchors = torrent_description.findChildren('a')
return anchors[1].attrs['href']
# Driver function
# parameters: None
# returns: None
def main():
torrent = get_torrent_name()
rows = call_tpb(torrent)
if(rows is not None):
total_rows = len(rows)
torrent_no = show_results(rows, total_rows)
if(torrent_no is not None):
start_download(rows, torrent_no)
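# Script entry point (a small addition, assuming this module is meant to be
# run directly rather than imported).
if __name__ == "__main__":
    main()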
|
|
# mssql.py
"""Support for the Microsoft SQL Server database.
Connecting
----------
See the individual driver sections below for details on connecting.
Auto Increment Behavior
-----------------------
``IDENTITY`` columns are supported by using SQLAlchemy
``schema.Sequence()`` objects. In other words::
from sqlalchemy import Table, Integer, Sequence, Column
Table('test', metadata,
Column('id', Integer,
Sequence('blah',100,10), primary_key=True),
Column('name', String(20))
).create(some_engine)
would yield::
CREATE TABLE test (
id INTEGER NOT NULL IDENTITY(100,10) PRIMARY KEY,
name VARCHAR(20) NULL,
)
Note that the ``start`` and ``increment`` values for sequences are
optional and will default to 1,1.
Implicit ``autoincrement`` behavior works the same in MSSQL as it
does in other dialects and results in an ``IDENTITY`` column.
* Support for ``SET IDENTITY_INSERT ON`` mode (automagic on / off for
``INSERT`` s)
* Support for auto-fetching of ``@@IDENTITY/@@SCOPE_IDENTITY()`` on
``INSERT``
Collation Support
-----------------
MSSQL specific string types support a collation parameter that
creates a column-level specific collation for the column. The
collation parameter accepts a Windows Collation Name or a SQL
Collation Name. Supported types are MSChar, MSNChar, MSString,
MSNVarchar, MSText, and MSNText. For example::
from sqlalchemy.dialects.mssql import VARCHAR
Column('login', VARCHAR(32, collation='Latin1_General_CI_AS'))
When such a column is associated with a :class:`Table`, the
CREATE TABLE statement for this column will yield::
login VARCHAR(32) COLLATE Latin1_General_CI_AS NULL
LIMIT/OFFSET Support
--------------------
MSSQL has no support for the LIMIT or OFFSET keywords. LIMIT is
supported directly through the ``TOP`` Transact SQL keyword::
select.limit
will yield::
SELECT TOP n
If using SQL Server 2005 or above, LIMIT with OFFSET
support is available through the ``ROW_NUMBER OVER`` construct.
For versions below 2005, LIMIT with OFFSET usage will fail.
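For example, on SQL Server 2005 or above (an illustrative sketch only;
``sometable`` stands for any :class:`Table` already defined)::
    select([sometable]).order_by(sometable.c.id).limit(10).offset(20)
is rendered as a subquery using ``ROW_NUMBER() OVER (ORDER BY ...)`` with a
filter on the generated row number; an ORDER BY is required whenever an
offset is used.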
Nullability
-----------
MSSQL has support for three levels of column nullability. The default
nullability allows nulls and is explicit in the CREATE TABLE
construct::
name VARCHAR(20) NULL
If ``nullable=None`` is specified then no specification is made. In
other words the database's configured default is used. This will
render::
name VARCHAR(20)
If ``nullable`` is ``True`` or ``False`` then the column will be
``NULL`` or ``NOT NULL`` respectively.
Date / Time Handling
--------------------
DATE and TIME are supported. Bind parameters are converted
to datetime.datetime() objects as required by most MSSQL drivers,
and results are processed from strings if needed.
The DATE and TIME types are not available for MSSQL 2005 and
previous - if a server version below 2008 is detected, DDL
for these types will be issued as DATETIME.
Compatibility Levels
--------------------
MSSQL supports the notion of setting compatibility levels at the
database level. This allows, for instance, a database that is
compatible with SQL2000 to run on a SQL2005 database server.
``server_version_info`` will always return the database server
version information (in this case SQL2005) and not the
compatibility level information. Because of this, if running under
a backwards compatibility mode, SQLAlchemy may attempt to use T-SQL
statements that cannot be parsed by the database server.
Known Issues
------------
* No support for more than one ``IDENTITY`` column per table
"""
import datetime, decimal, inspect, operator, sys, re
import itertools
from sqlalchemy import sql, schema as sa_schema, exc, util
from sqlalchemy.sql import select, compiler, expression, \
operators as sql_operators, \
functions as sql_functions, util as sql_util
from sqlalchemy.engine import default, base, reflection
from sqlalchemy import types as sqltypes
from sqlalchemy import processors
from sqlalchemy.types import INTEGER, BIGINT, SMALLINT, DECIMAL, NUMERIC, \
FLOAT, TIMESTAMP, DATETIME, DATE, BINARY,\
VARBINARY, BLOB
from sqlalchemy.dialects.mssql import information_schema as ischema
MS_2008_VERSION = (10,)
MS_2005_VERSION = (9,)
MS_2000_VERSION = (8,)
RESERVED_WORDS = set(
['add', 'all', 'alter', 'and', 'any', 'as', 'asc', 'authorization',
'backup', 'begin', 'between', 'break', 'browse', 'bulk', 'by', 'cascade',
'case', 'check', 'checkpoint', 'close', 'clustered', 'coalesce',
'collate', 'column', 'commit', 'compute', 'constraint', 'contains',
'containstable', 'continue', 'convert', 'create', 'cross', 'current',
'current_date', 'current_time', 'current_timestamp', 'current_user',
'cursor', 'database', 'dbcc', 'deallocate', 'declare', 'default',
'delete', 'deny', 'desc', 'disk', 'distinct', 'distributed', 'double',
'drop', 'dump', 'else', 'end', 'errlvl', 'escape', 'except', 'exec',
'execute', 'exists', 'exit', 'external', 'fetch', 'file', 'fillfactor',
'for', 'foreign', 'freetext', 'freetexttable', 'from', 'full',
'function', 'goto', 'grant', 'group', 'having', 'holdlock', 'identity',
'identity_insert', 'identitycol', 'if', 'in', 'index', 'inner', 'insert',
'intersect', 'into', 'is', 'join', 'key', 'kill', 'left', 'like',
'lineno', 'load', 'merge', 'national', 'nocheck', 'nonclustered', 'not',
'null', 'nullif', 'of', 'off', 'offsets', 'on', 'open', 'opendatasource',
'openquery', 'openrowset', 'openxml', 'option', 'or', 'order', 'outer',
'over', 'percent', 'pivot', 'plan', 'precision', 'primary', 'print',
'proc', 'procedure', 'public', 'raiserror', 'read', 'readtext',
'reconfigure', 'references', 'replication', 'restore', 'restrict',
'return', 'revert', 'revoke', 'right', 'rollback', 'rowcount',
'rowguidcol', 'rule', 'save', 'schema', 'securityaudit', 'select',
'session_user', 'set', 'setuser', 'shutdown', 'some', 'statistics',
'system_user', 'table', 'tablesample', 'textsize', 'then', 'to', 'top',
'tran', 'transaction', 'trigger', 'truncate', 'tsequal', 'union',
'unique', 'unpivot', 'update', 'updatetext', 'use', 'user', 'values',
'varying', 'view', 'waitfor', 'when', 'where', 'while', 'with',
'writetext',
])
class REAL(sqltypes.Float):
"""A type for ``real`` numbers."""
__visit_name__ = 'REAL'
def __init__(self):
super(REAL, self).__init__(precision=24)
class TINYINT(sqltypes.Integer):
__visit_name__ = 'TINYINT'
# MSSQL DATE/TIME types have varied behavior, sometimes returning
# strings. MSDate/TIME check for everything, and always
# filter bind parameters into datetime objects (required by pyodbc,
# not sure about other dialects).
class _MSDate(sqltypes.Date):
def bind_processor(self, dialect):
def process(value):
if type(value) == datetime.date:
return datetime.datetime(value.year, value.month, value.day)
else:
return value
return process
_reg = re.compile(r"(\d+)-(\d+)-(\d+)")
def result_processor(self, dialect, coltype):
def process(value):
if isinstance(value, datetime.datetime):
return value.date()
elif isinstance(value, basestring):
return datetime.date(*[
int(x or 0)
for x in self._reg.match(value).groups()
])
else:
return value
return process
class TIME(sqltypes.TIME):
def __init__(self, precision=None, **kwargs):
self.precision = precision
super(TIME, self).__init__()
__zero_date = datetime.date(1900, 1, 1)
def bind_processor(self, dialect):
def process(value):
if isinstance(value, datetime.datetime):
value = datetime.datetime.combine(
self.__zero_date, value.time())
elif isinstance(value, datetime.time):
value = datetime.datetime.combine(self.__zero_date, value)
return value
return process
_reg = re.compile(r"(\d+):(\d+):(\d+)(?:\.(\d+))?")
def result_processor(self, dialect, coltype):
def process(value):
if isinstance(value, datetime.datetime):
return value.time()
elif isinstance(value, basestring):
return datetime.time(*[
int(x or 0)
for x in self._reg.match(value).groups()])
else:
return value
return process
class _DateTimeBase(object):
def bind_processor(self, dialect):
def process(value):
if type(value) == datetime.date:
return datetime.datetime(value.year, value.month, value.day)
else:
return value
return process
class _MSDateTime(_DateTimeBase, sqltypes.DateTime):
pass
class SMALLDATETIME(_DateTimeBase, sqltypes.DateTime):
__visit_name__ = 'SMALLDATETIME'
class DATETIME2(_DateTimeBase, sqltypes.DateTime):
__visit_name__ = 'DATETIME2'
def __init__(self, precision=None, **kwargs):
self.precision = precision
# TODO: is this not an Interval ?
class DATETIMEOFFSET(sqltypes.TypeEngine):
__visit_name__ = 'DATETIMEOFFSET'
def __init__(self, precision=None, **kwargs):
self.precision = precision
class _StringType(object):
"""Base for MSSQL string types."""
def __init__(self, collation=None):
self.collation = collation
class TEXT(_StringType, sqltypes.TEXT):
"""MSSQL TEXT type, for variable-length text up to 2^31 characters."""
def __init__(self, *args, **kw):
"""Construct a TEXT.
:param collation: Optional, a column-level collation for this string
value. Accepts a Windows Collation Name or a SQL Collation Name.
"""
collation = kw.pop('collation', None)
_StringType.__init__(self, collation)
sqltypes.Text.__init__(self, *args, **kw)
class NTEXT(_StringType, sqltypes.UnicodeText):
"""MSSQL NTEXT type, for variable-length unicode text up to 2^30
characters."""
__visit_name__ = 'NTEXT'
def __init__(self, *args, **kwargs):
"""Construct a NTEXT.
:param collation: Optional, a column-level collation for this string
value. Accepts a Windows Collation Name or a SQL Collation Name.
"""
collation = kwargs.pop('collation', None)
_StringType.__init__(self, collation)
length = kwargs.pop('length', None)
sqltypes.UnicodeText.__init__(self, length, **kwargs)
class VARCHAR(_StringType, sqltypes.VARCHAR):
"""MSSQL VARCHAR type, for variable-length non-Unicode data with a maximum
of 8,000 characters."""
def __init__(self, *args, **kw):
"""Construct a VARCHAR.
        :param length: Optional, maximum data length, in characters.
:param convert_unicode: defaults to False. If True, convert
``unicode`` data sent to the database to a ``str``
bytestring, and convert bytestrings coming back from the
database into ``unicode``.
Bytestrings are encoded using the dialect's
:attr:`~sqlalchemy.engine.base.Dialect.encoding`, which
defaults to `utf-8`.
If False, may be overridden by
:attr:`sqlalchemy.engine.base.Dialect.convert_unicode`.
:param collation: Optional, a column-level collation for this string
value. Accepts a Windows Collation Name or a SQL Collation Name.
"""
collation = kw.pop('collation', None)
_StringType.__init__(self, collation)
sqltypes.VARCHAR.__init__(self, *args, **kw)
class NVARCHAR(_StringType, sqltypes.NVARCHAR):
"""MSSQL NVARCHAR type.
For variable-length unicode character data up to 4,000 characters."""
def __init__(self, *args, **kw):
"""Construct a NVARCHAR.
:param length: Optional, Maximum data length, in characters.
:param collation: Optional, a column-level collation for this string
value. Accepts a Windows Collation Name or a SQL Collation Name.
"""
collation = kw.pop('collation', None)
_StringType.__init__(self, collation)
sqltypes.NVARCHAR.__init__(self, *args, **kw)
class CHAR(_StringType, sqltypes.CHAR):
"""MSSQL CHAR type, for fixed-length non-Unicode data with a maximum
of 8,000 characters."""
def __init__(self, *args, **kw):
"""Construct a CHAR.
        :param length: Optional, maximum data length, in characters.
:param convert_unicode: defaults to False. If True, convert
``unicode`` data sent to the database to a ``str``
bytestring, and convert bytestrings coming back from the
database into ``unicode``.
Bytestrings are encoded using the dialect's
:attr:`~sqlalchemy.engine.base.Dialect.encoding`, which
defaults to `utf-8`.
If False, may be overridden by
:attr:`sqlalchemy.engine.base.Dialect.convert_unicode`.
:param collation: Optional, a column-level collation for this string
value. Accepts a Windows Collation Name or a SQL Collation Name.
"""
collation = kw.pop('collation', None)
_StringType.__init__(self, collation)
sqltypes.CHAR.__init__(self, *args, **kw)
class NCHAR(_StringType, sqltypes.NCHAR):
"""MSSQL NCHAR type.
For fixed-length unicode character data up to 4,000 characters."""
def __init__(self, *args, **kw):
"""Construct an NCHAR.
:param length: Optional, Maximum data length, in characters.
:param collation: Optional, a column-level collation for this string
value. Accepts a Windows Collation Name or a SQL Collation Name.
"""
collation = kw.pop('collation', None)
_StringType.__init__(self, collation)
sqltypes.NCHAR.__init__(self, *args, **kw)
class IMAGE(sqltypes.LargeBinary):
__visit_name__ = 'IMAGE'
class BIT(sqltypes.TypeEngine):
__visit_name__ = 'BIT'
class MONEY(sqltypes.TypeEngine):
__visit_name__ = 'MONEY'
class SMALLMONEY(sqltypes.TypeEngine):
__visit_name__ = 'SMALLMONEY'
class UNIQUEIDENTIFIER(sqltypes.TypeEngine):
__visit_name__ = "UNIQUEIDENTIFIER"
class SQL_VARIANT(sqltypes.TypeEngine):
__visit_name__ = 'SQL_VARIANT'
# old names.
MSDateTime = _MSDateTime
MSDate = _MSDate
MSReal = REAL
MSTinyInteger = TINYINT
MSTime = TIME
MSSmallDateTime = SMALLDATETIME
MSDateTime2 = DATETIME2
MSDateTimeOffset = DATETIMEOFFSET
MSText = TEXT
MSNText = NTEXT
MSString = VARCHAR
MSNVarchar = NVARCHAR
MSChar = CHAR
MSNChar = NCHAR
MSBinary = BINARY
MSVarBinary = VARBINARY
MSImage = IMAGE
MSBit = BIT
MSMoney = MONEY
MSSmallMoney = SMALLMONEY
MSUniqueIdentifier = UNIQUEIDENTIFIER
MSVariant = SQL_VARIANT
ischema_names = {
'int' : INTEGER,
'bigint': BIGINT,
'smallint' : SMALLINT,
'tinyint' : TINYINT,
'varchar' : VARCHAR,
'nvarchar' : NVARCHAR,
'char' : CHAR,
'nchar' : NCHAR,
'text' : TEXT,
'ntext' : NTEXT,
'decimal' : DECIMAL,
'numeric' : NUMERIC,
'float' : FLOAT,
'datetime' : DATETIME,
'datetime2' : DATETIME2,
'datetimeoffset' : DATETIMEOFFSET,
'date': DATE,
'time': TIME,
'smalldatetime' : SMALLDATETIME,
'binary' : BINARY,
'varbinary' : VARBINARY,
'bit': BIT,
'real' : REAL,
'image' : IMAGE,
'timestamp': TIMESTAMP,
'money': MONEY,
'smallmoney': SMALLMONEY,
'uniqueidentifier': UNIQUEIDENTIFIER,
'sql_variant': SQL_VARIANT,
}
class MSTypeCompiler(compiler.GenericTypeCompiler):
def _extend(self, spec, type_):
"""Extend a string-type declaration with standard SQL
COLLATE annotations.
"""
if getattr(type_, 'collation', None):
collation = 'COLLATE %s' % type_.collation
else:
collation = None
if type_.length:
spec = spec + "(%d)" % type_.length
return ' '.join([c for c in (spec, collation)
if c is not None])
def visit_FLOAT(self, type_):
precision = getattr(type_, 'precision', None)
if precision is None:
return "FLOAT"
else:
return "FLOAT(%(precision)s)" % {'precision': precision}
def visit_REAL(self, type_):
return "REAL"
def visit_TINYINT(self, type_):
return "TINYINT"
def visit_DATETIMEOFFSET(self, type_):
if type_.precision:
return "DATETIMEOFFSET(%s)" % type_.precision
else:
return "DATETIMEOFFSET"
def visit_TIME(self, type_):
precision = getattr(type_, 'precision', None)
if precision:
return "TIME(%s)" % precision
else:
return "TIME"
def visit_DATETIME2(self, type_):
precision = getattr(type_, 'precision', None)
if precision:
return "DATETIME2(%s)" % precision
else:
return "DATETIME2"
def visit_SMALLDATETIME(self, type_):
return "SMALLDATETIME"
def visit_unicode(self, type_):
return self.visit_NVARCHAR(type_)
def visit_unicode_text(self, type_):
return self.visit_NTEXT(type_)
def visit_NTEXT(self, type_):
return self._extend("NTEXT", type_)
def visit_TEXT(self, type_):
return self._extend("TEXT", type_)
def visit_VARCHAR(self, type_):
return self._extend("VARCHAR", type_)
def visit_CHAR(self, type_):
return self._extend("CHAR", type_)
def visit_NCHAR(self, type_):
return self._extend("NCHAR", type_)
def visit_NVARCHAR(self, type_):
return self._extend("NVARCHAR", type_)
def visit_date(self, type_):
if self.dialect.server_version_info < MS_2008_VERSION:
return self.visit_DATETIME(type_)
else:
return self.visit_DATE(type_)
def visit_time(self, type_):
if self.dialect.server_version_info < MS_2008_VERSION:
return self.visit_DATETIME(type_)
else:
return self.visit_TIME(type_)
def visit_large_binary(self, type_):
return self.visit_IMAGE(type_)
def visit_IMAGE(self, type_):
return "IMAGE"
def visit_boolean(self, type_):
return self.visit_BIT(type_)
def visit_BIT(self, type_):
return "BIT"
def visit_MONEY(self, type_):
return "MONEY"
def visit_SMALLMONEY(self, type_):
return 'SMALLMONEY'
def visit_UNIQUEIDENTIFIER(self, type_):
return "UNIQUEIDENTIFIER"
def visit_SQL_VARIANT(self, type_):
return 'SQL_VARIANT'
class MSExecutionContext(default.DefaultExecutionContext):
_enable_identity_insert = False
_select_lastrowid = False
_result_proxy = None
_lastrowid = None
def pre_exec(self):
"""Activate IDENTITY_INSERT if needed."""
if self.isinsert:
tbl = self.compiled.statement.table
seq_column = tbl._autoincrement_column
insert_has_sequence = seq_column is not None
if insert_has_sequence:
self._enable_identity_insert = \
seq_column.key in self.compiled_parameters[0]
else:
self._enable_identity_insert = False
self._select_lastrowid = insert_has_sequence and \
not self.compiled.returning and \
not self._enable_identity_insert and \
not self.executemany
if self._enable_identity_insert:
self.cursor.execute("SET IDENTITY_INSERT %s ON" %
self.dialect.identifier_preparer.format_table(tbl))
def post_exec(self):
"""Disable IDENTITY_INSERT if enabled."""
if self._select_lastrowid:
if self.dialect.use_scope_identity:
self.cursor.execute(
"SELECT scope_identity() AS lastrowid", ())
else:
self.cursor.execute("SELECT @@identity AS lastrowid", ())
# fetchall() ensures the cursor is consumed without closing it
row = self.cursor.fetchall()[0]
self._lastrowid = int(row[0])
if (self.isinsert or self.isupdate or self.isdelete) and \
self.compiled.returning:
self._result_proxy = base.FullyBufferedResultProxy(self)
if self._enable_identity_insert:
self.cursor.execute(
"SET IDENTITY_INSERT %s OFF" %
self.dialect.identifier_preparer.
format_table(self.compiled.statement.table)
)
def get_lastrowid(self):
return self._lastrowid
def handle_dbapi_exception(self, e):
if self._enable_identity_insert:
try:
self.cursor.execute(
"SET IDENTITY_INSERT %s OFF" %
self.dialect.identifier_preparer.\
format_table(self.compiled.statement.table)
)
except:
pass
def get_result_proxy(self):
if self._result_proxy:
return self._result_proxy
else:
return base.ResultProxy(self)
class MSSQLCompiler(compiler.SQLCompiler):
returning_precedes_values = True
extract_map = util.update_copy(
compiler.SQLCompiler.extract_map,
{
'doy': 'dayofyear',
'dow': 'weekday',
'milliseconds': 'millisecond',
'microseconds': 'microsecond'
})
def __init__(self, *args, **kwargs):
super(MSSQLCompiler, self).__init__(*args, **kwargs)
self.tablealiases = {}
def visit_now_func(self, fn, **kw):
return "CURRENT_TIMESTAMP"
def visit_current_date_func(self, fn, **kw):
return "GETDATE()"
def visit_length_func(self, fn, **kw):
return "LEN%s" % self.function_argspec(fn, **kw)
def visit_char_length_func(self, fn, **kw):
return "LEN%s" % self.function_argspec(fn, **kw)
def visit_concat_op(self, binary, **kw):
return "%s + %s" % \
(self.process(binary.left, **kw),
self.process(binary.right, **kw))
def visit_match_op(self, binary, **kw):
return "CONTAINS (%s, %s)" % (
self.process(binary.left, **kw),
self.process(binary.right, **kw))
def get_select_precolumns(self, select):
""" MS-SQL puts TOP, it's version of LIMIT here """
if select._distinct or select._limit:
s = select._distinct and "DISTINCT " or ""
if select._limit:
if not select._offset:
s += "TOP %s " % (select._limit,)
return s
return compiler.SQLCompiler.get_select_precolumns(self, select)
def limit_clause(self, select):
# Limit in mssql is after the select keyword
return ""
def visit_select(self, select, **kwargs):
"""Look for ``LIMIT`` and OFFSET in a select statement, and if
so tries to wrap it in a subquery with ``row_number()`` criterion.
"""
if not getattr(select, '_mssql_visit', None) and select._offset:
# to use ROW_NUMBER(), an ORDER BY is required.
orderby = self.process(select._order_by_clause)
if not orderby:
raise exc.InvalidRequestError('MSSQL requires an order_by when '
'using an offset.')
_offset = select._offset
_limit = select._limit
select._mssql_visit = True
select = select.column(
sql.literal_column("ROW_NUMBER() OVER (ORDER BY %s)" \
% orderby).label("mssql_rn")
).order_by(None).alias()
limitselect = sql.select([c for c in select.c if
c.key!='mssql_rn'])
limitselect.append_whereclause("mssql_rn>%d" % _offset)
if _limit is not None:
limitselect.append_whereclause("mssql_rn<=%d" %
(_limit + _offset))
return self.process(limitselect, iswrapper=True, **kwargs)
else:
return compiler.SQLCompiler.visit_select(self, select, **kwargs)
def _schema_aliased_table(self, table):
if getattr(table, 'schema', None) is not None:
if table not in self.tablealiases:
self.tablealiases[table] = table.alias()
return self.tablealiases[table]
else:
return None
def visit_table(self, table, mssql_aliased=False, **kwargs):
if mssql_aliased:
return super(MSSQLCompiler, self).visit_table(table, **kwargs)
# alias schema-qualified tables
alias = self._schema_aliased_table(table)
if alias is not None:
return self.process(alias, mssql_aliased=True, **kwargs)
else:
return super(MSSQLCompiler, self).visit_table(table, **kwargs)
def visit_alias(self, alias, **kwargs):
# translate for schema-qualified table aliases
self.tablealiases[alias.original] = alias
kwargs['mssql_aliased'] = True
return super(MSSQLCompiler, self).visit_alias(alias, **kwargs)
def visit_extract(self, extract, **kw):
field = self.extract_map.get(extract.field, extract.field)
return 'DATEPART("%s", %s)' % \
(field, self.process(extract.expr, **kw))
def visit_rollback_to_savepoint(self, savepoint_stmt):
return ("ROLLBACK TRANSACTION %s"
% self.preparer.format_savepoint(savepoint_stmt))
def visit_column(self, column, result_map=None, **kwargs):
if column.table is not None and \
(not self.isupdate and not self.isdelete) or self.is_subquery():
# translate for schema-qualified table aliases
t = self._schema_aliased_table(column.table)
if t is not None:
converted = expression._corresponding_column_or_error(
t, column)
if result_map is not None:
result_map[column.name.lower()] = \
(column.name, (column, ),
column.type)
return super(MSSQLCompiler, self).\
visit_column(converted,
result_map=None, **kwargs)
return super(MSSQLCompiler, self).visit_column(column,
result_map=result_map,
**kwargs)
def visit_binary(self, binary, **kwargs):
"""Move bind parameters to the right-hand side of an operator, where
possible.
"""
if (
isinstance(binary.left, expression._BindParamClause)
and binary.operator == operator.eq
and not isinstance(binary.right, expression._BindParamClause)
):
return self.process(
expression._BinaryExpression(binary.right,
binary.left,
binary.operator),
**kwargs)
else:
if (
(binary.operator is operator.eq or
binary.operator is operator.ne)
and (
(isinstance(binary.left, expression._FromGrouping)
and isinstance(binary.left.element,
expression._ScalarSelect))
or (isinstance(binary.right, expression._FromGrouping)
and isinstance(binary.right.element,
expression._ScalarSelect))
or isinstance(binary.left, expression._ScalarSelect)
or isinstance(binary.right, expression._ScalarSelect)
)
):
op = binary.operator == operator.eq and "IN" or "NOT IN"
return self.process(
expression._BinaryExpression(binary.left,
binary.right, op),
**kwargs)
return super(MSSQLCompiler, self).visit_binary(binary, **kwargs)
def returning_clause(self, stmt, returning_cols):
if self.isinsert or self.isupdate:
target = stmt.table.alias("inserted")
else:
target = stmt.table.alias("deleted")
adapter = sql_util.ClauseAdapter(target)
def col_label(col):
adapted = adapter.traverse(col)
if isinstance(col, expression._Label):
                return adapted.label(col.key)
else:
return self.label_select_column(None, adapted, asfrom=False)
columns = [
self.process(
col_label(c),
within_columns_clause=True,
result_map=self.result_map
)
for c in expression._select_iterables(returning_cols)
]
return 'OUTPUT ' + ', '.join(columns)
def label_select_column(self, select, column, asfrom):
if isinstance(column, expression.Function):
return column.label(None)
else:
return super(MSSQLCompiler, self).\
label_select_column(select, column, asfrom)
def for_update_clause(self, select):
# "FOR UPDATE" is only allowed on "DECLARE CURSOR" which
# SQLAlchemy doesn't use
return ''
def order_by_clause(self, select, **kw):
order_by = self.process(select._order_by_clause, **kw)
# MSSQL only allows ORDER BY in subqueries if there is a LIMIT
if order_by and (not self.is_subquery() or select._limit):
return " ORDER BY " + order_by
else:
return ""
class MSSQLStrictCompiler(MSSQLCompiler):
"""A subclass of MSSQLCompiler which disables the usage of bind
parameters where not allowed natively by MS-SQL.
A dialect may use this compiler on a platform where native
binds are used.
"""
ansi_bind_rules = True
def visit_in_op(self, binary, **kw):
kw['literal_binds'] = True
return "%s IN %s" % (
self.process(binary.left, **kw),
self.process(binary.right, **kw)
)
def visit_notin_op(self, binary, **kw):
kw['literal_binds'] = True
return "%s NOT IN %s" % (
self.process(binary.left, **kw),
self.process(binary.right, **kw)
)
def visit_function(self, func, **kw):
kw['literal_binds'] = True
return super(MSSQLStrictCompiler, self).visit_function(func, **kw)
def render_literal_value(self, value, type_):
"""
For date and datetime values, convert to a string
format acceptable to MSSQL. That seems to be the
so-called ODBC canonical date format which looks
like this:
yyyy-mm-dd hh:mi:ss.mmm(24h)
For other data types, call the base class implementation.
"""
# datetime and date are both subclasses of datetime.date
if issubclass(type(value), datetime.date):
# SQL Server wants single quotes around the date string.
return "'" + str(value) + "'"
else:
return super(MSSQLStrictCompiler, self).\
render_literal_value(value, type_)
class MSDDLCompiler(compiler.DDLCompiler):
def get_column_specification(self, column, **kwargs):
colspec = (self.preparer.format_column(column) + " "
+ self.dialect.type_compiler.process(column.type))
if column.nullable is not None:
if not column.nullable or column.primary_key:
colspec += " NOT NULL"
else:
colspec += " NULL"
if column.table is None:
raise exc.InvalidRequestError(
"mssql requires Table-bound columns "
"in order to generate DDL")
seq_col = column.table._autoincrement_column
        # install an IDENTITY Sequence if we have an implicit IDENTITY column
if seq_col is column:
sequence = isinstance(column.default, sa_schema.Sequence) and \
column.default
if sequence:
start, increment = sequence.start or 1, \
sequence.increment or 1
else:
start, increment = 1, 1
colspec += " IDENTITY(%s,%s)" % (start, increment)
else:
default = self.get_column_default_string(column)
if default is not None:
colspec += " DEFAULT " + default
return colspec
def visit_drop_index(self, drop):
return "\nDROP INDEX %s.%s" % (
self.preparer.quote_identifier(drop.element.table.name),
self.preparer.quote(
self._index_identifier(drop.element.name),
drop.element.quote)
)
class MSIdentifierPreparer(compiler.IdentifierPreparer):
reserved_words = RESERVED_WORDS
def __init__(self, dialect):
super(MSIdentifierPreparer, self).__init__(dialect, initial_quote='[',
final_quote=']')
def _escape_identifier(self, value):
return value
def quote_schema(self, schema, force=True):
"""Prepare a quoted table and schema name."""
result = '.'.join([self.quote(x, force) for x in schema.split('.')])
return result
class MSDialect(default.DefaultDialect):
name = 'mssql'
supports_default_values = True
supports_empty_insert = False
execution_ctx_cls = MSExecutionContext
use_scope_identity = True
max_identifier_length = 128
schema_name = "dbo"
colspecs = {
sqltypes.DateTime : _MSDateTime,
sqltypes.Date : _MSDate,
sqltypes.Time : TIME,
}
ischema_names = ischema_names
supports_native_boolean = False
supports_unicode_binds = True
postfetch_lastrowid = True
server_version_info = ()
statement_compiler = MSSQLCompiler
ddl_compiler = MSDDLCompiler
type_compiler = MSTypeCompiler
preparer = MSIdentifierPreparer
def __init__(self,
query_timeout=None,
use_scope_identity=True,
max_identifier_length=None,
schema_name=u"dbo", **opts):
self.query_timeout = int(query_timeout or 0)
self.schema_name = schema_name
self.use_scope_identity = use_scope_identity
self.max_identifier_length = int(max_identifier_length or 0) or \
self.max_identifier_length
super(MSDialect, self).__init__(**opts)
def do_savepoint(self, connection, name):
util.warn("Savepoint support in mssql is experimental and "
"may lead to data loss.")
connection.execute("IF @@TRANCOUNT = 0 BEGIN TRANSACTION")
connection.execute("SAVE TRANSACTION %s" % name)
def do_release_savepoint(self, connection, name):
pass
def initialize(self, connection):
super(MSDialect, self).initialize(connection)
if self.server_version_info[0] not in range(8, 17):
# FreeTDS with version 4.2 seems to report here
# a number like "95.10.255". Don't know what
# that is. So emit warning.
util.warn(
"Unrecognized server version info '%s'. Version specific "
"behaviors may not function properly. If using ODBC "
"with FreeTDS, ensure server version 7.0 or 8.0, not 4.2, "
"is configured in the FreeTDS configuration." %
".".join(str(x) for x in self.server_version_info) )
if self.server_version_info >= MS_2005_VERSION and \
'implicit_returning' not in self.__dict__:
self.implicit_returning = True
def _get_default_schema_name(self, connection):
user_name = connection.scalar("SELECT user_name() as user_name;")
if user_name is not None:
# now, get the default schema
query = sql.text("""
SELECT default_schema_name FROM
sys.database_principals
WHERE name = :name
AND type = 'S'
""")
try:
default_schema_name = connection.scalar(query, name=user_name)
if default_schema_name is not None:
return unicode(default_schema_name)
except:
pass
return self.schema_name
def has_table(self, connection, tablename, schema=None):
current_schema = schema or self.default_schema_name
columns = ischema.columns
if current_schema:
whereclause = sql.and_(columns.c.table_name==tablename,
columns.c.table_schema==current_schema)
else:
whereclause = columns.c.table_name==tablename
s = sql.select([columns], whereclause)
c = connection.execute(s)
return c.first() is not None
@reflection.cache
def get_schema_names(self, connection, **kw):
s = sql.select([ischema.schemata.c.schema_name],
order_by=[ischema.schemata.c.schema_name]
)
schema_names = [r[0] for r in connection.execute(s)]
return schema_names
@reflection.cache
def get_table_names(self, connection, schema=None, **kw):
current_schema = schema or self.default_schema_name
tables = ischema.tables
s = sql.select([tables.c.table_name],
sql.and_(
tables.c.table_schema == current_schema,
tables.c.table_type == u'BASE TABLE'
),
order_by=[tables.c.table_name]
)
table_names = [r[0] for r in connection.execute(s)]
return table_names
@reflection.cache
def get_view_names(self, connection, schema=None, **kw):
current_schema = schema or self.default_schema_name
tables = ischema.tables
s = sql.select([tables.c.table_name],
sql.and_(
tables.c.table_schema == current_schema,
tables.c.table_type == u'VIEW'
),
order_by=[tables.c.table_name]
)
view_names = [r[0] for r in connection.execute(s)]
return view_names
# The cursor reports it is closed after executing the sp.
@reflection.cache
def get_indexes(self, connection, tablename, schema=None, **kw):
current_schema = schema or self.default_schema_name
        col_finder = re.compile(r"(\w+)")
full_tname = "%s.%s" % (current_schema, tablename)
indexes = []
s = sql.text("exec sp_helpindex '%s'" % full_tname)
rp = connection.execute(s)
if rp.closed:
# did not work for this setup.
return []
for row in rp:
if 'primary key' not in row['index_description']:
indexes.append({
'name' : row['index_name'],
'column_names' : col_finder.findall(row['index_keys']),
'unique': 'unique' in row['index_description']
})
return indexes
@reflection.cache
def get_view_definition(self, connection, viewname, schema=None, **kw):
current_schema = schema or self.default_schema_name
views = ischema.views
s = sql.select([views.c.view_definition],
sql.and_(
views.c.table_schema == current_schema,
views.c.table_name == viewname
),
)
rp = connection.execute(s)
if rp:
view_def = rp.scalar()
return view_def
@reflection.cache
def get_columns(self, connection, tablename, schema=None, **kw):
# Get base columns
current_schema = schema or self.default_schema_name
columns = ischema.columns
if current_schema:
whereclause = sql.and_(columns.c.table_name==tablename,
columns.c.table_schema==current_schema)
else:
whereclause = columns.c.table_name==tablename
s = sql.select([columns], whereclause,
order_by=[columns.c.ordinal_position])
c = connection.execute(s)
cols = []
while True:
row = c.fetchone()
if row is None:
break
(name, type, nullable, charlen,
numericprec, numericscale, default, collation) = (
row[columns.c.column_name],
row[columns.c.data_type],
row[columns.c.is_nullable] == 'YES',
row[columns.c.character_maximum_length],
row[columns.c.numeric_precision],
row[columns.c.numeric_scale],
row[columns.c.column_default],
row[columns.c.collation_name]
)
coltype = self.ischema_names.get(type, None)
kwargs = {}
if coltype in (MSString, MSChar, MSNVarchar, MSNChar, MSText,
MSNText, MSBinary, MSVarBinary,
sqltypes.LargeBinary):
kwargs['length'] = charlen
if collation:
kwargs['collation'] = collation
if coltype == MSText or \
(coltype in (MSString, MSNVarchar) and charlen == -1):
kwargs.pop('length')
if coltype is None:
util.warn(
"Did not recognize type '%s' of column '%s'" %
(type, name))
coltype = sqltypes.NULLTYPE
if issubclass(coltype, sqltypes.Numeric) and \
coltype is not MSReal:
kwargs['scale'] = numericscale
kwargs['precision'] = numericprec
coltype = coltype(**kwargs)
cdict = {
'name' : name,
'type' : coltype,
'nullable' : nullable,
'default' : default,
'autoincrement':False,
}
cols.append(cdict)
# autoincrement and identity
colmap = {}
for col in cols:
colmap[col['name']] = col
# We also run an sp_columns to check for identity columns:
cursor = connection.execute("sp_columns @table_name = '%s', "
"@table_owner = '%s'"
% (tablename, current_schema))
ic = None
while True:
row = cursor.fetchone()
if row is None:
break
(col_name, type_name) = row[3], row[5]
if type_name.endswith("identity") and col_name in colmap:
ic = col_name
colmap[col_name]['autoincrement'] = True
colmap[col_name]['sequence'] = dict(
name='%s_identity' % col_name)
break
cursor.close()
if ic is not None and self.server_version_info >= MS_2005_VERSION:
table_fullname = "%s.%s" % (current_schema, tablename)
cursor = connection.execute(
"select ident_seed('%s'), ident_incr('%s')"
% (table_fullname, table_fullname)
)
row = cursor.first()
if row is not None and row[0] is not None:
colmap[ic]['sequence'].update({
'start' : int(row[0]),
'increment' : int(row[1])
})
return cols
@reflection.cache
def get_primary_keys(self, connection, tablename, schema=None, **kw):
current_schema = schema or self.default_schema_name
pkeys = []
# information_schema.referential_constraints
RR = ischema.ref_constraints
# information_schema.table_constraints
TC = ischema.constraints
# information_schema.constraint_column_usage:
# the constrained column
C = ischema.key_constraints.alias('C')
# information_schema.constraint_column_usage:
# the referenced column
R = ischema.key_constraints.alias('R')
# Primary key constraints
s = sql.select([C.c.column_name, TC.c.constraint_type],
sql.and_(TC.c.constraint_name == C.c.constraint_name,
C.c.table_name == tablename,
C.c.table_schema == current_schema)
)
c = connection.execute(s)
for row in c:
if 'PRIMARY' in row[TC.c.constraint_type.name]:
pkeys.append(row[0])
return pkeys
@reflection.cache
def get_foreign_keys(self, connection, tablename, schema=None, **kw):
current_schema = schema or self.default_schema_name
# Add constraints
#information_schema.referential_constraints
RR = ischema.ref_constraints
# information_schema.table_constraints
TC = ischema.constraints
# information_schema.constraint_column_usage:
# the constrained column
C = ischema.key_constraints.alias('C')
# information_schema.constraint_column_usage:
# the referenced column
R = ischema.key_constraints.alias('R')
# Foreign key constraints
s = sql.select([C.c.column_name,
R.c.table_schema, R.c.table_name, R.c.column_name,
RR.c.constraint_name, RR.c.match_option,
RR.c.update_rule,
RR.c.delete_rule],
sql.and_(C.c.table_name == tablename,
C.c.table_schema == current_schema,
C.c.constraint_name == RR.c.constraint_name,
R.c.constraint_name ==
RR.c.unique_constraint_name,
C.c.ordinal_position == R.c.ordinal_position
),
order_by = [
RR.c.constraint_name,
R.c.ordinal_position])
# group rows by constraint ID, to handle multi-column FKs
fkeys = []
fknm, scols, rcols = (None, [], [])
def fkey_rec():
return {
'name' : None,
'constrained_columns' : [],
'referred_schema' : None,
'referred_table' : None,
'referred_columns' : []
}
fkeys = util.defaultdict(fkey_rec)
for r in connection.execute(s).fetchall():
scol, rschema, rtbl, rcol, rfknm, fkmatch, fkuprule, fkdelrule = r
rec = fkeys[rfknm]
rec['name'] = rfknm
if not rec['referred_table']:
rec['referred_table'] = rtbl
if schema is not None or current_schema != rschema:
rec['referred_schema'] = rschema
local_cols, remote_cols = \
rec['constrained_columns'],\
rec['referred_columns']
local_cols.append(scol)
remote_cols.append(rcol)
return fkeys.values()
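# A minimal reflection sketch (not part of this module; the URL below is a
# placeholder and assumes a working pyodbc DSN):
#
# from sqlalchemy import create_engine
# from sqlalchemy.engine import reflection
# engine = create_engine("mssql+pyodbc://user:password@some_dsn")
# insp = reflection.Inspector.from_engine(engine)
# print(insp.get_table_names(schema="dbo"))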
|
|
import faulthandler
import importlib
import io
import os
import sys
import time
import traceback
import unittest
from test import support
from test.libregrtest.refleak import dash_R, clear_caches
from test.libregrtest.save_env import saved_test_environment
# Test result constants.
PASSED = 1
FAILED = 0
ENV_CHANGED = -1
SKIPPED = -2
RESOURCE_DENIED = -3
INTERRUPTED = -4
CHILD_ERROR = -5 # error in a child process
_FORMAT_TEST_RESULT = {
PASSED: '%s passed',
FAILED: '%s failed',
ENV_CHANGED: '%s failed (env changed)',
SKIPPED: '%s skipped',
RESOURCE_DENIED: '%s skipped (resource denied)',
INTERRUPTED: '%s interrupted',
CHILD_ERROR: '%s crashed',
}
# Minimum duration of a test to display its duration or to mention that
# the test is running in background
PROGRESS_MIN_TIME = 30.0 # seconds
# small set of tests to determine if we have a basically functioning interpreter
# (i.e. if any of these fail, then anything else is likely to follow)
STDTESTS = [
'test_grammar',
'test_opcodes',
'test_dict',
'test_builtin',
'test_exceptions',
'test_types',
'test_unittest',
'test_doctest',
'test_doctest2',
'test_support'
]
# set of tests that we don't want to be executed when using regrtest
NOTTESTS = set()
def format_test_result(test_name, result):
fmt = _FORMAT_TEST_RESULT.get(result, "%s")
return fmt % test_name
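# A small illustration of the mapping above, using names from this module:
#   format_test_result("test_os", PASSED)      -> "test_os passed"
#   format_test_result("test_os", CHILD_ERROR) -> "test_os crashed"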
def findtests(testdir=None, stdtests=STDTESTS, nottests=NOTTESTS):
"""Return a list of all applicable test modules."""
testdir = findtestdir(testdir)
names = os.listdir(testdir)
tests = []
others = set(stdtests) | nottests
for name in names:
mod, ext = os.path.splitext(name)
if mod[:5] == "test_" and ext in (".py", "") and mod not in others:
tests.append(mod)
return stdtests + sorted(tests)
def runtest(ns, test):
"""Run a single test.
ns -- regrtest namespace of options
test -- the name of the test
Returns the tuple (result, test_time), where result is one of the
constants:
INTERRUPTED KeyboardInterrupt when run under -j
RESOURCE_DENIED test skipped because resource denied
SKIPPED test skipped for some other reason
ENV_CHANGED test failed because it changed the execution environment
FAILED test failed
PASSED test passed
"""
output_on_failure = ns.verbose3
use_timeout = (ns.timeout is not None)
if use_timeout:
faulthandler.dump_traceback_later(ns.timeout, exit=True)
try:
support.match_tests = ns.match_tests
if ns.failfast:
support.failfast = True
if output_on_failure:
support.verbose = True
# Reuse the same instance to all calls to runtest(). Some
# tests keep a reference to sys.stdout or sys.stderr
# (eg. test_argparse).
if runtest.stringio is None:
stream = io.StringIO()
runtest.stringio = stream
else:
stream = runtest.stringio
stream.seek(0)
stream.truncate()
orig_stdout = sys.stdout
orig_stderr = sys.stderr
try:
sys.stdout = stream
sys.stderr = stream
result = runtest_inner(ns, test, display_failure=False)
if result[0] != PASSED:
output = stream.getvalue()
orig_stderr.write(output)
orig_stderr.flush()
finally:
sys.stdout = orig_stdout
sys.stderr = orig_stderr
else:
support.verbose = ns.verbose # Tell tests to be moderately quiet
result = runtest_inner(ns, test, display_failure=not ns.verbose)
return result
finally:
if use_timeout:
faulthandler.cancel_dump_traceback_later()
cleanup_test_droppings(test, ns.verbose)
runtest.stringio = None
def runtest_inner(ns, test, display_failure=True):
support.unload(test)
test_time = 0.0
refleak = False # True if the test leaked references.
try:
if test.startswith('test.') or ns.testdir:
abstest = test
else:
# Always import it from the test package
abstest = 'test.' + test
clear_caches()
with saved_test_environment(test, ns.verbose, ns.quiet, pgo=ns.pgo) as environment:
start_time = time.time()
the_module = importlib.import_module(abstest)
# If the test has a test_main, that will run the appropriate
# tests. If not, use normal unittest test loading.
test_runner = getattr(the_module, "test_main", None)
if test_runner is None:
def test_runner():
loader = unittest.TestLoader()
tests = loader.loadTestsFromModule(the_module)
for error in loader.errors:
print(error, file=sys.stderr)
if loader.errors:
raise Exception("errors while loading tests")
support.run_unittest(tests)
test_runner()
if ns.huntrleaks:
refleak = dash_R(the_module, test, test_runner, ns.huntrleaks)
test_time = time.time() - start_time
except support.ResourceDenied as msg:
if not ns.quiet and not ns.pgo:
print(test, "skipped --", msg, flush=True)
return RESOURCE_DENIED, test_time
except unittest.SkipTest as msg:
if not ns.quiet and not ns.pgo:
print(test, "skipped --", msg, flush=True)
return SKIPPED, test_time
except KeyboardInterrupt:
raise
except support.TestFailed as msg:
if not ns.pgo:
if display_failure:
print("test", test, "failed --", msg, file=sys.stderr,
flush=True)
else:
print("test", test, "failed", file=sys.stderr, flush=True)
return FAILED, test_time
except:
msg = traceback.format_exc()
if not ns.pgo:
print("test", test, "crashed --", msg, file=sys.stderr,
flush=True)
return FAILED, test_time
else:
if refleak:
return FAILED, test_time
if environment.changed:
return ENV_CHANGED, test_time
return PASSED, test_time
def cleanup_test_droppings(testname, verbose):
import shutil
import stat
import gc
# First kill any dangling references to open files etc.
# This can also issue some ResourceWarnings which would otherwise get
# triggered during the following test run, and possibly produce failures.
gc.collect()
# Try to clean up junk commonly left behind. While tests shouldn't leave
# any files or directories behind, when a test fails that can be tedious
# for it to arrange. The consequences can be especially nasty on Windows,
# since if a test leaves a file open, it cannot be deleted by name (while
# there's nothing we can do about that here either, we can display the
# name of the offending test, which is a real help).
for name in (support.TESTFN,
"db_home",
):
if not os.path.exists(name):
continue
if os.path.isdir(name):
kind, nuker = "directory", shutil.rmtree
elif os.path.isfile(name):
kind, nuker = "file", os.unlink
else:
raise SystemError("os.path says %r exists but is neither "
"directory nor file" % name)
if verbose:
print("%r left behind %s %r" % (testname, kind, name))
try:
# if we have chmod, fix possible permissions problems
# that might prevent cleanup
if (hasattr(os, 'chmod')):
os.chmod(name, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)
nuker(name)
except Exception as msg:
print(("%r left behind %s %r and it couldn't be "
"removed: %s" % (testname, kind, name, msg)), file=sys.stderr)
def findtestdir(path=None):
return path or os.path.dirname(os.path.dirname(__file__)) or os.curdir
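# A minimal sketch of driving runtest() directly (not part of the original
# module). regrtest normally builds ``ns`` from its command-line parser; the
# attributes below are only the subset this file actually reads, so treat the
# sketch as illustrative rather than a supported entry point.
#
# import types
# ns = types.SimpleNamespace(
#     verbose=0, verbose3=False, quiet=True, pgo=False,
#     timeout=None, match_tests=None, failfast=False,
#     huntrleaks=False, testdir=None)
# result, test_time = runtest(ns, 'test_grammar')
# print(format_test_result('test_grammar', result), test_time)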
|
|
from datetime import date, timedelta
import mock
from suds import WebFault
from ssl import SSLError
from unittest2 import TestCase
from authorize.apis.recurring import PROD_URL, RecurringAPI, TEST_URL
from authorize.data import CreditCard
from authorize.exceptions import AuthorizeConnectionError, \
AuthorizeInvalidError, AuthorizeResponseError
class AttrDict(dict):
def __init__(self, *args, **kwargs):
dict.__init__(self, *args, **kwargs)
self.__dict__ = self
SUCCESS = AttrDict({
'resultCode': 'Ok',
'subscriptionId': '123',
})
ERROR = AttrDict({
'resultCode': 'Error',
'messages': [[AttrDict({
'code': 'E00016',
'text': 'The field type is invalid.',
})]],
})
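# Illustrative note: AttrDict mimics the attribute-style access of suds
# response objects, so the fixtures above behave like real API responses,
# e.g. SUCCESS.resultCode == 'Ok' and
# ERROR.messages[0][0].text == 'The field type is invalid.'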
class RecurringAPITests(TestCase):
def setUp(self):
self.patcher = mock.patch(
'authorize.apis.recurring.Client')
self.Client = self.patcher.start()
self.api = RecurringAPI('123', '456')
# Make the factory creator return mocks that know what kind they are
def create(kind):
created = mock.Mock()
created._kind = kind
return created
self.api.client.factory.create.side_effect = create
def tearDown(self):
self.patcher.stop()
def test_basic_api(self):
api = RecurringAPI('123', '456')
self.assertEqual(api.url, TEST_URL)
api = RecurringAPI('123', '456', debug=False)
self.assertEqual(api.url, PROD_URL)
def test_client_and_auth(self):
self.Client.reset_mock()
api = RecurringAPI('123', '456')
self.assertEqual(self.Client.call_args, None)
client_ = api.client
self.assertEqual(self.Client.call_args[0][0], TEST_URL)
client_auth = api.client_auth
self.assertEqual(client_auth.name, '123')
self.assertEqual(client_auth.transactionKey, '456')
def test_make_call(self):
self.api.client.service.TestService.return_value = SUCCESS
result = self.api._make_call('TestService', 'foo')
self.assertEqual(result, SUCCESS)
self.assertEqual(self.api.client.service.TestService.call_args[0],
(self.api.client_auth, 'foo'))
def test_make_call_connection_error(self):
self.api.client.service.TestService.side_effect = WebFault('a', 'b')
self.assertRaises(AuthorizeConnectionError, self.api._make_call,
'TestService', 'foo')
self.assertEqual(self.api.client.service.TestService.call_args[0],
(self.api.client_auth, 'foo'))
def test_make_call_ssl_error(self):
self.api.client.service.TestService.side_effect = SSLError('a', 'b')
self.assertRaises(AuthorizeConnectionError, self.api._make_call,
'TestService', 'foo')
self.assertEqual(self.api.client.service.TestService.call_args[0],
(self.api.client_auth, 'foo'))
def test_make_call_response_error(self):
self.api.client.service.TestService.return_value = ERROR
try:
self.api._make_call('TestService', 'foo')
except AuthorizeResponseError as e:
self.assertEqual(str(e), 'E00016: The field type is invalid.')
self.assertEqual(self.api.client.service.TestService.call_args[0],
(self.api.client_auth, 'foo'))
def test_create_subscription(self):
service = self.api.client.service.ARBCreateSubscription
service.return_value = SUCCESS
year = date.today().year + 10
credit_card = CreditCard('4111111111111111', year, 1, '911',
'Jeff', 'Schenck')
nameless_credit_card = CreditCard('4111111111111111', year, 1, '911')
start = date.today() + timedelta(days=7)
# Test missing credit card name
self.assertRaises(AuthorizeInvalidError, self.api.create_subscription,
nameless_credit_card, 10, start, months=1, occurrences=10)
# Test both or neither of days and months arguments
self.assertRaises(AuthorizeInvalidError, self.api.create_subscription,
credit_card, 10, start, occurrences=10)
self.assertRaises(AuthorizeInvalidError, self.api.create_subscription,
credit_card, 10, start, days=30, months=1, occurrences=10)
# Test validation of months and of days arguments
self.assertRaises(AuthorizeInvalidError, self.api.create_subscription,
credit_card, 10, start, days=1, occurrences=10)
self.assertRaises(AuthorizeInvalidError, self.api.create_subscription,
credit_card, 10, start, days=400, occurrences=10)
self.assertRaises(AuthorizeInvalidError, self.api.create_subscription,
credit_card, 10, start, months=0, occurrences=10)
self.assertRaises(AuthorizeInvalidError, self.api.create_subscription,
credit_card, 10, start, months=13, occurrences=10)
# Test start date in the past
past_start = date.today() - timedelta(days=1)
self.assertRaises(AuthorizeInvalidError, self.api.create_subscription,
credit_card, 10, past_start, months=1, occurrences=10)
# Test providing only one of trial_amount and trial_occurrences
self.assertRaises(AuthorizeInvalidError, self.api.create_subscription,
credit_card, 10, start, months=1, occurrences=10,
trial_amount=5)
self.assertRaises(AuthorizeInvalidError, self.api.create_subscription,
credit_card, 10, start, months=1, occurrences=10,
trial_occurrences=3)
# Test basic successful subscription
subscription_id = self.api.create_subscription(credit_card, 10, start,
months=1, occurrences=10)
self.assertEqual(subscription_id, '123')
subscription = service.call_args[0][1]
self.assertEqual(subscription._kind, 'ARBSubscriptionType')
self.assertEqual(subscription.amount, '10.00')
self.assertEqual(subscription.payment._kind, 'PaymentType')
self.assertEqual(subscription.payment.creditCard._kind,
'CreditCardType')
self.assertEqual(subscription.payment.creditCard.cardNumber,
'4111111111111111')
self.assertEqual(subscription.payment.creditCard.expirationDate,
'{0}-01'.format(year))
self.assertEqual(subscription.payment.creditCard.cardCode, '911')
self.assertEqual(subscription.billTo.firstName, 'Jeff')
self.assertEqual(subscription.billTo.lastName, 'Schenck')
self.assertEqual(subscription.paymentSchedule.interval.length, 1)
self.assertEqual(subscription.paymentSchedule.startDate,
start.strftime('%Y-%m-%d'))
self.assertEqual(subscription.paymentSchedule.totalOccurrences, 10)
# Test with days interval
self.api.create_subscription(credit_card, 10, start, days=14,
occurrences=10)
subscription = service.call_args[0][1]
self.assertEqual(subscription.paymentSchedule.interval.length, 14)
# Test with infinite occurrences
self.api.create_subscription(credit_card, 10, start, months=1)
subscription = service.call_args[0][1]
self.assertEqual(subscription.paymentSchedule.totalOccurrences, 9999)
# Test with trial period
self.api.create_subscription(credit_card, 10, start, months=1,
occurrences=10, trial_amount=5, trial_occurrences=3)
subscription = service.call_args[0][1]
self.assertEqual(subscription.paymentSchedule.trialOccurrences, 3)
self.assertEqual(subscription.trialAmount, '5.00')
def test_update_subscription(self):
service = self.api.client.service.ARBUpdateSubscription
service.return_value = SUCCESS
start = date.today() + timedelta(days=7)
# Test start date in the past
past_start = date.today() - timedelta(days=1)
self.assertRaises(AuthorizeInvalidError, self.api.update_subscription,
'1', start=past_start)
# Test successful update with one argument
self.api.update_subscription('1', start=start)
subscription_id, subscription = service.call_args[0][1:]
self.assertEqual(subscription_id, '1')
self.assertEqual(subscription._kind, 'ARBSubscriptionType')
self.assertEqual(subscription.paymentSchedule.startDate,
start.strftime('%Y-%m-%d'))
self.assertTrue(isinstance(subscription.amount, mock.Mock))
self.assertTrue(isinstance(
subscription.paymentSchedule.totalOccurrences, mock.Mock))
self.assertTrue(isinstance(subscription.trialAmount, mock.Mock))
self.assertTrue(isinstance(
subscription.paymentSchedule.trialOccurrences, mock.Mock))
# Test successful update with all arguments
self.api.update_subscription('1', amount=25, start=start,
occurrences=21, trial_amount=24, trial_occurrences=1)
subscription_id, subscription = service.call_args[0][1:]
self.assertEqual(subscription_id, '1')
self.assertEqual(subscription._kind, 'ARBSubscriptionType')
        self.assertEqual(subscription.amount, '25.00')
self.assertEqual(subscription.paymentSchedule.startDate,
start.strftime('%Y-%m-%d'))
        self.assertEqual(subscription.paymentSchedule.totalOccurrences, 21)
        self.assertEqual(subscription.trialAmount, '24.00')
        self.assertEqual(subscription.paymentSchedule.trialOccurrences, 1)
def test_delete_subscription(self):
service = self.api.client.service.ARBCancelSubscription
service.return_value = SUCCESS
self.api.delete_subscription('1')
self.assertEqual(service.call_args[0][1], '1')
|
|
"""
Photon scattering in quantum optical systems
This module includes a collection of functions for numerically computing photon
scattering in arbitrary driven systems coupled to some configuration of output
waveguides. The implementation of these functions closely follows the
mathematical treatment given in K.A. Fischer, et al., Scattering of Coherent
Pulses from Quantum Optical Systems (2017, arXiv:1710.02875).
"""
# Author: Ben Bartlett
# Contact: benbartlett@stanford.edu
import numpy as np
from itertools import product, combinations_with_replacement
from qutip import propagator, Options, basis, tensor, zero_ket, Qobj
__all__ = ['temporal_basis_vector',
'temporal_scattered_state',
'scattering_probability']
class Evolver:
"""
A caching class which takes a Hamiltonian and a list of times to calculate
and memoize propagators for the system between any two times as demanded.
Parameters
----------
H : :class: qutip.Qobj or list
System-waveguide(s) Hamiltonian or effective Hamiltonian in `Qobj` or
list-callback format. If construct_effective_hamiltonian is not
specified, an effective Hamiltonian is constructed from H and c_ops.
times : list-like
List of times to evaluate propagators over.
options : :class: qutip.Options
Solver options to use when computing propagators.
Attributes
----------
H : :class: qutip.Qobj or list
System-waveguide(s) Hamiltonian, may be time-dependent.
tlist : list-like
List of times to evaluate propagators over.
propagators : (dict of float: (dict of float: :class: qutip.Qobj))
Dictionary of dictionaries of propagator objects with keys of
evaluation times, e.g. propagators[t2][t1] returns U[t2,t1].
"""
def __init__(self, H, tlist, options=None):
self.H = H
self.tlist = tlist
if options is None:
self.options = Options(nsteps=10000, normalize_output=False)
else:
self.options = options
# Make a blank nested dictionary to store propagators
self.propagators = dict.fromkeys(tlist)
for t in tlist:
self.propagators[t] = dict.fromkeys(tlist)
def prop(self, tf, ti):
"""Compute U[t2,t1] where t2 > t1 or return the cached operator.
Parameters
----------
tf : float
Final time to compute the propagator U[tf, ti].
ti : float
Initial time to compute the propagator U[tf,ti].
Returns
-------
propagator : :class: qutip.Qobj
The propagation operator.
"""
left, right = np.searchsorted(self.tlist, [ti, tf], side='left')
t1, t2 = self.tlist[left], self.tlist[right]
if self.propagators[t2][t1] is None:
self.propagators[t2][t1] = propagator(self.H, [t1, t2],
options=self.options,
unitary_mode='single')
# Something is still broken about batch unitary mode (see #807)
return self.propagators[t2][t1]
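# Minimal usage sketch for the Evolver cache (illustrative only; the two-level
# Hamiltonian and time grid below are hypothetical, not part of this module).
# Wrapped in a helper so nothing runs at import time.
def _example_evolver_caching():  # pragma: no cover
    from qutip import sigmax
    tlist = np.linspace(0, 1, 11)
    evolver = Evolver(sigmax(), tlist)
    U = evolver.prop(tlist[5], tlist[0])          # computed and memoized on first use
    return U is evolver.prop(tlist[5], tlist[0])  # True: the second call hits the cache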
def set_partition(collection, num_sets):
"""
    Enumerate all ways of partitioning collection into num_sets different
    tuples, e.g. list(set_partition([1, 2], 2)) =
    [((1, 2), ()), ((1,), (2,)), ((2,), (1,)), ((), (1, 2))].
Parameters
----------
collection : iterable
Collection to generate a set partition of.
num_sets : int
Number of sets to partition collection into.
Returns
-------
partition : iterable
The partitioning of collection into num_sets sets.
"""
for partitioning in product(range(num_sets), repeat=len(collection)):
partition = [[] for _ in range(num_sets)]
for i, set_index in enumerate(partitioning):
partition[set_index].append(collection[i])
yield tuple(tuple(indices) for indices in partition)
def photon_scattering_operator(evolver, c_ops, taus_list):
"""
Compute the scattering operator for a system emitting into multiple
waveguides.
Parameters
----------
evolver : :class: qutip.scattering.Evolver
Evolver-wrapped Hamiltonian describing the system.
c_ops : list
list of collapse operators for each waveguide; these are assumed to
include spontaneous decay rates, e.g.
:math:`\\sigma = \\sqrt \\gamma \\cdot a`
taus_list : list-like
List of (list of emission times) for each waveguide.
Returns
-------
omega : :class: qutip.Qobj
The temporal scattering operator with dimensionality equal to the
system state.
"""
omega = 1
# Extract the full list of taus
taus = [(0.0, -1)] # temporal "ground state" for arbitrary waveguide
for i, tau_wg in enumerate(taus_list):
for tau in tau_wg:
taus.append((tau, i))
    taus.sort(key=lambda tup: tup[0])  # sort taus by time
# Compute Prod Ueff(tq, tq-1)
for i in range(1, len(taus)):
tq, q = taus[i]
tprev, _ = taus[i - 1]
omega = c_ops[q] * evolver.prop(tq, tprev) * omega
    # Add the <0| U_eff(t_max, tau_max) |0> term
    tmax = evolver.tlist[-1]
    taumax, _ = taus[-1]
    omega = evolver.prop(tmax, taumax) * omega
return omega
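# Worked sketch of the product assembled above (hypothetical numbers): for a single
# waveguide with collapse operator sm and taus_list = [[0.2, 0.6]], with tlist ending
# at T, the loop yields omega = U(T, 0.6) * sm * U(0.6, 0.2) * sm * U(0.2, 0.0).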
def temporal_basis_vector(waveguide_emission_indices, n_time_bins):
"""
Generate a temporal basis vector for emissions at specified time bins into
specified waveguides.
Parameters
----------
waveguide_emission_indices : list or tuple
List of indices where photon emission occurs for each waveguide,
e.g. [[t1_wg1], [t1_wg2, t2_wg2], [], [t1_wg4, t2_wg4, t3_wg4]].
n_time_bins : int
Number of time bins; the range over which each index can vary.
Returns
-------
temporal_basis_vector : :class: qutip.Qobj
A basis vector representing photon scattering at the specified indices.
If there are W waveguides, T times, and N photon emissions, then the
basis vector has dimensionality (W*T)^N.
"""
# Cast waveguide_emission_indices to list for mutability
waveguide_emission_indices = [list(i) for i in waveguide_emission_indices]
# Calculate total number of waveguides
W = len(waveguide_emission_indices)
# Calculate total number of emissions
num_emissions = sum([len(waveguide_indices) for waveguide_indices in
waveguide_emission_indices])
if num_emissions == 0:
return basis(W * n_time_bins, 0)
# Pad the emission indices with zeros
offset_indices = []
for i, wg_indices in enumerate(waveguide_emission_indices):
offset_indices += [index + (i * n_time_bins) for index in wg_indices]
# Return an appropriate tensor product state
return tensor([basis(n_time_bins * W, i) for i in offset_indices])
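# Index-layout sketch for temporal_basis_vector (hypothetical numbers): with W = 2
# waveguides and n_time_bins = 10, emissions at bin 3 in waveguide 0 and bin 7 in
# waveguide 1 give offset indices [3, 17], i.e. tensor(basis(20, 3), basis(20, 17)).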
def temporal_scattered_state(H, psi0, n_emissions, c_ops, tlist,
system_zero_state=None,
construct_effective_hamiltonian=True):
"""
Compute the scattered n-photon state projected onto the temporal basis.
Parameters
----------
H : :class: qutip.Qobj or list
System-waveguide(s) Hamiltonian or effective Hamiltonian in Qobj or
list-callback format. If construct_effective_hamiltonian is not
specified, an effective Hamiltonian is constructed from `H` and
`c_ops`.
psi0 : :class: qutip.Qobj
Initial state density matrix :math:`\\rho(t_0)` or state vector
:math:`\\psi(t_0)`.
n_emissions : int
Number of photon emissions to calculate.
c_ops : list
List of collapse operators for each waveguide; these are assumed to
include spontaneous decay rates, e.g.
:math:`\\sigma = \\sqrt \\gamma \\cdot a`
tlist : array_like
List of times for :math:`\\tau_i`. tlist should contain 0 and exceed
the pulse duration / temporal region of interest.
system_zero_state : :class: qutip.Qobj
State representing zero excitations in the system. Defaults to
:math:`\\psi(t_0)`
construct_effective_hamiltonian : bool
Whether an effective Hamiltonian should be constructed from H and c_ops:
:math:`H_{eff} = H - \\frac{i}{2} \\sum_n \\sigma_n^\\dagger \\sigma_n`
Default: True.
Returns
-------
phi_n : :class: qutip.Qobj
The scattered bath state projected onto the temporal basis given by
tlist. If there are W waveguides, T times, and N photon emissions, then
        the state is a tensor product state with dimensionality (W*T)^N.
"""
T = len(tlist)
W = len(c_ops)
if n_emissions == 0:
phi_n = zero_ket(W * T)
else:
phi_n = tensor([zero_ket(W * T)] * n_emissions)
if construct_effective_hamiltonian:
# Construct an effective Hamiltonian from system hamiltonian and c_ops
if isinstance(H, Qobj):
Heff = H - 1j / 2 * sum([op.dag() * op for op in c_ops])
elif isinstance(H, list):
Heff = H + [-1j / 2 * sum([op.dag() * op for op in c_ops])]
else:
raise TypeError("Hamiltonian must be Qobj or list-callback format")
else:
Heff = H
evolver = Evolver(Heff, tlist)
all_emission_indices = combinations_with_replacement(range(T), n_emissions)
if system_zero_state is None:
system_zero_state = psi0
# Compute <omega_tau> for all combinations of tau
for emission_indices in all_emission_indices:
# Consider unique partitionings of emission times into waveguides
partition = tuple(set(set_partition(emission_indices, W)))
# Consider all possible partitionings of time bins by waveguide
for indices in partition:
taus = [[tlist[i] for i in wg_indices] for wg_indices in indices]
omega = photon_scattering_operator(evolver, c_ops, taus)
phi_n_amp = system_zero_state.dag() * omega * psi0
# Add scatter amplitude times temporal basis to overall state
phi_n += phi_n_amp * temporal_basis_vector(indices, T)
return phi_n
def scattering_probability(H, psi0, n_emissions, c_ops, tlist,
system_zero_state=None,
construct_effective_hamiltonian=True):
"""
Compute the integrated probability of scattering n photons in an arbitrary
system. This function accepts a nonlinearly spaced array of times.
Parameters
----------
H : :class: qutip.Qobj or list
System-waveguide(s) Hamiltonian or effective Hamiltonian in Qobj or
list-callback format. If construct_effective_hamiltonian is not
specified, an effective Hamiltonian is constructed from H and
`c_ops`.
psi0 : :class: qutip.Qobj
Initial state density matrix :math:`\\rho(t_0)` or state vector
:math:`\\psi(t_0)`.
n_emissions : int
Number of photons emitted by the system (into any combination of
waveguides).
c_ops : list
List of collapse operators for each waveguide; these are assumed to
include spontaneous decay rates, e.g.
:math:`\\sigma = \\sqrt \\gamma \\cdot a`.
tlist : array_like
List of times for :math:`\\tau_i`. tlist should contain 0 and exceed
the pulse duration / temporal region of interest; tlist need not be
linearly spaced.
system_zero_state : :class: qutip.Qobj
State representing zero excitations in the system. Defaults to
        :math:`\\psi(t_0)`.
construct_effective_hamiltonian : bool
Whether an effective Hamiltonian should be constructed from H and c_ops:
:math:`H_{eff} = H - \\frac{i}{2} \\sum_n \\sigma_n^\\dagger \\sigma_n`
Default: True.
Returns
-------
scattering_prob : float
The probability of scattering n photons from the system over the time
range specified.
"""
phi_n = temporal_scattered_state(H, psi0, n_emissions, c_ops, tlist,
system_zero_state,
construct_effective_hamiltonian)
T = len(tlist)
W = len(c_ops)
# Compute <omega_tau> for all combinations of tau
all_emission_indices = combinations_with_replacement(range(T), n_emissions)
probs = np.zeros([T] * n_emissions)
# Project scattered state onto temporal basis
for emit_indices in all_emission_indices:
# Consider unique emission time partitionings
partition = tuple(set(set_partition(emit_indices, W)))
# wg_indices_list = list(set_partition(indices, W))
for wg_indices in partition:
projector = temporal_basis_vector(wg_indices, T)
amplitude = (projector.dag() * phi_n).full().item()
probs[emit_indices] += np.real(amplitude.conjugate() * amplitude)
# Iteratively integrate to obtain single value
while probs.shape != ():
        probs = np.trapz(probs, x=tlist)
return np.abs(probs)
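# Minimal end-to-end sketch (illustrative; the two-level emitter, pulse shape and
# decay rate below are hypothetical, not part of this module). Wrapped in a helper
# so nothing runs at import time.
def _example_single_photon_probability():  # pragma: no cover
    from qutip import destroy
    gamma = 1.0                                   # assumed waveguide coupling rate
    sm = np.sqrt(gamma) * destroy(2)              # collapse operator for the single waveguide
    H = [[sm + sm.dag(),                          # list-callback (time-dependent) Hamiltonian
          lambda t, args: 2.0 * np.exp(-(t - 1.0) ** 2 / 0.2)]]
    tlist = np.linspace(0, 3, 30)
    # Integrated probability of scattering exactly one photon over tlist:
    return scattering_probability(H, basis(2, 0), 1, [sm], tlist)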
|
|
#Copyright (c) 2017 Vantiv eCommerce
#
#Permission is hereby granted, free of charge, to any person
#obtaining a copy of this software and associated documentation
#files (the "Software"), to deal in the Software without
#restriction, including without limitation the rights to use,
#copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the
#Software is furnished to do so, subject to the following
#conditions:
#
#The above copyright notice and this permission notice shall be
#included in all copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
#EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
#OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
#NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
#HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
#WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
#FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
#OTHER DEALINGS IN THE SOFTWARE.
import os, sys
lib_path = os.path.abspath('../all')
sys.path.append(lib_path)
from SetupTest import *
import unittest
from mock import *
class TestConfigOverride(unittest.TestCase):
def setUp(self):
self.seq = range(10)
def testUserOverride(self):
authorization = litleXmlFields.authorization()
authorization.orderId = '1234'
authorization.amount = 106
authorization.orderSource = 'ecommerce'
card = litleXmlFields.cardType()
card.number = "4100000000000000"
card.expDate = "1210"
card.type = 'VI'
authorization.card = card
comm = Communications(config)
comm.http_post = MagicMock()
litle = litleOnlineRequest(config)
litle.setCommunications(comm)
litle._processResponse = MagicMock(return_value=None)
litle.sendRequest(authorization, user='Dan')
match_re = RegexMatcher(".*?<user>Dan</user>.*?")
comm.http_post.assert_called_once_with(match_re, url=ANY, proxy=ANY, timeout=ANY)
def testPasswordOverride(self):
authorization = litleXmlFields.authorization()
authorization.orderId = '1234'
authorization.amount = 106
authorization.orderSource = 'ecommerce'
card = litleXmlFields.cardType()
card.number = "4100000000000000"
card.expDate = "1210"
card.type = 'VI'
authorization.card = card
comm = Communications(config)
comm.http_post = MagicMock()
litle = litleOnlineRequest(config)
litle.setCommunications(comm)
litle._processResponse = MagicMock(return_value=None)
litle.sendRequest(authorization, password = 'customPassword')
match_re = RegexMatcher(".*?<password>customPassword</password>.*?")
comm.http_post.assert_called_once_with(match_re, url=ANY, proxy=ANY, timeout=ANY)
def testVersionOverride(self):
authorization = litleXmlFields.authorization()
authorization.orderId = '1234'
authorization.amount = 106
authorization.orderSource = 'ecommerce'
card = litleXmlFields.cardType()
card.number = "4100000000000000"
card.expDate = "1210"
card.type = 'VI'
authorization.card = card
comm = Communications(config)
comm.http_post = MagicMock()
litle = litleOnlineRequest(config)
litle.setCommunications(comm)
litle._processResponse = MagicMock(return_value=None)
litle.sendRequest(authorization, version="3.14")
match_re = RegexMatcher('.*?version="9.12".*?')
comm.http_post.assert_called_once_with(match_re, url=ANY, proxy=ANY, timeout=ANY)
def testMerchantIdOverride(self):
authorization = litleXmlFields.authorization()
authorization.orderId = '1234'
authorization.amount = 106
authorization.orderSource = 'ecommerce'
card = litleXmlFields.cardType()
card.number = "4100000000000000"
card.expDate = "1210"
card.type = 'VI'
authorization.card = card
comm = Communications(config)
comm.http_post = MagicMock()
litle = litleOnlineRequest(config)
litle.setCommunications(comm)
litle._processResponse = MagicMock(return_value=None)
litle.sendRequest(authorization, merchantId="98765")
match_re = RegexMatcher('.*?merchantId="98765".*?')
comm.http_post.assert_called_once_with(match_re, url=ANY, proxy=ANY, timeout=ANY)
def testReportGroupOverride(self):
authorization = litleXmlFields.authorization()
authorization.orderId = '1234'
authorization.amount = 106
authorization.orderSource = 'ecommerce'
card = litleXmlFields.cardType()
card.number = "4100000000000000"
card.expDate = "1210"
card.type = 'VI'
authorization.card = card
comm = Communications(config)
comm.http_post = MagicMock()
litle = litleOnlineRequest(config)
litle.setCommunications(comm)
litle._processResponse = MagicMock(return_value=None)
litle.sendRequest(authorization, reportGroup="testReports")
match_re = RegexMatcher('.*?reportGroup="testReports".*?')
comm.http_post.assert_called_once_with(match_re, url=ANY, proxy=ANY, timeout=ANY)
def testTimeoutOverride(self):
authorization = litleXmlFields.authorization()
authorization.orderId = '1234'
authorization.amount = 106
authorization.orderSource = 'ecommerce'
card = litleXmlFields.cardType()
card.number = "4100000000000000"
card.expDate = "1210"
card.type = 'VI'
authorization.card = card
comm = Communications(config)
comm.http_post = MagicMock()
litle = litleOnlineRequest(config)
litle.setCommunications(comm)
litle._processResponse = MagicMock(return_value=None)
litle.sendRequest(authorization, timeout=42)
comm.http_post.assert_called_once_with(ANY, url=ANY, proxy=ANY, timeout=42)
def testUrlOverride(self):
authorization = litleXmlFields.authorization()
authorization.orderId = '1234'
authorization.amount = 106
authorization.orderSource = 'ecommerce'
card = litleXmlFields.cardType()
card.number = "4100000000000000"
card.expDate = "1210"
card.type = 'VI'
authorization.card = card
comm = Communications(config)
comm.http_post = MagicMock()
litle = litleOnlineRequest(config)
litle.setCommunications(comm)
litle._processResponse = MagicMock(return_value=None)
litle.sendRequest(authorization, url="www.customurl.com")
comm.http_post.assert_called_once_with(ANY, url="www.customurl.com", proxy=ANY, timeout=ANY)
def testProxyOverride(self):
authorization = litleXmlFields.authorization()
authorization.orderId = '1234'
authorization.amount = 106
authorization.orderSource = 'ecommerce'
card = litleXmlFields.cardType()
card.number = "4100000000000000"
card.expDate = "1210"
card.type = 'VI'
authorization.card = card
comm = Communications(config)
comm.http_post = MagicMock()
litle = litleOnlineRequest(config)
litle.setCommunications(comm)
litle._processResponse = MagicMock(return_value=None)
litle.sendRequest(authorization, proxy="bumpyproxy:1776")
comm.http_post.assert_called_once_with(ANY, url=ANY, proxy="bumpyproxy:1776", timeout=ANY)
def testMissingUser(self):
config2 = Configuration()
config2.password = 'Pass'
config2.merchantId = '12345'
authorization = litleXmlFields.authorization()
authorization.orderId = '1234'
authorization.amount = 106
authorization.orderSource = 'ecommerce'
card = litleXmlFields.cardType()
card.number = "4100000000000000"
card.expDate = "1210"
card.type = 'VI'
authorization.card = card
comm = Communications(config2)
comm.http_post = MagicMock()
with self.assertRaises(AttributeError):
litleOnlineRequest(config2)
def testMissingPassword(self):
config3 = Configuration()
config3.username = 'User'
config3.merchantId = '12345'
authorization = litleXmlFields.authorization()
authorization.orderId = '1234'
authorization.amount = 106
authorization.orderSource = 'ecommerce'
card = litleXmlFields.cardType()
card.number = "4100000000000000"
card.expDate = "1210"
card.type = 'VI'
authorization.card = card
comm = Communications(config3)
comm.http_post = MagicMock()
with self.assertRaises(AttributeError):
litleOnlineRequest(config3)
def testMissingId(self):
config4 = Configuration()
config4.username = 'User'
config4.password = 'Pass'
authorization = litleXmlFields.authorization()
authorization.orderId = '1234'
authorization.amount = 106
authorization.orderSource = 'ecommerce'
card = litleXmlFields.cardType()
card.number = "4100000000000000"
card.expDate = "1210"
card.type = 'VI'
authorization.card = card
comm = Communications(config4)
comm.http_post = MagicMock()
with self.assertRaises(AttributeError):
litleOnlineRequest(config4)
def suite():
    return unittest.TestLoader().loadTestsFromTestCase(TestConfigOverride)
if __name__ == '__main__':
unittest.main()
|
|
"""Project
"""
from common import *
from mergedict import merge_recurse_inplace
from storage import ShelveStorage as Storage
import clang.cindex
import compdb
import logging
import math
import multiprocessing as mp
import os
import parser
import pprint
import re
import sys
import time
import util
def parse_one(src, compiler, *args, **kwargs):
logging.debug('parsing {} with {}'.format(src, args))
return parser.parse(src, *args, **kwargs)
@util.measure
def apply_parse(args, **kwargs):
logging.debug('apply_parse: {}, **kwargs: {}'.format(args, kwargs))
src, others = args
return parse_one(src, *others, **kwargs)
class Project:
def __init__(self, builddir, basedir):
self.builddir = builddir
self.basedir = basedir
if not self.basedir.endswith('/'):
self.basedir += '/'
@util.measure
def scan(self):
sources = []
processed = set()
re_src = re.compile(r'.*/(.*\.(c|cpp|cc))$')
for directory, arg_string in compdb.get_all_compile_commands(self.builddir):
args = arg_string.split()
compiler = args[0]
arguments = args[1:]
src = [x for x in arguments if re_src.match(x)]
assert(len(src) == 1)
src = src[0]
others = [x for x in arguments if not re_src.match(x)]
if src.endswith('.cpp') or src.endswith('.cc'):
                others.extend(['-x', 'c++'])
extensions = ('.hpp', '.h')
base, ext = os.path.splitext(src)
headers = [(base + ext, others)
for ext in extensions
if os.path.isfile(base + ext)]
sources.append((src, others))
sources.extend(headers)
return sources
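    # Sketch of scan()'s return format (hypothetical paths): each entry pairs a
    # source file (or an existing sibling header) with the remaining compiler
    # arguments, e.g.
    #   [('/proj/src/hello.cpp', ['-I/proj/include', '-x', 'c++']),
    #    ('/proj/src/hello.hpp', ['-I/proj/include', '-x', 'c++'])]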
def scan_modified(self, scanned_list, files):
if not files or not scanned_list:
return None
scanned_list_new = []
for src, others in scanned_list:
if self.has_file_modified_p(files, src):
scanned_list_new.append((src, others))
return scanned_list_new
@staticmethod
def get_files_from_db(dbname):
if os.path.exists(dbname):
parsed_dict = Storage(dbname, 'r')
files = parsed_dict.get(FILES, None)
logging.debug('get_files_from_db: dbname: {}, files: {}'.format(dbname, files))
parsed_dict.close()
return files
else:
return None
@staticmethod
def has_file_modified_p(files, src):
if os.path.exists(src):
time_file_modified = os.path.getmtime(src)
if src in files:
logging.debug('has_file_modified_p: {} > {}'.format(time_file_modified, files[src]))
if int(time_file_modified) >= int(files[src]):
return True
return False
@util.measure
def parse_all(self, sources, **kwargs):
# from http://eli.thegreenplace.net/2012/01/16/python-parallelizing-cpu-bound-tasks-with-multiprocessing
exclude_filters = '/usr/include'
sources = filter(lambda s: not s[0].startswith(exclude_filters), sources)
sources = filter(lambda s: not s[0].endswith('.c'), sources)
# sources = filter(lambda s: s[0].endswith('hello.cpp'), sources)
def worker(dirname, jobs, out_q):
result_dict = {}
files = {}
for job in jobs:
filename = job[0]
result_dict[filename] = apply_parse(job, basedir = dirname, **kwargs)
files[filename] = time.time()
parsed_dict = {}
for result in result_dict.values():
merge_recurse_inplace(parsed_dict, result)
parsed_dict[FILES] = files
out_q.put(parsed_dict)
out_q = mp.Queue()
nprocs = mp.cpu_count()
jobs = sources
chunksize = int(math.ceil(len(jobs) / float(nprocs)))
procs = []
for i in range(nprocs):
p = mp.Process(
target=worker,
args=(self.basedir, jobs[chunksize * i:chunksize * (i + 1)],
out_q))
procs.append(p)
p.start()
parsed_dict = {}
for i in range(nprocs):
result = out_q.get()
merge_recurse_inplace(parsed_dict, result)
for p in procs:
p.join()
parsed_dict['basedir'] = self.basedir
return parsed_dict
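    # Chunking sketch for parse_all (hypothetical numbers): with 10 sources and
    # 4 CPUs, chunksize = ceil(10 / 4) = 3, so the workers receive jobs[0:3],
    # jobs[3:6], jobs[6:9] and jobs[9:12], and each puts one merged dict on out_q.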
def parse_all_single(self, sources, **kwargs):
pp = pprint.PrettyPrinter(indent=4)
parsed_dict = {}
parsed_dict[FILES] = {}
for job in sources:
filename = job[0]
result = apply_parse(job, basedir = self.basedir, **kwargs)
result[FILES] = {}
result[FILES][filename] = time.time()
merge_recurse_inplace(parsed_dict, result)
parsed_dict['basedir'] = self.basedir
return parsed_dict
if __name__ == '__main__':
libclang_set_library_file()
logging.basicConfig(level=logging.INFO)
assert(len(sys.argv) > 1)
builddir = os.path.abspath(sys.argv[1])
basedir = os.path.abspath(sys.argv[2])
action = None
if len(sys.argv) == 4:
action = sys.argv[3]
project = Project(builddir, basedir)
parsed_dict = {}
import pprint
pp = pprint.PrettyPrinter(indent=4)
dbname = 'stags.db'
if action == 'scan':
pp.pprint(project.scan())
sys.exit(0)
elif action == 'new':
files = project.get_files_from_db(dbname)
if files:
scanned_list = project.scan()
pp.pprint(project.scan_modified(scanned_list, files))
else:
print('{} has no file modification data'.format(dbname))
sys.exit(0)
elif action == 'parse':
scanned_list = project.scan()
files = project.get_files_from_db(dbname)
pp.pprint(files)
if files:
scanned_list = project.scan_modified(scanned_list, files)
logging.debug('scanned_list: {}'.format([x[0] for x in scanned_list]))
parsed_dict = project.parse_all(scanned_list)
pp.pprint('Parsed {} in {} with keys'.format(len(parsed_dict), builddir))
pp.pprint(parsed_dict.keys())
elif action == 'parse_single':
scanned_list = project.scan()
files = project.get_files_from_db(dbname)
if files:
scanned_list = project.scan_modified(scanned_list, files)
parsed_dict = project.parse_all_single(scanned_list)
pp.pprint('Parsed {} in {} with keys'.format(len(parsed_dict), builddir))
pp.pprint(parsed_dict.keys())
storage = Storage(dbname, writeback=True)
storage_update = util.measure(storage.update)
merge_recurse_inplace(storage, parsed_dict, Storage)
storage.close()
|
|
# -*- coding: utf-8 -*-
"""Fake data generator.
To use:
1. Install fake-factory.
pip install fake-factory
2. Create your OSF user account
3. Run the script, passing in your username (email).
::
python -m scripts.create_fakes --user fred@cos.io
This will create 3 fake public projects, each with 3 fake contributors (with
you as the creator).
"""
from __future__ import print_function
import sys
import argparse
import logging
from modularodm.query.querydialect import DefaultQueryDialect as Q
from faker import Factory
from framework.auth import Auth
from website.app import init_app
from website import models, security
from framework.auth import utils
from tests.factories import UserFactory, ProjectFactory, NodeFactory
from faker.providers import BaseProvider
class Sciencer(BaseProvider):
# Science term Faker Provider created by @csheldonhess
# https://github.com/csheldonhess/FakeConsumer/blob/master/faker/providers/science.py
word_list = ('abiosis', 'abrade', 'absorption', 'acceleration', 'accumulation',
'acid', 'acidic', 'activist', 'adaptation', 'agonistic', 'agrarian', 'airborne',
'alchemist', 'alignment', 'allele', 'alluvial', 'alveoli', 'ambiparous',
'amphibian', 'amplitude', 'analysis', 'ancestor', 'anodize', 'anomaly',
'anther', 'antigen', 'apiary', 'apparatus', 'application', 'approximation',
'aquatic', 'aquifer', 'arboreal', 'archaeology', 'artery', 'assessment',
'asteroid', 'atmosphere', 'atomic', 'atrophy', 'attenuate', 'aven', 'aviary',
'axis', 'bacteria', 'balance', 'bases', 'biome', 'biosphere', 'black hole',
'blight', 'buoyancy', 'calcium', 'canopy', 'capacity', 'capillary', 'carapace',
'carcinogen', 'catalyst', 'cauldron', 'celestial', 'cells', 'centigrade',
'centimeter', 'centrifugal', 'chemical reaction', 'chemicals', 'chemistry',
'chlorophyll', 'choked', 'chromosome', 'chronic', 'churn', 'classification',
'climate', 'cloud', 'comet', 'composition', 'compound', 'compression',
'condensation', 'conditions', 'conduction', 'conductivity', 'conservation',
'constant', 'constellation', 'continental', 'convection', 'convention', 'cool',
'core', 'cosmic', 'crater', 'creature', 'crepuscular', 'crystals', 'cycle', 'cytoplasm',
'dampness', 'data', 'decay', 'decibel', 'deciduous', 'defoliate', 'density',
'denude', 'dependency', 'deposits', 'depth', 'desiccant', 'detritus',
'development', 'digestible', 'diluted', 'direction', 'disappearance', 'discovery',
'dislodge', 'displace', 'dissection', 'dissolution', 'dissolve', 'distance',
'diurnal', 'diverse', 'doldrums', 'dynamics', 'earthquake', 'eclipse', 'ecology',
'ecosystem', 'electricity', 'elements', 'elevation', 'embryo', 'endangered',
'endocrine', 'energy', 'entropy', 'environment', 'enzyme', 'epidermis', 'epoch',
'equilibrium', 'equine', 'erosion', 'essential', 'estuary', 'ethical', 'evaporation',
'event', 'evidence', 'evolution', 'examination', 'existence', 'expansion',
'experiment', 'exploration ', 'extinction', 'extreme', 'facet', 'fault', 'fauna',
'feldspar', 'fermenting', 'fission', 'fissure', 'flora', 'flourish', 'flowstone',
'foliage', 'food chain', 'forage', 'force', 'forecast', 'forensics', 'formations',
'fossil fuel', 'frequency', 'friction', 'fungi', 'fusion', 'galaxy', 'gastric',
'geo-science', 'geothermal', 'germination', 'gestation', 'global', 'gravitation',
'green', 'greenhouse effect', 'grotto', 'groundwater', 'habitat', 'heat', 'heavens',
'hemisphere', 'hemoglobin', 'herpetologist', 'hormones', 'host', 'humidity', 'hyaline',
'hydrogen', 'hydrology', 'hypothesis', 'ichthyology', 'illumination', 'imagination',
'impact of', 'impulse', 'incandescent', 'indigenous', 'inertia', 'inevitable', 'inherit',
'inquiry', 'insoluble', 'instinct', 'instruments', 'integrity', 'intelligence',
'interacts with', 'interdependence', 'interplanetary', 'invertebrate', 'investigation',
'invisible', 'ions', 'irradiate', 'isobar', 'isotope', 'joule', 'jungle', 'jurassic',
'jutting', 'kilometer', 'kinetics', 'kingdom', 'knot', 'laser', 'latitude', 'lava',
'lethal', 'life', 'lift', 'light', 'limestone', 'lipid', 'lithosphere', 'load',
'lodestone', 'luminous', 'luster', 'magma', 'magnet', 'magnetism', 'mangrove', 'mantle',
'marine', 'marsh', 'mass', 'matter', 'measurements', 'mechanical', 'meiosis', 'meridian',
'metamorphosis', 'meteor', 'microbes', 'microcosm', 'migration', 'millennia', 'minerals',
'modulate', 'moisture', 'molecule', 'molten', 'monograph', 'monolith', 'motion',
'movement', 'mutant', 'mutation', 'mysterious', 'natural', 'navigable', 'navigation',
'negligence', 'nervous system', 'nesting', 'neutrons', 'niche', 'nocturnal',
'nuclear energy', 'numerous', 'nurture', 'obsidian', 'ocean', 'oceanography', 'omnivorous',
'oolites (cave pearls)', 'opaque', 'orbit', 'organ', 'organism', 'ornithology',
'osmosis', 'oxygen', 'paleontology', 'parallax', 'particle', 'penumbra',
'percolate', 'permafrost', 'permutation', 'petrify', 'petrograph', 'phenomena',
'physical property', 'planetary', 'plasma', 'polar', 'pole', 'pollination',
'polymer', 'population', 'precipitation', 'predator', 'prehensile', 'preservation',
'preserve', 'pressure', 'primate', 'pristine', 'probe', 'process', 'propagation',
'properties', 'protected', 'proton', 'pulley', 'qualitative data', 'quantum', 'quark',
'quarry', 'radiation', 'radioactivity', 'rain forest', 'ratio', 'reaction', 'reagent',
'realm', 'redwoods', 'reeds', 'reflection', 'refraction', 'relationships between', 'reptile',
'research', 'resistance', 'resonate', 'rookery', 'rubble', 'runoff', 'salinity', 'sandbar',
'satellite', 'saturation', 'scientific investigation', 'scientist\'s', 'sea floor', 'season',
'sedentary', 'sediment', 'sedimentary', 'seepage', 'seismic', 'sensors', 'shard',
'similarity', 'solar', 'soluble', 'solvent', 'sonic', 'sound', 'source', 'species',
'spectacular', 'spectrum', 'speed', 'sphere', 'spring', 'stage', 'stalactite',
'stalagmites', 'stimulus', 'substance', 'subterranean', 'sulfuric acid', 'surface',
'survival', 'swamp', 'sylvan', 'symbiosis', 'symbol', 'synergy', 'synthesis', 'taiga',
'taxidermy', 'technology', 'tectonics', 'temperate', 'temperature', 'terrestrial',
'thermals', 'thermometer', 'thrust', 'torque', 'toxin', 'trade winds', 'pterodactyl',
'transformation tremors', 'tropical', 'umbra', 'unbelievable', 'underwater', 'unearth',
'unique', 'unite', 'unity', 'universal', 'unpredictable', 'unusual', 'ursine', 'vacuole',
'valuable', 'vapor', 'variable', 'variety', 'vast', 'velocity', 'ventifact', 'verdant',
'vespiary', 'viable', 'vibration', 'virus', 'viscosity', 'visible', 'vista', 'vital',
'vitreous', 'volt', 'volume', 'vulpine', 'wave', 'wax', 'weather', 'westerlies', 'wetlands',
'whitewater', 'xeriscape', 'xylem', 'yield', 'zero-impact', 'zone', 'zygote', 'achieving',
'acquisition of', 'an alternative', 'analysis of', 'approach toward', 'area', 'aspects of',
'assessment of', 'assuming', 'authority', 'available', 'benefit of', 'circumstantial',
'commentary', 'components', 'concept of', 'consistent', 'corresponding', 'criteria',
'data', 'deduction', 'demonstrating', 'derived', 'distribution', 'dominant', 'elements',
'equation', 'estimate', 'evaluation', 'factors', 'features', 'final', 'function',
'initial', 'instance ', 'interpretation of', 'maintaining ', 'method', 'perceived',
'percent', 'period', 'positive', 'potential', 'previous', 'primary', 'principle',
'procedure', 'process', 'range', 'region', 'relevant', 'required', 'research',
'resources', 'response', 'role', 'section', 'select', 'significant ', 'similar',
'source', 'specific', 'strategies', 'structure', 'theory', 'transfer', 'variables',
'corvidae', 'passerine', 'Pica pica', 'Chinchilla lanigera', 'Nymphicus hollandicus',
'Melopsittacus undulatus', )
def science_word(cls):
"""
:example 'Lorem'
"""
return cls.random_element(cls.word_list)
def science_words(cls, nb=3):
"""
Generate an array of random words
:example array('Lorem', 'ipsum', 'dolor')
:param nb how many words to return
"""
return [cls.science_word() for _ in range(0, nb)]
def science_sentence(cls, nb_words=6, variable_nb_words=True):
"""
Generate a random sentence
:example 'Lorem ipsum dolor sit amet.'
:param nb_words around how many words the sentence should contain
:param variable_nb_words set to false if you want exactly $nbWords returned,
otherwise $nbWords may vary by +/-40% with a minimum of 1
"""
if nb_words <= 0:
return ''
if variable_nb_words:
nb_words = cls.randomize_nb_elements(nb_words)
words = cls.science_words(nb_words)
words[0] = words[0].title()
return " ".join(words) + '.'
def science_sentences(cls, nb=3):
"""
Generate an array of sentences
:example array('Lorem ipsum dolor sit amet.', 'Consectetur adipisicing eli.')
:param nb how many sentences to return
:return list
"""
return [cls.science_sentence() for _ in range(0, nb)]
def science_paragraph(cls, nb_sentences=3, variable_nb_sentences=True):
"""
Generate a single paragraph
:example 'Sapiente sunt omnis. Ut pariatur ad autem ducimus et. Voluptas rem voluptas sint modi dolorem amet.'
:param nb_sentences around how many sentences the paragraph should contain
:param variable_nb_sentences set to false if you want exactly $nbSentences returned,
otherwise $nbSentences may vary by +/-40% with a minimum of 1
:return string
"""
if nb_sentences <= 0:
return ''
if variable_nb_sentences:
nb_sentences = cls.randomize_nb_elements(nb_sentences)
return " ".join(cls.science_sentences(nb_sentences))
def science_paragraphs(cls, nb=3):
"""
Generate an array of paragraphs
:example array($paragraph1, $paragraph2, $paragraph3)
:param nb how many paragraphs to return
:return array
"""
return [cls.science_paragraph() for _ in range(0, nb)]
def science_text(cls, max_nb_chars=200):
"""
Generate a text string.
Depending on the $maxNbChars, returns a string made of words, sentences, or paragraphs.
:example 'Sapiente sunt omnis. Ut pariatur ad autem ducimus et. Voluptas rem voluptas sint modi dolorem amet.'
:param max_nb_chars Maximum number of characters the text should contain (minimum 5)
:return string
"""
text = []
if max_nb_chars < 5:
raise ValueError('text() can only generate text of at least 5 characters')
if max_nb_chars < 25:
# join words
while not text:
size = 0
# determine how many words are needed to reach the $max_nb_chars once;
while size < max_nb_chars:
word = (' ' if size else '') + cls.science_word()
text.append(word)
size += len(word)
text.pop()
text[0] = text[0][0].upper() + text[0][1:]
last_index = len(text) - 1
text[last_index] += '.'
elif max_nb_chars < 100:
# join sentences
while not text:
size = 0
# determine how many sentences are needed to reach the $max_nb_chars once
while size < max_nb_chars:
sentence = (' ' if size else '') + cls.science_sentence()
text.append(sentence)
size += len(sentence)
text.pop()
else:
# join paragraphs
while not text:
size = 0
# determine how many paragraphs are needed to reach the $max_nb_chars once
while size < max_nb_chars:
paragraph = ('\n' if size else '') + cls.science_paragraph()
text.append(paragraph)
size += len(paragraph)
text.pop()
return "".join(text)
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.ERROR)
fake = Factory.create()
fake.add_provider(Sciencer)
def create_fake_user():
email = fake.email()
name = fake.name()
parsed = utils.impute_names(name)
user = UserFactory.build(username=email, fullname=name,
is_registered=True, is_claimed=True,
verification_key=security.random_string(15),
date_registered=fake.date_time(),
emails=[email],
**parsed
)
user.set_password('faker123')
user.save()
logger.info('Created user: {0} <{1}>'.format(user.fullname, user.username))
return user
def parse_args():
parser = argparse.ArgumentParser(description='Create fake data.')
parser.add_argument('-u', '--user', dest='user', required=True)
parser.add_argument('--nusers', dest='n_users', type=int, default=3)
parser.add_argument('--nprojects', dest='n_projects', type=int, default=3)
parser.add_argument('--ncomponents', dest='n_components', type=int, default=0)
parser.add_argument('-p', '--privacy', dest="privacy", type=str, default='private', choices=['public', 'private'])
parser.add_argument('-n', '--name', dest='name', type=str, default=None)
parser.add_argument('-t', '--tags', dest='n_tags', type=int, default=5)
return parser.parse_args()
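# Example invocation exercising the optional flags parsed above (illustrative values):
#   python -m scripts.create_fakes --user fred@cos.io --nprojects 5 --nusers 2 \
#       --privacy public --name "Fake Project" --tags 10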
def create_fake_project(creator, n_users, privacy, n_components, name, n_tags):
auth = Auth(user=creator)
project_title = name if name else fake.science_sentence()
project = ProjectFactory.build(title=project_title, description=fake.science_paragraph(), creator=creator)
project.set_privacy(privacy)
for _ in range(n_users):
contrib = create_fake_user()
project.add_contributor(contrib, auth=auth)
for _ in range(n_components):
NodeFactory(project=project, title=fake.science_sentence(), description=fake.science_paragraph(), creator=creator)
for _ in range(n_tags):
project.add_tag(fake.science_word(), auth=auth)
project.save()
logger.info('Created project: {0}'.format(project.title))
return project
def main():
args = parse_args()
creator = models.User.find(Q('username', 'eq', args.user))[0]
for i in range(args.n_projects):
name = args.name + str(i) if args.name else ''
create_fake_project(creator, args.n_users, args.privacy, args.n_components, name, args.n_tags)
print('Created {n} fake projects.'.format(n=args.n_projects))
sys.exit(0)
if __name__ == '__main__':
app = init_app('website.settings', set_backends=True, routes=True)
main()
|
|
"creatures.py - Pyro creatures"
from util import *
import items
import dungeons
import astar
class Bite(items.MeleeAttackType):
name = "bite"
verbs = lang.verbs_bite # no damage, hit, crit
verbs_sp = lang.verbs_bite_2p
damage = "1d4"
class Claw(items.MeleeAttackType):
name = "claw"
verbs = lang.verbs_claw
verbs_sp = lang.verbs_claw_2p
damage = "1d4"
class AI(object):
"Artificial intelligence for mobs."
def __init__(self, mob):
self.mob = mob
class Berserker(AI):
"""
This AI routine wanders aimlessly until it sees the @. Then it charges
and fights to the death.
"""
def __init__(self, mob):
AI.__init__(self, mob)
self.target = None
self.tx, self.ty, self.dir = None, None, None
self.state = "wander"
def Update(self):
"Take one action"
pc = Global.pc
#TODO: Generalize this to follow any mob, not just PC.
if self.state == "wander":
if self.dir == None:
self.PickNewDirection()
if self.mob.can_see_pc:
self.state = "chase"
return
else:
blocker = self.mob.SquareBlocked(self.mob.x+self.dx, self.mob.y+self.dy)
if blocker is None:
self.mob.Walk(self.dx, self.dy)
return
# The square is blocked; see if it's an openable door:
if isinstance(blocker, dungeons.Door):
if self.mob.can_open_doors:
if not blocker.Open(self.mob):
# Tried and failed to open the door; waste some time:
self.mob.Walk(0, 0)
return
self.PickNewDirection()
return
elif self.state == "chase":
if adjacent(self.mob, pc):
self.mob.Attack(pc)
return
if self.mob.can_see_pc:
self.tx, self.ty = pc.x, pc.y
else:
if (self.mob.x, self.mob.y) == (self.tx, self.ty):
# We're at the last place we saw the @, and we still can't see him:
log("%s lost sight of its prey." % self.mob.name)
self.state = "wander"
return
# We can see the PC, but are not in melee range: use A*:
path = astar.path(self.mob.x, self.mob.y, self.tx, self.ty,
self.mob.PathfindPass, max_length=10)
if path:
dx, dy = path[0][0] - self.mob.x, path[0][1] - self.mob.y
self.mob.Walk(dx, dy)
log("%s found a path from (%s, %s) to (%s, %s) %s." %
(self.mob.name, self.mob.x, self.mob.y, self.tx, self.ty, path))
return
else:
log("%s failed pathfinding." % self.mob.name)
# Pathfinding failed, but we can see the @...just sit there and be mad:
self.mob.Walk(0, 0)
return
def PickNewDirection(self):
try:
self.dir = choice([d for d in range(9) if d != 4
and not self.mob.SquareBlocked(
self.mob.x+offsets[d][0],
self.mob.y+offsets[d][1])])
self.dx, self.dy = offsets[self.dir]
return True
except IndexError:
# No options for movement:
self.mob.Walk(0, 0)
return False
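# State sketch summarizing Berserker.Update above:
#   "wander": drift along self.dir (opening doors when able) and switch to "chase"
#             as soon as the PC becomes visible.
#   "chase":  attack when adjacent; otherwise remember the PC's last seen square and
#             A* toward it, falling back to "wander" on reaching it without a sighting.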
class Creature(object):
"An animate object."
name = "Generic Creature" # If this is seen in-game, it's a bug.
can_open_doors = False
is_pc, can_see_pc, pc_can_see = False, False, False
# Default stats:
hp_max, mp_max = 10, 0
hp, mp = hp_max, mp_max
tile = "@"
color = c_Magenta
AIType = Berserker
unique = False
dead = False
level = 9999 # By default won't be generated
rarity = 1.0
natural_armor = 0
vision_radius = 8
free_motion = False
friendly = False
age = 0 # Strictly increasing timer for effect durations, regeneration, etc.
heal_timer, mana_timer = 0, 0 # For regeneration
effects = []
def __init__(self):
        self.equipped, self.unequipped = [], []  # By default, no equip slots
        self.effects = []  # per-instance effects list (avoid sharing the class-level default)
self.x, self.y, self.current_level = 0, 0, None
self.stats = Stats()
self.inventory = Inventory(self)
if self.AIType:
self.AI = self.AIType(self)
self.move_speed = 100
self.attack_speed = 100
self.cast_speed = 100
self.hp = self.hp_max
self.kill_xp = int(max(self.level+1, 1.5 ** self.level))
if not self.is_pc:
# For now, have every mob drop a level-appropriate item:
self.inventory.Pickup(items.random_item(int_range(self.level, self.level/4.0, 2)))
def Attack(self, target):
# If a weapon is wielded, attack with it:
try:
# TODO: Support dual (or more!) wielding by handling a multi-item return list:
attack = self.ItemsInSlot(lang.equip_slot_meleeweapon)[0].melee_attack
except IndexError:
# Otherwise, randomly choose a natural attack and use it:
attack = weighted_choice(self.attacks)
success = attack.Attempt(self, target)
def CanOccupyTerrain(self, terrain):
"Return whether the mob can enter a square with the given terrain."
if terrain == FLOOR:
return True
return False
def Delay(self, amount):
"Add the specified amount of delay to the creature."
self.timer += delay(amount)
self.age += delay(amount)
def Die(self):
# Creature has been reduced to <=0 hp, or otherwise should die:
self.inventory.DropAll()
self.current_level.RemoveCreature(self)
self.dead = True
def eSTR(self):
"Return the excess strength stat."
return int(self.stats("str") - ceil(self.inventory.TotalWeight()))
def EvasionBonus(self):
return min(self.eSTR(), self.RawEvasionBonus())
def FailedMove(self, mob):
# Something tried to move onto the mob; initiate an attack:
mob.TryAttack(self)
def Heal(self, amount):
"Heal the creature for the given amount."
# Can be overridden for creatures that respond differently to healing (undead etc)
heal_amount = min(amount, self.hp_max - self.hp)
        self.hp += heal_amount
return heal_amount
def ItemInSlot(self, equip_slot):
"Return the *first* item equipped in the slot, or None if none."
# Not ideal for slots that can be duplicated (e.g. finger)
try:
return self.ItemsInSlot(equip_slot)[0]
except IndexError: return None
def ItemsInSlot(self, equip_slot):
"Return the item(s) currently equipped in a given slot as a (possibly empty) list."
return [item for item in self.equipped if item.equip_slot == equip_slot]
def MeleeDamageBonus(self):
str_bonus = self.stats("str") - 8
try:
weapon_bonus = self.ItemsInSlot(lang.equip_slot_meleeweapon)[0].damage_bonus
except IndexError:
# Nothing is wielded. Maybe include some monk/karate bonus here someday.
weapon_bonus = 0
return str_bonus + weapon_bonus
def MeleeHitBonus(self):
dex_bonus = self.stats("dex") - 8
try:
weapon_bonus = self.ItemsInSlot(lang.equip_slot_meleeweapon)[0].hit_bonus
except IndexError:
# Nothing is wielded. Maybe include some monk/karate bonus here someday.
weapon_bonus = 0
return dex_bonus + weapon_bonus
def MissileHitBonus(self):
# For now it's the same as melee:
return self.MeleeHitBonus()
def Name(self):
return self.name
def PathfindPass(self, x, y):
"Return whether the square is passable for the pathfinder."
b = self.SquareBlocked(x, y)
return (b is None) or (isinstance(b, dungeons.Door) and self.can_open_doors)
def ProtectionBonus(self):
return (self.natural_armor + sum([a.armor_points for a in self.equipped])) / 10.0
def Quaff(self, potion):
"Quaff a potion."
potion.Quaff(self)
def RawEvasionBonus(self):
return self.stats("dex") - 8
def Regenerate(self):
"See if the creature heals any hp/mp."
if self.age >= self.heal_timer:
turns = 30 - self.stats("str")
self.heal_timer = self.age + 1000 * turns
if self.hp < self.hp_max:
self.hp += 1
if self.hp > self.hp_max:
self.hp -= 1
if self.age >= self.mana_timer:
turns = 30 - self.stats("int")
self.mana_timer = self.age + 1000 * turns
if self.mp < self.mp_max:
self.mp += 1
if self.mp > self.mp_max:
self.mp -= 1
def RemoveEffect(self, effect):
"Remove an effect from the mob if it's still there."
try:
self.effects.remove(effect)
effect.Remove(self, silent=True)
except ValueError:
# It wasn't there.
pass
def SquareBlocked(self, x, y):
"Return the first thing, if any, blocking the square."
L = self.current_level
if not (0 < x < L.layout.level_width-1
and 0 < y < L.layout.level_height-1):
# Can't occupy squares outside the level no matter what:
return OUTSIDE_LEVEL
# Check whether another creature is there:
c = L.CreatureAt(x, y)
if c: return c
# Check whether the terrain type is passable:
terrain = L.layout.data[y][x]
if not self.CanOccupyTerrain(terrain):
return WALL
# Check whether there's an impassable feature (e.g. closed door):
feature = L.FeatureAt(x, y)
if feature and not self.CanOccupyTerrain(feature.block_type):
return feature
return None
def TakeDamage(self, amount, type=None, source=None):
# This method can be overridden for special behavior (fire heals elemental, etc)
self.hp -= amount
# Check for death:
if self.hp <= 0:
self.Die()
if source is Global.pc:
Global.pc.GainXP(self.kill_xp)
return amount
def TakeEffect(self, new_effect, duration):
"Apply a temporary or permanent effect to the creature."
if duration is None:
new_effect.expiration = None
else:
new_effect.expiration = self.age + duration
# First remove any effect that is overridden by the new one:
overridden = [e for e in self.effects if new_effect.Overrides(e)]
for e in overridden: self.RemoveEffect(e)
# Now check whether an existing effect overrides this one:
overrides = [e for e in self.effects if e.Overrides(new_effect)]
if not overrides:
new_effect.Apply(self)
self.effects.append(new_effect)
def TryAttack(self, target):
# Mob has tried to move onto another mob; possibly attack.
# This would be the place to abort an attack on a friendly mob, etc.
# TODO: implement the above so mobs won't attack each other
# For now it's hacked:
if self.is_pc or target.is_pc:
self.Attack(target)
def Unequip(self, item, silent=False):
# Unequip the given item if equipped:
try:
self.equipped.remove(item)
self.unequipped.append(item.equip_slot)
if self.is_pc and not silent:
Global.IO.Message(lang.msg_you_unequip % lang.ArticleName("the", item))
item.OnUnequip(self)
return True
except ValueError:
return False
def Update(self):
assert not self.dead
self.UpdateEffects()
self.Regenerate()
self.AI.Update()
def UpdateEffects(self):
"Update any temporary mods on self or carried items."
expired_effects = [e for e in self.effects if e.expiration is not None
and e.expiration < self.age]
for e in expired_effects:
e.Remove(self)
self.effects.remove(e)
# TODO: add item updates too, once that type of effect exists
def Walk(self, dx, dy):
"Try to move the specified amounts."
msg = ""
if dx == dy == 0:
self.Delay(self.move_speed)
return True, msg
blocker = self.SquareBlocked(self.x+dx, self.y+dy)
if blocker:
if not self.free_motion or isinstance(blocker, Creature) or blocker == OUTSIDE_LEVEL:
# Something blocked the mob from moving--
try:
# Let the blocker respond if it can:
msg = blocker.FailedMove(self)
except AttributeError:
pass
return False, msg
self.current_level.MoveCreature(self, self.x + dx, self.y + dy)
self.Delay(self.move_speed)
return True, ""
def Wield(self, item):
"Wield the item as a melee weapon."
# If the item we're wielding is stacked, split one off to wield:
if item.quantity > 1:
stack = item
item = self.inventory.Remove(stack, 1)
self.inventory.Add(item, nostack=True)
try:
# TODO: Ask which to replace if dual-wielding:
wielded = self.ItemsInSlot(lang.equip_slot_meleeweapon)[0]
except IndexError:
wielded = None
if wielded is not None:
# Unequip the old item:
self.Unequip(wielded)
# Remove and re-add it to inventory so it'll stack if it should:
self.inventory.Remove(wielded)
self.inventory.Add(wielded)
# Wield the new weapon:
self.Equip(item)
def Equip(self, item, silent=False):
# Equip the given item if possible:
if item.equip_slot in self.unequipped:
self.equipped.append(item)
self.unequipped.remove(item.equip_slot)
if self.is_pc and not silent:
Global.IO.Message(lang.msg_you_equip % lang.ArticleName("the", item))
item.OnEquip(self)
return True
else:
return False
class Inventory(object):
"Inventory class for creatures and the player."
def __init__(self, mob):
self.mob = mob
self.items = []
self.capacity = mob.stats("str") * 10
def Add(self, item, nostack=False):
for i, L in self.items:
if not nostack and i.StacksWith(item):
i.quantity += item.quantity
letter = L
break
else:
letter = self.NextLetter()
self.items.append((item, letter))
return letter
def CanHold(self, item):
"Return whether the item can be picked up."
return item.Weight() + self.TotalWeight() <= self.Capacity()
def Capacity(self):
return self.capacity
def Drop(self, item, qty=1):
dropped = self.Remove(item, qty)
assert dropped is not None
# If the item was equipped, unequip it first:
if item in self.mob.equipped:
self.mob.Unequip(item)
text = lang.you_unequip_and_drop_item % lang.ArticleName("the", dropped)
else:
text = lang.you_drop_item % lang.ArticleName("the", dropped)
# Put the item on the floor:
self.mob.current_level.AddItem(dropped, self.mob.x, self.mob.y)
return True, text
def DropAll(self):
"Drop all inventory items--e.g. when the mob dies."
for i in self.items:
self.Drop(i[0])
def GetItemByLetter(self, letter):
items = [i[0] for i in self.items if i[1] == letter]
if len(items) == 0:
return None
elif len(items) == 1:
return items[0]
else:
raise IndexError
def Has(self, item):
"Return whether the item exists in inventory."
return item in [i[0] for i in self.items]
def ItemsOfType(self, type, letters=True):
# Verify valid type:
assert len([t for t in items.types if t[0] == type]) != 0
# Return the list of items:
it = [i for i in self.items if i[0].type == type]
it.sort(key=lambda i:i[0])
if letters:
return it
else:
return [i[0] for i in it]
def NextLetter(self):
"Return the first free letter."
taken = [item[1] for item in self.items]
for L in letters:
if L not in taken:
return L
return None
def Num(self):
"Number of items in the inventory."
return len(self.items)
def Pickup(self, item, qty=1):
# If they want to pick up fewer items than are there, split stacks:
no_remove = False
if qty < item.quantity:
new_item = item.Duplicate() # item to be picked up
item.quantity -= qty
new_item.quantity = qty
no_remove = True
else:
new_item = item
if self.CanHold(new_item):
# Add to inventory:
letter = self.Add(new_item)
# If it's sitting on the floor of a level, remove it from there:
if not no_remove and new_item.current_level is not None:
new_item.current_level.Dirty(new_item.x, new_item.y)
new_item.current_level.RemoveItem(new_item)
return True, lang.you_pick_up_item % (lang.ArticleName("the", new_item), letter)
else:
return False, lang.error_too_heavy
def Remove(self, item, qty=1):
"Remove a quantity of an item from inventory, returning the item stack removed."
new_items = []
removed_item = None
for i in self.items:
if i[0] == item:
assert i[0].quantity >= qty # Can't drop more than we have.
if i[0].quantity == qty:
removed_item = i[0]
# If it was equipped, unequip it:
self.mob.Unequip(item)
continue
elif i[0].quantity > qty:
removed_item = deepcopy(i[0])
removed_item.quantity = qty
i[0].quantity -= qty
new_items.append(i)
else:
new_items.append(i)
self.items = new_items
return removed_item
def TotalWeight(self):
return sum([i[0].Weight() for i in self.items])
class StatMod(object):
"A temporary or permanent modification of a stat."
def __init__(self, amount, desc):
self.amount, self.desc = amount, desc
class Stat(object):
"Tracks a single stat."
def __init__(self, abbr, name, value):
self.abbr, self.name, self.base_value = abbr, name, value
self.mods = []
def BaseValue(self):
return self.base_value
def CurrentValue(self):
return self.base_value + sum([mod.amount for mod in self.mods])
def Modify(self, amount, desc="", permanent=False):
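        # Permanent changes adjust the base value in place; temporary changes
        # are recorded as StatMod objects and returned so they can later be
        # removed via Stats.Unmodify.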
if permanent:
self.base_value += amount
else:
mod = StatMod(amount, desc)
# TODO: Only allow one instance with a given desc
self.mods.append(mod)
return mod
class Stats(object):
"Class to handle stat tracking for creatures."
def __init__(self, str=8, dex=8, int=8):
self.stats = {"str": Stat("str", lang.stat_name_str, str),
"dex": Stat("dex", lang.stat_name_dex, dex),
"int": Stat("int", lang.stat_name_int, int)}
def __call__(self, stat, base=False):
"Enables retrieving stats by: creature.stats('str')"
try:
if base:
return self.stats[stat].BaseValue()
else:
return self.stats[stat].CurrentValue()
except KeyError:
raise KeyError("Stat must be in %s." % self.stats.keys())
def Modify(self, stat, amount, desc="", permanent=False):
return self.stats[stat].Modify(amount, desc, permanent)
def Unmodify(self, mod):
for stat in self.stats.values():
try:
stat.mods.remove(mod)
except ValueError:
pass
######################### CREATURE FAMILIES ############################
class Humanoid(Creature):
tile = "h"
color = c_White
def __init__(self):
Creature.__init__(self)
self.unequipped = [
lang.equip_slot_head,
lang.equip_slot_torso,
lang.equip_slot_hands,
lang.equip_slot_waist,
lang.equip_slot_feet,
lang.equip_slot_finger,
lang.equip_slot_finger,
lang.equip_slot_neck,
lang.equip_slot_back,
lang.equip_slot_offhand,
lang.equip_slot_meleeweapon,
lang.equip_slot_missileweapon,
lang.equip_slot_ammo,
]
class Rodent(Creature):
tile = "r"
color = c_yellow
class Kobold(Creature):
tile = "k"
color = c_Green
class Goblin(Humanoid):
tile = "g"
color = c_green
####################### SPECIFIC CREATURE TYPES ########################
class Rat(Rodent):
name = lang.mob_name_rat
color = c_yellow
hp_max = 5
dex, str = 6, 8
level = 1
attacks = [
[Claw("1d2", 100), 2],
[Bite("1d3", 100), 1],
]
desc = lang.mob_desc_rat
class WimpyKobold(Kobold):
name = lang.mob_name_kobold
can_open_doors = True
hp_max = 6
str, dex, int = 2, 6, 3
level = 1
attacks = [[items.Punch("1d3", 100), 1]]
desc = lang.mob_desc_kobold
def __init__(self):
Kobold.__init__(self)
# Some kobolds carry weapons:
if irand(0, 10) < 7:
weapon = weighted_choice([
(items.ShortSword(), 1),
(items.Dagger(), 2),
(items.Club(), 3),
(items.Whip(), 0.5)])
self.inventory.Pickup(weapon)
self.Equip(weapon)
class WimpyGoblin(Goblin):
name = lang.mob_name_goblin
can_open_doors = True
hp_max = 7
level = 2
str, dex, int = 3, 6, 3
desc = lang.mob_desc_goblin
def __init__(self):
Goblin.__init__(self)
# Goblins always carry weapons:
weapon = weighted_choice([
(items.ShortSword(), 3),
(items.Club(), 4),
(items.LongSword(), 1)])
self.inventory.Pickup(weapon)
self.Equip(weapon)
class Wolf(Creature):
name = lang.mob_name_wolf
tile = "d"
color = c_White
hp_max = 7
level = 2
str, dex, int = 5, 7, 1
attacks = [(Bite("1d6", 100), 1)]
move_speed = 110
desc = lang.mob_desc_wolf
class Imp(Creature):
name = lang.mob_name_imp
tile = "i"
color = c_Red
hp_max = 4
str, dex, int = 2, 10, 9
move_speed = 110
attacks = [(Claw("1d3", 160), 1)]
level = 3
desc = lang.mob_desc_imp
class Ogre(Humanoid):
name = lang.mob_name_ogre
tile = "O"
color = c_Yellow
can_open_doors = True
hp_max = 15
str, dex, int = 14, 6, 3
level = 4
move_speed = 80
attacks = [[items.Punch("1d3", 80), 1]]
desc = lang.mob_desc_ogre
all = [Rat, WimpyKobold, WimpyGoblin, Wolf, Imp, Ogre]
def RandomMob(level):
"Create and return a mob appropriate to the given dungeon level."
mobs = [(mob, mob.rarity) for mob in all if -1 <= level - mob.level <= 1]
mob = weighted_choice(mobs)
return mob()
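# Illustrative note (not part of the original module): for a dungeon level of 2
# the candidate pool above is every mob whose level is 1, 2 or 3 (Rat,
# WimpyKobold, WimpyGoblin, Wolf and Imp given the classes above), weighted by
# the `rarity` attribute assumed to be defined on the Creature base class.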
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for aggregate operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.keras.optimizer_v2 import adagrad
from tensorflow.python.keras.optimizer_v2 import learning_rate_schedule
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
def adagrad_update_numpy(param, accum, g_t, lr=0.001, epsilon=1e-7):
accum_t = accum + g_t * g_t
param_t = param - lr * g_t / (np.sqrt(accum_t) + epsilon)
return param_t, accum_t
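# Illustrative check of the dense update above (not part of the original test;
# the numbers are plain arithmetic): with accum = 0.1, g = 0.1 and lr = 3.0,
# one step gives accum_t = 0.11 and moves the parameter by roughly
# 3.0 * 0.1 / sqrt(0.11) ~= 0.90, which is the reference value the graph/eager
# runs below are compared against.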
def sparse_adagrad_update_numpy(param,
accum,
gindexs,
gvalues,
lr=0.001,
epsilon=1e-7):
accum_t = copy.deepcopy(accum)
param_t = copy.deepcopy(param)
# first loop accumulates repeated indices if necessary.
for i in range(len(gindexs)):
gindex = gindexs[i]
gvalue = gvalues[i]
accum_t[gindex] = accum_t[gindex] + gvalue * gvalue
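  # Second loop applies the update, dividing by the fully accumulated value.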
for i in range(len(gindexs)):
gindex = gindexs[i]
gvalue = gvalues[i]
param_t[gindex] = param_t[gindex] - lr * gvalue / (
np.sqrt(accum_t[gindex]) + epsilon)
return param_t, accum_t
class AdagradOptimizerTest(test.TestCase):
def doTestBasic(self, use_callable_params=False):
for dtype in [dtypes.float32, dtypes.float64]:
with self.cached_session():
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
var0 = resource_variable_ops.ResourceVariable(var0_np)
var1 = resource_variable_ops.ResourceVariable(var1_np)
grads0 = constant_op.constant(grads0_np)
grads1 = constant_op.constant(grads1_np)
learning_rate = lambda: 3.0
if not use_callable_params:
learning_rate = learning_rate()
ada_opt = adagrad.Adagrad(learning_rate)
accum0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
accum1_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
if not context.executing_eagerly():
ada_update = ada_opt.apply_gradients(
zip([grads0, grads1], [var0, var1]))
self.evaluate(variables.global_variables_initializer())
# Fetch params to validate initial values
v0_val, v1_val = self.evaluate([var0, var1])
self.assertAllClose([1.0, 2.0], v0_val)
self.assertAllClose([3.0, 4.0], v1_val)
# Run 3 steps of adagrad
for _ in range(3):
if not context.executing_eagerly():
self.evaluate(ada_update)
else:
ada_opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
var0_np, accum0_np = adagrad_update_numpy(var0_np, accum0_np,
grads0_np, 3.0)
var1_np, accum1_np = adagrad_update_numpy(var1_np, accum1_np,
grads1_np, 3.0)
self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))
@test_util.run_in_graph_and_eager_modes(reset_test=True)
def testBasic(self):
self.doTestBasic()
def testBasicCallableParams(self):
with context.eager_mode():
self.doTestBasic(use_callable_params=True)
def testBasicWithLearningRateDecay(self):
for dtype in [dtypes.float32, dtypes.float64]:
with self.cached_session():
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
var0 = resource_variable_ops.ResourceVariable(var0_np)
var1 = resource_variable_ops.ResourceVariable(var1_np)
grads0 = constant_op.constant(grads0_np)
grads1 = constant_op.constant(grads1_np)
learning_rate = 3.0
decay = 0.5
ada_opt = adagrad.Adagrad(learning_rate, decay=decay)
accum0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
accum1_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
if not context.executing_eagerly():
ada_update = ada_opt.apply_gradients(
zip([grads0, grads1], [var0, var1]))
self.evaluate(variables.global_variables_initializer())
# Fetch params to validate initial values
v0_val, v1_val = self.evaluate([var0, var1])
self.assertAllClose([1.0, 2.0], v0_val)
self.assertAllClose([3.0, 4.0], v1_val)
# Run 3 steps of adagrad
for t in range(3):
if not context.executing_eagerly():
self.evaluate(ada_update)
else:
ada_opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
lr_np = learning_rate / (1 + decay * t)
var0_np, accum0_np = adagrad_update_numpy(var0_np, accum0_np,
grads0_np, lr_np)
var1_np, accum1_np = adagrad_update_numpy(var1_np, accum1_np,
grads1_np, lr_np)
self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))
def testBasicWithLearningRateInverseTimeDecay(self):
for dtype in [dtypes.float32, dtypes.float64]:
with self.cached_session():
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
var0 = resource_variable_ops.ResourceVariable(var0_np)
var1 = resource_variable_ops.ResourceVariable(var1_np)
grads0 = constant_op.constant(grads0_np)
grads1 = constant_op.constant(grads1_np)
learning_rate = 3.0
decay = 0.5
lr_schedule = learning_rate_schedule.InverseTimeDecay(
learning_rate, decay_steps=1.0, decay_rate=decay)
ada_opt = adagrad.Adagrad(lr_schedule)
accum0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
accum1_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
if not context.executing_eagerly():
ada_update = ada_opt.apply_gradients(
zip([grads0, grads1], [var0, var1]))
self.evaluate(variables.global_variables_initializer())
# Fetch params to validate initial values
v0_val, v1_val = self.evaluate([var0, var1])
self.assertAllClose([1.0, 2.0], v0_val)
self.assertAllClose([3.0, 4.0], v1_val)
# Run 3 steps of adagrad
for t in range(3):
if not context.executing_eagerly():
self.evaluate(ada_update)
else:
ada_opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
lr_np = learning_rate / (1 + decay * t)
var0_np, accum0_np = adagrad_update_numpy(var0_np, accum0_np,
grads0_np, lr_np)
var1_np, accum1_np = adagrad_update_numpy(var1_np, accum1_np,
grads1_np, lr_np)
self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))
@test_util.run_deprecated_v1
def testMinimizeSparseResourceVariable(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.cached_session():
var0 = resource_variable_ops.ResourceVariable(
[[1.0, 2.0], [3.0, 4.0]], dtype=dtype)
x = constant_op.constant([[4.0], [5.0]], dtype=dtype)
def loss():
pred = math_ops.matmul(embedding_ops.embedding_lookup([var0], [0]), x) # pylint: disable=cell-var-from-loop
return pred * pred
sgd_op = adagrad.Adagrad(1.0).minimize(loss, var_list=[var0])
variables.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllCloseAccordingToType(
[[1.0, 2.0], [3.0, 4.0]], var0.eval())
        # Run 1 step of adagrad
sgd_op.run()
# Validate updated params
self.assertAllCloseAccordingToType(
[[0, 1], [3, 4]], var0.eval(), atol=0.01)
@test_util.run_deprecated_v1
def testTensorLearningRate(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.cached_session():
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
var0 = resource_variable_ops.ResourceVariable(var0_np)
var1 = resource_variable_ops.ResourceVariable(var1_np)
grads0 = constant_op.constant(grads0_np)
grads1 = constant_op.constant(grads1_np)
learning_rate = constant_op.constant(3.0)
ada_opt = adagrad.Adagrad(learning_rate)
ada_update = ada_opt.apply_gradients(
zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], var0.eval())
self.assertAllClose([3.0, 4.0], var1.eval())
accum0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
accum1_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
# Run 3 steps of adagrad
for _ in range(3):
ada_update.run()
var0_np, accum0_np = adagrad_update_numpy(var0_np, accum0_np,
grads0_np, learning_rate)
var1_np, accum1_np = adagrad_update_numpy(var1_np, accum1_np,
grads1_np, learning_rate)
self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))
@test_util.run_deprecated_v1
def testSparseBasic(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.cached_session():
var0_np = np.array([1.0, 1.0, 2.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1, 0, 0.1], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01, 0, 0.01], dtype=dtype.as_numpy_dtype)
var0 = resource_variable_ops.ResourceVariable(var0_np)
var1 = resource_variable_ops.ResourceVariable(var1_np)
grads0_np_indices = np.array([0, 2], dtype=np.int32)
grads0 = ops.IndexedSlices(
constant_op.constant(grads0_np[grads0_np_indices]),
constant_op.constant(grads0_np_indices), constant_op.constant([3]))
grads1_np_indices = np.array([0, 2], dtype=np.int32)
grads1 = ops.IndexedSlices(
constant_op.constant(grads1_np[grads1_np_indices]),
constant_op.constant(grads1_np_indices), constant_op.constant([3]))
learning_rate = 3.0
ada_opt = adagrad.Adagrad(learning_rate)
ada_update = ada_opt.apply_gradients(
zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllClose([1.0, 1.0, 2.0], var0.eval())
self.assertAllClose([3.0, 3.0, 4.0], var1.eval())
accum0_np = np.array([0.1, 0.1, 0.1], dtype=dtype.as_numpy_dtype)
accum1_np = np.array([0.1, 0.1, 0.1], dtype=dtype.as_numpy_dtype)
        # Run 3 steps of adagrad
for _ in range(3):
ada_update.run()
var0_np, accum0_np = sparse_adagrad_update_numpy(
var0_np, accum0_np, grads0_np_indices,
grads0_np[grads0_np_indices], learning_rate)
var1_np, accum1_np = sparse_adagrad_update_numpy(
var1_np, accum1_np, grads1_np_indices,
grads1_np[grads1_np_indices], learning_rate)
self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))
@test_util.run_deprecated_v1
def testSparseRepeatedIndices(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.cached_session():
var_np = np.array([[1.0], [2.0]], dtype=dtype.as_numpy_dtype)
repeated_index_update_var = resource_variable_ops.ResourceVariable(
var_np, dtype=dtype)
aggregated_update_var = resource_variable_ops.ResourceVariable(
var_np, dtype=dtype)
grad_repeated_index = ops.IndexedSlices(
constant_op.constant(
[0.1, 0.1], shape=[2, 1], dtype=dtype),
constant_op.constant([1, 1]),
constant_op.constant([2, 1]))
grad_aggregated = ops.IndexedSlices(
constant_op.constant(
[0.2], shape=[1, 1], dtype=dtype),
constant_op.constant([1]),
constant_op.constant([2, 1]))
repeated_update = adagrad.Adagrad(3.0).apply_gradients(
[(grad_repeated_index, repeated_index_update_var)])
aggregated_update = adagrad.Adagrad(3.0).apply_gradients(
[(grad_aggregated, aggregated_update_var)])
variables.global_variables_initializer().run()
self.assertAllClose(aggregated_update_var.eval(),
repeated_index_update_var.eval())
for _ in range(3):
repeated_update.run()
aggregated_update.run()
self.assertAllClose(aggregated_update_var.eval(),
repeated_index_update_var.eval())
@test_util.run_deprecated_v1
def testSparseRepeatedIndicesByEmbeddingLookUp(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.cached_session():
var_repeated = resource_variable_ops.ResourceVariable(
[1.0, 2.0], dtype=dtype)
loss_repeated = lambda: math_ops.reduce_sum( # pylint: disable=g-long-lambda
embedding_ops.embedding_lookup(var_repeated, [0, 0])) # pylint: disable=cell-var-from-loop
var_aggregated = resource_variable_ops.ResourceVariable(
[1.0, 2.0], dtype=dtype)
loss_aggregated = lambda: 2 * math_ops.reduce_sum( # pylint: disable=g-long-lambda
embedding_ops.embedding_lookup(var_aggregated, [0])) # pylint: disable=cell-var-from-loop
update_op_repeated = adagrad.Adagrad(2.0).minimize(
loss_repeated, var_list=[var_repeated])
update_op_aggregated = adagrad.Adagrad(2.0).minimize(
loss_aggregated, var_list=[var_aggregated])
variables.global_variables_initializer().run()
self.assertAllCloseAccordingToType(
var_repeated.eval(), var_aggregated.eval())
for _ in range(3):
update_op_repeated.run()
update_op_aggregated.run()
self.assertAllCloseAccordingToType(
var_repeated.eval(), var_aggregated.eval())
@test_util.run_deprecated_v1
def testSparseStability(self):
for dtype in [dtypes.half]:
with self.cached_session():
shape = [1, 6]
var0_np = np.array([[
0.00872496, -0.106952, 0.110467, 0.226505, -0.0147257, -0.0105945
]],
dtype=dtype.as_numpy_dtype)
var0 = resource_variable_ops.ResourceVariable(var0_np)
grads0_np = np.array([[
-5.91278e-05, 5.31673e-05, -2.5779e-06, 4.29153e-05, -8.4877e-05,
-9.48906e-05
]],
dtype=dtype.as_numpy_dtype)
grads0 = ops.IndexedSlices(
constant_op.constant(grads0_np), constant_op.constant([0]),
constant_op.constant(shape))
ada_opt = adagrad.Adagrad(1.0)
ada_update = ada_opt.apply_gradients(zip([grads0], [var0]))
slot0 = ada_opt.get_slot(var0, "accumulator")
init = variables.global_variables_initializer()
for _ in range(100):
init.run()
ada_update.run()
self.assertAllCloseAccordingToType(
np.array([[0.1, 0.1, 0.1, 0.1, 0.1, 0.1]]), slot0.eval())
self.assertAllCloseAccordingToType(
np.array([[
0.00891194, -0.10712013, 0.11047515, 0.22636929, -0.0144573,
-0.01029443
]]), var0.eval())
@test_util.run_deprecated_v1
def testSharing(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.cached_session():
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
var0 = resource_variable_ops.ResourceVariable(var0_np)
var1 = resource_variable_ops.ResourceVariable(var1_np)
grads0 = constant_op.constant(grads0_np)
grads1 = constant_op.constant(grads1_np)
learning_rate = 3.0
ada_opt = adagrad.Adagrad(learning_rate)
# Apply the optimizer twice. Both applications will use
# the same accums.
ada_update1 = ada_opt.apply_gradients(
zip([grads0, grads1], [var0, var1]))
ada_update2 = ada_opt.apply_gradients(
zip([grads0, grads1], [var0, var1]))
slot0 = ada_opt.get_slot(var0, "accumulator")
self.assertEqual(slot0.shape, var0.shape)
slot1 = ada_opt.get_slot(var1, "accumulator")
self.assertEqual(slot1.shape, var1.shape)
variables.global_variables_initializer().run()
# Fetch params to validate initial values.
self.assertAllClose([1.0, 2.0], var0.eval())
self.assertAllClose([3.0, 4.0], var1.eval())
# Mix the first and the second adagrad for 3 steps.
ada_update1.run()
ada_update2.run()
ada_update1.run()
accum0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
accum1_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
for _ in range(3):
var0_np, accum0_np = adagrad_update_numpy(var0_np, accum0_np,
grads0_np, learning_rate)
var1_np, accum1_np = adagrad_update_numpy(var1_np, accum1_np,
grads1_np, learning_rate)
self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))
def testConstructAdagradWithLR(self):
opt = adagrad.Adagrad(lr=1.0)
opt_2 = adagrad.Adagrad(learning_rate=0.1, lr=1.0)
opt_3 = adagrad.Adagrad(learning_rate=0.1)
self.assertIsInstance(opt.lr, variables.Variable)
self.assertIsInstance(opt_2.lr, variables.Variable)
self.assertIsInstance(opt_3.lr, variables.Variable)
self.evaluate(variables.global_variables_initializer())
self.assertAllClose(self.evaluate(opt.lr), (1.0))
self.assertAllClose(self.evaluate(opt_2.lr), (1.0))
self.assertAllClose(self.evaluate(opt_3.lr), (0.1))
def testConstructAdagradWithEpsilonValues(self):
opt = adagrad.Adagrad(epsilon=None)
config = opt.get_config()
self.assertEqual(config["epsilon"], 1e-7)
opt = adagrad.Adagrad(epsilon=1e-6)
config = opt.get_config()
self.assertEqual(config["epsilon"], 1e-6)
with self.assertRaisesRegexp(ValueError,
"epsilon must be larger than 1e-7"):
opt = adagrad.Adagrad(epsilon=1e-8)
if __name__ == "__main__":
test.main()
|
|
import os
import os.path
import sys
import functools
import time
import imp
import code
import platform
import threading
import collections
import weakref
import gevent
import gevent.pool
import gevent.socket
import gevent.threadpool
import gevent.greenlet
import gevent.event  # Event objects are used below
import faststat
from support import context
from support import ll
ml = ll.LLogger()
# TODO: investigate replacing curtime with nanotime
# (mostly used in ThreadPool dispatch)
if hasattr(time, "perf_counter"):
curtime = time.perf_counter # 3.3
elif platform.system() == "Windows":
curtime = time.clock
else:
curtime = time.time
class Greenlet(gevent.Greenlet):
'''
A subclass of gevent.Greenlet which adds additional members:
- locals: a dict of variables that are local to the "spawn tree" of
greenlets
- spawner: a weak-reference back to the spawner of the
greenlet
- stacks: a record of the stack at which the greenlet was
spawned, and ancestors
'''
def __init__(self, f, *a, **kw):
super(Greenlet, self).__init__(f, *a, **kw)
spawner = self.spawn_parent = weakref.proxy(gevent.getcurrent())
if not hasattr(spawner, 'locals'):
spawner.locals = {}
self.locals = spawner.locals
stack = []
cur = sys._getframe()
while cur:
stack.extend((cur.f_code, cur.f_lineno))
cur = cur.f_back
self.stacks = (tuple(stack),) + getattr(spawner, 'stacks', ())[:10]
def get_spawntree_local(name):
"""
Essentially provides dynamic scope lookup for programming aspects
that cross greenlet borders. Be wary of overusing this
functionality, as it effectively constitutes mutating global state
which can lead to race conditions and architectural problems.
"""
locals = getattr(gevent.getcurrent(), 'locals', None)
if locals:
return locals.get(name)
return None
def set_spawntree_local(name, val):
"""
Similar to get_spawntree_local except that it allows setting these
values. Again, be wary of overuse.
"""
cur = gevent.getcurrent()
if not hasattr(cur, 'locals'):
cur.locals = {}
cur.locals[name] = val
def unset_spawntree_local(name):
'''
Delete a variable from the spawntree.
'''
locals = getattr(gevent.getcurrent(), 'locals', None)
if locals:
del locals[name]
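# Rough usage sketch (not part of the original module; assumes callers spawn
# work through the Greenlet class above so `locals` is inherited by children):
#
#   set_spawntree_local('request_id', 'abc123')
#   child = Greenlet.spawn(lambda: get_spawntree_local('request_id'))
#   child.join()   # child.value == 'abc123', inherited from the spawner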
def staggered_retries(run, *a, **kw):
"""
    A version of spawn that will block until it is done
running the function, and which will call the function
repeatedly as time progresses through the timeouts list.
Best used for idempotent network calls (e.g. HTTP GETs).
e.g.::
user_data = async.staggered_retries(get_data, max_results,
latent_data_ok, public_credential_load,
timeouts_secs=[0.1, 0.5, 1, 2])
returns None on timeout.
"""
ctx = context.get_context()
ready = gevent.event.Event()
ready.clear()
def callback(source):
if source.successful():
ready.set()
if 'timeouts_secs' in kw:
timeouts_secs = kw.pop('timeouts_secs')
else:
timeouts_secs = [0.05, 0.1, 0.15, 0.2]
if timeouts_secs[0] > 0:
timeouts_secs.insert(0, 0)
gs = gevent.spawn(run, *a, **kw)
gs.link_value(callback)
running = [gs]
for i in range(1, len(timeouts_secs)):
this_timeout = timeouts_secs[i] - timeouts_secs[i - 1]
if ctx.dev:
this_timeout = this_timeout * 5.0
ml.ld2("Using timeout {0}", this_timeout)
try:
with gevent.Timeout(this_timeout):
ready.wait()
break
except gevent.Timeout:
ml.ld2("Timed out!")
log_rec = ctx.log.critical('ASYNC.STAGGER', run.__name__)
log_rec.failure('timed out after {timeout}',
timeout=this_timeout)
gs = gevent.spawn(run, *a, **kw)
gs.link_value(callback)
running.append(gs)
vals = [l.value for l in running if l.successful()]
for g in running:
g.kill()
if vals:
return vals[0]
else:
return None
def timed(f):
'''
Wrap a function and time all of its execution calls in milliseconds.
'''
fname = os.path.basename(f.__code__.co_filename) or '_'
line_no = repr(f.__code__.co_firstlineno)
name = 'timed.{0}[{1}:{2}](ms)'.format(f.__name__, fname, line_no)
@functools.wraps(f)
def g(*a, **kw):
s = faststat.nanotime()
r = f(*a, **kw)
context.get_context().stats[name].add((faststat.nanotime() - s) / 1e6)
return r
return g
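# Rough usage sketch (not part of the original module):
#
#   @timed
#   def parse(blob):
#       ...
#
# each call to parse() then adds its wall-clock duration in milliseconds to a
# faststat counter named roughly 'timed.parse[<file>:<line>](ms)' on the
# current context.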
class ThreadPoolDispatcher(object):
def __init__(self, pool, name):
self.pool = pool
self.name = name
def __call__(self, f):
@functools.wraps(f)
def g(*a, **kw):
enqueued = curtime()
ctx = context.get_context()
started = []
def in_thread(*a, **kw):
ml.ld3("In thread {0}", f.__name__)
started.append(curtime())
return f(*a, **kw)
# some modules import things lazily; it is too dangerous
# to run a function in another thread if the import lock is
# held by the current thread (this happens rarely -- only
# if the thread dispatched function is being executed at
# the import time of a module)
if not ctx.cpu_thread_enabled or imp.lock_held():
ret = in_thread(*a, **kw)
elif in_threadpool() is self.pool:
ret = in_thread(*a, **kw)
else:
ctx.stats[self.name + '.depth'].add(1 + len(self.pool))
ret = self.pool.apply_e((Exception,), in_thread, a, kw)
ml.ld3("Enqueued to thread {0}/depth {1}", f.__name__, len(pool))
start = started[0]
duration = curtime() - start
queued = start - enqueued
if hasattr(ret, '__len__') and callable(ret.__len__):
prsize = ret.__len__() # parameter-or-return size
elif a and hasattr(a[0], '__len__') and callable(a[0].__len__):
prsize = a[0].__len__()
else:
prsize = None
            _queue_stats(self.name, f.__name__, queued, duration, prsize)
return ret
g.no_defer = f
return g
def in_threadpool():
    'Function to return the threadpool in which code is currently executing (if any).'
frame = sys._getframe()
while frame.f_back:
frame = frame.f_back
self = frame.f_locals.get('self')
if (isinstance(self, gevent.threadpool.ThreadPool) and
frame.f_code is getattr(getattr(self._worker, "im_func"), "func_code")):
return self
return None
class CPUThread(object):
'''
Manages a single worker thread to dispatch cpu intensive tasks to.
    Significantly less overhead than gevent.threadpool.ThreadPool() since it
uses prompt notifications rather than polling. The trade-off is that only
one thread can be managed this way.
Since there is only one thread, hub.loop.async() objects may be used
instead of polling to handle inter-thread communication.
'''
def __init__(self):
self.in_q = collections.deque()
self.out_q = collections.deque()
self.in_async = None
self.out_async = gevent.get_hub().loop.async()
self.out_q_has_data = gevent.event.Event()
self.out_async.start(self.out_q_has_data.set)
self.worker = threading.Thread(target=self._run)
self.worker.daemon = True
self.stopping = False
self.results = {}
# start running thread / greenlet after everything else is set up
self.worker.start()
self.notifier = gevent.spawn(self._notify)
def _run(self):
# in_cpubound_thread is sentinel to prevent double thread dispatch
context.get_context().thread_locals.in_cpubound_thread = True
try:
self.in_async = gevent.get_hub().loop.async()
self.in_q_has_data = gevent.event.Event()
self.in_async.start(self.in_q_has_data.set)
while not self.stopping:
if not self.in_q:
# wait for more work
self.in_q_has_data.clear()
self.in_q_has_data.wait()
continue
# arbitrary non-preemptive service discipline can go here
# FIFO for now, but we should experiment with others
jobid, func, args, kwargs, enqueued = self.in_q.popleft()
started = curtime()
try:
ret = self.results[jobid] = func(*args, **kwargs)
except Exception as e:
ret = self.results[jobid] = self._Caught(e)
self.out_q.append(jobid)
self.out_async.send()
# keep track of some statistics
queued, duration = started - enqueued, curtime() - started
size = None
                # ret is set up above, before the async send
if hasattr(ret, '__len__') and callable(ret.__len__):
size = len(ret)
_queue_stats('cpu_bound', func.__name__, queued, duration, size)
except:
self._error()
# this may always halt the server process
def apply(self, func, args, kwargs):
done = gevent.event.Event()
self.in_q.append((done, func, args, kwargs, curtime()))
context.get_context().stats['cpu_bound.depth'].add(1 + len(self.in_q))
while not self.in_async:
gevent.sleep(0.01) # poll until worker thread has initialized
self.in_async.send()
done.wait()
res = self.results[done]
del self.results[done]
if isinstance(res, self._Caught):
raise res.err
return res
def _notify(self):
try:
while not self.stopping:
if not self.out_q:
# wait for jobs to complete
self.out_q_has_data.clear()
self.out_q_has_data.wait()
continue
self.out_q.popleft().set()
except:
self._error()
class _Caught(object):
def __init__(self, err):
self.err = err
def __repr__(self):
cn = self.__class__.__name__
return ("<%s@%s in_q:%s out_q:%s>" %
(cn, id(self), len(self.in_q), len(self.out_q)))
def _error(self):
# TODO: something better, but this is darn useful for debugging
import traceback
traceback.print_exc()
ctx = context.get_context()
tl = ctx.thread_locals
if hasattr(tl, 'cpu_bound_thread') and tl.cpu_bound_thread is self:
del tl.cpu_bound_thread
def _queue_stats(qname, fname, queued_secs, duration_secs, size_B=None):
    # queued_secs/duration_secs are wall-clock seconds (curtime deltas);
    # the stats themselves are recorded in milliseconds.
    ctx = context.get_context()
    fprefix = qname + '.' + fname
    ctx.stats[fprefix + '.queued(ms)'].add(queued_secs * 1000)
    ctx.stats[fprefix + '.duration(ms)'].add(duration_secs * 1000)
    ctx.stats[qname + '.queued(ms)'].add(queued_secs * 1000)
    ctx.stats[qname + '.duration(ms)'].add(duration_secs * 1000)
    if size_B is not None:
        ctx.stats[fprefix + '.len'].add(size_B)
        if duration_secs:  # may be 0
            ctx.stats[fprefix + '.rate(B/ms)'].add(size_B / (duration_secs * 1000.0))
# TODO: make size configurable
io_bound = ThreadPoolDispatcher(gevent.threadpool.ThreadPool(10), 'io_bound')
# N.B. In many cases fcntl could be used as an alternative method of
# achieving non-blocking file io on unix systems
def cpu_bound(f, p=None):
'''
Cause the decorated function to have its execution deferred to a
separate thread to avoid blocking the IO loop in the main thread.
Useful for wrapping encryption or serialization tasks.
Example usage::
@async.cpu_bound
def my_slow_function():
pass
'''
@functools.wraps(f)
def g(*a, **kw):
ctx = context.get_context()
# in_cpubound_thread is sentinel to prevent double-thread dispatch
if (not ctx.cpu_thread_enabled or imp.lock_held()
or getattr(ctx.thread_locals, 'in_cpubound_thread', False)):
return f(*a, **kw)
if not hasattr(ctx.thread_locals, 'cpu_bound_thread'):
ctx.thread_locals.cpu_bound_thread = CPUThread()
ml.ld3("Calling in cpu thread {0}", f.__name__)
return ctx.thread_locals.cpu_bound_thread.apply(f, a, kw)
g.no_defer = f
return g
def cpu_bound_if(p):
'''
Similar to cpu_bound, but should be called with a predicate
parameter which determines whether or not to dispatch to a
cpu_bound thread. The predicate will be passed the same
parameters as the function itself.
Example usage::
# will be deferred to a thread if parameter greater than 16k,
# else run inline
@async.cpu_bound_if(lambda s: len(s) > 16 * 1024)
def my_string_function(data):
pass
'''
def g(f):
f = cpu_bound(f)
@functools.wraps(f)
def h(*a, **kw):
if p(*a, **kw):
return f(*a, **kw)
return f.no_defer(*a, **kw)
return h
return g
def close_threadpool():
tlocals = context.get_context().thread_locals
if hasattr(tlocals, 'cpu_bound_thread'):
ml.ld2("Closing thread pool {0}", id(tlocals.cpu_thread))
cpu_thread = tlocals.cpu_bound_thread
cpu_thread.stopping = True
del tlocals.cpu_bound_thread
def killsock(sock):
"""Attempts to cleanly shutdown a socket. Regardless of cleanliness,
ensures that upon return, the socket is fully closed, catching any
exceptions along the way. A safe and prompt way to dispose of the
socket, freeing system resources.
"""
if hasattr(sock, '_sock'):
ml.ld("Killing socket {0}/FD {1}", id(sock), sock._sock.fileno())
else:
ml.ld("Killing socket {0}", id(sock))
try:
# TODO: better ideas for how to get SHUT_RDWR constant?
sock.shutdown(gevent.socket.SHUT_RDWR)
except gevent.socket.error:
pass # just being nice to the server, don't care if it fails
except Exception as e:
log_rec = context.get_context().log.info("SOCKET", "SHUTDOWN")
log_rec.failure('error ({exc}) shutting down socket: {socket}',
socket=sock, exc=e)
try:
sock.close()
except gevent.socket.error:
pass # just being nice to the server, don't care if it fails
except Exception as e:
log_rec = context.get_context().log.info("SOCKET", "CLOSE")
log_rec.failure('error ({exc}) closing socket: {socket}',
socket=sock, exc=e)
PID = os.getpid()
def check_fork(fn):
"""Hack for Django/gevent interaction to reset after non-gevent fork."""
@functools.wraps(fn)
def wrapper(request):
global PID
if PID != os.getpid():
gevent.get_hub().loop.reinit()
PID = os.getpid()
return fn(request)
return wrapper
# a little helper for running a greenlet-friendly console
# implemented here since it directly references gevent
class GreenConsole(code.InteractiveConsole):
@io_bound
def raw_input(self, prompt=""):
return code.InteractiveConsole.raw_input(self, prompt)
def start_repl(local=None, banner="infra REPL (exit with Ctrl+C)"):
    gevent.spawn(GreenConsole(local).interact, banner)
def greenify(banner="REPL is now greenlet friendly (exit with Ctrl+C)"):
import __main__
GreenConsole(__main__.__dict__).interact(banner)
# The following are imported/aliased for user import convenience
spawn = Greenlet.spawn
sleep = gevent.sleep
Timeout = gevent.Timeout
with_timeout = gevent.with_timeout
nanotime = faststat.nanotime
# end user imports/aliases
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from txamqp.client import Closed
from txamqp.content import Content
from txamqp.testlib import TestBase
from twisted.internet.defer import inlineCallbacks
class QueueTests(TestBase):
"""Tests for 'methods' on the amqp queue 'class'"""
@inlineCallbacks
def test_purge(self):
"""
Test that the purge method removes messages from the queue
"""
channel = self.channel
#setup, declare a queue and add some messages to it:
yield channel.exchange_declare(exchange="test-exchange", type="direct")
yield channel.queue_declare(queue="test-queue", exclusive=True)
yield channel.queue_bind(queue="test-queue", exchange="test-exchange", routing_key="key")
channel.basic_publish(exchange="test-exchange", routing_key="key", content=Content("one"))
channel.basic_publish(exchange="test-exchange", routing_key="key", content=Content("two"))
channel.basic_publish(exchange="test-exchange", routing_key="key", content=Content("three"))
#check that the queue now reports 3 messages:
reply = yield channel.queue_declare(queue="test-queue")
self.assertEqual(3, reply.message_count)
#now do the purge, then test that three messages are purged and the count drops to 0
        reply = yield channel.queue_purge(queue="test-queue")
self.assertEqual(3, reply.message_count)
reply = yield channel.queue_declare(queue="test-queue")
self.assertEqual(0, reply.message_count)
#send a further message and consume it, ensuring that the other messages are really gone
channel.basic_publish(exchange="test-exchange", routing_key="key", content=Content("four"))
reply = yield channel.basic_consume(queue="test-queue", no_ack=True)
queue = yield self.client.queue(reply.consumer_tag)
msg = yield queue.get(timeout=1)
self.assertEqual("four", msg.content.body)
#check error conditions (use new channels):
channel = yield self.client.channel(2)
yield channel.channel_open()
try:
#queue specified but doesn't exist:
yield channel.queue_purge(queue="invalid-queue")
self.fail("Expected failure when purging non-existent queue")
except Closed, e:
self.assertChannelException(404, e.args[0])
channel = yield self.client.channel(3)
yield channel.channel_open()
try:
#queue not specified and none previously declared for channel:
yield channel.queue_purge()
self.fail("Expected failure when purging unspecified queue")
except Closed, e:
self.assertConnectionException(530, e.args[0])
#cleanup
other = yield self.connect()
channel = yield other.channel(1)
yield channel.channel_open()
yield channel.exchange_delete(exchange="test-exchange")
@inlineCallbacks
def test_declare_exclusive(self):
"""
Test that the exclusive field is honoured in queue.declare
"""
# TestBase.setUp has already opened channel(1)
c1 = self.channel
# Here we open a second separate connection:
other = yield self.connect()
c2 = yield other.channel(1)
yield c2.channel_open()
#declare an exclusive queue:
yield c1.queue_declare(queue="exclusive-queue", exclusive="True")
try:
#other connection should not be allowed to declare this:
yield c2.queue_declare(queue="exclusive-queue", exclusive="True")
self.fail("Expected second exclusive queue_declare to raise a channel exception")
except Closed, e:
self.assertChannelException(405, e.args[0])
@inlineCallbacks
def test_declare_passive(self):
"""
Test that the passive field is honoured in queue.declare
"""
channel = self.channel
#declare an exclusive queue:
yield channel.queue_declare(queue="passive-queue-1", exclusive="True")
yield channel.queue_declare(queue="passive-queue-1", passive="True")
try:
#other connection should not be allowed to declare this:
yield channel.queue_declare(queue="passive-queue-2", passive="True")
self.fail("Expected passive declaration of non-existant queue to raise a channel exception")
except Closed, e:
self.assertChannelException(404, e.args[0])
@inlineCallbacks
def test_bind(self):
"""
Test various permutations of the queue.bind method
"""
channel = self.channel
yield channel.queue_declare(queue="queue-1", exclusive="True")
#straightforward case, both exchange & queue exist so no errors expected:
yield channel.queue_bind(queue="queue-1", exchange="amq.direct", routing_key="key1")
#bind the default queue for the channel (i.e. last one declared):
yield channel.queue_bind(exchange="amq.direct", routing_key="key2")
#use the queue name where neither routing key nor queue are specified:
yield channel.queue_bind(exchange="amq.direct")
        #try and bind to non-existent exchange
        try:
            yield channel.queue_bind(queue="queue-1", exchange="an-invalid-exchange", routing_key="key1")
            self.fail("Expected bind to non-existent exchange to fail")
except Closed, e:
self.assertChannelException(404, e.args[0])
#need to reopen a channel:
channel = yield self.client.channel(2)
yield channel.channel_open()
        #try and bind non-existent queue:
        try:
            yield channel.queue_bind(queue="queue-2", exchange="amq.direct", routing_key="key1")
            self.fail("Expected bind of non-existent queue to fail")
except Closed, e:
self.assertChannelException(404, e.args[0])
@inlineCallbacks
def test_delete_simple(self):
"""
Test basic queue deletion
"""
channel = self.channel
#straight-forward case:
yield channel.queue_declare(queue="delete-me")
channel.basic_publish(routing_key="delete-me", content=Content("a"))
channel.basic_publish(routing_key="delete-me", content=Content("b"))
channel.basic_publish(routing_key="delete-me", content=Content("c"))
reply = yield channel.queue_delete(queue="delete-me")
self.assertEqual(3, reply.message_count)
        #check that it has gone by declaring passively
try:
yield channel.queue_declare(queue="delete-me", passive="True")
self.fail("Queue has not been deleted")
except Closed, e:
self.assertChannelException(404, e.args[0])
        #check attempted deletion of non-existent queue is handled correctly:
channel = yield self.client.channel(2)
yield channel.channel_open()
try:
yield channel.queue_delete(queue="i-dont-exist", if_empty="True")
self.fail("Expected delete of non-existant queue to fail")
except Closed, e:
self.assertChannelException(404, e.args[0])
@inlineCallbacks
def test_delete_ifempty(self):
"""
Test that if_empty field of queue_delete is honoured
"""
channel = self.channel
#create a queue and add a message to it (use default binding):
yield channel.queue_declare(queue="delete-me-2")
yield channel.queue_declare(queue="delete-me-2", passive="True")
channel.basic_publish(routing_key="delete-me-2", content=Content("message"))
#try to delete, but only if empty:
try:
yield channel.queue_delete(queue="delete-me-2", if_empty="True")
self.fail("Expected delete if_empty to fail for non-empty queue")
except Closed, e:
self.assertChannelException(406, e.args[0])
#need new channel now:
channel = yield self.client.channel(2)
yield channel.channel_open()
#empty queue:
reply = yield channel.basic_consume(queue="delete-me-2", no_ack=True)
queue = yield self.client.queue(reply.consumer_tag)
msg = yield queue.get(timeout=1)
self.assertEqual("message", msg.content.body)
yield channel.basic_cancel(consumer_tag=reply.consumer_tag)
#retry deletion on empty queue:
yield channel.queue_delete(queue="delete-me-2", if_empty="True")
#check that it has gone by declaring passively:
try:
yield channel.queue_declare(queue="delete-me-2", passive="True")
self.fail("Queue has not been deleted")
except Closed, e:
self.assertChannelException(404, e.args[0])
@inlineCallbacks
def test_delete_ifunused(self):
"""
Test that if_unused field of queue_delete is honoured
"""
channel = self.channel
#create a queue and register a consumer:
yield channel.queue_declare(queue="delete-me-3")
yield channel.queue_declare(queue="delete-me-3", passive="True")
reply = yield channel.basic_consume(queue="delete-me-3", no_ack=True)
#need new channel now:
channel2 = yield self.client.channel(2)
yield channel2.channel_open()
#try to delete, but only if empty:
try:
yield channel2.queue_delete(queue="delete-me-3", if_unused="True")
self.fail("Expected delete if_unused to fail for queue with existing consumer")
except Closed, e:
self.assertChannelException(406, e.args[0])
yield channel.basic_cancel(consumer_tag=reply.consumer_tag)
yield channel.queue_delete(queue="delete-me-3", if_unused="True")
#check that it has gone by declaring passively:
try:
yield channel.queue_declare(queue="delete-me-3", passive="True")
self.fail("Queue has not been deleted")
except Closed, e:
self.assertChannelException(404, e.args[0])
@inlineCallbacks
def test_close_queue(self):
from txamqp.queue import Closed as QueueClosed
channel = self.channel
reply = yield channel.queue_declare(queue="test-queue")
reply = yield channel.basic_consume(queue="test-queue")
queue = yield self.client.queue(reply.consumer_tag)
d = self.assertFailure(queue.get(timeout=1), QueueClosed)
self.client.close(None)
yield d
|
|
#!/usr/bin/env python
"""The in memory database methods for path handling."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from future.builtins import filter
from future.utils import iteritems
from future.utils import iterkeys
from typing import Dict
from typing import Iterable
from typing import Optional
from typing import Sequence
from typing import Text
from grr_response_core.lib import rdfvalue
from grr_response_core.lib import utils
from grr_response_core.lib.util import collection
from grr_response_server.databases import db
from grr_response_server.rdfvalues import objects as rdf_objects
class _PathRecord(object):
"""A class representing all known information about particular path.
Attributes:
path_type: A path type of the path that this record corresponds to.
components: A path components of the path that this record corresponds to.
"""
def __init__(self, path_type, components):
self._path_type = path_type
self._components = components
self._path_infos = {}
self._children = set()
@property
def _stat_entries(self):
return {
ts: pi.stat_entry
for ts, pi in iteritems(self._path_infos)
if pi.stat_entry
}
@property
def _hash_entries(self):
return {
ts: pi.hash_entry
for ts, pi in iteritems(self._path_infos)
if pi.hash_entry
}
def AddStatEntry(self, stat_entry, timestamp):
"""Registers stat entry at a given timestamp."""
if timestamp in self._stat_entries:
message = ("Duplicated stat entry write for path '%s' of type '%s' at "
"timestamp '%s'. Old: %s. New: %s.")
message %= ("/".join(self._components), self._path_type, timestamp,
self._stat_entries[timestamp], stat_entry)
raise db.Error(message)
if timestamp not in self._path_infos:
path_info = rdf_objects.PathInfo(
path_type=self._path_type,
components=self._components,
timestamp=timestamp,
stat_entry=stat_entry)
self.AddPathInfo(path_info)
else:
self._path_infos[timestamp].stat_entry = stat_entry
def GetStatEntries(self):
return self._stat_entries.items()
def AddHashEntry(self, hash_entry, timestamp):
"""Registers hash entry at a given timestamp."""
if timestamp in self._hash_entries:
message = ("Duplicated hash entry write for path '%s' of type '%s' at "
"timestamp '%s'. Old: %s. New: %s.")
message %= ("/".join(self._components), self._path_type, timestamp,
self._hash_entries[timestamp], hash_entry)
raise db.Error(message)
if timestamp not in self._path_infos:
path_info = rdf_objects.PathInfo(
path_type=self._path_type,
components=self._components,
timestamp=timestamp,
hash_entry=hash_entry)
self.AddPathInfo(path_info)
else:
self._path_infos[timestamp].hash_entry = hash_entry
def GetHashEntries(self):
return self._hash_entries.items()
def ClearHistory(self):
self._path_infos = {}
def AddPathInfo(self, path_info):
"""Updates existing path information of the path record."""
if self._path_type != path_info.path_type:
message = "Incompatible path types: `%s` and `%s`"
raise ValueError(message % (self._path_type, path_info.path_type))
if self._components != path_info.components:
message = "Incompatible path components: `%s` and `%s`"
raise ValueError(message % (self._components, path_info.components))
if path_info.timestamp in self._path_infos:
raise ValueError("PathInfo with timestamp %r was added before." %
path_info.timestamp)
new_path_info = path_info.Copy()
if new_path_info.timestamp is None:
new_path_info.timestamp = rdfvalue.RDFDatetime.Now()
self._path_infos[new_path_info.timestamp] = new_path_info
def AddChild(self, path_info):
"""Makes the path aware of some child."""
if self._path_type != path_info.path_type:
message = "Incompatible path types: `%s` and `%s`"
raise ValueError(message % (self._path_type, path_info.path_type))
if self._components != path_info.components[:-1]:
message = "Incompatible path components, expected `%s` but got `%s`"
raise ValueError(message % (self._components, path_info.components[:-1]))
self._children.add(path_info.GetPathID())
def GetPathInfo(self, timestamp=None):
"""Generates a summary about the path record.
Args:
timestamp: A point in time from which the data should be retrieved.
Returns:
A `rdf_objects.PathInfo` instance.
"""
path_info_timestamp = self._LastEntryTimestamp(self._path_infos, timestamp)
try:
result = self._path_infos[path_info_timestamp].Copy()
except KeyError:
result = rdf_objects.PathInfo(
path_type=self._path_type,
components=self._components,
directory=True)
stat_entry_timestamp = self._LastEntryTimestamp(self._stat_entries,
timestamp)
result.last_stat_entry_timestamp = stat_entry_timestamp
result.stat_entry = self._stat_entries.get(stat_entry_timestamp)
hash_entry_timestamp = self._LastEntryTimestamp(self._hash_entries,
timestamp)
result.last_hash_entry_timestamp = hash_entry_timestamp
result.hash_entry = self._hash_entries.get(hash_entry_timestamp)
return result
def GetChildren(self):
return set(self._children)
@staticmethod
def _LastEntryTimestamp(dct, upper_bound_timestamp):
"""Searches for greatest timestamp lower than the specified one.
Args:
dct: A dictionary from timestamps to some items.
upper_bound_timestamp: An upper bound for timestamp to be returned.
Returns:
Greatest timestamp that is lower than the specified one. If no such value
exists, `None` is returned.
"""
if upper_bound_timestamp is None:
upper_bound = lambda _: True
else:
upper_bound = lambda key: key <= upper_bound_timestamp
try:
return max(filter(upper_bound, iterkeys(dct)))
except ValueError: # Thrown if `max` input (result of filtering) is empty.
return None
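  # Illustrative example with hypothetical values: for dct keys {1, 5, 9},
  # an upper_bound_timestamp of 7 yields 5, None yields 9 (no bound), and 0
  # yields None because no key qualifies.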
class InMemoryDBPathMixin(object):
"""InMemoryDB mixin for path related functions."""
@utils.Synchronized
def ReadPathInfo(self, client_id, path_type, components, timestamp=None):
"""Retrieves a path info record for a given path."""
try:
path_record = self.path_records[(client_id, path_type, components)]
return path_record.GetPathInfo(timestamp=timestamp)
except KeyError:
raise db.UnknownPathError(
client_id=client_id, path_type=path_type, components=components)
@utils.Synchronized
def ReadPathInfos(self, client_id, path_type, components_list):
"""Retrieves path info records for given paths."""
result = {}
for components in components_list:
try:
path_record = self.path_records[(client_id, path_type, components)]
result[components] = path_record.GetPathInfo()
except KeyError:
result[components] = None
return result
@utils.Synchronized
def ListDescendantPathInfos(self,
client_id,
path_type,
components,
timestamp=None,
max_depth=None):
"""Lists path info records that correspond to children of given path."""
result = []
root_dir_exists = False
for path_idx, path_record in iteritems(self.path_records):
other_client_id, other_path_type, other_components = path_idx
path_info = path_record.GetPathInfo(timestamp=timestamp)
if client_id != other_client_id or path_type != other_path_type:
continue
if other_components == tuple(components):
root_dir_exists = True
if not path_info.directory:
raise db.NotDirectoryPathError(client_id, path_type, components)
if len(other_components) == len(components):
continue
if not collection.StartsWith(other_components, components):
continue
if (max_depth is not None and
len(other_components) - len(components) > max_depth):
continue
result.append(path_info)
if not root_dir_exists and components:
raise db.UnknownPathError(client_id, path_type, components)
if timestamp is None:
return sorted(result, key=lambda _: tuple(_.components))
# We need to filter implicit path infos if specific timestamp is given.
# TODO(hanuszczak): If we were to switch to use path trie instead of storing
# records by path id, everything would be much easier.
class TrieNode(object):
"""A trie of path components with path infos as values."""
def __init__(self):
self.path_info = None
self.children = {}
self.explicit = False
def Add(self, path_info, idx=0):
"""Adds given path info to the trie (or one of its subtrees)."""
components = path_info.components
if idx == len(components):
self.path_info = path_info
self.explicit |= (
path_info.HasField("stat_entry") or
path_info.HasField("hash_entry"))
else:
child = self.children.setdefault(components[idx], TrieNode())
child.Add(path_info, idx=idx + 1)
self.explicit |= child.explicit
def Collect(self, path_infos):
if self.path_info is not None and self.explicit:
path_infos.append(self.path_info)
for component in sorted(iterkeys(self.children)):
self.children[component].Collect(path_infos)
trie = TrieNode()
for path_info in result:
trie.Add(path_info)
explicit_path_infos = []
trie.Collect(explicit_path_infos)
return explicit_path_infos
def _GetPathRecord(self, client_id, path_info, set_default=True):
components = tuple(path_info.components)
path_idx = (client_id, path_info.path_type, components)
if set_default:
default = _PathRecord(
path_type=path_info.path_type, components=components)
return self.path_records.setdefault(path_idx, default)
else:
return self.path_records.get(path_idx, None)
def _WritePathInfo(self, client_id, path_info):
"""Writes a single path info record for given client."""
if client_id not in self.metadatas:
raise db.UnknownClientError(client_id)
path_record = self._GetPathRecord(client_id, path_info)
path_record.AddPathInfo(path_info)
parent_path_info = path_info.GetParent()
if parent_path_info is not None:
parent_path_record = self._GetPathRecord(client_id, parent_path_info)
parent_path_record.AddChild(path_info)
@utils.Synchronized
def MultiWritePathInfos(self, path_infos):
for client_id, client_path_infos in iteritems(path_infos):
self.WritePathInfos(client_id, client_path_infos)
@utils.Synchronized
def WritePathInfos(self, client_id, path_infos):
for path_info in path_infos:
self._WritePathInfo(client_id, path_info)
for ancestor_path_info in path_info.GetAncestors():
self._WritePathInfo(client_id, ancestor_path_info)
@utils.Synchronized
def ReadPathInfosHistories(
self,
client_id,
path_type,
components_list,
      cutoff=None
):
"""Reads a collection of hash and stat entries for given paths."""
results = {}
for components in components_list:
try:
path_record = self.path_records[(client_id, path_type, components)]
except KeyError:
results[components] = []
continue
entries_by_ts = {}
for ts, stat_entry in path_record.GetStatEntries():
pi = rdf_objects.PathInfo(
path_type=path_type,
components=components,
timestamp=ts,
stat_entry=stat_entry)
entries_by_ts[ts] = pi
for ts, hash_entry in path_record.GetHashEntries():
try:
pi = entries_by_ts[ts]
except KeyError:
pi = rdf_objects.PathInfo(
path_type=path_type, components=components, timestamp=ts)
entries_by_ts[ts] = pi
pi.hash_entry = hash_entry
results[components] = []
for timestamp in sorted(iterkeys(entries_by_ts)):
if cutoff is not None and timestamp > cutoff:
continue
results[components].append(entries_by_ts[timestamp])
return results
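# Illustrative note (assumption, not part of the original code): for a path
# with a stat entry at t=1, a hash entry at t=1 and another stat entry at
# t=2, the returned history holds two PathInfos - one at t=1 carrying both
# entries and one at t=2 carrying only the stat entry - and any entry newer
# than `cutoff` is dropped.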
@utils.Synchronized
def ReadLatestPathInfosWithHashBlobReferences(self,
client_paths,
max_timestamp=None):
"""Returns PathInfos that have corresponding HashBlobReferences."""
results = {}
for cp in client_paths:
results[cp] = None
try:
path_record = self.path_records[(cp.client_id, cp.path_type,
cp.components)]
except KeyError:
continue
stat_entries_by_ts = {
ts: stat_entry for ts, stat_entry in path_record.GetStatEntries()
}
for ts, hash_entry in sorted(
path_record.GetHashEntries(), key=lambda e: e[0], reverse=True):
if max_timestamp is not None and ts > max_timestamp:
continue
hash_id = rdf_objects.SHA256HashID.FromSerializedBytes(
hash_entry.sha256.AsBytes())
if hash_id not in self.blob_refs_by_hashes:
continue
pi = rdf_objects.PathInfo(
path_type=cp.path_type,
components=cp.components,
timestamp=ts,
hash_entry=hash_entry)
try:
pi.stat_entry = stat_entries_by_ts[ts]
except KeyError:
pass
results[cp] = pi
break
return results
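# Illustrative note (assumption, not part of the original code): hash entries
# are scanned newest-first and the first one whose SHA-256 is present in
# blob_refs_by_hashes wins; a stat entry with the same timestamp is attached
# when available, and client paths without such a hash entry map to None.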
|
|
import mock
from curling.lib import HttpClientError
from nose.tools import eq_, ok_
from pyquery import PyQuery as pq
from test_utils import RequestFactory
import amo
import amo.tests
from mkt.constants.payments import (PAYMENT_METHOD_ALL, PAYMENT_METHOD_CARD,
PAYMENT_METHOD_OPERATOR)
from mkt.developers import forms_payments, models
from mkt.developers.providers import get_provider
from mkt.developers.tests.test_providers import Patcher
from mkt.developers.tests.test_views_payments import setup_payment_account
from mkt.prices.models import AddonPremium, Price
from mkt.reviewers.models import RereviewQueue
from mkt.site.fixtures import fixture
from mkt.users.models import UserProfile
from mkt.webapps.models import Addon, AddonDeviceType, AddonUser
class TestPremiumForm(amo.tests.TestCase):
# None of the tests in this TC should initiate Solitude calls.
fixtures = fixture('webapp_337141')
def setUp(self):
self.request = RequestFactory()
self.request.POST = {'toggle-paid': ''}
self.addon = Addon.objects.get(pk=337141)
AddonDeviceType.objects.create(
addon=self.addon, device_type=amo.DEVICE_GAIA.id)
self.platforms = {'free_platforms': ['free-firefoxos'],
'paid_platforms': ['paid-firefoxos']}
self.price = Price.objects.create(price='0.99')
self.user = UserProfile.objects.get(email='steamcube@mozilla.com')
self.kwargs = {
'request': self.request,
'addon': self.addon,
'user': self.user,
}
def test_free_to_premium(self):
self.request.POST = {'toggle-paid': 'paid'}
form = forms_payments.PremiumForm(data=self.platforms, **self.kwargs)
assert form.is_valid(), form.errors
form.save()
eq_(self.addon.premium_type, amo.ADDON_PREMIUM)
eq_(self.addon.status, amo.STATUS_NULL)
def test_free_to_premium_pending(self):
# Pending apps shouldn't get re-reviewed.
self.addon.update(status=amo.STATUS_PENDING)
self.request.POST = {'toggle-paid': 'paid'}
form = forms_payments.PremiumForm(data=self.platforms, **self.kwargs)
assert form.is_valid(), form.errors
form.save()
eq_(RereviewQueue.objects.count(), 0)
def test_free_with_in_app_requires_in_app(self):
self.platforms.update(price='free', allow_inapp='False')
form = forms_payments.PremiumForm(self.platforms, **self.kwargs)
assert not form.is_valid()
def test_free_with_in_app(self):
self.make_premium(self.addon)
self.platforms.update(price='free', allow_inapp='True')
form = forms_payments.PremiumForm(self.platforms, **self.kwargs)
assert form.is_valid()
form.save()
eq_(self.addon.premium_type, amo.ADDON_FREE_INAPP)
def test_tier_zero_inapp_is_optional(self):
self.platforms.update(price='free', allow_inapp='False')
price = Price.objects.create(price='9.99')
self.platforms.update(price=price.pk, allow_inapp='True')
form = forms_payments.PremiumForm(self.platforms, **self.kwargs)
assert form.is_valid()
self.platforms.update(price=price.pk, allow_inapp='False')
form = forms_payments.PremiumForm(self.platforms, **self.kwargs)
assert form.is_valid()
def test_premium_to_free(self):
# Premium to Free is ok for public apps.
self.make_premium(self.addon)
self.request.POST = {'toggle-paid': 'free'}
self.platforms.update(price=self.price.pk)
form = forms_payments.PremiumForm(data=self.platforms, **self.kwargs)
assert form.is_valid(), form.errors
form.save()
eq_(RereviewQueue.objects.count(), 0)
eq_(self.addon.premium_type, amo.ADDON_FREE)
eq_(self.addon.status, amo.STATUS_PUBLIC)
def test_is_paid_premium(self):
self.make_premium(self.addon)
form = forms_payments.PremiumForm(data=self.platforms, **self.kwargs)
eq_(form.is_paid(), True)
def test_free_inapp_price_required(self):
self.addon.update(premium_type=amo.ADDON_FREE_INAPP)
form = forms_payments.PremiumForm(data=self.platforms, **self.kwargs)
assert not form.is_valid()
def test_is_paid_premium_inapp(self):
self.addon.update(premium_type=amo.ADDON_PREMIUM_INAPP)
form = forms_payments.PremiumForm(data=self.platforms, **self.kwargs)
eq_(form.is_paid(), True)
def test_is_paid_free_inapp(self):
self.addon.update(premium_type=amo.ADDON_FREE_INAPP)
form = forms_payments.PremiumForm(data=self.platforms, **self.kwargs)
eq_(form.is_paid(), True)
def test_not_is_paid_free(self):
self.addon.update(premium_type=amo.ADDON_FREE)
form = forms_payments.PremiumForm(data=self.platforms, **self.kwargs)
eq_(form.is_paid(), False)
def test_add_device(self):
self.addon.update(status=amo.STATUS_PENDING)
self.platforms['free_platforms'].append('free-desktop')
form = forms_payments.PremiumForm(data=self.platforms, **self.kwargs)
assert form.is_valid(), form.errors
form.save()
assert amo.DEVICE_DESKTOP in self.addon.device_types
eq_(RereviewQueue.objects.count(), 0)
eq_(self.addon.status, amo.STATUS_PENDING)
def test_add_device_public_rereview(self):
self.addon.update(status=amo.STATUS_PUBLIC)
self.platforms['free_platforms'].append('free-desktop')
form = forms_payments.PremiumForm(data=self.platforms, **self.kwargs)
assert form.is_valid(), form.errors
form.save()
assert amo.DEVICE_DESKTOP in self.addon.device_types
eq_(RereviewQueue.objects.count(), 1)
eq_(self.addon.status, amo.STATUS_PUBLIC)
def test_add_device_approved_rereview(self):
self.addon.update(status=amo.STATUS_APPROVED)
self.platforms['free_platforms'].append('free-desktop')
form = forms_payments.PremiumForm(data=self.platforms, **self.kwargs)
assert form.is_valid(), form.errors
form.save()
assert amo.DEVICE_DESKTOP in self.addon.device_types
eq_(RereviewQueue.objects.count(), 1)
eq_(self.addon.status, amo.STATUS_APPROVED)
def test_update(self):
self.make_premium(self.addon)
price = Price.objects.create(price='9.99')
self.platforms.update(price=price.pk)
form = forms_payments.PremiumForm(self.platforms, **self.kwargs)
assert form.is_valid(), form.errors
form.save()
eq_(self.addon.premium.price.pk, price.pk)
def test_update_wo_initial_price(self):
"""Test that if the app doesn't have an initial price (i.e.: it was
marked as paid during submission) that this is handled gracefully.
"""
# Don't give the app an initial price.
self.addon._premium = AddonPremium.objects.create(addon=self.addon)
self.addon.premium_type = amo.ADDON_PREMIUM
price = Price.objects.create(price='9.99')
self.platforms.update(price=price.pk)
form = forms_payments.PremiumForm(self.platforms, **self.kwargs)
assert form.is_valid(), form.errors
form.save()
eq_(self.addon.premium.price.pk, price.pk)
def test_update_new_with_acct(self):
# This was the situation for a new app that was getting linked to an
# existing bank account.
self.addon.update(premium_type=amo.ADDON_PREMIUM)
self.platforms.update(price=self.price.pk)
form = forms_payments.PremiumForm(self.platforms, **self.kwargs)
assert form.is_valid(), form.errors
form.save()
addon = Addon.objects.get(pk=self.addon.pk)
assert addon.premium
def test_update_with_bogus_price(self):
AddonPremium.objects.create(addon=self.addon)
self.addon.premium_type = amo.ADDON_PREMIUM
self.platforms.update(price='bogus')
form = forms_payments.PremiumForm(self.platforms, **self.kwargs)
eq_(form.is_valid(), False)
eq_(len(form.errors), 1)
ok_('price' in form.errors)
def test_premium_with_empty_price(self):
AddonPremium.objects.create(addon=self.addon)
self.addon.premium_type = amo.ADDON_PREMIUM
self.platforms.update(price='')
form = forms_payments.PremiumForm(self.platforms, **self.kwargs)
eq_(form.is_valid(), False)
eq_(len(form.errors), 1)
ok_('price' in form.errors)
def test_premium_with_price_does_not_exist(self):
AddonPremium.objects.create(addon=self.addon)
self.addon.premium_type = amo.ADDON_PREMIUM
self.platforms.update(price=9999)
form = forms_payments.PremiumForm(self.platforms, **self.kwargs)
form.fields['price'].choices = ((9999, 'foo'),)
eq_(form.is_valid(), False)
eq_(len(form.errors), 1)
ok_('price' in form.errors)
def test_optgroups_in_price_choices(self):
Price.objects.create(price='0.00', method=PAYMENT_METHOD_ALL)
Price.objects.create(price='0.10', method=PAYMENT_METHOD_OPERATOR)
Price.objects.create(price='1.00', method=PAYMENT_METHOD_CARD)
Price.objects.create(price='1.10', method=PAYMENT_METHOD_CARD)
Price.objects.create(price='1.00', method=PAYMENT_METHOD_ALL)
Price.objects.create(price='2.00', method=PAYMENT_METHOD_ALL)
form = forms_payments.PremiumForm(self.platforms, **self.kwargs)
# 1 x Free with inapp
# + 1 x price tier 0
# + 3 x values grouped by billing
# = 5
eq_(len(form.fields['price'].choices), 5)
html = form.as_p()
eq_(len(pq(html)('#id_price optgroup')), 3, 'Should be 3 optgroups')
def test_cannot_change_devices_on_toggle(self):
self.request.POST = {'toggle-paid': 'paid'}
self.platforms = {'paid_platforms': ['paid-firefoxos']}
form = forms_payments.PremiumForm(data=self.platforms, **self.kwargs)
assert form.is_valid(), form.errors
form.save()
eq_(self.addon.premium_type, amo.ADDON_PREMIUM)
eq_(self.addon.status, amo.STATUS_NULL)
self.assertSetEqual(self.addon.device_types, form.get_devices())
def test_cannot_set_desktop_for_packaged_app(self):
self.platforms = {'free_platforms': ['free-desktop']}
self.addon.update(is_packaged=True)
form = forms_payments.PremiumForm(data=self.platforms, **self.kwargs)
assert not form.is_valid()
def test_can_set_desktop_for_packaged_app(self):
self.create_flag('desktop-packaged')
self.platforms = {'free_platforms': ['free-desktop']}
self.addon.update(is_packaged=True)
form = forms_payments.PremiumForm(data=self.platforms, **self.kwargs)
assert form.is_valid(), form.errors
def test_can_change_devices_for_hosted_app(self):
# Specify both free and paid platforms. It shouldn't fail because the
# payment type can't change unless the toggle is explicitly specified.
self.platforms = {'free_platforms': ['free-desktop'],
'paid_platforms': ['paid-firefoxos']} # Ignored.
form = forms_payments.PremiumForm(data=self.platforms, **self.kwargs)
assert form.is_valid(), form.errors
form.save()
self.assertSetEqual(self.addon.device_types, [amo.DEVICE_DESKTOP])
def test_cannot_change_android_devices_for_packaged_app(self):
self.platforms = {'free_platforms': ['free-android-mobile'],
'paid_platforms': ['paid-firefoxos']} # Ignored.
self.addon.update(is_packaged=True)
form = forms_payments.PremiumForm(data=self.platforms, **self.kwargs)
assert not form.is_valid()
self.assertSetEqual(self.addon.device_types, [amo.DEVICE_GAIA])
def test_can_change_devices_for_packaged_app_behind_flag(self):
self.create_flag('android-packaged')
self.platforms = {'free_platforms': ['free-android-mobile'],
'paid_platforms': ['paid-firefoxos']} # Ignored.
self.addon.update(is_packaged=True)
form = forms_payments.PremiumForm(data=self.platforms, **self.kwargs)
assert form.is_valid(), form.errors
form.save()
self.assertSetEqual(self.addon.device_types, [amo.DEVICE_MOBILE])
def test_can_change_devices_for_android_app_behind_flag(self):
self.create_flag('android-payments')
data = {'paid_platforms': ['paid-firefoxos', 'paid-android-mobile'],
'price': 'free', 'allow_inapp': 'True'}
self.make_premium(self.addon)
form = forms_payments.PremiumForm(data=data, **self.kwargs)
assert form.is_valid(), form.errors
form.save()
self.assertSetEqual(self.addon.device_types, [amo.DEVICE_MOBILE,
amo.DEVICE_GAIA])
def test_initial(self):
form = forms_payments.PremiumForm(**self.kwargs)
eq_(form._initial_price_id(), Price.objects.get(price='0.99').pk)
def test_initial_not_there(self):
Price.objects.get(price='0.99').update(active=False)
form = forms_payments.PremiumForm(**self.kwargs)
eq_(form._initial_price_id(), None)
class TestAccountListForm(Patcher, amo.tests.TestCase):
fixtures = fixture('webapp_337141', 'user_999', 'group_admin',
'user_admin', 'user_admin_group', 'prices')
def setUp(self):
super(TestAccountListForm, self).setUp()
self.addon = Addon.objects.get(pk=337141)
self.addon.update(status=amo.STATUS_NULL,
highest_status=amo.STATUS_PUBLIC)
self.provider = get_provider(name='bango')
self.price = Price.objects.filter()[0]
AddonPremium.objects.create(addon=self.addon, price=self.price)
self.user = UserProfile.objects.get(pk=31337)
amo.set_user(self.user)
self.other = UserProfile.objects.get(pk=999)
self.admin = UserProfile.objects.get(email='admin@mozilla.com')
self.kwargs = {
'addon': self.addon,
'provider': self.provider,
}
def create_user_account(self, user, **kwargs):
"""Create a user account"""
seller = models.SolitudeSeller.objects.create(
resource_uri='/path/to/sel', user=user, uuid='uuid-%s' % user.pk)
data = dict(user=user, uri='asdf-%s' % user.pk, name='test',
inactive=False, solitude_seller=seller,
seller_uri='suri-%s' % user.pk, account_id=123,
agreed_tos=True, shared=False)
data.update(**kwargs)
return models.PaymentAccount.objects.create(**data)
def make_owner(self, user):
AddonUser.objects.create(addon=self.addon,
user=user, role=amo.AUTHOR_ROLE_OWNER)
def is_owner(self, user):
return (self.addon.authors.filter(pk=user.pk,
addonuser__role=amo.AUTHOR_ROLE_OWNER).exists())
def associate_owner_account(self):
owner_account = self.create_user_account(self.user)
form = forms_payments.AccountListForm(
data={'accounts': owner_account.pk}, user=self.user, **self.kwargs)
assert form.is_valid(), form.errors
form.save()
return owner_account
def test_with_owner_account(self):
user = self.user
account = self.create_user_account(user)
assert self.is_owner(user)
form = forms_payments.AccountListForm(
data={'accounts': account.pk}, user=user, **self.kwargs)
eq_(form.current_payment_account, None)
assert form.is_valid(), form.errors
form.save()
form = forms_payments.AccountListForm(None, user=user,
**self.kwargs)
eq_(form.fields['accounts'].widget.attrs.get('disabled'), None)
eq_(form.fields['accounts'].empty_label, None)
eq_(form.initial['accounts'], account)
def test_with_shared_account(self):
account = self.create_user_account(self.user)
shared = self.create_user_account(self.other, shared=True)
form = forms_payments.AccountListForm(user=self.user,
**self.kwargs)
self.assertSetEqual(form.fields['accounts'].queryset,
(account, shared))
def test_set_shared_account(self):
shared = self.create_user_account(self.other, shared=True)
form = forms_payments.AccountListForm(
data={'accounts': shared.pk}, user=self.user, **self.kwargs)
assert form.is_valid()
form.save()
accts = set(a.payment_account.pk for a in
self.addon.all_payment_accounts())
assert shared.pk in accts, 'Unexpected: {a}'.format(a=accts)
def test_with_non_owner_account(self):
user = self.other
account = self.create_user_account(user)
assert not self.is_owner(user)
form = forms_payments.AccountListForm(
data={'accounts': account.pk}, user=user, **self.kwargs)
eq_(form.current_payment_account, None)
assert form.fields['accounts'].widget.attrs['disabled'] is not None
assert not form.is_valid(), form.errors
def test_with_non_owner_admin_account(self):
user = self.admin
account = self.create_user_account(user)
assert not self.is_owner(user)
form = forms_payments.AccountListForm(
data={'accounts': account.pk}, user=user, **self.kwargs)
eq_(form.current_payment_account, None)
assert form.fields['accounts'].widget.attrs['disabled'] is not None
assert not form.is_valid(), form.errors
def test_admin_account_no_data(self):
self.associate_owner_account()
user = self.admin
assert not self.is_owner(user)
form = forms_payments.AccountListForm(
data={}, user=user, **self.kwargs)
assert form.fields['accounts'].widget.attrs['disabled'] is not None
assert form.is_valid(), form.errors
def test_admin_account_empty_string(self):
self.associate_owner_account()
user = self.admin
assert not self.is_owner(user)
form = forms_payments.AccountListForm(
data={'accounts': ''}, user=user, **self.kwargs)
assert form.fields['accounts'].widget.attrs['disabled'] is not None
assert not form.is_valid(), form.errors
def test_with_other_owner_account(self):
user = self.other
account = self.create_user_account(user)
self.make_owner(user)
assert self.is_owner(user)
form = forms_payments.AccountListForm(
data={'accounts': account.pk}, user=user, **self.kwargs)
assert form.is_valid(), form.errors
eq_(form.current_payment_account, None)
eq_(form.fields['accounts'].widget.attrs.get('disabled'), None)
form.save()
form = forms_payments.AccountListForm(None, user=user,
**self.kwargs)
eq_(form.fields['accounts'].empty_label, None)
eq_(form.initial['accounts'], account)
def test_with_non_owner_account_existing_account(self):
owner_account = self.associate_owner_account()
user = self.other
account = self.create_user_account(user)
assert not self.is_owner(user)
form = forms_payments.AccountListForm(
data={'accounts': account.pk}, user=user, **self.kwargs)
assert form.fields['accounts'].widget.attrs['disabled'] is not None
eq_(form.current_payment_account, owner_account)
assert not form.is_valid(), form.errors
def test_with_non_owner_admin_account_existing_account(self):
owner_account = self.associate_owner_account()
user = self.admin
account = self.create_user_account(user)
assert not self.is_owner(user)
form = forms_payments.AccountListForm(
data={'accounts': account.pk}, user=user, **self.kwargs)
assert form.fields['accounts'].widget.attrs['disabled'] is not None
eq_(form.current_payment_account, owner_account)
assert not form.is_valid(), form.errors
def test_with_other_owner_account_existing_account(self):
owner_account = self.associate_owner_account()
user = self.other
account = self.create_user_account(user)
self.make_owner(user)
assert self.is_owner(user)
form = forms_payments.AccountListForm(
data={'accounts': account.pk}, user=user, **self.kwargs)
eq_(form.current_payment_account, owner_account)
assert form.is_valid(), form.errors
form.save()
form = forms_payments.AccountListForm(None, user=user,
**self.kwargs)
eq_(form.fields['accounts'].empty_label, None)
eq_(form.initial['accounts'], account)
assert form.current_payment_account is None
class TestPaidRereview(Patcher, amo.tests.TestCase):
fixtures = fixture('webapp_337141', 'prices')
def setUp(self):
super(TestPaidRereview, self).setUp()
self.addon = Addon.objects.get(pk=337141)
self.addon.update(status=amo.STATUS_NULL,
highest_status=amo.STATUS_PUBLIC)
self.provider = get_provider(name='bango')
self.price = Price.objects.filter()[0]
AddonPremium.objects.create(addon=self.addon, price=self.price)
self.user = UserProfile.objects.get(email='steamcube@mozilla.com')
amo.set_user(self.user)
seller = models.SolitudeSeller.objects.create(
resource_uri='/path/to/sel', user=self.user)
self.account = models.PaymentAccount.objects.create(
user=self.user, uri='asdf', name='test', inactive=False,
solitude_seller=seller, account_id=123, agreed_tos=True)
self.kwargs = {
'addon': self.addon,
'user': self.user,
'provider': self.provider,
}
@mock.patch('mkt.webapps.models.Webapp.is_fully_complete',
new=mock.MagicMock())
def test_rereview(self):
form = forms_payments.AccountListForm(
data={'accounts': self.account.pk}, **self.kwargs)
assert form.is_valid(), form.errors
form.save()
eq_(self.addon.status, amo.STATUS_PUBLIC)
eq_(RereviewQueue.objects.count(), 1)
form = forms_payments.AccountListForm(None, **self.kwargs)
eq_(form.fields['accounts'].empty_label, None)
def test_disagreed_tos_rereview(self):
self.account.update(agreed_tos=False)
form = forms_payments.AccountListForm(
data={'accounts': self.account.pk}, **self.kwargs)
assert not form.is_valid()
eq_(form.errors['accounts'],
['Select a valid choice. That choice is not one of the available '
'choices.'])
@mock.patch('mkt.webapps.models.Webapp.is_fully_complete',
new=mock.MagicMock())
def test_norereview(self):
self.addon.update(highest_status=amo.STATUS_PENDING)
form = forms_payments.AccountListForm(
data={'accounts': self.account.pk}, **self.kwargs)
assert form.is_valid(), form.errors
form.save()
eq_(self.addon.status, amo.STATUS_PENDING)
eq_(RereviewQueue.objects.count(), 0)
class TestRestoreAppStatus(amo.tests.TestCase):
fixtures = fixture('webapp_337141')
def setUp(self):
self.addon = Addon.objects.get(pk=337141)
self.addon.status = amo.STATUS_NULL
def test_to_public(self):
self.addon.highest_status = amo.STATUS_PUBLIC
forms_payments._restore_app_status(self.addon)
eq_(self.addon.status, amo.STATUS_PUBLIC)
def test_to_null(self):
self.addon.highest_status = amo.STATUS_NULL
forms_payments._restore_app_status(self.addon)
# Apps without a highest status default to PENDING.
eq_(self.addon.status, amo.STATUS_PENDING)
class TestBangoAccountForm(Patcher, amo.tests.TestCase):
fixtures = fixture('webapp_337141')
def setUp(self):
super(TestBangoAccountForm, self).setUp()
self.app = Addon.objects.get(pk=337141)
self.user = self.app.addonuser_set.get().user
form = forms_payments.BangoPaymentAccountForm()
self.data = {}
for field in form.fields:
if 'currency' in field:
self.data[field] = 'USD'
elif 'Iso' in field:
self.data[field] = 'USA'
else:
self.data[field] = 'foo@bu.gs' # Good enough.
def test_bank_required(self):
"""When there is no account, require bank details."""
form = forms_payments.BangoPaymentAccountForm(self.data)
assert form.is_valid(), form.errors
del self.data['bankName']
form = forms_payments.BangoPaymentAccountForm(self.data)
assert not form.is_valid(), form.errors
def test_bank_not_required(self):
"""When an account is specified, don't require bank details."""
payment = setup_payment_account(self.app, self.user).payment_account
form = forms_payments.BangoPaymentAccountForm(
self.data, account=payment)
assert form.is_valid(), form.errors
del self.data['bankName']
form = forms_payments.BangoPaymentAccountForm(
self.data, account=payment)
assert form.is_valid(), form.errors # Still valid, even now.
def test_on_save(self):
"""Save should just trigger the account's update function."""
payment = setup_payment_account(self.app, self.user).payment_account
form = forms_payments.BangoPaymentAccountForm(
self.data, account=payment)
assert form.is_valid(), form.errors
form.cleaned_data = {'account_name': 'foo', 'name': 'bob'}
form.save()
payment = payment.reload()
eq_(payment.name, 'foo')
self.bango_patcher.api.by_url.assert_called_with('uid')
class TestBokuAccountForm(amo.tests.TestCase):
fixtures = fixture('webapp_337141')
def setUp(self):
super(TestBokuAccountForm, self).setUp()
self.app = Addon.objects.get(pk=337141)
self.user = self.app.addonuser_set.get().user
@mock.patch('mkt.developers.forms_payments.client')
def test_valid_when_verified(self, client):
verify_service = client.api.boku.verify_service.post
verify_service.return_value = ''
form = forms_payments.BokuAccountForm({'account_name': 'Boku Acct',
'service_id': 'clearly-valid'})
ok_(form.is_valid())
verify_service.assert_called_with({'service_id': 'clearly-valid'})
@mock.patch('mkt.developers.forms_payments.client')
def test_invalid_when_not_verified(self, client):
verify_service = client.api.boku.verify_service.post
verify_service.side_effect = HttpClientError
form = forms_payments.BokuAccountForm({'account_name': 'Boku Acct',
'service_id': 'not-valid'})
ok_(not form.is_valid())
eq_(len(form.errors['service_id']), 1)
verify_service.assert_called_with({'service_id': 'not-valid'})
|
|
# coding:utf-8
# from django.test import TestCase
from mocker import (
MockerTestCase,
# ANY,
# KWARGS,
)
class AutomataTests(MockerTestCase):
def _makeOne(self, *args, **kwargs):
from export import markupfile
return markupfile.Automata(*args, **kwargs)
def test_instantiation(self):
from export.markupfile import Automata
dummy_journal = self.mocker.mock()
self.mocker.replay()
automata = self._makeOne(dummy_journal)
self.assertIsInstance(automata, Automata)
def test_citat_iso690(self):
dummy_journal = self.mocker.mock()
dummy_journal.editorial_standard
self.mocker.result(u'iso690')
self.mocker.replay()
automata = self._makeOne(dummy_journal)
self.assertEqual(automata.citat, u'icitat')
def test_citat_nbr6023(self):
dummy_journal = self.mocker.mock()
dummy_journal.editorial_standard
self.mocker.result('nbr6023')
self.mocker.replay()
automata = self._makeOne(dummy_journal)
self.assertEqual(automata.citat, 'acitat')
def test_citat_other(self):
dummy_journal = self.mocker.mock()
dummy_journal.editorial_standard
self.mocker.result('other')
self.mocker.replay()
automata = self._makeOne(dummy_journal)
self.assertEqual(automata.citat, 'ocitat')
def test_citat_vancouv(self):
dummy_journal = self.mocker.mock()
dummy_journal.editorial_standard
self.mocker.result('vancouv')
self.mocker.replay()
automata = self._makeOne(dummy_journal)
self.assertEqual(automata.citat, 'vcitat')
def test_citat_apa(self):
dummy_journal = self.mocker.mock()
dummy_journal.editorial_standard
self.mocker.result('apa')
self.mocker.replay()
automata = self._makeOne(dummy_journal)
self.assertEqual(automata.citat, 'pcitat')
def test_citat_unknown_value_must_return_empty_string(self):
dummy_journal = self.mocker.mock()
dummy_journal.editorial_standard
self.mocker.result('foo')
self.mocker.replay()
automata = self._makeOne(dummy_journal)
self.assertEqual(automata.citat, '')
def test_citat_none_value_must_return_empty_string(self):
dummy_journal = self.mocker.mock()
dummy_journal.editorial_standard
self.mocker.result(None)
self.mocker.replay()
automata = self._makeOne(dummy_journal)
self.assertEqual(automata.citat, '')
def test_norma_iso690(self):
dummy_journal = self.mocker.mock()
dummy_journal.editorial_standard
self.mocker.result('iso690')
self.mocker.replay()
automata = self._makeOne(dummy_journal)
self.assertEqual(automata.norma_acron, 'iso')
def test_norma_nbr6023(self):
dummy_journal = self.mocker.mock()
dummy_journal.editorial_standard
self.mocker.result('nbr6023')
self.mocker.replay()
automata = self._makeOne(dummy_journal)
self.assertEqual(automata.norma_acron, 'abnt')
def test_norma_other(self):
dummy_journal = self.mocker.mock()
dummy_journal.editorial_standard
self.mocker.result('other')
self.mocker.replay()
automata = self._makeOne(dummy_journal)
self.assertEqual(automata.norma_acron, 'other')
def test_norma_vancouv(self):
dummy_journal = self.mocker.mock()
dummy_journal.editorial_standard
self.mocker.result('vancouv')
self.mocker.replay()
automata = self._makeOne(dummy_journal)
self.assertEqual(automata.norma_acron, 'vanc')
def test_norma_apa(self):
dummy_journal = self.mocker.mock()
dummy_journal.editorial_standard
self.mocker.result('apa')
self.mocker.replay()
automata = self._makeOne(dummy_journal)
self.assertEqual(automata.norma_acron, 'apa')
def test_norma_unknown_value_must_return_empty_string(self):
dummy_journal = self.mocker.mock()
dummy_journal.editorial_standard
self.mocker.result('foo')
self.mocker.replay()
automata = self._makeOne(dummy_journal)
self.assertEqual(automata.norma_acron, '')
def test_norma_none_value_must_return_empty_string(self):
dummy_journal = self.mocker.mock()
dummy_journal.editorial_standard
self.mocker.result(None)
self.mocker.replay()
automata = self._makeOne(dummy_journal)
self.assertEqual(automata.norma_acron, '')
def test_issn_for_printed(self):
dummy_journal = self.mocker.mock()
dummy_journal.scielo_issn
self.mocker.result('print')
dummy_journal.print_issn
self.mocker.result('1234-1234')
self.mocker.replay()
automata = self._makeOne(dummy_journal)
self.assertEqual(automata.issn, '1234-1234')
def test_issn_for_electronic(self):
dummy_journal = self.mocker.mock()
dummy_journal.scielo_issn
self.mocker.result('electronic')
dummy_journal.eletronic_issn
self.mocker.result('1234-1234')
self.mocker.replay()
automata = self._makeOne(dummy_journal)
self.assertEqual(automata.issn, '1234-1234')
def test_issn_for_printed_missing_value(self):
dummy_journal = self.mocker.mock()
dummy_journal.scielo_issn
self.mocker.result('print')
dummy_journal.print_issn
self.mocker.result(None)
self.mocker.replay()
automata = self._makeOne(dummy_journal)
self.assertEqual(automata.issn, '')
def test_issn_for_electronic_missing_value(self):
dummy_journal = self.mocker.mock()
dummy_journal.scielo_issn
self.mocker.result('electronic')
dummy_journal.eletronic_issn
self.mocker.result(None)
self.mocker.replay()
automata = self._makeOne(dummy_journal)
self.assertEqual(automata.issn, '')
def test_acron_must_be_the_same_as_journals(self):
dummy_journal = self.mocker.mock()
dummy_journal.acronym
self.mocker.result('foo')
self.mocker.replay()
automata = self._makeOne(dummy_journal)
self.assertEqual(automata.acron, 'foo')
def test_acron_must_be_lowercase(self):
dummy_journal = self.mocker.mock()
dummy_journal.acronym
self.mocker.result('FOO')
self.mocker.replay()
automata = self._makeOne(dummy_journal)
self.assertEqual(automata.acron, 'foo')
def test_perfect_unicode_representation(self):
dummy_journal = self.mocker.mock()
dummy_journal.scielo_issn
self.mocker.result('print')
dummy_journal.print_issn
self.mocker.result('1234-1234')
dummy_journal.editorial_standard
self.mocker.result('nbr6023')
self.mocker.count(2)
dummy_journal.acronym
self.mocker.result('foo')
self.mocker.replay()
automata = self._makeOne(dummy_journal)
self.assertEqual(unicode(automata), '1234-1234;acitat;foo.amd;tgabnt.amd')
class IssueTests(MockerTestCase):
def _makeOne(self, *args, **kwargs):
from export import markupfile
return markupfile.Issue(*args, **kwargs)
def test_legend(self):
dummy_issue = self.mocker.mock()
dummy_journal = self.mocker.mock()
dummy_issue.journal
self.mocker.result(dummy_journal)
dummy_issue.volume
self.mocker.result('33')
dummy_issue.identification
self.mocker.result('3')
dummy_journal.short_title
self.mocker.result('Star Wars')
self.mocker.replay()
issue = self._makeOne(dummy_issue)
self.assertEqual(issue.legend, 'Star Wars v.33 n.3')
def test_period(self):
dummy_issue = self.mocker.mock()
dummy_issue.publication_start_month
self.mocker.result(3)
dummy_issue.publication_end_month
self.mocker.result(5)
self.mocker.replay()
issue = self._makeOne(dummy_issue)
self.assertEqual(issue.period, 'Mar/May')
def test_order(self):
dummy_issue = self.mocker.mock()
dummy_issue.order
self.mocker.result(7)
dummy_issue.publication_year
self.mocker.result(2012)
self.mocker.replay()
issue = self._makeOne(dummy_issue)
self.assertEqual(issue.order, '20127')
def test_perfect_unicode_representation(self):
dummy_issue = self.mocker.mock()
dummy_journal = self.mocker.mock()
dummy_issue.journal
self.mocker.result(dummy_journal)
dummy_journal.short_title
self.mocker.result('Star Wars')
dummy_issue.volume
self.mocker.result('33')
dummy_issue.identification
self.mocker.result('3')
dummy_issue.publication_year
self.mocker.result(2012)
dummy_issue.publication_start_month
self.mocker.result(3)
dummy_issue.publication_end_month
self.mocker.result(5)
dummy_issue.order
self.mocker.result(7)
self.mocker.replay()
expected_result = 'Star Wars v.33 n.3\r\nMar/May\r\n20127\r\n\r\n'
issue = self._makeOne(dummy_issue)
self.assertEqual(unicode(issue), expected_result)
class JournalStandardTests(MockerTestCase):
def _makeOne(self, *args, **kwargs):
from export import markupfile
return markupfile.JournalStandard(*args, **kwargs)
def test_pub_type_for_print(self):
dummy_journal = self.mocker.mock()
dummy_issue = self.mocker.mock()
dummy_journal.scielo_issn
self.mocker.result('print')
self.mocker.replay()
journalstd = self._makeOne(dummy_journal, dummy_issue)
pub_type = journalstd.pub_type
self.assertEqual(pub_type, u'ppub')
self.assertIsInstance(pub_type, unicode)
def test_pub_type_for_electronic(self):
dummy_journal = self.mocker.mock()
dummy_issue = self.mocker.mock()
dummy_journal.scielo_issn
self.mocker.result('electronic')
self.mocker.replay()
journalstd = self._makeOne(dummy_journal, dummy_issue)
pub_type = journalstd.pub_type
self.assertEqual(pub_type, u'epub')
self.assertIsInstance(pub_type, unicode)
def test_study_area(self):
dummy_journal = self.mocker.mock()
dummy_issue = self.mocker.mock()
dummy_study_area = self.mocker.mock()
dummy_journal.study_areas
self.mocker.result(dummy_study_area)
dummy_study_area.all()
self.mocker.result([dummy_study_area for i in range(5)])
dummy_study_area.study_area
self.mocker.result('bar')
self.mocker.count(5)
self.mocker.replay()
journalstd = self._makeOne(dummy_journal, dummy_issue)
expected_study_area = u'bar/bar/bar/bar/bar'
self.assertEqual(journalstd.study_area, expected_study_area)
self.assertIsInstance(expected_study_area, unicode)
def test_study_area_empty_queryset(self):
dummy_journal = self.mocker.mock()
dummy_issue = self.mocker.mock()
dummy_study_area = self.mocker.mock()
dummy_journal.study_areas
self.mocker.result(dummy_study_area)
dummy_study_area.all()
self.mocker.result([])
self.mocker.replay()
journalstd = self._makeOne(dummy_journal, dummy_issue)
self.assertEqual(journalstd.study_area, '')
def test_medline_title_is_the_journal_medline_title(self):
dummy_journal = self.mocker.mock()
dummy_issue = self.mocker.mock()
dummy_journal.medline_title
self.mocker.result('spam')
self.mocker.replay()
journalstd = self._makeOne(dummy_journal, dummy_issue)
title = journalstd.medline_title
self.assertEqual(title, u'spam')
self.assertIsInstance(title, unicode)
def test_medline_code_is_the_journal_medline_code(self):
dummy_journal = self.mocker.mock()
dummy_issue = self.mocker.mock()
dummy_journal.medline_code
self.mocker.result('123456789')
self.mocker.replay()
journalstd = self._makeOne(dummy_journal, dummy_issue)
code = journalstd.medline_code
self.assertEqual(code, u'123456789')
self.assertIsInstance(code, unicode)
def test_pissn_is_the_journal_print_issn(self):
dummy_journal = self.mocker.mock()
dummy_issue = self.mocker.mock()
dummy_journal.print_issn
self.mocker.result('1234-1234')
self.mocker.replay()
journalstd = self._makeOne(dummy_journal, dummy_issue)
pissn = journalstd.pissn
self.assertEqual(pissn, u'1234-1234')
self.assertIsInstance(pissn, unicode)
def test_pissn_is_the_journal_electronic_issn(self):
dummy_journal = self.mocker.mock()
dummy_issue = self.mocker.mock()
dummy_journal.eletronic_issn
self.mocker.result('1234-1234')
self.mocker.replay()
journalstd = self._makeOne(dummy_journal, dummy_issue)
eissn = journalstd.eissn
self.assertEqual(eissn, u'1234-1234')
self.assertIsInstance(eissn, unicode)
def test_publisher_is_the_journal_publisher(self):
dummy_journal = self.mocker.mock()
dummy_issue = self.mocker.mock()
dummy_journal.publisher_name
self.mocker.result('foo')
self.mocker.replay()
journalstd = self._makeOne(dummy_journal, dummy_issue)
publisher = journalstd.publisher
self.assertEqual(publisher, u'foo')
self.assertIsInstance(publisher, unicode)
def test_title_is_the_journal_title(self):
dummy_journal = self.mocker.mock()
dummy_issue = self.mocker.mock()
dummy_journal.title
self.mocker.result('foo')
self.mocker.replay()
journalstd = self._makeOne(dummy_journal, dummy_issue)
title = journalstd.title
self.assertEqual(title, u'foo')
self.assertIsInstance(title, unicode)
def test_journal_meta(self):
dummy_journal = self.mocker.mock()
dummy_issue = self.mocker.mock()
dummy_study_area = self.mocker.mock()
dummy_issue.journal
self.mocker.result(dummy_journal)
dummy_journal.short_title
self.mocker.result(u'blitz')
dummy_journal.editorial_standard
self.mocker.result('apa')
self.mocker.count(2)
dummy_journal.scielo_issn
self.mocker.result('electronic')
self.mocker.count(3)
dummy_journal.eletronic_issn
self.mocker.result('1234-1234')
self.mocker.count(3)
dummy_journal.study_areas
self.mocker.result(dummy_study_area)
dummy_study_area.all()
self.mocker.result([dummy_study_area for i in range(5)])
dummy_study_area.study_area
self.mocker.result('bar')
self.mocker.count(5)
dummy_journal.title
self.mocker.result('spam')
dummy_journal.medline_title
self.mocker.result('spam')
dummy_journal.medline_code
self.mocker.result('123456789')
dummy_journal.acronym
self.mocker.result('foo')
dummy_journal.print_issn
self.mocker.result('1234-123X')
dummy_journal.publisher_name
self.mocker.result('fizz')
self.mocker.replay()
journalstd = self._makeOne(dummy_journal, dummy_issue)
journal_meta = journalstd.journal_meta
expected_journal_meta = u"""
1234-1234#blitz#apa#epub#1234-1234#bar/bar/bar/bar/bar#spam#123456789#spam#foo#1234-123X#1234-1234#fizz
""".strip()
self.assertEqual(journal_meta, expected_journal_meta)
self.assertIsInstance(journal_meta, unicode)
class L10nIssueTests(MockerTestCase):
def _makeOne(self, *args, **kwargs):
from export import markupfile
return markupfile.L10nIssue(*args, **kwargs)
def test_instantiation(self):
from export.markupfile import L10nIssue
dummy_journal = self.mocker.mock()
dummy_issue = self.mocker.mock()
self.mocker.replay()
l10nissue = self._makeOne(dummy_journal, dummy_issue, 'en')
self.assertIsInstance(l10nissue, L10nIssue)
def test_abbrev_title(self):
dummy_journal = self.mocker.mock()
dummy_issue = self.mocker.mock()
dummy_issue.journal
self.mocker.result(dummy_journal)
dummy_journal.title_iso
self.mocker.result(u'blitz')
self.mocker.replay()
l10nissue = self._makeOne(dummy_journal, dummy_issue, 'en')
self.assertEqual(l10nissue.abbrev_title, u'blitz')
def test_abbrev_title_must_return_unicode(self):
dummy_journal = self.mocker.mock()
dummy_issue = self.mocker.mock()
dummy_issue.journal
self.mocker.result(dummy_journal)
dummy_journal.title_iso
self.mocker.result(u'blitz')
self.mocker.replay()
l10nissue = self._makeOne(dummy_journal, dummy_issue, 'en')
self.assertIsInstance(l10nissue.abbrev_title, unicode)
def test_short_title(self):
dummy_journal = self.mocker.mock()
dummy_issue = self.mocker.mock()
dummy_issue.journal
self.mocker.result(dummy_journal)
dummy_journal.short_title
self.mocker.result(u'blitz')
self.mocker.replay()
l10nissue = self._makeOne(dummy_journal, dummy_issue, 'en')
self.assertEqual(l10nissue.short_title, u'blitz')
def test_short_title_must_return_unicode(self):
dummy_journal = self.mocker.mock()
dummy_issue = self.mocker.mock()
dummy_issue.journal
self.mocker.result(dummy_journal)
dummy_journal.short_title
self.mocker.result(u'blitz')
self.mocker.replay()
l10nissue = self._makeOne(dummy_journal, dummy_issue, 'en')
self.assertIsInstance(l10nissue.short_title, unicode)
def test_volume(self):
dummy_journal = self.mocker.mock()
dummy_issue = self.mocker.mock()
dummy_issue.volume
self.mocker.result('7')
self.mocker.replay()
l10nissue = self._makeOne(dummy_journal, dummy_issue, 'en')
volume = l10nissue.volume
self.assertEqual(volume, u'7')
self.assertIsInstance(volume, unicode)
def test_volume_must_return_unicode_even_when_empty(self):
dummy_journal = self.mocker.mock()
dummy_issue = self.mocker.mock()
dummy_issue.volume
self.mocker.result(None)
self.mocker.replay()
l10nissue = self._makeOne(dummy_journal, dummy_issue, 'en')
volume = l10nissue.volume
self.assertIsInstance(volume, unicode)
def test_number(self):
dummy_journal = self.mocker.mock()
dummy_issue = self.mocker.mock()
dummy_issue.number
self.mocker.result('7')
self.mocker.replay()
l10nissue = self._makeOne(dummy_journal, dummy_issue, 'en')
number = l10nissue.number
self.assertEqual(number, u'7')
self.assertIsInstance(number, unicode)
def test_number_must_return_unicode_even_when_empty(self):
dummy_journal = self.mocker.mock()
dummy_issue = self.mocker.mock()
dummy_issue.number
self.mocker.result(None)
self.mocker.replay()
l10nissue = self._makeOne(dummy_journal, dummy_issue, 'en')
number = l10nissue.number
self.assertIsInstance(number, unicode)
def test_suppl_volume(self):
dummy_journal = self.mocker.mock()
dummy_issue = self.mocker.mock()
dummy_issue.type
self.mocker.result('supplement')
dummy_issue.number
self.mocker.result('')
dummy_issue.suppl_text
self.mocker.result('foo')
self.mocker.replay()
l10nissue = self._makeOne(dummy_journal, dummy_issue, 'en')
suppl_volume = l10nissue.suppl_volume
self.assertEqual(suppl_volume, u'foo')
self.assertIsInstance(suppl_volume, unicode)
def test_suppl_number(self):
dummy_journal = self.mocker.mock()
dummy_issue = self.mocker.mock()
dummy_issue.type
self.mocker.result('supplement')
dummy_issue.number
self.mocker.result('5')
dummy_issue.suppl_text
self.mocker.result('foo')
self.mocker.replay()
l10nissue = self._makeOne(dummy_journal, dummy_issue, 'en')
suppl_number = l10nissue.suppl_number
self.assertEqual(suppl_number, u'foo')
self.assertIsInstance(suppl_number, unicode)
def test_suppl_number_must_return_unicode_even_when_empty(self):
dummy_journal = self.mocker.mock()
dummy_issue = self.mocker.mock()
dummy_issue.type
self.mocker.result('supplement')
dummy_issue.number
self.mocker.result('')
self.mocker.replay()
l10nissue = self._makeOne(dummy_journal, dummy_issue, 'en')
suppl_number = l10nissue.suppl_number
self.assertIsInstance(suppl_number, unicode)
def test_date_iso(self):
dummy_journal = self.mocker.mock()
dummy_issue = self.mocker.mock()
dummy_issue.publication_end_month
self.mocker.result(00)
dummy_issue.publication_year
self.mocker.result('foo')
self.mocker.replay()
l10nissue = self._makeOne(dummy_journal, dummy_issue, 'en')
date_iso = l10nissue.date_iso
self.assertEqual(date_iso, u'foo0000')
self.assertIsInstance(date_iso, unicode)
def test_date_iso_must_return_unicode_even_when_empty(self):
dummy_journal = self.mocker.mock()
dummy_issue = self.mocker.mock()
dummy_issue.publication_end_month
self.mocker.result(00)
dummy_issue.publication_year
self.mocker.result(None)
self.mocker.replay()
l10nissue = self._makeOne(dummy_journal, dummy_issue, 'en')
date_iso = l10nissue.date_iso
self.assertIsInstance(date_iso, unicode)
def test_status_must_return_always_1(self):
dummy_journal = self.mocker.mock()
dummy_issue = self.mocker.mock()
self.mocker.replay()
l10nissue = self._makeOne(dummy_journal, dummy_issue, 'en')
status = l10nissue.status
self.assertEqual(status, u'1')
self.assertIsInstance(status, unicode)
def test_issue_meta(self):
dummy_journal = self.mocker.mock()
dummy_issue = self.mocker.mock()
dummy_issue.journal
self.mocker.result(dummy_journal)
dummy_journal.short_title
self.mocker.result(u'blitz')
dummy_issue.type
self.mocker.result('supplement')
self.mocker.count(2)
dummy_issue.volume
self.mocker.result('7')
dummy_issue.number
self.mocker.result('4')
self.mocker.count(3)
dummy_issue.suppl_text
self.mocker.result('bar')
dummy_issue.publication_end_month
self.mocker.result(00)
dummy_issue.publication_year
self.mocker.result('baz')
dummy_journal.scielo_issn
self.mocker.result('electronic')
dummy_journal.eletronic_issn
self.mocker.result('1234-1234')
self.mocker.replay()
l10nissue = self._makeOne(dummy_journal, dummy_issue, 'en')
expected_issue_meta = u'blitz;7;;4;bar;baz0000;1234-1234;1'
self.assertEqual(l10nissue.issue_meta, expected_issue_meta)
def test_issue_meta_must_return_unicode(self):
dummy_journal = self.mocker.mock()
dummy_issue = self.mocker.mock()
dummy_issue.journal
self.mocker.result(dummy_journal)
dummy_journal.short_title
self.mocker.result('blitz')
dummy_issue.volume
self.mocker.result('7')
dummy_issue.number
self.mocker.result('4')
self.mocker.count(3) # accessed by suppl_number and suppl_volume
dummy_issue.type
self.mocker.result('supplement')
self.mocker.count(2) # accessed by suppl_number and suppl_volume
dummy_issue.suppl_text
self.mocker.result('bar')
dummy_issue.publication_end_month
self.mocker.result(00)
dummy_issue.publication_year
self.mocker.result('baz')
dummy_journal.scielo_issn
self.mocker.result('electronic')
dummy_journal.eletronic_issn
self.mocker.result('1234-1234')
self.mocker.replay()
l10nissue = self._makeOne(dummy_journal, dummy_issue, 'en')
self.assertIsInstance(l10nissue.issue_meta, unicode)
def test_sections(self):
dummy_journal = self.mocker.mock()
dummy_issue = self.mocker.mock()
dummy_section = self.mocker.mock()
dummy_issue.section
self.mocker.result(dummy_section)
dummy_section.available(True)
self.mocker.result(dummy_section)
dummy_section.all()
self.mocker.result(['sec%s' % i for i in range(5)])
self.mocker.replay()
l10nissue = self._makeOne(dummy_journal, dummy_issue, 'en')
expected_sections = u'sec0;sec1;sec2;sec3;sec4;No section title'
sections = l10nissue.sections
self.assertEqual(sections, expected_sections)
self.assertIsInstance(sections, unicode)
def test_sections_with_empty_queryset(self):
dummy_journal = self.mocker.mock()
dummy_issue = self.mocker.mock()
dummy_section = self.mocker.mock()
dummy_issue.section
self.mocker.result(dummy_section)
dummy_section.available(True)
self.mocker.result(dummy_section)
dummy_section.all()
self.mocker.result([])
self.mocker.replay()
l10nissue = self._makeOne(dummy_journal, dummy_issue, 'en')
sections = l10nissue.sections
self.assertEqual(sections, u'No section title')
self.assertIsInstance(sections, unicode)
def test_section_ids(self):
dummy_journal = self.mocker.mock()
dummy_issue = self.mocker.mock()
dummy_section = self.mocker.mock()
dummy_issue.section
self.mocker.result(dummy_section)
dummy_section.available(True)
self.mocker.result(dummy_section)
dummy_section.all()
self.mocker.result([dummy_section for i in range(5)])
dummy_section.actual_code
self.mocker.result('6')
self.mocker.count(5)
self.mocker.replay()
l10nissue = self._makeOne(dummy_journal, dummy_issue, 'en')
expected_ids = u'6;6;6;6;6;nd'
ids = l10nissue.sections_ids
self.assertEqual(ids, expected_ids)
self.assertIsInstance(ids, unicode)
def test_section_ids_with_empty_queryset(self):
dummy_journal = self.mocker.mock()
dummy_issue = self.mocker.mock()
dummy_section = self.mocker.mock()
dummy_issue.section
self.mocker.result(dummy_section)
dummy_section.available(True)
self.mocker.result(dummy_section)
dummy_section.all()
self.mocker.result([])
self.mocker.replay()
l10nissue = self._makeOne(dummy_journal, dummy_issue, 'en')
ids = l10nissue.sections_ids
self.assertEqual(ids, 'nd')
self.assertIsInstance(ids, unicode)
def test_ctrl_vocabulary_decs(self):
dummy_journal = self.mocker.mock()
dummy_issue = self.mocker.mock()
dummy_issue.journal
self.mocker.result(dummy_journal)
dummy_journal.ctrl_vocabulary
self.mocker.result('decs')
self.mocker.replay()
l10nissue = self._makeOne(dummy_journal, dummy_issue, 'en')
vocabulary = l10nissue.ctrl_vocabulary
self.assertEqual(vocabulary, u'Health Sciences Descriptors')
self.assertIsInstance(vocabulary, unicode)
def test_date_iso_if_publication_end_month_is_None(self):
dummy_journal = self.mocker.mock()
dummy_issue = self.mocker.mock()
dummy_issue.publication_end_month
self.mocker.result(None)
dummy_issue.publication_year
self.mocker.result('2013')
self.mocker.replay()
l10nissue = self._makeOne(dummy_journal, dummy_issue, 'en')
self.assertEqual(l10nissue.date_iso, u'20130000')
class AheadTests(MockerTestCase):
def _makeOne(self, *args, **kwargs):
from export import markupfile
return markupfile.Ahead(*args, **kwargs)
def test_legend(self):
dummy_journal = self.mocker.mock()
dummy_year = self.mocker.mock()
dummy_journal.title_iso
self.mocker.result('Star Wars')
self.mocker.replay()
ahead = self._makeOne(dummy_journal, dummy_year)
self.assertEqual(ahead.legend, 'Star Wars n.ahead')
def test_period(self):
dummy_journal = self.mocker.mock()
dummy_year = self.mocker.mock()
self.mocker.replay()
ahead = self._makeOne(dummy_journal, dummy_year)
self.assertEqual(ahead.period, '/')
def test_order(self):
dummy_journal = self.mocker.mock()
self.mocker.replay()
journal = self._makeOne(dummy_journal, '2012')
self.assertEqual(journal.order, '201250')
def test_perfect_unicode_representation(self):
dummy_journal = self.mocker.mock()
dummy_journal.title_iso
self.mocker.result('Star Wars')
self.mocker.replay()
expected_result = 'Star Wars n.ahead\r\n/\r\n201250\r\n\r\n'
ahead = self._makeOne(dummy_journal, '2012')
self.assertEqual(unicode(ahead), expected_result)
class L10nAheadTests(MockerTestCase):
def _makeOne(self, *args, **kwargs):
from export import markupfile
return markupfile.L10nAhead(*args, **kwargs)
def test_instantiation(self):
from export.markupfile import L10nAhead
dummy_journal = self.mocker.mock()
self.mocker.replay()
l10nahead = self._makeOne(dummy_journal, '2012', 'en')
self.assertIsInstance(l10nahead, L10nAhead)
def test_short_title(self):
dummy_journal = self.mocker.mock()
dummy_journal.short_title
self.mocker.result(u'blitz')
self.mocker.replay()
l10nahead = self._makeOne(dummy_journal, '2012', 'en')
self.assertEqual(l10nahead.short_title, u'blitz')
def test_short_title_must_return_unicode(self):
dummy_journal = self.mocker.mock()
dummy_journal.short_title
self.mocker.result(u'blitz')
self.mocker.replay()
l10nahead = self._makeOne(dummy_journal, '2012', 'en')
self.assertIsInstance(l10nahead.short_title, unicode)
def test_date_iso(self):
dummy_journal = self.mocker.mock()
self.mocker.replay()
l10nahead = self._makeOne(dummy_journal, '2012', 'en')
date_iso = l10nahead.date_iso
self.assertEqual(date_iso, u'20120000')
self.assertIsInstance(date_iso, unicode)
def test_date_iso_must_return_unicode_even_when_empty(self):
dummy_journal = self.mocker.mock()
self.mocker.replay()
l10nahead = self._makeOne(dummy_journal, '', 'en')
date_iso = l10nahead.date_iso
self.assertIsInstance(date_iso, unicode)
def test_status_must_return_always_1(self):
dummy_journal = self.mocker.mock()
self.mocker.replay()
l10nahead = self._makeOne(dummy_journal, '2012', 'en')
status = l10nahead.status
self.assertEqual(status, u'1')
self.assertIsInstance(status, unicode)
def test_issue_meta(self):
dummy_journal = self.mocker.mock()
dummy_journal.short_title
self.mocker.result(u'blitz')
dummy_journal.scielo_issn
self.mocker.result(u'print')
dummy_journal.print_issn
self.mocker.result('1234-1234')
self.mocker.replay()
l10nahead = self._makeOne(dummy_journal, '2012', 'en')
expected_issue_meta = u'blitz;;;ahead;;20120000;1234-1234;1'
self.assertEqual(l10nahead.ahead_meta, expected_issue_meta)
def test_issue_meta_must_return_unicode(self):
dummy_journal = self.mocker.mock()
dummy_journal.short_title
self.mocker.result('blitz')
dummy_journal.scielo_issn
self.mocker.result(u'print')
dummy_journal.print_issn
self.mocker.result('1234-1234')
self.mocker.replay()
l10nhead = self._makeOne(dummy_journal, '2012', 'en')
self.assertIsInstance(l10nhead.ahead_meta, unicode)
def test_sections(self):
dummy_journal = self.mocker.mock()
self.mocker.replay()
l10nhead = self._makeOne(dummy_journal, '2012', 'en')
expected_sections = u'No section title'
sections = l10nhead.sections
self.assertEqual(sections, expected_sections)
self.assertIsInstance(sections, unicode)
def test_ctrl_vocabulary_decs(self):
dummy_journal = self.mocker.mock()
dummy_journal.ctrl_vocabulary
self.mocker.result('decs')
self.mocker.replay()
l10nhead = self._makeOne(dummy_journal, '2012', 'en')
vocabulary = l10nhead.ctrl_vocabulary
self.assertEqual(vocabulary, u'Health Sciences Descriptors')
self.assertIsInstance(vocabulary, unicode)
class JournalStandardAheadTests(MockerTestCase):
def _makeOne(self, *args, **kwargs):
from export import markupfile
return markupfile.JournalStandardAhead(*args, **kwargs)
def test_pub_type_for_print(self):
dummy_journal = self.mocker.mock()
dummy_journal.scielo_issn
self.mocker.result('print')
self.mocker.replay()
journalstd = self._makeOne(dummy_journal)
pub_type = journalstd.pub_type
self.assertEqual(pub_type, u'ppub')
self.assertIsInstance(pub_type, unicode)
def test_pub_type_for_electronic(self):
dummy_journal = self.mocker.mock()
dummy_journal.scielo_issn
self.mocker.result('electronic')
self.mocker.replay()
journalstd = self._makeOne(dummy_journal)
pub_type = journalstd.pub_type
self.assertEqual(pub_type, u'epub')
self.assertIsInstance(pub_type, unicode)
def test_study_area(self):
dummy_journal = self.mocker.mock()
dummy_study_area = self.mocker.mock()
dummy_journal.study_areas
self.mocker.result(dummy_study_area)
dummy_study_area.all()
self.mocker.result([dummy_study_area for i in range(5)])
dummy_study_area.study_area
self.mocker.result('bar')
self.mocker.count(5)
self.mocker.replay()
journalstd = self._makeOne(dummy_journal)
expected_study_area = u'bar/bar/bar/bar/bar'
self.assertEqual(journalstd.study_area, expected_study_area)
self.assertIsInstance(expected_study_area, unicode)
def test_study_area_empty_queryset(self):
dummy_journal = self.mocker.mock()
dummy_study_area = self.mocker.mock()
dummy_journal.study_areas
self.mocker.result(dummy_study_area)
dummy_study_area.all()
self.mocker.result([])
self.mocker.replay()
journalstd = self._makeOne(dummy_journal)
self.assertEqual(journalstd.study_area, '')
def test_medline_title_is_the_journal_medline_title(self):
dummy_journal = self.mocker.mock()
dummy_journal.medline_title
self.mocker.result('spam')
self.mocker.replay()
journalstd = self._makeOne(dummy_journal)
title = journalstd.medline_title
self.assertEqual(title, u'spam')
self.assertIsInstance(title, unicode)
def test_medline_code_is_the_journal_medline_code(self):
dummy_journal = self.mocker.mock()
dummy_journal.medline_code
self.mocker.result('123456789')
self.mocker.replay()
journalstd = self._makeOne(dummy_journal)
code = journalstd.medline_code
self.assertEqual(code, u'123456789')
self.assertIsInstance(code, unicode)
def test_pissn_is_the_journal_print_issn(self):
dummy_journal = self.mocker.mock()
dummy_journal.print_issn
self.mocker.result('1234-1234')
self.mocker.replay()
journalstd = self._makeOne(dummy_journal)
pissn = journalstd.pissn
self.assertEqual(pissn, u'1234-1234')
self.assertIsInstance(pissn, unicode)
def test_pissn_is_the_journal_electronic_issn(self):
dummy_journal = self.mocker.mock()
dummy_journal.eletronic_issn
self.mocker.result('1234-1234')
self.mocker.replay()
journalstd = self._makeOne(dummy_journal)
eissn = journalstd.eissn
self.assertEqual(eissn, u'1234-1234')
self.assertIsInstance(eissn, unicode)
def test_publisher_is_the_journal_publisher(self):
dummy_journal = self.mocker.mock()
dummy_journal.publisher_name
self.mocker.result('foo')
self.mocker.replay()
journalstd = self._makeOne(dummy_journal)
publisher = journalstd.publisher
self.assertEqual(publisher, u'foo')
self.assertIsInstance(publisher, unicode)
def test_title_is_the_journal_title(self):
dummy_journal = self.mocker.mock()
dummy_journal.title
self.mocker.result(u'foo')
self.mocker.replay()
journalstd = self._makeOne(dummy_journal)
title = journalstd.title
self.assertEqual(title, u'foo')
self.assertIsInstance(title, unicode)
def test_journal_meta(self):
dummy_journal = self.mocker.mock()
dummy_study_area = self.mocker.mock()
dummy_journal.title
self.mocker.result(u'foo')
self.mocker.count(1)
dummy_journal.short_title
self.mocker.result(u'foo')
self.mocker.count(1)
dummy_journal.editorial_standard
self.mocker.result('apa')
self.mocker.count(2)
dummy_journal.scielo_issn
self.mocker.result('electronic')
self.mocker.count(3)
dummy_journal.eletronic_issn
self.mocker.result('1234-1234')
self.mocker.count(3)
dummy_journal.study_areas
self.mocker.result(dummy_study_area)
dummy_study_area.all()
self.mocker.result([dummy_study_area for i in range(5)])
dummy_study_area.study_area
self.mocker.result('bar')
self.mocker.count(5)
dummy_journal.medline_title
self.mocker.result('spam')
dummy_journal.medline_code
self.mocker.result('123456789')
dummy_journal.acronym
self.mocker.result('foo')
dummy_journal.print_issn
self.mocker.result('1234-123X')
dummy_journal.publisher_name
self.mocker.result('fizz')
self.mocker.replay()
journalstd = self._makeOne(dummy_journal)
journal_meta = journalstd.journal_meta
expected_journal_meta = u"""
1234-1234#foo#apa#epub#1234-1234#bar/bar/bar/bar/bar#spam#123456789#foo#foo#1234-123X#1234-1234#fizz
""".strip()
self.assertEqual(journal_meta, expected_journal_meta)
self.assertIsInstance(journal_meta, unicode)
|
|
from physicsTable import *
from operator import itemgetter
import sys
import geometry
import numpy as np
# Direction/side constants for wall traversal (Left, Right, Top, Bottom)
L = 101
R = -101
T = 103
B = -103
# Wall class for various operations
class Wall(object):
def __init__(self, p1, p2):
self.l = p1[0]
self.t = p1[1]
self.r = p2[0]
self.b = p2[1]
self.grouped = False
self.is_touching = []
# Returns boolean if two walls are touching
# NOTE: Only works for auto-generated trials where pixels are aligned
def touches(self, other):
# Do they touch on the side?
if self.r == other.l or self.l == other.r:
# Make sure that there is some overlap (e.g., the bottom of other is not above the top, or top of other not below the bottom)
return not (self.t > other.b or self.b < other.t)
# Do they touch on the top or bottom?
elif self.t == other.b or self.b == other.t:
# Make sure there is some overlap
return not (self.r < other.l or self.l > other.r)
else:
return False
# Figures out all touching walls and adds them to internal state
def get_touch_indices(self, others):
tidxs = [i for i in range(len(others)) if self.touches(others[i])]
self.is_touching = [others[i] for i in tidxs]
return tidxs
# Determines whether a point touches any side
def touches_wall(self, point):
return geometry.point_on_line(point, [self.l, self.t], [self.r, self.t]) or \
geometry.point_on_line(point, [self.r, self.t], [self.r, self.b]) or \
geometry.point_on_line(point, [self.r, self.b], [self.l, self.b]) or \
geometry.point_on_line(point, [self.l, self.b], [self.l, self.t])
def touches_top_wall(self, point):
return geometry.point_on_line(point, [self.l, self.t], [self.r, self.t])
# From a given point, traverses clockwise on the inside to collect points
def get_next_point_and_wall_and_dir(self, lastpt, dir):
# Traveling along the bottom wall
if dir == B:
# Sort touching walls from left to right
walls = sort_by_direction(self.is_touching, L)
for w in walls:
# Skip anything to the left of the last point
if w.l > lastpt[0]:
# The next wall is under the current one - this wall's top left is new, go down along the left wall
if w.t == self.b:
return (w.l, w.t), w, L
# The next wall is adjacent to this one and continues along the bottom
elif (self.r, self.b) == (w.l, w.b):
# Check along the bottom of this wall
return w.get_next_point_and_wall_and_dir((w.l, w.b), B)
# The next wall is to the right of this one
elif w.l == self.r:
return (self.r, self.b), w, L
return (self.r, self.b), self, R
# Traveling along the left wall
elif dir == L:
# Sort touching walls from top to bottom
walls = sort_by_direction(self.is_touching, T)
for w in walls:
# Skip anything above the last point
if w.t > lastpt[1]:
                    # The next wall is to the left of the current one
if w.r == self.l:
return (w.r, w.t), w, T
# The next wall is adjacent and continues along the left
elif (self.l, self.b) == (w.l, w.t):
return w.get_next_point_and_wall_and_dir((w.l, w.t), L)
# The next wall is below the current one
elif w.t == self.b:
return (self.l, self.b), w, T
return (self.l, self.b), self, B
# Traveling along the top wall
elif dir == T:
# Sort touching walls from right to left
walls = sort_by_direction(self.is_touching, R)
for w in walls:
# Skip anything to the right of the last point
if w.r < lastpt[0]:
# The next wall is above the current one
if w.b == self.t:
return (w.r, w.b), w, R
                    # The next wall is adjacent and continues along the top
elif (self.l, self.t) == (w.r, w.t):
return w.get_next_point_and_wall_and_dir((w.r, w.t), T)
# The next wall is to the left of this one
elif w.r == self.l:
return (self.l, self.t), w, R
return (self.l, self.t), self, L
# Traveling along the right wall
elif dir == R:
walls = sort_by_direction(self.is_touching, B)
for w in walls:
# Skip anything below
if w.b < lastpt[1]:
# The next wall is to the right of the current one
if w.l == self.r:
return (w.l, w.b), w, B
                    # The next wall is adjacent and continues along the right
elif (self.r, self.t) == (w.r, w.b):
return w.get_next_point_and_wall_and_dir((w.r, w.b), R)
# The next wall is above this one
elif w.b == self.t:
return (self.r, self.t), w, B
return (self.r, self.t), self, T
def get_next_outer_point_and_wall_and_dir(self, lastpt, dir):
# Traveling along the bottom wall
if dir == B:
# Sort touching walls from right to left
walls = sort_by_outside_direction(self.is_touching, R)
for w in walls:
# Skip anything to the right of the last point
if w.r < lastpt[0]:
                    # The next wall is under the current one - this wall's top right is new, go down along the right wall
if w.t == self.b:
return (w.r, w.t), w, R
# The next wall is adjacent to this one and continues along the bottom
elif (self.l, self.b) == (w.r, w.b):
# Check along the bottom of this wall
return w.get_next_outer_point_and_wall_and_dir((w.r, w.b), B)
# The next wall is to the left of this one
elif w.r == self.l:
return (self.l, self.b), w, R
return (self.l, self.b), self, L
# Traveling along the left wall
elif dir == L:
# Sort touching walls from bottom to top
walls = sort_by_outside_direction(self.is_touching, B)
for w in walls:
# Skip anything below the last point
if w.b < lastpt[1]:
                    # The next wall is to the left of the current one
if w.r == self.l:
return (w.r, w.b), w, B
# The next wall is adjacent and continues along the left
elif (self.l, self.t) == (w.l, w.b):
return w.get_next_outer_point_and_wall_and_dir((w.l, w.b), L)
# The next wall is above the current one
elif w.b == self.t:
return (self.l, self.t), w, B
return (self.l, self.t), self, T
# Traveling along the top wall
elif dir == T:
# Sort touching walls from left to right
walls = sort_by_outside_direction(self.is_touching, L)
for w in walls:
# Skip anything to the left of the last point
if w.l > lastpt[0]:
# The next wall is above the current one
if w.b == self.t:
return (w.l, w.b), w, L
                    # The next wall is adjacent and continues along the top
elif (self.r, self.t) == (w.l, w.t):
return w.get_next_outer_point_and_wall_and_dir((w.l, w.t), T)
# The next wall is to the right of this one
elif w.l == self.r:
return (self.r, self.t), w, L
return (self.r, self.t), self, R
# Traveling along the right wall
elif dir == R:
walls = sort_by_outside_direction(self.is_touching, T)
for w in walls:
# Skip anything above
if w.t > lastpt[1]:
# The next wall is to the right of the current one
if w.l == self.r:
return (w.l, w.t), w, T
                    # The next wall is adjacent and continues along the right
elif (self.r, self.b) == (w.r, w.t):
return w.get_next_outer_point_and_wall_and_dir((w.r, w.t), R)
# The next wall is below this one
elif w.t == self.b:
return (self.r, self.b), w, T
return (self.r, self.b), self, B
def to_pg_rect(self):
w = self.r - self.l
h = self.b - self.t
return pg.Rect((self.l, self.t), (w,h))
# Converts trial into a set of wall rectangles [right, top, left, bottom]
def convert_trial_2_wallrects(trial):
return [Wall(w[0],w[1]) for w in trial.normwalls]
def get_topleft_wall(walls):
best_idx = 0
most_top = walls[0].t
most_left = walls[0].l
for i in range(1,len(walls)):
w = walls[i]
if w.t < most_top or \
(w.t == most_top and w.l < most_left):
best_idx = i
most_top = w.t
most_left = w.l
return best_idx, walls[best_idx]
def get_topright_wall(walls):
best_idx = 0
most_top = walls[0].t
most_right = walls[0].r
for i in range(1, len(walls)):
w = walls[i]
if w.t < most_top or \
(w.t == most_top and w.r > most_right):
best_idx = i
most_top = w.t
most_right = w.r
return best_idx, walls[best_idx]
def sort_by_direction(walls, dir):
if dir == L:
pfnc = lambda x: (x.l, -x.b)
elif dir == R:
pfnc = lambda x: (-x.r, x.t)
elif dir == T:
pfnc = lambda x: (x.t, x.l)
elif dir == B:
pfnc = lambda x: (-x.b, -x.r)
vals = zip(map(pfnc, walls), walls)
vsort = sorted(vals, key=itemgetter(0))
return [w for _, w in vsort]
def sort_by_outside_direction(walls, dir):
if dir == L:
pfnc = lambda x: (x.l, x.t)
elif dir == R:
pfnc = lambda x: (-x.r, -x.b)
elif dir == T:
pfnc = lambda x: (x.t, -x.r)
elif dir == B:
pfnc = lambda x: (-x.b, x.l)
vals = zip(map(pfnc, walls), walls)
vsort = sorted(vals, key=itemgetter(0))
return [w for _, w in vsort]
# Takes in a list of Walls, returns a list including:
# [
# A list of points that form the inner hull,
# The walls used to form that hull
# ]
def get_inner_hull(walls):
# Start with the upper-left wall
tl_idx, tl_wall = get_topleft_wall(walls)
tl_wall.grouped = True
inc_walls = [tl_wall]
def add_walls(w1, others):
tidxs = w1.get_touch_indices(others)
for i in tidxs:
ow = others[i]
if not ow.grouped:
inc_walls.append(ow)
ow.grouped = True
add_walls(ow, others)
return
add_walls(tl_wall, walls)
getting_first = True
not_goods = []
while getting_first:
getting_first = False
chkwalls = [w for w in tl_wall.is_touching if w not in not_goods]
if len(chkwalls) == 0:
# Edge case where we're finding some internal walls
cur_wall = tl_wall
cur_pt = (cur_wall.r, cur_wall.b)
cur_traverse = R
else:
_, cur_wall = get_topright_wall(chkwalls)
cur_pt = (cur_wall.l, cur_wall.b)
# Check that there is not a wall below this point
for w in cur_wall.is_touching:
if w != tl_wall and w.touches_top_wall(cur_pt) and not getting_first:
#wait_4_kp(lambda: draw_everything(tb, [tl_wall, cur_wall], [cur_pt]))
not_goods.append(tl_wall)
not_goods.append(cur_wall)
tl_wall = w
getting_first = True
cur_traverse = B
inner_pts = [cur_pt]
running = True
while running:
cur_pt, cur_wall, cur_traverse = cur_wall.get_next_point_and_wall_and_dir(cur_pt, cur_traverse)
if cur_pt == inner_pts[0]:
running = False
else:
inner_pts.append(cur_pt)
#wait_4_kp(lambda: draw_everything(tb, walls, inner_pts))
return inner_pts, inc_walls
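# Hedged usage sketch (not part of the original module): four axis-aligned
# Wall rectangles that form a closed room. Walls only "touch" when they share
# an exact edge coordinate, so the borders below are chosen to line up.
# Coordinates are illustrative only.
def _example_inner_hull():
    walls = [Wall((0, 0), (10, 100)),    # left border
             Wall((90, 0), (100, 100)),  # right border
             Wall((10, 0), (90, 10)),    # top border
             Wall((10, 90), (90, 100))]  # bottom border
    inner_pts, used_walls = get_inner_hull(walls)
    # Expected interior corners, e.g. [(10, 10), (90, 10), (90, 90), (10, 90)]
    return inner_pts, used_walls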
def get_outer_hull(wall_list):
_, cur_wall = get_topleft_wall(wall_list)
cur_pt = (cur_wall.l, cur_wall.t)
cur_traverse = T
outer_pts = [cur_pt]
while True:
cur_pt, cur_wall, cur_traverse = cur_wall.get_next_outer_point_and_wall_and_dir(cur_pt, cur_traverse)
if cur_pt == outer_pts[0]:
return outer_pts
else:
outer_pts.append(cur_pt)
def get_islands(rem_walls):
islands = []
while len(rem_walls) > 0:
tl_idx, tl_wall = get_topleft_wall(rem_walls)
tl_wall.grouped = True
this_island = [tl_wall]
def add_walls(w1, others):
tidxs = w1.get_touch_indices(others)
for i in tidxs:
ow = others[i]
if not ow.grouped:
this_island.append(ow)
ow.grouped = True
add_walls(ow, others)
return
add_walls(tl_wall, rem_walls)
islands.append(this_island)
rem_walls = [w for w in rem_walls if w not in this_island]
island_pts = [get_outer_hull(wl) for wl in islands]
return island_pts, islands
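# Hedged usage sketch (not part of the original module): two free-standing
# blocks that never touch form two separate islands, each reported as the
# outer rectangle of its corner points. Coordinates are illustrative only.
def _example_islands():
    blocks = [Wall((20, 20), (30, 30)), Wall((60, 60), (70, 70))]
    island_pts, island_walls = get_islands(blocks)
    # island_pts is expected to hold one corner list per block, e.g.
    # [(20, 20), (30, 20), (30, 30), (20, 30)] for the first block
    return island_pts, island_walls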
# Takes in a trial and produces the list of triangles that make up its inner container.
class Triangulation(object):
def __init__(self, trial):
self._trial = trial
self._walls = convert_trial_2_wallrects(self._trial)
inner_pts, inc_walls = get_inner_hull(self._walls)
rem_walls = [w for w in self._walls if w not in inc_walls]
islands, _ = get_islands(rem_walls)
self._init_tris, self._wound_hull = geometry.ear_clip_with_holes(inner_pts, islands)
self._has_all_tris = False
self._all_tris = [self._init_tris]
def make_all_triangulations(self):
if not self._has_all_tris:
for i in range(1, len(self._wound_hull)):
#if i == 5:
# print 'here'
# newtri = ear_clip(self._wound_hull[i:] + self._wound_hull[:i])
newtri = geometry.ear_clip(self._wound_hull[i:] + self._wound_hull[:i])
self._all_tris.append(newtri)
self._has_all_tris = True
def make_graph(self):
pass
def triangulation(self, index = 0):
if index >0 and not self._has_all_tris:
print "Cannot get triangle by index without make_all_triangulations"
index = 0
return self._all_tris[index]
def get_n_tris(self):
return len(self._all_tris)
n_triangles = property(get_n_tris)
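# Hedged usage sketch (assumes a physicsTable trial has already been loaded,
# e.g. via loadTrial(...); the 'trial' argument is supplied by the caller):
# enumerate every triangulation of the trial's inner container.
def _example_all_triangulations(trial):
    tri = Triangulation(trial)
    tri.make_all_triangulations()
    return [tri.triangulation(i) for i in range(tri.n_triangles)]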
class ACD(object):
def __init__(self, trial, convexity_limit = 10):
self._trial = trial
self._walls = convert_trial_2_wallrects(self._trial)
self._clim = convexity_limit
inner_pts, inc_walls = get_inner_hull(self._walls)
rem_walls = [w for w in self._walls if w not in inc_walls]
islands, _ = get_islands(rem_walls)
outer_shell = map(np.array, inner_pts)
polys = [outer_shell] + [map(np.array, pts) for pts in islands]
self._acd = geometry.approximate_convex_decomposition(polys, self._clim)
#self._acd = _acd_outer(outer_shell, self._clim, geometry._sl_concavity)
# TEMPORARY
from geometry import _find_cut_heur, _find_witness
def _acd_outer(vertices, tau, witness_fnc):
d, witness, pocket = _find_witness(vertices, witness_fnc)
if d < tau:
return [vertices] # We're below threshold or it's already convex
cut_v = _find_cut_heur(witness, vertices, pocket)
poly_1 = []
poly_2 = []
vidx = 0
on_poly_2 = False
for vidx in range(len(vertices)):
this_v = vertices[vidx]
# Are we at a cut point? If so, add to both polygons
if all(this_v == witness) or all(this_v == cut_v):
poly_1.append(this_v)
poly_2.append(this_v)
on_poly_2 = not on_poly_2
else:
if on_poly_2:
poly_2.append(this_v)
else:
poly_1.append(this_v)
wait_4_kp(lambda: draw_polys(tb, [poly_1, poly_2]))
return _acd_outer(poly_1, tau, witness_fnc) + _acd_outer(poly_2, tau, witness_fnc)
def trianglulate_trial(trial, do_all_triangulations = False):
walls = convert_trial_2_wallrects(trial)
inner_pts, inc_walls = get_inner_hull(walls)
rem_walls = [w for w in walls if w not in inc_walls]
islands, _ = get_islands(rem_walls)
init_tris, wound_hull = geometry.ear_clip_with_holes(inner_pts, islands)
if do_all_triangulations:
tris = [init_tris]
for i in range(1, len(wound_hull)):
tris.append(geometry.ear_clip(wound_hull[i:] + wound_hull[:i]))
return tris
else:
return init_tris
if __name__ == '__main__':
# If running directly, use PyGame -- otherwise allow loading without requiring these libraries
import pygame as pg
from pygame.constants import *
def draw_wall_outlines(surf, walls, color=(255, 0, 0)):
for w in walls:
r = w.to_pg_rect()
pg.draw.rect(surf, color, r, 2)
return surf
def draw_everything(table, wall_list=[], ptlist=[], tris = [], ptcol=(0, 255, 0), tricol=(255,0,0)):
s = table.draw()
draw_wall_outlines(s, wall_list)
for p in ptlist:
pg.draw.circle(s, ptcol, p, 4)
for t in tris:
pg.draw.polygon(s, tricol, t, 2)
def draw_polys(table, poly_list=[], ptlist=[], poly_col=(0,255,0), pt_col=(255,0,0)):
s = table.draw()
for p in poly_list:
pg.draw.polygon(s, poly_col, p, 2)
for p in ptlist:
pg.draw.circle(s, pt_col, p, 4)
def wait_4_kp(draw_fn, hz=20):
clk = pg.time.Clock()
while True:
draw_fn()
pg.display.flip()
for e in pg.event.get():
if e.type == KEYDOWN:
return
clk.tick(hz)
# Load in the trial
if len(sys.argv) == 1:
tr = loadTrial('automatic_generator/samp.ptr')
else:
tr = loadTrialFromJSON(sys.argv[1])
pg.init()
s = pg.display.set_mode((1000, 620))
tb = tr.makeTable()
#from numpy import array
#p1 = [array([79, 78]), array([493, 78]), array([493, 114]), array([678, 114]), array([678, 163]), array([928, 163]), array([928, 269]), array([934, 269]), array([934, 491]), array([974, 491]), array([974, 596]), array([909, 596]), array([909, 560]), array([663, 560]), array([663, 491]), array([699, 491]), array([699, 401]), array([656, 469]), array([588, 469]), array([588, 401]), array([551, 401]), array([551, 365]), array([438, 365]), array([438, 431]), array([153, 431]), array([153, 258]), array([ 79, 258])]
#p2 = [array([699,401]), array([656,401]), array([656,469])]
#wait_4_kp(lambda: draw_polys(tb, [p2]))
acd = ACD(tr, 20)
for cd in acd._acd:
print cd
wait_4_kp(lambda: draw_polys(tb, acd._acd))
#triang = Triangulation(tr)
#triang.make_all_triangulations()
#for i in range(triang.n_triangles):
# wait_4_kp(lambda: draw_everything(tb, [], [], triang.triangulation(i)))
|
|
# Copyright 2014 eBay Software Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import jsonschema
from mock import MagicMock
from mock import Mock
from mock import patch
from testtools import TestCase
from testtools.matchers import Is, Equals
from trove.cluster import models
from trove.cluster.models import Cluster, DBCluster
from trove.cluster.service import ClusterController
from trove.cluster.tasks import ClusterTasks
from trove.cluster import views
import trove.common.cfg as cfg
from trove.common import exception
from trove.common.strategies.cluster import strategy
from trove.common import utils
from trove.datastore import models as datastore_models
from trove.tests.unittests import trove_testtools
class TestClusterController(TestCase):
def setUp(self):
super(TestClusterController, self).setUp()
self.controller = ClusterController()
instances = [
{
"flavorRef": "7",
"volume": {
"size": 1
},
"availability_zone": "az",
"nics": [
{"net-id": "e89aa5fd-6b0a-436d-a75c-1545d34d5331"}
]
}
] * 5
self.cluster = {
"cluster": {
"name": "products",
"datastore": {
"type": "mongodb",
"version": "2.4.10"
},
"instances": instances
}
}
self.add_shard = {
"add_shard": {}
}
def test_get_schema_create(self):
schema = self.controller.get_schema('create', self.cluster)
self.assertIsNotNone(schema)
self.assertTrue('cluster' in schema['properties'])
self.assertTrue('cluster')
def test_get_schema_action_add_shard(self):
schema = self.controller.get_schema('add_shard', self.add_shard)
self.assertIsNotNone(schema)
self.assertTrue('add_shard' in schema['properties'])
def test_validate_create(self):
body = self.cluster
schema = self.controller.get_schema('create', body)
validator = jsonschema.Draft4Validator(schema)
self.assertTrue(validator.is_valid(body))
def test_validate_add_shard(self):
body = self.add_shard
schema = self.controller.get_schema('add_shard', body)
validator = jsonschema.Draft4Validator(schema)
self.assertTrue(validator.is_valid(body))
def test_validate_create_blankname(self):
body = self.cluster
body['cluster']['name'] = " "
schema = self.controller.get_schema('create', body)
validator = jsonschema.Draft4Validator(schema)
self.assertFalse(validator.is_valid(body))
errors = sorted(validator.iter_errors(body), key=lambda e: e.path)
self.assertThat(len(errors), Is(1))
self.assertThat(errors[0].message,
Equals("' ' does not match '^.*[0-9a-zA-Z]+.*$'"))
def test_validate_create_blank_datastore(self):
body = self.cluster
body['cluster']['datastore']['type'] = ""
schema = self.controller.get_schema('create', body)
validator = jsonschema.Draft4Validator(schema)
self.assertFalse(validator.is_valid(body))
errors = sorted(validator.iter_errors(body), key=lambda e: e.path)
error_messages = [error.message for error in errors]
error_paths = [error.path.pop() for error in errors]
self.assertThat(len(errors), Is(2))
self.assertIn("'' is too short", error_messages)
self.assertIn("'' does not match '^.*[0-9a-zA-Z]+.*$'", error_messages)
self.assertIn("type", error_paths)
@patch.object(Cluster, 'create')
@patch.object(datastore_models, 'get_datastore_version')
def test_create_clusters_disabled(self,
mock_get_datastore_version,
mock_cluster_create):
body = self.cluster
tenant_id = Mock()
context = trove_testtools.TroveTestContext(self)
req = Mock()
req.environ = MagicMock()
req.environ.get = Mock(return_value=context)
datastore_version = Mock()
datastore_version.manager = 'mysql'
mock_get_datastore_version.return_value = (Mock(), datastore_version)
self.assertRaises(exception.ClusterDatastoreNotSupported,
self.controller.create,
req,
body,
tenant_id)
@patch.object(Cluster, 'create')
@patch.object(utils, 'get_id_from_href')
@patch.object(datastore_models, 'get_datastore_version')
def test_create_clusters(self,
mock_get_datastore_version,
mock_id_from_href,
mock_cluster_create):
body = self.cluster
tenant_id = Mock()
context = trove_testtools.TroveTestContext(self)
req = Mock()
req.environ = Mock()
req.environ.__getitem__ = Mock(return_value=context)
datastore_version = Mock()
datastore_version.manager = 'mongodb'
datastore = Mock()
mock_get_datastore_version.return_value = (datastore,
datastore_version)
instances = [
{
'volume_size': 1,
'volume_type': None,
'flavor_id': '1234',
'availability_zone': 'az',
'nics': [
{'net-id': 'e89aa5fd-6b0a-436d-a75c-1545d34d5331'}
]
}
] * 5
mock_id_from_href.return_value = '1234'
mock_cluster = Mock()
mock_cluster.instances = []
mock_cluster.instances_without_server = []
mock_cluster.datastore_version.manager = 'mongodb'
mock_cluster_create.return_value = mock_cluster
self.controller.create(req, body, tenant_id)
mock_cluster_create.assert_called_with(context, 'products',
datastore, datastore_version,
instances, {})
@patch.object(Cluster, 'load')
def test_show_cluster(self,
mock_cluster_load):
tenant_id = Mock()
id = Mock()
context = trove_testtools.TroveTestContext(self)
req = Mock()
req.environ = Mock()
req.environ.__getitem__ = Mock(return_value=context)
mock_cluster = Mock()
mock_cluster.instances = []
mock_cluster.instances_without_server = []
mock_cluster.datastore_version.manager = 'mongodb'
mock_cluster_load.return_value = mock_cluster
self.controller.show(req, tenant_id, id)
mock_cluster_load.assert_called_with(context, id)
@patch.object(Cluster, 'load')
@patch.object(Cluster, 'load_instance')
def test_show_cluster_instance(self,
mock_cluster_load_instance,
mock_cluster_load):
tenant_id = Mock()
cluster_id = Mock()
instance_id = Mock()
context = trove_testtools.TroveTestContext(self)
req = Mock()
req.environ = Mock()
req.environ.__getitem__ = Mock(return_value=context)
cluster = Mock()
mock_cluster_load.return_value = cluster
cluster.id = cluster_id
self.controller.show_instance(req, tenant_id, cluster_id, instance_id)
mock_cluster_load_instance.assert_called_with(context, cluster.id,
instance_id)
@patch.object(Cluster, 'load')
def test_delete_cluster(self, mock_cluster_load):
tenant_id = Mock()
cluster_id = Mock()
req = MagicMock()
cluster = Mock()
trove_testtools.patch_notifier(self)
mock_cluster_load.return_value = cluster
self.controller.delete(req, tenant_id, cluster_id)
cluster.delete.assert_called_with()
class TestClusterControllerWithStrategy(TestCase):
def setUp(self):
super(TestClusterControllerWithStrategy, self).setUp()
self.controller = ClusterController()
self.cluster = {
"cluster": {
"name": "products",
"datastore": {
"type": "mongodb",
"version": "2.4.10"
},
"instances": [
{
"flavorRef": "7",
"volume": {
"size": 1
},
},
{
"flavorRef": "7",
"volume": {
"size": 1
},
},
{
"flavorRef": "7",
"volume": {
"size": 1
},
},
{
"flavorRef": "7",
"volume": {
"size": 1
},
},
{
"flavorRef": "7",
"volume": {
"size": 1
},
}
]
}
}
def tearDown(self):
super(TestClusterControllerWithStrategy, self).tearDown()
cfg.CONF.clear_override('cluster_support', group='mongodb')
cfg.CONF.clear_override('api_strategy', group='mongodb')
@patch.object(datastore_models, 'get_datastore_version')
@patch.object(models.Cluster, 'create')
def test_create_clusters_disabled(self,
mock_cluster_create,
mock_get_datastore_version):
cfg.CONF.set_override('cluster_support', False, group='mongodb',
enforce_type=True)
body = self.cluster
tenant_id = Mock()
context = trove_testtools.TroveTestContext(self)
req = Mock()
req.environ = MagicMock()
req.environ.get = Mock(return_value=context)
datastore_version = Mock()
datastore_version.manager = 'mongodb'
mock_get_datastore_version.return_value = (Mock(), datastore_version)
self.assertRaises(exception.TroveError, self.controller.create, req,
body, tenant_id)
@patch.object(views.ClusterView, 'data', return_value={})
@patch.object(datastore_models, 'get_datastore_version')
@patch.object(models.Cluster, 'create')
def test_create_clusters_enabled(self,
mock_cluster_create,
mock_get_datastore_version,
mock_cluster_view_data):
cfg.CONF.set_override('cluster_support', True, group='mongodb',
enforce_type=True)
body = self.cluster
tenant_id = Mock()
context = trove_testtools.TroveTestContext(self)
req = Mock()
req.environ = MagicMock()
req.environ.get = Mock(return_value=context)
datastore_version = Mock()
datastore_version.manager = 'mongodb'
mock_get_datastore_version.return_value = (Mock(), datastore_version)
mock_cluster = Mock()
mock_cluster.datastore_version.manager = 'mongodb'
mock_cluster_create.return_value = mock_cluster
self.controller.create(req, body, tenant_id)
@patch.object(models.Cluster, 'load')
def test_controller_action_multi_action(self,
mock_cluster_load):
body = {'do_stuff': {}, 'do_stuff2': {}}
tenant_id = Mock()
context = trove_testtools.TroveTestContext(self)
cluster_id = Mock()
req = Mock()
req.environ = MagicMock()
req.environ.get = Mock(return_value=context)
cluster = Mock()
cluster.instances_without_server = [Mock()]
cluster.datastore_version.manager = 'test_dsv'
mock_cluster_load.return_value = cluster
self.assertRaisesRegexp(exception.TroveError,
'should have exactly one action specified',
self.controller.action, req,
body, tenant_id, cluster_id)
@patch.object(models.Cluster, 'load')
def test_controller_action_no_strategy(self,
mock_cluster_load):
body = {'do_stuff2': {}}
tenant_id = Mock()
context = trove_testtools.TroveTestContext(self)
cluster_id = Mock()
req = Mock()
req.environ = MagicMock()
req.environ.get = Mock(return_value=context)
db_info = DBCluster(ClusterTasks.NONE, id=cluster_id,
tenant_id=tenant_id)
cluster = Cluster(context, db_info, datastore='test_ds',
datastore_version='test_dsv')
mock_cluster_load.return_value = cluster
self.assertRaisesRegexp(exception.TroveError,
'Action do_stuff2 not supported',
self.controller.action, req,
body, tenant_id, cluster_id)
@patch.object(strategy, 'load_api_strategy')
@patch.object(models.Cluster, 'load')
def test_controller_action_found(self,
mock_cluster_load,
mock_cluster_api_strategy):
body = {'grow': {}}
tenant_id = Mock()
context = trove_testtools.TroveTestContext(self)
cluster_id = 'test_uuid'
req = Mock()
req.environ = MagicMock()
req.environ.get = Mock(return_value=context)
cluster = Mock()
cluster.instances_without_server = [Mock()]
cluster.datastore_version.manager = 'test_dsv'
mock_cluster_load.return_value = cluster
self.controller.action(req, body, tenant_id, cluster_id)
self.assertEqual(1, cluster.action.call_count)
|
|
import io
from pathlib import Path
from typing import List, Dict, Any, Union, Generator
import urllib.request as request
import urllib.parse as urlparse
import zlib
from os.path import splitext
import pickle
import fnmatch
"""
Copyright 2017 David B. Bracewell
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
class Resource(object):
"""
Abstraction of a resource (e.g. file, string, url) that can be read from or written to.
"""
def reader(self, params=None):
"""
Opens the resource in read mode
:param params: parameters to be used in the open (specific to each implementation)
:return: a stream that can be read from
"""
raise NotImplementedError
def writer(self, params=None):
"""
Opens the resource in write mode
:param params: parameters to be used in the open (specific to each implementation)
:return: a stream that can be written to
"""
raise NotImplementedError
def read(self, params=None) -> str:
"""
Reads the resource into a string
:param params: parameters to be used in the open (specific to each implementation)
:return: the string contents of the resource
"""
params = self._mkparams(params)
encoding = params["encoding"] if "encoding" in params else "utf-8"
compression = params["compress"] if "compress" in params else False
if compression:
params["mode"] = "rb"
with self.reader(params) as reader:
if compression:
return zlib.decompress(reader.read()).decode(encoding)
return reader.read()
def readlines(self, params=None) -> List[str]:
"""
Reads the resource line by line into a list of strings
:param params: parameters to be used in the open (specific to each implementation)
:return: the string contents of the resource
"""
return self.read(params).splitlines()
def lines(self, params=None):
with self.reader(params) as rdr:
for line in rdr:
yield line
def write(self, content: str, params=None) -> None:
"""
Writes the given content to the resource
:param content: The content to write
:param params: parameters to be used in the open (specific to each implementation)
:return: None
"""
        # Normalize params the same way read() does so that passing None is safe
        params = self._mkparams(params)
        encoding = params["encoding"] if "encoding" in params else "utf-8"
compression = params["compress"] if "compress" in params else False
if compression:
params["mode"] = "wb"
with self.writer(params) as writer:
if compression:
writer.write(zlib.compress(content.encode(encoding)))
else:
writer.write(content)
def descriptor(self) -> str:
raise NotImplementedError
def __str__(self) -> str:
return self.descriptor()
def __repr__(self) -> str:
return self.descriptor()
def __iter__(self):
with self.reader() as rdr:
for line in rdr:
yield line
def path(self) -> str:
"""
:return: The path of the resource
"""
return ""
def ext(self) -> str:
"""
:return: the extension of the file
"""
return ""
def basename(self) -> str:
"""
:return: the basename of the file
"""
return ""
def children(self, recursive: bool = False, pattern='*.*', include_dirs=True):
"""
:return: child resources
"""
return []
def child(self, subpath: str) -> 'Resource':
"""
Gets a resource relative to this one
:param subpath: The subpath
:return: A resource relative to this one
"""
return self
def parent(self) -> 'Resource':
"""
Gets the parent (resource) (i.e. one level up)
:return: The parent resource
"""
return self
def is_dir(self) -> bool:
"""
:return: True if this resource represents a directory
"""
return False
def exists(self) -> bool:
"""
:return: True if the resource exists
"""
return True
def mkdirs(self) -> None:
"""
Makes all directories down to and including this one
:return: None
"""
pass
def write_object(self, object):
with self.writer({"mode": "wb"}) as w:
w.write(zlib.compress(pickle.dumps(object)))
def read_object(self):
with self.reader({"mode": "rb"}) as r:
            return pickle.loads(zlib.decompress(r.read()))
def _mkparams(self, params: Dict[str, Any]):
if params is None:
return {}
return dict([(k.lower(), v) for k, v in params.items()])
class FileResource(Resource):
"""
Wraps local file based resource
"""
def __init__(self, location):
if isinstance(location, Path):
self._location = location
else:
self._location = Path(location)
def reader(self, params=None):
params = self._mkparams(params)
encoding = params["encoding"] if "encoding" in params else "utf-8"
mode = params["mode"] if "mode" in params else "r"
if "b" in mode:
return self._location.open(mode)
return self._location.open(encoding=encoding)
def children(self, recursive: bool = False, pattern: str = '*.*', include_dirs=True):
for f in self._location.iterdir():
try:
r = FileResource(f)
if r.is_dir():
if include_dirs and fnmatch.fnmatch(r.path(), pattern):
yield r
if recursive:
for cc in r.children(recursive):
yield cc
elif fnmatch.fnmatch(r.path(), pattern):
yield r
except OSError:
continue
def writer(self, params=None):
params = self._mkparams(params)
encoding = params["encoding"] if "encoding" in params else "utf-8"
mode = params["mode"] if "mode" in params else "w"
if "b" in mode:
return self._location.open(mode)
return self._location.open(mode, encoding=encoding)
def path(self) -> str:
return str(self._location.absolute())
def child(self, subpath: str) -> 'Resource':
return FileResource(self._location / subpath)
def parent(self) -> 'Resource':
return FileResource(self._location.parent)
def is_dir(self) -> bool:
return self._location.is_dir()
def exists(self) -> bool:
return self._location.exists()
def mkdirs(self) -> None:
        self._location.mkdir(parents=True, exist_ok=True)
def ext(self) -> str:
return self._location.suffix
def basename(self) -> str:
return self._location.name
def descriptor(self) -> str:
return self.path()
class StringIOWrapper(io.StringIO):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.bset = None
def set_buffer(self, buffer):
self.bset = buffer
def close(self, *args, **kwargs):
self.bset(self.getvalue())
return super().close(*args, **kwargs)
class StringResource(Resource):
"""
Wraps a string as a resource
"""
def __init__(self, string=""):
self._buffer = string
def reader(self, params=None):
return io.StringIO(self._buffer)
def __set_buffer(self, x):
self._buffer = x
def writer(self, params=None):
w = StringIOWrapper()
w.set_buffer(self.__set_buffer)
return w
def write(self, content: str, params=None) -> None:
with self.writer() as writer:
writer.write(content)
self._buffer = writer.getvalue()
def read(self, params=None) -> str:
if isinstance(self._buffer, bytes):
params = self._mkparams(params)
encoding = params["encoding"] if "encoding" in params else "utf-8"
return self._buffer.decode(encoding)
return self._buffer
def path(self) -> str:
return "string:"
def descriptor(self) -> str:
return "string:{}".format(self._buffer)
def write_object(self, object):
self._buffer = pickle.dumps(object)
def read_object(self):
return pickle.loads(self._buffer)
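# Hedged usage sketch (not part of the original module): StringResource keeps
# everything in memory, so a write simply replaces the wrapped buffer via the
# StringIOWrapper close hook above.
def _example_string_resource():
    r = StringResource()
    r.write("in-memory contents")
    return r.read()  # -> "in-memory contents"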
class UrlResource(Resource):
"""
Wraps a url as resource
"""
def writer(self, params=None):
raise Exception("URL not writable")
def __init__(self, url):
self._url = url
def reader(self, params=None):
params = self._mkparams(params)
timeout = params["timeout"] if "timeout" in params else 1000
data = params["data"] if "data" in params else None
return request.urlopen(self._url, timeout=timeout, data=data)
def child(self, subpath: str) -> 'Resource':
        return UrlResource(urlparse.urljoin(self._url, subpath))
def parent(self) -> 'Resource':
up = [x for x in urlparse.urlsplit(self._url)]
p = Path(up[2]).parent
up[2] = str(p)
return UrlResource(urlparse.urlunsplit(up))
def path(self) -> str:
return self._url
def ext(self) -> str:
return splitext(self._url)[1]
def basename(self) -> str:
        return Path(urlparse.urlsplit(self._url)[2]).name
def descriptor(self) -> str:
return self.path()
__resource_creator = {
"string:": lambda x: StringResource("" if x == "string:" else x[len("string:"):]),
"http:": lambda x: UrlResource(x),
"https:": lambda x: UrlResource(x)
}
def resource(path: Union[str, Resource]) -> 'Resource':
if isinstance(path, Resource):
return path
for key in __resource_creator.keys():
if path.startswith(key):
return __resource_creator[key](path)
return FileResource(path)
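# Hedged usage sketch (illustrative only, not part of the original module).
# The temporary file path below is an assumption made up for the example.
def _example_resource_usage():
    s = resource("string:hello world")          # StringResource
    assert s.read() == "hello world"
    f = resource("/tmp/resource_demo.txt")      # FileResource (hypothetical path)
    f.write("round trip", {"compress": True})   # zlib-compressed on disk
    assert f.read({"compress": True}) == "round trip"
    return f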
|
|
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import errno
import logging
import mimetypes
import operator
import os
import parquet
import posixpath
import re
import shutil
import stat as stat_module
import urllib
from datetime import datetime
from django.contrib import messages
from django.contrib.auth.models import User, Group
from django.core.urlresolvers import reverse
from django.template.defaultfilters import stringformat, filesizeformat
from django.http import Http404, HttpResponse, HttpResponseNotModified
from django.views.decorators.http import require_http_methods
from django.views.static import was_modified_since
from django.shortcuts import redirect
from django.utils.functional import curry
from django.utils.http import http_date
from django.utils.html import escape
from django.utils.translation import ugettext as _
from cStringIO import StringIO
from gzip import GzipFile
from avro import datafile, io
from aws.s3.s3fs import S3FileSystemException
from desktop import appmanager
from desktop.lib import i18n, paginator
from desktop.lib.conf import coerce_bool
from desktop.lib.django_util import make_absolute, render, format_preserving_redirect
from desktop.lib.django_util import JsonResponse
from desktop.lib.exceptions_renderable import PopupException
from desktop.lib.fs import splitpath
from hadoop.fs.hadoopfs import Hdfs
from hadoop.fs.exceptions import WebHdfsException
from hadoop.fs.fsutils import do_overwrite_save
from filebrowser.conf import MAX_SNAPPY_DECOMPRESSION_SIZE
from filebrowser.conf import SHOW_DOWNLOAD_BUTTON
from filebrowser.conf import SHOW_UPLOAD_BUTTON
from filebrowser.lib.archives import archive_factory
from filebrowser.lib.rwx import filetype, rwx
from filebrowser.lib import xxd
from filebrowser.forms import RenameForm, UploadFileForm, UploadArchiveForm, MkDirForm, EditorForm, TouchForm,\
RenameFormSet, RmTreeFormSet, ChmodFormSet, ChownFormSet, CopyFormSet, RestoreFormSet,\
TrashPurgeForm
DEFAULT_CHUNK_SIZE_BYTES = 1024 * 4 # 4KB
MAX_CHUNK_SIZE_BYTES = 1024 * 1024 # 1MB
DOWNLOAD_CHUNK_SIZE = 64 * 1024 * 1024 # 64MB
# Defaults for "xxd"-style output.
# Sentences refer to groups of bytes printed together, within a line.
BYTES_PER_LINE = 16
BYTES_PER_SENTENCE = 2
# The maximum size the file editor will allow you to edit
MAX_FILEEDITOR_SIZE = 256 * 1024
INLINE_DISPLAY_MIMETYPE = re.compile('video/|image/|audio/|application/pdf|application/msword|application/excel|'
'application/vnd\.ms|'
'application/vnd\.openxmlformats')
logger = logging.getLogger(__name__)
class ParquetOptions(object):
def __init__(self, col=None, format='json', no_headers=True, limit=-1):
self.col = col
self.format = format
self.no_headers = no_headers
self.limit = limit
def index(request):
# Redirect to home directory by default
path = request.user.get_home_directory()
try:
if not request.fs.isdir(path):
path = '/'
except Exception:
pass
return view(request, path)
def _file_reader(fh):
"""Generator that reads a file, chunk-by-chunk."""
while True:
chunk = fh.read(DOWNLOAD_CHUNK_SIZE)
if chunk == '':
fh.close()
break
yield chunk
def download(request, path):
"""
Downloads a file.
This is inspired by django.views.static.serve.
?disposition={attachment, inline}
"""
if not request.fs.exists(path):
raise Http404(_("File not found: %(path)s.") % {'path': escape(path)})
if not request.fs.isfile(path):
raise PopupException(_("'%(path)s' is not a file.") % {'path': path})
content_type = mimetypes.guess_type(path)[0] or 'application/octet-stream'
stats = request.fs.stats(path)
mtime = stats['mtime']
size = stats['size']
if not was_modified_since(request.META.get('HTTP_IF_MODIFIED_SINCE'), mtime, size):
return HttpResponseNotModified()
# TODO(philip): Ideally a with statement would protect from leaks,
# but tricky to do here.
fh = request.fs.open(path)
response = HttpResponse(_file_reader(fh), content_type=content_type)
response["Last-Modified"] = http_date(stats['mtime'])
response["Content-Length"] = stats['size']
response['Content-Disposition'] = request.GET.get('disposition', 'attachment')
return response
def view(request, path):
"""Dispatches viewing of a path to either index() or fileview(), depending on type."""
# default_to_home is set in bootstrap.js
if 'default_to_home' in request.GET:
home_dir_path = request.user.get_home_directory()
if request.fs.isdir(home_dir_path):
return format_preserving_redirect(request, reverse(view, kwargs=dict(path=home_dir_path)))
  # default_to_trash is set in bootstrap.js
if 'default_to_trash' in request.GET:
home_trash = request.fs.join(request.fs.trash_path, 'Current', request.user.get_home_directory()[1:])
if request.fs.isdir(home_trash):
return format_preserving_redirect(request, reverse(view, kwargs=dict(path=home_trash)))
if request.fs.isdir(request.fs.trash_path):
return format_preserving_redirect(request, reverse(view, kwargs=dict(path=request.fs.trash_path)))
try:
decoded_path = urllib.unquote(path)
if path != decoded_path:
path = decoded_path
stats = request.fs.stats(path)
if stats.isDir:
return listdir_paged(request, path)
else:
return display(request, path)
except (IOError, WebHdfsException), e:
msg = _("Cannot access: %(path)s. ") % {'path': escape(path)}
if "Connection refused" in e.message:
msg += _(" The HDFS REST service is not available. ")
if request.user.is_superuser and not _is_hdfs_superuser(request):
msg += _(' Note: you are a Hue admin but not a HDFS superuser, "%(superuser)s" or part of HDFS supergroup, "%(supergroup)s".') \
% {'superuser': request.fs.superuser, 'supergroup': request.fs.supergroup}
if request.is_ajax():
exception = {
'error': msg
}
return JsonResponse(exception)
else:
raise PopupException(msg , detail=e)
def home_relative_view(request, path):
home_dir_path = request.user.get_home_directory()
if request.fs.exists(home_dir_path):
path = '%s%s' % (home_dir_path, path)
return view(request, path)
def edit(request, path, form=None):
"""Shows an edit form for the given path. Path does not necessarily have to exist."""
try:
stats = request.fs.stats(path)
except IOError, ioe:
# A file not found is OK, otherwise re-raise
if ioe.errno == errno.ENOENT:
stats = None
else:
raise
# Can't edit a directory
if stats and stats['mode'] & stat_module.S_IFDIR:
raise PopupException(_("Cannot edit a directory: %(path)s") % {'path': path})
# Maximum size of edit
if stats and stats['size'] > MAX_FILEEDITOR_SIZE:
raise PopupException(_("File too big to edit: %(path)s") % {'path': path})
if not form:
encoding = request.REQUEST.get('encoding') or i18n.get_site_encoding()
if stats:
f = request.fs.open(path)
try:
try:
current_contents = unicode(f.read(), encoding)
except UnicodeDecodeError:
raise PopupException(_("File is not encoded in %(encoding)s; cannot be edited: %(path)s.") % {'encoding': encoding, 'path': path})
finally:
f.close()
else:
current_contents = u""
form = EditorForm(dict(path=path, contents=current_contents, encoding=encoding))
data = dict(
exists=(stats is not None),
stats=stats,
form=form,
path=path,
filename=os.path.basename(path),
dirname=os.path.dirname(path),
breadcrumbs = parse_breadcrumbs(path),
show_download_button = SHOW_DOWNLOAD_BUTTON.get())
return render("edit.mako", request, data)
def save_file(request):
"""
The POST endpoint to save a file in the file editor.
Does the save and then redirects back to the edit page.
"""
form = EditorForm(request.POST)
is_valid = form.is_valid()
path = form.cleaned_data.get('path')
if request.POST.get('save') == "Save As":
if not is_valid:
return edit(request, path, form=form)
else:
return render("saveas.mako", request, {'form': form})
if not path:
raise PopupException(_("No path specified"))
if not is_valid:
return edit(request, path, form=form)
encoding = form.cleaned_data['encoding']
data = form.cleaned_data['contents'].encode(encoding)
try:
if request.fs.exists(path):
do_overwrite_save(request.fs, path, data)
else:
request.fs.create(path, overwrite=False, data=data)
except WebHdfsException, e:
raise PopupException(_("The file could not be saved"), detail=e.message.splitlines()[0])
except Exception, e:
raise PopupException(_("The file could not be saved"), detail=e)
messages.info(request, _('Saved %(path)s.') % {'path': os.path.basename(path)})
request.path = reverse("filebrowser.views.edit", kwargs=dict(path=path))
return edit(request, path, form)
def parse_breadcrumbs(path):
parts = splitpath(path)
url, breadcrumbs = '', []
for part in parts:
if url and not url.endswith('/'):
url += '/'
url += part
breadcrumbs.append({'url': url, 'label': part})
return breadcrumbs
def listdir(request, path, chooser):
"""
Implements directory listing (or index).
Intended to be called via view().
TODO: Remove?
"""
if not request.fs.isdir(path):
raise PopupException(_("Not a directory: %(path)s") % {'path': path})
file_filter = request.REQUEST.get('file_filter', 'any')
assert file_filter in ['any', 'file', 'dir']
home_dir_path = request.user.get_home_directory()
breadcrumbs = parse_breadcrumbs(path)
data = {
'path': path,
'file_filter': file_filter,
'breadcrumbs': breadcrumbs,
'current_dir_path': path,
'current_request_path': request.path,
'home_directory': request.fs.isdir(home_dir_path) and home_dir_path or None,
'cwd_set': True,
'is_superuser': request.user.username == request.fs.superuser,
'groups': request.user.username == request.fs.superuser and [str(x) for x in Group.objects.values_list('name', flat=True)] or [],
'users': request.user.username == request.fs.superuser and [str(x) for x in User.objects.values_list('username', flat=True)] or [],
'superuser': request.fs.superuser,
'show_upload': (request.REQUEST.get('show_upload') == 'false' and (False,) or (True,))[0],
'show_download_button': SHOW_DOWNLOAD_BUTTON.get(),
'show_upload_button': SHOW_UPLOAD_BUTTON.get()
}
stats = request.fs.listdir_stats(path)
# Include parent dir, unless at filesystem root.
if not request.fs.isroot(path):
parent_path = request.fs.parent_path(path)
parent_stat = request.fs.stats(parent_path)
# The 'path' field would be absolute, but we want its basename to be
# actually '..' for display purposes. Encode it since _massage_stats expects byte strings.
parent_stat['path'] = parent_path
stats.insert(0, parent_stat)
data['files'] = [_massage_stats(request, stat) for stat in stats]
if chooser:
return render('chooser.mako', request, data)
else:
return render('listdir.mako', request, data)
def _massage_page(page):
return {
'number': page.number,
'num_pages': page.num_pages(),
'previous_page_number': page.previous_page_number(),
'next_page_number': page.next_page_number(),
'start_index': page.start_index(),
'end_index': page.end_index(),
'total_count': page.total_count()
}
def listdir_paged(request, path):
"""
A paginated version of listdir.
Query parameters:
pagenum - The page number to show. Defaults to 1.
pagesize - How many to show on a page. Defaults to 15.
sortby=? - Specify attribute to sort by. Accepts:
(type, name, atime, mtime, size, user, group)
Defaults to name.
descending - Specify a descending sort order.
Default to false.
filter=? - Specify a substring filter to search for in
the filename field.
"""
if not request.fs.isdir(path):
raise PopupException("Not a directory: %s" % (path,))
pagenum = int(request.GET.get('pagenum', 1))
pagesize = int(request.GET.get('pagesize', 30))
do_as = None
if request.user.is_superuser or request.user.has_hue_permission(action="impersonate", app="security"):
do_as = request.GET.get('doas', request.user.username)
if hasattr(request, 'doas'):
do_as = request.doas
home_dir_path = request.user.get_home_directory()
breadcrumbs = parse_breadcrumbs(path)
if do_as:
all_stats = request.fs.do_as_user(do_as, request.fs.listdir_stats, path)
else:
all_stats = request.fs.listdir_stats(path)
# Filter first
filter_str = request.GET.get('filter', None)
if filter_str:
filtered_stats = filter(lambda sb: filter_str in sb['name'], all_stats)
all_stats = filtered_stats
# Sort next
sortby = request.GET.get('sortby', None)
descending_param = request.GET.get('descending', None)
if sortby is not None:
if sortby not in ('type', 'name', 'atime', 'mtime', 'user', 'group', 'size'):
logger.info("Invalid sort attribute '%s' for listdir." %
(sortby,))
else:
all_stats = sorted(all_stats,
key=operator.attrgetter(sortby),
reverse=coerce_bool(descending_param))
# Do pagination
page = paginator.Paginator(all_stats, pagesize).page(pagenum)
shown_stats = page.object_list
# Include parent dir always as second option, unless at filesystem root.
if not request.fs.isroot(path):
parent_path = request.fs.parent_path(path)
parent_stat = request.fs.stats(parent_path)
# The 'path' field would be absolute, but we want its basename to be
# actually '..' for display purposes. Encode it since _massage_stats expects byte strings.
parent_stat['path'] = parent_path
parent_stat['name'] = ".."
shown_stats.insert(0, parent_stat)
# Include same dir always as first option to see stats of the current folder
current_stat = request.fs.stats(path)
# The 'path' field would be absolute, but we want its basename to be
# actually '.' for display purposes. Encode it since _massage_stats expects byte strings.
current_stat['path'] = path
current_stat['name'] = "."
shown_stats.insert(1, current_stat)
page.object_list = [ _massage_stats(request, s) for s in shown_stats ]
is_trash_enabled = request.fs._get_scheme(path) == 'hdfs'
is_fs_superuser = _is_hdfs_superuser(request)
data = {
'path': path,
'breadcrumbs': breadcrumbs,
'current_request_path': request.path,
'is_trash_enabled': is_trash_enabled,
'files': page.object_list,
'page': _massage_page(page),
'pagesize': pagesize,
'home_directory': request.fs.isdir(home_dir_path) and home_dir_path or None,
'descending': descending_param,
# The following should probably be deprecated
'cwd_set': True,
'file_filter': 'any',
'current_dir_path': path,
'is_fs_superuser': is_fs_superuser,
'groups': is_fs_superuser and [str(x) for x in Group.objects.values_list('name', flat=True)] or [],
'users': is_fs_superuser and [str(x) for x in User.objects.values_list('username', flat=True)] or [],
'superuser': request.fs.superuser,
'supergroup': request.fs.supergroup,
'is_sentry_managed': request.fs.is_sentry_managed(path),
'apps': appmanager.get_apps_dict(request.user).keys(),
'show_download_button': SHOW_DOWNLOAD_BUTTON.get(),
'show_upload_button': SHOW_UPLOAD_BUTTON.get()
}
return render('listdir.mako', request, data)
def chooser(request, path):
"""
Returns the html to JFrame that will display a file prompt.
Dispatches viewing of a path to either index() or fileview(), depending on type.
"""
# default_to_home is set in bootstrap.js
home_dir_path = request.user.get_home_directory()
if 'default_to_home' in request.GET and request.fs.isdir(home_dir_path):
return listdir(request, home_dir_path, True)
if request.fs.isdir(path):
return listdir(request, path, True)
elif request.fs.isfile(path):
return display(request, path)
else:
raise Http404(_("File not found: %(path)s") % {'path': escape(path)})
def _massage_stats(request, stats):
"""
Massage a stats record as returned by the filesystem implementation
into the format that the views would like it in.
"""
path = stats['path']
normalized = request.fs.normpath(path)
return {
'path': normalized,
'name': stats['name'],
'stats': stats.to_json_dict(),
'mtime': datetime.fromtimestamp(stats['mtime']).strftime('%B %d, %Y %I:%M %p') if stats['mtime'] is not None else '',
'humansize': filesizeformat(stats['size']),
'type': filetype(stats['mode']),
'rwx': rwx(stats['mode'], stats['aclBit']),
'mode': stringformat(stats['mode'], "o"),
'url': make_absolute(request, "view", dict(path=normalized)),
'is_sentry_managed': request.fs.is_sentry_managed(path)
}
def stat(request, path):
"""
Returns just the generic stats of a file.
Intended for use via AJAX (and hence doesn't provide
an HTML view).
"""
if not request.fs.exists(path):
raise Http404(_("File not found: %(path)s") % {'path': escape(path)})
stats = request.fs.stats(path)
return JsonResponse(_massage_stats(request, stats))
def content_summary(request, path):
if not request.fs.exists(path):
raise Http404(_("File not found: %(path)s") % {'path': escape(path)})
response = {'status': -1, 'message': '', 'summary': None}
try:
stats = request.fs.get_content_summary(path)
response['status'] = 0
response['summary'] = stats.summary
except WebHdfsException, e:
    response['message'] = _("Failed to fetch content summary: ") + e.message.splitlines()[0]
return JsonResponse(response)
def display(request, path):
"""
Implements displaying part of a file.
GET arguments are length, offset, mode, compression and encoding
with reasonable defaults chosen.
Note that display by length and offset are on bytes, not on characters.
TODO(philip): Could easily built-in file type detection
(perhaps using something similar to file(1)), as well
as more advanced binary-file viewing capability (de-serialize
sequence files, decompress gzipped text files, etc.).
There exists a python-magic package to interface with libmagic.
"""
if not request.fs.isfile(path):
raise PopupException(_("Not a file: '%(path)s'") % {'path': path})
# display inline files just if it's not an ajax request
if not request.is_ajax():
mimetype = mimetypes.guess_type(path)[0]
if mimetype is not None and INLINE_DISPLAY_MIMETYPE.search(mimetype):
return redirect(reverse('filebrowser.views.download', args=[path]) + '?disposition=inline')
stats = request.fs.stats(path)
encoding = request.GET.get('encoding') or i18n.get_site_encoding()
# I'm mixing URL-based parameters and traditional
# HTTP GET parameters, since URL-based parameters
# can't naturally be optional.
# Need to deal with possibility that length is not present
# because the offset came in via the toolbar manual byte entry.
end = request.GET.get("end")
if end:
end = int(end)
begin = request.GET.get("begin", 1)
if begin:
# Subtract one to zero index for file read
begin = int(begin) - 1
if end:
offset = begin
length = end - begin
if begin >= end:
raise PopupException(_("First byte to display must be before last byte to display."))
else:
length = int(request.GET.get("length", DEFAULT_CHUNK_SIZE_BYTES))
# Display first block by default.
offset = int(request.GET.get("offset", 0))
mode = request.GET.get("mode")
compression = request.GET.get("compression")
if mode and mode not in ["binary", "text"]:
raise PopupException(_("Mode must be one of 'binary' or 'text'."))
if offset < 0:
raise PopupException(_("Offset may not be less than zero."))
if length < 0:
raise PopupException(_("Length may not be less than zero."))
if length > MAX_CHUNK_SIZE_BYTES:
raise PopupException(_("Cannot request chunks greater than %(bytes)d bytes.") % {'bytes': MAX_CHUNK_SIZE_BYTES})
# Do not decompress in binary mode.
if mode == 'binary':
compression = 'none'
# Read out based on meta.
compression, offset, length, contents =\
read_contents(compression, path, request.fs, offset, length)
# Get contents as string for text mode, or at least try
uni_contents = None
if not mode or mode == 'text':
uni_contents = unicode(contents, encoding, errors='replace')
is_binary = uni_contents.find(i18n.REPLACEMENT_CHAR) != -1
# Auto-detect mode
if not mode:
mode = is_binary and 'binary' or 'text'
# Get contents as bytes
if mode == "binary":
xxd_out = list(xxd.xxd(offset, contents, BYTES_PER_LINE, BYTES_PER_SENTENCE))
dirname = posixpath.dirname(path)
# Start with index-like data:
data = _massage_stats(request, request.fs.stats(path))
# And add a view structure:
data["success"] = True
data["view"] = {
'offset': offset,
'length': length,
'end': offset + len(contents),
'dirname': dirname,
'mode': mode,
'compression': compression,
'size': stats['size'],
'max_chunk_size': str(MAX_CHUNK_SIZE_BYTES)
}
data["filename"] = os.path.basename(path)
data["editable"] = stats['size'] < MAX_FILEEDITOR_SIZE
if mode == "binary":
# This might be the wrong thing for ?format=json; doing the
# xxd'ing in javascript might be more compact, or sending a less
# intermediate representation...
logger.debug("xxd: " + str(xxd_out))
data['view']['xxd'] = xxd_out
data['view']['masked_binary_data'] = False
else:
data['view']['contents'] = uni_contents
data['view']['masked_binary_data'] = is_binary
data['breadcrumbs'] = parse_breadcrumbs(path)
data['show_download_button'] = SHOW_DOWNLOAD_BUTTON.get()
return render("display.mako", request, data)
def read_contents(codec_type, path, fs, offset, length):
"""
Reads contents of a passed path, by appropriately decoding the data.
Arguments:
codec_type - The type of codec to use to decode. (Auto-detected if None).
path - The path of the file to read.
fs - The FileSystem instance to use to read.
offset - Offset to seek to before read begins.
length - Amount of bytes to read after offset.
Returns: A tuple of codec_type, offset, length and contents read.
"""
contents = ''
fhandle = None
try:
fhandle = fs.open(path)
stats = fs.stats(path)
# Auto codec detection for [gzip, avro, snappy, none]
if not codec_type:
contents = fhandle.read(3)
fhandle.seek(0)
codec_type = 'none'
if path.endswith('.gz') and detect_gzip(contents):
codec_type = 'gzip'
offset = 0
elif path.endswith('.avro') and detect_avro(contents):
codec_type = 'avro'
elif detect_parquet(fhandle):
codec_type = 'parquet'
elif path.endswith('.snappy') and snappy_installed():
codec_type = 'snappy'
elif snappy_installed() and stats.size <= MAX_SNAPPY_DECOMPRESSION_SIZE.get():
fhandle.seek(0)
if detect_snappy(fhandle.read()):
codec_type = 'snappy'
fhandle.seek(0)
if codec_type == 'gzip':
contents = _read_gzip(fhandle, path, offset, length, stats)
elif codec_type == 'avro':
contents = _read_avro(fhandle, path, offset, length, stats)
elif codec_type == 'parquet':
contents = _read_parquet(fhandle, path, offset, length, stats)
elif codec_type == 'snappy':
contents = _read_snappy(fhandle, path, offset, length, stats)
else:
# for 'none' type.
contents = _read_simple(fhandle, path, offset, length, stats)
finally:
if fhandle:
fhandle.close()
return (codec_type, offset, length, contents)
def _decompress_snappy(compressed_content):
try:
import snappy
return snappy.decompress(compressed_content)
except Exception, e:
raise PopupException(_('Failed to decompress snappy compressed file.'), detail=e)
def _read_snappy(fhandle, path, offset, length, stats):
if not snappy_installed():
raise PopupException(_('Failed to decompress snappy compressed file. Snappy is not installed.'))
if stats.size > MAX_SNAPPY_DECOMPRESSION_SIZE.get():
raise PopupException(_('Failed to decompress snappy compressed file. File size is greater than allowed max snappy decompression size of %d.') % MAX_SNAPPY_DECOMPRESSION_SIZE.get())
return _read_simple(StringIO(_decompress_snappy(fhandle.read())), path, offset, length, stats)
def _read_avro(fhandle, path, offset, length, stats):
contents = ''
try:
fhandle.seek(offset)
data_file_reader = datafile.DataFileReader(fhandle, io.DatumReader())
try:
contents_list = []
read_start = fhandle.tell()
# Iterate over the entire sought file.
for datum in data_file_reader:
read_length = fhandle.tell() - read_start
if read_length > length and len(contents_list) > 0:
break
else:
datum_str = str(datum) + "\n"
contents_list.append(datum_str)
finally:
data_file_reader.close()
contents = "".join(contents_list)
except:
logging.exception("Could not read avro file at %s" % path)
raise PopupException(_("Failed to read Avro file."))
return contents
def _read_parquet(fhandle, path, offset, length, stats):
try:
dumped_data = StringIO()
parquet._dump(fhandle, ParquetOptions(), out=dumped_data)
dumped_data.seek(offset)
return dumped_data.read()
except:
logging.exception("Could not read parquet file at %s" % path)
raise PopupException(_("Failed to read Parquet file."))
def _read_gzip(fhandle, path, offset, length, stats):
contents = ''
if offset and offset != 0:
raise PopupException(_("Offsets are not supported with Gzip compression."))
try:
contents = GzipFile('', 'r', 0, StringIO(fhandle.read())).read(length)
except:
logging.exception("Could not decompress file at %s" % path)
raise PopupException(_("Failed to decompress file."))
return contents
def _read_simple(fhandle, path, offset, length, stats):
contents = ''
try:
fhandle.seek(offset)
contents = fhandle.read(length)
except:
logging.exception("Could not read file at %s" % path)
raise PopupException(_("Failed to read file."))
return contents
def detect_gzip(contents):
'''This is a silly small function which checks to see if the file is Gzip'''
return contents[:2] == '\x1f\x8b'
def detect_avro(contents):
'''This is a silly small function which checks to see if the file is Avro'''
# Check if the first three bytes are 'O', 'b' and 'j'
return contents[:3] == '\x4F\x62\x6A'
def detect_snappy(contents):
'''
This is a silly small function which checks to see if the file is Snappy.
It requires the entire contents of the compressed file.
This will also return False if snappy decompression is unavailable because the library is not installed.
'''
try:
import snappy
return snappy.isValidCompressed(contents)
except:
logging.exception('failed to detect snappy')
return False
def detect_parquet(fhandle):
"""
Detect parquet from magic header bytes.
"""
return parquet._check_header_magic_bytes(fhandle)
def snappy_installed():
'''Snappy is a library that isn't supported by python 2.4.'''
try:
import snappy
return True
except ImportError:
return False
except:
logging.exception('failed to verify if snappy is installed')
return False
def _calculate_navigation(offset, length, size):
"""
List of (offset, length, string) tuples for suggested navigation through the file.
If offset is -1, then this option is already "selected". (Whereas None would
be the natural pythonic way, Django's template syntax doesn't let us test
against None (since its truth value is the same as 0).)
By all means this logic ought to be in the template, but the template
language is too limiting.
"""
if offset == 0:
first, prev = (-1, None, _("First Block")), (-1, None, _("Previous Block"))
else:
first, prev = (0, length, _("First Block")), (max(0, offset - length), length, _("Previous Block"))
if offset + length >= size:
next, last = (-1, None, _("Next Block")), (-1, None, _("Last Block"))
else:
# Off-by-one reasoning: if length equals size, the last block should start at 0.
next, last = (offset + length, length, _("Next Block")), (max(0, size - length), length, _("Last Block"))
return first, prev, next, last
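# A minimal illustrative sketch of the tuples produced for a 10,000 byte file
# browsed 4,096 bytes at a time, starting at offset 0. The concrete numbers are
# assumptions chosen for the example.
def _example_calculate_navigation():
    first, prev, next_, last = _calculate_navigation(0, 4096, 10000)
    # At the start of the file "first" and "previous" are disabled (offset -1).
    assert first[0] == -1 and prev[0] == -1
    # "Next" begins where this block ends; "last" begins at size - length.
    assert next_[:2] == (4096, 4096)
    assert last[:2] == (5904, 4096)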
def default_initial_value_extractor(request, parameter_names):
initial_values = {}
for p in parameter_names:
val = request.GET.get(p)
if val:
initial_values[p] = val
return initial_values
def formset_initial_value_extractor(request, parameter_names):
"""
Builds a list of data that formsets should use by extending some fields to every object,
whilst others are assumed to be received in order.
Formsets should receive data that looks like this: [{'param1': <something>,...}, ...].
The formsets should then handle construction on their own.
"""
def _initial_value_extractor(request):
if not submitted:
return []
# Build data with list of in order parameters receive in POST data
# Size can be inferred from largest list returned in POST data
data = []
for param in submitted:
i = 0
for val in request.POST.getlist(param):
if len(data) == i:
data.append({})
data[i][param] = val
i += 1
# Extend every data object with recurring params
for kwargs in data:
for recurrent in recurring:
kwargs[recurrent] = request.POST.get(recurrent)
initial_data = data
return {'initial': initial_data}
return _initial_value_extractor
def default_arg_extractor(request, form, parameter_names):
return [form.cleaned_data[p] for p in parameter_names]
def formset_arg_extractor(request, formset, parameter_names):
data = []
for form in formset.forms:
data_dict = {}
for p in parameter_names:
data_dict[p] = form.cleaned_data[p]
data.append(data_dict)
return data
def default_data_extractor(request):
return {'data': request.POST.copy()}
def formset_data_extractor(recurring=[], submitted=[]):
"""
Builds a list of data that formsets should use by extending some fields to every object,
whilst others are assumed to be received in order.
Formsets should receive data that looks like this: [{'param1': <something>,...}, ...].
The formsets should then handle construction on their own.
"""
def _data_extractor(request):
if not submitted:
return []
# Build data with list of in order parameters receive in POST data
# Size can be inferred from largest list returned in POST data
data = []
for param in submitted:
i = 0
for val in request.POST.getlist(param):
if len(data) == i:
data.append({})
data[i][param] = val
i += 1
# Extend every data object with recurring params
for kwargs in data:
for recurrent in recurring:
kwargs[recurrent] = request.POST.get(recurrent)
initial = list(data)
return {'initial': initial, 'data': data}
return _data_extractor
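# A minimal illustrative sketch of the data shape the extractor builds. The
# request is assumed to be a Django request whose POST holds src_path twice and
# a single shared dest_path.
def _example_formset_data_extractor(request):
    extractor = formset_data_extractor(recurring=['dest_path'],
                                       submitted=['src_path'])
    # With POST src_path=['/a/x', '/a/y'] and dest_path='/b' this returns
    # {'initial': [{'src_path': '/a/x', 'dest_path': '/b'},
    #              {'src_path': '/a/y', 'dest_path': '/b'}],
    #  'data': [ ...the same two dicts... ]}
    return extractor(request)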
def generic_op(form_class, request, op, parameter_names, piggyback=None, template="fileop.mako", data_extractor=default_data_extractor, arg_extractor=default_arg_extractor, initial_value_extractor=default_initial_value_extractor, extra_params=None):
"""
Generic implementation for several operations.
@param form_class form to instantiate
@param request incoming request, used for parameters
@param op callable with the filesystem operation
@param parameter_names list of form parameters that are extracted and then passed to op
@param piggyback list of form parameters whose file stats to look up after the operation
@param data_extractor function that extracts POST data to be used by op
@param arg_extractor function that extracts args from a given form or formset
@param initial_value_extractor function that extracts the initial values of a form or formset
@param extra_params dictionary of extra parameters to send to the template for rendering
"""
# Use next for non-ajax requests, when available.
next = request.GET.get("next", request.POST.get("next", None))
ret = dict({
'next': next
})
if extra_params is not None:
ret['extra_params'] = extra_params
for p in parameter_names:
val = request.REQUEST.get(p)
if val:
ret[p] = val
if request.method == 'POST':
form = form_class(**data_extractor(request))
ret['form'] = form
if form.is_valid():
args = arg_extractor(request, form, parameter_names)
try:
op(*args)
except (IOError, WebHdfsException), e:
msg = _("Cannot perform operation.")
# TODO: Only apply this message for HDFS
if request.user.is_superuser and not _is_hdfs_superuser(request):
msg += _(' Note: you are a Hue admin but not a HDFS superuser, "%(superuser)s" or part of HDFS supergroup, "%(supergroup)s".') \
% {'superuser': request.fs.superuser, 'supergroup': request.fs.supergroup}
raise PopupException(msg, detail=e)
except S3FileSystemException, e:
msg = _("S3 filesystem exception.")
raise PopupException(msg, detail=e)
except NotImplementedError, e:
msg = _("Cannot perform operation.")
raise PopupException(msg, detail=e)
if next:
logging.debug("Next: %s" % next)
# Doesn't need to be quoted: quoting is done by HttpResponseRedirect.
return format_preserving_redirect(request, next)
ret["success"] = True
try:
if piggyback:
piggy_path = form.cleaned_data[piggyback]
ret["result"] = _massage_stats(request, request.fs.stats(piggy_path))
except Exception, e:
# Hard to report these more naturally here. These happen either
# because of a bug in the piggy-back code or because of a
# race condition.
logger.exception("Exception while processing piggyback data")
ret["result_error"] = True
ret['user'] = request.user
return render(template, request, ret)
else:
# Initial parameters may be specified with get with the default extractor
initial_values = initial_value_extractor(request, parameter_names)
formset = form_class(initial=initial_values)
ret['form'] = formset
return render(template, request, ret)
def rename(request):
def smart_rename(src_path, dest_path):
"""If dest_path doesn't have a directory specified, use same dir."""
if "#" in dest_path:
raise PopupException(_("Could not rename folder \"%s\" to \"%s\": Hashes are not allowed in filenames." % (src_path, dest_path)))
if "/" not in dest_path:
src_dir = os.path.dirname(src_path)
dest_path = request.fs.join(src_dir, dest_path)
if request.fs.exists(dest_path):
raise PopupException(_('The destination path "%s" already exists.') % dest_path)
request.fs.rename(src_path, dest_path)
return generic_op(RenameForm, request, smart_rename, ["src_path", "dest_path"], None)
def mkdir(request):
def smart_mkdir(path, name):
# Make sure only one directory is specified at a time.
# No absolute directory specification allowed.
if posixpath.sep in name or "#" in name:
raise PopupException(_("Could not name folder \"%s\": Slashes or hashes are not allowed in filenames." % name))
request.fs.mkdir(request.fs.join(path, name))
return generic_op(MkDirForm, request, smart_mkdir, ["path", "name"], "path")
def touch(request):
def smart_touch(path, name):
# Make sure only the filename is specified.
# No absolute path specification allowed.
if posixpath.sep in name:
raise PopupException(_("Could not name file \"%s\": Slashes are not allowed in filenames." % name))
request.fs.create(request.fs.join(path, name))
return generic_op(TouchForm, request, smart_touch, ["path", "name"], "path")
@require_http_methods(["POST"])
def rmtree(request):
recurring = []
params = ["path"]
def bulk_rmtree(*args, **kwargs):
for arg in args:
request.fs.do_as_user(request.user, request.fs.rmtree, arg['path'], 'skip_trash' in request.GET)
return generic_op(RmTreeFormSet, request, bulk_rmtree, ["path"], None,
data_extractor=formset_data_extractor(recurring, params),
arg_extractor=formset_arg_extractor,
initial_value_extractor=formset_initial_value_extractor)
@require_http_methods(["POST"])
def move(request):
recurring = ['dest_path']
params = ['src_path']
def bulk_move(*args, **kwargs):
for arg in args:
request.fs.rename(arg['src_path'], arg['dest_path'])
return generic_op(RenameFormSet, request, bulk_move, ["src_path", "dest_path"], None,
data_extractor=formset_data_extractor(recurring, params),
arg_extractor=formset_arg_extractor,
initial_value_extractor=formset_initial_value_extractor)
@require_http_methods(["POST"])
def copy(request):
recurring = ['dest_path']
params = ['src_path']
def bulk_copy(*args, **kwargs):
for arg in args:
request.fs.copy(arg['src_path'], arg['dest_path'], recursive=True, owner=request.user)
return generic_op(CopyFormSet, request, bulk_copy, ["src_path", "dest_path"], None,
data_extractor=formset_data_extractor(recurring, params),
arg_extractor=formset_arg_extractor,
initial_value_extractor=formset_initial_value_extractor)
@require_http_methods(["POST"])
def chmod(request):
recurring = ["sticky", "user_read", "user_write", "user_execute", "group_read", "group_write", "group_execute", "other_read", "other_write", "other_execute"]
params = ["path"]
def bulk_chmod(*args, **kwargs):
op = curry(request.fs.chmod, recursive=request.POST.get('recursive', False))
for arg in args:
op(arg['path'], arg['mode'])
# mode here is abused: on input, it's a string, but when retrieved,
# it's an int.
return generic_op(ChmodFormSet, request, bulk_chmod, ['path', 'mode'], "path",
data_extractor=formset_data_extractor(recurring, params),
arg_extractor=formset_arg_extractor,
initial_value_extractor=formset_initial_value_extractor)
@require_http_methods(["POST"])
def chown(request):
# This is a bit clever: generic_op takes an argument (here, args), indicating
# which POST parameters to pick out and pass to the given function.
# We update that mapping based on whether or not the user selected "other".
param_names = ["path", "user", "group"]
if request.POST.get("user") == "__other__":
param_names[1] = "user_other"
if request.POST.get("group") == "__other__":
param_names[2] = "group_other"
recurring = ["user", "group", "user_other", "group_other"]
params = ["path"]
def bulk_chown(*args, **kwargs):
op = curry(request.fs.chown, recursive=request.POST.get('recursive', False))
for arg in args:
varg = [arg[param] for param in param_names]
op(*varg)
return generic_op(ChownFormSet, request, bulk_chown, param_names, "path",
data_extractor=formset_data_extractor(recurring, params),
arg_extractor=formset_arg_extractor,
initial_value_extractor=formset_initial_value_extractor)
@require_http_methods(["POST"])
def trash_restore(request):
recurring = []
params = ["path"]
def bulk_restore(*args, **kwargs):
for arg in args:
request.fs.do_as_user(request.user, request.fs.restore, arg['path'])
return generic_op(RestoreFormSet, request, bulk_restore, ["path"], None,
data_extractor=formset_data_extractor(recurring, params),
arg_extractor=formset_arg_extractor,
initial_value_extractor=formset_initial_value_extractor)
@require_http_methods(["POST"])
def trash_purge(request):
return generic_op(TrashPurgeForm, request, request.fs.purge_trash, [], None)
@require_http_methods(["POST"])
def upload_file(request):
"""
A wrapper around the actual upload view function to clean up the temporary file afterwards if it fails.
Returns JSON.
e.g. {'status': 0/1, 'data': 'message', ...}
"""
response = {'status': -1, 'data': ''}
try:
resp = _upload_file(request)
response.update(resp)
except Exception, ex:
response['data'] = str(ex).split('\n', 1)[0]
hdfs_file = request.FILES.get('hdfs_file')
if hdfs_file and hasattr(hdfs_file, 'remove'): # TODO: Call from proxyFS
hdfs_file.remove()
return JsonResponse(response)
def _upload_file(request):
"""
Handles file uploaded by HDFSfileUploadHandler.
The uploaded file is stored in HDFS at its destination with a .tmp suffix.
We just need to rename it to the destination path.
"""
form = UploadFileForm(request.POST, request.FILES)
response = {'status': -1, 'data': ''}
if request.META.get('upload_failed'):
raise PopupException(request.META.get('upload_failed'))
if form.is_valid():
uploaded_file = request.FILES['hdfs_file']
dest = form.cleaned_data['dest']
filepath = request.fs.join(dest, uploaded_file.name)
if request.fs.isdir(dest) and posixpath.sep in uploaded_file.name:
raise PopupException(_('Sorry, no "%(sep)s" in the filename %(name)s.' % {'sep': posixpath.sep, 'name': uploaded_file.name}))
try:
request.fs.upload(file=uploaded_file, path=dest, username=request.user.username)
response['status'] = 0
except IOError, ex:
already_exists = False
try:
already_exists = request.fs.exists(dest)
except Exception:
pass
if already_exists:
msg = _('Destination %(name)s already exists.') % {'name': dest}
else:
msg = _('Copy to %(name)s failed: %(error)s') % {'name': dest, 'error': ex}
raise PopupException(msg)
response.update({
'path': filepath,
'result': _massage_stats(request, request.fs.stats(filepath)),
'next': request.GET.get("next")
})
return response
else:
raise PopupException(_("Error in upload form: %s") % (form.errors,))
@require_http_methods(["POST"])
def upload_archive(request):
"""
A wrapper around the actual upload view function to clean up the temporary file afterwards.
Returns JSON.
e.g. {'status': 0/1, 'data': 'message', ...}
"""
response = {'status': -1, 'data': ''}
try:
try:
resp = _upload_archive(request)
response.update(resp)
except Exception, ex:
response['data'] = str(ex)
finally:
hdfs_file = request.FILES.get('hdfs_file')
if hdfs_file:
hdfs_file.remove()
return JsonResponse(response)
def _upload_archive(request):
"""
Handles archive upload.
The uploaded file is stored in memory.
We need to extract it and rename it.
"""
form = UploadArchiveForm(request.POST, request.FILES)
response = {'status': -1, 'data': ''}
if form.is_valid():
uploaded_file = request.FILES['archive']
# Always a dir
if request.fs.isdir(form.cleaned_data['dest']) and posixpath.sep in uploaded_file.name:
raise PopupException(_('No "%(sep)s" allowed in the filename %(name)s.' % {'sep': posixpath.sep, 'name': uploaded_file.name}))
dest = request.fs.join(form.cleaned_data['dest'], uploaded_file.name)
try:
# Extract if necessary
# Make sure dest path is without the extension
if dest.lower().endswith('.zip'):
temp_path = archive_factory(uploaded_file, 'zip').extract()
if not temp_path:
raise PopupException(_('Could not extract contents of file.'))
# Move the file to where it belongs
dest = dest[:-4]
elif dest.lower().endswith('.tar.gz') or dest.lower().endswith('.tgz'):
temp_path = archive_factory(uploaded_file, 'tgz').extract()
if not temp_path:
raise PopupException(_('Could not extract contents of file.'))
# Move the file to where it belongs
dest = dest[:-7] if dest.lower().endswith('.tar.gz') else dest[:-4]
elif dest.lower().endswith('.bz2') or dest.lower().endswith('.bzip2'):
temp_path = archive_factory(uploaded_file, 'bz2').extract()
if not temp_path:
raise PopupException(_('Could not extract contents of file.'))
# Move the file to where it belongs
dest = dest[:-6] if dest.lower().endswith('.bzip2') else dest[:-4]
else:
raise PopupException(_('Could not interpret archive type.'))
request.fs.copyFromLocal(temp_path, dest)
shutil.rmtree(temp_path)
response['status'] = 0
except IOError, ex:
already_exists = False
try:
already_exists = request.fs.exists(dest)
except Exception:
pass
if already_exists:
msg = _('Destination %(name)s already exists.') % {'name': dest}
else:
msg = _('Copy to %(name)s failed: %(error)s') % {'name': dest, 'error': ex}
raise PopupException(msg)
response.update({
'path': dest,
'result': _massage_stats(request, request.fs.stats(dest)),
'next': request.GET.get("next")
})
return response
else:
raise PopupException(_("Error in upload form: %s") % (form.errors,))
def status(request):
status = request.fs.status()
data = {
# Beware: "messages" is special in the context browser.
'msgs': status.get_messages(),
'health': status.get_health(),
'datanode_report': status.get_datanode_report(),
'name': request.fs.name
}
return render("status.mako", request, data)
def location_to_url(location, strict=True):
"""
If possible, returns a file browser URL to the location.
Prunes HDFS URI to path.
Location is a URI, if strict is True.
Python doesn't seem to have a readily-available URI-comparison
library, so this is quite hacky.
"""
if location is None:
return None
split_path = Hdfs.urlsplit(location)
if (strict and not split_path[1]) or not split_path[2]:
# In strict mode a netloc (full URI) is required; a path is always required.
return None
path = location
if split_path[0] == 'hdfs':
path = split_path[2]
return reverse("filebrowser.views.view", kwargs=dict(path=path))
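# A minimal illustrative sketch of the pruning performed on an HDFS URI before
# it is reversed into a filebrowser URL. The host and path are assumptions, and
# Hdfs.urlsplit is assumed to behave like urlparse.urlsplit
# (scheme, netloc, path, ...).
def _example_location_to_url():
    # 'hdfs://namenode:8020/user/alice/data' keeps only '/user/alice/data',
    # which is then handed to reverse() to build the filebrowser view URL.
    return location_to_url('hdfs://namenode:8020/user/alice/data')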
def truncate(toTruncate, charsToKeep=50):
"""
Returns a string truncated to 'charsToKeep' length plus ellipses.
"""
if len(toTruncate) > charsToKeep:
truncated = toTruncate[:charsToKeep] + "..."
return truncated
else:
return toTruncate
def _is_hdfs_superuser(request):
return request.user.username == request.fs.superuser or request.user.groups.filter(name__exact=request.fs.supergroup).exists()
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import bz2
import cStringIO
import gzip
import logging
import math
import random
import os
import tempfile
import unittest
import hamcrest as hc
import apache_beam as beam
from apache_beam.io import filebasedsource
from apache_beam.io import iobase
from apache_beam.io import range_trackers
from apache_beam.io.filesystem import CompressionTypes
# Import the following private classes for testing.
from apache_beam.io.concat_source import ConcatSource
from apache_beam.io.filebasedsource import _SingleFileSource as SingleFileSource
from apache_beam.io.filebasedsource import FileBasedSource
from apache_beam.test_pipeline import TestPipeline
from apache_beam.transforms.display import DisplayData
from apache_beam.transforms.display_test import DisplayDataItemMatcher
from apache_beam.transforms.util import assert_that
from apache_beam.transforms.util import equal_to
from apache_beam.utils.value_provider import StaticValueProvider
from apache_beam.utils.value_provider import RuntimeValueProvider
class LineSource(FileBasedSource):
def read_records(self, file_name, range_tracker):
f = self.open_file(file_name)
try:
start = range_tracker.start_position()
if start > 0:
# Any line that starts after 'start' does not belong to the current
# bundle. Seeking to (start - 1) and skipping a line moves the current
# position to the starting position of the first line that belongs to
# the current bundle.
start -= 1
f.seek(start)
line = f.readline()
start += len(line)
current = start
line = f.readline()
while line:
if not range_tracker.try_claim(current):
return
yield line.rstrip('\n')
current += len(line)
line = f.readline()
finally:
f.close()
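# A worked illustration (with assumed contents) of why read_records() seeks to
# (start - 1) and skips a line: suppose the file holds 'line0\nline1\nline2\n'
# and this bundle starts at byte 3 (inside 'line0'). Seeking to byte 2 and
# calling readline() consumes the rest of 'line0\n' and leaves the position at
# byte 6, the first line owned by this bundle ('line1'); 'line0' itself is read
# by the bundle that owns byte 0.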
class EOL(object):
LF = 1
CRLF = 2
MIXED = 3
LF_WITH_NOTHING_AT_LAST_LINE = 4
def write_data(
num_lines, no_data=False, directory=None, prefix=tempfile.template,
eol=EOL.LF):
all_data = []
with tempfile.NamedTemporaryFile(
delete=False, dir=directory, prefix=prefix) as f:
sep_values = ['\n', '\r\n']
for i in range(num_lines):
data = '' if no_data else 'line' + str(i)
all_data.append(data)
if eol == EOL.LF:
sep = sep_values[0]
elif eol == EOL.CRLF:
sep = sep_values[1]
elif eol == EOL.MIXED:
sep = sep_values[i % len(sep_values)]
elif eol == EOL.LF_WITH_NOTHING_AT_LAST_LINE:
sep = '' if i == (num_lines - 1) else sep_values[0]
else:
raise ValueError('Received unknown value %s for eol.' % eol)
f.write(data + sep)
return f.name, all_data
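# A small worked example under the default EOL.LF: write_data(3) writes
# 'line0\nline1\nline2\n' to a temp file and returns
# (file_name, ['line0', 'line1', 'line2']). Each record occupies 6 bytes on
# disk ('line' + one digit + '\n'), which is why the size-estimation tests
# below multiply line counts by 6.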
def _write_prepared_data(data, directory=None,
prefix=tempfile.template, suffix=''):
with tempfile.NamedTemporaryFile(
delete=False, dir=directory, prefix=prefix, suffix=suffix) as f:
f.write(data)
return f.name
def write_prepared_pattern(data, suffixes=None):
assert data, 'Data (%s) seems to be empty' % data
if suffixes is None:
suffixes = [''] * len(data)
temp_dir = tempfile.mkdtemp()
for i, d in enumerate(data):
file_name = _write_prepared_data(d, temp_dir, prefix='mytemp',
suffix=suffixes[i])
return file_name[:file_name.rfind(os.path.sep)] + os.path.sep + 'mytemp*'
def write_pattern(lines_per_file, no_data=False):
temp_dir = tempfile.mkdtemp()
all_data = []
file_name = None
start_index = 0
for i in range(len(lines_per_file)):
file_name, data = write_data(lines_per_file[i], no_data=no_data,
directory=temp_dir, prefix='mytemp')
all_data.extend(data)
start_index += lines_per_file[i]
assert file_name
return (
file_name[:file_name.rfind(os.path.sep)] + os.path.sep + 'mytemp*',
all_data)
class TestConcatSource(unittest.TestCase):
class DummySource(iobase.BoundedSource):
def __init__(self, values):
self._values = values
def split(self, desired_bundle_size, start_position=None,
stop_position=None):
# Simply divides the values into two bundles.
middle = len(self._values) / 2
yield iobase.SourceBundle(0.5, TestConcatSource.DummySource(
self._values[:middle]), None, None)
yield iobase.SourceBundle(0.5, TestConcatSource.DummySource(
self._values[middle:]), None, None)
def get_range_tracker(self, start_position, stop_position):
if start_position is None:
start_position = 0
if stop_position is None:
stop_position = len(self._values)
return range_trackers.OffsetRangeTracker(start_position, stop_position)
def read(self, range_tracker):
for index, value in enumerate(self._values):
if not range_tracker.try_claim(index):
return
yield value
def estimate_size(self):
return len(self._values) # Assuming each value to be 1 byte.
def setUp(self):
# Reduce the size of thread pools. Without this, test execution may fail in
# environments with a limited amount of resources.
filebasedsource.MAX_NUM_THREADS_FOR_SIZE_ESTIMATION = 2
def test_read(self):
sources = [TestConcatSource.DummySource(range(start, start + 10)) for start
in [0, 10, 20]]
concat = ConcatSource(sources)
range_tracker = concat.get_range_tracker(None, None)
read_data = [value for value in concat.read(range_tracker)]
self.assertItemsEqual(range(30), read_data)
def test_split(self):
sources = [TestConcatSource.DummySource(range(start, start + 10)) for start
in [0, 10, 20]]
concat = ConcatSource(sources)
splits = [split for split in concat.split()]
self.assertEquals(6, len(splits))
# Reading all splits
read_data = []
for split in splits:
range_tracker_for_split = split.source.get_range_tracker(
split.start_position,
split.stop_position)
read_data.extend([value for value in split.source.read(
range_tracker_for_split)])
self.assertItemsEqual(range(30), read_data)
def test_estimate_size(self):
sources = [TestConcatSource.DummySource(range(start, start + 10)) for start
in [0, 10, 20]]
concat = ConcatSource(sources)
self.assertEquals(30, concat.estimate_size())
class TestFileBasedSource(unittest.TestCase):
def setUp(self):
# Reduce the size of thread pools. Without this, test execution may fail in
# environments with a limited amount of resources.
filebasedsource.MAX_NUM_THREADS_FOR_SIZE_ESTIMATION = 2
def test_string_or_value_provider_only(self):
str_file_pattern = tempfile.NamedTemporaryFile(delete=False).name
self.assertEqual(str_file_pattern,
FileBasedSource(str_file_pattern)._pattern.value)
static_vp_file_pattern = StaticValueProvider(value_type=str,
value=str_file_pattern)
self.assertEqual(static_vp_file_pattern,
FileBasedSource(static_vp_file_pattern)._pattern)
runtime_vp_file_pattern = RuntimeValueProvider(
option_name='arg',
value_type=str,
default_value=str_file_pattern)
self.assertEqual(runtime_vp_file_pattern,
FileBasedSource(runtime_vp_file_pattern)._pattern)
invalid_file_pattern = 123
with self.assertRaises(TypeError):
FileBasedSource(invalid_file_pattern)
def test_validation_file_exists(self):
file_name, _ = write_data(10)
LineSource(file_name)
def test_validation_directory_non_empty(self):
temp_dir = tempfile.mkdtemp()
file_name, _ = write_data(10, directory=temp_dir)
LineSource(file_name)
def test_validation_failing(self):
no_files_found_error = 'No files found based on the file pattern*'
with self.assertRaisesRegexp(IOError, no_files_found_error):
LineSource('dummy_pattern')
with self.assertRaisesRegexp(IOError, no_files_found_error):
temp_dir = tempfile.mkdtemp()
LineSource(os.path.join(temp_dir, '*'))
def test_validation_file_missing_verification_disabled(self):
LineSource('dummy_pattern', validate=False)
def test_fully_read_single_file(self):
file_name, expected_data = write_data(10)
assert len(expected_data) == 10
fbs = LineSource(file_name)
range_tracker = fbs.get_range_tracker(None, None)
read_data = [record for record in fbs.read(range_tracker)]
self.assertItemsEqual(expected_data, read_data)
def test_single_file_display_data(self):
file_name, _ = write_data(10)
fbs = LineSource(file_name)
dd = DisplayData.create_from(fbs)
expected_items = [
DisplayDataItemMatcher('file_pattern', file_name),
DisplayDataItemMatcher('compression', 'auto')]
hc.assert_that(dd.items,
hc.contains_inanyorder(*expected_items))
def test_fully_read_file_pattern(self):
pattern, expected_data = write_pattern([5, 3, 12, 8, 8, 4])
assert len(expected_data) == 40
fbs = LineSource(pattern)
range_tracker = fbs.get_range_tracker(None, None)
read_data = [record for record in fbs.read(range_tracker)]
self.assertItemsEqual(expected_data, read_data)
def test_fully_read_file_pattern_with_empty_files(self):
pattern, expected_data = write_pattern([5, 0, 12, 0, 8, 0])
assert len(expected_data) == 25
fbs = LineSource(pattern)
range_tracker = fbs.get_range_tracker(None, None)
read_data = [record for record in fbs.read(range_tracker)]
self.assertItemsEqual(expected_data, read_data)
def test_estimate_size_of_file(self):
file_name, expected_data = write_data(10)
assert len(expected_data) == 10
fbs = LineSource(file_name)
self.assertEquals(10 * 6, fbs.estimate_size())
def test_estimate_size_of_pattern(self):
pattern, expected_data = write_pattern([5, 3, 10, 8, 8, 4])
assert len(expected_data) == 38
fbs = LineSource(pattern)
self.assertEquals(38 * 6, fbs.estimate_size())
pattern, expected_data = write_pattern([5, 3, 9])
assert len(expected_data) == 17
fbs = LineSource(pattern)
self.assertEquals(17 * 6, fbs.estimate_size())
def test_estimate_size_with_sampling_same_size(self):
num_files = 2 * FileBasedSource.MIN_NUMBER_OF_FILES_TO_STAT
pattern, _ = write_pattern([10] * num_files)
# Each line will be of length 6 since write_pattern() uses
# ('line' + line number + '\n') as data.
self.assertEqual(
6 * 10 * num_files, FileBasedSource(pattern).estimate_size())
def test_estimate_size_with_sampling_different_sizes(self):
num_files = 2 * FileBasedSource.MIN_NUMBER_OF_FILES_TO_STAT
# Each line will be of length 8 since write_pattern() uses
# ('line' + line number + '\n') as data.
base_size = 500
variance = 5
sizes = []
for _ in xrange(num_files):
sizes.append(int(random.uniform(base_size - variance,
base_size + variance)))
pattern, _ = write_pattern(sizes)
tolerance = 0.05
self.assertAlmostEqual(
base_size * 8 * num_files,
FileBasedSource(pattern).estimate_size(),
delta=base_size * 8 * num_files * tolerance)
def test_splits_into_subranges(self):
pattern, expected_data = write_pattern([5, 9, 6])
assert len(expected_data) == 20
fbs = LineSource(pattern)
splits = [split for split in fbs.split(desired_bundle_size=15)]
expected_num_splits = (
math.ceil(float(6 * 5) / 15) +
math.ceil(float(6 * 9) / 15) +
math.ceil(float(6 * 6) / 15))
assert len(splits) == expected_num_splits
def test_read_splits_single_file(self):
file_name, expected_data = write_data(100)
assert len(expected_data) == 100
fbs = LineSource(file_name)
splits = [split for split in fbs.split(desired_bundle_size=33)]
# Reading all splits
read_data = []
for split in splits:
source = split.source
range_tracker = source.get_range_tracker(split.start_position,
split.stop_position)
data_from_split = [data for data in source.read(range_tracker)]
read_data.extend(data_from_split)
self.assertItemsEqual(expected_data, read_data)
def test_read_splits_file_pattern(self):
pattern, expected_data = write_pattern([34, 66, 40, 24, 24, 12])
assert len(expected_data) == 200
fbs = LineSource(pattern)
splits = [split for split in fbs.split(desired_bundle_size=50)]
# Reading all splits
read_data = []
for split in splits:
source = split.source
range_tracker = source.get_range_tracker(split.start_position,
split.stop_position)
data_from_split = [data for data in source.read(range_tracker)]
read_data.extend(data_from_split)
self.assertItemsEqual(expected_data, read_data)
def _run_source_test(self, pattern, expected_data, splittable=True):
pipeline = TestPipeline()
pcoll = pipeline | 'Read' >> beam.io.Read(LineSource(
pattern, splittable=splittable))
assert_that(pcoll, equal_to(expected_data))
pipeline.run()
def test_source_file(self):
file_name, expected_data = write_data(100)
assert len(expected_data) == 100
self._run_source_test(file_name, expected_data)
def test_source_pattern(self):
pattern, expected_data = write_pattern([34, 66, 40, 24, 24, 12])
assert len(expected_data) == 200
self._run_source_test(pattern, expected_data)
def test_unsplittable_does_not_split(self):
pattern, expected_data = write_pattern([5, 9, 6])
assert len(expected_data) == 20
fbs = LineSource(pattern, splittable=False)
splits = [split for split in fbs.split(desired_bundle_size=15)]
self.assertEquals(3, len(splits))
def test_source_file_unsplittable(self):
file_name, expected_data = write_data(100)
assert len(expected_data) == 100
self._run_source_test(file_name, expected_data, False)
def test_source_pattern_unsplittable(self):
pattern, expected_data = write_pattern([34, 66, 40, 24, 24, 12])
assert len(expected_data) == 200
self._run_source_test(pattern, expected_data, False)
def test_read_file_bzip2(self):
_, lines = write_data(10)
filename = tempfile.NamedTemporaryFile(
delete=False, prefix=tempfile.template).name
with bz2.BZ2File(filename, 'wb') as f:
f.write('\n'.join(lines))
pipeline = TestPipeline()
pcoll = pipeline | 'Read' >> beam.io.Read(LineSource(
filename,
splittable=False,
compression_type=CompressionTypes.BZIP2))
assert_that(pcoll, equal_to(lines))
pipeline.run()
def test_read_file_gzip(self):
_, lines = write_data(10)
filename = tempfile.NamedTemporaryFile(
delete=False, prefix=tempfile.template).name
with gzip.GzipFile(filename, 'wb') as f:
f.write('\n'.join(lines))
pipeline = TestPipeline()
pcoll = pipeline | 'Read' >> beam.io.Read(LineSource(
filename,
splittable=False,
compression_type=CompressionTypes.GZIP))
assert_that(pcoll, equal_to(lines))
pipeline.run()
def test_read_pattern_bzip2(self):
_, lines = write_data(200)
splits = [0, 34, 100, 140, 164, 188, 200]
chunks = [lines[splits[i-1]:splits[i]] for i in xrange(1, len(splits))]
compressed_chunks = []
for c in chunks:
compressobj = bz2.BZ2Compressor()
compressed_chunks.append(
compressobj.compress('\n'.join(c)) + compressobj.flush())
file_pattern = write_prepared_pattern(compressed_chunks)
pipeline = TestPipeline()
pcoll = pipeline | 'Read' >> beam.io.Read(LineSource(
file_pattern,
splittable=False,
compression_type=CompressionTypes.BZIP2))
assert_that(pcoll, equal_to(lines))
pipeline.run()
def test_read_pattern_gzip(self):
_, lines = write_data(200)
splits = [0, 34, 100, 140, 164, 188, 200]
chunks = [lines[splits[i-1]:splits[i]] for i in xrange(1, len(splits))]
compressed_chunks = []
for c in chunks:
out = cStringIO.StringIO()
with gzip.GzipFile(fileobj=out, mode="w") as f:
f.write('\n'.join(c))
compressed_chunks.append(out.getvalue())
file_pattern = write_prepared_pattern(compressed_chunks)
pipeline = TestPipeline()
pcoll = pipeline | 'Read' >> beam.io.Read(LineSource(
file_pattern,
splittable=False,
compression_type=CompressionTypes.GZIP))
assert_that(pcoll, equal_to(lines))
pipeline.run()
def test_read_auto_single_file_bzip2(self):
_, lines = write_data(10)
filename = tempfile.NamedTemporaryFile(
delete=False, prefix=tempfile.template, suffix='.bz2').name
with bz2.BZ2File(filename, 'wb') as f:
f.write('\n'.join(lines))
pipeline = TestPipeline()
pcoll = pipeline | 'Read' >> beam.io.Read(LineSource(
filename,
compression_type=CompressionTypes.AUTO))
assert_that(pcoll, equal_to(lines))
pipeline.run()
def test_read_auto_single_file_gzip(self):
_, lines = write_data(10)
filename = tempfile.NamedTemporaryFile(
delete=False, prefix=tempfile.template, suffix='.gz').name
with gzip.GzipFile(filename, 'wb') as f:
f.write('\n'.join(lines))
pipeline = TestPipeline()
pcoll = pipeline | 'Read' >> beam.io.Read(LineSource(
filename,
compression_type=CompressionTypes.AUTO))
assert_that(pcoll, equal_to(lines))
pipeline.run()
def test_read_auto_pattern(self):
_, lines = write_data(200)
splits = [0, 34, 100, 140, 164, 188, 200]
chunks = [lines[splits[i - 1]:splits[i]] for i in xrange(1, len(splits))]
compressed_chunks = []
for c in chunks:
out = cStringIO.StringIO()
with gzip.GzipFile(fileobj=out, mode="w") as f:
f.write('\n'.join(c))
compressed_chunks.append(out.getvalue())
file_pattern = write_prepared_pattern(
compressed_chunks, suffixes=['.gz']*len(chunks))
pipeline = TestPipeline()
pcoll = pipeline | 'Read' >> beam.io.Read(LineSource(
file_pattern,
compression_type=CompressionTypes.AUTO))
assert_that(pcoll, equal_to(lines))
pipeline.run()
def test_read_auto_pattern_compressed_and_uncompressed(self):
_, lines = write_data(200)
splits = [0, 34, 100, 140, 164, 188, 200]
chunks = [lines[splits[i - 1]:splits[i]] for i in xrange(1, len(splits))]
chunks_to_write = []
for i, c in enumerate(chunks):
if i%2 == 0:
out = cStringIO.StringIO()
with gzip.GzipFile(fileobj=out, mode="w") as f:
f.write('\n'.join(c))
chunks_to_write.append(out.getvalue())
else:
chunks_to_write.append('\n'.join(c))
file_pattern = write_prepared_pattern(chunks_to_write,
suffixes=(['.gz', '']*3))
pipeline = TestPipeline()
pcoll = pipeline | 'Read' >> beam.io.Read(LineSource(
file_pattern,
compression_type=CompressionTypes.AUTO))
assert_that(pcoll, equal_to(lines))
pipeline.run()
def test_splits_get_coder_from_fbs(self):
class DummyCoder(object):
val = 12345
class FileBasedSourceWithCoder(LineSource):
def default_output_coder(self):
return DummyCoder()
pattern, expected_data = write_pattern([34, 66, 40, 24, 24, 12])
self.assertEqual(200, len(expected_data))
fbs = FileBasedSourceWithCoder(pattern)
splits = [split for split in fbs.split(desired_bundle_size=50)]
self.assertTrue(len(splits))
for split in splits:
self.assertEqual(DummyCoder.val, split.source.default_output_coder().val)
class TestSingleFileSource(unittest.TestCase):
def setUp(self):
# Reduce the size of thread pools. Without this, test execution may fail in
# environments with a limited amount of resources.
filebasedsource.MAX_NUM_THREADS_FOR_SIZE_ESTIMATION = 2
def test_source_creation_fails_for_non_number_offsets(self):
start_not_a_number_error = 'start_offset must be a number*'
stop_not_a_number_error = 'stop_offset must be a number*'
file_name = 'dummy_pattern'
fbs = LineSource(file_name, validate=False)
with self.assertRaisesRegexp(TypeError, start_not_a_number_error):
SingleFileSource(
fbs, file_name='dummy_file', start_offset='aaa', stop_offset='bbb')
with self.assertRaisesRegexp(TypeError, start_not_a_number_error):
SingleFileSource(
fbs, file_name='dummy_file', start_offset='aaa', stop_offset=100)
with self.assertRaisesRegexp(TypeError, stop_not_a_number_error):
SingleFileSource(
fbs, file_name='dummy_file', start_offset=100, stop_offset='bbb')
with self.assertRaisesRegexp(TypeError, stop_not_a_number_error):
SingleFileSource(
fbs, file_name='dummy_file', start_offset=100, stop_offset=None)
with self.assertRaisesRegexp(TypeError, start_not_a_number_error):
SingleFileSource(
fbs, file_name='dummy_file', start_offset=None, stop_offset=100)
def test_source_creation_display_data(self):
file_name = 'dummy_pattern'
fbs = LineSource(file_name, validate=False)
dd = DisplayData.create_from(fbs)
expected_items = [
DisplayDataItemMatcher('compression', 'auto'),
DisplayDataItemMatcher('file_pattern', file_name)]
hc.assert_that(dd.items,
hc.contains_inanyorder(*expected_items))
def test_source_creation_fails_if_start_lg_stop(self):
start_larger_than_stop_error = (
'start_offset must be smaller than stop_offset*')
fbs = LineSource('dummy_pattern', validate=False)
SingleFileSource(
fbs, file_name='dummy_file', start_offset=99, stop_offset=100)
with self.assertRaisesRegexp(ValueError, start_larger_than_stop_error):
SingleFileSource(
fbs, file_name='dummy_file', start_offset=100, stop_offset=99)
with self.assertRaisesRegexp(ValueError, start_larger_than_stop_error):
SingleFileSource(
fbs, file_name='dummy_file', start_offset=100, stop_offset=100)
def test_estimates_size(self):
fbs = LineSource('dummy_pattern', validate=False)
# Should simply return stop_offset - start_offset
source = SingleFileSource(
fbs, file_name='dummy_file', start_offset=0, stop_offset=100)
self.assertEquals(100, source.estimate_size())
source = SingleFileSource(fbs, file_name='dummy_file', start_offset=10,
stop_offset=100)
self.assertEquals(90, source.estimate_size())
def test_read_range_at_beginning(self):
fbs = LineSource('dummy_pattern', validate=False)
file_name, expected_data = write_data(10)
assert len(expected_data) == 10
source = SingleFileSource(fbs, file_name, 0, 10 * 6)
range_tracker = source.get_range_tracker(0, 20)
read_data = [value for value in source.read(range_tracker)]
self.assertItemsEqual(expected_data[:4], read_data)
def test_read_range_at_end(self):
fbs = LineSource('dummy_pattern', validate=False)
file_name, expected_data = write_data(10)
assert len(expected_data) == 10
source = SingleFileSource(fbs, file_name, 0, 10 * 6)
range_tracker = source.get_range_tracker(40, 60)
read_data = [value for value in source.read(range_tracker)]
self.assertItemsEqual(expected_data[-3:], read_data)
def test_read_range_at_middle(self):
fbs = LineSource('dummy_pattern', validate=False)
file_name, expected_data = write_data(10)
assert len(expected_data) == 10
source = SingleFileSource(fbs, file_name, 0, 10 * 6)
range_tracker = source.get_range_tracker(20, 40)
read_data = [value for value in source.read(range_tracker)]
self.assertItemsEqual(expected_data[4:7], read_data)
def test_produces_splits_desiredsize_larger_than_size(self):
fbs = LineSource('dummy_pattern', validate=False)
file_name, expected_data = write_data(10)
assert len(expected_data) == 10
source = SingleFileSource(fbs, file_name, 0, 10 * 6)
splits = [split for split in source.split(desired_bundle_size=100)]
self.assertEquals(1, len(splits))
self.assertEquals(60, splits[0].weight)
self.assertEquals(0, splits[0].start_position)
self.assertEquals(60, splits[0].stop_position)
range_tracker = splits[0].source.get_range_tracker(None, None)
read_data = [value for value in splits[0].source.read(range_tracker)]
self.assertItemsEqual(expected_data, read_data)
def test_produces_splits_desiredsize_smaller_than_size(self):
fbs = LineSource('dummy_pattern', validate=False)
file_name, expected_data = write_data(10)
assert len(expected_data) == 10
source = SingleFileSource(fbs, file_name, 0, 10 * 6)
splits = [split for split in source.split(desired_bundle_size=25)]
self.assertEquals(3, len(splits))
read_data = []
for split in splits:
source = split.source
range_tracker = source.get_range_tracker(split.start_position,
split.stop_position)
data_from_split = [data for data in source.read(range_tracker)]
read_data.extend(data_from_split)
self.assertItemsEqual(expected_data, read_data)
def test_produce_split_with_start_and_end_positions(self):
fbs = LineSource('dummy_pattern', validate=False)
file_name, expected_data = write_data(10)
assert len(expected_data) == 10
source = SingleFileSource(fbs, file_name, 0, 10 * 6)
splits = [split for split in
source.split(desired_bundle_size=15, start_offset=10,
stop_offset=50)]
self.assertEquals(3, len(splits))
read_data = []
for split in splits:
source = split.source
range_tracker = source.get_range_tracker(split.start_position,
split.stop_position)
data_from_split = [data for data in source.read(range_tracker)]
read_data.extend(data_from_split)
self.assertItemsEqual(expected_data[2:9], read_data)
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
unittest.main()
|
|
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Virtual file system for managing files locally or in the cloud."""
__author__ = 'Pavel Simakov (psimakov@google.com)'
import datetime
import os
from entities import BaseEntity
import jinja2
from common import jinja_utils
from models import MemcacheManager
from google.appengine.api import namespace_manager
from google.appengine.ext import db
# We want to use memcache for both objects that exist and do not exist in the
# datastore. If object exists we cache its instance, if object does not exist
# we cache this object below.
NO_OBJECT = {}
class AbstractFileSystem(object):
"""A generic file system interface that forwards to an implementation."""
def __init__(self, impl):
self._impl = impl
@property
def impl(self):
return self._impl
@classmethod
def normpath(cls, path):
"""Make Windows and Linux filenames to have the same separator '/'."""
# Replace '\' into '/' and force Unicode.
if not path:
return path
return u'' + path.replace('\\', '/')
def isfile(self, filename):
"""Checks if file exists, similar to os.path.isfile(...)."""
return self._impl.isfile(filename)
def open(self, filename):
"""Returns a stream with the file content, similar to open(...)."""
return self._impl.get(filename)
def get(self, filename):
"""Returns bytes with the file content, but no metadata."""
return self._impl.get(filename).read()
def put(self, filename, stream, **kwargs):
"""Replaces the contents of the file with the bytes in the stream."""
self._impl.put(filename, stream, **kwargs)
def delete(self, filename):
"""Deletes a file and metadata associated with it."""
self._impl.delete(filename)
def list(self, dir_name):
"""Lists all files in a directory."""
return self._impl.list(dir_name)
def get_jinja_environ(self, dir_names):
"""Configures jinja environment loaders for this file system."""
return self._impl.get_jinja_environ(dir_names)
def is_read_write(self):
return self._impl.is_read_write()
def is_draft(self, stream):
if not hasattr(stream, 'metadata'):
return False
if not stream.metadata:
return False
return stream.metadata.is_draft
class LocalReadOnlyFileSystem(object):
"""A read-only file system serving only local files."""
def __init__(self, logical_home_folder=None, physical_home_folder=None):
"""Creates a new instance of the disk-backed read-only file system.
Args:
logical_home_folder: A logical home dir of all files (/a/b/c/...).
physical_home_folder: A physical location on the file system (/x/y).
Returns:
A new instance of the object.
"""
self._logical_home_folder = AbstractFileSystem.normpath(
logical_home_folder)
self._physical_home_folder = AbstractFileSystem.normpath(
physical_home_folder)
def _logical_to_physical(self, filename):
filename = AbstractFileSystem.normpath(filename)
if not (self._logical_home_folder and self._physical_home_folder):
return filename
filename = os.path.join(
self._physical_home_folder,
os.path.relpath(filename, self._logical_home_folder))
return AbstractFileSystem.normpath(filename)
def _physical_to_logical(self, filename):
filename = AbstractFileSystem.normpath(filename)
if not (self._logical_home_folder and self._physical_home_folder):
return filename
filename = os.path.join(
self._logical_home_folder,
os.path.relpath(filename, self._physical_home_folder))
return AbstractFileSystem.normpath(filename)
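# A small illustrative sketch showing that the two mappings above are inverses
# of each other. The folders are assumptions for the example.
#
#   fs = LocalReadOnlyFileSystem('/course', '/home/user/coursebuilder')
#   fs._logical_to_physical('/course/assets/img/logo.png')
#       -> '/home/user/coursebuilder/assets/img/logo.png'
#   fs._physical_to_logical('/home/user/coursebuilder/assets/img/logo.png')
#       -> '/course/assets/img/logo.png'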
def isfile(self, filename):
return os.path.isfile(self._logical_to_physical(filename))
def get(self, filename):
return open(self._logical_to_physical(filename), 'rb')
def put(self, unused_filename, unused_stream):
raise Exception('Not implemented.')
def delete(self, unused_filename):
raise Exception('Not implemented.')
def list(self, root_dir):
"""Lists all files in a directory."""
files = []
for dirname, unused_dirnames, filenames in os.walk(
self._logical_to_physical(root_dir)):
for filename in filenames:
files.append(
self._physical_to_logical(os.path.join(dirname, filename)))
return sorted(files)
def get_jinja_environ(self, dir_names):
"""Configure the environment for Jinja templates."""
physical_dir_names = []
for dir_name in dir_names:
physical_dir_names.append(self._logical_to_physical(dir_name))
return jinja_utils.create_jinja_environment(
loader=jinja2.FileSystemLoader(physical_dir_names))
def is_read_write(self):
return False
class FileMetadataEntity(BaseEntity):
"""An entity to represent a file metadata; absolute file name is a key."""
# TODO(psimakov): do we need 'version' to support concurrent updates
# TODO(psimakov): can we put 'data' here and still have fast isfile/list?
created_on = db.DateTimeProperty(auto_now_add=True, indexed=False)
updated_on = db.DateTimeProperty(indexed=True)
# A draft file is just like any other file. It's up to the consumer of the file
# to decide whether to treat draft differently (not to serve it to the
# public, for example). This class does not care and just stores the bit.
is_draft = db.BooleanProperty(indexed=False)
size = db.IntegerProperty(indexed=False)
class FileDataEntity(BaseEntity):
"""An entity to represent file content; absolute file name is a key."""
data = db.BlobProperty()
class FileStreamWrapped(object):
"""A class that wraps a file stream, but adds extra attributes to it."""
def __init__(self, metadata, data):
self._metadata = metadata
self._data = data
def read(self):
"""Emulates stream.read(). Returns all bytes and emulates EOF."""
data = self._data
self._data = ''
return data
@property
def metadata(self):
return self._metadata
class StringStream(object):
"""A wrapper to pose a string as a UTF-8 byte stream."""
def __init__(self, text):
self._data = unicode.encode(text, 'utf-8')
def read(self):
"""Emulates stream.read(). Returns all bytes and emulates EOF."""
data = self._data
self._data = ''
return data
def string_to_stream(text):
return StringStream(text)
def stream_to_string(stream):
return stream.read().decode('utf-8')
class VirtualFileSystemTemplateLoader(jinja2.BaseLoader):
"""Loader of jinja2 templates from a virtual file system."""
def __init__(self, fs, logical_home_folder, dir_names):
self._fs = fs
self._logical_home_folder = AbstractFileSystem.normpath(
logical_home_folder)
self._dir_names = []
if dir_names:
for dir_name in dir_names:
self._dir_names.append(AbstractFileSystem.normpath(dir_name))
def get_source(self, unused_environment, template):
for dir_name in self._dir_names:
filename = AbstractFileSystem.normpath(
os.path.join(dir_name, template))
if self._fs.isfile(filename):
return self._fs.get(
filename).read().decode('utf-8'), filename, True
raise jinja2.TemplateNotFound(template)
def list_templates(self):
all_templates = []
for dir_name in self._dir_names:
all_templates += self._fs.list(dir_name)
return all_templates
class DatastoreBackedFileSystem(object):
"""A read-write file system backed by a datastore."""
@classmethod
def make_key(cls, filename):
return 'vfs:dsbfs:%s' % filename
def __init__(
self, ns, logical_home_folder,
inherits_from=None, inheritable_folders=None):
"""Creates a new instance of the datastore-backed file system.
Args:
ns: A datastore namespace to use for storing all data and metadata.
logical_home_folder: A logical home dir of all files (/a/b/c/...).
inherits_from: A file system to use for the inheritance.
inheritable_folders: A list of folders that support inheritance.
Returns:
A new instance of the object.
Raises:
Exception: if invalid inherits_from is given.
"""
# We cache files loaded via inherited fs; make sure they don't change.
if inherits_from and not isinstance(
inherits_from, LocalReadOnlyFileSystem):
raise Exception('Can only inherit from LocalReadOnlyFileSystem.')
self._ns = ns
self._logical_home_folder = AbstractFileSystem.normpath(
logical_home_folder)
self._inherits_from = inherits_from
self._inheritable_folders = []
if inheritable_folders:
for folder in inheritable_folders:
self._inheritable_folders.append(AbstractFileSystem.normpath(
folder))
def __getattribute__(self, name):
attr = object.__getattribute__(self, name)
# Don't intercept access to private methods and attributes.
if name.startswith('_'):
return attr
# Do intercept all methods.
if hasattr(attr, '__call__'):
def newfunc(*args, **kwargs):
"""Set proper namespace for each method call."""
old_namespace = namespace_manager.get_namespace()
try:
namespace_manager.set_namespace(self._ns)
return attr(*args, **kwargs)
finally:
namespace_manager.set_namespace(old_namespace)
return newfunc
# Don't intercept access to non-method attributes.
return attr
def _logical_to_physical(self, filename):
filename = AbstractFileSystem.normpath(filename)
# For now we only support '/' as a physical folder name.
if self._logical_home_folder == '/':
return filename
if not filename.startswith(self._logical_home_folder):
raise Exception(
'Expected path \'%s\' to start with a prefix \'%s\'.' % (
filename, self._logical_home_folder))
rel_path = filename[len(self._logical_home_folder):]
if not rel_path.startswith('/'):
rel_path = '/%s' % rel_path
return rel_path
def physical_to_logical(self, filename):
"""Converts an internal filename to and external filename."""
# This class receives and stores absolute file names. The logical
# filename is the external file name. The physical filename is an
# internal filename. This function does the conversions.
# Let's say you want to store a file named '/assets/img/foo.png'.
# This would be a physical filename in the VFS. But the put() operation
# expects an absolute filename from the root of the app installation,
# i.e. something like '/dev/apps/coursebuilder/assets/img/foo.png',
# which is called a logical filename. This is a legacy expectation from
# the days the course was defined as files on the file system.
#
# This function will do the conversion you need.
return self._physical_to_logical(filename)
def _physical_to_logical(self, filename):
filename = AbstractFileSystem.normpath(filename)
# For now we only support '/' as a physical folder name.
if filename and not filename.startswith('/'):
filename = '/' + filename
if self._logical_home_folder == '/':
return filename
return '%s%s' % (self._logical_home_folder, filename)
def _can_inherit(self, filename):
"""Checks if a file can be inherited from a parent file system."""
for prefix in self._inheritable_folders:
if filename.startswith(prefix):
return True
return False
def get(self, afilename):
"""Gets a file from a datastore. Raw bytes stream, no encodings."""
filename = self._logical_to_physical(afilename)
# Load from cache.
result = MemcacheManager.get(
self.make_key(filename), namespace=self._ns)
if result:
return result
if NO_OBJECT == result:
return None
# Load from a datastore.
metadata = FileMetadataEntity.get_by_key_name(filename)
if metadata:
data = FileDataEntity.get_by_key_name(filename)
if data:
result = FileStreamWrapped(metadata, data.data)
MemcacheManager.set(
self.make_key(filename), result, namespace=self._ns)
return result
result = None
metadata = None
# Load from parent fs.
if self._inherits_from and self._can_inherit(filename):
result = self._inherits_from.get(afilename)
# Cache result.
if result:
result = FileStreamWrapped(metadata, result.read())
MemcacheManager.set(
self.make_key(filename), result, namespace=self._ns)
else:
MemcacheManager.set(
self.make_key(filename), NO_OBJECT, namespace=self._ns)
return result
@db.transactional(xg=True)
def put(self, filename, stream, is_draft=False, metadata_only=False):
"""Puts a file stream to a database. Raw bytes stream, no encodings."""
self.non_transactional_put(
filename, stream, is_draft=is_draft, metadata_only=metadata_only)
def non_transactional_put(
self, filename, stream, is_draft=False, metadata_only=False):
"""Non-transactional put; use only when transactions are impossible."""
filename = self._logical_to_physical(filename)
metadata = FileMetadataEntity.get_by_key_name(filename)
if not metadata:
metadata = FileMetadataEntity(key_name=filename)
metadata.updated_on = datetime.datetime.now()
metadata.is_draft = is_draft
if not metadata_only:
# We operate with raw bytes. The consumer must deal with encoding.
raw_bytes = stream.read()
metadata.size = len(raw_bytes)
data = FileDataEntity(key_name=filename)
data.data = raw_bytes
data.put()
metadata.put()
MemcacheManager.delete(self.make_key(filename), namespace=self._ns)
def put_multi_async(self, filedata_list):
"""Initiate an async put of the given files.
This method initiates an asynchronous put of a list of file data
(presented as pairs of the form (filename, data_source)). It is not
transactional, and does not block, and instead immediately returns a
callback function. When this function is called it will block until
the puts are confirmed to have completed. At this point it will also
clear stale information out of the memcache. For maximum efficiency it's
advisable to defer calling the callback until all other request handling
has completed, but in any event, it MUST be called before the request
handler can exit successfully.
Args:
filedata_list: list. A list of tuples. The first entry of each
tuple is the file name, the second is a filelike object holding
the file data.
Returns:
callable. Returns a wait-and-finalize function. This function must
be called at some point before the request handler exits, in order
to confirm that the puts have succeeded and to purge old values
from the memcache.
"""
filename_list = []
data_list = []
metadata_list = []
for filename, stream in filedata_list:
filename = self._logical_to_physical(filename)
filename_list.append(filename)
metadata = FileMetadataEntity.get_by_key_name(filename)
if not metadata:
metadata = FileMetadataEntity(key_name=filename)
metadata_list.append(metadata)
metadata.updated_on = datetime.datetime.now()
# We operate with raw bytes. The consumer must deal with encoding.
raw_bytes = stream.read()
metadata.size = len(raw_bytes)
data = FileDataEntity(key_name=filename)
data_list.append(data)
data.data = raw_bytes
data_future = db.put_async(data_list)
metadata_future = db.put_async(metadata_list)
def wait_and_finalize():
data_future.check_success()
metadata_future.check_success()
MemcacheManager.delete_multi(
[self.make_key(filename) for filename in filename_list],
namespace=self._ns)
return wait_and_finalize
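# Illustrative usage sketch (not part of the original module): the deferred
# callback pattern described in the put_multi_async() docstring. 'fs' stands
# for an instance of the datastore-backed file system defined above; the file
# names and payloads are made up for this example.
def _example_put_multi_async(fs):
    from io import BytesIO
    finalize = fs.put_multi_async([
        ('/assets/img/a.png', BytesIO(b'raw bytes of a')),
        ('/assets/img/b.png', BytesIO(b'raw bytes of b')),
    ])
    # ... do the rest of the request handling here ...
    # MUST be called before the request handler exits: blocks until the
    # async puts succeed and purges stale memcache entries.
    finalize()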
@db.transactional(xg=True)
def delete(self, filename):
filename = self._logical_to_physical(filename)
metadata = FileMetadataEntity.get_by_key_name(filename)
if metadata:
metadata.delete()
data = FileDataEntity.get_by_key_name(filename)
if data:
data.delete()
MemcacheManager.delete(self.make_key(filename), namespace=self._ns)
def isfile(self, afilename):
"""Checks file existence by looking up the datastore row."""
filename = self._logical_to_physical(afilename)
# Check cache.
result = MemcacheManager.get(
self.make_key(filename), namespace=self._ns)
if result:
return True
if NO_OBJECT == result:
return False
# Check datastore.
metadata = FileMetadataEntity.get_by_key_name(filename)
if metadata:
return True
result = False
# Check with parent fs.
if self._inherits_from and self._can_inherit(filename):
result = self._inherits_from.isfile(afilename)
# Put NO_OBJECT marker into memcache to avoid repeated lookups.
if not result:
MemcacheManager.set(
self.make_key(filename), NO_OBJECT, namespace=self._ns)
return result
def list(self, dir_name, include_inherited=False):
"""Lists all files in a directory by using datastore query.
Args:
dir_name: string. Directory to list contents of.
include_inherited: boolean. If True, includes all inheritable files
from the parent filesystem.
Returns:
List of string. Lexicographically-sorted unique filenames
recursively found in dir_name.
"""
dir_name = self._logical_to_physical(dir_name)
result = set()
keys = FileMetadataEntity.all(keys_only=True)
for key in keys.fetch(1000):
filename = key.name()
if filename.startswith(dir_name):
result.add(self._physical_to_logical(filename))
if include_inherited and self._inherits_from:
for inheritable_folder in self._inheritable_folders:
result.update(set(self._inherits_from.list(
self._physical_to_logical(inheritable_folder))))
return sorted(list(result))
def get_jinja_environ(self, dir_names):
return jinja_utils.create_jinja_environment(
loader=VirtualFileSystemTemplateLoader(
self, self._logical_home_folder, dir_names))
def is_read_write(self):
return True
def run_all_unit_tests():
"""Runs all unit tests in the project."""
if __name__ == '__main__':
run_all_unit_tests()
|
|
# -*- coding: utf-8 -*-
"""
Coop_cms settings : central place for coop_cms settings
the settings should be accessed from here and not directly from django.conf.settings
"""
import os.path
import sys
from six import string_types
from django.conf import settings as django_settings
from django.conf.urls.i18n import i18n_patterns
from django.contrib.contenttypes.models import ContentType
from django.contrib.sites.models import Site
from django.utils.translation import ugettext as _
from importlib import import_module
from coop_cms.logger import logger
DEFAULT_NAVTREE_CLASS = COOP_CMS_NAVTREE_CLASS = 'coop_cms.NavTree'
DEPRECATED_COOP_CMS_NAVTREE_CLASS = getattr(django_settings, 'COOP_CMS_NAVTREE_CLASS', 'basic_cms.NavTree')
DEFAULT_MEDIA_ROOT = ''
def load_class(settings_key, default_value):
"""returns the form to be used for creating a new article"""
full_class_name = getattr(django_settings, settings_key, '') or default_value
if full_class_name:
try:
module_name, class_name = full_class_name.rsplit('.', 1)
except ValueError:
raise ImportError("Unable to import {0}: full path is required".format(full_class_name))
module = import_module(module_name)
class_object = getattr(module, class_name)
return class_object
return None
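# Illustrative sketch (not part of coop_cms): load_class() turns a dotted path
# taken from a Django setting (or a default) into the class object itself.
# A project could override the article form like this, where
# 'myapp.forms.MyArticleForm' is a hypothetical class path:
#     COOP_CMS_ARTICLE_FORM = 'myapp.forms.MyArticleForm'
def _example_load_class():
    # With no override configured, this resolves the default form class.
    return load_class('COOP_CMS_ARTICLE_FORM', 'coop_cms.forms.ArticleForm')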
def get_navigable_content_types():
"""returns the list of content types of navigable (which can be used in navigation) models"""
ct_choices = []
try:
content_apps = django_settings.COOP_CMS_CONTENT_APPS
except AttributeError:
content_apps = []
not_to_be_mapped = ('south', 'django_extensions', 'd2rq')
for module in django_settings.INSTALLED_APPS:
if (not module.startswith('django.')) and (module not in not_to_be_mapped):
content_apps.append(module)
apps_labels = [app.rsplit('.')[-1] for app in content_apps]
navigable_content_types = ContentType.objects.filter(app_label__in=apps_labels).order_by('app_label')
for content_type in navigable_content_types:
is_navnode = ((content_type.model == 'navnode') and (content_type.app_label == 'coop_cms'))
if (not is_navnode) and 'get_absolute_url' in dir(content_type.model_class()):
ct_choices.append((content_type.id, content_type.app_label + '.' + content_type.model))
return ct_choices
def get_navtree_class(defaut_class=None):
"""
returns the custom navtree class
Warning: it is not recommended to define a custom NavTree. This feature is deprecated.
"""
if hasattr(get_navtree_class, '_cache_class'):
return getattr(get_navtree_class, '_cache_class')
else:
navtree_class = None
if DEFAULT_NAVTREE_CLASS != COOP_CMS_NAVTREE_CLASS:
full_class_name = COOP_CMS_NAVTREE_CLASS
app_label, model_name = full_class_name.split('.')
model_name = model_name.lower()
try:
content_type = ContentType.objects.get(app_label=app_label, model=model_name)
navtree_class = content_type.model_class()
except Exception:
navtree_class = None
if navtree_class is None:
module = import_module('coop_cms.models')
navtree_class = module.NavTree
setattr(get_navtree_class, '_cache_class', navtree_class)
return navtree_class
def get_article_class():
"""
returns the custom Article class
This makes it possible to customize the Article model. However, it must inherit from BaseArticle.
"""
if hasattr(get_article_class, '_cache_class'):
return getattr(get_article_class, '_cache_class')
else:
default_value = ""
if 'coop_cms.apps.basic_cms' in django_settings.INSTALLED_APPS:
default_value = 'coop_cms.apps.basic_cms.models.Article'
article_class = load_class('COOP_CMS_ARTICLE_CLASS', default_value)
if not article_class:
raise Exception('No article class configured')
setattr(get_article_class, '_cache_class', article_class)
return article_class
def get_default_logo():
"""returns the default logo"""
return getattr(django_settings, 'COOP_CMS_DEFAULT_ARTICLE_LOGO', 'img/default-logo.png')
def get_article_form():
"""returns a form to be used for editing an article"""
return load_class('COOP_CMS_ARTICLE_FORM', 'coop_cms.forms.ArticleForm')
def get_article_settings_form():
"""returns the form to use for editing article settings"""
return load_class('COOP_CMS_ARTICLE_SETTINGS_FORM', 'coop_cms.forms.ArticleSettingsForm')
def get_new_article_form():
"""returns the form to be used for creating a new article"""
return load_class('COOP_CMS_NEW_ARTICLE_FORM', 'coop_cms.forms.NewArticleForm')
def get_newsletter_templates(newsletter, user):
"""returns the list of newsletter templates"""
try:
return getattr(django_settings, 'COOP_CMS_NEWSLETTER_TEMPLATES')
except AttributeError:
return ()
def get_newsletter_form():
"""returns the form to use for editing a newsletter"""
return load_class('COOP_CMS_NEWSLETTER_FORM', 'coop_cms.forms.NewsletterForm')
def get_newsletter_settings_form():
"""returns the form to use for for newsletter settings"""
return load_class('COOP_CMS_NEWSLETTER_SETTINGS_FORM', 'coop_cms.forms.NewsletterSettingsForm')
def get_article_templates(article, user):
"""returns the list of article templates"""
if hasattr(django_settings, 'COOP_CMS_ARTICLE_TEMPLATES'):
coop_cms_article_templates = getattr(django_settings, 'COOP_CMS_ARTICLE_TEMPLATES')
if isinstance(coop_cms_article_templates, string_types):
# COOP_CMS_ARTICLE_TEMPLATES is a string :
# - a function name that will return a tuple
# - a variable name that contains a tuple
# extract module and function/var names
module_name, object_name = coop_cms_article_templates.rsplit('.', 1)
module = import_module(module_name) # import module
article_templates_object = getattr(module, object_name) # get the object
if callable(article_templates_object):
# function: call it
article_templates = article_templates_object(article, user)
else:
# var: assign
article_templates = article_templates_object
else:
# COOP_CMS_ARTICLE_TEMPLATES is directly a tuple, assign it
article_templates = coop_cms_article_templates
else:
article_templates = None
return article_templates
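# Illustrative sketch (not part of coop_cms): the two forms accepted by
# COOP_CMS_ARTICLE_TEMPLATES, as handled by get_article_templates() above.
# The template paths, the (path, label) entry shape and the callable below are
# assumptions for the example, not confirmed by this module.
#
# Either a tuple directly in settings:
#     COOP_CMS_ARTICLE_TEMPLATES = (
#         ('coop_cms/article.html', 'Standard'),
#         ('myapp/two_columns.html', 'Two columns'),
#     )
#
# or the dotted name of a callable (or variable) living in your project:
#     COOP_CMS_ARTICLE_TEMPLATES = 'myapp.templates_config.article_templates'
def _example_article_templates(article, user):
    """Hypothetical callable: receives the article and the user, returns a tuple."""
    return (
        ('coop_cms/article.html', 'Standard'),
        ('myapp/two_columns.html', 'Two columns'),
    )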
def _get_article_setting(article, setting_name, default_value):
"""private function: access an article-dependant setting"""
try:
get_setting_name = getattr(django_settings, setting_name)
try:
module_name, fct_name = get_setting_name.rsplit('.', 1)
module = import_module(module_name)
get_setting = getattr(module, fct_name)
if callable(get_setting):
# If the setting is a function, get the value as the return value of the function call
value = get_setting(article)
else:
# Otherwise take the value as it is
value = get_setting
except ValueError:
value = get_setting_name
except AttributeError:
value = default_value
return value
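# Illustrative sketch (not part of coop_cms): an article-dependent setting as
# resolved by _get_article_setting() above. The setting may hold a plain value
# (for example COOP_CMS_ARTICLE_LOGO_SIZE = '64x64') or the dotted path of a
# callable taking the article, e.g. 'myapp.logos.article_logo_size' pointing to
# the hypothetical function below (the attribute it checks is also made up):
def _example_article_logo_size(article):
    """Hypothetical callable: pick a logo size per article."""
    return '96x96' if getattr(article, 'is_homepage', False) else '48x48'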
def get_article_logo_size(article):
"""get the article logo size"""
return _get_article_setting(article, 'COOP_CMS_ARTICLE_LOGO_SIZE', '48x48')
def get_article_logo_crop(article):
"""get the article logo crop"""
return _get_article_setting(article, 'COOP_CMS_ARTICLE_LOGO_CROP', 'center')
def get_headline_image_size(article):
"""get the headline image size"""
return _get_article_setting(article, 'COOP_CMS_HEADLINE_IMAGE_SIZE', '900')
def get_headline_image_crop(article):
"""get the headline image crop"""
return _get_article_setting(article, 'COOP_CMS_HEADLINE_IMAGE_CROP', None)
def get_max_image_width(image):
"""get the ax image width: avoid user to use very large image"""
return _get_article_setting(image, 'COOP_CMS_MAX_IMAGE_WIDTH', None)
def get_newsletter_item_classes():
"""get items thant can be used in newsletter"""
if hasattr(get_newsletter_item_classes, '_cache_class'):
return getattr(get_newsletter_item_classes, '_cache_class')
else:
item_classes = []
try:
full_classes_names = getattr(django_settings, 'COOP_CMS_NEWSLETTER_ITEM_CLASSES')
except AttributeError:
item_classes = (get_article_class(),)
else:
item_classes = []
for full_class_name in full_classes_names:
module_name, class_name = full_class_name.rsplit('.', 1)
module = import_module(module_name)
item_classes.append(getattr(module, class_name))
item_classes = tuple(item_classes)
if not item_classes:
raise Exception('No newsletter item classes configured')
setattr(get_newsletter_item_classes, '_cache_class', item_classes)
return item_classes
def get_newsletter_context_callbacks():
"""get the context for newsletter template"""
if hasattr(get_newsletter_context_callbacks, '_cache_func'):
return getattr(get_newsletter_context_callbacks, '_cache_func')
else:
try:
callback_names = getattr(django_settings, 'COOP_CMS_NEWSLETTER_CONTEXT')
except AttributeError:
return ()
else:
callbacks = []
for callback_name in callback_names:
module_name, func_name = callback_name.rsplit('.', 1)
module = import_module(module_name)
callbacks.append(getattr(module, func_name))
callbacks = tuple(callbacks)
setattr(get_newsletter_context_callbacks, '_cache_func', callbacks)
return callbacks
def is_localized():
"""return True if possible to have different languages in the site"""
if 'modeltranslation' in django_settings.INSTALLED_APPS:
return True
return False
def has_localized_urls():
"""return True if use language URL prefix"""
has_locale_urls = getattr(django_settings, 'COOP_CMS_HAS_LOCALIZED_URLS', None)
if has_locale_urls is None:
has_locale_urls = is_localized()
return has_locale_urls
def is_multilang():
"""return true if several languages are set"""
return len(django_settings.LANGUAGES) > 1
def multilang_mode():
"""return true if several languages are set"""
return len(django_settings.LANGUAGES)
def install_csrf_failure_view():
"""Make possible to customize csrf failure page"""
dont_do_it = getattr(django_settings, 'COOP_CMS_DO_NOT_INSTALL_CSRF_FAILURE_VIEW', False)
if not dont_do_it:
setattr(django_settings, 'CSRF_FAILURE_VIEW', 'coop_cms.views.webutils.csrf_failure')
def cms_no_homepage():
"""returns true if homepage is not managed by coop_cms"""
return getattr(django_settings, 'COOP_CMS_NO_HOMEPAGE', False)
def hide_media_library_menu():
"""returns True if media is not displayed in coop_bar menu"""
return getattr(django_settings, 'COOP_CMS_HIDE_MEDIA_LIBRARY_MENU', False)
def is_requestprovider_installed():
"""returns True if possible to get request from anywhere in the code"""
is_installed = ('coop_cms.utils.RequestMiddleware' in django_settings.MIDDLEWARE)
if not is_installed:
logger.warn("You should add coop_cms.utils.RequestMiddleware to the MIDDLEWARE settings")
return is_installed
def can_rewrite_url():
"""returns True if user is allowed to change article slugs"""
return getattr(django_settings, 'COOP_CMS_CAN_EDIT_ARTICLE_SLUG', False)
def get_article_views():
"""returns article views"""
try:
article_views = getattr(django_settings, 'COOP_CMS_ARTICLE_VIEWS')
return article_views
except AttributeError:
from coop_cms.views.articles import ArticleView
return {
'article_view': ArticleView,
'edit_article_view': ArticleView,
}
def is_perm_middleware_installed():
"""returns True if permission middleware is installed"""
return 'coop_cms.middleware.PermissionsMiddleware' in django_settings.MIDDLEWARE
# Check that languages are correctly set
if is_localized():
if django_settings.LANGUAGE_CODE[:2] != django_settings.LANGUAGES[0][0]:
text = "coop_cms settings error: LANGUAGE_CODE ({0}) should be first in LANGUAGES (currently first is {1})"
text = text.format(django_settings.LANGUAGE_CODE[:2], django_settings.LANGUAGES[0][0])
logger.warning(text)
def is_multi_site():
"""returns True if several sites are configured"""
return Site.objects.count() > 1
def get_img_folder(instance, filename):
"""image folder"""
try:
img_root = django_settings.IMAGE_FOLDER
except AttributeError:
img_root = 'img'
return '{0}/{1}'.format(img_root, filename)
def get_articles_category_page_size(article_category):
"""returns number of articles for pagination"""
if article_category.pagination_size:
return article_category.pagination_size
return getattr(django_settings, 'COOP_CMS_ARTICLES_CATEGORY_PAGINATION', 10)
def get_url_patterns():
"""return urlspatterns to use"""
if has_localized_urls():
return i18n_patterns
else:
def url_list(*args):
if args and isinstance(args[0], string_types):
# remove prefix if any
return list(args[1:])
else:
return list(args)
return url_list
def get_unit_test_media_root():
"""return unit testing_media root"""
global DEFAULT_MEDIA_ROOT
if not DEFAULT_MEDIA_ROOT:
DEFAULT_MEDIA_ROOT = django_settings.MEDIA_ROOT
django_settings.MEDIA_ROOT = os.path.join(django_settings.MEDIA_ROOT, '_unit_tests')
return django_settings.MEDIA_ROOT
def get_media_root():
"""return unit testing_media root if unit test, regular unit test if not"""
if 'test' in sys.argv:
return get_unit_test_media_root()
else:
return django_settings.MEDIA_ROOT
def homepage_no_redirection():
"""Indicates if the homepage should be served directly or as a redirection (default)"""
return getattr(django_settings, 'COOP_CMS_HOMEPAGE_NO_REDIRECTION', False)
def get_eastern_languages():
"""returns list of eastern language (not having the english alphabet)"""
eastern_langs = getattr(django_settings, 'COOP_CMS_EASTERN_LANGUAGES', None)
if eastern_langs is None:
eastern_langs = (
'ru', # Russian
'ja', # Japanese
'ko', # Korean
'iw', # Hebrew
'el', # Greek
'ar', # Arabic
'zh', # Chinese
'cn', # Chinese
)
return eastern_langs
def is_cache_enabled():
"""True if cache editable content"""
return getattr(django_settings, 'COOP_CMS_CACHE', False)
def change_site_id():
"""Change SITE ID"""
if (
django_settings.DEBUG and not getattr(django_settings, 'DISABLE_CHANGE_SITE', False) and
(len(sys.argv) > 1) and sys.argv[1] == "runserver"
):
local_dev_address = "127.0.0.1:8000"
current_site = Site.objects.get_current()
if current_site.domain != local_dev_address:
print(_("The current site is NOT localhost (127.0.0.1:8000)"))
if Site.objects.filter(domain=local_dev_address).exclude(id=current_site.id).exists():
print(_("Another site is already set as localhost"))
else:
print(_("Do you want to turn it into localhost?"))
choice = input(_("0: No\n1: Yes\n"))
if choice == "0":
print(_("You can disable this by adding in your settings DISABLE_CHANGE_SITE=True"))
elif choice == "1":
current_site.domain = "127.0.0.1:8000"
current_site.name = "localhost"
current_site.save()
print(_("Your domain site is now: "), current_site.domain)
def is_xsendfile_disabled():
return getattr(django_settings, 'COOP_CMS_DISABLE_XSENDFILE', False)
def xsendfile_no_file_size():
return getattr(django_settings, 'COOP_CMS_XSENDFILE_NO_FILESIZE', True)
|
|
import unittest
from mako.lexer import Lexer
from mako import exceptions, util
from util import flatten_result, result_lines
from mako.template import Template
import re
from test import TemplateTest, template_base, skip_if, eq_, assert_raises_message
# create fake parsetree classes which are constructed
# exactly as the repr() of a real parsetree object.
# this allows us to use a Python construct as the source
# of a comparable repr(), which is also hit by the 2to3 tool.
def repr_arg(x):
if isinstance(x, dict):
return util.sorted_dict_repr(x)
else:
return repr(x)
from mako import parsetree
for cls in parsetree.__dict__.values():
if isinstance(cls, type) and \
issubclass(cls, parsetree.Node):
clsname = cls.__name__
exec ("""
class %s(object):
def __init__(self, *args):
self.args = args
def __repr__(self):
return "%%s(%%s)" %% (
self.__class__.__name__,
", ".join(repr_arg(x) for x in self.args)
)
""" % clsname) in locals()
# NOTE: most assertion expressions were generated, then formatted
# by PyTidy, hence the dense formatting.
class LexerTest(TemplateTest):
def _compare(self, node, expected):
eq_(repr(node), repr(expected))
def test_text_and_tag(self):
template = """
<b>Hello world</b>
<%def name="foo()">
this is a def.
</%def>
and some more text.
"""
node = Lexer(template).parse()
self._compare(node, TemplateNode({},
[Text(u'''\n<b>Hello world</b>\n ''', (1,
1)), DefTag(u'def', {u'name': u'foo()'}, (3, 9),
[Text(u'''\n this is a def.\n ''',
(3, 28))]),
Text(u'''\n \n and some more text.\n''',
(5, 16))]))
def test_unclosed_tag(self):
template = """
<%def name="foo()">
other text
"""
try:
nodes = Lexer(template).parse()
assert False
except exceptions.SyntaxException, e:
assert str(e) == "Unclosed tag: <%def> at line: 5 char: 9"
def test_onlyclosed_tag(self):
template = \
"""
<%def name="foo()">
foo
</%def>
</%namespace>
hi.
"""
self.assertRaises(exceptions.SyntaxException,
Lexer(template).parse)
def test_noexpr_allowed(self):
template = \
"""
<%namespace name="${foo}"/>
"""
self.assertRaises(exceptions.CompileException,
Lexer(template).parse)
def test_unmatched_tag(self):
template = \
"""
<%namespace name="bar">
<%def name="foo()">
foo
</%namespace>
</%def>
hi.
"""
self.assertRaises(exceptions.SyntaxException,
Lexer(template).parse)
def test_nonexistent_tag(self):
template = """
<%lala x="5"/>
"""
self.assertRaises(exceptions.CompileException,
Lexer(template).parse)
def test_wrongcase_tag(self):
template = \
"""
<%DEF name="foo()">
</%def>
"""
self.assertRaises(exceptions.CompileException,
Lexer(template).parse)
def test_percent_escape(self):
template = \
"""
%% some whatever.
%% more some whatever
% if foo:
% endif
"""
node = Lexer(template).parse()
self._compare(node, TemplateNode({}, [Text(u'''\n \n''',
(1, 1)), Text(u'''% some whatever.\n\n''', (3, 2)),
Text(u' %% more some whatever\n', (5, 2)),
ControlLine(u'if', u'if foo:', False, (6, 1)),
ControlLine(u'if', u'endif', True, (7, 1)),
Text(u' ', (8, 1))]))
def test_text_tag(self):
template = \
"""
## comment
% if foo:
hi
% endif
<%text>
# more code
% more code
<%illegal compionent>/></>
<%def name="laal()">def</%def>
</%text>
<%def name="foo()">this is foo</%def>
% if bar:
code
% endif
"""
node = Lexer(template).parse()
self._compare(node,
TemplateNode({}, [Text(u'\n', (1, 1)),
Comment(u'comment', (2, 1)),
ControlLine(u'if', u'if foo:', False, (3, 1)),
Text(u' hi\n', (4, 1)),
ControlLine(u'if', u'endif', True, (5, 1)),
Text(u' ', (6, 1)), TextTag(u'text', {},
(6, 9),
[Text(u'''\n # more code\n '''
'''\n % more code\n '''
'''<%illegal compionent>/></>\n '''
'''<%def name="laal()">def</%def>\n '''
''' \n \n ''',
(6, 16))]), Text(u'''
''', (14, 17)),
DefTag(u'def', {u'name': u'foo()'}, (16, 9),
[Text(u'this is foo', (16, 28))]),
Text(u'''\n \n''', (16, 46)),
ControlLine(u'if', u'if bar:', False, (18, 1)),
Text(u' code\n', (19, 1)),
ControlLine(u'if', u'endif', True, (20, 1)),
Text(u' ', (21, 1))]))
def test_def_syntax(self):
template = \
"""
<%def lala>
hi
</%def>
"""
self.assertRaises(exceptions.CompileException,
Lexer(template).parse)
def test_def_syntax_2(self):
template = \
"""
<%def name="lala">
hi
</%def>
"""
self.assertRaises(exceptions.CompileException,
Lexer(template).parse)
def test_whitespace_equals(self):
template = \
"""
<%def name = "adef()" >
adef
</%def>
"""
node = Lexer(template).parse()
self._compare(node, TemplateNode({}, [Text(u'\n ',
(1, 1)), DefTag(u'def', {u'name': u'adef()'}, (2,
13),
[Text(u'''\n adef\n ''',
(2, 36))]), Text(u'\n ', (4, 20))]))
def test_ns_tag_closed(self):
template = \
"""
<%self:go x="1" y="2" z="${'hi' + ' ' + 'there'}"/>
"""
nodes = Lexer(template).parse()
self._compare(nodes, TemplateNode({},
[Text(u'''
''', (1, 1)),
CallNamespaceTag(u'self:go', {u'x': u'1', u'y'
: u'2', u'z': u"${'hi' + ' ' + 'there'}"}, (3,
13), []), Text(u'\n ', (3, 64))]))
def test_ns_tag_empty(self):
template = \
"""
<%form:option value=""></%form:option>
"""
nodes = Lexer(template).parse()
self._compare(nodes, TemplateNode({}, [Text(u'\n ',
(1, 1)), CallNamespaceTag(u'form:option',
{u'value': u''}, (2, 13), []), Text(u'\n '
, (2, 51))]))
def test_ns_tag_open(self):
template = \
"""
<%self:go x="1" y="${process()}">
this is the body
</%self:go>
"""
nodes = Lexer(template).parse()
self._compare(nodes, TemplateNode({},
[Text(u'''
''', (1, 1)),
CallNamespaceTag(u'self:go', {u'x': u'1', u'y'
: u'${process()}'}, (3, 13),
[Text(u'''
this is the body
''',
(3, 46))]), Text(u'\n ', (5, 24))]))
def test_expr_in_attribute(self):
"""test some slightly trickier expressions.
you can still trip up the expression parsing, though, unless we
integrate really deeply with the AST somehow."""
template = \
"""
<%call expr="foo>bar and 'lala' or 'hoho'"/>
<%call expr='foo<bar and hoho>lala and "x" + "y"'/>
"""
nodes = Lexer(template).parse()
self._compare(nodes, TemplateNode({}, [Text(u'\n ',
(1, 1)), CallTag(u'call', {u'expr'
: u"foo>bar and 'lala' or 'hoho'"}, (2, 13), []),
Text(u'\n ', (2, 57)), CallTag(u'call'
, {u'expr': u'foo<bar and hoho>lala and "x" + "y"'
}, (3, 13), []), Text(u'\n ', (3, 64))]))
def test_pagetag(self):
template = \
"""
<%page cached="True", args="a, b"/>
some template
"""
nodes = Lexer(template).parse()
self._compare(nodes, TemplateNode({}, [Text(u'\n ',
(1, 1)), PageTag(u'page', {u'args': u'a, b',
u'cached': u'True'}, (2, 13), []),
Text(u'''
some template
''',
(2, 48))]))
def test_nesting(self):
template = \
"""
<%namespace name="ns">
<%def name="lala(hi, there)">
<%call expr="something()"/>
</%def>
</%namespace>
"""
nodes = Lexer(template).parse()
self._compare(nodes, TemplateNode({},
[Text(u'''
''', (1, 1)),
NamespaceTag(u'namespace', {u'name': u'ns'}, (3,
9), [Text(u'\n ', (3, 31)),
DefTag(u'def', {u'name': u'lala(hi, there)'}, (4,
13), [Text(u'\n ', (4, 42)),
CallTag(u'call', {u'expr': u'something()'}, (5,
17), []), Text(u'\n ', (5, 44))]),
Text(u'\n ', (6, 20))]),
Text(u'''
''', (7, 22))]))
if util.py3k:
def test_code(self):
template = \
"""text
<%
print("hi")
for x in range(1,5):
print(x)
%>
more text
<%!
import foo
%>
"""
nodes = Lexer(template).parse()
self._compare(nodes,
TemplateNode({}, [
Text(u'text\n ', (1, 1)),
Code(u'\nprint("hi")\nfor x in range(1,5):\n '
'print(x)\n \n', False, (2, 5)),
Text(u'\nmore text\n ', (6, 7)),
Code(u'\nimport foo\n \n', True, (8, 5)),
Text(u'\n', (10, 7))])
)
else:
def test_code(self):
template = \
"""text
<%
print "hi"
for x in range(1,5):
print x
%>
more text
<%!
import foo
%>
"""
nodes = Lexer(template).parse()
self._compare(nodes,
TemplateNode({}, [
Text(u'text\n ', (1, 1)),
Code(u'\nprint "hi"\nfor x in range(1,5):\n '
'print x\n \n', False, (2, 5)),
Text(u'\nmore text\n ', (6, 7)),
Code(u'\nimport foo\n \n', True, (8, 5)),
Text(u'\n', (10, 7))])
)
def test_code_and_tags(self):
template = \
"""
<%namespace name="foo">
<%def name="x()">
this is x
</%def>
<%def name="y()">
this is y
</%def>
</%namespace>
<%
result = []
data = get_data()
for x in data:
result.append(x+7)
%>
result: <%call expr="foo.x(result)"/>
"""
nodes = Lexer(template).parse()
self._compare(nodes, TemplateNode({}, [Text(u'\n', (1, 1)),
NamespaceTag(u'namespace', {u'name': u'foo'}, (2,
1), [Text(u'\n ', (2, 24)), DefTag(u'def',
{u'name': u'x()'}, (3, 5),
[Text(u'''\n this is x\n ''', (3, 22))]),
Text(u'\n ', (5, 12)), DefTag(u'def', {u'name'
: u'y()'}, (6, 5),
[Text(u'''\n this is y\n ''', (6, 22))]),
Text(u'\n', (8, 12))]), Text(u'''\n\n''', (9, 14)),
Code(u'''\nresult = []\ndata = get_data()\n'''
'''for x in data:\n result.append(x+7)\n\n''',
False, (11, 1)), Text(u'''\n\n result: ''', (16,
3)), CallTag(u'call', {u'expr': u'foo.x(result)'
}, (18, 13), []), Text(u'\n', (18, 42))]))
def test_expression(self):
template = \
"""
this is some ${text} and this is ${textwith | escapes, moreescapes}
<%def name="hi()">
give me ${foo()} and ${bar()}
</%def>
${hi()}
"""
nodes = Lexer(template).parse()
self._compare(nodes, TemplateNode({},
[Text(u'\n this is some ', (1, 1)),
Expression(u'text', [], (2, 22)),
Text(u' and this is ', (2, 29)),
Expression(u'textwith ', ['escapes', 'moreescapes'
], (2, 42)), Text(u'\n ', (2, 76)),
DefTag(u'def', {u'name': u'hi()'}, (3, 9),
[Text(u'\n give me ', (3, 27)),
Expression(u'foo()', [], (4, 21)), Text(u' and ',
(4, 29)), Expression(u'bar()', [], (4, 34)),
Text(u'\n ', (4, 42))]), Text(u'\n '
, (5, 16)), Expression(u'hi()', [], (6, 9)),
Text(u'\n', (6, 16))]))
def test_tricky_expression(self):
template = """
${x and "|" or "hi"}
"""
nodes = Lexer(template).parse()
self._compare(
nodes,
TemplateNode({}, [
Text(u'\n \n ', (1, 1)),
Expression(u'x and "|" or "hi"', [], (3, 13)),
Text(u'\n ', (3, 33))
])
)
template = """
${hello + '''heres '{|}' text | | }''' | escape1}
"""
nodes = Lexer(template).parse()
self._compare(
nodes,
TemplateNode({}, [
Text(u'\n \n ', (1, 1)),
Expression(u"hello + '''heres '{|}' text | | }''' ",
['escape1'], (3, 13)),
Text(u'\n ', (3, 62))
])
)
def test_tricky_code(self):
if util.py3k:
template = """<% print('hi %>') %>"""
nodes = Lexer(template).parse()
self._compare(nodes, TemplateNode({},
[Code(u"print('hi %>') \n", False, (1, 1))]))
else:
template = """<% print 'hi %>' %>"""
nodes = Lexer(template).parse()
self._compare(nodes, TemplateNode({},
[Code(u"print 'hi %>' \n", False, (1, 1))]))
def test_tricky_code_2(self):
template = \
"""<%
# someone's comment
%>
"""
nodes = Lexer(template).parse()
self._compare(nodes, TemplateNode({},
[Code(u"""
# someone's comment
""",
False, (1, 1)), Text(u'\n ', (3, 11))]))
if util.py3k:
def test_tricky_code_3(self):
template = \
"""<%
print('hi')
# this is a comment
# another comment
x = 7 # someone's '''comment
print('''
there
''')
# someone else's comment
%> '''and now some text '''"""
nodes = Lexer(template).parse()
self._compare(nodes, TemplateNode({},
[Code(u"""
print('hi')
# this is a comment
# another comment
x = 7 # someone's '''comment
print('''
there
''')
# someone else's comment
""",
False, (1, 1)),
Text(u" '''and now some text '''", (10,
11))]))
else:
def test_tricky_code_3(self):
template = \
"""<%
print 'hi'
# this is a comment
# another comment
x = 7 # someone's '''comment
print '''
there
'''
# someone else's comment
%> '''and now some text '''"""
nodes = Lexer(template).parse()
self._compare(nodes, TemplateNode({},
[Code(u"""\nprint 'hi'\n# this is a comment\n"""
"""# another comment\nx = 7 """
"""# someone's '''comment\nprint '''\n """
"""there\n '''\n# someone else's """
"""comment\n \n""",
False, (1, 1)),
Text(u" '''and now some text '''", (10,11))]))
def test_control_lines(self):
template = \
"""
text text la la
% if foo():
mroe text la la blah blah
% endif
and osme more stuff
% for l in range(1,5):
tex tesl asdl l is ${l} kfmas d
% endfor
tetx text
"""
nodes = Lexer(template).parse()
self._compare(nodes, TemplateNode({},
[Text(u'''\ntext text la la\n''', (1, 1)),
ControlLine(u'if', u'if foo():', False, (3, 1)),
Text(u' mroe text la la blah blah\n', (4, 1)),
ControlLine(u'if', u'endif', True, (5, 1)),
Text(u'''\n and osme more stuff\n''', (6,
1)), ControlLine(u'for', u'for l in range(1,5):',
False, (8, 1)), Text(u' tex tesl asdl l is ',
(9, 1)), Expression(u'l', [], (9, 24)),
Text(u' kfmas d\n', (9, 28)), ControlLine(u'for',
u'endfor', True, (10, 1)),
Text(u''' tetx text\n \n''', (11, 1))]))
def test_control_lines_2(self):
template = \
"""% for file in requestattr['toc'].filenames:
x
% endfor
"""
nodes = Lexer(template).parse()
self._compare(nodes, TemplateNode({}, [ControlLine(u'for',
u"for file in requestattr['toc'].filenames:",
False, (1, 1)), Text(u' x\n', (2, 1)),
ControlLine(u'for', u'endfor', True, (3, 1))]))
def test_long_control_lines(self):
template = \
"""
% for file in \\
requestattr['toc'].filenames:
x
% endfor
"""
nodes = Lexer(template).parse()
self._compare(
nodes,
TemplateNode({}, [
Text(u'\n', (1, 1)),
ControlLine(u'for', u"for file in \\\n "
"requestattr['toc'].filenames:",
False, (2, 1)),
Text(u' x\n', (4, 1)),
ControlLine(u'for', u'endfor', True, (5, 1)),
Text(u' ', (6, 1))
])
)
def test_unmatched_control(self):
template = """
% if foo:
% for x in range(1,5):
% endif
"""
assert_raises_message(
exceptions.SyntaxException,
"Keyword 'endif' doesn't match keyword 'for' at line: 5 char: 1",
Lexer(template).parse
)
def test_unmatched_control_2(self):
template = """
% if foo:
% for x in range(1,5):
% endfor
"""
assert_raises_message(
exceptions.SyntaxException,
"Unterminated control keyword: 'if' at line: 3 char: 1",
Lexer(template).parse
)
def test_unmatched_control_3(self):
template = """
% if foo:
% for x in range(1,5):
% endlala
% endif
"""
assert_raises_message(
exceptions.SyntaxException,
"Keyword 'endlala' doesn't match keyword 'for' at line: 5 char: 1",
Lexer(template).parse
)
def test_ternary_control(self):
template = \
"""
% if x:
hi
% elif y+7==10:
there
% elif lala:
lala
% else:
hi
% endif
"""
nodes = Lexer(template).parse()
self._compare(nodes, TemplateNode({}, [Text(u'\n', (1, 1)),
ControlLine(u'if', u'if x:', False, (2, 1)),
Text(u' hi\n', (3, 1)),
ControlLine(u'elif', u'elif y+7==10:', False, (4,
1)), Text(u' there\n', (5, 1)),
ControlLine(u'elif', u'elif lala:', False, (6,
1)), Text(u' lala\n', (7, 1)),
ControlLine(u'else', u'else:', False, (8, 1)),
Text(u' hi\n', (9, 1)),
ControlLine(u'if', u'endif', True, (10, 1))]))
def test_integration(self):
template = \
"""<%namespace name="foo" file="somefile.html"/>
## inherit from foobar.html
<%inherit file="foobar.html"/>
<%def name="header()">
<div>header</div>
</%def>
<%def name="footer()">
<div> footer</div>
</%def>
<table>
% for j in data():
<tr>
% for x in j:
<td>Hello ${x| h}</td>
% endfor
</tr>
% endfor
</table>
"""
nodes = Lexer(template).parse()
self._compare(nodes, TemplateNode({}, [NamespaceTag(u'namespace'
, {u'file': u'somefile.html', u'name': u'foo'},
(1, 1), []), Text(u'\n', (1, 46)),
Comment(u'inherit from foobar.html', (2, 1)),
InheritTag(u'inherit', {u'file': u'foobar.html'},
(3, 1), []), Text(u'''\n\n''', (3, 31)),
DefTag(u'def', {u'name': u'header()'}, (5, 1),
[Text(u'''\n <div>header</div>\n''', (5,
23))]), Text(u'\n', (7, 8)), DefTag(u'def',
{u'name': u'footer()'}, (8, 1),
[Text(u'''\n <div> footer</div>\n''', (8,
23))]), Text(u'''\n\n<table>\n''', (10, 8)),
ControlLine(u'for', u'for j in data():', False,
(13, 1)), Text(u' <tr>\n', (14, 1)),
ControlLine(u'for', u'for x in j:', False, (15,
1)), Text(u' <td>Hello ', (16, 1)),
Expression(u'x', ['h'], (16, 23)), Text(u'</td>\n'
, (16, 30)), ControlLine(u'for', u'endfor', True,
(17, 1)), Text(u' </tr>\n', (18, 1)),
ControlLine(u'for', u'endfor', True, (19, 1)),
Text(u'</table>\n', (20, 1))]))
def test_comment_after_statement(self):
template = \
"""
% if x: #comment
hi
% else: #next
hi
% endif #end
"""
nodes = Lexer(template).parse()
self._compare(nodes, TemplateNode({}, [Text(u'\n', (1, 1)),
ControlLine(u'if', u'if x: #comment', False, (2,
1)), Text(u' hi\n', (3, 1)),
ControlLine(u'else', u'else: #next', False, (4,
1)), Text(u' hi\n', (5, 1)),
ControlLine(u'if', u'endif #end', True, (6, 1))]))
def test_crlf(self):
template = open(self._file_path("crlf.html"), 'rb').read()
nodes = Lexer(template).parse()
self._compare(
nodes,
TemplateNode({}, [
Text(u'<html>\r\n\r\n', (1, 1)),
PageTag(u'page', {
u'args': u"a=['foo',\n 'bar']"
}, (3, 1), []),
Text(u'\r\n\r\nlike the name says.\r\n\r\n', (4, 26)),
ControlLine(u'for', u'for x in [1,2,3]:', False, (8, 1)),
Text(u' ', (9, 1)),
Expression(u'x', [], (9, 9)),
ControlLine(u'for', u'endfor', True, (10, 1)),
Text(u'\r\n', (11, 1)),
Expression(u"trumpeter == 'Miles' and "
"trumpeter or \\\n 'Dizzy'",
[], (12, 1)),
Text(u'\r\n\r\n', (13, 15)),
DefTag(u'def', {u'name': u'hi()'}, (15, 1), [
Text(u'\r\n hi!\r\n', (15, 19))]),
Text(u'\r\n\r\n</html>\r\n', (17, 8))
])
)
assert flatten_result(Template(template).render()) \
== """<html> like the name says. 1 2 3 Dizzy </html>"""
def test_comments(self):
template = \
"""
<style>
#someselector
# other non comment stuff
</style>
## a comment
# also not a comment
## this is a comment
this is ## not a comment
<%doc> multiline
comment
</%doc>
hi
"""
nodes = Lexer(template).parse()
self._compare(nodes, TemplateNode({},
[Text(u'''\n<style>\n #someselector\n # '''
'''other non comment stuff\n</style>\n''',
(1, 1)), Comment(u'a comment', (6, 1)),
Text(u'''\n# also not a comment\n\n''', (7, 1)),
Comment(u'this is a comment', (10, 1)),
Text(u''' \nthis is ## not a comment\n\n''', (11,
1)), Comment(u''' multiline\ncomment\n''', (14,
1)), Text(u'''
hi
''', (16, 8))]))
def test_docs(self):
template = \
"""
<%doc>
this is a comment
</%doc>
<%def name="foo()">
<%doc>
this is the foo func
</%doc>
</%def>
"""
nodes = Lexer(template).parse()
self._compare(nodes,
TemplateNode({}, [Text(u'\n ', (1,
1)),
Comment(u'''\n this is a comment\n ''',
(2, 9)), Text(u'\n ', (4, 16)),
DefTag(u'def', {u'name': u'foo()'}, (5, 9),
[Text(u'\n ', (5, 28)),
Comment(u'''\n this is the foo func\n'''
''' ''',
(6, 13)), Text(u'\n ', (8, 20))]),
Text(u'\n ', (9, 16))]))
def test_preprocess(self):
def preproc(text):
return re.sub(r'(?<=\n)\s*#[^#]', '##', text)
template = \
"""
hi
# old style comment
# another comment
"""
nodes = Lexer(template, preprocessor=preproc).parse()
self._compare(nodes, TemplateNode({}, [Text(u'''\n hi\n''',
(1, 1)), Comment(u'old style comment', (3, 1)),
Comment(u'another comment', (4, 1))]))
|
|
# -*- coding: utf-8 -*-
from __future__ import print_function
import sys
import numpy
import tables
from tables import (
Col, StringCol, Atom, StringAtom, Int16Atom, Int32Atom,
FloatAtom, Float64Atom,
)
from tables.tests import common
from tables.tests.common import unittest, test_filename
from tables.tests.common import PyTablesTestCase as TestCase
# Test Record class
class Record(tables.IsDescription):
var1 = StringCol(itemsize=4) # 4-character String
var2 = Col.from_kind('int') # integer
var3 = Col.from_kind('int', itemsize=2) # short integer
var4 = Col.from_kind('float') # double (double-precision)
var5 = Col.from_kind('float', itemsize=4) # float (single-precision)
var6 = Col.from_kind('complex') # double-precision
var7 = Col.from_kind('complex', itemsize=8) # single-precision
if hasattr(tables, "Float16Atom"):
var8 = Col.from_kind('float', itemsize=2) # half-precision
if hasattr(tables, "Float96Atom"):
var9 = Col.from_kind('float', itemsize=12) # extended-precision
if hasattr(tables, "Float128Atom"):
var10 = Col.from_kind('float', itemsize=16) # extended-precision
if hasattr(tables, "Complex192Atom"):
var11 = Col.from_kind('complex', itemsize=24) # extended-precision
if hasattr(tables, "Complex256Atom"):
var12 = Col.from_kind('complex', itemsize=32) # extended-precision
class RangeTestCase(common.TempFileMixin, TestCase):
title = "This is the table title"
expectedrows = 100
maxshort = 2 ** 15
maxint = 2147483648 # (2 ** 31)
compress = 0
def setUp(self):
super(RangeTestCase, self).setUp()
self.rootgroup = self.h5file.root
# Create a table
self.table = self.h5file.create_table(self.rootgroup, 'table',
Record, self.title)
def test00_range(self):
"""Testing the range check."""
rec = self.table.row
# Save a record
i = self.maxshort
rec['var1'] = '%04d' % (i)
rec['var2'] = i
rec['var3'] = i
rec['var4'] = float(i)
rec['var5'] = float(i)
rec['var6'] = float(i)
rec['var7'] = complex(i, i)
if hasattr(tables, "Float16Atom"):
rec['var8'] = float(i)
if hasattr(tables, "Float96Atom"):
rec['var9'] = float(i)
if hasattr(tables, "Float128Atom"):
rec['var10'] = float(i)
try:
rec.append()
except ValueError:
if common.verbose:
(type, value, traceback) = sys.exc_info()
print("\nGreat!, the next ValueError was catched!")
print(value)
pass
else:
if common.verbose:
print(
"\nNow, the range overflow no longer issues a ValueError")
def test01_type(self):
"""Testing the type check."""
rec = self.table.row
# Save a record
i = self.maxshort
rec['var1'] = '%04d' % (i)
rec['var2'] = i
rec['var3'] = i % self.maxshort
rec['var5'] = float(i)
with self.assertRaises(TypeError):
rec['var4'] = "124c"
rec['var6'] = float(i)
rec['var7'] = complex(i, i)
if hasattr(tables, "Float16Atom"):
rec['var8'] = float(i)
if hasattr(tables, "Float96Atom"):
rec['var9'] = float(i)
if hasattr(tables, "Float128Atom"):
rec['var10'] = float(i)
# Check the dtype read-only attribute
class DtypeTestCase(common.TempFileMixin, TestCase):
def test00a_table(self):
"""Check dtype accessor for Table objects."""
a = self.h5file.create_table('/', 'table', Record)
self.assertEqual(a.dtype, a.description._v_dtype)
def test00b_column(self):
"""Check dtype accessor for Column objects."""
a = self.h5file.create_table('/', 'table', Record)
c = a.cols.var3
self.assertEqual(c.dtype, a.description._v_dtype['var3'])
def test01_array(self):
"""Check dtype accessor for Array objects."""
a = self.h5file.create_array('/', 'array', [1, 2])
self.assertEqual(a.dtype, a.atom.dtype)
def test02_carray(self):
"""Check dtype accessor for CArray objects."""
a = self.h5file.create_carray('/', 'array', atom=FloatAtom(),
shape=[1, 2])
self.assertEqual(a.dtype, a.atom.dtype)
def test03_earray(self):
"""Check dtype accessor for EArray objects."""
a = self.h5file.create_earray('/', 'array', atom=FloatAtom(),
shape=[0, 2])
self.assertEqual(a.dtype, a.atom.dtype)
def test04_vlarray(self):
"""Check dtype accessor for VLArray objects."""
a = self.h5file.create_vlarray('/', 'array', FloatAtom())
self.assertEqual(a.dtype, a.atom.dtype)
class ReadFloatTestCase(common.TestFileMixin, TestCase):
h5fname = test_filename("float.h5")
nrows = 5
ncols = 6
def setUp(self):
super(ReadFloatTestCase, self).setUp()
x = numpy.arange(self.ncols)
y = numpy.arange(self.nrows)
y.shape = (self.nrows, 1)
self.values = x + y
def test01_read_float16(self):
dtype = "float16"
if hasattr(numpy, dtype):
ds = getattr(self.h5file.root, dtype)
self.assertFalse(isinstance(ds, tables.UnImplemented))
self.assertEqual(ds.shape, (self.nrows, self.ncols))
self.assertEqual(ds.dtype, dtype)
self.assertTrue(common.allequal(
ds.read(), self.values.astype(dtype)))
else:
with self.assertWarns(UserWarning):
ds = getattr(self.h5file.root, dtype)
self.assertTrue(isinstance(ds, tables.UnImplemented))
def test02_read_float32(self):
dtype = "float32"
ds = getattr(self.h5file.root, dtype)
self.assertFalse(isinstance(ds, tables.UnImplemented))
self.assertEqual(ds.shape, (self.nrows, self.ncols))
self.assertEqual(ds.dtype, dtype)
self.assertTrue(common.allequal(
ds.read(), self.values.astype(dtype)))
def test03_read_float64(self):
dtype = "float64"
ds = getattr(self.h5file.root, dtype)
self.assertFalse(isinstance(ds, tables.UnImplemented))
self.assertEqual(ds.shape, (self.nrows, self.ncols))
self.assertEqual(ds.dtype, dtype)
self.assertTrue(common.allequal(
ds.read(), self.values.astype(dtype)))
def test04_read_longdouble(self):
dtype = "longdouble"
if hasattr(tables, "Float96Atom") or hasattr(tables, "Float128Atom"):
ds = getattr(self.h5file.root, dtype)
self.assertFalse(isinstance(ds, tables.UnImplemented))
self.assertEqual(ds.shape, (self.nrows, self.ncols))
self.assertEqual(ds.dtype, dtype)
self.assertTrue(common.allequal(
ds.read(), self.values.astype(dtype)))
if hasattr(tables, "Float96Atom"):
self.assertEqual(ds.dtype, "float96")
elif hasattr(tables, "Float128Atom"):
self.assertEqual(ds.dtype, "float128")
else:
# XXX: check
# the behavior depends on the HDF5 lib configuration
try:
with self.assertWarns(UserWarning):
ds = getattr(self.h5file.root, dtype)
self.assertTrue(isinstance(ds, tables.UnImplemented))
except AssertionError:
from tables.utilsextension import _broken_hdf5_long_double
if not _broken_hdf5_long_double():
ds = getattr(self.h5file.root, dtype)
self.assertEqual(ds.dtype, "float64")
def test05_read_quadprecision_float(self):
# XXX: check
try:
with self.assertWarns(UserWarning):
ds = self.h5file.root.quadprecision
self.assertTrue(isinstance(ds, tables.UnImplemented))
except AssertionError:
# NOTE: it would be nice to have some sort of message that warns
# against the potential precision loss: the quad-precision
# dataset actually uses 128 bits for each element, not just
# 80 bits (longdouble)
ds = self.h5file.root.quadprecision
self.assertEqual(ds.dtype, "longdouble")
class AtomTestCase(TestCase):
def test_init_parameters_01(self):
atom1 = StringAtom(itemsize=12)
atom2 = atom1.copy()
self.assertEqual(atom1, atom2)
self.assertEqual(str(atom1), str(atom2))
self.assertFalse(atom1 is atom2)
def test_init_parameters_02(self):
atom1 = StringAtom(itemsize=12)
atom2 = atom1.copy(itemsize=100, shape=(2, 2))
self.assertEqual(atom2,
StringAtom(itemsize=100, shape=(2, 2), dflt=b''))
def test_init_parameters_03(self):
atom1 = StringAtom(itemsize=12)
self.assertRaises(TypeError, atom1.copy, foobar=42)
def test_from_dtype_01(self):
atom1 = Atom.from_dtype(numpy.dtype((numpy.int16, (2, 2))))
atom2 = Int16Atom(shape=(2, 2), dflt=0)
self.assertEqual(atom1, atom2)
self.assertEqual(str(atom1), str(atom2))
def test_from_dtype_02(self):
atom1 = Atom.from_dtype(numpy.dtype('S5'), dflt=b'hello')
atom2 = StringAtom(itemsize=5, shape=(), dflt=b'hello')
self.assertEqual(atom1, atom2)
self.assertEqual(str(atom1), str(atom2))
def test_from_dtype_03(self):
atom1 = Atom.from_dtype(numpy.dtype('Float64'))
atom2 = Float64Atom(shape=(), dflt=0.0)
self.assertEqual(atom1, atom2)
self.assertEqual(str(atom1), str(atom2))
def test_from_kind_01(self):
atom1 = Atom.from_kind('int', itemsize=2, shape=(2, 2))
atom2 = Int16Atom(shape=(2, 2), dflt=0)
self.assertEqual(atom1, atom2)
self.assertEqual(str(atom1), str(atom2))
def test_from_kind_02(self):
atom1 = Atom.from_kind('int', shape=(2, 2))
atom2 = Int32Atom(shape=(2, 2), dflt=0)
self.assertEqual(atom1, atom2)
self.assertEqual(str(atom1), str(atom2))
def test_from_kind_03(self):
atom1 = Atom.from_kind('int', shape=1)
atom2 = Int32Atom(shape=(1,), dflt=0)
self.assertEqual(atom1, atom2)
self.assertEqual(str(atom1), str(atom2))
def test_from_kind_04(self):
atom1 = Atom.from_kind('string', itemsize=5, dflt=b'hello')
atom2 = StringAtom(itemsize=5, shape=(), dflt=b'hello')
self.assertEqual(atom1, atom2)
self.assertEqual(str(atom1), str(atom2))
def test_from_kind_05(self):
# ValueError: no default item size for kind ``string``
self.assertRaises(ValueError, Atom.from_kind, 'string', dflt=b'hello')
def test_from_kind_06(self):
# ValueError: unknown kind: 'Float'
self.assertRaises(ValueError, Atom.from_kind, 'Float')
def suite():
import doctest
import tables.atom
theSuite = unittest.TestSuite()
for i in range(1):
theSuite.addTest(doctest.DocTestSuite(tables.atom))
theSuite.addTest(unittest.makeSuite(AtomTestCase))
theSuite.addTest(unittest.makeSuite(RangeTestCase))
theSuite.addTest(unittest.makeSuite(DtypeTestCase))
theSuite.addTest(unittest.makeSuite(ReadFloatTestCase))
return theSuite
if __name__ == '__main__':
common.parse_argv(sys.argv)
common.print_versions()
unittest.main(defaultTest='suite')
|
|
from typing import Dict, List, Iterator, Optional, Tuple
import os.path
from pathlib import Path
import re
import fnmatch
import itertools
import json
import attr
from click import BadParameter
from requests import Session, RequestException
from retrying import retry
from elm_doc import elm_platform
ModuleName = str
ExactVersion = str
VersionRange = str
STUFF_DIRECTORY = 'elm-stuff'
module_name_re = re.compile(r'^[A-Z][a-zA-Z0-9_]*$')
@attr.s
class ElmProject:
DOCS_FILENAME = 'docs.json'
path = attr.ib()
@property
def json_path(self) -> Path:
return self.path / self.DESCRIPTION_FILENAME
def iter_direct_dependencies(self) -> Iterator['ElmPackage']:
raise NotImplementedError
@attr.s
class ElmPackage(ElmProject):
DESCRIPTION_FILENAME = 'elm.json'
user = attr.ib() # str
project = attr.ib() # str
version = attr.ib() # str
summary = attr.ib() # str
license = attr.ib() # str
elm_version = attr.ib() # VersionRange
exposed_modules = attr.ib() # Union[List[ModuleName], Dict[str, List[ModuleName]]]
dependencies = attr.ib() # Dict[str, VersionRange]
test_dependencies = attr.ib() # Dict[str, VersionRange]
@classmethod
def from_path(cls, path: Path) -> Optional['ElmPackage']:
json_path = path / cls.DESCRIPTION_FILENAME
if not json_path.exists():
return
description = _load_json(json_path)
if description['type'] != 'package':
return
name_parts = description['name'].split('/')
return cls(
path=path,
user=name_parts[0],
project=name_parts[1],
version=description['version'],
summary=description['summary'],
license=description['license'],
exposed_modules=description['exposed-modules'],
dependencies=description['dependencies'],
test_dependencies=description['test-dependencies'],
elm_version=description['elm-version'],
)
@property
def name(self) -> str:
return '{}/{}'.format(self.user, self.project)
def as_package(self, config):
return self
def as_json(self):
fields = [
('name', 'name'),
('version', 'version'),
('summary', 'summary'),
('license', 'license'),
('exposed-modules', 'exposed_modules'),
('dependencies', 'dependencies'),
('test-dependencies', 'test_dependencies'),
('elm-version', 'elm_version'),
]
props = {json_prop: getattr(self, attr) for json_prop, attr in fields}
props['type'] = 'package'
return props
def without_license(self) -> 'ElmPackage':
asdict = attr.asdict(self)
asdict['license'] = ''
return ElmPackage(**asdict)
def sorted_exposed_modules(self):
if isinstance(self.exposed_modules, dict):
modules = [module for modules in self.exposed_modules.values()
for module in modules]
else:
modules = list(self.exposed_modules)
return sorted(modules)
@attr.s
class ElmApplication(ElmProject):
DESCRIPTION_FILENAME = 'elm.json'
source_directories = attr.ib() # [str]
elm_version = attr.ib() # ExactVersion
direct_dependencies = attr.ib() # Dict[str, ExactVersion]
indirect_dependencies = attr.ib() # Dict[str, ExactVersion]
direct_test_dependencies = attr.ib() # Dict[str, ExactVersion]
indirect_test_dependencies = attr.ib() # Dict[str, ExactVersion]
@classmethod
def from_path(cls, path: Path) -> Optional['ElmApplication']:
json_path = path / cls.DESCRIPTION_FILENAME
if not json_path.exists():
return
description = _load_json(json_path)
if description['type'] != 'application':
return
return cls(
path=path,
source_directories=description['source-directories'],
elm_version=description['elm-version'],
direct_dependencies=description['dependencies'].get('direct', {}),
indirect_dependencies=description['dependencies'].get('indirect', {}),
direct_test_dependencies=description['test-dependencies'].get('direct', {}),
indirect_test_dependencies=description['test-dependencies'].get('indirect', {}),
)
def as_package(self, overrides: 'ProjectConfig') -> 'ElmPackage':
return ElmPackage(
path=self.path,
user=overrides.fake_user,
project=overrides.fake_project,
version=overrides.fake_version,
summary=overrides.fake_summary,
license=overrides.fake_license,
exposed_modules=[],
dependencies=_as_package_dependencies(
self.direct_dependencies),
test_dependencies=_as_package_dependencies(
self.direct_test_dependencies),
elm_version=_as_version_range(self.elm_version),
)
def as_json(self):
json = {
'type': 'application',
'source-directories': self.source_directories,
'elm-version': self.elm_version,
}
json['dependencies'] = {}
json['dependencies']['direct'] = self.direct_dependencies
json['dependencies']['indirect'] = self.indirect_dependencies
json['test-dependencies'] = {}
json['test-dependencies']['direct'] = self.direct_test_dependencies
json['test-dependencies']['indirect'] = self.indirect_test_dependencies
return json
def add_direct_dependencies(self, deps: List[Tuple[str, ExactVersion]]):
for name, version in deps:
self.direct_dependencies[name] = version
def direct_dependency_names(self) -> Iterator[str]:
return itertools.chain(
self.direct_dependencies.keys(),
self.direct_test_dependencies.keys(),
)
def iter_direct_dependencies(self) -> Iterator[ElmPackage]:
deps = itertools.chain(
self.direct_dependencies.items(),
self.direct_test_dependencies.items(),
)
# Elm 0.19.0 uses "package" in the path, 0.19.1 uses "packages".
# Here we use a glob to be agnostic and somewhat defensive against
# future change. e.g. ~/.elm/0.19.0/package*/elm/core/1.0.0
elm_version_dir = elm_platform.ELM_HOME / self.elm_version
for elm_package_dir in elm_version_dir.glob("package*"):
for name, version in deps:
yield from_path(elm_package_dir / name / version)
break
else:
raise RuntimeError(
('No directory that starts with "package" in {}. '
'Wiping the directory and retrying may fix this, '
'or it may be that this version of Elm is not supported by elm-doc yet.').format(
elm_version_dir))
def _as_package_dependencies(*app_dependencies: Dict[str, ExactVersion]) -> Dict[str, VersionRange]:
package_deps = {}
for app_deps in app_dependencies:
for package_name, exact_version in app_deps.items():
package_deps[package_name] = _as_version_range(exact_version)
return package_deps
def _as_version_range(exact_version: ExactVersion) -> VersionRange:
major, minor, patch = exact_version.split('.')
next_version = '{}.{}.{}'.format(major, minor, int(patch, 10) + 1)
return '{} <= v < {}'.format(exact_version, next_version)
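# Illustrative sketch (not part of elm_doc): the exact-version to version-range
# conversion performed by _as_version_range() above.
def _example_version_range():
    assert _as_version_range('1.2.3') == '1.2.3 <= v < 1.2.4'
    return _as_version_range('1.2.3')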
def from_path(path: Path) -> ElmProject:
classes = [
ElmApplication,
ElmPackage,
]
for cls in classes:
project = cls.from_path(path)
if project:
return project
raise BadParameter('{} does not look like an Elm project'.format(path))
def _load_json(path: Path) -> Dict:
with open(str(path)) as f:
return json.load(f)
@attr.s
class ProjectConfig:
include_paths = attr.ib(factory=list) # List[str]
exclude_modules = attr.ib(factory=list) # List[str]
exclude_source_directories = attr.ib(factory=list) # List[str]
force_exclusion = attr.ib(default=False) # bool
fake_user = attr.ib(default='user') # str
fake_project = attr.ib(default='project') # str
fake_version = attr.ib(default='1.0.0') # str
fake_summary = attr.ib(default='summary') # str
fake_license = attr.ib(default='BSD-3-Clause') # str
@attr.s
class ElmModule:
path = attr.ib() # Path
name = attr.ib() # ModuleName
def glob_project_modules(
project: ElmProject, config: ProjectConfig) -> Iterator[ElmModule]:
# Check for excludes if there are no explicit includes, or if
# there are explicit includes and exclusion is specifically requested.
check_excludes = (not config.include_paths) or config.force_exclusion
exclude_source_directories = [os.path.normpath(src) for src in config.exclude_source_directories]
for source_dir_name in project.source_directories:
if check_excludes and exclude_source_directories \
and (os.path.normpath(source_dir_name) in exclude_source_directories):
continue
source_dir = project.path / source_dir_name
elm_files = source_dir.glob('**/*.elm')
for elm_file in elm_files:
if elm_file.relative_to(project.path).parts[0] == STUFF_DIRECTORY:
continue
if config.include_paths and not any(_matches_include_path(elm_file, include_path)
for include_path in config.include_paths):
continue
rel_path = elm_file.relative_to(source_dir)
module_name_parts = rel_path.parent.parts + (rel_path.stem,)
if not all(map(_valid_module_name, module_name_parts)):
continue
module_name = '.'.join(module_name_parts)
if check_excludes and any(fnmatch.fnmatch(module_name, module_pattern)
for module_pattern in config.exclude_modules):
continue
yield ElmModule(path=elm_file, name=module_name)
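# Illustrative usage sketch (not part of elm_doc): discover the modules of a
# local project while excluding some of them. The project path ('.') and the
# exclude pattern are hypothetical.
def _example_glob_project_modules():
    project = from_path(Path('.'))
    config = ProjectConfig(exclude_modules=['*.Internal.*'])
    return [module.name for module in glob_project_modules(project, config)]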
def _valid_module_name(name):
return module_name_re.match(name)
def _matches_include_path(source_path: Path, include_path: Path):
try:
source_path.relative_to(include_path)
except ValueError:
return False
else:
return True
@retry(
retry_on_exception=lambda e: isinstance(e, RequestException),
wait_exponential_multiplier=1000, # Wait 2^x * 1000 milliseconds between each retry,
wait_exponential_max=30 * 1000, # up to 30 seconds, then 30 seconds afterwards
stop_max_attempt_number=10)
def fetch_releases(session: Session, package_name: str) -> Dict[ExactVersion, int]:
releases_url = 'https://package.elm-lang.org/packages/{}/releases.json'.format(package_name)
return session.get(releases_url).json()
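# Illustrative usage sketch (not part of elm_doc): fetch the published releases
# of a package from package.elm-lang.org; 'elm/core' is used here only as an
# example package name.
def _example_fetch_releases():
    session = Session()
    return fetch_releases(session, 'elm/core')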
|
|
import urllib
import urlparse
from gevent.queue import Empty
from geventwebsocket import WebSocketError
class BaseTransport(object):
"""Base class for all transports. Mostly wraps handler class functions."""
def __init__(self, handler, config, **kwargs):
"""Base transport class.
:param config: dict Should contain the config keys, like
``heartbeat_interval``, ``heartbeat_timeout`` and
``close_timeout``.
"""
self.content_type = ("Content-Type", "text/plain; charset=UTF-8")
self.headers = [
("Access-Control-Allow-Origin", "*"),
("Access-Control-Allow-Credentials", "true"),
("Access-Control-Allow-Methods", "POST, GET, OPTIONS"),
("Access-Control-Max-Age", 3600),
]
self.handler = handler
self.config = config
def write(self, data=""):
# Gevent v 0.13
if hasattr(self.handler, 'response_headers_list'):
if 'Content-Length' not in self.handler.response_headers_list:
self.handler.response_headers.append(('Content-Length', len(data)))
self.handler.response_headers_list.append('Content-Length')
elif not hasattr(self.handler, 'provided_content_length') or self.handler.provided_content_length is None:
# Gevent 1.0bX
l = len(data)
self.handler.provided_content_length = l
self.handler.response_headers.append(('Content-Length', l))
self.handler.write_smart(data)
def start_response(self, status, headers, **kwargs):
if "Content-Type" not in [x[0] for x in headers]:
headers.append(self.content_type)
headers.extend(self.headers)
self.handler.start_response(status, headers, **kwargs)
class XHRPollingTransport(BaseTransport):
def __init__(self, *args, **kwargs):
super(XHRPollingTransport, self).__init__(*args, **kwargs)
def options(self):
self.start_response("200 OK", ())
self.write()
return []
def get(self, socket):
socket.heartbeat()
heartbeat_interval = self.config['heartbeat_interval']
payload = self.get_messages_payload(socket, timeout=heartbeat_interval)
if not payload:
payload = "8::" # NOOP
self.start_response("200 OK", [])
self.write(payload)
def _request_body(self):
return self.handler.wsgi_input.readline()
def post(self, socket):
for message in self.decode_payload(self._request_body()):
socket.put_server_msg(message)
self.start_response("200 OK", [
("Connection", "close"),
("Content-Type", "text/plain")
])
self.write("1")
def get_messages_payload(self, socket, timeout=None):
"""This will fetch the messages from the Socket's queue, and if
there are many messes, pack multiple messages in one payload and return
"""
try:
msgs = socket.get_multiple_client_msgs(timeout=timeout)
data = self.encode_payload(msgs)
except Empty:
data = ""
return data
def encode_payload(self, messages):
"""Encode list of messages. Expects messages to be unicode.
``messages`` - List of raw messages to encode, if necessary
"""
if not messages or messages[0] is None:
return ''
if len(messages) == 1:
return messages[0].encode('utf-8')
payload = u''.join([(u'\ufffd%d\ufffd%s' % (len(p), p))
for p in messages if p is not None])
# FIXME: why must we filter out None values here? How does a None
# message get into the list in the first place?
return payload.encode('utf-8')
def decode_payload(self, payload):
"""This function can extract multiple messages from one HTTP payload.
Sometimes the XHR/JSONP/... transports can pack more than one message
into a single packet. They are encoded following the WebSocket
semantics, which need to be reproduced here to unwrap the messages.
The semantics are:
\ufffd + [length as a string] + \ufffd + [payload as a unicode string]
This function always returns a list of messages, even if there is
only one.
Inspired by socket.io/lib/transports/http.js
"""
payload = payload.decode('utf-8')
if payload[0] == u"\ufffd":
ret = []
while len(payload) != 0:
len_end = payload.find(u"\ufffd", 1)
length = int(payload[1:len_end])
msg_start = len_end + 1
msg_end = length + msg_start
message = payload[msg_start:msg_end]
ret.append(message)
payload = payload[msg_end:]
return ret
return [payload]
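# Illustrative sketch of the framing described above (not part of the
# original module): two messages u"abc" and u"de" travel on the wire as
#     u"\ufffd3\ufffdabc\ufffd2\ufffdde"
# and decode_payload() splits that back into [u"abc", u"de"], while a
# payload that does not start with \ufffd is returned as a one-element list.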
def do_exchange(self, socket, request_method):
if not socket.connection_established:
# Runs only the first time we get a Socket opening
self.start_response("200 OK", [
("Connection", "close"),
])
self.write("1::") # 'connect' packet
return
elif request_method in ("GET", "POST", "OPTIONS"):
return getattr(self, request_method.lower())(socket)
else:
raise Exception("No support for the method: " + request_method)
class JSONPolling(XHRPollingTransport):
def __init__(self, handler, config):
super(JSONPolling, self).__init__(handler, config)
self.content_type = ("Content-Type", "text/javascript; charset=UTF-8")
def _request_body(self):
data = super(JSONPolling, self)._request_body()
# resolve %20%3F's, take out wrapping d="...", etc..
data = urllib.unquote_plus(data)[3:-1] \
.replace(r'\"', '"') \
.replace(r"\\", "\\")
# For some reason, in case of multiple messages passed in one
# query, IE7 sends it escaped, not utf-8 encoded. This dirty
# hack handles it.
if data[0] == "\\":
data = data.decode("unicode_escape").encode("utf-8")
return data
def write(self, data):
"""Just quote out stuff before sending it out"""
args = urlparse.parse_qs(self.handler.environ.get("QUERY_STRING"))
if "i" in args:
i = args["i"]
else:
i = "0"
# TODO: don't we need to quote this data here?
super(JSONPolling, self).write("io.j[%s]('%s');" % (i, data))
class XHRMultipartTransport(XHRPollingTransport):
def __init__(self, handler):
super(XHRMultipartTransport, self).__init__(handler)
self.content_type = (
"Content-Type",
"multipart/x-mixed-replace;boundary=\"socketio\""
)
def do_exchange(self, socket, request_method):
if request_method == "GET":
return self.get(socket)
elif request_method == "POST":
return self.post(socket)
else:
raise Exception("No support for such method: " + request_method)
def get(self, socket):
header = "Content-Type: text/plain; charset=UTF-8\r\n\r\n"
self.start_response("200 OK", [("Connection", "keep-alive")])
self.write_multipart("--socketio\r\n")
self.write_multipart(header)
self.write_multipart(str(socket.sessid) + "\r\n")
self.write_multipart("--socketio\r\n")
def chunk():
while True:
payload = self.get_messages_payload(socket)
if not payload:
# That would mean the call to Queue.get() returned Empty,
# so it was in fact killed, since we pass no timeout=..
return
# See below
else:
try:
self.write_multipart(header)
self.write_multipart(payload)
self.write_multipart("--socketio\r\n")
except socket.error:
# The client might try to reconnect, even with a socket
# error, so let's just let it go, and not kill the
# socket completely. Other processes will ensure
# we kill everything if the user expires the timeouts.
#
# WARN: this means that this payload is LOST, unless we
# decide to re-inject it into the queue.
return
socket.spawn(chunk)
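# Rough shape of the multipart stream produced above (illustrative, not part
# of the original module): after the initial session-id part, every flushed
# payload appears on the wire as
#     Content-Type: text/plain; charset=UTF-8\r\n\r\n<payload>--socketio\r\n
# so the browser receives one "part" per payload until the greenlet returns.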
class WebsocketTransport(BaseTransport):
def do_exchange(self, socket, request_method):
websocket = self.handler.environ['wsgi.websocket']
websocket.send("1::") # 'connect' packet
def send_into_ws():
while True:
message = socket.get_client_msg()
if message is None:
break
try:
websocket.send(message)
except (WebSocketError, TypeError):
# We can't send a message on the socket
# it is dead, let the other sockets know
socket.disconnect()
def read_from_ws():
while True:
message = websocket.receive()
if message is None:
    break
socket.put_server_msg(message)
socket.spawn(send_into_ws)
socket.spawn(read_from_ws)
class FlashSocketTransport(WebsocketTransport):
pass
class HTMLFileTransport(XHRPollingTransport):
"""Not tested at all!"""
def __init__(self, handler, config):
super(HTMLFileTransport, self).__init__(handler, config)
self.content_type = ("Content-Type", "text/html")
def write_packed(self, data):
self.write("<script>_('%s');</script>" % data)
def write(self, data):
l = 1024 * 5
super(HTMLFileTransport, self).write("%d\r\n%s%s\r\n" % (l, data, " " * (l - len(data))))
def do_exchange(self, socket, request_method):
return super(HTMLFileTransport, self).do_exchange(socket, request_method)
def get(self, socket):
self.start_response("200 OK", [
("Connection", "keep-alive"),
("Content-Type", "text/html"),
("Transfer-Encoding", "chunked"),
])
self.write("<html><body><script>var _ = function (msg) { parent.s._(msg, document); };</script>")
self.write_packed("1::") # 'connect' packet
def chunk():
while True:
payload = self.get_messages_payload(socket)
if not payload:
# That would mean the call to Queue.get() returned Empty,
# so it was in fact killed, since we pass no timeout=..
return
else:
try:
self.write_packed(payload)
except socket.error:
# See comments for XHRMultipart
return
socket.spawn(chunk)
|
|
import functools
import tensorflow as tf
from .doc_utils import add_name_arg_doc
from .type_utils import is_tensor_object
__all__ = [
'get_static_shape', 'get_batch_size', 'get_rank', 'get_shape',
'get_dimension_size', 'get_dimensions_size', 'resolve_negative_axis',
'concat_shapes', 'is_shape_equal',
]
def get_static_shape(tensor):
"""
Get the static shape of the specified `tensor` as a tuple.
Args:
tensor: The tensor object.
Returns:
tuple[int or None] or None: The static shape tuple, or :obj:`None`
if the rank of `tensor` is not deterministic.
"""
tensor = tf.convert_to_tensor(tensor)
shape = tensor.get_shape()
if shape.ndims is None:
shape = None
else:
shape = tuple((int(v) if v is not None else None)
for v in shape.as_list())
return shape
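# Illustrative contrast between static and dynamic shapes (an assumption about
# typical TF 1.x usage, not part of the original module):
#
#     >>> x = tf.placeholder(tf.float32, shape=[None, 3])
#     >>> get_static_shape(x)
#     (None, 3)
#
# A tensor whose rank itself is unknown yields None instead of a tuple, and
# fully-determined dimensions come back as plain ints.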
def resolve_negative_axis(ndims, axis):
"""
Resolve all negative `axis` indices according to `ndims` into positive.
Usage::
resolve_negative_axis(4, [0, -1, -2]) # output: (0, 3, 2)
Args:
ndims (int): Number of total dimensions.
axis (Iterable[int]): The axis indices.
Returns:
tuple[int]: The resolved positive axis indices.
Raises:
ValueError: If any index in `axis` is out of range.
"""
axis = tuple(int(a) for a in axis)
ret = []
for a in axis:
if a < 0:
a += ndims
if a < 0 or a >= ndims:
raise ValueError('`axis` out of range: {} vs ndims {}.'.
format(axis, ndims))
ret.append(a)
if len(set(ret)) != len(ret):
raise ValueError('`axis` has duplicated elements after resolving '
'negative axis: ndims {}, axis {}.'.
format(ndims, axis))
return tuple(ret)
@add_name_arg_doc
def get_batch_size(tensor, axis=0, name=None):
"""
Infer the mini-batch size according to `tensor`.
Args:
tensor (tf.Tensor): The input placeholder.
axis (int): The axis of mini-batches. Default is 0.
Returns:
int or tf.Tensor: The batch size.
"""
tensor = tf.convert_to_tensor(tensor)
axis = int(axis)
with tf.name_scope(name, default_name='get_batch_size', values=[tensor]):
batch_size = None
shape = get_static_shape(tensor)
if shape is not None:
batch_size = shape[axis]
if batch_size is None:
batch_size = tf.shape(tensor)[axis]
return batch_size
@add_name_arg_doc
def get_rank(tensor, name=None):
"""
Get the rank of the tensor.
Args:
tensor (tf.Tensor): The tensor to be tested.
name: TensorFlow name scope of the graph nodes.
Returns:
int or tf.Tensor: The rank.
"""
tensor_shape = get_static_shape(tensor)
if tensor_shape is not None:
return len(tensor_shape)
return tf.rank(tensor, name=name)
@add_name_arg_doc
def get_dimension_size(tensor, axis, name=None):
"""
Get the size of the specified `axis` of `tensor`.
Args:
tensor (tf.Tensor): The tensor to be tested.
axis (int or tf.Tensor): The dimension to be queried.
Returns:
int or tf.Tensor: An integer or a tensor, the size of the queried dimension.
"""
tensor = tf.convert_to_tensor(tensor)
with tf.name_scope(name, default_name='get_dimension_size',
values=[tensor]):
shape = get_static_shape(tensor)
if shape is not None and not is_tensor_object(axis) and \
shape[axis] is not None:
return shape[axis]
return tf.shape(tensor)[axis]
@add_name_arg_doc
def get_dimensions_size(tensor, axes=None, name=None):
"""
Get the sizes of the specified `axes` of `tensor`.
If `axes` is :obj:`None`, select the size of all dimensions.
Args:
tensor (tf.Tensor): The tensor to be tested.
axes (Iterable[int] or None): The dimensions to be selected.
Returns:
tuple[int] or tf.Tensor: A tuple of integers if all selected
dimensions have static sizes. Otherwise a tensor.
"""
tensor = tf.convert_to_tensor(tensor)
if axes is not None:
axes = tuple(axes)
if not axes:
return ()
with tf.name_scope(name, default_name='get_dimensions_size',
values=[tensor]):
shape = get_static_shape(tensor)
if shape is not None and axes is not None:
shape = tuple(shape[a] for a in axes)
if shape is None or None in shape:
dynamic_shape = tf.shape(tensor)
if axes is None:
shape = dynamic_shape
else:
shape = tf.stack([dynamic_shape[i] for i in axes], axis=0)
return shape
get_shape = functools.partial(get_dimensions_size, axes=None)
@add_name_arg_doc
def concat_shapes(shapes, name=None):
"""
Concatenate the shapes from `shapes` into a single shape.
Args:
shapes (Iterable[tuple[int] or tf.Tensor]): List of shape tuples
or tensors.
Returns:
tuple[int] or tf.Tensor: The concatenated shape.
"""
shapes = tuple(shapes)
if any(is_tensor_object(s) for s in shapes):
shapes = [
s if is_tensor_object(s) else tf.constant(s, dtype=tf.int32)
for s in shapes
]
with tf.name_scope(name, default_name='concat_shapes', values=shapes):
return tf.concat(shapes, axis=0)
else:
return sum((tuple(s) for s in shapes), ())
@add_name_arg_doc
def is_shape_equal(x, y, name=None):
"""
Check whether the shape of `x` equals the shape of `y`.
Args:
x: A tensor.
y: Another tensor, to compare with `x`.
Returns:
bool or tf.Tensor: The static or dynamic comparison result.
"""
x = tf.convert_to_tensor(x)
y = tf.convert_to_tensor(y)
x_shape = get_static_shape(x)
y_shape = get_static_shape(y)
# both shapes have deterministic dimensions, we can perform a fast check
if x_shape is not None and y_shape is not None:
# dimension mismatch, cannot be equal
if len(x_shape) != len(y_shape):
return False
# gather the axis to check
axis_to_check = []
for i, (a, b) in enumerate(zip(x_shape, y_shape)):
if a is None or b is None:
axis_to_check.append(i)
else:
if a != b:
return False
# no dynamic axis to check, confirm equality
if not axis_to_check:
return True
# generate the dynamic check
with tf.name_scope(name or 'is_shape_equal', values=[x, y]):
x_shape = get_shape(x)
y_shape = get_shape(y)
return tf.reduce_all([tf.equal(x_shape[a], y_shape[a])
for a in axis_to_check])
# either one of the shapes has non-deterministic dimensions
with tf.name_scope(name or 'is_shape_equal', values=[x, y]):
x_shape = get_shape(x)
y_shape = get_shape(y)
return tf.cond(
tf.equal(tf.rank(x), tf.rank(y)),
lambda: tf.reduce_all(
tf.equal(
tf.concat([x_shape, y_shape], axis=0),
tf.concat([y_shape, x_shape], axis=0)
)
),
lambda: tf.constant(False, dtype=tf.bool)
)
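# Usage sketch for is_shape_equal (illustrative only): comparing two tensors of
# static shape (2, 3) and (2, 4) short-circuits to the Python value False
# without adding graph nodes, while (None, 3) vs (None, 3) emits a dynamic
# check over axis 0 only and returns a boolean tensor.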
|
|
from decimal import Decimal
from django.contrib.gis.db.models.fields import BaseSpatialField, GeometryField
from django.contrib.gis.db.models.sql import AreaField, DistanceField
from django.contrib.gis.geos import GEOSGeometry
from django.core.exceptions import FieldError
from django.db.models import (
BooleanField, FloatField, IntegerField, TextField, Transform,
)
from django.db.models.expressions import Func, Value
from django.db.models.functions import Cast
from django.db.utils import NotSupportedError
from django.utils.functional import cached_property
NUMERIC_TYPES = (int, float, Decimal)
class GeoFuncMixin:
function = None
geom_param_pos = (0,)
def __init__(self, *expressions, **extra):
super().__init__(*expressions, **extra)
# Ensure that value expressions are geometric.
for pos in self.geom_param_pos:
expr = self.source_expressions[pos]
if not isinstance(expr, Value):
continue
try:
output_field = expr.output_field
except FieldError:
output_field = None
geom = expr.value
if not isinstance(geom, GEOSGeometry) or output_field and not isinstance(output_field, GeometryField):
raise TypeError("%s function requires a geometric argument in position %d." % (self.name, pos + 1))
if not geom.srid and not output_field:
raise ValueError("SRID is required for all geometries.")
if not output_field:
self.source_expressions[pos] = Value(geom, output_field=GeometryField(srid=geom.srid))
@property
def name(self):
return self.__class__.__name__
@cached_property
def geo_field(self):
return self.source_expressions[self.geom_param_pos[0]].field
def as_sql(self, compiler, connection, function=None, **extra_context):
if self.function is None and function is None:
function = connection.ops.spatial_function_name(self.name)
return super().as_sql(compiler, connection, function=function, **extra_context)
def resolve_expression(self, *args, **kwargs):
res = super().resolve_expression(*args, **kwargs)
# Ensure that expressions are geometric.
source_fields = res.get_source_fields()
for pos in self.geom_param_pos:
field = source_fields[pos]
if not isinstance(field, GeometryField):
raise TypeError(
"%s function requires a GeometryField in position %s, got %s." % (
self.name, pos + 1, type(field).__name__,
)
)
base_srid = res.geo_field.srid
for pos in self.geom_param_pos[1:]:
expr = res.source_expressions[pos]
expr_srid = expr.output_field.srid
if expr_srid != base_srid:
# Automatic SRID conversion so objects are comparable.
res.source_expressions[pos] = Transform(expr, base_srid).resolve_expression(*args, **kwargs)
return res
def _handle_param(self, value, param_name='', check_types=None):
if not hasattr(value, 'resolve_expression'):
if check_types and not isinstance(value, check_types):
raise TypeError(
"The %s parameter has the wrong type: should be %s." % (
param_name, check_types)
)
return value
class GeoFunc(GeoFuncMixin, Func):
pass
class GeomOutputGeoFunc(GeoFunc):
@cached_property
def output_field(self):
return GeometryField(srid=self.geo_field.srid)
class SQLiteDecimalToFloatMixin:
"""
By default, Decimal values are converted to str by the SQLite backend,
which is not acceptable to the GIS functions expecting numeric values.
"""
def as_sqlite(self, compiler, connection, **extra_context):
for expr in self.get_source_expressions():
if hasattr(expr, 'value') and isinstance(expr.value, Decimal):
expr.value = float(expr.value)
return super().as_sql(compiler, connection, **extra_context)
class OracleToleranceMixin:
tolerance = 0.05
def as_oracle(self, compiler, connection, **extra_context):
tolerance = Value(self._handle_param(
self.extra.get('tolerance', self.tolerance),
'tolerance',
NUMERIC_TYPES,
))
clone = self.copy()
clone.set_source_expressions([*self.get_source_expressions(), tolerance])
return clone.as_sql(compiler, connection, **extra_context)
class Area(OracleToleranceMixin, GeoFunc):
arity = 1
@cached_property
def output_field(self):
return AreaField(self.geo_field)
def as_sql(self, compiler, connection, **extra_context):
if not connection.features.supports_area_geodetic and self.geo_field.geodetic(connection):
raise NotSupportedError('Area on geodetic coordinate systems not supported.')
return super().as_sql(compiler, connection, **extra_context)
def as_sqlite(self, compiler, connection, **extra_context):
if self.geo_field.geodetic(connection):
extra_context['template'] = '%(function)s(%(expressions)s, %(spheroid)d)'
extra_context['spheroid'] = True
return self.as_sql(compiler, connection, **extra_context)
class Azimuth(GeoFunc):
output_field = FloatField()
arity = 2
geom_param_pos = (0, 1)
class AsGeoJSON(GeoFunc):
output_field = TextField()
def __init__(self, expression, bbox=False, crs=False, precision=8, **extra):
expressions = [expression]
if precision is not None:
expressions.append(self._handle_param(precision, 'precision', int))
options = 0
if crs and bbox:
options = 3
elif bbox:
options = 1
elif crs:
options = 2
if options:
expressions.append(options)
super().__init__(*expressions, **extra)
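# Worked example of the options bitmask above (illustrative, not part of the
# original module): bbox alone maps to 1, crs alone to 2, and crs + bbox to 3,
# matching PostGIS' ST_AsGeoJSON option flags, so
#     AsGeoJSON('geom', bbox=True, crs=True, precision=6)
# ends up with the source expressions [geom, 6, 3].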
class AsGML(GeoFunc):
geom_param_pos = (1,)
output_field = TextField()
def __init__(self, expression, version=2, precision=8, **extra):
expressions = [version, expression]
if precision is not None:
expressions.append(self._handle_param(precision, 'precision', int))
super().__init__(*expressions, **extra)
def as_oracle(self, compiler, connection, **extra_context):
source_expressions = self.get_source_expressions()
version = source_expressions[0]
clone = self.copy()
clone.set_source_expressions([source_expressions[1]])
extra_context['function'] = 'SDO_UTIL.TO_GML311GEOMETRY' if version.value == 3 else 'SDO_UTIL.TO_GMLGEOMETRY'
return super(AsGML, clone).as_sql(compiler, connection, **extra_context)
class AsKML(AsGML):
def as_sqlite(self, compiler, connection, **extra_context):
# No version parameter
clone = self.copy()
clone.set_source_expressions(self.get_source_expressions()[1:])
return clone.as_sql(compiler, connection, **extra_context)
class AsSVG(GeoFunc):
output_field = TextField()
def __init__(self, expression, relative=False, precision=8, **extra):
relative = relative if hasattr(relative, 'resolve_expression') else int(relative)
expressions = [
expression,
relative,
self._handle_param(precision, 'precision', int),
]
super().__init__(*expressions, **extra)
class BoundingCircle(OracleToleranceMixin, GeoFunc):
def __init__(self, expression, num_seg=48, **extra):
super().__init__(expression, num_seg, **extra)
def as_oracle(self, compiler, connection, **extra_context):
clone = self.copy()
clone.set_source_expressions([self.get_source_expressions()[0]])
return super(BoundingCircle, clone).as_oracle(compiler, connection, **extra_context)
class Centroid(OracleToleranceMixin, GeomOutputGeoFunc):
arity = 1
class Difference(OracleToleranceMixin, GeomOutputGeoFunc):
arity = 2
geom_param_pos = (0, 1)
class DistanceResultMixin:
@cached_property
def output_field(self):
return DistanceField(self.geo_field)
def source_is_geography(self):
return self.geo_field.geography and self.geo_field.srid == 4326
class Distance(DistanceResultMixin, OracleToleranceMixin, GeoFunc):
geom_param_pos = (0, 1)
spheroid = None
def __init__(self, expr1, expr2, spheroid=None, **extra):
expressions = [expr1, expr2]
if spheroid is not None:
self.spheroid = self._handle_param(spheroid, 'spheroid', bool)
super().__init__(*expressions, **extra)
def as_postgresql(self, compiler, connection, **extra_context):
clone = self.copy()
function = None
expr2 = clone.source_expressions[1]
geography = self.source_is_geography()
if expr2.output_field.geography != geography:
if isinstance(expr2, Value):
expr2.output_field.geography = geography
else:
clone.source_expressions[1] = Cast(
expr2,
GeometryField(srid=expr2.output_field.srid, geography=geography),
)
if not geography and self.geo_field.geodetic(connection):
# Geometry fields with geodetic (lon/lat) coordinates need special distance functions
if self.spheroid:
# DistanceSpheroid is more accurate and resource intensive than DistanceSphere
function = connection.ops.spatial_function_name('DistanceSpheroid')
# Replace boolean param by the real spheroid of the base field
clone.source_expressions.append(Value(self.geo_field.spheroid(connection)))
else:
function = connection.ops.spatial_function_name('DistanceSphere')
return super(Distance, clone).as_sql(compiler, connection, function=function, **extra_context)
def as_sqlite(self, compiler, connection, **extra_context):
if self.geo_field.geodetic(connection):
# SpatiaLite returns NULL instead of zero on geodetic coordinates
extra_context['template'] = 'COALESCE(%(function)s(%(expressions)s, %(spheroid)s), 0)'
extra_context['spheroid'] = int(bool(self.spheroid))
return super().as_sql(compiler, connection, **extra_context)
class Envelope(GeomOutputGeoFunc):
arity = 1
class ForcePolygonCW(GeomOutputGeoFunc):
arity = 1
class GeoHash(GeoFunc):
output_field = TextField()
def __init__(self, expression, precision=None, **extra):
expressions = [expression]
if precision is not None:
expressions.append(self._handle_param(precision, 'precision', int))
super().__init__(*expressions, **extra)
def as_mysql(self, compiler, connection, **extra_context):
clone = self.copy()
# If no precision is provided, set it to the maximum.
if len(clone.source_expressions) < 2:
clone.source_expressions.append(Value(100))
return clone.as_sql(compiler, connection, **extra_context)
class GeometryDistance(GeoFunc):
output_field = FloatField()
arity = 2
function = ''
arg_joiner = ' <-> '
geom_param_pos = (0, 1)
class Intersection(OracleToleranceMixin, GeomOutputGeoFunc):
arity = 2
geom_param_pos = (0, 1)
@BaseSpatialField.register_lookup
class IsValid(OracleToleranceMixin, GeoFuncMixin, Transform):
lookup_name = 'isvalid'
output_field = BooleanField()
def as_oracle(self, compiler, connection, **extra_context):
sql, params = super().as_oracle(compiler, connection, **extra_context)
return "CASE %s WHEN 'TRUE' THEN 1 ELSE 0 END" % sql, params
class Length(DistanceResultMixin, OracleToleranceMixin, GeoFunc):
def __init__(self, expr1, spheroid=True, **extra):
self.spheroid = spheroid
super().__init__(expr1, **extra)
def as_sql(self, compiler, connection, **extra_context):
if self.geo_field.geodetic(connection) and not connection.features.supports_length_geodetic:
raise NotSupportedError("This backend doesn't support Length on geodetic fields")
return super().as_sql(compiler, connection, **extra_context)
def as_postgresql(self, compiler, connection, **extra_context):
clone = self.copy()
function = None
if self.source_is_geography():
clone.source_expressions.append(Value(self.spheroid))
elif self.geo_field.geodetic(connection):
# Geometry fields with geodetic (lon/lat) coordinates need length_spheroid
function = connection.ops.spatial_function_name('LengthSpheroid')
clone.source_expressions.append(Value(self.geo_field.spheroid(connection)))
else:
dim = min(f.dim for f in self.get_source_fields() if f)
if dim > 2:
function = connection.ops.length3d
return super(Length, clone).as_sql(compiler, connection, function=function, **extra_context)
def as_sqlite(self, compiler, connection, **extra_context):
function = None
if self.geo_field.geodetic(connection):
function = 'GeodesicLength' if self.spheroid else 'GreatCircleLength'
return super().as_sql(compiler, connection, function=function, **extra_context)
class LineLocatePoint(GeoFunc):
output_field = FloatField()
arity = 2
geom_param_pos = (0, 1)
class MakeValid(GeoFunc):
pass
class MemSize(GeoFunc):
output_field = IntegerField()
arity = 1
class NumGeometries(GeoFunc):
output_field = IntegerField()
arity = 1
class NumPoints(GeoFunc):
output_field = IntegerField()
arity = 1
class Perimeter(DistanceResultMixin, OracleToleranceMixin, GeoFunc):
arity = 1
def as_postgresql(self, compiler, connection, **extra_context):
function = None
if self.geo_field.geodetic(connection) and not self.source_is_geography():
raise NotSupportedError("ST_Perimeter cannot use a non-projected non-geography field.")
dim = min(f.dim for f in self.get_source_fields())
if dim > 2:
function = connection.ops.perimeter3d
return super().as_sql(compiler, connection, function=function, **extra_context)
def as_sqlite(self, compiler, connection, **extra_context):
if self.geo_field.geodetic(connection):
raise NotSupportedError("Perimeter cannot use a non-projected field.")
return super().as_sql(compiler, connection, **extra_context)
class PointOnSurface(OracleToleranceMixin, GeomOutputGeoFunc):
arity = 1
class Reverse(GeoFunc):
arity = 1
class Scale(SQLiteDecimalToFloatMixin, GeomOutputGeoFunc):
def __init__(self, expression, x, y, z=0.0, **extra):
expressions = [
expression,
self._handle_param(x, 'x', NUMERIC_TYPES),
self._handle_param(y, 'y', NUMERIC_TYPES),
]
if z != 0.0:
expressions.append(self._handle_param(z, 'z', NUMERIC_TYPES))
super().__init__(*expressions, **extra)
class SnapToGrid(SQLiteDecimalToFloatMixin, GeomOutputGeoFunc):
def __init__(self, expression, *args, **extra):
nargs = len(args)
expressions = [expression]
if nargs in (1, 2):
expressions.extend(
[self._handle_param(arg, '', NUMERIC_TYPES) for arg in args]
)
elif nargs == 4:
# Reverse origin and size param ordering
expressions += [
*(self._handle_param(arg, '', NUMERIC_TYPES) for arg in args[2:]),
*(self._handle_param(arg, '', NUMERIC_TYPES) for arg in args[0:2]),
]
else:
raise ValueError('Must provide 1, 2, or 4 arguments to `SnapToGrid`.')
super().__init__(*expressions, **extra)
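# Illustrative call shapes for the argument handling above (not part of the
# original module):
#     SnapToGrid('geom', 0.01)            -> one grid size for both axes
#     SnapToGrid('geom', 0.01, 0.02)      -> separate x/y sizes
#     SnapToGrid('geom', xsize, ysize, xorigin, yorigin)
#         -> the four-argument form is re-ordered to (xorigin, yorigin,
#            xsize, ysize) before being handed to the spatial function.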
class SymDifference(OracleToleranceMixin, GeomOutputGeoFunc):
arity = 2
geom_param_pos = (0, 1)
class Transform(GeomOutputGeoFunc):
def __init__(self, expression, srid, **extra):
expressions = [
expression,
self._handle_param(srid, 'srid', int),
]
if 'output_field' not in extra:
extra['output_field'] = GeometryField(srid=srid)
super().__init__(*expressions, **extra)
class Translate(Scale):
def as_sqlite(self, compiler, connection, **extra_context):
clone = self.copy()
if len(self.source_expressions) < 4:
# Always provide the z parameter for ST_Translate
clone.source_expressions.append(Value(0))
return super(Translate, clone).as_sqlite(compiler, connection, **extra_context)
class Union(OracleToleranceMixin, GeomOutputGeoFunc):
arity = 2
geom_param_pos = (0, 1)
|
|
# Test the runpy module
import unittest
import os
import os.path
import sys
import tempfile
from test.support import verbose, run_unittest, forget
from runpy import _run_code, _run_module_code, run_module
# Note: This module can't safely test _run_module_as_main as it
# runs its tests in the current process, which would mess with the
# real __main__ module (usually test.regrtest)
# See test_cmd_line_script for a test that executes that code path
# Set up the test code and expected results
class RunModuleCodeTest(unittest.TestCase):
expected_result = ["Top level assignment", "Lower level reference"]
test_source = (
"# Check basic code execution\n"
"result = ['Top level assignment']\n"
"def f():\n"
" result.append('Lower level reference')\n"
"f()\n"
"# Check the sys module\n"
"import sys\n"
"run_argv0 = sys.argv[0]\n"
"run_name_in_sys_modules = __name__ in sys.modules\n"
"if run_name_in_sys_modules:\n"
" module_in_sys_modules = globals() is sys.modules[__name__].__dict__\n"
"# Check nested operation\n"
"import runpy\n"
"nested = runpy._run_module_code('x=1\\n', mod_name='<run>')\n"
)
def test_run_code(self):
saved_argv0 = sys.argv[0]
d = _run_code(self.test_source, {})
self.failUnless(d["result"] == self.expected_result)
self.failUnless(d["__name__"] is None)
self.failUnless(d["__file__"] is None)
self.failUnless(d["__loader__"] is None)
self.failUnless(d["__package__"] is None)
self.failUnless(d["run_argv0"] is saved_argv0)
self.failUnless("run_name" not in d)
self.failUnless(sys.argv[0] is saved_argv0)
def test_run_module_code(self):
initial = object()
name = "<Nonsense>"
file = "Some other nonsense"
loader = "Now you're just being silly"
package = '' # Treat as a top level module
d1 = dict(initial=initial)
saved_argv0 = sys.argv[0]
d2 = _run_module_code(self.test_source,
d1,
name,
file,
loader,
package)
self.failUnless("result" not in d1)
self.failUnless(d2["initial"] is initial)
self.assertEqual(d2["result"], self.expected_result)
self.assertEqual(d2["nested"]["x"], 1)
self.failUnless(d2["__name__"] is name)
self.failUnless(d2["run_name_in_sys_modules"])
self.failUnless(d2["module_in_sys_modules"])
self.failUnless(d2["__file__"] is file)
self.failUnless(d2["run_argv0"] is file)
self.failUnless(d2["__loader__"] is loader)
self.failUnless(d2["__package__"] is package)
self.failUnless(sys.argv[0] is saved_argv0)
self.failUnless(name not in sys.modules)
class RunModuleTest(unittest.TestCase):
def expect_import_error(self, mod_name):
try:
run_module(mod_name)
except ImportError:
pass
else:
self.fail("Expected import error for " + mod_name)
def test_invalid_names(self):
# Builtin module
self.expect_import_error("sys")
# Non-existent modules
self.expect_import_error("sys.imp.eric")
self.expect_import_error("os.path.half")
self.expect_import_error("a.bee")
self.expect_import_error(".howard")
self.expect_import_error("..eaten")
# Package
self.expect_import_error("logging")
def test_library_module(self):
run_module("runpy")
def _add_pkg_dir(self, pkg_dir):
os.mkdir(pkg_dir)
pkg_fname = os.path.join(pkg_dir, "__init__.py")
pkg_file = open(pkg_fname, "w")
pkg_file.close()
return pkg_fname
def _make_pkg(self, source, depth):
pkg_name = "__runpy_pkg__"
test_fname = "runpy_test.py"
pkg_dir = sub_dir = tempfile.mkdtemp()
if verbose: print(" Package tree in:", sub_dir)
sys.path.insert(0, pkg_dir)
if verbose: print(" Updated sys.path:", sys.path[0])
for i in range(depth):
sub_dir = os.path.join(sub_dir, pkg_name)
pkg_fname = self._add_pkg_dir(sub_dir)
if verbose: print(" Next level in:", sub_dir)
if verbose: print(" Created:", pkg_fname)
mod_fname = os.path.join(sub_dir, test_fname)
mod_file = open(mod_fname, "w")
mod_file.write(source)
mod_file.close()
if verbose: print(" Created:", mod_fname)
mod_name = (pkg_name+".")*depth + "runpy_test"
return pkg_dir, mod_fname, mod_name
def _del_pkg(self, top, depth, mod_name):
for entry in list(sys.modules):
if entry.startswith("__runpy_pkg__"):
del sys.modules[entry]
if verbose: print(" Removed sys.modules entries")
del sys.path[0]
if verbose: print(" Removed sys.path entry")
for root, dirs, files in os.walk(top, topdown=False):
for name in files:
try:
os.remove(os.path.join(root, name))
except OSError as ex:
if verbose: print(ex) # Persist with cleaning up
for name in dirs:
fullname = os.path.join(root, name)
try:
os.rmdir(fullname)
except OSError as ex:
if verbose: print(ex) # Persist with cleaning up
try:
os.rmdir(top)
if verbose: print(" Removed package tree")
except OSError as ex:
if verbose: print(ex) # Persist with cleaning up
def _check_module(self, depth):
pkg_dir, mod_fname, mod_name = (
self._make_pkg("x=1\n", depth))
forget(mod_name)
try:
if verbose: print("Running from source:", mod_name)
d1 = run_module(mod_name) # Read from source
self.failUnless("x" in d1)
self.assertEqual(d1["x"], 1)
del d1 # Ensure __loader__ entry doesn't keep file open
__import__(mod_name)
os.remove(mod_fname)
if verbose: print("Running from compiled:", mod_name)
d2 = run_module(mod_name) # Read from bytecode
self.failUnless("x" in d2)
self.assertEqual(d2["x"], 1)
del d2 # Ensure __loader__ entry doesn't keep file open
finally:
self._del_pkg(pkg_dir, depth, mod_name)
if verbose: print("Module executed successfully")
def _add_relative_modules(self, base_dir, source, depth):
if depth <= 1:
raise ValueError("Relative module test needs depth > 1")
pkg_name = "__runpy_pkg__"
module_dir = base_dir
for i in range(depth):
parent_dir = module_dir
module_dir = os.path.join(module_dir, pkg_name)
# Add sibling module
sibling_fname = os.path.join(module_dir, "sibling.py")
sibling_file = open(sibling_fname, "w")
sibling_file.close()
if verbose: print(" Added sibling module:", sibling_fname)
# Add nephew module
uncle_dir = os.path.join(parent_dir, "uncle")
self._add_pkg_dir(uncle_dir)
if verbose: print(" Added uncle package:", uncle_dir)
cousin_dir = os.path.join(uncle_dir, "cousin")
self._add_pkg_dir(cousin_dir)
if verbose: print(" Added cousin package:", cousin_dir)
nephew_fname = os.path.join(cousin_dir, "nephew.py")
nephew_file = open(nephew_fname, "w")
nephew_file.close()
if verbose: print(" Added nephew module:", nephew_fname)
def _check_relative_imports(self, depth, run_name=None):
contents = r"""\
from __future__ import absolute_import
from . import sibling
from ..uncle.cousin import nephew
"""
pkg_dir, mod_fname, mod_name = (
self._make_pkg(contents, depth))
try:
self._add_relative_modules(pkg_dir, contents, depth)
pkg_name = mod_name.rpartition('.')[0]
if verbose: print("Running from source:", mod_name)
d1 = run_module(mod_name, run_name=run_name) # Read from source
self.failUnless("__package__" in d1)
self.failUnless(d1["__package__"] == pkg_name)
self.failUnless("sibling" in d1)
self.failUnless("nephew" in d1)
del d1 # Ensure __loader__ entry doesn't keep file open
__import__(mod_name)
os.remove(mod_fname)
if verbose: print("Running from compiled:", mod_name)
d2 = run_module(mod_name, run_name=run_name) # Read from bytecode
self.failUnless("__package__" in d2)
self.failUnless(d2["__package__"] == pkg_name)
self.failUnless("sibling" in d2)
self.failUnless("nephew" in d2)
del d2 # Ensure __loader__ entry doesn't keep file open
finally:
self._del_pkg(pkg_dir, depth, mod_name)
if verbose: print("Module executed successfully")
def test_run_module(self):
for depth in range(4):
if verbose: print("Testing package depth:", depth)
self._check_module(depth)
def test_explicit_relative_import(self):
for depth in range(2, 5):
if verbose: print("Testing relative imports at depth:", depth)
self._check_relative_imports(depth)
def test_main_relative_import(self):
for depth in range(2, 5):
if verbose: print("Testing main relative imports at depth:", depth)
self._check_relative_imports(depth, "__main__")
def test_main():
run_unittest(RunModuleCodeTest)
run_unittest(RunModuleTest)
if __name__ == "__main__":
test_main()
|
|
# Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Author: Endre Karlson <endre.karlson@managedit.ie>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from designate.tests.test_api.test_v2 import ApiV2TestCase
"""
NOTE: Record invalidation is tested in Central tests
"""
class ApiV2ReverseFloatingIPTest(ApiV2TestCase):
def test_get_floatingip_no_record(self):
context = self.get_context(project_id='a')
fip = self.network_api.fake.allocate_floatingip(context.project_id)
response = self.client.get(
'/reverse/floatingips/%s' % ":".join([fip['region'], fip['id']]),
headers={'X-Test-Tenant-Id': context.project_id})
self.assertEqual(200, response.status_int)
self.assertEqual('application/json', response.content_type)
# TODO(ekarlso): Remove the floatingip key - bug in v2 api
fip_record = response.json
self.assertEqual(":".join([fip['region'],
fip['id']]), fip_record['id'])
self.assertEqual(fip['address'], fip_record['address'])
self.assertIsNone(fip_record['description'])
self.assertIsNone(fip_record['ptrdname'])
self.assertEqual('NONE', fip_record['action'])
self.assertEqual('INACTIVE', fip_record['status'])
def test_get_floatingip_with_record(self):
fixture = self.get_ptr_fixture()
context = self.get_context(project_id='a')
fip = self.network_api.fake.allocate_floatingip(
context.project_id)
self.central_service.update_floatingip(
context, fip['region'], fip['id'], fixture)
response = self.client.get(
'/reverse/floatingips/%s' % ":".join([fip['region'], fip['id']]),
headers={'X-Test-Tenant-Id': context.project_id})
self.assertEqual(200, response.status_int)
self.assertEqual('application/json', response.content_type)
# TODO(ekarlso): Remove the floatingip key - bug in v2 api
fip_record = response.json
self.assertEqual(":".join([fip['region'], fip['id']]),
fip_record['id'])
self.assertEqual(fip['address'], fip_record['address'])
self.assertIsNone(fip_record['description'])
self.assertEqual(fixture['ptrdname'], fip_record['ptrdname'])
self.assertEqual('CREATE', fip_record['action'])
self.assertEqual('PENDING', fip_record['status'])
def test_get_floatingip_after_unset(self):
fixture = self.get_ptr_fixture()
context = self.get_context(project_id='a')
elevated_context = context.elevated()
elevated_context.all_tenants = True
fip = self.network_api.fake.allocate_floatingip(context.project_id)
# Unsetting via "None"
self.central_service.update_floatingip(
context, fip['region'], fip['id'], fixture)
criterion = {
'managed_resource_id': fip['id'],
'managed_tenant_id': context.project_id
}
zone_id = self.central_service.find_record(
elevated_context, criterion=criterion).zone_id
# Simulate the unset on the backend
zone_serial = self.central_service.get_zone(
elevated_context, zone_id).serial
self.central_service.update_status(
elevated_context, zone_id, 'SUCCESS', zone_serial, 'UPDATE')
# Unset PTR ('ptrdname' is None aka null in JSON)
response = self.client.patch_json(
'/reverse/floatingips/%s' % ":".join([fip['region'], fip['id']]),
{'ptrdname': None},
headers={'X-Test-Tenant-Id': context.project_id})
self.assertIsNone(response.json)
self.assertEqual(202, response.status_int)
# Simulate the unset on the backend
zone_serial = self.central_service.get_zone(
elevated_context, zone_id).serial
self.central_service.update_status(
elevated_context, zone_id, 'SUCCESS', zone_serial, 'UPDATE')
response = self.client.get(
'/reverse/floatingips/%s' % ":".join([fip['region'], fip['id']]),
headers={'X-Test-Tenant-Id': context.project_id})
self.assertEqual(200, response.status_int)
self.assertEqual('application/json', response.content_type)
fip_record = response.json
self.assertEqual(":".join([fip['region'], fip['id']]),
fip_record['id'])
self.assertEqual(fip['address'], fip_record['address'])
self.assertIsNone(fip_record['description'])
self.assertIsNone(fip_record['ptrdname'])
self.assertEqual('NONE', fip_record['action'])
self.assertEqual('INACTIVE', fip_record['status'])
def test_get_floatingip_not_allocated(self):
url = '/reverse/floatingips/foo:04580c52-b253-4eb7-8791-fbb9de9f856f'
self._assert_exception('not_found', 404, self.client.get, url)
def test_get_floatingip_invalid_key(self):
url = '/reverse/floatingips/foo:bar'
self._assert_exception('bad_request', 400, self.client.get, url)
def test_list_floatingip_no_allocations(self):
response = self.client.get('/reverse/floatingips')
self.assertIn('floatingips', response.json)
self.assertIn('links', response.json)
self.assertEqual(0, len(response.json['floatingips']))
def test_list_floatingip_no_record(self):
context = self.get_context(project_id='a')
fip = self.network_api.fake.allocate_floatingip(context.project_id)
response = self.client.get(
'/reverse/floatingips',
headers={'X-Test-Tenant-Id': context.project_id})
self.assertIn('floatingips', response.json)
self.assertIn('links', response.json)
self.assertEqual(1, len(response.json['floatingips']))
fip_record = response.json['floatingips'][0]
self.assertIsNone(fip_record['ptrdname'])
self.assertEqual(":".join([fip['region'], fip['id']]),
fip_record['id'])
self.assertEqual(fip['address'], fip_record['address'])
self.assertIsNone(fip_record['description'])
self.assertEqual('NONE', fip_record['action'])
self.assertEqual('INACTIVE', fip_record['status'])
def test_list_floatingip_with_record(self):
fixture = self.get_ptr_fixture()
context = self.get_context(project_id='a')
fip = self.network_api.fake.allocate_floatingip(context.project_id)
self.central_service.update_floatingip(
context, fip['region'], fip['id'], fixture)
response = self.client.get(
'/reverse/floatingips',
headers={'X-Test-Tenant-Id': context.project_id})
self.assertIn('floatingips', response.json)
self.assertIn('links', response.json)
self.assertEqual(1, len(response.json['floatingips']))
fip_record = response.json['floatingips'][0]
self.assertEqual(fixture['ptrdname'], fip_record['ptrdname'])
self.assertEqual(":".join([fip['region'], fip['id']]),
fip_record['id'])
self.assertEqual(fip['address'], fip_record['address'])
self.assertIsNone(fip_record['description'])
self.assertEqual(fixture['ptrdname'], fip_record['ptrdname'])
self.assertEqual('CREATE', fip_record['action'])
self.assertEqual('PENDING', fip_record['status'])
def test_set_floatingip(self):
fixture = self.get_ptr_fixture()
fip = self.network_api.fake.allocate_floatingip('tenant')
response = self.client.patch_json(
'/reverse/floatingips/%s' % ":".join([fip['region'], fip['id']]),
fixture.to_dict(),
headers={'X-Test-Tenant-Id': 'tenant'})
self.assertEqual(202, response.status_int)
self.assertEqual('application/json', response.content_type)
fip_record = response.json
self.assertEqual(":".join([fip['region'], fip['id']]),
fip_record['id'])
self.assertEqual(fip['address'], fip_record['address'])
self.assertIsNone(fip_record['description'])
self.assertEqual(fixture['ptrdname'], fip_record['ptrdname'])
self.assertEqual('CREATE', fip_record['action'])
self.assertEqual('PENDING', fip_record['status'])
def test_set_floatingip_not_allocated(self):
fixture = self.get_ptr_fixture()
fip = self.network_api.fake.allocate_floatingip('tenant')
self.network_api.fake.deallocate_floatingip(fip['id'])
url = '/reverse/floatingips/%s' % ":".join([fip['region'], fip['id']])
self._assert_exception('not_found', 404, self.client.patch_json, url,
fixture.to_dict())
def test_set_floatingip_invalid_ptrdname(self):
fip = self.network_api.fake.allocate_floatingip('tenant')
url = '/reverse/floatingips/%s' % ":".join([fip['region'], fip['id']])
self._assert_exception('invalid_object', 400, self.client.patch_json,
url, {'ptrdname': 'test|'})
def test_set_floatingip_invalid_key(self):
url = '/reverse/floatingips/%s' % 'foo:random'
self._assert_exception('bad_request', 400, self.client.patch_json,
url, {})
def test_unset_floatingip(self):
fixture = self.get_ptr_fixture()
context = self.get_context(project_id='a')
elevated_context = context.elevated()
elevated_context.all_tenants = True
fip = self.network_api.fake.allocate_floatingip(context.project_id)
# Unsetting via "None"
self.central_service.update_floatingip(
context, fip['region'], fip['id'], fixture)
criterion = {
'managed_resource_id': fip['id'],
'managed_tenant_id': context.project_id
}
zone_id = self.central_service.find_record(
elevated_context, criterion=criterion).zone_id
# Simulate the unset on the backend
zone_serial = self.central_service.get_zone(
elevated_context, zone_id).serial
self.central_service.update_status(
elevated_context, zone_id, 'SUCCESS', zone_serial, 'UPDATE')
# Unset PTR ('ptrdname' is None aka null in JSON)
response = self.client.patch_json(
'/reverse/floatingips/%s' % ":".join([fip['region'], fip['id']]),
{'ptrdname': None},
headers={'X-Test-Tenant-Id': context.project_id})
self.assertIsNone(response.json)
self.assertEqual(202, response.status_int)
# Simulate the unset on the backend
zone_serial = self.central_service.get_zone(
elevated_context, zone_id).serial
self.central_service.update_status(
elevated_context, zone_id, 'SUCCESS', zone_serial, 'UPDATE')
fip = self.central_service.get_floatingip(
context, fip['region'], fip['id'])
self.assertIsNone(fip['ptrdname'])
def test_unset_floatingip_not_allocated(self):
fixture = self.get_ptr_fixture()
context = self.get_context(project_id='a')
fip = self.network_api.fake.allocate_floatingip(context.project_id)
self.central_service.update_floatingip(
context, fip['region'], fip['id'], fixture)
self.network_api.fake.deallocate_floatingip(fip['id'])
url = '/reverse/floatingips/%s' % ":".join([fip['region'], fip['id']])
self._assert_exception('not_found', 404, self.client.patch_json, url,
{'ptrdname': None})
|
|
"""
A context object for caching a function's return value each time it
is called with the same input arguments.
"""
# Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
# Copyright (c) 2009 Gael Varoquaux
# License: BSD Style, 3 clauses.
from __future__ import with_statement
import os
import shutil
import time
import pydoc
try:
import cPickle as pickle
except ImportError:
import pickle
import functools
import traceback
import warnings
import inspect
import json
# Local imports
from .hashing import hash
from .func_inspect import get_func_code, get_func_name, filter_args
from .logger import Logger, format_time
from . import numpy_pickle
from .disk import mkdirp, rm_subdirs
FIRST_LINE_TEXT = "# first line:"
# TODO: The following object should have a data store object as a sub
# object, and the interface to persist and query should be separated in
# the data store.
#
# This would enable creating 'Memory' objects with a different logic for
# pickling that would simply span a MemorizedFunc with the same
# store (or do we want to copy it to avoid cross-talks?), for instance to
# implement HDF5 pickling.
# TODO: Same remark for the logger, and probably use the Python logging
# mechanism.
def extract_first_line(func_code):
""" Extract the first line information from the function code
text if available.
"""
if func_code.startswith(FIRST_LINE_TEXT):
func_code = func_code.split('\n')
first_line = int(func_code[0][len(FIRST_LINE_TEXT):])
func_code = '\n'.join(func_code[1:])
else:
first_line = -1
return func_code, first_line
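# Minimal sketch of the stored format this helper parses (illustrative):
#
#     extract_first_line("# first line: 42\ndef f(x):\n    return x\n")
#     -> ("def f(x):\n    return x\n", 42)
#
# Code without the marker is returned unchanged with a first line of -1.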
class JobLibCollisionWarning(UserWarning):
""" Warn that there might be a collision between names of functions.
"""
###############################################################################
# class `MemorizedFunc`
###############################################################################
class MemorizedFunc(Logger):
""" Callable object decorating a function for caching its return value
each time it is called.
All values are cached on the filesystem, in a deep directory
structure. Methods are provided to inspect the cache or clean it.
Attributes
----------
func : callable
The original, undecorated, function.
cachedir : string
Path to the base cache directory of the memory context.
ignore : list or None
List of variable names to ignore when choosing whether to
recompute.
mmap_mode : {None, 'r+', 'r', 'w+', 'c'}
The memmapping mode used when loading numpy arrays from the
cache. See numpy.load for the meaning of the arguments.
compress : boolean, or integer
Whether to zip the stored data on disk. If an integer is
given, it should be between 1 and 9, and sets the amount
of compression. Note that compressed arrays cannot be
read by memmapping.
verbose : int, optional
The verbosity flag, controls messages that are issued as
the function is evaluated.
"""
#-------------------------------------------------------------------------
# Public interface
#-------------------------------------------------------------------------
def __init__(self, func, cachedir, ignore=None, mmap_mode=None,
compress=False, verbose=1, timestamp=None):
"""
Parameters
----------
func: callable
The function to decorate
cachedir: string
The path of the base directory to use as a data store
ignore: list or None
List of variable names to ignore.
mmap_mode: {None, 'r+', 'r', 'w+', 'c'}, optional
The memmapping mode used when loading numpy arrays from the
cache. See numpy.load for the meaning of the arguments.
compress : boolean, or integer
Whether to zip the stored data on disk. If an integer is
given, it should be between 1 and 9, and sets the amount
of compression. Note that compressed arrays cannot be
read by memmapping.
verbose: int, optional
Verbosity flag, controls the debug messages that are issued
as functions are evaluated. The higher, the more verbose
timestamp: float, optional
The reference time from which times in tracing messages
are reported.
"""
Logger.__init__(self)
self._verbose = verbose
self.cachedir = cachedir
self.func = func
self.mmap_mode = mmap_mode
self.compress = compress
if compress and mmap_mode is not None:
warnings.warn('Compressed results cannot be memmapped',
stacklevel=2)
if timestamp is None:
timestamp = time.time()
self.timestamp = timestamp
if ignore is None:
ignore = []
self.ignore = ignore
mkdirp(self.cachedir)
try:
functools.update_wrapper(self, func)
except:
" Objects like ufunc don't like that "
if inspect.isfunction(func):
doc = pydoc.TextDoc().document(func
).replace('\n', '\n\n', 1)
else:
# Pydoc does a poor job on other objects
doc = func.__doc__
self.__doc__ = 'Memoized version of %s' % doc
def __call__(self, *args, **kwargs):
# Compare the function code with the previous to see if the
# function code has changed
output_dir, argument_hash = self.get_output_dir(*args, **kwargs)
# FIXME: The statements below should be try/excepted
if not (self._check_previous_func_code(stacklevel=3) and
os.path.exists(output_dir)):
if self._verbose > 10:
_, name = get_func_name(self.func)
self.warn('Computing func %s, argument hash %s in '
'directory %s'
% (name, argument_hash, output_dir))
out = self.call(*args, **kwargs)
if self.mmap_mode is None:
return out
else:
# Memmap the output at the first call to be consistent with
# later calls
return self.load_output(output_dir)
else:
try:
t0 = time.time()
out = self.load_output(output_dir)
if self._verbose > 4:
t = time.time() - t0
_, name = get_func_name(self.func)
msg = '%s cache loaded - %s' % (name, format_time(t))
print(max(0, (80 - len(msg))) * '_' + msg)
return out
except Exception:
# XXX: Should use an exception logger
self.warn('Exception while loading results for '
'(args=%s, kwargs=%s)\n %s' %
(args, kwargs, traceback.format_exc()))
shutil.rmtree(output_dir, ignore_errors=True)
return self.call(*args, **kwargs)
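# Minimal usage sketch (an assumption about typical use, not part of the
# original module):
#
#     >>> def square(x):
#     ...     return x ** 2
#     >>> cached_square = MemorizedFunc(square, cachedir='/tmp/joblib_demo')
#     >>> cached_square(3)   # first call: computes and persists output.pkl
#     9
#     >>> cached_square(3)   # second call: loaded straight from the cache
#     9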
def __reduce__(self):
""" We don't store the timestamp when pickling, to avoid the hash
depending on it.
In addition, when unpickling, we run the __init__.
"""
return (self.__class__, (self.func, self.cachedir, self.ignore,
self.mmap_mode, self.compress, self._verbose))
#-------------------------------------------------------------------------
# Private interface
#-------------------------------------------------------------------------
def _get_func_dir(self, mkdir=True):
""" Get the directory corresponding to the cache for the
function.
"""
module, name = get_func_name(self.func)
module.append(name)
func_dir = os.path.join(self.cachedir, *module)
if mkdir:
mkdirp(func_dir)
return func_dir
def get_output_dir(self, *args, **kwargs):
""" Returns the directory in which are persisted the results
of the function corresponding to the given arguments.
The results can be loaded using the .load_output method.
"""
coerce_mmap = (self.mmap_mode is not None)
argument_hash = hash(filter_args(self.func, self.ignore,
args, kwargs),
coerce_mmap=coerce_mmap)
output_dir = os.path.join(self._get_func_dir(), argument_hash)
return output_dir, argument_hash
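# Illustrative layout of the resulting cache tree (an assumption about a
# typical call, not part of the original module): for a function
# mymodule.square cached under cachedir='/tmp/joblib', the output of square(3)
# is persisted to something like
#     /tmp/joblib/mymodule/square/<argument hash>/output.pkl
# with func_code.py sitting one level up, next to the per-call hash directories.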
def _write_func_code(self, filename, func_code, first_line):
""" Write the function code and the filename to a file.
"""
func_code = '%s %i\n%s' % (FIRST_LINE_TEXT, first_line, func_code)
with open(filename, 'w') as out:
out.write(func_code)
def _check_previous_func_code(self, stacklevel=2):
"""
stacklevel is the depth at which this function is called, to
issue useful warnings to the user.
"""
# Here, we go through some effort to be robust to dynamically
# changing code and collision. We cannot inspect.getsource
# because it is not reliable when using IPython's magic "%run".
func_code, source_file, first_line = get_func_code(self.func)
func_dir = self._get_func_dir()
func_code_file = os.path.join(func_dir, 'func_code.py')
try:
with open(func_code_file) as infile:
old_func_code, old_first_line = \
extract_first_line(infile.read())
except IOError:
self._write_func_code(func_code_file, func_code, first_line)
return False
if old_func_code == func_code:
return True
# We have differing code: is this because we are referring to
# different functions, or because the function we are referring to
# has changed?
_, func_name = get_func_name(self.func, resolv_alias=False,
win_characters=False)
if old_first_line == first_line == -1 or func_name == '<lambda>':
if not first_line == -1:
func_description = '%s (%s:%i)' % (func_name,
source_file, first_line)
else:
func_description = func_name
warnings.warn(JobLibCollisionWarning(
"Cannot detect name collisions for function '%s'"
% func_description), stacklevel=stacklevel)
# Fetch the code at the old location and compare it. If it is the
# same as the stored code, we have a collision: the code in the
# file has not changed, but the name we have is pointing to a new
# code block.
if not old_first_line == first_line and source_file is not None:
possible_collision = False
if os.path.exists(source_file):
_, func_name = get_func_name(self.func, resolv_alias=False)
num_lines = len(func_code.split('\n'))
with open(source_file) as f:
on_disk_func_code = f.readlines()[
old_first_line - 1
:old_first_line - 1 + num_lines - 1]
on_disk_func_code = ''.join(on_disk_func_code)
possible_collision = (on_disk_func_code.rstrip()
== old_func_code.rstrip())
else:
possible_collision = source_file.startswith('<doctest ')
if possible_collision:
warnings.warn(JobLibCollisionWarning(
'Possible name collisions between functions '
"'%s' (%s:%i) and '%s' (%s:%i)" %
(func_name, source_file, old_first_line,
func_name, source_file, first_line)),
stacklevel=stacklevel)
# The function has changed, wipe the cache directory.
# XXX: Should be using warnings, and giving stacklevel
if self._verbose > 10:
_, func_name = get_func_name(self.func, resolv_alias=False)
self.warn("Function %s (stored in %s) has changed." %
(func_name, func_dir))
self.clear(warn=True)
return False
def clear(self, warn=True):
""" Empty the function's cache.
"""
func_dir = self._get_func_dir(mkdir=False)
if self._verbose and warn:
self.warn("Clearing cache %s" % func_dir)
if os.path.exists(func_dir):
shutil.rmtree(func_dir, ignore_errors=True)
mkdirp(func_dir)
func_code, _, first_line = get_func_code(self.func)
func_code_file = os.path.join(func_dir, 'func_code.py')
self._write_func_code(func_code_file, func_code, first_line)
def call(self, *args, **kwargs):
""" Force the execution of the function with the given arguments and
persist the output values.
"""
start_time = time.time()
output_dir, argument_hash = self.get_output_dir(*args, **kwargs)
if self._verbose:
print(self.format_call(*args, **kwargs))
output = self.func(*args, **kwargs)
self._persist_output(output, output_dir)
self._persist_input(output_dir, *args, **kwargs)
duration = time.time() - start_time
if self._verbose:
_, name = get_func_name(self.func)
msg = '%s - %s' % (name, format_time(duration))
print(max(0, (80 - len(msg))) * '_' + msg)
return output
def format_call(self, *args, **kwds):
""" Returns a nicely formatted statement displaying the function
call with the given arguments.
"""
path, signature = self.format_signature(self.func, *args,
**kwds)
msg = '%s\n[Memory] Calling %s...\n%s' % (80 * '_', path, signature)
return msg
# XXX: Not using logging framework
#self.debug(msg)
def format_signature(self, func, *args, **kwds):
# XXX: This should be moved out to a function
# XXX: Should this use inspect.formatargvalues/formatargspec?
module, name = get_func_name(func)
module = [m for m in module if m]
if module:
module.append(name)
module_path = '.'.join(module)
else:
module_path = name
arg_str = list()
previous_length = 0
for arg in args:
arg = self.format(arg, indent=2)
if len(arg) > 1500:
arg = '%s...' % arg[:700]
if previous_length > 80:
arg = '\n%s' % arg
previous_length = len(arg)
arg_str.append(arg)
arg_str.extend(['%s=%s' % (v, self.format(i)) for v, i in
kwds.items()])
arg_str = ', '.join(arg_str)
signature = '%s(%s)' % (name, arg_str)
return module_path, signature
# Maybe make public
def _persist_output(self, output, dir):
""" Persist the given output tuple in the directory.
"""
try:
mkdirp(dir)
filename = os.path.join(dir, 'output.pkl')
numpy_pickle.dump(output, filename, compress=self.compress)
if self._verbose > 10:
print('Persisting in %s' % dir)
except OSError:
" Race condition in the creation of the directory "
def _persist_input(self, output_dir, *args, **kwargs):
""" Save a small summary of the call using json format in the
output directory.
"""
argument_dict = filter_args(self.func, self.ignore,
args, kwargs)
input_repr = dict((k, repr(v)) for k, v in argument_dict.items())
        # This can fail due to race conditions with multiple
        # concurrent joblib processes removing the file or the directory
try:
mkdirp(output_dir)
            with open(os.path.join(output_dir, 'input_args.json'), 'w') as f:
                json.dump(input_repr, f)
        except Exception:
pass
return input_repr
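    # Sketch of the persisted summary: each argument value is stored as its
    # repr(), so for a hypothetical call f(x=3, y='a') the resulting
    # input_args.json is roughly:
    #
    #     {"x": "3", "y": "'a'"}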
def load_output(self, output_dir):
""" Read the results of a previous calculation from the directory
it was cached in.
"""
if self._verbose > 1:
t = time.time() - self.timestamp
if self._verbose < 10:
print('[Memory]% 16s: Loading %s...' % (
format_time(t),
self.format_signature(self.func)[0]
))
else:
print('[Memory]% 16s: Loading %s from %s' % (
format_time(t),
self.format_signature(self.func)[0],
output_dir
))
filename = os.path.join(output_dir, 'output.pkl')
return numpy_pickle.load(filename,
mmap_mode=self.mmap_mode)
# XXX: Need a method to check if results are available.
#-------------------------------------------------------------------------
# Private `object` interface
#-------------------------------------------------------------------------
def __repr__(self):
return '%s(func=%s, cachedir=%s)' % (
self.__class__.__name__,
self.func,
repr(self.cachedir),
)
###############################################################################
# class `Memory`
###############################################################################
class Memory(Logger):
""" A context object for caching a function's return value each time it
is called with the same input arguments.
All values are cached on the filesystem, in a deep directory
structure.
see :ref:`memory_reference`
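
        Example
        -------
        A minimal usage sketch; the cache directory below is only a
        placeholder::

            mem = Memory(cachedir='/tmp/joblib_example')

            @mem.cache
            def square(x):
                return x ** 2

            square(3)   # computed and persisted on the first call
            square(3)   # loaded from the cache on later calls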
"""
#-------------------------------------------------------------------------
# Public interface
#-------------------------------------------------------------------------
def __init__(self, cachedir, mmap_mode=None, compress=False, verbose=1):
"""
Parameters
----------
cachedir: string or None
The path of the base directory to use as a data store
or None. If None is given, no caching is done and
the Memory object is completely transparent.
mmap_mode: {None, 'r+', 'r', 'w+', 'c'}, optional
The memmapping mode used when loading from cache
numpy arrays. See numpy.load for the meaning of the
arguments.
compress: boolean, or integer
Whether to zip the stored data on disk. If an integer is
given, it should be between 1 and 9, and sets the amount
of compression. Note that compressed arrays cannot be
read by memmapping.
verbose: int, optional
Verbosity flag, controls the debug messages that are issued
as functions are evaluated.
"""
# XXX: Bad explanation of the None value of cachedir
Logger.__init__(self)
self._verbose = verbose
self.mmap_mode = mmap_mode
self.timestamp = time.time()
self.compress = compress
if compress and mmap_mode is not None:
warnings.warn('Compressed results cannot be memmapped',
stacklevel=2)
if cachedir is None:
self.cachedir = None
else:
self.cachedir = os.path.join(cachedir, 'joblib')
mkdirp(self.cachedir)
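    # For example, Memory(cachedir='/tmp/cache') stores its data under
    # '/tmp/cache/joblib' (the path is a placeholder), while
    # Memory(cachedir=None) disables caching entirely.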
def cache(self, func=None, ignore=None, verbose=None,
mmap_mode=False):
""" Decorates the given function func to only compute its return
value for input arguments not cached on disk.
Parameters
----------
func: callable, optional
The function to be decorated
ignore: list of strings
            A list of argument names to ignore in the hashing
verbose: integer, optional
The verbosity mode of the function. By default that
of the memory object is used.
mmap_mode: {None, 'r+', 'r', 'w+', 'c'}, optional
The memmapping mode used when loading from cache
numpy arrays. See numpy.load for the meaning of the
arguments. By default that of the memory object is used.
Returns
-------
decorated_func: MemorizedFunc object
The returned object is a MemorizedFunc object, that is
callable (behaves like a function), but offers extra
methods for cache lookup and management. See the
documentation for :class:`joblib.memory.MemorizedFunc`.
"""
if func is None:
# Partial application, to be able to specify extra keyword
# arguments in decorators
return functools.partial(self.cache, ignore=ignore,
verbose=verbose, mmap_mode=mmap_mode)
if self.cachedir is None:
return func
if verbose is None:
verbose = self._verbose
if mmap_mode is False:
mmap_mode = self.mmap_mode
if isinstance(func, MemorizedFunc):
func = func.func
return MemorizedFunc(func, cachedir=self.cachedir,
mmap_mode=mmap_mode,
ignore=ignore,
compress=self.compress,
verbose=verbose,
timestamp=self.timestamp)
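    # Illustrative sketch of the two supported call forms, assuming ``mem`` is
    # a Memory instance and ``f`` an ordinary function; the ignored argument
    # name is a placeholder:
    #
    #     cached_f = mem.cache(f)           # plain decoration
    #
    #     @mem.cache(ignore=['verbose'])    # keyword form, handled by the
    #     def g(x, verbose=0):              # functools.partial branch above
    #         return x ** 2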
def clear(self, warn=True):
""" Erase the complete cache directory.
"""
if warn:
            self.warn('Completely flushing the cache')
rm_subdirs(self.cachedir)
def eval(self, func, *args, **kwargs):
""" Eval function func with arguments `*args` and `**kwargs`,
in the context of the memory.
            This method works like calling ``func(*args, **kwargs)``
            directly, except that the function is only evaluated when
            the cache is not up to date.
"""
if self.cachedir is None:
return func(*args, **kwargs)
return self.cache(func)(*args, **kwargs)
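    # Minimal sketch, assuming ``mem`` is a Memory instance and ``square`` an
    # ordinary function:
    #
    #     mem.eval(square, 3)   # same as mem.cache(square)(3), or plain
    #                           # square(3) when cachedir is None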
#-------------------------------------------------------------------------
# Private `object` interface
#-------------------------------------------------------------------------
def __repr__(self):
return '%s(cachedir=%s)' % (
self.__class__.__name__,
repr(self.cachedir),
)
def __reduce__(self):
""" We don't store the timestamp when pickling, to avoid the hash
            depending on it.
            In addition, when unpickling, we run __init__ again.
"""
        # Strip the trailing 'joblib' subdirectory (7 characters, including
        # the separator) that __init__ appended to cachedir.
cachedir = self.cachedir[:-7] if self.cachedir is not None else None
return (self.__class__, (cachedir,
self.mmap_mode, self.compress, self._verbose))
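    # Illustrative sketch: pickling re-creates the Memory from its constructor
    # arguments (the timestamp is intentionally dropped), assuming ``mem`` is
    # an existing instance:
    #
    #     import pickle
    #     mem_copy = pickle.loads(pickle.dumps(mem))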