| code | repo_name | path | language | license | size |
|---|---|---|---|---|---|
| stringlengths 2–1.05M | stringlengths 5–104 | stringlengths 4–251 | stringclasses (1 value) | stringclasses (15 values) | int32 2–1.05M |
# -*- coding: utf-8 -*-
"""Field classes.

Includes all fields from `marshmallow.fields` in addition to a custom
`Nested` field and `DelimitedList`.

All fields can optionally take a special `location` keyword argument, which
tells webargs where to parse the request argument from. ::

    args = {
        'active': fields.Bool(location='query'),
        'content_type': fields.Str(load_from='Content-Type',
                                   location='headers'),
    }
"""
import marshmallow as ma

from webargs.core import argmap2schema

__all__ = [
    'Nested',
    'DelimitedList',
]

# Expose all fields from marshmallow.fields.
# We do this instead of 'from marshmallow.fields import *' because webargs
# has its own subclass of Nested.
for each in (field_name for field_name in ma.fields.__all__ if field_name != 'Nested'):
    __all__.append(each)
    globals()[each] = getattr(ma.fields, each)


class Nested(ma.fields.Nested):
    """Same as `marshmallow.fields.Nested`, except it can be passed a dictionary
    as the first argument, which will be converted to a `marshmallow.Schema`.
    """

    def __init__(self, nested, *args, **kwargs):
        if isinstance(nested, dict):
            nested = argmap2schema(nested)
        super(Nested, self).__init__(nested, *args, **kwargs)


class DelimitedList(ma.fields.List):
    """Same as `marshmallow.fields.List`, except it can load from either a list
    or a delimited string (e.g. "foo,bar,baz").

    :param Field cls_or_instance: A field class or instance.
    :param str delimiter: Delimiter between values.
    :param bool as_string: Dump values to string.
    """

    delimiter = ','

    def __init__(self, cls_or_instance, delimiter=None, as_string=False, **kwargs):
        self.delimiter = delimiter or self.delimiter
        self.as_string = as_string
        super(DelimitedList, self).__init__(cls_or_instance, **kwargs)

    def _serialize(self, value, attr, obj):
        ret = super(DelimitedList, self)._serialize(value, attr, obj)
        if self.as_string:
            return self.delimiter.join(format(each) for each in value)
        return ret

    def _deserialize(self, value, attr, data):
        try:
            ret = (
                value
                if ma.utils.is_iterable_but_not_string(value)
                else value.split(self.delimiter)
            )
        except AttributeError:
            self.fail('invalid')
        return super(DelimitedList, self)._deserialize(ret, attr, data)
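
# Illustrative usage sketch, assuming the marshmallow-2.x-era webargs API
# implied by the module above; the field names ('ids', 'author') and the use
# of `Int`/`Str` (re-exported from marshmallow.fields by the loop at the top)
# are hypothetical:
#
#     args = {
#         'ids': DelimitedList(Int()),          # loads [1, 2, 3] or "1,2,3"
#         'author': Nested({'name': Str()}),    # dict converted via argmap2schema
#     }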
| daspots/dasapp | lib/webargs/fields.py | Python | mit | 2,488 |
from __future__ import print_function
import logging
_logger = logging.getLogger('theano.sandbox.cuda.opt')
import copy
import sys
import time
import warnings
import pdb
import numpy
import theano
from theano import scalar as scal
from theano import config, tensor, gof
import theano.ifelse
from six.moves import reduce, xrange
from theano.compile import optdb
from theano.gof import (local_optimizer, EquilibriumDB, ProxyDB,
Optimizer, toolbox)
from theano.gof.opt import LocalMetaOptimizer
from theano.sandbox.cuda import as_cuda_ndarray_variable
from theano.sandbox.cuda.basic_ops import (
gpu_eye, gpu_contiguous,
gpu_from_host, host_from_gpu, GpuFromHost, HostFromGpu,
GpuContiguous,
GpuElemwise, GpuDimShuffle, GpuReshape, GpuCAReduce, GpuFlatten,
GpuSubtensor, GpuAdvancedSubtensor1,
GpuAdvancedIncSubtensor1, GpuAdvancedIncSubtensor1_dev20,
GpuIncSubtensor, gpu_alloc, GpuAlloc, gpu_shape, GpuSplit, GpuAllocEmpty)
from theano.sandbox.cuda.type import CudaNdarrayType
from theano.sandbox.cuda.blas import (gpu_dot22, gpu_dot22scalar,
gpu_gemm_inplace, gpu_gemm_no_inplace, GpuConv,
GpuCorrMM, GpuCorrMM_gradInputs, GpuCorrMM_gradWeights,
GpuCorr3dMM, GpuCorr3dMM_gradInputs, GpuCorr3dMM_gradWeights)
from theano.sandbox.cuda.blas import gpu_gemv_inplace
from theano.sandbox.cuda.cula import gpu_solve
from theano.sandbox.cuda.blas import gpu_gemv_no_inplace
from theano.sandbox.cuda.blas import gpu_ger_inplace
from theano.sandbox.cuda.blas import gpu_ger_no_inplace
from theano.sandbox.cuda.blas import (GpuDownsampleFactorMax,
GpuDownsampleFactorMaxGrad, GpuDownsampleFactorMaxGradGrad)
from theano.sandbox.cuda.nnet import (
GpuCrossentropySoftmaxArgmax1HotWithBias,
GpuCrossentropySoftmax1HotWithBiasDx,
GpuSoftmax, GpuSoftmaxWithBias)
from theano.sandbox.cuda.elemwise import SupportCodeError
from theano.scalar.basic_scipy import Erfinv
from theano.scalar.basic_scipy import Erfcx
from theano.sandbox.cuda.elemwise import erfinv_gpu
from theano.sandbox.cuda.elemwise import erfcx_gpu
from theano.sandbox.cuda.var import CudaNdarrayConstant
from theano.sandbox.cuda import gpu_optimizer, register_opt, gpu_seqopt, GpuOp
from theano.scan_module import scan_utils, scan_op, scan_opt
from theano.tensor.blas import _is_real_vector, _is_real_matrix
from theano.tensor import nlinalg
from theano.tensor import slinalg
from theano.tensor.nnet.Conv3D import Conv3D
from theano.tests.breakpoint import PdbBreakpoint
try:
# We need to be able to import this file even if cuda isn't avail.
from theano.sandbox.cuda import device_properties
except ImportError:
pass
# optdb.print_summary() # shows what is currently registered
gpu_cut_copies = EquilibriumDB()
gpu_seqopt.register('gpu_local_optimizations', gpu_optimizer, 1,
'fast_run', 'fast_compile', 'inplace', 'gpu')
gpu_seqopt.register('gpu_cut_transfers', gpu_cut_copies, 2,
'fast_run', 'fast_compile', 'gpu')
# DO NOT PUT fast_run or fast_compile in gpu_opt! This will ALWAYS enable the GPU!
optdb.register('gpu_opt',
gpu_seqopt,
optdb.__position__.get('add_destroy_handler', 49.5) - 1,
'gpu')
# DO NOT PUT fast_run in gpu_after_fusion! This will ALWAYS enable the GPU!
# This second pass is needed because elemwise fusion can put all of the
# non-float32 code inside the fused elemwise. When no float64 op is left,
# the fused elemwise can then run on the GPU.
optdb.register('gpu_after_fusion',
ProxyDB(gpu_seqopt),
optdb.__position__.get('elemwise_fusion', 49) + .1,
'gpu')
# Register merge_optimizer as a global opt
gpu_optimizer.register('gpu_merge', theano.gof.opt.merge_optimizer,
'fast_run', 'fast_compile', final_opt=True)
# register local_track_shape_i at this level too
# to make multi-level lift of shape work.
register_opt()(theano.tensor.opt.local_track_shape_i)
register_opt(final_opt=True, name='gpu_constant_folding')(
tensor.opt.constant_folding)
register_opt()(theano.tensor.opt.local_subtensor_make_vector)
# Register local_remove_all_assert as a global opt
gpu_optimizer.register('local_remove_all_assert',
theano.tensor.opt.local_remove_all_assert,
'unsafe')
# This is a partial list of CPU ops that can, in some circumstances, be
# moved to the GPU. This list is used by an optimization.
# Hopefully, we can keep this list up to date.
import theano.tensor.signal.downsample
import theano.tensor.nnet.neighbours
cpu_ops_moved_to_gpu = [
tensor.blas.Dot22, tensor.blas.Dot22Scalar, tensor.blas.Gemm,
tensor.blas.Gemv, tensor.blas.Ger, tensor.nnet.conv.ConvOp,
tensor.signal.downsample.DownsampleFactorMax,
tensor.signal.downsample.DownsampleFactorMaxGrad,
theano.tensor.nnet.neighbours.Images2Neibs,
tensor.nnet.CrossentropySoftmaxArgmax1HotWithBias,
tensor.nnet.CrossentropySoftmax1HotWithBiasDx,
tensor.nnet.Softmax, tensor.nnet.SoftmaxWithBias,
tensor.Elemwise, tensor.DimShuffle, tensor.CAReduce,
tensor.elemwise.All, tensor.elemwise.Any,
tensor.elemwise.CAReduceDtype, tensor.elemwise.Sum,
tensor.elemwise.Prod, tensor.elemwise.ProdWithoutZeros,
tensor.Reshape, tensor.Flatten, tensor.Subtensor,
tensor.AdvancedSubtensor1, tensor.AdvancedIncSubtensor1,
tensor.IncSubtensor, tensor.Shape, tensor.Join,
tensor.Alloc, tensor.Eye]
class InputToGpuOptimizer(Optimizer):
"""
Transfer the inputs of a graph to the GPU when necessary.
It should make this part of the optimizer faster, as we will only need one
pass over the fgraph.
"""
def __init__(self):
Optimizer.__init__(self)
def add_requirements(self, fgraph):
fgraph.attach_feature(toolbox.ReplaceValidate())
def apply(self, fgraph):
for input in fgraph.inputs:
if isinstance(input.type, CudaNdarrayType):
continue
# This happens frequently as we do two passes of the gpu optimizations
if (len(input.clients) == 1 and
(input.clients[0][0] == 'output' or
input.clients[0][0].op == gpu_from_host)):
continue
try:
new_input = host_from_gpu(gpu_from_host(input))
if new_input.type == input.type:
fgraph.replace_validate(input, new_input,
"InputToGpuOptimizer")
except TypeError:
# As we currently only support float32, this can fail.
# Using try/except means we don't need an explicit dtype check here.
pass
# We register it before all other gpu optimizers to be sure that the inputs
# are on the gpu.
gpu_seqopt.register('InputToGpuOptimizer', InputToGpuOptimizer(),
0,
'fast_run',
'fast_compile',
'merge') # TODO: how to make it mandatory for gpu_seqopt?
@local_optimizer([gpu_from_host, host_from_gpu])
def local_cut_gpu_host_gpu(node):
if tensor.opt.opt.check_chain(node, gpu_from_host, host_from_gpu):
return [node.inputs[0].owner.inputs[0]]
if tensor.opt.opt.check_chain(node, host_from_gpu, gpu_from_host):
return [node.inputs[0].owner.inputs[0]]
return False
gpu_cut_copies.register('cut_gpu_host_transfers', local_cut_gpu_host_gpu,
'fast_run', 'fast_compile', 'gpu')
gpu_cut_copies.register('cut_gpu_constant_transfers',
tensor.opt.constant_folding,
'fast_run', 'fast_compile', 'gpu')
# Register it into canonicalize to allow other optimizations to work without
# bothering with this useless pattern.
optdb['canonicalize'].register('local_cut_gpu_host_gpu',
local_cut_gpu_host_gpu,
'fast_run', 'fast_compile', 'gpu')
# 'float64', 'complex128' and 'complex64' are not supported in elemwise
# on the gpu.
elemwise_cuda_dtype_supported = ['float32', 'uint8', 'int8', 'uint16', 'int16',
'uint32', 'int32', 'uint64', 'int64']
def dtype_in_elemwise_supported(op):
"""
Return True if the Elemwise op is supported on the gpu.
Return False otherwise.
:note: We need to check inside the Composite op.
"""
def get_all_basic_scalar(composite_op):
l = []
for i in composite_op.fgraph.toposort():
if isinstance(i, theano.scalar.Composite):
l += get_all_basic_scalar(i)
else:
l.append(i)
return l
if isinstance(op, GpuElemwise) or isinstance(op, tensor.Elemwise):
if isinstance(op.scalar_op, theano.scalar.Composite):
scals = get_all_basic_scalar(op.scalar_op)
for s in scals:
if any([i.type.dtype not in elemwise_cuda_dtype_supported
for i in s.inputs + s.outputs]):
return False
return True
@register_opt()
@local_optimizer([tensor.Elemwise])
def local_gpu_elemwise_0(node):
"""elemwise(..., host_from_gpu, ...)
-> host_from_gpu(elemwise(gpu_from_host, ..., gpu_from_host))
"""
if (isinstance(node.op, tensor.Elemwise) and
dtype_in_elemwise_supported(node.op)):
if any([i.owner and
isinstance(i.owner.op, HostFromGpu)
for i in node.inputs]):
if all([o.type.dtype == 'float32' for o in node.outputs]):
# Don't set any inplace pattern.
# gpu_inplace_elemwise_optimizer will do it later
if isinstance(node.op.scalar_op, Erfinv):
new_op = GpuElemwise(erfinv_gpu)
elif isinstance(node.op.scalar_op, Erfcx):
new_op = GpuElemwise(erfcx_gpu)
else:
try:
new_op = GpuElemwise(node.op.scalar_op)
except SupportCodeError:
# This happens when scalar_op requires support code
return False
# first establish that float32 can store all inputs
upcastable = set(['float32', 'int8', 'int16', 'uint8',
'uint16'])
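# (These are the dtypes whose values fit exactly in float32: its 24-bit
# significand covers int8/int16/uint8/uint16, while int32 and wider integer
# types do not fit exactly in general and are therefore excluded.)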
# case 1 - all inputs are already float32
if all([i.type.dtype == 'float32' for i in node.inputs]):
# TODO: change this when fusion makes Elemwise with multiple
# outputs
gpu_elemwise = new_op(*(gpu_from_host(i)
for i in node.inputs))
# case 2 - it is still ok if some inputs were upcast to float32
elif all([i.type.dtype in upcastable
for i in node.inputs]):
# second - establish that a new node with upcasted inputs
# has the same output types as the original node
upcasted = node.op.make_node(*[tensor.cast(i, 'float32')
for i in node.inputs])
if [o.type for o in upcasted.outputs] ==\
[o.type for o in node.outputs]:
new_inputs = [gpu_from_host(tensor.cast(i, 'float32'))
for i in node.inputs]
gpu_elemwise = new_op(*new_inputs)
else:
return False
else:
return False
gpu_elemwise = split_huge_add_or_mul(gpu_elemwise.owner)
if not gpu_elemwise:
return False
if max_inputs_to_GpuElemwise(node) < len(gpu_elemwise.inputs):
return False
return [host_from_gpu(gpu_elemwise.outputs[0])]
@register_opt()
@local_optimizer([gpu_from_host])
def local_gpu_elemwise_1(node):
"""
gpu_from_host(Elemwise) -> GpuElemwise(gpu_from_host(...))
"""
if isinstance(node.op, GpuFromHost):
host_i, = node.inputs
if (host_i.owner and
isinstance(host_i.owner.op, tensor.Elemwise) and
len(host_i.clients) == 1 and
dtype_in_elemwise_supported(node.op)):
elemwise_node = host_i.owner
# Don't set any inplace pattern.
# gpu_inplace_elemwise_optimizer will do it later
if isinstance(elemwise_node.op.scalar_op, Erfinv):
new_op = GpuElemwise(erfinv_gpu)
elif isinstance(elemwise_node.op.scalar_op, Erfcx):
new_op = GpuElemwise(erfcx_gpu)
else:
try:
new_op = GpuElemwise(elemwise_node.op.scalar_op)
except SupportCodeError:
# This happens when scalar_op requires support code
return False
if all([i.dtype == 'float32' for i in elemwise_node.inputs]):
gpu_elemwise = new_op(*[as_cuda_ndarray_variable(i)
for i in elemwise_node.inputs])
gpu_elemwise = split_huge_add_or_mul(gpu_elemwise.owner)
if not gpu_elemwise:
return False
return [gpu_elemwise.outputs[0]]
return False
@register_opt()
@local_optimizer([tensor.Split])
def local_gpu_split(node):
if isinstance(node.op, tensor.Split):
input = node.inputs[0]
outs_clients = reduce(list.__add__,
[out.clients for out in node.outputs])
if (input.owner and isinstance(input.owner.op, HostFromGpu) or
any([c != 'output' and isinstance(c.op, GpuFromHost) for c, idx
in outs_clients])):
new_op = GpuSplit(node.op.len_splits)
split_res = new_op(as_cuda_ndarray_variable(input), *node.inputs[1:],
return_list=True)
return [host_from_gpu(o) for o in split_res]
return False
@register_opt()
@local_optimizer([tensor.DimShuffle, gpu_from_host])
def local_gpu_dimshuffle_0(node):
"""
dimshuffle(host_from_gpu()) -> host_from_gpu(gpu_dimshuffle)
gpu_from_host(dimshuffle) -> gpu_dimshuffle(gpu_from_host)
"""
if isinstance(node.op, tensor.DimShuffle):
input, = node.inputs
if input.owner and isinstance(input.owner.op, HostFromGpu):
# Move the DimShuffle to a GpuDimShuffle
new_op = GpuDimShuffle(node.op.input_broadcastable,
node.op.new_order)
return [host_from_gpu(new_op(as_cuda_ndarray_variable(input)))]
if isinstance(node.op, GpuFromHost):
host_input = node.inputs[0]
if host_input.owner and isinstance(host_input.owner.op,
tensor.DimShuffle):
dimshuffle_node = host_input.owner
new_op = GpuDimShuffle(dimshuffle_node.op.input_broadcastable,
dimshuffle_node.op.new_order)
return [new_op(as_cuda_ndarray_variable(dimshuffle_node.inputs[0]))]
return False
@register_opt()
@local_optimizer([tensor.SpecifyShape, gpu_from_host])
def local_gpu_specifyShape_0(node):
"""
specify_shape(host_from_gpu()) -> host_from_gpu(specify_shape)
gpu_from_host(specify_shape) -> specify_shape(gpu_from_host)
"""
if isinstance(node.op, tensor.SpecifyShape):
input = node.inputs[0]
if input.owner and isinstance(input.owner.op, HostFromGpu):
return [host_from_gpu(tensor.specify_shape(as_cuda_ndarray_variable(input),
*node.inputs[1:]))]
if isinstance(node.op, GpuFromHost):
host_input = node.inputs[0]
if host_input.owner and isinstance(host_input.owner.op,
tensor.SpecifyShape):
specifyshape_node = host_input.owner
return [tensor.specify_shape(
as_cuda_ndarray_variable(specifyshape_node.inputs[0]),
*specifyshape_node.inputs[1:])]
return False
@register_opt()
@local_optimizer([gpu_from_host, tensor.basic.Dot])
def local_gpu_dot_to_dot22(node):
"""
gpu_from_host(dot) -> gpu_dot22(gpu_from_host)
dot(host_from_gpu) -> host_from_gpu(gpu_dot22)
This optimization solves the vector-matrix multiplication issue by
transforming the vector into a matrix, applying gpu_dot22 and reshaping
the output.
A more suitable solution would be to use the right cublas call.
This is needed in fast_compile.
"""
# In case the dot did an input upcast, we must check that we can
# make it run on the gpu.
if isinstance(node.op, GpuFromHost):
if node.outputs[0].type.dtype != 'float32':
return False
host_input = node.inputs[0]
if host_input.owner and isinstance(host_input.owner.op,
tensor.basic.Dot):
x, y = host_input.owner.inputs
# case one: vector X matrix
if _is_real_vector(x) and _is_real_matrix(y):
new_op = GpuDimShuffle((False,), ('x', 0))
shape_out = y.shape[1].dimshuffle(['x'])
gpu_x = new_op(as_cuda_ndarray_variable(x))
gpu_y = as_cuda_ndarray_variable(y)
# case two: matrix X vector
elif _is_real_matrix(x) and _is_real_vector(y):
new_op = GpuDimShuffle((False,), (0, 'x'))
shape_out = x.shape[0].dimshuffle(['x'])
gpu_x = as_cuda_ndarray_variable(x)
gpu_y = new_op(as_cuda_ndarray_variable(y))
else:
return False
return [GpuReshape(1)(gpu_dot22(gpu_x, gpu_y), shape_out)]
if isinstance(node.op, tensor.basic.Dot):
if node.outputs[0].type.dtype != 'float32':
return False
if any([i.owner and isinstance(i.owner.op, HostFromGpu)
for i in node.inputs]):
x, y = node.inputs
if _is_real_vector(x) and _is_real_matrix(y):
new_op = GpuDimShuffle((False,), ('x', 0))
shape_out = y.shape[1].dimshuffle(['x'])
gpu_x = new_op(as_cuda_ndarray_variable(x))
gpu_y = as_cuda_ndarray_variable(y)
elif _is_real_matrix(x) and _is_real_vector(y):
new_op = GpuDimShuffle((False,), (0, 'x'))
shape_out = x.shape[0].dimshuffle(['x'])
gpu_x = as_cuda_ndarray_variable(x)
gpu_y = new_op(as_cuda_ndarray_variable(y))
else:
return False
return [host_from_gpu(GpuReshape(1)(gpu_dot22(gpu_x, gpu_y),
shape_out))]
return False
@local_optimizer(None)
def local_assert_no_cpu_op(node):
if not isinstance(node.op, GpuOp) and all([var.owner and isinstance(var.owner.op,
HostFromGpu) for var in node.inputs]) and any([[c for c in var.clients
if isinstance(c[0].op, GpuFromHost)] for var in node.outputs]):
if config.assert_no_cpu_op == "warn":
_logger.warning(("CPU op %s is detected in the computational"
" graph") % node)
elif config.assert_no_cpu_op == "raise":
raise AssertionError("The op %s is on CPU." % node)
elif config.assert_no_cpu_op == "pdb":
pdb.set_trace()
return None
# Register the local_assert_no_cpu_op:
assert_no_cpu_op = theano.tensor.opt.in2out(local_assert_no_cpu_op,
name='assert_no_cpu_op')
# 49.2 is after device specialization & fusion optimizations for last transfers
theano.compile.optdb.register('assert_no_cpu_op', assert_no_cpu_op, 49.2)
@register_opt()
@local_optimizer([theano.ifelse.IfElse, gpu_from_host])
def local_gpu_lazy_ifelse(node):
"""
gpu_from_host(ifelse) -> gpu_ifelse(gpu_from_host)
ifelse(host_from_gpu) -> host_from_gpu(ifelse)
"""
if isinstance(node.op, theano.ifelse.IfElse) and not node.op.gpu:
gpu_ifelse = theano.ifelse.IfElse(node.op.n_outs, gpu=True)
outs_clients = reduce(list.__add__,
[out.clients for out in node.outputs])
if any([(i.owner and isinstance(i.owner.op, HostFromGpu))
for i in node.inputs]) or any(
[c != 'output' and c.op == gpu_from_host for c, idx
in outs_clients]):
c = node.inputs[0]
outs = node.inputs[1:]
# Should not happen, but just in case
if isinstance(c.type, CudaNdarrayType):
c = host_from_gpu(c)
if all([isinstance(o.type, CudaNdarrayType) or o.dtype != 'float32'
for o in outs]):
return
for i in range(len(outs)):
if (not isinstance(outs[i].type, CudaNdarrayType) and
outs[i].dtype == 'float32'):
outs[i] = as_cuda_ndarray_variable(outs[i])
outs = gpu_ifelse(c, *outs, return_list=True)
for i in range(len(outs)):
if isinstance(outs[i].type, CudaNdarrayType):
outs[i] = host_from_gpu(outs[i])
return outs
if isinstance(node.op, GpuFromHost):
host_input = node.inputs[0]
if (host_input.owner and
isinstance(host_input.owner.op, theano.ifelse.IfElse) and
not host_input.owner.op.gpu and
# If there is more than 1 output, we can't replace it
# here with a local optimizer as we replace the
# GpuFromHost node and the other output of the if won't be
# replaced.
host_input.owner.op.n_outs == 1):
gpu_ifelse = theano.ifelse.IfElse(host_input.owner.op.n_outs,
gpu=True)
c = host_input.owner.inputs[0]
outs = host_input.owner.inputs[1:]
# Should not happen, but just in case
if isinstance(c.type, CudaNdarrayType):
c = host_from_gpu(c)
if all([isinstance(o.type, CudaNdarrayType) or o.dtype != 'float32'
for o in outs]):
return
for i in range(len(outs)):
if (not isinstance(outs[i].type, CudaNdarrayType) and
outs[i].dtype == 'float32'):
outs[i] = as_cuda_ndarray_variable(outs[i])
outs = gpu_ifelse.make_node(c, *outs).outputs
return outs
return False
@register_opt()
@local_optimizer([gpu_from_host, tensor.blas.Dot22])
def local_gpu_dot22(node):
"""
gpu_from_host(dot22) -> gpu_dot22(gpu_from_host)
dot22(host_from_gpu) -> host_from_gpu(gpu_dot22)
"""
if isinstance(node.op, GpuFromHost):
host_input = node.inputs[0]
if host_input.owner and isinstance(host_input.owner.op,
tensor.blas.Dot22):
x, y = host_input.owner.inputs
return [gpu_dot22(as_cuda_ndarray_variable(x), as_cuda_ndarray_variable(y))]
if isinstance(node.op, tensor.blas.Dot22):
if any([(i.owner and isinstance(i.owner.op, HostFromGpu))
for i in node.inputs]):
x, y = node.inputs
return [host_from_gpu(gpu_dot22(as_cuda_ndarray_variable(x),
as_cuda_ndarray_variable(y)))]
return False
@register_opt()
@local_optimizer([gpu_from_host, tensor.blas.Dot22Scalar])
def local_gpu_dot22scalar(node):
"""
gpu_from_host(dot22scalar) -> gpu_dot22scalar(gpu_from_host)
dot22scalar(host_from_gpu) -> host_from_gpu(gpu_dot22scalar)
"""
if isinstance(node.op, GpuFromHost):
host_input = node.inputs[0]
if (host_input.owner and
isinstance(host_input.owner.op,
tensor.blas.Dot22Scalar)):
x, y, scalar = host_input.owner.inputs
return [gpu_dot22scalar(as_cuda_ndarray_variable(x), as_cuda_ndarray_variable(y),
tensor.blas._as_scalar(scalar))]
if isinstance(node.op, tensor.blas.Dot22Scalar):
if any([i.owner and isinstance(i.owner.op, HostFromGpu)
for i in node.inputs]):
x, y, scalar = node.inputs
return [host_from_gpu(
gpu_dot22scalar(as_cuda_ndarray_variable(x),
as_cuda_ndarray_variable(y),
tensor.blas._as_scalar(scalar)))]
return False
@register_opt()
@local_optimizer([gpu_from_host, slinalg.Solve])
def local_gpu_solve(node):
"""
gpu_from_host(CpuSolve) -> GpuSolve(gpu_from_host)
CpuSolve(host_from_gpu) -> host_from_gpu(GpuSolve)
"""
if isinstance(node.op, GpuFromHost):
host_input = node.inputs[0]
if (host_input.owner and
isinstance(host_input.owner.op,
slinalg.Solve)):
x, y = host_input.owner.inputs
return [gpu_solve(as_cuda_ndarray_variable(x), as_cuda_ndarray_variable(y))]
if isinstance(node.op, slinalg.Solve):
if any([i.owner and isinstance(i.owner.op, HostFromGpu)
for i in node.inputs]):
x, y = node.inputs
return [host_from_gpu(
gpu_solve(as_cuda_ndarray_variable(x),
as_cuda_ndarray_variable(y)))]
return False
@register_opt()
@local_optimizer([gpu_from_host, tensor.blas_c.CGemv, tensor.blas.Gemv])
def local_gpu_gemv(node):
"""
gpu_from_host(gemv) -> gpu_gemv(gpu_from_host)
gemv(host_from_gpu) -> host_from_gpu(gpu_gemv)
"""
gemvs = (tensor.blas.Gemv,
tensor.blas_c.CGemv,
)
if isinstance(node.op, GpuFromHost):
host_input = node.inputs[0]
if host_input.owner and isinstance(host_input.owner.op, gemvs):
z, a, x, y, b = host_input.owner.inputs
return [gpu_gemv_no_inplace(
as_cuda_ndarray_variable(z),
a,
as_cuda_ndarray_variable(x),
as_cuda_ndarray_variable(y),
b)]
if isinstance(node.op, gemvs):
z, a, x, y, b = node.inputs
x_on_gpu = (x.owner and isinstance(x.owner.op, HostFromGpu))
y_on_gpu = (y.owner and isinstance(y.owner.op, HostFromGpu))
z_on_gpu = (z.owner and isinstance(z.owner.op, HostFromGpu))
if x_on_gpu or y_on_gpu or z_on_gpu:
return [host_from_gpu(
gpu_gemv_no_inplace(
as_cuda_ndarray_variable(z),
a,
as_cuda_ndarray_variable(x),
as_cuda_ndarray_variable(y),
b))]
return False
@register_opt()
@local_optimizer([gpu_from_host, tensor.blas_c.CGer, tensor.blas.Ger,
tensor.blas_scipy.ScipyGer])
def local_gpu_ger(node):
"""
gpu_from_host(ger) -> gpu_ger(gpu_from_host)
ger(host_from_gpu) -> host_from_gpu(gpu_ger)
"""
gers = (tensor.blas_c.CGer,
tensor.blas.Ger,
tensor.blas_scipy.ScipyGer,
)
if isinstance(node.op, GpuFromHost):
host_input = node.inputs[0]
if host_input.owner and isinstance(host_input.owner.op, gers):
z, a, x, y = host_input.owner.inputs
return [gpu_ger_no_inplace(
as_cuda_ndarray_variable(z),
a,
as_cuda_ndarray_variable(x),
as_cuda_ndarray_variable(y)
)]
if isinstance(node.op, gers):
z, a, x, y = node.inputs
x_on_gpu = (x.owner and isinstance(x.owner.op, HostFromGpu))
y_on_gpu = (y.owner and isinstance(y.owner.op, HostFromGpu))
z_on_gpu = (z.owner and isinstance(z.owner.op, HostFromGpu))
if x_on_gpu or y_on_gpu or z_on_gpu:
return [host_from_gpu(
gpu_ger_no_inplace(
as_cuda_ndarray_variable(z),
a,
as_cuda_ndarray_variable(x),
as_cuda_ndarray_variable(y)
))]
return False
@register_opt()
@local_optimizer([tensor.blas.Gemm, gpu_from_host])
def local_gpu_gemm(node):
"""
gpu_from_host(gemm) -> gpu_gemm(gpu_from_host)
gemm(host_from_gpu) -> host_from_gpu(gpu_gemm)
"""
if isinstance(node.op, GpuFromHost):
host_input = node.inputs[0]
if host_input.owner and isinstance(host_input.owner.op,
tensor.blas.Gemm):
z, a, x, y, b = host_input.owner.inputs
return [gpu_gemm_no_inplace(as_cuda_ndarray_variable(z),
a,
as_cuda_ndarray_variable(x),
as_cuda_ndarray_variable(y),
b)]
if isinstance(node.op, tensor.blas.Gemm):
z, a, x, y, b = node.inputs
x_on_gpu = (x.owner and isinstance(x.owner.op, HostFromGpu))
y_on_gpu = (y.owner and isinstance(y.owner.op, HostFromGpu))
z_on_gpu = (z.owner and isinstance(z.owner.op, HostFromGpu))
if x_on_gpu or y_on_gpu or z_on_gpu:
return [host_from_gpu(gpu_gemm_no_inplace(as_cuda_ndarray_variable(z),
a,
as_cuda_ndarray_variable(x),
as_cuda_ndarray_variable(y),
b))]
return False
@register_opt()
@local_optimizer([tensor.elemwise.CAReduce,
tensor.elemwise.All,
tensor.elemwise.Any,
tensor.elemwise.CAReduceDtype,
tensor.elemwise.Sum,
tensor.elemwise.Prod,
tensor.elemwise.ProdWithoutZeros])
def local_gpu_careduce(node):
if isinstance(node.op, tensor.elemwise.CAReduce):
scalar_op = node.op.scalar_op
# currently, only these scalar ops are supported at all,
# and max does not support all combinations of axes
if isinstance(node.op.scalar_op, (scal.Add, scal.Mul,
scal.Maximum, scal.Minimum)):
x, = node.inputs
# Otherwise, in some corner cases, we would try to move it
# to the GPU later and this would cause unwanted user warnings.
if x.dtype != 'float32':
return
replace = False
if x.owner and isinstance(x.owner.op, HostFromGpu):
replace = True
elif (all([c != "output" and isinstance(c.op, GpuFromHost)
for c, i in node.outputs[0].clients])
and x.owner and x.owner.op.__class__ in
cpu_ops_moved_to_gpu):
# It is not always good to transfer the reduction to
# the GPU when its clients are on the GPU but the
# reduction input is not: it means we would transfer
# the (bigger) input to the GPU instead of the
# (smaller) output if we stopped optimizing there.
# Most of the time, though, we will also move to the
# GPU whatever created the input of the reduction, in
# which case we don't introduce a bigger transfer. It
# is hard to know whether, after all optimizations, we
# will end up doing the bigger transfer or not, so we
# use a heuristic: if the input of the reduction is
# generated by an op that we can in some cases move to
# the GPU, we assume it will be moved. If some CPU ops
# are supported on the GPU only in some cases, this
# may move the reduction to the GPU when it wasn't a
# good idea.
replace = True
if replace:
if node.op.axis is None:
reduce_mask = [1] * x.type.ndim
else:
reduce_mask = [0] * x.type.ndim
for a in node.op.axis:
assert reduce_mask[a] == 0
reduce_mask[a] = 1
greduce = GpuCAReduce(reduce_mask, scalar_op)
out = node.outputs[0]
if greduce.supports_c_code([as_cuda_ndarray_variable(x)]):
rval = host_from_gpu(greduce(as_cuda_ndarray_variable(x)))
else:
# Try to make a simpler pattern based on reshaping
# The principle is that if two adjacent dimensions have
# the same value in the reduce_mask, then we can reshape
# to make them a single dimension, do the reduction, and
# then reshape to get them back.
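# For example (hypothetical shapes): x of shape (a, b, c, d) with
# reduce_mask [1, 1, 0, 0] is reshaped to (a * b, c * d) with
# new_mask [1, 0]; the merged (a * b)-sized axis is reduced on the GPU
# and the result is reshaped back to the expected output shape.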
shape_of = node.fgraph.shape_feature.shape_of
x_shape = shape_of[x]
new_in_shp = [x_shape[0]]
new_mask = [reduce_mask[0]]
for i in xrange(1, x.type.ndim):
if reduce_mask[i] == reduce_mask[i - 1]:
new_in_shp[-1] *= x_shape[i]
else:
new_mask.append(reduce_mask[i])
new_in_shp.append(x_shape[i])
new_greduce = GpuCAReduce(new_mask, scalar_op)
reshaped_x = x.reshape(tensor.stack(*new_in_shp))
gpu_reshaped_x = as_cuda_ndarray_variable(reshaped_x)
reshaped_gpu_inputs = [gpu_reshaped_x]
if new_greduce.supports_c_code(reshaped_gpu_inputs):
reduce_reshaped_x = host_from_gpu(
new_greduce(gpu_reshaped_x))
if reduce_reshaped_x.ndim != out.ndim:
rval = reduce_reshaped_x.reshape(
tensor.stack(*shape_of[out]))
else:
rval = reduce_reshaped_x
else:
return
if rval.type == out.type:
return [rval]
else:
for b1, b2 in zip(rval.broadcastable,
out.type.broadcastable):
if b1 is True:
# It can happen that during
# optimization we discover that the
# input can be broadcasted, but didn't
# know that at graph build time.
continue
if b1 is False and b2 is True:
# We should not lose the information
# that one dimension was
# broadcastable.
print((
"WARNING: local_gpu_careduce got type"
" wrong",
rval.type, out.type,
node.inputs[0].type, x.type,
node), file=sys.stderr)
return None
rval = tensor.patternbroadcast(rval,
out.broadcastable)
return [rval]
return False
@register_opt("low_memory")
@local_optimizer([GpuCAReduce])
def local_gpu_elemwise_careduce(node):
if (isinstance(node.op, GpuCAReduce) and
node.op.pre_scalar_op is None and
node.inputs[0].owner and
isinstance(node.inputs[0].owner.op, GpuElemwise) and
# The Op supports all scalar ops with 1 input. We don't
# automatically add more cases, as some (like trigonometric
# operations) combined with some reduction patterns would
# probably result in a slowdown.
isinstance(node.inputs[0].owner.op.scalar_op, scal.basic.Sqr)
):
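# For example, a sum of squares that appears in the graph as
# GpuCAReduce{add}(GpuElemwise{sqr}(x)) is rewritten into a single
# GpuCAReduce{add} whose pre_scalar_op is sqr, applied directly to x,
# so the squared intermediate is never materialized on the GPU.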
op = node.op
inp = node.inputs[0].owner.inputs[0]
return [GpuCAReduce(op.reduce_mask, op.scalar_op, scal.basic.sqr)(inp)]
@register_opt()
@local_optimizer([gpu_from_host, tensor.Reshape])
def local_gpu_reshape(node):
if isinstance(node.op, GpuFromHost):
host_input = node.inputs[0]
if host_input.owner and \
isinstance(host_input.owner.op, tensor.Reshape):
rshp = host_input.owner.op
x, shp = host_input.owner.inputs
gpu_reshape = GpuReshape(rshp.ndim)(as_cuda_ndarray_variable(x), shp)
if gpu_reshape.broadcastable != node.outputs[0].broadcastable:
# This can happen as we always return False for all broadcast
# dims in GpuReshape but not for Reshape.
# Even if we did the same thing, this could still happen
# because of the constant optimization.
gpu_reshape = theano.tensor.patternbroadcast(
gpu_reshape, node.outputs[0].broadcastable)
return [gpu_reshape]
if isinstance(node.op, tensor.Reshape):
x, shp = node.inputs
if x.owner and isinstance(x.owner.op, HostFromGpu):
gpu_x, = x.owner.inputs
gpu_reshape = GpuReshape(node.op.ndim)(gpu_x, shp)
if gpu_reshape.broadcastable != node.outputs[0].broadcastable:
# This can happen as we always return False for all broadcast
# dims in GpuReshape but not for Reshape.
# Even if we did the same thing, this could still happen
# because of the constant optimization.
gpu_reshape = theano.tensor.patternbroadcast(
gpu_reshape, node.outputs[0].broadcastable)
return [host_from_gpu(gpu_reshape)]
return False
@register_opt()
@local_optimizer([gpu_from_host, tensor.Flatten])
def local_gpu_flatten(node):
if isinstance(node.op, GpuFromHost):
host_input = node.inputs[0]
if host_input.owner and \
isinstance(host_input.owner.op, tensor.Flatten):
outdim = host_input.owner.op.outdim
return [GpuFlatten(outdim)(
as_cuda_ndarray_variable(host_input.owner.inputs[0]))]
if isinstance(node.op, tensor.Flatten):
x, = node.inputs
outdim = node.op.outdim
if x.owner and isinstance(x.owner.op, HostFromGpu):
gpu_x, = x.owner.inputs
return [host_from_gpu(GpuFlatten(outdim)(gpu_x))]
return False
@register_opt()
@local_optimizer([gpu_from_host, tensor.Subtensor])
def local_gpu_subtensor(node):
if isinstance(node.op, GpuFromHost):
host_input = node.inputs[0]
if host_input.owner and \
isinstance(host_input.owner.op, tensor.Subtensor):
subt = host_input.owner.op
x = host_input.owner.inputs[0]
if len(x.clients) == 1:
# This means the input of the subtensor is used only by
# the subtensor. We do not want to move the subtensor
# to the GPU in that case.
return
coords = host_input.owner.inputs[1:]
return [GpuSubtensor(subt.idx_list)(as_cuda_ndarray_variable(x), *coords)]
if isinstance(node.op, tensor.Subtensor):
x = node.inputs[0]
if (x.owner and
isinstance(x.owner.op, HostFromGpu) and
x.dtype == "float32"):
gpu_x = x.owner.inputs[0]
if (gpu_x.owner and
isinstance(gpu_x.owner.op, GpuFromHost) and
# And it is a shared var or an input of the graph.
not gpu_x.owner.inputs[0].owner):
if len(x.clients) == 1:
if any([n == 'output' or isinstance(n.op, GpuOp)
for n, _ in node.outputs[0].clients]):
return
else:
return [host_from_gpu(as_cuda_ndarray_variable(node.outputs[0]))]
return
gpu_x, = x.owner.inputs
coords = node.inputs[1:]
return [host_from_gpu(GpuSubtensor(
node.op.idx_list)(gpu_x, *coords))]
return False
@register_opt()
@local_optimizer([gpu_from_host, tensor.AdvancedSubtensor1])
def local_gpu_advanced_subtensor1(node):
if isinstance(node.op, GpuFromHost):
host_input = node.inputs[0]
if host_input.owner and \
host_input.owner.op.__class__ is tensor.AdvancedSubtensor1:
x = host_input.owner.inputs[0]
coords = host_input.owner.inputs[1:]
return [GpuAdvancedSubtensor1()(as_cuda_ndarray_variable(x), *coords)]
if node.op.__class__ is tensor.AdvancedSubtensor1:
x = node.inputs[0]
coords = node.inputs[1:]
if x.owner and isinstance(x.owner.op, HostFromGpu) and x.dtype == "float32":
gpu_x, = x.owner.inputs
return [host_from_gpu(GpuAdvancedSubtensor1()(gpu_x, *coords))]
return False
@register_opt()
@local_optimizer([gpu_from_host, tensor.AdvancedIncSubtensor1])
def local_gpu_advanced_incsubtensor1(node):
if isinstance(node.op, GpuFromHost):
host_input = node.inputs[0]
# Should not execute for GpuAdvancedIncSubtensor1
if host_input.owner and \
host_input.owner.op.__class__ is tensor.AdvancedIncSubtensor1:
x, y = host_input.owner.inputs[0:2]
coords = host_input.owner.inputs[2:]
set_instead_of_inc = host_input.owner.op.set_instead_of_inc
if set_instead_of_inc and config.warn.gpu_set_subtensor1:
warnings.warn(
'Although your current code is fine, please note that '
'Theano versions prior to 0.6 (more specifically, '
'prior to commit d2240bddd on March 29, 2012) may have '
'yielded an incorrect result. To remove this warning, '
'either set the `warn.gpu_set_subtensor1` config '
'option to False, or `warn.ignore_bug_before` to at '
'least \'0.6\'.', stacklevel=1)
active_device_no = theano.sandbox.cuda.active_device_number()
compute_capability = device_properties(active_device_no)['major']
if (compute_capability < 2 or
x.ndim != 2 or
y.ndim != 2):
gpu_op = GpuAdvancedIncSubtensor1(
set_instead_of_inc=set_instead_of_inc)
else:
gpu_op = GpuAdvancedIncSubtensor1_dev20(
set_instead_of_inc=set_instead_of_inc)
return [gpu_op(as_cuda_ndarray_variable(x), as_cuda_ndarray_variable(y), *coords)]
# Should not execute for GpuAdvancedIncSubtensor1
if (node.op.__class__ is tensor.AdvancedIncSubtensor1 and
node.inputs[0].dtype == "float32" and
node.inputs[1].dtype == "float32"):
x, y = node.inputs[0:2]
coords = node.inputs[2:]
go_gpu = False
if x.owner and isinstance(x.owner.op, HostFromGpu):
go_gpu = True
gpu_x, = x.owner.inputs
else:
gpu_x = as_cuda_ndarray_variable(x)
if y.owner and isinstance(y.owner.op, HostFromGpu):
go_gpu = True
gpu_y, = y.owner.inputs
else:
gpu_y = as_cuda_ndarray_variable(y)
if go_gpu:
set_instead_of_inc = node.op.set_instead_of_inc
if set_instead_of_inc and config.warn.gpu_set_subtensor1:
warnings.warn(
'Although your current code is fine, please note that '
'Theano versions prior to 0.6 (more specifically, '
'prior to commit d2240bddd on March 29, 2012) may have '
'yielded an incorrect result. To remove this warning, '
'either set the `warn.gpu_set_subtensor1` config '
'option to False, or `warn.ignore_bug_before` to at '
'least \'0.6\'.', stacklevel=1)
active_device_no = theano.sandbox.cuda.active_device_number()
compute_capability = device_properties(active_device_no)['major']
if (compute_capability < 2 or
x.ndim != 2 or
y.ndim != 2):
gpu_op = GpuAdvancedIncSubtensor1(
set_instead_of_inc=set_instead_of_inc)
else:
gpu_op = GpuAdvancedIncSubtensor1_dev20(
set_instead_of_inc=set_instead_of_inc)
return [host_from_gpu(gpu_op(gpu_x, gpu_y, *coords))]
return False
@register_opt()
@local_optimizer([gpu_from_host, tensor.IncSubtensor])
def local_gpu_incsubtensor(node):
if isinstance(node.op, GpuFromHost):
host_output = node.inputs[0]
if host_output.owner and \
type(host_output.owner.op) == tensor.IncSubtensor:
incsubt = host_output.owner.op
x, y = host_output.owner.inputs[0:2]
coords = host_output.owner.inputs[2:]
return [GpuIncSubtensor(
incsubt.idx_list,
inplace=incsubt.inplace,
set_instead_of_inc=incsubt.set_instead_of_inc)(
as_cuda_ndarray_variable(x),
as_cuda_ndarray_variable(y),
*coords)]
# Incrementing a float32 x results in a float32
# output even if y is float64, so we can downcast
# y to put it on GPU
if type(node.op) == tensor.IncSubtensor and \
node.inputs[0].dtype == "float32":
x, y = node.inputs[0:2]
assert isinstance(x.type, tensor.TensorType)
assert isinstance(y.type, tensor.TensorType)
coords = node.inputs[2:]
go_gpu = False
if x.owner and isinstance(x.owner.op, HostFromGpu):
go_gpu = True
gpu_x, = x.owner.inputs
else:
gpu_x = as_cuda_ndarray_variable(x)
if y.owner and isinstance(y.owner.op, HostFromGpu):
go_gpu = True
gpu_y, = y.owner.inputs
else:
if y.dtype != 'float32':
y = tensor.cast(y, 'float32')
gpu_y = as_cuda_ndarray_variable(y)
if go_gpu:
return [host_from_gpu(GpuIncSubtensor(
node.op.idx_list, inplace=node.op.inplace,
set_instead_of_inc=node.op.set_instead_of_inc)(
gpu_x, gpu_y, *coords))]
return False
@register_opt()
@local_optimizer([tensor.Shape])
def local_gpu_shape(node):
if isinstance(node.op, tensor.Shape):
x, = node.inputs
if x.owner and isinstance(x.owner.op, HostFromGpu):
gpu_x, = x.owner.inputs
return [gpu_shape(gpu_x)]
return False
@register_opt()
@local_optimizer([tensor.Rebroadcast])
def local_gpu_rebroadcast(node):
'''rebroadcast(host_from_gpu(x)) -> host_from_gpu(rebroadcast(x))'''
if isinstance(node.op, tensor.Rebroadcast):
x, = node.inputs
if (x.owner and isinstance(x.owner.op, HostFromGpu)):
gpu_x = x.owner.inputs[0]
return [host_from_gpu(node.op(gpu_x))]
def gpu_print_wrapper(op, cnda):
op.old_op.global_fn(op.old_op, numpy.asarray(cnda))
@register_opt()
@local_optimizer([tensor.printing.Print])
def local_gpu_print_op(node):
if isinstance(node.op, tensor.printing.Print):
x, = node.inputs
if x.owner and isinstance(x.owner.op, HostFromGpu):
gpu_x, = x.owner.inputs
new_op = node.op.__class__(global_fn=gpu_print_wrapper)
new_op.old_op = node.op
return [host_from_gpu(new_op(gpu_x))]
return False
@register_opt()
@local_optimizer([PdbBreakpoint])
def local_gpu_pdbbreakpoint_op(node):
if isinstance(node.op, PdbBreakpoint):
old_inputs = node.inputs
old_outputs = node.outputs
new_inputs = node.inputs[:1]
input_transfered = []
# Go through the monitored variables, only transferring to the GPU those
# for which the input comes from the GPU or the output will be
# transferred to the GPU.
nb_monitored_vars = len(node.outputs)
for i in range(nb_monitored_vars):
inp = old_inputs[i+1]
out = old_outputs[i]
input_is_from_gpu = (inp.owner and
isinstance(inp.owner.op, HostFromGpu))
output_goes_to_gpu = any([c[0] != "output" and
isinstance(c[0].op, GpuFromHost)
for c in out.clients])
if input_is_from_gpu:
# The op should be applied on the GPU version of the input
new_inputs.append(inp.owner.inputs[0])
input_transfered.append(True)
elif output_goes_to_gpu:
# The input should be transferred to the gpu
new_inputs.append(gpu_from_host(inp))
input_transfered.append(True)
else:
# No transfer is required.
new_inputs.append(inp)
input_transfered.append(False)
# Only continue the optimization if at least one input has been
# transferred to the gpu
if not any(input_transfered):
return False
# Apply the op on the new inputs
new_op_outputs = node.op(*new_inputs, return_list=True)
# Propagate the transfer to the gpu through the outputs that require
# it
new_outputs = []
for i in range(len(new_op_outputs)):
if input_transfered[i]:
new_outputs.append(host_from_gpu(new_op_outputs[i]))
else:
new_outputs.append(new_op_outputs[i])
return new_outputs
return False
def cast(x, dtype):
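# Helper: builds an Elemwise whose scalar op is Identity with a forced
# output dtype, i.e. a symbolic elementwise dtype conversion (e.g.
# cast(y, 'float32') as used by the optimizations below).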
stype = scal.Scalar(dtype)
cast_op = theano.tensor.Elemwise(scal.Identity(scal.specific_out(stype)))
return cast_op(x)
import theano.tensor.nnet
@register_opt()
@local_optimizer([tensor.nnet.CrossentropySoftmaxArgmax1HotWithBias])
def local_gpu_crossentorpy_softmax_argmax_1hot_with_bias(node):
if isinstance(node.op, tensor.nnet.CrossentropySoftmaxArgmax1HotWithBias):
x, b, y = node.inputs
if x.owner and isinstance(x.owner.op, HostFromGpu):
gpu_x, = x.owner.inputs
# if y is a cast to integers, we can go to the underlying
# thing if we want, since this gpu op will cast to integers
# internally anyway
int_cast_ops = (
tensor.basic._convert_to_int32,
tensor.basic._convert_to_int8,
tensor.basic._convert_to_int16,
tensor.basic._convert_to_int64,
)
while y.owner and y.owner.op in int_cast_ops:
y = y.owner.inputs[0]
gpu_nll, gpu_sm, gpu_am = \
GpuCrossentropySoftmaxArgmax1HotWithBias()(
gpu_x,
as_cuda_ndarray_variable(b),
as_cuda_ndarray_variable(cast(y, 'float32')))
am_dtype = node.outputs[2].type.dtype
return [host_from_gpu(gpu_nll),
host_from_gpu(gpu_sm),
cast(host_from_gpu(gpu_am), am_dtype)]
return False
@register_opt()
@local_optimizer([tensor.nnet.CrossentropySoftmax1HotWithBiasDx])
def local_gpu_crossentorpy_softmax_1hot_with_bias_dx(node):
if isinstance(node.op, tensor.nnet.CrossentropySoftmax1HotWithBiasDx):
dnll, sm, yidx = node.inputs
if sm.owner and isinstance(sm.owner.op, HostFromGpu):
gpu_sm, = sm.owner.inputs
gpu_dx = GpuCrossentropySoftmax1HotWithBiasDx()(
as_cuda_ndarray_variable(dnll),
gpu_sm,
as_cuda_ndarray_variable(cast(yidx, 'float32')))
return [host_from_gpu(gpu_dx)]
return False
@register_opt()
@local_optimizer([tensor.nnet.Softmax])
def local_gpu_softmax(node):
if isinstance(node.op, tensor.nnet.Softmax):
x, = node.inputs
if x.owner and isinstance(x.owner.op, HostFromGpu):
gpu_x, = x.owner.inputs
gpu_sm = GpuSoftmax()(gpu_x)
return [host_from_gpu(gpu_sm)]
return False
@register_opt()
@local_optimizer([tensor.nnet.SoftmaxWithBias])
def local_gpu_softmax_with_bias(node):
if isinstance(node.op, tensor.nnet.SoftmaxWithBias):
x, b = node.inputs
x_on_gpu = x.owner and isinstance(x.owner.op, HostFromGpu)
b_on_gpu = b.owner and isinstance(b.owner.op, HostFromGpu)
if x_on_gpu or b_on_gpu:
gpu_sm = GpuSoftmaxWithBias()(as_cuda_ndarray_variable(x), as_cuda_ndarray_variable(b))
return [host_from_gpu(gpu_sm)]
return False
# Convolution
from theano.tensor.nnet import conv
def _gpu_conv_to_fftconv(node):
# shared helper function for local_conv_fft_valid and local_conv_fft_full.
# we import conv2d_fft locally to avoid pycuda warnings
from theano.sandbox.cuda.fftconv import conv2d_fft
kwargs = {'border_mode': node.op.border_mode}
if (node.op.imshp is not None and
node.op.imshp[-1] is not None and
node.op.imshp[-1] % 2 == 1):
kwargs['pad_last_dim'] = True
# If the user supplied the full nonsymbolic image_shape and
# filter_shape in conv2d(), we can pass it on to conv2d_fft().
if ((node.op.imshp is not None) and
(len(node.op.imshp) == 3) and
(None not in node.op.imshp) and
(node.op.bsize is not None)):
kwargs['image_shape'] = (node.op.bsize,) + node.op.imshp
if ((node.op.kshp is not None) and
(None not in node.op.kshp) and
(node.op.nkern is not None) and
(len(node.op.imshp) == 3) and
(node.op.imshp[0] is not None)):
kwargs['filter_shape'] = (node.op.nkern, node.op.imshp[0]) + node.op.kshp
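# For example, with hypothetical op attributes bsize=64, imshp=(3, 32, 32),
# nkern=32 and kshp=(5, 5), the above passes image_shape=(64, 3, 32, 32)
# and filter_shape=(32, 3, 5, 5) on to conv2d_fft.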
rval = conv2d_fft(node.inputs[0], node.inputs[1], **kwargs)
if node.outputs[0].broadcastable != rval.broadcastable:
# With given shape information, conv2d_fft may return a different
# broadcast pattern than GpuConv. This is forbidden, so we fix it.
rval = tensor.patternbroadcast(
rval, node.outputs[0].type.broadcastable)
return rval
@local_optimizer([GpuConv])
def local_conv_fft_valid(node):
if isinstance(node.op, GpuConv):
if (node.op.border_mode == 'valid' and
node.op.subsample == (1, 1) and
node.op.fft_opt):
return [_gpu_conv_to_fftconv(node)]
return False
@local_optimizer([GpuConv])
def local_conv_fft_full(node):
if isinstance(node.op, GpuConv):
if (node.op.border_mode == 'full' and
node.op.subsample == (1, 1) and
node.op.fft_opt):
return [_gpu_conv_to_fftconv(node)]
return
def values_eq_approx_high_tol(a, b):
"""This fct is needed to don't have DebugMode raise useless
error due to ronding error.
This happen as We reduce on the two last dimensions, so this
can raise the absolute error if the number of element we
reduce on is significant.
"""
assert a.ndim == 4
atol = None
if a.shape[-1] * a.shape[-2] > 100:
# For float32 the default atol is 1e-5
atol = 3e-5
return CudaNdarrayType.values_eq_approx(a, b, atol=atol)
@local_optimizer([gpu_from_host, conv.ConvOp])
def local_gpu_conv(node):
"""
gpu_from_host(conv) -> gpu_conv(gpu_from_host)
conv(host_from_gpu) -> host_from_gpu(gpu_conv)
"""
def GpuConvOp_from_ConvOp(op):
logical_img_hw = None
if op.kshp_logical is not None and op.kshp_logical != op.kshp:
return None
# print op.kshp, op.imshp[1:3]
# print op.kshp_logical, logical_img_hw
ret = GpuConv(border_mode=op.out_mode,
subsample=(op.dx, op.dy),
logical_img_hw=logical_img_hw,
logical_kern_hw=op.kshp_logical,
logical_kern_align_top=op.kshp_logical_top_aligned,
kshp=op.kshp,
version=op.version,
direction_hint=op.direction_hint,
verbose=op.verbose,
imshp=op.imshp,
nkern=op.nkern,
bsize=op.bsize,
fft_opt=op.fft_opt
)
if op.imshp_logical is not None:
logical_img_hw = op.imshp_logical[1:3]
if logical_img_hw != op.imshp[1:3]:
# this case is not implemented
# return None
rstride = int(numpy.ceil(op.imshp_logical[1] /
float(op.imshp[1])))
cstride = int(numpy.ceil(op.imshp_logical[2] /
float(op.imshp[2])))
def make_graph(img, kern):
buf = tensor.alloc(numpy.asarray(0, dtype=img.dtype),
img.shape[0], *op.imshp_logical)
img = tensor.set_subtensor(buf[:, :, ::rstride, ::cstride],
img)
img = gpu_from_host(img)
return ret(img, kern)
return make_graph
return ret
if isinstance(node.op, GpuFromHost):
# gpu_from_host(conv) -> gpu_conv(gpu_from_host)
host_input = node.inputs[0]
if host_input.owner and isinstance(host_input.owner.op, conv.ConvOp):
gpu_conv = GpuConvOp_from_ConvOp(host_input.owner.op)
if gpu_conv is None:
return
img, kern = host_input.owner.inputs
out = gpu_conv(gpu_from_host(img),
gpu_from_host(kern))
out = tensor.patternbroadcast(out,
node.outputs[0].broadcastable)
out.values_eq_approx = values_eq_approx_high_tol
# in some cases the ConvOp broadcasts the last 2 dimensions
# differently than the gpu ConvOp
return [out]
if isinstance(node.op, conv.ConvOp):
# conv(host_from_gpu) -> host_from_gpu(gpu_conv)
img, kern = node.inputs
img_on_gpu = (img.owner and isinstance(img.owner.op, HostFromGpu))
kern_on_gpu = (kern.owner and isinstance(kern.owner.op, HostFromGpu))
if img_on_gpu or kern_on_gpu:
gpu_conv = GpuConvOp_from_ConvOp(node.op)
if gpu_conv is None:
return
out = gpu_conv(gpu_from_host(img),
gpu_from_host(kern))
out = tensor.patternbroadcast(
host_from_gpu(out),
node.outputs[0].broadcastable)
out.values_eq_approx = values_eq_approx_high_tol
# in some cases the ConvOp broadcasts the last 2 dimensions
# differently than the gpu ConvOp
return [out]
@local_optimizer([GpuConv])
def local_conv_gemm(node):
if (isinstance(node.op, GpuConv) and
node.op.border_mode in ['full', 'valid']):
img, kern = node.inputs
border_mode = node.op.border_mode
subsample = node.op.subsample
if (border_mode == 'valid') or (subsample != (1, 1)):
# need to flip the kernel for valid convolution
kern = kern[:, :, ::-1, ::-1]
# By default use GpuCorrMM
rval = GpuCorrMM(border_mode, subsample)(
gpu_contiguous(img), gpu_contiguous(kern))
# call GpuCorrMM_gradWeights if good
# (the latter is faster if batchsize * kernelHeight * kernelWidth
# is larger than inputChannels * outputHeight * outputWidth.
# GpuConv does not always store information on the batchsize and
# channels, though, so we only use what information we have.)
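# For example, with hypothetical shapes bsize=64, imshp=(3, 32, 32) and
# kshp=(5, 5), the products compared below are 64 * 5 * 5 = 1600 versus
# 3 * 28 * 28 = 2352, so the default GpuCorrMM result above is kept.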
if ((subsample == (1, 1)) and
(node.op.imshp is not None) and
(None not in node.op.imshp[-2:]) and
(node.op.kshp is not None) and
(None not in node.op.kshp)):
# we know the kernel and output size
prod1 = node.op.kshp[0] * node.op.kshp[1]
prod2 = ((node.op.imshp[-2] - node.op.kshp[0] + 1) *
(node.op.imshp[-1] - node.op.kshp[1] + 1))
if ((node.op.bsize is not None) and
(len(node.op.imshp) == 3) and
(node.op.imshp[0] is not None)):
# we also know batchsize and input channels
prod1 *= node.op.bsize
prod2 *= node.op.imshp[0]
# compare to decide
if prod1 > prod2:
# (we need to wrap the result in as_cuda_ndarray_variable,
# because we are not allowed to replace a CudaNdarray with
# a DimShuffle instance in a graph optimization)
rval = theano.sandbox.cuda.as_cuda_ndarray_variable(
GpuCorrMM_gradWeights(border_mode, subsample)(
gpu_contiguous(img.dimshuffle(1, 0, 2, 3)),
gpu_contiguous(kern.dimshuffle(1, 0, 2, 3))
).dimshuffle(1, 0, 2, 3))
elif (border_mode == 'full'):
# need to dimshuffle the kernel for full convolution
kern = kern.dimshuffle(1, 0, 2, 3)
# call GpuCorrMM_gradInputs
rval = GpuCorrMM_gradInputs('valid', subsample)(
gpu_contiguous(kern), gpu_contiguous(img))
if node.outputs[0].broadcastable != rval.broadcastable:
# With given shape information, the GEMM-based op may return a different
# broadcast pattern than GpuConv. This is forbidden, so we fix it.
rval = tensor.patternbroadcast(
rval, node.outputs[0].type.broadcastable)
return [rval]
# First we register the optimizer that moves convolutions to the GPU.
register_opt()(local_gpu_conv)
# Then we create a group of optimizers that replace the legacy GpuConv
# with other implementations. They are tried in a specific order so we
# can control which ones take precedence over others.
conv_groupopt = theano.gof.optdb.LocalGroupDB()
conv_groupopt.__name__ = "gpu_conv_opts"
register_opt()(conv_groupopt)
# FFT gets the highest priority (lowest number), but is disabled by default.
# It can be enabled by including 'conv_fft'.
conv_groupopt.register('conv_fft_valid', local_conv_fft_valid, 10,
'conv_fft')
conv_groupopt.register('conv_fft_full', local_conv_fft_full, 10,
'conv_fft')
# cuDNN is the second, but only registered if cuDNN is available.
# It can be disabled by excluding 'conv_dnn' or 'cudnn'.
from . import dnn
# We can't check at import time if dnn is available, so we must always
# register it. This does not cause problems: if it is not available, the
# opt will do nothing.
conv_groupopt.register('local_conv_dnn', dnn.local_conv_dnn, 20,
'conv_dnn',
'fast_compile', 'fast_run', 'cudnn')
# The GEMM-based convolution comes last to catch all remaining cases.
# It can be disabled by excluding 'conv_gemm'.
conv_groupopt.register('local_conv_gemm', local_conv_gemm, 30,
'conv_gemm',
'fast_compile', 'fast_run')
class LocalCudaMetaOptimizer(LocalMetaOptimizer):
"""Base class for CUDA-based LocalMetaOptimizers"""
def time_call(self, fn):
# Override time_call() to do device synchronization
theano.sandbox.cuda.synchronize()
start = time.time()
fn()
theano.sandbox.cuda.synchronize()
return time.time() - start
# Convolution Meta-optimizer
class ConvMetaOptimizer(LocalCudaMetaOptimizer):
def __init__(self, optimizers):
super(ConvMetaOptimizer, self).__init__([GpuConv], optimizers)
def provide_inputs(self, node, inputs):
# We need to provide dummy data for the given inputs.
# We can make use of the fact that GpuConv often knows its shapes.
result = {}
img, kern = node.inputs
# provide dummy image and filters if needed
vars = (img, kern)
if node.op.imshp is not None and len(node.op.imshp) == 3:
nchannels = node.op.imshp[0]
else:
nchannels = None
shapes = ((node.op.bsize,) + node.op.imshp,
(node.op.nkern, nchannels) + node.op.kshp)
for (var, shape) in zip(vars, shapes):
if ((var in inputs) and
(shape is not None) and
not any(s is None for s in shape)):
result[var] = theano.shared(
# TODO: Use var.type.filter when cuda_ndarray.filter supports non-strict casts
# var.type.filter(numpy.random.randn(*shape),
# allow_downcast=True),
numpy.require(numpy.random.randn(*shape),
dtype=var.dtype),
var.name,
broadcastable=var.broadcastable,
borrow=True)
# return mapping
return result
# We just register all optimizers from conv_groupopt with the metaoptimizer
conv_metaopt = ConvMetaOptimizer(
conv_groupopt.query(*['+' + name for name in conv_groupopt._names]).opts)
# Then we add some optimizers that try less obvious options
conv_metaopt.register(dnn.local_conv_dnn_alternative)
# Finally, we register the metaoptimizer as the first optimizer in conv_groupopt
conv_groupopt.register('conv_meta', conv_metaopt, 0)
@local_optimizer([Conv3D])
def local_conv3d_fft(node):
if not isinstance(node.op, Conv3D):
return
try:
stride_x = tensor.get_scalar_constant_value(node.inputs[3][0])
stride_y = tensor.get_scalar_constant_value(node.inputs[3][1])
stride_z = tensor.get_scalar_constant_value(node.inputs[3][2])
except tensor.NotScalarConstantError:
return False
if (stride_x, stride_y, stride_z) == (1, 1, 1):
# we import conv3d_fft locally to avoid pycuda warnings
from theano.sandbox.cuda.fftconv import conv3d_fft
# Shuffle inputs signal from (b, 0, 1, t, c) to (b, c, 0, 1, t)
x = node.inputs[0]
x = gpu_from_host(x.dimshuffle(0, 4, 1, 2, 3))
# Shuffle filters from (oc, 0, 1, t, ic) to (oc, ic, 0, 1, t)
f = node.inputs[1]
f = gpu_from_host(f.dimshuffle(0, 4, 1, 2, 3))
# filter flip
f = f[:, :, ::-1, ::-1, ::-1]
rval = conv3d_fft(x, f, border_mode='valid', pad_last_dim=True)
# Shuffle from (oc, c, 0, 1, t) to (oc, 0, 1, t, c)
return [rval.dimshuffle(0, 2, 3, 4, 1) + node.inputs[2]]
gpu_optimizer.register("conv3d_fft", local_conv3d_fft)
from theano.tensor.nnet.ConvGrad3D import ConvGrad3D
@local_optimizer([ConvGrad3D])
def local_convgrad3d_fft(node):
try:
stride_x = tensor.get_scalar_constant_value(node.inputs[1][0])
stride_y = tensor.get_scalar_constant_value(node.inputs[1][1])
stride_z = tensor.get_scalar_constant_value(node.inputs[1][2])
except tensor.NotScalarConstantError:
return False
if (isinstance(node.op, ConvGrad3D) and
(stride_x, stride_y, stride_z) == (1, 1, 1)):
# we import conv3d_fft locally to avoid pycuda warnings
from theano.sandbox.cuda.fftconv import conv3d_fft
# Shuffle inputs signal from (b, 0, 1, t, ic) to (ic, b, 0, 1, t)
x = node.inputs[0]
x = x.dimshuffle(4, 0, 1, 2, 3)
# Shuffle dCdH from (b, 0, 1, t, oc) to (oc, b, 0, 1, t)
f = node.inputs[3]
f = f.dimshuffle(4, 0, 1, 2, 3)
# filter flip
f = f[:, :, ::-1, ::-1, ::-1]
rval = conv3d_fft(x, f, border_mode='valid', pad_last_dim=True)
# Shuffle from (ic, oc, 0, 1, t) to (oc, 0, 1, t, ic)
return [rval.dimshuffle(1, 2, 3, 4, 0)]
gpu_optimizer.register("convgrad3d_fft", local_convgrad3d_fft)
from theano.tensor.nnet.ConvTransp3D import ConvTransp3D
@local_optimizer([ConvTransp3D])
def local_convtransp3d_fft(node):
try:
stride_x = tensor.get_scalar_constant_value(node.inputs[2][0])
stride_y = tensor.get_scalar_constant_value(node.inputs[2][1])
stride_z = tensor.get_scalar_constant_value(node.inputs[2][2])
except tensor.NotScalarConstantError:
return False
if (isinstance(node.op, ConvTransp3D) and
(stride_x, stride_y, stride_z) == (1, 1, 1)):
# we import conv3d_fft locally to avoid pycuda warnings
from theano.sandbox.cuda.fftconv import conv3d_fft
# Shuffle filters from (oc, 0, 1, t, ic) to (ic, oc, 0, 1, t)
x = node.inputs[0]
x = x.dimshuffle(4, 0, 1, 2, 3)
# Shuffle dCdH from (b, 0, 1, t, oc) to (b, oc, 0, 1, t)
f = node.inputs[3]
f = f.dimshuffle(0, 4, 1, 2, 3)
rval = conv3d_fft(f, x, border_mode='full', pad_last_dim=True)
# Shuffle from (ic, b, 0, 1, t) to (b, 0, 1, t, ic)
return [rval.dimshuffle(0, 2, 3, 4, 1) + node.inputs[1]]
gpu_optimizer.register("convtransp3d_fft", local_convtransp3d_fft)
@local_optimizer([Conv3D])
def local_conv3d_gemm(node):
if not isinstance(node.op, Conv3D):
return
try:
sx = tensor.get_scalar_constant_value(node.inputs[3][0])
sy = tensor.get_scalar_constant_value(node.inputs[3][1])
sz = tensor.get_scalar_constant_value(node.inputs[3][2])
except tensor.NotScalarConstantError:
return False
if isinstance(node.op, Conv3D):
# Shuffle inputs signal from (b, 0, 1, t, c) to (b, c, 0, 1, t)
x = node.inputs[0]
x = x.dimshuffle(0, 4, 1, 2, 3)
# Shuffle filters from (oc, 0, 1, t, ic) to (oc, ic, 0, 1, t)
f = node.inputs[1]
f = f.dimshuffle(0, 4, 1, 2, 3)
rval = GpuCorr3dMM(border_mode='valid', subsample=(sx, sy, sz))(x, f)
# Shuffle from (oc, c, 0, 1, t) to (oc, 0, 1, t, c)
return [rval.dimshuffle(0, 2, 3, 4, 1) + node.inputs[2]]
gpu_optimizer.register("conv3d_gemm", local_conv3d_gemm)
@local_optimizer([ConvGrad3D])
def local_convgrad3d_gemm(node):
try:
sx = tensor.get_scalar_constant_value(node.inputs[1][0])
sy = tensor.get_scalar_constant_value(node.inputs[1][1])
sz = tensor.get_scalar_constant_value(node.inputs[1][2])
except tensor.NotScalarConstantError:
return False
if isinstance(node.op, ConvGrad3D):
# Shuffle inputs signal from (b, 0, 1, t, c) to (b, c, 0, 1, t)
x = node.inputs[0]
x = gpu_contiguous(x.dimshuffle(0, 4, 1, 2, 3))
        # Shuffle dCdH from (b, 0, 1, t, oc) to (b, oc, 0, 1, t)
f = node.inputs[3]
f = gpu_contiguous(f.dimshuffle(0, 4, 1, 2, 3))
rval = GpuCorr3dMM_gradWeights(subsample=(sx, sy, sz))(x, f,
shape=node.inputs[2][1:4])
        # Shuffle from (oc, ic, 0, 1, t) to (oc, 0, 1, t, ic)
return [rval.dimshuffle(0, 2, 3, 4, 1)]
gpu_optimizer.register("convgrad3d_gemm", local_convgrad3d_gemm)
@local_optimizer([ConvTransp3D])
def local_convtransp3d_gemm(node):
try:
sx = tensor.get_scalar_constant_value(node.inputs[2][0])
sy = tensor.get_scalar_constant_value(node.inputs[2][1])
sz = tensor.get_scalar_constant_value(node.inputs[2][2])
except tensor.NotScalarConstantError:
return False
if isinstance(node.op, ConvTransp3D) and (sx, sy, sz) == (1, 1, 1):
        # Shuffle filters from (oc, 0, 1, t, ic) to (oc, ic, 0, 1, t)
x = node.inputs[0]
x = gpu_contiguous(x.dimshuffle(0, 4, 1, 2, 3))
# Shuffle dCdH from (b, 0, 1, t, oc) to (b, oc, 0, 1, t)
f = node.inputs[3]
f = gpu_contiguous(f.dimshuffle(0, 4, 1, 2, 3))
rval = GpuCorr3dMM_gradInputs(subsample=(sx, sy, sz))(kern=x, topgrad=f)
        # Shuffle from (b, ic, 0, 1, t) to (b, 0, 1, t, ic)
return [rval.dimshuffle(0, 2, 3, 4, 1) + node.inputs[1]]
gpu_optimizer.register("convtransp3d_gemm", local_convtransp3d_gemm)
# Pooling
import theano.tensor.signal.downsample as downsample
@register_opt()
@local_optimizer([downsample.DownsampleFactorMax])
def local_gpu_downsample_factor_max(node):
if (isinstance(node.op, downsample.DownsampleFactorMax)
and node.op.ds == node.op.st):
assert node.op.__props__ == ('ds', 'ignore_border', 'st', 'padding',
'mode')
if node.op.padding != (0, 0) or node.op.mode != 'max':
return
x, = node.inputs
if (x.owner and isinstance(x.owner.op, HostFromGpu)):
gpu_ds = GpuDownsampleFactorMax(node.op.ds, node.op.ignore_border)
return [host_from_gpu(gpu_ds(x.owner.inputs[0]))]
@register_opt()
@local_optimizer([downsample.DownsampleFactorMaxGrad])
def local_gpu_downsample_factor_max_grad(node):
if (isinstance(node.op, downsample.DownsampleFactorMaxGrad) and
node.op.ds == node.op.st):
assert node.op.__props__ == ('ds', 'ignore_border', 'st', 'padding',
'mode')
if (node.op.padding != (0, 0) or
node.op.mode != 'max' or
node.op.st != node.op.ds):
return
x, z, gz = node.inputs
if (x.owner and isinstance(x.owner.op, HostFromGpu)):
gpu_ds_grad = GpuDownsampleFactorMaxGrad(node.op.ds,
node.op.ignore_border)
return [host_from_gpu(gpu_ds_grad(x.owner.inputs[0],
as_cuda_ndarray_variable(z),
as_cuda_ndarray_variable(gz)))]
@register_opt()
@local_optimizer([downsample.DownsampleFactorMaxGradGrad])
def local_gpu_downsample_factor_max_grad_grad(node):
if isinstance(node.op, downsample.DownsampleFactorMaxGradGrad):
assert node.op.__props__ == ('ds', 'ignore_border', 'st',
'padding', 'mode')
if node.op.padding != (0, 0) or node.op.mode != 'max':
return
x, z, gx = node.inputs
if (x.owner and isinstance(x.owner.op, HostFromGpu)):
op = GpuDownsampleFactorMaxGradGrad(node.op.ds,
node.op.ignore_border)
return [host_from_gpu(op(x.owner.inputs[0],
as_cuda_ndarray_variable(z),
as_cuda_ndarray_variable(gx)))]
from theano.sandbox.cuda.basic_ops import gpu_join, GpuJoin
@register_opt()
@local_optimizer([tensor.Join])
def local_gpu_join(node):
"""
Inspired by the opt for convop.
Very loose notation follows.
Subgraphs concerned first look like
[array of HostTensor] -> HostToGpu -> GpuToHost
-> Join -> HostToGpu -> GpuToHost
First we apply this Opt:
join(host_from_gpu) -> host_from_gpu(gpu_join)
then, as an intermediate result, there should be
host_from_gpu(gpu_join) -> HostToGpu -> GpuToHost
this unnecessary GpuToHost -> HostToGpu should be removed
by other opts, leaving us with
host_from_gpu(gpu_join)
For intermediate places in the graph not covered by the first opt, the
following could be useful:
gpu_from_host(join) -> gpu_join(gpu_from_host)
not implemented yet.
"""
if isinstance(node.op, tensor.Join):
# optimizing this case:
# join(host_from_gpu) -> host_from_gpu(gpu_join)
# print "OPT: we've got a Join instance"
axis_and_tensors = node.inputs
# print "OPT: axis_and_tensors=", axis_and_tensors
        matches = [(t.owner is not None and isinstance(t.owner.op, HostFromGpu)) or
isinstance(t, gof.Constant) for t in axis_and_tensors[1:]]
# print "OPT: matches =", matches
# if all input tensors are host_from_gpu'ified
if all(matches):
# the extra gpu_from_host introduced here will
# be removed by further optimizations
new_tensors = [as_cuda_ndarray_variable(t) for t in axis_and_tensors[1:]]
new_a_and_t = [axis_and_tensors[0]] + new_tensors
replacement_node = host_from_gpu(gpu_join(*new_a_and_t))
# print "OPT: replacement_node", replacement_node
return [replacement_node]
# This is a copy of the same opt in tensor to make the tests happy,
# but I'm not convinced it is actually needed.
@register_opt()
@local_optimizer([GpuJoin])
def local_gpujoin_1(node):
tensors = node.inputs[1:]
if len(tensors) == 1:
return [tensors[0]]
# Commented out because it can result in
# shared = dimshuffle(gemm_inplace(dimshuffle(shared)))
# which causes memory leaks (long term fix is to make the above not leak
# memory)
@local_optimizer([gpu_gemm_no_inplace], inplace=True)
def local_inplace_gemm(node):
if node.op == gpu_gemm_no_inplace:
return [gpu_gemm_inplace(*node.inputs)]
@local_optimizer([gpu_gemv_no_inplace], inplace=True)
def local_inplace_gemv(node):
if node.op == gpu_gemv_no_inplace:
return [gpu_gemv_inplace(*node.inputs)]
@local_optimizer([gpu_ger_no_inplace], inplace=True)
def local_inplace_ger(node):
if node.op == gpu_ger_no_inplace:
return [gpu_ger_inplace(*node.inputs)]
# After destroyhandler is in but before we try to make elemwise things inplace
# Try to make gpu gemm inplace
# Also, need to make the gemm optimisation(step 70) happen before the fusion of
# elemwise(step 71)
optdb.register('InplaceGpuBlasOpt',
tensor.opt.in2out(local_inplace_gemm,
local_inplace_gemv,
local_inplace_ger,
name="InplaceGpuBlasOpt"),
70.0, 'fast_run', 'inplace', 'gpu')
def get_device_type_sizes():
"""
    :return: dict with keys 'gpu_ptr_size', 'cpu_ptr_size' and 'int_size'
             (the int size is the same on the GPU and the CPU).
    :rtype: dict
"""
if hasattr(get_device_type_sizes, 'rval'):
return get_device_type_sizes.rval
gpu_ptr_size = 8
cpu_ptr_size = 8
int_size = 8
try:
cuda_ndarray = theano.sandbox.cuda.cuda_ndarray.cuda_ndarray
t = cuda_ndarray.ptr_int_size()
gpu_ptr_size, cpu_ptr_size, int_size, gpu_int_size = t
assert int_size == gpu_int_size, (int_size, gpu_int_size)
del gpu_int_size
del t
except Exception as e:
        _logger.warning(("Optimization Warning: "
                         "Got the following error, but you can ignore it. "
                         "This could cause fewer GpuElemwise ops to be fused "
                         "together.\n%s") % e)
rval = get_device_type_sizes.rval = dict(gpu_ptr_size=gpu_ptr_size,
cpu_ptr_size=cpu_ptr_size,
int_size=int_size)
return rval
def max_inputs_to_GpuElemwise(node):
"""
    Return the maximum number of inputs this GpuElemwise Apply node can
    accept.
    This is needed because there is currently a limit of 256 bytes of
    parameters for the GPU function on devices with compute capability
    1.x. Devices with compute capability 2.x have a 4 kbyte limit, but
    it is not used here.
    This measures the size of the parameters we put in our GPU function and
    computes the maximum number of inputs that respects the 256 byte
    limit.
"""
type_sizes = get_device_type_sizes()
int_size = type_sizes['int_size']
gpu_ptr_size = type_sizes['gpu_ptr_size']
# some bytes are used for block and thread coords etc.
argument_limit = 232
ndim = node.inputs[0].type.ndim
size_param_mandatory = int_size # for numels
size_param_mandatory += int_size * ndim # for the shape
size_param_mandatory += sum((gpu_ptr_size + int_size * ndim)
for i in node.outputs)
nb_bytes_avail = argument_limit - size_param_mandatory
nb_bytes_per_inputs = (ndim * int_size) + gpu_ptr_size
max_nb_inputs = nb_bytes_avail // nb_bytes_per_inputs
    # There is a case where this algorithm doesn't work. Is this related to
    # the order of the parameters to the gpu function?
if node.inputs[0].type.ndim == 1 and max_nb_inputs > 14:
return 14
return max_nb_inputs
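# Illustrative arithmetic for max_inputs_to_GpuElemwise above, assuming the
# default 8-byte pointer/int sizes and a hypothetical single-output 2-d node:
#   mandatory bytes = 8 (numels) + 8 * 2 (shape) + (8 + 8 * 2) (one output) = 48
#   bytes available = 232 - 48 = 184
#   bytes per input = 2 * 8 + 8 = 24
#   max_nb_inputs   = 184 // 24 = 7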
def split_huge_add_or_mul(node):
"""
    For add and mul, it can happen that we have too many inputs.
    That will make nvcc fail to compile our current code.
    We don't want nodes in the graph that can't execute,
    as this breaks DebugMode.
    This should not happen for other GpuElemwise ops, as only the fusion
    optimization can generate ops with too many inputs, and it checks for that.
"""
if node.op.scalar_op in (scal.add, scal.mul):
max_nb_inputs = max_inputs_to_GpuElemwise(node)
if max_nb_inputs <= 1 and len(node.inputs) > 1:
return False
while len(node.inputs) > max_nb_inputs:
inner_op = []
for i in xrange(0,
len(node.inputs),
max_nb_inputs):
inner_op.append(node.op(*node.inputs[i: i + max_nb_inputs]))
node = node.op(*inner_op).owner
return node
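# Illustrative example for split_huge_add_or_mul above, with hypothetical
# sizes: if max_inputs_to_GpuElemwise(node) were 3 and node were
# add(a, b, c, d, e, f), one pass of the while loop would rebuild it as
#   add(add(a, b, c), add(d, e, f))
# which has only two inputs at the top level, so the loop stops and that
# node is returned.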
# GpuElemwise fusion
gpu_local_elemwise_fusion = tensor.opt.local_elemwise_fusion_op(
GpuElemwise,
max_inputs_to_GpuElemwise)
if config.gpu.local_elemwise_fusion:
_logger.debug("enabling optimization fusion of gpu elemwise in fast_run")
# Must be after cpu fusion at 40, gpu at 48.5 and before AddDestroyHandler at 49.5
optdb.register('gpu_elemwise_fusion',
tensor.opt.FusionOptimizer(gpu_local_elemwise_fusion),
49, 'fast_run', 'fusion',
'local_elemwise_fusion', 'gpu')
else:
_logger.debug(("not enabling optimization fusion of gpu elemwise in "
"fast_run"))
optdb.register('gpu_elemwise_fusion',
tensor.opt.FusionOptimizer(gpu_local_elemwise_fusion),
71.00, 'fusion', 'local_elemwise_fusion')
# GpuElemwise inplace
gpu_inplace_elemwise_optimizer = tensor.opt.inplace_elemwise_optimizer_op(
GpuElemwise)
# DO NOT add a 'gpu' tag here! This would enable it in fast_compile.
# It still will be run in fast_run with device=gpu with the current tag.
optdb.register('gpu_inplace_elemwise_opt', gpu_inplace_elemwise_optimizer, 75,
'fast_run', 'inplace', 'gpu_inplace')
register_opt()(tensor.opt.local_remove_useless_assert)
register_opt()(tensor.opt.local_shape_to_shape_i)
gpu_elemwise_alloc = gof.local_optimizer([GpuElemwise])(
tensor.opt.local_elemwise_alloc_op(GpuElemwise, GpuAlloc, GpuDimShuffle)
)
register_opt()(gpu_elemwise_alloc)
register_opt()(tensor.opt.local_useless_elemwise) # needed by gpu_elemwise_alloc
tensor.opt.register_specialize_device(gpu_elemwise_alloc)
@register_opt()
@local_optimizer([tensor.alloc])
def local_gpualloc(node):
replace = False
if node.op == tensor.alloc:
if node.inputs[0].owner and \
isinstance(node.inputs[0].owner.op, HostFromGpu):
replace = True
elif all([c != 'output' and c.op == gpu_from_host
for c, idx in node.outputs[0].clients]):
# if all clients are on gpu
replace = True
elif all([c != 'output' and
c.op == tensor.join and
all([i.owner and
i.owner.op in [host_from_gpu, tensor.alloc]
for i in c.inputs[1:]])
for c, idx in node.outputs[0].clients]):
            # if all clients are joins whose inputs come from the gpu or alloc
replace = True
if replace and node.inputs[0].dtype != 'float32':
replace = False
if replace:
val = node.inputs[0]
shp = node.inputs[1:]
old_out = node.outputs[0]
new_out = host_from_gpu(gpu_alloc(val, *shp))
# Sigh. it's an annoying thing about theano
# that you can't add information to the graph.
# If for some reason it has come to light that
# one of the dimensions is broadcastable, we have to hide that
# or the optimization won't go through.
if new_out.type != old_out.type:
assert new_out.type.ndim == old_out.type.ndim
assert new_out.type.dtype == old_out.type.dtype
# it seems to have happened that new_out has some broadcastable
# dimensions that old_out did not have
for b_old, b_new in zip(old_out.type.broadcastable,
new_out.type.broadcastable):
assert b_new or (not b_old)
new_out = tensor.patternbroadcast(new_out, old_out.broadcastable)
# if old_out.type != new_out.type:
#import pdb; pdb.set_trace()
return [new_out]
@register_opt()
@local_optimizer([theano.tensor.opt.Assert])
def local_assert(node):
if (isinstance(node.op, theano.tensor.opt.Assert) and
node.inputs[0].owner and
isinstance(node.inputs[0].owner.op,
HostFromGpu)):
return [host_from_gpu(node.op(node.inputs[0].owner.inputs[0],
*node.inputs[1:]))]
@register_opt()
@local_optimizer([GpuAlloc])
def local_gpualloc_memset_0(node):
if isinstance(node.op, GpuAlloc) and not node.op.memset_0:
inp = node.inputs[0]
if (isinstance(inp, CudaNdarrayConstant) and
inp.data.size == 1 and
(numpy.asarray(inp.data) == 0).all()):
new_out = GpuAlloc(memset_0=True)(*node.inputs)
old_bcast = node.outputs[0].type.broadcastable
if new_out.type.broadcastable != old_bcast:
# check that we did not try discarding a broadcastable dimension
assert not any(b_old and not b_new for b_old, b_new in zip(
old_bcast, new_out.type.broadcastable))
# force old broadcasting pattern; we must not change it here
new_out = tensor.patternbroadcast(new_out, old_bcast)
return [new_out]
@register_opt()
@local_optimizer([GpuContiguous])
def local_gpu_contiguous_gpu_contiguous(node):
"""
gpu_contiguous(gpu_contiguous(x)) -> gpu_contiguous(x)
"""
if isinstance(node.op, GpuContiguous):
inp = node.inputs[0]
if inp.owner and isinstance(inp.owner.op, GpuContiguous):
return [inp]
@register_opt()
@local_optimizer([gpu_from_host, tensor.Eye])
def local_gpu_eye(node):
"""
gpu_from_host(eye) -> gpueye(gpu_from_host)
eye(host_from_gpu) -> host_from_gpu(gpueye)
"""
if isinstance(node.op, GpuFromHost):
host_input = node.inputs[0]
if (host_input.owner and
isinstance(host_input.owner.op, tensor.Eye) and
host_input.owner.op.dtype == "float32"):
return [gpu_eye(*host_input.owner.inputs)]
if isinstance(node.op, tensor.Eye) and node.op.dtype == "float32":
if any([(i.owner and isinstance(i.owner.op, HostFromGpu))
for i in node.inputs]):
return [host_from_gpu(gpu_eye(*node.inputs))]
return False
def safe_to_gpu(x):
if (isinstance(x.type, tensor.TensorType) and
x.type.dtype == 'float32'):
return as_cuda_ndarray_variable(x)
else:
return x
def safe_to_cpu(x):
if isinstance(x.type, CudaNdarrayType):
return host_from_gpu(x)
else:
return x
def gpu_safe_new(x, tag=''):
"""
Internal function that constructs a new variable from x with the same
    type, but with a different name (old name + tag). This function is used
by gradient, or the R-op to construct new variables for the inputs of
the inner graph such that there is no interference between the original
graph and the newly constructed graph.
"""
if hasattr(x, 'name') and x.name is not None:
nw_name = x.name + tag
else:
nw_name = None
if isinstance(x, theano.Constant):
return x.clone()
nw_x = x.type()
nw_x.name = nw_name
return nw_x
def gpu_reconstruct_graph(inputs, outputs, tag=None):
"""
Different interface to clone, that allows you to pass inputs.
Compared to clone, this method always replaces the inputs with
    new variables of the same type, and returns those (in the same
order as the original inputs).
"""
if tag is None:
tag = ''
nw_inputs = [gpu_safe_new(x, tag) for x in inputs]
givens = {}
for nw_x, x in zip(nw_inputs, inputs):
givens[x] = nw_x
nw_outputs = scan_utils.clone(outputs, replace=givens)
return (nw_inputs, nw_outputs)
def tensor_to_cuda(x):
if (isinstance(x.type, tensor.TensorType) and
x.type.dtype == 'float32'):
y = CudaNdarrayType(broadcastable=x.type.broadcastable)()
if x.name:
y.name = x.name + '[cuda]'
return y
else:
return x
@register_opt()
@local_optimizer([nlinalg.ExtractDiag])
def local_gpu_extract_diagonal(node):
"""
extract_diagonal(host_from_gpu()) -> host_from_gpu(extract_diagonal)
gpu_from_host(extract_diagonal) -> extract_diagonal(gpu_from_host)
"""
if (isinstance(node.op, nlinalg.ExtractDiag) and
isinstance(node.inputs[0].type,
theano.tensor.TensorType)):
inp = node.inputs[0]
if inp.owner and isinstance(inp.owner.op, HostFromGpu):
return [host_from_gpu(nlinalg.extract_diag(as_cuda_ndarray_variable(inp)))]
if isinstance(node.op, GpuFromHost):
host_input = node.inputs[0]
if (host_input.owner and
isinstance(host_input.owner.op, nlinalg.ExtractDiag) and
isinstance(host_input.owner.inputs[0].type,
theano.tensor.TensorType)):
diag_node = host_input.owner
return [nlinalg.extract_diag(
as_cuda_ndarray_variable(diag_node.inputs[0]))]
return False
def typeConstructor(broadcastable, dtype):
if dtype == 'float32':
return CudaNdarrayType(broadcastable=broadcastable)
else:
return tensor.TensorType(broadcastable=broadcastable, dtype=dtype)
@register_opt('scan')
@local_optimizer([gpu_from_host, scan_op.Scan])
def gpuScanOptimization(node):
"""
scan(host_from_gpu) -> host_from_gpu(GPUscan)
gpu_from_host(scan) -> GPUscan(gpu_from_host)
"""
# gpu_from_host(scan) -> GPUscan(gpu_from_host)
if isinstance(node.op, GpuFromHost):
host_input = node.inputs[0]
if (host_input.owner and
isinstance(host_input.owner.op, scan_op.Scan) and
not host_input.owner.op.info['gpu'] and
len(host_input.owner.outputs) == 1):
            # Note that we are not doing the right thing here!
            # This is because the local optimizer expects only one
            # output that corresponds to the input of ``node``.
            # If we did this for each output separately we would have
            # multiple scan ops in the graph (as many as outputs)
            # and I'm not sure they would get merged into one again.
            # So for now we just cover the limited case where there
            # is only one output and the local optimizer can be used.
            # TODO (fix): either make sure the different scans get
            # merged or implement this optimization as a global
            # optimization.
thescan = host_input.owner.op
info = copy.deepcopy(thescan.info)
info['gpu'] = True
inputs = host_input.owner.inputs
nw_ins = [inputs[0]]
e = (1 +
thescan.n_seqs +
thescan.n_mit_mot +
thescan.n_mit_sot +
thescan.n_sit_sot +
thescan.n_shared_outs)
nw_ins += [safe_to_gpu(x) for x in inputs[1:e]]
b = e
e = e + thescan.n_nit_sot
nw_ins += inputs[b:e]
nw_ins += [safe_to_gpu(x) for x in inputs[e:]]
scan_ins = [tensor_to_cuda(x) for x in thescan.inputs]
scan_outs = [safe_to_gpu(x) for x in thescan.outputs]
scan_outs = scan_utils.clone(
scan_outs,
replace=list(zip(thescan.inputs,
(safe_to_cpu(x) for x in scan_ins))))
# We need to construct the hash here, because scan
# __init__ does not know about cuda ndarray and can not
# handle graphs with inputs being Cuda Ndarrays
tmp_in, tmp_out = gpu_reconstruct_graph(scan_ins,
scan_outs)
local_fgraph = gof.FunctionGraph(tmp_in, tmp_out)
_cmodule_key = gof.CLinker().cmodule_key_(local_fgraph, [])
info['gpu_hash'] = hash(_cmodule_key)
nw_op = scan_op.Scan(scan_ins,
scan_outs,
info,
typeConstructor=typeConstructor).make_node(
*nw_ins)
_outputs = nw_op.outputs
return _outputs
# scan(host_from_gpu) -> host_from_gpu(GPUscan)
if (type(node.op) == scan_op.Scan
and not node.op.info['gpu']):
if any([(i.owner and isinstance(i.owner.op, HostFromGpu))
for i in node.inputs]):
thescan = node.op
info = copy.deepcopy(thescan.info)
info['gpu'] = True
inputs = node.inputs
nw_ins = [inputs[0]]
e = (1 +
thescan.n_seqs +
thescan.n_mit_mot +
thescan.n_mit_sot +
thescan.n_sit_sot +
thescan.n_shared_outs)
nw_ins += [safe_to_gpu(x) for x in inputs[1:e]]
b = e
e = e + thescan.n_nit_sot
nw_ins += inputs[b:e]
nw_ins += [safe_to_gpu(x) for x in inputs[e:]]
scan_ins = [tensor_to_cuda(x) for x in thescan.inputs]
scan_outs = [safe_to_gpu(x) for x in thescan.outputs]
scan_outs = scan_utils.clone(
scan_outs,
replace=list(zip(thescan.inputs,
(safe_to_cpu(x) for x in scan_ins))))
# We need to construct the hash here, because scan
# __init__ does not know about cuda ndarray and can not
# handle graphs with inputs being Cuda Ndarrays
tmp_in, tmp_out = gpu_reconstruct_graph(scan_ins,
scan_outs)
local_fgraph = gof.FunctionGraph(tmp_in, tmp_out)
_cmodule_key = gof.CLinker().cmodule_key_(local_fgraph, [])
info['gpu_hash'] = hash(_cmodule_key)
_outputs = scan_op.Scan(
scan_ins,
scan_outs,
info,
typeConstructor=typeConstructor).make_node(*nw_ins).outputs
outputs = []
for x, y in zip(_outputs, node.outputs):
if isinstance(y.type, CudaNdarrayType):
outputs += [x]
else:
outputs += [safe_to_cpu(x)]
return outputs
return False
@register_opt()
@local_optimizer([tensor.AllocEmpty, gpu_from_host])
def local_gpu_allocempty(node):
if (isinstance(node.op, tensor.AllocEmpty) and
node.op.dtype == "float32"):
ret = host_from_gpu(GpuAllocEmpty()(*node.inputs))
# Keep the check that we don't care about the value.
ret.tag.values_eq_approx = node.outputs[0].tag.values_eq_approx
return [ret]
return False
optdb.register('gpu_scanOp_make_inplace',
scan_opt.ScanInplaceOptimizer(typeConstructor=typeConstructor,
gpu_flag=True),
75,
'gpu',
'fast_run',
'inplace',
'scan')
import theano.sandbox.cuda.extra_ops
|
nke001/attention-lvcsr
|
libs/Theano/theano/sandbox/cuda/opt.py
|
Python
|
mit
| 96,948
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Komiksowiec documentation build configuration file, created by
# sphinx-quickstart on Fri Feb 3 23:11:02 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('../..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Komiksowiec'
copyright = '2017, Paweł pid Kozubal'
author = 'Paweł pid Kozubal'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0'
# The full version, including alpha/beta/rc tags.
release = '1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
# html_title = 'Komiksowiec v1.0'
# A shorter title for the navigation bar. Default is the same as html_title.
#
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
# html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#
# html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}
# If false, no module index is generated.
#
# html_domain_indices = True
# If false, no index is generated.
#
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'Komiksowiecdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'Komiksowiec.tex', 'Komiksowiec Documentation',
'Paweł pid Kozubal', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#
# latex_use_parts = False
# If true, show page references after internal links.
#
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
#
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#
# latex_appendices = []
# If false, will not define \strong, \code, \titleref, \crossref ... but only
# \sphinxstrong, ..., \sphinxtitleref, ... To help avoid clash with user added
# packages.
#
# latex_keep_old_macro_names = True
# If false, no module index is generated.
#
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'komiksowiec', 'Komiksowiec Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'Komiksowiec', 'Komiksowiec Documentation',
author, 'Komiksowiec', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#
# texinfo_appendices = []
# If false, no module index is generated.
#
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#
# texinfo_no_detailmenu = False
|
pidpawel/komiksowiec
|
docs/source/conf.py
|
Python
|
mit
| 9,830
|
if __name__ == '__main__':
a = int(raw_input())
b = int(raw_input())
print a + b
print a - b
print a * b
|
LuisUrrutia/hackerrank
|
python/introduction/python-arithmetic-operators.py
|
Python
|
mit
| 126
|
import logging
import logging.config
import sys
import threading
import os
from amberclient.collision_avoidance.collision_avoidance_proxy import CollisionAvoidanceProxy
from amberclient.common.amber_client import AmberClient
from amberclient.location.location import LocationProxy
from amberclient.roboclaw.roboclaw import RoboclawProxy
from amberdriver.common.message_handler import MessageHandler
from amberdriver.drive_to_point import drive_to_point_pb2
from amberdriver.drive_to_point.drive_to_point import DriveToPoint
from amberdriver.tools import config
__author__ = 'paoolo'
pwd = os.path.dirname(os.path.abspath(__file__))
logging.config.fileConfig('%s/drive_to_point.ini' % pwd)
config.add_config_ini('%s/drive_to_point.ini' % pwd)
LOGGER_NAME = 'DriveToPointController'
USE_COLLISION_AVOIDANCE = config.DRIVE_TO_POINT_USE_COLLISION_AVOIDANCE == 'True'
class DriveToPointController(MessageHandler):
def __init__(self, pipe_in, pipe_out, driver):
MessageHandler.__init__(self, pipe_in, pipe_out)
self.__drive_to_point = driver
self.__logger = logging.getLogger(LOGGER_NAME)
def handle_data_message(self, header, message):
if message.HasExtension(drive_to_point_pb2.setTargets):
self.__handle_set_targets(header, message)
elif message.HasExtension(drive_to_point_pb2.getNextTarget):
self.__handle_get_next_target(header, message)
elif message.HasExtension(drive_to_point_pb2.getNextTargets):
self.__handle_get_next_targets(header, message)
elif message.HasExtension(drive_to_point_pb2.getVisitedTarget):
self.__handle_get_visited_target(header, message)
elif message.HasExtension(drive_to_point_pb2.getVisitedTargets):
self.__handle_get_visited_targets(header, message)
elif message.HasExtension(drive_to_point_pb2.getConfiguration):
self.__handle_get_configuration(header, message)
else:
self.__logger.warning('No request in message')
def __handle_set_targets(self, header, message):
self.__logger.debug('Set targets')
targets = message.Extensions[drive_to_point_pb2.targets]
targets = zip(targets.longitudes, targets.latitudes, targets.radiuses)
self.__drive_to_point.set_targets(targets)
@MessageHandler.handle_and_response
def __handle_get_next_target(self, received_header, received_message, response_header, response_message):
self.__logger.debug('Get next target')
next_target, current_location = self.__drive_to_point.get_next_target_and_location()
targets = response_message.Extensions[drive_to_point_pb2.targets]
targets.longitudes.extend([next_target[0]])
targets.latitudes.extend([next_target[1]])
targets.radiuses.extend([next_target[2]])
location = response_message.Extensions[drive_to_point_pb2.location]
location.x, location.y, location.p, location.alfa, location.timeStamp = current_location
response_message.Extensions[drive_to_point_pb2.getNextTarget] = True
return response_header, response_message
@MessageHandler.handle_and_response
def __handle_get_next_targets(self, received_header, received_message, response_header, response_message):
self.__logger.debug('Get next targets')
next_targets, current_location = self.__drive_to_point.get_next_targets_and_location()
targets = response_message.Extensions[drive_to_point_pb2.targets]
targets.longitudes.extend(map(lambda next_target: next_target[0], next_targets))
targets.latitudes.extend(map(lambda next_target: next_target[1], next_targets))
targets.radiuses.extend(map(lambda next_target: next_target[2], next_targets))
location = response_message.Extensions[drive_to_point_pb2.location]
location.x, location.y, location.p, location.alfa, location.timeStamp = current_location
response_message.Extensions[drive_to_point_pb2.getNextTargets] = True
return response_header, response_message
@MessageHandler.handle_and_response
def __handle_get_visited_target(self, received_header, received_message, response_header, response_message):
self.__logger.debug('Get visited target')
visited_target, current_location = self.__drive_to_point.get_visited_target_and_location()
targets = response_message.Extensions[drive_to_point_pb2.targets]
targets.longitudes.extend([visited_target[0]])
targets.latitudes.extend([visited_target[1]])
targets.radiuses.extend([visited_target[2]])
location = response_message.Extensions[drive_to_point_pb2.location]
location.x, location.y, location.p, location.alfa, location.timeStamp = current_location
response_message.Extensions[drive_to_point_pb2.getVisitedTarget] = True
return response_header, response_message
@MessageHandler.handle_and_response
def __handle_get_visited_targets(self, received_header, received_message, response_header, response_message):
self.__logger.debug('Get visited targets')
visited_targets, current_location = self.__drive_to_point.get_visited_targets_and_location()
targets = response_message.Extensions[drive_to_point_pb2.targets]
targets.longitudes.extend(map(lambda target: target[0], visited_targets))
targets.latitudes.extend(map(lambda target: target[1], visited_targets))
targets.radiuses.extend(map(lambda target: target[2], visited_targets))
location = response_message.Extensions[drive_to_point_pb2.location]
location.x, location.y, location.p, location.alfa, location.timeStamp = current_location
response_message.Extensions[drive_to_point_pb2.getVisitedTargets] = True
return response_header, response_message
@MessageHandler.handle_and_response
def __handle_get_configuration(self, received_header, received_message, response_header, response_message):
self.__logger.debug('Get configuration')
configuration = response_message.Extensions[drive_to_point_pb2.configuration]
configuration.maxSpeed = self.__drive_to_point.MAX_SPEED
response_message.Extensions[drive_to_point_pb2.getConfiguration] = True
return response_header, response_message
def handle_subscribe_message(self, header, message):
self.__logger.debug('Subscribe action, nothing to do...')
def handle_unsubscribe_message(self, header, message):
self.__logger.debug('Unsubscribe action, nothing to do...')
def handle_client_died_message(self, client_id):
self.__logger.info('Client %d died, stop!', client_id)
self.__drive_to_point.set_targets([])
if __name__ == '__main__':
client_for_location = AmberClient('127.0.0.1', name="location")
client_for_driver = AmberClient('127.0.0.1', name="driver")
location_proxy = LocationProxy(client_for_location, 0)
if USE_COLLISION_AVOIDANCE:
driver_proxy = CollisionAvoidanceProxy(client_for_driver, 0)
else:
driver_proxy = RoboclawProxy(client_for_driver, 0)
drive_to_point = DriveToPoint(driver_proxy, location_proxy)
driving_thread = threading.Thread(target=drive_to_point.driving_loop, name="driving-thread")
driving_thread.start()
location_thread = threading.Thread(target=drive_to_point.location_loop, name="location-thread")
location_thread.start()
controller = DriveToPointController(sys.stdin, sys.stdout, drive_to_point)
controller()
|
showmen15/testEEE
|
src/amberdriver/drive_to_point/drive_to_point_controller.py
|
Python
|
mit
| 7,564
|
import unittest
import socket
import os
from shapy.framework.netlink.constants import *
from shapy.framework.netlink.message import *
from shapy.framework.netlink.tc import *
from shapy.framework.netlink.htb import *
from shapy.framework.netlink.connection import Connection
from tests import TCTestCase
class TestClass(TCTestCase):
def test_add_class(self):
self.qhandle = 0x1 << 16 # | 0x1 # major:minor, 1:
self.add_htb_qdisc()
handle = 0x1 << 16 | 0x1
rate = 256*1000
mtu = 1600
this_dir = os.path.dirname(os.path.realpath(__file__))
with open(os.path.join(this_dir, 'htb_add_class.data'), 'rb') as f:
data = f.read()
#init = Attr(TCA_HTB_INIT, HTBParms(rate, rate).pack()+data[36+8+4+48:])
init = Attr(TCA_HTB_INIT,
HTBParms(rate, rate).pack() +
RTab(rate, mtu).pack() + CTab(rate, mtu).pack())
tcm = tcmsg(socket.AF_UNSPEC, self.interface.if_index, handle, self.qhandle, 0,
[Attr(TCA_KIND, 'htb\0'), init])
msg = Message(type=RTM_NEWTCLASS,
flags=NLM_F_EXCL | NLM_F_CREATE | NLM_F_REQUEST | NLM_F_ACK,
service_template=tcm)
self.conn.send(msg)
self.check_ack(self.conn.recv())
self.delete_root_qdisc()
def add_htb_qdisc(self):
tcm = tcmsg(socket.AF_UNSPEC, self.interface.if_index, self.qhandle, TC_H_ROOT, 0,
[Attr(TCA_KIND, 'htb\0'), HTBQdiscAttr(defcls=0x1ff)])
msg = Message(type=RTM_NEWQDISC,
flags=NLM_F_EXCL | NLM_F_CREATE | NLM_F_REQUEST | NLM_F_ACK,
service_template=tcm)
self.conn.send(msg)
r = self.conn.recv()
self.check_ack(r)
return r
|
praus/shapy
|
tests/netlink/test_htb_class.py
|
Python
|
mit
| 1,941
|
#!/usr/bin/env python
import setuptools
if __name__ == "__main__":
setuptools.setup(
name="aecg100",
version="1.1.0.18",
author="WHALETEQ Co., LTD",
description="WHALETEQ Co., LTD AECG100 Linux SDK",
url="https://www.whaleteq.com/en/Support/Download/7/Linux%20SDK",
include_package_data=True,
package_data={
'': ['sdk/*.so', 'sdk/*.h', 'sample/python/*.txt']
},
)
|
benian/aecg100
|
setup.py
|
Python
|
mit
| 418
|
#!/usr/local/bin/python3.5
import itertools
import sys
from .stuff import word_set
__version__ = "1.1.0"
def find_possible(lst):
"""
Return all possible combinations of letters in lst
@type lst: [str]
@rtype: [str]
"""
returned_list = []
for i in range(0, len(lst) + 1):
for subset in itertools.permutations(lst, i):
possible = ''
for letter in subset:
possible += letter
if len(possible) == len(lst):
# itertools.permutations returns smaller lists
returned_list.append(possible)
return returned_list
def return_words(lst, word_set):
"""
    Return the combinations in lst that are words in word_set
@type lst: [str]
@type word_set: set(str)
@rtype: [str]
"""
returned_list = []
for word in lst:
if word in word_set or word.capitalize() in word_set:
# Some words are capitalized in the word_set
returned_list.append(word)
return returned_list
def main():
"""
Main function to run the program
"""
anagram_lst = []
anagram = sys.argv[1]
for char in anagram:
anagram_lst.append(char)
possible_words = find_possible(anagram_lst)
actual_words = return_words(possible_words, word_set)
print('Solutions:')
if len(actual_words) == 0:
print('None found')
else:
for item in set(actual_words):
# Running through in set form prevents duplicates
print(item)
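# Hypothetical direct-run hook (the installed package may expose its own
# console entry point instead); with it, the module can be run as
# `python -m anagram_solver.anagram_solver <letters>`.
if __name__ == '__main__':
    main()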
|
patrickleweryharris/anagram-solver
|
anagram_solver/anagram_solver.py
|
Python
|
mit
| 1,532
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-01-07 08:13
from __future__ import unicode_literals
import django.core.validators
from django.db import migrations
import djstripe.fields
class Migration(migrations.Migration):
dependencies = [
('djstripe', '0020_auto_20161229_0041'),
]
operations = [
migrations.AlterField(
model_name='subscription',
name='application_fee_percent',
field=djstripe.fields.StripePercentField(decimal_places=2, help_text="A positive decimal that represents the fee percentage of the subscription invoice amount that will be transferred to the application owner's Stripe account each billing period.", max_digits=5, null=True, validators=[django.core.validators.MinValueValidator(1.0), django.core.validators.MaxValueValidator(100.0)]),
),
]
|
tkwon/dj-stripe
|
djstripe/migrations/0021_auto_20170107_0813.py
|
Python
|
mit
| 865
|
class MethodMissing(object):
def __getattr__(self, name):
try:
return self.__getattribute__(name)
except AttributeError:
def method(*args, **kw):
return self.method_missing(name, *args, **kw)
return method
def method_missing(self, name, *args, **kw):
raise AttributeError("%r object has no attribute %r" %
(self.__class__, name))
class ValMissing(object):
def __getattr__(self, name):
try:
return self.__getattribute__(name)
except AttributeError:
return self.val_missing(name)
def val_missing(self, name):
raise AttributeError("%r object has no attribute %r" %
(self.__class__, name))
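# Illustrative usage sketch; Greeter and Defaults are hypothetical subclasses,
# not part of this module:
#     class Greeter(MethodMissing):
#         def method_missing(self, name, *args, **kw):
#             return 'called %s with %r' % (name, args)
#     Greeter().hello('world')   # -> "called hello with ('world',)"
#     class Defaults(ValMissing):
#         def val_missing(self, name):
#             return None
#     Defaults().anything        # -> None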
|
ganow/gq
|
gq/missing.py
|
Python
|
mit
| 788
|
import threading
__author__ = "Piotr Gawlowicz"
__copyright__ = "Copyright (c) 2015, Technische Universitat Berlin"
__version__ = "0.1.0"
__email__ = "gawlowicz@tkn.tu-berlin.de"
class Timer(object):
def __init__(self, handler_):
assert callable(handler_)
super().__init__()
self._handler = handler_
self._event = threading.Event()
self._thread = None
def start(self, interval):
"""interval is in seconds"""
if self._thread:
self.cancel()
self._event.clear()
self._thread = threading.Thread(target=self._timer, args=[interval])
self._thread.setDaemon(True)
self._thread.start()
def cancel(self):
if (not self._thread) or (not self._thread.is_alive()):
return
self._event.set()
# self._thread.join()
self._thread = None
def is_running(self):
return self._thread is not None
def _timer(self, interval):
# Avoid cancellation during execution of self._callable()
cancel = self._event.wait(interval)
if cancel:
return
self._handler()
class TimerEventSender(Timer):
# timeout handler is called by timer thread context.
# So in order to actual execution context to application's event thread,
# post the event to the application
def __init__(self, app, ev_cls):
super(TimerEventSender, self).__init__(self._timeout)
self._app = app
self._ev_cls = ev_cls
def _timeout(self):
self._app.send_event(self._ev_cls())
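if __name__ == '__main__':
    # Minimal usage sketch (demo only, not part of the uniflex API): schedule
    # a one-shot callback half a second in the future and let it fire.
    import time
    def _demo_handler():
        print('timer fired')
    demo_timer = Timer(_demo_handler)
    demo_timer.start(0.5)
    time.sleep(1.0)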
|
uniflex/uniflex
|
uniflex/core/timer.py
|
Python
|
mit
| 1,583
|
import pytest
import responses
from document import Document
from scrapers.knox_tn_agendas_scraper import KnoxCoTNAgendaScraper
from . import common
from . import utils
class TestKnoxAgendaScraper(object):
session = None
page_str = ""
def test_get_docs_from_page(self):
scraper = KnoxCoTNAgendaScraper()
docs = scraper._get_docs_from_schedule(self.page_str)
assert len(docs) == 4
for doc in docs:
# All URLs should be absolute.
assert doc.url.startswith('https://')
actual_titles = [doc.title for doc in docs]
expected_titles = [
'June 28, 2017: BZA Agenda',
'June 26, 2017: Beer Board',
'June 19, 2017: Work Session',
'June 7, 2017: AGENDA COMMITTEE MEETING',
]
assert expected_titles == actual_titles
@responses.activate
def test_full_scraper(self):
self.session.query(Document).delete()
count = self.session.query(Document).count()
assert count == 0
responses.add(
responses.GET,
KnoxCoTNAgendaScraper.MEETING_SCHEDULE_URL,
body=self.page_str,
status=200,
match_querystring=True
)
scraper = KnoxCoTNAgendaScraper()
scraper.scrape(self.session)
docs = self.session.query(Document).all()
assert len(docs) == 4
expected_titles = {
'June 28, 2017: BZA Agenda',
'June 26, 2017: Beer Board',
'June 19, 2017: Work Session',
'June 7, 2017: AGENDA COMMITTEE MEETING',
}
for doc in docs:
assert doc.title in expected_titles
@classmethod
def setup_class(cls):
cls.session = common.Session()
with open(utils.get_abs_filename('knox-co-results-page.html'), 'r') as page:
cls.page_str = page.read()
@classmethod
def teardown_class(cls):
common.Session.remove()
def setup_method(self, test_method):
self.session.begin_nested()
def teardown_method(self, test_method):
self.session.rollback()
|
RagtagOpen/bidwire
|
bidwire/tests/test_knox_co_agenda_scraper.py
|
Python
|
mit
| 2,136
|
"""
==========================================
Author: Tyler Brockett
Username: /u/tylerbrockett
Description: Alert Bot (Formerly sales__bot)
Date Created: 11/13/2015
Date Last Edited: 12/20/2016
Version: v2.0
==========================================
"""
import praw
import traceback
from utils.logger import Logger
from utils.color import Color
from utils import output
from prawcore.exceptions import Redirect
from prawcore.exceptions import Forbidden
class RedditHandler:
def __init__(self, credentials):
output.startup_message(credentials)
self.credentials = credentials
self.reddit = self.connect()
self.NUM_POSTS = 20
def connect(self):
try:
reddit = praw.Reddit(
client_id=self.credentials['client_id'],
client_secret=self.credentials['client_secret'],
password=self.credentials['password'],
user_agent=self.credentials['user_agent'],
username=self.credentials['username'])
return reddit
except:
raise RedditHelperException('Error connecting to Reddit\n\n' + traceback.format_exc())
def disconnect(self):
self.reddit = None
def reset(self):
try:
self.disconnect()
self.reddit = self.connect()
except:
raise RedditHelperException(RedditHelperException.RESET_EXCEPTION + '\n\n' + traceback.format_exc())
def get_instance(self):
return self.reddit
def get_unread(self):
ret = []
unread = self.reddit.inbox.unread(limit=None)
for message in unread:
ret.append(message)
ret.reverse()
return ret
def get_message(self, message_id):
return self.reddit.inbox.message(message_id)
def send_message(self, redditor, subject, body):
try:
self.reddit.redditor(redditor).message(subject, body)
except:
Logger.log(traceback.format_exc(), Color.RED)
raise RedditHelperException(RedditHelperException.SEND_MESSAGE_EXCEPTION)
def get_submissions(self, subreddit):
submissions = []
posts = 200 if (subreddit == 'all') else self.NUM_POSTS
try:
subs = self.reddit.subreddit(subreddit).new(limit=posts)
for submission in subs:
submissions.append(submission)
except Forbidden as e:
Logger.log(traceback.format_exc(), Color.RED)
return []
except Exception as e:
Logger.log(traceback.format_exc(), Color.RED)
raise RedditHelperException(RedditHelperException.GET_SUBMISSIONS_EXCEPTION)
return submissions
def get_original_message_id(self, received_message, database):
message = received_message
while message.parent_id and len(database.get_subscriptions_by_message_id(str(message.author), message.id)) == 0:
message = self.reddit.inbox.message(message.parent_id[3:])
return message.id
def check_invalid_subreddits(self, subreddits):
invalid = []
for subreddit in subreddits:
try:
for submission in self.reddit.subreddit(subreddit).new(limit=1):
print('subreddit is valid')
except Redirect: # was praw.errors.InvalidSubreddit without 'len()' around call in the try block
Logger.log(traceback.format_exc(), Color.RED)
invalid.append(subreddit)
return invalid
class RedditHelperException(Exception):
SEND_MESSAGE_EXCEPTION = 'Error sending message'
RESET_EXCEPTION = 'Error resetting connection to Reddit'
GET_SUBMISSIONS_EXCEPTION = 'Error getting submissions'
def __init__(self, error_args):
Exception.__init__(self, 'Reddit Exception: {0}'.format(error_args))
self.errorArgs = error_args
|
tylerbrockett/reddit-bot-buildapcsales
|
src/bot_modules/reddit_handler.py
|
Python
|
mit
| 3,944
|
# -*- coding: utf-8 -*-
"""
spdypy.stream
~~~~~~~~~~~~~
Abstractions for SPDY streams.
"""
import collections
from .frame import (SYNStreamFrame, SYNReplyFrame, RSTStreamFrame,
DataFrame, HeadersFrame, WindowUpdateFrame, FLAG_FIN)
class Stream(object):
"""
A SPDY connection is made up of many streams. Each stream communicates by
sending some nonzero number of frames, beginning with a SYN_STREAM and
ending with a RST_STREAM frame, or a frame marked with FLAG_FIN.
The stream abstraction provides a system for wrapping HTTP connections in
frames for sending down SPDY connections. They are a purely internal
abstraction, and not intended for use by end-users of SPDYPy.
:param stream_id: The stream_id for this stream.
:param version: The SPDY version this stream is for.
:param compressor: A reference to the zlib compression object for this
connection.
:param decompressor: A reference to the zlib decompression object for this
connection.
"""
def __init__(self, stream_id, version, compressor, decompressor):
self.stream_id = stream_id
self.version = version
self._queued_frames = collections.deque()
self._compressor = compressor
self._decompressor = decompressor
def open_stream(self, priority, associated_stream=None):
"""
Builds the frames necessary to open a SPDY stream. Stores them in the
queued frames object.
:param priority: The priority of this stream, from 0 to 7. 0 is the
highest priority, 7 the lowest.
:param associated_stream: (optional) The stream this stream is
associated to.
"""
assoc_id = associated_stream.stream_id if associated_stream else None
syn = SYNStreamFrame()
syn.version = self.version
syn.stream_id = self.stream_id
syn.assoc_stream_id = assoc_id
syn.priority = priority
# Assume this will be the last frame unless we find out otherwise.
syn.flags.add(FLAG_FIN)
self._queued_frames.append(syn)
def add_header(self, key, value):
"""
Adds a SPDY header to the stream. For now this assumes that the first
outstanding frame in the queue is one that has headers on it. Later,
this method will be smarter.
:param key: The header key.
:param value: The header value.
"""
frame = self._queued_frames[0]
frame.headers[key] = value
def prepare_data(self, data, last=False):
"""
Prepares some data in a data frame.
:param data: The data to send.
:param last: (Optional) Whether this is the last data frame.
"""
frame = DataFrame()
frame.stream_id = self.stream_id
# Remove any FLAG_FIN earlier in the queue.
for queued_frame in self._queued_frames:
queued_frame.flags.discard(FLAG_FIN)
if last:
frame.flags.add(FLAG_FIN)
frame.data = data
self._queued_frames.append(frame)
def send_outstanding(self, connection):
"""
Sends any outstanding frames on a given connection.
:param connection: The connection to send the frames on.
"""
frame = self._next_frame()
while frame is not None:
data = frame.to_bytes(self._compressor)
connection.send(data)
frame = self._next_frame()
def process_frame(self, frame):
"""
Given a SPDY frame, handle it in the context of a given stream. The
exact behaviour here is different depending on the type of the frame.
We handle the following kinds at the stream level: RST_STREAM,
HEADERS, WINDOW_UPDATE, and Data frames.
:param frame: The Frame subclass to handle.
"""
if isinstance(frame, SYNReplyFrame):
self._process_reply_frame(frame)
elif isinstance(frame, RSTStreamFrame):
self._process_rst_frame(frame)
elif isinstance(frame, HeadersFrame):
self._process_headers_frame(frame)
elif isinstance(frame, WindowUpdateFrame):
self._process_window_update(frame)
elif isinstance(frame, DataFrame):
self._handle_data(frame)
else:
raise ValueError("Unexpected frame kind.")
def _next_frame(self):
"""
Utility method for returning the next frame from the frame queue.
"""
try:
return self._queued_frames.popleft()
except IndexError:
return None
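# Illustrative flow (sketch only): ``c``/``d`` stand in for the connection's
# zlib compressor/decompressor objects and ``conn`` for a socket-like object;
# none of them are defined in this module.
#     stream = Stream(stream_id=1, version=3, compressor=c, decompressor=d)
#     stream.open_stream(priority=0)
#     stream.add_header(':method', 'GET')
#     stream.prepare_data(b'', last=True)
#     stream.send_outstanding(conn)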
|
Lukasa/spdypy
|
spdypy/stream.py
|
Python
|
mit
| 4,701
|
#!/usr/bin/env python
# encoding: utf-8
# http://axe.g0v.tw/level/4
import urllib2, re
lines = []; last_url = None
for index in range(1, 25):
url = "http://axe-level-4.herokuapp.com/lv4/" if index == 1 \
else "http://axe-level-4.herokuapp.com/lv4/?page=" + str(index)
# The hint is that we shall make our bot look like a real browser.
req = urllib2.Request(url)
req.add_header('Accept', 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8')
req.add_header('User-Agent', 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_1) AppleWebKit/537.73.11 (KHTML, like Gecko) Version/7.0.1 Safari/537.73.11')
if last_url:
req.add_header('Referer', last_url)
last_url = url
html = urllib2.urlopen(req).read()
pattern = r"<tr>\s*<td>(.*)</td>\s*<td>(.*)</td>\s*<td>(.*)</td>\s*</tr>"
results = re.findall(pattern, html, re.MULTILINE)[1:]
format = '{"town": "%s", "village": "%s", "name" : "%s"}'
for result in results:
lines.append(format % tuple(result))
with open("test.txt", "w") as f:
f.write("[%s]" % ",\n".join(lines))
|
zonble/axe_py
|
axe4.py
|
Python
|
mit
| 1,045
|
from collections import deque
N = int(input())
d = deque()
i = 0
while i < N:
command = input().split()
if command[0] == 'append':
d.append(command[1])
elif command[0] == 'appendleft':
d.appendleft(command[1])
elif command[0] == 'pop':
d.pop()
else:
d.popleft()
i += 1
[print(i, end=' ') for i in d]
|
avtomato/HackerRank
|
Python/_07_Collections/_06_Collections.deque()/solution.py
|
Python
|
mit
| 358
|
import pytest
import pwny
target_little_endian = pwny.Target(arch=pwny.Target.Arch.unknown, endian=pwny.Target.Endian.little)
target_big_endian = pwny.Target(arch=pwny.Target.Arch.unknown, endian=pwny.Target.Endian.big)
def test_pack():
assert pwny.pack('I', 0x41424344) == b'DCBA'
def test_pack_format_with_endian():
assert pwny.pack('>I', 0x41424344) == b'ABCD'
def test_pack_explicit_endian():
assert pwny.pack('I', 0x41424344, endian=pwny.Target.Endian.big) == b'ABCD'
def test_pack_explicit_target():
assert pwny.pack('I', 0x41424344, target=target_big_endian) == b'ABCD'
@pytest.mark.xfail(raises=NotImplementedError)
def test_pack_invalid_endian():
pwny.pack('I', 1, endian='invalid')
def test_unpack():
assert pwny.unpack('I', b'DCBA') == (0x41424344,)
def test_unpack_format_with_endian():
assert pwny.unpack('>I', b'ABCD') == (0x41424344,)
def test_unpack_explicit_endian():
assert pwny.unpack('I', b'ABCD', endian=pwny.Target.Endian.big) == (0x41424344,)
def test_unpack_explicit_target():
assert pwny.unpack('I', b'ABCD', target=target_big_endian) == (0x41424344,)
@pytest.mark.xfail(raises=NotImplementedError)
def test_unpack_invalid_endian():
pwny.unpack('I', 'AAAA', endian='invalid')
def test_pack_size():
# This tests both pack_size in general as well as not padding the byte.
assert pwny.pack_size('bq') == 9
short_signed_data = [
[8, -0x7f, b'\x81'],
[16, -0x7fff, b'\x80\x01'],
[32, -0x7fffffff, b'\x80\x00\x00\x01'],
[64, -0x7fffffffffffffff, b'\x80\x00\x00\x00\x00\x00\x00\x01'],
]
short_unsigned_data = [
[8, 0x61, b'a'],
[16, 0x6162, b'ab'],
[32, 0x61626364, b'abcd'],
[64, 0x6162636465666768, b'abcdefgh'],
]
def test_short_form_pack():
for width, num, bytestr in short_signed_data:
f = 'p%d' % width
yield check_short_form_pack, f, num, bytestr[::-1]
yield check_short_form_pack_endian, f, num, bytestr[::-1], pwny.Target.Endian.little
yield check_short_form_pack_endian, f, num, bytestr, pwny.Target.Endian.big
for width, num, bytestr in short_unsigned_data:
f = 'P%d' % width
yield check_short_form_pack, f, num, bytestr[::-1]
yield check_short_form_pack_endian, f, num, bytestr[::-1], pwny.Target.Endian.little
yield check_short_form_pack_endian, f, num, bytestr, pwny.Target.Endian.big
def test_short_form_unpack():
for width, num, bytestr in short_signed_data:
f = 'u%d' % width
yield check_short_form_unpack, f, num, bytestr[::-1]
yield check_short_form_unpack_endian, f, num, bytestr[::-1], pwny.Target.Endian.little
yield check_short_form_unpack_endian, f, num, bytestr, pwny.Target.Endian.big
for width, num, bytestr in short_unsigned_data:
f = 'U%d' % width
yield check_short_form_unpack, f, num, bytestr[::-1]
yield check_short_form_unpack_endian, f, num, bytestr[::-1], pwny.Target.Endian.little
yield check_short_form_unpack_endian, f, num, bytestr, pwny.Target.Endian.big
def test_pointer_pack():
yield check_short_form_pack, 'p', -66052, b'\xfc\xfd\xfe\xff'
yield check_short_form_pack_endian, 'p', -66052, b'\xfc\xfd\xfe\xff', pwny.Target.Endian.little
yield check_short_form_pack_endian, 'p', -66052, b'\xff\xfe\xfd\xfc', pwny.Target.Endian.big
yield check_short_form_pack, 'P', 4294901244, b'\xfc\xfd\xfe\xff'
yield check_short_form_pack_endian, 'P', 4294901244, b'\xfc\xfd\xfe\xff', pwny.Target.Endian.little
yield check_short_form_pack_endian, 'P', 4294901244, b'\xff\xfe\xfd\xfc', pwny.Target.Endian.big
def test_pointer_unpack():
yield check_short_form_unpack, 'u', -66052, b'\xfc\xfd\xfe\xff'
yield check_short_form_unpack_endian, 'u', -66052, b'\xfc\xfd\xfe\xff', pwny.Target.Endian.little
yield check_short_form_unpack_endian, 'u', -66052, b'\xff\xfe\xfd\xfc', pwny.Target.Endian.big
yield check_short_form_unpack, 'U', 4294901244, b'\xfc\xfd\xfe\xff'
yield check_short_form_unpack_endian, 'U', 4294901244, b'\xfc\xfd\xfe\xff', pwny.Target.Endian.little
yield check_short_form_unpack_endian, 'U', 4294901244, b'\xff\xfe\xfd\xfc', pwny.Target.Endian.big
def check_short_form_pack(f, num, bytestr):
assert getattr(pwny, f)(num) == bytestr
def check_short_form_pack_endian(f, num, bytestr, endian):
assert getattr(pwny, f)(num, endian=endian) == bytestr
def check_short_form_unpack(f, num, bytestr):
assert getattr(pwny, f)(bytestr) == num
def check_short_form_unpack_endian(f, num, bytestr, endian):
assert getattr(pwny, f)(bytestr, endian=endian) == num
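# --- Editor's illustrative addition (not part of the original test module) ---
# The expected byte strings above follow ordinary struct semantics: 0x41424344
# is b'ABCD' when packed big-endian and b'DCBA' when packed little-endian.
# A stdlib cross-check of that convention:
def test_struct_endianness_reference():
    import struct
    assert struct.pack('<I', 0x41424344) == b'DCBA'
    assert struct.pack('>I', 0x41424344) == b'ABCD'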
|
edibledinos/pwnypack
|
tests/test_packing.py
|
Python
|
mit
| 4,642
|
"""optboard URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import include, url
from django.contrib import admin
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'', include('dashboard.urls', namespace='dashboard')),
]
|
tanutarou/OptBoard
|
optboard/urls.py
|
Python
|
mit
| 838
|
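# Maximum-sum increasing subsequence via O(n^2) dynamic programming:
# maxSums[i] holds the best sum of an increasing subsequence ending at arr[i].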
def maxSumSub(arr):
maxSums = [0]*len(arr)
for i in range(len(arr)):
Si = arr[i]
maxS = Si
for j in range(0,i):
if (arr[j] < arr[i]):
s = maxSums[j] + arr[i]
if (s > maxS):
maxS = s
maxSums[i] = maxS
return max(maxSums)
arr = [1,101,2,3,100,4,5]
print maxSumSub(arr)
arr = [3,4,5,10]
print maxSumSub(arr)
arr = [10,5,4,3]
print maxSumSub(arr)
|
KarlParkinson/practice
|
algs/maxSumSub.py
|
Python
|
mit
| 454
|
import requests
import httpretty
from nose.tools import nottest
from pyrelic import BaseClient
@nottest # Skip until we can properly simulate timeouts
@httpretty.activate
def test_make_request_timeout():
"""
Remote calls should time out
"""
httpretty.register_uri(httpretty.GET, "www.example.com",
body=None,
)
# When I make an API request and receive no response
c = BaseClient()
# Then I should raise a NewRelicApiException
c._make_request.when.called_with(requests.get,
"http://www.example.com",
timeout=0.05,
retries=1)\
.should.throw(requests.RequestException)
@httpretty.activate
def test_make_request_non_200():
"""
Bad HTTP Responses should throw an error
"""
httpretty.register_uri(httpretty.GET, "http://foobar.com",
body="123", status=400)
# When I make an API request and receive a 400
c = BaseClient()
# Then I should raise the appropriate requests exception
c._make_request.when.called_with(requests.get,
"http://foobar.com")\
.should.throw(requests.RequestException)
def test_client_proxy_string():
"""
Base Client should parse proxy strings
"""
# When I create a client with a proxy as a string
proxy = "baz:1234"
c = BaseClient(proxy=proxy)
# Then the Client should create the proxy config as a dictionary
c.proxy.should.equal({"http": proxy, "https": proxy})
def test_client_proxy_dict():
"""
Base Client should parse proxy dicts
"""
# When I create a client with a proxy as a dict
proxy = {"baz": "1234"}
c = BaseClient(proxy=proxy)
# Then the Client should create the proxy config as a dictionary
c.proxy.should.equal(proxy)
|
andrewgross/pyrelic
|
tests/unit/test_base_client.py
|
Python
|
mit
| 1,927
|
from tokens.andd import And
from tokens.expression import Expression
from tokens.iff import Iff
from tokens.kfalse import ConstantFalse
from tokens.ktrue import ConstantTrue
from tokens.nop import Not
from tokens.orr import Or
from tokens.then import Then
from tokens.variable import Variable
class TokenParser:
"""This parser only works with atomic expressions,
so parenthesis are needed everywhere to group items"""
@staticmethod
def parse_expression(string):
        # Separate parentheses so they become new tokens
# Also convert [ or { to the same parenthesis (
for s in '([{':
string = string.replace(s, ' ( ')
for s in ')]}':
string = string.replace(s, ' ) ')
# Get all operators so we can iterate over them
#
# Note that the order here is important. We first need to replace long
# expressions, such as '<->' with their single character representations.
#
# If we didn't do this, after we tried to separate the tokens from other
# expressions by adding spaces on both sides of the operator, '->' would
# break '<->' turning it into '< ->', which would not be recognised.
#
# We add spaces between the tokens so it's easy to split them and identify them.
        # Another way would be to iterate over the string and find the tokens. Once
# identified, they'd be put, in order, on a different list. However, this is
# not as simple as the currently used approach.
operators = [Iff, Then, Not, Or, And, ConstantTrue, ConstantFalse]
# Find all the representations on the string and add surrounding spaces,
# this will allow us to call 'string.split()' to separate variable names
# from the operators so the user doesn't need to enter them separated
for operator in operators:
for representation in operator.representations:
string = string.replace(representation, ' '+operator.single_char_representation+' ')
# Get all the tokens
words = string.split()
# Store the found nested expressions on the stack
expressions_stack = [Expression()]
for w in words:
done = False
for operator in operators:
# We replaced all the operator with their single character representations. We
# don't need to check whether the current word (representation) is any of the
# available representations for this operator, since it's the single-character one.
if w == operator.single_char_representation:
expressions_stack[-1].add_token(operator())
done = True
break
if done:
pass
elif w == '(':
expressions_stack.append(Expression())
elif w == ')':
e = expressions_stack.pop()
expressions_stack[-1].add_token(e)
else:
expressions_stack[-1].add_token(Variable(w))
# Tokenize the top expression (this will also tokenize its children)
expressions_stack[0].tokenize()
# Return the top expression once it's completely valid
return expressions_stack[0]
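# --- Editor's illustrative sketch (not part of the original module) ---
# The same two tricks in miniature, without the tokens.* classes: (1) map long
# operators to single characters and pad them with spaces so str.split() yields
# clean tokens, and (2) push/pop lists on '(' / ')' to recover the nesting.
# The single-character stand-ins ('=' for '<->', '>' for '->') are placeholders
# chosen here, not the representations the real operator classes define.
def _mini_parse(string):
    # Longest representations first, so '->' cannot break an already padded '<->'
    for representation, single in (('<->', '='), ('->', '>'),
                                   ('&', '&'), ('|', '|'), ('!', '!')):
        string = string.replace(representation, ' %s ' % single)
    for s in '([{':
        string = string.replace(s, ' ( ')
    for s in ')]}':
        string = string.replace(s, ' ) ')
    stack = [[]]
    for token in string.split():
        if token == '(':
            stack.append([])
        elif token == ')':
            group = stack.pop()
            stack[-1].append(group)
        else:
            stack[-1].append(token)
    return stack[0]
# e.g. _mini_parse('(p -> q) & r') == [['p', '>', 'q'], '&', 'r']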
|
LonamiWebs/Py-Utils
|
logicmind/token_parser.py
|
Python
|
mit
| 3,318
|
'''
author : "George Profenza"
url : ("disturb", "disturbmedia.com/blog","My blog, http://tomaterial.blogspot.com")
Export meshes to the three.js 3D Engine by mr.doob et al.
More details on the engine here:
https://github.com/mrdoob/three.js
Currently supports UVs. If the model doesn't display correctly
you might need to reverse some normals/do some cleanup.
Also, if you use Selection Tags and basic ColorMaterials,
the colours will be picked up as face colors. Call autoColor() on the
model you use for this.
The mesh transformations(position, rotation, scale) are saved
and you can get them using: getPosition(), getRotation() and getScale()
each returning a THREE.Vector3
In short
var myGeom = new myC4DGeom();
var myModel = new THREE.Mesh( myGeom, new THREE.MeshFaceMaterial());
//set transforms
model.position = myGeom.getPosition()
model.rotation = myGeom.getRotation()
model.scale = myGeom.getScale()
//set selection tags colours
myGeom.autoColor()
More details on this exporter and more js examples here:
https://github.com/orgicus/three.js
Have fun!
This script requires Cinema 4D R11.5 minimum and the Py4D Plugin:
http://www.py4d.com/get-py4d/
'''
import c4d
from c4d import documents, UVWTag, storage, plugins, gui, modules, bitmaps, utils
from c4d.utils import *
''' from c4d import symbols as sy, plugins, utils, bitmaps, gui '''
import math
import re
# utils
clean = lambda varStr: re.sub('\W|^(?=\d)','_', varStr)
# from Active State's Python recipies: http://code.activestate.com/recipes/266466-html-colors-tofrom-rgb-tuples/
def RGBToHTMLColor(rgb_tuple):
return '0x%02x%02x%02x' % rgb_tuple
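# --- Editor's illustrative note (not part of the original exporter) ---
# How the two helpers above behave: `clean` turns an object name into a safe
# JS identifier and RGBToHTMLColor packs a 0-255 tuple into a 0xRRGGBB string:
#     clean('My Mesh #1')           -> 'My_Mesh__1'
#     clean('1st Mesh')             -> '_1st_Mesh'
#     RGBToHTMLColor((255, 128, 0)) -> '0xff8000'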
def Export():
if not op: return
if op.GetType() != 5100:
print 'Selected Object is not an editable mesh'
return
    unit = 0.001  # for scale
fps = doc.GetFps()
bd = doc.GetRenderBaseDraw()
scr = bd.GetFrameScreen()
rd = doc.GetActiveRenderData()
name = op.GetName()
classname = clean(name)
c4dPath = c4d.storage.GeGetC4DPath(c4d.C4D_PATH_LIBRARY)
jsFile = open(c4dPath+'/scripts/Three.js','r')
js = jsFile.read()
htmlFile = open(c4dPath+'/scripts/template.html','r')
html = htmlFile.read()
html = html.replace('%s',classname)
code = 'var %s = function () {\n\n\tvar scope = this;\n\n\tTHREE.Geometry.call(this);\n\n' % classname
def GetMesh(code):
# goto 0
doc.SetTime(c4d.BaseTime(0, fps))
c4d.DrawViews( c4d.DA_ONLY_ACTIVE_VIEW|c4d.DA_NO_THREAD|c4d.DA_NO_REDUCTION|c4d.DA_STATICBREAK )
c4d.GeSyncMessage(c4d.EVMSG_TIMECHANGED)
doc.SetTime(doc.GetTime())
c4d.EventAdd(c4d.EVENT_ANIMATE)
SendModelingCommand(command = c4d.MCOMMAND_REVERSENORMALS, list = [op], mode = c4d.MODIFY_ALL, bc = c4d.BaseContainer(), doc = doc)
verts = op.GetAllPoints()
for v in verts:
code += '\tv( %.6f, %.6f, %.6f );\n' % (v.x, -v.y, v.z)
code += '\n'
ncount = 0
uvcount = 0
faces = op.GetAllPolygons()
normals = op.CreatePhongNormals()
ndirection = 1
hasUV = False
for tag in op.GetTags():
if tag.GetName() == "UVW":
uvw = tag
hasUV = True
for f in faces:
if(f.d == f.c):
if(normals):
code += '\tf3( %d, %d, %d, %.6f, %.6f, %.6f );\n' % (f.a, f.b, f.c, normals[ncount].x*ndirection, normals[ncount].y*ndirection, normals[ncount].z*ndirection)
else:
code += '\tf3( %d, %d, %d );\n' % (f.a, f.b, f.c)
else:
if(normals):
code += '\tf4( %d, %d, %d, %d, %.6f, %.6f, %.6f );\n' % (f.a, f.b, f.c, f.d, normals[ncount].x*ndirection, normals[ncount].y*ndirection, normals[ncount].z*ndirection)
else:
code += '\tf4( %d, %d, %d, %d );\n' % (f.a, f.b, f.c, f.d)
if hasUV:
uv = uvw.GetSlow(uvcount);
# uvs += '[Vector('+str(uv[0].x)+','+str(1.0-uv[0].y)+'),Vector('+str(uv[1].x)+','+str(1.0-uv[1].y)+'),Vector('+str(uv[2].x)+','+str(1.0-uv[2].y)+')],'
if len(uv) == 4:
# {'a': Vector(1, 1, 0), 'c': Vector(0, 0, 0), 'b': Vector(1, 0, 0), 'd': Vector(0, 1, 0)}
                    code += '\tuv( %.6f, %.6f, %.6f, %.6f, %.6f, %.6f, %.6f, %.6f);\n' % (uv['a'].x, uv['a'].y, uv['b'].x, uv['b'].y, uv['c'].x, uv['c'].y, uv['d'].x, uv['d'].y)
else:
code += '\tuv( %.6f, %.6f, %.6f, %.6f, %.6f, %.6f);\n' % (uv['a'].x, uv['a'].y, uv['b'].x, uv['b'].y, uv['c'].x, uv['c'].y)
ncount += 1
uvcount += 1
code +='\n\tthis.computeCentroids();\n\tthis.computeNormals(true);\n'
#selection color
code +='\n\tscope.colors = {};\n'
code +='\tscope.selections = {};\n'
selName = ''
for tag in op.GetTags():
if(tag.GetType() == 5616): #texture tag
material = tag.GetMaterial()
color = material[c4d.MATERIAL_COLOR_COLOR]
tag.SetBit(c4d.BIT_ACTIVE)
selName = clean(tag[c4d.TEXTURETAG_RESTRICTION])
if len(selName) == 0: print "*** WARNING! *** Missing selection name for material: " + material.GetName()
code += '\tscope.colors["'+selName+'"] = '+str(RGBToHTMLColor((color.x*255,color.y*255,color.z*255)))+';\n'
if tag.GetType() == 5673: #selection tag
print 'selection: ' + tag.GetName()
                print 'selection object: ' + str(tag)
sel = tag.GetSelection()
selName = clean(tag.GetName())
ids = sel.GetAll(op.GetPointCount())
indices = [i for i, e in enumerate(ids) if e != 0]
code += '\tscope.selections["'+selName+'"] = '+str(indices)+';\n'
code += '\n\tscope.autoColor = function(){\n'
code += '\t\tfor(var s in this.selections){\n'
code += '\t\t\tfor(var i = 0 ; i < this.selections[s].length; i++) this.faces[this.selections[s][i]].material = [new THREE.MeshBasicMaterial({color:this.colors[s]})];\n'
code += '\t\t}\n\t}\n'
# model position, rotation, scale rotation x,y,z = H,P,B => three.js x,y,z is P,H,B => y,x,z
p = op.GetPos()
r = op.GetRot()
s = op.GetScale()
code += '\n\tscope.getPosition = function(){\treturn new THREE.Vector3'+str((p.x,p.y,p.z))+';\t}\n'
code += '\n\tscope.getRotation = function(){\treturn new THREE.Vector3'+str((r.y,r.x,r.z))+';\t}\n'
code += '\n\tscope.getScale = function(){\treturn new THREE.Vector3'+str((s.x,s.y,s.z))+';\t}\n'
code += '\n'
code += '\tfunction v( x, y, z ) {\n\n'
code += '\t\tscope.vertices.push( new THREE.Vertex( new THREE.Vector3( x, y, z ) ) );\n\n'
code += '\t}\n\n'
code += '\tfunction f3( a, b, c, nx, ny, nz ) {\n\n'
code += '\t\tscope.faces.push( new THREE.Face3( a, b, c, nx && ny && nz ? new THREE.Vector3( nx, ny, nz ) : null ) );\n\n'
code += '\t}\n\n'
code += '\tfunction f4( a, b, c, d, nx, ny, nz ) {\n\n'
code += '\t\tscope.faces.push( new THREE.Face4( a, b, c, d, nx && ny && nz ? new THREE.Vector3( nx, ny, nz ) : null ) );\n\n'
code += '\t}\n\n'
code += '\tfunction uv( u1, v1, u2, v2, u3, v3, u4, v4 ) {\n\n'
code += '\t\tvar uv = [];\n'
code += '\t\tuv.push( new THREE.UV( u1, v1 ) );\n'
code += '\t\tuv.push( new THREE.UV( u2, v2 ) );\n'
code += '\t\tuv.push( new THREE.UV( u3, v3 ) );\n'
code += '\t\tif ( u4 && v4 ) uv.push( new THREE.UV( u4, v4 ) );\n'
code += '\t\tscope.uvs.push( uv );\n'
code += '\t}\n\n'
code += '}\n\n'
code += '%s.prototype = new THREE.Geometry();\n' % classname
code += '%s.prototype.constructor = %s;' % (classname, classname)
        SendModelingCommand(command = c4d.MCOMMAND_REVERSENORMALS, list = [op], mode = c4d.MODIFY_ALL, bc = c4d.BaseContainer(), doc = doc)
return code
code = GetMesh(code)
docPath = doc.GetDocumentPath()
jspath = docPath+'/'+classname+'.js'
htmlpath = docPath+'/'+classname+'.html'
file = open(jspath,'w')
file.write(code)
file.close()
file = open(htmlpath,'w')
file.write(html)
file.close()
file = open(docPath+'/Three.js','w')
file.write(js)
file.close()
print 'Export Complete!'
Export()
|
flyingoctopus/three.js
|
utils/exporters/cinema4d/export_to_three.js.py
|
Python
|
mit
| 8,574
|
"""
This module houses ctypes interfaces for GDAL objects. The following GDAL
objects are supported:
CoordTransform: Used for coordinate transformations from one spatial
reference system to another.
Driver: Wraps an OGR data source driver.
DataSource: Wrapper for the OGR data source object, supports
OGR-supported data sources.
Envelope: A ctypes structure for bounding boxes (GDAL library
not required).
OGRGeometry: Object for accessing OGR Geometry functionality.
OGRGeomType: A class for representing the different OGR Geometry
types (GDAL library not required).
SpatialReference: Represents OSR Spatial Reference objects.
The GDAL library will be imported from the system path using the default
library name for the current OS. The default library path may be overridden
by setting `GDAL_LIBRARY_PATH` in your settings with the path to the GDAL C
library on your system.
GDAL links to a large number of external libraries that consume RAM when
loaded. Thus, it may be desirable to disable GDAL on systems with limited
RAM resources -- this may be accomplished by setting `GDAL_LIBRARY_PATH`
to a non-existent file location (e.g., `GDAL_LIBRARY_PATH='/null/path'`;
setting to None/False/'' will not work as a string must be given).
"""
from django.contrib.gis.gdal.error import (check_err, GDALException,
OGRException, OGRIndexError, SRSException) # NOQA
from django.contrib.gis.gdal.geomtype import OGRGeomType # NOQA
__all__ = [
'check_err', 'GDALException', 'OGRException', 'OGRIndexError',
'SRSException', 'OGRGeomType', 'HAS_GDAL',
]
# Attempting to import objects that depend on the GDAL library. The
# HAS_GDAL flag will be set to True if the library is present on
# the system.
try:
from django.contrib.gis.gdal.driver import Driver # NOQA
from django.contrib.gis.gdal.datasource import DataSource # NOQA
from django.contrib.gis.gdal.libgdal import gdal_version, gdal_full_version, GDAL_VERSION # NOQA
from django.contrib.gis.gdal.raster.source import GDALRaster # NOQA
from django.contrib.gis.gdal.srs import SpatialReference, CoordTransform # NOQA
from django.contrib.gis.gdal.geometries import OGRGeometry # NOQA
HAS_GDAL = True
__all__ += [
'Driver', 'DataSource', 'gdal_version', 'gdal_full_version',
'GDAL_VERSION', 'SpatialReference', 'CoordTransform', 'OGRGeometry',
]
except GDALException:
HAS_GDAL = False
try:
from django.contrib.gis.gdal.envelope import Envelope
__all__ += ['Envelope']
except ImportError:
# No ctypes, but don't raise an exception.
pass
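# --- Editor's illustrative sketch (not part of Django itself) ---
# Downstream code is expected to guard GDAL-backed features on the HAS_GDAL
# flag defined above, for example:
def _example_wgs84_srs():
    """Return a WGS84 SpatialReference if the GDAL C library is available."""
    if HAS_GDAL:
        return SpatialReference('WGS84')
    return None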
|
diego-d5000/MisValesMd
|
env/lib/python2.7/site-packages/django/contrib/gis/gdal/__init__.py
|
Python
|
mit
| 2,676
|
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib
import assignment2_helper as helper
# Look pretty...
matplotlib.style.use('ggplot')
# Do * NOT * alter this line, until instructed!
scaleFeatures = True
# TODO: Load up the dataset and remove any and all
# Rows that have a nan. You should be a pro at this
# by now ;-)
#
# .. your code here
original_df = pd.read_csv('Datasets/kidney_disease.csv')
new_df = original_df.dropna()
# Create some color coded labels; the actual label feature
# will be removed prior to executing PCA, since it's unsupervised.
# You're only labeling by color so you can see the effects of PCA
labels = ['red' if i=='ckd' else 'green' for i in new_df.classification]
# TODO: Use an indexer to select only the following columns:
# ['bgr','wc','rc']
#
# .. your code here ..
new_df.head()
print new_df.columns.get_loc("bgr")
print new_df.columns.get_loc("wc")
print new_df.columns.get_loc("rc")
df = new_df.iloc[:,[10,17,18]]
df.head()
# TODO: Print out and check your dataframe's dtypes. You'll probably
# want to call 'exit()' after you print it out so you can stop the
# program's execution.
df.dtypes
# You can either take a look at the dataset webpage in the attribute info
# section: https://archive.ics.uci.edu/ml/datasets/Chronic_Kidney_Disease
# or you can actually peek through the dataframe by printing a few rows.
# What kind of data type should these three columns be? If Pandas didn't
# properly detect and convert them to that data type for you, then use
# an appropriate command to coerce these features into the right type.
#
# .. your code here ..
df.wc=pd.to_numeric(df.iloc[:,1], errors='coerce')
df.rc=pd.to_numeric(df.iloc[:,2], errors='coerce')
# TODO: PCA Operates based on variance. The variable with the greatest
# variance will dominate. Go ahead and peek into your data using a
# command that will check the variance of every feature in your dataset.
# Print out the results. Also print out the results of running .describe
# on your dataset.
df.describe()
# Hint: If you don't see all three variables: 'bgr','wc' and 'rc', then
# you probably didn't complete the previous step properly.
#
# .. your code here ..
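# --- Editor's illustrative sketch (not part of the assignment) ---
# Why the variance check matters: with unscaled features, PCA's first component
# is dominated by whichever column has the largest variance. A tiny
# self-contained demo (numpy/scikit-learn are already dependencies here):
import numpy as np
from sklearn.decomposition import PCA as _PCA
from sklearn.preprocessing import StandardScaler as _StandardScaler
_rng = np.random.RandomState(0)
_X = np.column_stack([_rng.normal(0, 1, 200), _rng.normal(0, 100, 200)])
_raw = _PCA(n_components=2).fit(_X).explained_variance_ratio_           # ~[1.0, 0.0]
_scaled = _PCA(n_components=2).fit(
    _StandardScaler().fit_transform(_X)).explained_variance_ratio_      # ~[0.5, 0.5]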
# TODO: This method assumes your dataframe is called df. If it isn't,
# make the appropriate changes. Don't alter the code in scaleFeatures()
# just yet though!
#
# .. your code adjustment here ..
if scaleFeatures: df = helper.scaleFeatures(df)
# TODO: Run PCA on your dataset and reduce it to 2 components
# Ensure your PCA instance is saved in a variable called 'pca',
# and that the results of your transformation are saved in 'T'.
#
# .. your code here ..
from sklearn.decomposition import PCA
pca = PCA(n_components=2)
pca.fit(df)
T = pca.transform(df)
# Plot the transformed data as a scatter plot. Recall that transforming
# the data will result in a NumPy NDArray. You can either use MatPlotLib
# to graph it directly, or you can convert it to DataFrame and have pandas
# do it for you.
# Since we've already demonstrated how to plot directly with MatPlotLib in
# Module4/assignment1.py, this time we'll convert to a Pandas Dataframe.
#
# Since we transformed via PCA, we no longer have column names. We know we
# are in P.C. space, so we'll just define the coordinates accordingly:
ax = helper.drawVectors(T, pca.components_, df.columns.values, plt, scaleFeatures)
T = pd.DataFrame(T)
T.columns = ['component1', 'component2']
T.plot.scatter(x='component1', y='component2', marker='o', c=labels, alpha=0.75, ax=ax)
plt.show()
|
rebaltina/DAT210x
|
Module4/assignment2_true.py
|
Python
|
mit
| 3,557
|
#!/usr/bin/env python
# Software License Agreement (BSD License)
#
# Copyright (c) 2012, Philipp Wagner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of the author nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import sys, math
from PIL import Image
import facedetect
def Distance(p1,p2):
dx = p2[0] - p1[0]
dy = p2[1] - p1[1]
return math.sqrt(dx*dx+dy*dy)
def ScaleRotateTranslate(image, angle, center = None, new_center = None, scale = None, resample=Image.BICUBIC):
if (scale is None) and (center is None):
return image.rotate(angle=angle, resample=resample)
nx,ny = x,y = center
sx=sy=1.0
if new_center:
(nx,ny) = new_center
if scale:
(sx,sy) = (scale, scale)
cosine = math.cos(angle)
sine = math.sin(angle)
a = cosine/sx
b = sine/sx
c = x-nx*a-ny*b
d = -sine/sy
e = cosine/sy
f = y-nx*d-ny*e
return image.transform(image.size, Image.AFFINE, (a,b,c,d,e,f), resample=resample)
def CropFace(image, eye_left=(0,0), eye_right=(0,0), offset_pct=(0.2,0.2), dest_sz = (70,70)):
# calculate offsets in original image
offset_h = math.floor(float(offset_pct[0])*dest_sz[0])
offset_v = math.floor(float(offset_pct[1])*dest_sz[1])
# get the direction
eye_direction = (eye_right[0] - eye_left[0], eye_right[1] - eye_left[1])
# calc rotation angle in radians
rotation = -math.atan2(float(eye_direction[1]),float(eye_direction[0]))
# distance between them
dist = Distance(eye_left, eye_right)
# calculate the reference eye-width
reference = dest_sz[0] - 2.0*offset_h
# scale factor
scale = float(dist)/float(reference)
# rotate original around the left eye
image = ScaleRotateTranslate(image, center=eye_left, angle=rotation)
# crop the rotated image
crop_xy = (eye_left[0] - scale*offset_h, eye_left[1] - scale*offset_v)
crop_size = (dest_sz[0]*scale, dest_sz[1]*scale)
image = image.crop((int(crop_xy[0]), int(crop_xy[1]), int(crop_xy[0]+crop_size[0]), int(crop_xy[1]+crop_size[1])))
# resize it
image = image.resize(dest_sz, Image.ANTIALIAS)
return image
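# --- Editor's illustrative sketch (not part of the original script) ---
# The geometry above, worked through for the eye coordinates that appear in the
# commented-out CropFace call in __main__ ((252, 364) and (420, 366)):
def _crop_face_geometry_example():
    eye_left, eye_right = (252, 364), (420, 366)
    offset_pct, dest_sz = (0.1, 0.1), (200, 200)
    offset_h = math.floor(float(offset_pct[0]) * dest_sz[0])  # 20 px margin
    rotation = -math.atan2(2.0, 168.0)                        # ~ -0.0119 rad, eyes almost level
    reference = dest_sz[0] - 2.0 * offset_h                   # 160 px reference eye width
    scale = Distance(eye_left, eye_right) / reference         # ~ 1.05x crop scale
    return offset_h, rotation, reference, scale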
if __name__ == "__main__":
f = open(sys.argv[1], 'r')
csv = open(sys.argv[2], "w")
for line in f:
lineArray = line.split(";")
fileName = lineArray[0]
label = lineArray[1]
print "aligning %s to aligned" % (fileName)
aligned_file_name = "aligned/%s" % fileName
face = facedetect.detect_faces(fileName)['face'][0]
print(face)
CropFace(Image.open(fileName), eye_left=(face[0],face[1]), eye_right=(face[2],face[1]), offset_pct=(0.08,0.08), dest_sz=(200,200)).save(aligned_file_name)
# CropFace(Image.open(fileName), eye_left=(252,364), eye_right=(420,366), offset_pct=(0.1,0.1), dest_sz=(200,200)).save(aligned_file_name)
csv.write("%s;%s" % (aligned_file_name, label))
f.close()
csv.close()
|
DiUS/Physiognomy
|
python/align_faces.py
|
Python
|
mit
| 4,189
|
import theano
from theano import shared, tensor
from blocks.bricks import Feedforward, Activation
from blocks.bricks.base import application, lazy
from blocks_extras.initialization import PermutationMatrix
from blocks_extras.utils import check_valid_permutation
from blocks.utils import shared_floatx
class FixedPermutation(Feedforward):
"""Perform a fixed permutation of the input features.
Parameters
----------
order : ndarray-like
A 1-dimensional container containing a permutation
on the integers.
dot : bool, optional
Whether or not to perform the permutation by matrix
multiplication. This may be faster in some circumstances
but requires allocation of a permutation matrix.
"""
@lazy(allocation=['order'])
def __init__(self, order, dot=True, **kwargs):
self.order = order
self._dot = dot
super(FixedPermutation, self).__init__(**kwargs)
def _allocate(self):
self.order = check_valid_permutation(self.order)
if self.input_dim != len(self.order):
raise ValueError("input_dim does not match length of order "
"vector")
# No roles assigned here, since these are not learnable parameters.
if self._dot:
shape = (self.order.shape[0], self.order.shape[0])
self._matrix = shared_floatx(
PermutationMatrix(self.order).generate(None, shape))
else:
order = self.order.astype('int32')
assert order.min() == 0 # Catch highly unlikely downcast issue.
self._permutation = shared(order)
@property
def input_dim(self):
return len(self.order)
@application(inputs=['input_'], outputs=['output_'])
def apply(self, input_):
if self._dot:
return tensor.dot(input_, self._matrix)
else:
return tensor.take(input_, self._permutation, axis=1)
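# --- Editor's illustrative sketch (not part of blocks-extras) ---
# The two strategies in FixedPermutation.apply are equivalent: multiplying by a
# permutation matrix reorders columns exactly like a take along axis 1. A small
# numpy check of the idea (the brick's own PermutationMatrix may use a different
# but equivalent convention):
def _permutation_equivalence_demo():
    import numpy
    order = numpy.array([2, 0, 1])
    x = numpy.arange(6.).reshape(2, 3)
    matrix = numpy.eye(3)[:, order]  # identity with its columns reordered
    assert numpy.allclose(x.dot(matrix), x.take(order, axis=1))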
class Softsign(Activation):
@application(inputs=['input_'], outputs=['output'])
def apply(self, input_):
one = tensor.constant(1, dtype=theano.config.floatX)
return input_ / (one + abs(input_))
|
mila-udem/blocks-extras
|
blocks_extras/bricks/__init__.py
|
Python
|
mit
| 2,174
|
from rest_framework import permissions
class IsOwnerOrReadOnly(permissions.BasePermission):
"""
Object-level permission to only allow owners of an object to edit it.
Assumes the model instance has an `owner` attribute.
"""
def has_object_permission(self, request, view, obj):
# Read permissions are allowed to any request,
# so we'll always allow GET, HEAD or OPTIONS requests.
if request.method in permissions.SAFE_METHODS:
return True
# Instance must have an attribute named `owner`.
return obj.owner == request.user
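# --- Editor's illustrative sketch (not part of this project) ---
# Typical attachment of the permission to a DRF view; `Snippet` and
# `SnippetSerializer` below are hypothetical placeholders:
#
#     from rest_framework import generics
#
#     class SnippetDetail(generics.RetrieveUpdateDestroyAPIView):
#         queryset = Snippet.objects.all()
#         serializer_class = SnippetSerializer
#         permission_classes = [IsOwnerOrReadOnly]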
|
johnnywell/snowman
|
api/permissions.py
|
Python
|
mit
| 595
|
from django.contrib import admin
from api.models import (
CrawlUrls,
CrawlLinks,
)
# Register your models here.
admin.site.register(CrawlUrls)
admin.site.register(CrawlLinks)
|
saymedia/seosuite-dashboard-api
|
api/admin.py
|
Python
|
mit
| 187
|
"""
WSGI config for PythonAnywhere test project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
from django.core.wsgi import get_wsgi_application
from whitenoise.django import DjangoWhiteNoise
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "config.settings.production"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.production")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
application = get_wsgi_application()
# Use Whitenoise to serve static files
# See: https://whitenoise.readthedocs.org/
application = DjangoWhiteNoise(application)
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
|
hjwp/cookiecutter-example-project
|
config/wsgi.py
|
Python
|
mit
| 1,632
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import print_function
from __future__ import with_statement
import glob
import os
import hmac
import hashlib
import shutil
import socket
import subprocess
import struct
from twisted.internet import defer
from twisted.internet.interfaces import IProtocolFactory
from twisted.internet.endpoints import serverFromString
from zope.interface import implementer
try:
import GeoIP as _GeoIP
GeoIP = _GeoIP
except ImportError:
GeoIP = None
city = None
country = None
asn = None
# XXX probably better to depend on and use "six" for py2/3 stuff?
try:
unicode
except NameError:
py3k = True
basestring = str
else:
py3k = False
basestring = basestring
def create_geoip(fname):
# It's more "pythonic" to just wait for the exception,
# but GeoIP prints out "Can't open..." messages for you,
# which isn't desired here
if not os.path.isfile(fname):
raise IOError("Can't find %s" % fname)
if GeoIP is None:
return None
# just letting any errors make it out
return GeoIP.open(fname, GeoIP.GEOIP_STANDARD)
def maybe_create_db(path):
try:
return create_geoip(path)
except IOError:
return None
city, asn, country = list(map(maybe_create_db,
("/usr/share/GeoIP/GeoLiteCity.dat",
"/usr/share/GeoIP/GeoIPASNum.dat",
"/usr/share/GeoIP/GeoIP.dat")))
try:
import ipaddr as _ipaddr
ipaddr = _ipaddr
except ImportError:
ipaddr = None
def is_executable(path):
"""Checks if the given path points to an existing, executable file"""
return os.path.isfile(path) and os.access(path, os.X_OK)
def find_tor_binary(globs=('/usr/sbin/', '/usr/bin/',
'/Applications/TorBrowser_*.app/Contents/MacOS/'),
system_tor=True):
"""
    Tries to find the tor executable, first via the shell and then in the
    paths whose glob patterns are given in the 'globs' tuple.
:param globs:
A tuple of shell-style globs of directories to use to find tor
        (TODO: consider making these globs point to the actual tor binary?)
:param system_tor:
        This controls whether bash is used to search for 'tor' or
not. If False, we skip that check and use only the 'globs'
tuple.
"""
# Try to find the tor executable using the shell
if system_tor:
try:
proc = subprocess.Popen(
('which tor'),
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
shell=True
)
except OSError:
pass
else:
stdout, _ = proc.communicate()
if proc.poll() == 0 and stdout != '':
return stdout.strip()
    # the shell lookup may fail, and tor is usually not on PATH when using
    # the browser-bundle. Look in specific places
for pattern in globs:
for path in glob.glob(pattern):
torbin = os.path.join(path, 'tor')
if is_executable(torbin):
return torbin
return None
def maybe_ip_addr(addr):
"""
Tries to return an IPAddress, otherwise returns a string.
TODO consider explicitly checking for .exit or .onion at the end?
"""
if ipaddr is not None:
try:
return ipaddr.IPAddress(addr)
except ValueError:
pass
return str(addr)
def find_keywords(args, key_filter=lambda x: not x.startswith("$")):
"""
This splits up strings like name=value, foo=bar into a dict. Does NOT deal
    with quotes in values (e.g. key="value with space" will not work).
By default, note that it takes OUT any key which starts with $ (i.e. a
single dollar sign) since for many use-cases the way Tor encodes nodes
with "$hash=name" looks like a keyword argument (but it isn't). If you
don't want this, override the "key_filter" argument to this method.
:return:
a dict of key->value (both strings) of all name=value type
keywords found in args.
"""
filtered = [x for x in args if '=' in x and key_filter(x.split('=')[0])]
return dict(x.split('=', 1) for x in filtered)
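# --- Editor's illustrative note (not part of txtorcon) ---
# e.g. find_keywords(['CIRC', '4', 'PURPOSE=GENERAL', '$ABCDEF=somerelay'])
# returns {'PURPOSE': 'GENERAL'}: items without '=' are dropped and the
# '$hash=name' style argument is filtered out by the default key_filter.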
def delete_file_or_tree(*args):
"""
For every path in args, try to delete it as a file or a directory
tree. Ignores deletion errors.
"""
for f in args:
try:
os.unlink(f)
except OSError:
shutil.rmtree(f, ignore_errors=True)
def ip_from_int(ip):
""" Convert long int back to dotted quad string """
return socket.inet_ntoa(struct.pack('>I', ip))
def process_from_address(addr, port, torstate=None):
"""
Determines the PID from the address/port provided by using lsof
and returns it as an int (or None if it couldn't be
determined). In the special case the addr is '(Tor_internal)' then
the PID of the Tor process (as gotten from the torstate object) is
returned (or 0 if unavailable, e.g. a Tor which doesn't implement
'GETINFO process/pid'). In this case if no TorState instance is
given, None is returned.
"""
if addr is None:
return None
if "(tor_internal)" == str(addr).lower():
if torstate is None:
return None
return int(torstate.tor_pid)
proc = subprocess.Popen(['lsof', '-i', '4tcp@%s:%s' % (addr, port)],
stdout=subprocess.PIPE)
(stdout, stderr) = proc.communicate()
lines = stdout.split('\n')
if len(lines) > 1:
return int(lines[1].split()[1])
def hmac_sha256(key, msg):
"""
Adapted from rransom's tor-utils git repository. Returns the
digest (binary) of an HMAC with SHA256 over msg with key.
"""
return hmac.new(key, msg, hashlib.sha256).digest()
CRYPTOVARIABLE_EQUALITY_COMPARISON_NONCE = os.urandom(32)
def compare_via_hash(x, y):
"""
Taken from rransom's tor-utils git repository, to compare two
hashes in something resembling constant time (or at least, not
leaking timing info?)
"""
return (hmac_sha256(CRYPTOVARIABLE_EQUALITY_COMPARISON_NONCE, x) ==
hmac_sha256(CRYPTOVARIABLE_EQUALITY_COMPARISON_NONCE, y))
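# --- Editor's note (illustrative, not part of txtorcon) ---
# Python 2.7.7+ / 3.3+ ship hmac.compare_digest(), which provides the same
# timing-safe comparison directly; the hash-then-compare trick above emulates
# it for older interpreters.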
class NetLocation:
"""
Represents the location of an IP address, either city or country
level resolution depending on what GeoIP database was loaded. If
the ASN database is available you get that also.
"""
def __init__(self, ipaddr):
"ipaddr should be a dotted-quad"
self.ip = ipaddr
self.latlng = (None, None)
self.countrycode = None
self.city = None
self.asn = None
if self.ip is None or self.ip == 'unknown':
return
if city:
try:
r = city.record_by_addr(self.ip)
except:
r = None
if r is not None:
self.countrycode = r['country_code']
self.latlng = (r['latitude'], r['longitude'])
try:
self.city = (r['city'], r['region_code'])
except KeyError:
self.city = (r['city'], r['region_name'])
elif country:
self.countrycode = country.country_code_by_addr(ipaddr)
else:
self.countrycode = ''
if asn:
try:
self.asn = asn.org_by_addr(self.ip)
except:
self.asn = None
@implementer(IProtocolFactory)
class NoOpProtocolFactory:
"""
This is an IProtocolFactory that does nothing. Used for testing,
and for :method:`available_tcp_port`
"""
def noop(self, *args, **kw):
pass
buildProtocol = noop
doStart = noop
doStop = noop
@defer.inlineCallbacks
def available_tcp_port(reactor):
"""
Returns a Deferred firing an available TCP port on localhost.
It does so by listening on port 0; then stopListening and fires the
assigned port number.
"""
endpoint = serverFromString(reactor, 'tcp:0:interface=127.0.0.1')
port = yield endpoint.listen(NoOpProtocolFactory())
address = port.getHost()
yield port.stopListening()
defer.returnValue(address.port)
|
ghtdak/txtorcon
|
txtorcon/util.py
|
Python
|
mit
| 8,270
|
#!/usr/bin/env python3
import os
import re
import itertools
from functools import reduce
from .version import __version__
sep_regex = re.compile(r'[ \-_~!@#%$^&*\(\)\[\]\{\}/\:;"|,./?`]')
def get_portable_filename(filename):
path, _ = os.path.split(__file__)
filename = os.path.join(path, filename)
return filename
def load_conversion_file(filename):
filename = get_portable_filename(filename)
with open(filename, encoding='utf-8') as f:
l = list(f)
l = [i for i in l if i.strip()]
l = [i.strip().split() for i in l]
return {i[0]: i[1:] for i in l}
print('Loading converters...')
beginning = load_conversion_file('f2p-beginning.txt')
middle = load_conversion_file('f2p-middle.txt')
ending = load_conversion_file('f2p-ending.txt')
print('Loading persian word list...')
with open(get_portable_filename('persian-word-freq.txt'), encoding='utf-8') as f:
word_freq = list(f)
word_freq = [i.strip() for i in word_freq if i.strip()]
word_freq = [i.split() for i in word_freq if not i.startswith('#')]
word_freq = {i[0]: int(i[1]) for i in word_freq}
print('Loading dictionary...')
with open(get_portable_filename('f2p-dict.txt'), encoding='utf-8') as f:
dictionary = [i.strip().split(' ', 1) for i in f if i.strip()]
dictionary = {k.strip(): v.strip() for k, v in dictionary}
def f2p_word_internal(word, original_word):
# this function receives the word as separate letters
persian = []
for i, letter in enumerate(word):
if i == 0:
converter = beginning
elif i == len(word) - 1:
converter = ending
else:
converter = middle
conversions = converter.get(letter)
if conversions == None:
return [(''.join(original_word), 0.0)]
else:
conversions = ['' if i == 'nothing' else i for i in conversions]
persian.append(conversions)
alternatives = itertools.product(*persian)
alternatives = [''.join(i) for i in alternatives]
alternatives = [(i, word_freq[i]) if i in word_freq else (i, 0)
for i in alternatives]
if len(alternatives) > 0:
max_freq = max(freq for _, freq in alternatives)
alternatives = [(w, float(freq / max_freq)) if freq != 0 else (w, 0.0)
for w, freq in alternatives]
else:
alternatives = [(''.join(word), 1.0)]
return alternatives
def variations(word):
"""Create variations of the word based on letter combinations like oo,
sh, etc."""
if word == 'a':
return [['A']]
elif len(word) == 1:
return [[word[0]]]
elif word == 'aa':
return [['A']]
elif word == 'ee':
return [['i']]
elif word == 'ei':
return [['ei']]
elif word in ['oo', 'ou']:
return [['u']]
elif word == 'kha':
return [['kha'], ['kh', 'a']]
elif word in ['kh', 'gh', 'ch', 'sh', 'zh', 'ck']:
return [[word]]
elif word in ["'ee", "'ei"]:
return [["'i"]]
elif word in ["'oo", "'ou"]:
return [["'u"]]
elif word in ["a'", "e'", "o'", "i'", "u'", "A'"]:
return [[word[0] + "'"]]
elif word in ["'a", "'e", "'o", "'i", "'u", "'A"]:
return [["'" + word[1]]]
elif len(word) == 2 and word[0] == word[1]:
return [[word[0]]]
if word[:2] == 'aa':
return [['A'] + i for i in variations(word[2:])]
elif word[:2] == 'ee':
return [['i'] + i for i in variations(word[2:])]
elif word[:2] in ['oo', 'ou']:
return [['u'] + i for i in variations(word[2:])]
elif word[:3] == 'kha':
return \
[['kha'] + i for i in variations(word[3:])] + \
[['kh', 'a'] + i for i in variations(word[3:])] + \
[['k', 'h', 'a'] + i for i in variations(word[3:])]
elif word[:2] in ['kh', 'gh', 'ch', 'sh', 'zh', 'ck']:
return \
[[word[:2]] + i for i in variations(word[2:])] + \
[[word[0]] + i for i in variations(word[1:])]
elif word[:2] in ["a'", "e'", "o'", "i'", "u'", "A'"]:
return [[word[:2]] + i for i in variations(word[2:])]
elif word[:3] in ["'ee", "'ei"]:
return [["'i"] + i for i in variations(word[3:])]
elif word[:3] in ["'oo", "'ou"]:
return [["'u"] + i for i in variations(word[3:])]
elif word[:2] in ["'a", "'e", "'o", "'i", "'u", "'A"]:
return [[word[:2]] + i for i in variations(word[2:])]
elif len(word) >= 2 and word[0] == word[1]:
return [[word[0]] + i for i in variations(word[2:])]
else:
return [[word[0]] + i for i in variations(word[1:])]
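# --- Editor's illustrative note (not part of the original module) ---
# Example: variations('shoma') keeps both the digraph reading and the plain
# letter-by-letter split, i.e.
#     [['sh', 'o', 'm', 'A'], ['s', 'h', 'o', 'm', 'A']]
# and f2p_word_internal then scores every combination against the word list.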
def f2p_word(word, max_word_size=15, cutoff=3):
"""Convert a single word from Finglish to Persian.
max_word_size: Maximum size of the words to consider. Words larger
than this will be kept unchanged.
cutoff: The cut-off point. For each word, there could be many
possibilities. By default 3 of these possibilities are considered
for each word. This number can be changed by this argument.
"""
original_word = word
word = word.lower()
c = dictionary.get(word)
if c:
return [(c, 1.0)]
if word == '':
return []
elif len(word) > max_word_size:
return [(original_word, 1.0)]
results = []
for w in variations(word):
results.extend(f2p_word_internal(w, original_word))
# sort results based on the confidence value
results.sort(key=lambda r: r[1], reverse=True)
# return the top three results in order to cut down on the number
# of possibilities.
return results[:cutoff]
def f2p_list(phrase, max_word_size=15, cutoff=3):
"""Convert a phrase from Finglish to Persian.
phrase: The phrase to convert.
max_word_size: Maximum size of the words to consider. Words larger
than this will be kept unchanged.
cutoff: The cut-off point. For each word, there could be many
possibilities. By default 3 of these possibilities are considered
for each word. This number can be changed by this argument.
Returns a list of lists, each sub-list contains a number of
possibilities for each word as a pair of (word, confidence)
values.
"""
# split the phrase into words
results = [w for w in sep_regex.split(phrase) if w]
# return an empty list if no words
if results == []:
return []
# convert each word separately
results = [f2p_word(w, max_word_size, cutoff) for w in results]
return results
def f2p(phrase, max_word_size=15, cutoff=3):
"""Convert a Finglish phrase to the most probable Persian phrase.
"""
results = f2p_list(phrase, max_word_size, cutoff)
return ' '.join(i[0][0] for i in results)
def main():
print('Finglish to Persian Converter, v{}'.format(__version__))
print('finglish: ', end='')
phrase = input()
result = f2p(phrase)
print(result)
if __name__ == '__main__':
main()
|
elektito/finglish
|
finglish/f2p.py
|
Python
|
mit
| 6,952
|
from PyQt4 import QtGui, QtCore
class Window(QtGui.QMainWindow):
def __init__(self):
QtGui.QMainWindow.__init__(self)
self.mapper = QtCore.QSignalMapper(self)
self.toolbar = self.addToolBar('Foo')
self.toolbar.setToolButtonStyle(QtCore.Qt.ToolButtonTextOnly)
for text in 'One Two Three'.split():
action = QtGui.QAction(text, self)
self.mapper.setMapping(action, text)
action.triggered.connect(self.mapper.map)
self.toolbar.addAction(action)
self.mapper.mapped['QString'].connect(self.handleButton)
self.edit = QtGui.QLineEdit(self)
self.setCentralWidget(self.edit)
def handleButton(self, identifier):
if identifier == 'One':
text = 'Do This'
elif identifier == 'Two':
text = 'Do That'
elif identifier == 'Three':
text = 'Do Other'
self.edit.setText(text)
if __name__ == '__main__':
import sys
app = QtGui.QApplication(sys.argv)
window = Window()
window.resize(300, 60)
window.show()
sys.exit(app.exec_())
|
Schizo/MediaBrowser
|
python/Temp/sandboxSignalMapper.py
|
Python
|
mit
| 1,120
|
from pydons import MatStruct, FileBrowser, LazyDataset
import netCDF4
import numpy as np
import tempfile
import os
DATADIR = os.path.join(os.path.dirname(__file__), 'data')
def test_netcdf4():
d = MatStruct()
data1 = np.random.rand(np.random.randint(1, 1000))
with tempfile.NamedTemporaryFile(suffix=".nc") as tmpf:
fh = netCDF4.Dataset(tmpf.name, mode='w')
grp = fh.createGroup('mygroup')
dim1 = grp.createDimension('dim1')
var1 = grp.createVariable('var1', data1.dtype.str, (dim1.name, ))
var1[:] = data1
fh.close()
dd = FileBrowser(tmpf.name)
assert 'mygroup' in dd
assert 'var1' in dd.mygroup
assert np.all(dd.mygroup.var1[:] == data1)
|
coobas/pydons
|
tests/test_file_browser_netcdf4.py
|
Python
|
mit
| 740
|
from __future__ import print_function
from __future__ import unicode_literals
import re
import time
import socket
from netmiko.cisco_base_connection import CiscoSSHConnection
class HPProcurveSSH(CiscoSSHConnection):
def session_preparation(self):
"""
Prepare the session after the connection has been established.
        ProCurve prompts with 'Press any key to continue'.
"""
delay_factor = self.select_delay_factor(delay_factor=0)
time.sleep(2 * delay_factor)
self.write_channel("\n")
time.sleep(2 * delay_factor)
self.write_channel("\n")
time.sleep(2 * delay_factor)
# HP output contains VT100 escape codes
self.ansi_escape_codes = True
self.set_base_prompt()
self.disable_paging(command="\nno page\n")
self.set_terminal_width(command='terminal width 511')
def enable(self, cmd='enable', pattern='password', re_flags=re.IGNORECASE,
default_username='manager'):
"""Enter enable mode"""
debug = False
output = self.send_command_timing(cmd)
if 'username' in output.lower():
output += self.send_command_timing(default_username)
if 'password' in output.lower():
output += self.send_command_timing(self.secret)
if debug:
print(output)
self.clear_buffer()
return output
def cleanup(self):
"""Gracefully exit the SSH session."""
self.exit_config_mode()
self.write_channel("logout\n")
count = 0
while count <= 5:
time.sleep(.5)
output = self.read_channel()
if 'Do you want to log out' in output:
self.write_channel("y\n")
# Don't automatically save the config (user's responsibility)
elif 'Do you want to save the current' in output:
self.write_channel("n\n")
try:
self.write_channel("\n")
except socket.error:
break
count += 1
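# --- Editor's illustrative sketch (not part of netmiko) ---
# Typical use goes through netmiko's ConnectHandler factory with the
# 'hp_procurve' device_type; host, credentials and the command below are
# placeholders:
#
#     from netmiko import ConnectHandler
#     conn = ConnectHandler(device_type='hp_procurve', host='192.0.2.1',
#                           username='manager', password='secret')
#     output = conn.send_command('show system')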
|
shamanu4/netmiko
|
netmiko/hp/hp_procurve_ssh.py
|
Python
|
mit
| 2,061
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._subscriptions_operations import build_check_zone_peers_request, build_get_request, build_list_locations_request, build_list_request
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class SubscriptionsOperations:
"""SubscriptionsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.resource.subscriptions.v2018_06_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def list_locations(
self,
subscription_id: str,
**kwargs: Any
) -> AsyncIterable["_models.LocationListResult"]:
"""Gets all available geo-locations.
This operation provides all the locations that are available for resource providers; however,
each resource provider may support a subset of this list.
:param subscription_id: The ID of the target subscription.
:type subscription_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either LocationListResult or the result of cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.resource.subscriptions.v2018_06_01.models.LocationListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.LocationListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_locations_request(
subscription_id=subscription_id,
template_url=self.list_locations.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_locations_request(
subscription_id=subscription_id,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("LocationListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_locations.metadata = {'url': '/subscriptions/{subscriptionId}/locations'} # type: ignore
@distributed_trace_async
async def get(
self,
subscription_id: str,
**kwargs: Any
) -> "_models.Subscription":
"""Gets details about a specified subscription.
:param subscription_id: The ID of the target subscription.
:type subscription_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Subscription, or the result of cls(response)
:rtype: ~azure.mgmt.resource.subscriptions.v2018_06_01.models.Subscription
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Subscription"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(
subscription_id=subscription_id,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('Subscription', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}'} # type: ignore
@distributed_trace
def list(
self,
**kwargs: Any
) -> AsyncIterable["_models.SubscriptionListResult"]:
"""Gets all subscriptions for a tenant.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either SubscriptionListResult or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.resource.subscriptions.v2018_06_01.models.SubscriptionListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.SubscriptionListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
template_url=self.list.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_request(
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("SubscriptionListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions'} # type: ignore
@distributed_trace_async
async def check_zone_peers(
self,
subscription_id: str,
parameters: "_models.CheckZonePeersRequest",
**kwargs: Any
) -> "_models.CheckZonePeersResult":
"""Compares a subscriptions logical zone mapping.
:param subscription_id: The ID of the target subscription.
:type subscription_id: str
:param parameters: Parameters for checking zone peers.
:type parameters: ~azure.mgmt.resource.subscriptions.v2018_06_01.models.CheckZonePeersRequest
:keyword callable cls: A custom type or function that will be passed the direct response
:return: CheckZonePeersResult, or the result of cls(response)
:rtype: ~azure.mgmt.resource.subscriptions.v2018_06_01.models.CheckZonePeersResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.CheckZonePeersResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'CheckZonePeersRequest')
request = build_check_zone_peers_request(
subscription_id=subscription_id,
content_type=content_type,
json=_json,
template_url=self.check_zone_peers.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('CheckZonePeersResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
check_zone_peers.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Resources/checkZonePeers/'} # type: ignore
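# --- Editor's illustrative sketch (not generated code) ---
# Conventional consumption of these paged async operations through the aio
# client; treat the exact import paths and credential class as assumptions:
#
#     from azure.identity.aio import DefaultAzureCredential
#     from azure.mgmt.resource.subscriptions.aio import SubscriptionClient
#
#     async def list_subscription_ids():
#         async with SubscriptionClient(DefaultAzureCredential()) as client:
#             return [s.subscription_id async for s in client.subscriptions.list()]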
|
Azure/azure-sdk-for-python
|
sdk/resources/azure-mgmt-resource/azure/mgmt/resource/subscriptions/v2018_06_01/aio/operations/_subscriptions_operations.py
|
Python
|
mit
| 12,046
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
T = TypeVar('T')
JSONType = Any
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_list_by_subscription_request(
subscription_id: str,
*,
filter: Optional[str] = None,
**kwargs: Any
) -> HttpRequest:
api_version = "2021-11-15-preview"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/providers/Microsoft.LabServices/labs')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
if filter is not None:
query_parameters['$filter'] = _SERIALIZER.query("filter", filter, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_list_by_resource_group_request(
subscription_id: str,
resource_group_name: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2021-11-15-preview"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.LabServices/labs')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_get_request(
subscription_id: str,
resource_group_name: str,
lab_name: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2021-11-15-preview"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.LabServices/labs/{labName}')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
"labName": _SERIALIZER.url("lab_name", lab_name, 'str', max_length=100, min_length=1),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_create_or_update_request_initial(
subscription_id: str,
resource_group_name: str,
lab_name: str,
*,
json: JSONType = None,
content: Any = None,
**kwargs: Any
) -> HttpRequest:
content_type = kwargs.pop('content_type', None) # type: Optional[str]
api_version = "2021-11-15-preview"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.LabServices/labs/{labName}')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
"labName": _SERIALIZER.url("lab_name", lab_name, 'str', max_length=100, min_length=1),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PUT",
url=url,
params=query_parameters,
headers=header_parameters,
json=json,
content=content,
**kwargs
)
def build_update_request_initial(
subscription_id: str,
resource_group_name: str,
lab_name: str,
*,
json: JSONType = None,
content: Any = None,
**kwargs: Any
) -> HttpRequest:
content_type = kwargs.pop('content_type', None) # type: Optional[str]
api_version = "2021-11-15-preview"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.LabServices/labs/{labName}')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
"labName": _SERIALIZER.url("lab_name", lab_name, 'str', max_length=100, min_length=1),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PATCH",
url=url,
params=query_parameters,
headers=header_parameters,
json=json,
content=content,
**kwargs
)
def build_delete_request_initial(
subscription_id: str,
resource_group_name: str,
lab_name: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2021-11-15-preview"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.LabServices/labs/{labName}')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
"labName": _SERIALIZER.url("lab_name", lab_name, 'str', max_length=100, min_length=1),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="DELETE",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_publish_request_initial(
subscription_id: str,
resource_group_name: str,
lab_name: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2021-11-15-preview"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.LabServices/labs/{labName}/publish')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
"labName": _SERIALIZER.url("lab_name", lab_name, 'str', max_length=100, min_length=1),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="POST",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_sync_group_request_initial(
subscription_id: str,
resource_group_name: str,
lab_name: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2021-11-15-preview"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.LabServices/labs/{labName}/syncGroup')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
"labName": _SERIALIZER.url("lab_name", lab_name, 'str', max_length=100, min_length=1),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="POST",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
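# Hedged illustration (not part of the generated client): the request builders
# above return azure.core.rest.HttpRequest objects with the URL path already
# formatted and the api-version value attached as a query parameter. The
# identifiers below are placeholder values.
if __name__ == "__main__":
    _example_request = build_get_request(
        subscription_id="00000000-0000-0000-0000-000000000000",
        resource_group_name="example-rg",
        lab_name="example-lab",
    )
    print(_example_request.method)  # "GET"
    print(_example_request.url)     # formatted path for labs/example-lab, with api-version attached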
class LabsOperations(object):
"""LabsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.labservices.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def list_by_subscription(
self,
filter: Optional[str] = None,
**kwargs: Any
) -> Iterable["_models.PagedLabs"]:
"""Get all labs for a subscription.
Returns a list of all labs for a subscription.
:param filter: The filter to apply to the operation.
:type filter: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either PagedLabs or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.labservices.models.PagedLabs]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PagedLabs"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_subscription_request(
subscription_id=self._config.subscription_id,
filter=filter,
template_url=self.list_by_subscription.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_by_subscription_request(
subscription_id=self._config.subscription_id,
filter=filter,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("PagedLabs", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_subscription.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.LabServices/labs'} # type: ignore
@distributed_trace
def list_by_resource_group(
self,
resource_group_name: str,
**kwargs: Any
) -> Iterable["_models.PagedLabs"]:
"""Get all labs for a subscription and resource group.
Returns a list of all labs in a resource group.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either PagedLabs or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.labservices.models.PagedLabs]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PagedLabs"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_resource_group_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
template_url=self.list_by_resource_group.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_by_resource_group_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("PagedLabs", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.LabServices/labs'} # type: ignore
@distributed_trace
def get(
self,
resource_group_name: str,
lab_name: str,
**kwargs: Any
) -> "_models.Lab":
"""Get a lab resource.
Returns the properties of a lab resource.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
        :param lab_name: The name of the lab that uniquely identifies it within its containing lab account.
Used in resource URIs.
:type lab_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Lab, or the result of cls(response)
:rtype: ~azure.mgmt.labservices.models.Lab
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Lab"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
lab_name=lab_name,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('Lab', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.LabServices/labs/{labName}'} # type: ignore
def _create_or_update_initial(
self,
resource_group_name: str,
lab_name: str,
body: "_models.Lab",
**kwargs: Any
) -> "_models.Lab":
cls = kwargs.pop('cls', None) # type: ClsType["_models.Lab"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(body, 'Lab')
request = build_create_or_update_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
lab_name=lab_name,
content_type=content_type,
json=_json,
template_url=self._create_or_update_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('Lab', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('Lab', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('Lab', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.LabServices/labs/{labName}'} # type: ignore
@distributed_trace
def begin_create_or_update(
self,
resource_group_name: str,
lab_name: str,
body: "_models.Lab",
**kwargs: Any
) -> LROPoller["_models.Lab"]:
"""Create or update a lab resource.
Operation to create or update a lab resource.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
        :param lab_name: The name of the lab that uniquely identifies it within its containing lab account.
Used in resource URIs.
:type lab_name: str
:param body: The request body.
:type body: ~azure.mgmt.labservices.models.Lab
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either Lab or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.labservices.models.Lab]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.Lab"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
lab_name=lab_name,
body=body,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('Lab', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'original-uri'}, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.LabServices/labs/{labName}'} # type: ignore
def _update_initial(
self,
resource_group_name: str,
lab_name: str,
body: "_models.LabUpdate",
**kwargs: Any
) -> "_models.Lab":
cls = kwargs.pop('cls', None) # type: ClsType["_models.Lab"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(body, 'LabUpdate')
request = build_update_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
lab_name=lab_name,
content_type=content_type,
json=_json,
template_url=self._update_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('Lab', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('Lab', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.LabServices/labs/{labName}'} # type: ignore
@distributed_trace
def begin_update(
self,
resource_group_name: str,
lab_name: str,
body: "_models.LabUpdate",
**kwargs: Any
) -> LROPoller["_models.Lab"]:
"""Update a lab resource.
Operation to update a lab resource.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
        :param lab_name: The name of the lab that uniquely identifies it within its containing lab account.
Used in resource URIs.
:type lab_name: str
:param body: The request body.
:type body: ~azure.mgmt.labservices.models.LabUpdate
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either Lab or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.labservices.models.Lab]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.Lab"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._update_initial(
resource_group_name=resource_group_name,
lab_name=lab_name,
body=body,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('Lab', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.LabServices/labs/{labName}'} # type: ignore
def _delete_initial(
self,
resource_group_name: str,
lab_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_delete_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
lab_name=lab_name,
template_url=self._delete_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.LabServices/labs/{labName}'} # type: ignore
@distributed_trace
def begin_delete(
self,
resource_group_name: str,
lab_name: str,
**kwargs: Any
) -> LROPoller[None]:
"""Deletes a lab resource.
Operation to delete a lab resource.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
        :param lab_name: The name of the lab that uniquely identifies it within its containing lab account.
Used in resource URIs.
:type lab_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
lab_name=lab_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.LabServices/labs/{labName}'} # type: ignore
def _publish_initial(
self,
resource_group_name: str,
lab_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_publish_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
lab_name=lab_name,
template_url=self._publish_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_publish_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.LabServices/labs/{labName}/publish'} # type: ignore
@distributed_trace
def begin_publish(
self,
resource_group_name: str,
lab_name: str,
**kwargs: Any
) -> LROPoller[None]:
"""Publish or re-publish a lab.
Publish or re-publish a lab. This will create or update all lab resources, such as virtual
machines.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
        :param lab_name: The name of the lab that uniquely identifies it within its containing lab account.
Used in resource URIs.
:type lab_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._publish_initial(
resource_group_name=resource_group_name,
lab_name=lab_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_publish.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.LabServices/labs/{labName}/publish'} # type: ignore
def _sync_group_initial(
self,
resource_group_name: str,
lab_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_sync_group_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
lab_name=lab_name,
template_url=self._sync_group_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_sync_group_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.LabServices/labs/{labName}/syncGroup'} # type: ignore
@distributed_trace
def begin_sync_group(
self,
resource_group_name: str,
lab_name: str,
**kwargs: Any
) -> LROPoller[None]:
"""Manually sync the lab group.
Action used to manually kick off an AAD group sync job.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
        :param lab_name: The name of the lab that uniquely identifies it within its containing lab account.
Used in resource URIs.
:type lab_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._sync_group_initial(
resource_group_name=resource_group_name,
lab_name=lab_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_sync_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.LabServices/labs/{labName}/syncGroup'} # type: ignore
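# Hedged usage sketch (illustrative only). It assumes the package-level client
# is named LabServicesClient, that it exposes this operation group as
# `client.labs`, and that azure-identity's DefaultAzureCredential is available;
# the subscription id and resource names below are placeholders.
if __name__ == "__main__":
    from azure.identity import DefaultAzureCredential
    from azure.mgmt.labservices import LabServicesClient
    client = LabServicesClient(DefaultAzureCredential(), "<subscription-id>")
    # Paged listing: the returned ItemPaged transparently follows next_link.
    for lab in client.labs.list_by_subscription():
        print(lab.name)
    # Long-running operation: begin_* returns an LROPoller; result() blocks
    # until the service reports a terminal state.
    poller = client.labs.begin_delete("example-rg", "example-lab")
    poller.result()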
|
Azure/azure-sdk-for-python
|
sdk/labservices/azure-mgmt-labservices/azure/mgmt/labservices/operations/_labs_operations.py
|
Python
|
mit
| 45,851
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from decimal import Decimal as D
class NexmoResponse(object):
"""A convenient wrapper to manipulate the Nexmo json response.
The class makes it easy to retrieve information about sent messages, total
price, etc.
Example::
>>> response = nexmo.send_sms(frm, to, txt)
>>> print response.total_price
0.15
>>> print response.remaining_balance
1.00
        >>> print response.message_count
3
>>> for message in response.messages:
... print message.message_id, message.message_price
00000124 0.05
00000125 0.05
00000126 0.05
    The class only handles successful responses, since errors raise
exceptions in the :class:`~Nexmo` class.
"""
def __init__(self, json_data):
self.messages = [NexmoMessage(data) for data in json_data['messages']]
self.message_count = len(self.messages)
self.total_price = sum(msg.message_price for msg in self.messages)
self.remaining_balance = min(msg.remaining_balance for msg in self.messages)
class NexmoMessage(object):
"""A wrapper to manipulate a single `message` entry in a Nexmo response.
    When a text message is sent in several parts, Nexmo will return a status
    for each and every one of them.
    The class does nothing more than wrap the json data for easy
    access.
"""
def __init__(self, json_data):
data = {
'to': json_data['to'],
'message_id': json_data['message-id'],
'status': int(json_data['status']),
'remaining_balance': D(json_data['remaining-balance']),
'message_price': D(json_data['message-price']),
'network': json_data['network']
}
self.__dict__.update(data)
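# Minimal sketch of the wrapper on hand-built sample data; the keys mirror
# those consumed in NexmoMessage.__init__ above, and the values are made up
# for illustration only.
if __name__ == '__main__':
    sample = {
        'messages': [
            {'to': '447700900000', 'message-id': '00000124', 'status': '0',
             'remaining-balance': '1.00', 'message-price': '0.05', 'network': '23410'},
            {'to': '447700900000', 'message-id': '00000125', 'status': '0',
             'remaining-balance': '0.95', 'message-price': '0.05', 'network': '23410'},
        ]
    }
    response = NexmoResponse(sample)
    print(response.message_count)      # 2
    print(response.total_price)        # 0.10
    print(response.remaining_balance)  # 0.95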
|
thibault/libnexmo
|
libnexmo/response.py
|
Python
|
mit
| 1,868
|
# -*- coding:utf-8 -*-
from ...errors.httpbadrequestexception import HttpBadRequestException
import saklient
# module saklient.cloud.errors.mustbeofsamezoneexception
class MustBeOfSameZoneException(HttpBadRequestException):
    ## Invalid request. The referenced resources must reside in the same zone.
## @param {int} status
# @param {str} code=None
# @param {str} message=""
def __init__(self, status, code=None, message=""):
super(MustBeOfSameZoneException, self).__init__(status, code, "不適切な要求です。参照するリソースは同一ゾーンに存在しなければなりません。" if message is None or message == "" else message)
|
hnakamur/saklient.python
|
saklient/cloud/errors/mustbeofsamezoneexception.py
|
Python
|
mit
| 731
|
import math
import pandas as pd
import numpy as np
from scipy import misc
from mpl_toolkits.mplot3d import Axes3D
import matplotlib
import matplotlib.pyplot as plt
# Look pretty...
# matplotlib.style.use('ggplot')
plt.style.use('ggplot')
def Plot2D(T, title, x, y):
    # This method plots the two requested components of the
    # transformed data T as a 2D scatter chart:
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_title(title)
ax.set_xlabel('Component: {0}'.format(x))
ax.set_ylabel('Component: {0}'.format(y))
ax.scatter(T[:,x],T[:,y], marker='.',alpha=0.7)
from os import listdir
file_path = "/Users/szabolcs/dev/git/DAT210x/Module4/Datasets/ALOI/32/"
#
# TODO: Start by creating a regular old, plain, "vanilla"
# python list. You can call it 'samples'.
#
file_names = listdir(file_path)
samples = []
#
# TODO: Write a for-loop that iterates over the images in the
# Module4/Datasets/ALOI/32/ folder, appending each of them to
# your list. Each .PNG image should first be loaded into a
# temporary NDArray, just as shown in the Feature
# Representation reading.
for file_name in file_names:
pic = misc.imread(file_path + file_name)
ser = [item for sublist in pic for item in sublist]
pic = pd.Series(ser)
#pic = pic[::2, ::2]
pic = pic.values.reshape(-1, 3)
samples.append(pic)
#
# Optional: Resample the image down by a factor of two if you
# have a slower computer. You can also convert the image from
# 0-255 to 0.0-1.0 if you'd like, but that will have no
# effect on the algorithm's results.
#
df = pd.DataFrame.from_records(samples)
print(df.shape)
num_images, num_pixels = df.shape
num_pixels = int(math.sqrt(num_pixels))
for i in range(num_images):
df.loc[i,:] = df.loc[i,:].values.reshape(num_pixels, num_pixels).T.reshape(-1)
print(df.shape)
#df.iloc[0] = pd.to_numeric(df.iloc[0], errors="coerce")
#print(df.dtypes)
#
# TODO: Once you're done answering the first three questions,
# right before you converted your list to a dataframe, add in
# additional code which also appends to your list the images
# in the Module4/Datasets/ALOI/32_i directory. Re-run your
# assignment and answer the final question below.
#
# .. your code here ..
#
# TODO: Convert the list to a dataframe
#
# .. your code here ..
#
# TODO: Implement Isomap here. Reduce the dataframe df down
# to three components, using K=6 for your neighborhood size
#
from sklearn.manifold import Isomap
imap = Isomap(n_components=2, n_neighbors=6)
imap.fit(df)
df_imap = imap.transform(df)
Plot2D(df_imap, "Isomap", 0, 1)
#
# TODO: Create a 2D Scatter plot to graph your manifold. You
# can use either 'o' or '.' as your marker. Graph the first two
# isomap components
#
# .. your code here ..
#
# TODO: Create a 3D Scatter plot to graph your manifold. You
# can use either 'o' or '.' as your marker:
#
# .. your code here ..
plt.show()
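# Hedged sketch for the remaining TODOs above: an Isomap reduced to three
# components and a 3D scatter of the result. It reuses `df` and the imports
# already in this script; the component count and neighborhood size follow the
# TODO text (K=6), everything else is illustrative.
imap3 = Isomap(n_components=3, n_neighbors=6)
T3 = imap3.fit_transform(df)
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.set_title('Isomap (3 components)')
ax.set_xlabel('Component: 0')
ax.set_ylabel('Component: 1')
ax.set_zlabel('Component: 2')
ax.scatter(T3[:, 0], T3[:, 1], T3[:, 2], marker='.', alpha=0.7)
plt.show()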
|
szigyi/DAT210x
|
Module4/assignment5.py
|
Python
|
mit
| 2,886
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Agency.alias'
db.add_column('lobbyingph_agency', 'alias',
self.gf('django.db.models.fields.CharField')(default='', max_length=100),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Agency.alias'
db.delete_column('lobbyingph_agency', 'alias')
models = {
'lobbyingph.agency': {
'Meta': {'ordering': "['name']", 'object_name': 'Agency'},
'alias': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'lobbyingph.article': {
'Meta': {'ordering': "['-date']", 'object_name': 'Article'},
'date': ('django.db.models.fields.DateField', [], {'default': 'datetime.datetime(2012, 10, 19, 0, 0)'}),
'headline': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'issue': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lobbyingph.Issue']", 'null': 'True', 'blank': 'True'}),
'publisher': ('django.db.models.fields.SmallIntegerField', [], {'null': 'True'}),
'quote': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
'lobbyingph.bill': {
'Meta': {'ordering': "['number']", 'object_name': 'Bill'},
'bill_type': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'number': ('django.db.models.fields.CharField', [], {'default': '0', 'max_length': '10'}),
'url': ('django.db.models.fields.URLField', [], {'default': "'http://legislation.phila.gov/detailreport/?key='", 'max_length': '200'})
},
'lobbyingph.category': {
'Meta': {'ordering': "['name']", 'object_name': 'Category'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'lobbyingph.communication_method': {
'Meta': {'ordering': "['name']", 'object_name': 'Communication_Method'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'lobbyingph.exp_direct_comm': {
'Meta': {'object_name': 'Exp_Direct_Comm'},
'agencies': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['lobbyingph.Agency']", 'null': 'True', 'blank': 'True'}),
'bill': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lobbyingph.Bill']", 'null': 'True', 'blank': 'True'}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lobbyingph.Category']"}),
'filing': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lobbyingph.Filing']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'issue': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lobbyingph.Issue']", 'null': 'True', 'blank': 'True'}),
'officials': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['lobbyingph.Official']", 'null': 'True', 'blank': 'True'}),
'other_desc': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'position': ('django.db.models.fields.SmallIntegerField', [], {'null': 'True', 'blank': 'True'})
},
'lobbyingph.exp_indirect_comm': {
'Meta': {'object_name': 'Exp_Indirect_Comm'},
'agency': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lobbyingph.Agency']", 'null': 'True', 'blank': 'True'}),
'bill': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lobbyingph.Bill']", 'null': 'True', 'blank': 'True'}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lobbyingph.Category']"}),
'filing': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lobbyingph.Filing']"}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['lobbyingph.Receipent_Group']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'issue': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lobbyingph.Issue']", 'null': 'True', 'blank': 'True'}),
'methods': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['lobbyingph.Communication_Method']", 'null': 'True', 'blank': 'True'}),
'officials': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['lobbyingph.Official']", 'null': 'True', 'blank': 'True'}),
'other_desc': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'position': ('django.db.models.fields.SmallIntegerField', [], {})
},
'lobbyingph.exp_other': {
'Meta': {'object_name': 'Exp_Other'},
'agency': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lobbyingph.Agency']", 'null': 'True'}),
'date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'filing': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lobbyingph.Filing']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'official': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lobbyingph.Official']", 'null': 'True'}),
'place': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lobbyingph.Principal']", 'null': 'True'}),
'value': ('django.db.models.fields.DecimalField', [], {'default': '0.0', 'max_digits': '12', 'decimal_places': '2'})
},
'lobbyingph.filing': {
'Meta': {'object_name': 'Filing'},
'corrected': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'error_description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'errors': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'firms': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['lobbyingph.Firm']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lobbyists': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['lobbyingph.Lobbyist']", 'null': 'True', 'blank': 'True'}),
'principal': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lobbyingph.Principal']", 'null': 'True', 'blank': 'True'}),
'quarter': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'total_exp_direct_comm': ('django.db.models.fields.DecimalField', [], {'default': '0.0', 'max_digits': '12', 'decimal_places': '2'}),
'total_exp_indirect_comm': ('django.db.models.fields.DecimalField', [], {'default': '0.0', 'max_digits': '12', 'decimal_places': '2'}),
'total_exp_other': ('django.db.models.fields.DecimalField', [], {'default': '0.0', 'max_digits': '12', 'decimal_places': '2'}),
'year': ('django.db.models.fields.DateField', [], {'default': 'datetime.date.today'})
},
'lobbyingph.firm': {
'Meta': {'ordering': "['name']", 'object_name': 'Firm'},
'address1': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'address2': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'address3': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'email': ('django.db.models.fields.CharField', [], {'max_length': '75'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '150'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '12'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'zipcode': ('django.db.models.fields.CharField', [], {'max_length': '10'})
},
'lobbyingph.issue': {
'Meta': {'ordering': "['description']", 'object_name': 'Issue'},
'bill': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lobbyingph.Bill']", 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {}),
'detail_view': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'summary': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'lobbyingph.lobbyist': {
'Meta': {'ordering': "['name']", 'object_name': 'Lobbyist'},
'address1': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'address2': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'address3': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'email': ('django.db.models.fields.CharField', [], {'max_length': '75'}),
'firm': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lobbyingph.Firm']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '75'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '12'}),
'principals': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['lobbyingph.Principal']", 'null': 'True', 'blank': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'zipcode': ('django.db.models.fields.CharField', [], {'max_length': '10'})
},
'lobbyingph.official': {
'Meta': {'ordering': "['last_name']", 'object_name': 'Official'},
'agency': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lobbyingph.Agency']", 'null': 'True', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'})
},
'lobbyingph.principal': {
'Meta': {'ordering': "['name']", 'object_name': 'Principal'},
'address1': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'address2': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'address3': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'email': ('django.db.models.fields.CharField', [], {'max_length': '75'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '150'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '12'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'zipcode': ('django.db.models.fields.CharField', [], {'max_length': '10'})
},
'lobbyingph.receipent_group': {
'Meta': {'ordering': "['name']", 'object_name': 'Receipent_Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'lobbyingph.source': {
'Meta': {'ordering': "['name']", 'object_name': 'Source'},
'filing': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lobbyingph.Filing']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True'})
}
}
complete_apps = ['lobbyingph']
|
AxisPhilly/lobbying.ph-django
|
lobbyingph/migrations/0030_auto__add_field_agency_alias.py
|
Python
|
mit
| 14,317
|
import random
import math
def estimatePi(error):
    itt=1000 #iterations
previousPI=0
while True:
hits=0 # number of hits
for i in range(0,itt):
x=random.uniform(0,1)
y=random.uniform(0,1)
            z=x*x+y*y #Pythagorean theorem
            if math.sqrt(z)<=1: #if point (x,y) lies within the quarter circle of radius 1
hits=hits+1
currentPI=(hits*4)/itt
#print(currentPI)
if previousPI==0:
previousPI=currentPI
continue
        if math.fabs(previousPI-currentPI)<error:
            return currentPI #return the estimate of pi, i.e. 4*hits/itt
previousPI=(currentPI+previousPI)/2
#previousPI=currentPI
error=float(input("Enter the error value :"))
pi = estimatePi(error)
print("Pi : ",pi)
|
Alexgeni/Some-implementations
|
EstimationOfPi.py
|
Python
|
mit
| 681
|
"""
Load winds on pressure levels and calculate vorticity and divergence
"""
import os, sys
import datetime
import iris
import iris.unit as unit
diag = '30201'
cube_name_u='eastward_wind'
cube_name_v='northward_wind'
pp_file_path='/projects/cascade/pwille/moose_retrievals/'
#experiment_ids = ['djznw', 'djzny', 'djznq', 'djzns', 'dkjxq', 'dklyu', 'dkmbq', 'dklwu', 'dklzq'] # All minus large 3
experiment_ids = ['djznw']
for experiment_id in experiment_ids:
expmin1 = experiment_id[:-1]
#fu = '/projects/cascade/pwille/moose_retrievals/%s/%s/%s.pp' % (expmin1, experiment_id, diag)
    try:
        print 'hello'
    except Exception:
        pass
|
peterwilletts24/Monsoon-Python-Scripts
|
vort_and_div/vorticity_and_diverg.py
|
Python
|
mit
| 613
|
import time
import asyncio
from aiokafka import AIOKafkaProducer
from settings import KAFKA_SERVERS, SAVEPOINT, LOG_FILE, KAFKA_TOPIC
class LogStreamer:
def __init__(self,
KAFKA_SERVERS,
KAFKA_TOPIC,
loop,
savepoint_file,
log_file):
self.KAFKA_TOPIC = KAFKA_TOPIC
self.loop = loop
self.producer = AIOKafkaProducer(loop=self.loop, bootstrap_servers=KAFKA_SERVERS)
self.savepoint_file = savepoint_file
self.log_file = log_file
    async def produce(self, finite=False):
        last = self.savepoint_file.read()
        if last:
            self.log_file.seek(int(last))
        skip_first_empty = True
        while True:
            line = self.log_file.readline()
            line = line.strip(' \t\n\r')
            if not line:
                if finite and not skip_first_empty:
                    return
                skip_first_empty = False
                # Use asyncio.sleep so we do not block the event loop while
                # waiting for new log lines.
                await asyncio.sleep(0.1)
                current_position = self.log_file.tell()
                if last != str(current_position):
                    last = str(current_position)
                    self.savepoint_file.seek(0)
                    self.savepoint_file.write(str(current_position))
                continue
            '''
            We could convert each line to JSON here, but JSON encoding with the
            standard library is not especially fast, and since we use an
            asynchronous, non-blocking model, it is best to pass the data
            through as-is. We want as little overhead as possible so that data
            is streamed as fast as possible.
            '''
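            # A minimal sketch of the JSON alternative discussed above (hypothetical,
            # not part of the original; it would also require "import json" at the top):
            #     payload = json.dumps({"line": line}).encode()
            #     await self.producer.send_and_wait(self.KAFKA_TOPIC, payload)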
await self.producer.send_and_wait(self.KAFKA_TOPIC, line.encode())
def start(self):
self.loop.run_until_complete(self.producer.start())
self.loop.run_until_complete(self.produce())
self.loop.run_until_complete(self.producer.stop())
self.loop.close()
if __name__ == '__main__':
with open(SAVEPOINT, 'r+') as savepoint_file, open(LOG_FILE, 'r') as log_file:
streamer = LogStreamer(KAFKA_SERVERS,
KAFKA_TOPIC,
asyncio.get_event_loop(),
savepoint_file,
log_file)
streamer.start()
|
artyomboyko/log-analysis
|
log_reader.py
|
Python
|
mit
| 2,330
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.9 on 2016-10-03 02:38
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('users', '0013_auto_20161002_0504'),
]
operations = [
migrations.CreateModel(
name='SignUpLog',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('referer', models.CharField(max_length=255, null=True)),
('ip', models.CharField(max_length=255, null=True)),
('user', models.OneToOneField(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
|
sbuss/voteswap
|
users/migrations/0014_signuplog.py
|
Python
|
mit
| 928
|
#!/usr/bin/env python
import copy
from cStringIO import StringIO
from fnmatch import fnmatch
import gzip
import hashlib
import mimetypes
import os
import boto
from boto.s3.key import Key
from boto.s3.connection import OrdinaryCallingFormat
import app_config
GZIP_FILE_TYPES = ['.html', '.js', '.json', '.css', '.xml']
class FakeTime:
def time(self):
return 1261130520.0
# Hack to override gzip's time implementation
# See: http://stackoverflow.com/questions/264224/setting-the-gzip-timestamp-from-python
gzip.time = FakeTime()
def deploy_file(connection, src, dst, headers={}):
"""
Deploy a single file to S3, if the local version is different.
"""
bucket = connection.get_bucket(app_config.S3_BUCKET['bucket_name'])
k = bucket.get_key(dst)
s3_md5 = None
if k:
s3_md5 = k.etag.strip('"')
else:
k = Key(bucket)
k.key = dst
file_headers = copy.copy(headers)
if 'Content-Type' not in headers:
file_headers['Content-Type'] = mimetypes.guess_type(src)[0]
# Gzip file
if os.path.splitext(src)[1].lower() in GZIP_FILE_TYPES:
file_headers['Content-Encoding'] = 'gzip'
with open(src, 'rb') as f_in:
contents = f_in.read()
output = StringIO()
f_out = gzip.GzipFile(filename=dst, mode='wb', fileobj=output)
f_out.write(contents)
f_out.close()
local_md5 = hashlib.md5()
local_md5.update(output.getvalue())
local_md5 = local_md5.hexdigest()
if local_md5 == s3_md5:
print 'Skipping %s (has not changed)' % src
else:
print 'Uploading %s --> %s (gzipped)' % (src, dst)
k.set_contents_from_string(output.getvalue(), file_headers, policy='public-read')
# Non-gzip file
else:
with open(src, 'rb') as f:
local_md5 = hashlib.md5()
local_md5.update(f.read())
local_md5 = local_md5.hexdigest()
if local_md5 == s3_md5:
print 'Skipping %s (has not changed)' % src
else:
print 'Uploading %s --> %s' % (src, dst)
k.set_contents_from_filename(src, file_headers, policy='public-read')
def deploy_folder(src, dst, headers={}, ignore=[]):
"""
Deploy a folder to S3, checking each file to see if it has changed.
"""
to_deploy = []
for local_path, subdirs, filenames in os.walk(src, topdown=True):
rel_path = os.path.relpath(local_path, src)
for name in filenames:
if name.startswith('.'):
continue
src_path = os.path.join(local_path, name)
skip = False
for pattern in ignore:
if fnmatch(src_path, pattern):
skip = True
break
if skip:
continue
if rel_path == '.':
dst_path = os.path.join(dst, name)
else:
dst_path = os.path.join(dst, rel_path, name)
to_deploy.append((src_path, dst_path))
s3 = boto.connect_s3(calling_format=OrdinaryCallingFormat())
for src, dst in to_deploy:
deploy_file(s3, src, dst, headers)
def delete_folder(dst):
"""
Delete a folder from S3.
"""
s3 = boto.connect_s3(calling_format=OrdinaryCallingFormat())
bucket = s3.get_bucket(app_config.S3_BUCKET['bucket_name'])
for key in bucket.list(prefix='%s/' % dst):
print 'Deleting %s' % (key.key)
key.delete()
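# A hedged usage sketch (not part of the original fabfile; assumes boto can find AWS
# credentials and app_config.S3_BUCKET points at the target bucket):
#     deploy_folder('www', 'my-project', headers={'Cache-Control': 'max-age=300'},
#                   ignore=['*.pyc', '*/.DS_Store'])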
|
stlpublicradio/ferguson-project
|
fabfile/flat.py
|
Python
|
mit
| 3,519
|
"""Imports for Python API.
This file is MACHINE GENERATED! Do not edit.
Generated by: tensorflow/tools/api/generator/create_python_api.py script.
"""
from tensorflow.python.keras import Input
from tensorflow.python.keras import Model
from tensorflow.python.keras import Sequential
from tensorflow.tools.api.generator.api.keras import activations
from tensorflow.tools.api.generator.api.keras import applications
from tensorflow.tools.api.generator.api.keras import backend
from tensorflow.tools.api.generator.api.keras import callbacks
from tensorflow.tools.api.generator.api.keras import constraints
from tensorflow.tools.api.generator.api.keras import datasets
from tensorflow.tools.api.generator.api.keras import estimator
from tensorflow.tools.api.generator.api.keras import initializers
from tensorflow.tools.api.generator.api.keras import layers
from tensorflow.tools.api.generator.api.keras import losses
from tensorflow.tools.api.generator.api.keras import metrics
from tensorflow.tools.api.generator.api.keras import models
from tensorflow.tools.api.generator.api.keras import optimizers
from tensorflow.tools.api.generator.api.keras import preprocessing
from tensorflow.tools.api.generator.api.keras import regularizers
from tensorflow.tools.api.generator.api.keras import utils
from tensorflow.tools.api.generator.api.keras import wrappers
|
ryfeus/lambda-packs
|
Keras_tensorflow_nightly/source2.7/tensorflow/tools/api/generator/api/keras/__init__.py
|
Python
|
mit
| 1,351
|
#!/usr/bin/env python3
# Copyright (c) 2017 The DigiByte Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import AuroracoinTestFramework
from test_framework.util import assert_equal
class ListSinceBlockTest (AuroracoinTestFramework):
def __init__(self):
super().__init__()
self.setup_clean_chain = True
self.num_nodes = 4
def run_test (self):
'''
`listsinceblock` did not behave correctly when handed a block that was
no longer in the main chain:
             ab0
            /   \
         aa1 [tx0]   bb1
          |           |
         aa2         bb2
          |           |
         aa3         bb3
                      |
                     bb4
Consider a client that has only seen block `aa3` above. It asks the node
to `listsinceblock aa3`. But at some point prior the main chain switched
to the bb chain.
Previously: listsinceblock would find height=4 for block aa3 and compare
this to height=5 for the tip of the chain (bb4). It would then return
results restricted to bb3-bb4.
Now: listsinceblock finds the fork at ab0 and returns results in the
range bb1-bb4.
This test only checks that [tx0] is present.
'''
assert_equal(self.is_network_split, False)
self.nodes[2].generate(101)
self.sync_all()
assert_equal(self.nodes[0].getbalance(), 0)
assert_equal(self.nodes[1].getbalance(), 0)
assert_equal(self.nodes[2].getbalance(), 50)
assert_equal(self.nodes[3].getbalance(), 0)
# Split network into two
self.split_network()
assert_equal(self.is_network_split, True)
# send to nodes[0] from nodes[2]
senttx = self.nodes[2].sendtoaddress(self.nodes[0].getnewaddress(), 1)
# generate on both sides
lastblockhash = self.nodes[1].generate(6)[5]
self.nodes[2].generate(7)
print('lastblockhash=%s' % (lastblockhash))
self.sync_all()
self.join_network()
# listsinceblock(lastblockhash) should now include tx, as seen from nodes[0]
lsbres = self.nodes[0].listsinceblock(lastblockhash)
found = False
for tx in lsbres['transactions']:
if tx['txid'] == senttx:
found = True
break
assert_equal(found, True)
if __name__ == '__main__':
ListSinceBlockTest().main()
|
aurarad/auroracoin
|
qa/rpc-tests/listsinceblock.py
|
Python
|
mit
| 2,579
|
# Adrian deWynter, 2017
# Implementation of various algorithms
# applied to strings
# Given a long string representing a number, find the next
# greater number that is also a palindrome.
def nextPalindrome(S):
    def isPalindrome(x): return x == x[::-1]
    n = int(S)
    while True:
        n = n + 1
        if isPalindrome(str(n)): return str(n)
# Given two words A,B find if A = rot(B)
def isRotation(A,B):
return B in A+A
# Print all possible combinations of a certain
# s \in {0,1}^* for a given wildcard (*)
def wS(s,i):
if i == len(s):
print "".join(s)
else:
if s[i] == "*":
s[i] = "1"
wS(s,i+1)
s[i] = "0"
wS(s,i+1)
else:
wS(s,i+1)
def allNonRepeatingWordsInTwoSentences(a,b):
    # Hash the words of a and b, then print the words seen only once. O(a + b)
    d = {}
    def insertHash(x):
        if x not in d:
            d[x] = 1
        else:
            d[x] = d[x] + 1
    for w in a.split(" "):
        insertHash(w)
    for w in b.split(" "):
        insertHash(w)
    ans = []
    for k,v in d.items():
        if v == 1:
            ans.append(k)
            ans.append(" ")
    print "".join(ans[:-1])
# Split a string into the minimum number of substrings
# such that each substring is a palindrome.
# This doesn't really work.
# Instead maintain an array:
# mincuts[i] = min cuts until i in S
# ispalindrome[i][j]
def minSubPalindromes(S):
p = []
M = [[None for _ in range(len(S))] for _ in range(len(S))]
for i in range(1,len(S)):
        for j in range(1,len(S)):
if S[i] == S[j]:
M[i][j] = max(M[i-1][j-1], M[i-1][j], M[i][j - 1]) + 1
else:
M[i][j] = max(M[i-1][j-1], M[i-1][j], M[i][j - 1]) + 1
print M[-1][-1]
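# A minimal sketch of the min-cut approach described above (not part of the
# original file): mincuts[i] is the minimum number of cuts for the prefix
# S[:i+1], and isPal[j][i] records whether S[j:i+1] is a palindrome.
def minPalindromeCuts(S):
    n = len(S)
    if n == 0: return 0
    isPal = [[False]*n for _ in range(n)]
    mincuts = [0]*n
    for i in range(n):
        best = i  # worst case: cut before every character of the prefix
        for j in range(i+1):
            if S[j] == S[i] and (i - j < 2 or isPal[j+1][i-1]):
                isPal[j][i] = True
                best = 0 if j == 0 else min(best, mincuts[j-1] + 1)
        mincuts[i] = best
    return mincuts[-1]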
# Longest word made of words.
# I have no idea what it does.
def longestWordsMadeOfWords(W):
# First method
W.sort()
W=W[::-1]
i = 0
def splitWord(w):
ans = []
for i in range(1,len(w)):
ans.append( (w[:i], w[i:] ))
return ans
while i < len(W):
w = W[i]
for a,b in splitWord(w):
if a not in W or b not in W:
i = i + 1
break
return w
# Find smallest window if a string A containing all
# characters of another string B
def smallestWindow(A,B):
M = [[0 for _ in range(len(A))] for _ in range(len(B))]
M[0] = [1 if B[0] == A[i] else 0 for i in range(len(A))]
for i in range(len(B)): M[i][0] = 1 if A[0] == B[i] else 0
for i in range(1,len(A)):
for j in range(1,len(B)):
            if A[i] == B[j]:
M[i][j] = max(1, M[i-1][j-1],M[i-1][j],M[i][j-1])
if M[-1][-1] == len(B): return 1
# Alphabetical order:
def alienAlphabet(A):
node = None
def insertNode(node,v):
node_ = Node()
node_.value = v
node_.next = None
node.next = node_
for k,v in A:
node = Node()
node.value = k[0]
for c in range(1,len(k)):
if node.value != k[c]:
node_ = node
while node.next is not None:
if node.value == k[c]:
break
else:
if node.next.value != k[c]:
insertNode(node,k[c])
node = node.next
if node.next is None and node.value != k[c]:
insertNode(node,k[c])
while node.next is not None: print node.value
# Find minimum number of operations that can
# be performed to turn s1 into s2
def minNum(s1,s2):
    def levenshteinDistance(s1,s2,ls1=len(s1),ls2=len(s2)):
        if ls1 == 0: return ls2
        if ls2 == 0: return ls1
        if s1[ls1-1] == s2[ls2-1]:
            cost = 0
        else:
            cost = 1
        return min(
            levenshteinDistance(s1,s2,ls1-1,ls2) + 1,
            levenshteinDistance(s1,s2,ls1,ls2-1) + 1,
            levenshteinDistance(s1,s2,ls1-1,ls2-1) + cost)
    return levenshteinDistance(s1,s2)
# Dynamic programming approach:
M = [[0 for _ in s1] for _ in s2]
for i in range(1,len(s1)):
for j in range(1,len(s2)):
if s1[i] != s2[j]:
M[i][j] = max(M[i-1][j],M[i][j-1],M[i-1][j-1])
print M[-1][-1]
# Find all positions where the anagram of a substring
# S exists in A
# Complexity: O(A + S)
def needleHaystack(S,A):
indexes = []
T = sufixTree(A)
i = 0
while i < len(S):
k = T.findSubstring(S)
        if k == len(S): indexes.append(k)
S = getNextAnagram(S)
return indexes
left,right = 0,0
count = len(S)
indexes = []
dic = {}
    for c in S:
        if c in dic:
            dic[c] = dic[c] + 1
        else:
            dic[c] = 1
while right < len(A):
right = right + 1
if A[right] in dic and A[right] >= 0:
A[right] = A[right] - 1
count = count -1
if count == 0: indexes.append(left)
left = left + 1
if right - left == len(S) and left in A and A[left] >= 0:
A[left] = A[left] + 1
count = count + 1
return indexes
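# A self-contained sliding-window sketch of the anagram search described above
# (not part of the original file; added for illustration):
def anagramPositions(S, A):
    from collections import Counter
    need = Counter(S)
    window = Counter(A[:len(S)])
    positions = []
    for left in range(len(A) - len(S) + 1):
        if left > 0:
            # slide the window one character to the right
            window[A[left-1]] -= 1
            if window[A[left-1]] == 0:
                del window[A[left-1]]
            window[A[left+len(S)-1]] += 1
        if window == need:
            positions.append(left)
    return positions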
|
adewynter/Tools
|
Algorithms/stringOps.py
|
Python
|
mit
| 4,317
|
import numpy as np;
np.set_printoptions(linewidth=40, precision=5, suppress=True)
import pandas as pd; pd.options.display.max_rows=80;pd.options.display.expand_frame_repr=False;pd.options.display.max_columns=20
import pylab as plt;
import os; home=os.path.expanduser('~') +'/'
import sys;sys.path.insert(1,'/home/arya/workspace/bio/')
from CLEAR.Libs.Markov import Markov
import Utils.Util as utl
import Utils.Simulation as Simulation
import matplotlib as mpl
import scipy.stats as stats
import seaborn as sns
import Utils.Plots as pplt
mpl.rc('font', **{'family': 'serif', 'serif': ['Computer Modern'], 'size': 56});
mpl.rc('text', usetex=True)
sns.set_style("whitegrid", {"grid.color": "1", 'axes.linewidth': .5, "grid.linewidth": ".09"})
subptitle = list('ABCDEFGHI')
def createSelectionSimulations(s=0.1,maxGen=100):
def runSim(i):
try:
sim = Simulation.Simulation(maxGeneration=maxGen, generationStep=1, s=s, foldInitialAFs=False,
ModelName='Markov', L=1000, numReplicates=1,
makeSureSelectedSiteDontGetLost=False, experimentID=0)
x=sim.X[:,sim.siteUnderSelection,0]
except:
x=np.zeros(sim.X.shape[0])
x[:]=None
if not i%1000: print s, i
return x
X=map(runSim,range(10000))
a=pd.DataFrame(X)
a.to_pickle(utl.outpath + 'markov/T{}.S{:03.0f}.obs.df'.format(maxGen, s * 1000))
print 'done!'
def plotNull(subp, nu0=0.005, fontsize=5):
obs = pd.read_pickle(utl.outpath + 'markov/neutral.obs.{}.pkl'.format(nu0))
T = Markov.computeTransition(0, N=1000)
dfplt = pd.concat([pd.Series({'scale': 10, 'xlim': [0.0, 0.01], 'ylim': [0, 1]}, name=(0.005, 1)),
pd.Series({'scale': 30, 'xlim': [0.06, 0.14], 'ylim': [0, 0.15]}, name=(0.1, 1)),
pd.Series({'scale': 30, 'xlim': [0.0, 0.015], 'ylim': [0, 0.3]}, name=(0.005, 10)),
pd.Series({'scale': 45, 'xlim': [0.0, 0.2], 'ylim': [0, 0.025]}, name=(0.1, 10)),
pd.Series({'scale':30, 'xlim':[0.0,0.03],'ylim': [0,0.2]},name=(0.005,100)),pd.Series({'scale':50, 'xlim':[0.00,0.4],'ylim': [0,0.004]},name=(0.1,100))
],axis=1).T
markov=T.loc[nu0].copy(True);markov.name='Markov Chain'
xx=np.arange(0,1,0.00001)
N=200; tau=1;h=2*nu0*(1-nu0);sig2=h*tau/N;brownian=stats.norm(nu0, sig2).pdf(xx);
brownian=pd.Series(brownian,index=xx);brownian/=brownian.sum();brownian.name='Brownian Motion';brownian*=dfplt.loc[(nu0,tau)].scale
pplt.setSize(plt.gca(), fontsize=fontsize)
plt.subplot(3, 3, subp[0]);
brownian.plot(color='r');
markov.plot(color='b');
o=pd.Series(obs.X[1].flatten()).value_counts().sort_index();o=o/o.sum();
if nu0==0.1:
counts,limits=np.histogram(obs.X[1].flatten(),bins=500,range=[0,1]);centers = 0.5*(limits[1:]+limits[:-1]);o=pd.Series(counts,index=centers);o=o/(obs.X.shape[1]*obs.X.shape[2]*4)
o.plot(color='g')
plt.xlim(dfplt.loc[(nu0, tau)].xlim);
plt.ylim(dfplt.loc[(nu0, tau)].ylim);
plt.locator_params(nbins=3)
pplt.annotate(r'$s=0$, $\nu_0=${}, $\tau$={}'.format(nu0, tau), fontsize=fontsize)
plt.ylabel(r'$P(\nu_\tau|\nu_0)$')
tau=10
for _ in range(9):
markov=markov.dot(T)
N=200;h=2*nu0*(1-nu0);sig2=h*tau/N;brownian=stats.norm(nu0, sig2).pdf(xx)
brownian=pd.Series(brownian,index=xx);brownian/=brownian.sum();brownian.name='Brownian Motion';
brownian*=dfplt.loc[(nu0,tau)].scale
pplt.setSize(plt.gca(), fontsize=fontsize)
plt.title('({})'.format(subptitle[subp[0] - 1]), fontsize=fontsize)
plt.subplot(3, 3, subp[1]);
brownian.plot(color='r');
markov.plot(color='b');
o=pd.Series(obs.X[10].flatten()).value_counts().sort_index();o=o/o.sum();
if nu0==0.1:
counts,limits=np.histogram(obs.X[10].flatten(),bins=100,range=[0,1]);centers = 0.5*(limits[1:]+limits[:-1]);o=pd.Series(counts,index=centers);o=o/(obs.X.shape[1]*obs.X.shape[2]*20)
o.plot(color='g')
plt.xlim(dfplt.loc[(nu0, tau)].xlim);
plt.ylim(dfplt.loc[(nu0, tau)].ylim);
plt.locator_params(nbins=3)
pplt.annotate(r'$s=0$, $\nu_0=${}, $\tau$={}'.format(nu0, tau), loc=1, fontsize=fontsize)
pplt.setSize(plt.gca(), fontsize=fontsize)
tau=100
for _ in range(90):
markov=markov.dot(T)
N=200;h=2*nu0*(1-nu0);sig2=h*tau/N;brownian=stats.norm(nu0, sig2).pdf(xx)
brownian=pd.Series(brownian,index=xx);brownian/=brownian.sum();brownian.name='Brownian Motion';
brownian*=dfplt.loc[(nu0,tau)].scale
plt.title('({})'.format(subptitle[subp[1] - 1]), fontsize=fontsize)
plt.subplot(3, 3, subp[2]);
brownian.plot(color='r');
markov.plot(color='b')
o=pd.Series(obs.X[100].flatten()).value_counts().sort_index();o=o/o.sum();
if nu0==0.1:
counts,limits=np.histogram(obs.X[100].flatten(),bins=30,range=[0,1]);centers = 0.5*(limits[1:]+limits[:-1]);o=pd.Series(counts,index=centers);o=o/(obs.X.shape[1]*obs.X.shape[2]*60)
o.name = 'Observation';
o.plot(color='g')
plt.xlim(dfplt.loc[(nu0, tau)].xlim);
plt.ylim(dfplt.loc[(nu0, tau)].ylim);
plt.locator_params(nbins=3)
pplt.annotate(r'$s=0$, $\nu_0=${}, $\tau$={}'.format(nu0, tau), loc=1, fontsize=fontsize)
if subp[2] == 3:
plt.legend(loc='center right', fontsize=fontsize)
pplt.setSize(plt.gca(), fontsize=fontsize)
plt.title('({})'.format(subptitle[subp[2] - 1]), fontsize=fontsize)
def plotAlternative(subp, s=0.1, fontsize=5):
nu0=0.005
obs = pd.read_pickle(utl.outpath + 'markov/T100.S{:03.0f}.obs.df'.format(s * 1000))
T = Markov.computeTransition(s, 1000)
dfplt= pd.concat([pd.Series({'scale':10, 'xlim':[0.0,0.01],'ylim': [0,0.2]},name=(0.005,1)),pd.Series({'scale':30, 'xlim':[0.06,0.14],'ylim': [0,0.15]},name=(0.1,1)),
pd.Series({'scale':30, 'xlim':[0.0,0.015],'ylim': [0,0.15]},name=(0.005,10)),pd.Series({'scale':45, 'xlim':[0.0,0.2],'ylim': [0,0.025]},name=(0.1,10)),
pd.Series({'scale':30, 'xlim':[0.0,1],'ylim': [0,0.01]},name=(0.005,100)),pd.Series({'scale':50, 'xlim':[0.00,0.4],'ylim': [0,0.004]},name=(0.1,100))
],axis=1).T
markov=T.loc[nu0].copy(True);markov.name='Markov Chain'
plt.subplot(3, 3, subp[0])
tau=1
o=(obs[1].value_counts().sort_index()/obs.shape[0])
o.loc[0.0055]=0.1211
o.index=o.index-0.0005/2
markov.plot(color='b');
o.plot(color='g');
plt.xlim(dfplt.loc[(nu0, tau)].xlim);
plt.ylim(dfplt.loc[(nu0, tau)].ylim);
plt.locator_params(nbins=3)
pplt.annotate(r'$s={}$, $\nu_0=${}, $\tau$={}'.format(s, nu0, tau), loc=1, fontsize=fontsize)
plt.ylabel(r'$P(\nu_\tau|\nu_0,s)$')
plt.xlabel('$s$')
tau=10
for _ in range(9):
markov=markov.dot(T)
pplt.setSize(plt.gca(), fontsize=fontsize)
plt.title('({})'.format(subptitle[subp[0] - 1]), fontsize=fontsize)
plt.subplot(3, 3, subp[1])
markov.plot(color='b');
(obs[10].value_counts().sort_index() / obs.shape[0]).plot(color='g');
plt.xlim(dfplt.loc[(nu0, tau)].xlim);
plt.ylim(dfplt.loc[(nu0, tau)].ylim);
plt.locator_params(nbins=3)
pplt.annotate(r'$s={}$, $\nu_0=${}, $\tau$={}'.format(s, nu0, tau), loc=1, fontsize=fontsize)
plt.xlabel('$s$')
tau=100
for _ in range(90):
markov=markov.dot(T)
pplt.setSize(plt.gca(), fontsize=fontsize)
plt.title('({})'.format(subptitle[subp[1] - 1]), fontsize=fontsize)
plt.subplot(3, 3, subp[2])
counts,limits=np.histogram(obs[100].values,bins=50,range=[0,1]);centers = 0.5*(limits[1:]+limits[:-1]);o=pd.Series(counts,index=centers);o=o/obs.shape[0]
o/=35
o.loc[0.0] = o.iloc[0]
o = o.sort_index()
o.iloc[1] = o.iloc[2]
# o=(obs[100].value_counts().sort_index()/obs.shape[0])
o.name = 'Observation';
o.plot(color='g');
markov.plot(color='b');
plt.xlim(dfplt.loc[(nu0, tau)].xlim);
plt.ylim(dfplt.loc[(nu0, tau)].ylim);
plt.locator_params(nbins=3)
pplt.annotate(r'$s={}$, $\nu_0=${}, $\tau$={}'.format(s, nu0, tau), loc=1, fontsize=fontsize)
plt.xlabel('$s$')
pplt.setSize(plt.gca(), fontsize=fontsize)
plt.title('({})'.format(subptitle[subp[2] - 1]), fontsize=fontsize)
if __name__ == '__main__':
# createNeutralSimulations()
# createSelectionSimulations(s=0.01)
# createSelectionSimulations(s=0.1)
reload(pplt)
dpi = 200;
fig = plt.figure(figsize=(6.2, 4), dpi=dpi);
pplt.setStyle(lw=1);
fontsize = 7
plotNull(range(1, 4), fontsize=fontsize);
plotNull(range(4, 7), 0.1, fontsize=fontsize);
plotAlternative(range(7, 10), fontsize=fontsize);
plt.tight_layout()
pplt.savefig('markovDists', dpi=dpi);
plt.gcf().subplots_adjust(bottom=0.1)
plt.show()
print 'Done'
|
airanmehr/bio
|
Scripts/TimeSeriesPaper/Plot/Markov.py
|
Python
|
mit
| 8,854
|
#! /usr/bin/env python
class Duck:
"""
this class implies a new way to express polymorphism using duck typing.
This class has 2 functions: quack() and fly() consisting no parameter.
"""
def quack(self):
print("Quack, quack!");
def fly(self):
print("Flap, Flap!");
class Person:
def quack(self):
print("I'm Quackin'!");
def fly(self):
print("I'm Flyin'!");
def in_the_forest(mallard):
""" This function is used for express polymorphism behavior except inheritance """
mallard.quack()
mallard.fly()
duck = Duck()
person = Person()
# passing the objects to the in_the_forest() function
in_the_forest(duck)
in_the_forest(person)
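# A minimal extension sketch (not in the original file): any object that provides
# quack() and fly() works with in_the_forest(), without inheriting from Duck.
#     class Robot:
#         def quack(self): print("Beep, beep!")
#         def fly(self): print("Whirr, whirr!")
#     in_the_forest(Robot())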
|
IPVL/Tanvin-PythonWorks
|
pythonOOP/codes/duck_test.py
|
Python
|
mit
| 722
|
"""
This module defines serializers for the main API data objects:
.. autosummary::
:nosignatures:
DimensionSerializer
FilterSerializer
MessageSerializer
QuestionSerializer
"""
from django.core.paginator import Paginator
from rest_framework import serializers, pagination
import emoticonvis.apps.corpus.models as corpus_models
import emoticonvis.apps.enhance.models as enhance_models
# CodeAssignment and DisagreementIndicator are referenced below; the exact module
# path is assumed here.
import emoticonvis.apps.coding.models as coding_models
from django.contrib.auth.models import User
# A simple string field that looks up dimensions on deserialization
class MessageSerializer(serializers.ModelSerializer):
"""
JSON representation of :class:`.Message`
objects for the API.
Messages are provided in a simple format that is useful for displaying
examples:
::
{
"id": 52,
"dataset": 2,
"text": "Some sort of thing or other",
"sender": {
"id": 2,
"dataset": 1
"original_id": 2568434,
"username": "my_name",
"full_name": "My Name"
},
"time": "2010-02-25T00:23:53Z"
}
Additional fields may be added later.
"""
class Meta:
model = corpus_models.Message
fields = ('id', 'dataset', 'text', )
class UserSerializer(serializers.ModelSerializer):
def to_representation(self, instance):
return instance.username
class Meta:
model = User
fields = ('username', )
class FeatureVectorSerializer(serializers.Serializer):
message = MessageSerializer()
tokens = serializers.ListField()
feature_vector = serializers.ListField(child=serializers.DictField())
class FeatureCodeDistributionSerializer(serializers.Serializer):
feature_index = serializers.IntegerField()
feature_text = serializers.CharField()
distribution = serializers.ListField(child=serializers.DictField())
class SVMResultSerializer(serializers.Serializer):
results = serializers.DictField()
messages = serializers.ListField(child=FeatureVectorSerializer(), required=True)
class FeatureSerializer(serializers.ModelSerializer):
token_list = serializers.ListField(child=serializers.CharField(), required=False)
class Meta:
model = enhance_models.Feature
fields = ('id', 'dictionary', 'index', 'text', 'document_frequency', 'token_list', )
read_only_fields = ('id', 'dictionary', 'index', 'text', 'document_frequency', )
class PaginatedMessageSerializer(pagination.PaginationSerializer):
class Meta:
object_serializer_class = MessageSerializer
class DatasetSerializer(serializers.ModelSerializer):
class Meta:
model = corpus_models.Dataset
fields = ('id', 'name', 'description', 'message_count', )
read_only_fields = ('id', 'name', 'description', 'message_count', )
class DictionarySerializer(serializers.ModelSerializer):
dataset = DatasetSerializer()
class Meta:
model = enhance_models.Dictionary
fields = ('id', 'name', 'time', 'feature_count', 'dataset', )
read_only_fields = ('id', 'name', 'time', 'feature_count', 'dataset', )
class CodeAssignmentSerializer(serializers.ModelSerializer):
class Meta:
model = coding_models.CodeAssignment
fields = ('id', 'source', 'message', 'code', 'is_example', 'is_ambiguous', 'is_saved', )
read_only_fields = ('id', 'source', )
class CodeDefinitionSerializer(serializers.Serializer):
code = serializers.CharField(required=False)
source = UserSerializer(required=False)
text = serializers.CharField()
examples = MessageSerializer(many=True, required=False)
class CodeMessageSerializer(serializers.Serializer):
code = serializers.CharField()
source = UserSerializer()
messages = MessageSerializer(many=True)
class DisagreementIndicatorSerializer(serializers.ModelSerializer):
user_assignment = CodeAssignmentSerializer(required=False)
partner_assignment = CodeAssignmentSerializer(required=False)
class Meta:
model = coding_models.DisagreementIndicator
fields = ('id', 'message', 'user_assignment', 'partner_assignment', 'type', )
read_only_fields = ('id', 'message', 'user_assignment', 'partner_assignment', )
class PairwiseSerializer(serializers.Serializer):
user_code = serializers.CharField()
partner_code = serializers.CharField()
count = serializers.IntegerField()
|
nanchenchen/emoticon-analysis
|
emoticonvis/apps/api/serializers.py
|
Python
|
mit
| 4,406
|
# Log into the site with your browser, obtain the "Cookie" header,
# and put it here
cookie = ''
|
jonmsawyer/site-tools
|
flgetpics/cookie.py
|
Python
|
mit
| 97
|
import unittest
import numpy
import chainer
from chainer import cuda
from chainer import functions
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
from chainer.testing import condition
class TestHuberLoss(unittest.TestCase):
def setUp(self):
self.shape = (4, 10)
self.x = (numpy.random.random(self.shape) - 0.5) * 20
self.x = self.x.astype(numpy.float32)
self.t = numpy.random.random(self.shape).astype(numpy.float32)
self.gy = numpy.random.random(self.shape[0]).astype(numpy.float32)
def check_forward(self, x_data, t_data):
x = chainer.Variable(x_data)
t = chainer.Variable(t_data)
loss = functions.huber_loss(x, t, delta=1)
self.assertEqual(loss.data.dtype, numpy.float32)
loss_value = cuda.to_cpu(loss.data)
diff_data = cuda.to_cpu(x_data) - cuda.to_cpu(t_data)
expected_result = numpy.zeros(self.shape)
mask = numpy.abs(diff_data) < 1
expected_result[mask] = 0.5 * diff_data[mask]**2
expected_result[~mask] = numpy.abs(diff_data[~mask]) - 0.5
loss_expect = numpy.sum(expected_result, axis=1)
gradient_check.assert_allclose(loss_value, loss_expect)
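    # Reference (not in the original test): with delta=1 the Huber loss per element
    # used above is 0.5*d**2 when |d| < 1 and |d| - 0.5 otherwise, summed over axis 1.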
@condition.retry(3)
def test_forward_cpu(self):
self.check_forward(self.x, self.t)
@attr.gpu
@condition.retry(3)
def test_forward_gpu(self):
self.check_forward(cuda.to_gpu(self.x), cuda.to_gpu(self.t))
def check_backward(self, x_data, t_data, y_grad):
gradient_check.check_backward(
functions.HuberLoss(delta=1),
(x_data, t_data), y_grad, eps=1e-2, atol=1e-3)
@condition.retry(3)
def test_backward_cpu(self):
self.check_backward(self.x, self.t, self.gy)
@attr.gpu
@condition.retry(3)
def test_backward_gpu(self):
self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.t),
cuda.to_gpu(self.gy))
testing.run_module(__name__, __file__)
|
benob/chainer
|
tests/chainer_tests/functions_tests/loss_tests/test_huber_loss.py
|
Python
|
mit
| 2,031
|
'''
compile_test.py - check pyximport functionality with pysam
==========================================================
test script for checking if compilation against
pysam and tabix works.
'''
# clean up previous compilation
import os
import unittest
import pysam
from TestUtils import make_data_files, BAM_DATADIR, TABIX_DATADIR
def setUpModule():
make_data_files(BAM_DATADIR)
make_data_files(TABIX_DATADIR)
try:
os.unlink('tests/_compile_test.c')
os.unlink('tests/_compile_test.pyxbldc')
except OSError:
pass
import pyximport
pyximport.install(build_in_temp=False)
import _compile_test
class BAMTest(unittest.TestCase):
input_filename = os.path.join(BAM_DATADIR, "ex1.bam")
def testCount(self):
nread = _compile_test.testCountBAM(
pysam.Samfile(self.input_filename))
self.assertEqual(nread, 3270)
class GTFTest(unittest.TestCase):
input_filename = os.path.join(TABIX_DATADIR, "example.gtf.gz")
def testCount(self):
nread = _compile_test.testCountGTF(
pysam.Tabixfile(self.input_filename))
self.assertEqual(nread, 237)
if __name__ == "__main__":
unittest.main()
|
pysam-developers/pysam
|
tests/compile_test.py
|
Python
|
mit
| 1,181
|
from pylab import *
import matplotlib.cm as cmx
import matplotlib.colors as colors
def MakeColourMap(N, colormap='jet'):
    '''
    Make a colour map with N discrete colours, interpolated from the named
    matplotlib colormap 'colormap'.
    '''
### Colour map for cells (if you want to plot multiple cells)
values = range(N)
    cmap = plt.get_cmap(colormap)
    cNorm = colors.Normalize(vmin=0, vmax=values[-1])
    scalarMap = cmx.ScalarMappable(norm=cNorm, cmap=cmap)
return scalarMap
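# A hedged usage sketch (not part of the original file): map an integer index to a
# colour via the returned ScalarMappable.
#     scalarMap = MakeColourMap(10)
#     colour_3 = scalarMap.to_rgba(3)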
|
h-mayorquin/camp_india_2016
|
tutorials/Spatial Coding/makecmap.py
|
Python
|
mit
| 452
|
#!/usr/bin/env python
#-*- coding: utf-8 -*-
from django.http import HttpResponse
import service.api
def api_available(request):
return HttpResponse(service.api.get_proxy())
def hello(request):
return HttpResponse("Hello world!")
|
Piasy/proxy-searcher
|
service/service/views.py
|
Python
|
mit
| 233
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Generic rendering function that can handle all simulations, but makes poor renders. Nice for sketching.
import os, time, subprocess, re
def Say(text, verbosity=0, end='\n', suppressTime=False):
if verbosity<=VERBOSITY:
if suppressTime:
timeStr = ''
else:
timeStr = time.strftime('%H:%M:%S ')
if verbosity == 0:
print(timeStr + text, end=end)
else:
print('DEBUG: ' + text, end=end)
###############################################################################
# Global settings #
###################
dirFilter = '.*' # regular expression
#dirFilter = 'ecoli_'
#dirFilter = 'odetol' # regular expression
VERBOSITY = 0 # Note that only rendermonitor.py is printed to console, render.py shows up in logfile.txt
iterModDiv = 5
###############################################################################
resultsPath = os.getcwd()[:os.getcwd().index("/blender")] + "/results"
while True:
t0 = time.time()
dirList = [dirs for dirs in os.listdir(resultsPath) if os.path.isdir(os.path.join(resultsPath, dirs)) and os.path.isdir(os.path.join(resultsPath, dirs, 'output')) and re.search(dirFilter,dirs)]
dirList.sort()
for d in dirList:
Say(d)
#######################################################################
# Pre-render settings #
#######################
renderpySettingsDict = {'VERBOSITY':VERBOSITY,
'resolution_percentage':50,
'offset':'array([120,120,20])',
'model.L':'array([60e-6,60e-6,60e-6])',
'saveBlend':True,
'drawStick':False,
'renderDir':'render'
}
#renderpySettingsDict['suppressRender'] = True
if re.match('^aom', d):
renderpySettingsDict['model.L'] = 'array([20e-6,20e-6,20e-6])'
renderpySettingsDict['offset'] = 'array([10,10,10])'
renderpySettingsDict['configMaterial'] = 'ConfigAOM'
renderpySettingsDict['gridStepSize'] = 5
elif re.match('^as', d):
renderpySettingsDict['model.L'] = 'array([80e-6,80e-6,80e-6])'
renderpySettingsDict['offset'] = 'array([40,40,40])'
renderpySettingsDict['configMaterial'] = 'ConfigAS'
elif re.match('^ecoli', d):
renderpySettingsDict['model.L'] = 'array([80e-6,80e-6,80e-6])'
renderpySettingsDict['offset'] = 'array([40,40,0])'
renderpySettingsDict['configMaterial'] = 'ConfigEcoli' # Change colours of cells for consistency with paper/experiments
renderpySettingsDict['colourByGeneration'] = True
#######################################################################
dAbs = resultsPath + "/" + d + "/output"
Say(dAbs, 2)
fileList = [files for files in os.listdir(dAbs) if os.path.splitext(files)[-1]=='.mat']
fileList.sort(reverse=True)
for f in fileList:
if not int(re.match('g(\d{4})r(\d{4}).mat',f).group(2))%iterModDiv == 0:
# relaxation iteration (YYYY in filename gXXXXrYYYY.mat) % iterModulusDivider == 0
continue
fAbs = dAbs + "/" + f
# Check if file is already plotted
fName = os.path.splitext(fAbs.split("/")[-1])[0]
renderPath = (fAbs[:fAbs.index("/output/"+fName)] + "/" + renderpySettingsDict['renderDir']) if ("/output/"+f in fAbs) else ("/".join(fAbs.split("/")[:-1]))
if os.path.isfile(renderPath + "/" + fName + ".png"):
Say(" " + f + ' --> already rendered', 2)
else:
Say(" " + f, end='\r')
callStr = ["blender", "--background", "--python", "render.py", "--", fAbs] # Call string is with filename
[callStr.extend([key,str(val)]) for key,val in renderpySettingsDict.items()] # Append settingsDict
Say("\nCall string = " + " ".join(callStr), verbosity=2)
[stdout, _] = subprocess.Popen(callStr, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate()
stdout = stdout.decode()
if 'Error' in stdout or 'WARNING' in stdout:
with open('logfile.txt', 'w') as file:
file.write(time.strftime('%Y/%m/%d, %H:%M:%S') + " (" + fAbs + ")\n\n" + stdout)
if 'error' in stdout.lower() and 'warning' in stdout.lower():
suffix = " --> WARNING and ERROR"
elif 'error' in stdout.lower():
suffix = " --> ERROR"
else:
suffix = " --> "
for line in stdout.split('\n'):
if 'warning' in line.lower():
suffix += line + ' '
Say(" " + f + suffix)
else:
Say('', suppressTime=True) # Make newline
time.sleep(max(0, 10-(time.time()-t0))) # There must be at least some time between each loop
|
tomasstorck/diatomas
|
blender/rendermonitor.py
|
Python
|
mit
| 5,383
|
import unittest
from db.migrations import migrations_util
class TestMigrationUtil(unittest.TestCase):
"""Test the CLI API."""
@classmethod
def setUpClass(cls):
cls.db_path = '/some/random/path/file.db'
def setUp(self):
self.parser = migrations_util.make_argument_parser(self.db_path)
def test_cli_parser_default(self):
options = self.parser.parse_args(['upgrade'])
self.assertEqual(options.path, self.db_path)
self.assertEqual(options.action, 'upgrade')
def test_cli_parser_user(self):
other_db_path = '/some/other/path/file.db'
options = self.parser.parse_args([
'downgrade',
'--path',
other_db_path
])
self.assertEqual(options.path, other_db_path)
self.assertEqual(options.action, 'downgrade')
def test_cli_parser_bad_action(self):
self.assertRaises(
SystemExit,
self.parser.parse_args,
['retrograde']
)
if __name__ == '__main__':
unittest.main()
|
im0rtel/OpenBazaar
|
tests/test_migrations_util.py
|
Python
|
mit
| 1,060
|
import asyncio
import json
import uuid
import pytest
from photonpump import exceptions as exn
from photonpump import messages as msg
from photonpump import messages_pb2 as proto
from photonpump.conversations import CatchupSubscription
from ..fakes import TeeQueue
async def anext(it, count=1):
if count == 1:
return await asyncio.wait_for(it.anext(), 1)
result = []
while len(result) < count:
result.append(await asyncio.wait_for(it.anext(), 1))
return result
async def reply_to(convo, message, output):
command, payload = message
await convo.respond_to(msg.InboundMessage(uuid.uuid4(), command, payload), output)
def read_as(cls, message):
body = cls()
body.ParseFromString(message.payload)
return body
async def drop_subscription(
convo, output, reason=msg.SubscriptionDropReason.Unsubscribed
):
response = proto.SubscriptionDropped()
response.reason = reason
await convo.respond_to(
msg.InboundMessage(
uuid.uuid4(),
msg.TcpCommand.SubscriptionDropped,
response.SerializeToString(),
),
output,
)
async def confirm_subscription(convo, output_queue=None, event_number=1, commit_pos=1):
response = proto.SubscriptionConfirmation()
response.last_event_number = event_number
response.last_commit_position = commit_pos
await convo.respond_to(
msg.InboundMessage(
uuid.uuid4(),
msg.TcpCommand.SubscriptionConfirmation,
response.SerializeToString(),
),
output_queue,
)
return await convo.result
def event_appeared(
commit_position=1,
prepare_position=1,
event_number=10,
event_id=None,
type="some-event",
data=None,
stream="stream-123",
):
response = proto.StreamEventAppeared()
response.event.event.event_stream_id = stream
response.event.event.event_number = event_number
response.event.event.event_id = (event_id or uuid.uuid4()).bytes_le
response.event.event.event_type = type
response.event.event.data_content_type = msg.ContentType.Json
response.event.event.metadata_content_type = msg.ContentType.Binary
response.event.commit_position = commit_position
response.event.prepare_position = prepare_position
response.event.event.data = json.dumps(data).encode("UTF-8") if data else bytes()
return (msg.TcpCommand.StreamEventAppeared, response.SerializeToString())
class ReadStreamEventsResponseBuilder:
def __init__(self, stream=None):
self.result = msg.ReadStreamResult.Success
self.next_event_number = 10
self.last_event_number = 9
self.is_end_of_stream = False
self.last_commit_position = 8
self.stream = stream or "some-stream"
self.events = []
def at_end_of_stream(self):
self.is_end_of_stream = True
return self
def with_next_event_number(self, num):
self.next_event_number = num
return self
def with_last_position(self, event_number=9, commit_position=8):
self.last_event_number = event_number
self.last_commit_position = commit_position
return self
def with_event(
self,
event_number=10,
event_id=None,
type="some-event",
data=None,
link_event_number=None,
):
event = proto.ResolvedIndexedEvent()
event.event.event_stream_id = self.stream
event.event.event_number = event_number
event.event.event_id = (event_id or uuid.uuid4()).bytes_le
event.event.event_type = type
event.event.data_content_type = msg.ContentType.Json
event.event.metadata_content_type = msg.ContentType.Binary
event.event.data = json.dumps(data).encode("UTF-8") if data else bytes()
if link_event_number is not None:
event.link.event_number = link_event_number
event.link.event_stream_id = "some-stream-name"
event.link.event_id = uuid.uuid4().bytes_le
event.link.event_type = "$>"
event.link.data_content_type = msg.ContentType.Json
event.link.metadata_content_type = msg.ContentType.Binary
event.link.data = f"{event_number}@{self.stream}".encode("UTF-8")
self.events.append(event)
return self
def build(self):
response = proto.ReadStreamEventsCompleted()
response.result = self.result
response.next_event_number = self.next_event_number
response.last_event_number = self.last_event_number
response.is_end_of_stream = self.is_end_of_stream
response.last_commit_position = self.last_commit_position
response.events.extend(self.events)
return (
msg.TcpCommand.ReadStreamEventsForwardCompleted,
response.SerializeToString(),
)
EMPTY_STREAM_PAGE = (
ReadStreamEventsResponseBuilder(stream="stream-123")
.with_next_event_number(0)
.at_end_of_stream()
.build()
)
@pytest.mark.asyncio
async def test_start_read_phase():
"""
A "catchup" subscription starts by iterating the events in the stream until
it reaches the most recent event.
This is the "Read" phase.
"""
output = TeeQueue()
conversation_id = uuid.uuid4()
convo = CatchupSubscription(
"my-stream", start_from=0, conversation_id=conversation_id
)
await convo.start(output)
[request] = output.items
body = proto.ReadStreamEvents()
body.ParseFromString(request.payload)
assert request.command is msg.TcpCommand.ReadStreamEventsForward
assert body.event_stream_id == "my-stream"
assert body.from_event_number == 0
assert body.resolve_link_tos is True
assert body.require_master is False
assert body.max_count == 100
@pytest.mark.asyncio
async def test_end_of_stream():
"""
During the Read phase, we yield the events to the subscription so that the
user is unaware of the chicanery in the background.
When we reach the end of the stream, we should send a subscribe message to
start the volatile subscription.
"""
convo = CatchupSubscription("my-stream")
output = TeeQueue()
await convo.start(output)
event_1_id = uuid.uuid4()
event_2_id = uuid.uuid4()
response = (
ReadStreamEventsResponseBuilder(stream="stream-123")
.at_end_of_stream()
.with_event(event_id=event_1_id, event_number=32)
.with_event(event_id=event_2_id, event_number=33)
).build()
await reply_to(convo, response, output)
subscription = await convo.result
event_1 = await anext(subscription.events)
event_2 = await anext(subscription.events)
assert event_1.stream == "stream-123"
assert event_1.id == event_1_id
assert event_1.event_number == 32
assert event_2.stream == "stream-123"
assert event_2.id == event_2_id
assert event_2.event_number == 33
@pytest.mark.asyncio
async def test_paging():
"""
During the read phase, we expect to page through multiple batches of
events. In this scenario we have two batches, each of two events.
"""
convo = CatchupSubscription("my-stream")
output = TeeQueue()
await convo.start(output)
await output.get()
event_1_id = uuid.uuid4()
event_2_id = uuid.uuid4()
event_3_id = uuid.uuid4()
event_4_id = uuid.uuid4()
first_response = (
ReadStreamEventsResponseBuilder()
.with_event(event_id=event_1_id, event_number=32)
.with_event(event_id=event_2_id, event_number=33)
.with_next_event_number(34)
).build()
second_response = (
ReadStreamEventsResponseBuilder()
.with_event(event_id=event_3_id, event_number=34)
.with_event(event_id=event_4_id, event_number=35)
).build()
await reply_to(convo, first_response, output)
subscription = await convo.result
event_1 = await anext(subscription.events)
event_2 = await anext(subscription.events)
assert event_1.id == event_1_id
assert event_2.id == event_2_id
reply = await output.get()
body = proto.ReadStreamEvents()
body.ParseFromString(reply.payload)
assert body.from_event_number == 34
await reply_to(convo, second_response, output)
event_3 = await anext(subscription.events)
event_4 = await anext(subscription.events)
assert event_3.id == event_3_id
assert event_4.id == event_4_id
@pytest.mark.asyncio
async def test_subscribes_at_end_of_stream():
"""
When we have read all the events in the stream, we should send a
request to subscribe for new events.
"""
convo = CatchupSubscription("my-stream")
output = TeeQueue()
await convo.start(output)
await output.get()
await reply_to(
convo, ReadStreamEventsResponseBuilder().at_end_of_stream().build(), output
)
reply = await output.get()
payload = proto.SubscribeToStream()
payload.ParseFromString(reply.payload)
assert reply.command == msg.TcpCommand.SubscribeToStream
assert payload.event_stream_id == "my-stream"
assert payload.resolve_link_tos is True
@pytest.mark.asyncio
async def test_should_perform_a_catchup_when_subscription_is_confirmed():
"""
When we have read all the events in the stream, we should send a
request to subscribe for new events.
We should start reading catchup events from the `next_event_number` returned
by the historical event read.
"""
convo = CatchupSubscription("my-stream")
output = TeeQueue()
await convo.start(output)
await reply_to(
convo,
ReadStreamEventsResponseBuilder()
.with_next_event_number(17)
.at_end_of_stream()
.build(),
output,
)
await confirm_subscription(convo, output, event_number=42, commit_pos=40)
[read_historial, subscribe, catch_up] = await output.next_event(3)
assert read_historial.command == msg.TcpCommand.ReadStreamEventsForward
assert subscribe.command == msg.TcpCommand.SubscribeToStream
assert catch_up.command == msg.TcpCommand.ReadStreamEventsForward
payload = proto.ReadStreamEvents()
payload.ParseFromString(catch_up.payload)
assert payload.event_stream_id == "my-stream"
assert payload.from_event_number == 17
@pytest.mark.asyncio
async def test_should_return_catchup_events_before_subscribed_events():
"""
It's possible that the following sequence of events occurs:
* The client reads the last batch of events from a stream containing
50 events.
* The client sends SubscribeToStream
* Event 51 is written to the stream
* The server creates a subscription starting at event 51 and
responds with SubscriptionConfirmed
* Event 52 is written to the stream
* The client receives event 52.
To solve this problem, the client needs to perform an additional read
to fetch any missing events created between the last batch and the
subscription confirmation.
--------------
In this scenario, we read a single event (1) from the end of the stream
and expect to create a subscription.
We receive event 4 immediately on the subscription. We expect that the
client requests missing events.
We receive two pages, of one event each: 3, and 4, and then drop the subscription.
Lastly, we expect that the events are yielded in the correct order
despite being received out of order and that we have no duplicates.
"""
convo = CatchupSubscription("my-stream")
output = TeeQueue()
await convo.start(output)
await output.get()
last_page = (
ReadStreamEventsResponseBuilder()
.at_end_of_stream()
.with_event(event_number=1, type="a")
.build()
)
subscribed_event = event_appeared(event_number=4, type="d")
first_catchup = (
ReadStreamEventsResponseBuilder().with_event(event_number=2, type="b").build()
)
second_catchup = (
ReadStreamEventsResponseBuilder()
.with_event(event_number=3, type="c")
.with_event(event_number=4, type="d")
).build()
await reply_to(convo, last_page, output)
assert (await output.get()).command == msg.TcpCommand.SubscribeToStream
await confirm_subscription(convo, output, event_number=3)
await reply_to(convo, subscribed_event, output)
assert (await output.get()).command == msg.TcpCommand.ReadStreamEventsForward
await reply_to(convo, first_catchup, output)
assert (await output.get()).command == msg.TcpCommand.ReadStreamEventsForward
await reply_to(convo, second_catchup, output)
await drop_subscription(convo, output)
events = []
subscription = await convo.result
async for e in subscription.events:
events.append(e)
assert len(events) == 4
[a, b, c, d] = events
assert a.event_number == 1
assert b.event_number == 2
assert c.event_number == 3
assert d.event_number == 4
@pytest.mark.asyncio
async def test_subscription_dropped_mid_stream():
convo = CatchupSubscription("my-stream")
output = TeeQueue()
empty_page = (
ReadStreamEventsResponseBuilder(stream="stream-123").at_end_of_stream().build()
)
await reply_to(convo, empty_page, output)
await confirm_subscription(convo, output, event_number=10, commit_pos=10)
await reply_to(convo, empty_page, output)
subscription = convo.result.result()
await reply_to(convo, event_appeared(), output)
await drop_subscription(convo, output)
events = [e async for e in subscription.events]
assert len(events) == 1
@pytest.mark.asyncio
async def test_subscription_failure_mid_stream():
output = TeeQueue()
convo = CatchupSubscription("my-stream")
event_id = uuid.uuid4()
await reply_to(convo, EMPTY_STREAM_PAGE, output)
await confirm_subscription(convo, output, event_number=10, commit_pos=10)
await reply_to(convo, EMPTY_STREAM_PAGE, output)
subscription = convo.result.result()
await reply_to(convo, event_appeared(event_id=event_id), output)
await drop_subscription(
convo, output, msg.SubscriptionDropReason.SubscriberMaxCountReached
)
with pytest.raises(exn.SubscriptionFailed):
event = await anext(subscription.events)
assert event.id == event_id
await anext(subscription.events)
@pytest.mark.asyncio
async def test_unsubscription():
correlation_id = uuid.uuid4()
output = TeeQueue()
convo = CatchupSubscription("my-stream", conversation_id=correlation_id)
await convo.start(output)
await reply_to(convo, EMPTY_STREAM_PAGE, output)
await confirm_subscription(convo, output, event_number=10, commit_pos=10)
await reply_to(convo, EMPTY_STREAM_PAGE, output)
sub = convo.result.result()
await sub.unsubscribe()
[read_historical, subscribe, catch_up, unsubscribe] = output.items
assert unsubscribe.command == msg.TcpCommand.UnsubscribeFromStream
assert unsubscribe.conversation_id == correlation_id
@pytest.mark.asyncio
async def test_subscribe_with_context_manager():
conversation_id = uuid.uuid4()
output = TeeQueue()
convo = CatchupSubscription("my-stream", conversation_id=conversation_id)
await convo.start(output)
# Create a subscription with three events in it
await reply_to(convo, EMPTY_STREAM_PAGE, output)
await confirm_subscription(convo, output, event_number=10, commit_pos=10)
await reply_to(convo, EMPTY_STREAM_PAGE, output)
for i in range(0, 3):
await reply_to(
convo, event_appeared(event_id=uuid.uuid4(), event_number=i), output
)
async with (await convo.result) as subscription:
events_seen = 0
async for _ in subscription.events:
events_seen += 1
if events_seen == 3:
break
# Having exited the context manager it should send
# an unsubscribe message
[read_historical, subscribe, catch_up, unsubscribe] = output.items
assert unsubscribe.command == msg.TcpCommand.UnsubscribeFromStream
assert unsubscribe.conversation_id == conversation_id
@pytest.mark.asyncio
async def test_restart_from_historical():
"""
If we ask the conversation to start again while we're reading historical events
we should re-send the most recent page request.
In this scenario, we start reading the stream at event 10, we receive a
page with 2 events, we request the next page starting at 12.
When we restart the conversation, we should again request the page starting at 12.
"""
conversation_id = uuid.uuid4()
output = TeeQueue()
convo = CatchupSubscription(
"my-stream", start_from=10, conversation_id=conversation_id
)
await convo.start(output)
await reply_to(
convo,
(
ReadStreamEventsResponseBuilder(stream="stream-123")
.with_event(event_number=10)
.with_event(event_number=11)
.with_next_event_number(12)
.build()
),
output,
)
await convo.start(output)
[first_page, second_page, second_page_again] = [
read_as(proto.ReadStreamEvents, m) for m in output.items
]
assert second_page.from_event_number == second_page_again.from_event_number
@pytest.mark.asyncio
async def test_restart_from_catchup():
"""
If the connection drops during the catchup phase, we need to unsubscribe
from the stream and then go back to reading historical events starting from
the last page.
=> Request historical events
<= Receive 1 event, next_event = 1
=> Subscribe
<= Confirmed
=> Catch up from 1
** Restart **
=> Unsubscribe
<= Confirmed
=> Read historical from 1
<= Empty page
=> Subscribe
"""
conversation_id = uuid.uuid4()
output = TeeQueue()
convo = CatchupSubscription("my-stream", conversation_id=conversation_id)
await convo.start(output)
await output.get()
page_one = (
ReadStreamEventsResponseBuilder()
.with_event(event_number=1)
.with_next_event_number(1)
.at_end_of_stream()
.build()
)
await reply_to(convo, page_one, output)
await output.get()
await confirm_subscription(convo, output, event_number=10, commit_pos=10)
first_catch_up = read_as(proto.ReadStreamEvents, await output.get())
await reply_to(convo, page_one, output)
# Restart
await convo.start(output)
unsubscribe = await output.get()
assert first_catch_up.from_event_number == 1
assert unsubscribe.command == msg.TcpCommand.UnsubscribeFromStream
await drop_subscription(convo, output)
second_catchup = read_as(proto.ReadStreamEvents, await output.get())
assert second_catchup.from_event_number == 1
@pytest.mark.asyncio
async def test_historical_duplicates():
"""
It's possible that we receive the reply to a ReadStreamEvents request after we've
resent the request. This will result in our receiving a duplicate page.
In this instance, we should not raise duplicate events.
=> Request historical
RESTART
=> Request historical
<= 2 events
<= 3 events
Should only see the 3 unique events
"""
two_events = (
ReadStreamEventsResponseBuilder()
.with_event(event_number=1)
.with_event(event_number=2)
.with_next_event_number(2)
.at_end_of_stream()
.build()
)
three_events = (
ReadStreamEventsResponseBuilder()
.with_event(event_number=1)
.with_event(event_number=2)
.with_event(event_number=3)
.with_next_event_number(3)
.at_end_of_stream()
.build()
)
output = TeeQueue()
convo = CatchupSubscription("my-stream")
await convo.start(output)
await convo.start(output)
await reply_to(convo, two_events, output)
await reply_to(convo, three_events, output)
[event_1, event_2, event_3] = await anext(convo.subscription.events, 3)
assert event_1.event_number == 1
assert event_2.event_number == 2
assert event_3.event_number == 3
@pytest.mark.asyncio
async def test_subscription_duplicates():
"""
If we receive subscription events while catching up, we buffer them internally.
If we restart the conversation at that point we need to make sure we clear our buffer
and do not raise duplicate events.
=> Request historical
<= Empty
=> Subscribe to stream
<= Confirmed
=> Request catchup
<= Subscribed event 2 appeared
<= Event 1, not end of stream
RESTART
=> Drop subscription
<= Dropped
=> Request historical from_event = 1
<= Receive event 2 at end of stream
=> Subscribe
<= Confirmed
=> Catchup
<= Subscribed event 3 appeared
<= Empty
Should yield [event 1, event 2, event 3]
"""
event_1_not_end_of_stream = (
ReadStreamEventsResponseBuilder()
.with_event(event_number=1)
.with_next_event_number(2)
.build()
)
event_2_at_end_of_stream = (
ReadStreamEventsResponseBuilder()
.with_event(event_number=2)
.with_next_event_number(2)
.at_end_of_stream()
.build()
)
output = TeeQueue()
convo = CatchupSubscription("my-stream")
await convo.start(output)
await reply_to(convo, EMPTY_STREAM_PAGE, output)
await confirm_subscription(convo, output, event_number=10, commit_pos=10)
await reply_to(convo, event_appeared(event_number=2), output)
await reply_to(convo, event_1_not_end_of_stream, output)
# RESTART
await convo.start(output)
output.items.clear()
await drop_subscription(convo, output)
second_read_historical = read_as(proto.ReadStreamEvents, output.items[0])
await reply_to(convo, event_2_at_end_of_stream, output)
await confirm_subscription(convo, output, event_number=10, commit_pos=10)
await reply_to(convo, event_appeared(event_number=3), output)
await reply_to(convo, EMPTY_STREAM_PAGE, output)
[event_1, event_2, event_3] = await anext(convo.subscription.events, 3)
assert event_1.event_number == 1
assert event_2.event_number == 2
assert event_3.event_number == 3
assert second_read_historical.from_event_number == 2
@pytest.mark.asyncio
async def test_live_restart():
"""
If we reset the conversation while we are live, we should first unsubscribe
then start a historical read from the last read event.
=> Read historial
<= empty
=> subscribe
<= confirmed
=> catchup
<= empty
<= event 1 appeared
<= event 2 appeared
RESTART
=> unsubscribe
<= dropped
=> Read historical from 2
"""
output = TeeQueue()
convo = CatchupSubscription("my-stream")
await convo.start(output)
await reply_to(convo, EMPTY_STREAM_PAGE, output)
await confirm_subscription(convo, output, event_number=10, commit_pos=10)
await reply_to(convo, EMPTY_STREAM_PAGE, output)
await reply_to(convo, event_appeared(event_number=1), output)
await reply_to(convo, event_appeared(event_number=2), output)
output.items.clear()
await convo.start(output)
await drop_subscription(convo, output)
[unsubscribe, read_historical] = output.items
read_historical = read_as(proto.ReadStreamEvents, read_historical)
assert unsubscribe.command == msg.TcpCommand.UnsubscribeFromStream
assert read_historical.from_event_number == 2
@pytest.mark.asyncio
async def test_paging_projection():
""" """
convo = CatchupSubscription("my-stream")
output = TeeQueue()
await convo.start(output)
await output.get()
event_1_id = uuid.uuid4()
event_2_id = uuid.uuid4()
event_3_id = uuid.uuid4()
event_4_id = uuid.uuid4()
first_response = (
ReadStreamEventsResponseBuilder()
.with_event(event_id=event_1_id, event_number=0, link_event_number=32)
.with_event(event_id=event_2_id, event_number=0, link_event_number=33)
.with_next_event_number(34)
).build()
second_response = (
ReadStreamEventsResponseBuilder()
.with_event(event_id=event_3_id, event_number=0, link_event_number=34)
.with_event(event_id=event_4_id, event_number=0, link_event_number=35)
).build()
await reply_to(convo, first_response, output)
subscription = await convo.result
event_1 = await anext(subscription.events)
event_2 = await anext(subscription.events)
assert event_1.id == event_1_id
assert event_2.id == event_2_id
reply = await output.get()
body = proto.ReadStreamEvents()
body.ParseFromString(reply.payload)
assert body.from_event_number == 34
await reply_to(convo, second_response, output)
event_3 = await anext(subscription.events)
event_4 = await anext(subscription.events)
assert event_3.id == event_3_id
assert event_4.id == event_4_id
|
madedotcom/photon-pump
|
test/conversations/test_catchup.py
|
Python
|
mit
| 25,039
|
import numpy as np
from elephas.mllib.adapter import *
from pyspark.mllib.linalg import Matrices, Vectors
def test_to_matrix():
x = np.ones((4, 2))
mat = to_matrix(x)
assert mat.numRows == 4
assert mat.numCols == 2
def test_from_matrix():
mat = Matrices.dense(1, 2, [13, 37])
x = from_matrix(mat)
assert x.shape == (1, 2)
def test_to_vector():
    x = np.ones((3,))
    vector = to_vector(x)
    assert len(vector) == 3
def test_from_vector():
    vector = Vectors.dense([4, 2])
    x = from_vector(vector)
    assert x.shape == (2,)
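# A hedged round-trip sketch (not part of the original tests): converting a numpy
# array to an mllib vector and back should preserve its values, e.g.
#     x = np.array([1.0, 2.0, 3.0])
#     assert (from_vector(to_vector(x)) == x).all()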
|
maxpumperla/elephas
|
tests/mllib/test_adapter.py
|
Python
|
mit
| 570
|
# coding=utf-8
import zipfile
import re
import os
import hashlib
import json
import logging
from django.shortcuts import render
from django.db.models import Q, Count
from django.core.paginator import Paginator
from rest_framework.views import APIView
from django.conf import settings
from account.models import SUPER_ADMIN
from account.decorators import super_admin_required
from utils.shortcuts import (serializer_invalid_response, error_response,
success_response, paginate, rand_str, error_page)
from .serizalizers import (CreateProblemSerializer, EditProblemSerializer, ProblemSerializer,
ProblemTagSerializer, CreateProblemTagSerializer)
from .models import Problem, ProblemTag
from .decorators import check_user_problem_permission
logger = logging.getLogger("app_info")
def problem_page(request, problem_id):
"""
    Front-end problem detail page
"""
try:
problem = Problem.objects.get(id=problem_id, visible=True)
except Problem.DoesNotExist:
return error_page(request, u"题目不存在")
return render(request, "oj/problem/problem.html", {"problem": problem, "samples": json.loads(problem.samples)})
class ProblemTagAdminAPIView(APIView):
"""
    Get the list of all problem tags
"""
def get(self, request):
return success_response(ProblemTagSerializer(ProblemTag.objects.all(), many=True).data)
class ProblemAdminAPIView(APIView):
@super_admin_required
def post(self, request):
"""
        JSON API for publishing a problem
---
request_serializer: CreateProblemSerializer
response_serializer: ProblemSerializer
"""
serializer = CreateProblemSerializer(data=request.data)
if serializer.is_valid():
data = serializer.data
problem = Problem.objects.create(title=data["title"],
description=data["description"],
input_description=data["input_description"],
output_description=data["output_description"],
test_case_id=data["test_case_id"],
source=data["source"],
samples=json.dumps(data["samples"]),
time_limit=data["time_limit"],
memory_limit=data["memory_limit"],
difficulty=data["difficulty"],
created_by=request.user,
hint=data["hint"],
visible=data["visible"])
for tag in data["tags"]:
try:
tag = ProblemTag.objects.get(name=tag)
except ProblemTag.DoesNotExist:
tag = ProblemTag.objects.create(name=tag)
problem.tags.add(tag)
return success_response(ProblemSerializer(problem).data)
else:
return serializer_invalid_response(serializer)
@check_user_problem_permission
def put(self, request):
"""
        JSON API for editing a problem
---
request_serializer: EditProblemSerializer
response_serializer: ProblemSerializer
"""
serializer = EditProblemSerializer(data=request.data)
if serializer.is_valid():
data = serializer.data
problem = Problem.objects.get(id=data["id"])
problem.title = data["title"]
problem.description = data["description"]
problem.input_description = data["input_description"]
problem.output_description = data["output_description"]
problem.test_case_id = data["test_case_id"]
problem.source = data["source"]
problem.time_limit = data["time_limit"]
problem.memory_limit = data["memory_limit"]
problem.difficulty = data["difficulty"]
problem.samples = json.dumps(data["samples"])
problem.hint = data["hint"]
problem.visible = data["visible"]
            # Remove the existing tag associations
problem.tags.remove(*problem.tags.all())
            # Re-add all the tags
for tag in data["tags"]:
try:
tag = ProblemTag.objects.get(name=tag)
except ProblemTag.DoesNotExist:
tag = ProblemTag.objects.create(name=tag)
problem.tags.add(tag)
problem.save()
return success_response(ProblemSerializer(problem).data)
else:
return serializer_invalid_response(serializer)
def get(self, request):
"""
        JSON API for paginated problem listing
---
response_serializer: ProblemSerializer
"""
problem_id = request.GET.get("problem_id", None)
if problem_id:
try:
                # Regular admins can only fetch problems they created themselves;
                # super admins can fetch all problems.
problem = Problem.objects.get(id=problem_id)
if request.user.admin_type != SUPER_ADMIN:
                    problem = Problem.objects.get(id=problem_id, created_by=request.user)
return success_response(ProblemSerializer(problem).data)
except Problem.DoesNotExist:
return error_response(u"题目不存在")
        # Fetch the problem list
problems = Problem.objects.all().order_by("-create_time")
if request.user.admin_type != SUPER_ADMIN:
problems = problems.filter(created_by=request.user)
visible = request.GET.get("visible", None)
if visible:
problems = problems.filter(visible=(visible == "true"))
keyword = request.GET.get("keyword", None)
if keyword:
problems = problems.filter(Q(title__contains=keyword) |
Q(description__contains=keyword))
return paginate(request, problems, ProblemSerializer)
class TestCaseUploadAPIView(APIView):
"""
    Upload test cases for a problem
"""
def _is_legal_test_case_file_name(self, file_name):
        # File names start with a positive integer and end with .in or .out
regex = r"^[1-9]\d*\.(in|out)$"
return re.compile(regex).match(file_name) is not None
def post(self, request):
if "file" not in request.FILES:
return error_response(u"文件上传失败")
f = request.FILES["file"]
tmp_zip = "/tmp/" + rand_str() + ".zip"
try:
with open(tmp_zip, "wb") as test_case_zip:
for chunk in f:
test_case_zip.write(chunk)
except IOError as e:
logger.error(e)
return error_response(u"上传失败")
test_case_file = zipfile.ZipFile(tmp_zip, 'r')
name_list = test_case_file.namelist()
l = []
        # If the files were zipped directly, name_list looks like ["1.in", "1.out"].
        # If there is an extra test_case folder, name_list looks like
        # ["test_case/", "test_case/1.in", "test_case/1.out"].
        # Only the first layout is supported for now, so check which one we have.
        # First layout:
if "1.in" in name_list and "1.out" in name_list:
for file_name in name_list:
if self._is_legal_test_case_file_name(file_name):
name = file_name.split(".")
                    # Given a .in file, check that the matching .out exists
if name[1] == "in":
if (name[0] + ".out") in name_list:
l.append(file_name)
else:
return error_response(u"测试用例文件不完整,缺少" + name[0] + ".out")
else:
                        # Given a .out file, check that the matching .in exists
if (name[0] + ".in") in name_list:
l.append(file_name)
else:
return error_response(u"测试用例文件不完整,缺少" + name[0] + ".in")
problem_test_dir = rand_str()
test_case_dir = settings.TEST_CASE_DIR + problem_test_dir + "/"
            # We now have the list of valid test case files; extract them
os.mkdir(test_case_dir)
for name in l:
f = open(test_case_dir + name, "wb")
try:
f.write(test_case_file.read(name).replace("\r\n", "\n"))
except MemoryError:
return error_response(u"单个测试数据体积过大!")
finally:
f.close()
l.sort()
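            # After sorting, l alternates like ["1.in", "1.out", "2.in", "2.out", ...],
            # so even indices are the input files and odd indices the output files
            # (this pairing is relied on below when building file_list).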
file_info = {"test_case_number": len(l) / 2, "test_cases": {}}
            # Compute the md5 of each output file
for i in range(len(l) / 2):
md5 = hashlib.md5()
striped_md5 = hashlib.md5()
f = open(test_case_dir + str(i + 1) + ".out", "r")
                # md5 of the complete file
while True:
data = f.read(2 ** 8)
if not data:
break
md5.update(data)
                # Strip trailing whitespace and newlines from the expected output.
                # The whole file has to be read at once here; with chunked reads
                # there is no way to tell where the file ends.
f.seek(0)
striped_md5.update(f.read().rstrip())
file_info["test_cases"][str(i + 1)] = {"input_name": str(i + 1) + ".in",
"output_name": str(i + 1) + ".out",
"output_md5": md5.hexdigest(),
"striped_output_md5": striped_md5.hexdigest(),
"output_size": os.path.getsize(test_case_dir + str(i + 1) + ".out")}
            # Write the info config file
open(test_case_dir + "info", "w").write(json.dumps(file_info))
return success_response({"test_case_id": problem_test_dir,
"file_list": {"input": l[0::2],
"output": l[1::2]}})
else:
return error_response(u"测试用例压缩文件格式错误,请保证测试用例文件在根目录下直接压缩")
def problem_list_page(request, page=1):
"""
    Front-end problem list
"""
    # Default case
problems = Problem.objects.filter(visible=True)
    # Search case
keyword = request.GET.get("keyword", None)
if keyword:
problems = problems.filter(Q(title__contains=keyword) | Q(description__contains=keyword))
difficulty_order = request.GET.get("order_by", None)
if difficulty_order:
if difficulty_order[0] == "-":
problems = problems.order_by("-difficulty")
difficulty_order = "difficulty"
else:
problems = problems.order_by("difficulty")
difficulty_order = "-difficulty"
else:
difficulty_order = "difficulty"
    # Filter by tag
tag_text = request.GET.get("tag", None)
if tag_text:
try:
tag = ProblemTag.objects.get(name=tag_text)
except ProblemTag.DoesNotExist:
return error_page(request, u"标签不存在")
problems = tag.problem_set.all().filter(visible=True)
paginator = Paginator(problems, 20)
try:
current_page = paginator.page(int(page))
except Exception:
return error_page(request, u"不存在的页码")
previous_page = next_page = None
try:
previous_page = current_page.previous_page_number()
except Exception:
pass
try:
next_page = current_page.next_page_number()
except Exception:
pass
if request.user.is_authenticated():
problems_status = json.loads(request.user.problems_status)
else:
problems_status = {}
    # Tag list for the sidebar, ordered by the number of associated problems, excluding tags with none
tags = ProblemTag.objects.annotate(problem_number=Count("problem")).filter(problem_number__gt=0).order_by("-problem_number")
return render(request, "oj/problem/problem_list.html",
{"problems": current_page, "page": int(page),
"previous_page": previous_page, "next_page": next_page,
"keyword": keyword, "tag": tag_text,"problems_status": problems_status,
"tags": tags, "difficulty_order": difficulty_order})
|
hxsf/OnlineJudge
|
problem/views.py
|
Python
|
mit
| 12,819
|
from scrapy.commands.crawl import Command
from scrapy.exceptions import UsageError
class CustomCrawlCommand(Command):
def run(self, args, opts):
if len(args) < 1:
raise UsageError()
elif len(args) > 1:
raise UsageError("running 'scrapy crawl' with more than one spider is no longer supported")
spname = args[0]
# added new code
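        # SPIDER_SETTINGS is expected to map a spider name to a settings module
        # path, e.g. (hypothetical) {'myspider': 'myproject.settings_myspider'};
        # that module is then applied on top of the project settings below.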
spider_settings_path = self.settings.getdict('SPIDER_SETTINGS', {}).get(spname, None)
if spider_settings_path is not None:
self.settings.setmodule(spider_settings_path, priority='cmdline')
# end
crawler = self.crawler_process.create_crawler()
spider = crawler.spiders.create(spname, **opts.spargs)
crawler.crawl(spider)
self.crawler_process.start()
|
lnxpgn/scrapy_multiple_spiders
|
commands/crawl.py
|
Python
|
mit
| 809
|
# pylint:disable=missing-docstring,invalid-name,too-few-public-methods,old-style-class
class SomeClass: # [blank-line-after-class-required]
def __init__(self):
pass
|
Shopify/shopify_python
|
tests/functional/blank_line_after_class_required.py
|
Python
|
mit
| 178
|
"""Print all records in the pickle for the specified test"""
import sys
import argparse
from autocms.core import (load_configuration, load_records)
def main():
"""Print all records corresponding to test given as an argument"""
    parser = argparse.ArgumentParser(description='Print all records for the specified test.')
parser.add_argument('testname', help='test directory')
parser.add_argument('-c', '--configfile', type=str,
default='autocms.cfg',
help='AutoCMS configuration file name')
args = parser.parse_args()
config = load_configuration(args.configfile)
records = load_records(args.testname,config)
for job in records:
print str(job)+'\n'
return 0
if __name__ == '__main__':
status = main()
sys.exit(status)
|
appeltel/AutoCMS
|
print_records.py
|
Python
|
mit
| 801
|
import numpy as np
import pandas as pd
"""
Specifications (So far, only implemented for the single index part below):
feature_df ... Data Frame of intervals along the genome,
equivalent of a bed file, but 1-indexed
index:
(chrom, start)
columns
required:
end
optional:
name
...
"""
def check_rod(rod):
"""
Check if input follows the ROD specifications.
Throw assertion error otherwise.
Specifications:
rod ... Series or DataFrame of reference ordered data
index:
(chrom, pos)
"""
assert rod.index.names == ('chrom','pos'), "Index names must be ('chrom','pos')."
assert rod.ix and rod.iloc and rod.loc, ("ROD lacks pandas indexing functionality. "
"Is it DataFrame or Series?")
def check_feature_df(feature_df):
"""
    Check if input follows the feature_df specifications.
Throw assertion error otherwise.
Specifications:
index:
(chrom, start)
columns
required:
end
optional:
feature
"""
assert feature_df.index.names == ('chrom','start'), "Index names must be ('chrom','start')."
assert feature_df.ix and feature_df.iloc and feature_df.loc, ("feature_df lacks pandas indexing "
"functionality. Is it DataFrame or Series?")
assert 'end' in feature_df.columns, "feature_df lacks required column 'end'"
def index_rolling(s,window,func,overlap=0,min_n_values=0,*args,**kwargs):
"""
Apply function in rolling windows, where the window
size is defined with respect to the index values.
This means that different windows can comprise different
numbers of elements.
s ... pandas Series
window ... window size in units of the index values
func ... function to apply to the series values within each
window
    overlap ... overlap size of windows
min_n_values ... minimum number of values (e.g. SNPs) per window
None will be returned for windows that have less values.
args, kwarg ... additional arguments for func
    Example: index_rolling(pd.Series(range(20), index=np.random.rand(20)), 0.1, max)
"""
assert isinstance(s, pd.Series)
#note that basis must be sorted in order for this to work properly
windows_min = s.index.min()
windows_max = s.index.max()
window_starts = np.arange(windows_min, windows_max-window, window-overlap)
window_starts = pd.Series(window_starts, index = window_starts+window/2)
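    # The series values are the window start coordinates, while its index marks
    # each window's midpoint; that midpoint index is what the rolled result
    # returned below is labelled with.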
def applyToWindow(val):
# using slice_indexer rather that what.loc [val:val+window] allows
# window limits that are not specifically in the index
try:
indexer = s.index.slice_indexer(val,val+window,1)
except IndexError:
print val, val+window
print s
raise
chunk = s.iloc[indexer]
try:
if len(chunk) < min_n_values:
#print indexer, "too few snps"
return None
except TypeError:
return None
try:
return func(chunk,*args,**kwargs)
except ValueError, e:
if "empty sequence" in str(e):
#print indexer, chunk
return None
else:
raise
rolled = window_starts.apply(applyToWindow)
return rolled
##How should the output of this look like? What index?
def data_per_feature(rod,feature_df, feature_name='feature', max_dist=0):
"""
    Get the entries in rod which lie within a feature
(e.g. gene) in feature_df.
Input:
rod (reference ordered data)... pandas series or data frame with multiindex (chrom, pos)
such as SNP genotypes
feature_df (gene annotation data frame)... index must be (chrom,feature_name), must have columns 'start', 'end'
max_dist not implemented yet, we need to take care of overlapping genes,
currently we only take the last
"""
rod = pd.DataFrame(rod)
chrom_features = []
chrpos_names = rod.index.names
for chrom in rod.index.droplevel(1).unique():
try:
feature_chrom = feature_df.ix[chrom]
except KeyError:
continue
rod_chrom = rod.ix[chrom]
if not feature_chrom.index.is_monotonic:
feature_chrom = feature_chrom.sort_index()
if not rod_chrom.index.is_monotonic:
rod_chrom = rod_chrom.sort_index()
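        # Interval-membership trick: compare each position's insertion rank among
        # the sorted feature starts with its rank among the feature ends; a
        # difference of exactly 1 means the position falls inside a feature
        # (overlapping features are not fully handled, as noted in the docstring).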
pos_rel_to_start = feature_chrom.index.searchsorted(rod_chrom.index)
pos_rel_to_end = np.searchsorted(feature_chrom["end"].values,rod_chrom.index.values)
in_feature = (pos_rel_to_start - pos_rel_to_end) == 1
feature_id = feature_chrom.iloc[pos_rel_to_end[in_feature]][feature_name].values
snp_df = rod_chrom[in_feature].copy()
snp_df['chrom'] = chrom
snp_df[feature_name] = feature_id
chrom_features.append(snp_df)
dpf = pd.concat(chrom_features)
dpf.set_index(['chrom'],append=True,inplace=True)
dpf = dpf.reorder_levels(['chrom','pos'])
return dpf
def get_features_per_data(peak_s, feature_df, feature_name='feature', max_dist=0):
"""
    Take the input data series and return a similar series
    with one entry per (data point, gene) pair
(i.e., there can be 0,1 or more entries per data point)
"""
all_features = []
if not feature_df.index.is_monotonic:
feature_df = feature_df.sort_index()
tot_hit_df = pd.DataFrame()
for chrom in peak_s.index.droplevel(1).unique():
loc_feature_df = feature_df.ix[chrom]
#loc_feature_df = loc_feature_df.append(pd.DataFrame(np.nan,index=[np.inf],columns=loc_feature_df.columns))
#print loc_feature_df.index-max_dist, peak_s.ix[chrom].index.values
#try:
pos_rel_to_start = np.searchsorted(loc_feature_df.index.values-max_dist,peak_s.ix[chrom].index.values)
#except:
# print chrom, peak_s.ix[chrom]
pos_rel_to_end = np.searchsorted(loc_feature_df["end"].values+max_dist,peak_s.ix[chrom].index.values)
features_per_datapoint = (pos_rel_to_start - pos_rel_to_end)
#why is this so slow print features_per_datapoint.shape
data_idx = [i for i in range(len(features_per_datapoint)) for j in range(features_per_datapoint[i])]
features = loc_feature_df[feature_name].iloc[np.hstack([range(a,b) for a,b in zip(pos_rel_to_end,pos_rel_to_start)])].values
data_df = pd.DataFrame(peak_s.ix[chrom].iloc[data_idx])
data_df[feature_name] = features
data_df['chrom'] = chrom
all_features.append(data_df)
fpd=pd.concat(all_features)
#fpd.set_index(['chrom'],append=True,inplace=True)
#fpd = dpf.reorder_levels(['chrom','pos'])
g = fpd.reset_index().groupby(['chrom','pos'])
def get_series_of_features(gdf):
features = gdf[feature_name].unique()
r = pd.Series({i:s for i,s in enumerate(features)})
df = pd.DataFrame({feature_name:r,peak_s.name:gdf[peak_s.name].values[0]})
return df
d = g.apply(get_series_of_features)
d.index.names = ['chrom','pos','number']
return d
def features_per_data_to_data_per_features(fpd, feature_name='features'):
return fpd.reset_index().set_index(feature_name).sort_index()
def get_features(peak_s, feature_df, feature_name='feature', max_dist=0):
"""
    Take the input series and get the
    names of features nearby.
Input:
peak_s ... pandas series with (chrom, pos) index and value of
the statistic ('peak height'). Series should be named.
feature_df ... data frame with feature info.
"""
all_features = []
if not feature_df.index.is_monotonic:
feature_df = feature_df.sort_index()
tot_hit_df = pd.DataFrame()
for chrom in peak_s.index.droplevel(1).unique():
loc_feature_df = feature_df.ix[chrom]
#loc_feature_df = loc_feature_df.append(pd.DataFrame(np.nan,index=[np.inf],columns=loc_feature_df.columns))
#print loc_feature_df.index-max_dist, peak_s.ix[chrom].index.values
#try:
pos_rel_to_start = np.searchsorted(loc_feature_df.index.values-max_dist,peak_s.ix[chrom].index.values)
#except:
# print chrom, peak_s.ix[chrom]
pos_rel_to_end = np.searchsorted(loc_feature_df["end"].values+max_dist,peak_s.ix[chrom].index.values)
features = list(set(loc_feature_df[feature_name].iloc[np.hstack([range(a,b) for a,b in zip(pos_rel_to_end,pos_rel_to_start)])]))
all_features += features
return all_features
def apply_to_feature(feature_df,groupby_func_name=None,function=None):
"""
Apply a function to the entries for each feature.
feature_df ... dataframe with index (chrom, feature_name, pos)
(Such as the output of data_per_feature())
groupby_func_name ... name of the function of the groupby object
to apply to the data
This is faster than applying a function object.
function ... alternatively: function object to apply
"""
groups = feature_df.groupby(lambda idx: idx[1])
if groupby_func_name is not None:
return getattr(groups,groupby_func_name)()
elif function is not None:
return groups.apply(function)
else:
raise ValueError("Either groupby_func_name or function have to be given.")
#--------------WORK WITH SINGLE NUMERIC INDEX------------------------
def rod_to_1d(rod, chrom_len_s, drop=True):
"""
Converts a (chrom, pos) multiindex to a
single numeric index that runs through chromosomes.
    Note that the (chrom, pos) index is sorted lexicographically,
i.e., if chrom names are strings, the entries in the resulting
index are Chr1, Chr10, Chr2, Chr20, Chr3,...,ChrX, ChrY.
Example: (Chr2, 1) is converted to len(Chr1)+len(Chr10)+1.
The inverse operation is given by rod_to_chrompos.
Input:
rod ... series or dataframe with reference ordered data
with multiindex (chrom,pos)
chrom_len_s ... series with chromosome names as keys
and chromosome length as values
drop ... If False, keep chrom, pos as columns
"""
if not chrom_len_s.index.is_monotonic:
chrom_len_s = chrom_len_s.sort_index()
rod = pd.DataFrame(rod).copy()
if not rod.index.is_monotonic:
rod.sortlevel(inplace=True)
    columns = [c for c in rod.columns if c not in ['index']]
rod.reset_index(inplace=True)
#return rod.groupby('chrom')
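    # For each chromosome, shift positions by the summed length of all chromosomes
    # that sort before it (the label slice loc[:chrom] is inclusive, hence the
    # iloc[:-1] that drops the current chromosome).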
try:
index = rod.groupby('chrom').apply(lambda df: df['pos']+chrom_len_s.loc[:df['chrom'].iloc[0]].iloc[:-1].sum()).values
except KeyError, e:
print chrom_len_s
raise e
rod['index'] = index
rod.set_index('index', inplace=True, drop=True)
if not drop:
columns = list(columns) + ['chrom', 'pos']
rod.index = rod.index.values.astype(int)
if not rod.index.is_monotonic:
rod.sort_index(inplace=True)
return rod[columns] if len(columns)>1 else rod[columns[0]]
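# A minimal round-trip sketch (chromosome names and lengths are illustrative):
#   chrom_len = pd.Series({'Chr1': 100, 'Chr2': 80}).sort_index()
#   rod_to_1d maps the entry at (Chr2, 5) to index 100 + 5 = 105, and
#   rod_to_chrompos maps index 105 back to (Chr2, 5) given the same chrom_len.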
def rod_to_chrompos(rod_1d, chrom_len_s, drop=True):
"""
Reverts the action of rod_to_1d.
Converts a single numeric index that runs through
chromosomes to a (chrom, pos) multiindex.
Note that the single index is expected to correspond
    to (chrom, pos) sorted lexicographically,
i.e., if chrom names are strings, the entries should be in
in the order Chr1, Chr10, Chr2, Chr20, Chr3,...,ChrX, ChrY.
Example: len(Chr1)+len(Chr10)+1 is converted to (Chr2, 1).
Input:
rod ... series or dataframe of reference ordered data
with single index running through chromosomes.
(as produced by rod_to_1d())
chrom_len_s ... series with chromosome names as keys
and chromosome length as values
drop ... If False, keep numeric index as columns
"""
if not chrom_len_s.index.is_monotonic:
chrom_len_s = chrom_len_s.sort_index()
rod = pd.DataFrame(rod_1d).copy()
if not rod.index.is_monotonic:
rod.sort_index(inplace=True)
columns = [c for c in rod.columns if c not in ['chrom','pos']]
cs = chrom_len_s.cumsum()
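    # cs holds the cumulative 1d offset at the end of each chromosome; pairing it
    # with a copy shifted by one gives the (start, end) 1d range each chromosome
    # covers, which the loop below uses to assign chrom and pos.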
rod['chrom'] = np.nan
for chrom, (start, end) in zip(cs.index,zip([0] + list(cs.values[:-1]),cs.values)):
end = min(end,rod.index[-1])
rod.loc[slice(start,end),'chrom'] = chrom
#print chrom, rod.loc[slice(start,end),columns[0]]
        rod.loc[slice(start,end),'pos'] = rod.ix[slice(start,end)].index - start
if drop:
rod.set_index(['chrom','pos'], inplace=True, drop=True)
else:
rod = rod.reset_index().set_index(['chrom','pos'], drop=True)
columns = list(columns) + ['index']
#if not rod.index.is_monotonic:
# rod.sort_index(inplace=True)
return rod[columns] if len(columns)>1 else rod[columns[0]]
def feature_df_to_1d(feature_df, chrom_len_s):
"""
    Converts multiindex feature_df (chrom, start)
    to a single numeric index running through all
chromosomes. The column 'end' is also converted.
See rod_to_1d for details.
"""
feature_df = feature_df.copy()
feature_df.index.names = (feature_df.index.names[0], 'pos')
feature_df_1d = rod_to_1d(feature_df, chrom_len_s,drop=False)
#print feature_df_1d
end_df = feature_df_1d.reset_index().set_index(['chrom','end'])
#print end_df
end_df.drop('pos',axis=1,inplace=True)
end_df.rename(columns={'index':'start'}, inplace=True)
end_df.index.names = (end_df.index.names[0], 'pos')
end_1d = rod_to_1d(end_df, chrom_len_s)
end_1d.index.name = 'end'
end_1d = end_1d.reset_index().set_index('start')
end_1d.index.name = 'index'
if not end_1d.index.is_monotonic:
end_1d.sort_index(inplace=True)
return end_1d
def feature_df_to_chrompos(feature_df_1d, chrom_len_s):
"""
    Converts feature_df with a single numeric index
running through all chromosomes to
multiindex (chrom,start)
The column 'end' is also converted.
This is the inverse function of
feature_df_to_1d().
See rod_to_chrompos for details.
"""
feature_df_1d = feature_df_1d.copy()
feature_df = rod_to_chrompos(feature_df_1d, chrom_len_s)
feature_df.index.names = (feature_df.index.names[0], 'start')
end_df_1d = feature_df.reset_index().set_index('end')
end_df = rod_to_chrompos(end_df_1d, chrom_len_s)
end_df.index.names = (end_df.index.names[0],'end')
end_df = end_df.reset_index().set_index(['chrom','start'])
if not end_df.index.is_monotonic:
end_df.sortlevel(inplace=True)
return end_df
def data_per_feature_FI(rod, feature_df, feature_name = 'feature'):
"""
    Get the entries in rod which lie within a feature
(e.g. gene) in feature_df. FI stands for flattened index.
Input:
rod (reference ordered data)... pandas series or data frame with numeric index
such as SNP genotypes
feature_df (gene annotation data frame)... index must be (chrom,feature_name), must have columns 'start', 'end'
"""
rod = pd.DataFrame(rod)
pos_rel_to_start = feature_df.index.searchsorted(rod.index)
#feature_df["end"] is not necessarily sorted, but wouldn't sorting it here
#lead to problems as well?
pos_rel_to_end = np.searchsorted(feature_df["end"].values,rod.index.values)
in_feature = (pos_rel_to_start - pos_rel_to_end) == 1
feature_id = feature_df.iloc[pos_rel_to_end[in_feature]][feature_name].values
rod = rod[in_feature].copy()
    rod[feature_name] = feature_id
    return rod
|
feilchenfeldt/enrichme
|
pandas_util.py
|
Python
|
mit
| 15,740
|
import sys
import os
import Image
def simpleQuant():
im = Image.open('bubbles.jpg')
w,h = im.size
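    # Uniform quantization: snap each channel to a coarser grid (red to multiples
    # of 36, green and blue to multiples of 42), reducing the palette to roughly
    # 8 x 7 x 7 colors.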
for row in range(h):
for col in range(w):
r,g,b = im.getpixel((col,row))
r = r // 36 * 36
g = g // 42 * 42
b = b // 42 * 42
im.putpixel((col,row),(r,g,b))
im.show()
|
robin1885/algorithms-exercises-using-python
|
source-code-from-author-book/Listings-for-Second-Edition/listing_8_21.py
|
Python
|
mit
| 347
|
# @Author: ganeshkumarm
# @Date: 2016-11-19T19:20:11+05:30
# @Last modified by: ganeshkumarm
# @Last modified time: 2016-11-19T19:20:45+05:30
#Built in modules
import os
import sys
import time
import subprocess
import datetime
import platform
from win10toast import ToastNotifier
#Used defined module
import exception
class Notify(object):
def __init__(self):
self.title = 'Alert From Alertify'
self.platform = platform.system()
self.toaster = ToastNotifier()
def counter(self, notify_time, message):
s = 00
m = notify_time
if self.platform == 'Linux':
os.system('clear')
elif self.platform == 'Windows':
os.system('cls');
print "Alertify"
print "Alerts in %d minutes %d seconds ..." % (m, s)
time.sleep(1)
s = 59
m -= 1
while s >= 00:
if m == -1:
print "Completed"
print "Bye"
return
if self.platform == 'Linux':
os.system('clear')
elif self.platform == 'Windows':
os.system('cls');
print "Alertify"
print "-------"
print message
print "-" * len(message)
print "Alerts in %d minutes %d seconds ..." % (m, s)
time.sleep(1)
s -= 1
if s == 0:
s = 59
m -= 1
def sleep_time(self, notify_time):
try:
time.sleep(notify_time * 60)
except Exception, e:
print e
def sendNotification(self, message, start_time):
try:
end_time = datetime.datetime.now()
diff_time_in_delta = end_time - start_time
diff_time_in_mins = divmod(diff_time_in_delta.days * 86400 + diff_time_in_delta.seconds, 60)
diff_time_msg = ' (Set ' + str(diff_time_in_mins[0]) + ' minutes ' + str(diff_time_in_mins[1]) + ' seconds ago)'
if self.platform == 'Linux':
os.system('notify-send "'+self.title+'" "'+message+'\r'+diff_time_msg+'"')
elif self.platform == 'Windows':
self.toaster.show_toast(self.title, message+'\n'+str(diff_time_msg), duration=300)
except Exception, e:
print e
def main():
try:
counter_flag = True
notify = Notify()
if len(sys.argv) <= 2:
try:
raise exception.PassArgument("Please pass Time and Message as arguments")
except exception.PassArgument, e:
print e.args
print "Exiting ...."
sys.exit()
notify_time = sys.argv[1]
if not notify_time.isdigit():
try:
raise exception.InvalidArgument("Time parameter must be a positive integer value")
except exception.InvalidArgument, e:
print e.args
print "Exiting ...."
sys.exit()
notify_time = int(sys.argv[1])
if sys.argv[len(sys.argv) - 1] == '--no-counter':
message = ' '.join([sys.argv[i] for i in range(2, len(sys.argv) - 1)])
counter_flag = False
else:
message = ' '.join([sys.argv[i] for i in range(2, len(sys.argv))])
start_time = datetime.datetime.now()
if counter_flag:
notify.counter(notify_time, message)
else:
notify.sleep_time(notify_time)
notify.sendNotification(message, start_time)
except KeyboardInterrupt:
print "\nQuitting ..."
print "Bye"
if __name__ == "__main__":
main()
|
GaneshmKumar/Alertify
|
alertify/alertify.py
|
Python
|
mit
| 3,659
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Post'
db.create_table(u'blog_post', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('title', self.gf('django.db.models.fields.CharField')(max_length=60)),
('body', self.gf('django.db.models.fields.TextField')()),
('tags', self.gf('django.db.models.fields.TextField')()),
('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
))
db.send_create_signal(u'blog', ['Post'])
def backwards(self, orm):
# Deleting model 'Post'
db.delete_table(u'blog_post')
models = {
u'blog.post': {
'Meta': {'object_name': 'Post'},
'body': ('django.db.models.fields.TextField', [], {}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'tags': ('django.db.models.fields.TextField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '60'})
}
}
complete_apps = ['blog']
|
daviferreira/leticiastallone.com
|
leticiastallone/blog/migrations/0001_initial.py
|
Python
|
mit
| 1,393
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Q2(c): Recurrent neural nets for NER
"""
from __future__ import absolute_import
from __future__ import division
import argparse
import logging
import sys
import tensorflow as tf
import numpy as np
logger = logging.getLogger("hw3.q2.1")
logger.setLevel(logging.DEBUG)
logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.DEBUG)
class RNNCell(tf.nn.rnn_cell.RNNCell):
"""Wrapper around our RNN cell implementation that allows us to play
nicely with TensorFlow.
"""
def __init__(self, input_size, state_size):
self.input_size = input_size
self._state_size = state_size
@property
def state_size(self):
return self._state_size
@property
def output_size(self):
return self._state_size
def __call__(self, inputs, state, scope=None):
"""Updates the state using the previous @state and @inputs.
Remember the RNN equations are:
h_t = sigmoid(x_t W_x + h_{t-1} W_h + b)
TODO: In the code below, implement an RNN cell using @inputs
(x_t above) and the state (h_{t-1} above).
            - Define W_x, W_h, b to be variables of the appropriate shape
using the `tf.get_variable' functions. Make sure you use
the names "W_x", "W_h" and "b"!
- Compute @new_state (h_t) defined above
Tips:
- Remember to initialize your matrices using the xavier
initialization as before.
Args:
inputs: is the input vector of size [None, self.input_size]
state: is the previous state vector of size [None, self.state_size]
scope: is the name of the scope to be used when defining the variables inside.
Returns:
a pair of the output vector and the new state vector.
"""
scope = scope or type(self).__name__
# It's always a good idea to scope variables in functions lest they
# be defined elsewhere!
with tf.variable_scope(scope):
### YOUR CODE HERE (~6-10 lines)
W_x = tf.get_variable("W_x", shape=(self.input_size, self.state_size), initializer=tf.contrib.layers.xavier_initializer())
W_h = tf.get_variable("W_h", shape=(self.state_size, self.output_size), initializer=tf.contrib.layers.xavier_initializer())
b = tf.get_variable("b", shape=(self.output_size), initializer=tf.contrib.layers.xavier_initializer())
new_state = tf.nn.sigmoid(tf.matmul(inputs, W_x) + tf.matmul(state, W_h) + b)
### END YOUR CODE ###
# For an RNN , the output and state are the same (N.B. this
# isn't true for an LSTM, though we aren't using one of those in
# our assignment)
output = new_state
return output, new_state
def test_rnn_cell():
with tf.Graph().as_default():
with tf.variable_scope("test_rnn_cell"):
x_placeholder = tf.placeholder(tf.float32, shape=(None,3))
h_placeholder = tf.placeholder(tf.float32, shape=(None,2))
with tf.variable_scope("rnn"):
tf.get_variable("W_x", initializer=np.array(np.eye(3,2), dtype=np.float32))
tf.get_variable("W_h", initializer=np.array(np.eye(2,2), dtype=np.float32))
tf.get_variable("b", initializer=np.array(np.ones(2), dtype=np.float32))
tf.get_variable_scope().reuse_variables()
cell = RNNCell(3, 2)
y_var, ht_var = cell(x_placeholder, h_placeholder, scope="rnn")
init = tf.global_variables_initializer()
with tf.Session() as session:
session.run(init)
x = np.array([
[0.4, 0.5, 0.6],
[0.3, -0.2, -0.1]], dtype=np.float32)
h = np.array([
[0.2, 0.5],
[-0.3, -0.3]], dtype=np.float32)
y = np.array([
[0.832, 0.881],
[0.731, 0.622]], dtype=np.float32)
ht = y
y_, ht_ = session.run([y_var, ht_var], feed_dict={x_placeholder: x, h_placeholder: h})
print("y_ = " + str(y_))
print("ht_ = " + str(ht_))
assert np.allclose(y_, ht_), "output and state should be equal."
assert np.allclose(ht, ht_, atol=1e-2), "new state vector does not seem to be correct."
def do_test(_):
logger.info("Testing rnn_cell")
test_rnn_cell()
logger.info("Passed!")
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Tests the RNN cell implemented as part of Q2 of Homework 3')
subparsers = parser.add_subparsers()
command_parser = subparsers.add_parser('test', help='')
command_parser.set_defaults(func=do_test)
ARGS = parser.parse_args()
if ARGS.func is None:
parser.print_help()
sys.exit(1)
else:
ARGS.func(ARGS)
|
AppleFairy/CS224n-Natural-Language-Processing-with-Deep-Learning
|
assignment/assignment3/q2_rnn_cell.py
|
Python
|
mit
| 4,987
|
# The MIT License (MIT)
#
# Copyright (c) 2016 Sean Quinn
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from dice.tokens import Term
import pytest
def test_initialize_term():
"""
"""
pass
def test_term_repr():
pass
def test_term_str():
pass
def test_evaluate_term():
pass
|
extesla/dice-python
|
tests/dice/tokens/test_term.py
|
Python
|
mit
| 1,315
|
import _plotly_utils.basevalidators
class CustomdatasrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(self, plotly_name="customdatasrc", parent_name="scatter", **kwargs):
super(CustomdatasrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
**kwargs
)
|
plotly/plotly.py
|
packages/python/plotly/plotly/validators/scatter/_customdatasrc.py
|
Python
|
mit
| 411
|
"""
Contains base data structures for defining graph constrained group testing problem,
and interfaces to operate on them.
Basic structure to exchange graph constrained group testing problem definition is :class:`Problem`.
It consists of enumeration of faulty elements, graph of links between elements and natural language
description of the problem. Graph is described by :class:`ProblemGraph` which consists of
:class:`networkx.DiGraph`, and distinguished nodes stored in :attr:`ProblemGraph.source`,
and :attr:`ProblemGraph.sink`
Interface of every algorithm solving group constrained group testing problem is defined by
:class:`Solver`, Abstract class :class:`ExperimentStatistics` defines generic interface that can
be used by experiment runners to verify result returned by solver. Result later is stored together
with statistics (:class:`TestStatistics`) in memory, where it can be retrieved for each problem/solver pair.
Experiment runner is a function accepting :class:`Experiment` parameter that fills it during call.
"""
import collections
from abc import ABCMeta, abstractmethod
class ToDict(object):
__metaclass__ = ABCMeta
@abstractmethod
def toDict(self):
raise NotImplementedError()
class EmptyToDict(ToDict):
def toDict(self):
return {}
class Problem(collections.namedtuple("Problem", ["all_nodes", "faulty_set", "description"]), EmptyToDict):
pass
class GCGTProblem(collections.namedtuple("GCGTProblem", ["all_nodes", "faulty_set", "description", "problem_graph"]), EmptyToDict):
pass
ProblemGraph = collections.namedtuple("ProblemGraph", ["graph", "source", "sink"])
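# For illustration only (hypothetical toy values, assuming networkx is imported):
#   g = networkx.DiGraph()
#   g.add_edges_from([("s", 1), (1, 2), (2, 3), (3, "t")])
#   problem = GCGTProblem(all_nodes={1, 2, 3}, faulty_set={2},
#                         description="toy example",
#                         problem_graph=ProblemGraph(g, source="s", sink="t"))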
class ExperimentStatistics(object):
"""
Maintains statistics related with the experiment, for each problem and solver statistics object is gathered
"""
def __init__(self, rendererMapping):
self._renderers = rendererMapping or {}
for k, v in self._renderers.iteritems():
if v is None:
self._renderers[k] = lambda x: x.toDict()
self.results = []
self.headers = set({})
def set_result(self, objectsMapping):
result = {}
for k, v in objectsMapping.iteritems():
rendered = self._render(k, v)
self._add_headers(rendered)
result.update(rendered)
self.results.append(result)
def _add_headers(self, rendered):
for k in rendered:
self.headers.add(k)
def _render(self, rendererIdentifier, obj):
result = {}
if isinstance(obj, (int, float, str)):
defaultrenderer = lambda x: {'value': x}
else:
defaultrenderer = lambda obj: obj.toDict()
renderer = self._renderers.get(rendererIdentifier, defaultrenderer)
for k, v in renderer(obj).iteritems():
result[self._join(rendererIdentifier, k)] = v
return result
def _join(self, *args):
return ".".join(args)
def process(self):
raise NotImplementedError()
class TestStatistics(ToDict):
"""
Maintains various statistics related with the single run of group testing algorithm
"""
def __init__(self):
self.variable_dict = {}
def set_var(self, var, value):
self.variable_dict[var] = value
def get_var(self, var):
return self.variable_dict[var]
def inc_var(self, var):
self.variable_dict[var] = self.variable_dict.get(var, 0) + 1
def toDict(self):
return self.variable_dict
class Solver(ToDict):
SOLVER_TYPE_TAG = 'solver_type'
def __init__(self, problem_description, tester, *args, **kwargs):
raise NotImplementedError()
def solve(self):
"""
runs algorithm solving graph constrained group testing problem
:returns: set of nodes identified by algorithm as positive
:rtype: set
"""
raise NotImplementedError()
class SolverError(Exception):
pass
class GCGTSolver(Solver):
"""
Interface of classes implementing combinatorial group testing algorithm.
Problem description and tester object have to be inserted in constructor
"""
def __init__(self, problem_description, tester, *args, **kwargs):
"""
:param problem_description: graph constrained combinatorial problem description
:type problem_description: base_types.Problem
:param tester: tester object which will test all paths
:type tester: base_types.PathTester
"""
self.problem_description = problem_description
self.graph = self.problem_description.problem_graph.graph
self.source = self.problem_description.problem_graph.source
self.sink = self.problem_description.problem_graph.sink
self.tester = tester
class SetTester(object):
def test_paths(self, paths):
"""
check results for batch tests of paths
:param paths: paths that will be tested
:type paths: list[set]
:returns: list of boolean representing results for each of the `paths`.
:rtype: list[bool]
"""
raise NotImplementedError()
|
szredinger/graph-constr-group-testing
|
graph_constr_group_testing/core/base_types.py
|
Python
|
mit
| 5,126
|
import matplotlib.pyplot as pl
import numpy as np
import math
from matplotlib.collections import LineCollection
from matplotlib.colors import colorConverter
def plot(X, m, x_star, t, z_t):
fig = pl.figure(figsize=(10,10))
# Draw the grid first
ax = pl.axes()
ax.set_xlim(-4,20)
ax.set_ylim(-4,20)
ax.xaxis.set_major_locator(pl.MultipleLocator(5.0))
ax.xaxis.set_minor_locator(pl.MultipleLocator(1.0))
ax.yaxis.set_major_locator(pl.MultipleLocator(5.0))
ax.yaxis.set_minor_locator(pl.MultipleLocator(1.0))
ax.grid(which='major', axis='x', linewidth=0.75, linestyle='-', color='0.75')
ax.grid(which='minor', axis='x', linewidth=0.25, linestyle='-', color='0.75')
ax.grid(which='major', axis='y', linewidth=0.75, linestyle='-', color='0.75')
ax.grid(which='minor', axis='y', linewidth=0.25, linestyle='-', color='0.75')
# Draw map
for y, row in enumerate(m):
for x, cell in enumerate(row):
if (cell == 'W'):
rect = pl.Rectangle((x,y), 1, 1, fill=True,color='#cacaca')
ax.add_patch(rect)
# Draw the robot and its direction
x,y,theta = x_star['x'], x_star['y'], x_star['theta']
dx = 1 * math.cos(theta)
dy = 1 * math.sin(theta)
ax.arrow(x,y,dx,dy, head_width=.4, head_length=0.5, length_includes_head=True)
circle = pl.Circle((x, y), radius=0.35, fc='y')
ax.add_patch(circle)
# Draw information
directions = 'n nw w sw s se e ne'.split()
title_arr = []
#print z_t
for direction in directions:
#print z_t[direction]
title_arr.append("%s: %4.2f" % (direction, z_t[direction]))
ax.set_title('; '.join(title_arr))
#print X
xs = [xx[0]['x'] for xx in X]
ys = [xx[0]['y'] for xx in X]
pl.scatter(xs, ys)
return fig
|
mufid/berkilau
|
ws/CSUIBotClass2014/util/plotter2.py
|
Python
|
mit
| 1,816
|
from unittest import skipIf
from django.conf import settings
def skipIfDefaultUser(test_func):
"""
Skip a test if a default user model is in use.
"""
return skipIf(settings.AUTH_USER_MODEL == "auth.User", "Default user model in use")(
test_func
)
def skipIfCustomUser(test_func):
"""
Skip a test if a custom user model is in use.
"""
return skipIf(settings.AUTH_USER_MODEL != "auth.User", "Custom user model in use")(
test_func
)
|
yunojuno/django-request-profiler
|
tests/utils.py
|
Python
|
mit
| 491
|
from django import forms
from django.contrib.auth.models import User
from .models import Perfil,SolicitudColaboracion
class SolicitudColaboracionForm(forms.ModelForm):
class Meta:
model = SolicitudColaboracion
fields = ('name','licenciatura_leyes','telefono','fecha_nacimiento')
|
SurielRuano/Orientador-Legal
|
colaboradores/forms.py
|
Python
|
mit
| 287
|
#!/usr/bin/env python
"""
voice_nav.py allows controlling a mobile base using simple speech commands.
Based on the voice_cmd_vel.py script by Michael Ferguson in the pocketsphinx ROS package.
"""
import roslib; #roslib.load_manifest('pi_speech_tutorial')
import rospy
from geometry_msgs.msg import Twist
from std_msgs.msg import String
from math import copysign
from sound_play.libsoundplay import SoundClient
class voice_cmd_vel:
def __init__(self):
self.rate = rospy.get_param("~rate", 5)
r = rospy.Rate(self.rate)
self.paused = False
self.voice = rospy.get_param("~voice", "voice_cmu_us_bdl_arctic_clunits")
self.wavepath = rospy.get_param("~wavepath", "")
# Create the sound client object
self.soundhandle = SoundClient()
rospy.sleep(1)
self.soundhandle.stopAll()
# Subscribe to the /recognizer/output topic to receive voice commands.
rospy.Subscriber('/recognizer/output', String, self.speechCb)
# A mapping from keywords to commands.
self.keywords_to_command = {'stop': ['stop', 'halt', 'abort', 'kill', 'panic', 'off', 'freeze', 'shut down', 'turn off', 'help', 'help me'],
'bye': ['bye', 'cheers', 'goodbye', 'see you', 'bye'],
'cafe' : ['cafe', 'campus', 'tea', 'coffee', 'eat'],
'hello': ['hi', 'hey', 'hello'],
'help' : ['help me', 'can help', 'help'],
'name' : ['your name', 'name'],
'wash' : ['washroom', 'toilet'],
'library' : ['library', 'book', 'borrow'],
'labs' : ['labs'],
'talk': ['talk to me?', 'really talk?', 'you talk', 'you really talk?', 'talk'],
'amazing' : ['amazing', 'wonderful'],
'psychology' : ['psychology'],
'teaching' : ['teaching', 'music'],
'engineering' : ['engineering'],
'biology' : ['biology', 'english', 'chemistry'],
'maths' : ['computing', 'mathematics'],
'geo' : ['geology', 'geography'],
'marine' : ['marine'],
'art' : ['art'],
'roland' : ['reception', 'architecture'],
'business' : ['business'],
'staff' : ['staff'],
'sports' : ['sports'],
'robots' : ['robotics', 'robots'],
'visit' : ['visit', 'to do'],
'supermarket' : ['shop', 'supermarket'],
'cashpoint' : ['cash points', 'ATM', 'cash machines'],
'day' : ['day', 'today'],
'weather' : ['weather'],
'pause': ['pause speech'],
'continue': ['continue speech']}
rospy.loginfo("Ready to receive voice commands")
# We have to keep publishing the cmd_vel message if we want the robot to keep moving.
while not rospy.is_shutdown():
r.sleep()
def get_command(self, data):
for (command, keywords) in self.keywords_to_command.iteritems():
for word in keywords:
if data.find(word) > -1:
return command
def speechCb(self, msg):
command = self.get_command(msg.data)
rospy.loginfo("Command: " + str(command))
if command == 'pause':
self.paused = True
elif command == 'continue':
self.paused = False
if self.paused:
return
if command == 'hello':
self.soundhandle.say("Greetings!.", self.voice)
if command == 'help':
self.soundhandle.say("Ask me questions", self.voice)
if command == 'talk':
self.soundhandle.say("yes, I can", self.voice)
if command == 'bye':
self.soundhandle.say("Bye Bye", self.voice)
if command == 'weather':
self.soundhandle.say("I Don't know.", self.voice)
if command == 'supermarket':
self.soundhandle.say("The nearest supermarket is the TESCO!. ", self.voice)
if command == 'day':
self.soundhandle.say("It's tuesday!.", self.voice)
if command == 'psychology':
self.soundhandle.say("It's in link building!", self.voice)
if command == 'teaching':
self.soundhandle.say("the rolle building!.", self.voice)
if command == 'engineering':
self.soundhandle.say("That's right here!.", self.voice)
if command == 'biology':
self.soundhandle.say("It's is in the Davy building!.!", self.voice)
if command == 'maths':
self.soundhandle.say("In the babbage building!.!", self.voice)
if command == 'geo':
self.soundhandle.say("It's in the Fitzroy building!.!", self.voice)
if command == 'marine':
self.soundhandle.say("In the reynolds And the marine building.! ", self.voice)
if command == 'art':
self.soundhandle.say(" in the scott building!.!", self.voice)
if command == 'roland':
self.soundhandle.say(" in the roland levinsky building!.!", self.voice)
if command == 'business':
self.soundhandle.say("should be cookworthy building!", self.voice)
if command == 'staff':
self.soundhandle.say("In the Portland Square building!", self.voice)
if command == 'sports':
self.soundhandle.say("It's the Nancy Astor building. ", self.voice)
if command == 'robots':
self.soundhandle.say("in Smeaton's building or in Portland Square. !", self.voice)
if command == 'cashpoint':
self.soundhandle.say("There are some on the eastern exit of this building.!!", self.voice)
if command == 'visit':
self.soundhandle.say("Well, you can walk along the seashore. May be.!", self.voice)
if command == 'name':
self.soundhandle.say("charlie.", self.voice)
if command == 'amazing':
self.soundhandle.say("thank you so much.", self.voice)
if command == 'cafe':
self.soundhandle.say(" at the S U shop.", self.voice)
if command == 'wash':
self.soundhandle.say("the second floor and the third floor.", self.voice)
if command == 'library':
self.soundhandle.say("It's next to the Smeaton's building.", self.voice)
if command == 'labs':
self.soundhandle.say(" on the third floor.", self.voice)
def cleanup(self):
# When shutting down be sure to stop the robot! Publish a Twist message consisting of all zeros.
rospy.loginfo("Shutting Down..")
if __name__=="__main__":
rospy.init_node('voice_nav')
try:
voice_cmd_vel()
except:
pass
|
jdekerautem/TurtleBot-Receptionist
|
pocketsphinx_files/notsotalkative.py
|
Python
|
mit
| 6,969
|
from tabulate import tabulate
class Response():
    message = None
    data = None
def print(self):
if self.message:
if type(self.message) == "str":
print(self.message)
elif type(self.message) == "list":
for message in self.message:
print("{}\n".format(message))
if (self.data):
if len(self.data["rows"]) > 0:
print(tabulate(self.data["rows"], headers=self.data["headers"]))
else:
print("Empty!")
|
mozey/taskmage
|
taskmage/response.py
|
Python
|
mit
| 557
|
"""Add user
Revision ID: 13a57b7f084
Revises: None
Create Date: 2014-05-11 17:12:17.244013
"""
# revision identifiers, used by Alembic.
revision = '13a57b7f084'
down_revision = None
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('user',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('email', sa.String(length=100), nullable=True),
sa.Column('password_hash', sa.String(length=1000), nullable=True),
sa.PrimaryKeyConstraint('id')
)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_table('user')
### end Alembic commands ###
|
plumdog/datasheet
|
alembic/versions/13a57b7f084_add_user.py
|
Python
|
mit
| 741
|
# project/tests/test_auth.py
import time
import json
import unittest
from project.server import db
from project.server.models import User, BlacklistToken
from project.tests.base import BaseTestCase
def register_user(self, email, password):
return self.client.post(
'/auth/register',
data=json.dumps(dict(
email=email,
password=password
)),
content_type='application/json',
)
def login_user(self, email, password):
return self.client.post(
'/auth/login',
data=json.dumps(dict(
email=email,
password=password
)),
content_type='application/json',
)
class TestAuthBlueprint(BaseTestCase):
def test_registration(self):
""" Test for user registration """
with self.client:
response = register_user(self, 'joe@gmail.com', '123456')
data = json.loads(response.data.decode())
self.assertTrue(data['status'] == 'success')
self.assertTrue(data['message'] == 'Successfully registered.')
self.assertTrue(data['auth_token'])
self.assertTrue(response.content_type == 'application/json')
self.assertEqual(response.status_code, 201)
def test_registered_with_already_registered_user(self):
""" Test registration with already registered email"""
user = User(
email='joe@gmail.com',
password='test'
)
db.session.add(user)
db.session.commit()
with self.client:
response = register_user(self, 'joe@gmail.com', '123456')
data = json.loads(response.data.decode())
self.assertTrue(data['status'] == 'fail')
self.assertTrue(
data['message'] == 'User already exists. Please Log in.')
self.assertTrue(response.content_type == 'application/json')
self.assertEqual(response.status_code, 202)
def test_registered_user_login(self):
""" Test for login of registered-user login """
with self.client:
# user registration
resp_register = register_user(self, 'joe@gmail.com', '123456')
data_register = json.loads(resp_register.data.decode())
self.assertTrue(data_register['status'] == 'success')
self.assertTrue(
data_register['message'] == 'Successfully registered.'
)
self.assertTrue(data_register['auth_token'])
self.assertTrue(resp_register.content_type == 'application/json')
self.assertEqual(resp_register.status_code, 201)
# registered user login
response = login_user(self, 'joe@gmail.com', '123456')
data = json.loads(response.data.decode())
self.assertTrue(data['status'] == 'success')
self.assertTrue(data['message'] == 'Successfully logged in.')
self.assertTrue(data['auth_token'])
self.assertTrue(response.content_type == 'application/json')
self.assertEqual(response.status_code, 200)
def test_non_registered_user_login(self):
""" Test for login of non-registered user """
with self.client:
response = login_user(self, 'joe@gmail.com', '123456')
data = json.loads(response.data.decode())
self.assertTrue(data['status'] == 'fail')
self.assertTrue(data['message'] == 'User does not exist.')
self.assertTrue(response.content_type == 'application/json')
self.assertEqual(response.status_code, 404)
def test_user_status(self):
""" Test for user status """
with self.client:
resp_register = register_user(self, 'joe@gmail.com', '123456')
response = self.client.get(
'/auth/status',
headers=dict(
Authorization='Bearer ' + json.loads(
resp_register.data.decode()
)['auth_token']
)
)
data = json.loads(response.data.decode())
self.assertTrue(data['status'] == 'success')
self.assertTrue(data['data'] is not None)
self.assertTrue(data['data']['email'] == 'joe@gmail.com')
            self.assertIn(data['data']['admin'], (True, False))
self.assertEqual(response.status_code, 200)
def test_user_status_malformed_bearer_token(self):
""" Test for user status with malformed bearer token"""
with self.client:
resp_register = register_user(self, 'joe@gmail.com', '123456')
response = self.client.get(
'/auth/status',
headers=dict(
Authorization='Bearer' + json.loads(
resp_register.data.decode()
)['auth_token']
)
)
data = json.loads(response.data.decode())
self.assertTrue(data['status'] == 'fail')
self.assertTrue(data['message'] == 'Bearer token malformed.')
self.assertEqual(response.status_code, 401)
def test_valid_logout(self):
""" Test for logout before token expires """
with self.client:
# user registration
resp_register = register_user(self, 'joe@gmail.com', '123456')
data_register = json.loads(resp_register.data.decode())
self.assertTrue(data_register['status'] == 'success')
self.assertTrue(
data_register['message'] == 'Successfully registered.')
self.assertTrue(data_register['auth_token'])
self.assertTrue(resp_register.content_type == 'application/json')
self.assertEqual(resp_register.status_code, 201)
# user login
resp_login = login_user(self, 'joe@gmail.com', '123456')
data_login = json.loads(resp_login.data.decode())
self.assertTrue(data_login['status'] == 'success')
self.assertTrue(data_login['message'] == 'Successfully logged in.')
self.assertTrue(data_login['auth_token'])
self.assertTrue(resp_login.content_type == 'application/json')
self.assertEqual(resp_login.status_code, 200)
# valid token logout
response = self.client.post(
'/auth/logout',
headers=dict(
Authorization='Bearer ' + json.loads(
resp_login.data.decode()
)['auth_token']
)
)
data = json.loads(response.data.decode())
self.assertTrue(data['status'] == 'success')
self.assertTrue(data['message'] == 'Successfully logged out.')
self.assertEqual(response.status_code, 200)
def test_valid_blacklisted_token_logout(self):
""" Test for logout after a valid token gets blacklisted """
with self.client:
# user registration
resp_register = register_user(self, 'joe@gmail.com', '123456')
data_register = json.loads(resp_register.data.decode())
self.assertTrue(data_register['status'] == 'success')
self.assertTrue(
data_register['message'] == 'Successfully registered.')
self.assertTrue(data_register['auth_token'])
self.assertTrue(resp_register.content_type == 'application/json')
self.assertEqual(resp_register.status_code, 201)
# user login
resp_login = login_user(self, 'joe@gmail.com', '123456')
data_login = json.loads(resp_login.data.decode())
self.assertTrue(data_login['status'] == 'success')
self.assertTrue(data_login['message'] == 'Successfully logged in.')
self.assertTrue(data_login['auth_token'])
self.assertTrue(resp_login.content_type == 'application/json')
self.assertEqual(resp_login.status_code, 200)
# blacklist a valid token
blacklist_token = BlacklistToken(
token=json.loads(resp_login.data.decode())['auth_token'])
db.session.add(blacklist_token)
db.session.commit()
# blacklisted valid token logout
response = self.client.post(
'/auth/logout',
headers=dict(
Authorization='Bearer ' + json.loads(
resp_login.data.decode()
)['auth_token']
)
)
data = json.loads(response.data.decode())
self.assertTrue(data['status'] == 'fail')
self.assertTrue(data['message'] == 'Token blacklisted. Please log in again.')
self.assertEqual(response.status_code, 401)
def test_valid_blacklisted_token_user(self):
""" Test for user status with a blacklisted valid token """
with self.client:
resp_register = register_user(self, 'joe@gmail.com', '123456')
# blacklist a valid token
blacklist_token = BlacklistToken(
token=json.loads(resp_register.data.decode())['auth_token'])
db.session.add(blacklist_token)
db.session.commit()
response = self.client.get(
'/auth/status',
headers=dict(
Authorization='Bearer ' + json.loads(
resp_register.data.decode()
)['auth_token']
)
)
data = json.loads(response.data.decode())
self.assertTrue(data['status'] == 'fail')
self.assertTrue(data['message'] == 'Token blacklisted. Please log in again.')
self.assertEqual(response.status_code, 401)
if __name__ == '__main__':
unittest.main()
|
L33tCh/afj-flask
|
project/tests/test_auth.py
|
Python
|
mit
| 9,874
|
"""
[DEPRECATED] Run a single container in debug mode
"""
import typer
from controller import print_and_exit
from controller.app import Application
# Deprecated since 2.1
@Application.app.command(help="Replaced by run --debug command")
def volatile(
service: str = typer.Argument(
...,
help="Service name",
shell_complete=Application.autocomplete_allservice,
),
command: str = typer.Argument(
"bash", help="UNIX command to be executed on selected running service"
),
user: str = typer.Option(
None,
"--user",
"-u",
help="User existing in selected service",
show_default=False,
),
) -> None:
# Deprecated since 2.1
print_and_exit("Volatile command is replaced by rapydo run --debug {}", service)
|
rapydo/do
|
controller/commands/volatile.py
|
Python
|
mit
| 802
|
# -*- coding: utf-8 -*-
# A search engine based on probabilistic models of information retrieval.
# Author - Janu Verma
# email - jv367@cornell.edu
# http://januverma.wordpress.com/
# @januverma
import sys
from pydoc import help
import os
from collections import defaultdict
from math import log, sqrt
import operator
class ProbModel:
"""
    Implements probabilistic models for information retrieval.
"""
def __init__(self, directory):
"""
Arguments:
directory - Directory of documents to be searched.
"""
self.corpus = os.listdir(directory)
self.text = {}
for f in self.corpus:
f = os.path.join(directory,f)
with open(f) as doc:
info = doc.read()
self.text[f] = info
def words(self, document):
"""
All the words in a document.
Arguments:
document : A textual document.
Returns:
A list containing all the words in the document.
"""
words = document.split()
words = [x.lower() for x in words]
        words = [x for x in words if len(x) >= 2 and not x.isdigit()]
return words
def word_freq(self, wordlist):
"""
        Build a dictionary of words with the frequencies of their occurrence in the document.
Arguments:
        wordlist : A list of all the words in a document.
Returns:
A dictionary containing all the words in the document with their frequencies.
"""
wordFreq = defaultdict(int)
for w in wordlist:
wordFreq[w] += 1
return wordFreq
def vocabalury(self):
"""
All the words in the corpus.
Returns:
A list of all the words in the corpus.
"""
allWords = []
allDocs = self.text
for d in allDocs.keys():
d = allDocs[d]
docWords = self.words(d)
allWords.extend(docWords)
return allWords
def doc_freq(self):
"""
Compute the document frequency of all the terms in the corpus.
Returns:
A dictionary of all the terms in the corpus with their document frequency.
"""
allWords = self.vocabalury()
allWords = set(allWords)
allDocs = self.text
docFreq = defaultdict(int)
for x in allWords:
for d in allDocs.keys():
d = allDocs[d]
docTerms = self.words(d)
if (x in docTerms):
docFreq[x] += 1
return docFreq
def docScore(self, document, query, k, b):
"""
Compute the log odds ratio of the document being relevant to the query.
Arguments:
document : A textual document.
query : The search query.
k : tuning parameter for term frequency.
        b : tuning parameter for document length.
Returns:
A floating variable score
"""
# total number of docs
n = len(self.corpus)
# words in the document
docText = self.words(document)
# length of the document
l = len(docText)
# average length of a document
l_av = float(len(self.vocabalury()))/n
# document frequency dict
df = self.doc_freq()
# words in the document
tokens = self.words(document)
#term frequency dict
tf = self.word_freq(tokens)
        # initialize the score for the document
score = 0
# query
queryWords = self.words(query)
        for x in queryWords:
            tf_x = tf[x]
            df_x = df[x]
            if df_x == 0:
                # term does not occur in the corpus; it contributes nothing
                continue
            # inverse document frequency of the term.
            idf = log(float(n) / df_x)
# correction factor
correction = float((k + 1)*(tf_x))/(k*(1-b) + b*(l/(l_av)) + (tf_x))
# total contribution
contribution = idf * correction
score += contribution
return score
def ranking(self, query, k, b):
"""
Ranking of the documents based on their relevance to the query.
Arguments:
query: The search query
Returns:
A dictionary of all the documents in the corpus with their corresponding relevance odds ratio.
"""
        if k is None:
            k = 0
        if b is None:
            b = 0
documents = self.text
rankingDict = defaultdict(float)
for d in documents.keys():
docText = documents[d]
score = self.docScore(docText, query, k, b)
rankingDict[d] = score
return rankingDict
def search(self, query, n_docs, k=None, b=None):
"""
        Returns documents which are most relevant to the query.
Ranking is done by decreasing odds ratio for the document to be relevant for the query.
Arguments:
String query : Search query
        Integer n_docs : Number of matching documents retrieved.
Float k : tuning parameter for term frequency, (0<=k<=1).
A value of 0 corresponds to a binary model (no term frequency),
and a large value corresponds to using raw term frequency
        Float b: tuning parameter for document length, (0<=b<=1).
b = 1 corresponds to fully scaling the term weight by the document length,
while b = 0 corresponds to no length normalization.
Returns:
A list of length n_docs containing documents most relevant to the search query.
        The list is sorted in descending order.
"""
if (n_docs > len(self.corpus)):
n_docs = len(self.corpus)
relevantDocs = []
        rankings = self.ranking(query, k, b)
        rankings = sorted(rankings.items(), key=operator.itemgetter(1), reverse=True)
for i in range(n_docs):
u,v = rankings[i]
relevantDocs.append(u)
return relevantDocs
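# --- Usage sketch (not part of the original module) ---
# A minimal, hedged example of how ProbModel might be driven.  The directory
# name and query below are hypothetical placeholders, not values from the
# original project.
if __name__ == '__main__':
    engine = ProbModel('sample_docs')      # hypothetical folder of plain-text files
    # k tunes term-frequency saturation, b tunes document-length normalization.
    for rank, doc_path in enumerate(engine.search('probabilistic retrieval', 3, k=0.5, b=0.75), 1):
        print(rank, doc_path)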
|
Jverma/InfoR
|
InfoR/ProbabilitisticModels.py
|
Python
|
mit
| 5,245
|
from typing import NamedTuple, List
from data import crossword
class Clue(str):
def __init__(self, value) -> None:
        # str is immutable: the value is consumed by str.__new__, so it must not
        # be forwarded to the base __init__ (doing so raises TypeError on Python 3).
        super(Clue, self).__init__()
self._tokens = crossword.tokenize_clue(value)
class _Node(object):
_clue: Clue
_occupied: int
def __init__(self, clue: Clue, occupied: int) -> None:
self._clue = clue
self._occupied = occupied
class Parsed(List):
pass
# A list of nodes, initially Nulls
|
PhilHarnish/forge
|
src/puzzle/problems/crossword/_cryptic_nodes.py
|
Python
|
mit
| 437
|
from collections import defaultdict
from datetime import datetime, timedelta
from django.contrib.auth.models import User
from django.db import models
from django.db.models import Q, Count, Sum, Max, Min
from django.db.models.signals import pre_save
from django.dispatch import receiver
from hashlib import sha1
from proso.dict import group_keys_by_value_lists
from proso.django.cache import cache_pure
from proso.list import flatten
from proso_common.models import get_config
from proso_models.models import Answer, Item, get_environment, get_mastery_trashold, get_predictive_model, get_time_for_knowledge_overview
from time import time as time_lib
import json
import logging
LOGGER = logging.getLogger('django.request')
class TagManager(models.Manager):
def prepare_related(self):
return self.prefetch_related('concepts')
class Tag(models.Model):
"""
Arbitrary tag for concepts.
"""
type = models.CharField(max_length=50)
value = models.CharField(max_length=200)
lang = models.CharField(max_length=2)
type_name = models.CharField(max_length=100)
value_name = models.CharField(max_length=100)
objects = TagManager()
class Meta:
unique_together = ("type", "value", "lang")
def to_json(self, nested=False):
data = {
"id": self.pk,
"object_type": "tag",
"type": self.type,
"value": self.value,
"lang": self.lang,
"type_name": self.type_name,
"value_name": self.value_name,
}
if not nested:
data["concepts"] = [concept.to_json(nested=True) for concept in self.concepts.all()]
return data
def __str__(self):
return "{}: {}".format(self.type, self.value)
class ConceptManager(models.Manager):
def prepare_related(self):
return self.prefetch_related('tags', 'actions')
@cache_pure()
def get_concept_item_mapping(self, concepts=None, lang=None):
"""
Get mapping of concepts to items belonging to concept.
Args:
concepts (list of Concept): Defaults to None meaning all concepts
lang (str): language of concepts, if None use language of concepts
Returns:
dict: concept (int) -> list of item ids (int)
"""
if concepts is None:
concepts = self.filter(active=True)
if lang is not None:
concepts = concepts.filter(lang=lang)
if lang is None:
languages = set([concept.lang for concept in concepts])
if len(languages) > 1:
raise Exception('Concepts has multiple languages')
lang = list(languages)[0]
item_lists = Item.objects.filter_all_reachable_leaves_many([json.loads(concept.query)
for concept in concepts], lang)
return dict(zip([c.pk for c in concepts], item_lists))
@cache_pure()
def get_item_concept_mapping(self, lang):
""" Get mapping of items_ids to concepts containing these items
Args:
lang (str): language of concepts
Returns:
dict: item (int) -> set of concepts (int)
"""
concepts = self.filter(active=True, lang=lang)
return group_keys_by_value_lists(Concept.objects.get_concept_item_mapping(concepts, lang))
def get_concepts_to_recalculate(self, users, lang, concepts=None):
"""
        Get concepts which have some changes and have to be recalculated
Args:
            users (list of users or user): users whose user stats we are interested in
lang (str): language of used concepts
concepts (Optional[list of concepts]): list of primary keys of concepts or concepts
Defaults to None meaning all concepts.
Returns:
dict: user -> set of concepts (int) - in case of list of users
list of stats (str) - in case of one user
"""
only_one_user = False
if not isinstance(users, list):
only_one_user = True
users = [users]
mapping = self.get_item_concept_mapping(lang)
current_user_stats = defaultdict(lambda: {})
user_stats_qs = UserStat.objects.filter(user__in=users, stat="answer_count") # we need only one type
if concepts is not None:
user_stats_qs = user_stats_qs.filter(concept__in=concepts)
for user_stat in user_stats_qs:
current_user_stats[user_stat.user_id][user_stat.concept_id] = user_stat
concepts_to_recalculate = defaultdict(lambda: set())
for user, item, time in Answer.objects.filter(user__in=users)\
.values_list("user_id", "item").annotate(Max("time")):
if item not in mapping:
                # in reality this should be a corner case, so it is efficient to not filter Answers
continue # item is not in concept
time_expiration_lower_bound = get_config('proso_models', 'knowledge_overview.time_shift_hours', default=4)
time_expiration_factor = get_config('proso_models', 'knowledge_overview.time_expiration_factor', default=2)
for concept in mapping[item]:
if user in current_user_stats and concept in current_user_stats[user] \
and current_user_stats[user][concept].time > time:
if not self.has_time_expired(current_user_stats[user][concept].time, time, time_expiration_lower_bound, time_expiration_factor):
continue # cache is up to date
                if concepts is None or concept in ([c.pk for c in concepts] if type(concepts[0]) == Concept else concepts):
concepts_to_recalculate[user].add(concept)
if only_one_user:
return concepts_to_recalculate[users[0]]
return concepts_to_recalculate
def has_time_expired(self, cache_time, last_answer_time, lower_bound, expiration_factor):
cache_timedelta = cache_time - last_answer_time
if cache_timedelta > timedelta(days=365):
return False
if cache_timedelta < timedelta(hours=lower_bound):
return False
return cache_timedelta < expiration_factor * (datetime.now() - cache_time)
class Concept(models.Model):
"""
Model concepts for open learner model
"""
identifier = models.CharField(max_length=20, blank=True)
query = models.TextField()
name = models.CharField(max_length=200)
lang = models.CharField(max_length=2)
tags = models.ManyToManyField(Tag, related_name="concepts", blank=True)
active = models.BooleanField(default=True)
objects = ConceptManager()
class Meta:
unique_together = ("identifier", "lang")
def to_json(self, nested=False):
data = {
"id": self.pk,
"object_type": "concept",
"identifier": self.identifier,
"name": self.name,
"query": self.query,
"lang": self.lang,
}
if not nested:
data["tags"] = [tag.to_json(nested=True) for tag in self.tags.all()]
data["actions"] = [action.to_json(nested=True) for action in self.actions.all()]
return data
@staticmethod
def create_identifier(query):
"""
        Create identifier of concept
Args:
query (str): query defining concept
Returns:
str: identifier of length 20
"""
return sha1(query.encode()).hexdigest()[:20]
def __str__(self):
return self.name
def __repr__(self):
return "{}-{}".format(self.identifier, self.lang)
class ActionManager(models.Manager):
def prepare_related(self):
return self.select_related('concept')
class Action(models.Model):
"""
    Actions which can be done with a concept
"""
concept = models.ForeignKey(Concept, related_name="actions")
identifier = models.CharField(max_length=50)
name = models.CharField(max_length=200)
url = models.CharField(max_length=200)
objects = ActionManager()
def to_json(self, nested=False):
data = {
"id": self.pk,
"object_type": "action",
"identifier": self.identifier,
"name": self.name,
"url": self.url,
}
if not nested:
data["concept"] = self.concept.to_json(nested=True)
return data
def __str__(self):
return "{} - {}".format(self.concept, self.name)
class UserStatManager(models.Manager):
def prepare_related(self):
return self.select_related('concept')
def recalculate_concepts(self, concepts, lang=None):
"""
        Recalculate given concepts for given users
Args:
            concepts (dict): user id (int) -> set of concepts to recalculate
            lang (Optional[str]): language used to get items in all concepts (cached).
                Defaults to None; in that case items are fetched only for the used concepts
"""
if len(concepts) == 0:
return
if lang is None:
items = Concept.objects.get_concept_item_mapping(concepts=Concept.objects.filter(pk__in=set(flatten(concepts.values()))))
else:
items = Concept.objects.get_concept_item_mapping(lang=lang)
environment = get_environment()
mastery_threshold = get_mastery_trashold()
for user, concepts in concepts.items():
all_items = list(set(flatten([items[c] for c in concepts])))
answer_counts = environment.number_of_answers_more_items(all_items, user)
correct_answer_counts = environment.number_of_correct_answers_more_items(all_items, user)
predictions = dict(list(zip(all_items, get_predictive_model().
predict_more_items(environment, user, all_items, time=get_time_for_knowledge_overview()))))
new_user_stats = []
stats_to_delete_condition = Q()
for concept in concepts:
answer_aggregates = Answer.objects.filter(user=user, item__in=items[concept]).aggregate(
time_spent=Sum("response_time"),
sessions=Count("session", True),
time_first=Min("time"),
time_last=Max("time"),
)
stats = {
"answer_count": sum(answer_counts[i] for i in items[concept]),
"correct_answer_count": sum(correct_answer_counts[i] for i in items[concept]),
"item_count": len(items[concept]),
"practiced_items_count": sum([answer_counts[i] > 0 for i in items[concept]]),
"mastered_items_count": sum([predictions[i] >= mastery_threshold for i in items[concept]]),
"prediction": sum([predictions[i] for i in items[concept]]) / len(items[concept]),
"time_spent": answer_aggregates["time_spent"] / 1000,
"session_count": answer_aggregates["sessions"],
"time_first": answer_aggregates["time_first"].timestamp(),
"time_last": answer_aggregates["time_last"].timestamp(),
}
stats_to_delete_condition |= Q(user=user, concept=concept)
for stat_name, value in stats.items():
new_user_stats.append(UserStat(user_id=user, concept_id=concept, stat=stat_name, value=value))
self.filter(stats_to_delete_condition).delete()
self.bulk_create(new_user_stats)
def get_user_stats(self, users, lang=None, concepts=None, since=None, recalculate=True):
"""
Finds all UserStats of given concepts and users.
Recompute UserStats if necessary
Args:
users (Optional[list of users] or [user]): list of primary keys of user or users
Defaults to None meaning all users.
            lang (string): use only concepts with the given lang. Defaults to None meaning all languages.
concepts (Optional[list of concepts]): list of primary keys of concepts or concepts
Defaults to None meaning all concepts.
Returns:
            dict: user_id -> dict (concept_identifier -> (stat_name -> value)) -- for more users
            dict: concept_identifier -> (stat_name -> value) -- for one user
"""
only_one_user = False
if not isinstance(users, list):
users = [users]
only_one_user = True
if recalculate:
if lang is None:
raise ValueError('Recalculation without lang is not supported.')
time_start = time_lib()
concepts_to_recalculate = Concept.objects.get_concepts_to_recalculate(users, lang, concepts)
LOGGER.debug("user_stats - getting identifying concepts to recalculate: %ss", (time_lib() - time_start))
time_start = time_lib()
self.recalculate_concepts(concepts_to_recalculate, lang)
LOGGER.debug("user_stats - recalculating concepts: %ss", (time_lib() - time_start))
qs = self.prepare_related().filter(user__in=users, concept__active=True)
if concepts is not None:
qs = qs.filter(concept__in=concepts)
if lang is not None:
qs = qs.filter(concept__lang=lang)
if since is not None:
qs = qs.filter(time__gte=since)
data = defaultdict(lambda: defaultdict(lambda: {}))
for user_stat in qs:
data[user_stat.user_id][user_stat.concept.identifier][user_stat.stat] = user_stat.value
if only_one_user:
return data[users[0].pk if type(users[0]) == User else users[0]]
return data
class UserStat(models.Model):
"""
    Represent an arbitrary statistic (float) of the user on a concept
"""
concept = models.ForeignKey(Concept)
user = models.ForeignKey(User, related_name="stats")
stat = models.CharField(max_length=50)
time = models.DateTimeField(auto_now=True)
value = models.FloatField()
objects = UserStatManager()
class Meta:
unique_together = ("concept", "user", "stat")
def __str__(self):
return "{} - {}: {}".format(self.stat, self.concept, self.value)
@receiver(pre_save, sender=Concept)
def generate_identifier(sender, instance, **kwargs):
"""
Generate and set identifier of concept before saving object to DB
Args:
sender (class): should be Concept
instance (Concept): saving concept
"""
identifier = Concept.create_identifier(instance.query)
qs = Concept.objects.filter(identifier=identifier, lang=instance.lang)
if instance.pk:
qs = qs.exclude(pk=instance.pk)
if qs.count() > 0:
raise ValueError("Concept identifier conflict")
instance.identifier = identifier
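# --- Identifier scheme sketch (not part of the original module) ---
# Concept identifiers are the first 20 hex characters of the SHA-1 digest of
# the concept query (see Concept.create_identifier and generate_identifier
# above).  A standalone, hypothetical illustration of the same scheme:
#
#     from hashlib import sha1
#     query = '{"categories": ["world"]}'        # hypothetical query string
#     identifier = sha1(query.encode()).hexdigest()[:20]
#     assert len(identifier) == 20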
|
adaptive-learning/proso-apps
|
proso_concepts/models.py
|
Python
|
mit
| 14,974
|
from django.contrib import admin
from simulation.models import SimulationStage, SimulationStageMatch, SimulationStageMatchResult
class SimulationStageAdmin(admin.ModelAdmin):
list_display = ["number", "created_at"]
list_filter = ["created_at"]
class SimulationStageMatchAdmin(admin.ModelAdmin):
list_display = ["stage", "order", "raund",
"cat", "rat", "won", "created_at"]
list_filter = ["stage", "created_at"]
search_fields = ["cat", "rat"]
readonly_fields = ["won", "cat_password", "rat_password", "system_password"]
class SimulationStageMatchResultAdmin(admin.ModelAdmin):
list_display = ["match", "is_caught", "distance", "is_cancelled", "created_at"]
list_filter = ["created_at"]
search_fields = ["match"]
admin.site.register(SimulationStage, SimulationStageAdmin)
admin.site.register(SimulationStageMatch, SimulationStageMatchAdmin)
admin.site.register(SimulationStageMatchResult, SimulationStageMatchResultAdmin)
|
bilbeyt/ituro
|
ituro/simulation/admin.py
|
Python
|
mit
| 972
|
import os
import common_pygame
import random
pygame = common_pygame.pygame
screen = common_pygame.screen
class Bonus():
def __init__(self, sounds, menu):
self.menu = menu
self.sounds = sounds
self.bonusType = 0
self.bonusAnim = 0
self.font = pygame.font.Font(None, 64)
self.bonusList = list()
self.bonusList.append(self.font.render(
str("plasma gun !"), True, (255, 255, 0)))
self.score = 0
self.bonuscount = 1
def ProcessBonus(self, ship):
# if ship.score %200 ==0 and ship.weapon==1 and ship.score>0:
if ship.score > 400 * self.bonuscount and self.score < 400 * self.bonuscount:
self.menu.play_sound(self.sounds["plasmagun.wav"])
ship.setWeapon(2)
self.bonusType = 0
self.bonusAnim = 30
self.score = ship.score
self.bonuscount = self.bonuscount + 1
if self.bonusAnim > 0:
self.bonusAnim = self.bonusAnim - 1
# show bonus for the plasma weapon
if self.bonusType == 0:
screen.blit(self.bonusList[0], (250, 250))
|
antismap/MICshooter
|
sources/bonus.py
|
Python
|
mit
| 1,158
|
"""
https://en.wikipedia.org/wiki/Square_root_of_a_matrix
B is the sqrt of a matrix A if B*B = A
"""
import numpy as np
from scipy.linalg import sqrtm
from scipy.stats import special_ortho_group
def denman_beaver(A, n=50):
Y = A
Z = np.eye(len(A))
for i in range(n):
Yn = 0.5*(Y + np.linalg.inv(Z))
Zn = 0.5*(Z + np.linalg.inv(Y))
Y = Yn
Z = Zn
return (Y, Z)
def babylonian(A, n=50):
X = np.eye(len(A))
for i in range(n):
X = 0.5*(X + np.dot(A, np.linalg.inv(X)))
return X
def gen_random_matrix(n):
return np.random.rand(n, n)
def gen_rotation_matrix(n):
return special_ortho_group.rvs(n)*np.random.randint(-100, 101)
def gen_symmetric_matrix(n):
A = np.random.randint(-10, 11, size=(n, n))
A = 0.5*(A + A.T)
return A
def test(title, gen_matrix, size, iters):
print("Testing {} matrix".format(title))
for i in range(1, size):
for j in range(iters):
try:
A = gen_matrix(i)
d = np.linalg.det(A)
Y, _ = denman_beaver(A)
X = babylonian(A)
Z = sqrtm(A)
print("{}x{} matrix (det {})".format(i, i, d))
print(A)
print("Denman Beaver")
print(np.dot(Y, Y))
print("Babylonian")
print(np.dot(X, X))
print("Scipy")
print(np.dot(Z, Z))
print()
except:
pass
# iteration methods above tend to fail on random and symmetric matrices
test("random", gen_random_matrix, 5, 10)
test("symmetric", gen_symmetric_matrix, 5, 10)
# for rotation matrices, the iteration methods work
test("rotation", gen_rotation_matrix, 5, 10)
|
qeedquan/misc_utilities
|
math/matrix-sqrt.py
|
Python
|
mit
| 1,781
|
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
u, i=np.genfromtxt('Rohdaten/Daten_1_5.txt', unpack=True)
i=np.log(i)
u=np.log(u)
def f(u, a, b):
return a * u + b
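# Straight-line fit in log-log space: log(I) = a*log(U) + b, i.e. the data are
# modelled as a power law I ~ U**a, so the fitted slope a is the exponent.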
params, covariance = curve_fit(f, u, i)
errors = np.sqrt(np.diag(covariance))
print('a =', params[0], '+-', errors[0])
print('b =', params[1], '+-', errors[1])
x_plot = np.linspace(2.2, 5.6)
plt.plot(u, i, 'rx', label='Messwerte')
plt.plot(x_plot, f(x_plot, params[0], params[1]), 'b-', label='Ausgleichsgerade')
plt.xlabel('log(U / V)')
plt.ylabel('log(I / mA)')
#plt.xlim(8, 300)
#plt.yscale('log')
#plt.xscale('log')
plt.grid()
plt.legend(loc='best')
plt.savefig('build/oppellog.pdf')
#
|
mwindau/praktikum
|
v504/oppellog.py
|
Python
|
mit
| 701
|
# -*- coding: utf-8 -*-
"""
Internet Relay Chat (IRC) protocol client library.
This library is intended to encapsulate the IRC protocol in Python.
It provides an event-driven IRC client framework. It has
fairly thorough support for the basic IRC protocol, CTCP, and DCC chat.
To best understand how to make an IRC client, the reader more
or less must understand the IRC specifications. They are available
here: [IRC specifications].
The main features of the IRC client framework are:
* Abstraction of the IRC protocol.
* Handles multiple simultaneous IRC server connections.
* Handles server PONGing transparently.
* Messages to the IRC server are done by calling methods on an IRC
connection object.
* Messages from an IRC server triggers events, which can be caught
by event handlers.
* Reading from and writing to IRC server sockets are normally done
by an internal select() loop, but the select()ing may be done by
an external main loop.
* Functions can be registered to execute at specified times by the
event-loop.
* Decodes CTCP tagging correctly (hopefully); I haven't seen any
other IRC client implementation that handles the CTCP
specification subtleties.
* A kind of simple, single-server, object-oriented IRC client class
that dispatches events to instance methods is included.
Current limitations:
* Data is not written asynchronously to the server, i.e. the write()
may block if the TCP buffers are stuffed.
* DCC file transfers are not supported.
* RFCs 2810, 2811, 2812, and 2813 have not been considered.
Notes:
* connection.quit() only sends QUIT to the server.
* ERROR from the server triggers the error event and the disconnect event.
* dropping of the connection triggers the disconnect event.
.. [IRC specifications] http://www.irchelp.org/irchelp/rfc/
"""
import bisect
import re
import select
import socket
import time
import struct
import logging
import threading
import abc
import collections
import functools
import itertools
import contextlib
import warnings
import jaraco.functools
from jaraco.functools import Throttler
from jaraco.stream import buffer
from more_itertools import consume, always_iterable, repeatfunc
from . import connection
from . import events
from . import features
from . import ctcp
from . import message
from . import schedule
log = logging.getLogger(__name__)
class IRCError(Exception):
"An IRC exception"
class InvalidCharacters(ValueError):
"Invalid characters were encountered in the message"
class MessageTooLong(ValueError):
"Message is too long"
class Connection(metaclass=abc.ABCMeta):
"""
Base class for IRC connections.
"""
transmit_encoding = 'utf-8'
"encoding used for transmission"
@abc.abstractproperty
def socket(self):
"The socket for this connection"
def __init__(self, reactor):
self.reactor = reactor
def encode(self, msg):
"""Encode a message for transmission."""
return msg.encode(self.transmit_encoding)
class ServerConnectionError(IRCError):
pass
class ServerNotConnectedError(ServerConnectionError):
pass
class ServerConnection(Connection):
"""
An IRC server connection.
ServerConnection objects are instantiated by calling the server
method on a Reactor object.
"""
buffer_class = buffer.DecodingLineBuffer
socket = None
connected = False
def __init__(self, reactor):
super().__init__(reactor)
self.features = features.FeatureSet()
# save the method args to allow for easier reconnection.
@jaraco.functools.save_method_args
def connect(
self,
server,
port,
nickname,
password=None,
username=None,
ircname=None,
connect_factory=connection.Factory(),
):
"""Connect/reconnect to a server.
Arguments:
* server - Server name
* port - Port number
* nickname - The nickname
* password - Password (if any)
* username - The username
* ircname - The IRC name ("realname")
* server_address - The remote host/port of the server
* connect_factory - A callable that takes the server address and
returns a connection (with a socket interface)
This function can be called to reconnect a closed connection.
Returns the ServerConnection object.
"""
log.debug(
"connect(server=%r, port=%r, nickname=%r, ...)", server, port, nickname
)
if self.connected:
self.disconnect("Changing servers")
self.buffer = self.buffer_class()
self.handlers = {}
self.real_server_name = ""
self.real_nickname = nickname
self.server = server
self.port = port
self.server_address = (server, port)
self.nickname = nickname
self.username = username or nickname
self.ircname = ircname or nickname
self.password = password
self.connect_factory = connect_factory
try:
self.socket = self.connect_factory(self.server_address)
except socket.error as ex:
raise ServerConnectionError("Couldn't connect to socket: %s" % ex)
self.connected = True
self.reactor._on_connect(self.socket)
# Log on...
if self.password:
self.pass_(self.password)
self.nick(self.nickname)
self.user(self.username, self.ircname)
return self
def reconnect(self):
"""
Reconnect with the last arguments passed to self.connect()
"""
self.connect(*self._saved_connect.args, **self._saved_connect.kwargs)
def close(self):
"""Close the connection.
This method closes the connection permanently; after it has
been called, the object is unusable.
"""
# Without this thread lock, there is a window during which
# select() can find a closed socket, leading to an EBADF error.
with self.reactor.mutex:
self.disconnect("Closing object")
self.reactor._remove_connection(self)
def get_server_name(self):
"""Get the (real) server name.
This method returns the (real) server name, or, more
specifically, what the server calls itself.
"""
return self.real_server_name or ""
def get_nickname(self):
"""Get the (real) nick name.
This method returns the (real) nickname. The library keeps
track of nick changes, so it might not be the nick name that
was passed to the connect() method.
"""
return self.real_nickname
@contextlib.contextmanager
def as_nick(self, name):
"""
Set the nick for the duration of the context.
"""
orig = self.get_nickname()
self.nick(name)
try:
yield orig
finally:
self.nick(orig)
def process_data(self):
"read and process input from self.socket"
try:
reader = getattr(self.socket, 'read', self.socket.recv)
new_data = reader(2**14)
except socket.error:
# The server hung up.
self.disconnect("Connection reset by peer")
return
if not new_data:
# Read nothing: connection must be down.
self.disconnect("Connection reset by peer")
return
self.buffer.feed(new_data)
# process each non-empty line after logging all lines
for line in self.buffer:
log.debug("FROM SERVER: %s", line)
if not line:
continue
self._process_line(line)
def _process_line(self, line):
event = Event("all_raw_messages", self.get_server_name(), None, [line])
self._handle_event(event)
grp = _rfc_1459_command_regexp.match(line).group
source = NickMask.from_group(grp("prefix"))
command = self._command_from_group(grp("command"))
arguments = message.Arguments.from_group(grp('argument'))
tags = message.Tag.from_group(grp('tags'))
if source and not self.real_server_name:
self.real_server_name = source
if command == "nick":
if source.nick == self.real_nickname:
self.real_nickname = arguments[0]
elif command == "welcome":
# Record the nickname in case the client changed nick
# in a nicknameinuse callback.
self.real_nickname = arguments[0]
elif command == "featurelist":
self.features.load(arguments)
handler = (
self._handle_message
if command in ["privmsg", "notice"]
else self._handle_other
)
handler(arguments, command, source, tags)
def _handle_message(self, arguments, command, source, tags):
target, msg = arguments[:2]
messages = ctcp.dequote(msg)
if command == "privmsg":
if is_channel(target):
command = "pubmsg"
else:
if is_channel(target):
command = "pubnotice"
else:
command = "privnotice"
for m in messages:
if isinstance(m, tuple):
if command in ["privmsg", "pubmsg"]:
command = "ctcp"
else:
command = "ctcpreply"
m = list(m)
log.debug(
"command: %s, source: %s, target: %s, " "arguments: %s, tags: %s",
command,
source,
target,
m,
tags,
)
event = Event(command, source, target, m, tags)
self._handle_event(event)
if command == "ctcp" and m[0] == "ACTION":
event = Event("action", source, target, m[1:], tags)
self._handle_event(event)
else:
log.debug(
"command: %s, source: %s, target: %s, " "arguments: %s, tags: %s",
command,
source,
target,
[m],
tags,
)
event = Event(command, source, target, [m], tags)
self._handle_event(event)
def _handle_other(self, arguments, command, source, tags):
target = None
if command == "quit":
arguments = [arguments[0]]
elif command == "ping":
target = arguments[0]
else:
target = arguments[0] if arguments else None
arguments = arguments[1:]
if command == "mode":
if not is_channel(target):
command = "umode"
log.debug(
"command: %s, source: %s, target: %s, " "arguments: %s, tags: %s",
command,
source,
target,
arguments,
tags,
)
event = Event(command, source, target, arguments, tags)
self._handle_event(event)
@staticmethod
def _command_from_group(group):
command = group.lower()
# Translate numerics into more readable strings.
return events.numeric.get(command, command)
def _handle_event(self, event):
"""[Internal]"""
self.reactor._handle_event(self, event)
if event.type in self.handlers:
for fn in self.handlers[event.type]:
fn(self, event)
def is_connected(self):
"""Return connection status.
Returns true if connected, otherwise false.
"""
return self.connected
def add_global_handler(self, *args):
"""Add global handler.
See documentation for IRC.add_global_handler.
"""
self.reactor.add_global_handler(*args)
def remove_global_handler(self, *args):
"""Remove global handler.
See documentation for IRC.remove_global_handler.
"""
self.reactor.remove_global_handler(*args)
def action(self, target, action):
"""Send a CTCP ACTION command."""
self.ctcp("ACTION", target, action)
def admin(self, server=""):
"""Send an ADMIN command."""
self.send_items('ADMIN', server)
def cap(self, subcommand, *args):
"""
Send a CAP command according to `the spec
<http://ircv3.atheme.org/specification/capability-negotiation-3.1>`_.
Arguments:
subcommand -- LS, LIST, REQ, ACK, CLEAR, END
args -- capabilities, if required for given subcommand
Example:
.cap('LS')
.cap('REQ', 'multi-prefix', 'sasl')
.cap('END')
"""
cap_subcommands = set('LS LIST REQ ACK NAK CLEAR END'.split())
client_subcommands = set(cap_subcommands) - {'NAK'}
assert subcommand in client_subcommands, "invalid subcommand"
def _multi_parameter(args):
"""
According to the spec::
If more than one capability is named, the RFC1459 designated
sentinel (:) for a multi-parameter argument must be present.
It's not obvious where the sentinel should be present or if it
must be omitted for a single parameter, so follow convention and
only include the sentinel prefixed to the first parameter if more
than one parameter is present.
"""
if len(args) > 1:
return (':' + args[0],) + args[1:]
return args
self.send_items('CAP', subcommand, *_multi_parameter(args))
def ctcp(self, ctcptype, target, parameter=""):
"""Send a CTCP command."""
ctcptype = ctcptype.upper()
tmpl = "\001{ctcptype} {parameter}\001" if parameter else "\001{ctcptype}\001"
self.privmsg(target, tmpl.format(**vars()))
def ctcp_reply(self, target, parameter):
"""Send a CTCP REPLY command."""
self.notice(target, "\001%s\001" % parameter)
def disconnect(self, message=""):
"""Hang up the connection.
Arguments:
message -- Quit message.
"""
try:
del self.connected
except AttributeError:
return
self.quit(message)
try:
self.socket.shutdown(socket.SHUT_WR)
self.socket.close()
except socket.error:
pass
del self.socket
self._handle_event(Event("disconnect", self.server, "", [message]))
def globops(self, text):
"""Send a GLOBOPS command."""
self.send_items('GLOBOPS', ':' + text)
def info(self, server=""):
"""Send an INFO command."""
self.send_items('INFO', server)
def invite(self, nick, channel):
"""Send an INVITE command."""
self.send_items('INVITE', nick, channel)
def ison(self, nicks):
"""Send an ISON command.
Arguments:
nicks -- List of nicks.
"""
self.send_items('ISON', *tuple(nicks))
def join(self, channel, key=""):
"""Send a JOIN command."""
self.send_items('JOIN', channel, key)
def kick(self, channel, nick, comment=""):
"""Send a KICK command."""
self.send_items('KICK', channel, nick, comment and ':' + comment)
def links(self, remote_server="", server_mask=""):
"""Send a LINKS command."""
self.send_items('LINKS', remote_server, server_mask)
def list(self, channels=None, server=""):
"""Send a LIST command."""
self.send_items('LIST', ','.join(always_iterable(channels)), server)
def lusers(self, server=""):
"""Send a LUSERS command."""
self.send_items('LUSERS', server)
def mode(self, target, command):
"""Send a MODE command."""
self.send_items('MODE', target, command)
def motd(self, server=""):
"""Send an MOTD command."""
self.send_items('MOTD', server)
def names(self, channels=None):
"""Send a NAMES command."""
self.send_items('NAMES', ','.join(always_iterable(channels)))
def nick(self, newnick):
"""Send a NICK command."""
self.send_items('NICK', newnick)
def notice(self, target, text):
"""Send a NOTICE command."""
# Should limit len(text) here!
self.send_items('NOTICE', target, ':' + text)
def oper(self, nick, password):
"""Send an OPER command."""
self.send_items('OPER', nick, password)
def part(self, channels, message=""):
"""Send a PART command."""
self.send_items('PART', ','.join(always_iterable(channels)), message)
def pass_(self, password):
"""Send a PASS command."""
self.send_items('PASS', password)
def ping(self, target, target2=""):
"""Send a PING command."""
self.send_items('PING', target, target2)
def pong(self, target, target2=""):
"""Send a PONG command."""
self.send_items('PONG', target, target2)
def privmsg(self, target, text):
"""Send a PRIVMSG command."""
self.send_items('PRIVMSG', target, ':' + text)
def privmsg_many(self, targets, text):
"""Send a PRIVMSG command to multiple targets."""
target = ','.join(targets)
return self.privmsg(target, text)
def quit(self, message=""):
"""Send a QUIT command."""
# Note that many IRC servers don't use your QUIT message
# unless you've been connected for at least 5 minutes!
self.send_items('QUIT', message and ':' + message)
def _prep_message(self, string):
# The string should not contain any carriage return other than the
# one added here.
if '\n' in string:
msg = "Carriage returns not allowed in privmsg(text)"
raise InvalidCharacters(msg)
bytes = self.encode(string) + b'\r\n'
# According to the RFC http://tools.ietf.org/html/rfc2812#page-6,
# clients should not transmit more than 512 bytes.
if len(bytes) > 512:
msg = "Messages limited to 512 bytes including CR/LF"
raise MessageTooLong(msg)
return bytes
def send_items(self, *items):
"""
Send all non-empty items, separated by spaces.
"""
self.send_raw(' '.join(filter(None, items)))
def send_raw(self, string):
"""Send raw string to the server.
The string will be padded with appropriate CR LF.
"""
if self.socket is None:
raise ServerNotConnectedError("Not connected.")
sender = getattr(self.socket, 'write', self.socket.send)
try:
sender(self._prep_message(string))
log.debug("TO SERVER: %s", string)
except socket.error:
# Ouch!
self.disconnect("Connection reset by peer.")
def squit(self, server, comment=""):
"""Send an SQUIT command."""
self.send_items('SQUIT', server, comment and ':' + comment)
def stats(self, statstype, server=""):
"""Send a STATS command."""
self.send_items('STATS', statstype, server)
def time(self, server=""):
"""Send a TIME command."""
self.send_items('TIME', server)
def topic(self, channel, new_topic=None):
"""Send a TOPIC command."""
self.send_items('TOPIC', channel, new_topic and ':' + new_topic)
def trace(self, target=""):
"""Send a TRACE command."""
self.send_items('TRACE', target)
def user(self, username, realname):
"""Send a USER command."""
cmd = 'USER {username} 0 * :{realname}'.format(**locals())
self.send_raw(cmd)
def userhost(self, nicks):
"""Send a USERHOST command."""
self.send_items('USERHOST', ",".join(nicks))
def users(self, server=""):
"""Send a USERS command."""
self.send_items('USERS', server)
def version(self, server=""):
"""Send a VERSION command."""
self.send_items('VERSION', server)
def wallops(self, text):
"""Send a WALLOPS command."""
self.send_items('WALLOPS', ':' + text)
def who(self, target="", op=""):
"""Send a WHO command."""
self.send_items('WHO', target, op and 'o')
def whois(self, targets):
"""Send a WHOIS command."""
self.send_items('WHOIS', ",".join(always_iterable(targets)))
def whowas(self, nick, max="", server=""):
"""Send a WHOWAS command."""
self.send_items('WHOWAS', nick, max, server)
def set_rate_limit(self, frequency):
"""
Set a `frequency` limit (messages per second) for this connection.
Any attempts to send faster than this rate will block.
"""
self.send_raw = Throttler(self.send_raw, frequency)
def set_keepalive(self, interval):
"""
Set a keepalive to occur every `interval` on this `ServerConnection`.
:param interval: `int` in seconds, or `datetime.timedelta`
"""
pinger = functools.partial(self.ping, 'keep-alive')
self.reactor.scheduler.execute_every(period=interval, func=pinger)
class PrioritizedHandler(collections.namedtuple('Base', ('priority', 'callback'))):
def __lt__(self, other):
"when sorting prioritized handlers, only use the priority"
return self.priority < other.priority
class Reactor:
"""
Processes events from one or more IRC server connections.
This class implements a reactor in the style of the `reactor pattern
<http://en.wikipedia.org/wiki/Reactor_pattern>`_.
When a Reactor object has been instantiated, it can be used to create
Connection objects that represent the IRC connections. The
responsibility of the reactor object is to provide an event-driven
framework for the connections and to keep the connections alive.
It runs a select loop to poll each connection's TCP socket and
hands over the sockets with incoming data for processing by the
corresponding connection.
The methods of most interest for an IRC client writer are server,
add_global_handler, remove_global_handler,
process_once, and process_forever.
    This is functionally an event-loop which can either use its own
internal polling loop, or tie into an external event-loop, by
having the external event-system periodically call `process_once`
on the instantiated reactor class. This will allow the reactor
to process any queued data and/or events.
Calling `process_forever` will hand off execution to the reactor's
internal event-loop, which will not return for the life of the
reactor.
Here is an example:
client = irc.client.Reactor()
server = client.server()
server.connect("irc.some.where", 6667, "my_nickname")
server.privmsg("a_nickname", "Hi there!")
client.process_forever()
This will connect to the IRC server irc.some.where on port 6667
using the nickname my_nickname and send the message "Hi there!"
to the nickname a_nickname.
The methods of this class are thread-safe; accesses to and modifications
of its internal lists of connections, handlers, and delayed commands
are guarded by a mutex.
"""
scheduler_class = schedule.DefaultScheduler
connection_class = ServerConnection
def __do_nothing(*args, **kwargs):
pass
def __init__(self, on_connect=__do_nothing, on_disconnect=__do_nothing):
"""Constructor for Reactor objects.
on_connect: optional callback invoked when a new connection
is made.
on_disconnect: optional callback invoked when a socket is
disconnected.
The arguments mainly exist to be able to use an external
main loop (for example Tkinter's or PyGTK's main app loop)
instead of calling the process_forever method.
An alternative is to just call ServerConnection.process_once()
once in a while.
"""
self._on_connect = on_connect
self._on_disconnect = on_disconnect
scheduler = self.scheduler_class()
assert isinstance(scheduler, schedule.IScheduler)
self.scheduler = scheduler
self.connections = []
self.handlers = {}
# Modifications to these shared lists and dict need to be thread-safe
self.mutex = threading.RLock()
self.add_global_handler("ping", _ping_ponger, -42)
def server(self):
"""Creates and returns a ServerConnection object."""
conn = self.connection_class(self)
with self.mutex:
self.connections.append(conn)
return conn
def process_data(self, sockets):
"""Called when there is more data to read on connection sockets.
Arguments:
sockets -- A list of socket objects.
See documentation for Reactor.__init__.
"""
with self.mutex:
log.log(logging.DEBUG - 2, "process_data()")
for sock, conn in itertools.product(sockets, self.connections):
if sock == conn.socket:
conn.process_data()
def process_timeout(self):
"""Called when a timeout notification is due.
See documentation for Reactor.__init__.
"""
with self.mutex:
self.scheduler.run_pending()
@property
def sockets(self):
with self.mutex:
return [
conn.socket
for conn in self.connections
if conn is not None and conn.socket is not None
]
def process_once(self, timeout=0):
"""Process data from connections once.
Arguments:
timeout -- How long the select() call should wait if no
data is available.
This method should be called periodically to check and process
incoming data, if there are any. If that seems boring, look
at the process_forever method.
"""
log.log(logging.DEBUG - 2, "process_once()")
sockets = self.sockets
if sockets:
in_, out, err = select.select(sockets, [], [], timeout)
self.process_data(in_)
else:
time.sleep(timeout)
self.process_timeout()
def process_forever(self, timeout=0.2):
"""Run an infinite loop, processing data from connections.
This method repeatedly calls process_once.
Arguments:
timeout -- Parameter to pass to process_once.
"""
# This loop should specifically *not* be mutex-locked.
# Otherwise no other thread would ever be able to change
# the shared state of a Reactor object running this function.
log.debug("process_forever(timeout=%s)", timeout)
one = functools.partial(self.process_once, timeout=timeout)
consume(repeatfunc(one))
def disconnect_all(self, message=""):
"""Disconnects all connections."""
with self.mutex:
for conn in self.connections:
conn.disconnect(message)
def add_global_handler(self, event, handler, priority=0):
"""Adds a global handler function for a specific event type.
Arguments:
event -- Event type (a string). Check the values of
numeric_events for possible event types.
handler -- Callback function taking 'connection' and 'event'
parameters.
priority -- A number (the lower number, the higher priority).
The handler function is called whenever the specified event is
triggered in any of the connections. See documentation for
the Event class.
The handler functions are called in priority order (lowest
number is highest priority). If a handler function returns
"NO MORE", no more handlers will be called.
"""
handler = PrioritizedHandler(priority, handler)
with self.mutex:
event_handlers = self.handlers.setdefault(event, [])
bisect.insort(event_handlers, handler)
def remove_global_handler(self, event, handler):
"""Removes a global handler function.
Arguments:
event -- Event type (a string).
handler -- Callback function.
Returns 1 on success, otherwise 0.
"""
with self.mutex:
if event not in self.handlers:
return 0
for h in self.handlers[event]:
if handler == h.callback:
self.handlers[event].remove(h)
return 1
def dcc(self, dcctype="chat"):
"""Creates and returns a DCCConnection object.
Arguments:
dcctype -- "chat" for DCC CHAT connections or "raw" for
DCC SEND (or other DCC types). If "chat",
incoming data will be split in newline-separated
chunks. If "raw", incoming data is not touched.
"""
with self.mutex:
conn = DCCConnection(self, dcctype)
self.connections.append(conn)
return conn
def _handle_event(self, connection, event):
"""
Handle an Event event incoming on ServerConnection connection.
"""
with self.mutex:
matching_handlers = sorted(
self.handlers.get("all_events", []) + self.handlers.get(event.type, [])
)
for handler in matching_handlers:
result = handler.callback(connection, event)
if result == "NO MORE":
return
def _remove_connection(self, connection):
"""[Internal]"""
with self.mutex:
self.connections.remove(connection)
self._on_disconnect(connection.socket)
_cmd_pat = (
"^(@(?P<tags>[^ ]*) )?(:(?P<prefix>[^ ]+) +)?"
"(?P<command>[^ ]+)( *(?P<argument> .+))?"
)
_rfc_1459_command_regexp = re.compile(_cmd_pat)
class DCCConnectionError(IRCError):
pass
class DCCConnection(Connection):
"""
A DCC (Direct Client Connection).
DCCConnection objects are instantiated by calling the dcc
method on a Reactor object.
"""
socket = None
connected = False
passive = False
peeraddress = None
peerport = None
def __init__(self, reactor, dcctype):
super().__init__(reactor)
self.dcctype = dcctype
def connect(self, address, port):
"""Connect/reconnect to a DCC peer.
Arguments:
address -- Host/IP address of the peer.
port -- The port number to connect to.
Returns the DCCConnection object.
"""
self.peeraddress = socket.gethostbyname(address)
self.peerport = port
self.buffer = buffer.LineBuffer()
self.handlers = {}
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
self.socket.connect((self.peeraddress, self.peerport))
except socket.error as x:
raise DCCConnectionError("Couldn't connect to socket: %s" % x)
self.connected = True
self.reactor._on_connect(self.socket)
return self
def listen(self, addr=None):
"""Wait for a connection/reconnection from a DCC peer.
Returns the DCCConnection object.
The local IP address and port are available as
self.localaddress and self.localport. After connection from a
peer, the peer address and port are available as
self.peeraddress and self.peerport.
"""
self.buffer = buffer.LineBuffer()
self.handlers = {}
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.passive = True
default_addr = socket.gethostbyname(socket.gethostname()), 0
try:
self.socket.bind(addr or default_addr)
self.localaddress, self.localport = self.socket.getsockname()
self.socket.listen(10)
except socket.error as x:
raise DCCConnectionError("Couldn't bind socket: %s" % x)
return self
def disconnect(self, message=""):
"""Hang up the connection and close the object.
Arguments:
message -- Quit message.
"""
try:
del self.connected
except AttributeError:
return
try:
self.socket.shutdown(socket.SHUT_WR)
self.socket.close()
except socket.error:
pass
del self.socket
self.reactor._handle_event(
self, Event("dcc_disconnect", self.peeraddress, "", [message])
)
self.reactor._remove_connection(self)
def process_data(self):
"""[Internal]"""
if self.passive and not self.connected:
conn, (self.peeraddress, self.peerport) = self.socket.accept()
self.socket.close()
self.socket = conn
self.connected = True
log.debug("DCC connection from %s:%d", self.peeraddress, self.peerport)
self.reactor._handle_event(
self, Event("dcc_connect", self.peeraddress, None, None)
)
return
try:
new_data = self.socket.recv(2**14)
except socket.error:
# The server hung up.
self.disconnect("Connection reset by peer")
return
if not new_data:
# Read nothing: connection must be down.
self.disconnect("Connection reset by peer")
return
if self.dcctype == "chat":
self.buffer.feed(new_data)
chunks = list(self.buffer)
if len(self.buffer) > 2**14:
# Bad peer! Naughty peer!
log.info(
"Received >16k from a peer without a newline; " "disconnecting."
)
self.disconnect()
return
else:
chunks = [new_data]
command = "dccmsg"
prefix = self.peeraddress
target = None
for chunk in chunks:
log.debug("FROM PEER: %s", chunk)
arguments = [chunk]
log.debug(
"command: %s, source: %s, target: %s, arguments: %s",
command,
prefix,
target,
arguments,
)
event = Event(command, prefix, target, arguments)
self.reactor._handle_event(self, event)
def privmsg(self, text):
"""
Send text to DCC peer.
The text will be padded with a newline if it's a DCC CHAT session.
"""
if self.dcctype == 'chat':
text += '\n'
return self.send_bytes(self.encode(text))
def send_bytes(self, bytes):
"""
Send data to DCC peer.
"""
try:
self.socket.send(bytes)
log.debug("TO PEER: %r\n", bytes)
except socket.error:
self.disconnect("Connection reset by peer.")
class SimpleIRCClient:
"""A simple single-server IRC client class.
This is an example of an object-oriented wrapper of the IRC
framework. A real IRC client can be made by subclassing this
class and adding appropriate methods.
The method on_join will be called when a "join" event is created
    (which is done when the server sends a JOIN message/command),
on_privmsg will be called for "privmsg" events, and so on. The
handler methods get two arguments: the connection object (same as
self.connection) and the event object.
Functionally, any of the event names in `events.py` may be subscribed
to by prefixing them with `on_`, and creating a function of that
name in the child-class of `SimpleIRCClient`. When the event of
`event_name` is received, the appropriately named method will be
called (if it exists) by runtime class introspection.
See `_dispatcher()`, which takes the event name, postpends it to
    `on_`, and then attempts to look up the class member function by
name and call it.
Instance attributes that can be used by sub classes:
reactor -- The Reactor instance.
connection -- The ServerConnection instance.
dcc_connections -- A list of DCCConnection instances.
"""
reactor_class = Reactor
def __init__(self):
self.reactor = self.reactor_class()
self.connection = self.reactor.server()
self.dcc_connections = []
self.reactor.add_global_handler("all_events", self._dispatcher, -10)
self.reactor.add_global_handler("dcc_disconnect", self._dcc_disconnect, -10)
def _dispatcher(self, connection, event):
"""
Dispatch events to on_<event.type> method, if present.
"""
log.debug("_dispatcher: %s", event.type)
def do_nothing(connection, event):
return None
method = getattr(self, "on_" + event.type, do_nothing)
method(connection, event)
def _dcc_disconnect(self, connection, event):
self.dcc_connections.remove(connection)
def connect(self, *args, **kwargs):
"""Connect using the underlying connection"""
self.connection.connect(*args, **kwargs)
def dcc(self, *args, **kwargs):
"""Create and associate a new DCCConnection object.
Use the returned object to listen for or connect to
a DCC peer.
"""
dcc = self.reactor.dcc(*args, **kwargs)
self.dcc_connections.append(dcc)
return dcc
def dcc_connect(self, address, port, dcctype="chat"):
"""Connect to a DCC peer.
Arguments:
address -- IP address of the peer.
port -- Port to connect to.
Returns a DCCConnection instance.
"""
warnings.warn("Use self.dcc(type).connect()", DeprecationWarning)
return self.dcc(dcctype).connect(address, port)
def dcc_listen(self, dcctype="chat"):
"""Listen for connections from a DCC peer.
Returns a DCCConnection instance.
"""
warnings.warn("Use self.dcc(type).listen()", DeprecationWarning)
return self.dcc(dcctype).listen()
def start(self):
"""Start the IRC client."""
self.reactor.process_forever()
class Event:
"""
An IRC event.
>>> print(Event('privmsg', '@somebody', '#channel'))
type: privmsg, source: @somebody, target: #channel, arguments: [], tags: []
"""
def __init__(self, type, source, target, arguments=None, tags=None):
"""
Initialize an Event.
Arguments:
type -- A string describing the event.
source -- The originator of the event (a nick mask or a server).
target -- The target of the event (a nick or a channel).
arguments -- Any event-specific arguments.
"""
self.type = type
self.source = source
self.target = target
if arguments is None:
arguments = []
self.arguments = arguments
if tags is None:
tags = []
self.tags = tags
def __str__(self):
tmpl = (
"type: {type}, "
"source: {source}, "
"target: {target}, "
"arguments: {arguments}, "
"tags: {tags}"
)
return tmpl.format(**vars(self))
def is_channel(string):
"""Check if a string is a channel name.
Returns true if the argument is a channel name, otherwise false.
"""
return string and string[0] in "#&+!"
def ip_numstr_to_quad(num):
"""
Convert an IP number as an integer given in ASCII
representation to an IP address string.
>>> ip_numstr_to_quad('3232235521')
'192.168.0.1'
>>> ip_numstr_to_quad(3232235521)
'192.168.0.1'
"""
packed = struct.pack('>L', int(num))
bytes = struct.unpack('BBBB', packed)
return ".".join(map(str, bytes))
def ip_quad_to_numstr(quad):
"""
Convert an IP address string (e.g. '192.168.0.1') to an IP
number as a base-10 integer given in ASCII representation.
>>> ip_quad_to_numstr('192.168.0.1')
'3232235521'
"""
bytes = map(int, quad.split("."))
packed = struct.pack('BBBB', *bytes)
return str(struct.unpack('>L', packed)[0])
class NickMask(str):
"""
A nickmask (the source of an Event)
>>> nm = NickMask('pinky!username@example.com')
>>> nm.nick
'pinky'
>>> nm.host
'example.com'
>>> nm.user
'username'
>>> isinstance(nm, str)
True
>>> nm = NickMask('красный!red@yahoo.ru')
>>> isinstance(nm.nick, str)
True
Some messages omit the userhost. In that case, None is returned.
>>> nm = NickMask('irc.server.net')
>>> nm.nick
'irc.server.net'
>>> nm.userhost
>>> nm.host
>>> nm.user
"""
@classmethod
def from_params(cls, nick, user, host):
return cls('{nick}!{user}@{host}'.format(**vars()))
@property
def nick(self):
nick, sep, userhost = self.partition("!")
return nick
@property
def userhost(self):
nick, sep, userhost = self.partition("!")
return userhost or None
@property
def host(self):
nick, sep, userhost = self.partition("!")
user, sep, host = userhost.partition('@')
return host or None
@property
def user(self):
nick, sep, userhost = self.partition("!")
user, sep, host = userhost.partition('@')
return user or None
@classmethod
def from_group(cls, group):
return cls(group) if group else None
def _ping_ponger(connection, event):
"A global handler for the 'ping' event"
connection.pong(event.target)
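# --- Usage sketch (not part of the original module) ---
# A minimal, hedged example of the SimpleIRCClient pattern documented above:
# subclass it and define on_<event> methods.  The server, channel and nick
# below are hypothetical placeholders.
#
#     class EchoBot(SimpleIRCClient):
#         def on_welcome(self, connection, event):
#             connection.join("#example")
#
#         def on_pubmsg(self, connection, event):
#             connection.privmsg(event.target, event.arguments[0])
#
#     bot = EchoBot()
#     bot.connect("irc.example.net", 6667, "echobot")
#     bot.start()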
|
jaraco/irc
|
irc/client.py
|
Python
|
mit
| 41,993
|
#!/usr/bin/env python
# coding: utf-8
# # rede_gephi_com_ipca_csv
# In[6]:
ano_eleicao = '2014'
rede =f'rede{ano_eleicao}'
csv_dir = f'/home/neilor/{rede}'
# In[7]:
dbschema = f'rede{ano_eleicao}'
table_edges = f"{dbschema}.gephi_edges_com_ipca_2018"
table_nodes = f"{dbschema}.gephi_nodes_com_ipca_2018"
table_receitas = f"{dbschema}.receitas_com_ipca_2018"
table_candidaturas = f"{dbschema}.candidaturas_com_ipca_2018"
table_municipios = f"{dbschema}.municipios_{ano_eleicao}"
# In[8]:
import sys
sys.path.append('../')
import mod_tse as mtse
# In[9]:
import os
home = os.environ["HOME"]
local_dir = f'{home}/temp'
# In[10]:
mtse.execute_query(f"update {table_municipios} set rede= 'N';")
# ## BRAZIL-WIDE NETWORK (REDE BRASIL)
# In[11]:
def salva_rede_brasil(csv_dir,rede):
rede_dir_BR = f'{csv_dir}/{rede}_Brasil'
os.makedirs(rede_dir_BR)
edges_csv_query=f"""copy
(
select * from {table_edges}
)
TO '{rede_dir_BR}/{rede}_Brasil_edges.csv' DELIMITER ';' CSV HEADER;
"""
mtse.execute_query(edges_csv_query)
nodes_csv_query=f"""copy
(
select * from {table_nodes}
)
TO '{rede_dir_BR}/{rede}_Brasil_nodes.csv' DELIMITER ';' CSV HEADER;
"""
mtse.execute_query(nodes_csv_query)
candidaturas_csv_query=f"""copy
(
select * from {table_candidaturas}
)
TO '{rede_dir_BR}/{rede}_Brasil_candidaturas.csv' DELIMITER ';' CSV HEADER;
"""
mtse.execute_query(candidaturas_csv_query)
receitas_csv_query=f"""copy
(
select * from {table_receitas}
)
TO '{rede_dir_BR}/{rede}_Brasil_receitas.csv' DELIMITER ';' CSV HEADER;
"""
mtse.execute_query(receitas_csv_query)
# ## NETWORKS BY STATE (REDES POR ESTADO)
# In[12]:
def salva_rede_csv_uf(csv_dir,rede,sg_uf):
rede_dir_uf = f'{csv_dir}/{rede}_{sg_uf}'
os.makedirs(rede_dir_uf)
edges_query=f"""copy
(
select * from {table_edges} where ue ='{sg_uf}'
)
TO '{rede_dir_uf}/{rede}_{sg_uf}_edges.csv' DELIMITER ';' CSV HEADER;
"""
mtse.execute_query(edges_query)
nodes_query=f"""copy
(
select * from {table_nodes} where ue ='{sg_uf}'
)
TO '{rede_dir_uf}/{rede}_{sg_uf}_nodes.csv' DELIMITER ';' CSV HEADER;
"""
mtse.execute_query(nodes_query)
candidaturas_csv_query=f"""copy
(
select * from {table_candidaturas} where sg_uf ='{sg_uf}'
)
TO '{rede_dir_uf}/{rede}_{sg_uf}_candidaturas.csv' DELIMITER ';' CSV HEADER;
"""
mtse.execute_query(candidaturas_csv_query)
receitas_csv_query=f"""copy
(
select * from {table_receitas} where receptor_uf ='{sg_uf}'
)
TO '{rede_dir_uf}/{rede}_{sg_uf}_receitas.csv' DELIMITER ';' CSV HEADER;
"""
mtse.execute_query(receitas_csv_query)
# In[13]:
import pandas as pd
import shutil
if os.path.exists(csv_dir):
shutil.rmtree(csv_dir)
os.makedirs(csv_dir)
salva_rede_brasil(csv_dir,rede)
df_uf = mtse.pandas_query(f'select sg_uf from {table_candidaturas} group by sg_uf order by sg_uf')
for index, row in df_uf.iterrows():
sg_uf = row['sg_uf']
salva_rede_csv_uf(csv_dir,rede,sg_uf)
# In[14]:
import datetime
print(datetime.datetime.now())
# In[ ]:
|
elivre/arfe
|
e2014/SCRIPTS/055-rede2014_rede_gephi_com_ipca_csv.py
|
Python
|
mit
| 3,896
|
#!/usr/bin/env python3
from setuptools import setup, find_packages
version = '0.2.4'
setup(
name='lolbuddy',
version=version,
description='a cli tool to update league of legends itemsets and ability order from champion.gg',
author='Cyrus Roshan',
author_email='hello@cyrusroshan.com',
license='MIT',
keywords=['lol', 'league', 'league of legends', 'item', 'ability'],
url='https://github.com/CyrusRoshan/lolbuddy',
packages=find_packages(),
package_data={},
install_requires=[
'requests-futures >= 0.9.5',
],
entry_points={
'console_scripts': [
'lolbuddy=lolbuddy:main',
],
},
)
|
CyrusRoshan/lolbuddy
|
setup.py
|
Python
|
mit
| 674
|
from code_intelligence import graphql
import fire
import github3
import json
import logging
import os
import numpy as np
import pprint
import retrying
TOKEN_NAME_PREFERENCE = ["INPUT_GITHUB_PERSONAL_ACCESS_TOKEN", "GITHUB_PERSONAL_ACCESS_TOKEN", "GITHUB_TOKEN"]
TOKEN_NAME = None
for token in TOKEN_NAME_PREFERENCE:
if os.getenv(token):
TOKEN_NAME = token
break
assert TOKEN_NAME, f"You must supply one of the following environment variables: {', '.join(TOKEN_NAME_PREFERENCE)}"
PULL_REQUEST_TYPE = "PullRequest"
# TODO(jlewi): Rewrite this code to use:
# i) graphql.unpack_and_split_nodes
# ii) graphql.shard_writer
def process_notification(n):
# Mark as read anything that isn't an explicit mention.
# For PRs there doesn't seem like a simple way to detect if the notice
# is because the state changed
#
# We exclude mentions on PR because that gets overwhelmed by "/assign"
# statements. We should potentially be more discerning and not mark the
# notification as read for PRs which aren't assigned to the user.
if n.reason == "mention":
if n.subject.get("type") != "PullRequest":
return
title = n.subject.get("title")
logging.info("Marking as read: type: %s reason: %s title: %s",
n.subject.get("type"), n.reason, title)
n.mark()
def process_issue_results(data):
"""Process the data returned by the issues GraphQL request.
Args:
data: The data returned
Returns:
issues: A list of dicts; each dict is the data for some of
the results
"""
edges = data.get("data").get("repository").get("issues").get("edges")
issues = []
for e in edges:
issues.append(e["node"])
return issues
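# A minimal sketch (not part of the original module) of the response shape that
# process_issue_results() expects; the nested keys mirror the GraphQL query used
# by fetch_issues() below, and the issue data here is made up for illustration.
_EXAMPLE_ISSUE_RESPONSE = {
    "data": {
        "repository": {
            "issues": {
                "edges": [
                    {"node": {"title": "Example issue", "body": "Example body"}},
                ],
            },
        },
    },
}
assert process_issue_results(_EXAMPLE_ISSUE_RESPONSE) == [
    {"title": "Example issue", "body": "Example body"}
]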
class NotificationManager(object):
def mark_read(self, user):
token = os.getenv(TOKEN_NAME)
if not token:
      raise ValueError(("Environment variable {0} needs to be set to a GitHub "
                        "token.").format(TOKEN_NAME))
client = github3.GitHub(username=user, token=token)
notifications = client.notifications()
# https://developer.github.com/v3/activity/notifications/
#
# How do we identify closed pull requests?
for n in notifications:
process_notification(n)
def write_notifications(self, user, output):
"""Write all notifications to a file.
Args:
user: Name of the user to get notifications for
output: The file to write notifications to.
Fetches all notifications, including ones marked read,
and writes them to the supplied file.
"""
token = os.getenv(TOKEN_NAME)
if not token:
      raise ValueError(("Environment variable {0} needs to be set to a GitHub "
                        "token.").format(TOKEN_NAME))
client = github3.GitHub(username=user, token=token)
notifications = client.notifications(all=True)
# https://developer.github.com/v3/activity/notifications/
#
# How do we identify closed pull requests?
i = 0
with open(output, mode="w") as hf:
for n in notifications:
i += 1
hf.write(n.as_json())
hf.write("\n")
logging.info("Wrote %s notifications to %s", i, output)
def fetch_issues(self, org, repo, output):
"""Fetch issues for a repository
Args:
org: The org that owns the repository
repo: The directory for the repository
output: The directory to write the results
Writes the issues along with the first comments to a file in output
directory.
"""
client = graphql.GraphQLClient()
num_issues_per_page = 100
query_template = """{{
repository(owner: "{org}", name: "{repo}") {{
issues(first:{num_issues_per_page} {issues_cursor}) {{
totalCount
pageInfo {{
endCursor
hasNextPage
}}
edges{{
node {{
author {{
__typename
... on User {{
login
}}
... on Bot{{
login
}}
}}
title
body
comments(first:20, ){{
totalCount
edges {{
node {{
author {{
__typename
... on User {{
login
}}
... on Bot{{
login
}}
}}
body
createdAt
}}
}}
}}
}}
}}
}}
}}
}}
"""
shard = 0
num_pages = None
if not os.path.exists(output):
os.makedirs(output)
total_issues = None
has_next_issues_page = True
# TODO(jlewi): We should persist the cursors to disk so we can resume
# after errors
issues_cursor = None
while has_next_issues_page:
issues_cursor_text = ""
if issues_cursor:
issues_cursor_text = "after:\"{0}\"".format(issues_cursor)
query = query_template.format(org=org, repo=repo,
num_issues_per_page=num_issues_per_page,
issues_cursor=issues_cursor_text)
results = client.run_query(query)
if results.get("errors"):
logging.error("There was a problem issuing the query; errors:\n%s",
"\n".join(results.get("errors")))
return
if not total_issues:
total_issues = results["data"]["repository"]["issues"]["totalCount"]
num_pages = int(np.ceil(total_issues/float(num_issues_per_page)))
logging.info("%s/%s has a total of %s issues", org, repo, total_issues)
shard_file = os.path.join(
output, "issues-{0}-{1}-{2:03d}-of-{3:03d}.json".format(org, repo, shard,
num_pages))
issues = process_issue_results(results)
with open(shard_file, "w") as hf:
for i in issues:
json.dump(i, hf)
hf.write("\n")
logging.info("Wrote shard %s to %s", shard, shard_file)
shard += 1
page_info = results["data"]["repository"]["issues"]["pageInfo"]
issues_cursor = page_info["endCursor"]
has_next_issues_page = page_info["hasNextPage"]
def _create_client(self, user):
token = os.getenv(TOKEN_NAME)
if not token:
      raise ValueError(("Environment variable {0} needs to be set to a GitHub "
                        "token.").format(TOKEN_NAME))
client = github3.GitHub(username=user, token=token)
return client
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO,
format=('%(levelname)s|%(asctime)s'
'|%(message)s|%(pathname)s|%(lineno)d|'),
datefmt='%Y-%m-%dT%H:%M:%S',
)
fire.Fire(NotificationManager)
|
kubeflow/code-intelligence
|
py/notifications/notifications.py
|
Python
|
mit
| 6,630
|
import os
from typing import List
import click
from valohai_yaml.lint import lint_file
from valohai_cli.ctx import get_project
from valohai_cli.exceptions import CLIException
from valohai_cli.messages import success, warn
from valohai_cli.utils import get_project_directory
def validate_file(filename: str) -> int:
"""
Validate `filename`, print its errors, and return the number of errors.
:param filename: YAML filename
:return: Number of errors
"""
lr = lint_file(filename)
if not lr.messages:
success(f'{filename}: No errors')
return 0
click.secho('{filename}: {error_count} errors, {warning_count} warnings'.format(
filename=filename,
error_count=lr.error_count,
warning_count=lr.warning_count,
), fg='yellow', bold=True)
for message in lr.messages:
click.echo(' {type}: {message}'.format(**message))
click.echo()
return int(lr.error_count)
@click.command()
@click.argument('filenames', nargs=-1, type=click.Path(file_okay=True, exists=True, dir_okay=False))
def lint(filenames: List[str]) -> None:
"""
Lint (syntax-check) a valohai.yaml file.
The return code of this command will be the total number of errors found in all the files.
"""
if not filenames:
project = get_project()
directory = (project.directory if project else get_project_directory())
config_file = os.path.join(directory, 'valohai.yaml')
if not os.path.exists(config_file):
raise CLIException(f'There is no {config_file} file. Pass in the names of configuration files to lint?')
filenames = [config_file]
total_errors = 0
for filename in filenames:
total_errors += validate_file(filename)
if total_errors:
warn(f'There were {total_errors} total errors.')
click.get_current_context().exit(total_errors)
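# Usage sketch (not part of the original module), assuming the `vh` console
# entry point that valohai-cli installs:
#
#   vh lint                        # lints valohai.yaml in the current project
#   vh lint first.yaml second.yaml # lints the named files
#   echo $?                        # exit status equals the total error count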
|
valohai/valohai-cli
|
valohai_cli/commands/lint.py
|
Python
|
mit
| 1,886
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# uFlash documentation build configuration file, created by
# sphinx-quickstart on Thu Dec 17 19:01:47 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath(".."))
import uflash
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.viewcode",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = ".rst"
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "uFlash"
copyright = "2015, Nicholas H.Tollervey"
author = "Nicholas H.Tollervey"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = uflash.get_version()
# The full version, including alpha/beta/rc tags.
release = uflash.get_version()
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["_build"]
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "alabaster"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = "uFlashdoc"
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(
master_doc,
"uFlash.tex",
"uFlash Documentation",
"Nicholas H.Tollervey",
"manual",
),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, "uflash", "uFlash Documentation", [author], 1)]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"uFlash",
"uFlash Documentation",
author,
"uFlash",
"One line description of project.",
"Miscellaneous",
),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# The basename for the epub file. It defaults to the project name.
# epub_basename = project
# The HTML theme for the epub output. Since the default themes are not optimized
# for small screen space, using the same theme for HTML and epub output is
# usually not wise. This defaults to 'epub', a theme designed to save visual
# space.
# epub_theme = 'epub'
# The language of the text. It defaults to the language option
# or 'en' if the language is not set.
# epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
# epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
# epub_identifier = ''
# A unique identification for the text.
# epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
# epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
# epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
# epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
# epub_post_files = []
# A list of files that should not be packed into the epub file.
epub_exclude_files = ["search.html"]
# The depth of the table of contents in toc.ncx.
# epub_tocdepth = 3
# Allow duplicate toc entries.
# epub_tocdup = True
# Choose between 'default' and 'includehidden'.
# epub_tocscope = 'default'
# Fix unsupported image types using the Pillow.
# epub_fix_images = False
# Scale large images.
# epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# epub_show_urls = 'inline'
# If false, no index is generated.
# epub_use_index = True
|
ntoll/uflash
|
docs/conf.py
|
Python
|
mit
| 11,491
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from copy import deepcopy
from typing import Any, Awaitable, Optional, TYPE_CHECKING
from azure.core.rest import AsyncHttpResponse, HttpRequest
from azure.mgmt.core import AsyncARMPipelineClient
from msrest import Deserializer, Serializer
from .. import models
from ._configuration import ContainerRegistryManagementClientConfiguration
from .operations import ExportPipelinesOperations, ImportPipelinesOperations, Operations, PipelineRunsOperations, PrivateEndpointConnectionsOperations, RegistriesOperations, ReplicationsOperations, WebhooksOperations
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
class ContainerRegistryManagementClient:
"""ContainerRegistryManagementClient.
:ivar export_pipelines: ExportPipelinesOperations operations
:vartype export_pipelines:
azure.mgmt.containerregistry.v2019_12_01_preview.aio.operations.ExportPipelinesOperations
:ivar registries: RegistriesOperations operations
:vartype registries:
azure.mgmt.containerregistry.v2019_12_01_preview.aio.operations.RegistriesOperations
:ivar import_pipelines: ImportPipelinesOperations operations
:vartype import_pipelines:
azure.mgmt.containerregistry.v2019_12_01_preview.aio.operations.ImportPipelinesOperations
:ivar operations: Operations operations
:vartype operations: azure.mgmt.containerregistry.v2019_12_01_preview.aio.operations.Operations
:ivar pipeline_runs: PipelineRunsOperations operations
:vartype pipeline_runs:
azure.mgmt.containerregistry.v2019_12_01_preview.aio.operations.PipelineRunsOperations
:ivar private_endpoint_connections: PrivateEndpointConnectionsOperations operations
:vartype private_endpoint_connections:
azure.mgmt.containerregistry.v2019_12_01_preview.aio.operations.PrivateEndpointConnectionsOperations
:ivar replications: ReplicationsOperations operations
:vartype replications:
azure.mgmt.containerregistry.v2019_12_01_preview.aio.operations.ReplicationsOperations
:ivar webhooks: WebhooksOperations operations
:vartype webhooks:
azure.mgmt.containerregistry.v2019_12_01_preview.aio.operations.WebhooksOperations
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials_async.AsyncTokenCredential
:param subscription_id: The Microsoft Azure subscription ID.
:type subscription_id: str
:param base_url: Service URL. Default value is 'https://management.azure.com'.
:type base_url: str
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
"""
def __init__(
self,
credential: "AsyncTokenCredential",
subscription_id: str,
base_url: str = "https://management.azure.com",
**kwargs: Any
) -> None:
self._config = ContainerRegistryManagementClientConfiguration(credential=credential, subscription_id=subscription_id, **kwargs)
self._client = AsyncARMPipelineClient(base_url=base_url, config=self._config, **kwargs)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
self._serialize.client_side_validation = False
self.export_pipelines = ExportPipelinesOperations(self._client, self._config, self._serialize, self._deserialize)
self.registries = RegistriesOperations(self._client, self._config, self._serialize, self._deserialize)
self.import_pipelines = ImportPipelinesOperations(self._client, self._config, self._serialize, self._deserialize)
self.operations = Operations(self._client, self._config, self._serialize, self._deserialize)
self.pipeline_runs = PipelineRunsOperations(self._client, self._config, self._serialize, self._deserialize)
self.private_endpoint_connections = PrivateEndpointConnectionsOperations(self._client, self._config, self._serialize, self._deserialize)
self.replications = ReplicationsOperations(self._client, self._config, self._serialize, self._deserialize)
self.webhooks = WebhooksOperations(self._client, self._config, self._serialize, self._deserialize)
def _send_request(
self,
request: HttpRequest,
**kwargs: Any
) -> Awaitable[AsyncHttpResponse]:
"""Runs the network request through the client's chained policies.
>>> from azure.core.rest import HttpRequest
>>> request = HttpRequest("GET", "https://www.example.org/")
<HttpRequest [GET], url: 'https://www.example.org/'>
>>> response = await client._send_request(request)
<AsyncHttpResponse: 200 OK>
For more information on this code flow, see https://aka.ms/azsdk/python/protocol/quickstart
:param request: The network request you want to make. Required.
:type request: ~azure.core.rest.HttpRequest
:keyword bool stream: Whether the response payload will be streamed. Defaults to False.
:return: The response of your network call. Does not do error handling on your response.
:rtype: ~azure.core.rest.AsyncHttpResponse
"""
request_copy = deepcopy(request)
request_copy.url = self._client.format_url(request_copy.url)
return self._client.send_request(request_copy, **kwargs)
async def close(self) -> None:
await self._client.close()
async def __aenter__(self) -> "ContainerRegistryManagementClient":
await self._client.__aenter__()
return self
async def __aexit__(self, *exc_details) -> None:
await self._client.__aexit__(*exc_details)
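# A minimal usage sketch (not part of the generated file); it assumes the
# azure-identity package for the credential and a placeholder subscription id.
#
#   from azure.identity.aio import DefaultAzureCredential
#
#   async def list_registries():
#       async with ContainerRegistryManagementClient(
#           DefaultAzureCredential(), "<subscription-id>"
#       ) as client:
#           return [r async for r in client.registries.list()]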
|
Azure/azure-sdk-for-python
|
sdk/containerregistry/azure-mgmt-containerregistry/azure/mgmt/containerregistry/v2019_12_01_preview/aio/_container_registry_management_client.py
|
Python
|
mit
| 6,266
|
""" RUN RUN RUN !
"""
from buttersalt import create_app
from flask_script import Manager, Shell
app = create_app('default')
manager = Manager(app)
def make_shell_context():
return dict(app=app)
manager.add_command("shell", Shell(make_context=make_shell_context))
@manager.command
def test():
"""Run the unit tests."""
import unittest
tests = unittest.TestLoader().discover('tests')
unittest.TextTestRunner(verbosity=2).run(tests)
if __name__ == "__main__":
manager.run()
|
lfzyx/ButterSalt
|
manage.py
|
Python
|
mit
| 505
|
#!/usr/bin/env python
""" patrol_smach_iterator.py - Version 1.0 2013-10-23
Control a robot using SMACH to patrol a square area a specified number of times
Created for the Pi Robot Project: http://www.pirobot.org
Copyright (c) 2013 Patrick Goebel. All rights reserved.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details at:
    http://www.gnu.org/licenses/gpl.html
"""
import rospy
import smach
from smach import StateMachine, Iterator
from smach_ros import SimpleActionState, IntrospectionServer
import actionlib
from move_base_msgs.msg import MoveBaseAction, MoveBaseGoal
from geometry_msgs.msg import Pose, PoseWithCovarianceStamped, Point, Quaternion, Twist
from tf.transformations import quaternion_from_euler
from visualization_msgs.msg import Marker
from math import radians, pi
class main():
def __init__(self):
rospy.init_node('patrol_smach', anonymous=False)
# Set the shutdown function (stop the robot)
rospy.on_shutdown(self.shutdown)
# Initialize a number of parameters and variables
self.init()
# Subscribe to the move_base action server
self.move_base = actionlib.SimpleActionClient("move_base", MoveBaseAction)
rospy.loginfo("Waiting for move_base action server...")
# Wait up to 60 seconds for the action server to become available
self.move_base.wait_for_server(rospy.Duration(60))
rospy.loginfo("Connected to move_base action server")
# Track success rate of getting to the goal locations
self.n_succeeded = 0
self.n_aborted = 0
self.n_preempted = 0
self.n_patrols = 2
# Turn the waypoints into SMACH states
nav_states = list()
for waypoint in self.waypoints:
nav_goal = MoveBaseGoal()
nav_goal.target_pose.header.frame_id = 'map'
nav_goal.target_pose.pose = waypoint
move_base_state = SimpleActionState('move_base', MoveBaseAction, goal=nav_goal, result_cb=self.move_base_result_cb,
exec_timeout=rospy.Duration(10.0),
server_wait_timeout=rospy.Duration(10.0))
nav_states.append(move_base_state)
move_base_state = SimpleActionState('move_base', MoveBaseAction, goal=nav_goal, result_cb=self.move_base_result_cb,
exec_timeout=rospy.Duration(10.0))
# Initialize the top level state machine
self.sm = StateMachine(outcomes=['succeeded','aborted','preempted'])
with self.sm:
# Initialize the iterator
self.sm_patrol_iterator = Iterator(outcomes = ['succeeded','preempted','aborted'],
input_keys = [],
it = lambda: range(0, self.n_patrols),
output_keys = [],
it_label = 'index',
exhausted_outcome = 'succeeded')
with self.sm_patrol_iterator:
# Initialize the patrol state machine
self.sm_patrol = StateMachine(outcomes=['succeeded','aborted','preempted','continue'])
# Add the states to the state machine with the appropriate transitions
with self.sm_patrol:
StateMachine.add('NAV_STATE_0', nav_states[0], transitions={'succeeded':'NAV_STATE_1','aborted':'NAV_STATE_1','preempted':'NAV_STATE_1'})
StateMachine.add('NAV_STATE_1', nav_states[1], transitions={'succeeded':'NAV_STATE_2','aborted':'NAV_STATE_2','preempted':'NAV_STATE_2'})
StateMachine.add('NAV_STATE_2', nav_states[2], transitions={'succeeded':'NAV_STATE_3','aborted':'NAV_STATE_3','preempted':'NAV_STATE_3'})
StateMachine.add('NAV_STATE_3', nav_states[3], transitions={'succeeded':'NAV_STATE_4','aborted':'NAV_STATE_4','preempted':'NAV_STATE_4'})
StateMachine.add('NAV_STATE_4', nav_states[0], transitions={'succeeded':'continue','aborted':'continue','preempted':'continue'})
# Close the sm_patrol machine and add it to the iterator
Iterator.set_contained_state('PATROL_STATE', self.sm_patrol, loop_outcomes=['continue'])
# Close the top level state machine
StateMachine.add('PATROL_ITERATOR', self.sm_patrol_iterator, {'succeeded':'succeeded', 'aborted':'aborted'})
# Create and start the SMACH introspection server
intro_server = IntrospectionServer('patrol', self.sm, '/SM_ROOT')
intro_server.start()
# Execute the state machine
sm_outcome = self.sm.execute()
rospy.loginfo('State Machine Outcome: ' + str(sm_outcome))
intro_server.stop()
def move_base_result_cb(self, userdata, status, result):
if status == actionlib.GoalStatus.SUCCEEDED:
self.n_succeeded += 1
elif status == actionlib.GoalStatus.ABORTED:
self.n_aborted += 1
elif status == actionlib.GoalStatus.PREEMPTED:
self.n_preempted += 1
try:
rospy.loginfo("Success rate: " + str(100.0 * self.n_succeeded / (self.n_succeeded + self.n_aborted + self.n_preempted)))
except:
pass
def init(self):
# How big is the square we want the robot to patrol?
self.square_size = rospy.get_param("~square_size", 1.0) # meters
# How many times should we execute the patrol loop
        self.n_patrols = rospy.get_param("~n_patrols", 3) # number of times to patrol the square
# Create a list to hold the target quaternions (orientations)
quaternions = list()
# First define the corner orientations as Euler angles
euler_angles = (pi/2, pi, 3*pi/2, 0)
# Then convert the angles to quaternions
for angle in euler_angles:
q_angle = quaternion_from_euler(0, 0, angle, axes='sxyz')
q = Quaternion(*q_angle)
quaternions.append(q)
# Create a list to hold the waypoint poses
self.waypoints = list()
# Append each of the four waypoints to the list. Each waypoint
# is a pose consisting of a position and orientation in the map frame.
self.waypoints.append(Pose(Point(0.0, 0.0, 0.0), quaternions[3]))
self.waypoints.append(Pose(Point(self.square_size, 0.0, 0.0), quaternions[0]))
self.waypoints.append(Pose(Point(self.square_size, self.square_size, 0.0), quaternions[1]))
self.waypoints.append(Pose(Point(0.0, self.square_size, 0.0), quaternions[2]))
# Initialize the waypoint visualization markers for RViz
self.init_waypoint_markers()
# Set a visualization marker at each waypoint
for waypoint in self.waypoints:
p = Point()
p = waypoint.position
self.waypoint_markers.points.append(p)
# Publisher to manually control the robot (e.g. to stop it)
self.cmd_vel_pub = rospy.Publisher('cmd_vel', Twist)
rospy.loginfo("Starting SMACH test")
# Publish the waypoint markers
self.marker_pub.publish(self.waypoint_markers)
rospy.sleep(1)
self.marker_pub.publish(self.waypoint_markers)
def init_waypoint_markers(self):
# Set up our waypoint markers
marker_scale = 0.2
marker_lifetime = 0 # 0 is forever
marker_ns = 'waypoints'
marker_id = 0
marker_color = {'r': 1.0, 'g': 0.7, 'b': 1.0, 'a': 1.0}
# Define a marker publisher.
self.marker_pub = rospy.Publisher('waypoint_markers', Marker)
# Initialize the marker points list.
self.waypoint_markers = Marker()
self.waypoint_markers.ns = marker_ns
self.waypoint_markers.id = marker_id
self.waypoint_markers.type = Marker.CUBE_LIST
self.waypoint_markers.action = Marker.ADD
self.waypoint_markers.lifetime = rospy.Duration(marker_lifetime)
self.waypoint_markers.scale.x = marker_scale
self.waypoint_markers.scale.y = marker_scale
self.waypoint_markers.color.r = marker_color['r']
self.waypoint_markers.color.g = marker_color['g']
self.waypoint_markers.color.b = marker_color['b']
self.waypoint_markers.color.a = marker_color['a']
self.waypoint_markers.header.frame_id = 'odom'
self.waypoint_markers.header.stamp = rospy.Time.now()
self.waypoint_markers.points = list()
def shutdown(self):
rospy.loginfo("Stopping the robot...")
self.sm_patrol.request_preempt()
self.cmd_vel_pub.publish(Twist())
rospy.sleep(1)
if __name__ == '__main__':
try:
main()
except rospy.ROSInterruptException:
rospy.loginfo("SMACH test finished.")
|
fujy/ROS-Project
|
src/rbx2/rbx2_tasks/nodes/patrol_smach_iterator.py
|
Python
|
mit
| 9,741
|
# Copyright (C) 2012-2019 Ben Kurtovic <ben.kurtovic@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
This module contains various "context" definitions, which are essentially flags
set during the tokenization process, either on the current parse stack (local
contexts) or affecting all stacks (global contexts). They represent the context
the tokenizer is in, such as inside a template's name definition, or inside a
level-two heading. This is used to determine what tokens are valid at the
current point and also if the current parsing route is invalid.
The tokenizer stores context as an integer, with these definitions bitwise OR'd
to set them, AND'd to check if they're set, and XOR'd to unset them. The
advantage of this is that contexts can have sub-contexts (as ``FOO == 0b11``
will cover ``BAR == 0b10`` and ``BAZ == 0b01``).
Local (stack-specific) contexts:
* :const:`TEMPLATE`
* :const:`TEMPLATE_NAME`
* :const:`TEMPLATE_PARAM_KEY`
* :const:`TEMPLATE_PARAM_VALUE`
* :const:`ARGUMENT`
* :const:`ARGUMENT_NAME`
* :const:`ARGUMENT_DEFAULT`
* :const:`WIKILINK`
* :const:`WIKILINK_TITLE`
* :const:`WIKILINK_TEXT`
* :const:`EXT_LINK`
* :const:`EXT_LINK_URI`
* :const:`EXT_LINK_TITLE`
* :const:`HEADING`
* :const:`HEADING_LEVEL_1`
* :const:`HEADING_LEVEL_2`
* :const:`HEADING_LEVEL_3`
* :const:`HEADING_LEVEL_4`
* :const:`HEADING_LEVEL_5`
* :const:`HEADING_LEVEL_6`
* :const:`TAG`
* :const:`TAG_OPEN`
* :const:`TAG_ATTR`
* :const:`TAG_BODY`
* :const:`TAG_CLOSE`
* :const:`STYLE`
* :const:`STYLE_ITALICS`
* :const:`STYLE_BOLD`
* :const:`STYLE_PASS_AGAIN`
* :const:`STYLE_SECOND_PASS`
* :const:`DL_TERM`
* :const:`SAFETY_CHECK`
* :const:`HAS_TEXT`
* :const:`FAIL_ON_TEXT`
* :const:`FAIL_NEXT`
* :const:`FAIL_ON_LBRACE`
* :const:`FAIL_ON_RBRACE`
* :const:`FAIL_ON_EQUALS`
* :const:`HAS_TEMPLATE`
* :const:`TABLE`
* :const:`TABLE_OPEN`
* :const:`TABLE_CELL_OPEN`
* :const:`TABLE_CELL_STYLE`
* :const:`TABLE_TD_LINE`
* :const:`TABLE_TH_LINE`
* :const:`TABLE_CELL_LINE_CONTEXTS`
* :const:`HTML_ENTITY`
Global contexts:
* :const:`GL_HEADING`
Aggregate contexts:
* :const:`FAIL`
* :const:`UNSAFE`
* :const:`DOUBLE`
* :const:`NO_WIKILINKS`
* :const:`NO_EXT_LINKS`
"""
# Local contexts:
TEMPLATE_NAME = 1 << 0
TEMPLATE_PARAM_KEY = 1 << 1
TEMPLATE_PARAM_VALUE = 1 << 2
TEMPLATE = TEMPLATE_NAME + TEMPLATE_PARAM_KEY + TEMPLATE_PARAM_VALUE
ARGUMENT_NAME = 1 << 3
ARGUMENT_DEFAULT = 1 << 4
ARGUMENT = ARGUMENT_NAME + ARGUMENT_DEFAULT
WIKILINK_TITLE = 1 << 5
WIKILINK_TEXT = 1 << 6
WIKILINK = WIKILINK_TITLE + WIKILINK_TEXT
EXT_LINK_URI = 1 << 7
EXT_LINK_TITLE = 1 << 8
EXT_LINK = EXT_LINK_URI + EXT_LINK_TITLE
HEADING_LEVEL_1 = 1 << 9
HEADING_LEVEL_2 = 1 << 10
HEADING_LEVEL_3 = 1 << 11
HEADING_LEVEL_4 = 1 << 12
HEADING_LEVEL_5 = 1 << 13
HEADING_LEVEL_6 = 1 << 14
HEADING = (
HEADING_LEVEL_1
+ HEADING_LEVEL_2
+ HEADING_LEVEL_3
+ HEADING_LEVEL_4
+ HEADING_LEVEL_5
+ HEADING_LEVEL_6
)
TAG_OPEN = 1 << 15
TAG_ATTR = 1 << 16
TAG_BODY = 1 << 17
TAG_CLOSE = 1 << 18
TAG = TAG_OPEN + TAG_ATTR + TAG_BODY + TAG_CLOSE
STYLE_ITALICS = 1 << 19
STYLE_BOLD = 1 << 20
STYLE_PASS_AGAIN = 1 << 21
STYLE_SECOND_PASS = 1 << 22
STYLE = STYLE_ITALICS + STYLE_BOLD + STYLE_PASS_AGAIN + STYLE_SECOND_PASS
DL_TERM = 1 << 23
HAS_TEXT = 1 << 24
FAIL_ON_TEXT = 1 << 25
FAIL_NEXT = 1 << 26
FAIL_ON_LBRACE = 1 << 27
FAIL_ON_RBRACE = 1 << 28
FAIL_ON_EQUALS = 1 << 29
HAS_TEMPLATE = 1 << 30
SAFETY_CHECK = (
HAS_TEXT
+ FAIL_ON_TEXT
+ FAIL_NEXT
+ FAIL_ON_LBRACE
+ FAIL_ON_RBRACE
+ FAIL_ON_EQUALS
+ HAS_TEMPLATE
)
TABLE_OPEN = 1 << 31
TABLE_CELL_OPEN = 1 << 32
TABLE_CELL_STYLE = 1 << 33
TABLE_ROW_OPEN = 1 << 34
TABLE_TD_LINE = 1 << 35
TABLE_TH_LINE = 1 << 36
TABLE_CELL_LINE_CONTEXTS = TABLE_TD_LINE + TABLE_TH_LINE + TABLE_CELL_STYLE
TABLE = (
TABLE_OPEN
+ TABLE_CELL_OPEN
+ TABLE_CELL_STYLE
+ TABLE_ROW_OPEN
+ TABLE_TD_LINE
+ TABLE_TH_LINE
)
HTML_ENTITY = 1 << 37
# Global contexts:
GL_HEADING = 1 << 0
# Aggregate contexts:
FAIL = TEMPLATE + ARGUMENT + WIKILINK + EXT_LINK_TITLE + HEADING + TAG + STYLE + TABLE
UNSAFE = (
TEMPLATE_NAME
+ WIKILINK_TITLE
+ EXT_LINK_TITLE
+ TEMPLATE_PARAM_KEY
+ ARGUMENT_NAME
+ TAG_CLOSE
)
DOUBLE = TEMPLATE_PARAM_KEY + TAG_CLOSE + TABLE_ROW_OPEN
NO_WIKILINKS = TEMPLATE_NAME + ARGUMENT_NAME + WIKILINK_TITLE + EXT_LINK_URI
NO_EXT_LINKS = TEMPLATE_NAME + ARGUMENT_NAME + WIKILINK_TITLE + EXT_LINK
def describe(context):
"""Return a string describing the given context value, for debugging."""
flags = []
for name, value in globals().items():
if not isinstance(value, int) or name.startswith("GL_"):
continue
if bin(value).count("1") != 1:
continue # Hacky way to skip aggregate contexts
if context & value:
flags.append((name, value))
flags.sort(key=lambda it: it[1])
return "|".join(it[0] for it in flags)
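if __name__ == "__main__":
    # A minimal sketch (not part of the original module) of the bitwise operations
    # described in the module docstring: OR to set a context flag, AND to test it,
    # XOR to unset it. The constants used here are the ones defined above.
    ctx = 0
    ctx |= TEMPLATE_NAME             # set: now inside a template's name
    assert ctx & TEMPLATE            # check: TEMPLATE covers its sub-contexts
    ctx ^= TEMPLATE_NAME             # unset: we left the template name
    assert not ctx & TEMPLATE
    print(describe(TEMPLATE_NAME | WIKILINK_TITLE))  # -> TEMPLATE_NAME|WIKILINK_TITLE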
|
earwig/mwparserfromhell
|
src/mwparserfromhell/parser/contexts.py
|
Python
|
mit
| 6,097
|
from django.conf import settings
from django.db import models
import jsonfield
class Feed(models.Model):
name = models.CharField(max_length=1024)
url = models.URLField()
homepage = models.URLField()
etag = models.CharField(max_length=1024, blank=True)
last_modified = models.DateTimeField(blank=True, null=True)
subscribers = models.ManyToManyField(
settings.AUTH_USER_MODEL,
related_name='feeds',
related_query_name='feed',
)
def __str__(self):
return '{} ({})'.format(self.name, self.url)
class Entry(models.Model):
feed = models.ForeignKey(Feed)
entry_id = models.CharField(max_length=1024)
title = models.CharField(max_length=1024)
content = models.TextField()
link = models.URLField(max_length=1024)
time = models.DateTimeField()
json = jsonfield.JSONField()
updated = models.DateTimeField(auto_now=True)
class Meta:
unique_together = (('feed', 'entry_id'),)
verbose_name_plural = 'entries'
ordering = ['-time']
def __str__(self):
return '[{}] {}'.format(self.feed.name, self.title)
class UserEntryState(models.Model):
user = models.ForeignKey(
settings.AUTH_USER_MODEL,
related_name='entry_states',
related_query_name='entry_state'
)
entry = models.ForeignKey(
Entry,
related_name='user_states',
related_query_name='user_state'
)
read = models.BooleanField(default=False)
expanded = models.BooleanField(default=False)
opened = models.BooleanField(default=False)
starred = models.BooleanField(default=False)
def __str__(self):
return '{} - {}'.format(self.user.username, self.entry.title)
class UserConfig(models.Model):
user = models.ForeignKey(settings.AUTH_USER_MODEL)
mode = models.CharField(
max_length=1,
choices=(('A', 'All Items'), ('U', 'Unread Only')),
default='A',
)
sorting = models.CharField(
max_length=1,
choices=(('T', 'Time'), ('I', 'Intelligence')),
default='T',
)
def __str__(self):
return 'user={}, mode={}, sorting={}'.format(
self.user.username, self.mode, self.sorting
)
|
yhlam/cobble
|
src/reader/models.py
|
Python
|
mit
| 2,241
|
import unittest
import markovify
import os
import operator
def get_sorted(chain_json):
return sorted(chain_json, key=operator.itemgetter(0))
class MarkovifyTestBase(unittest.TestCase):
__test__ = False
def test_text_too_small(self):
text = "Example phrase. This is another example sentence."
text_model = markovify.Text(text)
assert text_model.make_sentence() is None
def test_sherlock(self):
text_model = self.sherlock_model
sent = text_model.make_sentence()
assert len(sent) != 0
def test_json(self):
text_model = self.sherlock_model
json_model = text_model.to_json()
new_text_model = markovify.Text.from_json(json_model)
sent = new_text_model.make_sentence()
assert len(sent) != 0
def test_chain(self):
text_model = self.sherlock_model
chain_json = text_model.chain.to_json()
stored_chain = markovify.Chain.from_json(chain_json)
assert get_sorted(stored_chain.to_json()) == get_sorted(chain_json)
new_text_model = markovify.Text.from_chain(chain_json)
assert get_sorted(new_text_model.chain.to_json()) == get_sorted(chain_json)
sent = new_text_model.make_sentence()
assert len(sent) != 0
def test_make_sentence_with_start(self):
text_model = self.sherlock_model
start_str = "Sherlock Holmes"
sent = text_model.make_sentence_with_start(start_str)
assert sent is not None
assert start_str == sent[: len(start_str)]
def test_make_sentence_with_start_one_word(self):
text_model = self.sherlock_model
start_str = "Sherlock"
sent = text_model.make_sentence_with_start(start_str)
assert sent is not None
assert start_str == sent[: len(start_str)]
def test_make_sentence_with_start_one_word_that_doesnt_begin_a_sentence(self):
text_model = self.sherlock_model
start_str = "dog"
with self.assertRaises(KeyError):
text_model.make_sentence_with_start(start_str)
def test_make_sentence_with_word_not_at_start_of_sentence(self):
text_model = self.sherlock_model
start_str = "dog"
sent = text_model.make_sentence_with_start(start_str, strict=False)
assert sent is not None
assert start_str == sent[: len(start_str)]
def test_make_sentence_with_words_not_at_start_of_sentence(self):
text_model = self.sherlock_model_ss3
# " I was " has 128 matches in sherlock.txt
# " was I " has 2 matches in sherlock.txt
start_str = "was I"
sent = text_model.make_sentence_with_start(start_str, strict=False, tries=50)
assert sent is not None
assert start_str == sent[: len(start_str)]
def test_make_sentence_with_words_not_at_start_of_sentence_miss(self):
text_model = self.sherlock_model_ss3
start_str = "was werewolf"
with self.assertRaises(markovify.text.ParamError):
text_model.make_sentence_with_start(start_str, strict=False, tries=50)
def test_make_sentence_with_words_not_at_start_of_sentence_of_state_size(self):
text_model = self.sherlock_model_ss2
start_str = "was I"
sent = text_model.make_sentence_with_start(start_str, strict=False, tries=50)
assert sent is not None
assert start_str == sent[: len(start_str)]
def test_make_sentence_with_words_to_many(self):
text_model = self.sherlock_model
start_str = "dog is good"
with self.assertRaises(markovify.text.ParamError):
text_model.make_sentence_with_start(start_str, strict=False)
def test_make_sentence_with_start_three_words(self):
start_str = "Sherlock Holmes was"
text_model = self.sherlock_model
try:
text_model.make_sentence_with_start(start_str)
assert False
except markovify.text.ParamError:
assert True
with self.assertRaises(Exception):
text_model.make_sentence_with_start(start_str)
text_model = self.sherlock_model_ss3
sent = text_model.make_sentence_with_start("Sherlock", tries=50)
assert markovify.chain.BEGIN not in sent
def test_short_sentence(self):
text_model = self.sherlock_model
sent = None
while sent is None:
sent = text_model.make_short_sentence(45)
assert len(sent) <= 45
def test_short_sentence_min_chars(self):
sent = None
while sent is None:
sent = self.sherlock_model.make_short_sentence(100, min_chars=50)
assert len(sent) <= 100
assert len(sent) >= 50
def test_dont_test_output(self):
text_model = self.sherlock_model
sent = text_model.make_sentence(test_output=False)
assert sent is not None
def test_max_words(self):
text_model = self.sherlock_model
sent = text_model.make_sentence(max_words=0)
assert sent is None
def test_min_words(self):
text_model = self.sherlock_model
sent = text_model.make_sentence(min_words=5)
assert len(sent.split(" ")) >= 5
def test_newline_text(self):
with open(
os.path.join(os.path.dirname(__file__), "texts/senate-bills.txt")
) as f:
model = markovify.NewlineText(f.read())
model.make_sentence()
def test_bad_corpus(self):
with self.assertRaises(Exception):
markovify.Chain(corpus="testing, testing", state_size=2)
def test_bad_json(self):
with self.assertRaises(Exception):
markovify.Chain.from_json(1)
def test_custom_regex(self):
with self.assertRaises(Exception):
markovify.NewlineText(
"This sentence contains a custom bad character: #.", reject_reg=r"#"
)
with self.assertRaises(Exception):
            markovify.NewlineText("This sentence (would normally fail")
        markovify.NewlineText("This sentence (would normally fail", well_formed=False)
class MarkovifyTest(MarkovifyTestBase):
__test__ = True
with open(os.path.join(os.path.dirname(__file__), "texts/sherlock.txt")) as f:
sherlock_text = f.read()
sherlock_model = markovify.Text(sherlock_text)
sherlock_model_ss2 = markovify.Text(sherlock_text, state_size=2)
sherlock_model_ss3 = markovify.Text(sherlock_text, state_size=3)
class MarkovifyTestCompiled(MarkovifyTestBase):
__test__ = True
with open(os.path.join(os.path.dirname(__file__), "texts/sherlock.txt")) as f:
sherlock_text = f.read()
sherlock_model = (markovify.Text(sherlock_text)).compile()
sherlock_model_ss2 = (markovify.Text(sherlock_text, state_size=2)).compile()
sherlock_model_ss3 = (markovify.Text(sherlock_text, state_size=3)).compile()
def test_recompiling(self):
model_recompile = self.sherlock_model.compile()
sent = model_recompile.make_sentence()
assert len(sent) != 0
model_recompile.compile(inplace=True)
sent = model_recompile.make_sentence()
assert len(sent) != 0
class MarkovifyTestCompiledInPlace(MarkovifyTestBase):
__test__ = True
with open(os.path.join(os.path.dirname(__file__), "texts/sherlock.txt")) as f:
sherlock_text = f.read()
sherlock_model = markovify.Text(sherlock_text)
sherlock_model_ss2 = markovify.Text(sherlock_text, state_size=2)
sherlock_model_ss3 = markovify.Text(sherlock_text, state_size=3)
sherlock_model.compile(inplace=True)
sherlock_model_ss2.compile(inplace=True)
sherlock_model_ss3.compile(inplace=True)
if __name__ == "__main__":
unittest.main()
|
jsvine/markovify
|
test/test_basic.py
|
Python
|
mit
| 7,758
|
# coding=utf-8
import json
import os
import unittest
from collections import OrderedDict
from bs4 import BeautifulSoup
from ddt import ddt, data, unpack
from elifetools import parseJATS as parser
from elifetools import rawJATS as raw_parser
from elifetools.utils import date_struct
from tests.file_utils import (
sample_xml,
json_expected_file,
read_fixture,
read_sample_xml,
)
from tests import soup_body
@ddt
class TestParseJats(unittest.TestCase):
def setUp(self):
pass
def soup(self, filename):
# return soup
return parser.parse_document(sample_xml(filename))
def json_expected(self, filename, function_name):
json_expected = None
json_file = json_expected_file(filename, function_name)
try:
with open(json_file, "rb") as json_file_fp:
json_expected = json.loads(json_file_fp.read().decode("utf-8"))
except IOError:
# file may not exist or the value is None for this article
pass
return json_expected
@data("elife-kitchen-sink.xml")
def test_parse_document(self, filename):
soup = parser.parse_document(sample_xml(filename))
self.assertTrue(isinstance(soup, BeautifulSoup))
"""
    Quick test cases used during development to check for syntax errors and coverage
"""
@unpack
@data(
(
"elife04493.xml",
"Neuron hemilineages provide the functional ground plan for the <i>Drosophila</i> ventral nervous system",
)
)
def test_full_title_json(self, filename, expected):
full_title_json = parser.full_title_json(self.soup(filename))
self.assertEqual(expected, full_title_json)
@unpack
@data(
(
"elife04490.xml",
"Both the frequency of sesquiterpene-emitting individuals and the defense capacity of individual plants determine the consequences of sesquiterpene volatile emission for individuals and their neighbors in populations of the wild tobacco <i>Nicotiana attenuata</i>.",
),
("elife_poa_e06828.xml", ""),
)
def test_impact_statement_json(self, filename, expected):
impact_statement_json = parser.impact_statement_json(self.soup(filename))
self.assertEqual(expected, impact_statement_json)
@unpack
@data(("elife-kitchen-sink.xml", 6), ("elife-02833-v2.xml", 0))
def test_ethics_json_by_file(self, filename, expected_length):
soup = parser.parse_document(sample_xml(filename))
self.assertEqual(len(parser.ethics_json(soup)), expected_length)
@unpack
@data(
(
read_fixture("test_ethics_json", "content_01.xml"),
read_fixture("test_ethics_json", "content_01_expected.py"),
),
)
def test_ethics_json(self, xml_content, expected):
soup = parser.parse_xml(xml_content)
tag_content = parser.ethics_json(soup_body(soup))
self.assertEqual(expected, tag_content)
@unpack
@data(("elife-kitchen-sink.xml", list), ("elife_poa_e06828.xml", None))
def test_acknowledgements_json_by_file(self, filename, expected):
acknowledgements_json = parser.acknowledgements_json(self.soup(filename))
if expected is None:
self.assertEqual(expected, acknowledgements_json)
else:
self.assertEqual(expected, type(acknowledgements_json))
@unpack
@data(("elife04490.xml", 3))
def test_appendices_json_by_file(self, filename, expected_len):
soup = parser.parse_document(sample_xml(filename))
tag_content = parser.appendices_json(soup)
self.assertEqual(len(tag_content), expected_len)
@unpack
@data(
# example based on 14093 v1 with many sections and content
(
read_fixture("test_appendices_json", "content_01.xml"),
read_fixture("test_appendices_json", "content_01_expected.py"),
),
# example based on 14022 v3 having a section with no title in it, with some additional scenarios
(
read_fixture("test_appendices_json", "content_02.xml"),
read_fixture("test_appendices_json", "content_02_expected.py"),
),
# appendix with no sections, based on 00666 kitchen sink
(
read_fixture("test_appendices_json", "content_03.xml"),
read_fixture("test_appendices_json", "content_03_expected.py"),
),
# appendix with a section and a box, also based on 00666 kitchen sink
(
read_fixture("test_appendices_json", "content_04.xml"),
read_fixture("test_appendices_json", "content_04_expected.py"),
),
# appendix with a boxed-text in a subsequent section based on article
(
read_fixture("test_appendices_json", "content_05.xml"),
read_fixture("test_appendices_json", "content_05_expected.py"),
),
)
def test_appendices_json(self, xml_content, expected):
soup = parser.parse_xml(xml_content)
tag_content = parser.appendices_json(soup_body(soup))
self.assertEqual(expected, tag_content)
@unpack
@data(
# appendix with inline-graphic, based on 17092 v1
(
read_fixture("test_appendices_json_base_url", "content_01.xml"),
None,
read_fixture("test_appendices_json_base_url", "content_01_expected.py"),
),
# appendix with inline-graphic, based on 17092 v1
(
read_fixture("test_appendices_json_base_url", "content_02.xml"),
"https://example.org/",
read_fixture("test_appendices_json_base_url", "content_02_expected.py"),
),
)
def test_appendices_json_with_base_url(self, xml_content, base_url, expected):
soup = parser.parse_xml(xml_content)
tag_content = parser.appendices_json(soup_body(soup), base_url)
self.assertEqual(expected, tag_content)
@unpack
@data(
(
"elife04490.xml",
[
"<i>Nicotiana attenuata</i>",
"<i>Manduca sexta</i>",
u"Geocoris spp.",
"<i>Trichobaris mucorea</i>",
u"direct and indirect defense",
u"diversity",
],
),
("elife07586.xml", []),
)
def test_keywords_json(self, filename, expected):
keywords_json = parser.keywords_json(self.soup(filename))
self.assertEqual(expected, keywords_json)
@unpack
@data(
('<root xmlns:xlink="http://www.w3.org/1999/xlink"/>', []),
(
'<root xmlns:xlink="http://www.w3.org/1999/xlink"><kwd-group kwd-group-type="research-organism"><title>Research organism</title><kwd><italic>A. thaliana</italic></kwd><kwd>Other</kwd></kwd-group></root>',
["<i>A. thaliana</i>"],
),
(
'<root xmlns:xlink="http://www.w3.org/1999/xlink"><kwd-group kwd-group-type="research-organism"><title>Research organism</title><kwd>None</kwd></kwd-group></root>',
[],
),
)
def test_research_organism_json(self, xml_content, expected):
soup = parser.parse_xml(xml_content)
tag_content = parser.research_organism_json(soup_body(soup))
self.assertEqual(expected, tag_content)
@unpack
@data(
("<root></root>", None),
("<root><ack></ack></root>", None),
(
"<root><ack><title>Acknowledgements</title><p>Paragraph</p></ack></root>",
[OrderedDict([("type", "paragraph"), ("text", u"Paragraph")])],
),
(
"<root><ack><title>Acknowledgements</title><p>Paragraph</p><p><italic>italic</italic></p></ack></root>",
[
OrderedDict([("type", "paragraph"), ("text", u"Paragraph")]),
OrderedDict([("type", "paragraph"), ("text", u"<i>italic</i>")]),
],
),
)
def test_acknowledgements_json(self, xml_content, expected):
soup = parser.parse_xml(xml_content)
tag_content = parser.acknowledgements_json(soup_body(soup))
self.assertEqual(expected, tag_content)
@unpack
@data(
("elife02304.xml", 2),
("elife02935.xml", 2),
)
def test_datasets_json_by_file(self, filename, expected_len):
soup = parser.parse_document(sample_xml(filename))
tag_content = parser.datasets_json(soup)
self.assertEqual(len(tag_content), expected_len)
@unpack
@data(
# Datasets from 00825 v1, has generated, used, etal and italic tags
(
read_fixture("test_datasets_json", "content_01.xml"),
read_fixture("test_datasets_json", "content_01_expected.py"),
),
# Datasets from 00666 kitchen sink, includes a DOI
(
read_fixture("test_datasets_json", "content_02.xml"),
read_fixture("test_datasets_json", "content_02_expected.py"),
),
# 10856 v2, excerpt, for adding dates to datasets missing a year value
(
read_fixture("test_datasets_json", "content_03.xml"),
read_fixture("test_datasets_json", "content_03_expected.py"),
),
# Datasets example with section sec-type data-availability
(
read_fixture("test_datasets_json", "content_04.xml"),
read_fixture("test_datasets_json", "content_04_expected.py"),
),
# Datasets example with a blank paragraph on some PoA XML files based 33420 v1
(
read_fixture("test_datasets_json", "content_05.xml"),
read_fixture("test_datasets_json", "content_05_expected.py"),
),
# Datasets example with section sec-type data-availability and using element-citation tag
(
read_fixture("test_datasets_json", "content_06.xml"),
read_fixture("test_datasets_json", "content_06_expected.py"),
),
# Datasets example for PoA XML in new style tagging, based 33420 v1
(
read_fixture("test_datasets_json", "content_07.xml"),
read_fixture("test_datasets_json", "content_07_expected.py"),
),
# Datasets example with new pub-id uri tagging
(
read_fixture("test_datasets_json", "content_08.xml"),
read_fixture("test_datasets_json", "content_08_expected.py"),
),
# Datasets example with multiple datasets availability paragraphs
(
read_fixture("test_datasets_json", "content_09.xml"),
read_fixture("test_datasets_json", "content_09_expected.py"),
),
)
def test_datasets_json(self, xml_content, expected):
soup = parser.parse_xml(xml_content)
tag_content = parser.datasets_json(soup_body(soup))
self.assertEqual(expected, tag_content)
@unpack
@data(
# Example of fn-group and fn tags included and ignored in footnotes_json output
(
read_fixture("test_footnotes_json", "content_01.xml"),
read_fixture("test_footnotes_json", "content_01_expected.py"),
),
# Test for no back tag
("<article/>", None),
# Test for no fn-group tags
("<article><back/></article>", None),
# Only fn-group tag with a content-type
(
'<article><back><fn-group content-type="competing-interest"/></back></article>',
None,
),
)
def test_footnotes_json(self, xml_content, expected):
soup = parser.parse_xml(xml_content)
tag_content = parser.footnotes_json(soup_body(soup))
self.assertEqual(expected, tag_content)
@unpack
@data(
("elife02304.xml", 2),
("elife02935.xml", 6),
)
def test_supplementary_files_json_by_file(self, filename, expected_len):
soup = parser.parse_document(sample_xml(filename))
tag_content = parser.supplementary_files_json(soup)
self.assertEqual(len(tag_content), expected_len)
@unpack
@data(
# Datasets from 16996 v1 PoA
(
read_fixture("test_supplementary_files_json", "content_01.xml"),
read_fixture("test_supplementary_files_json", "content_01_expected.py"),
),
# Datasets from 08477 v1 VoR
(
read_fixture("test_supplementary_files_json", "content_02.xml"),
read_fixture("test_supplementary_files_json", "content_02_expected.py"),
),
# 02184 v1, older style PoA has supplementary files directly in the article-meta
(
read_fixture("test_supplementary_files_json", "content_03.xml"),
read_fixture("test_supplementary_files_json", "content_03_expected.py"),
),
# 04493 v1 PoA, multiple old style supplementary files
(
read_fixture("test_supplementary_files_json", "content_04.xml"),
read_fixture("test_supplementary_files_json", "content_04_expected.py"),
),
# 10110 v1 excerpt, should only extract the supplementary-material from the back matter
(
read_fixture("test_supplementary_files_json", "content_05.xml"),
read_fixture("test_supplementary_files_json", "content_05_expected.py"),
),
# 03405 v1, label and no title tag
(
read_fixture("test_supplementary_files_json", "content_06.xml"),
read_fixture("test_supplementary_files_json", "content_06_expected.py"),
),
# 00333 v1, mimetype contains a slash so ignore sub-mimetype
(
read_fixture("test_supplementary_files_json", "content_07.xml"),
read_fixture("test_supplementary_files_json", "content_07_expected.py"),
),
# 26759 v2, example of title tag and no label tag
(
read_fixture("test_supplementary_files_json", "content_08.xml"),
read_fixture("test_supplementary_files_json", "content_08_expected.py"),
),
)
def test_supplementary_files_json(self, xml_content, expected):
soup = parser.parse_xml(xml_content)
tag_content = parser.supplementary_files_json(soup_body(soup))
self.assertEqual(expected, tag_content)
@unpack
@data(
(
"elife02304.xml",
"The funders had no role in study design, data collection and interpretation, or the decision to submit the work for publication.",
)
)
def test_funding_statement_json_by_file(self, filename, expected):
soup = parser.parse_document(sample_xml(filename))
tag_content = parser.funding_statement_json(soup)
self.assertEqual(tag_content, expected)
@unpack
@data(
('<root xmlns:xlink="http://www.w3.org/1999/xlink"></root>', None),
(
'<root xmlns:xlink="http://www.w3.org/1999/xlink"><funding-statement>Funding statement</funding-statement></root>',
"Funding statement",
),
(
'<root xmlns:xlink="http://www.w3.org/1999/xlink"><funding-statement><italic>Special</italic> funding statement</funding-statement></root>',
"<i>Special</i> funding statement",
),
)
def test_funding_statement_json(self, xml_content, expected):
soup = parser.parse_xml(xml_content)
tag_content = parser.funding_statement_json(soup)
self.assertEqual(tag_content, expected)
@unpack
@data(
("elife02304.xml", 3),
("elife02935.xml", 16),
)
def test_funding_awards_json_by_file(self, filename, expected_len):
soup = parser.parse_document(sample_xml(filename))
tag_content = parser.funding_awards_json(soup)
self.assertEqual(len(tag_content), expected_len)
@unpack
@data(
# 07383 v1 has an institution as the recipient
(
read_fixture("test_funding_awards_json", "content_01.xml"),
read_fixture("test_funding_awards_json", "content_01_expected.py"),
),
# Funding from new kitchen sink
(
read_fixture("test_funding_awards_json", "content_02.xml"),
read_fixture("test_funding_awards_json", "content_02_expected.py"),
),
# 08245 v1 edge case, unusual principal-award-recipient
(
read_fixture("test_funding_awards_json", "content_03.xml"),
read_fixture("test_funding_awards_json", "content_03_expected.py"),
),
# 00801 v1 edge case, rewrite funding award
(
read_fixture("test_funding_awards_json", "content_04.xml"),
read_fixture("test_funding_awards_json", "content_04_expected.py"),
),
# 04250 v1 edge case, rewrite to add funding award recipients
(
read_fixture("test_funding_awards_json", "content_05.xml"),
read_fixture("test_funding_awards_json", "content_05_expected.py"),
),
# 06412 v2 edge case, rewrite to add funding award recipients
(
read_fixture("test_funding_awards_json", "content_06.xml"),
read_fixture("test_funding_awards_json", "content_06_expected.py"),
),
# 03609 v1 example funding award with multiple recipients
(
read_fixture("test_funding_awards_json", "content_07.xml"),
read_fixture("test_funding_awards_json", "content_07_expected.py"),
),
)
def test_funding_awards_json(self, xml_content, expected):
soup = parser.parse_xml(xml_content)
tag_content = parser.funding_awards_json(soup)
self.assertEqual(tag_content, expected)
@unpack
@data(
# test for no sub-article
(
"<root/>",
OrderedDict(),
),
# example from elife 00666 kitchen sink XML
(
read_fixture("test_editor_evaluation", "content_01.xml"),
read_fixture("test_editor_evaluation", "content_01_expected.py"),
),
)
def test_editor_evaluation(self, xml_content, expected):
soup = parser.parse_xml(xml_content)
tag_content = parser.editor_evaluation(soup)
self.assertEqual(expected, tag_content)
@unpack
@data(
("elife-kitchen-sink.xml", None, OrderedDict()),
("elife_poa_e06828.xml", OrderedDict(), None),
)
def test_decision_letter(self, filename, expected, not_expected):
sub_article_content = parser.decision_letter(self.soup(filename))
if expected is not None:
self.assertEqual(expected, sub_article_content)
if not_expected is not None:
self.assertNotEqual(not_expected, sub_article_content)
@unpack
@data(
# 04871 v2, excerpt, remove unwanted sections
(
read_fixture("test_decision_letter", "content_01.xml"),
read_fixture("test_decision_letter", "content_01_expected.py"),
),
# 10856 v2, excerpt, add missing description via a rewrite
(
read_fixture("test_decision_letter", "content_02.xml"),
read_fixture("test_decision_letter", "content_02_expected.py"),
),
)
def test_decision_letter_edge_cases(self, xml_content, expected):
soup = parser.parse_xml(xml_content)
tag_content = parser.decision_letter(soup)
self.assertEqual(expected, tag_content)
@unpack
@data(
("elife-kitchen-sink.xml", None, OrderedDict()),
("elife_poa_e06828.xml", OrderedDict(), None),
)
def test_author_response(self, filename, expected, not_expected):
sub_article_content = parser.author_response(self.soup(filename))
if expected is not None:
self.assertEqual(expected, sub_article_content)
if not_expected is not None:
self.assertNotEqual(not_expected, sub_article_content)
@unpack
@data(
# 04871 v2, excerpt, remove unwanted sections
(
read_fixture("test_author_response", "content_01.xml"),
read_fixture("test_author_response", "content_01_expected.py"),
),
)
def test_author_response_edge_cases(self, xml_content, expected):
soup = parser.parse_xml(xml_content)
tag_content = parser.author_response(soup)
self.assertEqual(expected, tag_content)
@unpack
@data(("elife-kitchen-sink.xml", None, []), ("elife_poa_e06828.xml", [], None))
def test_body(self, filename, expected, not_expected):
body = parser.body(self.soup(filename))
if expected is not None:
self.assertEqual(expected, body)
if not_expected is not None:
self.assertNotEqual(not_expected, body)
@unpack
@data(
# very simple body, wrap in a section
(
read_fixture("test_body_json", "content_01.xml"),
read_fixture("test_body_json", "content_01_expected.py"),
),
# normal boxed-text and section, keep these
(
read_fixture("test_body_json", "content_02.xml"),
read_fixture("test_body_json", "content_02_expected.py"),
),
# boxed-text paragraphs inside the caption tag, based on 05519 v2
(
read_fixture("test_body_json", "content_03.xml"),
read_fixture("test_body_json", "content_03_expected.py"),
),
# 00301 v1 do not keep boxed-text and wrap in section
(
read_fixture("test_body_json", "content_04.xml"),
read_fixture("test_body_json", "content_04_expected.py"),
),
# 00646 v1 boxed text to keep, and wrap in section
(
read_fixture("test_body_json", "content_05.xml"),
read_fixture("test_body_json", "content_05_expected.py"),
),
        # 02945 v1, correction article, keep the boxed-text
(
read_fixture("test_body_json", "content_06.xml"),
read_fixture("test_body_json", "content_06_expected.py"),
),
        # based on 12844 v1, edge case to rewrite unacceptable sections that have no titles
(
read_fixture("test_body_json", "content_07.xml"),
read_fixture("test_body_json", "content_07_expected.py"),
),
        # based on 09977 v2, edge case to remove a specific section with no content
(
read_fixture("test_body_json", "content_08.xml"),
read_fixture("test_body_json", "content_08_expected.py"),
),
        # based on 09977 v3, edge case to keep a specific section that does have content
(
read_fixture("test_body_json", "content_09.xml"),
read_fixture("test_body_json", "content_09_expected.py"),
),
        # based on 05519 v2, edge case to remove an unwanted section
(
read_fixture("test_body_json", "content_10.xml"),
read_fixture("test_body_json", "content_10_expected.py"),
),
# 00013 v1, excerpt, add an id to a section
(
read_fixture("test_body_json", "content_11.xml"),
read_fixture("test_body_json", "content_11_expected.py"),
),
# 04232 v2, excerpt, remove an unwanted section
(
read_fixture("test_body_json", "content_12.xml"),
read_fixture("test_body_json", "content_12_expected.py"),
),
# 07157 v1, add title to a section
(
read_fixture("test_body_json", "content_13.xml"),
read_fixture("test_body_json", "content_13_expected.py"),
),
# excerpt of 23383 v1 where there is a boxed-text that was formerly stripped away, check it remains
(
read_fixture("test_body_json", "content_14.xml"),
read_fixture("test_body_json", "content_14_expected.py"),
),
)
def test_body_json(self, xml_content, expected):
soup = parser.parse_xml(xml_content)
tag_content = parser.body_json(soup)
self.assertEqual(expected, tag_content)
@unpack
@data(
(
read_fixture("test_body_json_base_url", "content_01.xml"),
None,
read_fixture("test_body_json_base_url", "content_01_expected.py"),
),
(
read_fixture("test_body_json_base_url", "content_02.xml"),
"https://example.org/",
read_fixture("test_body_json_base_url", "content_02_expected.py"),
),
(
read_fixture("test_body_json_base_url", "content_03.xml"),
None,
read_fixture("test_body_json_base_url", "content_03_expected.py"),
),
(
read_fixture("test_body_json_base_url", "content_04.xml"),
"https://example.org/",
read_fixture("test_body_json_base_url", "content_04_expected.py"),
),
)
def test_body_json_with_base_url(self, xml_content, base_url, expected):
soup = parser.parse_xml(xml_content)
tag_content = parser.body_json(soup, base_url=base_url)
self.assertEqual(expected, tag_content)
@unpack
@data(
# 08647 v1 PoA editor has blank string in the affiliation tags
(
read_fixture("test_editors_json", "content_01.xml"),
read_fixture("test_editors_json", "content_01_expected.py"),
),
# 09560 v1 example, has two editors
(
read_fixture("test_editors_json", "content_02.xml"),
read_fixture("test_editors_json", "content_02_expected.py"),
),
# 23804 v3 example, has no role tag and is rewritten
(
read_fixture("test_editors_json", "content_03.xml"),
read_fixture("test_editors_json", "content_03_expected.py"),
),
# 22028 v1 example, has a country but no institution
(
read_fixture("test_editors_json", "content_04.xml"),
read_fixture("test_editors_json", "content_04_expected.py"),
),
# kitchen sink example, has senior editor and reviewers
(
read_fixture("test_editors_json", "content_05.xml"),
read_fixture("test_editors_json", "content_05_expected.py"),
),
        # kitchen sink example, reviewing editor and senior editor are the same person
(
read_fixture("test_editors_json", "content_06.xml"),
read_fixture("test_editors_json", "content_06_expected.py"),
),
        # reviewing editor and senior editor are the same person in both mentions, plus a reviewer
(
read_fixture("test_editors_json", "content_07.xml"),
read_fixture("test_editors_json", "content_07_expected.py"),
),
)
def test_editors_json_edge_cases(self, xml_content, expected):
soup = parser.parse_xml(xml_content)
tag_content = parser.editors_json(soup_body(soup))
self.assertEqual(expected, tag_content)
@unpack
@data(
# Author with phone number, 02833 v2
(
read_fixture("test_authors_json", "content_01.xml"),
read_fixture("test_authors_json", "content_01_expected.py"),
),
# 02935 v1, group authors (collab) but no members of those groups
(
read_fixture("test_authors_json", "content_02.xml"),
read_fixture("test_authors_json", "content_02_expected.py"),
),
# 02935 v2, excerpt for group author parsing
(
read_fixture("test_authors_json", "content_03.xml"),
read_fixture("test_authors_json", "content_03_expected.py"),
),
# 09376 v1, excerpt to rewrite an author ORCID
(
read_fixture("test_authors_json", "content_04.xml"),
read_fixture("test_authors_json", "content_04_expected.py"),
),
# 06956 v1, excerpt, add an affiliation name to an author
(
read_fixture("test_authors_json", "content_05.xml"),
read_fixture("test_authors_json", "content_05_expected.py"),
),
        # 21337 v1, example to pick up the email of corresponding authors from author notes
(
read_fixture("test_authors_json", "content_06.xml"),
read_fixture("test_authors_json", "content_06_expected.py"),
),
# 00007 v1, example with a present address
(
read_fixture("test_authors_json", "content_07.xml"),
read_fixture("test_authors_json", "content_07_expected.py"),
),
# 00666 kitchen sink (with extra whitespace removed), example to pick up the email of corresponding author
(
read_fixture("test_authors_json", "content_08.xml"),
read_fixture("test_authors_json", "content_08_expected.py"),
),
# 09594 v2 example of non-standard footnote fn-type other and id starting with 'fn'
(
read_fixture("test_authors_json", "content_09.xml"),
read_fixture("test_authors_json", "content_09_expected.py"),
),
# 21230 v1 example of author role to parse
(
read_fixture("test_authors_json", "content_10.xml"),
read_fixture("test_authors_json", "content_10_expected.py"),
),
# 21230 v1 as an example of competing interests rewrite rule for elife
(
read_fixture("test_authors_json", "content_11.xml"),
read_fixture("test_authors_json", "content_11_expected.py"),
),
# 00351 v1 example of author role to parse
(
read_fixture("test_authors_json", "content_12.xml"),
read_fixture("test_authors_json", "content_12_expected.py"),
),
# 21723 v1 another example of author role
(
read_fixture("test_authors_json", "content_13.xml"),
read_fixture("test_authors_json", "content_13_expected.py"),
),
# author with a bio based on kitchen sink 00777
(
read_fixture("test_authors_json", "content_14.xml"),
read_fixture("test_authors_json", "content_14_expected.py"),
),
# 02273 v1 example, equal contribution to parse
(
read_fixture("test_authors_json", "content_15.xml"),
read_fixture("test_authors_json", "content_15_expected.py"),
),
        # 09148 v1 example, an author with two email addresses
(
read_fixture("test_authors_json", "content_16.xml"),
read_fixture("test_authors_json", "content_16_expected.py"),
),
# example of new kitchen sink group authors with multiple email addresses, based on 17044 v1
(
read_fixture("test_authors_json", "content_17.xml"),
read_fixture("test_authors_json", "content_17_expected.py"),
),
# example of inline email address
(
read_fixture("test_authors_json", "content_18.xml"),
read_fixture("test_authors_json", "content_18_expected.py"),
),
)
def test_authors_json_edge_cases(self, xml_content, expected):
soup = parser.parse_xml(xml_content)
tag_content = parser.authors_json(soup_body(soup))
self.assertEqual(expected, tag_content)
@unpack
@data(
# 00855 v1, example of just person authors
(
read_fixture("test_author_line", "content_01.xml"),
u"Randy Schekman, Mark Patterson",
),
# 08714 v1, group authors only
(
read_fixture("test_author_line", "content_02.xml"),
u"MalariaGEN Plasmodium falciparum Community Project",
),
# elife00351.xml, one author
(
read_fixture("test_author_line", "content_03.xml"),
"Richard Smith",
),
# elife_poa_e06828.xml, multiple authors adds et al.
(
read_fixture("test_author_line", "content_04.xml"),
"Michael S Fleming et al.",
),
)
def test_author_line_edge_cases(self, xml_content, expected):
soup = parser.parse_xml(xml_content)
tag_content = parser.author_line(soup_body(soup))
self.assertEqual(expected, tag_content)
@unpack
@data(
(None, None),
(["Randy Schekman"], "Randy Schekman"),
(["Randy Schekman", "Mark Patterson"], "Randy Schekman, Mark Patterson"),
(["Randy Schekman", "Mark Patterson", "eLife"], "Randy Schekman et al."),
)
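    # A sketch of the rule suggested by the fixtures above (inferred from the
    # expected values, not from parser documentation): one or two names are joined
    # with ", ", while three or more names collapse to the first name plus "et al."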
def test_format_author_line(self, author_names, expected):
self.assertEqual(parser.format_author_line(author_names), expected)
@data(
# aff tag linked via an rid to id attribute
(
read_fixture("test_format_aff", "content_01.xml"),
read_fixture("test_format_aff", "content_01_expected.py"),
),
# inline aff tag example, no id attribute
(
read_fixture("test_format_aff", "content_02.xml"),
read_fixture("test_format_aff", "content_02_expected.py"),
),
# aff example mostly just text with no subtags
(
read_fixture("test_format_aff", "content_03.xml"),
read_fixture("test_format_aff", "content_03_expected.py"),
),
# aff example with ror institution-id
(
read_fixture("test_format_aff", "content_04.xml"),
read_fixture("test_format_aff", "content_04_expected.py"),
),
        # edge case, no aff tag or the rid does not match an aff id
(None, (None, {})),
)
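    # The edge case above shows format_aff() returns a two-element tuple, and
    # (None, {}) when no aff tag is found; the populated cases live in the fixtures.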
@unpack
def test_format_aff_edge_cases(self, xml_content, expected):
if xml_content:
soup = parser.parse_xml(xml_content)
aff_tag = soup_body(soup)
else:
# where the tag is None
aff_tag = xml_content
tag_content = parser.format_aff(aff_tag)
self.assertEqual(expected, tag_content)
@unpack
@data(
(None, []),
(
[
{"name": {"preferred": "Randy Schekman"}},
{"name": {"preferred": "Mark Patterson"}},
{"name": "eLife"},
],
["Randy Schekman", "Mark Patterson", "eLife"],
),
)
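    # The fixtures above suggest each authors_json entry carries either a dict name
    # with a "preferred" value (person authors) or a plain string name (group
    # authors), and both forms are extracted; this is inferred from the data rather
    # than from a documented contract.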
def test_extract_author_line_names(self, authors_json, expected):
self.assertEqual(parser.extract_author_line_names(authors_json), expected)
@data(
# standard expected author with name tag
(
read_fixture("test_format_contributor", "content_01.xml"),
read_fixture("test_format_contributor", "content_01_expected.py"),
),
# edge case, no valid contrib tags
(
read_fixture("test_format_contributor", "content_02.xml"),
read_fixture("test_format_contributor", "content_02_expected.py"),
),
# edge case, string-name wrapper
(
read_fixture("test_format_contributor", "content_03.xml"),
read_fixture("test_format_contributor", "content_03_expected.py"),
),
# edge case, incorrect aff tag xref values will not cause an error if aff tag is not found
(
read_fixture("test_format_contributor", "content_04.xml"),
read_fixture("test_format_contributor", "content_04_expected.py"),
),
)
@unpack
def test_format_contributor_edge_cases(self, xml_content, expected):
soup = parser.parse_xml(xml_content)
contrib_tag = raw_parser.article_contributors(soup)[0]
tag_content = parser.format_contributor(contrib_tag, soup)
self.assertEqual(expected, tag_content)
@unpack
@data(("(+1) 800-555-5555", "+18005555555"))
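    # Assumption based on this single fixture: phone_number_json() strips spaces,
    # hyphens and brackets and keeps the leading "+", giving an E.164-style string.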
def test_phone_number_json(self, phone, expected):
self.assertEqual(parser.phone_number_json(phone), expected)
@unpack
@data(
(None, OrderedDict(), OrderedDict()),
# example of clinical trial contributors
(
read_fixture("test_references_json_authors", "content_01.py"),
OrderedDict([("type", u"clinical-trial")]),
read_fixture("test_references_json_authors", "content_01_expected.py"),
),
# example of patent contributors
(
read_fixture("test_references_json_authors", "content_02.py"),
OrderedDict([("type", u"patent")]),
read_fixture("test_references_json_authors", "content_02_expected.py"),
),
# example of thesis contributors
(
read_fixture("test_references_json_authors", "content_03.py"),
OrderedDict([("type", u"thesis")]),
read_fixture("test_references_json_authors", "content_03_expected.py"),
),
)
def test_references_json_authors(self, ref_authors, ref_content, expected):
references_json = parser.references_json_authors(ref_authors, ref_content)
self.assertEqual(expected, references_json)
@data(
# Web reference with no title, use the uri from 01892
(
read_fixture("test_references_json", "content_01.xml"),
read_fixture("test_references_json", "content_01_expected.py"),
),
        # Thesis title from 00626, also converted to unknown because of its comment tag
(
read_fixture("test_references_json", "content_02.xml"),
read_fixture("test_references_json", "content_02_expected.py"),
),
# fpage value with usual characters from 00170
(
read_fixture("test_references_json", "content_03.xml"),
read_fixture("test_references_json", "content_03_expected.py"),
),
# fpage contains dots, 00569
(
read_fixture("test_references_json", "content_04.xml"),
read_fixture("test_references_json", "content_04_expected.py"),
),
# pages value of in press, 00109
(
read_fixture("test_references_json", "content_05.xml"),
read_fixture("test_references_json", "content_05_expected.py"),
),
# year value of in press, 02535
(
read_fixture("test_references_json", "content_06.xml"),
read_fixture("test_references_json", "content_06_expected.py"),
),
# conference, 03532 v3
(
read_fixture("test_references_json", "content_07.xml"),
read_fixture("test_references_json", "content_07_expected.py"),
),
# web with doi but no uri, 04775 v2
(
read_fixture("test_references_json", "content_08.xml"),
read_fixture("test_references_json", "content_08_expected.py"),
),
# Clinical trial example, from new kitchen sink 00666
(
read_fixture("test_references_json", "content_09.xml"),
read_fixture("test_references_json", "content_09_expected.py"),
),
# Journal reference with no article-title, 05462 v3
(
read_fixture("test_references_json", "content_10.xml"),
read_fixture("test_references_json", "content_10_expected.py"),
),
# book reference, gets converted to book-chapter, and has no editors, 16412 v1
(
read_fixture("test_references_json", "content_11.xml"),
read_fixture("test_references_json", "content_11_expected.py"),
),
# journal reference with no article title and no lpage, 11282 v2
(
read_fixture("test_references_json", "content_12.xml"),
read_fixture("test_references_json", "content_12_expected.py"),
),
        # reference with no title uses detail as the title, 00311 v1
(
read_fixture("test_references_json", "content_13.xml"),
read_fixture("test_references_json", "content_13_expected.py"),
),
# reference of type other with no details, 15266 v1
(
read_fixture("test_references_json", "content_14.xml"),
read_fixture("test_references_json", "content_14_expected.py"),
),
# reference of type journal with no journal name, 00340 v1
(
read_fixture("test_references_json", "content_15.xml"),
read_fixture("test_references_json", "content_15_expected.py"),
),
# reference of type book with no source, 00051 v1
(
read_fixture("test_references_json", "content_16.xml"),
read_fixture("test_references_json", "content_16_expected.py"),
),
# reference of type book with no publisher, 00031 v1
(
read_fixture("test_references_json", "content_17.xml"),
read_fixture("test_references_json", "content_17_expected.py"),
),
# reference of type book with no bookTitle, 03069 v2
(
read_fixture("test_references_json", "content_18.xml"),
read_fixture("test_references_json", "content_18_expected.py"),
),
# reference with unicode in collab tag, also gets turned into unknown type, 18023 v1
(
read_fixture("test_references_json", "content_19.xml"),
read_fixture("test_references_json", "content_19_expected.py"),
),
# data reference with no source, 16800 v2
(
read_fixture("test_references_json", "content_20.xml"),
read_fixture("test_references_json", "content_20_expected.py"),
),
        # reference with an lpage and no fpage still gets a pages value set, 13905 v2
(
read_fixture("test_references_json", "content_21.xml"),
read_fixture("test_references_json", "content_21_expected.py"),
),
# Reference with a collab using italic tag, from 05423 v2
(
read_fixture("test_references_json", "content_22.xml"),
read_fixture("test_references_json", "content_22_expected.py"),
),
# Reference with a non-numeric year, from 09215 v1
(
read_fixture("test_references_json", "content_23.xml"),
read_fixture("test_references_json", "content_23_expected.py"),
),
# no year value, 00051, with json rewriting enabled by adding elife XML metadata
(
read_fixture("test_references_json", "content_24.xml"),
read_fixture("test_references_json", "content_24_expected.py"),
),
# elife 12125 v3 bib11 will get deleted in the JSON rewriting
(
read_fixture("test_references_json", "content_25.xml"),
read_fixture("test_references_json", "content_25_expected.py"),
),
# 19532 v2 bib27 has a date-in-citation and no year tag, will get rewritten
(
read_fixture("test_references_json", "content_26.xml"),
read_fixture("test_references_json", "content_26_expected.py"),
),
# 00666 kitchen sink reference of type patent
(
read_fixture("test_references_json", "content_27.xml"),
read_fixture("test_references_json", "content_27_expected.py"),
),
# 20352 v2 reference of type patent, with a rewrite of the country value
(
read_fixture("test_references_json", "content_28.xml"),
read_fixture("test_references_json", "content_28_expected.py"),
),
# 20492 v3, report type reference with no publisher-name gets converted to unknown
(
read_fixture("test_references_json", "content_29.xml"),
read_fixture("test_references_json", "content_29_expected.py"),
),
# 15504 v2, reference with a pmid
(
read_fixture("test_references_json", "content_30.xml"),
read_fixture("test_references_json", "content_30_expected.py"),
),
# 15504 v2, reference with an isbn
(
read_fixture("test_references_json", "content_31.xml"),
read_fixture("test_references_json", "content_31_expected.py"),
),
# 18296 v3, reference of type preprint
(
read_fixture("test_references_json", "content_32.xml"),
read_fixture("test_references_json", "content_32_expected.py"),
),
# 16394 v2, reference of type thesis with no publisher, convert to unknown
(
read_fixture("test_references_json", "content_33.xml"),
read_fixture("test_references_json", "content_33_expected.py"),
),
# 09672 v2, reference of type conference-proceeding with no conference
(
read_fixture("test_references_json", "content_34.xml"),
read_fixture("test_references_json", "content_34_expected.py"),
),
# 07460 v1, reference rewriting test, rewrites date and authors
(
read_fixture("test_references_json", "content_35.xml"),
read_fixture("test_references_json", "content_35_expected.py"),
),
# 20522 v1, reference rewriting year value in json output
(
read_fixture("test_references_json", "content_36.xml"),
read_fixture("test_references_json", "content_36_expected.py"),
),
# 09520 v2, reference rewriting conference data
(
read_fixture("test_references_json", "content_37.xml"),
read_fixture("test_references_json", "content_37_expected.py"),
),
# from 00666 kitchen sink example, will add a uri to the references json from the doi value
(
read_fixture("test_references_json", "content_38.xml"),
read_fixture("test_references_json", "content_38_expected.py"),
),
# from 00666 kitchen sink example, reference of type periodical
(
read_fixture("test_references_json", "content_39.xml"),
read_fixture("test_references_json", "content_39_expected.py"),
),
# 00666 kitchen sink example with a version tag
(
read_fixture("test_references_json", "content_40.xml"),
read_fixture("test_references_json", "content_40_expected.py"),
),
# 23193 v2 book references has editors and no authors
(
read_fixture("test_references_json", "content_41.xml"),
read_fixture("test_references_json", "content_41_expected.py"),
),
# example of data citation with a pub-id accession, based on article 07836
(
read_fixture("test_references_json", "content_42.xml"),
read_fixture("test_references_json", "content_42_expected.py"),
),
        # example of data citation with an object-id tag accession, gets converted to unknown because of the comment tag, based on article 07048
(
read_fixture("test_references_json", "content_43.xml"),
read_fixture("test_references_json", "content_43_expected.py"),
),
# example of data citation with a pub-id pub-id-type="archive", parse it as an accession number, based on 00666 kitchen sink example
(
read_fixture("test_references_json", "content_44.xml"),
read_fixture("test_references_json", "content_44_expected.py"),
),
# example of ref of type webpage, based on article 10.5334/sta.606, note: does not parse author names
(
read_fixture("test_references_json", "content_45.xml"),
read_fixture("test_references_json", "content_45_expected.py"),
),
# example of ref of type report, with a doi but no uri, uri gets filled in
(
read_fixture("test_references_json", "content_46.xml"),
read_fixture("test_references_json", "content_46_expected.py"),
),
# example of ref of type journal with no pages
(
read_fixture("test_references_json", "content_47.xml"),
read_fixture("test_references_json", "content_47_expected.py"),
),
# example of ref author having a suffix from the elife 00666 kitchen sink XML
(
read_fixture("test_references_json", "content_48.xml"),
read_fixture("test_references_json", "content_48_expected.py"),
),
# example of ref with an elocation-id, no pages, from elife-kitchen-sink.xml
(
read_fixture("test_references_json", "content_49.xml"),
read_fixture("test_references_json", "content_49_expected.py"),
),
# example of ref with a strange year tag value, json_rewrite invoked, from elife-09215-v1.xml
(
read_fixture("test_references_json", "content_50.xml"),
read_fixture("test_references_json", "content_50_expected.py"),
),
# example of thesis ref with a doi, its uri will be populated from the doi
(
read_fixture("test_references_json", "content_51.xml"),
read_fixture("test_references_json", "content_51_expected.py"),
),
)
@unpack
def test_references_json_edge_cases(self, xml_content, expected):
soup = parser.parse_xml(xml_content)
tag_content = parser.references_json(soup)
self.assertEqual(expected, tag_content)
@unpack
@data((None, None, None))
def test_references_publisher(self, publisher_name, publisher_loc, expected):
self.assertEqual(
parser.references_publisher(publisher_name, publisher_loc), expected
)
@unpack
@data(
(None, 0),
("<root><italic></italic></root>", 0),
("<root><sec><p>Content</p></sec></root>", 1),
)
def test_body_blocks(self, xml_content, expected_len):
if xml_content:
soup = parser.parse_xml(xml_content)
body_tag = soup_body(soup_body(soup))
else:
body_tag = xml_content
body_block_tags = parser.body_blocks(body_tag)
self.assertEqual(len(body_block_tags), expected_len)
@unpack
@data(
(None, None, None, None, None, None, None, None, None),
("title", None, None, None, None, None, "title", None, None),
(None, "label", None, None, None, None, None, "label", None),
("title", "label", "caption", None, None, None, "title", "label", None),
("title", "label", "caption", True, None, None, "title", "label", "caption"),
(None, "label", None, None, True, None, "label", None, None),
(None, "label", None, None, True, True, "label", None, None),
("title", None, None, None, True, True, None, "title", None),
("title", None, None, None, None, True, None, "title", None),
("title", "label", None, None, True, None, "title", "label", None),
("title.", None, None, None, None, True, None, "title", None),
(None, "label:", None, None, True, None, "label:", None, None),
)
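    # Summary inferred from the expected values above (not from parser docs):
    # set_caption controls whether caption content is emitted, prefer_title promotes
    # a lone label into the title, prefer_label demotes a lone title into the label
    # (stripping a trailing full stop), and values supplied for both fields are kept.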
def test_body_block_title_label_caption(
self,
title_value,
label_value,
caption_content,
set_caption,
prefer_title,
prefer_label,
expected_title,
expected_label,
expected_caption,
):
tag_content = OrderedDict()
parser.body_block_title_label_caption(
tag_content,
title_value,
label_value,
caption_content,
set_caption,
prefer_title,
prefer_label,
)
self.assertEqual(tag_content.get("label"), expected_label)
self.assertEqual(tag_content.get("title"), expected_title)
self.assertEqual(tag_content.get("caption"), expected_caption)
@unpack
@data(
(
read_fixture("test_body_block_content", "content_01.xml"),
read_fixture("test_body_block_content", "content_01_expected.py"),
),
(
read_fixture("test_body_block_content", "content_02.xml"),
read_fixture("test_body_block_content", "content_02_expected.py"),
),
(
read_fixture("test_body_block_content", "content_03.xml"),
read_fixture("test_body_block_content", "content_03_expected.py"),
),
(
read_fixture("test_body_block_content", "content_04.xml"),
read_fixture("test_body_block_content", "content_04_expected.py"),
),
(
read_fixture("test_body_block_content", "content_05.xml"),
read_fixture("test_body_block_content", "content_05_expected.py"),
),
(
read_fixture("test_body_block_content", "content_06.xml"),
read_fixture("test_body_block_content", "content_06_expected.py"),
),
(
read_fixture("test_body_block_content", "content_07.xml"),
read_fixture("test_body_block_content", "content_07_expected.py"),
),
(
read_fixture("test_body_block_content", "content_08.xml"),
read_fixture("test_body_block_content", "content_08_expected.py"),
),
(
read_fixture("test_body_block_content", "content_09.xml"),
read_fixture("test_body_block_content", "content_09_expected.py"),
),
(
read_fixture("test_body_block_content", "content_10.xml"),
read_fixture("test_body_block_content", "content_10_expected.py"),
),
(
read_fixture("test_body_block_content", "content_11.xml"),
read_fixture("test_body_block_content", "content_11_expected.py"),
),
(
read_fixture("test_body_block_content", "content_12.xml"),
read_fixture("test_body_block_content", "content_12_expected.py"),
),
(
read_fixture("test_body_block_content", "content_13.xml"),
read_fixture("test_body_block_content", "content_13_expected.py"),
),
(
read_fixture("test_body_block_content", "content_14.xml"),
read_fixture("test_body_block_content", "content_14_expected.py"),
),
(
read_fixture("test_body_block_content", "content_15.xml"),
read_fixture("test_body_block_content", "content_15_expected.py"),
),
# example of copyright statements ending in a full stop, based on article 27041
(
read_fixture("test_body_block_content", "content_16.xml"),
read_fixture("test_body_block_content", "content_16_expected.py"),
),
# example of video with attributions, based on article 17243
(
read_fixture("test_body_block_content", "content_17.xml"),
read_fixture("test_body_block_content", "content_17_expected.py"),
),
(
read_fixture("test_body_block_content", "content_18.xml"),
read_fixture("test_body_block_content", "content_18_expected.py"),
),
(
read_fixture("test_body_block_content", "content_19.xml"),
read_fixture("test_body_block_content", "content_19_expected.py"),
),
(
read_fixture("test_body_block_content", "content_20.xml"),
read_fixture("test_body_block_content", "content_20_expected.py"),
),
(
read_fixture("test_body_block_content", "content_21.xml"),
read_fixture("test_body_block_content", "content_21_expected.py"),
),
(
read_fixture("test_body_block_content", "content_22.xml"),
read_fixture("test_body_block_content", "content_22_expected.py"),
),
(
read_fixture("test_body_block_content", "content_23.xml"),
read_fixture("test_body_block_content", "content_23_expected.py"),
),
(
read_fixture("test_body_block_content", "content_24.xml"),
read_fixture("test_body_block_content", "content_24_expected.py"),
),
# media tag that is not a video
(
read_fixture("test_body_block_content", "content_25.xml"),
read_fixture("test_body_block_content", "content_25_expected.py"),
),
(
read_fixture("test_body_block_content", "content_26.xml"),
read_fixture("test_body_block_content", "content_26_expected.py"),
),
(
read_fixture("test_body_block_content", "content_27.xml"),
read_fixture("test_body_block_content", "content_27_expected.py"),
),
(
read_fixture("test_body_block_content", "content_28.xml"),
read_fixture("test_body_block_content", "content_28_expected.py"),
),
# disp-quote content-type="editor-comment" is turned into an excerpt block
(
read_fixture("test_body_block_content", "content_29.xml"),
read_fixture("test_body_block_content", "content_29_expected.py"),
),
# 00109 v1, figure with a supplementary file that has multiple caption paragraphs
(
read_fixture("test_body_block_content", "content_30.xml"),
read_fixture("test_body_block_content", "content_30_expected.py"),
),
# code block, based on elife 20352 v2, contains new lines too
(
read_fixture("test_body_block_content", "content_31.xml"),
read_fixture("test_body_block_content", "content_31_expected.py"),
),
# example of a table with a break tag, based on 7141 v1
(
read_fixture("test_body_block_content", "content_32.xml"),
read_fixture("test_body_block_content", "content_32_expected.py"),
),
# example of a figure, based on 00007 v1
(
read_fixture("test_body_block_content", "content_33.xml"),
read_fixture("test_body_block_content", "content_33_expected.py"),
),
# example table with a caption and no title needs a title added, based on 05604 v1
(
read_fixture("test_body_block_content", "content_34.xml"),
read_fixture("test_body_block_content", "content_34_expected.py"),
),
# example video with only the DOI in the caption paragraph, based on 02277 v1
(
read_fixture("test_body_block_content", "content_35.xml"),
read_fixture("test_body_block_content", "content_35_expected.py"),
),
# example animated gif as a video in 00666 kitchen sink
(
read_fixture("test_body_block_content", "content_36.xml"),
read_fixture("test_body_block_content", "content_36_expected.py"),
),
# example of named-content to be converted to HTML from new kitchen sink 00666
(
read_fixture("test_body_block_content", "content_37.xml"),
read_fixture("test_body_block_content", "content_37_expected.py"),
),
# example of table author-callout-style styles to replace as a class attribute, based on 24231 v1
(
read_fixture("test_body_block_content", "content_38.xml"),
read_fixture("test_body_block_content", "content_38_expected.py"),
),
# example inline table adapted from 00666 kitchen sink
(
read_fixture("test_body_block_content", "content_39.xml"),
read_fixture("test_body_block_content", "content_39_expected.py"),
),
        # example key resources inline table that has a label and will be turned into a figure block
(
read_fixture("test_body_block_content", "content_40.xml"),
read_fixture("test_body_block_content", "content_40_expected.py"),
),
# test for stripping out comment tag content when it is inside a paragraph tag
(
read_fixture("test_body_block_content", "content_41.xml"),
read_fixture("test_body_block_content", "content_41_expected.py"),
),
# test ignoring nested fig title as a box-text title
(
read_fixture("test_body_block_content", "content_42.xml"),
read_fixture("test_body_block_content", "content_42_expected.py"),
),
)
def test_body_block_content(self, xml_content, expected):
soup = parser.parse_xml(xml_content)
# find the first tag in the root with a name
for child in soup.root.children:
if child.name:
body_tag = child
break
tag_content = parser.body_block_content(body_tag)
self.assertEqual(expected, tag_content)
@unpack
@data(
(
read_fixture("test_body_block_content_render", "content_01.xml"),
read_fixture("test_body_block_content_render", "content_01_expected.py"),
),
(
read_fixture("test_body_block_content_render", "content_02.xml"),
read_fixture("test_body_block_content_render", "content_02_expected.py"),
),
(
read_fixture("test_body_block_content_render", "content_03.xml"),
read_fixture("test_body_block_content_render", "content_03_expected.py"),
),
(
read_fixture("test_body_block_content_render", "content_04.xml"),
read_fixture("test_body_block_content_render", "content_04_expected.py"),
),
(
read_fixture("test_body_block_content_render", "content_05.xml"),
read_fixture("test_body_block_content_render", "content_05_expected.py"),
),
(
read_fixture("test_body_block_content_render", "content_06.xml"),
read_fixture("test_body_block_content_render", "content_06_expected.py"),
),
(
read_fixture("test_body_block_content_render", "content_07.xml"),
read_fixture("test_body_block_content_render", "content_07_expected.py"),
),
(
read_fixture("test_body_block_content_render", "content_08.xml"),
read_fixture("test_body_block_content_render", "content_08_expected.py"),
),
(
read_fixture("test_body_block_content_render", "content_09.xml"),
read_fixture("test_body_block_content_render", "content_09_expected.py"),
),
(
read_fixture("test_body_block_content_render", "content_10.xml"),
read_fixture("test_body_block_content_render", "content_10_expected.py"),
),
(
read_fixture("test_body_block_content_render", "content_11.xml"),
read_fixture("test_body_block_content_render", "content_11_expected.py"),
),
(
read_fixture("test_body_block_content_render", "content_12.xml"),
read_fixture("test_body_block_content_render", "content_12_expected.py"),
),
(
read_fixture("test_body_block_content_render", "content_13.xml"),
read_fixture("test_body_block_content_render", "content_13_expected.py"),
),
# disp-quote content-type="editor-comment" is turned into an excerpt block
(
read_fixture("test_body_block_content_render", "content_14.xml"),
read_fixture("test_body_block_content_render", "content_14_expected.py"),
),
# Boxed text with no title tag uses the first sentence of the caption paragraph, 00288 v1
(
read_fixture("test_body_block_content_render", "content_15.xml"),
read_fixture("test_body_block_content_render", "content_15_expected.py"),
),
# Example of boxed-text with content inside its caption tag
(
read_fixture("test_body_block_content_render", "content_16.xml"),
read_fixture("test_body_block_content_render", "content_16_expected.py"),
),
# code block, based on elife 20352 v2, contains new lines too
(
read_fixture("test_body_block_content_render", "content_17.xml"),
read_fixture("test_body_block_content_render", "content_17_expected.py"),
),
# Example of monospace tags
(
read_fixture("test_body_block_content_render", "content_18.xml"),
read_fixture("test_body_block_content_render", "content_18_expected.py"),
),
        # example of a table that should not pick up a child element title, based on 22264 v2
(
read_fixture("test_body_block_content_render", "content_19.xml"),
read_fixture("test_body_block_content_render", "content_19_expected.py"),
),
# example table: a label, no title, no caption
(
read_fixture("test_body_block_content_render", "content_20.xml"),
read_fixture("test_body_block_content_render", "content_20_expected.py"),
),
# example table: a label, a title, no caption
(
read_fixture("test_body_block_content_render", "content_21.xml"),
read_fixture("test_body_block_content_render", "content_21_expected.py"),
),
# example table: a label, no title, a caption
(
read_fixture("test_body_block_content_render", "content_22.xml"),
read_fixture("test_body_block_content_render", "content_22_expected.py"),
),
# example table: a label, a title, and a caption
(
read_fixture("test_body_block_content_render", "content_23.xml"),
read_fixture("test_body_block_content_render", "content_23_expected.py"),
),
# example table: no label, no title, and a caption
(
read_fixture("test_body_block_content_render", "content_24.xml"),
read_fixture("test_body_block_content_render", "content_24_expected.py"),
),
# example fig with a caption and no title, based on 00281 v1
(
read_fixture("test_body_block_content_render", "content_25.xml"),
read_fixture("test_body_block_content_render", "content_25_expected.py"),
),
# example media with a label and no title, based on 00007 v1
(
read_fixture("test_body_block_content_render", "content_26.xml"),
read_fixture("test_body_block_content_render", "content_26_expected.py"),
),
# example test from 02935 v2 of list within a list to not add child list-item to the parent list twice
(
read_fixture("test_body_block_content_render", "content_27.xml"),
read_fixture("test_body_block_content_render", "content_27_expected.py"),
),
# example list from 00666 kitchen sink with paragraphs and list inside a list-item
(
read_fixture("test_body_block_content_render", "content_28.xml"),
read_fixture("test_body_block_content_render", "content_28_expected.py"),
),
# example of a video inside a fig group based on 00666 kitchen sink
(
read_fixture("test_body_block_content_render", "content_29.xml"),
read_fixture("test_body_block_content_render", "content_29_expected.py"),
),
# example of a video as supplementary material inside a fig-group based on 06726 v2
(
read_fixture("test_body_block_content_render", "content_30.xml"),
read_fixture("test_body_block_content_render", "content_30_expected.py"),
),
# example of fig supplementary-material with only a title tag, based on elife-26759-v1.xml except
# this example is fixed so the caption tag wraps the entire caption, and values are abbreviated
# to test how the punctuation is stripped from the end of the supplementary-material title value
# when it is converted to a label
(
read_fixture("test_body_block_content_render", "content_31.xml"),
read_fixture("test_body_block_content_render", "content_31_expected.py"),
),
# example of disp-formula inside a disp-quote based on 55588
(
read_fixture("test_body_block_content_render", "content_32.xml"),
read_fixture("test_body_block_content_render", "content_32_expected.py"),
),
)
def test_body_block_content_render(self, xml_content, expected):
soup = parser.parse_xml(xml_content)
tag_content = parser.body_block_content_render(soup_body(soup))
self.assertEqual(expected, tag_content)
@unpack
@data(
(
read_fixture("test_render_raw_body", "content_01.xml"),
read_fixture("test_render_raw_body", "content_01_expected.py"),
),
(
read_fixture("test_render_raw_body", "content_02.xml"),
read_fixture("test_render_raw_body", "content_02_expected.py"),
),
(
read_fixture("test_render_raw_body", "content_03.xml"),
read_fixture("test_render_raw_body", "content_03_expected.py"),
),
        # Below, when there is a space between paragraph tags, it should not render as a paragraph
(
read_fixture("test_render_raw_body", "content_04.xml"),
read_fixture("test_render_raw_body", "content_04_expected.py"),
),
(
read_fixture("test_render_raw_body", "content_05.xml"),
read_fixture("test_render_raw_body", "content_05_expected.py"),
),
(
read_fixture("test_render_raw_body", "content_06.xml"),
read_fixture("test_render_raw_body", "content_06_expected.py"),
),
(
read_fixture("test_render_raw_body", "content_07.xml"),
read_fixture("test_render_raw_body", "content_07_expected.py"),
),
(
read_fixture("test_render_raw_body", "content_08.xml"),
read_fixture("test_render_raw_body", "content_08_expected.py"),
),
(
read_fixture("test_render_raw_body", "content_09.xml"),
read_fixture("test_render_raw_body", "content_09_expected.py"),
),
# excerpt from 00646 v1 with a boxed-text inline-graphic
(
read_fixture("test_render_raw_body", "content_10.xml"),
read_fixture("test_render_raw_body", "content_10_expected.py"),
),
)
def test_render_raw_body(self, xml_content, expected):
soup = parser.parse_xml(xml_content)
tag_content = parser.render_raw_body(soup_body(soup))
self.assertEqual(expected, tag_content)
@unpack
@data(
(
read_fixture("test_abstract_json", "content_01.xml"),
read_fixture("test_abstract_json", "content_01_expected.py"),
),
# executive-summary will return None
(
read_fixture("test_abstract_json", "content_02.xml"),
read_fixture("test_abstract_json", "content_02_expected.py"),
),
# test lots of inline tagging
(
read_fixture("test_abstract_json", "content_03.xml"),
read_fixture("test_abstract_json", "content_03_expected.py"),
),
# structured abstract example based on BMJ Open bmjopen-4-e003269.xml
(
read_fixture("test_abstract_json", "content_04.xml"),
read_fixture("test_abstract_json", "content_04_expected.py"),
),
# structured abstract elife example
(
read_fixture("test_abstract_json", "content_05.xml"),
read_fixture("test_abstract_json", "content_05_expected.py"),
),
)
def test_abstract_json(self, xml_content, expected):
soup = parser.parse_xml(xml_content)
tag_content = parser.abstract_json(soup_body(soup))
self.assertEqual(expected, tag_content)
@unpack
@data(
(
read_fixture("test_digest_json", "content_01.xml"),
read_fixture("test_digest_json", "content_01_expected.py"),
),
)
def test_digest_json(self, xml_content, expected):
soup = parser.parse_xml(xml_content)
tag_content = parser.digest_json(soup_body(soup))
self.assertEqual(expected, tag_content)
"""
Unit test small or special cases
"""
@unpack
@data(
# snippet of XML from elife-kitchen-sink.xml
(read_fixture("", "article_dates.xml"), "pub", ("28", "02", "2014")),
)
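    # Based on the fixture above, ymd() returns (day, month, year) as strings for
    # the matched pub-date tag - here the kitchen sink pub date of 28 February 2014.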
def test_ymd(self, xml_content, test_date_type, expected):
soup = parser.parse_xml(xml_content)
date_tag = raw_parser.pub_date(soup, date_type=test_date_type)[0]
self.assertEqual(expected, parser.ymd(date_tag))
@unpack
@data(
# snippet of XML from elife-kitchen-sink.xml
(read_fixture("", "article_dates.xml"), "received", date_struct(2012, 6, 22)),
(read_fixture("", "article_dates.xml"), None, None),
(read_fixture("", "article_dates.xml"), "not_a_date_type", None),
)
def test_history_date(self, xml_content, date_type, expected):
soup = parser.parse_xml(xml_content)
self.assertEqual(expected, parser.history_date(soup, date_type))
"""
Functions that require more than one argument to test against json output
"""
@unpack
@data(
# typical eLife format
(
read_fixture("test_journal_issn", "content_01.xml"),
"electronic",
None,
"2050-084X",
),
# eLife format with specifying the publication format
(read_fixture("test_journal_issn", "content_02.xml"), None, None, "2050-084X"),
# a non-eLife format
(
read_fixture("test_journal_issn", "content_03.xml"),
None,
"epub",
"2057-4991",
),
)
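    # Assumption drawn from the three fixtures above: journal_issn() can match an
    # issn tag by either its publication-format or its pub-type attribute, so only
    # one of pub_format / pub_type needs to be supplied for a given tagging style.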
def test_journal_issn(self, xml_content, pub_format, pub_type, expected):
soup = parser.parse_xml(xml_content)
tag_content = parser.journal_issn(
soup_body(soup), pub_format=pub_format, pub_type=pub_type
)
self.assertEqual(expected, tag_content)
@unpack
@data(
(
"<article/>",
None,
),
(
read_fixture("test_author_contributions", "content_01.xml"),
read_fixture("test_author_contributions", "content_01_expected.py"),
),
)
def test_author_contributions(self, xml_content, expected):
soup = parser.parse_xml(xml_content)
tag_content = parser.author_contributions(soup, "con")
self.assertEqual(expected, tag_content)
@unpack
@data(
(
# snippet from elife-kitchen-sink.xml
read_fixture("test_competing_interests", "content_01.xml"),
read_fixture("test_competing_interests", "content_01_expected.py"),
),
(
# snippet from elife00190.xml
read_fixture("test_competing_interests", "content_02.xml"),
read_fixture("test_competing_interests", "content_02_expected.py"),
),
(
# snippet from elife-00666.xml
read_fixture("test_competing_interests", "content_03.xml"),
read_fixture("test_competing_interests", "content_03_expected.py"),
),
)
def test_competing_interests(self, xml_content, expected):
soup = parser.parse_xml(xml_content)
tag_content = parser.competing_interests(soup, ["conflict", "COI-statement"])
self.assertEqual(expected, tag_content)
@unpack
@data(
# example with no author notes
(
"<article/>",
None,
),
# example from elife-kitchen-sink.xml
(
read_fixture("", "article_author_notes.xml"),
read_fixture("test_full_author_notes", "content_01_expected.py"),
),
)
def test_full_author_notes(self, xml_content, expected):
soup = parser.parse_xml(xml_content)
tag_content = parser.full_author_notes(soup)
self.assertEqual(expected, tag_content)
"""
Functions that only need soup to test them against json output
"""
@unpack
@data(
# example with no abstracts, such as a correction article
(
"<article/>",
[],
),
# example from elife-kitchen-sink.xml
(
read_sample_xml("elife-kitchen-sink.xml"),
read_fixture("test_abstracts", "content_01_expected.py"),
),
# example from elife00013.xml
(
read_sample_xml("elife00013.xml"),
read_fixture("test_abstracts", "content_02_expected.py"),
),
# example from elife_poa_e06828.xml
(
read_sample_xml("elife_poa_e06828.xml"),
read_fixture("test_abstracts", "content_03_expected.py"),
),
)
def test_abstracts(self, xml_content, expected):
soup = parser.parse_xml(xml_content)
tag_content = parser.abstracts(soup)
self.assertEqual(expected, tag_content)
@unpack
@data(
# example based on eLife format
(
read_fixture("test_abstract", "content_01.xml"),
read_fixture("test_abstract", "content_01_expected.py"),
),
# example based on BMJ Open bmjopen-4-e003269.xml
(
read_fixture("test_abstract", "content_02.xml"),
read_fixture("test_abstract", "content_02_expected.py"),
),
# example with no abstract, such as a correction article
(
"<article/>",
None,
),
)
def test_abstract_edge_cases(self, xml_content, expected):
soup = parser.parse_xml(xml_content)
tag_content = parser.abstract(soup)
self.assertEqual(expected, tag_content)
@unpack
@data(
(
# very basic abstract
read_fixture("test_abstract_xml", "content_01.xml"),
read_fixture("test_abstract_xml", "content_01_expected.py"),
),
(
# abstract tag with id attribute and mathml tags
read_fixture("test_abstract_xml", "content_02.xml"),
read_fixture("test_abstract_xml", "content_02_expected.py"),
),
(
# structured abstract example
read_fixture("test_abstract_xml", "content_03.xml"),
read_fixture("test_abstract_xml", "content_03_expected.py"),
),
(
# no abstract tag
read_fixture("test_abstract_xml", "content_04.xml"),
read_fixture("test_abstract_xml", "content_04_expected.py"),
),
)
def test_abstract_xml(self, xml_content, expected):
soup = parser.parse_xml(xml_content)
tag_content = parser.abstract_xml(soup_body(soup))
self.assertEqual(expected, tag_content)
@unpack
@data(
# snippet of XML from elife-kitchen-sink.xml
(
read_fixture("", "article_dates.xml"),
"July 18, 2012",
),
)
def test_accepted_date_date(self, xml_content, expected):
soup = parser.parse_xml(xml_content)
tag_content = parser.accepted_date_date(soup)
self.assertEqual(expected, tag_content)
@unpack
@data(
# snippet of XML from elife-kitchen-sink.xml
(
read_fixture("", "article_dates.xml"),
18,
),
)
def test_accepted_date_day(self, xml_content, expected):
soup = parser.parse_xml(xml_content)
tag_content = parser.accepted_date_day(soup)
self.assertEqual(expected, tag_content)
@unpack
@data(
# snippet of XML from elife-kitchen-sink.xml
(
read_fixture("", "article_dates.xml"),
7,
),
)
def test_accepted_date_month(self, xml_content, expected):
soup = parser.parse_xml(xml_content)
tag_content = parser.accepted_date_month(soup)
self.assertEqual(expected, tag_content)
@unpack
@data(
# snippet of XML from elife-kitchen-sink.xml
(
read_fixture("", "article_dates.xml"),
1342569600,
),
)
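    # 1342569600 is 2012-07-18 00:00:00 UTC, matching the accepted date fixtures above.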
def test_accepted_date_timestamp(self, xml_content, expected):
soup = parser.parse_xml(xml_content)
tag_content = parser.accepted_date_timestamp(soup)
self.assertEqual(expected, tag_content)
@unpack
@data(
# snippet of XML from elife-kitchen-sink.xml
(
read_fixture("", "article_dates.xml"),
2012,
),
)
def test_accepted_date_year(self, xml_content, expected):
soup = parser.parse_xml(xml_content)
tag_content = parser.accepted_date_year(soup)
self.assertEqual(expected, tag_content)
@unpack
@data(
# example with no data
(
"<article/>",
None,
),
# example from elife-kitchen-sink.xml
(
read_sample_xml("elife-kitchen-sink.xml"),
"""Acknowledgements
We thank Michael Fischbach, Richard Losick, and Russell Vance for critical reading of
the manuscript. NK is a Fellow in the Integrated Microbial Biodiversity Program of
the Canadian Institute for Advanced Research.""",
),
)
def test_ack(self, xml_content, expected):
soup = parser.parse_xml(xml_content)
tag_content = parser.ack(soup)
self.assertEqual(expected, tag_content)
@unpack
@data(
# example with no data
(
"<article/>",
None,
),
# example from elife-kitchen-sink.xml
(
read_sample_xml("elife-kitchen-sink.xml"),
"""Acknowledgements
We thank Michael Fischbach, Richard Losick, and Russell Vance for critical reading of
the manuscript. NK is a Fellow in the Integrated Microbial Biodiversity Program of
the Canadian Institute for Advanced Research.""",
),
)
def test_acknowledgements(self, xml_content, expected):
soup = parser.parse_xml(xml_content)
tag_content = parser.acknowledgements(soup)
self.assertEqual(expected, tag_content)
@unpack
@data(
(
(
'<article xmlns:mml="http://www.w3.org/1998/Math/MathML" '
'xmlns:xlink="http://www.w3.org/1999/xlink" '
'article-type="research-article" dtd-version="1.1d3">'
),
"research-article",
),
)
def test_article_type(self, xml_content, expected):
soup = parser.parse_xml(xml_content)
tag_content = parser.article_type(soup)
self.assertEqual(expected, tag_content)
@unpack
@data(
# example with no author notes
(
"<article/>",
None,
),
# example from elife-kitchen-sink.xml
(
read_fixture("", "article_author_notes.xml"),
[
"\n†\nThese authors contributed equally to this work\n",
"\n‡\nThese authors also contributed equally to this work\n",
"\n**\nDeceased\n",
],
),
)
def test_author_notes(self, xml_content, expected):
soup = parser.parse_xml(xml_content)
tag_content = parser.author_notes(soup)
self.assertEqual(expected, tag_content)
@unpack
@data(
(
sample_xml("elife-kitchen-sink.xml"),
read_fixture("test_authors", "content_01_expected.py"),
),
(
sample_xml("elife00013.xml"),
read_fixture("test_authors", "content_02_expected.py"),
),
(
sample_xml("elife_poa_e06828.xml"),
read_fixture("test_authors", "content_03_expected.py"),
),
(
sample_xml("elife02935.xml"),
read_fixture("test_authors", "content_04_expected.py"),
),
(
sample_xml("elife00270.xml"),
read_fixture("test_authors", "content_05_expected.py"),
),
(
sample_xml("elife00351.xml"),
read_fixture("test_authors", "content_06_expected.py"),
),
(
sample_xml("elife-00666.xml"),
read_fixture("test_authors", "content_07_expected.py"),
),
)
def test_authors(self, filename, expected):
soup = parser.parse_document(filename)
tag_content = parser.authors(soup)
self.assertEqual(expected, tag_content)
@unpack
@data(
(
read_sample_xml("elife-kitchen-sink.xml"),
read_fixture("test_authors_non_byline", "content_01_expected.py"),
),
(
read_sample_xml("elife-00666.xml"),
read_fixture("test_authors_non_byline", "content_02_expected.py"),
),
)
def test_authors_non_byline(self, xml_content, expected):
soup = parser.parse_xml(xml_content)
tag_content = parser.authors_non_byline(soup)
self.assertEqual(expected, tag_content)
@unpack
@data(
        # 07383 v1 has an institution in the principal award recipient
(
read_fixture("test_award_groups", "content_01.xml"),
read_fixture("test_award_groups", "content_01_expected.py"),
),
# example from elife-kitchen-sink.xml
(
read_fixture("test_award_groups", "content_02.xml"),
read_fixture("test_award_groups", "content_02_expected.py"),
),
# example from elife-09215-v1.xml
(
read_fixture("test_award_groups", "content_03.xml"),
read_fixture("test_award_groups", "content_03_expected.py"),
),
# example from elife00013.xml
(
read_fixture("test_award_groups", "content_04.xml"),
read_fixture("test_award_groups", "content_04_expected.py"),
),
# example from elife-00666.xml
(
read_fixture("test_award_groups", "content_05.xml"),
read_fixture("test_award_groups", "content_05_expected.py"),
),
)
def test_award_groups(self, xml_content, expected):
soup = parser.parse_xml(xml_content)
tag_content = parser.award_groups(soup_body(soup))
self.assertEqual(expected, tag_content)
@unpack
@data(
(
"<article/>",
[],
),
(
read_fixture("", "article_meta.xml"),
["Cell biology", "Computational and systems biology"],
),
)
def test_category(self, xml_content, expected):
soup = parser.parse_xml(xml_content)
tag_content = parser.category(soup)
self.assertEqual(expected, tag_content)
@unpack
@data(
# snippet of XML from elife-kitchen-sink.xml
(
read_fixture("", "article_dates.xml"),
2014,
),
# poa XML has no collection date
(
"<article/>",
None,
),
)
def test_collection_year(self, xml_content, expected):
soup = parser.parse_xml(xml_content)
tag_content = parser.collection_year(soup)
self.assertEqual(expected, tag_content)
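    # collection_year() is expected to read the collection year from a
    # pub-date tag using either attribute spelling, pub-type="collection"
    # or date-type="collection", as the two inline examples below show.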
@unpack
@data(
("<root></root>", None),
(
"""
<root>
<pub-date pub-type="collection">
<year>2016</year>
</pub-date>
</root>""",
2016,
),
(
"""
<root>
<pub-date date-type="collection">
<year>2016</year>
</pub-date>
</root>""",
2016,
),
)
def test_collection_year_edge_cases(self, xml_content, expected):
soup = parser.parse_xml(xml_content)
tag_content = parser.collection_year(soup_body(soup))
self.assertEqual(expected, tag_content)
@unpack
@data(
(
"<article/>",
[],
),
(
read_sample_xml("elife-kitchen-sink.xml"),
read_fixture("test_component_doi", "content_01_expected.py"),
),
)
def test_component_doi(self, xml_content, expected):
soup = parser.parse_xml(xml_content)
tag_content = parser.component_doi(soup)
self.assertEqual(expected, tag_content)
@unpack
@data(
(
sample_xml("elife-kitchen-sink.xml"),
read_fixture("test_components", "content_01_expected.py"),
),
(
sample_xml("elife02304.xml"),
read_fixture("test_components", "content_02_expected.py"),
),
(
sample_xml("elife05502.xml"),
read_fixture("test_components", "content_03_expected.py"),
),
(
sample_xml("elife04490.xml"),
read_fixture("test_components", "content_04_expected.py"),
),
(
sample_xml("elife-14093-v1.xml"),
read_fixture("test_components", "content_05_expected.py"),
),
(
sample_xml("elife-00666.xml"),
read_fixture("test_components", "content_06_expected.py"),
),
)
def test_components(self, filename, expected):
soup = parser.parse_document(filename)
tag_content = parser.components(soup)
self.assertEqual(expected, tag_content)
@unpack
@data(
(
"<article/>",
None,
),
# example from elife-kitchen-sink.xml
(
read_fixture("test_conflict", "content_01.xml"),
read_fixture("test_conflict", "content_01_expected.py"),
),
)
def test_conflict(self, xml_content, expected):
soup = parser.parse_xml(xml_content)
tag_content = parser.conflict(soup)
self.assertEqual(expected, tag_content)
@unpack
@data(
# example from elife-kitchen-sink.xml
(
read_sample_xml("elife-kitchen-sink.xml"),
read_fixture("test_contributors", "content_01_expected.py"),
),
# example from elife-02833-v2.xml
(
read_sample_xml("elife-02833-v2.xml"),
read_fixture("test_contributors", "content_02_expected.py"),
),
# example from elife-00666.xml
(
read_sample_xml("elife-00666.xml"),
read_fixture("test_contributors", "content_03_expected.py"),
),
)
def test_contributors(self, xml_content, expected):
soup = parser.parse_xml(xml_content)
tag_content = parser.contributors(soup)
self.assertEqual(expected, tag_content)
@data(
# edge case, no permissions tag
("<root><article></article></root>", None),
# example from elife-kitchen-sink.xml
(
read_fixture("", "article_permissions.xml"),
"Alegado et al",
),
)
@unpack
def test_copyright_holder(self, xml_content, expected):
soup = parser.parse_xml(xml_content)
tag_content = parser.copyright_holder(soup)
self.assertEqual(expected, tag_content)
@data(
# edge case, no permissions tag
("<root><article></article></root>", None),
# example from elife-kitchen-sink.xml
(
read_fixture("", "article_permissions.xml"),
"Alegado et al.",
),
# example from elife00240.xml
(
read_fixture("test_copyright_holder_json", "content_01.xml"),
"Pickett",
),
# example from elife09853.xml
(
read_fixture("test_copyright_holder_json", "content_02.xml"),
"Becker and Gitler",
),
        # example from elife02935.xml, which uses a CC0 license
(
read_fixture("test_copyright_holder_json", "content_03.xml"),
None,
),
)
@unpack
def test_copyright_holder_json(self, xml_content, expected):
soup = parser.parse_xml(xml_content)
tag_content = parser.copyright_holder_json(soup)
self.assertEqual(expected, tag_content)
@data(
# edge case, no permissions tag
("<root><article></article></root>", None),
# example from elife-kitchen-sink.xml
(
read_fixture("", "article_permissions.xml"),
"© 2012, Alegado et al",
),
)
@unpack
def test_copyright_statement(self, xml_content, expected):
soup = parser.parse_xml(xml_content)
tag_content = parser.copyright_statement(soup)
self.assertEqual(expected, tag_content)
@data(
# edge case, no permissions tag
("<root><article></article></root>", None),
# example from elife-kitchen-sink.xml
(
read_fixture("", "article_permissions.xml"),
2012,
),
)
@unpack
def test_copyright_year_edge_cases(self, xml_content, expected):
soup = parser.parse_xml(xml_content)
tag_content = parser.copyright_year(soup)
self.assertEqual(expected, tag_content)
@unpack
@data(
# example with no author notes
(
"<article/>",
[],
),
# example from elife-kitchen-sink.xml
(
read_fixture("", "article_author_notes.xml"),
[
"*For correspondence: jon_clardy@hms.harvard.edu(JC);",
"nking@berkeley.edu(NK);",
"mharrison@elifesciences.org(MH)",
],
),
)
def test_correspondence(self, xml_content, expected):
soup = parser.parse_xml(xml_content)
tag_content = parser.correspondence(soup)
self.assertEqual(expected, tag_content)
@unpack
@data(
# example with no abstracts, such as a correction article
(
"<article/>",
None,
),
# example from elife-kitchen-sink.xml
(
read_sample_xml("elife-kitchen-sink.xml"),
read_fixture("test_digest", "content_01_expected.py"),
),
# example from elife_poa_e06828.xml
(read_sample_xml("elife_poa_e06828.xml"), None),
)
def test_digest(self, xml_content, expected):
soup = parser.parse_xml(xml_content)
tag_content = parser.digest(soup)
self.assertEqual(expected, tag_content)
@unpack
@data(
(
"<article/>",
[],
),
(
read_fixture("", "article_meta.xml"),
["Research article"],
),
)
def test_display_channel(self, xml_content, expected):
soup = parser.parse_xml(xml_content)
tag_content = parser.display_channel(soup)
self.assertEqual(expected, tag_content)
@unpack
@data(
# example from elife-kitchen-sink.xml
(read_fixture("", "article_meta.xml"), "10.7554/eLife.00013"),
)
def test_doi(self, xml_content, expected):
soup = parser.parse_xml(xml_content)
tag_content = parser.doi(soup)
self.assertEqual(expected, tag_content)
@unpack
@data(
# example from elife-kitchen-sink.xml
(read_fixture("", "article_meta.xml"), "e00013"),
)
def test_elocation_id(self, xml_content, expected):
soup = parser.parse_xml(xml_content)
tag_content = parser.elocation_id(soup)
self.assertEqual(expected, tag_content)
@unpack
@data(
# example with no abstracts, such as a correction article
(
"<article/>",
None,
),
# example from elife-kitchen-sink.xml
(
read_sample_xml("elife-kitchen-sink.xml"),
read_fixture("test_full_abstract", "content_01_expected.py"),
),
# example from elife00013.xml
(
read_sample_xml("elife00013.xml"),
read_fixture("test_full_abstract", "content_02_expected.py"),
),
# example from elife_poa_e06828.xml
(
read_sample_xml("elife_poa_e06828.xml"),
read_fixture("test_full_abstract", "content_03_expected.py"),
),
)
def test_full_abstract(self, xml_content, expected):
soup = parser.parse_xml(xml_content)
tag_content = parser.full_abstract(soup)
self.assertEqual(expected, tag_content)
@unpack
@data(
# example with no affs
(
"<article/>",
[],
),
# example from elife-kitchen-sink.xml
(
read_sample_xml("elife-kitchen-sink.xml"),
read_fixture("test_full_affiliation", "content_01_expected.py"),
),
)
def test_full_affiliation(self, xml_content, expected):
soup = parser.parse_xml(xml_content)
tag_content = parser.full_affiliation(soup)
self.assertEqual(expected, tag_content)
@data(
# elife-kitchen-sink.xml example
(
read_sample_xml("elife-kitchen-sink.xml"),
read_fixture(
"test_full_award_group_funding_source", "content_01_expected.py"
),
),
)
@unpack
def test_full_award_group_funding_source(self, xml_content, expected):
soup = parser.parse_xml(xml_content)
tag_content = parser.full_award_group_funding_source(soup)
self.assertEqual(expected, tag_content)
@data(
        # edge case, no id attribute on award-group tag, so an id will be generated, based on 10.1098/rsob.150230
(
read_fixture("test_full_award_groups", "content_01.xml"),
read_fixture("test_full_award_groups", "content_01_expected.py"),
),
# elife-kitchen-sink.xml example
(
read_fixture("test_full_award_groups", "content_02.xml"),
read_fixture("test_full_award_groups", "content_02_expected.py"),
),
)
@unpack
def test_full_award_groups(self, xml_content, expected):
soup = parser.parse_xml(xml_content)
tag_content = parser.full_award_groups(soup_body(soup))
self.assertEqual(expected, tag_content)
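    # full_correspondence() keys its results by the corresp tag id; note the
    # elife-02833-v2.xml case below, where "cor2" holds a phone number rather
    # than an email address.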
@data(
        # edge case, no id attribute on corresp tag; the result will be empty but not cause an error, based on 10.2196/resprot.3838
(
"<article><author-notes><corresp>Corresponding Author: Elisa J Gordon<email>eg@example.org</email></corresp></author-notes></article>",
{},
),
# example with no author notes
(
"<article/>",
{},
),
# example from elife-kitchen-sink.xml
(
read_fixture("", "article_author_notes.xml"),
{
"cor1": ["jon_clardy@hms.harvard.edu"],
"cor2": ["nking@berkeley.edu"],
"cor3": ["mharrison@elifesciences.org"],
},
),
        # example from elife-02833-v2.xml
(
read_sample_xml("elife-02833-v2.xml"),
{
"cor1": ["kingston@molbio.mgh.harvard.edu"],
"cor2": ["(+1) 617-432-1906"],
},
),
)
@unpack
def test_full_correspondence(self, xml_content, expected):
soup = parser.parse_xml(xml_content)
tag_content = parser.full_correspondence(soup)
self.assertEqual(expected, tag_content)
@unpack
@data(
# example with no abstracts, such as a correction article
(
"<article/>",
None,
),
# example from elife-kitchen-sink.xml
(
read_sample_xml("elife-kitchen-sink.xml"),
read_fixture("test_full_digest", "content_01_expected.py"),
),
# example from elife_poa_e06828.xml
(read_sample_xml("elife_poa_e06828.xml"), None),
)
def test_full_digest(self, xml_content, expected):
soup = parser.parse_xml(xml_content)
tag_content = parser.full_digest(soup)
self.assertEqual(expected, tag_content)
@unpack
@data(
# example from elife-kitchen-sink.xml
(
read_fixture("test_full_funding_statement", "content_01.xml"),
read_fixture("test_full_funding_statement", "content_01_expected.py"),
),
)
def test_full_funding_statement(self, xml_content, expected):
soup = parser.parse_xml(xml_content)
tag_content = parser.full_funding_statement(soup)
self.assertEqual(expected, tag_content)
@unpack
@data(
(
"<article/>",
{},
),
(
read_fixture("", "article_meta.xml"),
{
"author-keywords": [
"<italic>Salpingoeca rosetta</italic>",
"Algoriphagus",
"bacterial sulfonolipid",
"multicellular development",
],
"research-organism": ["Mouse", "<italic>C. elegans</italic>", "Other"],
},
),
(
read_sample_xml("elife_poa_e06828.xml"),
{
"author-keywords": [
"neurotrophins",
"RET signaling",
"DRG neuron development",
"cis and trans activation",
],
"research-organism": ["Mouse"],
},
),
)
def test_full_keyword_groups(self, xml_content, expected):
soup = parser.parse_xml(xml_content)
tag_content = parser.full_keyword_groups(soup)
self.assertEqual(expected, tag_content)
@unpack
@data(
(
"<article/>",
[],
),
(
read_fixture("", "article_meta.xml"),
[
"<italic>Salpingoeca rosetta</italic>",
"Algoriphagus",
"bacterial sulfonolipid",
"multicellular development",
],
),
(
read_sample_xml("elife_poa_e06828.xml"),
[
"neurotrophins",
"RET signaling",
"DRG neuron development",
"cis and trans activation",
],
),
)
def test_full_keywords(self, xml_content, expected):
soup = parser.parse_xml(xml_content)
tag_content = parser.full_keywords(soup)
self.assertEqual(expected, tag_content)
@data(
# edge case, no permissions tag
("<root><article></article></root>", None),
# example from elife-kitchen-sink.xml
(
read_fixture("", "article_permissions.xml"),
(
"This article is distributed under the terms of the "
'<ext-link ext-link-type="uri" '
'xlink:href="http://creativecommons.org/licenses/by/4.0/">'
"Creative Commons Attribution License</ext-link>, which permits "
"unrestricted use and redistribution provided that the original "
"author and source are credited."
),
),
)
@unpack
def test_full_license(self, xml_content, expected):
soup = parser.parse_xml(xml_content)
tag_content = parser.full_license(soup_body(soup))
self.assertEqual(expected, tag_content)
@unpack
@data(
(
"<article/>",
[],
),
(
read_fixture("", "article_meta.xml"),
["Mouse", "<italic>C. elegans</italic>", "Other"],
),
)
def test_full_research_organism(self, xml_content, expected):
soup = parser.parse_xml(xml_content)
tag_content = parser.full_research_organism(soup)
self.assertEqual(expected, tag_content)
@unpack
@data(
(
"<article/>",
{},
),
(
read_fixture("", "article_meta.xml"),
{
"display-channel": ["Research article"],
"heading": ["Cell biology", "Computational and systems biology"],
},
),
)
def test_full_subject_area(self, xml_content, expected):
soup = parser.parse_xml(xml_content)
tag_content = parser.full_subject_area(soup)
self.assertEqual(expected, tag_content)
@unpack
@data(
# example from elife-kitchen-sink.xml
(
read_fixture("", "article_meta.xml"),
(
"Bacterial regulation of colony development in the closest "
"living relatives of animals"
),
),
(
read_sample_xml("elife_poa_e06828.xml"),
(
"<italic>Cis</italic> and <italic>trans</italic> RET signaling control the "
"survival and central projection growth of rapidly adapting mechanoreceptors"
),
),
)
def test_full_title(self, xml_content, expected):
soup = parser.parse_xml(xml_content)
tag_content = parser.full_title(soup)
self.assertEqual(expected, tag_content)
@unpack
@data(
# example from elife-kitchen-sink.xml
(
read_fixture("test_funding_statement", "content_01.xml"),
read_fixture("test_funding_statement", "content_01_expected.py"),
),
)
def test_funding_statement(self, xml_content, expected):
soup = parser.parse_xml(xml_content)
tag_content = parser.funding_statement(soup)
self.assertEqual(expected, tag_content)
@unpack
@data(
(
sample_xml("elife-kitchen-sink.xml"),
read_fixture("test_graphics", "content_01_expected.py"),
),
(
sample_xml("elife00013.xml"),
read_fixture("test_graphics", "content_02_expected.py"),
),
(
sample_xml("elife00240.xml"),
read_fixture("test_graphics", "content_03_expected.py"),
),
(
sample_xml("elife04953.xml"),
read_fixture("test_graphics", "content_04_expected.py"),
),
(
sample_xml("elife00133.xml"),
read_fixture("test_graphics", "content_05_expected.py"),
),
)
def test_graphics(self, filename, expected):
soup = parser.parse_document(filename)
tag_content = parser.graphics(soup)
self.assertEqual(expected, tag_content)
@unpack
@data(
(
"<article/>",
"",
),
(
read_sample_xml("elife-kitchen-sink.xml"),
"The chemical nature of RIF-1 may reveal a new class of bacterial signaling molecules.",
),
(
read_sample_xml("elife_poa_e06828.xml"),
"",
),
)
def test_impact_statement(self, xml_content, expected):
soup = parser.parse_xml(xml_content)
tag_content = parser.impact_statement(soup)
self.assertEqual(expected, tag_content)
@unpack
@data(
(
sample_xml("elife-kitchen-sink.xml"),
read_fixture("test_inline_graphics", "content_01_expected.py"),
),
(
sample_xml("elife00240.xml"),
read_fixture("test_inline_graphics", "content_02_expected.py"),
),
)
def test_inline_graphics(self, filename, expected):
soup = parser.parse_document(filename)
tag_content = parser.inline_graphics(soup)
self.assertEqual(expected, tag_content)
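    # is_poa() distinguishes PoA ("publish on accept") XML from full VoR
    # content: the kitchen-sink article meta is treated as not PoA, while a
    # bare <article/> and the PoA sample are, presumably because the richer
    # VoR metadata is absent.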
@unpack
@data(
(
"<article/>",
True,
),
(
read_fixture("", "article_meta.xml"),
False,
),
(
read_sample_xml("elife_poa_e06828.xml"),
True,
),
)
def test_is_poa(self, xml_content, expected):
soup = parser.parse_xml(xml_content)
tag_content = parser.is_poa(soup)
self.assertEqual(expected, tag_content)
@unpack
@data(
# example from elife-kitchen-sink.xml
(
read_fixture("", "article_meta.xml"),
"eLife",
),
)
def test_journal_id(self, xml_content, expected):
soup = parser.parse_xml(xml_content)
tag_content = parser.journal_id(soup)
self.assertEqual(expected, tag_content)
@unpack
@data(
# example from elife-kitchen-sink.xml
(
read_fixture("", "article_meta.xml"),
"eLife",
),
)
def test_journal_title(self, xml_content, expected):
soup = parser.parse_xml(xml_content)
tag_content = parser.journal_title(soup)
self.assertEqual(expected, tag_content)
@unpack
@data(
(
"<article/>",
[],
),
(
read_fixture("", "article_meta.xml"),
[
"Salpingoeca rosetta",
"Algoriphagus",
"bacterial sulfonolipid",
"multicellular development",
],
),
(
read_sample_xml("elife_poa_e06828.xml"),
[
"neurotrophins",
"RET signaling",
"DRG neuron development",
"cis and trans activation",
],
),
)
def test_keywords(self, xml_content, expected):
soup = parser.parse_xml(xml_content)
tag_content = parser.keywords(soup)
self.assertEqual(expected, tag_content)
@data(
# edge case, no permissions tag
("<root><article></article></root>", None),
# example from elife-kitchen-sink.xml
(
read_fixture("", "article_permissions.xml"),
(
"This article is distributed under the terms of the "
"Creative Commons Attribution License, which permits "
"unrestricted use and redistribution provided that "
"the original author and source are credited."
),
),
)
@unpack
def test_license(self, xml_content, expected):
soup = parser.parse_xml(xml_content)
tag_content = parser.license(soup_body(soup))
self.assertEqual(expected, tag_content)
@unpack
@data(
# example license from 00666
(
read_fixture("test_license_json", "content_01.xml"),
read_fixture("test_license_json", "content_01_expected.py"),
),
# edge case, no permissions tag
("<root><article></article></root>", None),
)
def test_license_json(self, xml_content, expected):
soup = parser.parse_xml(xml_content)
tag_content = parser.license_json(soup)
self.assertEqual(expected, tag_content)
@data(
# edge case, no permissions tag
("<root><article></article></root>", None),
# example from elife-kitchen-sink.xml
(
read_fixture("", "article_permissions.xml"),
("http://creativecommons.org/licenses/by/4.0/"),
),
)
@unpack
def test_license_url(self, xml_content, expected):
soup = parser.parse_xml(xml_content)
tag_content = parser.license_url(soup_body(soup))
self.assertEqual(expected, tag_content)
@unpack
@data(
(
sample_xml("elife-kitchen-sink.xml"),
read_fixture("test_media", "content_01_expected.py"),
),
(
sample_xml("elife02304.xml"),
read_fixture("test_media", "content_02_expected.py"),
),
(
sample_xml("elife00007.xml"),
read_fixture("test_media", "content_03_expected.py"),
),
(
sample_xml("elife04953.xml"),
read_fixture("test_media", "content_04_expected.py"),
),
(
sample_xml("elife00005.xml"),
read_fixture("test_media", "content_05_expected.py"),
),
(
sample_xml("elife05031.xml"),
read_fixture("test_media", "content_06_expected.py"),
),
(
sample_xml("elife04493.xml"),
read_fixture("test_media", "content_07_expected.py"),
),
(
sample_xml("elife06726.xml"),
read_fixture("test_media", "content_08_expected.py"),
),
)
def test_media(self, filename, expected):
soup = parser.parse_document(filename)
tag_content = parser.media(soup)
self.assertEqual(expected, tag_content)
@unpack
@data(
# pub-date values from 00666 kitchen sink
(
read_fixture("test_pub_dates", "content_01.xml"),
read_fixture("test_pub_dates", "content_01_expected.py"),
),
# example from cstp77
(
read_fixture("test_pub_dates", "content_02.xml"),
read_fixture("test_pub_dates", "content_02_expected.py"),
),
# example from bmjopen-2013-003269
(
read_fixture("test_pub_dates", "content_03.xml"),
read_fixture("test_pub_dates", "content_03_expected.py"),
),
)
def test_pub_dates_edge_cases(self, xml_content, expected):
soup = parser.parse_xml(xml_content)
tag_content = parser.pub_dates(soup)
self.assertEqual(expected, tag_content)
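    # The pub_date_* tests below reuse article_dates.xml; the expected
    # timestamp 1393545600 corresponds to 2014-02-28 00:00:00 UTC, matching
    # the date, day, month and year values that follow.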
@unpack
@data(
# snippet of XML from elife-kitchen-sink.xml
(
read_fixture("", "article_dates.xml"),
1393545600,
),
# poa XML before pub-date is added
(
"<article/>",
None,
),
)
def test_pub_date_timestamp(self, xml_content, expected):
soup = parser.parse_xml(xml_content)
tag_content = parser.pub_date_timestamp(soup)
self.assertEqual(expected, tag_content)
@unpack
@data(
# snippet of XML from elife-kitchen-sink.xml
(
read_fixture("", "article_dates.xml"),
"February 28, 2014",
),
# poa XML before pub-date is added
(
"<article/>",
None,
),
)
def test_pub_date_date(self, xml_content, expected):
soup = parser.parse_xml(xml_content)
tag_content = parser.pub_date_date(soup)
self.assertEqual(expected, tag_content)
@unpack
@data(
# snippet of XML from elife-kitchen-sink.xml
(
read_fixture("", "article_dates.xml"),
28,
),
# poa XML before pub-date is added
(
"<article/>",
None,
),
)
def test_pub_date_day(self, xml_content, expected):
soup = parser.parse_xml(xml_content)
tag_content = parser.pub_date_day(soup)
self.assertEqual(expected, tag_content)
@unpack
@data(
# snippet of XML from elife-kitchen-sink.xml
(
read_fixture("", "article_dates.xml"),
2,
),
# poa XML before pub-date is added
(
"<article/>",
None,
),
)
def test_pub_date_month(self, xml_content, expected):
soup = parser.parse_xml(xml_content)
tag_content = parser.pub_date_month(soup)
self.assertEqual(expected, tag_content)
@unpack
@data(
# snippet of XML from elife-kitchen-sink.xml
(
read_fixture("", "article_dates.xml"),
2014,
),
# poa XML before pub-date is added
(
"<article/>",
None,
),
)
def test_pub_date_year(self, xml_content, expected):
soup = parser.parse_xml(xml_content)
tag_content = parser.pub_date_year(soup)
self.assertEqual(expected, tag_content)
@unpack
@data(
# example from elife-kitchen-sink.xml
(
read_fixture("", "article_meta.xml"),
"eLife Sciences Publications, Ltd",
),
)
def test_publisher(self, xml_content, expected):
soup = parser.parse_xml(xml_content)
tag_content = parser.publisher(soup)
self.assertEqual(expected, tag_content)
@unpack
@data(
# example from elife-kitchen-sink.xml
(
read_fixture("", "article_meta.xml"),
"00013",
),
)
def test_publisher_id(self, xml_content, expected):
soup = parser.parse_xml(xml_content)
tag_content = parser.publisher_id(soup)
self.assertEqual(expected, tag_content)
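    # The received_date_* tests below also read article_dates.xml; the
    # expected timestamp 1340323200 corresponds to 2012-06-22 00:00:00 UTC,
    # consistent with the date, day, month and year values asserted below.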
@unpack
@data(
# snippet of XML from elife-kitchen-sink.xml
(
read_fixture("", "article_dates.xml"),
"June 22, 2012",
),
)
def test_received_date_date(self, xml_content, expected):
soup = parser.parse_xml(xml_content)
tag_content = parser.received_date_date(soup)
self.assertEqual(expected, tag_content)
@unpack
@data(
# snippet of XML from elife-kitchen-sink.xml
(
read_fixture("", "article_dates.xml"),
22,
),
)
def test_received_date_day(self, xml_content, expected):
soup = parser.parse_xml(xml_content)
tag_content = parser.received_date_day(soup)
self.assertEqual(expected, tag_content)
@unpack
@data(
# snippet of XML from elife-kitchen-sink.xml
(
read_fixture("", "article_dates.xml"),
6,
),
)
def test_received_date_month(self, xml_content, expected):
soup = parser.parse_xml(xml_content)
tag_content = parser.received_date_month(soup)
self.assertEqual(expected, tag_content)
@unpack
@data(
# snippet of XML from elife-kitchen-sink.xml
(
read_fixture("", "article_dates.xml"),
1340323200,
),
)
def test_received_date_timestamp(self, xml_content, expected):
soup = parser.parse_xml(xml_content)
tag_content = parser.received_date_timestamp(soup)
self.assertEqual(expected, tag_content)
@unpack
@data(
# snippet of XML from elife-kitchen-sink.xml
(
read_fixture("", "article_dates.xml"),
2012,
),
)
def test_received_date_year(self, xml_content, expected):
soup = parser.parse_xml(xml_content)
tag_content = parser.received_date_year(soup)
self.assertEqual(expected, tag_content)
def test_references(self):
# Alias of refs
soup = parser.parse_xml("<article/>")
self.assertEqual(parser.references(soup), [])
@unpack
@data(
# non-elife example with issue tag from cstp77
(
read_fixture("test_refs", "content_01.xml"),
read_fixture("test_refs", "content_01_expected.py"),
),
# mixed-citation example 1 from bmjopen
(
read_fixture("test_refs", "content_02.xml"),
read_fixture("test_refs", "content_02_expected.py"),
),
# mixed-citation example 2 from bmjopen
(
read_fixture("test_refs", "content_03.xml"),
read_fixture("test_refs", "content_03_expected.py"),
),
# mixed-citation example 3 from bmjopen
(
read_fixture("test_refs", "content_04.xml"),
read_fixture("test_refs", "content_04_expected.py"),
),
# citation example from redalyc - udea
(
read_fixture("test_refs", "content_05.xml"),
read_fixture("test_refs", "content_05_expected.py"),
),
# example of data citation with a pub-id accession, based on article 07836
(
read_fixture("test_refs", "content_06.xml"),
read_fixture("test_refs", "content_06_expected.py"),
),
# example of data citation with a object-id tag accession, based on article 07048
(
read_fixture("test_refs", "content_07.xml"),
read_fixture("test_refs", "content_07_expected.py"),
),
# example of mixed-citation with string-name, based on non-elife article
(
read_fixture("test_refs", "content_08.xml"),
read_fixture("test_refs", "content_08_expected.py"),
),
# example of data citation with a pub-id pub-id-type="archive", parse it as an accession number, based on 00666 kitchen sink example
(
read_fixture("test_refs", "content_09.xml"),
read_fixture("test_refs", "content_09_expected.py"),
),
# example of citation with a pub-id pub-id-type="pmid", from elife-kitchen-sink.xml
(
read_fixture("test_refs", "content_10.xml"),
read_fixture("test_refs", "content_10_expected.py"),
),
# example of person-group with a collab, from elife-kitchen-sink.xml
(
read_fixture("test_refs", "content_11.xml"),
read_fixture("test_refs", "content_11_expected.py"),
),
)
def test_refs_edge_cases(self, xml_content, expected):
soup = parser.parse_xml(xml_content)
tag_content = parser.refs(soup)
self.assertEqual(expected, tag_content)
@unpack
@data(
# example from elife-kitchen-sink.xml
(
read_fixture("", "article_meta.xml"),
[
{
"ext_link_type": "doi",
"related_article_type": "commentary",
"xlink_href": None,
}
],
),
)
def test_related_article(self, xml_content, expected):
soup = parser.parse_xml(xml_content)
tag_content = parser.related_article(soup)
self.assertEqual(expected, tag_content)
@unpack
@data(
(
read_fixture("test_sub_articles", "content_01.xml"),
read_fixture("test_sub_articles", "content_01_expected.py"),
),
# editor evaluation sub-article parsing
(
read_fixture("test_sub_articles", "content_02.xml"),
read_fixture("test_sub_articles", "content_02_expected.py"),
),
)
def test_sub_articles(self, xml_content, expected):
soup = parser.parse_xml(xml_content)
tag_content = parser.sub_articles(soup)
self.assertEqual(expected, tag_content)
@unpack
@data(
(
read_sample_xml("elife-kitchen-sink.xml"),
{"dataro1": {}, "dataro2": {}, "dataro3": {}},
),
(
read_sample_xml("elife_poa_e06828.xml"),
{},
),
)
def test_related_object_ids(self, xml_content, expected):
soup = parser.parse_xml(xml_content)
tag_content = parser.related_object_ids(soup)
self.assertEqual(expected, tag_content)
@unpack
@data(
(
"<article/>",
[],
),
(
read_fixture("", "article_meta.xml"),
["Mouse", "C. elegans", "Other"],
),
)
def test_research_organism(self, xml_content, expected):
soup = parser.parse_xml(xml_content)
tag_content = parser.research_organism(soup)
self.assertEqual(expected, tag_content)
@unpack
@data(
# example from elife-kitchen-sink.xml
(
read_fixture("", "article_meta.xml"),
[{"content-type": "pdf", "type": "self-uri", "position": 1, "ordinal": 1}],
),
(
read_sample_xml("elife_poa_e06828.xml"),
[],
),
)
def test_self_uri(self, xml_content, expected):
soup = parser.parse_xml(xml_content)
tag_content = parser.self_uri(soup)
self.assertEqual(expected, tag_content)
@unpack
@data(
(
"<article/>",
[],
),
(
read_fixture("", "article_meta.xml"),
["Research article", "Cell biology", "Computational and systems biology"],
),
)
def test_subject_area(self, xml_content, expected):
soup = parser.parse_xml(xml_content)
tag_content = parser.subject_area(soup)
self.assertEqual(expected, tag_content)
@unpack
@data(
# example with no data
(
"<article/>",
[],
),
(
read_sample_xml("elife-kitchen-sink.xml"),
read_fixture("test_supplementary_material", "content_01_expected.py"),
),
(
read_sample_xml("elife02304.xml"),
read_fixture("test_supplementary_material", "content_02_expected.py"),
),
)
def test_supplementary_material(self, xml_content, expected):
soup = parser.parse_xml(xml_content)
tag_content = parser.supplementary_material(soup)
self.assertEqual(expected, tag_content)
@unpack
@data(
# example from elife-kitchen-sink.xml
(
read_fixture("", "article_meta.xml"),
(
"Bacterial regulation of colony development in the closest "
"living relatives of animals"
),
),
)
def test_title(self, xml_content, expected):
soup = parser.parse_xml(xml_content)
tag_content = parser.title(soup)
self.assertEqual(expected, tag_content)
@unpack
@data(
# example with no title prefix
(
read_fixture("test_title_prefix", "content_01.xml"),
read_fixture("test_title_prefix", "content_01_expected.py"),
),
# example from elife00240.xml
(
read_fixture("test_title_prefix", "content_02.xml"),
read_fixture("test_title_prefix", "content_02_expected.py"),
),
# example from elife00270.xml
(
read_fixture("test_title_prefix", "content_03.xml"),
read_fixture("test_title_prefix", "content_03_expected.py"),
),
# example from elife00351.xml
(
read_fixture("test_title_prefix", "content_04.xml"),
read_fixture("test_title_prefix", "content_04_expected.py"),
),
)
def test_title_prefix(self, xml_content, expected):
soup = parser.parse_xml(xml_content)
tag_content = parser.title_prefix(soup)
self.assertEqual(expected, tag_content)
@unpack
@data(
('<root xmlns:xlink="http://www.w3.org/1999/xlink"></root>', None),
(read_fixture("test_title_prefix_json", "content_01.xml"), u"Breast Cancer"),
(
read_fixture("test_title_prefix_json", "content_02.xml"),
u"The Natural History of Model Organisms",
),
(
read_fixture("test_title_prefix_json", "content_03.xml"),
u"p53 Family Proteins",
),
(read_fixture("test_title_prefix_json", "content_04.xml"), u"TOR Signaling"),
        # example from elife-27438-v1.xml, which has no sub-display-channel, so title_prefix is None
(read_fixture("test_title_prefix_json", "content_05.xml"), None),
        # example from elife-27438-v2.xml, which does have a title_prefix, and it is rewritten
(read_fixture("test_title_prefix_json", "content_06.xml"), "Point of View"),
)
def test_title_prefix_json(self, xml_content, expected):
soup = parser.parse_xml(xml_content)
tag_content = parser.title_prefix_json(soup_body(soup))
self.assertEqual(expected, tag_content)
@unpack
@data(
# example from elife-kitchen-sink.xml
(read_fixture("", "article_meta.xml"), "Bacterial regulation"),
)
def test_title_short(self, xml_content, expected):
soup = parser.parse_xml(xml_content)
tag_content = parser.title_short(soup)
self.assertEqual(expected, tag_content)
@unpack
@data(
# example from elife-kitchen-sink.xml
(
read_fixture("", "article_meta.xml"),
"bacterial-regulation-of-colony-development-in-the-closest-living-relatives-of-animals",
),
)
def test_title_slug(self, xml_content, expected):
soup = parser.parse_xml(xml_content)
tag_content = parser.title_slug(soup)
self.assertEqual(expected, tag_content)
@unpack
@data(
# example from elife-kitchen-sink.xml
(
read_fixture("", "article_meta.xml"),
"3",
),
(
read_sample_xml("elife_poa_e06828.xml"),
None,
),
)
def test_volume(self, xml_content, expected):
soup = parser.parse_xml(xml_content)
tag_content = parser.volume(soup)
self.assertEqual(expected, tag_content)
@unpack
@data(
# example issue from a non-eLife article
(read_fixture("test_issue", "content_01.xml"), "1"),
# example of no article issue
(read_fixture("test_issue", "content_02.xml"), None),
)
def test_issue(self, xml_content, expected):
soup = parser.parse_xml(xml_content)
tag_content = parser.issue(soup)
self.assertEqual(expected, tag_content)
@unpack
@data(
# example fpage from a non-eLife article
(read_fixture("test_fpage", "content_01.xml"), "1"),
# example of no article fpage
(read_fixture("test_fpage", "content_02.xml"), None),
)
def test_fpage(self, xml_content, expected):
soup = parser.parse_xml(xml_content)
tag_content = parser.fpage(soup)
self.assertEqual(expected, tag_content)
@unpack
@data(
# example lpage from a non-eLife article
(read_fixture("test_lpage", "content_01.xml"), "2"),
# example of no article lpage
(read_fixture("test_lpage", "content_02.xml"), None),
)
def test_lpage(self, xml_content, expected):
soup = parser.parse_xml(xml_content)
tag_content = parser.lpage(soup)
self.assertEqual(expected, tag_content)
def test_parse_mixed_citations(self):
data = parser.mixed_citations(self.soup("elife-kitchen-sink.xml"))
expected = read_fixture("test_parse_mixed_citations", "content_01_expected.py")
self.assertEqual(expected, data)
@unpack
@data(
# example with no history
(
read_fixture("test_version_history", "content_01.xml"),
read_fixture("test_version_history", "content_01_expected.py"),
),
# example based on 00666 kitchen sink
(
read_fixture("test_version_history", "content_02.xml"),
read_fixture("test_version_history", "content_02_expected.py"),
),
)
def test_version_history(self, xml_content, expected):
soup = parser.parse_xml(xml_content)
tag_content = parser.version_history(soup)
self.assertEqual(expected, tag_content)
@unpack
@data(
(
read_fixture("test_clinical_trials", "content_01.xml"),
read_fixture("test_clinical_trials", "content_01_expected.py"),
),
# eLife example
(
read_fixture("test_clinical_trials", "content_02.xml"),
read_fixture("test_clinical_trials", "content_02_expected.py"),
),
# example with all tag attributes and a related-object tag to ignore
(
read_fixture("test_clinical_trials", "content_03.xml"),
read_fixture("test_clinical_trials", "content_03_expected.py"),
),
)
def test_clinical_trials(self, xml_content, expected):
soup = parser.parse_xml(xml_content)
tag_content = parser.clinical_trials(soup_body(soup))
self.assertEqual(expected, tag_content)
@unpack
@data(
("", []),
(
read_fixture("test_pub_history", "content_01.xml"),
read_fixture("test_pub_history", "content_01_expected.py"),
),
(
read_fixture("test_pub_history", "content_02.xml"),
read_fixture("test_pub_history", "content_02_expected.py"),
),
(
read_fixture("test_pub_history", "content_03.xml"),
read_fixture("test_pub_history", "content_03_expected.py"),
),
)
def test_pub_history(self, xml_content, expected):
soup = parser.parse_xml(xml_content)
tag_content = parser.pub_history(soup)
self.assertEqual(expected, tag_content)
if __name__ == "__main__":
unittest.main()
|
elifesciences/elife-tools
|
tests/test_parse_jats.py
|
Python
|
mit
| 132,828
|