Dataset columns (15 per row): repo_name (string, 5–92 chars), path (string, 4–221 chars), copies (19 classes), size (string, 4–6 chars), content (string, 766–896k chars), license (15 classes), hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B), line_mean (float64, 6.51–99.9), line_max (int64, 32–997), alpha_frac (float64, 0.25–0.96), autogenerated (bool, 1 class), ratio (float64, 1.5–13.6), config_test (bool, 2 classes), has_no_keywords (bool, 2 classes), few_assignments (bool, 1 class).

| repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated | ratio | config_test | has_no_keywords | few_assignments |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
FilipeMaia/arrayfire-python | arrayfire/array.py | 1 | 30063 |
#######################################################
# Copyright (c) 2015, ArrayFire
# All rights reserved.
#
# This file is distributed under 3-clause BSD license.
# The complete license agreement can be obtained at:
# http://arrayfire.com/licenses/BSD-3-Clause
########################################################
"""
arrayfire.Array class and helper functions.
"""
import inspect
from .library import *
from .util import *
from .util import _is_number
from .bcast import _bcast_var
from .base import *
from .index import *
from .index import _Index4
def _create_array(buf, numdims, idims, dtype):
out_arr = ct.c_void_p(0)
c_dims = dim4(idims[0], idims[1], idims[2], idims[3])
safe_call(backend.get().af_create_array(ct.pointer(out_arr), ct.c_void_p(buf),
numdims, ct.pointer(c_dims), dtype.value))
return out_arr
def _create_empty_array(numdims, idims, dtype):
out_arr = ct.c_void_p(0)
c_dims = dim4(idims[0], idims[1], idims[2], idims[3])
safe_call(backend.get().af_create_handle(ct.pointer(out_arr),
numdims, ct.pointer(c_dims), dtype.value))
return out_arr
def constant_array(val, d0, d1=None, d2=None, d3=None, dtype=Dtype.f32):
"""
Internal function to create a C array. Should not be used externally.
"""
if not isinstance(dtype, ct.c_int):
if isinstance(dtype, int):
dtype = ct.c_int(dtype)
elif isinstance(dtype, Dtype):
dtype = ct.c_int(dtype.value)
else:
raise TypeError("Invalid dtype")
out = ct.c_void_p(0)
dims = dim4(d0, d1, d2, d3)
if isinstance(val, complex):
c_real = ct.c_double(val.real)
c_imag = ct.c_double(val.imag)
if (dtype.value != Dtype.c32.value and dtype.value != Dtype.c64.value):
dtype = Dtype.c32.value
safe_call(backend.get().af_constant_complex(ct.pointer(out), c_real, c_imag,
4, ct.pointer(dims), dtype))
elif dtype.value == Dtype.s64.value:
c_val = ct.c_longlong(val.real)
safe_call(backend.get().af_constant_long(ct.pointer(out), c_val, 4, ct.pointer(dims)))
elif dtype.value == Dtype.u64.value:
c_val = ct.c_ulonglong(val.real)
safe_call(backend.get().af_constant_ulong(ct.pointer(out), c_val, 4, ct.pointer(dims)))
else:
c_val = ct.c_double(val)
safe_call(backend.get().af_constant(ct.pointer(out), c_val, 4, ct.pointer(dims), dtype))
return out
def _binary_func(lhs, rhs, c_func):
out = Array()
other = rhs
if (_is_number(rhs)):
ldims = dim4_to_tuple(lhs.dims())
rty = implicit_dtype(rhs, lhs.type())
other = Array()
other.arr = constant_array(rhs, ldims[0], ldims[1], ldims[2], ldims[3], rty.value)
elif not isinstance(rhs, Array):
raise TypeError("Invalid parameter to binary function")
safe_call(c_func(ct.pointer(out.arr), lhs.arr, other.arr, _bcast_var.get()))
return out
def _binary_funcr(lhs, rhs, c_func):
out = Array()
other = lhs
if (_is_number(lhs)):
rdims = dim4_to_tuple(rhs.dims())
lty = implicit_dtype(lhs, rhs.type())
other = Array()
other.arr = constant_array(lhs, rdims[0], rdims[1], rdims[2], rdims[3], lty.value)
elif not isinstance(lhs, Array):
raise TypeError("Invalid parameter to binary function")
c_func(ct.pointer(out.arr), other.arr, rhs.arr, _bcast_var.get())
return out
def _ctype_to_lists(ctype_arr, dim, shape, offset=0):
if (dim == 0):
return list(ctype_arr[offset : offset + shape[0]])
else:
dim_len = shape[dim]
res = [[]] * dim_len
for n in range(dim_len):
res[n] = _ctype_to_lists(ctype_arr, dim - 1, shape, offset)
offset += shape[0]
return res
def _slice_to_length(key, dim):
tkey = [key.start, key.stop, key.step]
if tkey[0] is None:
tkey[0] = 0
elif tkey[0] < 0:
tkey[0] = dim - tkey[0]
if tkey[1] is None:
tkey[1] = dim
elif tkey[1] < 0:
tkey[1] = dim - tkey[1]
if tkey[2] is None:
tkey[2] = 1
return int(((tkey[1] - tkey[0] - 1) / tkey[2]) + 1)
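# A quick worked example of _slice_to_length (the helper below is hypothetical,
# added for illustration only, and is never called), assuming a dimension of size 10.
def _slice_to_length_example():
    # slice(2, 8, 2) selects indices 2, 4 and 6, so the computed length is 3.
    assert _slice_to_length(slice(2, 8, 2), 10) == 3
    # A bare slice(None, None, None) covers the whole dimension.
    assert _slice_to_length(slice(None, None, None), 10) == 10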
def _get_info(dims, buf_len):
elements = 1
numdims = len(dims)
idims = [1]*4
for i in range(numdims):
elements *= dims[i]
idims[i] = dims[i]
if (elements == 0):
if (buf_len != 0):
idims = [buf_len, 1, 1, 1]
numdims = 1
else:
raise RuntimeError("Invalid size")
return numdims, idims
def _get_indices(key):
S = Index(slice(None))
inds = _Index4(S, S, S, S)
if isinstance(key, tuple):
n_idx = len(key)
for n in range(n_idx):
inds[n] = Index(key[n])
else:
inds[0] = Index(key)
return inds
def _get_assign_dims(key, idims):
dims = [1]*4
for n in range(len(idims)):
dims[n] = idims[n]
if _is_number(key):
dims[0] = 1
return dims
elif isinstance(key, slice):
dims[0] = _slice_to_length(key, idims[0])
return dims
elif isinstance(key, ParallelRange):
dims[0] = _slice_to_length(key.S, idims[0])
return dims
elif isinstance(key, BaseArray):
# If the array is boolean take only the number of nonzeros
if(key.dtype() is Dtype.b8):
dims[0] = int(sum(key))
else:
dims[0] = key.elements()
return dims
elif isinstance(key, tuple):
n_inds = len(key)
for n in range(n_inds):
if (_is_number(key[n])):
dims[n] = 1
elif (isinstance(key[n], BaseArray)):
# If the array is boolean take only the number of nonzeros
if(key[n].dtype() is Dtype.b8):
dims[n] = int(sum(key[n]))
else:
dims[n] = key[n].elements()
elif (isinstance(key[n], slice)):
dims[n] = _slice_to_length(key[n], idims[n])
elif (isinstance(key[n], ParallelRange)):
dims[n] = _slice_to_length(key[n].S, idims[n])
else:
raise IndexError("Invalid type while assigning to arrayfire.array")
return dims
else:
raise IndexError("Invalid type while assigning to arrayfire.array")
def transpose(a, conj=False):
"""
Perform the transpose on an input.
Parameters
-----------
a : af.Array
Multi dimensional arrayfire array.
conj : optional: bool. default: False.
Flag to specify if a complex conjugate needs to be applied for complex inputs.
Returns
--------
out : af.Array
Containing the transpose of `a` for all batches.
"""
out = Array()
safe_call(backend.get().af_transpose(ct.pointer(out.arr), a.arr, conj))
return out
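# A minimal usage sketch (hypothetical helper, never called); it assumes an
# ArrayFire backend is available at runtime.
def _transpose_usage_sketch():
    a = Array([1, 2, 3, 4], (2, 2))
    at = transpose(a)         # plain transpose
    ah = transpose(a, True)   # conjugate transpose, relevant for complex inputs
    return at, ah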
def transpose_inplace(a, conj=False):
"""
Perform inplace transpose on an input.
Parameters
-----------
a : af.Array
- Multi dimensional arrayfire array.
- Contains transposed values on exit.
conj : optional: bool. default: False.
Flag to specify if a complex conjugate needs to be applied for complex inputs.
Note
-------
Input `a` needs to be a square matrix or a batch of square matrices.
"""
safe_call(backend.get().af_transpose_inplace(a.arr, conj))
class Array(BaseArray):
"""
A multi dimensional array container.
Parameters
----------
src : optional: array.array, list or C buffer. default: None.
- When `src` is `array.array` or `list`, the data is copied to create the Array()
- When `src` is None, an empty buffer is created.
dims : optional: tuple of ints. default: (0,)
- When using the default values of `dims`, the dims are calculated as `len(src)`
dtype: optional: str or arrayfire.Dtype. default: None.
- if str, must be one of the following:
- 'f' for float
- 'd' for double
- 'b' for bool
- 'B' for unsigned char
- 'i' for signed 32 bit integer
- 'I' for unsigned 32 bit integer
- 'l' for signed 64 bit integer
- 'L' for unsigned 64 bit integer
- 'F' for 32 bit complex number
- 'D' for 64 bit complex number
- if arrayfire.Dtype, must be one of the following:
- Dtype.f32 for float
- Dtype.f64 for double
- Dtype.b8 for bool
- Dtype.u8 for unsigned char
- Dtype.s32 for signed 32 bit integer
- Dtype.u32 for unsigned 32 bit integer
- Dtype.s64 for signed 64 bit integer
- Dtype.u64 for unsigned 64 bit integer
- Dtype.c32 for 32 bit complex number
- Dtype.c64 for 64 bit complex number
- if None, Dtype.f32 is assumed
Attributes
-----------
arr: ctypes.c_void_p
ctypes variable containing af_array from arrayfire library.
Examples
--------
Creating an af.Array() from array.array()
>>> import arrayfire as af
>>> import array
>>> a = array.array('f', (1, 2, 3, 4))
>>> b = af.Array(a, (2,2))
>>> af.display(b)
[2 2 1 1]
1.0000 3.0000
2.0000 4.0000
Creating an af.Array() from a list
>>> import arrayfire as af
>>> import array
>>> a = [1, 2, 3, 4]
>>> b = af.Array(a)
>>> af.display(b)
[4 1 1 1]
1.0000
2.0000
3.0000
4.0000
Creating an af.Array() from numpy.array()
>>> import numpy as np
>>> import arrayfire as af
>>> a = np.random.random((2,2))
>>> a
array([[ 0.33042524, 0.36135449],
[ 0.86748649, 0.42199135]])
>>> b = af.Array(a.ctypes.data, a.shape, a.dtype.char)
>>> af.display(b)
[2 2 1 1]
0.3304 0.8675
0.3614 0.4220
Note
-----
- The class is currently limited to 4 dimensions.
- arrayfire.Array() uses column major format.
- numpy uses row major format by default which can cause issues during conversion
"""
def __init__(self, src=None, dims=(0,), dtype=None):
super(Array, self).__init__()
buf=None
buf_len=0
if dtype is not None:
if isinstance(dtype, str):
type_char = dtype
else:
type_char = to_typecode[dtype.value]
else:
type_char = None
_type_char='f'
backend.lock()
if src is not None:
if (isinstance(src, Array)):
safe_call(backend.get().af_retain_array(ct.pointer(self.arr), src.arr))
return
host = __import__("array")
if isinstance(src, host.array):
buf,buf_len = src.buffer_info()
_type_char = src.typecode
numdims, idims = _get_info(dims, buf_len)
elif isinstance(src, list):
tmp = host.array('f', src)
buf,buf_len = tmp.buffer_info()
_type_char = tmp.typecode
numdims, idims = _get_info(dims, buf_len)
elif isinstance(src, int) or isinstance(src, ct.c_void_p):
buf = src
numdims, idims = _get_info(dims, buf_len)
elements = 1
for dim in idims:
elements *= dim
if (elements == 0):
raise RuntimeError("Expected dims when src is data pointer")
if (type_char is None):
raise TypeError("Expected type_char when src is data pointer")
_type_char = type_char
else:
raise TypeError("src is an object of unsupported class")
if (type_char is not None and
type_char != _type_char):
raise TypeError("Can not create array of requested type from input data type")
self.arr = _create_array(buf, numdims, idims, to_dtype[_type_char])
else:
if type_char is None:
type_char = 'f'
numdims = len(dims)
idims = [1] * 4
for n in range(numdims):
idims[n] = dims[n]
self.arr = _create_empty_array(numdims, idims, to_dtype[type_char])
def copy(self):
"""
Performs a deep copy of the array.
Returns
-------
out: af.Array()
An identical copy of self.
"""
out = Array()
safe_call(backend.get().af_copy_array(ct.pointer(out.arr), self.arr))
return out
def __del__(self):
"""
Release the C array when going out of scope
"""
if self.arr.value:
backend.get().af_release_array(self.arr)
def device_ptr(self):
"""
Return the device pointer held by the array.
Returns
------
ptr : int
Contains location of the device pointer
Note
----
- This can be used to integrate with custom C code and / or PyCUDA or PyOpenCL.
- No memory copy is performed; this function returns the raw device pointer.
"""
ptr = ct.c_void_p(0)
backend.get().af_get_device_ptr(ct.pointer(ptr), self.arr)
return ptr.value
def elements(self):
"""
Return the number of elements in the array.
"""
num = ct.c_ulonglong(0)
safe_call(backend.get().af_get_elements(ct.pointer(num), self.arr))
return num.value
def dtype(self):
"""
Return the data type as an arrayfire.Dtype enum value.
"""
dty = ct.c_int(Dtype.f32.value)
safe_call(backend.get().af_get_type(ct.pointer(dty), self.arr))
return to_dtype[typecodes[dty.value]]
def type(self):
"""
Return the data type as an int.
"""
return self.dtype().value
def dims(self):
"""
Return the shape of the array as a tuple.
"""
d0 = ct.c_longlong(0)
d1 = ct.c_longlong(0)
d2 = ct.c_longlong(0)
d3 = ct.c_longlong(0)
safe_call(backend.get().af_get_dims(ct.pointer(d0), ct.pointer(d1),
ct.pointer(d2), ct.pointer(d3), self.arr))
dims = (d0.value,d1.value,d2.value,d3.value)
return dims[:self.numdims()]
def numdims(self):
"""
Return the number of dimensions of the array.
"""
nd = ct.c_uint(0)
safe_call(backend.get().af_get_numdims(ct.pointer(nd), self.arr))
return nd.value
def is_empty(self):
"""
Check if the array is empty i.e. it has no elements.
"""
res = ct.c_bool(False)
safe_call(backend.get().af_is_empty(ct.pointer(res), self.arr))
return res.value
def is_scalar(self):
"""
Check if the array is scalar i.e. it has only one element.
"""
res = ct.c_bool(False)
safe_call(backend.get().af_is_scalar(ct.pointer(res), self.arr))
return res.value
def is_row(self):
"""
Check if the array is a row i.e. it has a shape of (1, cols).
"""
res = ct.c_bool(False)
safe_call(backend.get().af_is_row(ct.pointer(res), self.arr))
return res.value
def is_column(self):
"""
Check if the array is a column i.e. it has a shape of (rows, 1).
"""
res = ct.c_bool(False)
safe_call(backend.get().af_is_column(ct.pointer(res), self.arr))
return res.value
def is_vector(self):
"""
Check if the array is a vector i.e. it has a shape of one of the following:
- (rows, 1)
- (1, cols)
- (1, 1, vols)
- (1, 1, 1, batch)
"""
res = ct.c_bool(False)
safe_call(backend.get().af_is_vector(ct.pointer(res), self.arr))
return res.value
def is_complex(self):
"""
Check if the array is of complex type.
"""
res = ct.c_bool(False)
safe_call(backend.get().af_is_complex(ct.pointer(res), self.arr))
return res.value
def is_real(self):
"""
Check if the array is not of complex type.
"""
res = ct.c_bool(False)
safe_call(backend.get().af_is_real(ct.pointer(res), self.arr))
return res.value
def is_double(self):
"""
Check if the array is of double precision floating point type.
"""
res = ct.c_bool(False)
safe_call(backend.get().af_is_double(ct.pointer(res), self.arr))
return res.value
def is_single(self):
"""
Check if the array is of single precision floating point type.
"""
res = ct.c_bool(False)
safe_call(backend.get().af_is_single(ct.pointer(res), self.arr))
return res.value
def is_real_floating(self):
"""
Check if the array is real and of floating point type.
"""
res = ct.c_bool(False)
safe_call(backend.get().af_is_realfloating(ct.pointer(res), self.arr))
return res.value
def is_floating(self):
"""
Check if the array is of floating point type.
"""
res = ct.c_bool(False)
safe_call(backend.get().af_is_floating(ct.pointer(res), self.arr))
return res.value
def is_integer(self):
"""
Check if the array is of integer type.
"""
res = ct.c_bool(False)
safe_call(backend.get().af_is_integer(ct.pointer(res), self.arr))
return res.value
def is_bool(self):
"""
Check if the array is of type b8.
"""
res = ct.c_bool(False)
safe_call(backend.get().af_is_bool(ct.pointer(res), self.arr))
return res.value
def __add__(self, other):
"""
Return self + other.
"""
return _binary_func(self, other, backend.get().af_add)
def __iadd__(self, other):
"""
Perform self += other.
"""
self = _binary_func(self, other, backend.get().af_add)
return self
def __radd__(self, other):
"""
Return other + self.
"""
return _binary_funcr(other, self, backend.get().af_add)
def __sub__(self, other):
"""
Return self - other.
"""
return _binary_func(self, other, backend.get().af_sub)
def __isub__(self, other):
"""
Perform self -= other.
"""
self = _binary_func(self, other, backend.get().af_sub)
return self
def __rsub__(self, other):
"""
Return other - self.
"""
return _binary_funcr(other, self, backend.get().af_sub)
def __mul__(self, other):
"""
Return self * other.
"""
return _binary_func(self, other, backend.get().af_mul)
def __imul__(self, other):
"""
Perform self *= other.
"""
self = _binary_func(self, other, backend.get().af_mul)
return self
def __rmul__(self, other):
"""
Return other * self.
"""
return _binary_funcr(other, self, backend.get().af_mul)
def __truediv__(self, other):
"""
Return self / other.
"""
return _binary_func(self, other, backend.get().af_div)
def __itruediv__(self, other):
"""
Perform self /= other.
"""
self = _binary_func(self, other, backend.get().af_div)
return self
def __rtruediv__(self, other):
"""
Return other / self.
"""
return _binary_funcr(other, self, backend.get().af_div)
def __div__(self, other):
"""
Return self / other.
"""
return _binary_func(self, other, backend.get().af_div)
def __idiv__(self, other):
"""
Perform self /= other.
"""
self = _binary_func(self, other, backend.get().af_div)
return self
def __rdiv__(self, other):
"""
Return other / self.
"""
return _binary_funcr(other, self, backend.get().af_div)
def __mod__(self, other):
"""
Return self % other.
"""
return _binary_func(self, other, backend.get().af_mod)
def __imod__(self, other):
"""
Perform self %= other.
"""
self = _binary_func(self, other, backend.get().af_mod)
return self
def __rmod__(self, other):
"""
Return other % self.
"""
return _binary_funcr(other, self, backend.get().af_mod)
def __pow__(self, other):
"""
Return self ** other.
"""
return _binary_func(self, other, backend.get().af_pow)
def __ipow__(self, other):
"""
Perform self **= other.
"""
self = _binary_func(self, other, backend.get().af_pow)
return self
def __rpow__(self, other):
"""
Return other ** self.
"""
return _binary_funcr(other, self, backend.get().af_pow)
def __lt__(self, other):
"""
Return self < other.
"""
return _binary_func(self, other, backend.get().af_lt)
def __gt__(self, other):
"""
Return self > other.
"""
return _binary_func(self, other, backend.get().af_gt)
def __le__(self, other):
"""
Return self <= other.
"""
return _binary_func(self, other, backend.get().af_le)
def __ge__(self, other):
"""
Return self >= other.
"""
return _binary_func(self, other, backend.get().af_ge)
def __eq__(self, other):
"""
Return self == other.
"""
return _binary_func(self, other, backend.get().af_eq)
def __ne__(self, other):
"""
Return self != other.
"""
return _binary_func(self, other, backend.get().af_neq)
def __and__(self, other):
"""
Return self & other.
"""
return _binary_func(self, other, backend.get().af_bitand)
def __iand__(self, other):
"""
Perform self &= other.
"""
self = _binary_func(self, other, backend.get().af_bitand)
return self
def __or__(self, other):
"""
Return self | other.
"""
return _binary_func(self, other, backend.get().af_bitor)
def __ior__(self, other):
"""
Perform self |= other.
"""
self = _binary_func(self, other, backend.get().af_bitor)
return self
def __xor__(self, other):
"""
Return self ^ other.
"""
return _binary_func(self, other, backend.get().af_bitxor)
def __ixor__(self, other):
"""
Perform self ^= other.
"""
self = _binary_func(self, other, backend.get().af_bitxor)
return self
def __lshift__(self, other):
"""
Return self << other.
"""
return _binary_func(self, other, backend.get().af_bitshiftl)
def __ilshift__(self, other):
"""
Perform self <<= other.
"""
self = _binary_func(self, other, backend.get().af_bitshiftl)
return self
def __rshift__(self, other):
"""
Return self >> other.
"""
return _binary_func(self, other, backend.get().af_bitshiftr)
def __irshift__(self, other):
"""
Perform self >>= other.
"""
self = _binary_func(self, other, backend.get().af_bitshiftr)
return self
def __neg__(self):
"""
Return -self
"""
return 0 - self
def __pos__(self):
"""
Return +self
"""
return self
def __invert__(self):
"""
Return ~self
"""
return self == 0
def __nonzero__(self):
return self != 0
# TODO:
# def __abs__(self):
# return self
def __getitem__(self, key):
"""
Return self[key]
Note
----
Ellipsis not supported as key
"""
try:
out = Array()
n_dims = self.numdims()
inds = _get_indices(key)
safe_call(backend.get().af_index_gen(ct.pointer(out.arr),
self.arr, ct.c_longlong(n_dims), inds.pointer))
return out
except RuntimeError as e:
raise IndexError(str(e))
def __setitem__(self, key, val):
"""
Perform self[key] = val
Note
----
Ellipsis not supported as key
"""
try:
n_dims = self.numdims()
if (_is_number(val)):
tdims = _get_assign_dims(key, self.dims())
other_arr = constant_array(val, tdims[0], tdims[1], tdims[2], tdims[3], self.type())
del_other = True
else:
other_arr = val.arr
del_other = False
out_arr = ct.c_void_p(0)
inds = _get_indices(key)
safe_call(backend.get().af_assign_gen(ct.pointer(out_arr),
self.arr, ct.c_longlong(n_dims), inds.pointer,
other_arr))
safe_call(backend.get().af_release_array(self.arr))
if del_other:
safe_call(backend.get().af_release_array(other_arr))
self.arr = out_arr
except RuntimeError as e:
raise IndexError(str(e))
def to_ctype(self, row_major=False, return_shape=False):
"""
Return the data as a ctype C array after copying to host memory
Parameters
---------
row_major: optional: bool. default: False.
Specifies if a transpose needs to occur before copying to host memory.
return_shape: optional: bool. default: False.
Specifies if the shape of the array needs to be returned.
Returns
-------
If return_shape is False:
res: The ctypes array of the appropriate type and length.
else :
(res, dims): tuple of the ctypes array and the shape of the array
"""
if (self.arr.value == 0):
raise RuntimeError("Can not call to_ctype on empty array")
tmp = transpose(self) if row_major else self
ctype_type = to_c_type[self.type()] * self.elements()
res = ctype_type()
safe_call(backend.get().af_get_data_ptr(ct.pointer(res), self.arr))
if (return_shape):
return res, self.dims()
else:
return res
def to_array(self, row_major=False, return_shape=False):
"""
Return the data as array.array
Parameters
---------
row_major: optional: bool. default: False.
Specifies if a transpose needs to occur before copying to host memory.
return_shape: optional: bool. default: False.
Specifies if the shape of the array needs to be returned.
Returns
-------
If return_shape is False:
res: array.array of the appropriate type and length.
else :
(res, dims): array.array and the shape of the array
"""
if (self.arr.value == 0):
raise RuntimeError("Can not call to_array on empty array")
res = self.to_ctype(row_major, return_shape)
host = __import__("array")
h_type = to_typecode[self.type()]
if (return_shape):
return host.array(h_type, res[0]), res[1]
else:
return host.array(h_type, res)
def to_list(self, row_major=False):
"""
Return the data as a list
Parameters
---------
row_major: optional: bool. default: False.
Specifies if a transpose needs to occur before copying to host memory.
Returns
-------
res: list of the appropriate type and length.
"""
ct_array, shape = self.to_ctype(row_major, True)
return _ctype_to_lists(ct_array, len(shape) - 1, shape)
def __repr__(self):
"""
Displays the meta data of the arrayfire array.
Note
----
Use arrayfire.display(a) to display the contents of the array.
"""
# Having __repr__ directly print things is a bad idea
# Placeholder for when af_array_to_string is available
# safe_call(backend.get().af_array_to_string...
return 'Type: arrayfire.Array()\nShape: %s\nType char: %s' % \
(self.dims(), to_typecode[self.type()])
def __array__(self):
"""
Constructs a numpy.array from arrayfire.Array
"""
import numpy as np
res = np.empty(self.dims(), dtype=np.dtype(to_typecode[self.type()]), order='F')
safe_call(backend.get().af_get_data_ptr(ct.c_void_p(res.ctypes.data), self.arr))
return res
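# A minimal sketch (hypothetical helper, never called) of the host-copy paths
# and the numpy protocol; it assumes `a` is a non-empty Array and numpy is installed.
def _host_interop_sketch(a):
    import numpy as np
    c_buf, shape = a.to_ctype(return_shape=True)   # raw ctypes buffer and dims
    py_arr = a.to_array()                          # array.array copy
    nested = a.to_list(row_major=True)             # nested Python lists
    np_arr = np.array(a)                           # goes through Array.__array__
    return c_buf, shape, py_arr, nested, np_arr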
def display(a):
"""
Displays the contents of an array.
Parameters
----------
a : af.Array
Multi dimensional arrayfire array
"""
expr = inspect.stack()[1][-2]
try:
if (expr is not None):
st = expr[0].find('(') + 1
en = expr[0].rfind(')')
print('%s' % expr[0][st:en])
safe_call(backend.get().af_print_array(a.arr))
except:
safe_call(backend.get().af_print_array(a.arr))
from .algorithm import sum
| bsd-3-clause | -5,315,669,486,786,941,000 | 27.30791 | 100 | 0.520939 | false | 3.725279 | false | false | false |
ethansshaw/stellavitrum | ScienceFairProcess.py | 1 | 10349 |
#!/usr/bin/env python
"""
Written by Ethan Shaw
"""
from astropy.io import fits
import sys, png, math, os
colors = ['red', 'green', 'blue']
# Build x_axis_len rows, each containing y_axis_len columns
# access with PNG_data[row][column]
def buildMatrix(x_axis_len, y_axis_len, greyscale=True):
# set up empty list (matrix) to hold pixels
PNG_data = []
for row in range(0, x_axis_len):
PNG_data.append([])
#start out with an empty list, then put another list in it so it looks like [[]]
#gives the value of x_axis_len empty lists inside the list PNG_data
for column in range (0, y_axis_len):
if ( greyscale ):
PNG_data[row].append(0)
#this is the grayscale value
else:
#Red,Green,Blue values
PNG_data[row].append(0)
PNG_data[row].append(0)
PNG_data[row].append(0)
return PNG_data
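# A small worked example (hypothetical helper, never called): a 2 x 3 RGB matrix
# holds 2 rows of 3 * 3 = 9 zeroed channel values, addressed as column*3 (+0/+1/+2).
def _buildMatrixSketch():
    rgb = buildMatrix(2, 3, greyscale=False)
    grey = buildMatrix(2, 3)  # 2 rows of 3 zeroed greyscale values
    return rgb, grey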
#Function defines ONLY color
def setPixel(PNG_data, red, green, blue, row, column):
PNG_data[row][column*3] = red
PNG_data[row][column*3 + 1] = green
PNG_data[row][column*3 + 2] = blue
def getPixelRange(PNG_data, x_axis_len, y_axis_len):
# determine the PNG_data range for scaling purposes
pixel_max = 0
pixel_min = pow(2,16)
for row in range(0, x_axis_len):
for column in range (0, y_axis_len):
pixel_max = max(pixel_max, PNG_data[row][column])
pixel_min = min(pixel_min, PNG_data[row][column])
print "Pixel max: {0:.20f}, Pixel min: {0:.20f}".format(pixel_max, pixel_min)
return (pixel_max, pixel_min)
def getRawDataFromFile(file, color):
#this reads the file and structures into useable format
hdulist = fits.open(file)
entry = hdulist[0]
bits_per_pixel = entry.header['BITPIX']
number_axis = entry.header['NAXIS']
x_axis_len = entry.header['NAXIS2']
y_axis_len = entry.header['NAXIS1']
print "Data dimensions: (%d x %d) - %d axes, %d bpp" % (x_axis_len, y_axis_len, number_axis, bits_per_pixel)
# data is a bunch of columns, each containing one row
data = entry.data
pixelData = buildMatrix(x_axis_len, y_axis_len, greyscale=False)
for row in range(0, x_axis_len):
for column in range (0, y_axis_len):
try:
image_value = data[row][column]
red, green, blue = ( 0,0,0 )
if ( color == 'red' ):
red = image_value
elif ( color == 'green' ):
green = image_value
elif ( color == 'blue' ):
blue = image_value
setPixel(pixelData, red, green, blue, row, column)
except Exception as e:
print "Error accessing (%d, %d) : %s" % (row, column, e)
raise SystemExit
return pixelData
def combineTwoDataSets(dataSet1, dataSet2):
print "Combining two data sets"
# step 1, make a new data set the size of the two
x_axis_len = len(dataSet1)
y_axis_len = len(dataSet1[0])
combinedData = buildMatrix(x_axis_len, y_axis_len)
# step 2, step over each pixel in the sets and ADD to the combined pixel value
for row in range(0, x_axis_len):
for column in range (0, y_axis_len):
combinedData[row][column] = dataSet1[row][column] + dataSet2[row][column]
# step 3, return the combined data set
return combinedData
def writePNGFile(PNGData, output_directory, dataset_name):
filename = '%s/out_data_%s.png' % ( output_directory, dataset_name)
f = open(filename, 'wb') # binary mode is important
w = png.Writer(len(PNGData[0])/3, len(PNGData), greyscale=False,alpha=False, bitdepth=16)
w.write(f, PNGData)
print "Image written to file %s" % filename
def linearScale(value, min_value, max_value):
pixel_range = abs(max_value - min_value)
#2 to the 16th means a 16 bit image (using 16 bits of data to describe each pixel)
ratio = (pow(2, 16)*1.0 - 1) / pixel_range
#This gives us a linearly scaled value between 0 (black) and 2^16 (white)
val = int(round(value * ratio))
return val
def logarithmicScalePixel(value, min_value, max_value):
try:
val = abs(math.log(value))
# for min and max we use 0, 100 for now
return linearScalePixel(val, 0, 100)
except Exception as e:
return 0
def linearScalePixel(value, min_value, max_value):
pixel_range = abs(max_value - min_value)
#2 to the 16th means a 16 bit image (using 16 bits of data to describe each pixel)
ratio = (pow(2, 16)*1.0 -1 ) / pixel_range
#This gives us a linearly scaled value between 0 (black) and 2^16 (white)
val = int(round(value * ratio))
if ( val < 0 or val > 65535 ):
print "value %d (orig: %f was outside range %.e, %.e" % ( val, value, min_value, max_value )
raise SystemExit
return val
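# A worked example (hypothetical helper, never called): with a data range of
# 0..0.5 the ratio is (2**16 - 1) / 0.5 = 131070, so a raw value of 0.25 maps
# to int(round(0.25 * 131070)) == 32768, the 16-bit midpoint.
def _linearScalePixelSketch():
    return linearScalePixel(0.25, 0.0, 0.5)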
def scaleDataSet(scalingFunction, dataSet):
x_axis_len = len(dataSet)
y_axis_len = len(dataSet[0])
pixel_max, pixel_min = getPixelRange(dataSet, x_axis_len, y_axis_len)
print "Max: %f, Min: %f" % (pixel_max, pixel_min)
for row in range(0, x_axis_len):
for column in range (0, y_axis_len):
dataSet[row][column] = scalingFunction(dataSet[row][column], pixel_min, pixel_max)
return dataSet
def linearScaleDataSet(dataSet):
return scaleDataSet(linearScalePixel, dataSet)
def logScaleDataSet(dataSet):
return scaleDataSet(logarithmicScalePixel, dataSet)
def zeroOutliersInDataSet(dataSet, interQuartileScaleFactor=1.5):
(firstQuartile, median, thirdQuartile, interQuartile) = getQuartileValues(dataSet)
minAllowedValue = max(0, firstQuartile - (interQuartileScaleFactor * interQuartile))
maxAllowedValue = thirdQuartile + (interQuartileScaleFactor * interQuartile)
x_axis_len = len(dataSet)
y_axis_len = len(dataSet[0])
for row in range(0, x_axis_len):
for column in range (0, y_axis_len):
dataValue = dataSet[row][column]
if (dataValue < minAllowedValue or dataValue > maxAllowedValue):
dataSet[row][column] = 0
return dataSet
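# A minimal sketch (hypothetical helper with made-up data, never called):
# values outside [Q1 - 1.5*IQR, Q3 + 1.5*IQR] (clamped at 0) are zeroed, so the
# lone 1000.0 in this 2 x 4 matrix is removed while the rest is kept.
def _zeroOutliersSketch():
    data = [[1.0, 2.0, 3.0, 4.0], [5.0, 6.0, 7.0, 1000.0]]
    return zeroOutliersInDataSet(data)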
def histogramData(dataSet, output_directory, dataset_folder="data"):
pixel_max, pixel_min = getPixelRange(dataSet, len(dataSet), len(dataSet[0]))
histogram = {}
number_of_groups = 10
group_size = (pixel_max - pixel_min) / (number_of_groups *1.0)
for i in range(0, number_of_groups):
histogram[int(i*group_size)] = 0
histogramKeys = histogram.keys()
histogramKeys.sort()
histogramKeys.reverse()
for x in range(0, len(dataSet)):
for y in range(0, len(dataSet[0])):
pixel = dataSet[x][y]
for key in histogramKeys:
if pixel < key:
histogram[key] = int(histogram[key] + 1)
continue
histogramKeys.reverse()
output_path = "%s/%s_histogram.csv" % (output_directory, dataset_folder)
outf = open(output_path, "w")
for key in histogramKeys:
kname = "Bucket %d" % key
outf.write("%s,%d\n" % (kname, histogram[key]))
outf.close()
print "Histogram written to file %s" % output_path
def getMean(dataSet):
sum = 0.0
count = 0
x_axis_len = len(dataSet)
y_axis_len = len(dataSet[0])
for row in range(0, x_axis_len):
for column in range (0, y_axis_len):
if dataSet[row][column] > 0:
sum = sum + dataSet[row][column]
count = count + 1
return sum/count
def getMedian(dataSet):
dataList = []
x_axis_len = len(dataSet)
y_axis_len = len(dataSet[0])
for row in range(0, x_axis_len):
for column in range (0, y_axis_len):
if (dataSet[row][column] > 0):
dataList.append(dataSet[row][column])
dataList.sort()
middleNumber = len(dataList)/2
return dataList[middleNumber]
def getQuartileValues(dataSet):
median = getMedian(dataSet)
x_axis_len = len(dataSet)
y_axis_len = len(dataSet[0])
valuesLessThanMedian = []
valuesGreaterThanMedian = []
for row in range(0, x_axis_len):
for column in range (0, y_axis_len):
if dataSet[row][column] > median:
valuesGreaterThanMedian.append(dataSet[row][column])
else:
valuesLessThanMedian.append(dataSet[row][column])
valuesGreaterThanMedian.sort()
valuesLessThanMedian.sort()
firstQuartile = valuesLessThanMedian[len(valuesLessThanMedian)/2]
thirdQuartile = valuesGreaterThanMedian[len(valuesGreaterThanMedian)/2]
interQuartile = thirdQuartile - firstQuartile
print "Quartiles: ", firstQuartile, median, thirdQuartile, interQuartile
return (firstQuartile, median, thirdQuartile, interQuartile)
def getMode(dataSet):
dataPoints = {}
x_axis_len = len(dataSet)
y_axis_len = len(dataSet[0])
for row in range(0, x_axis_len):
for column in range (0, y_axis_len):
point = dataSet[row][column]
if (point > 0):
if dataPoints.has_key(point):
dataPoints[point] = dataPoints[point] + 1
else:
dataPoints[point] = 1
maxCount = 0
maxValue = None
for (value, count) in dataPoints.items():
if count > maxCount:
maxCount = count
maxValue = value
print "%f was the max value and occurred %d times" % (maxValue, maxCount)
return maxValue
def outputToCSVFile(filename, dataSet):
outf = open(filename, 'w')
x_axis_len = len(dataSet)
y_axis_len = len(dataSet[0])
for row in range(0, x_axis_len):
line = ""
for column in range (0, y_axis_len):
line = "%s%.7e," % (line, dataSet[row][column])
line = line + "\n"
outf.write(line)
outf.close()
print "Wrote to %s" % filename
if __name__ == "__main__":
if len(sys.argv) < 2:
print "Usage: %s <file1> <file2> ..." % sys.argv[0]
raise SystemExit
files = sys.argv[1:]
i = 0
PNGDataSets = []
#rData = getRawDataFromFile(files[0], "red")
#writePNGFile(rData, "red")
#raise SystemExit
full_path1 = os.path.abspath(files[0])
folder_path = os.path.split(full_path1)[0]
dataset_folder = os.path.basename(folder_path)
for file in files:
dataSet = getRawDataFromFile(file, colors[i])
i = i + 1
dataSetNormalized = zeroOutliersInDataSet(dataSet)
PNGDataSets.append(dataSetNormalized)
combinedSet = None
for dataSet in PNGDataSets:
if (combinedSet == None):
combinedSet = dataSet
else:
combinedSet = combineTwoDataSets(combinedSet, dataSet)
parent_directory = os.path.split(os.path.abspath(sys.argv[0]))[0]
output_directory = os.path.join(parent_directory, "Results")
if not os.path.exists(output_directory):
os.makedirs(output_directory)
print "Created directory %s" % output_directory
else:
print "Output directory %s exists" % output_directory
# now linear scale the outlier set
scaledSet = linearScaleDataSet(combinedSet)
histogramData(scaledSet, output_directory, dataset_folder)
#raise SystemExit
filename = "%s/dataset_%s.csv" % (output_directory, dataset_folder)
outputToCSVFile(filename, scaledSet)
writePNGFile(scaledSet, output_directory, dataset_folder) #old was writePNGFile(combinedSet, "combined")
print "Process complete"
| mit | 1,193,926,610,286,185,000 | 30.081081 | 109 | 0.689439 | false | 2.839232 | false | false | false |
sbelskie/symplicity | Symplicity/local_settings.py | 1 | 2695 |
"""
Django settings for Symplicity project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
import os
import dj_database_url
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'i+acxn5(akgsn!sr4^qgf(^m&*@+g1@u^t@=8s@axc41ml*f=s'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'symptom_tracker',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'Symplicity.urls'
WSGI_APPLICATION = 'Symplicity.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'symplicity',
'USER':'postgres',
'PASSWORD': 'password',
'HOST': 'localhost',
'PORT': '5432'
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Honor the 'X-Forwarded-Proto' header for request.is_secure()
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# Allow all host headers
ALLOWED_HOSTS = ['*']
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
STATIC_ROOT = 'staticfiles'
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static'),
)
STATICFILES_STORAGE = 'django.contrib.staticfiles.storage.StaticFilesStorage'
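# Note: dj_database_url is imported above but never used. A common Heroku-style
# override (shown only as a hedged sketch, not part of this project's config)
# would be:
# DATABASES['default'].update(dj_database_url.config())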
| apache-2.0 | 5,726,592,050,074,065,000 | 24.683168 | 77 | 0.687199 | false | 3.294621 | false | false | false |
oldm/OldMan | oldman/schema/hydra.py | 1 | 1520 |
from uuid import uuid4
from rdflib import URIRef, RDF, RDFS
from oldman.vocabulary import OLDM_CORRESPONDING_CLASS
class HydraSchemaAdapter(object):
"""Updates some Hydra patterns in the schema graph:
- hydra:Link: create a hydra:Class, subclass of the link range, that supports the same operations
"""
def update_schema_graph(self, graph):
graph = graph.skolemize()
graph = self._update_links(graph)
return graph
@staticmethod
def _update_links(graph):
links = list(graph.subjects(RDF.type, URIRef(u"http://www.w3.org/ns/hydra/core#Link")))
for link_property in links:
new_class_iri = URIRef(u"http://localhost/.well-known/genid/link_class/%s" % uuid4())
graph.add((new_class_iri, RDF.type, URIRef(u"http://www.w3.org/ns/hydra/core#Class")))
graph.add((link_property, URIRef(OLDM_CORRESPONDING_CLASS), new_class_iri))
# Ranges --> upper classes
ranges = list(graph.objects(link_property, RDFS.range))
for range in ranges:
graph.add((new_class_iri, RDFS.subClassOf, range))
# supported Operations
supported_operation_property = URIRef(u"http://www.w3.org/ns/hydra/core#supportedOperation")
operations = list(graph.objects(link_property, supported_operation_property))
for operation in operations:
graph.add((new_class_iri, supported_operation_property, operation))
return graph
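# A minimal usage sketch (hypothetical vocabulary URIs, helper never called):
# given a schema graph that declares a hydra:Link property, the adapter mints a
# new hydra:Class mirroring the link's ranges and supported operations.
def _hydra_link_sketch():
    from rdflib import Graph
    HYDRA = u"http://www.w3.org/ns/hydra/core#"
    graph = Graph()
    link = URIRef(u"http://example.org/vocab#relatedResource")
    graph.add((link, RDF.type, URIRef(HYDRA + u"Link")))
    graph.add((link, RDFS.range, URIRef(u"http://example.org/vocab#Resource")))
    updated = HydraSchemaAdapter().update_schema_graph(graph)
    # `updated` now also holds a generated hydra:Class, tied to the Link via
    # OLDM_CORRESPONDING_CLASS and declared rdfs:subClassOf the link's range.
    return updated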
| bsd-3-clause | -5,559,144,035,788,686,000 | 35.190476 | 105 | 0.646053 | false | 3.653846 | false | false | false |
blackshirt/dompetku | dompetku/handler/services.py | 1 | 3750 |
#!/usr/bin/env python
#
# Copyright @2014 blackshirtmuslim@yahoo.com
# Licensed: see Python license
"""Module to handle json services."""
import datetime
import json
import peewee
import tornado.web
import tornado.escape
from dompetku.handler import base
from dompetku.utils import jsonify
from dompetku.model import Transaksi, User
from dompetku.form import TransaksiForm
class TransaksiContainer(object):
def __init__(self, user):
self.user = user
def find_one(self, tid):
cur_user = User.select().where(User.name == self.user)
if cur_user.exists():
user = cur_user.get()
trn = Transaksi.select().where(Transaksi.user == user.uid, Transaksi.tid == tid)
if trn.exists():
data = trn.get() # Transaksi instance
return data
return None
def find_data(self, *expr):
cur_user = User.select().where(User.name == self.user)
if cur_user.exists():
user = cur_user.get()
trn = Transaksi.select().where(Transaksi.user == user.uid, *expr)
return trn # Transaksi QueryResultWrapper
return None
class DataSources(TransaksiContainer):
def __init__(self, user):
self.user = user
super().__init__(self.user)
def get_one(self, tid):
data = self.find_one(tid)
if data is not None:
results = {
'tid': data.tid,
'user': data.user.name,
'info': data.info,
'amount': data.amount,
'transdate': data.transdate,
'memo': data.memo
}
return results # dict of transaksi item
def get_data(self, *expr):
temporary = {}
results = []
data = self.find_data(*expr)
for item in data:
temporary = {
'tid': item.tid,
'user': item.user.name,
'info': item.info,
'transdate': item.transdate,
'amount': item.amount,
'memo': item.memo
}
results.append(temporary)
return results # list of dict of transaksi item
class ApiTransactions(base.BaseHandler):
def initialize(self):
self.dsc = DataSources(self.current_user)
@tornado.web.authenticated
def get(self, *kondisi):
if kondisi:
data = self.dsc.get_data(*kondisi)
else:
# get data for the current month
today = datetime.date.today()
cur_month = today.month
expr = (Transaksi.transdate.month == cur_month,)
data = self.dsc.get_data(expr)
self.write(jsonify(data))
def post(self):
data = tornado.escape.json_decode(self.request.body)
info = data.get('info')
amount = data.get('amount')
memo = data.get('memo')
try:
active_user = User.get(User.name == self.current_user)
except peewee.DoesNotExist:
active_user = None
return
item = Transaksi.insert(info = info,
amount=amount,
tipe=10,
user=active_user.uid,
memo=memo )
last_id = item.execute()
transaksi = Transaksi.get(Transaksi.tid == last_id)
response = {'info': transaksi.info,
'user': transaksi.user.name,
'amount': transaksi.amount,
'memo': transaksi.memo,
'transdate': transaksi.transdate}
self.write(jsonify(response))
| bsd-2-clause | -4,487,325,386,750,957,000 | 29.241935 | 92 | 0.524267 | false | 4.002134 | false | false | false |
spel-uchile/SUCHAI-Flight-Software | sandbox/log_parser.py | 1 | 1956 |
import re
import argparse
import pandas as pd
# General expressions
re_error = re.compile(r'\[ERROR\]\[(\d+)\]\[(\w+)\](.+)')
re_warning = re.compile(r'\[WARN \]\[(\d+)\]\[(\w+)\](.+)')
re_info = re.compile(r'\[INFO \]\[(\d+)\]\[(\w+)\](.+)')
re_debug = re.compile(r'\[DEBUG\]\[(\d+)\]\[(\w+)\](.+)')
re_verbose = re.compile(r'\[VERB \]\[(\d+)\]\[(\w+)\](.+)')
# Specific expressions
re_cmd_run = re.compile(r'\[INFO \]\[(\d+)]\[Executer\] Running the command: (.+)')
re_cmd_result = re.compile(r'\[INFO \]\[(\d+)]\[Executer\] Command result: (\d+)')
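# A worked example (hypothetical log line) of what the command expressions
# capture: one (timestamp, payload) tuple per match.
def _regex_sketch():
    sample = '[INFO ][1234][Executer] Running the command: obc_get_mem'
    return re_cmd_run.findall(sample)  # -> [('1234', 'obc_get_mem')]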
def get_parameters():
"""
Parse script arguments
"""
parser = argparse.ArgumentParser()
# General expressions
parser.add_argument('file', type=str, help="Log file")
parser.add_argument('--error', action="store_const", const=re_error)
parser.add_argument('--warning', action="store_const", const=re_warning)
parser.add_argument('--info', action="store_const", const=re_info)
parser.add_argument('--debug', action="store_const", const=re_debug)
parser.add_argument('--verbose', action="store_const", const=re_verbose)
# Specific expressions
parser.add_argument('--cmd-run', action="store_const", const=re_cmd_run)
parser.add_argument('--cmd-result', action="store_const", const=re_cmd_result)
return parser.parse_args()
def parse_text(text, regexp):
return regexp.findall(text)
def save_parsed(logs, file, format=None):
df = pd.DataFrame(logs)
# print(df)
df.to_csv(file)
if __name__ == "__main__":
args = get_parameters()
print("Reading file {}...".format(args.file))
with open(args.file) as logfile:
text = logfile.read()
args = vars(args)
print(args)
for type, regexp in args.items():
if type != "file" and regexp is not None:
print("Parsing {}...".format(type))
logs = parse_text(text, regexp)
save_parsed(logs, args["file"]+type+".csv")
| gpl-3.0 | 7,759,135,881,385,060,000 | 30.548387 | 83 | 0.599182 | false | 3.233058 | false | false | false |
pombreda/ruffus | ruffus/test/test_verbosity.py | 1 | 8627 |
#!/usr/bin/env python
from __future__ import print_function
"""
test_verbosity.py
"""
temp_dir = "test_verbosity/"
import unittest
import os
import sys
# add grandparent to search path for testing
grandparent_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", ".."))
sys.path.insert(0, grandparent_dir)
# module name = script name without extension
module_name = os.path.splitext(os.path.basename(__file__))[0]
# funky code to import by file name
parent_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
ruffus_name = os.path.basename(parent_dir)
ruffus = list(map(__import__, [ruffus_name]))[0]
import shutil
try:
from StringIO import StringIO
except:
from io import StringIO
import re
ruffus = __import__ (ruffus_name)
for attr in "pipeline_run", "pipeline_printout", "suffix", "transform", "split", "merge", "dbdict", "follows", "mkdir", "originate", "Pipeline":
globals()[attr] = getattr (ruffus, attr)
RethrownJobError = ruffus.ruffus_exceptions.RethrownJobError
RUFFUS_HISTORY_FILE = ruffus.ruffus_utility.RUFFUS_HISTORY_FILE
CHECKSUM_FILE_TIMESTAMPS = ruffus.ruffus_utility.CHECKSUM_FILE_TIMESTAMPS
#---------------------------------------------------------------
# create initial files
#
@mkdir(temp_dir + 'data/scratch/lg/what/one/two/three/')
@originate([ [temp_dir + 'data/scratch/lg/what/one/two/three/job1.a.start', temp_dir + 'job1.b.start'],
[temp_dir + 'data/scratch/lg/what/one/two/three/job2.a.start', temp_dir + 'job2.b.start'],
[temp_dir + 'data/scratch/lg/what/one/two/three/job3.a.start', temp_dir + 'job3.b.start'] ])
def create_initial_file_pairs(output_files):
# create both files as necessary
for output_file in output_files:
with open(output_file, "w") as oo: pass
#---------------------------------------------------------------
# first task
@transform(create_initial_file_pairs, suffix(".start"), ".output.1")
def first_task(input_files, output_file):
with open(output_file, "w"): pass
#---------------------------------------------------------------
# second task
@transform(first_task, suffix(".output.1"), ".output.2")
def second_task(input_files, output_file):
with open(output_file, "w"): pass
test_pipeline = Pipeline("test")
test_pipeline.originate(output = [ [temp_dir + 'data/scratch/lg/what/one/two/three/job1.a.start', temp_dir + 'job1.b.start'],
[temp_dir + 'data/scratch/lg/what/one/two/three/job2.a.start', temp_dir + 'job2.b.start'],
[temp_dir + 'data/scratch/lg/what/one/two/three/job3.a.start', temp_dir + 'job3.b.start'] ],
task_func = create_initial_file_pairs)
test_pipeline.transform(task_func = first_task, input = create_initial_file_pairs, filter = suffix(".start"), output = ".output.1")
test_pipeline.transform(input = first_task, filter = suffix(".output.1"), output = ".output.2", task_func= second_task)
decorator_syntax = 0
oop_syntax = 1
class Test_verbosity(unittest.TestCase):
#___________________________________________________________________________
#
# test_printout_abbreviated_path1
#___________________________________________________________________________
def test_printout_abbreviated_path1(self):
"""Input file exists, output doesn't exist"""
for syntax in decorator_syntax, oop_syntax:
s = StringIO()
if syntax == oop_syntax:
test_pipeline.printout(s, [second_task], verbose = 5, verbose_abbreviated_path = 1)
else:
pipeline_printout(s, [second_task], verbose = 5, verbose_abbreviated_path = 1, wrap_width = 500, pipeline= "main")
ret = s.getvalue()
self.assertTrue(re.search('Job needs update:.*Missing files.*'
'\[\.\.\./job2\.a\.start, test_verbosity/job2\.b\.start, \.\.\./job2.a.output.1\]', ret, re.DOTALL) is not None)
#___________________________________________________________________________
#
# test_printout_abbreviated_path2
#___________________________________________________________________________
def test_printout_abbreviated_path2(self):
"""Input file exists, output doesn't exist"""
for syntax in decorator_syntax, oop_syntax:
s = StringIO()
if syntax == oop_syntax:
test_pipeline.printout(s, [second_task], verbose = 5, verbose_abbreviated_path = 2, wrap_width = 500)
else:
pipeline_printout(s, [second_task], verbose = 5, verbose_abbreviated_path = 2, wrap_width = 500, pipeline= "main")
ret = s.getvalue()
self.assertTrue('[.../three/job1.a.start, test_verbosity/job1.b.start, .../three/job1.a.output.1]' in ret)
#___________________________________________________________________________
#
# test_printout_abbreviated_path3
#___________________________________________________________________________
def test_printout_abbreviated_path3(self):
"""Input file exists, output doesn't exist"""
for syntax in decorator_syntax, oop_syntax:
s = StringIO()
if syntax == oop_syntax:
test_pipeline.printout(s, [second_task], verbose = 5, verbose_abbreviated_path = 3, wrap_width = 500)
else:
pipeline_printout(s, [second_task], verbose = 5, verbose_abbreviated_path = 3, wrap_width = 500, pipeline= "main")
ret = s.getvalue()
self.assertTrue('[.../two/three/job1.a.start, test_verbosity/job1.b.start, .../two/three/job1.a.output.1]' in s.getvalue())
#___________________________________________________________________________
#
# test_printout_abbreviated_path9
#___________________________________________________________________________
def test_printout_abbreviated_path9(self):
"""Input file exists, output doesn't exist"""
for syntax in decorator_syntax, oop_syntax:
s = StringIO()
if syntax == oop_syntax:
test_pipeline.printout(s, [second_task], verbose = 5, verbose_abbreviated_path = 9, wrap_width = 500)
else:
pipeline_printout(s, [second_task], verbose = 5, verbose_abbreviated_path = 9, wrap_width = 500, pipeline= "main")
ret = s.getvalue()
self.assertTrue('[%sdata/scratch/lg/what/one/two/three/job2.a.start, test_verbosity/job2.b.start,' % temp_dir in ret)
#___________________________________________________________________________
#
# test_printout_abbreviated_path0
#___________________________________________________________________________
def test_printout_abbreviated_path0(self):
"""Input file exists, output doesn't exist"""
for syntax in decorator_syntax, oop_syntax:
s = StringIO()
if syntax == oop_syntax:
test_pipeline.printout(s, [second_task], verbose = 5, verbose_abbreviated_path = 0, wrap_width = 500)
else:
pipeline_printout(s, [second_task], verbose = 5, verbose_abbreviated_path = 0, wrap_width = 500, pipeline= "main")
ret = s.getvalue()
path_str = os.path.abspath('%sdata/scratch/lg/what/one/two/three/job2.a.start' % temp_dir)
path_str = '[[%s' % path_str
self.assertTrue(path_str in ret)
self.assertTrue(temp_dir + 'job2.b.start]' in ret)
#___________________________________________________________________________
#
# test_printout_abbreviated_path_minus_60
#___________________________________________________________________________
def test_printout_abbreviated_path_minus_60(self):
"""Input file exists, output doesn't exist"""
for syntax in decorator_syntax, oop_syntax:
s = StringIO()
if syntax == oop_syntax:
test_pipeline.printout(s, [second_task], verbose = 5, verbose_abbreviated_path = -60, wrap_width = 500)
else:
pipeline_printout(s, [second_task], verbose = 5, verbose_abbreviated_path = -60, wrap_width = 500, pipeline= "main")
ret = s.getvalue()
self.assertTrue('[<???> ne/two/three/job2.a.start, test_verbosity/job2.b.start]' in ret)
#
# Necessary to protect the "entry point" of the program under windows.
# see: http://docs.python.org/library/multiprocessing.html#multiprocessing-programming
#
if __name__ == '__main__':
unittest.main()
| mit | 8,074,170,189,742,017,000 | 44.405263 | 144 | 0.534833 | false | 3.688328 | true | false | false |
ewilazarus/snnm | snnm.py | 1 | 2886 |
#!/usr/bin/python
"""
snnm
~~~~
This module contains the source code for `snnm`
Snnm is a utility tool created to fetch synonyms for a given expression from
the web and print them to the console.
"""
import bs4
import click
import requests
BASE_URL = 'http://www.thesaurus.com/browse/'
def _fetch_html(expression):
"""
Returns the HTML containing the synonyms for the given expression
"""
response = requests.get(BASE_URL + expression)
response.raise_for_status()
return response.text
def _parse_html(html):
"""
Returns a parsed list of synonyms out of a given HTML
"""
parser = bs4.BeautifulSoup(html, 'html.parser')
synonyms = []
divs = parser.find_all('div', class_='relevancy-list')
for div in divs:
spans = div.find_all('span', class_='text')
synonyms += [str(span.string) for span in spans]
return synonyms
def fetch_synonyms(expression):
"""
Returns a list of synonyms for a given expression
"""
try:
return _parse_html(_fetch_html(expression))
except requests.exceptions.HTTPError:
return []
def clean(synonyms):
"""
Returns the deduped, sorted list of synonyms
"""
deduped_synonyms = list(set([s.strip() for s in synonyms]))
deduped_synonyms.sort()
return deduped_synonyms
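# A minimal sketch (hypothetical helper and expression, never called) of the
# fetch -> clean pipeline; it needs network access and returns [] on HTTP errors.
def _pipeline_sketch(expression='happy'):
    return clean(fetch_synonyms(expression))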
def print_synonyms_ugly(synonyms):
"""
Prints the list of synonyms to the screen
"""
for synonym in synonyms:
print(synonym)
def print_synonyms(synonyms):
"""
Prints the list of synonyms to the screen, using colors and breakpoints
"""
if not synonyms:
click.secho('-- NO RESULTS --', fg='red')
click.echo()
else:
height = click.get_terminal_size()[1] - 3
batch = [synonyms[i:i+height] for i in range(0, len(synonyms), height)]
for synonyms in batch:
for synonym in synonyms:
click.secho(synonym, fg='yellow')
click.echo()
if batch.index(synonyms) != len(batch) - 1:
click.echo('Press any key to continue ...', nl=False)
key = click.getchar()
if key == '\x03':
raise KeyboardInterrupt()
click.echo()
@click.command(name='snnm')
@click.argument('expression')
@click.option('-u', '--ugly-output', is_flag=True)
def main(expression, ugly_output):
"""
List synonyms for an expression
"""
try:
if not ugly_output:
click.echo('Synonyms for {}:'.format(click.style(expression,
fg='blue')))
synonyms = clean(fetch_synonyms(expression))
if ugly_output:
print_synonyms_ugly(synonyms)
else:
print_synonyms(synonyms)
except KeyboardInterrupt:
pass
if __name__ == '__main__':
main()
| mit | -3,771,467,267,364,749,000 | 24.767857 | 79 | 0.592862 | false | 3.6075 | false | false | false |
vmalloc/gossip | gossip/utils.py | 2 | 2484 |
import itertools
from .exceptions import CannotResolveDependencies
from .helpers import DONT_CARE, FIRST
def topological_sort_registrations(registrations, unconstrained_priority=DONT_CARE):
graph = _build_dependency_graph(registrations, unconstrained_priority=unconstrained_priority)
returned_indices = _topological_sort(range(len(registrations)), graph)
assert len(returned_indices) == len(registrations)
return [registrations[idx] for idx in returned_indices]
def _topological_sort(indices, graph):
independent = sorted(set(indices) - set(m for n, m in graph), reverse=True)
returned = []
while independent:
n = independent.pop()
returned.append(n)
for m in indices:
edge = (n, m)
if m == n:
assert edge not in graph
continue
if edge in graph:
graph.remove(edge)
# check if m is now independent
for edge in graph:
if edge[1] == m:
# not independent
break
else:
# no other incoming edges to m
independent.append(m)
if graph:
raise CannotResolveDependencies('Cyclic dependency detected')
return returned
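# A worked example (hypothetical data, helper never called): an edge (n, m)
# means n must come before m, so the chain 0 -> 1 -> 2 sorts to [0, 1, 2].
def _topological_sort_sketch():
    return _topological_sort([0, 1, 2], {(0, 1), (1, 2)})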
def _build_dependency_graph(registrations, unconstrained_priority):
providers_by_name = {}
for index, registration in enumerate(registrations):
for name in registration.provides:
providers = providers_by_name.get(name)
if providers is None:
providers = providers_by_name[name] = []
providers.append(index)
graph = set()
for needer_index, registration in enumerate(registrations):
for need in registration.needs:
for provider_index in providers_by_name.get(need, []):
graph.add((provider_index, needer_index))
if unconstrained_priority != DONT_CARE:
caring_indices = set([idx for idx, r in enumerate(registrations) if r.needs or r.provides])
non_caring_indices = set(range(len(registrations))) - caring_indices
for caring_index, uncaring_index in itertools.product(caring_indices, non_caring_indices):
if unconstrained_priority == FIRST:
pair = (uncaring_index, caring_index)
else:
pair = (caring_index, uncaring_index)
graph.add(pair)
return graph
| bsd-3-clause | 8,597,037,287,050,342,000 | 36.636364 | 99 | 0.612721 | false | 4.388693 | false | false | false |
philanthropy-u/edx-platform | openedx/core/djangoapps/user_authn/views/tests/test_login.py | 1 | 29762 |
# coding:utf-8
"""
Tests for student activation and login
"""
import json
import unicodedata
import unittest
import ddt
import six
from django.conf import settings
from django.contrib.auth.models import User
from django.core import mail
from django.core.cache import cache
from django.http import HttpResponse, HttpResponseBadRequest
from django.test.client import Client
from django.test.utils import override_settings
from django.urls import NoReverseMatch, reverse
from mock import patch
from six import text_type
from six.moves import range
from openedx.core.djangoapps.external_auth.models import ExternalAuthMap
from openedx.core.djangoapps.password_policy.compliance import (
NonCompliantPasswordException,
NonCompliantPasswordWarning
)
from openedx.core.djangoapps.user_api.config.waffle import PREVENT_AUTH_USER_WRITES, waffle
from openedx.core.djangoapps.user_authn.cookies import jwt_cookies
from openedx.core.djangoapps.user_authn.tests.utils import setup_login_oauth_client
from openedx.core.djangolib.testing.utils import CacheIsolationTestCase
from student.tests.factories import RegistrationFactory, UserFactory, UserProfileFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
@ddt.ddt
class LoginTest(CacheIsolationTestCase):
"""
Test login_user() view
"""
ENABLED_CACHES = ['default']
LOGIN_FAILED_WARNING = 'Email or password is incorrect'
ACTIVATE_ACCOUNT_WARNING = 'In order to sign in, you need to activate your account'
username = 'test'
user_email = 'test@edx.org'
password = 'test_password'
def setUp(self):
"""Setup a test user along with its registration and profile"""
super(LoginTest, self).setUp()
self.user = UserFactory.build(username=self.username, email=self.user_email)
self.user.set_password(self.password)
self.user.save()
RegistrationFactory(user=self.user)
UserProfileFactory(user=self.user)
self.client = Client()
cache.clear()
try:
self.url = reverse('login_post')
except NoReverseMatch:
self.url = reverse('login')
def test_login_success(self):
response, mock_audit_log = self._login_response(
self.user_email, self.password, patched_audit_log='student.models.AUDIT_LOG'
)
self._assert_response(response, success=True)
self._assert_audit_log(mock_audit_log, 'info', [u'Login success', self.user_email])
@patch.dict("django.conf.settings.FEATURES", {'SQUELCH_PII_IN_LOGS': True})
def test_login_success_no_pii(self):
response, mock_audit_log = self._login_response(
self.user_email, self.password, patched_audit_log='student.models.AUDIT_LOG'
)
self._assert_response(response, success=True)
self._assert_audit_log(mock_audit_log, 'info', [u'Login success'])
self._assert_not_in_audit_log(mock_audit_log, 'info', [self.user_email])
def test_login_success_unicode_email(self):
unicode_email = u'test' + six.unichr(40960) + u'@edx.org'
self.user.email = unicode_email
self.user.save()
response, mock_audit_log = self._login_response(
unicode_email, self.password, patched_audit_log='student.models.AUDIT_LOG'
)
self._assert_response(response, success=True)
self._assert_audit_log(mock_audit_log, 'info', [u'Login success', unicode_email])
def test_last_login_updated(self):
old_last_login = self.user.last_login
self.test_login_success()
self.user.refresh_from_db()
assert self.user.last_login > old_last_login
def test_login_success_prevent_auth_user_writes(self):
with waffle().override(PREVENT_AUTH_USER_WRITES, True):
old_last_login = self.user.last_login
self.test_login_success()
self.user.refresh_from_db()
assert old_last_login == self.user.last_login
def test_login_fail_no_user_exists(self):
nonexistent_email = u'not_a_user@edx.org'
response, mock_audit_log = self._login_response(
nonexistent_email,
self.password,
)
self._assert_response(response, success=False, value=self.LOGIN_FAILED_WARNING)
self._assert_audit_log(mock_audit_log, 'warning', [u'Login failed', u'Unknown user email', nonexistent_email])
@patch.dict("django.conf.settings.FEATURES", {'SQUELCH_PII_IN_LOGS': True})
def test_login_fail_no_user_exists_no_pii(self):
nonexistent_email = u'not_a_user@edx.org'
response, mock_audit_log = self._login_response(
nonexistent_email,
self.password,
)
self._assert_response(response, success=False, value=self.LOGIN_FAILED_WARNING)
self._assert_audit_log(mock_audit_log, 'warning', [u'Login failed', u'Unknown user email'])
self._assert_not_in_audit_log(mock_audit_log, 'warning', [nonexistent_email])
def test_login_fail_wrong_password(self):
response, mock_audit_log = self._login_response(
self.user_email,
'wrong_password',
)
self._assert_response(response, success=False, value=self.LOGIN_FAILED_WARNING)
self._assert_audit_log(mock_audit_log, 'warning',
[u'Login failed', u'password for', self.user_email, u'invalid'])
@patch.dict("django.conf.settings.FEATURES", {'SQUELCH_PII_IN_LOGS': True})
def test_login_fail_wrong_password_no_pii(self):
response, mock_audit_log = self._login_response(self.user_email, 'wrong_password')
self._assert_response(response, success=False, value=self.LOGIN_FAILED_WARNING)
self._assert_audit_log(mock_audit_log, 'warning', [u'Login failed', u'password for', u'invalid'])
self._assert_not_in_audit_log(mock_audit_log, 'warning', [self.user_email])
@patch.dict("django.conf.settings.FEATURES", {'SQUELCH_PII_IN_LOGS': True})
def test_login_not_activated_no_pii(self):
# De-activate the user
self.user.is_active = False
self.user.save()
# Should now be unable to login
response, mock_audit_log = self._login_response(
self.user_email,
self.password
)
self._assert_response(response, success=False,
value="In order to sign in, you need to activate your account.")
self._assert_audit_log(mock_audit_log, 'warning', [u'Login failed', u'Account not active for user'])
self._assert_not_in_audit_log(mock_audit_log, 'warning', [u'test'])
def test_login_not_activated_with_correct_credentials(self):
"""
        Tests that when a user logs in with the correct credentials but an
        inactive account, the system sends an account activation email
        notification to the user.
"""
self.user.is_active = False
self.user.save()
response, mock_audit_log = self._login_response(
self.user_email,
self.password,
)
self._assert_response(response, success=False, value=self.ACTIVATE_ACCOUNT_WARNING)
self._assert_audit_log(mock_audit_log, 'warning', [u'Login failed', u'Account not active for user'])
@patch('openedx.core.djangoapps.user_authn.views.login._log_and_raise_inactive_user_auth_error')
def test_login_inactivated_user_with_incorrect_credentials(self, mock_inactive_user_email_and_error):
"""
        Tests that when a user logs in with incorrect credentials and an inactive account,
        the system does *not* send an account activation email notification to the user.
"""
nonexistent_email = 'incorrect@email.com'
self.user.is_active = False
self.user.save()
response, mock_audit_log = self._login_response(nonexistent_email, 'incorrect_password')
self.assertFalse(mock_inactive_user_email_and_error.called)
self._assert_response(response, success=False, value=self.LOGIN_FAILED_WARNING)
self._assert_audit_log(mock_audit_log, 'warning', [u'Login failed', u'Unknown user email', nonexistent_email])
def test_login_unicode_email(self):
unicode_email = self.user_email + six.unichr(40960)
response, mock_audit_log = self._login_response(
unicode_email,
self.password,
)
self._assert_response(response, success=False)
self._assert_audit_log(mock_audit_log, 'warning', [u'Login failed', unicode_email])
def test_login_unicode_password(self):
unicode_password = self.password + six.unichr(1972)
response, mock_audit_log = self._login_response(
self.user_email,
unicode_password,
)
self._assert_response(response, success=False)
self._assert_audit_log(mock_audit_log, 'warning',
[u'Login failed', u'password for', self.user_email, u'invalid'])
def test_logout_logging(self):
response, _ = self._login_response(self.user_email, self.password)
self._assert_response(response, success=True)
logout_url = reverse('logout')
with patch('student.models.AUDIT_LOG') as mock_audit_log:
response = self.client.post(logout_url)
self.assertEqual(response.status_code, 200)
self._assert_audit_log(mock_audit_log, 'info', [u'Logout', u'test'])
def test_login_user_info_cookie(self):
response, _ = self._login_response(self.user_email, self.password)
self._assert_response(response, success=True)
# Verify the format of the "user info" cookie set on login
cookie = self.client.cookies[settings.EDXMKTG_USER_INFO_COOKIE_NAME]
user_info = json.loads(cookie.value)
self.assertEqual(user_info["version"], settings.EDXMKTG_USER_INFO_COOKIE_VERSION)
self.assertEqual(user_info["username"], self.user.username)
# Check that the URLs are absolute
for url in user_info["header_urls"].values():
self.assertIn("http://testserver/", url)
def test_logout_deletes_mktg_cookies(self):
response, _ = self._login_response(self.user_email, self.password)
self._assert_response(response, success=True)
# Check that the marketing site cookies have been set
self.assertIn(settings.EDXMKTG_LOGGED_IN_COOKIE_NAME, self.client.cookies)
self.assertIn(settings.EDXMKTG_USER_INFO_COOKIE_NAME, self.client.cookies)
# Log out
logout_url = reverse('logout')
response = self.client.post(logout_url)
# Check that the marketing site cookies have been deleted
# (cookies are deleted by setting an expiration date in 1970)
for cookie_name in [settings.EDXMKTG_LOGGED_IN_COOKIE_NAME, settings.EDXMKTG_USER_INFO_COOKIE_NAME]:
cookie = self.client.cookies[cookie_name]
self.assertIn("01-Jan-1970", cookie.get('expires'))
@override_settings(
EDXMKTG_LOGGED_IN_COOKIE_NAME=u"unicode-logged-in",
EDXMKTG_USER_INFO_COOKIE_NAME=u"unicode-user-info",
)
def test_unicode_mktg_cookie_names(self):
# When logged in cookie names are loaded from JSON files, they may
# have type `unicode` instead of `str`, which can cause errors
# when calling Django cookie manipulation functions.
response, _ = self._login_response(self.user_email, self.password)
self._assert_response(response, success=True)
response = self.client.post(reverse('logout'))
expected = {
'target': '/',
}
self.assertDictContainsSubset(expected, response.context_data)
@patch.dict("django.conf.settings.FEATURES", {'SQUELCH_PII_IN_LOGS': True})
def test_logout_logging_no_pii(self):
response, _ = self._login_response(self.user_email, self.password)
self._assert_response(response, success=True)
logout_url = reverse('logout')
with patch('student.models.AUDIT_LOG') as mock_audit_log:
response = self.client.post(logout_url)
self.assertEqual(response.status_code, 200)
self._assert_audit_log(mock_audit_log, 'info', [u'Logout'])
self._assert_not_in_audit_log(mock_audit_log, 'info', [u'test'])
def test_login_ratelimited_success(self):
# Try (and fail) logging in with fewer attempts than the limit of 30
# and verify that you can still successfully log in afterwards.
for i in range(20):
password = u'test_password{0}'.format(i)
response, _audit_log = self._login_response(self.user_email, password)
self._assert_response(response, success=False)
# now try logging in with a valid password
response, _audit_log = self._login_response(self.user_email, self.password)
self._assert_response(response, success=True)
def test_login_ratelimited(self):
        # try logging in 30 times, the default limit on the number of failed
        # login attempts in one 5-minute period before the rate gets limited
for i in range(30):
password = u'test_password{0}'.format(i)
self._login_response(self.user_email, password)
# check to see if this response indicates that this was ratelimited
response, _audit_log = self._login_response(self.user_email, 'wrong_password')
self._assert_response(response, success=False, value='Too many failed login attempts')
@patch.dict("django.conf.settings.FEATURES", {"DISABLE_SET_JWT_COOKIES_FOR_TESTS": False})
def test_login_refresh(self):
def _assert_jwt_cookie_present(response):
self.assertEqual(response.status_code, 200)
self.assertIn(jwt_cookies.jwt_refresh_cookie_name(), self.client.cookies)
setup_login_oauth_client()
response, _ = self._login_response(self.user_email, self.password)
_assert_jwt_cookie_present(response)
response = self.client.post(reverse('login_refresh'))
_assert_jwt_cookie_present(response)
@patch.dict("django.conf.settings.FEATURES", {'PREVENT_CONCURRENT_LOGINS': True})
def test_single_session(self):
creds = {'email': self.user_email, 'password': self.password}
client1 = Client()
client2 = Client()
response = client1.post(self.url, creds)
self._assert_response(response, success=True)
# Reload the user from the database
self.user = User.objects.get(pk=self.user.pk)
self.assertEqual(self.user.profile.get_meta()['session_id'], client1.session.session_key)
# second login should log out the first
response = client2.post(self.url, creds)
self._assert_response(response, success=True)
try:
# this test can be run with either lms or studio settings
# since studio does not have a dashboard url, we should
# look for another url that is login_required, in that case
url = reverse('dashboard')
except NoReverseMatch:
url = reverse('upload_transcripts')
response = client1.get(url)
# client1 will be logged out
self.assertEqual(response.status_code, 302)
@patch.dict("django.conf.settings.FEATURES", {'PREVENT_CONCURRENT_LOGINS': True})
def test_single_session_with_no_user_profile(self):
"""
        Assert that a user logging in with CAS (Central Authentication Service) is
        redirected to the dashboard in the case of LMS, or to upload_transcripts
        in the case of CMS.
"""
user = UserFactory.build(username='tester', email='tester@edx.org')
user.set_password(self.password)
user.save()
# Assert that no profile is created.
self.assertFalse(hasattr(user, 'profile'))
creds = {'email': 'tester@edx.org', 'password': self.password}
client1 = Client()
client2 = Client()
response = client1.post(self.url, creds)
self._assert_response(response, success=True)
# Reload the user from the database
user = User.objects.get(pk=user.pk)
# Assert that profile is created.
self.assertTrue(hasattr(user, 'profile'))
# second login should log out the first
response = client2.post(self.url, creds)
self._assert_response(response, success=True)
try:
# this test can be run with either lms or studio settings
# since studio does not have a dashboard url, we should
# look for another url that is login_required, in that case
url = reverse('dashboard')
except NoReverseMatch:
url = reverse('upload_transcripts')
response = client1.get(url)
# client1 will be logged out
self.assertEqual(response.status_code, 302)
@patch.dict("django.conf.settings.FEATURES", {'PREVENT_CONCURRENT_LOGINS': True})
def test_single_session_with_url_not_having_login_required_decorator(self):
# accessing logout url as it does not have login-required decorator it will avoid redirect
# and go inside the enforce_single_login
creds = {'email': self.user_email, 'password': self.password}
client1 = Client()
client2 = Client()
response = client1.post(self.url, creds)
self._assert_response(response, success=True)
# Reload the user from the database
self.user = User.objects.get(pk=self.user.pk)
self.assertEqual(self.user.profile.get_meta()['session_id'], client1.session.session_key)
# second login should log out the first
response = client2.post(self.url, creds)
self._assert_response(response, success=True)
url = reverse('logout')
response = client1.get(url)
self.assertEqual(response.status_code, 200)
def test_change_enrollment_400(self):
"""
Tests that a 400 in change_enrollment doesn't lead to a 404
and in fact just logs in the user without incident
"""
# add this post param to trigger a call to change_enrollment
extra_post_params = {"enrollment_action": "enroll"}
with patch('student.views.change_enrollment') as mock_change_enrollment:
mock_change_enrollment.return_value = HttpResponseBadRequest("I am a 400")
response, _ = self._login_response(
self.user_email, self.password, extra_post_params=extra_post_params,
)
response_content = json.loads(response.content)
self.assertIsNone(response_content["redirect_url"])
self._assert_response(response, success=True)
def test_change_enrollment_200_no_redirect(self):
"""
Tests "redirect_url" is None if change_enrollment returns a HttpResponse
with no content
"""
# add this post param to trigger a call to change_enrollment
extra_post_params = {"enrollment_action": "enroll"}
with patch('student.views.change_enrollment') as mock_change_enrollment:
mock_change_enrollment.return_value = HttpResponse()
response, _ = self._login_response(
self.user_email, self.password, extra_post_params=extra_post_params,
)
response_content = json.loads(response.content)
self.assertIsNone(response_content["redirect_url"])
self._assert_response(response, success=True)
@override_settings(PASSWORD_POLICY_COMPLIANCE_ROLLOUT_CONFIG={'ENFORCE_COMPLIANCE_ON_LOGIN': True})
def test_check_password_policy_compliance(self):
"""
Tests _enforce_password_policy_compliance succeeds when no exception is thrown
"""
enforce_compliance_path = 'openedx.core.djangoapps.password_policy.compliance.enforce_compliance_on_login'
with patch(enforce_compliance_path) as mock_check_password_policy_compliance:
mock_check_password_policy_compliance.return_value = HttpResponse()
response, _ = self._login_response(self.user_email, self.password)
response_content = json.loads(response.content)
self.assertTrue(response_content.get('success'))
@override_settings(PASSWORD_POLICY_COMPLIANCE_ROLLOUT_CONFIG={'ENFORCE_COMPLIANCE_ON_LOGIN': True})
def test_check_password_policy_compliance_exception(self):
"""
Tests _enforce_password_policy_compliance fails with an exception thrown
"""
enforce_compliance_on_login = 'openedx.core.djangoapps.password_policy.compliance.enforce_compliance_on_login'
with patch(enforce_compliance_on_login) as mock_enforce_compliance_on_login:
mock_enforce_compliance_on_login.side_effect = NonCompliantPasswordException()
response, _ = self._login_response(
self.user_email,
self.password
)
response_content = json.loads(response.content)
self.assertFalse(response_content.get('success'))
self.assertEqual(len(mail.outbox), 1)
self.assertIn('Password reset', mail.outbox[0].subject)
@override_settings(PASSWORD_POLICY_COMPLIANCE_ROLLOUT_CONFIG={'ENFORCE_COMPLIANCE_ON_LOGIN': True})
def test_check_password_policy_compliance_warning(self):
"""
Tests _enforce_password_policy_compliance succeeds with a warning thrown
"""
enforce_compliance_on_login = 'openedx.core.djangoapps.password_policy.compliance.enforce_compliance_on_login'
with patch(enforce_compliance_on_login) as mock_enforce_compliance_on_login:
mock_enforce_compliance_on_login.side_effect = NonCompliantPasswordWarning('Test warning')
response, _ = self._login_response(self.user_email, self.password)
response_content = json.loads(response.content)
self.assertIn('Test warning', self.client.session['_messages'])
self.assertTrue(response_content.get('success'))
@ddt.data(
('test_password', 'test_password', True),
(unicodedata.normalize('NFKD', u'Ṗŕệṿïệẅ Ṯệẍt'),
unicodedata.normalize('NFKC', u'Ṗŕệṿïệẅ Ṯệẍt'), False),
(unicodedata.normalize('NFKC', u'Ṗŕệṿïệẅ Ṯệẍt'),
unicodedata.normalize('NFKD', u'Ṗŕệṿïệẅ Ṯệẍt'), True),
(unicodedata.normalize('NFKD', u'Ṗŕệṿïệẅ Ṯệẍt'),
unicodedata.normalize('NFKD', u'Ṗŕệṿïệẅ Ṯệẍt'), False),
)
@ddt.unpack
def test_password_unicode_normalization_login(self, password, password_entered, login_success):
"""
Tests unicode normalization on user's passwords on login.
"""
self.user.set_password(password)
self.user.save()
response, _ = self._login_response(self.user.email, password_entered)
self._assert_response(response, success=login_success)
def _login_response(self, email, password, patched_audit_log=None, extra_post_params=None):
"""
Post the login info
"""
if patched_audit_log is None:
patched_audit_log = 'openedx.core.djangoapps.user_authn.views.login.AUDIT_LOG'
post_params = {'email': email, 'password': password}
if extra_post_params is not None:
post_params.update(extra_post_params)
with patch(patched_audit_log) as mock_audit_log:
result = self.client.post(self.url, post_params)
return result, mock_audit_log
def _assert_response(self, response, success=None, value=None):
"""
Assert that the response had status 200 and returned a valid
JSON-parseable dict.
If success is provided, assert that the response had that
value for 'success' in the JSON dict.
If value is provided, assert that the response contained that
value for 'value' in the JSON dict.
"""
self.assertEqual(response.status_code, 200)
try:
response_dict = json.loads(response.content)
except ValueError:
self.fail("Could not parse response content as JSON: %s"
% str(response.content))
if success is not None:
self.assertEqual(response_dict['success'], success)
if value is not None:
msg = ("'%s' did not contain '%s'" %
                   (text_type(response_dict['value']), text_type(value)))
self.assertIn(value, response_dict['value'], msg)
def _assert_audit_log(self, mock_audit_log, level, log_strings):
"""
Check that the audit log has received the expected call as its last call.
"""
method_calls = mock_audit_log.method_calls
name, args, _kwargs = method_calls[-1]
self.assertEquals(name, level)
self.assertEquals(len(args), 1)
format_string = args[0]
for log_string in log_strings:
self.assertIn(log_string, format_string)
def _assert_not_in_audit_log(self, mock_audit_log, level, log_strings):
"""
        Check that the audit log's last call does not contain the given log strings.
"""
method_calls = mock_audit_log.method_calls
name, args, _kwargs = method_calls[-1]
self.assertEquals(name, level)
self.assertEquals(len(args), 1)
format_string = args[0]
for log_string in log_strings:
self.assertNotIn(log_string, format_string)
class ExternalAuthShibTest(ModuleStoreTestCase):
"""
Tests how login_user() interacts with ExternalAuth, in particular Shib
"""
def setUp(self):
super(ExternalAuthShibTest, self).setUp()
self.course = CourseFactory.create(
org='Stanford',
number='456',
display_name='NO SHIB',
user_id=self.user.id,
)
self.shib_course = CourseFactory.create(
org='Stanford',
number='123',
display_name='Shib Only',
enrollment_domain='shib:https://idp.stanford.edu/',
user_id=self.user.id,
)
self.user_w_map = UserFactory.create(email='withmap@stanford.edu')
self.extauth = ExternalAuthMap(external_id='withmap@stanford.edu',
external_email='withmap@stanford.edu',
external_domain='shib:https://idp.stanford.edu/',
external_credentials="",
user=self.user_w_map)
self.user_w_map.save()
self.extauth.save()
self.user_wo_map = UserFactory.create(email='womap@gmail.com')
self.user_wo_map.save()
@unittest.skipUnless(settings.FEATURES.get('AUTH_USE_SHIB'), "AUTH_USE_SHIB not set")
def test_login_page_redirect(self):
"""
Tests that when a shib user types their email address into the login page, they get redirected
to the shib login.
"""
response = self.client.post(reverse('login'), {'email': self.user_w_map.email, 'password': ''})
self.assertEqual(response.status_code, 200)
obj = json.loads(response.content)
self.assertEqual(obj, {
'success': False,
'redirect': reverse('shib-login'),
})
@unittest.skipUnless(settings.FEATURES.get('AUTH_USE_SHIB'), "AUTH_USE_SHIB not set")
def test_login_required_dashboard(self):
"""
        Tests redirects when @login_required is applied to the dashboard, which should always be the normal login,
        since there is no course context
"""
response = self.client.get(reverse('dashboard'))
self.assertEqual(response.status_code, 302)
self.assertEqual(response['Location'], '/login?next=/dashboard')
@unittest.skipUnless(settings.FEATURES.get('AUTH_USE_SHIB'), "AUTH_USE_SHIB not set")
def test_externalauth_login_required_course_context(self):
"""
Tests the redirects when visiting course-specific URL with @login_required.
Should vary by course depending on its enrollment_domain
"""
target_url = reverse('courseware', args=[text_type(self.course.id)])
noshib_response = self.client.get(target_url, follow=True, HTTP_ACCEPT="text/html")
self.assertEqual(noshib_response.redirect_chain[-1],
('/login?next={url}'.format(url=target_url), 302))
self.assertContains(noshib_response, (u"Sign in or Register | {platform_name}"
.format(platform_name=settings.PLATFORM_NAME)))
self.assertEqual(noshib_response.status_code, 200)
target_url_shib = reverse('courseware', args=[text_type(self.shib_course.id)])
shib_response = self.client.get(**{'path': target_url_shib,
'follow': True,
'REMOTE_USER': self.extauth.external_id,
'Shib-Identity-Provider': 'https://idp.stanford.edu/',
'HTTP_ACCEPT': "text/html"})
# Test that the shib-login redirect page with ?next= and the desired page are part of the redirect chain
# The 'courseware' page actually causes a redirect itself, so it's not the end of the chain and we
# won't test its contents
self.assertEqual(shib_response.redirect_chain[-3],
('/shib-login/?next={url}'.format(url=target_url_shib), 302))
self.assertEqual(shib_response.redirect_chain[-2],
(target_url_shib, 302))
self.assertEqual(shib_response.status_code, 200)
|
agpl-3.0
| -3,259,418,563,379,477,500
| 44.621538
| 118
| 0.643859
| false
| 3.836223
| true
| false
| false
|
bruteforce1/cryptopals
|
set2/ch10/implement_aes_cbc.py
|
1
|
2510
|
#!/usr/bin/python3
"""
CBC mode is a block cipher mode that allows us to encrypt irregularly-
sized messages, despite the fact that a block cipher natively only
transforms individual blocks.
In CBC mode, each ciphertext block is added to the next plaintext block
before the next call to the cipher core.
The first plaintext block, which has no associated previous ciphertext
block, is added to a "fake 0th ciphertext block" called the
initialization vector, or IV.
Implement CBC mode by hand by taking the ECB function you wrote
earlier, making it encrypt instead of decrypt (verify this by
decrypting whatever you encrypt to test), and using your XOR function
from the previous exercise to combine them.
The file here is intelligible (somewhat) when CBC decrypted against
"YELLOW SUBMARINE" with an IV of all ASCII 0 (\x00\x00\x00 &c)
"""
import argparse
import os
import sys
from utils.cpset2 import aes_cbc, make_b64_printable
def main(filename, key, iv):
print('Input File: ' + str(filename))
print('Key: ' + str(key))
print('IV: ' + str(iv))
crypt = ''
if not os.path.isfile(filename):
print(filename + ' is not a valid file.')
return -1
with open(filename, 'r') as infile:
for line in infile:
crypt += line
ret = aes_cbc(crypt, key, iv, 0)
if ret:
print('Decrypted Contents in: ' + filename + '.dec')
with open(filename + '.dec', 'w') as tf:
tf.write(ret.decode('utf-8'))
un_ret = make_b64_printable(aes_cbc(ret, key, iv))
if un_ret:
print('Encrypted Contents in: ' + filename + '.enc')
with open(filename + '.enc', 'w') as tf:
tf.write(un_ret.decode('utf-8'))
return 0
print('Error.')
return -1
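# --- Minimal CBC-chaining sketch (added for illustration; not part of the
# original solution). It uses a toy "block cipher" (XOR with the key) purely
# to show the chaining described in the docstring above: each plaintext block
# is combined (XORed) with the previous ciphertext block (the IV for the
# first block) before being passed through the block cipher.
def _toy_cbc_encrypt_demo():
    def xor_bytes(a, b):
        return bytes(x ^ y for x, y in zip(a, b))
    def toy_block_cipher(block, key):
        # Stand-in for AES-ECB on a single 16-byte block; NOT secure.
        return xor_bytes(block, key)
    key = b'YELLOW SUBMARINE'
    iv = b'\x00' * 16
    plaintext = b'exactly 32 bytes of sample text!'  # two 16-byte blocks
    prev, ciphertext = iv, b''
    for i in range(0, len(plaintext), 16):
        block = toy_block_cipher(xor_bytes(plaintext[i:i + 16], prev), key)
        ciphertext += block
        prev = block
    return ciphertext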
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Implements AES CBC encryption and decryption manually.')
parser.add_argument('-f', '--inputfile', help='opt. file encrypted \
with AES in CBC mode',
default='10.txt')
parser.add_argument('-i', '--iv', help='opt. 16 byte initialization \
vector',
default=chr(0) * 16)
parser.add_argument('-k', '--key', help='opt. 16 byte encryption or \
decryption key',
default='YELLOW SUBMARINE')
args = parser.parse_args()
sys.exit(main(args.inputfile, args.key, args.iv))
|
mit
| 5,468,502,557,935,550,000
| 34.352113
| 77
| 0.61753
| false
| 3.808801
| false
| false
| false
|
charanpald/features
|
features/test/PrimalCCATest.py
|
1
|
3226
|
import unittest
import numpy
import scipy.linalg
from features.PrimalCCA import PrimalCCA
from features.KernelCCA import KernelCCA
from kernel.LinearKernel import LinearKernel
import logging
class PrimalCCATest(unittest.TestCase):
def setUp(self):
numpy.seterr(all='ignore')
pass
def testLearnModel(self):
numExamples = 50
numFeatures = 10
X = numpy.random.rand(numExamples, numFeatures)
Y = X
tau = 0.0
        tol = 10**-6
cca = PrimalCCA(tau)
u, v, lmbdas = cca.learnModel(X, Y)
self.assertTrue(numpy.linalg.norm(u-v) < tol)
self.assertTrue(numpy.linalg.norm(lmbdas-numpy.ones(numFeatures)) < tol)
Y = X*2
cca = PrimalCCA(tau)
u, v, lmbdas = cca.learnModel(X, Y)
self.assertTrue(numpy.linalg.norm(u-v) < tol)
self.assertTrue(numpy.linalg.norm(lmbdas-numpy.ones(numFeatures)) < tol)
#Rotate X to form Y
Z = numpy.random.rand(numFeatures, numFeatures)
ZZ = numpy.dot(Z.T, Z)
(D, W) = scipy.linalg.eig(ZZ)
Y = numpy.dot(X, W)
u, v, lmbdas = cca.learnModel(X, Y)
self.assertTrue(numpy.linalg.norm(lmbdas-numpy.ones(numFeatures)) < tol)
def testProject(self):
#Test if it is the same as KCCA
numExamples = 50
numFeatures = 10
X = numpy.random.rand(numExamples, numFeatures)
Y = numpy.random.rand(numExamples, numFeatures)
tau = 0.0
        tol = 10**-6
k = 5
cca = PrimalCCA(tau)
u, v, lmbdas = cca.learnModel(X, Y)
XU, YU = cca.project(X, Y, k)
kernel = LinearKernel()
kcca = KernelCCA(kernel, kernel, tau)
alpha, beta, lmbdas2 = kcca.learnModel(X, Y)
XU2, YU2 = kcca.project(X, Y, k)
#Seem to get an error in this for some reason
#self.assertTrue(numpy.linalg.norm(XU-XU2) < tol)
#self.assertTrue(numpy.linalg.norm(YU-YU2) < tol)
#Now try with different tau
tau = 0.5
cca = PrimalCCA(tau)
u, v, lmbdas = cca.learnModel(X, Y)
XU, YU = cca.project(X, Y, k)
kernel = LinearKernel()
kcca = KernelCCA(kernel, kernel, tau)
alpha, beta, lmbdas = kcca.learnModel(X, Y)
XU2, YU2 = kcca.project(X, Y, k)
self.assertTrue(numpy.linalg.norm(XU-XU2) < tol)
self.assertTrue(numpy.linalg.norm(YU-YU2) < tol)
self.assertTrue(numpy.linalg.norm(numpy.dot(XU.T, XU) - numpy.ones(k)) < tol)
self.assertTrue(numpy.linalg.norm(numpy.dot(YU.T, YU) - numpy.ones(k)) < tol)
def testGetY(self):
#Test if we can recover Y from X
numExamples = 10
numFeatures = 5
X = numpy.random.rand(numExamples, numFeatures)
Z = numpy.random.rand(numFeatures, numFeatures)
ZZ = numpy.dot(Z.T, Z)
(D, W) = scipy.linalg.eig(ZZ)
Y = numpy.dot(X, W)
tau = 0.0
cca = PrimalCCA(tau)
U, V, lmbdas = cca.learnModel(X, Y)
Yhat = X.dot(U).dot(V.T).dot(numpy.linalg.inv(numpy.dot(V, V.T)))
logging.debug((numpy.abs(Yhat- Y)))
if __name__ == '__main__':
unittest.main()
|
gpl-3.0
| -873,620,248,228,570,200
| 27.298246
| 85
| 0.579045
| false
| 3.060721
| true
| false
| false
|
myt00seven/svrg
|
cifar/alexnet_lasagne/lasagne-googlenet-master/googlenet/layers/bn.py
|
1
|
4243
|
import numpy as np
import theano.tensor as T
import theano
from lasagne import init # from .. import init
from lasagne import nonlinearities # from .. import nonlinearities
from lasagne.layers.base import Layer # from .base import Layer
__all__ = [
"BNLayer",
]
class BNLayer(Layer):
"""
    lasagne.layers.BNLayer(incoming, gamma=1.0, beta=0., nonlinearity=None, epsilon=1e-6, **kwargs)
A batch normalization layer.
Parameters
----------
incoming : a :class:`Layer` instance or a tuple
The layer feeding into this layer, or the expected input shape
    gamma : float
        Initial value for the per-channel (or per-feature) scale parameter,
        registered via ``init.Constant``.
    beta : float
        Initial value for the per-channel (or per-feature) shift parameter,
        registered via ``init.Constant``.
    epsilon : float
        Small constant added to the variance for numerical stability.
nonlinearity : callable or None
The nonlinearity that is applied to the layer activations. If None
is provided, the layer will be linear.
Examples
--------
>>> from lasagne.layers import InputLayer, DenseLayer
>>> l_in = InputLayer((100, 20))
>>> l1 = BNLayer(l_in)
Notes
-----
    The input to this layer must be either a 4d tensor
    (batch, channels, rows, columns), in which case statistics are computed
    per channel, or a 2d matrix (batch, features), in which case statistics
    are computed per feature. Other input shapes raise a ``ValueError``.
"""
def __init__(self, incoming, gamma=1.0, beta=0., nonlinearity=None, epsilon=1e-6,
**kwargs):
super(BNLayer, self).__init__(incoming, **kwargs)
self.nonlinearity = (nonlinearities.identity if nonlinearity is None
else nonlinearity)
# get output shape of incoming
#self.n_channels = self.input_shape[1]
#print self.input_shape
#raise NameError("Hi")
self.epsilon = epsilon
        if len(self.input_shape) == 4:
self.gamma = self.add_param(init.Constant(gamma), (self.input_shape[1],), name='gamma', regularizable=False).dimshuffle(('x',0,'x','x'))
self.beta = self.add_param(init.Constant(beta), (self.input_shape[1],), name='beta', regularizable=False).dimshuffle(('x',0,'x','x'))
        elif len(self.input_shape) == 2:
self.gamma = self.add_param(init.Constant(gamma), (self.input_shape[1],), name='gamma', regularizable=False).dimshuffle(('x',0))
self.beta = self.add_param(init.Constant(beta), (self.input_shape[1],), name='beta', regularizable=False).dimshuffle(('x',0))
else: # input should be 4d tensor or 2d matrix
raise ValueError('input of BNLayer should be 4d tensor or 2d matrix')
# done init
def get_output_shape_for(self, input_shape):
#return (input_shape[0], self.num_units)
return input_shape
def get_output_for(self, input, **kwargs):
        if input.ndim == 4: # 4d tensor
self.mean = T.mean(input, axis=[0, 2, 3], keepdims=True) #self.mean = T.mean(input, axis=[0, 2, 3]).dimshuffle(('x', 0, 'x', 'x'))
#self.var = T.std(input, axis=[0, 2, 3], keepdims=True)
self.var = T.sum(T.sqr(input - self.mean), axis=[0, 2, 3], keepdims=True) / np.array([self.input_shape[0] * self.input_shape[2] * self.input_shape[3]], dtype=theano.config.floatX)
else: # elif input.ndim is 2: # 2d matrix
self.mean = T.mean(input, axis=0, keepdims=True) #self.mean = T.mean(input, axis=0).dimshuffle(('x',0))
#self.var = T.std(input, axis=0, keepdims=True)
self.var = T.sum(T.sqr(input - self.mean), axis=0, keepdims=True) / np.array([self.input_shape[0]], dtype=theano.config.floatX)
activation = (input - self.mean) / T.sqrt(self.var + self.epsilon)
activation = self.gamma * activation + self.beta
return self.nonlinearity(activation)
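# --- Reference sketch (added for clarity; not part of the original layer):
# the same normalization that get_output_for expresses symbolically in Theano,
# written with plain numpy for a 2d (batch, features) input. gamma, beta and
# epsilon play the same roles as the layer parameters above.
def _batch_norm_reference(x, gamma=1.0, beta=0.0, epsilon=1e-6):
    mean = x.mean(axis=0, keepdims=True)
    var = ((x - mean) ** 2).sum(axis=0, keepdims=True) / x.shape[0]
    return gamma * (x - mean) / np.sqrt(var + epsilon) + beta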
|
mit
| 4,750,715,369,948,741,000
| 40.194175
| 191
| 0.634928
| false
| 3.638937
| false
| false
| false
|
qrsforever/workspace
|
python/learn/thinkstats/rankit.py
|
1
|
1807
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
"""This file contains code for use with "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2010 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
import random
import thinkstats
import myplot
import matplotlib.pyplot as pyplot
def Sample(n=6):
"""Generates a sample from a standard normal variate.
n: sample size
Returns: list of n floats
"""
t = [random.normalvariate(0.0, 1.0) for i in range(n)]
t.sort()
return t
def Samples(n=6, m=1000):
"""Generates m samples with size n each.
n: sample size
m: number of samples
Returns: list of m samples
"""
t = [Sample(n) for i in range(m)]
return t
def EstimateRankits(n=6, m=1000):
"""Estimates the expected values of sorted random samples.
n: sample size
m: number of iterations
Returns: list of n rankits
"""
t = Samples(n, m)
t = zip(*t)
means = [thinkstats.Mean(x) for x in t]
return means
def MakeNormalPlot(ys, root=None, line_options={}, **options):
"""Makes a normal probability plot.
Args:
ys: sequence of values
line_options: dictionary of options for pyplot.plot
options: dictionary of options for myplot.Save
"""
# TODO: when n is small, generate a larger sample and desample
n = len(ys)
xs = [random.normalvariate(0.0, 1.0) for i in range(n)]
pyplot.clf()
pyplot.plot(sorted(xs), sorted(ys), 'b.', markersize=3, **line_options)
myplot.Save(root,
xlabel = 'Standard normal values',
legend=False,
**options)
def main():
means = EstimateRankits(84)
print(means)
if __name__ == "__main__":
main()
|
mit
| 151,881,123,661,949,600
| 21.308642
| 75
| 0.614278
| false
| 3.461686
| false
| false
| false
|
baliga-lab/weeder_patched
|
python/seqtools.py
|
1
|
3069
|
HAMMING_MAX = 9999
def read_sequences_from_fasta_string(fasta_string):
"""reads the sequences contained in a FASTA string"""
lines = fasta_string.split('\n')
sequences = []
seqbuffer = ""
seqname = None
for line in lines:
line = line.strip()
if line.startswith('>'):
if len(seqbuffer) > 0:
sequences.append((seqname, seqbuffer))
seqbuffer = ""
seqname = line[1:]
elif line and len(line) > 0:
seqbuffer += line
# add the last line
if len(seqbuffer) > 0:
sequences.append((seqname, seqbuffer))
return sequences
def read_sequences_from_fasta_file(filepath):
"""Read the sequences from the specified FASTA file"""
with open(filepath) as inputfile:
fasta_string = inputfile.read()
return read_sequences_from_fasta_string(fasta_string)
def revcomp(sequence):
"""compute the reverse complement of the input string"""
return "".join([revchar(c) for c in sequence[::-1]])
def overlap(str1, str2, checkreverse):
result = False
overlapping = True
for l in range(1, 3):
for i in range(len(str1) - l):
if i >= len(str2) or str1[i + l] != str2[i]:
overlapping = False
break
if overlapping:
result = True
overlapping = True
for i in range(len(str1) - l):
if (i + l) >= len(str2) or str1[i] != str2[i + l]:
overlapping = False
break
if overlapping:
result = True
if checkreverse:
rev_result = overlap(str1[::-1], str2, False)
if rev_result:
result = True
return result
def hamming_distance(str1, str2, checkreverse):
dist_forward = 0
dist_reverse = HAMMING_MAX
if len(str1) != len(str2) or str1 == str2:
return HAMMING_MAX
for i in range(len(str1)):
if str1[i] != str2[i]:
dist_forward += 1
if not checkreverse:
return dist_forward
else:
        rev = str1[::-1]
        dist_reverse = 0  # reset before counting mismatches against the reversed string
for i in range(len(str1)):
if rev[i] != str2[i]:
dist_reverse += 1
if dist_reverse < dist_forward:
return dist_reverse
else:
return dist_forward
def inside(str1, str2, checkreverse):
len1 = len(str1)
len2 = len(str2)
result = False
if (len2 - len1) != 2:
return False
for i in range(len2 - len1 + 1):
match = True
for j in range(i, i + len1):
if str1[j - i] != str2[j]:
match = False
break
if match:
result = True
if checkreverse:
rev_result = inside(str1[::-1], str2, False)
if rev_result:
result = True
return result
def char_to_int(c):
c = c.lower()
    if c == 'a':
        return 0
    elif c == 'c':
        return 1
    elif c == 'g':
        return 2
    elif c == 't':
        return 3
    elif c == '$':
        return 4
    else:
        return -1
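# --- Quick sanity checks (added for illustration); the expected values follow
# directly from the definitions above.
if __name__ == "__main__":
    assert hamming_distance("ACGT", "ACGA", False) == 1            # one mismatching position
    assert hamming_distance("ACGT", "ACG", False) == HAMMING_MAX   # length mismatch
    assert inside("ACG", "TACGT", False)                           # "ACG" occurs inside, length gap of 2
    print("seqtools sanity checks passed")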
|
gpl-3.0
| -7,517,009,544,331,105,000
| 24.789916
| 62
| 0.525904
| false
| 3.675449
| false
| false
| false
|
mohanprasath/Course-Work
|
data_analysis/uh_data_analysis_with_python/hy-data-analysis-with-python-spring-2020/part03-e05_correlation/test/test_correlation.py
|
1
|
2795
|
#!/usr/bin/env python3
import unittest
from unittest.mock import patch
from tmc import points
from tmc.utils import load, get_out
module_name="src.correlation"
correlations = load(module_name, "correlations")
lengths = load(module_name, "lengths")
def patch_name(m, d):
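    """Determine the target path for mock.patch: find the right-most component
    of the dotted name d that is an attribute of module m, and return m joined
    with d's components from that one onward (e.g. 'src.correlation.np.corrcoef'
    if the student module imported numpy as np)."""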
import importlib
parts=d.split(".")
try:
getattr(importlib.import_module(m), parts[-1])
p=".".join([m, parts[-1]])
except ModuleNotFoundError:
raise
except AttributeError:
if len(parts) == 1:
raise
try:
getattr(importlib.import_module(m), parts[-2])
p=".".join([m] + parts[-2:])
except AttributeError:
if len(parts) == 2:
raise
getattr(importlib.import_module(m), parts[-3])
p=".".join([m] + parts[-3:])
return p
class Correlation(unittest.TestCase):
@points('p03-05.1')
def test_lengths(self):
result = lengths()
self.assertAlmostEqual(result, 0.8717537758865832, places=4, msg="Wrong correlation!")
@points('p03-05.1')
def test_lengths_calls(self):
with patch(patch_name(module_name, "scipy.stats.pearsonr")) as pcorr:
result = lengths()
pcorr.assert_called()
@points('p03-05.2')
def test_correlations(self):
result = correlations()
n, m = result.shape
for r in range(n):
for c in range(r):
self.assertAlmostEqual(result[r,c], result[c,r], places=4,
msg="The correlation matrix is not symmetric!")
self.assertAlmostEqual(result[r,r], 1, places=4, msg="Values on the diagonal should be one!")
self.assertAlmostEqual(result[0,1], -0.11756978, places=4,
msg="Incorrect value in position [0,1]!")
self.assertAlmostEqual(result[0,2], 0.87175378, places=4,
msg="Incorrect value in position [0,2]!")
self.assertAlmostEqual(result[0,3], 0.81794113, places=4,
msg="Incorrect value in position [0,3]!")
self.assertAlmostEqual(result[1,2], -0.4284401, places=4,
msg="Incorrect value in position [1,2]!")
self.assertAlmostEqual(result[1,3], -0.36612593, places=4,
msg="Incorrect value in position [1,3]!")
self.assertAlmostEqual(result[2,3], 0.96286543, places=4,
msg="Incorrect value in position [2,3]!")
@points('p03-05.2')
    def test_correlations_calls(self):
with patch(patch_name(module_name, "np.corrcoef")) as pcorr:
result = correlations()
pcorr.assert_called()
if __name__ == '__main__':
unittest.main()
|
gpl-3.0
| -656,753,270,546,592,900
| 33.085366
| 105
| 0.563148
| false
| 3.813097
| true
| false
| false
|
NeoRazorX/ubuntufaq
|
public.py
|
1
|
15820
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# This file is part of ubuntufaq
# Copyright (C) 2011 Carlos Garcia Gomez neorazorx@gmail.com
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os, logging
# load Django 1.2
os.environ['DJANGO_SETTINGS_MODULE'] = 'settings'
from google.appengine.dist import use_library
use_library('django', '1.2')
from google.appengine.ext.webapp import template
from google.appengine.ext import db, webapp
from google.appengine.ext.webapp.util import run_wsgi_app
from google.appengine.api import users, memcache
from recaptcha.client import captcha
from base import *
from preguntas import *
from enlaces import *
class Portada(Pagina):
def get(self):
Pagina.get(self)
mixto = self.sc.get_portada( users.get_current_user() )
tags = self.get_tags_from_mixto( mixto )
template_values = {
'titulo': 'Ubuntu FAQ',
'descripcion': APP_DESCRIPTION,
'tags': tags,
'mixto': mixto,
'urespuestas': self.sc.get_ultimas_respuestas(),
'searches': self.sc.get_allsearches(),
'url': self.url,
'url_linktext': self.url_linktext,
'mi_perfil': self.mi_perfil,
'formulario' : self.formulario,
'usuario': users.get_current_user(),
'notis': self.get_notificaciones(),
'error_dominio': self.error_dominio,
'stats': self.sc.get_stats()
}
path = os.path.join(os.path.dirname(__file__), 'templates/portada.html')
self.response.out.write( template.render(path, template_values) )
class Populares(Pagina):
def get(self):
Pagina.get(self)
mixto = self.sc.get_populares()
tags = self.get_tags_from_mixto( mixto )
template_values = {
'titulo': 'Populares - Ubuntu FAQ',
'descripcion': 'Listado de preguntas y noticias populares de Ubuntu FAQ. ' + APP_DESCRIPTION,
'tags': tags,
'mixto': mixto,
'stats': self.sc.get_stats(),
'url': self.url,
'url_linktext': self.url_linktext,
'mi_perfil': self.mi_perfil,
'formulario' : self.formulario,
'usuario': users.get_current_user(),
'notis': self.get_notificaciones(),
'error_dominio': self.error_dominio
}
path = os.path.join(os.path.dirname(__file__), 'templates/populares.html')
self.response.out.write( template.render(path, template_values) )
class Ayuda(Pagina):
def get(self):
Pagina.get(self)
template_values = {
'titulo': 'Ayuda de Ubuntu FAQ',
'descripcion': u'Sección de ayuda de Ubuntu FAQ. ' + APP_DESCRIPTION,
'tags': 'ubuntu, kubuntu, xubuntu, lubuntu, problema, ayuda, linux, karmic, lucid, maverick, natty, ocelot',
'url': self.url,
'url_linktext': self.url_linktext,
'mi_perfil': self.mi_perfil,
'usuario': users.get_current_user(),
'notis': self.get_notificaciones(),
'formulario': self.formulario,
'error_dominio': self.error_dominio,
'karmalist': memcache.get('pending-users'),
'foco': 'ayuda'
}
path = os.path.join(os.path.dirname(__file__), 'templates/ayuda.html')
self.response.out.write(template.render(path, template_values))
class Nueva_publicacion(Pagina):
def get(self):
Pagina.get(self)
        # the captcha
if users.get_current_user():
chtml = ''
else:
chtml = captcha.displayhtml(
public_key = RECAPTCHA_PUBLIC_KEY,
use_ssl = False,
error = None)
if self.request.get('tipo') == 'pregunta':
foco = 'pregunta'
elif self.request.get('tipo') == 'enlace':
foco = 'enlace'
else:
foco = 'pensamiento'
template_values = {
'titulo': 'Publicar...',
'descripcion': u'Formulario de publicación de Ubuntu FAQ. ' + APP_DESCRIPTION,
'tags': 'ubuntu, kubuntu, xubuntu, lubuntu, problema, ayuda, linux, karmic, lucid, maverick, natty, ocelot',
'url': self.url,
'url_linktext': self.url_linktext,
'mi_perfil': self.mi_perfil,
'usuario': users.get_current_user(),
'notis': self.get_notificaciones(),
'formulario': self.formulario,
'error_dominio': self.error_dominio,
'captcha': chtml,
'tipo': self.request.get('tipo'),
'contenido': self.request.get('contenido'),
'url2': self.request.get('url'),
'foco': foco
}
path = os.path.join(os.path.dirname(__file__), 'templates/nueva.html')
self.response.out.write(template.render(path, template_values))
class Pagina_buscar(Pagina):
def get(self, tag=None):
Pagina.get(self)
        # fix encoding issues in the tag
if isinstance(tag, str):
tag = unicode( urllib.unquote(tag), 'utf-8')
else:
tag = unicode( urllib.unquote(tag) )
template_values = {
'titulo': 'Ubuntu FAQ: ' + tag,
'descripcion': u'Páginas relacionadas con ' + tag,
'tag': tag,
'tags': 'problema, duda, ayuda, ' + tag,
'relacionadas': self.sc.paginas_relacionadas(tag, True),
'alltags': self.sc.get_alltags(),
'searches': self.sc.get_allsearches(),
'url': self.url,
'url_linktext': self.url_linktext,
'mi_perfil': self.mi_perfil,
'formulario' : self.formulario,
'usuario': users.get_current_user(),
'notis': self.get_notificaciones(),
'error_dominio': self.error_dominio,
'foco': 'buscar'
}
path = os.path.join(os.path.dirname(__file__), 'templates/search.html')
self.response.out.write(template.render(path, template_values))
def post(self, ntag=None):
Pagina.get(self)
query = urllib.unquote( self.request.get('query') )
template_values = {
'titulo': 'Ubuntu FAQ: ' + query,
'descripcion': u'Resultados de: ' + query,
'tag': query,
'buscando': True,
'tags': 'problema, duda, ayuda, ' + query,
'relacionadas': self.sc.buscar( query ),
'searches': self.sc.get_allsearches(),
'url': self.url,
'url_linktext': self.url_linktext,
'mi_perfil': self.mi_perfil,
'formulario' : self.formulario,
'usuario': users.get_current_user(),
'notis': self.get_notificaciones(),
'error_dominio': self.error_dominio,
'foco': 'buscar'
}
path = os.path.join(os.path.dirname(__file__), 'templates/search.html')
self.response.out.write(template.render(path, template_values))
class Guardar_voto(Pagina):
def get(self, tipo='x', keye=None, voto='-1'):
try:
if self.request.environ['HTTP_USER_AGENT'].lower().find('googlebot') != -1:
logging.info('Googlebot!')
self.redirect('/')
else:
if tipo == 'r':
elemento = Respuesta.get( keye )
elif tipo == 'c':
elemento = Comentario.get( keye )
else:
elemento = False
                if not elemento: # no element to vote on
logging.warning('Elemento no encontrado!')
self.redirect('/error/404')
                elif self.request.remote_addr in elemento.ips and self.request.remote_addr != '127.0.0.1': # this IP has already voted
logging.info('Voto ya realizado')
self.redirect( elemento.get_link() )
                else: # valid vote
ips = elemento.ips
ips.append( self.request.remote_addr )
elemento.ips = ips
if voto == '0':
elemento.valoracion -= 1
logging.info('Voto negativo')
elif voto == '1':
elemento.valoracion += 1
logging.info('Voto positivo')
else:
logging.info('Voto no válido: ' + str(voto))
elemento.put()
elemento.borrar_cache()
                    # update the statistics
stats = self.sc.get_stats()
if voto in ['0', '1']:
try:
stats['votos'] += 1
except:
stats['votos'] = 1
memcache.replace('stats', stats)
self.redirect( elemento.get_link() )
except:
self.redirect('/error/503')
class Rss(Pagina):
def get(self):
template_values = {
'portada': self.sc.get_portada(),
'domain': APP_DOMAIN,
'title': APP_NAME,
'descripcion': APP_DESCRIPTION
}
path = os.path.join(os.path.dirname(__file__), 'templates/rss.html')
self.response.out.write(template.render(path, template_values))
class Rssr(Pagina):
def get(self):
template_values = {
'respuestas': self.sc.get_ultimas_respuestas(),
'comentarios': self.sc.get_ultimos_comentarios(),
'domain': APP_DOMAIN,
'title': APP_NAME,
'descripcion': APP_DESCRIPTION
}
path = os.path.join(os.path.dirname(__file__), 'templates/rss-respuestas.html')
self.response.out.write(template.render(path, template_values))
class Sitemap(Pagina):
def get(self):
portada = self.sc.get_portada()
print 'Content-Type: text/xml'
print ''
print '<?xml version="1.0" encoding="UTF-8"?>'
print '<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">'
for p in portada:
print '<url><loc>' + p['link'] + '</loc><lastmod>' + str(p['fecha']).split(' ')[0] + '</lastmod><changefreq>always</changefreq><priority>0.9</priority></url>'
print '</urlset>'
class Perror(Pagina):
def get(self, cerror='404'):
Pagina.get(self)
derror = {
'403': 'Permiso denegado',
'403c': 'Permiso denegado - error en el captcha',
'404': u'Página no encontrada en Ubuntu FAQ',
'503': 'Error en Ubuntu FAQ',
'606': 'Idiota detectado'
}
merror = {
'403': '403 - Permiso denegado',
'403c': u'<img src="/img/fuuu_face.png" alt="fuuu"/><br/><br/>403 - Permiso denegado: debes repetir el captcha.<br/>Evita los captchas iniciando sesión.',
'404': u'404 - Página no encontrada en Ubuntu FAQ',
'503': '<img src="/img/fuuu_face.png" alt="explosión"/><br/><br/>503 - Error en Ubuntu FAQ,<br/>consulta el estado en: http://code.google.com/status/appengine',
'606': u'<img src="/img/troll_face.png" alt="troll"/><br/><br/>606 - ¿Por qué no pruebas a escribir algo diferente?'
}
if cerror == '503':
logging.error( '503' )
else:
logging.warning( cerror )
template_values = {
'titulo': str(cerror) + ' - Ubuntu FAQ',
'descripcion': derror.get(cerror, 'Error desconocido'),
'tags': 'ubuntu, kubuntu, xubuntu, lubuntu, problema, ayuda, linux, karmic, lucid, maverick, natty, ocelot',
'url': self.url,
'url_linktext': self.url_linktext,
'mi_perfil': self.mi_perfil,
'formulario': self.formulario,
'error': merror.get(cerror, 'Error desconocido'),
'cerror': cerror,
'usuario': users.get_current_user(),
'notis': self.get_notificaciones(),
'error_dominio': self.error_dominio,
'foco': 'buscar'
}
path = os.path.join(os.path.dirname(__file__), 'templates/portada.html')
self.response.out.write(template.render(path, template_values))
def main():
application = webapp.WSGIApplication([('/', Portada),
('/inicio', Todas_preguntas),
('/preguntas', Todas_preguntas),
(r'/preguntas/(.*)', Todas_preguntas),
('/populares', Populares),
('/sin-solucionar', Sin_solucionar),
('/actualidad', Actualidad),
(r'/actualidad/(.*)', Actualidad),
(r'/p/(.*)', Redir_pregunta),
(r'/question/(.*)', Detalle_pregunta),
('/nueva', Nueva_publicacion),
('/add_p', Nueva_pregunta),
('/mod_p', Detalle_pregunta),
('/del_p', Borrar_pregunta),
('/add_r', Responder),
('/mod_r', Modificar_respuesta),
('/del_r', Borrar_respuesta),
(r'/e/(.*)', Acceder_enlace),
(r'/de/(.*)', Redir_enlace),
(r'/story/(.*)', Detalle_enlace),
('/add_e', Actualidad),
('/mod_e', Detalle_enlace),
('/hun_e', Hundir_enlace),
('/del_e', Borrar_enlace),
('/add_c', Comentar),
('/mod_c', Modificar_comentario),
('/del_c', Borrar_comentario),
('/ayuda', Ayuda),
(r'/search/(.*)', Pagina_buscar),
(r'/votar/(.*)/(.*)/(.*)', Guardar_voto),
('/rss', Rss),
('/rss-respuestas', Rssr),
('/sitemap', Sitemap),
('/sitemap.xml', Sitemap),
(r'/error/(.*)', Perror),
('/.*', Perror),
],
debug=DEBUG_FLAG)
webapp.template.register_template_library('filters.filtros_django')
run_wsgi_app(application)
if __name__ == "__main__":
main()
|
agpl-3.0
| -7,409,657,376,595,017,000
| 43.784703
| 179
| 0.49864
| false
| 3.704077
| false
| false
| false
|
DataDog/integrations-core
|
openstack_controller/tests/common.py
|
1
|
14615
|
# (C) Datadog, Inc. 2018-present
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
import datetime
import os
CHECK_NAME = 'openstack'
FIXTURES_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'fixtures')
ALL_IDS = ['server-1', 'server-2', 'other-1', 'other-2']
EXCLUDED_NETWORK_IDS = ['server-1', 'other-.*']
EXCLUDED_SERVER_IDS = ['server-2', 'other-.*']
FILTERED_NETWORK_ID = 'server-2'
FILTERED_SERVER_ID = 'server-1'
FILTERED_BY_PROJ_SERVER_ID = ['server-1', 'server-2']
CONFIG_FILE_INSTANCE = {
'name': 'test_name',
'user': {'name': 'test_name', 'password': 'test_pass', 'domain': {'id': 'test_id'}},
'ssl_verify': False,
'exclude_network_ids': EXCLUDED_NETWORK_IDS,
'openstack_config_file_path': os.path.abspath('./tests/fixtures/openstack_config.yaml'),
'openstack_cloud_name': 'test_cloud',
}
KEYSTONE_INSTANCE = {
'name': 'test_name',
'keystone_server_url': 'http://10.0.2.15:5000',
'user': {'name': 'test_name', 'password': 'test_pass', 'domain': {'id': 'test_id'}},
'ssl_verify': False,
'exclude_network_ids': EXCLUDED_NETWORK_IDS,
}
MOCK_CONFIG = {'init_config': {}, 'instances': [KEYSTONE_INSTANCE]}
EXAMPLE_AUTH_RESPONSE = {
u'token': {
u'methods': [u'password'],
u'roles': [
{u'id': u'f20c215f5a4d47b7a6e510bc65485ced', u'name': u'datadog_monitoring'},
{u'id': u'9fe2ff9ee4384b1894a90878d3e92bab', u'name': u'_member_'},
],
u'expires_at': u'2015-11-02T15: 57: 43.911674Z',
u'project': {
u'domain': {u'id': u'default', u'name': u'Default'},
u'id': u'0850707581fe4d738221a72db0182876',
u'name': u'admin',
},
u'catalog': [
{
u'endpoints': [
{
u'url': u'http://10.0.2.15:8774/v2.1/0850707581fe4d738221a72db0182876',
u'interface': u'internal',
u'region': u'RegionOne',
u'region_id': u'RegionOne',
u'id': u'354e35ed19774e398f80dc2a90d07f4b',
},
{
u'url': u'http://10.0.2.15:8774/v2.1/0850707581fe4d738221a72db0182876',
u'interface': u'public',
u'region': u'RegionOne',
u'region_id': u'RegionOne',
u'id': u'36e8e2bf24384105b9d56a65b0900172',
},
{
u'url': u'http://10.0.2.15:8774/v2.1/0850707581fe4d738221a72db0182876',
u'interface': u'admin',
u'region': u'RegionOne',
u'region_id': u'RegionOne',
u'id': u'de93edcbf7f9446286687ec68423c36f',
},
],
u'type': u'compute',
u'id': u'2023bd4f451849ba8abeaaf283cdde4f',
u'name': u'nova',
},
{
u'endpoints': [
{
u'url': u'http://10.0.3.111:8776/v1/***************************4bfc1',
u'interface': u'public',
u'region': u'RegionOne',
u'region_id': u'RegionOne',
u'id': u'***************************2452f',
},
{
u'url': u'http://10.0.2.15:8776/v1/***************************4bfc1',
u'interface': u'admin',
u'region': u'RegionOne',
u'region_id': u'RegionOne',
u'id': u'***************************8239f',
},
{
u'url': u'http://10.0.2.15:8776/v1/***************************4bfc1',
u'interface': u'internal',
u'region': u'RegionOne',
u'region_id': u'RegionOne',
u'id': u'***************************7caa1',
},
],
u'type': u'volume',
u'id': u'***************************e7e16',
u'name': u'cinder',
},
{
u'endpoints': [
{
u'url': u'http://10.0.2.15:9292',
u'interface': u'internal',
u'region': u'RegionOne',
u'region_id': u'RegionOne',
u'id': u'7c1e318d8f7f42029fcb591598df2ef5',
},
{
u'url': u'http://10.0.2.15:9292',
u'interface': u'public',
u'region': u'RegionOne',
u'region_id': u'RegionOne',
u'id': u'afcc88b1572f48a38bb393305dc2b584',
},
{
u'url': u'http://10.0.2.15:9292',
u'interface': u'admin',
u'region': u'RegionOne',
u'region_id': u'RegionOne',
u'id': u'd9730dbdc07844d785913219da64a197',
},
],
u'type': u'network',
u'id': u'21ad241f26194bccb7d2e49ee033d5a2',
u'name': u'neutron',
},
],
u'extras': {},
u'user': {
u'domain': {u'id': u'default', u'name': u'Default'},
u'id': u'5f10e63fbd6b411186e561dc62a9a675',
u'name': u'datadog',
},
u'audit_ids': [u'OMQQg9g3QmmxRHwKrfWxyQ'],
u'issued_at': u'2015-11-02T14: 57: 43.911697Z',
}
}
EXAMPLE_PROJECTS_RESPONSE = {
"projects": [
{
"domain_id": "1789d1",
"enabled": True,
"id": "263fd9",
"links": {"self": "https://example.com/identity/v3/projects/263fd9"},
"name": "Test Group",
}
],
"links": {"self": "https://example.com/identity/v3/auth/projects", "previous": None, "next": None},
}
# .. server/network
SERVERS_CACHE_MOCK = {
'servers': {
"server-1": {"id": "server-1", "name": "server-name-1", "status": "ACTIVE", "project_name": "testproj"},
"server-2": {"id": "server-2", "name": "server-name-2", "status": "ACTIVE", "project_name": "testproj"},
"other-1": {"id": "other-1", "name": "server-name-other-1", "status": "ACTIVE", "project_name": "blacklist_1"},
"other-2": {"id": "other-2", "name": "server-name-other-2", "status": "ACTIVE", "project_name": "blacklist_2"},
},
'change_since': datetime.datetime.utcnow().isoformat(),
}
EMPTY_NOVA_SERVERS = []
# One example from MOCK_NOVA_SERVERS to emulate pagination
MOCK_NOVA_SERVERS_PAGINATED = [
{
"OS-DCF:diskConfig": "AUTO",
"OS-EXT-AZ:availability_zone": "nova",
"OS-EXT-SRV-ATTR:host": "compute",
"OS-EXT-SRV-ATTR:hostname": "server-1",
"OS-EXT-SRV-ATTR:hypervisor_hostname": "fake-mini",
"OS-EXT-SRV-ATTR:instance_name": "instance-00000001",
"OS-EXT-SRV-ATTR:kernel_id": "",
"OS-EXT-SRV-ATTR:launch_index": 0,
"OS-EXT-SRV-ATTR:ramdisk_id": "",
"OS-EXT-SRV-ATTR:reservation_id": "r-iffothgx",
"OS-EXT-SRV-ATTR:root_device_name": "/dev/sda",
"OS-EXT-SRV-ATTR:user_data": "IyEvYmluL2Jhc2gKL2Jpbi9zdQplY2hvICJJIGFtIGluIHlvdSEiCg==",
"OS-EXT-STS:power_state": 1,
"OS-EXT-STS:task_state": 'null',
"OS-EXT-STS:vm_state": "active",
"OS-SRV-USG:launched_at": "2017-02-14T19:24:43.891568",
"OS-SRV-USG:terminated_at": 'null',
"accessIPv4": "1.2.3.4",
"accessIPv6": "80fe::",
"hostId": "2091634baaccdc4c5a1d57069c833e402921df696b7f970791b12ec6",
"host_status": "UP",
"id": "server-1",
"metadata": {"My Server Name": "Apache1"},
"name": "new-server-test",
"status": "ACTIVE",
"tags": [],
"tenant_id": "6f70656e737461636b20342065766572",
"updated": "2017-02-14T19:24:43Z",
"user_id": "fake",
}
]
# Example response from - https://developer.openstack.org/api-ref/compute/#list-servers-detailed
# ID and server-name values have been changed for test readability
MOCK_NOVA_SERVERS = [
{
"OS-DCF:diskConfig": "AUTO",
"OS-EXT-AZ:availability_zone": "nova",
"OS-EXT-SRV-ATTR:host": "compute",
"OS-EXT-SRV-ATTR:hostname": "server-1",
"OS-EXT-SRV-ATTR:hypervisor_hostname": "fake-mini",
"OS-EXT-SRV-ATTR:instance_name": "instance-00000001",
"OS-EXT-SRV-ATTR:kernel_id": "",
"OS-EXT-SRV-ATTR:launch_index": 0,
"OS-EXT-SRV-ATTR:ramdisk_id": "",
"OS-EXT-SRV-ATTR:reservation_id": "r-iffothgx",
"OS-EXT-SRV-ATTR:root_device_name": "/dev/sda",
"OS-EXT-SRV-ATTR:user_data": "IyEvYmluL2Jhc2gKL2Jpbi9zdQplY2hvICJJIGFtIGluIHlvdSEiCg==",
"OS-EXT-STS:power_state": 1,
"OS-EXT-STS:task_state": 'null',
"OS-EXT-STS:vm_state": "active",
"OS-SRV-USG:launched_at": "2017-02-14T19:24:43.891568",
"OS-SRV-USG:terminated_at": 'null',
"accessIPv4": "1.2.3.4",
"accessIPv6": "80fe::",
"hostId": "2091634baaccdc4c5a1d57069c833e402921df696b7f970791b12ec6",
"host_status": "UP",
"id": "server-1",
"metadata": {"My Server Name": "Apache1"},
"name": "new-server-test",
"status": "DELETED",
"tags": [],
"tenant_id": "6f70656e737461636b20342065766572",
"updated": "2017-02-14T19:24:43Z",
"user_id": "fake",
},
{
"OS-DCF:diskConfig": "AUTO",
"OS-EXT-AZ:availability_zone": "nova",
"OS-EXT-SRV-ATTR:host": "compute",
"OS-EXT-SRV-ATTR:hostname": "server-2",
"OS-EXT-SRV-ATTR:hypervisor_hostname": "fake-mini",
"OS-EXT-SRV-ATTR:instance_name": "instance-00000001",
"OS-EXT-SRV-ATTR:kernel_id": "",
"OS-EXT-SRV-ATTR:launch_index": 0,
"OS-EXT-SRV-ATTR:ramdisk_id": "",
"OS-EXT-SRV-ATTR:reservation_id": "r-iffothgx",
"OS-EXT-SRV-ATTR:root_device_name": "/dev/sda",
"OS-EXT-SRV-ATTR:user_data": "IyEvYmluL2Jhc2gKL2Jpbi9zdQplY2hvICJJIGFtIGluIHlvdSEiCg==",
"OS-EXT-STS:power_state": 1,
"OS-EXT-STS:task_state": 'null',
"OS-EXT-STS:vm_state": "active",
"OS-SRV-USG:launched_at": "2017-02-14T19:24:43.891568",
"OS-SRV-USG:terminated_at": 'null',
"accessIPv4": "1.2.3.4",
"accessIPv6": "80fe::",
"hostId": "2091634baaccdc4c5a1d57069c833e402921df696b7f970791b12ec6",
"host_status": "UP",
"id": "server_newly_added",
"metadata": {"My Server Name": "Apache1"},
"name": "newly_added_server",
"status": "ACTIVE",
"tags": [],
"tenant_id": "6f70656e737461636b20342065766572",
"updated": "2017-02-14T19:24:43Z",
"user_id": "fake",
},
]
EXAMPLE_GET_FLAVORS_DETAIL_RETURN_VALUE = [
{'id': u'10', 'disk': 10, 'vcpus': 2, 'ram': 1024, 'OS-FLV-EXT-DATA:ephemeral': 0, 'swap': 0},
{
'id': u'625c2e4b-0a1f-4236-bb67-5ceee1a766e5',
'disk': 48,
'vcpus': 8,
'ram': 5934,
'OS-FLV-EXT-DATA:ephemeral': 0,
'swap': 0,
},
]
EXAMPLE_GET_OS_AGGREGATES_RETURN_VALUE = [{'hosts': ["compute"], 'name': "name", 'availability_zone': "london"}]
EXAMPLE_GET_OS_HYPERVISORS_RETURN_VALUE = [
{
"cpu_info": {
"arch": "x86_64",
"model": "Nehalem",
"vendor": "Intel",
"features": ["pge", "clflush"],
"topology": {"cores": 1, "threads": 1, "sockets": 4},
},
"current_workload": 0,
"status": "enabled",
"state": "up",
"disk_available_least": 0,
"host_ip": "1.1.1.1",
"free_disk_gb": 1028,
"free_ram_mb": 7680,
"hypervisor_hostname": "host1",
"hypervisor_type": "fake",
"hypervisor_version": 1000,
"id": 2,
"local_gb": 1028,
"local_gb_used": 0,
"memory_mb": 8192,
"memory_mb_used": 512,
"running_vms": 0,
"service": {"host": "host1", "id": 7, "disabled_reason": None},
"vcpus": 2,
"vcpus_used": 0,
}
]
EXAMPLE_GET_PROJECT_LIMITS_RETURN_VALUE = {
"maxImageMeta": 128,
"maxPersonality": 5,
"maxPersonalitySize": 10240,
"maxSecurityGroupRules": 20,
"maxSecurityGroups": 10,
"maxServerMeta": 128,
"maxTotalCores": 20,
"maxTotalFloatingIps": 10,
"maxTotalInstances": 10,
"maxTotalKeypairs": 100,
"maxTotalRAMSize": 51200,
"maxServerGroups": 10,
"maxServerGroupMembers": 10,
"totalCoresUsed": 0,
"totalInstancesUsed": 0,
"totalRAMUsed": 0,
"totalSecurityGroupsUsed": 0,
"totalFloatingIpsUsed": 1,
"totalServerGroupsUsed": 0,
}
EXAMPLE_GET_NETWORKS_RETURN_VALUE = [
{
'id': u'2755452c-4fe8-4ba1-9b26-8898665b0958',
'name': u'net2',
'tenant_id': u'680031a39ce040e1b81289ea8c73fb11',
'admin_state_up': True,
}
]
DEFAULT_METRICS = [
'openstack.controller',
'openstack.nova.current_workload',
'openstack.nova.disk_available_least',
'openstack.nova.free_disk_gb',
'openstack.nova.free_ram_mb',
'openstack.nova.hypervisor_load.1',
'openstack.nova.hypervisor_load.15',
'openstack.nova.hypervisor_load.5',
'openstack.nova.limits.max_image_meta',
'openstack.nova.limits.max_personality',
'openstack.nova.limits.max_personality_size',
'openstack.nova.limits.max_security_group_rules',
'openstack.nova.limits.max_security_groups',
'openstack.nova.limits.max_server_meta',
'openstack.nova.limits.max_total_cores',
'openstack.nova.limits.max_total_floating_ips',
'openstack.nova.limits.max_total_instances',
'openstack.nova.limits.max_total_keypairs',
'openstack.nova.limits.max_total_ram_size',
'openstack.nova.limits.total_cores_used',
'openstack.nova.limits.total_floating_ips_used',
'openstack.nova.limits.total_instances_used',
'openstack.nova.limits.total_ram_used',
'openstack.nova.limits.total_security_groups_used',
'openstack.nova.local_gb',
'openstack.nova.local_gb_used',
'openstack.nova.memory_mb',
'openstack.nova.memory_mb_used',
'openstack.nova.running_vms',
'openstack.nova.vcpus',
'openstack.nova.vcpus_used',
]
|
bsd-3-clause
| 3,778,650,173,125,150,700
| 37.159269
| 119
| 0.515156
| false
| 3.039725
| true
| false
| false
|
asimshankar/tensorflow
|
tensorflow/python/saved_model/save.py
|
1
|
34278
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Exports a SavedModel from a Checkpointable Python object."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
import os
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.core.protobuf import saved_model_pb2
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.eager import function as defun
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import meta_graph
from tensorflow.python.framework import ops
from tensorflow.python.lib.io import file_io
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.saved_model import builder_impl
from tensorflow.python.saved_model import constants
from tensorflow.python.saved_model import function_serialization
from tensorflow.python.saved_model import saved_object_graph_pb2
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.saved_model import signature_def_utils
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.saved_model import utils_impl
from tensorflow.python.training.checkpointable import base
from tensorflow.python.training.checkpointable import tracking
from tensorflow.python.training.checkpointable import util
from tensorflow.python.util import compat
from tensorflow.python.util import nest
from tensorflow.python.util.tf_export import tf_export
DEFAULT_SIGNATURE_ATTR = "_default_save_signature"
def _find_function_to_export(root):
"""Iterate over `root`'s attributes, finding traced functions."""
exported_function = None
previous_attribute_name = None
for attribute_name in dir(root):
attribute_value = getattr(root, attribute_name, None)
if isinstance(attribute_value, def_function.PolymorphicFunction):
if exported_function is not None:
raise ValueError(
("Exporting an object with no "
"tf.saved_model.save(..., signatures=...) "
"argument specified, and with more than one "
"@tf.function-decorated method attached to it: {}. The signature "
"keys for these functions are ambiguous. Specify signature "
"functions explicitly.").format(
[previous_attribute_name, attribute_name]))
exported_function = attribute_value
previous_attribute_name = attribute_name
if exported_function is None:
exported_function = getattr(root, DEFAULT_SIGNATURE_ATTR, None)
if exported_function is None:
raise ValueError(
("Exporting an object with no tf.saved_model.save(..., signatures=...) "
"argument specified, and with no @tf.function-decorated methods "
"attached to it. In the future this will be a supported use-case for "
"Python re-import, but at the moment saving a SavedModel without "
"signatures does not make sense, as the only consumers will expect "
"signatures. Either decorate a method or specify a signature function "
"explicitly."))
return exported_function
def _canonicalize_signatures(signatures):
"""Converts `signatures` into a dictionary of concrete functions."""
if not isinstance(signatures, collections.Mapping):
signatures = {
signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY: signatures}
concrete_signatures = {}
for serving_key, signature_function in signatures.items():
if isinstance(signature_function, (defun.PolymorphicFunction,
def_function.PolymorphicFunction)):
input_signature = signature_function._input_signature # pylint: disable=protected-access
if input_signature is None:
raise ValueError(
("Unable to use the function {} as a signature directly. Functions "
"used to generate serving signatures must either have an "
"`input_signature=` specified when constructed, or must be "
"converted to concrete functions using "
"`f.get_concrete_function(...)`.").format(signature_function))
signature_function = signature_function.get_concrete_function()
elif not isinstance(signature_function, defun.Function):
raise ValueError(
("Expected a TensorFlow function to generate a signature for, but "
"got {}. Python functions may be decorated with "
"`@tf.function(input_signature=...)` and passed as signatures "
"directly, or created without a signature using `@tf.function` "
"and then converted to a concrete TensorFlow function using "
"`f.get_concrete_function(...)`.").format(signature_function))
concrete_signatures[serving_key] = signature_function
return concrete_signatures
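# A hedged usage sketch, not part of the original module: the lambda, its
# TensorSpec, and the example name below are illustrative only, and the body is
# wrapped in a function so nothing is traced at import time.
def _example_canonicalize_signatures():
  from tensorflow.python.framework import tensor_spec
  double = def_function.function(
      lambda x: x * 2.,
      input_signature=[tensor_spec.TensorSpec(shape=None, dtype=dtypes.float32)])
  # A bare function is keyed under the default serving signature key; passing a
  # dictionary would keep the caller-provided keys instead.
  return _canonicalize_signatures(double)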
def _is_flat(sequence):
sequence_flat = nest.flatten(sequence)
try:
nest.assert_same_structure(sequence_flat, sequence)
return True
except ValueError:
return False
except TypeError:
return False
def _normalize_outputs(outputs, function_name, signature_key):
"""Construct an output dictionary from unnormalized function outputs."""
if isinstance(outputs, collections.Mapping):
for key, value in outputs.items():
if not isinstance(value, ops.Tensor):
raise ValueError(
("Got a dictionary containing non-Tensor value {} for key {} "
"in the output of the function {} used to generate a SavedModel "
"signature. Dictionaries outputs for functions used as signatures "
"should have one Tensor output per string key.")
.format(value, key, compat.as_str_any(function_name)))
return outputs
else:
original_outputs = outputs
if not isinstance(outputs, collections.Sequence):
outputs = [outputs]
if not _is_flat(outputs):
raise ValueError(
("Got non-flat outputs '{}' from '{}' for SavedModel "
"signature '{}'. Signatures have one Tensor per output, so "
"to have predictable names Python functions used to generate "
"these signatures should avoid outputting Tensors in nested "
"structures.")
.format(original_outputs, function_name, signature_key))
return {("output_{}".format(output_index)): output
for output_index, output
in enumerate(outputs)}
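# A minimal illustration, assuming plain strings as stand-ins for Tensors
# (which the non-dictionary branch above does not type-check): flat, unnamed
# outputs are numbered "output_0", "output_1", ..., while dictionary outputs
# keep their string keys.
def _example_normalize_outputs():
  named = _normalize_outputs(["logits", "probabilities"], "f", "serving_default")
  assert named == {"output_0": "logits", "output_1": "probabilities"}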
def _tensor_dict_to_tensorinfo(tensor_dict):
return {key: utils_impl.build_tensor_info(value)
for key, value in tensor_dict.items()}
def _map_captures_to_created_tensors(
original_captures, resource_map):
"""Maps eager tensors captured by a function to Graph resources for export.
Args:
original_captures: A dictionary mapping from tensors captured by the
function to interior placeholders for those tensors (inside the function
body).
resource_map: A dictionary mapping from resource tensors owned by the eager
context to resource tensors in the exported graph.
Returns:
A list of stand-in tensors which belong to the exported graph, corresponding
to the function's captures.
Raises:
AssertionError: If the function references a resource which is not part of
`resource_map`.
"""
export_captures = []
for exterior, interior in original_captures.items():
mapped_resource = resource_map.get(exterior, None)
if mapped_resource is None:
if exterior.dtype == dtypes.resource:
raise AssertionError(
("Tried to export a function which references untracked stateful "
"object {}. Stateful TensorFlow objects (e.g. tf.Variable) must "
"be tracked by the main object. Objects may be tracked by "
"assigning them to an attribute of another tracked object, or to "
"an attribute of the main object directly.")
.format(interior))
else:
# This is a captured Tensor, but it's not a resource. We'll just add it
# to the graph as a constant.
mapped_resource = constant_op.constant(exterior.numpy())
export_captures.append(mapped_resource)
return export_captures
def _map_function_arguments_to_created_inputs(
function_arguments, signature_key, function_name):
"""Creates exterior placeholders in the exported graph for function arguments.
Functions have two types of inputs: tensors captured from the outside (eager)
context, and arguments to the function which we expect to receive from the
user at each call. `_map_captures_to_created_tensors` replaces
captured tensors with stand-ins (typically these are resource dtype tensors
  associated with variables). `_map_function_arguments_to_created_inputs` runs over
every argument, creating a new placeholder for each which will belong to the
exported graph rather than the function body.
Args:
function_arguments: A list of argument placeholders in the function body.
signature_key: The name of the signature being exported, for error messages.
function_name: The name of the function, for error messages.
Returns:
A tuple of (mapped_inputs, exterior_placeholders)
mapped_inputs: A list with entries corresponding to `function_arguments`
containing all of the inputs of the function gathered from the exported
graph (both captured resources and arguments).
exterior_argument_placeholders: A dictionary mapping from argument names
to placeholders in the exported graph, containing the explicit arguments
to the function which a user is expected to provide.
Raises:
ValueError: If argument names are not unique.
"""
# `exterior_argument_placeholders` holds placeholders which are outside the
# function body, directly contained in a MetaGraph of the SavedModel. The
# function body itself contains nearly identical placeholders used when
# running the function, but these exterior placeholders allow Session-based
# APIs to call the function using feeds and fetches which name Tensors in the
# MetaGraph.
exterior_argument_placeholders = {}
mapped_inputs = []
for placeholder in function_arguments:
# `export_captures` contains an exhaustive set of captures, so if we don't
# find the input there then we now know we have an argument.
user_input_name = compat.as_str_any(
placeholder.op.get_attr("_user_specified_name"))
# If the internal placeholders for a function have names which were
# uniquified by TensorFlow, then a single user-specified argument name
# must refer to multiple Tensors. The resulting signatures would be
# confusing to call. Instead, we throw an exception telling the user to
# specify explicit names.
if user_input_name != placeholder.op.name:
# This should be unreachable, since concrete functions may not be
# generated with non-unique argument names.
raise ValueError(
("Got non-flat/non-unique argument names for SavedModel "
"signature '{}': more than one argument to '{}' was named '{}'. "
"Signatures have one Tensor per named input, so to have "
"predictable names Python functions used to generate these "
"signatures should avoid *args and Tensors in nested "
"structures unless unique names are specified for each. Use "
"tf.TensorSpec(..., name=...) to provide a name for a Tensor "
"input.")
.format(signature_key, compat.as_str_any(function_name),
user_input_name))
arg_placeholder = array_ops.placeholder(
shape=placeholder.shape,
dtype=placeholder.dtype,
name="{}_{}".format(signature_key, user_input_name))
exterior_argument_placeholders[user_input_name] = arg_placeholder
mapped_inputs.append(arg_placeholder)
return mapped_inputs, exterior_argument_placeholders
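# A hedged sketch of the placeholder naming convention used above; the
# signature key and argument name are hypothetical, and a throwaway graph keeps
# the example away from the default graph.
def _example_signature_placeholder_naming():
  with ops.Graph().as_default():
    exterior = array_ops.placeholder(
        shape=None, dtype=dtypes.float32,
        name="{}_{}".format("serving_default", "x"))
    # Session-based consumers would feed this tensor by the name below.
    assert exterior.op.name == "serving_default_x"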
def _call_function_with_mapped_captures(function, args, resource_map):
"""Calls `function` in the exported graph, using mapped resource captures."""
export_captures = _map_captures_to_created_tensors(
function.graph.captures, resource_map)
mapped_inputs = args + export_captures
# Calls the function quite directly, since we have new captured resource
# tensors we need to feed in which weren't part of the original function
# definition.
# pylint: disable=protected-access
outputs = function._build_call_outputs(
function._inference_function.call(context.context(), mapped_inputs))
return outputs
def _generate_signatures(signature_functions, resource_map):
"""Validates and calls `signature_functions` in the default graph.
Args:
signature_functions: A dictionary mapping string keys to concrete TensorFlow
functions (e.g. from `_canonicalize_signatures`) which will be used to
generate SignatureDefs.
resource_map: A dictionary mapping from resource tensors in the eager
context to resource tensors in the Graph being exported. This dictionary
is used to re-bind resources captured by functions to tensors which will
exist in the SavedModel.
Returns:
Each function in the `signature_functions` dictionary is called with
placeholder Tensors, generating a function call operation and output
Tensors. The placeholder Tensors, the function call operation, and the
output Tensors from the function call are part of the default Graph.
This function then returns a dictionary with the same structure as
`signature_functions`, with the concrete functions replaced by SignatureDefs
implicitly containing information about how to call each function from a
TensorFlow 1.x Session / the C++ Loader API. These SignatureDefs reference
the generated placeholders and Tensor outputs by name.
The caller is expected to include the default Graph set while calling this
function as a MetaGraph in a SavedModel, including the returned
SignatureDefs as part of that MetaGraph.
"""
signatures = {}
for signature_key, function in sorted(signature_functions.items()):
if function.graph.captures:
argument_inputs = function.graph.inputs[:-len(function.graph.captures)]
else:
argument_inputs = function.graph.inputs
mapped_inputs, exterior_argument_placeholders = (
_map_function_arguments_to_created_inputs(
argument_inputs, signature_key, function.name))
outputs = _normalize_outputs(
_call_function_with_mapped_captures(
function, mapped_inputs, resource_map),
function.name, signature_key)
signatures[signature_key] = signature_def_utils.build_signature_def(
_tensor_dict_to_tensorinfo(exterior_argument_placeholders),
_tensor_dict_to_tensorinfo(outputs))
return signatures
def _trace_resource_initializers(accessible_objects):
"""Create concrete functions from `TrackableResource` objects."""
resource_initializers = []
def _wrap_initializer(obj):
obj.initialize()
return constant_op.constant(1.) # Dummy control output
for obj in accessible_objects:
if isinstance(obj, tracking.TrackableResource):
resource_initializers.append(def_function.function(
functools.partial(_wrap_initializer, obj),
# All inputs are captures.
input_signature=[]).get_concrete_function())
return resource_initializers
_AssetInfo = collections.namedtuple(
"_AssetInfo", [
# List of AssetFileDef protocol buffers
"asset_defs",
# Map from asset variable resource Tensors to their init ops
"asset_initializers_by_resource",
# Map from base asset filenames to full paths
"asset_filename_map",
# Map from TrackableAsset to index of corresponding AssetFileDef
"asset_index"])
def _process_asset(trackable_asset, asset_info, resource_map):
"""Add `trackable_asset` to `asset_info` and `resource_map`."""
original_variable = trackable_asset.asset_path
with context.eager_mode():
original_path = original_variable.numpy()
path = builder_impl.get_asset_filename_to_add(
asset_filepath=original_path,
asset_filename_map=asset_info.asset_filename_map)
# TODO(andresp): Instead of mapping 1-1 between trackable asset
# and asset in the graph def consider deduping the assets that
# point to the same file.
asset_path_initializer = array_ops.placeholder(
shape=original_variable.shape,
dtype=dtypes.string,
name="asset_path_initializer")
asset_variable = resource_variable_ops.ResourceVariable(
asset_path_initializer)
asset_info.asset_filename_map[path] = original_path
asset_def = meta_graph_pb2.AssetFileDef()
asset_def.filename = path
asset_def.tensor_info.name = asset_path_initializer.name
asset_info.asset_defs.append(asset_def)
asset_info.asset_initializers_by_resource[original_variable.handle] = (
asset_variable.initializer)
asset_info.asset_index[trackable_asset] = len(asset_info.asset_defs) - 1
resource_map[original_variable.handle] = asset_variable.handle
def _map_resources(accessible_objects):
"""Makes new resource handle ops corresponding to existing resource tensors.
Creates resource handle ops in the current default graph, whereas
`accessible_objects` will be from an eager context. Resource mapping adds
resource handle ops to the main GraphDef of a SavedModel, which allows the C++
loader API to interact with variables.
Args:
accessible_objects: A list of objects, some of which may contain resources,
to create replacements for.
Returns:
A tuple of (object_map, resource_map, asset_info):
object_map: A dictionary mapping from object in `accessible_objects` to
replacement objects created to hold the new resource tensors.
resource_map: A dictionary mapping from resource tensors extracted from
`accessible_objects` to newly created resource tensors.
asset_info: An _AssetInfo tuple describing external assets referenced from
accessible_objects.
"""
# TODO(allenl): Handle MirroredVariables and other types of variables which
# may need special casing.
object_map = {}
resource_map = {}
asset_info = _AssetInfo(
asset_defs=[],
asset_initializers_by_resource={},
asset_filename_map={},
asset_index={})
for obj in accessible_objects:
if isinstance(obj, tracking.TrackableResource):
new_resource = obj.create_resource()
resource_map[obj.resource_handle] = new_resource
elif resource_variable_ops.is_resource_variable(obj):
new_variable = resource_variable_ops.copy_to_graph_uninitialized(obj)
object_map[obj] = new_variable
resource_map[obj.handle] = new_variable.handle
elif isinstance(obj, tracking.TrackableAsset):
_process_asset(obj, asset_info, resource_map)
return object_map, resource_map, asset_info
def _fill_meta_graph_def(meta_graph_def, obj, signature_functions,
object_saver):
"""Generates a MetaGraph which calls `signature_functions`.
Args:
meta_graph_def: The MetaGraphDef proto to fill.
obj: The checkpointable object being exported.
signature_functions: A dictionary mapping signature keys to concrete
functions containing signatures to add to the MetaGraph.
object_saver: A CheckpointableSaver to add to the MetaGraph.
Returns:
An _AssetInfo, which contains information to help creating the SavedModel.
"""
signatures = {}
# List objects from the eager context to make sure Optimizers give us the
# right Graph-dependent variables.
accessible_objects = util.list_objects(obj)
resource_initializer_functions = _trace_resource_initializers(
accessible_objects)
exported_graph = ops.Graph()
resource_initializer_ops = []
with exported_graph.as_default():
object_map, resource_map, asset_info = _map_resources(accessible_objects)
for resource_initializer_function in resource_initializer_functions:
asset_dependencies = []
for capture in resource_initializer_function.graph.external_captures:
asset_initializer = asset_info.asset_initializers_by_resource.get(
capture, None)
if asset_initializer is not None:
asset_dependencies.append(asset_initializer)
with ops.control_dependencies(asset_dependencies):
resource_initializer_ops.append(
_call_function_with_mapped_captures(
resource_initializer_function, [], resource_map))
with ops.control_dependencies(resource_initializer_ops):
init_op = control_flow_ops.no_op()
# Add the same op to the main_op collection and to the init_op
# signature. The collection is for compatibility with older loader APIs;
# only one will be executed.
meta_graph_def.collection_def[constants.MAIN_OP_KEY].node_list.value.append(
init_op.name)
meta_graph_def.signature_def[constants.INIT_OP_SIGNATURE_KEY].CopyFrom(
signature_def_utils.op_signature_def(
init_op, constants.INIT_OP_SIGNATURE_KEY))
# Saving an object-based checkpoint again gathers variables. We need to do the
# gathering from the eager context so Optimizers save the right set of
# variables, but want any operations associated with the save/restore to be in
# the exported graph (thus the `to_graph` argument).
saver = object_saver.freeze(object_map=object_map, to_graph=exported_graph)
# We must resolve the concrete function to add to MetaGraph while in eager
# mode.
concrete_functions = []
for accessible_object in accessible_objects:
for function in function_serialization.list_all_polymorphic_functions(
accessible_object).values():
concrete_functions.extend(
function_serialization.list_all_concrete_functions(function))
with exported_graph.as_default():
signatures = _generate_signatures(signature_functions, resource_map)
for concrete_function in concrete_functions:
concrete_function.add_to_graph()
saver_def = saver.to_proto()
meta_graph_def.saver_def.CopyFrom(saver_def)
graph_def = exported_graph.as_graph_def(add_shapes=True)
# Clean reference cycles so repeated export()s don't make work for the garbage
# collector.
ops.dismantle_graph(exported_graph)
meta_graph_def.graph_def.CopyFrom(graph_def)
meta_graph_def.meta_info_def.tags.append(tag_constants.SERVING)
meta_graph_def.asset_file_def.extend(asset_info.asset_defs)
for signature_key, signature in signatures.items():
meta_graph_def.signature_def[signature_key].CopyFrom(signature)
meta_graph.strip_graph_default_valued_attrs(meta_graph_def)
return asset_info
def _write_object_graph(root, export_dir, asset_file_def_index):
"""Save a SavedObjectGraph proto for `root`."""
# SavedObjectGraph is similar to the CheckpointableObjectGraph proto in the
# checkpoint. It will eventually go into the SavedModel.
proto = saved_object_graph_pb2.SavedObjectGraph()
checkpointable_objects, node_ids, slot_variables = util.find_objects(root)
util.fill_object_graph_proto(checkpointable_objects, node_ids, slot_variables,
proto)
for obj, obj_proto in zip(checkpointable_objects, proto.nodes):
_write_object_proto(obj, obj_proto, asset_file_def_index)
function_serialization.add_polymorphic_functions_to_object_graph_proto(
checkpointable_objects, proto)
extra_asset_dir = os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes(constants.EXTRA_ASSETS_DIRECTORY))
file_io.recursive_create_dir(extra_asset_dir)
object_graph_filename = os.path.join(
extra_asset_dir, compat.as_bytes("object_graph.pb"))
file_io.write_string_to_file(object_graph_filename, proto.SerializeToString())
def _write_object_proto(obj, proto, asset_file_def_index):
"""Saves an object into SavedObject proto."""
if isinstance(obj, tracking.TrackableAsset):
proto.asset.SetInParent()
proto.asset.asset_file_def_index = asset_file_def_index[obj]
else:
proto.user_object.SetInParent()
@tf_export("saved_model.save", v1=["saved_model.experimental.save"])
def save(obj, export_dir, signatures=None):
# pylint: disable=line-too-long
"""Exports the Checkpointable object `obj` to [SavedModel format](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/saved_model/README.md).
Example usage:
```python
class Adder(tf.train.Checkpoint):
@tf.function(input_signature=[tf.TensorSpec(shape=None, dtype=tf.float32)])
def add(self, x):
return x + x + 1.
to_export = Adder()
tf.saved_model.save(to_export, '/tmp/adder')
```
The resulting SavedModel is then servable with an input named "x", its value
having any shape and dtype float32.
The optional `signatures` argument controls which methods in `obj` will be
available to programs which consume `SavedModel`s, for example serving
APIs. Python functions may be decorated with
`@tf.function(input_signature=...)` and passed as signatures directly, or
lazily with a call to `get_concrete_function` on the method decorated with
`@tf.function`.
If the `signatures` argument is omitted, `obj` will be searched for
`@tf.function`-decorated methods. If exactly one `@tf.function` is found, that
method will be used as the default signature for the SavedModel. This behavior
is expected to change in the future, when a corresponding
`tf.saved_model.load` symbol is added. At that point signatures will be
completely optional, and any `@tf.function` attached to `obj` or its
dependencies will be exported for use with `load`.
When invoking a signature in an exported SavedModel, `Tensor` arguments are
identified by name. These names will come from the Python function's argument
names by default. They may be overridden by specifying a `name=...` argument
in the corresponding `tf.TensorSpec` object. Explicit naming is required if
multiple `Tensor`s are passed through a single argument to the Python
function.
The outputs of functions used as `signatures` must either be flat lists, in
which case outputs will be numbered, or a dictionary mapping string keys to
`Tensor`, in which case the keys will be used to name outputs.
Since `tf.keras.Model` objects are also Checkpointable, this function can be
used to export Keras models. For example, exporting with a signature
specified:
```python
class Model(tf.keras.Model):
@tf.function(input_signature=[tf.TensorSpec(shape=[None], dtype=tf.string)])
def serve(self, serialized):
...
m = Model()
tf.saved_model.save(m, '/tmp/saved_model/')
```
Exporting from a function without a fixed signature:
```python
class Model(tf.keras.Model):
@tf.function
def call(self, x):
...
m = Model()
tf.saved_model.save(
m, '/tmp/saved_model/',
signatures=m.call.get_concrete_function(
tf.TensorSpec(shape=[None, 3], dtype=tf.float32, name="inp")))
```
`tf.keras.Model` instances constructed from inputs and outputs already have a
signature and so do not require a `@tf.function` decorator or a `signatures`
argument. If neither are specified, the model's forward pass is exported.
```python
x = input_layer.Input((4,), name="x")
y = core.Dense(5, name="out")(x)
model = training.Model(x, y)
tf.saved_model.save(model, '/tmp/saved_model/')
# The exported SavedModel takes "x" with shape [None, 4] and returns "out"
# with shape [None, 5]
```
Variables must be tracked by assigning them to an attribute of a tracked
object or to an attribute of `obj` directly. TensorFlow objects (e.g. layers
from `tf.keras.layers`, optimizers from `tf.train`) track their variables
automatically. This is the same tracking scheme that `tf.train.Checkpoint`
uses, and an exported `Checkpoint` object may be restored as a training
checkpoint by pointing `tf.train.Checkpoint.restore` to the SavedModel's
"variables/" subdirectory. Currently variables are the only stateful objects
supported by `tf.saved_model.save`, but others (e.g. tables) will be supported
in the future.
`tf.function` does not hard-code device annotations from outside the function
body, instead using the calling context's device. This means for example that
exporting a model which runs on a GPU and serving it on a CPU will generally
work, with some exceptions. `tf.device` annotations inside the body of the
function will be hard-coded in the exported model; this type of annotation is
discouraged. Device-specific operations, e.g. with "cuDNN" in the name or with
device-specific layouts, may cause issues. Currently a `DistributionStrategy`
is another exception: active distribution strategies will cause device
placements to be hard-coded in a function. Exporting a single-device
computation and importing under a `DistributionStrategy` is not currently
supported, but may be in the future.
SavedModels exported with `tf.saved_model.save` [strip default-valued
attributes](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/saved_model/README.md#stripping-default-valued-attributes)
automatically, which removes one source of incompatibilities when the consumer
of a SavedModel is running an older TensorFlow version than the
producer. There are however other sources of incompatibilities which are not
handled automatically, such as when the exported model contains operations
which the consumer does not have definitions for.
The current implementation of `tf.saved_model.save` targets serving use-cases,
but omits information which will be necessary for the planned future
implementation of `tf.saved_model.load`. Exported models using the current
`save` implementation, and other existing SavedModels, will not be compatible
with `tf.saved_model.load` when it is implemented. Further, `save` will in the
future attempt to export `@tf.function`-decorated methods which it does not
currently inspect, so some objects which are exportable today will raise
exceptions on export in the future (e.g. due to complex/non-serializable
default arguments). Such backwards-incompatible API changes are expected only
prior to the TensorFlow 2.0 release.
Args:
obj: A checkpointable object to export.
export_dir: A directory in which to write the SavedModel.
signatures: Optional, either a `tf.function` with an input signature
specified or the result of `f.get_concrete_function` on a
`@tf.function`-decorated function `f`, in which case `f` will be used to
generate a signature for the SavedModel under the default serving
signature key. `signatures` may also be a dictionary, in which case it
maps from signature keys to either `tf.function` instances with input
signatures or concrete functions. The keys of such a dictionary may be
arbitrary strings, but will typically be from the
`tf.saved_model.signature_constants` module.
Raises:
ValueError: If `obj` is not checkpointable.
@compatibility(eager)
Not supported when graph building. From TensorFlow 1.x,
`tf.enable_eager_execution()` must run first. May not be called from within a
function body.
@end_compatibility
"""
if not context.executing_eagerly():
with ops.init_scope():
if context.executing_eagerly():
raise AssertionError(
"tf.saved_model.save is not supported inside a traced "
"@tf.function. Move the call to the outer eagerly-executed "
"context.")
else:
raise AssertionError(
"tf.saved_model.save is not supported when graph building. "
"tf.enable_eager_execution() must run first when calling it from "
"TensorFlow 1.x.")
# pylint: enable=line-too-long
if not isinstance(obj, base.CheckpointableBase):
raise ValueError(
"Expected a Checkpointable object for export, got {}.".format(obj))
if signatures is None:
# Note that we run this before saving the checkpoint, since looping over
# attributes may have the side effect of creating variables in some cases.
signatures = _find_function_to_export(obj)
signatures = _canonicalize_signatures(signatures)
# TODO(allenl): Factor out some subset of SavedModelBuilder which is 2.x
# compatible (no sessions) and share it with this export API rather than
# making a SavedModel proto and writing it directly.
saved_model = saved_model_pb2.SavedModel()
meta_graph_def = saved_model.meta_graphs.add()
object_saver = util.CheckpointableSaver(obj)
asset_info = _fill_meta_graph_def(
meta_graph_def, obj, signatures, object_saver)
saved_model.saved_model_schema_version = (
constants.SAVED_MODEL_SCHEMA_VERSION)
# So far we've just been generating protocol buffers with no I/O. Now we write
# the checkpoint, copy assets into the assets directory, and write out the
# SavedModel proto itself.
utils_impl.get_or_create_variables_dir(export_dir)
object_saver.save(utils_impl.get_variables_path(export_dir))
builder_impl.copy_assets_to_destination_dir(asset_info.asset_filename_map,
export_dir)
path = os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes(constants.SAVED_MODEL_FILENAME_PB))
file_io.write_string_to_file(path, saved_model.SerializeToString())
_write_object_graph(obj, export_dir, asset_info.asset_index)
|
apache-2.0
| -1,015,136,350,667,706,800
| 45.010738
| 162
| 0.722212
| false
| 4.222989
| false
| false
| false
|
stephane-caron/ijhr-2016
|
pymanoid/cone.py
|
1
|
2305
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015 Stephane Caron <stephane.caron@normalesup.org>
#
# This file is part of pymanoid.
#
# pymanoid is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# pymanoid is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# pymanoid. If not, see <http://www.gnu.org/licenses/>.
from cdd import Matrix, Polyhedron, RepType
from numpy import array, hstack, zeros
NUMBER_TYPE = 'float' # 'float' or 'fraction'
class ConeException(Exception):
def __init__(self, M):
self.M = M
class NotConeFace(ConeException):
def __str__(self):
return "Matrix is not a cone face"
class NotConeSpan(ConeException):
def __str__(self):
return "Matrix is not a cone span"
def face_of_span(S):
"""
Returns the face matrix S^F of the span matrix S,
that is, a matrix such that
{x = S z, z >= 0} if and only if {S^F x <= 0}.
"""
V = hstack([zeros((S.shape[1], 1)), S.T])
# V-representation: first column is 0 for rays
V_cdd = Matrix(V, number_type=NUMBER_TYPE)
V_cdd.rep_type = RepType.GENERATOR
P = Polyhedron(V_cdd)
H = array(P.get_inequalities())
b, A = H[:, 0], H[:, 1:]
for i in xrange(H.shape[0]):
if b[i] != 0:
raise NotConeSpan(S)
return -A
def span_of_face(F):
"""
Compute the span matrix F^S of the face matrix F,
that is, a matrix such that
{F x <= 0} if and only if {x = F^S z, z >= 0}.
"""
b, A = zeros((F.shape[0], 1)), -F
# H-representation: A x + b >= 0
F_cdd = Matrix(hstack([b, A]), number_type=NUMBER_TYPE)
F_cdd.rep_type = RepType.INEQUALITY
P = Polyhedron(F_cdd)
V = array(P.get_generators())
for i in xrange(V.shape[0]):
if V[i, 0] != 0: # 1 = vertex, 0 = ray
raise NotConeFace(F)
return V[:, 1:]
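# Hedged example, not part of the original module: the positive quadrant is
# spanned by the identity matrix and its face form is equivalent to -I, i.e.
# {x = I z, z >= 0} <=> {-x <= 0}. cdd may return rows in a different order or
# scaling, so no exact values are asserted here.
if __name__ == "__main__":
    from numpy import eye
    S = eye(2)
    F = face_of_span(S)  # rows describe the same half-spaces as -eye(2)
    S_back = span_of_face(F)  # rays span the positive quadrant again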
|
gpl-3.0
| 8,395,834,680,207,245,000
| 25.802326
| 79
| 0.625163
| false
| 3.153215
| false
| false
| false
|
cs98jrb/Trinity
|
mysite/events/forms/booking.py
|
1
|
2961
|
__author__ = 'james'
from django.utils.translation import ugettext as _
from django import forms
from django.core.exceptions import ValidationError
from django.contrib.auth.models import User
from django.contrib.auth import login, authenticate
from events.models import Booking
from orders.models import Order, OrderItem
class BookingForm(forms.ModelForm):
# set the css of required fields
required_css_class = 'required'
email = forms.EmailField(
max_length=254,
label="Contact email",
required=True,
help_text="This is required so we can contact you."
)
tandc = forms.BooleanField(
label="Accept terms and conditions",
required=True,
)
def __init__(self, request, *args, **kwargs):
booking = super(BookingForm, self).__init__(*args, **kwargs)
# add label
self.fields['quantity'].label = "Number of people"
try:
if not request.user.is_anonymous():
self.fields['email'].initial = request.user.email
except User.DoesNotExist:
pass
class Meta:
model = Booking
fields = ['email', 'quantity', ]
def save(self, event, price, user, commit=True):
from django.contrib.contenttypes.models import ContentType
#
booking = super(BookingForm, self).save(commit=False)
booking.booked_by = user
booking.event = event
booking.price = price
total_booked = 0
open_order_list = Order.objects.open_order(user=user)
if open_order_list:
order = open_order_list[0]
for item in order.orderitem_set.all():
total_booked += item.content_object.quantity
if not(event.pricing_set.all().filter(online_book=True)
and not event.fully_booked):
raise ValidationError(
_('This event is fully booked'),
code='Fully Booked'
)
commit = False
elif event.num_spaces < (booking.quantity + total_booked):
places = booking.quantity + total_booked
raise ValidationError(
_('Not enough spaces for %(places)s people.'),
code='No Space',
params={'places': places},
)
commit = False
if commit:
booking.save()
# Add to open order
if not open_order_list:
order = Order(ordered_by=user)
order.save()
order_item = OrderItem(
order=order,
description=event.__unicode__(),
value=(price.value*booking.quantity),
vat=price.vat,
content_type=ContentType.objects.get_for_model(booking),
object_id=booking.id
)
order_item.save()
return booking
def clean(self):
return self.cleaned_data
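# Hedged usage sketch, not part of the original module: a view-side call
# assuming a Django request, an Event instance `event`, a selected Pricing
# instance `price`, and an authenticated user. The names are illustrative only
# and nothing runs at import time.
def _example_booking_view_usage(request, event, price):
    form = BookingForm(request, request.POST)
    if form.is_valid():
        return form.save(event=event, price=price, user=request.user)
    return None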
|
gpl-2.0
| -287,810,376,454,806,340
| 28.039216
| 72
| 0.570078
| false
| 4.367257
| false
| false
| false
|
Midnighter/pyorganism
|
setup.py
|
1
|
2511
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
==================
PyOrganism Package
==================
:Authors:
Moritz Emanuel Beber
:Date:
2012-05-22
:Copyright:
Copyright(c) 2012 Jacobs University of Bremen. All rights reserved.
:File:
setup.py
"""
import sys
from os.path import join
from setuptools import (setup, Extension)
try:
from Cython.Distutils import build_ext
except ImportError as err:
sys.exit("Apologies, you need 'Cython' to install 'pyorganism'.")
if __name__ == "__main__":
# continuous
sources = ["continuous_wrapper.pyx", "continuous.c"]
c_path = join("pyorganism", "regulation", "src")
continuous = Extension("pyorganism.regulation.continuous_wrapper",
sources=[join(c_path, src) for src in sources],
include_dirs=[c_path]
)
setup(
name="pyorganism",
version="0.2.5",
license="BSD",
description="analyze organisational principles in living organisms",
author="Moritz Emanuel Beber",
author_email="moritz (dot) beber (at) gmail (dot) com",
url="http://github.com/Midnighter/pyorganism",
zip_safe=False,
install_requires=[
"future",
"networkx",
"numpy",
"pandas"
],
packages=["pyorganism",
"pyorganism.io",
"pyorganism.metabolism",
"pyorganism.regulation",
],
# package_data = {"pyorganism": ["data/*.xml", "data/*.txt", "data/*.tsv"]},
ext_modules=[continuous],
cmdclass={"build_ext": build_ext},
classifiers=[
# complete classifier list: http://pypi.python.org/pypi?%3Aaction=list_classifiers
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"License :: OSI Approved :: BSD License",
"Natural Language :: English",
"Operating System :: Unix",
"Operating System :: POSIX",
"Operating System :: Microsoft :: Windows",
"Programming Language :: Python",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: Implementation :: CPython",
"Topic :: Scientific/Engineering :: Bio-Informatics",
],
)
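# Hedged usage note, not part of the original file: with Cython and a C
# toolchain available, the extension above is typically compiled in place
# before installing, e.g.
#
#   python setup.py build_ext --inplace
#   python setup.py install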
|
bsd-3-clause
| 7,473,944,302,607,792,000
| 29.621951
| 94
| 0.562724
| false
| 3.966825
| false
| false
| false
|
abice-sbr/adaptsearch
|
blastalign.py
|
1
|
4394
|
import string, re
# Written by Robert Belshaw (School of Biomedical & Healthcare Sciences, University of Plymouth) & Aris Katzourakis (Department of Zoology, University of Oxford)
# For more information and to cite see Belshaw, R & Katzourakis, A (2005) BlastAlign: a program that uses blast to align problematic nucleotide sequences. Bioinformatics 21:122-123.
# Please send any comments to robert.belshaw@plymouth.ac.uk or aris.katzourakis@zoo.ox.ac.uk
file = open('blast_out', 'r')
buffer = file.readlines()
def Calculate_hits():
    Number_of_landmarks = len(Permanent_dictionary[KeyList[0]]) # use length of first entry
counter = 1
while counter < Number_of_landmarks: # Less than because list starts from zero
number_of_hits = 0
for item in KeyList:
list = Permanent_dictionary[item]
landmark = list[counter]
if landmark != '*':
number_of_hits = number_of_hits + 1
List_of_hits.append(number_of_hits)
counter = counter +1
return List_of_hits
def doInsertRoutine(list, value):
no_ast = 0
old_diff = 0
switch = 0
for item in list:
if item == '*':
no_ast = no_ast+1
else:
new_diff = (item - value)*(item - value)
if item < value:
no_ast = 0
else:
i = list.index(item)
if new_diff > old_diff:
i = i-no_ast
list.insert(i, value)
else:
list.insert(i, value)
switch = 1
break
old_diff = new_diff
if switch == 0:
no_ast = 0
for item in list:
if item == '*':
no_ast = no_ast+1
else:
no_ast = 0
i = len(list) - no_ast # Finds position before any trailing asterisks
list.insert(i, value)
return list, i
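# Hedged illustration of the insertion helper above, using toy values rather
# than a real blast_out file: landmark lists are kept in ascending order with
# '*' marking sequences that missed a hit, and a new landmark is inserted just
# before the first larger value (stepping back over any preceding '*' entries
# when the previous landmark was the closer match).
def example_doInsertRoutine():
    landmarks = [0, 10, '*', 30]
    updated, position = doInsertRoutine(landmarks, 25)
    return updated, position # expected ([0, 10, '*', 25, 30], 3) for this input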
def go_through_Library(Library_dictionary, tempKey, LandmarkInsertPos):
tempKeyList = []
for item in KeyList:
tempKeyList.append(item)
tempKeyList.remove(tempKey)
for item in tempKeyList:
tempList = []
for subitem in Permanent_dictionary[item]:
tempList.append(subitem)
if Library_dictionary.has_key(item):
tempList.insert(LandmarkInsertPos, Library_dictionary[item])
Permanent_dictionary[item] = tempList
else:
tempList.insert(LandmarkInsertPos, '*')
Permanent_dictionary[item] = tempList
def process_previous_block(tempKey, tempValue, Library_dictionary):
landmark = 0
tempList = []
for item in (Permanent_dictionary[tempKey]):
tempList.append(item)
for item in (Permanent_dictionary[tempKey]):
if item != '*':
if (tempValue >= item-30) and (tempValue <= item+30):
landmark = 1
else:
pass
if landmark == 0:
theAnswer = doInsertRoutine(tempList, tempValue)
tempList = theAnswer[0]
LandmarkInsertPos = theAnswer[1]
Permanent_dictionary[tempKey] = tempList
go_through_Library(Library_dictionary, tempKey, LandmarkInsertPos)
def makeOutFile():
theOutFile = open('blast_out_python', 'w')
theOutFile.write('\t\t') # Just to line up entries for ease of viewing
for item in List_of_hits:
theOutFile.write('%s\t' %item)
theOutFile.write('\n')
for item in KeyList:
theOutFile.write('%s\t' %item)
for listItem in Permanent_dictionary[item]:
theOutFile.write('%s\t' %listItem)
theOutFile.write('\n')
Query_dictionary = {}
Library_dictionary = {}
Permanent_dictionary = {}
KeyList = []
list = [0]
List_of_hits = [] # To note whether entries are unique or not
for line in buffer:
if line[0] == '*':
entry = ""
entry = line[1:-1]
Permanent_dictionary[entry] = list
KeyList.append(entry)
n=0
previousKey = "null" # Needed in case we have identical sequences, to avoid an unassigned variable
for line in buffer:
tempList = []
if line[0:5] == 'Query':
if n >= 1:
process_previous_block(QueryKey, QueryValue, Library_dictionary)
Library_dictionary = {}
line = string.split(line)
QueryKey = (line[0])[5:]
QueryValue = string.atoi(line[1])
Query_dictionary[QueryKey] = QueryValue
n=n+1
elif line[0:7] == 'Library':
line = string.split(line)
LibraryKey = (line[0])[7:]
LibraryValue = string.atoi(line[1])
if LibraryKey != QueryKey:
if previousKey == LibraryKey:
previousDist = (previousValue-QueryValue)*(previousValue-QueryValue)
currentDist = (LibraryValue-QueryValue)*(LibraryValue-QueryValue)
if currentDist < previousDist:
Library_dictionary[LibraryKey] = LibraryValue
else:
Library_dictionary[LibraryKey] = LibraryValue
previousKey = (line[0])[7:]
previousValue = string.atoi(line[1])
Calculate_hits()
makeOutFile()
|
gpl-3.0
| 5,974,816,698,575,030,000
| 28.891156
| 181
| 0.69117
| false
| 2.913793
| false
| false
| false
|
chrismamil/chowda
|
test/test_chowda.py
|
1
|
2201
|
import unittest
import os
import chowda.parsing as parse
import datetime
import pandas as pd
from chowda.load import load_file
DATA_DIR = os.path.join(os.path.dirname(__file__), "data")
TEST_FILE = "CTL1 wk3 exp1 RAW data.txt"
TEST_1 = os.path.join(DATA_DIR, TEST_FILE)
class TestChowda(unittest.TestCase):
    def setUp(self):
test_file = os.path.join(DATA_DIR, TEST_FILE)
with open(test_file) as in_handle:
self.in_data = in_handle.readlines()
def test_parse_experiment_time(self):
result = parse.parse_experiment_time(self.in_data[0])
self.assertEquals(result.keys()[0], "Experiment Started")
def test_parse_subject(self):
result = parse.parse_subject(self.in_data[1])
self.assertEquals(result["Subject"], "CNS1")
def test_parse_mass(self):
result = parse.parse_subject_mass(self.in_data[2])
self.assertEquals(result["Subject Mass"], 34.26)
def test_load_file(self):
from chowda.load import load_file
result = load_file(TEST_1)
self.assertEquals(result[0].strip(),
'"Oxymax Windows V 2.30 Data File"')
def test_get_header(self):
from chowda.load import get_header
result = get_header(TEST_1)
self.assertEquals(result[0].strip(),
'"Oxymax Windows V 2.30 Data File"')
self.assertEquals(result[-1].split(",")[0].strip(), '"========"')
def test_get_data(self):
from chowda.load import get_data
result = get_data(TEST_1)
self.assertEquals(result[0].split(",", 1)[0], "Interval")
def test_partition_file(self):
from chowda.load import partition_file
header, data = partition_file(TEST_1)
self.assertEquals(header[0].strip(),
'"Oxymax Windows V 2.30 Data File"')
self.assertEquals(header[-1].split(",")[0].strip(), '"========"')
self.assertEquals(data[0].split(",", 1)[0], "Interval")
def test_load_dataframe(self):
from chowda.load import load_dataframe
result = load_dataframe(parse.get_data(self.in_data))
self.assertEquals(result["Interval"].ix[0], "001")
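# Convenience entry point, not in the original file, so the suite can also be
# run directly with `python test/test_chowda.py` in addition to a test runner.
if __name__ == '__main__':
    unittest.main()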
|
mit
| -1,276,954,630,114,679,000
| 35.081967
| 73
| 0.613358
| false
| 3.510367
| true
| false
| false
|
CDKGlobal/cd-performance-promotion
|
cd_perf_promotion/engines/comparisonengine.py
|
1
|
19434
|
import json
import operator
class ComparisonEngine:
"""
Queries the performance tools' APIs and determines if the build passes
the target requirements.
"""
def check_health_severity(self, violation):
"""
Fails the build if the defined severity is found in the health rule
violations
Keyword arguments:
violation - dictionary that contains all of the information for a single
violation (as determined by AppDynamics)
"""
        # Add the violation to the output file after removing unnecessary data
self.output_json["appdynamics"]["healthrule_violations"].append(violation)
# Fail the build
self.output_json["promotion_gates"]["appdynamics_health"] = False
self.build_status_passed = False
def compare_appdynamics(self, healthrule_violations, warning, critical):
"""
Performs the comparison between the defined violation severity settings
and the violations that occurred
Keyword arguments:
healthrule_violations - Dictionary that contains all of the AppDynamics
health violations
warning - Boolean that indicates whether the user thinks
that health rule violations with a status of
"WARNING" are important enough to evaluate
critical - Boolean that indicates whether the user thinks
that health rule violations with a status of
"CRITICAL" are important enough to evaluate
"""
# Set the health to True by default and flip it if necessary
self.output_json["promotion_gates"]["appdynamics_health"] = True
for violation in healthrule_violations:
# Check if the severity settings that we care about exist in the health rule violations
if ((warning == True) and (violation["severity"] == "WARNING")):
self.check_health_severity(violation)
if ((critical == True) and (violation["severity"] == "CRITICAL")):
self.check_health_severity(violation)
def compare_blazemeter(self, metric_title, target_data, metric_data, transaction_index, operator):
"""
Performs the comparison between configuration promotion gates and the
actual blazemeter test data
Keyword arguments:
metric_title - String title that indicates the data item that is being
evaluated
target_data - Number that indicates the cutoff point for the specific
metric as determined by the user in the config
metric_data - The actual performance data number that is compared
against
transaction_index - The index of the transaction in the list of
transactions
        operator - <, >, <=, >=, == which is used to compare the real
data against the config
"""
if (target_data > 0):
# Metric is set in config, begin comparison
# Add the data to the output file
self.output_json["blazemeter"]["transactions"][transaction_index][metric_title] = metric_data
# Get the "passed" JSON key name ready
metric_title_passed = metric_title + "_passed"
# Determine if promotion gate was met
# Uses the operator module so that the process_performance_data function can determine
# what operator (<, >, <=, >=, etc.) should be used
if operator(metric_data, target_data):
# Success
if metric_title_passed not in self.output_json["promotion_gates"]:
# Not mentioned before, add it in
                    # It is not necessary to set the overall status to True again if it is
                    # already True, and if it was False for any transaction the overall
                    # status should remain False
self.output_json["promotion_gates"][metric_title_passed] = True
# Regardless, add it into the transaction data
self.output_json["blazemeter"]["transactions"][transaction_index][metric_title_passed] = True
else:
# Failure
self.output_json["promotion_gates"][metric_title_passed] = False
self.output_json["blazemeter"]["transactions"][transaction_index][metric_title_passed] = False
self.build_status_passed = False
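    # Illustrative walk-through with hypothetical numbers: for a configured
    # gate of response_time_avg = 200 ms and a measured average of 180 ms,
    # operator.lt(180, 200) is True, so "response_time_avg_passed" is recorded
    # as True for the transaction and the overall promotion gate stays True; a
    # measured average of 250 ms would set both flags to False and fail the
    # build.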
def compare_webpagetest(self, metric_title, target_data, metric_data, run_index, view, operator):
"""
Performs the comparison between configuration promotion gates and the
actual WebPageTest test data
Keyword arguments:
metric_title - String title that indicates the data item that is being
evaluated
target_data - Number that indicates the cutoff point for the specific
metric as determined by the user in the config
metric_data - The actual performance data number that is compared
against
        run_index - The index of the run in the list of runs, or None when
                    the data comes from the averages section
        view - Either first_view or repeat_view
        operator - <, >, <=, >=, == which is used to compare the real
                    data against the config
"""
if (target_data > 0):
# Metric is set in config, begin comparison
# Convert the metric data to an int (WebPageTest's XML output makes everything a string)
metric_data = int(metric_data)
# Add the data to the output file
if (run_index == None):
# Data from the averages section
self.output_json["webpagetest"]["average"][view][metric_title] = metric_data
else:
# Data from the runs section
self.output_json["webpagetest"]["runs"][run_index][view][metric_title] = metric_data
# Get the "passed" JSON key name ready
metric_title_passed = metric_title + "_passed"
# Determine if promotion gate was met
# Uses the operator module so that the process_performance_data function can determine
# what operator (<, >, <=, >=, etc.) should be used
if operator(metric_data, target_data):
# Success
if metric_title_passed not in self.output_json["promotion_gates"]:
# Not mentioned before, add it in
                    # It is not necessary to set the overall status to True again if it is
                    # already True, and if it was False for any transaction the overall
                    # status should remain False
if ((metric_title_passed in self.output_json["promotion_gates"] and self.output_json["promotion_gates"][metric_title_passed] != False) or (metric_title_passed not in self.output_json["promotion_gates"])):
self.output_json["promotion_gates"][metric_title_passed] = True
# Regardless, add it into the transaction data
if (run_index == None):
self.output_json["webpagetest"]["average"][view][metric_title_passed] = True
else:
self.output_json["webpagetest"]["runs"][run_index][view][metric_title_passed] = True
else:
# Failure
self.output_json["promotion_gates"][metric_title_passed] = False
if (run_index == None):
self.output_json["webpagetest"]["average"][view][metric_title_passed] = False
else:
self.output_json["webpagetest"]["runs"][run_index][view][metric_title_passed] = False
self.build_status_passed = False
def process_data(self, config_data, perf_data):
"""
Determines if the build meets promotion gate criteria based off of the
information in the config file (retrieved by configengine) and the data
from the modules (retrieved by dataengine)
Keyword Arguments:
config_data - dictionary that contains all of the information retrieved
by the config engine
perf_data - dictionary that contains all of the information retrieved
by the data engine
"""
# Prepare the output file promotion gates section
self.output_json["promotion_gates"] = {}
# AppDynamics Module
if (config_data["appdynamics"]["exists"] == True):
# Check for AppDynamics Health Violations (only if the user cares)
if ((config_data["promotion_gates"]["warning"] == True) or (config_data["promotion_gates"]["critical"] == True)):
# Output something so that the user isn't confused, regardless of whether health violations were found
self.output_json["appdynamics"] = {"healthrule_violations": []}
if (perf_data["appdynamics"]["healthrule_violations"] != []):
# Uh-oh, there's something wrong with the build
self.compare_appdynamics(perf_data["appdynamics"]["healthrule_violations"], config_data["promotion_gates"]["warning"], config_data["promotion_gates"]["critical"])
else:
# No health violations, good to go!
self.output_json["promotion_gates"]["appdynamics_health"] = True
# BlazeMeter Module
if (config_data["blazemeter"]["exists"] == True):
# Compare BlazeMeter metrics
# Add BlazeMeter into the output file
self.output_json["blazemeter"] = {"transactions": []}
for index, transaction in enumerate(perf_data["blazemeter"]["transactions"]):
# Add transaction information into the output
self.output_json["blazemeter"]["transactions"].append({"transaction_id": transaction["transaction_id"], "transaction_name": transaction["transaction_name"]})
# Average Response Time
self.compare_blazemeter("response_time_avg", config_data["promotion_gates"]["response_time_avg"], transaction["response_time_avg"], index, operator.lt)
# Max Response Time
self.compare_blazemeter("response_time_max", config_data["promotion_gates"]["response_time_max"], transaction["response_time_max"], index, operator.lt)
# Response Time Geometric Mean
self.compare_blazemeter("response_time_geomean", config_data["promotion_gates"]["response_time_geomean"], transaction["response_time_geomean"], index, operator.lt)
# Response Time Standard Deviation
self.compare_blazemeter("response_time_stdev", config_data["promotion_gates"]["response_time_stdev"], transaction["response_time_stdev"], index, operator.lt)
# Response Time 90% Line
self.compare_blazemeter("response_time_tp90", config_data["promotion_gates"]["response_time_tp90"], transaction["response_time_tp90"], index, operator.lt)
# Response Time 95% Line
self.compare_blazemeter("response_time_tp95", config_data["promotion_gates"]["response_time_tp95"], transaction["response_time_tp95"], index, operator.lt)
# Response Time 99% Line
self.compare_blazemeter("response_time_tp99", config_data["promotion_gates"]["response_time_tp99"], transaction["response_time_tp99"], index, operator.lt)
# Maximum Latency
self.compare_blazemeter("latency_max", config_data["promotion_gates"]["latency_max"], transaction["latency_max"], index, operator.lt)
# Average Latency
self.compare_blazemeter("latency_avg", config_data["promotion_gates"]["latency_avg"], transaction["latency_avg"], index, operator.lt)
# Latency Standard Deviation
self.compare_blazemeter("latency_stdev", config_data["promotion_gates"]["latency_stdev"], transaction["latency_stdev"], index, operator.lt)
# Average Bandwidth
self.compare_blazemeter("bandwidth_avg", config_data["promotion_gates"]["bandwidth_avg"], transaction["bandwidth_avg"], index, operator.lt)
# Transaction Rate
self.compare_blazemeter("transaction_rate", config_data["promotion_gates"]["transaction_rate"], transaction["transaction_rate"], index, operator.gt)
# WebPageTest Module
if (config_data["webpagetest"]["exists"] == True):
# Compare WebPageTest metrics
# Add WebPageTest into the output file
self.output_json["webpagetest"] = {"average": {}, "runs": []}
# Keep track of the views for looping purposes
views = ["first_view", "repeat_view"]
# Make sure that we care about the data before processing it
if (("first_view" in config_data["promotion_gates"]) or ("repeat_view" in config_data["promotion_gates"])):
# Check out the averages for the runs
# This is less for failing the build and more for adding the data into the output file
for view in views:
if (view in config_data["promotion_gates"]):
# Set up average first_view
self.output_json["webpagetest"]["average"][view] = {}
# Speed Index (Average)
self.compare_webpagetest("speed_index", config_data["promotion_gates"][view]["speed_index"], perf_data["webpagetest"]["average"][view]["SpeedIndex"], None, view, operator.gt)
# Time to First Paint (Average)
self.compare_webpagetest("first_paint", config_data["promotion_gates"][view]["first_paint"], perf_data["webpagetest"]["average"][view]["firstPaint"], None, view, operator.lt)
# Time to First Byte (Average)
self.compare_webpagetest("first_byte", config_data["promotion_gates"][view]["first_byte"], perf_data["webpagetest"]["average"][view]["TTFB"], None, view, operator.lt)
# Time to Fully Loaded (Average)
self.compare_webpagetest("fully_loaded", config_data["promotion_gates"][view]["fully_loaded"], perf_data["webpagetest"]["average"][view]["fullyLoaded"], None, view, operator.lt)
# Time to Visual Complete (Average)
self.compare_webpagetest("visual_complete", config_data["promotion_gates"][view]["visual_complete"], perf_data["webpagetest"]["average"][view]["visualComplete"], None, view, operator.lt)
# Time to Start Render (Average)
self.compare_webpagetest("start_render", config_data["promotion_gates"][view]["start_render"], perf_data["webpagetest"]["average"][view]["render"], None, view, operator.lt)
# Time to Last Visual Change (Average)
self.compare_webpagetest("last_visual_change", config_data["promotion_gates"][view]["last_visual_change"], perf_data["webpagetest"]["average"][view]["lastVisualChange"], None, view, operator.lt)
# Time to <title></title> Tags Loaded
self.compare_webpagetest("title_time", config_data["promotion_gates"][view]["title_time"], perf_data["webpagetest"]["average"][view]["titleTime"], None, view, operator.lt)
# Page Size (Bytes In)
self.compare_webpagetest("page_size", config_data["promotion_gates"][view]["page_size"], perf_data["webpagetest"]["average"][view]["bytesIn"], None, view, operator.lt)
# Loop over all of the runs
# Most of the time there will likely be only one
for run_id, run in enumerate(perf_data["webpagetest"]["runs"]):
# Add transaction information into the output
self.output_json["webpagetest"]["runs"].append({"run_id": run["run_id"]})
# Loop over all of the views for each run
for view in views:
if (view in config_data["promotion_gates"]):
# Set up first_view for the run
self.output_json["webpagetest"]["runs"][run_id][view] = {}
# Speed Index
self.compare_webpagetest("speed_index", config_data["promotion_gates"][view]["speed_index"], perf_data["webpagetest"]["runs"][run_id][view]["SpeedIndex"], run_id, view, operator.gt)
# Time to First Paint
self.compare_webpagetest("first_paint", config_data["promotion_gates"][view]["first_paint"], perf_data["webpagetest"]["runs"][run_id][view]["firstPaint"], run_id, view, operator.lt)
# Time to First Byte
self.compare_webpagetest("first_byte", config_data["promotion_gates"][view]["first_byte"], perf_data["webpagetest"]["runs"][run_id][view]["TTFB"], run_id, view, operator.lt)
# Time to Fully Loaded
self.compare_webpagetest("fully_loaded", config_data["promotion_gates"][view]["fully_loaded"], perf_data["webpagetest"]["runs"][run_id][view]["fullyLoaded"], run_id, view, operator.lt)
# Time to Visual Complete
self.compare_webpagetest("visual_complete", config_data["promotion_gates"][view]["visual_complete"], perf_data["webpagetest"]["runs"][run_id][view]["visualComplete"], run_id, view, operator.lt)
# Time to Start Render
self.compare_webpagetest("start_render", config_data["promotion_gates"][view]["start_render"], perf_data["webpagetest"]["runs"][run_id][view]["render"], run_id, view, operator.lt)
# Time to Last Visual Change
self.compare_webpagetest("last_visual_change", config_data["promotion_gates"][view]["last_visual_change"], perf_data["webpagetest"]["runs"][run_id][view]["lastVisualChange"], run_id, view, operator.lt)
# Time to <title></title> Tags Loaded
self.compare_webpagetest("title_time", config_data["promotion_gates"][view]["title_time"], perf_data["webpagetest"]["runs"][run_id][view]["titleTime"], run_id, view, operator.lt)
# Page Size (Bytes In)
self.compare_webpagetest("page_size", config_data["promotion_gates"][view]["page_size"], perf_data["webpagetest"]["runs"][run_id][view]["bytesIn"], run_id, view, operator.lt)
# Set the overall status in the output JSON file
self.output_json["promotion_gates"]["passed"] = self.build_status_passed
# We're done!
print("Processed performance data")
return self.output_json
def __init__(self):
"""
Class starting point
"""
# Build Status
self.build_status_passed = True
# Output JSON report data
# Later appended by the AppDynamics and BlazeMeter processing functions
self.output_json = {}
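# ---------------------------------------------------------------------------
# Editor's usage sketch (not part of the original module). The enclosing class
# is defined earlier in this file; "ResultsEngine" below is only a placeholder
# for its real name, and config_data / perf_data stand for the dictionaries
# produced by the config engine and the data engine.
#
#   engine = ResultsEngine()
#   report = engine.process_data(config_data, perf_data)
#   if not report["promotion_gates"]["passed"]:
#       raise SystemExit("Build did not meet the promotion gates")
# ---------------------------------------------------------------------------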
|
mit
| -1,119,515,072,854,985,200
| 64.877966
| 229
| 0.591129
| false
| 4.58241
| true
| false
| false
|
Frky/scat
|
src/shell/memory/addrtable.py
|
1
|
1331
|
#-*- coding: utf-8 -*-
class AddrTable(object):
TABLE_SIZE = 10000
def __init__(self, dic=False):
self.__addr = list()
self.__dic = dic
for i in xrange(AddrTable.TABLE_SIZE):
if self.__dic:
self.__addr.append(dict())
else:
self.__addr.append(list())
self.__curr_key = None
self.__curr_addr = None
def contains(self, addr):
key = addr % AddrTable.TABLE_SIZE
if self.__dic:
return addr in self.__addr[key].keys()
else:
return addr in self.__addr[key]
def add(self, addr):
key = addr % AddrTable.TABLE_SIZE
if self.__dic:
self.__addr[key][addr] = list()
else:
self.__addr[key].append(addr)
def remove(self, addr):
key = addr % AddrTable.TABLE_SIZE
self.__addr[key].remove(addr)
def add_dic(self, addr, fid):
if not self.__dic:
raise Exception
key = addr % AddrTable.TABLE_SIZE
self.__addr[key][addr].append(fid)
def items(self):
for key in self.__addr:
if self.__dic:
for addr, call in key.items():
yield addr, call
else:
for addr in key:
yield addr
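# Minimal self-test sketch added by the editor (not part of the original
# module); it runs only when the file is executed directly and exercises both
# the plain list-backed table and the dict-backed variant.
if __name__ == "__main__":
    table = AddrTable()
    table.add(0x400123)
    assert table.contains(0x400123)
    table.remove(0x400123)
    assert not table.contains(0x400123)
    calls = AddrTable(dic=True)
    calls.add(0x400200)
    calls.add_dic(0x400200, 42)  # record (hypothetical) function id 42 for this address
    for addr, fids in calls.items():
        print "%#x -> %s" % (addr, fids)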
|
mit
| -7,471,110,895,551,581,000
| 26.163265
| 50
| 0.486852
| false
| 3.996997
| false
| false
| false
|
BenjaminSchubert/web-polls
|
backend/errors/http.py
|
1
|
1737
|
"""
This module contains a collection of commonly encountered HTTP exceptions.
This allows all these http exceptions to be treated in the same way and simplifies the return of errors to the user.
"""
from errors import ErrorMessage
__author__ = "Benjamin Schubert <ben.c.schubert@gmail.com>"
class BaseHTTPException(Exception):
"""
This is the base HTTP Exception.
It should not be used as is, as it signifies that the server had an unexpected error.
"""
status_code = 500 # type: int
def __init__(self, payload: ErrorMessage = None, status_code: int = None):
"""
Create a new `BaseHTTPException`.
:param payload: payload to send to explain the error to the user.
:param status_code: HTTP status code to send. If not given, will fallback to `self.status_code`.
"""
super().__init__(self)
if payload is None:
payload = dict()
self.payload = payload
if status_code is not None:
self.status_code = status_code
class ForbiddenException(BaseHTTPException):
def __init__(self):
super().__init__({}, 401)
class BadRequestException(BaseHTTPException):
"""This is an exception to throw to return a 400 BAD REQUEST to the user."""
def __init__(self, payload: ErrorMessage):
"""
Create a new `BadRequestException`.
:param payload: payload to send to explain the error to the user.
"""
super().__init__(payload, 400)
class NotFoundException(BaseHTTPException):
"""This is an exception to throw to return a 404 NOT FOUND to the user."""
def __init__(self):
"""Create a new `NotFoundException`."""
super().__init__(None, 404)
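# Editor's usage sketch (not part of the original module): how a request
# handler might raise one of these exceptions and how an error handler could
# turn it into a (status, payload) pair. The exact shape of ErrorMessage lives
# in the errors module; a plain mapping of field names to messages is assumed
# here, and the framework-specific response serialization is left out.
def _example_handler() -> None:
    raise BadRequestException({"username": ["This field is required."]})
def _example_to_response(exc: BaseHTTPException):
    # Only the attributes that carry the error information are shown.
    return exc.status_code, exc.payload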
|
mit
| -7,086,277,074,543,223,000
| 27.95
| 116
| 0.639033
| false
| 4.320896
| false
| false
| false
|
ralbayaty/KaggleRetina
|
testing/censureHistCalc.py
|
1
|
4517
|
from skimage.feature import CENSURE
from skimage.color import rgb2gray
import matplotlib.pyplot as plt
import numpy as np
import cv2
import sys
from PIL import Image, ImageDraw
def draw_keypoints(img, kp, scale):
draw = ImageDraw.Draw(img)
# Draw a maximum of 300 keypoints
for i in range(min(len(scale),300)):
x1 = kp[i,1]
y1 = kp[i,0]
x2 = kp[i,1]+2**scale[i]
y2 = kp[i,0]+2**scale[i]
coords = (x1, y1, x2, y2)
draw.ellipse(coords, fill = None, outline ='white')
if __name__ == '__main__':
try:
file_name = sys.argv[1]
except:
print("Didn't give me a file...")
file_name = "Lenna.png"
def nothing(*arg):
pass
# Create sliderbars to change the values of CENSURE parameters online
# Defaults: min_scale=1, max_scale=7, mode='DoB', non_max_threshold=0.15, line_threshold=10
cv2.namedWindow('censure')
cv2.createTrackbar('min_scale', 'censure', 1, 10, nothing)
cv2.createTrackbar('max_scale', 'censure', 7, 20, nothing)
cv2.createTrackbar('mode', 'censure', 2, 2, nothing)
cv2.createTrackbar('non_max_threshold', 'censure', 6, 1000, nothing)
cv2.createTrackbar('line_threshold', 'censure', 10, 100, nothing)
# Read image from file, then inspect the image dimensions
    img = cv2.imread(file_name, 1)
    img1 = img.copy()  # keep a pristine copy so the displayed frame can be reset each loop iteration
    height, width, channels = img.shape
# Pull the different color channels from the image
blue = img[:,:,0]
green = img[:,:,1]
red = img[:,:,2]
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # Make a PIL image from each channel so we can use PIL.Image.thumbnail to resize if needed
blue1 = Image.fromarray(blue)
green1 = Image.fromarray(green)
red1 = Image.fromarray(red)
gray1 = Image.fromarray(gray)
    # Check if dimensions are above desired, if so then resize keeping the aspect ratio
m, n = 512, 512
if height > m or width > n:
blue1.thumbnail((m,n), Image.ANTIALIAS)
green1.thumbnail((m,n), Image.ANTIALIAS)
red1.thumbnail((m,n), Image.ANTIALIAS)
gray1.thumbnail((m,n), Image.ANTIALIAS)
# CENSURE related
mode_dict = {"0": "DoB", "1": "Octagon", "2": "STAR"}
last_num_kp = 0
while True:
vis = gray.copy()
img = img1.copy()
# Read the values of the sliderbars and save them to variables
min_scale = cv2.getTrackbarPos('min_scale', 'censure')
max_scale = cv2.getTrackbarPos('max_scale', 'censure')
        if min_scale == 0:
min_scale = 1
if min_scale + max_scale < 3:
max_scale = min_scale + 2
mode = mode_dict[str(cv2.getTrackbarPos('mode', 'censure'))]
non_max_threshold = float(cv2.getTrackbarPos('non_max_threshold', 'censure'))/1000
line_threshold = cv2.getTrackbarPos('line_threshold', 'censure')
# Create a CENSURE feature detector
censure = CENSURE(min_scale=min_scale, max_scale=max_scale, mode=mode,
non_max_threshold=non_max_threshold, line_threshold=line_threshold)
# Obtain the CENSURE features
censure.detect(blue1)
kp_blue, scale_blue = censure.keypoints, censure.scales
censure.detect(green1)
kp_green, scale_green = censure.keypoints, censure.scales
censure.detect(red1)
kp_red, scale_red = censure.keypoints, censure.scales
censure.detect(gray1)
kp_gray, scale_gray = censure.keypoints, censure.scales
# Print the # of features if it has changed between iterations
num_kp = len(censure.keypoints)
if last_num_kp != num_kp:
print("Number of keypoints: " + str(len(censure.keypoints)))
last_num_kp = num_kp
# Draw the feature points on the images
draw_keypoints(blue1, kp_blue, scale_blue)
draw_keypoints(green1, kp_green, scale_green)
draw_keypoints(red1, kp_red, scale_red)
draw_keypoints(gray1, kp_gray, scale_gray)
# Obtain the histogram of scale values
plt.clf() # clear the figure from any previous plot
scale_hist, bin_edges = np.histogram(censure.scales,max_scale-min_scale, (min_scale,max_scale+1))
plt.bar(bin_edges[:-1]-0.5, scale_hist, width = 1)
plt.show(block=False)
plt.draw()
# Show the image with keypoints drawn over
image = cv2.cvtColor(np.asarray(img),cv2.COLOR_BGR2RGB)
cv2.imshow('censure', image)
if 0xFF & cv2.waitKey(500) == 27:
break
cv2.destroyAllWindows()
|
gpl-2.0
| 159,560,390,159,278,880
| 36.032787
| 102
| 0.629179
| false
| 3.210377
| false
| false
| false
|
qiou/Dev
|
python/edf.py
|
1
|
4511
|
#=========================================================================
# Dependencies / Libraries
#=========================================================================
import time
import serial
import MySQLdb
import subprocess
from time import sleep
import datetime
#=========================================================================
# Array/Dictionary helper function
#=========================================================================
def checksum (etiquette, valeur):
sum = 32
for c in etiquette: sum = sum + ord(c)
for c in valeur: sum = sum + ord(c)
sum = (sum & 63) + 32
return chr(sum)
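# Worked example (editor's note, purely illustrative values): for the label
# "PAPP" with value "00750" the ASCII codes sum to 305 + 252 = 557; adding the
# initial 32 gives 589, 589 & 63 = 13, and 13 + 32 = 45, so checksum("PAPP",
# "00750") returns chr(45) == '-'.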
#=========================================================================
# ReadTeleinfo function
#=========================================================================
def ReadTeleinfo ():
    # Wait for the start of the message
while ser.read(1) != chr(2): pass
message = ""
fin = False
while not fin:
char = ser.read(1)
if char != chr(2):
message = message + char
else:
fin = True
trames = [
trame.split(" ")
for trame in message.strip("\r\n\x03").split("\r\n")
]
tramesValides = dict([
[trame[0],trame[1]]
for trame in trames
if (len(trame) == 3) and (checksum(trame[0],trame[1]) == trame[2])
])
return tramesValides
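# Illustrative return value of ReadTeleinfo (editor's note; the labels present
# depend on the meter): {'ADCO': '012345678901', 'OPTARIF': 'BASE',
# 'PAPP': '00750', 'IINST': '003', ...}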
# print('Reading Teleinfo frames with the RPIDOM board')
#=========================================================================
# Connect to the serial port
#=========================================================================
ser = serial.Serial(
port='/dev/ttyAMA0',
baudrate=1200,
parity=serial.PARITY_EVEN,
stopbits=serial.STOPBITS_ONE,
bytesize=serial.SEVENBITS )
#=========================================================================
# Define frame variables and load an initial value
#=========================================================================
vIINST = 0
vMOTDETAT = 0
vOPTARIF = 0
vISOUSC = 0
vADCO = 0
vPAPP = 0
vIMAX = 0
vBASE = 0
vADPS = 0
#=========================================================================
# Read serial data
#=========================================================================
#print '\nFirst channel'
ser.write('A')
sleep(1)
ser.flushInput()
tramesOk = ReadTeleinfo()
trouve = False
for etiquette in tramesOk:
if etiquette == 'IINST':
#print etiquette , ":", tramesOk[etiquette]
vIINST = tramesOk[etiquette]
if etiquette == 'MOTDETAT':
#print etiquette , ":", tramesOk[etiquette]
vMOTDETAT = tramesOk[etiquette]
if etiquette == 'OPTARIF':
#print etiquette , ":", tramesOk[etiquette]
vOPTARIF = tramesOk[etiquette]
if etiquette == 'ISOUSC':
#print etiquette , ":", tramesOk[etiquette]
vISOUSC = tramesOk[etiquette]
if etiquette == 'ADCO':
#print etiquette , ":", tramesOk[etiquette]
vADCO = tramesOk[etiquette]
if etiquette == 'PAPP':
#print etiquette , ":", tramesOk[etiquette]
vPAPP = tramesOk[etiquette]
if etiquette == 'IMAX':
#print etiquette , ":", tramesOk[etiquette]
vIMAX = tramesOk[etiquette]
if etiquette == 'BASE':
#print etiquette , ":", tramesOk[etiquette]
vBASE = tramesOk[etiquette]
if etiquette == 'ADPS':
#print etiquette , ":", tramesOk[etiquette]
vADPS = tramesOk[etiquette]
#=========================================================================
# Date and Hour
#=========================================================================
vHEURE = datetime.datetime.now().strftime('%H:%M')
vDATE = datetime.datetime.today().strftime('%Y-%m-%d')
#=========================================================================
# Connect and insert into DB
#=========================================================================
db = MySQLdb.connect(host="192.168.1.250",port=3307,user="root",passwd="MariaQiou",db="edf" )
cursor = db.cursor()
if vBASE > 0:
cursor.execute("""INSERT INTO teleinfo(DATE, HEURE, IINST, MOTDETAT, OPTARIF, ISOUSC, ADCO, PAPP, IMAX, BASE, ADPS) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)""" ,(vDATE, vHEURE, vIINST, vMOTDETAT, vOPTARIF, vISOUSC, vADCO, vPAPP, vIMAX, vBASE, vADPS))
# Write into DB
db.commit()
db.rollback()
db.close()
#=========================================================================
ser.close()
|
gpl-2.0
| 3,979,199,428,035,871,000
| 35.379032
| 265
| 0.441809
| false
| 3.617482
| false
| false
| false
|
rithms/hearthstone
|
xml_to_json.py
|
1
|
4835
|
#!/usr/bin/env python
from bs4 import BeautifulSoup
import glob
import json
#############################################
# Convert Hearthstone card data XML to JSON #
#############################################
__author__ = "Taylor Caldwell - http://github.com/rithms"
__copyright__ = "Copyright 2015, Taylor Caldwell"
__license__ = "MIT"
__version__ = "1.0.0"
__maintainer__ = "Taylor Caldwell"
__email__ = "tcaldwel@nmsu.edu"
__status__ = "Production"
# EnumIds - Non-Boolean
enum_dict = {
45 : "health",
47 : "attack",
48 : "cost",
183 : "cardSet",
184 : "cardTextInHand",
185 : "cardName",
187 : "durability",
199 : "class",
200 : "race",
201 : "faction",
202 : "cardType",
203 : "rarity",
251 : "attackVisualType",
252 : "cardTextInPlay",
268 : "devState",
325 : "targetingArrowText",
330 : "enchantmentBirthVisual",
331 : "enchantmentIdleVisual",
342 : "artistName",
351 : "flavorText",
365 : "howToGetThisGoldCard",
364 : "howToGetThisCard",
#377 : "unknownHasOnDrawEffect",
#380 : "unknownBlackrockHeroes",
#389 : "unknownDuneMaulShaman",
#402 : "unknownIntenseGaze",
#401 : "unknownBroodAffliction"
}
# EnumIds - Boolean
bool_dict = {
32 : "Trigger Visual",
114 : "elite",
321 : "collectible",
189 : "Windfury",
190 : "Taunt",
191 : "Stealth",
192 : "Spell Power",
194 : "Divine Shield",
197 : "Charge",
205 : "Summoned",
208 : "Freeze",
212 : "Enrage",
215 : "Overload",
217 : "Deathrattle",
218 : "Battlecry",
219 : "Secret",
220 : "Combo",
240 : "Can't Be Damaged",
293 : "Morph",
335 : "Invisible Deathrattle",
338 : "One Turn Effect",
339 : "Silence",
340 : "Counter",
349 : "Immune To Spell Power",
350 : "Adjacent Buff",
361 : "Heal Target",
362 : "Aura",
363 : "Poisonous",
367 : "AI Must Play",
370 : "Affected By Spell Power",
388 : "Spare Part",
}
# Card Class IDs
class_dict = {
0 : "Developer",
2 : "Druid",
3 : "Hunter",
4 : "Mage",
5 : "Paladin",
6 : "Priest",
7 : "Rogue",
8 : "Shaman",
9 : "Warlock",
10 : "Warrior",
11 : "Dream"
}
# Card Set IDs
set_dict = {
2 : "Basic",
3 : "Classic",
4 : "Reward",
5 : "Missions",
7 : "System",
8 : "Debug",
11 : "Promotion",
12 : "Curse of Naxxramas",
13 : "Goblin vs Gnomes",
14 : "Blackrock Mountain",
16 : "Credits"
}
# Card Type IDs
type_dict = {
3 : "Hero",
4 : "Minion",
5 : "Spell",
6 : "Enchantment",
7 : "Weapon",
10 : "Hero Power"
}
# Card Race IDs
race_dict = {
14 : "Murloc",
15 : "Demon",
17 : "Mechanical",
20 : "Beast",
21 : "Totem",
23 : "Pirate",
24 : "Dragon"
}
# Card Faction IDs
faction_dict = {
1 : "Horde",
2 : "Alliance",
3 : "Neutral"
}
# Card Rarity IDs
rarity_dict = {
0 : "Developer",
1 : "Common",
2 : "Free",
3 : "Rare",
4 : "Epic",
5 : "Legendary"
}
# Get the name of the corresponding enum ID
def get_name(enum_id, d):
if enum_id in d:
return d[enum_id]
for f in glob.glob('cardxml0/CAB-cardxml0/TextAsset/*.txt'):
with open(f) as cardfile:
file_name = f.split('/')[-1].split('.')[0]
cardsoup = BeautifulSoup(cardfile.read(), features="xml")
cards = cardsoup.find_all('Entity')
json_dict = { 'data' : {} }
for card in cards:
card_id = card.get('CardID')
json_dict['data'][card_id] = { 'id' : card_id, 'mechanics' : [] }
tags = card.find_all('Tag')
for tag in tags:
enum_id = int(tag.get('enumID'))
if(tag.get('type') == 'String'):
enum_name = tag.text
else:
enum_name = tag.get('value')
if enum_id in enum_dict:
field = enum_dict[enum_id]
if field == 'class':
enum_name = get_name(int(enum_name), class_dict)
elif field == 'cardSet':
                        enum_name = get_name(int(enum_name), set_dict)
elif field == 'cardType':
enum_name = get_name(int(enum_name), type_dict)
elif field == 'race':
enum_name = get_name(int(enum_name), race_dict)
elif field == 'faction':
enum_name = get_name(int(enum_name), faction_dict)
elif field == 'rarity':
enum_name = get_name(int(enum_name), rarity_dict)
json_dict['data'][card_id][enum_dict[enum_id]] = enum_name
elif enum_id in bool_dict:
field = bool_dict[enum_id]
if field == 'collectible' or field == 'elite':
if enum_name == '1':
json_dict['data'][card_id][field] = True
elif enum_name == '0':
json_dict['data'][card_id][field] = False
else:
if enum_name == '1':
json_dict['data'][card_id]['mechanics'].append(field)
for key in bool_dict:
field = bool_dict[key]
if field == 'collectible' or field == 'elite':
if field not in json_dict['data'][card_id]:
json_dict['data'][card_id][field] = False
if not json_dict['data'][card_id]['mechanics']:
del json_dict['data'][card_id]['mechanics']
with open(file_name+'.json', 'w') as outfile:
json.dump(json_dict, outfile, sort_keys=True)
|
mit
| 7,331,306,430,884,571,000
| 20.20614
| 67
| 0.588211
| false
| 2.512994
| false
| false
| false
|
bodedev/prospera
|
plataforma/management/commands/atualizar_saldos.py
|
1
|
2085
|
# -*- coding: utf-8 -*-
from django.conf import settings
from django.core.management.base import BaseCommand
from plataforma.constants import ETHER_DIVISOR
from plataforma.models import Saldo
import requests
def buscar_saldo(carteira):
try:
r = requests.get("https://api.etherscan.io/api?module=account&action=tokenbalance&contractaddress=%s&address=%s&tag=latest&apikey=%s" % (settings.ETHERSCAN_CONTRACT_ADDRESS, carteira, settings.ETHERSCAN_APIKEY))
if r.status_code == 200:
data = r.json()
if data["status"] == "1":
saldo = float(data["result"]) / float(ETHER_DIVISOR)
_, created = Saldo.objects.update_or_create(carteira=carteira, defaults={"total": saldo})
print "%s: %0.6f (%s)" % (carteira, saldo, str(created))
return True
return False
except Exception, e:
print "Nao consegui pegar o saldo da carteira %s" % carteira
return None
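# Editor's note: buscar_saldo assumes the Etherscan "tokenbalance" call answers
# with a JSON object shaped like {"status": "1", "message": "OK",
# "result": "<raw token amount>"}; the raw amount is then divided by
# ETHER_DIVISOR before being stored. The shape shown here is illustrative.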
class Command(BaseCommand):
help = u"Atualiza o saldo de todas as carteiras de um contrato."
def handle(self, *args, **options):
url = "https://api.etherscan.io/api?module=logs&action=getLogs&fromBlock=%s&toBlock=latest&address=%s&apikey=%s" % (settings.ETHERSCAN_START_BLOCK_NUMBER, settings.ETHERSCAN_CONTRACT_ADDRESS, settings.ETHERSCAN_APIKEY)
r = requests.get(url)
data = r.json()
saldos_atualizados = []
for transacion in data["result"]:
carteira_from = transacion["topics"][1].replace("0x000000000000000000000000", "0x")
if carteira_from not in saldos_atualizados:
if buscar_saldo(carteira_from):
saldos_atualizados.append(carteira_from)
if len(transacion["topics"]) >= 3:
carteira_to = transacion["topics"][2].replace("0x000000000000000000000000", "0x")
if carteira_to not in saldos_atualizados:
if buscar_saldo(carteira_to):
saldos_atualizados.append(carteira_to)
print "Fim de processo!"
|
mit
| -6,934,085,334,793,678,000
| 44.326087
| 226
| 0.632134
| false
| 3.434926
| false
| false
| false
|
jas0n1ee/SonyCameraAPI
|
takePicture.py
|
1
|
1212
|
#!/usr/bin/env python
from sonyAPI2 import API2
import cv2
import urllib2
import numpy as np
import time
import struct
api = API2()
api.update_api_list()
try:
result = api.do('getAvailableCameraFunction')
current = result['result'][0]
availavle = result['result'][1]
if current != "Remote Shooting":
if "Remote Shooting" in availavle:
api.do('setCameraFunction',["Remote Shooting"])
api.update_api_list()
else:
print "Remote Shooting not availavle"
except KeyError:
print result
try:
result = api.do('getAvailableShootMode')
current = result['result'][0]
availavle = result['result'][1]
if current != "still":
if "still" in availavle:
api.do('setShootMode',["still"])
api.update_api_list()
else:
print "stil Shooting not availavle"
except KeyError:
print result
try:
result = api.do('actTakePicture')
url = result['result'][0][0]
except KeyError:
print result
except TypeError:
print result
f = urllib2.urlopen(url)
d = np.asarray(bytearray(f.read()), dtype='uint8')
img = cv2.imdecode(d,cv2.IMREAD_COLOR)
cv2.imshow('postview',img)
time.sleep(10)
|
apache-2.0
| 5,010,609,995,021,559,000
| 23.734694
| 59
| 0.640264
| false
| 3.338843
| false
| true
| false
|
junhe/chopper
|
src/MWpyFS/Monitor.py
|
1
|
44187
|
# Chopper is a diagnostic tool that explores file systems for unexpected
# behaviors. For more details, see paper Reducing File System Tail
# Latencies With Chopper (http://research.cs.wisc.edu/adsl/Publications/).
#
# Please send bug reports and questions to jhe@cs.wisc.edu.
#
# Written by Jun He at University of Wisconsin-Madison
# Copyright (C) 2015 Jun He (jhe@cs.wisc.edu)
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
# The monitor is used to monitor the FS fragmentation status.
# What I want to see is, generally, how's the metadata. This may include:
#
# SIZE of inode and extent tree. (number of inode block and extent tree
# block). This can be find by debugfs "dump_extents [-n] [-l] filespec".
# But you have to do it for ALL files in the file system, which might be
# slow. I haven't got a better approach. A good indicator of metadata
# problem is #_metadata_block/#_data_block. This should be very informative
# about the aging of a file system which causes metadata disaster.
# I expect the following from the output of this per file:
#
# filepath create_time n_metablock n_datablock metadata_ratio filebytes
#
# Extent fragmentation overview. This can be obtained by e2freefrag. This
# should give me a good sense of how fragmented the FS is. The acceleration
# rate of fragmentation might be a good indicator of whether a workload
# can cause metadata problem. (Because of fragmentation, physical blocks
# might not be able to allocated contiguously, then it needs two or more
# extents to the logically contiguous blocks.)
# I expect the following from the output of this per FS:
# JUST LIKE THE ORIGINAL OUTPUT BUT FORMAT IT A LITTLE BIT
#
#
#
#
# TODO:
# 1. I need to figure out a good way to figure out
# dspan of the interested files.
# 2. Is there a better way in btrfs to find only the
# interested file, other than deleting all the
# uninteresting file.
#
import subprocess
from time import strftime, localtime, sleep
import re
import shlex
import os
import pprint
import shutil
import fnmatch
import itertools
import glob
import btrfs_db_parser
import xfs_db_parser
import dataframe
class cd:
"""Context manager for changing the current working directory"""
def __init__(self, newPath):
self.newPath = newPath
def __enter__(self):
self.savedPath = os.getcwd()
os.chdir(self.newPath)
def __exit__(self, etype, value, traceback):
os.chdir(self.savedPath)
def fill_white_space(path, filler="_"):
path.strip()
return path.replace(" ", filler)
class FSMonitor:
"""
This monitor probes the ext4 file system and return information I
want in a nice format.
"""
def __init__(self, dn, mp, ld="/tmp", cw=20, filesystem='ext4'):
self.devname = dn # this should be the device name of the partition
self.mountpoint = mp # please only provide path without mountpoint
# when using this class.
self.col_width = cw
self.logdir = ld
self.resetMonitorTime()
self.resetJobID()
self.filesystem = filesystem # the file system this monitor monitors
def resetMonitorTime(self, monitorid=""):
"monitor_time is used to identify each data retrieval"
if monitorid == "":
self.monitor_time = strftime("%Y-%m-%d-%H-%M-%S", localtime())
else:
self.monitor_time = monitorid
def resetJobID(self, jobid="DefaultJOBID"):
self.jobid = jobid
def _spliter_dumpfs(self, line):
line = line.replace(",", " ")
elems = line.split(":")[1]
elems = elems.split()
new_elems = [] # [[a0,a1],[b0,b1]...]
for elem in elems:
e = elem.split("-")
elen = len(e)
if elen == 2:
new_elems.append(e)
elif elen == 1:
e = e*2
new_elems.append(e)
else:
print "wrong split", elem
exit(1)
return new_elems
def dumpfsSummary(self):
if self.filesystem != 'ext4':
return
print "dumpfs..."
cmd = ["dumpe2fs", "-h", self.devname]
proc = subprocess.Popen(cmd,
stdout=subprocess.PIPE)
print "dumpfs finished. Parsing results..."
proc.wait()
return proc.communicate()[0]
def dumpfs(self):
if self.filesystem != 'ext4':
return
print "dumpfs..."
cmd = ["dumpe2fs", self.devname]
proc = subprocess.Popen(cmd,
stdout=subprocess.PIPE)
print "dumpfs finished. Parsing results..."
freeblocks = []
freeinodes = []
for line in proc.stdout:
if line.startswith(" Free blocks:"):
freeblocks += self._spliter_dumpfs(line)
elif line.startswith(" Free inodes:"):
freeinodes += self._spliter_dumpfs(line)
else:
pass
proc.wait()
# initialize
freeblocks_df = dataframe.DataFrame(header=['start', 'end'],
table=freeblocks)
freeinodes_df = dataframe.DataFrame(header=['start', 'end'],
table=freeinodes)
# add additional columns
freeblocks_df.addColumn(key="monitor_time",
value=self.monitor_time)
freeblocks_df.addColumn(key="jobid",
value=self.jobid)
freeblocks_df.addColumn(key="HEADERMARKER_freeblocks",
value="DATAMARKER_freeblocks")
freeinodes_df.addColumn(key="monitor_time",
value=self.monitor_time)
freeinodes_df.addColumn(key="jobid",
value=self.jobid)
freeinodes_df.addColumn(key="HEADERMARKER_freeinodes",
value="DATAMARKER_freeinodes")
return {"freeblocks":freeblocks_df, "freeinodes":freeinodes_df}
def e2freefrag(self):
if self.filesystem != 'ext4':
return
cmd = ["e2freefrag", self.devname]
proc = subprocess.Popen(cmd,
stdout=subprocess.PIPE)
proc.wait()
part = 0
sums_dict = {}
hist_table = ""
hist_df = dataframe.DataFrame()
for line in proc.stdout:
if part == 0:
if "HISTOGRAM" in line:
part = 1
continue
mo = re.search( r'(.*): (\d+)', line, re.M)
if mo:
keyname = mo.group(1)
keyname = keyname.replace('.', '')
keyname = "_".join(keyname.split())
sums_dict[keyname] = mo.group(2)
elif part == 1:
# This part is the histogram.
line = line.strip()
if "Extent Size" in line:
hist_table = "Extent_start Extent_end Free_extents Free_Blocks Percent"
hist_df.header = hist_table.split()
continue
fline = re.sub(r'[\-:\n]', "", line)
fline = re.sub(r'\.{3}', "", fline)
row = fline.split()
hist_df.addRowByList(row)
hist_df.addColumns(keylist = ["HEADERMARKER_freefrag_hist",
"monitor_time",
"jobid"],
valuelist = ["DATAMARKER_freefrag_hist",
self.monitor_time,
self.jobid])
# convert dict to data frame
sums_df = dataframe.DataFrame(header=sums_dict.keys(),
table=[sums_dict.values()])
sums_df.addColumn(key="HEADERMARKER_freefrag_sum",
value="DATAMARKER_freefrag_sum")
sums_df.addColumn(key="monitor_time",
value=self.monitor_time)
sums_df.addColumn(key="jobid",
value=self.jobid)
return {"FragSummary":sums_df, "ExtSizeHistogram":hist_df}
def imap_of_a_file(self, filepath):
if self.filesystem != 'ext4':
return
#cmd = "debugfs " + self.devname + " -R 'imap " + filepath + "'"
cmd = ['debugfs', self.devname, '-R', 'imap "' + filepath + '"']
print cmd, '......'
#cmd = shlex.split(cmd)
proc = subprocess.Popen(cmd, stdout = subprocess.PIPE)
imapdict = {}
for line in proc.stdout:
#print line
if "block group" in line:
nums = re.findall(r'\d+', line)
if len(nums) != 2:
print "Error parsing imap"
exit(1)
imapdict['inode_number'] = nums[0]
imapdict['group_number'] = nums[1]
elif 'located at block' in line:
items = line.split()
imapdict['block_number'] = items[3].rstrip(',')
imapdict['offset_in_block'] = items[5]
proc.wait()
#print imapdict
return imapdict
def dump_extents_of_a_file(self, filepath):
"This function only gets ext list for this file"
if self.filesystem != 'ext4':
return
#print "filepath:", filepath
#cmd = "debugfs " + self.devname + " -R 'dump_extents " + filepath + "'"
cmd = ['debugfs', self.devname, '-R', 'dump_extents "' + filepath + '"']
#print cmd, '......'
#cmd = shlex.split(cmd)
proc = subprocess.Popen(cmd, stdout = subprocess.PIPE)
ext_list = [] # Use list here in case I want to extract data in Python
header = []
max_level = 0
df_ext = dataframe.DataFrame()
header = ["Level_index", "Max_level",
"Entry_index", "N_Entry",
"Logical_start", "Logical_end",
"Physical_start", "Physical_end",
"Length", "Flag"]
df_ext.header = header
for line in proc.stdout:
#print "LLL:", line,
if "Level" in line:
pass
else:
savedline = line
line = re.sub(r'[/\-]', " ", line)
tokens = line.split()
if len(tokens) == 8:
# there is no physical end
tokens.insert(7, tokens[6]) #TODO: this is dangerous
d = {}
for i in range(9):
try:
d[ header[i] ] = tokens[i]
except:
print savedline
print "token:", tokens
print "header:", header # having a try-except can grant you
# the opportunity to do something
# after bad thing happen
if len(tokens) == 10:
d["Flag"] = tokens[10]
else:
d["Flag"] = "NA"
df_ext.addRowByDict(d)
proc.wait()
# Put the location of the inode the df_ext, level_index as -1 to
# indicate that it is a inode
imapdict = self.imap_of_a_file(filepath)
d = {}
d['Level_index'] = '-1'
d['Max_level'] = '-1'
d['Entry_index'] = 'NA'
d['N_Entry'] = 'NA'
d['Logical_start'] = 'NA'
d['Logical_end'] = 'NA'
d['Physical_start'] = imapdict['block_number']
d['Physical_end'] = imapdict['block_number']
d['Length'] = '1'
d['Flag'] = 'NA'
df_ext.addRowByDict(d)
df_ext.addColumn(key = "filepath",
value = fill_white_space(filepath))
df_ext.addColumn(key = "HEADERMARKER_extlist",
value = "DATAMARKER_extlist")
df_ext.addColumn(key = "jobid",
value = self.jobid)
df_ext.addColumn(key = "monitor_time",
value = self.monitor_time)
return df_ext
def setBlock(self, blockn, count):
if self.filesystem != 'ext4':
return
cmd = "debugfs " + self.devname + \
" -w -R 'setb " + str(blockn) + " " + str(count) + "'"
cmd = shlex.split(cmd)
proc = subprocess.Popen(cmd, stdout = subprocess.PIPE)
proc.wait()
return proc.returncode
def isAllBlocksInUse(self, blockn, count):
"if any of the blocks is not in use, return false. return true otherwise"
if self.filesystem != 'ext4':
return
cmd = "debugfs " + self.devname + \
" -w -R 'testb " + str(blockn) + " " + str(count) + "'"
cmd = shlex.split(cmd)
proc = subprocess.Popen(cmd, stdout = subprocess.PIPE)
for line in proc.stdout:
if 'not' in line:
return False
proc.wait()
return True
def dumpextents_sum(self, filepath):
"TODO: merge this with dump_extents_of_a_file()"
if self.filesystem != 'ext4':
return
#cmd = "debugfs " + self.devname + " -R 'dump_extents " + filepath + "'"
#cmd = ['debugfs', self.devname, '-R', '"dump_extents ' + filepath + '"']
cmd = ['debugfs', self.devname, '-R', 'dump_extents "' + filepath + '"']
#print cmd, "........."
#cmd = shlex.split(cmd)
proc = subprocess.Popen(cmd, stdout = subprocess.PIPE)
header = []
n_entries = [0] * 3 # n_entries[k] is the number of entries at level k
# it can be used to calculate number of
# internal/leaf nodes
max_level = 0
exttable = ""
header = ["Level_index", "Max_level",
"Entry_index", "N_Entry",
"Logical_start", "Logical_end",
"Physical_start", "Physical_end",
"Length", "Flag"]
for line in proc.stdout:
#print "LLL:", line,
if "Level" in line:
pass
else:
savedline = line
line = re.sub(r'[/\-]', " ", line)
tokens = line.split()
if len(tokens) == 8:
# there is no physical end
tokens.insert(7, "NA") #TODO: this is dangerous
d = {}
for i in range(9):
try:
d[ header[i] ] = tokens[i]
except:
print savedline
print "token:", tokens
print "header:", header # having a try-except can grant you
# the opportunity to do something
# after bad thing happen
if len(tokens) == 10:
d["Flag"] = tokens[10]
else:
d["Flag"] = "NA"
n_entries[ int(d["Level_index"]) ] = int( d["N_Entry"] )
max_level = int( d["Max_level"] )
#print "..... finished stdout parsing .... "
proc.terminate()
#print "..... after terminating .... "
        # calculate number of metadata blocks
        # only the 1st and 2nd levels take space.
        # How to calculate:
        # if there is only 1 level (root and level 1),
        # the number of entries in level 0 indicates the
# number of nodes in level 1.
# Basically, the number of entries in level i
# equals the number of ETB of the next level
n_metablock = 0
if max_level == 0:
# the tree has no extent tree block outside of the inode
n_metablock = 0
else:
for n in n_entries[0:max_level]:
n_metablock += n
dumpdict = {}
dumpdict["filepath"] = fill_white_space(filepath)
dumpdict["n_metablock"] = n_metablock
others = self.filefrag(filepath)
if others.has_key('nblocks'):
dumpdict["n_datablock"] = others["nblocks"]
else:
dumpdict["n_datablock"] = 'NA'
if others.has_key('nbytes'):
dumpdict["filebytes"] = others["nbytes"]
else:
dumpdict["filebytes"] = 'NA'
#print "Reached end of debugfs...."
return dumpdict
def filefrag(self, filepath):
if self.filesystem != 'ext4':
return
fullpath = os.path.join(self.mountpoint, filepath)
cmd = ["filefrag", "-sv", fullpath]
#print cmd
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)
mydict = {}
for line in proc.stdout:
if line.startswith("File size of"):
#print line
line = line.split(" is ")[1]
#print line
nums = re.findall(r'\d+', line)
if len(nums) != 3:
print "filefrag something wrong"
exit(1)
mydict["nbytes"] = nums[0]
mydict["nblocks"] = nums[1]
mydict["blocksize"] = nums[2]
return mydict
def getAllInodePaths(self, target="."):
"it returns paths of all files and diretories"
rootpath = os.path.join(self.mountpoint)
paths = []
with cd(rootpath):
cmd = ['find', target]
print cmd
proc = subprocess.Popen(cmd, stdout = subprocess.PIPE)
for line in proc.stdout:
paths.append(line.replace("\n", ""))
proc.wait()
return paths
def getExtentList_of_a_dir(self, target):
"""
this only works for absolute path
"""
if self.filesystem != 'ext4':
return
#files = self.getAllInodePaths(target)
files = get_all_my_files(target)
#print files
#exit(1)
df = dataframe.DataFrame()
for f in files:
f = os.path.relpath(f, target)
if len(df.header) == 0:
df = self.dump_extents_of_a_file(f)
else:
df.table.extend( self.dump_extents_of_a_file(f).table )
return df
def getPerFileBlockCounts(self, rootdir="."):
if self.filesystem != 'ext4':
return
files = self.getAllInodePaths(rootdir)
counts_df = dataframe.DataFrame()
for f in files:
d = self.dumpextents_sum(f)
if len(counts_df.header) == 0:
counts_df.header = d.keys()
counts_df.addRowByDict(d)
counts_df.addColumns(keylist=["HEADERMARKER_extstats",
"monitor_time",
"jobid"],
valuelist=["DATAMARKER_extstats",
self.monitor_time,
self.jobid])
return counts_df
def getFSBlockCount(self, df_files):
"df_files has number of metablocks datablocks of each file"
if self.filesystem != 'ext4':
return
if len(df_files.table) == 0:
return ""
fs_nmetablocks = 0
fs_ndatablocks = 0
nmetaindex = df_files.header.index('n_metablock')
ndataindex = df_files.header.index('n_datablock')
for row in df_files.table:
if row[nmetaindex] == 'NA' or row[ndataindex] == 'NA':
fs_nmetablocks = 'NA'
fs_ndatablocks = 'NA'
break
fs_nmetablocks += int(row[nmetaindex])
fs_ndatablocks += int(row[ndataindex])
headerstr = "fs_nmetablocks fs_ndatablocks monitor_time HEADERMARKER_extstatssum jobid"
valuelist = [fs_nmetablocks, fs_ndatablocks, self.monitor_time,
'DATAMARKER_extstatssum', self.jobid]
fsblkcount_df = dataframe.DataFrame(
header=headerstr.split(),
table=[valuelist])
return fsblkcount_df
def widen(self, s):
return s.ljust(self.col_width)
def dict2table(self, mydict):
mytable = ""
header = ""
for keyname in mydict:
header += self.widen(keyname) + " "
header += self.widen("monitor_time") + " HEADERMARKER_freefrag_sum\n"
vals = ""
for keyname in mydict:
vals += self.widen(mydict[keyname]) + " "
vals += self.widen(str(self.monitor_time)) + " DATAMARKER_freefrag_sum\n"
return header + vals
def display(self, savedata=False, logfile="", monitorid="", jobid="myjobid"):
self.resetMonitorTime(monitorid=monitorid)
self.resetJobID(jobid=jobid)
ret_dict = {'d_span':'NA',
'physical_layout_hash':'NA'}
if savedata:
if logfile == "":
filename = self.monitor_time + ".result"
else:
filename = logfile
fullpath = os.path.join(self.logdir, filename)
f = open(fullpath, 'w')
if self.filesystem == 'ext3':
extlist = ext34_getExtentList_of_myfiles(target=self.mountpoint)
df_ext = extlist_block_to_byte(extlist)
if savedata and extlist != None:
h = "---------------- extent list -------------------\n"
f.write(extlist.toStr())
elif self.filesystem == 'ext4':
######################
# get extents of all files
extlist = self.getExtentList_of_a_dir(target=self.mountpoint)
df_ext = extlist_translate_new_format(extlist)
#print df_ext.toStr()
#exit(1)
if savedata and extlist != None:
h = "---------------- extent list -------------------\n"
f.write(extlist.toStr())
######################
# e2freefrag
#frag = self.e2freefrag()
#if savedata and frag != None:
#frag0_header = "----------- Extent summary -------------\n"
#frag1_header = "----------- Extent Histogram -------------\n"
#f.write(frag0_header + frag["FragSummary"].toStr())
#f.write(frag1_header + frag["ExtSizeHistogram"].toStr())
######################
# dumpfs
#freespaces = self.dumpfs()
#if savedata and frag != None:
#dumpfs_header = "----------- Dumpfs Header ------------\n"
#f.write(dumpfs_header + freespaces['freeblocks'].toStr())
#f.write(dumpfs_header + freespaces['freeinodes'].toStr())
elif self.filesystem == 'xfs':
df_ext = self.xfs_getExtentList_of_a_dir(self.mountpoint)
#df_ext = self.xfs_getExtentList_of_a_dir('./dir.1/')
#df_ext.table.extend(df_ext0.table)
df_ext = extlist_translate_new_format(df_ext)
#print df_ext.toStr()
#exit(1)
if savedata and df_ext != None:
df_ext.addColumns(keylist=["HEADERMARKER_extlist",
"monitor_time",
"jobid"],
valuelist=["DATAMARKER_extlist",
self.monitor_time,
self.jobid])
h = "---------------- extent list -------------------\n"
f.write( h + df_ext.toStr() )
elif self.filesystem == 'btrfs':
            # too many files there sometimes, let me remove some
remove_unecessary(self.mountpoint)
tree_lines = btrfs_db_parser.btrfs_debug_tree(self.devname)
tree_parser = btrfs_db_parser.TreeParser(tree_lines)
df_dic = tree_parser.parse()
df_rawext = df_dic['extents']
df_chunk = df_dic['chunks']
paths = get_all_my_files(self.mountpoint)
df_map = btrfs_db_parser.get_filepath_inode_map2(paths)
#print df_rawext.toStr()
#print df_chunk.toStr()
#print df_map.toStr()
#exit(0)
df_ext = btrfs_convert_rawext_to_ext(df_rawext, df_chunk, df_map)
if savedata:
df_ext.addColumns(keylist=["HEADERMARKER_extlist",
"monitor_time",
"jobid"],
valuelist=["DATAMARKER_extlist",
self.monitor_time,
self.jobid])
h = "---------------- extent list -------------------\n"
f.write( h + df_ext.toStr())
else:
print "Unsupported file system."
exit(1)
if savedata:
f.flush()
f.close()
# calculate return value
print df_ext.toStr()
#exit(0)
ret_dict['d_span'] = get_d_span_from_extent_list(df_ext,
'.file')
ret_dict['distance_sum'] = \
get_distant_sum_from_extent_list(df_ext, '.file')
if ret_dict['distance_sum'] < 0:
print 'distance_sum should be >=0'
allpaths = get_paths_in_df(df_ext)
myfiles = [os.path.basename(path) for path in allpaths \
if '.file' in path]
myfiles.sort( key=lambda x:int(x.split('.')[0]) ) #sort by file id
ret_dict['datafiles'] = '|'.join( myfiles )
dspans = []
for f in myfiles:
dspans.append( get_d_span_from_extent_list(df_ext, f) )
dspans = [str(x) for x in dspans]
ret_dict['datafiles_dspan'] = '|'.join( dspans )
num_extents = []
for f in myfiles:
num_extents.append( get_num_ext_from_extent_list(df_ext, f) )
num_extents = [str(x) for x in num_extents]
ret_dict['num_extents'] = '|'.join( num_extents )
ret_dict['physical_layout_hash'] \
= get_physical_layout_hash(df_ext,
'file',
merge_contiguous=True)
return ret_dict
def stat_a_file(self, filepath):
filepath = os.path.join(self.mountpoint, filepath)
cmd = ["stat", filepath]
proc = subprocess.Popen(cmd,
stdout=subprocess.PIPE)
output = proc.communicate()[0] # communicate() uses buffer. Don't use it
lines = output.strip()
lines = lines.split('\n')
stat_dict = {}
for line in lines:
#print line
if not "Inode" in line:
continue
mo = re.search( r'Inode:\s(\d+)', line, re.M)
if mo:
print mo.group(1)
inode_number = mo.group(1)
stat_dict['inode_number'] = inode_number
return stat_dict
def xfs_get_extentlist_of_a_file(self, filepath):
inode_number = self.stat_a_file(filepath)['inode_number']
df = xfs_db_parser.xfs_get_extent_tree(inode_number, self.devname)
df.addColumn(key = "filepath",
value = fill_white_space(filepath))
return df
def xfs_getExtentList_of_a_dir(self, target="."):
"rootdir is actually relative to mountpoint. Seems bad"
#files = self.getAllInodePaths(target)
files = get_all_my_files(target)
df = dataframe.DataFrame()
for f in files:
#print "UU____UU"
if len(df.header) == 0:
df = self.xfs_get_extentlist_of_a_file(f)
else:
df.table.extend( self.xfs_get_extentlist_of_a_file(f).table )
return df
############################################
SECTORSIZE=512
def get_num_sectors(length):
return int((length+SECTORSIZE-1)/SECTORSIZE)
def get_distant_sum(extentlist):
"""
extentlist is a list like:
[ {'off':xxx, 'len':xxx}, {..}, ..]
This unit is byte.
"""
#print extentlist
# for each extent
distsum = 0
n = 0
for ext in extentlist:
distsum += extent_distant_sum(ext)
n += get_num_sectors(ext['len'])
for ext1, ext2 in itertools.combinations(extentlist, 2):
distsum += extent_pair_distant_sum(ext1, ext2)
return distsum
def extent_distant_sum(extent):
"""
The sum of all pair distance inside the extent is:
n(n-1)(n+1)/6
"""
# doing a trick to get ceiling without floats
n = get_num_sectors(extent['len'])
# hmm.. define the distance of 1 sector
# to be 1.
if n == 1:
return 1
#print "n:", n
ret = n*(n-1)*(n+1)/6
#print extent, ret
return ret
def extent_pair_distant_sum( extent1, extent2 ):
"ext1 and ext2 cannot overlap!"
if extent1['off'] > extent2['off']:
extent1, extent2 = extent2, extent1
m = get_num_sectors(extent1['len'])
n = get_num_sectors(extent2['len'])
k = (extent2['off']-extent1['off']-extent1['len'])/SECTORSIZE
ret = m*n*(m+n+2*k)/2
#print extent1, extent2, ret
return ret
if __name__ == '__main__':
print get_distant_sum( [
{'off':0, 'len':512},
#{'off':512, 'len':512}] )
{'off':512*10, 'len':512}] )
def remove_unecessary(top):
objlist = os.listdir(top)
for name in objlist:
if name.endswith('.file') or name.startswith('dir.'):
continue
path = os.path.join(top, name)
if os.path.isfile(path):
os.remove(path)
#print 'remove FILE:', path
else:
shutil.rmtree(path)
#print 'remove DIR:', path
subprocess.call('sync')
def get_all_my_files( target ):
matches = []
for root, dirnames, filenames in os.walk(target):
for filename in fnmatch.filter(filenames, '*.file'):
matches.append(os.path.join(root, filename))
dirnames[:] = fnmatch.filter(dirnames, 'dir.*')
return matches
def ext34_getExtentList_of_myfiles(target):
files = get_all_my_files(target)
df = dataframe.DataFrame()
for f in files:
if len(df.header) == 0:
df = filefrag(f)
else:
df.table.extend( filefrag(f).table )
return df
def get_physical_layout_hash(df_ext, filter_str, merge_contiguous=False):
"""
It only cares about physical block positions.
It has nothing to do with filename, logical address of blocks..
Just sort the physical block start and end, then do a hash
    Including inode, ETB, and data extents!
    Another way to find the layout is to get all the free blocks and
    hash them; that is a more direct view of the free space.
"""
hdr = df_ext.header
phy_blocks = []
for row in df_ext.table:
if filter_str in row[hdr.index('filepath')]:
#print row
physical_start = int(row[hdr.index('Physical_start')])
physical_end = int(row[hdr.index('Physical_end')])
phy_blocks.append( physical_start )
phy_blocks.append( physical_end )
# There can be over lap between extents for inode and only for inode
# block number can be overlapped in extent
# block number of the same extent always next to each other
phy_blocks.sort()
if merge_contiguous:
# the block number are ALWAYS in pair, even after sorting
# [start, end, start, end, start, end, ...]
# This may not work for BTRFS!
merged = []
n = len(phy_blocks)
assert n % 2 == 0
for i in range(0, n, 2):
# i is start of an extent
if i == 0: # the first extent
merged.append( phy_blocks[i] )
merged.append( phy_blocks[i+1] )
continue
if phy_blocks[i] == phy_blocks[i-1] + 1:
# can be merged
merged[-1] = phy_blocks[i+1]
elif phy_blocks[i] == phy_blocks[i-2] and \
phy_blocks[i+1] == phy_blocks[i-1]:
# hmm... duplicated extent. can only happen to inode
pass # do nothing
else:
# cannot be merged
merged.append( phy_blocks[i] )
merged.append( phy_blocks[i+1] )
phy_blocks = merged
return hash( str(phy_blocks) )
def get_inode_num_from_dfmap(filepath, df_map):
hdr = df_map.header
for row in df_map.table:
if row[hdr.index('filepath')] == filepath:
return row[hdr.index('inode_number')]
return None
def get_all_vir_ranges_of_an_inode(inode_number, df_rawext):
hdr = df_rawext.header
ranges = []
for row in df_rawext.table:
if str(row[hdr.index('inode_number')]) == str(inode_number):
d = {
'virtual_start': int(row[hdr.index('Virtual_start')]),
'length': int(row[hdr.index('Length')])
}
ranges.append( d )
return ranges
def btrfs_df_map_to_dic(df_map):
d = {}
hdr = df_map.header
for row in df_map.table:
filepath = row[hdr.index('filepath')]
inode_number = row[hdr.index('inode_number')]
d[str(inode_number)] = filepath
return d
def btrfs_convert_rawext_to_ext(df_rawext, df_chunk, df_map):
#print df_rawext.toStr()
#print df_chunk.toStr()
#print df_map.toStr()
dic_map = btrfs_df_map_to_dic(df_map)
hdr = df_rawext.header
devices = set()
df_ext = dataframe.DataFrame()
df_ext.header = ['Level_index',
'Max_level',
'Entry_index',
'N_Entry',
'Virtual_start',
'Logical_start',
'Logical_end',
'Physical_start',
'Physical_end',
'Length',
'Flag',
'filepath']
for row in df_rawext.table:
rowdic = {}
for col in hdr:
rowdic[col] = row[hdr.index(col)]
#print rowdic
phy_starts = btrfs_db_parser.virtual_to_physical( rowdic['Virtual_start'], df_chunk )
for stripe in phy_starts:
devices.add( stripe['devid'] )
assert len(devices) == 1, 'we only allow one device at this time'
rowdic['Physical_start'] = stripe['physical_addr']
rowdic['Physical_end'] = stripe['physical_addr'] + \
int( rowdic['Length'] )
rowdic['Logical_end'] = int(rowdic['Logical_start']) + \
int( rowdic['Length'] )
rowdic['Level_index'] = 0
rowdic['Max_level'] = 0
rowdic['Entry_index'] = 0
rowdic['N_Entry'] = 0
rowdic['filepath'] = dic_map[str( rowdic['inode_number'] )]
rowdic['Flag'] = "NA"
df_ext.addRowByDict( rowdic )
return df_ext
def extlist_translate_new_format(df_ext):
"""
Use ending of file and new unit(byte)
Only df_ext of ext4 and xfs need this, btrfs already
uses byte as unit.
But does btrfs use the new style of ending?
"""
df_ext = extlist_lastblock_to_nextblock(df_ext)
df_ext = extlist_block_to_byte(df_ext)
return df_ext
def extlist_lastblock_to_nextblock(df_ext):
"""
for ext4 and xfs, the Logical_end and Physical_end point
to the last block of the file. This is not convenient when
we translate the unit from block to byte.
so in this function, we shift the _end to point to the
next block of the file (out of the file), kind of like
the .end() of iterator in C++.
For example, it was 8,8 for a file, indicating, the first
and the last block of the file is 8.
After the translating of this file, it is 8,9.
"""
colnames = ['Logical_end', 'Physical_end']
hdr = df_ext.header
for row in df_ext.table:
for col in colnames:
x = row[hdr.index(col)]
if x != 'NA':
x = int(x) + 1
row[hdr.index(col)] = x
return df_ext
def extlist_block_to_byte(df_ext):
"""
Translate the unit from block to byte for extent list
Translated:
Logical_start Logical_end Physical_start Physical_end
This function should be used as soon as the df_ext is created
so all the later functions that use this df_ext can treat it
as byte.
"""
BLOCKSIZE = 4096
colnames = ['Logical_start', 'Logical_end',
'Physical_start', 'Physical_end', 'Length']
hdr = df_ext.header
for row in df_ext.table:
for col in colnames:
x = row[hdr.index(col)]
if x != 'NA':
x = int(x) * BLOCKSIZE
row[hdr.index(col)] = x
return df_ext
def get_num_ext_from_extent_list(df_ext, filename):
"Get number of extents"
hdr = df_ext.header
cnt = 0
for row in df_ext.table:
if filename == os.path.basename(row[hdr.index('filepath')]) and \
row[hdr.index('Level_index')] != '-1':
cnt += 1
return cnt
def get_paths_in_df(df_ext):
hdr = df_ext.header
paths = set()
for row in df_ext.table:
paths.add( row[hdr.index('filepath')] )
return list(paths)
def get_d_span_from_extent_list(df_ext, filepath):
hdr = df_ext.header
byte_max = -1
byte_min = float('Inf')
for row in df_ext.table:
if filepath in row[hdr.index('filepath')] and \
row[hdr.index('Level_index')] != '-1' and \
row[hdr.index('Level_index')] == row[hdr.index('Max_level')]:
#print row
physical_start = int(row[hdr.index('Physical_start')])
physical_end = int(row[hdr.index('Physical_end')])
mmin = min(physical_start, physical_end)
mmax = max(physical_start, physical_end)
if mmin < byte_min:
byte_min = mmin
if mmax > byte_max:
byte_max = mmax
if byte_max == -1:
# no extent found
return 'NA'
else:
return byte_max - byte_min
def get_distant_sum_from_extent_list(df_ext, filepath):
hdr = df_ext.header
extlist = []
for row in df_ext.table:
if filepath in row[hdr.index('filepath')] and \
row[hdr.index('Level_index')] != '-1' and \
row[hdr.index('Level_index')] == row[hdr.index('Max_level')]:
#print row
physical_start = int(row[hdr.index('Physical_start')])
physical_end = int(row[hdr.index('Physical_end')])
d = {
'off': physical_start,
'len': physical_end - physical_start
}
extlist.append( d )
distsum = get_distant_sum( extlist )
return distsum
def stat_a_file(filepath):
filepath = os.path.join(filepath)
cmd = ["stat", filepath]
proc = subprocess.Popen(cmd,
stdout=subprocess.PIPE)
output = proc.communicate()[0] # communicate() uses limited buffer
lines = output.strip()
lines = lines.split('\n')
stat_dict = {}
for line in lines:
#print line
if not "Inode" in line:
continue
mo = re.search( r'Inode:\s(\d+)', line, re.M)
if mo:
#print mo.group(1)
inode_number = mo.group(1)
stat_dict['inode_number'] = inode_number
return stat_dict
def get_all_paths(mountpoint, dir):
"it returns paths of all files and diretories"
paths = []
with cd(mountpoint):
cmd = ['find', dir]
proc = subprocess.Popen(cmd, stdout = subprocess.PIPE)
for line in proc.stdout:
paths.append(line.replace("\n", ""))
proc.wait()
return paths
def isfilefrag_ext_line(line):
if 'Filesystem' in line or \
'blocksize' in line or \
('logical' in line and 'length' in line) or\
('extent' in line and 'found' in line):
return False
else:
return True
def filefrag(filepath):
cmd = ["filefrag", "-sv", filepath]
#print cmd
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)
df_ext = dataframe.DataFrame()
header = ["Level_index", "Max_level",
"Entry_index", "N_Entry",
"Logical_start", "Logical_end",
"Physical_start", "Physical_end",
"Length", "Flag", "filepath"]
df_ext.header = header
#ext logical physical expected length flags
#0 0 1545 12 merged
for line in proc.stdout:
if isfilefrag_ext_line(line):
items = line.split()
# at least 4 items: some lines omit both the
# 'expected' and 'flags' columns
assert len(items) >= 4, line
if len(items) == 5 or len(items) == 4:
items.insert(3, -1)
#print items
d = {
'Level_index': 0,
'Max_level' : 0,
'Entry_index': int(items[0]),
'N_Entry' : 'NA',
'Logical_start': int(items[1]),
'Logical_end': int(items[1]) + int(items[4]),
'Physical_start': int(items[2]),
'Physical_end': int(items[2]) + int(items[4]),
'Length' : int(items[4]),
'Flag' : 'NA',
'filepath' : filepath
}
df_ext.addRowByDict(d)
#pprint.pprint(d)
#print df_ext.toStr()
proc.wait()
return df_ext
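# Illustrative sketch (added): for the sample extent line shown above,
#   "0   0   1545   12   merged"
# the parser inserts a -1 placeholder for the missing 'expected' column and
# produces a row roughly like (the path is hypothetical):
#   Entry_index=0, Logical_start=0, Logical_end=12,
#   Physical_start=1545, Physical_end=1557, Length=12, filepath='/tmp/f'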
def get_possible_cpu():
f = open("/sys/devices/system/cpu/possible", 'r')
line = f.readline()
f.close()
return line.strip()
def get_available_cpu_dirs():
"Counting dirs is more accurate than */cpu/possible, at least on emulab"
cpudirs = [name for name in glob.glob("/sys/devices/system/cpu/cpu[0-9]*") \
if os.path.isdir(name)]
return cpudirs
def get_online_cpuids():
with open('/sys/devices/system/cpu/online', 'r') as f:
line = f.readline().strip()
# assuming format of 0-2,4,6-63
items = line.split(',')
cpus = []
for item in items:
if '-' in item:
a,b = item.split('-')
a = int(a)
b = int(b)
cpus.extend(range(a, b+1))
else:
cpus.append(int(item))
return cpus
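# Illustrative note (added): for an 'online' file containing "0-2,4",
# get_online_cpuids() returns [0, 1, 2, 4]; "a-b" ranges are expanded
# inclusively and single ids are appended as-is.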
def switch_cpu(cpuid, mode):
path = "/sys/devices/system/cpu/cpu{cpuid}/online"
path = path.format(cpuid=cpuid)
modedict = {'ON':'1', 'OFF':'0'}
f = open(path, 'w')
f.write(modedict[mode])
f.flush()
f.close()
return
|
gpl-2.0
| 4,359,682,231,986,908,700
| 33.280062
| 95
| 0.512707
| false
| 3.906206
| false
| false
| false
|
isaachenrion/jets
|
src/proteins/train/validation.py
|
1
|
1461
|
import logging
import time
import torch
from src.data_ops.wrapping import unwrap
from ..loss import loss
def half_and_half(a,b):
a = torch.stack([torch.triu(x) for x in a], 0)
b = torch.stack([torch.tril(x, diagonal=-1) for x in b], 0)
return a + b
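# Illustrative sketch (added; the tensors are made up): half_and_half keeps
# the upper triangle (including the diagonal) of `a` and the strict lower
# triangle of `b`, so for batched 2x2 inputs
#   a = [[1, 2], [3, 4]],  b = [[5, 6], [7, 8]]
# the result is [[1, 2], [7, 4]].
def _half_and_half_example():
    a = torch.tensor([[[1., 2.], [3., 4.]]])
    b = torch.tensor([[[5., 6.], [7., 8.]]])
    return half_and_half(a, b)  # tensor([[[1., 2.], [7., 4.]]])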
def validation(model, data_loader):
t_valid = time.time()
model.eval()
valid_loss = 0.
yy, yy_pred = [], []
half = []
mask = []
hard_pred = []
for i, batch in enumerate(data_loader):
(x, y, y_mask, batch_mask) = batch
y_pred = model(x, mask=batch_mask)
vl = loss(y_pred, y, y_mask, batch_mask)
valid_loss = valid_loss + float(unwrap(vl))
yy.append(unwrap(y))
yy_pred.append(unwrap(y_pred))
mask.append(unwrap(batch_mask))
half.append(unwrap(half_and_half(y, y_pred)))
hard_pred.append(unwrap(half_and_half(y, (y_pred > 0.5).float())))
del y; del y_pred; del y_mask; del x; del batch_mask; del batch
valid_loss /= len(data_loader)
#grads = torch.cat([p.grad.view(-1) for p in model.parameters() if p.grad is not None], 0)
logdict = dict(
yy=yy,
yy_pred=yy_pred,
half=half,
hard_pred=hard_pred,
mask=mask,
valid_loss=valid_loss,
model=model,
#grads=grads,
)
model.train()
t1=time.time()
logging.info("Validation took {:.1f} seconds".format(time.time() - t_valid))
return logdict
|
bsd-3-clause
| -3,322,496,645,558,040,600
| 23.762712
| 94
| 0.578371
| false
| 3.069328
| false
| false
| false
|
byteface/sing
|
core/PyPal.py
|
1
|
16532
|
"""
PyPal.py
@author: byteface
"""
class PyPal(object):
"""
PyPal is the heart for all pypals :)
"""
# TODO - tell command to pass messages to other pypals. non conflicting. saves having to quit out of current one
# TODO - list commands
# TODO - learn from. quick command to copy commands between pypals may be useful. save moving between dirs and copying
# memory? - obj with funcitons for loading data etc.
# dictionary that stores object from _meta.json
o = None
# TODO - if current context is gone should be able to go through history
# MULTIPLE CONTEXT OBJECT MAY NEED TO EXISTS. searching for relevant ones is a requirement
context=None
# TODO - should every statement carry certainty? for now maybe store a number 0-1 on here?
#certainty=0
# TODO third person, you, actor???... you can 'be' another person
#perspective={}
# the natural language processing engine. eventually will live on a brain object
nlp=None # TODO - should be an array
# natural language generation. used for output
nlg=None # TODO - as above
def __init__(self,data):
"""
data param is obj with unique name. i.e {'name':'pypal'}
"""
import json
with open("bin/%s/_meta.json" % data['name']) as json_file:
self.o = json.load(json_file)['object']
# TODO - externalise the class
self.nlp=NLP( self )
# TODO - externalise the class
self.nlg=NLG( self )
#self.context=Context( [self], [self] ) # talk to self
def introduce(self):
"""
introduce - when a pypal is first created this is what it says
"""
self.nlg.say( "Hi my name is %s, Thankyou for creating me!" % self.o['name'] )
self.listen()
def welcome(self):
"""
welcome - whenever you init a pypal
"""
self.nlg.say( "%s I see you have returned!" % self.o['friend'] )
# TODO - display stats?
self.listen()
# TODO - listen should really be an open stream; at the moment this is just a friend channel.
# TODO - create channels for pypal>pyal comms
# TODO - event should be created
# TODO - should be having thoughts
def listen(self):
# NOTE - listen currently considers it to be friend who is talking
#self_obj = self.nlp.runWordAsFunction( 'bin/%s/brain/perception/self/' % self.o['name'], self.o['name'] )
#friend_obj = self.nlp.runWordAsFunction( 'bin/%s/brain/perception/physical/animal/human/' % self.o['name'], self.o['friend'] )
#friend_obj={}
try:
# THIS IS APPARENTLY A nix-ONLY SOLUTION FOR AUTO PROCESSING
# IT WILL TIME OUT IF THERE IS NO INPUT RESPONSE AND RUN AUTOMATIONS
# steps towards automation. I looked at using multiprocessing and threads but none can stop a raw_input
# for now I'm doing it this way just as I'm building some content bots and need it sorted
# the timeout for automation
#import signal
#signal.signal(signal.SIGALRM, self.automate)
#signal.alarm(10)
#from threading import Timer
#t=Timer(10,self.automate)
#t.start()
self.nlg.say( "I am listening..." )
import sys
from select import select
# TODO - keys presses should reset the timeout
timeout = 10000 # TODO - add to a pypal config?? - make timeout longer. for testing type automate. have flag/config for autobots?
rlist, _, _ = select([sys.stdin], [], [], timeout)
if rlist:
s = sys.stdin.readline().strip()
self.process(s)
self.listen()
else:
self.nlg.say( "No input. Automating..." ) # TODO - just run as bg proccess
self.automate()
self.listen()
return
# NOTE - DOESNT RUN ANYMORE
# NOTE - old way. preserving here for now until figure this all out
#self.nlg.say( "I am listening..." )
#information = raw_input("> ")
#self.process( information )
# TODO - parallel process for automation whilst listening?
except:
self.nlg.log( "FAIL :::::listen" )
def automate(self,*args,**kwargs):
"""
automate is a super simple task runner
it executes tasks listed in brain/automation/tasks.json
"""
self.nlg.log( "automate" )
try:
# add and run automation.py
path = 'bin/%s/brain/automation' % self.o['name']
import sys
sys.path.append( path )
task_runner = __import__( 'automate' )
task_runner.run(self,path)
except Exception:
self.nlg.log( "AUTOMATE FAIL!" )
# TODO - what happens when automations are complete? currently returns to listening
#self.listen()
return
history=[] # TODO - should the history go on the context obj?
## TODO - this should be a HEAR function and should process chunks
# TODO - this is something that would also be good to parallel process, deciding which streams of information to listen to or ignore
def process(self,information,caller=None,callee=None):
self.context=Context( self, information ) # FOR NOW JUST FOR STORING PATHS
self.history.append(information)
# update the context object
#self.context=Context( [caller], [callee] )
# bust all into words, squash whitespace
words = information.split(None)
# if it's a one-word answer. some single-letter helpers/shortcuts
if len(words)==1:
# added a repeat function
if information == 'r':
print self.history[len(self.history)-2]
self.process( self.history[len(self.history)-2] )
return
# show command history
if information == 'h':
for h in self.history:
print h
return
# TODO - some more 1 key helpers
# 'r' - repeat last command
# 'h' - history
# 'c' - show all available commands in pypal
self.nlp.processOneWord( information )
#self.listen()
return
self.nlp.processSentence( information )
#self.listen()
return
# TODO - need to ask meaning of words. to at least put it into memory for considering
# should also be able to check dictionary / nltk sources. but needs to build a program for the word
def ask_word_meaning(self,word):
self.nlp.say( "What is '%s'?" % word )
answer = raw_input("> ")
# TODO - NO - should probably be processing the response
self.nlp.addNewWord( word, answer )
# when the bot is not active could explore data sources
# using a decorator pattern for behaviours on data
# def explore(self):
# TODO - let bot decide/choose which data source to consume
# TODO - can bot find new data sources? from interaction with other bots
# TODO - to begin with will attempt to build knowledge graph data sets
# from webpages/ relational object maps from text
# can also explore things in the world
# theres various ways of determining what to explore in the world
# TODO - create a discover function?...
# do this by going into unknown. i.e. inventing urls to read.
# figure out how to chain commands?
# how to 'think of something to do'
# def spawn(self):
# def merge(self,pypal):
# the first job of the context object is to store caller, callee information
# Who is talking and who are they talking to
# NOTE / TODO - this may evolve with time
class Context(object):
"""
Context still to be fully defined.
Will hold things like conversation history and caller/callee information and is used to aid comprehension
not just personable but subject context
i.e. if i say show list, then add to list should add to the one ive shown
hmmmm caller callee is perspective and incorrectly stubbed here. probably why i removed
the implementation. unless perspective is an object that also resides in a context object?
NOW GIVES SOME PATH INFO. can access in a command like
o.context.COMMAND_PATH
Also forces the app into running 1 command at a time, which is good, as that's how a brain kind of works.
You could probably still spin threads in commands if required, but with context we have 1 train of thought, which is the running command.
"""
# both can be lists of animals
caller=None
callee=None
# useful for commands to know where they are loading from
# so can dump/store stuff there directly when scraping etc.
#BASEPATH = './bin/%s/brain/commands/add/grades/files3' % o.o['name']
#BASEPATH = './bin/%s/brain/commands/add/grades/files3' % o.o['name']
BASEPATH=''
# TODO - may move these 3 onto the NLP. and make context more of a caretaker pattern extended to surmise and store sessions.
LAST_COMMAND =''
COMMAND_PATH =''
#PARAMS=''
def __init__(self, parent, command, caller=None,callee=None):
# self.caller=caller
# self.callee=callee
self.BASEPATH = './bin/%s/brain/commands' % parent.o['name']
self.LAST_COMMAND = command
path = '/'.join( self.LAST_COMMAND.split(' ') )
file = '_'.join( self.LAST_COMMAND.split(' ') ) + '.py'
#self.COMMAND_PATH = '%s/%s/%s' % ( self.BASEPATH, path, file )
self.COMMAND_PATH = '%s/%s' % ( self.BASEPATH, path )
#self.PARAMS='' # NOTE - gets updated once string is parsed
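# Illustrative note (added; the pypal name and command are hypothetical):
# for a pypal named "demo" and the command "add grades files3 hello",
# Context sets
#   LAST_COMMAND = "add grades files3 hello"
#   COMMAND_PATH = "./bin/demo/brain/commands/add/grades/files3/hello"
# and NLP.processSentence() later strips the trailing params ("hello")
# back off COMMAND_PATH before running the command.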
class NLP(object):
"""
NLP are processes for word parsing. Generating functions for words.
Essentially a custom module loader
"""
owner=None
# TODO -
#TIME
#PLACE
#BASEPATH = './bin/%s/brain/commands/add/grades/files3' % o.o['name']
def __init__(self,owner):
self.owner=owner
def addNewWord( self, word, answer ):
# TODO - addNewWord. store what friend thinks/says it is
return
def processOneWord( self, word ):
"""
parse single word commands.
basically runs a command from the command folder
"""
#TODO - check that the word is clean
#TODO - see if we know the word
#TODO - deal with innuendo
#TODO - lemmatiser maybe required. or are we re-routing manually?
knows_word=False;
c_path = 'bin/%s/brain/commands' % self.owner.o['name']
if self.has_command(c_path+"/"+word+"/"+word+".py"):
self.owner.nlg.log( "command detected" )
knows_word=True
return self.runWordAsFunction( c_path, word )
if knows_word == False:
self.owner.nlg.say( "I don't yet know that word" )
# now that we know input function we can run it..
# TODO - should probably create new problem though and process that way
# TODO - all the meta properties need updating
def runWordAsFunction(self,path,word):
import sys
sys.path.append( "%s/%s" % (path,word) )
try:
# TODO - check and update all the meta props
command_module = __import__( word )
reload(command_module) # reload class without restarting pypal
return command_module.run(self.owner)
except Exception, e:
self.owner.nlg.say( "Sorry, I can't do that, I tried but it didn't work" )
self.owner.nlg.log( "CHECK YOUR VIRUTAL ENVIRONMENT IS RUNNING." )
pass
# TODO - try to find the finite verb
# NOTE - AT THE MOMENT ONLY PROCESSING COMMANDS
def processSentence( self, sentence ):
# print "processSentence"
words = sentence.split(None)
word_count = len(words)
basepath = 'bin/%s/brain/commands' % self.owner.o['name']
word_path_arr=[]
# walk up the sentence
for word in words:
root = basepath+"/"+'/'.join(word_path_arr)
has_path = self.has_path( root +"/"+ word )
# if next word is the last word. check for a command and run it without params.
if (len(word_path_arr)+1)==word_count:
path = root+"/"+word
function = '_'.join( word_path_arr ) + "_" + word
if self.has_command(path+"/"+function+".py"):
return self.runSentenceAsFunction( path, function )
# if nowhere to go. but there's a command at current path. run it and pass the rest as param
if (False==has_path):
function = '_'.join( word_path_arr )
if self.has_command(root+"/"+function+".py"):
# get params by removing where we were up to
params = sentence.replace( ' '.join( word_path_arr ), '' )
# REMOVE THE WHITE SPACE FROM START OF PARAMS
params = params[1:]
# TODO - note. i see i built up to path to strip param. problem here is param is on the command_path. and doesn't get parsed off until here. during execution.
# TODO - will have a rethink about how want context to work before changing this. so for now will operate on the context obj here
# TODO - when doing change, nlp ref should probs get given to context. or context keeps them all in array.
self.owner.context.COMMAND_PATH = self.owner.context.COMMAND_PATH.replace( params, '' )
#self.owner.context.PARAMS = params
# TODO - throw error if no param is passed
if params == None or params == '':
print 'ERROR: parameter expected, none received'
# run the function
return self.runSentenceAsFunction( root, function, params )
else:
break
word_path_arr.append(word)
# TODO - if no command, attempt generating a response from the self-compiled programs.
# TODO - integrate memory, world states, schemas and emotions
# A LAD is a KAD : cognitive learning
return self.owner.nlg.say( "No command found" )
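# Illustrative walk-through (added; the paths are hypothetical): given the
# sentence "add grades 95", processSentence() walks
#   .../commands/add -> .../commands/add/grades
# and, finding no "95" sub-folder, runs add_grades.py from
# .../commands/add/grades with params "95".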
# params at the moment are 'rest of string'
# long term might break around finite verb and pass whole string?
def runSentenceAsFunction(self,path,function,params=None):
#print "runSentenceAsFunction"
#print path, function, params
import sys
sys.path.append( path )
try:
# TODO - check all the meta props
# TODO - may need to also write to some of the meta
# TODO - if no meta create a default one
command_module = __import__( function )
reload(command_module) # reload class without restarting pypal
if(params!=None):
return command_module.run(self.owner,params)
else:
return command_module.run(self.owner)
pass
except Exception, e:
self.owner.nlg.log( "runSentenceAsFunction FAIL!! \
\n happens when : \
\n failing code in the command. i.e. imports used by the command not installed \
\n venv not running \
\n not passing params when required" )
return False
#self.owner.listen()
pass
# run several possibilities. decide which is most relevant?
# the listener has to suppose an ontological truth in each word as they hear it
# when that doesn't happen even over sets of words things have to be considered
# and find more context or information. even lead to questioning
def suppose():
pass
## ---------------------------- NLP LANGUGAGE UTILS -----------------------------------
# check a lookup table of yes words. program needs to be able to expand that list
# TODO - if one word use lookup, else use NLTK sentimement tool
# NOTE - false DOES NOT MEAN it is negative, it could be neutral
def is_string_positive( s ):
pass
# check a lookup table of no words. program needs to be able to expand that list
# TODO - if one word use lookup, else use NLTK sentimement tool
# NOTE - false DOES NOT MEAN it is positive, it could be neutral
def is_string_negative( s ):
pass
# check a lookup table of
# TODO - building lookup tables on the fly is something we need to do
# RETURN THE NUMBER OR WORD FALSE
def is_string_number( s ):
# TODO - check if NLTK can do this
pass
def is_math_operator():
# TODO - check if NLTK can do this
pass
## ---------------------------- NLP FILE UTILS -----------------------------------
# TODO - may get rid of this lookup and have root words as delegators
def hasParams( self, path, word ):
"""
check if parameters True
"""
try:
#TODO - should just check if the folder has a param folder
import Program
program = Program.Program( path, word );
canHasParams = program.meta.get_property( 'rules', 'parameters' );
return canHasParams
except:
print "no meta or param found"
return False # force false if passing a non-command. TODO - BUT we shouldn't be calling this if that's the case.
def has_path( self, path_to_directory ):
import os.path
return os.path.isdir(path_to_directory)
def has_command(self, path_to_py_file):
import os.path
return os.path.isfile(path_to_py_file)
class NLG(object):
"""
NLG - generates sentences in the natural language
at moment just logs strings to output.
from now on all output should come through here
"""
owner=None
def __init__(self,owner):
self.owner=owner
def say( self, words ):
"""
output helps distinguish pypals when in the console
"""
print "%s : %s" % ( self.owner.o['name'], words )
return
# TODO - setup python logger
# TODO - pass ref to pypal?
# TODO - logs should write to a file and be accessible by events. i.e. evt12345 - created variable xxx
def log( self, words ):
"""
log differs to the 'say' method.
log should be more about debugging.
say should be user comms
"""
return # NOTE <<<<<<<<<<<<<<<<<<<<<< I'm not running
# TOOD - if debug is true
import logging
logging.warning( "------------------------------------- %s : %s" % ( self.owner.o['name'], words ) )
return
|
gpl-2.0
| 2,327,574,827,839,849,000
| 27.259829
| 163
| 0.67578
| false
| 3.362897
| false
| false
| false
|
gfyoung/numpy
|
numpy/lib/twodim_base.py
|
2
|
27180
|
""" Basic functions for manipulating 2d arrays
"""
from __future__ import division, absolute_import, print_function
import functools
from numpy.core.numeric import (
absolute, asanyarray, arange, zeros, greater_equal, multiply, ones,
asarray, where, int8, int16, int32, int64, empty, promote_types, diagonal,
nonzero
)
from numpy.core import overrides
from numpy.core import iinfo, transpose
__all__ = [
'diag', 'diagflat', 'eye', 'fliplr', 'flipud', 'tri', 'triu',
'tril', 'vander', 'histogram2d', 'mask_indices', 'tril_indices',
'tril_indices_from', 'triu_indices', 'triu_indices_from', ]
array_function_dispatch = functools.partial(
overrides.array_function_dispatch, module='numpy')
i1 = iinfo(int8)
i2 = iinfo(int16)
i4 = iinfo(int32)
def _min_int(low, high):
""" get small int that fits the range """
if high <= i1.max and low >= i1.min:
return int8
if high <= i2.max and low >= i2.min:
return int16
if high <= i4.max and low >= i4.min:
return int32
return int64
def _flip_dispatcher(m):
return (m,)
@array_function_dispatch(_flip_dispatcher)
def fliplr(m):
"""
Flip array in the left/right direction.
Flip the entries in each row in the left/right direction.
Columns are preserved, but appear in a different order than before.
Parameters
----------
m : array_like
Input array, must be at least 2-D.
Returns
-------
f : ndarray
A view of `m` with the columns reversed. Since a view
is returned, this operation is :math:`\\mathcal O(1)`.
See Also
--------
flipud : Flip array in the up/down direction.
rot90 : Rotate array counterclockwise.
Notes
-----
Equivalent to m[:,::-1]. Requires the array to be at least 2-D.
Examples
--------
>>> A = np.diag([1.,2.,3.])
>>> A
array([[ 1., 0., 0.],
[ 0., 2., 0.],
[ 0., 0., 3.]])
>>> np.fliplr(A)
array([[ 0., 0., 1.],
[ 0., 2., 0.],
[ 3., 0., 0.]])
>>> A = np.random.randn(2,3,5)
>>> np.all(np.fliplr(A) == A[:,::-1,...])
True
"""
m = asanyarray(m)
if m.ndim < 2:
raise ValueError("Input must be >= 2-d.")
return m[:, ::-1]
@array_function_dispatch(_flip_dispatcher)
def flipud(m):
"""
Flip array in the up/down direction.
Flip the entries in each column in the up/down direction.
Rows are preserved, but appear in a different order than before.
Parameters
----------
m : array_like
Input array.
Returns
-------
out : array_like
A view of `m` with the rows reversed. Since a view is
returned, this operation is :math:`\\mathcal O(1)`.
See Also
--------
fliplr : Flip array in the left/right direction.
rot90 : Rotate array counterclockwise.
Notes
-----
Equivalent to ``m[::-1,...]``.
Does not require the array to be two-dimensional.
Examples
--------
>>> A = np.diag([1.0, 2, 3])
>>> A
array([[ 1., 0., 0.],
[ 0., 2., 0.],
[ 0., 0., 3.]])
>>> np.flipud(A)
array([[ 0., 0., 3.],
[ 0., 2., 0.],
[ 1., 0., 0.]])
>>> A = np.random.randn(2,3,5)
>>> np.all(np.flipud(A) == A[::-1,...])
True
>>> np.flipud([1,2])
array([2, 1])
"""
m = asanyarray(m)
if m.ndim < 1:
raise ValueError("Input must be >= 1-d.")
return m[::-1, ...]
def eye(N, M=None, k=0, dtype=float, order='C'):
"""
Return a 2-D array with ones on the diagonal and zeros elsewhere.
Parameters
----------
N : int
Number of rows in the output.
M : int, optional
Number of columns in the output. If None, defaults to `N`.
k : int, optional
Index of the diagonal: 0 (the default) refers to the main diagonal,
a positive value refers to an upper diagonal, and a negative value
to a lower diagonal.
dtype : data-type, optional
Data-type of the returned array.
order : {'C', 'F'}, optional
Whether the output should be stored in row-major (C-style) or
column-major (Fortran-style) order in memory.
.. versionadded:: 1.14.0
Returns
-------
I : ndarray of shape (N,M)
An array where all elements are equal to zero, except for the `k`-th
diagonal, whose values are equal to one.
See Also
--------
identity : (almost) equivalent function
diag : diagonal 2-D array from a 1-D array specified by the user.
Examples
--------
>>> np.eye(2, dtype=int)
array([[1, 0],
[0, 1]])
>>> np.eye(3, k=1)
array([[ 0., 1., 0.],
[ 0., 0., 1.],
[ 0., 0., 0.]])
"""
if M is None:
M = N
m = zeros((N, M), dtype=dtype, order=order)
if k >= M:
return m
if k >= 0:
i = k
else:
i = (-k) * M
m[:M-k].flat[i::M+1] = 1
return m
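# Illustrative note (added): the flat-index assignment above fills the k-th
# diagonal by striding through the flattened array; e.g. for eye(3, 4, k=1)
# it starts at flat index i = 1 and writes every M+1 = 5th element of the
# first M-k rows, hitting positions (0,1), (1,2) and (2,3).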
def _diag_dispatcher(v, k=None):
return (v,)
@array_function_dispatch(_diag_dispatcher)
def diag(v, k=0):
"""
Extract a diagonal or construct a diagonal array.
See the more detailed documentation for ``numpy.diagonal`` if you use this
function to extract a diagonal and wish to write to the resulting array;
whether it returns a copy or a view depends on what version of numpy you
are using.
Parameters
----------
v : array_like
If `v` is a 2-D array, return a copy of its `k`-th diagonal.
If `v` is a 1-D array, return a 2-D array with `v` on the `k`-th
diagonal.
k : int, optional
Diagonal in question. The default is 0. Use `k>0` for diagonals
above the main diagonal, and `k<0` for diagonals below the main
diagonal.
Returns
-------
out : ndarray
The extracted diagonal or constructed diagonal array.
See Also
--------
diagonal : Return specified diagonals.
diagflat : Create a 2-D array with the flattened input as a diagonal.
trace : Sum along diagonals.
triu : Upper triangle of an array.
tril : Lower triangle of an array.
Examples
--------
>>> x = np.arange(9).reshape((3,3))
>>> x
array([[0, 1, 2],
[3, 4, 5],
[6, 7, 8]])
>>> np.diag(x)
array([0, 4, 8])
>>> np.diag(x, k=1)
array([1, 5])
>>> np.diag(x, k=-1)
array([3, 7])
>>> np.diag(np.diag(x))
array([[0, 0, 0],
[0, 4, 0],
[0, 0, 8]])
"""
v = asanyarray(v)
s = v.shape
if len(s) == 1:
n = s[0]+abs(k)
res = zeros((n, n), v.dtype)
if k >= 0:
i = k
else:
i = (-k) * n
res[:n-k].flat[i::n+1] = v
return res
elif len(s) == 2:
return diagonal(v, k)
else:
raise ValueError("Input must be 1- or 2-d.")
@array_function_dispatch(_diag_dispatcher)
def diagflat(v, k=0):
"""
Create a two-dimensional array with the flattened input as a diagonal.
Parameters
----------
v : array_like
Input data, which is flattened and set as the `k`-th
diagonal of the output.
k : int, optional
Diagonal to set; 0, the default, corresponds to the "main" diagonal,
a positive (negative) `k` giving the number of the diagonal above
(below) the main.
Returns
-------
out : ndarray
The 2-D output array.
See Also
--------
diag : MATLAB work-alike for 1-D and 2-D arrays.
diagonal : Return specified diagonals.
trace : Sum along diagonals.
Examples
--------
>>> np.diagflat([[1,2], [3,4]])
array([[1, 0, 0, 0],
[0, 2, 0, 0],
[0, 0, 3, 0],
[0, 0, 0, 4]])
>>> np.diagflat([1,2], 1)
array([[0, 1, 0],
[0, 0, 2],
[0, 0, 0]])
"""
try:
wrap = v.__array_wrap__
except AttributeError:
wrap = None
v = asarray(v).ravel()
s = len(v)
n = s + abs(k)
res = zeros((n, n), v.dtype)
if (k >= 0):
i = arange(0, n-k)
fi = i+k+i*n
else:
i = arange(0, n+k)
fi = i+(i-k)*n
res.flat[fi] = v
if not wrap:
return res
return wrap(res)
def tri(N, M=None, k=0, dtype=float):
"""
An array with ones at and below the given diagonal and zeros elsewhere.
Parameters
----------
N : int
Number of rows in the array.
M : int, optional
Number of columns in the array.
By default, `M` is taken equal to `N`.
k : int, optional
The sub-diagonal at and below which the array is filled.
`k` = 0 is the main diagonal, while `k` < 0 is below it,
and `k` > 0 is above. The default is 0.
dtype : dtype, optional
Data type of the returned array. The default is float.
Returns
-------
tri : ndarray of shape (N, M)
Array with its lower triangle filled with ones and zero elsewhere;
in other words ``T[i,j] == 1`` for ``j <= i + k``, 0 otherwise.
Examples
--------
>>> np.tri(3, 5, 2, dtype=int)
array([[1, 1, 1, 0, 0],
[1, 1, 1, 1, 0],
[1, 1, 1, 1, 1]])
>>> np.tri(3, 5, -1)
array([[ 0., 0., 0., 0., 0.],
[ 1., 0., 0., 0., 0.],
[ 1., 1., 0., 0., 0.]])
"""
if M is None:
M = N
m = greater_equal.outer(arange(N, dtype=_min_int(0, N)),
arange(-k, M-k, dtype=_min_int(-k, M - k)))
# Avoid making a copy if the requested type is already bool
m = m.astype(dtype, copy=False)
return m
def _trilu_dispatcher(m, k=None):
return (m,)
@array_function_dispatch(_trilu_dispatcher)
def tril(m, k=0):
"""
Lower triangle of an array.
Return a copy of an array with elements above the `k`-th diagonal zeroed.
Parameters
----------
m : array_like, shape (M, N)
Input array.
k : int, optional
Diagonal above which to zero elements. `k = 0` (the default) is the
main diagonal, `k < 0` is below it and `k > 0` is above.
Returns
-------
tril : ndarray, shape (M, N)
Lower triangle of `m`, of same shape and data-type as `m`.
See Also
--------
triu : same thing, only for the upper triangle
Examples
--------
>>> np.tril([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1)
array([[ 0, 0, 0],
[ 4, 0, 0],
[ 7, 8, 0],
[10, 11, 12]])
"""
m = asanyarray(m)
mask = tri(*m.shape[-2:], k=k, dtype=bool)
return where(mask, m, zeros(1, m.dtype))
@array_function_dispatch(_trilu_dispatcher)
def triu(m, k=0):
"""
Upper triangle of an array.
Return a copy of a matrix with the elements below the `k`-th diagonal
zeroed.
Please refer to the documentation for `tril` for further details.
See Also
--------
tril : lower triangle of an array
Examples
--------
>>> np.triu([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1)
array([[ 1, 2, 3],
[ 4, 5, 6],
[ 0, 8, 9],
[ 0, 0, 12]])
"""
m = asanyarray(m)
mask = tri(*m.shape[-2:], k=k-1, dtype=bool)
return where(mask, zeros(1, m.dtype), m)
def _vander_dispatcher(x, N=None, increasing=None):
return (x,)
# Originally borrowed from John Hunter and matplotlib
@array_function_dispatch(_vander_dispatcher)
def vander(x, N=None, increasing=False):
"""
Generate a Vandermonde matrix.
The columns of the output matrix are powers of the input vector. The
order of the powers is determined by the `increasing` boolean argument.
Specifically, when `increasing` is False, the `i`-th output column is
the input vector raised element-wise to the power of ``N - i - 1``. Such
a matrix with a geometric progression in each row is named for Alexandre-
Theophile Vandermonde.
Parameters
----------
x : array_like
1-D input array.
N : int, optional
Number of columns in the output. If `N` is not specified, a square
array is returned (``N = len(x)``).
increasing : bool, optional
Order of the powers of the columns. If True, the powers increase
from left to right, if False (the default) they are reversed.
.. versionadded:: 1.9.0
Returns
-------
out : ndarray
Vandermonde matrix. If `increasing` is False, the first column is
``x^(N-1)``, the second ``x^(N-2)`` and so forth. If `increasing` is
True, the columns are ``x^0, x^1, ..., x^(N-1)``.
See Also
--------
polynomial.polynomial.polyvander
Examples
--------
>>> x = np.array([1, 2, 3, 5])
>>> N = 3
>>> np.vander(x, N)
array([[ 1, 1, 1],
[ 4, 2, 1],
[ 9, 3, 1],
[25, 5, 1]])
>>> np.column_stack([x**(N-1-i) for i in range(N)])
array([[ 1, 1, 1],
[ 4, 2, 1],
[ 9, 3, 1],
[25, 5, 1]])
>>> x = np.array([1, 2, 3, 5])
>>> np.vander(x)
array([[ 1, 1, 1, 1],
[ 8, 4, 2, 1],
[ 27, 9, 3, 1],
[125, 25, 5, 1]])
>>> np.vander(x, increasing=True)
array([[ 1, 1, 1, 1],
[ 1, 2, 4, 8],
[ 1, 3, 9, 27],
[ 1, 5, 25, 125]])
The determinant of a square Vandermonde matrix is the product
of the differences between the values of the input vector:
>>> np.linalg.det(np.vander(x))
48.000000000000043
>>> (5-3)*(5-2)*(5-1)*(3-2)*(3-1)*(2-1)
48
"""
x = asarray(x)
if x.ndim != 1:
raise ValueError("x must be a one-dimensional array or sequence.")
if N is None:
N = len(x)
v = empty((len(x), N), dtype=promote_types(x.dtype, int))
tmp = v[:, ::-1] if not increasing else v
if N > 0:
tmp[:, 0] = 1
if N > 1:
tmp[:, 1:] = x[:, None]
multiply.accumulate(tmp[:, 1:], out=tmp[:, 1:], axis=1)
return v
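# Illustrative note (added): the accumulate step above turns the repeated
# column x into successive powers; e.g. for x = [1, 2, 3], increasing=True
# and N = 3, tmp starts as
#   [[1, 1, 1], [1, 2, 2], [1, 3, 3]]
# and multiply.accumulate along axis 1 yields
#   [[1, 1, 1], [1, 2, 4], [1, 3, 9]]   (columns x**0, x**1, x**2).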
def _histogram2d_dispatcher(x, y, bins=None, range=None, normed=None,
weights=None, density=None):
return (x, y, bins, weights)
@array_function_dispatch(_histogram2d_dispatcher)
def histogram2d(x, y, bins=10, range=None, normed=None, weights=None,
density=None):
"""
Compute the bi-dimensional histogram of two data samples.
Parameters
----------
x : array_like, shape (N,)
An array containing the x coordinates of the points to be
histogrammed.
y : array_like, shape (N,)
An array containing the y coordinates of the points to be
histogrammed.
bins : int or array_like or [int, int] or [array, array], optional
The bin specification:
* If int, the number of bins for the two dimensions (nx=ny=bins).
* If array_like, the bin edges for the two dimensions
(x_edges=y_edges=bins).
* If [int, int], the number of bins in each dimension
(nx, ny = bins).
* If [array, array], the bin edges in each dimension
(x_edges, y_edges = bins).
* A combination [int, array] or [array, int], where int
is the number of bins and array is the bin edges.
range : array_like, shape(2,2), optional
The leftmost and rightmost edges of the bins along each dimension
(if not specified explicitly in the `bins` parameters):
``[[xmin, xmax], [ymin, ymax]]``. All values outside of this range
will be considered outliers and not tallied in the histogram.
density : bool, optional
If False, the default, returns the number of samples in each bin.
If True, returns the probability *density* function at the bin,
``bin_count / sample_count / bin_area``.
normed : bool, optional
An alias for the density argument that behaves identically. To avoid
confusion with the broken normed argument to `histogram`, `density`
should be preferred.
weights : array_like, shape(N,), optional
An array of values ``w_i`` weighing each sample ``(x_i, y_i)``.
Weights are normalized to 1 if `normed` is True. If `normed` is
False, the values of the returned histogram are equal to the sum of
the weights belonging to the samples falling into each bin.
Returns
-------
H : ndarray, shape(nx, ny)
The bi-dimensional histogram of samples `x` and `y`. Values in `x`
are histogrammed along the first dimension and values in `y` are
histogrammed along the second dimension.
xedges : ndarray, shape(nx+1,)
The bin edges along the first dimension.
yedges : ndarray, shape(ny+1,)
The bin edges along the second dimension.
See Also
--------
histogram : 1D histogram
histogramdd : Multidimensional histogram
Notes
-----
When `normed` is True, then the returned histogram is the sample
density, defined such that the sum over bins of the product
``bin_value * bin_area`` is 1.
Please note that the histogram does not follow the Cartesian convention
where `x` values are on the abscissa and `y` values on the ordinate
axis. Rather, `x` is histogrammed along the first dimension of the
array (vertical), and `y` along the second dimension of the array
(horizontal). This ensures compatibility with `histogramdd`.
Examples
--------
>>> import matplotlib as mpl
>>> import matplotlib.pyplot as plt
Construct a 2-D histogram with variable bin width. First define the bin
edges:
>>> xedges = [0, 1, 3, 5]
>>> yedges = [0, 2, 3, 4, 6]
Next we create a histogram H with random bin content:
>>> x = np.random.normal(2, 1, 100)
>>> y = np.random.normal(1, 1, 100)
>>> H, xedges, yedges = np.histogram2d(x, y, bins=(xedges, yedges))
>>> H = H.T # Let each row list bins with common y range.
:func:`imshow <matplotlib.pyplot.imshow>` can only display square bins:
>>> fig = plt.figure(figsize=(7, 3))
>>> ax = fig.add_subplot(131, title='imshow: square bins')
>>> plt.imshow(H, interpolation='nearest', origin='lower',
... extent=[xedges[0], xedges[-1], yedges[0], yedges[-1]])
:func:`pcolormesh <matplotlib.pyplot.pcolormesh>` can display actual edges:
>>> ax = fig.add_subplot(132, title='pcolormesh: actual edges',
... aspect='equal')
>>> X, Y = np.meshgrid(xedges, yedges)
>>> ax.pcolormesh(X, Y, H)
:class:`NonUniformImage <matplotlib.image.NonUniformImage>` can be used to
display actual bin edges with interpolation:
>>> ax = fig.add_subplot(133, title='NonUniformImage: interpolated',
... aspect='equal', xlim=xedges[[0, -1]], ylim=yedges[[0, -1]])
>>> im = mpl.image.NonUniformImage(ax, interpolation='bilinear')
>>> xcenters = (xedges[:-1] + xedges[1:]) / 2
>>> ycenters = (yedges[:-1] + yedges[1:]) / 2
>>> im.set_data(xcenters, ycenters, H)
>>> ax.images.append(im)
>>> plt.show()
"""
from numpy import histogramdd
try:
N = len(bins)
except TypeError:
N = 1
if N != 1 and N != 2:
xedges = yedges = asarray(bins)
bins = [xedges, yedges]
hist, edges = histogramdd([x, y], bins, range, normed, weights, density)
return hist, edges[0], edges[1]
def mask_indices(n, mask_func, k=0):
"""
Return the indices to access (n, n) arrays, given a masking function.
Assume `mask_func` is a function that, for a square array a of size
``(n, n)`` with a possible offset argument `k`, when called as
``mask_func(a, k)`` returns a new array with zeros in certain locations
(functions like `triu` or `tril` do precisely this). Then this function
returns the indices where the non-zero values would be located.
Parameters
----------
n : int
The returned indices will be valid to access arrays of shape (n, n).
mask_func : callable
A function whose call signature is similar to that of `triu`, `tril`.
That is, ``mask_func(x, k)`` returns a boolean array, shaped like `x`.
`k` is an optional argument to the function.
k : scalar
An optional argument which is passed through to `mask_func`. Functions
like `triu`, `tril` take a second argument that is interpreted as an
offset.
Returns
-------
indices : tuple of arrays.
The `n` arrays of indices corresponding to the locations where
``mask_func(np.ones((n, n)), k)`` is True.
See Also
--------
triu, tril, triu_indices, tril_indices
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
These are the indices that would allow you to access the upper triangular
part of any 3x3 array:
>>> iu = np.mask_indices(3, np.triu)
For example, if `a` is a 3x3 array:
>>> a = np.arange(9).reshape(3, 3)
>>> a
array([[0, 1, 2],
[3, 4, 5],
[6, 7, 8]])
>>> a[iu]
array([0, 1, 2, 4, 5, 8])
An offset can be passed also to the masking function. This gets us the
indices starting on the first diagonal right of the main one:
>>> iu1 = np.mask_indices(3, np.triu, 1)
with which we now extract only three elements:
>>> a[iu1]
array([1, 2, 5])
"""
m = ones((n, n), int)
a = mask_func(m, k)
return nonzero(a != 0)
def tril_indices(n, k=0, m=None):
"""
Return the indices for the lower-triangle of an (n, m) array.
Parameters
----------
n : int
The row dimension of the arrays for which the returned
indices will be valid.
k : int, optional
Diagonal offset (see `tril` for details).
m : int, optional
.. versionadded:: 1.9.0
The column dimension of the arrays for which the returned
arrays will be valid.
By default `m` is taken equal to `n`.
Returns
-------
inds : tuple of arrays
The indices for the triangle. The returned tuple contains two arrays,
each with the indices along one dimension of the array.
See also
--------
triu_indices : similar function, for upper-triangular.
mask_indices : generic function accepting an arbitrary mask function.
tril, triu
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
Compute two different sets of indices to access 4x4 arrays, one for the
lower triangular part starting at the main diagonal, and one starting two
diagonals further right:
>>> il1 = np.tril_indices(4)
>>> il2 = np.tril_indices(4, 2)
Here is how they can be used with a sample array:
>>> a = np.arange(16).reshape(4, 4)
>>> a
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
Both for indexing:
>>> a[il1]
array([ 0, 4, 5, 8, 9, 10, 12, 13, 14, 15])
And for assigning values:
>>> a[il1] = -1
>>> a
array([[-1, 1, 2, 3],
[-1, -1, 6, 7],
[-1, -1, -1, 11],
[-1, -1, -1, -1]])
These cover almost the whole array (two diagonals right of the main one):
>>> a[il2] = -10
>>> a
array([[-10, -10, -10, 3],
[-10, -10, -10, -10],
[-10, -10, -10, -10],
[-10, -10, -10, -10]])
"""
return nonzero(tri(n, m, k=k, dtype=bool))
def _trilu_indices_form_dispatcher(arr, k=None):
return (arr,)
@array_function_dispatch(_trilu_indices_form_dispatcher)
def tril_indices_from(arr, k=0):
"""
Return the indices for the lower-triangle of arr.
See `tril_indices` for full details.
Parameters
----------
arr : array_like
The indices will be valid for square arrays whose dimensions are
the same as arr.
k : int, optional
Diagonal offset (see `tril` for details).
See Also
--------
tril_indices, tril
Notes
-----
.. versionadded:: 1.4.0
"""
if arr.ndim != 2:
raise ValueError("input array must be 2-d")
return tril_indices(arr.shape[-2], k=k, m=arr.shape[-1])
def triu_indices(n, k=0, m=None):
"""
Return the indices for the upper-triangle of an (n, m) array.
Parameters
----------
n : int
The size of the arrays for which the returned indices will
be valid.
k : int, optional
Diagonal offset (see `triu` for details).
m : int, optional
.. versionadded:: 1.9.0
The column dimension of the arrays for which the returned
arrays will be valid.
By default `m` is taken equal to `n`.
Returns
-------
inds : tuple, shape(2) of ndarrays, shape(`n`)
The indices for the triangle. The returned tuple contains two arrays,
each with the indices along one dimension of the array. Can be used
to slice a ndarray of shape(`n`, `n`).
See also
--------
tril_indices : similar function, for lower-triangular.
mask_indices : generic function accepting an arbitrary mask function.
triu, tril
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
Compute two different sets of indices to access 4x4 arrays, one for the
upper triangular part starting at the main diagonal, and one starting two
diagonals further right:
>>> iu1 = np.triu_indices(4)
>>> iu2 = np.triu_indices(4, 2)
Here is how they can be used with a sample array:
>>> a = np.arange(16).reshape(4, 4)
>>> a
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
Both for indexing:
>>> a[iu1]
array([ 0, 1, 2, 3, 5, 6, 7, 10, 11, 15])
And for assigning values:
>>> a[iu1] = -1
>>> a
array([[-1, -1, -1, -1],
[ 4, -1, -1, -1],
[ 8, 9, -1, -1],
[12, 13, 14, -1]])
These cover only a small part of the whole array (two diagonals right
of the main one):
>>> a[iu2] = -10
>>> a
array([[ -1, -1, -10, -10],
[ 4, -1, -1, -10],
[ 8, 9, -1, -1],
[ 12, 13, 14, -1]])
"""
return nonzero(~tri(n, m, k=k-1, dtype=bool))
@array_function_dispatch(_trilu_indices_form_dispatcher)
def triu_indices_from(arr, k=0):
"""
Return the indices for the upper-triangle of arr.
See `triu_indices` for full details.
Parameters
----------
arr : ndarray, shape(N, N)
The indices will be valid for square arrays.
k : int, optional
Diagonal offset (see `triu` for details).
Returns
-------
triu_indices_from : tuple, shape(2) of ndarray, shape(N)
Indices for the upper-triangle of `arr`.
See Also
--------
triu_indices, triu
Notes
-----
.. versionadded:: 1.4.0
"""
if arr.ndim != 2:
raise ValueError("input array must be 2-d")
return triu_indices(arr.shape[-2], k=k, m=arr.shape[-1])
|
bsd-3-clause
| -2,810,006,162,861,620,700
| 26.289157
| 79
| 0.551214
| false
| 3.448363
| false
| false
| false
|
greyshell/Pen-Test
|
leetcode/factorial.py
|
1
|
1129
|
#!/usr/bin/python
# author: greyshell
"""
[+] problem description
=======================
find the factorial of a number
1) recursive solution
2) tail recursive solution
[+] reference
=============
TBD
"""
def tail_recursion_driver(n):
"""
tail recursive solution
:param n: int
:return: int
"""
return factorial_tail_recursion(n, 1) # 1 is used to start the first accumulation
def factorial_tail_recursion(n, a):
"""
written in tail-recursive form, so a compiler that performs tail-call elimination could reuse the current stack frame; note that CPython does not do this, so the stack still grows here
:param n: int
:param a: int => it accumulates the result
:return: int
"""
if n == 1 or n == 0:
return a # it carries the final result
else:
return factorial_tail_recursion(n - 1, n * a)
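# Illustrative trace (added): the accumulator carries the product down the
# call chain instead of building it up on the way back:
#   factorial_tail_recursion(4, 1)
#   -> factorial_tail_recursion(3, 4)
#   -> factorial_tail_recursion(2, 12)
#   -> factorial_tail_recursion(1, 24) -> 24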
def factorial(n):
"""
normal recursive solution
:return: int
"""
if n == 1 or n == 0: # base case for n = 0, 1
return 1
else: # recursive case when n > 1
return n * factorial(n - 1)
def main():
print tail_recursion_driver(12)
print factorial(0)
if __name__ == '__main__':
main()
|
mit
| 5,056,837,605,008,551,000
| 18.135593
| 111
| 0.581045
| false
| 3.528125
| false
| false
| false
|
timothyclemansinsea/smc
|
src/k8s/smc-hub/control.py
|
1
|
9152
|
#!/usr/bin/env python3
"""
Hub management script
"""
import os, shutil, sys, tempfile
join = os.path.join
# Boilerplate to ensure we are in the directory of this path and make the util module available.
SCRIPT_PATH = os.path.split(os.path.realpath(__file__))[0]
sys.path.insert(0, os.path.abspath(os.path.join(SCRIPT_PATH, '..', 'util')))
os.chdir(SCRIPT_PATH)
import util
# For now in all cases, we just call the container the following; really it should
# maybe be smc-webapp-static#sha1hash, which makes switching between versions easy, etc.
NAME='smc-hub'
SECRETS = os.path.abspath(join(SCRIPT_PATH, '..', '..', 'data', 'secrets'))
def build(tag, rebuild, upgrade=False, commit=None):
"""
Build Docker container by installing and building everything inside the container itself, and
NOT using ../../static/ on host.
"""
# First build smc-hub-base, which is generic install of ubuntu packages, so we should rarely
# clear the cache for this.
v = ['sudo', 'docker', 'build', '-t', '{name}-base'.format(name=NAME)]
if upgrade:
v.append("--no-cache")
v.append(".")
util.run(v, path=join(SCRIPT_PATH, 'image-base'))
# Next build smc-hub, which depends on smc-hub-base.
v = ['sudo', 'docker', 'build', '-t', tag]
if commit:
v.append("--build-arg")
v.append("commit={commit}".format(commit=commit))
if rebuild: # will cause a git pull to happen
v.append("--no-cache")
v.append('.')
util.run(v, path=join(SCRIPT_PATH,'image'))
def build_docker(args):
if args.commit:
args.tag += ('-' if args.tag else '') + args.commit[:6]
tag = util.get_tag(args, NAME)
build(tag, args.rebuild, args.upgrade, args.commit)
if not args.local:
util.gcloud_docker_push(tag)
def run_on_kubernetes(args):
if args.test:
rethink_cpu_request = hub_cpu_request = '10m'
rethink_memory_request = hub_memory_request = '200Mi'
else:
hub_cpu_request = '300m'
hub_memory_request = '1Gi'
rethink_cpu_request = '300m'
rethink_memory_request = '1Gi'
util.ensure_secret_exists('sendgrid-api-key', 'sendgrid')
util.ensure_secret_exists('zendesk-api-key', 'zendesk')
args.local = False # so tag is for gcloud
if args.replicas is None:
args.replicas = util.get_desired_replicas(NAME, 2)
tag = util.get_tag(args, NAME, build)
opts = {
'image_hub' : tag,
'replicas' : args.replicas,
'pull_policy' : util.pull_policy(args),
'min_read_seconds' : args.gentle,
'smc_db_hosts' : args.database_nodes,
'smc_db_pool' : args.database_pool_size,
'smc_db_concurrent_warn' : args.database_concurrent_warn,
'hub_cpu_request' : hub_cpu_request,
'hub_memory_request' : hub_memory_request,
'rethink_cpu_request' : rethink_cpu_request,
'rethink_memory_request' : rethink_memory_request
}
if args.database_nodes == 'localhost':
from argparse import Namespace
ns = Namespace(tag=args.rethinkdb_proxy_tag, local=False)
opts['image_rethinkdb_proxy'] = util.get_tag(ns, 'rethinkdb-proxy', build)
filename = 'smc-hub-rethinkdb-proxy.template.yaml'
else:
filename = '{name}.template.yaml'.format(name=NAME)
t = open(join('conf', filename)).read()
with tempfile.NamedTemporaryFile(suffix='.yaml', mode='w') as tmp:
r = t.format(**opts)
#print(r)
tmp.write(r)
tmp.flush()
util.update_deployment(tmp.name)
if NAME not in util.get_services():
util.run(['kubectl', 'expose', 'deployment', NAME])
def stop_on_kubernetes(args):
util.stop_deployment(NAME)
def load_secret(name, args):
path = args.path
if not os.path.exists(path):
os.makedirs(path)
if not os.path.isdir(path):
raise RuntimeError("path='{path}' must be a directory".format(path=path))
file = join(path, name)
if not os.path.exists(file):
raise RuntimeError("'{file}' must exist".format(file=file))
util.create_secret(name+'-api-key', file)
def status(args):
# Get all pod names
v = util.get_pods(run=NAME)
print("Getting last %s lines of logs from %s pods"%(args.tail, len(v)))
for x in v:
lg = util.get_logs(x['NAME'], tail=args.tail, container='smc-hub').splitlines()
blocked = concurrent = 0
for w in lg:
if 'BLOCKED for' in w: # 2016-07-07T17:39:23.159Z - debug: BLOCKED for 1925ms
b = int(w.split()[-1][:-2])
blocked = max(blocked, b)
if 'concurrent]' in w: # 2016-07-07T17:41:16.226Z - debug: [1 concurrent] ...
concurrent = max(concurrent, int(w.split()[3][1:]))
x['blocked'] = blocked
x['concurrent'] = concurrent
bad = util.run("kubectl describe pod {name} |grep Unhealthy |tail -1 ".format(name=x['NAME']), get_output=True, verbose=False).splitlines()
if len(bad) > 0:
x['unhealthy'] = bad[-1].split()[0]
else:
x['unhealthy'] = ''
print("%-30s%-12s%-12s%-12s%-12s%-12s"%('NAME', 'CONCURRENT', 'BLOCKED', 'UNHEALTHY', 'RESTARTS', 'AGE'))
for x in v:
print("%-30s%-12s%-12s%-12s%-12s%-12s"%(x['NAME'], x['concurrent'], x['blocked'], x['unhealthy'], x['RESTARTS'], x['AGE']))
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description='Control deployment of {name}'.format(name=NAME))
subparsers = parser.add_subparsers(help='sub-command help')
sub = subparsers.add_parser('build', help='build docker image')
sub.add_argument("-t", "--tag", default="", help="tag for this build")
sub.add_argument("-c", "--commit", default='',
help="build a particular sha1 commit; the commit is automatically appended to the tag")
sub.add_argument("-r", "--rebuild", action="store_true",
help="re-pull latest hub source code from git and install any dependencies")
sub.add_argument("-u", "--upgrade", action="store_true",
help="re-install the base Ubuntu packages")
sub.add_argument("-l", "--local", action="store_true",
help="only build the image locally; don't push it to gcloud docker repo")
sub.set_defaults(func=build_docker)
sub = subparsers.add_parser('run', help='create/update {name} deployment on the currently selected kubernetes cluster'.format(name=NAME))
sub.add_argument("-t", "--tag", default="", help="tag of the image to run")
sub.add_argument("-r", "--replicas", default=None, help="number of replicas")
sub.add_argument("-f", "--force", action="store_true", help="force reload image in k8s")
sub.add_argument("-g", "--gentle", default=30, type=int,
help="how gentle to be in doing the rolling update; in particular, will wait about this many seconds after each pod starts up (default: 30)")
sub.add_argument("-d", "--database-nodes", default='localhost', type=str, help="database to connect to. If 'localhost' (the default), will run a local rethindkb proxy that is itself pointed at the rethinkdb-cluster service; if 'rethinkdb-proxy' will use that service.")
sub.add_argument("-p", "--database-pool-size", default=50, type=int, help="size of database connection pool")
sub.add_argument("--database-concurrent-warn", default=300, type=int, help="if this many concurrent queries for sustained time, kill container")
sub.add_argument("--rethinkdb-proxy-tag", default="", help="tag of rethinkdb-proxy image to run")
sub.add_argument("--test", action="store_true", help="using for testing so make very minimal resource requirements")
sub.set_defaults(func=run_on_kubernetes)
sub = subparsers.add_parser('delete', help='delete the deployment')
sub.set_defaults(func=stop_on_kubernetes)
sub = subparsers.add_parser('load-sendgrid', help='load the sendgrid password into k8s from disk',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
sub.add_argument('path', type=str, help='path to directory that contains the password in a file named "sendgrid"')
sub.set_defaults(func=lambda args: load_secret('sendgrid',args))
sub = subparsers.add_parser('load-zendesk', help='load the zendesk password into k8s from disk',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
sub.add_argument('path', type=str, help='path to directory that contains the password in a file named "zendesk"')
sub.set_defaults(func=lambda args: load_secret('zendesk',args))
util.add_deployment_parsers(NAME, subparsers, default_container='smc-hub')
sub = subparsers.add_parser('status', help='display status info about concurrent and blocked, based on recent logs')
sub.add_argument("-t", "--tail", default=100, type=int, help="how far back to go in log")
sub.set_defaults(func=status)
args = parser.parse_args()
if hasattr(args, 'func'):
args.func(args)
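# Illustrative usage (added; the tag names are hypothetical):
#   ./control.py build --tag=test --local      # build the Docker image locally only
#   ./control.py run --tag=test --replicas=2   # deploy/update on the selected k8s cluster
#   ./control.py status --tail=200             # summarize BLOCKED/concurrent from recent logs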
|
gpl-3.0
| -8,445,299,926,647,956,000
| 46.419689
| 275
| 0.633086
| false
| 3.521354
| false
| false
| false
|
Yuecai/com-yuecai-dream
|
src/nodelay/forms.py
|
1
|
9690
|
# coding=utf-8
#########################################################################
# File Name: forms.py
# Original Author: 段凯强
# Mail: duankq@ios.ac.cn
# Created Time: 2013-12-26
# Update:
#########################################################################
#########################################################################
# Copyright (c) 2013~2014 by 段凯强
# Reand the file "license" distributed with these sources, or XXXX
# XXXXXXXXXXXXXXXXXX switch for additional information, such as how
# to use, copy, modify, sell and/or distribute this software and its
# documentation any purpose anyway.
#########################################################################
import datetime, time
import re
from django import forms
class BasicTaskForm(forms.Form):
taskType = forms.CharField()
date = forms.DateField()
time = forms.IntegerField()
title = forms.CharField()
content = forms.CharField()
def clean_taskType(self):
taskType = self.cleaned_data['taskType']
if taskType == u'Basic':
return taskType
raise forms.ValidationError('taskType_err')
def clean_date(self):
date = self.cleaned_data['date']
today = datetime.date.today()
if date < today + datetime.timedelta(days = 8) and date > today:
return date
raise forms.ValidationError('date_err')
def clean_time(self):
time = self.cleaned_data['time']
if time >= 1 and time <= 3:
return time
raise forms.ValidationError('time_err')
def clean_title(self):
pattern = re.compile(u'^[a-zA-Z0-9\u4e00-\u9fa5]+$')
title = self.cleaned_data['title']
l = len(title)
if l >= 1 and l <= 10 and pattern.match(title):
return title
raise forms.ValidationError('title_err')
def clean_content(self):
pattern = re.compile(u'^[()!¥?。,《》{}【】“”·、:;‘’……\s\x21-\x7e\u4e00-\u9fa5]+$')
pattern_blank = re.compile(u'^\s+$')
content = self.cleaned_data['content']
l = len(content)
if l >= 1 and l <= 100 and pattern.match(content) and not pattern_blank.match(content):
return content
raise forms.ValidationError('content_err')
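# Illustrative sketch (added; the field values are made up): these forms are
# used like any Django form -- bind a dict, call is_valid(), then read
# cleaned_data. Note that clean_date() only accepts dates within the next
# 7 days:
#   form = BasicTaskForm({'taskType': u'Basic', 'date': u'2014-01-01',
#                         'time': 1, 'title': u'read', 'content': u'chapter one'})
#   if form.is_valid():
#       task_type = form.cleaned_data['taskType']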
class BookTaskForm(forms.Form):
taskType = forms.CharField()
date = forms.DateField()
time = forms.IntegerField()
bookName = forms.CharField()
readFrom = forms.CharField()
readTo = forms.CharField()
def clean_taskType(self):
taskType = self.cleaned_data['taskType']
if taskType == u'Book':
return taskType
raise forms.ValidationError('taskType_err')
def clean_date(self):
date = self.cleaned_data['date']
today = datetime.date.today()
if date < today + datetime.timedelta(days = 8) and date > today:
return date
raise forms.ValidationError('date_err')
def clean_time(self):
time = self.cleaned_data['time']
if time >= 1 and time <= 3:
return time
raise forms.ValidationError('time_err')
def clean_bookName(self):
pattern = re.compile(u'^[()!¥?。,《》{}【】“”·、:;‘’……\s\x21-\x7e\u4e00-\u9fa5]+$')
pattern_blank = re.compile(u'^\s+$')
bookName = self.cleaned_data['bookName']
l = len(bookName)
if l >= 1 and l <= 50 and pattern.match(bookName) and not pattern_blank.match(bookName):
return bookName
raise forms.ValidationError('bookName_err')
def clean_readFrom(self):
pattern = re.compile(u'^[()!¥?。,《》{}【】“”·、:;‘’……\s\x21-\x7e\u4e00-\u9fa5]+$')
pattern_blank = re.compile(u'^\s+$')
readFrom = self.cleaned_data['readFrom']
l = len(readFrom)
if l >= 1 and l <= 50 and pattern.match(readFrom) and not pattern_blank.match(readFrom):
return readFrom
raise forms.ValidationError('readFrom_err')
def clean_readTo(self):
pattern = re.compile(u'^[()!¥?。,《》{}【】“”·、:;‘’……\s\x21-\x7e\u4e00-\u9fa5]+$')
pattern_blank = re.compile(u'^\s+$')
readTo = self.cleaned_data['readTo']
l = len(readTo)
if l >= 1 and l <= 50 and pattern.match(readTo) and not pattern_blank.match(readTo):
return readTo
raise forms.ValidationError('readTo_err')
class WorkTaskForm(forms.Form):
taskType = forms.CharField()
date = forms.DateField()
time = forms.IntegerField()
summary = forms.CharField()
goal = forms.CharField()
def clean_taskType(self):
taskType = self.cleaned_data['taskType']
if taskType == u'Work':
return taskType
raise forms.ValidationError('taskType_err')
def clean_date(self):
date = self.cleaned_data['date']
today = datetime.date.today()
if date < today + datetime.timedelta(days = 8) and date > today:
return date
raise forms.ValidationError('date_err')
def clean_time(self):
time = self.cleaned_data['time']
if time >= 1 and time <= 3:
return time
raise forms.ValidationError('time_err')
def clean_summary(self):
pattern = re.compile(u'^[()!¥?。,《》{}【】“”·、:;‘’……\s\x21-\x7e\u4e00-\u9fa5]+$')
pattern_blank = re.compile(u'^\s+$')
summary = self.cleaned_data['summary']
l = len(summary)
if l >= 1 and l <= 50 and pattern.match(summary) and not pattern_blank.match(summary):
return summary
raise forms.ValidationError('summary_err')
def clean_goal(self):
pattern = re.compile(u'^[()!¥?。,《》{}【】“”·、:;‘’……\s\x21-\x7e\u4e00-\u9fa5]+$')
pattern_blank = re.compile(u'^\s+$')
goal = self.cleaned_data['goal']
l = len(goal)
if l >= 1 and l <= 50 and pattern.match(goal) and not pattern_blank.match(goal):
return goal
raise forms.ValidationError('goal_err')
class HomeworkTaskForm(forms.Form):
taskType = forms.CharField()
date = forms.DateField()
time = forms.IntegerField()
courseName = forms.CharField()
introduction = forms.CharField()
def clean_taskType(self):
taskType = self.cleaned_data['taskType']
if taskType == u'Homework':
return taskType
raise forms.ValidationError('taskType_err')
def clean_date(self):
date = self.cleaned_data['date']
today = datetime.date.today()
if date < today + datetime.timedelta(days = 8) and date > today:
return date
raise forms.ValidationError('date_err')
def clean_time(self):
time = self.cleaned_data['time']
if time >= 1 and time <= 3:
return time
raise forms.ValidationError('time_err')
def clean_courseName(self):
pattern = re.compile(u'^[a-zA-Z0-9\u4e00-\u9fa5]+$')
courseName = self.cleaned_data['courseName']
l = len(courseName)
if l >= 1 and l <= 10 and pattern.match(courseName):
return courseName
raise forms.ValidationError('courseName_err')
def clean_introduction(self):
pattern = re.compile(u'^[()!¥?。,《》{}【】“”·、:;‘’……\s\x21-\x7e\u4e00-\u9fa5]+$')
pattern_blank = re.compile(u'^\s+$')
introduction = self.cleaned_data['introduction']
l = len(introduction)
if l >= 1 and l <= 100 and pattern.match(introduction) and not pattern_blank.match(introduction):
return introduction
raise forms.ValidationError('introduction_err')
class TaskIdForm(forms.Form):
taskId = forms.IntegerField()
def clean_taskId(self):
taskId = self.cleaned_data['taskId']
if taskId > 0:
return taskId
raise forms.ValidationError('taskId_err')
class ChangeDateForm(forms.Form):
taskId = forms.IntegerField()
date = forms.DateField()
time = forms.IntegerField()
def clean_taskId(self):
taskId = self.cleaned_data['taskId']
if taskId > 0:
return taskId
raise forms.ValidationError('taskId_err')
def clean_date(self):
date = self.cleaned_data['date']
today = datetime.date.today()
if date < today + datetime.timedelta(days = 8) and date > today:
return date
raise forms.ValidationError('date_err')
def clean_time(self):
time = self.cleaned_data['time']
if time >= 1 and time <= 3:
return time
raise forms.ValidationError('time_err')
class ExchangeTaskForm(forms.Form):
taskId1 = forms.IntegerField()
taskId2 = forms.IntegerField()
def clean_taskId1(self):
taskId1 = self.cleaned_data['taskId1']
if taskId1 > 0:
return taskId1
raise forms.ValidationError('taskId1_err')
def clean_taskId2(self):
taskId2 = self.cleaned_data['taskId2']
if taskId2 > 0:
return taskId2
raise forms.ValidationError('taskId2_err')
class DelayShiftTaskForm(forms.Form):
fromId = forms.IntegerField()
toId = forms.IntegerField()
def clean_fromId(self):
fromId = self.cleaned_data['fromId']
if fromId > 0:
return fromId
raise forms.ValidationError('fromId_err')
def clean_toId(self):
toId = self.cleaned_data['toId']
if toId > 0:
return toId
raise forms.ValidationError('toId_err')
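# --- Illustrative usage sketch (not part of the original module) ---
# A hedged example of how one of the forms above is normally driven through
# Django's standard validation cycle. The view name and incoming payload are
# hypothetical; the form class and error codes come from the definitions above.
def example_add_book_task(request):
    form = BookTaskForm(request.POST)
    if form.is_valid():
        task = form.cleaned_data    # e.g. {'taskType': u'Book', 'time': 2, ...}
        return task                 # a real view would persist the task here
    return form.errors              # e.g. {'date': [u'date_err']}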
|
bsd-3-clause
| 8,940,460,477,779,460,000
| 33.025362
| 105
| 0.577787
| false
| 3.504104
| false
| false
| false
|
quantopian/zipline
|
zipline/data/in_memory_daily_bars.py
|
1
|
5363
|
from six import iteritems
import numpy as np
import pandas as pd
from pandas import NaT
from trading_calendars import TradingCalendar
from zipline.data.bar_reader import OHLCV, NoDataOnDate, NoDataForSid
from zipline.data.session_bars import CurrencyAwareSessionBarReader
from zipline.utils.input_validation import expect_types, validate_keys
from zipline.utils.pandas_utils import check_indexes_all_same
class InMemoryDailyBarReader(CurrencyAwareSessionBarReader):
"""
A SessionBarReader backed by a dictionary of in-memory DataFrames.
Parameters
----------
frames : dict[str -> pd.DataFrame]
Dictionary from field name ("open", "high", "low", "close", or
"volume") to DataFrame containing data for that field.
calendar : str or trading_calendars.TradingCalendar
Calendar (or name of calendar) to which data is aligned.
currency_codes : pd.Series
Map from sid -> listing currency for that sid.
verify_indices : bool, optional
Whether or not to verify that input data is correctly aligned to the
given calendar. Default is True.
"""
@expect_types(
frames=dict,
calendar=TradingCalendar,
verify_indices=bool,
currency_codes=pd.Series,
)
def __init__(self,
frames,
calendar,
currency_codes,
verify_indices=True):
self._frames = frames
self._values = {key: frame.values for key, frame in iteritems(frames)}
self._calendar = calendar
self._currency_codes = currency_codes
validate_keys(frames, set(OHLCV), type(self).__name__)
if verify_indices:
verify_frames_aligned(list(frames.values()), calendar)
self._sessions = frames['close'].index
self._sids = frames['close'].columns
@classmethod
def from_panel(cls, panel, calendar, currency_codes):
"""Helper for construction from a pandas.Panel.
"""
return cls(dict(panel.iteritems()), calendar, currency_codes)
@property
def last_available_dt(self):
return self._calendar[-1]
@property
def trading_calendar(self):
return self._calendar
@property
def sessions(self):
return self._sessions
def load_raw_arrays(self, columns, start_dt, end_dt, assets):
if start_dt not in self._sessions:
raise NoDataOnDate(start_dt)
if end_dt not in self._sessions:
raise NoDataOnDate(end_dt)
asset_indexer = self._sids.get_indexer(assets)
if -1 in asset_indexer:
bad_assets = assets[asset_indexer == -1]
raise NoDataForSid(bad_assets)
date_indexer = self._sessions.slice_indexer(start_dt, end_dt)
out = []
for c in columns:
out.append(self._values[c][date_indexer, asset_indexer])
return out
def get_value(self, sid, dt, field):
"""
Parameters
----------
sid : int
The asset identifier.
        dt : datetime64-like
Midnight of the day for which data is requested.
field : string
The price field. e.g. ('open', 'high', 'low', 'close', 'volume')
Returns
-------
float
            The spot price for ``field`` of the given sid on the given day.
Raises a NoDataOnDate exception if the given day and sid is before
or after the date range of the equity.
Returns -1 if the day is within the date range, but the price is
0.
"""
        return self._frames[field].loc[dt, sid]
def get_last_traded_dt(self, asset, dt):
"""
Parameters
----------
asset : zipline.asset.Asset
The asset identifier.
dt : datetime64-like
Midnight of the day for which data is requested.
Returns
-------
        pd.Timestamp : The last known dt for the asset and dt;
NaT if no trade is found before the given dt.
"""
try:
            return self._frames['close'].loc[:, asset.sid].last_valid_index()
except IndexError:
return NaT
@property
def first_trading_day(self):
return self._sessions[0]
def currency_codes(self, sids):
codes = self._currency_codes
return np.array([codes[sid] for sid in sids])
def verify_frames_aligned(frames, calendar):
"""
Verify that DataFrames in ``frames`` have the same indexing scheme and are
aligned to ``calendar``.
Parameters
----------
frames : list[pd.DataFrame]
calendar : trading_calendars.TradingCalendar
Raises
------
ValueError
If frames have different indexes/columns, or if frame indexes do not
match a contiguous region of ``calendar``.
"""
indexes = [f.index for f in frames]
check_indexes_all_same(indexes, message="DataFrame indexes don't match:")
columns = [f.columns for f in frames]
check_indexes_all_same(columns, message="DataFrame columns don't match:")
start, end = indexes[0][[0, -1]]
cal_sessions = calendar.sessions_in_range(start, end)
check_indexes_all_same(
[indexes[0], cal_sessions],
"DataFrame index doesn't match {} calendar:".format(calendar.name),
)
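# --- Illustrative construction sketch (not part of this module) ---
# A hedged example of building an InMemoryDailyBarReader from plain DataFrames.
# The calendar name, sids and prices are made up, and it assumes
# ``trading_calendars.get_calendar`` is importable in the environment.
def _example_reader():
    from trading_calendars import get_calendar

    cal = get_calendar('XNYS')
    sessions = cal.sessions_in_range(
        pd.Timestamp('2020-01-02', tz='UTC'),
        pd.Timestamp('2020-01-10', tz='UTC'),
    )
    sids = [1, 2]
    frames = {
        field: pd.DataFrame(1.0, index=sessions, columns=sids)
        for field in ('open', 'high', 'low', 'close', 'volume')
    }
    currency_codes = pd.Series('USD', index=sids)
    reader = InMemoryDailyBarReader(frames, cal, currency_codes)
    # e.g. reader.get_value(1, sessions[0], 'close') -> 1.0
    return reader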
|
apache-2.0
| -1,749,134,559,772,530,400
| 30.547059
| 78
| 0.611039
| false
| 4.222835
| false
| false
| false
|
suizokukan/dchars-fe
|
kshortcuts.py
|
1
|
2929
|
#!./python_link
# -*- coding: utf-8 -*-
################################################################################
# DChars-FE Copyright (C) 2008 Xavier Faure
# Contact: faure dot epistulam dot mihi dot scripsisti at orange dot fr
#
# This file is part of DChars-FE.
# DChars-FE is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# DChars-FE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with DChars-FE. If not, see <http://www.gnu.org/licenses/>.
################################################################################
"""
❏DChars-FE❏ kshortcuts.py
(keyboard) shortcuts
"""
################################################################################
class KeyboardShortcut(object):
"""
class KeyboardShortcut
Use this class to store two representations of shortcuts : the Qt one
and the "human readable" one.
"""
#///////////////////////////////////////////////////////////////////////////
def __init__(self, qstring, human_readeable_string):
"""
KeyboardShortcut.__init__
"""
self.qstring = qstring
self.human_readeable_string = human_readeable_string
KSHORTCUTS = {
"open" : \
KeyboardShortcut( qstring = "CTRL+O",
human_readeable_string = "CTRL+O" ),
"save as" : \
KeyboardShortcut( qstring = "CTRL+S",
human_readeable_string = "CTRL+S" ),
"exit" : \
KeyboardShortcut( qstring = "CTRL+Q",
human_readeable_string = "CTRL+Q" ),
"display help chars" : \
KeyboardShortcut( qstring = "CTRL+H",
human_readeable_string = "CTRL+H" ),
"apply" : \
KeyboardShortcut( qstring = "CTRL+SPACE",
human_readeable_string = "CTRL+SPACE" ),
"add trans" : \
KeyboardShortcut( qstring = "CTRL++",
human_readeable_string = "CTRL + '+'" ),
"sub trans" : \
KeyboardShortcut( qstring = "CTRL+-",
human_readeable_string = "CTRL + '-'" ),
}
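# --- Illustrative usage sketch (not part of the original module) ---
# A hedged example of how the table above is meant to be consumed: the Qt
# string is the one bound to widgets, the human readable string is the one
# shown to users (e.g. in menus or help text).
def example_shortcut_labels():
    """
    Return {action name : human readable shortcut} pairs, e.g. for a help menu.
    """
    return dict( (name, shortcut.human_readeable_string) \
                 for name, shortcut in KSHORTCUTS.items() )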
|
gpl-3.0
| -7,099,916,806,350,500,000
| 38.527027
| 82
| 0.454701
| false
| 4.695024
| false
| false
| false
|
akniffe1/fsf
|
fsf-server/daemon.py
|
1
|
3798
|
#!/usr/bin/env python
#
# All credit for this class goes to Sander Marechal, 2009-05-31
# Reference: http://www.jejik.com/articles/2007/02/a_simple_unix_linux_daemon_in_python/
#
#
import sys, os, time, atexit
from signal import SIGTERM
class Daemon:
"""
A generic daemon class.
Usage: subclass the Daemon class and override the run() method
"""
def __init__(self, pidfile, stdin='/dev/null', stdout='/dev/null', stderr='/dev/null'):
self.stdin = stdin
self.stdout = stdout
self.stderr = stderr
self.pidfile = pidfile
def daemonize(self):
"""
do the UNIX double-fork magic, see Stevens' "Advanced
Programming in the UNIX Environment" for details (ISBN 0201563177)
http://www.erlenstar.demon.co.uk/unix/faq_2.html#SEC16
"""
try:
pid = os.fork()
if pid > 0:
# exit first parent
sys.exit(0)
except OSError, e:
sys.stderr.write("fork #1 failed: %d (%s)\n" % (e.errno, e.strerror))
sys.exit(1)
# decouple from parent environment
os.chdir("/")
os.setsid()
os.umask(0)
# do second fork
try:
pid = os.fork()
if pid > 0:
# exit from second parent
sys.exit(0)
except OSError, e:
sys.stderr.write("fork #2 failed: %d (%s)\n" % (e.errno, e.strerror))
sys.exit(1)
# redirect standard file descriptors
sys.stdout.flush()
sys.stderr.flush()
si = file(self.stdin, 'r')
so = file(self.stdout, 'a+')
se = file(self.stderr, 'a+', 0)
os.dup2(si.fileno(), sys.stdin.fileno())
os.dup2(so.fileno(), sys.stdout.fileno())
os.dup2(se.fileno(), sys.stderr.fileno())
# write pidfile
atexit.register(self.delpid)
pid = str(os.getpid())
file(self.pidfile,'w+').write("%s\n" % pid)
def delpid(self):
os.remove(self.pidfile)
def start(self):
"""
Start the daemon
"""
# Check for a pidfile to see if the daemon already runs
try:
pf = file(self.pidfile,'r')
pid = int(pf.read().strip())
pf.close()
except IOError:
pid = None
if pid:
message = "pidfile %s already exists. Daemon already running?\n"
sys.stderr.write(message % self.pidfile)
sys.exit(1)
# Start the daemon
self.daemonize()
self.run()
def stop(self):
"""
Stop the daemon
"""
# Get the pid from the pidfile
try:
pf = file(self.pidfile,'r')
pid = int(pf.read().strip())
pf.close()
except IOError:
pid = None
if not pid:
message = "pidfile %s does not exist. Daemon not running?\n"
sys.stderr.write(message % self.pidfile)
return # not an error in a restart
# Try killing the daemon process
try:
while 1:
os.kill(pid, SIGTERM)
time.sleep(0.1)
except OSError, err:
err = str(err)
if err.find("No such process") > 0:
if os.path.exists(self.pidfile):
os.remove(self.pidfile)
else:
print str(err)
sys.exit(1)
def restart(self):
"""
Restart the daemon
"""
self.stop()
self.start()
def run(self):
"""
You should override this method when you subclass Daemon. It will be called after the process has been
daemonized by start() or restart().
"""
|
apache-2.0
| 6,438,743,217,424,489,000
| 27.343284
| 110
| 0.511848
| false
| 3.931677
| false
| false
| false
|
immanetize/nikola
|
nikola/filters.py
|
1
|
7187
|
# -*- coding: utf-8 -*-
# Copyright © 2012-2015 Roberto Alsina and others.
# Permission is hereby granted, free of charge, to any
# person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the
# Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice
# shall be included in all copies or substantial portions of
# the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""Utility functions to help you run filters on files."""
from .utils import req_missing
from functools import wraps
import os
import io
import shutil
import subprocess
import tempfile
import shlex
try:
import typogrify.filters as typo
except ImportError:
typo = None # NOQA
def apply_to_binary_file(f):
"""Take a function f that transforms a data argument, and returns
a function that takes a filename and applies f to the contents,
in place. Reads files in binary mode."""
@wraps(f)
def f_in_file(fname):
with open(fname, 'rb') as inf:
data = inf.read()
data = f(data)
with open(fname, 'wb+') as outf:
outf.write(data)
return f_in_file
def apply_to_text_file(f):
"""Take a function f that transforms a data argument, and returns
a function that takes a filename and applies f to the contents,
in place. Reads files in UTF-8."""
@wraps(f)
def f_in_file(fname):
with io.open(fname, 'r', encoding='utf-8') as inf:
data = inf.read()
data = f(data)
with io.open(fname, 'w+', encoding='utf-8') as outf:
outf.write(data)
return f_in_file
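# --- Illustrative usage sketch (not part of the original module) ---
# A hedged example of what the decorators above buy you: a plain
# string-to-string transform becomes an in-place file filter. The
# ``normalize_newlines`` filter below is hypothetical.
@apply_to_text_file
def normalize_newlines(data):
    """Rewrite a text file in place with Unix line endings."""
    return data.replace('\r\n', '\n')
# Usage: normalize_newlines('output/index.html')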
def list_replace(the_list, find, replacement):
"Replace all occurrences of ``find`` with ``replacement`` in ``the_list``"
for i, v in enumerate(the_list):
if v == find:
the_list[i] = replacement
def runinplace(command, infile):
"""Run a command in-place on a file.
command is a string of the form: "commandname %1 %2" and
it will be execed with infile as %1 and a temporary file
as %2. Then, that temporary file will be moved over %1.
Example usage:
runinplace("yui-compressor %1 -o %2", "myfile.css")
That will replace myfile.css with a minified version.
You can also supply command as a list.
"""
if not isinstance(command, list):
command = shlex.split(command)
tmpdir = None
if "%2" in command:
tmpdir = tempfile.mkdtemp(prefix="nikola")
tmpfname = os.path.join(tmpdir, os.path.basename(infile))
try:
list_replace(command, "%1", infile)
if tmpdir:
list_replace(command, "%2", tmpfname)
subprocess.check_call(command)
if tmpdir:
shutil.move(tmpfname, infile)
finally:
if tmpdir:
shutil.rmtree(tmpdir)
def yui_compressor(infile):
yuicompressor = False
try:
subprocess.call('yui-compressor', stdout=open(os.devnull, 'w'), stderr=open(os.devnull, 'w'))
yuicompressor = 'yui-compressor'
except Exception:
pass
if not yuicompressor:
try:
subprocess.call('yuicompressor', stdout=open(os.devnull, 'w'), stderr=open(os.devnull, 'w'))
yuicompressor = 'yuicompressor'
        except Exception:
            raise Exception("yui-compressor is not installed.")
return runinplace(r'{} --nomunge %1 -o %2'.format(yuicompressor), infile)
def closure_compiler(infile):
return runinplace(r'closure-compiler --warning_level QUIET --js %1 --js_output_file %2', infile)
def optipng(infile):
return runinplace(r"optipng -preserve -o2 -quiet %1", infile)
def jpegoptim(infile):
return runinplace(r"jpegoptim -p --strip-all -q %1", infile)
def html_tidy_nowrap(infile):
return _html_tidy_runner(infile, r"-quiet --show-info no --show-warnings no -utf8 -indent --indent-attributes no --sort-attributes alpha --wrap 0 --wrap-sections no --tidy-mark no -modify %1")
def html_tidy_wrap(infile):
return _html_tidy_runner(infile, r"-quiet --show-info no --show-warnings no -utf8 -indent --indent-attributes no --sort-attributes alpha --wrap 80 --wrap-sections no --tidy-mark no -modify %1")
def html_tidy_wrap_attr(infile):
return _html_tidy_runner(infile, r"-quiet --show-info no --show-warnings no -utf8 -indent --indent-attributes yes --sort-attributes alpha --wrap 80 --wrap-sections no --tidy-mark no -modify %1")
def html_tidy_mini(infile):
return _html_tidy_runner(infile, r"-quiet --show-info no --show-warnings no -utf8 --indent-attributes no --sort-attributes alpha --wrap 0 --wrap-sections no --tidy-mark no -modify %1")
def _html_tidy_runner(infile, options):
""" Warnings (returncode 1) are not critical, and *everything* is a warning """
try:
status = runinplace(r"tidy5 " + options, infile)
except subprocess.CalledProcessError as err:
status = 0 if err.returncode == 1 else err.returncode
return status
@apply_to_text_file
def minify_lines(data):
return data
@apply_to_text_file
def typogrify(data):
if typo is None:
req_missing(['typogrify'], 'use the typogrify filter')
data = typo.amp(data)
data = typo.widont(data)
data = typo.smartypants(data)
# Disabled because of typogrify bug where it breaks <title>
# data = typo.caps(data)
data = typo.initial_quotes(data)
return data
@apply_to_text_file
def typogrify_sans_widont(data):
# typogrify with widont disabled because it caused broken headline
# wrapping, see issue #1465
if typo is None:
req_missing(['typogrify'], 'use the typogrify_sans_widont filter')
data = typo.amp(data)
data = typo.smartypants(data)
# Disabled because of typogrify bug where it breaks <title>
# data = typo.caps(data)
data = typo.initial_quotes(data)
return data
@apply_to_text_file
def php_template_injection(data):
import re
template = re.search('<\!-- __NIKOLA_PHP_TEMPLATE_INJECTION source\:(.*) checksum\:(.*)__ -->', data)
if template:
source = template.group(1)
with io.open(source, "r", encoding="utf-8") as in_file:
phpdata = in_file.read()
_META_SEPARATOR = '(' + os.linesep * 2 + '|' + ('\n' * 2) + '|' + ("\r\n" * 2) + ')'
phpdata = re.split(_META_SEPARATOR, phpdata, maxsplit=1)[-1]
phpdata = re.sub(template.group(0), phpdata, data)
return phpdata
else:
return data
|
mit
| -9,010,117,883,076,772,000
| 31.369369
| 198
| 0.660868
| false
| 3.594797
| false
| false
| false
|
Arzaroth/python_rapidxml
|
tests/test_basic.py
|
1
|
5638
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# File: simple.py
# by Arzaroth Lekva
# arzaroth@arzaroth.com
#
import os
import rapidxml
def test_unparse(init_rapidxml):
assert init_rapidxml.unparse() == ('<root><test attr1="one" attr2="two" attr3="three"/>'
'<test2><node id="1"/><node id="2"/><node id="3"/></test2>'
'<test>some text</test></root>')
assert init_rapidxml.unparse() == repr(init_rapidxml)
assert init_rapidxml.unparse(False, False) == repr(init_rapidxml)
assert init_rapidxml.unparse(raw=False) == repr(init_rapidxml)
assert init_rapidxml.unparse(pretty=False) == repr(init_rapidxml)
assert init_rapidxml.unparse(pretty=False, raw=False) == repr(init_rapidxml)
assert init_rapidxml.unparse(True) == str(init_rapidxml)
assert init_rapidxml.unparse(True, False) == str(init_rapidxml)
assert init_rapidxml.unparse(pretty=True) == str(init_rapidxml)
assert init_rapidxml.unparse(pretty=True, raw=False) == str(init_rapidxml)
assert init_rapidxml.unparse(True, raw=False) == str(init_rapidxml)
def test_parse(init_rapidxml):
r = rapidxml.RapidXml()
try:
data = init_rapidxml.unparse().encode('utf-8')
except UnicodeDecodeError:
data = init_rapidxml.unparse()
r.parse(data)
assert str(r) == str(init_rapidxml)
def test_parse_from_file(init_rapidxml, tmpdir):
f = tmpdir.join("dump.xml")
f.write(init_rapidxml.unparse())
r = rapidxml.RapidXml(str(f), from_file=True)
assert str(r) == str(init_rapidxml)
def test_equals(init_rapidxml):
assert init_rapidxml == init_rapidxml
root = init_rapidxml.first_node()
assert root == root
assert root == init_rapidxml.first_node()
assert root.first_node() != root.first_node("test2")
assert (root != root) == (not (root == root))
def test_parent(init_rapidxml):
assert init_rapidxml.parent is None
assert init_rapidxml.first_node().parent == init_rapidxml
def test_assign(init_rapidxml):
root = init_rapidxml.first_node()
root.name = "new_root"
assert root.name == "new_root"
test = root.first_node()
test.name = "new_test"
test.first_attribute().name = "new_attr1"
test.first_attribute().next_attribute().value = "new_two"
test = root.first_node("test")
test.value = "some new text"
assert test.value == "some new text"
assert init_rapidxml.unparse() == ('<new_root><new_test new_attr1="one" attr2="new_two" attr3="three"/>'
'<test2><node id="1"/><node id="2"/><node id="3"/></test2>'
'<test>some new text</test></new_root>')
def test_init_cdata(init_rapidxml_with_CDADA):
datra_str =('<root><test attr1="one" attr2="two" attr3="three"/>'
'<test2><node id="1"/><node id="2"/><node id="3"/></test2>'
'<test>some text</test>'
"<ns2:AdditionalData><ns2:Data TID=\"AD_1\">"
"<![CDATA[{\"Cart\":{\"expirationTime\":\"2017-04-22T09:40\","
"\"id\":\"b469df3b-f626-4fe3-898c-825373e546a2\",\"products\":[\"1223\"],"
"\"creationTime\":\"2017-04-21T09:40\",\"totalPrice\":"
"{\"currencyCode\":\"EUR\",\"amount\":\"138.000\"}}}]]>"
"</ns2:Data></ns2:AdditionalData></root>")
assert init_rapidxml_with_CDADA.unparse() == rapidxml.RapidXml(datra_str,
from_file=False,
attribute_prefix='@',
cdata_key='#text',
always_aslist=False,
parse_cdata=True).unparse()
assert init_rapidxml_with_CDADA.unparse() == repr(init_rapidxml_with_CDADA)
assert init_rapidxml_with_CDADA.unparse(True) == str(init_rapidxml_with_CDADA)
def test_parse_cdata(init_rapidxml_with_CDADA):
r = rapidxml.RapidXml()
try:
data = init_rapidxml_with_CDADA.unparse().encode('utf-8')
except UnicodeDecodeError:
data = init_rapidxml_with_CDADA.unparse()
r.parse(data, from_file=False, parse_cdata=True)
assert str(r) == str(init_rapidxml_with_CDADA)
def test_parse_from_file_cdata(init_rapidxml_with_CDADA, tmpdir):
f = tmpdir.join("dump.xml")
f.write(init_rapidxml_with_CDADA.unparse())
r = rapidxml.RapidXml(str(f), from_file=True, parse_cdata=True)
assert str(r) == str(init_rapidxml_with_CDADA)
def test_equals_cdata(init_rapidxml_with_CDADA):
assert init_rapidxml_with_CDADA == init_rapidxml_with_CDADA
root = init_rapidxml_with_CDADA.first_node()
assert root == root
assert root == init_rapidxml_with_CDADA.first_node()
assert root.first_node() != root.first_node("test2")
assert (root != root) == (not (root == root))
def test_parent_cdata(init_rapidxml_with_CDADA):
assert init_rapidxml_with_CDADA.parent is None
assert init_rapidxml_with_CDADA.first_node().parent == init_rapidxml_with_CDADA
def test_assign_cdata(init_rapidxml_with_CDADA):
root = init_rapidxml_with_CDADA.first_node()
root.name = "new_root"
assert root.name == "new_root"
test = root.first_node()
test.name = "new_test"
test.first_attribute().name = "new_attr1"
test.first_attribute().next_attribute().value = "new_two"
test = root.first_node("test")
test.value = "some new text"
assert test.value == "some new text"
|
mit
| 6,300,679,868,644,704,000
| 44.104
| 108
| 0.595956
| false
| 3.130483
| true
| false
| false
|
OpenNetworkingFoundation/PIF-Open-Intermediate-Representation
|
pif_ir/bir/tests/test_common.py
|
1
|
1166
|
# single BIRStruct description
yaml_eth_struct_dict = {
'type' : 'struct',
'fields' : [
{'dst' : 48},
{'src' : 48},
{'type_' : 16}
]
}
yaml_udp_struct_dict = {
'type' : 'struct',
'fields' : [
{'sport' : 16},
{'dport' : 16},
{'len' : 16},
{'chksum' : 16}
]
}
yaml_req_struct_dict = {
'type' : 'struct',
'fields' : [
{'type_' : 16}
]
}
yaml_resp_struct_dict = {
'type' : 'struct',
'fields' : [
{'hit' : 1},
{'p4_action' : 2},
{'action_0_arg0' : 16},
{'action_1_arg0' : 16}
]
}
# single MetadataInstance description
yaml_eth_meta_dict = {
'type' : 'metadata',
'values' : 'eth_t',
'visibility' : 'inout'
}
yaml_req_meta_dict = {
'type' : 'metadata',
'values' : 'req_t',
'visibility' : 'inout'
}
yaml_resp_meta_dict = {
'type' : 'metadata',
'values' : 'resp_t',
'visibility' : 'inout'
}
# single Table description
yaml_table_dict = {
'type' : 'table',
'match_type' : 'ternary',
'depth' : 64,
'request' : 'req_t',
'response' : 'resp_t',
'operations' : None
}
|
apache-2.0
| -2,003,529,813,339,534,800
| 17.21875
| 37
| 0.465695
| false
| 2.823245
| false
| true
| false
|
specter119/custodian
|
custodian/feff/handlers.py
|
1
|
4398
|
# coding: utf-8
from __future__ import unicode_literals, division
from custodian.custodian import ErrorHandler
import re
from custodian.utils import backup
from pymatgen.io.feff.sets import FEFFDictSet
from custodian.feff.interpreter import FeffModder
import logging
""" This module implements specific error handler for FEFF runs. """
__author__ = "Chen Zheng"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Chen Zheng"
__email__ = "chz022@ucsd.edu"
__date__ = "Oct 18, 2017"
FEFF_BACKUP_FILES = ["ATOMS", "HEADER", "PARAMETERS", "POTENTIALS", "feff.inp", "*.cif", "pot.bin"]
logger = logging.getLogger(__name__)
class UnconvergedErrorHandler(ErrorHandler):
"""
Correct the unconverged error of FEFF's SCF calculation.
"""
is_monitor = False
def __init__(self, output_filename='log1.dat'):
"""
Initializes the handler with the output file to check
Args:
output_filename (str): Filename for the log1.dat file. log1.dat file
contains the SCF calculation convergence information. Change this only
if it is different from the default (unlikely).
"""
self.output_filename = output_filename
def check(self):
"""
If the FEFF run does not converge, the check will return
"TRUE"
"""
return self._notconverge_check()
def _notconverge_check(self):
# Process the output file and get converge information
not_converge_pattern = re.compile("Convergence not reached.*")
converge_pattern = re.compile('Convergence reached.*')
for _, line in enumerate(open(self.output_filename)):
if len(not_converge_pattern.findall(line)) > 0:
return True
elif len(converge_pattern.findall(line)) > 0:
return False
def correct(self):
backup(FEFF_BACKUP_FILES)
feff_input = FEFFDictSet.from_directory(".")
scf_values = feff_input.tags.get("SCF")
nscmt = scf_values[2]
ca = scf_values[3]
nmix = scf_values[4]
actions = []
        # Add RESTART card to PARAMETERS
        if "RESTART" not in feff_input.tags:
actions.append({"dict": "PARAMETERS",
"action": {"_set": {"RESTART": []}}})
if nscmt < 100 and ca == 0.2:
scf_values[2] = 100
scf_values[4] = 3 # Set nmix = 3
actions.append({"dict": "PARAMETERS",
"action": {"_set": {"SCF": scf_values}}})
FeffModder().apply_actions(actions)
return {"errors": ["Non-converging job"], "actions": actions}
elif nscmt == 100 and nmix == 3 and ca > 0.01:
# Reduce the convergence accelerator factor
scf_values[3] = round(ca / 2, 2)
actions.append({"dict": "PARAMETERS",
"action": {"_set": {"SCF": scf_values}}})
FeffModder().apply_actions(actions)
return {"errors": ["Non-converging job"], "actions": actions}
elif nmix == 3 and ca == 0.01:
# Set ca = 0.05 and set nmix
scf_values[3] = 0.05
scf_values[4] = 5
actions.append({"dict": "PARAMETERS",
"action": {"_set": {"SCF": scf_values}}})
FeffModder().apply_actions(actions)
return {"errors": ["Non-converging job"], "actions": actions}
elif nmix == 5 and ca == 0.05:
# Set ca = 0.05 and set nmix
scf_values[3] = 0.05
scf_values[4] = 10
actions.append({"dict": "PARAMETERS",
"action": {"_set": {"SCF": scf_values}}})
FeffModder().apply_actions(actions)
return {"errors": ["Non-converging job"], "actions": actions}
elif nmix == 10 and ca < 0.2:
# loop through ca with nmix = 10
scf_values[3] = round(ca * 2, 2)
actions.append({"dict": "PARAMETERS",
"action": {"_set": {"SCF": scf_values}}})
FeffModder().apply_actions(actions)
return {"errors": ["Non-converging job"], "actions": actions}
# Unfixable error. Just return None for actions.
else:
return {"errors": ["Non-converging job"], "actions": None}
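# --- Illustrative usage sketch (not part of this module) ---
# A hedged example of driving the handler directly; in practice Custodian
# calls check()/correct() itself. It assumes the current working directory
# holds a finished FEFF run with its log1.dat and input files.
def _example_handle_unconverged():
    handler = UnconvergedErrorHandler()     # reads log1.dat by default
    if handler.check():                     # SCF did not converge
        return handler.correct()            # backs up inputs, edits the SCF card
    return None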
|
mit
| -3,734,159,296,317,997,000
| 35.65
| 99
| 0.555707
| false
| 3.775107
| false
| false
| false
|
ActiveState/code
|
recipes/Python/271607_fiber_scheduler/recipe-271607.py
|
1
|
5269
|
import sys, select, time, socket, traceback
class SEND:
def __init__( self, sock, timeout ):
self.fileno = sock.fileno()
self.expire = time.time() + timeout
def __str__( self ):
return 'SEND(%i,%s)' % ( self.fileno, time.strftime( '%H:%M:%S', time.localtime( self.expire ) ) )
class RECV:
def __init__( self, sock, timeout ):
self.fileno = sock.fileno()
self.expire = time.time() + timeout
def __str__( self ):
return 'RECV(%i,%s)' % ( self.fileno, time.strftime( '%H:%M:%S', time.localtime( self.expire ) ) )
class WAIT:
def __init__( self, timeout = None ):
self.expire = timeout and time.time() + timeout or None
def __str__( self ):
return 'WAIT(%s)' % ( self.expire and time.strftime( '%H:%M:%S', time.localtime( self.expire ) ) )
class Fiber:
def __init__( self, generator ):
self.__generator = generator
self.state = WAIT()
def step( self, throw=None ):
self.state = None
try:
if throw:
assert hasattr( self.__generator, 'throw' ), throw
self.__generator.throw( AssertionError, throw )
state = self.__generator.next()
assert isinstance( state, (SEND, RECV, WAIT) ), 'invalid waiting state %r' % state
self.state = state
except KeyboardInterrupt:
raise
except StopIteration:
del self.__generator
pass
except AssertionError, msg:
print 'Error:', msg
except:
traceback.print_exc()
def __repr__( self ):
return '%i: %s' % ( self.__generator.gi_frame.f_lineno, self.state )
class GatherFiber( Fiber ):
def __init__( self, generator ):
Fiber.__init__( self, generator )
self.__chunks = [ '[ 0.00 ] %s\n' % time.ctime() ]
self.__start = time.time()
self.__newline = True
def step( self, throw=None ):
stdout = sys.stdout
stderr = sys.stderr
try:
sys.stdout = sys.stderr = self
Fiber.step( self, throw )
finally:
sys.stdout = stdout
sys.stderr = stderr
def write( self, string ):
if self.__newline:
self.__chunks.append( '%6.2f ' % ( time.time() - self.__start ) )
self.__chunks.append( string )
self.__newline = string.endswith( '\n' )
def __del__( self ):
sys.stdout.writelines( self.__chunks )
if not self.__newline:
sys.stdout.write( '\n' )
class DebugFiber( Fiber ):
id = 0
def __init__( self, generator ):
Fiber.__init__( self, generator )
self.__id = DebugFiber.id
sys.stdout.write( '[ %04X ] %s\n' % ( self.__id, time.ctime() ) )
self.__newline = True
self.__stdout = sys.stdout
DebugFiber.id = ( self.id + 1 ) % 65535
def step( self, throw=None ):
stdout = sys.stdout
stderr = sys.stderr
try:
sys.stdout = sys.stderr = self
Fiber.step( self, throw )
if self.state:
print 'Waiting at', self
finally:
sys.stdout = stdout
sys.stderr = stderr
def write( self, string ):
if self.__newline:
self.__stdout.write( ' %04X ' % self.__id )
self.__stdout.write( string )
self.__newline = string.endswith( '\n' )
def spawn( generator, port, debug ):
try:
listener = socket.socket( socket.AF_INET, socket.SOCK_STREAM )
listener.setblocking( 0 )
listener.setsockopt( socket.SOL_SOCKET, socket.SO_REUSEADDR, listener.getsockopt( socket.SOL_SOCKET, socket.SO_REUSEADDR ) | 1 )
listener.bind( ( '', port ) )
listener.listen( 5 )
except Exception, e:
print 'error: failed to create socket:', e
return False
if debug:
myFiber = DebugFiber
else:
myFiber = GatherFiber
print ' .... Server started'
try:
fibers = []
while True:
tryrecv = { listener.fileno(): None }
trysend = {}
expire = None
now = time.time()
i = len( fibers )
while i:
i -= 1
state = fibers[ i ].state
if state and now > state.expire:
if isinstance( state, WAIT ):
fibers[ i ].step()
else:
fibers[ i ].step( throw='connection timed out' )
state = fibers[ i ].state
if not state:
del fibers[ i ]
continue
if isinstance( state, RECV ):
tryrecv[ state.fileno ] = fibers[ i ]
elif isinstance( state, SEND ):
trysend[ state.fileno ] = fibers[ i ]
elif state.expire is None:
continue
if state.expire < expire or expire is None:
expire = state.expire
if expire is None:
print '[ IDLE ]', time.ctime()
sys.stdout.flush()
canrecv, cansend, dummy = select.select( tryrecv, trysend, [] )
print '[ BUSY ]', time.ctime()
sys.stdout.flush()
else:
canrecv, cansend, dummy = select.select( tryrecv, trysend, [], max( expire - now, 0 ) )
for fileno in canrecv:
if fileno is listener.fileno():
fibers.append( myFiber( generator( *listener.accept() ) ) )
else:
tryrecv[ fileno ].step()
for fileno in cansend:
trysend[ fileno ].step()
except KeyboardInterrupt:
print ' .... Server terminated'
return True
except:
print ' .... Server crashed'
traceback.print_exc( file=sys.stdout )
return False
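# --- Illustrative handler sketch (not part of the original recipe) ---
# A hedged example of a generator that spawn() can drive: it is called as
# generator( sock, address ) for every accepted connection and must yield
# SEND / RECV / WAIT states around its non-blocking socket work.
def echo_handler( sock, address ):
    sock.setblocking( 0 )
    yield RECV( sock, 15 )
    data = sock.recv( 4096 )
    while data:
        yield SEND( sock, 15 )
        sock.send( data )
        yield RECV( sock, 15 )
        data = sock.recv( 4096 )
    sock.close()

# Typical driver (hypothetical): spawn( echo_handler, port=8080, debug=False )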
|
mit
| -389,809,782,214,965,100
| 23.281106
| 132
| 0.571076
| false
| 3.621306
| false
| false
| false
|
DeanThompson/pyelong
|
pyelong/request.py
|
1
|
6017
|
# -*- coding: utf-8 -*-
import hashlib
import json
import time
import urllib
import requests
from requests import RequestException, ConnectionError, Timeout
from tornado import gen
from tornado.httpclient import AsyncHTTPClient
from .api import ApiSpec
from .exceptions import ElongException, ElongAPIError, \
RetryableException, RetryableAPIError
from .response import RequestsResponse, TornadoResponse, logger
from .util.retry import retry_on_error, is_retryable
class Request(object):
def __init__(self, client,
host=ApiSpec.host,
version=ApiSpec.version,
local=ApiSpec.local):
self.client = client
self.verify_ssl = self.client.cert is not None
self.host = host
self.version = version
self.local = local
def do(self, api, params, https, raw=False):
raise NotImplementedError()
def prepare(self, api, params, https, raw):
timestamp = str(int(time.time()))
data = self.build_data(params, raw)
scheme = 'https' if https else 'http'
url = "%s://%s" % (scheme, self.host)
params = {
'method': api,
'user': self.client.user,
'timestamp': timestamp,
'data': data,
'signature': self.signature(data, timestamp),
'format': 'json'
}
return url, params
def build_data(self, params, raw=False):
if not raw:
data = {
'Version': self.version,
'Local': self.local,
'Request': params
}
else:
data = params
return json.dumps(data, separators=(',', ':'))
def signature(self, data, timestamp):
s = self._md5(data + self.client.app_key)
return self._md5("%s%s%s" % (timestamp, s, self.client.secret_key))
@staticmethod
def _md5(data):
return hashlib.md5(data.encode('utf-8')).hexdigest()
def check_response(self, resp):
if not resp.ok and self.client.raise_api_error:
# logger.error('pyelong calling api failed, url: %s', resp.url)
if is_retryable(resp.code):
raise RetryableAPIError(resp.code, resp.error)
raise ElongAPIError(resp.code, resp.error)
return resp
def timing(self, api, delta):
if self.client.statsd_client and \
hasattr(self.client.statsd_client, 'timing'):
self.client.statsd_client.timing(api, delta)
class SyncRequest(Request):
@property
def session(self):
if not hasattr(self, '_session') or not self._session:
self._session = requests.Session()
if self.client.proxy_host and self.client.proxy_port:
p = '%s:%s' % (self.client.proxy_host, self.client.proxy_port)
self._session.proxies = {'http': p, 'https': p}
return self._session
@retry_on_error(retry_api_error=True)
def do(self, api, params, https, raw=False):
url, params = self.prepare(api, params, https, raw)
try:
result = self.session.get(url=url,
params=params,
verify=self.verify_ssl,
cert=self.client.cert)
except (ConnectionError, Timeout) as e:
logger.exception('pyelong catches ConnectionError or Timeout, '
'url: %s, params: %s', url, params)
raise RetryableException('ConnectionError or Timeout: %s' % e)
except RequestException as e:
logger.exception('pyelong catches RequestException, url: %s,'
' params: %s', url, params)
raise ElongException('RequestException: %s' % e)
except Exception as e:
logger.exception('pyelong catches unknown exception, url: %s, '
'params: %s', url, params)
raise ElongException('unknown exception: %s' % e)
resp = RequestsResponse(result)
self.timing(api, resp.request_time)
return self.check_response(resp)
class AsyncRequest(Request):
@property
def proxy_config(self):
if not getattr(self, '_proxy_config', None):
if self.client.proxy_host and self.client.proxy_port:
self._proxy_config = {
'proxy_host': self.client.proxy_host,
'proxy_port': self.client.proxy_port
}
else:
self._proxy_config = {}
return self._proxy_config
@staticmethod
def _encode_params(data):
"""
:param dict data: params
Taken from requests.models.RequestEncodingMixin._encode_params
"""
result = []
for k, vs in data.iteritems():
if isinstance(vs, basestring) or not hasattr(vs, '__iter__'):
vs = [vs]
for v in vs:
if v is not None:
result.append(
(k.encode('utf-8') if isinstance(k, str) else k,
v.encode('utf-8') if isinstance(v, str) else v))
return urllib.urlencode(result, doseq=True)
def _prepare_url(self, url, params):
if url.endswith('/'):
url = url.strip('/')
return '%s?%s' % (url, self._encode_params(params))
@gen.coroutine
def do(self, api, params, https, raw=False):
url, params = self.prepare(api, params, https, raw)
# use the default SimpleAsyncHTTPClient
resp = yield AsyncHTTPClient().fetch(self._prepare_url(url, params),
validate_cert=self.verify_ssl,
ca_certs=self.client.cert,
**self.proxy_config)
resp = TornadoResponse(resp)
self.timing(api, resp.request_time)
raise gen.Return(self.check_response(resp))
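# --- Illustrative signature sketch (not part of this module) ---
# A hedged restatement of the signing scheme implemented by Request.signature()
# above, written out as a standalone function for clarity. The key values in
# the usage comment are placeholders.
def example_signature(data, timestamp, app_key, secret_key):
    inner = hashlib.md5((data + app_key).encode('utf-8')).hexdigest()
    return hashlib.md5(
        ("%s%s%s" % (timestamp, inner, secret_key)).encode('utf-8')
    ).hexdigest()
# e.g. example_signature('{"Version":"1.0"}', '1500000000', 'app-key', 'secret-key')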
|
mit
| -2,665,918,147,271,490,000
| 35.466667
| 78
| 0.553266
| false
| 4.172677
| true
| false
| false
|
lordmos/blink
|
Source/bindings/scripts/unstable/idl_compiler.py
|
1
|
5668
|
#!/usr/bin/python
# Copyright (C) 2013 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Compile an .idl file to Blink V8 bindings (.h and .cpp files).
FIXME: Not currently used in build.
This is a rewrite of the Perl IDL compiler in Python, but is not complete.
Once it is complete, we will switch all IDL files over to Python at once.
Until then, please work on the Perl IDL compiler.
For details, see bug http://crbug.com/239771
"""
import optparse
import os
import pickle
import posixpath
import shlex
import sys
import code_generator_v8
import idl_reader
module_path, _ = os.path.split(__file__)
source_path = os.path.normpath(os.path.join(module_path, os.pardir, os.pardir, os.pardir))
def parse_options():
parser = optparse.OptionParser()
parser.add_option('--additional-idl-files')
# FIXME: The --dump-json-and-pickle option is only for debugging and will
# be removed once we complete migrating all IDL files from the Perl flow to
# the Python flow.
parser.add_option('--dump-json-and-pickle', action='store_true', default=False)
parser.add_option('--idl-attributes-file')
parser.add_option('--include', dest='idl_directories', action='append')
parser.add_option('--output-directory')
parser.add_option('--interface-dependencies-file')
parser.add_option('--verbose', action='store_true', default=False)
parser.add_option('--write-file-only-if-changed', type='int')
# ensure output comes last, so command line easy to parse via regexes
parser.disable_interspersed_args()
options, args = parser.parse_args()
if options.output_directory is None:
parser.error('Must specify output directory using --output-directory.')
if options.additional_idl_files is None:
options.additional_idl_files = []
else:
# additional_idl_files is passed as a string with varied (shell-style)
# quoting, hence needs parsing.
options.additional_idl_files = shlex.split(options.additional_idl_files)
if len(args) != 1:
parser.error('Must specify exactly 1 input file as argument, but %d given.' % len(args))
options.idl_filename = os.path.realpath(args[0])
return options
def get_relative_dir_posix(filename):
"""Returns directory of a local file relative to Source, in POSIX format."""
relative_path_local = os.path.relpath(filename, source_path)
relative_dir_local = os.path.dirname(relative_path_local)
return relative_dir_local.replace(os.path.sep, posixpath.sep)
def write_json_and_pickle(definitions, interface_name, output_directory):
json_string = definitions.to_json()
json_basename = interface_name + '.json'
json_filename = os.path.join(output_directory, json_basename)
with open(json_filename, 'w') as json_file:
json_file.write(json_string)
pickle_basename = interface_name + '.pkl'
pickle_filename = os.path.join(output_directory, pickle_basename)
with open(pickle_filename, 'wb') as pickle_file:
pickle.dump(definitions, pickle_file)
def main():
options = parse_options()
idl_filename = options.idl_filename
basename = os.path.basename(idl_filename)
interface_name, _ = os.path.splitext(basename)
output_directory = options.output_directory
verbose = options.verbose
if verbose:
print idl_filename
relative_dir_posix = get_relative_dir_posix(idl_filename)
reader = idl_reader.IdlReader(options.interface_dependencies_file, options.additional_idl_files, options.idl_attributes_file, output_directory, verbose)
definitions = reader.read_idl_definitions(idl_filename)
code_generator = code_generator_v8.CodeGeneratorV8(definitions, interface_name, options.output_directory, relative_dir_posix, options.idl_directories, verbose)
if not definitions:
# We generate dummy .h and .cpp files just to tell build scripts
# that outputs have been created.
code_generator.write_dummy_header_and_cpp()
return
if options.dump_json_and_pickle:
write_json_and_pickle(definitions, interface_name, output_directory)
return
code_generator.write_header_and_cpp()
if __name__ == '__main__':
sys.exit(main())
|
mit
| 4,028,016,241,521,882,600
| 42.937984
| 163
| 0.728652
| false
| 3.971969
| false
| false
| false
|
zhlinh/leetcode
|
0173.Binary Search Tree Iterator/test.py
|
1
|
1230
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from solution import TreeNode
from solution import BSTIterator
def constructOne(s):
s = s.strip()
if s == '#':
return None
else:
return TreeNode(int(s))
def createTree(tree):
q = []
tree = tree.split(",")
root = constructOne(tree[0]);
q.append(root);
idx = 1;
while q:
tn = q.pop(0)
if not tn:
continue
if idx == len(tree):
break
left = constructOne(tree[idx])
tn.left = left
q.append(left)
idx += 1
if idx == len(tree):
break
right = constructOne(tree[idx])
idx += 1
tn.right = right
q.append(right)
return root
def printNode(tn, indent):
sb = ""
for i in range(indent):
sb += "\t"
sb += str(tn.val)
print(sb)
def printTree(root, indent):
if not root:
return
printTree(root.right, indent + 1)
printNode(root, indent)
printTree(root.left, indent + 1)
# root = createTree("1, 2, 5, 3, 4, #, 6")
root = createTree("4, 3, 5, 2, #, #, 7")
i, v = BSTIterator(root), []
while i.hasNext():
v.append(i.next())
for node in v:
print(node.val)
|
apache-2.0
| -6,259,950,088,596,226,000
| 20.206897
| 43
| 0.523577
| false
| 3.245383
| false
| false
| false
|
spl0k/supysonic
|
tests/base/test_cache.py
|
1
|
8007
|
# This file is part of Supysonic.
# Supysonic is a Python implementation of the Subsonic server API.
#
# Copyright (C) 2018 Alban 'spl0k' Féron
# 2018-2019 Carey 'pR0Ps' Metcalfe
#
# Distributed under terms of the GNU AGPLv3 license.
import os
import unittest
import shutil
import time
import tempfile
from supysonic.cache import Cache, CacheMiss, ProtectedError
class CacheTestCase(unittest.TestCase):
def setUp(self):
self.__dir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.__dir)
def test_existing_files_order(self):
cache = Cache(self.__dir, 30)
val = b"0123456789"
cache.set("key1", val)
cache.set("key2", val)
cache.set("key3", val)
self.assertEqual(cache.size, 30)
# file mtime is accurate to the second
time.sleep(1)
cache.get_value("key1")
cache = Cache(self.__dir, 30, min_time=0)
self.assertEqual(cache.size, 30)
self.assertTrue(cache.has("key1"))
self.assertTrue(cache.has("key2"))
self.assertTrue(cache.has("key3"))
cache.set("key4", val)
self.assertEqual(cache.size, 30)
self.assertTrue(cache.has("key1"))
self.assertFalse(cache.has("key2"))
self.assertTrue(cache.has("key3"))
self.assertTrue(cache.has("key4"))
def test_missing(self):
cache = Cache(self.__dir, 10)
self.assertFalse(cache.has("missing"))
with self.assertRaises(CacheMiss):
cache.get_value("missing")
def test_delete_missing(self):
cache = Cache(self.__dir, 0, min_time=0)
cache.delete("missing1")
cache.delete("missing2")
def test_store_literal(self):
cache = Cache(self.__dir, 10)
val = b"0123456789"
cache.set("key", val)
self.assertEqual(cache.size, 10)
self.assertTrue(cache.has("key"))
self.assertEqual(cache.get_value("key"), val)
def test_store_generated(self):
cache = Cache(self.__dir, 10)
val = [b"0", b"12", b"345", b"6789"]
def gen():
yield from val
t = []
for x in cache.set_generated("key", gen):
t.append(x)
self.assertEqual(cache.size, 0)
self.assertFalse(cache.has("key"))
self.assertEqual(t, val)
self.assertEqual(cache.size, 10)
self.assertEqual(cache.get_value("key"), b"".join(val))
def test_store_to_fp(self):
cache = Cache(self.__dir, 10)
val = b"0123456789"
with cache.set_fileobj("key") as fp:
fp.write(val)
self.assertEqual(cache.size, 0)
self.assertEqual(cache.size, 10)
self.assertEqual(cache.get_value("key"), val)
def test_access_data(self):
cache = Cache(self.__dir, 25, min_time=0)
val = b"0123456789"
cache.set("key", val)
self.assertEqual(cache.get_value("key"), val)
with cache.get_fileobj("key") as f:
self.assertEqual(f.read(), val)
with open(cache.get("key"), "rb") as f:
self.assertEqual(f.read(), val)
def test_accessing_preserves(self):
cache = Cache(self.__dir, 25, min_time=0)
val = b"0123456789"
cache.set("key1", val)
cache.set("key2", val)
self.assertEqual(cache.size, 20)
cache.get_value("key1")
cache.set("key3", val)
self.assertEqual(cache.size, 20)
self.assertTrue(cache.has("key1"))
self.assertFalse(cache.has("key2"))
self.assertTrue(cache.has("key3"))
def test_automatic_delete_oldest(self):
cache = Cache(self.__dir, 25, min_time=0)
val = b"0123456789"
cache.set("key1", val)
self.assertTrue(cache.has("key1"))
self.assertEqual(cache.size, 10)
cache.set("key2", val)
self.assertEqual(cache.size, 20)
self.assertTrue(cache.has("key1"))
self.assertTrue(cache.has("key2"))
cache.set("key3", val)
self.assertEqual(cache.size, 20)
self.assertFalse(cache.has("key1"))
self.assertTrue(cache.has("key2"))
self.assertTrue(cache.has("key3"))
def test_delete(self):
cache = Cache(self.__dir, 25, min_time=0)
val = b"0123456789"
cache.set("key1", val)
self.assertTrue(cache.has("key1"))
self.assertEqual(cache.size, 10)
cache.delete("key1")
self.assertFalse(cache.has("key1"))
self.assertEqual(cache.size, 0)
def test_cleanup_on_error(self):
cache = Cache(self.__dir, 10)
def gen():
# Cause a TypeError halfway through
yield from [b"0", b"12", object(), b"345", b"6789"]
with self.assertRaises(TypeError):
for x in cache.set_generated("key", gen):
pass
# Make sure no partial files are left after the error
self.assertEqual(list(os.listdir(self.__dir)), list())
def test_parallel_generation(self):
cache = Cache(self.__dir, 20)
def gen():
yield from [b"0", b"12", b"345", b"6789"]
g1 = cache.set_generated("key", gen)
g2 = cache.set_generated("key", gen)
next(g1)
files = os.listdir(self.__dir)
self.assertEqual(len(files), 1)
for x in files:
self.assertTrue(x.endswith(".part"))
next(g2)
files = os.listdir(self.__dir)
self.assertEqual(len(files), 2)
for x in files:
self.assertTrue(x.endswith(".part"))
self.assertEqual(cache.size, 0)
for x in g1:
pass
self.assertEqual(cache.size, 10)
self.assertTrue(cache.has("key"))
# Replace the file - size should stay the same
for x in g2:
pass
self.assertEqual(cache.size, 10)
self.assertTrue(cache.has("key"))
# Only a single file
self.assertEqual(len(os.listdir(self.__dir)), 1)
def test_replace(self):
cache = Cache(self.__dir, 20)
val_small = b"0"
val_big = b"0123456789"
cache.set("key", val_small)
self.assertEqual(cache.size, 1)
cache.set("key", val_big)
self.assertEqual(cache.size, 10)
cache.set("key", val_small)
self.assertEqual(cache.size, 1)
def test_no_auto_prune(self):
cache = Cache(self.__dir, 10, min_time=0, auto_prune=False)
val = b"0123456789"
cache.set("key1", val)
cache.set("key2", val)
cache.set("key3", val)
cache.set("key4", val)
self.assertEqual(cache.size, 40)
cache.prune()
self.assertEqual(cache.size, 10)
def test_min_time_clear(self):
cache = Cache(self.__dir, 40, min_time=1)
val = b"0123456789"
cache.set("key1", val)
cache.set("key2", val)
time.sleep(1)
cache.set("key3", val)
cache.set("key4", val)
self.assertEqual(cache.size, 40)
cache.clear()
self.assertEqual(cache.size, 20)
time.sleep(1)
cache.clear()
self.assertEqual(cache.size, 0)
def test_not_expired(self):
cache = Cache(self.__dir, 40, min_time=1)
val = b"0123456789"
cache.set("key1", val)
with self.assertRaises(ProtectedError):
cache.delete("key1")
time.sleep(1)
cache.delete("key1")
self.assertEqual(cache.size, 0)
def test_missing_cache_file(self):
cache = Cache(self.__dir, 10, min_time=0)
val = b"0123456789"
os.remove(cache.set("key", val))
self.assertEqual(cache.size, 10)
self.assertFalse(cache.has("key"))
self.assertEqual(cache.size, 0)
os.remove(cache.set("key", val))
self.assertEqual(cache.size, 10)
with self.assertRaises(CacheMiss):
cache.get("key")
self.assertEqual(cache.size, 0)
if __name__ == "__main__":
unittest.main()
|
agpl-3.0
| 539,294,049,317,192,400
| 28.112727
| 67
| 0.569323
| false
| 3.558222
| true
| false
| false
|
cpn18/track-chart
|
desktop/gps_smoothing.py
|
1
|
1313
|
import sys
import json
import math
THRESHOLD = 10
data = []
with open(sys.argv[1], "r") as f:
used = count = 0
for line in f:
if line[0] == "#":
continue
items = line.split()
if items[1] == "TPV":
obj = json.loads(" ".join(items[2:-1]))
obj['used'] = used
obj['count'] = count
elif items[1] == "SKY":
obj = json.loads(" ".join(items[2:-1]))
used = 0
count = len(obj['satellites'])
for i in range(0, count):
if obj['satellites'][i]['used']:
used += 1
continue
else:
continue
if used >= THRESHOLD and 'lon' in obj and 'lat' in obj:
data.append(obj)
print("Longitude Latitude dx epx dy epy used count")
for i in range(1, len(data)):
    dx = abs((data[i]['lon'] - data[i-1]['lon']) * 111120 * math.cos(math.radians(data[i]['lat'])))  # degrees to meters, scaled by cos(latitude) for longitude
dy = abs((data[i]['lat'] - data[i-1]['lat']) * 111128) # degrees to meters
try:
if dx > 3*data[i]['epx'] or dy > 3*data[i]['epy']:
continue
print("%f %f %f %f %f %f %d %d" % (data[i]['lon'], data[i]['lat'], dx, data[i]['epx'], dy, data[i]['epy'], data[i]['used'], data[i]['count']))
except KeyError:
pass
|
gpl-3.0
| -8,348,637,999,380,964,000
| 29.534884
| 150
| 0.476009
| false
| 3.241975
| false
| false
| false
|
kgori/treeCl
|
treeCl/parutils.py
|
1
|
9550
|
from abc import ABCMeta, abstractmethod
from .constants import PARALLEL_PROFILE
from .utils import setup_progressbar, grouper, flatten_list
import logging
import multiprocessing
import sys
logger = logging.getLogger(__name__)
__author__ = 'kgori'
"""
Introduced this workaround for a bug in multiprocessing where
errors are thrown for an EINTR interrupt.
Workaround taken from http://stackoverflow.com/a/5395277 - but
changed because can't subclass from multiprocessing.Queue (it's
a factory method)
"""
import errno
def retry_on_eintr(function, *args, **kw):
while True:
try:
return function(*args, **kw)
except IOError as e:
if e.errno == errno.EINTR:
continue
else:
raise
def get_from_queue(queue, block=True, timeout=None):
return retry_on_eintr(queue.get, block, timeout)
"""
End of workaround
"""
def fun(f, q_in, q_out):
while True:
(i, x) = get_from_queue(q_in)
if i is None:
break
q_out.put((i, f(*x)))
def async_avail():
from IPython import parallel
try:
client = parallel.Client(PARALLEL_PROFILE)
return len(client) > 0
except IOError:
return False
except Exception:
return False
def get_client():
from IPython import parallel
try:
client = parallel.Client(profile=PARALLEL_PROFILE)
return client if len(client) > 0 else None
except IOError:
return None
except Exception:
return None
def tupleise(args):
for a in args:
if isinstance(a, (tuple, list)):
yield a
else:
yield (a,)
def get_njobs(nargs, args):
if nargs is not None:
njobs = nargs
elif isinstance(args, (tuple, list)):
njobs = len(args)
else:
njobs = int(sys.maxsize / 1000000) # sys.maxsize is too large for progressbar to display ETA (datetime issue)
return njobs
def parallel_map(client, task, args, message, batchsize=1, background=False, nargs=None):
"""
Helper to map a function over a sequence of inputs, in parallel, with progress meter.
:param client: IPython.parallel.Client instance
:param task: Function
:param args: Must be a list of tuples of arguments that the task function will be mapped onto.
If the function takes a single argument, it still must be a 1-tuple.
:param message: String for progress bar
:param batchsize: Jobs are shipped in batches of this size. Higher numbers mean less network traffic,
but longer execution time per job.
:return: IPython.parallel.AsyncMapResult
"""
show_progress = bool(message)
njobs = get_njobs(nargs, args)
nproc = len(client)
logger.debug('parallel_map: len(client) = {}'.format(len(client)))
view = client.load_balanced_view()
if show_progress:
message += ' (IP:{}w:{}b)'.format(nproc, batchsize)
pbar = setup_progressbar(message, njobs, simple_progress=True)
if not background:
pbar.start()
map_result = view.map(task, *list(zip(*args)), chunksize=batchsize)
if background:
return map_result, client
while not map_result.ready():
map_result.wait(1)
if show_progress:
pbar.update(min(njobs, map_result.progress * batchsize))
if show_progress:
pbar.finish()
return map_result
def sequential_map(task, args, message, nargs=None):
"""
Helper to map a function over a sequence of inputs, sequentially, with progress meter.
:param client: IPython.parallel.Client instance
:param task: Function
:param args: Must be a list of tuples of arguments that the task function will be mapped onto.
If the function takes a single argument, it still must be a 1-tuple.
:param message: String for progress bar
:param batchsize: Jobs are shipped in batches of this size. Higher numbers mean less network traffic,
but longer execution time per job.
:return: IPython.parallel.AsyncMapResult
"""
njobs = get_njobs(nargs, args)
show_progress = bool(message)
if show_progress:
pbar = setup_progressbar(message, njobs, simple_progress=True)
pbar.start()
map_result = []
for (i, arglist) in enumerate(tupleise(args), start=1):
map_result.append(task(*arglist))
if show_progress:
pbar.update(i)
if show_progress:
pbar.finish()
return map_result
def threadpool_map(task, args, message, concurrency, batchsize=1, nargs=None):
"""
Helper to map a function over a range of inputs, using a threadpool, with a progress meter
"""
import concurrent.futures
njobs = get_njobs(nargs, args)
show_progress = bool(message)
batches = grouper(batchsize, tupleise(args))
batched_task = lambda batch: [task(*job) for job in batch]
if show_progress:
message += ' (TP:{}w:{}b)'.format(concurrency, batchsize)
pbar = setup_progressbar(message, njobs, simple_progress=True)
pbar.start()
with concurrent.futures.ThreadPoolExecutor(max_workers=concurrency) as executor:
futures = []
completed_count = 0
for batch in batches:
futures.append(executor.submit(batched_task, batch))
if show_progress:
for i, fut in enumerate(concurrent.futures.as_completed(futures), start=1):
completed_count += len(fut.result())
pbar.update(completed_count)
else:
concurrent.futures.wait(futures)
if show_progress:
pbar.finish()
return flatten_list([fut.result() for fut in futures])
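# --- Editor's sketch (hedged): minimal usage of threadpool_map above, not called anywhere.
# The task name is illustrative only; threads mainly help for I/O-bound work that releases the GIL.
def _example_threadpool_map_usage():
    """Illustrative only: 100 toy jobs on 4 worker threads, shipped in batches of 10."""
    import time
    def io_bound_task(x):
        time.sleep(0.01)  # stands in for I/O-bound work
        return x + 1
    args = [(i,) for i in range(100)]
    return threadpool_map(io_bound_task, args, message='demo', concurrency=4, batchsize=10)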
def processpool_map(task, args, message, concurrency, batchsize=1, nargs=None):
"""
See http://stackoverflow.com/a/16071616
"""
njobs = get_njobs(nargs, args)
show_progress = bool(message)
batches = grouper(batchsize, tupleise(args))
def batched_task(*batch):
return [task(*job) for job in batch]
if show_progress:
message += ' (PP:{}w:{}b)'.format(concurrency, batchsize)
pbar = setup_progressbar(message, njobs, simple_progress=True)
pbar.start()
q_in = multiprocessing.Queue() # Should I limit either queue size? Limiting in-queue
q_out = multiprocessing.Queue() # increases time taken to send jobs, makes pbar less useful
proc = [multiprocessing.Process(target=fun, args=(batched_task, q_in, q_out)) for _ in range(concurrency)]
for p in proc:
p.daemon = True
p.start()
sent = [q_in.put((i, x)) for (i, x) in enumerate(batches)]
[q_in.put((None, None)) for _ in range(concurrency)]
res = []
completed_count = 0
for _ in range(len(sent)):
result = get_from_queue(q_out)
res.append(result)
completed_count += len(result[1])
if show_progress:
pbar.update(completed_count)
[p.join() for p in proc]
if show_progress:
pbar.finish()
return flatten_list([x for (i, x) in sorted(res)])
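# --- Editor's note: `fun` and `get_from_queue` used above are defined elsewhere in this module
# (see the Stack Overflow answer cited in the docstring). The sketch below is NOT the author's
# implementation; it only illustrates the queue protocol that processpool_map relies on: workers
# pull (index, batch) items from q_in, stop at the (None, None) sentinel, and push (index, results)
# onto q_out so the caller can restore ordering with sorted().
def _example_worker_loop(f, q_in, q_out):  # hypothetical stand-in for `fun`, never started here
    while True:
        i, x = q_in.get()
        if i is None:  # (None, None) sentinel means no more batches
            break
        q_out.put((i, f(*x)))  # the real `fun` may unpack the batch differently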
class JobHandler(object):
"""
Base class to provide uniform interface for all job handlers
"""
    __metaclass__ = ABCMeta  # must be __metaclass__ (Python 2) for ABCMeta to take effect; on Python 3 pass metaclass=ABCMeta in the class statement
@abstractmethod
def __call__(self, task, args, message, batchsize):
""" If you define a message, then progress will be written to stderr """
pass
class SequentialJobHandler(JobHandler):
"""
Jobs are handled using a simple map
"""
def __call__(self, task, args, message, batchsize, nargs=None):
if batchsize > 1:
logger.warn("Setting batchsize > 1 has no effect when using a SequentialJobHandler")
return sequential_map(task, args, message, nargs)
class ThreadpoolJobHandler(JobHandler):
"""
Jobs are handled by a threadpool using concurrent.futures
"""
def __init__(self, concurrency):
self.concurrency = concurrency
def __call__(self, task, args, message, batchsize, nargs=None):
return threadpool_map(task, args, message, self.concurrency, batchsize, nargs)
class ProcesspoolJobHandler(JobHandler):
"""
    Jobs are handled by a pool of worker processes using multiprocessing
"""
def __init__(self, concurrency):
self.concurrency = concurrency
def __call__(self, task, args, message, batchsize, nargs=None):
return processpool_map(task, args, message, self.concurrency, batchsize, nargs)
class IPythonJobHandler(JobHandler):
"""
Jobs are handled using an IPython.parallel.Client
"""
def __init__(self, profile=None):
"""
Initialise the IPythonJobHandler using the given ipython profile.
Parameters
----------
profile: string
The ipython profile to connect to - this should already be running an ipcluster
If the connection fails it raises a RuntimeError
"""
import IPython.parallel
try:
self.client=IPython.parallel.Client(profile=profile)
logger.debug('__init__: len(client) = {}'.format(len(self.client)))
except (IOError, IPython.parallel.TimeoutError):
msg = 'Could not obtain an IPython parallel Client using profile "{}"'.format(profile)
logger.error(msg)
raise RuntimeError(msg)
def __call__(self, task, args, message, batchsize):
logger.debug('__call__: len(client) = {}'.format(len(self.client)))
return list(parallel_map(self.client, task, args, message, batchsize))
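# --- Editor's sketch (hedged): how the JobHandler hierarchy above is typically used. Not called
# anywhere; the task and argument values are illustrative only.
def _example_job_handler_usage(use_processes=False):
    """Illustrative only: pick a handler, then call it with (task, args, message, batchsize)."""
    handler = ProcesspoolJobHandler(4) if use_processes else SequentialJobHandler()
    args = [(i,) for i in range(10)]
    return handler(abs, args, message='demo', batchsize=1)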
|
mit
| 559,474,478,483,958,140
| 32.745583
| 118
| 0.638325
| false
| 3.926809
| false
| false
| false
|
anbangr/trusted-juju
|
juju/unit/tests/test_charm.py
|
1
|
5922
|
from functools import partial
import os
import shutil
from twisted.internet.defer import inlineCallbacks, returnValue, succeed, fail
from twisted.web.error import Error
from twisted.web.client import downloadPage
from juju.charm import get_charm_from_path
from juju.charm.bundle import CharmBundle
from juju.charm.publisher import CharmPublisher
from juju.charm.tests import local_charm_id
from juju.charm.tests.test_directory import sample_directory
from juju.errors import FileNotFound
from juju.lib import under
from juju.state.errors import CharmStateNotFound
from juju.state.tests.common import StateTestBase
from juju.unit.charm import download_charm
from juju.lib.mocker import MATCH
class CharmPublisherTestBase(StateTestBase):
@inlineCallbacks
def setUp(self):
yield super(CharmPublisherTestBase, self).setUp()
yield self.push_default_config()
self.provider = self.config.get_default().get_machine_provider()
self.storage = self.provider.get_file_storage()
@inlineCallbacks
def publish_charm(self, charm_path=sample_directory):
charm = get_charm_from_path(charm_path)
publisher = CharmPublisher(self.client, self.storage)
yield publisher.add_charm(local_charm_id(charm), charm)
charm_states = yield publisher.publish()
returnValue((charm, charm_states[0]))
class DownloadTestCase(CharmPublisherTestBase):
@inlineCallbacks
def test_charm_download_file(self):
"""Downloading a charm should store the charm locally.
"""
charm, charm_state = yield self.publish_charm()
charm_directory = self.makeDir()
# Download the charm
yield download_charm(
self.client, charm_state.id, charm_directory)
# Verify the downloaded copy
checksum = charm.get_sha256()
charm_id = local_charm_id(charm)
charm_key = under.quote("%s:%s" % (charm_id, checksum))
charm_path = os.path.join(charm_directory, charm_key)
self.assertTrue(os.path.exists(charm_path))
bundle = CharmBundle(charm_path)
self.assertEquals(bundle.get_revision(), charm.get_revision())
self.assertEqual(checksum, bundle.get_sha256())
@inlineCallbacks
def test_charm_missing_download_file(self):
"""Downloading a file that doesn't exist raises FileNotFound.
"""
charm, charm_state = yield self.publish_charm()
charm_directory = self.makeDir()
# Delete the file
file_path = charm_state.bundle_url[len("file://"):]
os.remove(file_path)
# Download the charm
yield self.assertFailure(
download_charm(self.client, charm_state.id, charm_directory),
FileNotFound)
@inlineCallbacks
def test_charm_download_http(self):
"""Downloading a charm should store the charm locally.
"""
mock_storage = self.mocker.patch(self.storage)
def match_string(expected, value):
self.assertTrue(isinstance(value, basestring))
self.assertIn(expected, value)
return True
mock_storage.get_url(MATCH(
partial(match_string, "local_3a_series_2f_dummy-1")))
self.mocker.result("http://example.com/foobar.zip")
download_page = self.mocker.replace(downloadPage)
download_page(
MATCH(partial(match_string, "http://example.com/foobar.zip")),
MATCH(partial(match_string, "local_3a_series_2f_dummy-1")))
def bundle_in_place(url, local_path):
# must keep ref to charm else temp file goes out of scope.
charm = get_charm_from_path(sample_directory)
bundle = charm.as_bundle()
shutil.copyfile(bundle.path, local_path)
self.mocker.call(bundle_in_place)
self.mocker.result(succeed(True))
self.mocker.replay()
charm, charm_state = yield self.publish_charm()
charm_directory = self.makeDir()
self.assertEqual(
charm_state.bundle_url, "http://example.com/foobar.zip")
# Download the charm
yield download_charm(
self.client, charm_state.id, charm_directory)
@inlineCallbacks
def test_charm_download_http_error(self):
"""Errors in donwloading a charm are reported as charm not found.
"""
def match_string(expected, value):
self.assertTrue(isinstance(value, basestring))
self.assertIn(expected, value)
return True
mock_storage = self.mocker.patch(self.storage)
mock_storage.get_url(
MATCH(partial(match_string, "local_3a_series_2f_dummy-1")))
remote_url = "http://example.com/foobar.zip"
self.mocker.result(remote_url)
download_page = self.mocker.replace(downloadPage)
download_page(
MATCH(partial(match_string, "http://example.com/foobar.zip")),
MATCH(partial(match_string, "local_3a_series_2f_dummy-1")))
self.mocker.result(fail(Error("400", "Bad Stuff", "")))
self.mocker.replay()
charm, charm_state = yield self.publish_charm()
charm_directory = self.makeDir()
self.assertEqual(charm_state.bundle_url, remote_url)
error = yield self.assertFailure(
download_charm(self.client, charm_state.id, charm_directory),
FileNotFound)
self.assertIn(remote_url, str(error))
@inlineCallbacks
def test_charm_download_not_found(self):
"""An error is raised if trying to download a non existant charm.
"""
charm_directory = self.makeDir()
# Download the charm
error = yield self.assertFailure(
download_charm(
self.client, "local:mickey-21", charm_directory),
CharmStateNotFound)
self.assertEquals(str(error), "Charm 'local:mickey-21' was not found")
|
agpl-3.0
| -8,211,216,513,305,947,000
| 34.461078
| 78
| 0.651807
| false
| 3.733922
| true
| false
| false
|
bcantoni/ccm
|
ccmlib/dse_node.py
|
1
|
22234
|
# ccm node
from __future__ import absolute_import, with_statement
import os
import re
import shutil
import signal
import stat
import subprocess
import time
import yaml
from six import iteritems, print_
from ccmlib import common, extension, repository
from ccmlib.node import (Node, NodeError, ToolError,
handle_external_tool_process)
class DseNode(Node):
"""
Provides interactions to a DSE node.
"""
def __init__(self, name, cluster, auto_bootstrap, thrift_interface, storage_interface, jmx_port, remote_debug_port, initial_token, save=True, binary_interface=None, byteman_port='0', environment_variables=None):
super(DseNode, self).__init__(name, cluster, auto_bootstrap, thrift_interface, storage_interface, jmx_port, remote_debug_port, initial_token, save, binary_interface, byteman_port, environment_variables=environment_variables)
self.get_cassandra_version()
self._dse_config_options = {}
if self.cluster.hasOpscenter():
self._copy_agent()
def get_install_cassandra_root(self):
return os.path.join(self.get_install_dir(), 'resources', 'cassandra')
def get_node_cassandra_root(self):
return os.path.join(self.get_path(), 'resources', 'cassandra')
def get_conf_dir(self):
"""
Returns the path to the directory where Cassandra config are located
"""
return os.path.join(self.get_path(), 'resources', 'cassandra', 'conf')
def get_tool(self, toolname):
return common.join_bin(os.path.join(self.get_install_dir(), 'resources', 'cassandra'), 'bin', toolname)
def get_tool_args(self, toolname):
return [common.join_bin(os.path.join(self.get_install_dir(), 'resources', 'cassandra'), 'bin', 'dse'), toolname]
def get_env(self):
(node_ip, _) = self.network_interfaces['binary']
return common.make_dse_env(self.get_install_dir(), self.get_path(), node_ip)
def get_cassandra_version(self):
return common.get_dse_cassandra_version(self.get_install_dir())
def node_setup(self, version, verbose):
dir, v = repository.setup_dse(version, self.cluster.dse_username, self.cluster.dse_password, verbose=verbose)
return dir
def set_workloads(self, workloads):
self.workloads = workloads
self._update_config()
if 'solr' in self.workloads:
self.__generate_server_xml()
if 'graph' in self.workloads:
(node_ip, _) = self.network_interfaces['binary']
conf_file = os.path.join(self.get_path(), 'resources', 'dse', 'conf', 'dse.yaml')
with open(conf_file, 'r') as f:
data = yaml.load(f)
graph_options = data['graph']
graph_options['gremlin_server']['host'] = node_ip
self.set_dse_configuration_options({'graph': graph_options})
self.__update_gremlin_config_yaml()
if 'dsefs' in self.workloads:
dsefs_options = {'dsefs_options': {'enabled': True,
'work_dir': os.path.join(self.get_path(), 'dsefs'),
'data_directories': [{'dir': os.path.join(self.get_path(), 'dsefs', 'data')}]}}
self.set_dse_configuration_options(dsefs_options)
if 'spark' in self.workloads:
self._update_spark_env()
def set_dse_configuration_options(self, values=None):
if values is not None:
self._dse_config_options = common.merge_configuration(self._dse_config_options, values)
self.import_dse_config_files()
def watch_log_for_alive(self, nodes, from_mark=None, timeout=720, filename='system.log'):
"""
Watch the log of this node until it detects that the provided other
nodes are marked UP. This method works similarly to watch_log_for_death.
We want to provide a higher default timeout when this is called on DSE.
"""
super(DseNode, self).watch_log_for_alive(nodes, from_mark=from_mark, timeout=timeout, filename=filename)
def get_launch_bin(self):
cdir = self.get_install_dir()
launch_bin = common.join_bin(cdir, 'bin', 'dse')
        # Copy back the dse script since profiling may have modified it the previous time
shutil.copy(launch_bin, self.get_bin_dir())
return common.join_bin(self.get_path(), 'bin', 'dse')
def add_custom_launch_arguments(self, args):
args.append('cassandra')
for workload in self.workloads:
if 'hadoop' in workload:
args.append('-t')
if 'solr' in workload:
args.append('-s')
if 'spark' in workload:
args.append('-k')
if 'cfs' in workload:
args.append('-c')
if 'graph' in workload:
args.append('-g')
def start(self,
join_ring=True,
no_wait=False,
verbose=False,
update_pid=True,
wait_other_notice=True,
replace_token=None,
replace_address=None,
jvm_args=None,
wait_for_binary_proto=False,
profile_options=None,
use_jna=False,
quiet_start=False,
allow_root=False,
set_migration_task=True):
process = super(DseNode, self).start(join_ring, no_wait, verbose, update_pid, wait_other_notice, replace_token,
replace_address, jvm_args, wait_for_binary_proto, profile_options, use_jna,
quiet_start, allow_root, set_migration_task)
if self.cluster.hasOpscenter():
self._start_agent()
def _start_agent(self):
agent_dir = os.path.join(self.get_path(), 'datastax-agent')
if os.path.exists(agent_dir):
self._write_agent_address_yaml(agent_dir)
self._write_agent_log4j_properties(agent_dir)
args = [os.path.join(agent_dir, 'bin', common.platform_binary('datastax-agent'))]
subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
def stop(self, wait=True, wait_other_notice=False, signal_event=signal.SIGTERM, **kwargs):
if self.cluster.hasOpscenter():
self._stop_agent()
return super(DseNode, self).stop(wait=wait, wait_other_notice=wait_other_notice, signal_event=signal_event, **kwargs)
def _stop_agent(self):
agent_dir = os.path.join(self.get_path(), 'datastax-agent')
if os.path.exists(agent_dir):
pidfile = os.path.join(agent_dir, 'datastax-agent.pid')
if os.path.exists(pidfile):
with open(pidfile, 'r') as f:
pid = int(f.readline().strip())
f.close()
if pid is not None:
try:
os.kill(pid, signal.SIGKILL)
except OSError:
pass
os.remove(pidfile)
def nodetool(self, cmd, username=None, password=None, capture_output=True, wait=True):
if password is not None:
cmd = '-pw {} '.format(password) + cmd
if username is not None:
cmd = '-u {} '.format(username) + cmd
return super(DseNode, self).nodetool(cmd)
def dsetool(self, cmd):
env = self.get_env()
extension.append_to_client_env(self, env)
node_ip, binary_port = self.network_interfaces['binary']
dsetool = common.join_bin(self.get_install_dir(), 'bin', 'dsetool')
args = [dsetool, '-h', node_ip, '-j', str(self.jmx_port), '-c', str(binary_port)]
args += cmd.split()
p = subprocess.Popen(args, env=env, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
return handle_external_tool_process(p, args)
def dse(self, dse_options=None):
if dse_options is None:
dse_options = []
env = self.get_env()
extension.append_to_client_env(self, env)
env['JMX_PORT'] = self.jmx_port
dse = common.join_bin(self.get_install_dir(), 'bin', 'dse')
args = [dse]
args += dse_options
p = subprocess.Popen(args, env=env) #Don't redirect stdout/stderr, users need to interact with new process
return handle_external_tool_process(p, args)
def hadoop(self, hadoop_options=None):
if hadoop_options is None:
hadoop_options = []
env = self.get_env()
env['JMX_PORT'] = self.jmx_port
dse = common.join_bin(self.get_install_dir(), 'bin', 'dse')
args = [dse, 'hadoop']
args += hadoop_options
p = subprocess.Popen(args, env=env) #Don't redirect stdout/stderr, users need to interact with new process
return handle_external_tool_process(p, args)
def hive(self, hive_options=None):
if hive_options is None:
hive_options = []
env = self.get_env()
env['JMX_PORT'] = self.jmx_port
dse = common.join_bin(self.get_install_dir(), 'bin', 'dse')
args = [dse, 'hive']
args += hive_options
p = subprocess.Popen(args, env=env) #Don't redirect stdout/stderr, users need to interact with new process
return handle_external_tool_process(p, args)
def pig(self, pig_options=None):
if pig_options is None:
pig_options = []
env = self.get_env()
env['JMX_PORT'] = self.jmx_port
dse = common.join_bin(self.get_install_dir(), 'bin', 'dse')
args = [dse, 'pig']
args += pig_options
p = subprocess.Popen(args, env=env) #Don't redirect stdout/stderr, users need to interact with new process
return handle_external_tool_process(p, args)
def sqoop(self, sqoop_options=None):
if sqoop_options is None:
sqoop_options = []
env = self.get_env()
env['JMX_PORT'] = self.jmx_port
dse = common.join_bin(self.get_install_dir(), 'bin', 'dse')
args = [dse, 'sqoop']
args += sqoop_options
p = subprocess.Popen(args, env=env) #Don't redirect stdout/stderr, users need to interact with new process
return handle_external_tool_process(p, args)
def spark(self, spark_options=None):
if spark_options is None:
spark_options = []
env = self.get_env()
env['JMX_PORT'] = self.jmx_port
dse = common.join_bin(self.get_install_dir(), 'bin', 'dse')
args = [dse, 'spark']
args += spark_options
p = subprocess.Popen(args, env=env) #Don't redirect stdout/stderr, users need to interact with new process
return handle_external_tool_process(p, args)
def import_dse_config_files(self):
self._update_config()
if not os.path.isdir(os.path.join(self.get_path(), 'resources', 'dse', 'conf')):
os.makedirs(os.path.join(self.get_path(), 'resources', 'dse', 'conf'))
common.copy_directory(os.path.join(self.get_install_dir(), 'resources', 'dse', 'conf'), os.path.join(self.get_path(), 'resources', 'dse', 'conf'))
self.__update_yaml()
def copy_config_files(self):
for product in ['dse', 'cassandra', 'hadoop', 'hadoop2-client', 'sqoop', 'hive', 'tomcat', 'spark', 'shark', 'mahout', 'pig', 'solr', 'graph']:
src_conf = os.path.join(self.get_install_dir(), 'resources', product, 'conf')
dst_conf = os.path.join(self.get_path(), 'resources', product, 'conf')
if not os.path.isdir(src_conf):
continue
if os.path.isdir(dst_conf):
common.rmdirs(dst_conf)
shutil.copytree(src_conf, dst_conf)
if product == 'solr':
src_web = os.path.join(self.get_install_dir(), 'resources', product, 'web')
dst_web = os.path.join(self.get_path(), 'resources', product, 'web')
if os.path.isdir(dst_web):
common.rmdirs(dst_web)
shutil.copytree(src_web, dst_web)
if product == 'tomcat':
src_lib = os.path.join(self.get_install_dir(), 'resources', product, 'lib')
dst_lib = os.path.join(self.get_path(), 'resources', product, 'lib')
if os.path.isdir(dst_lib):
common.rmdirs(dst_lib)
if os.path.exists(src_lib):
shutil.copytree(src_lib, dst_lib)
src_webapps = os.path.join(self.get_install_dir(), 'resources', product, 'webapps')
dst_webapps = os.path.join(self.get_path(), 'resources', product, 'webapps')
if os.path.isdir(dst_webapps):
common.rmdirs(dst_webapps)
shutil.copytree(src_webapps, dst_webapps)
src_lib = os.path.join(self.get_install_dir(), 'resources', product, 'gremlin-console', 'conf')
dst_lib = os.path.join(self.get_path(), 'resources', product, 'gremlin-console', 'conf')
if os.path.isdir(dst_lib):
common.rmdirs(dst_lib)
if os.path.exists(src_lib):
shutil.copytree(src_lib, dst_lib)
def import_bin_files(self):
common.copy_directory(os.path.join(self.get_install_dir(), 'bin'), self.get_bin_dir())
cassandra_bin_dir = os.path.join(self.get_path(), 'resources', 'cassandra', 'bin')
shutil.rmtree(cassandra_bin_dir, ignore_errors=True)
os.makedirs(cassandra_bin_dir)
common.copy_directory(os.path.join(self.get_install_dir(), 'resources', 'cassandra', 'bin'), cassandra_bin_dir)
if os.path.exists(os.path.join(self.get_install_dir(), 'resources', 'cassandra', 'tools')):
cassandra_tools_dir = os.path.join(self.get_path(), 'resources', 'cassandra', 'tools')
shutil.rmtree(cassandra_tools_dir, ignore_errors=True)
shutil.copytree(os.path.join(self.get_install_dir(), 'resources', 'cassandra', 'tools'), cassandra_tools_dir)
self.export_dse_home_in_dse_env_sh()
def export_dse_home_in_dse_env_sh(self):
'''
Due to the way CCM lays out files, separating the repository
from the node(s) confs, the `dse-env.sh` script of each node
needs to have its DSE_HOME var set and exported. Since DSE
4.5.x, the stock `dse-env.sh` file includes a commented-out
place to do exactly this, intended for installers.
Basically: read in the file, write it back out and add the two
lines.
'sstableloader' is an example of a node script that depends on
this, when used in a CCM-built cluster.
'''
with open(self.get_bin_dir() + "/dse-env.sh", "r") as dse_env_sh:
buf = dse_env_sh.readlines()
with open(self.get_bin_dir() + "/dse-env.sh", "w") as out_file:
for line in buf:
out_file.write(line)
if line == "# This is here so the installer can force set DSE_HOME\n":
out_file.write("DSE_HOME=" + self.get_install_dir() + "\nexport DSE_HOME\n")
def _update_log4j(self):
super(DseNode, self)._update_log4j()
conf_file = os.path.join(self.get_conf_dir(), common.LOG4J_CONF)
append_pattern = 'log4j.appender.V.File='
log_file = os.path.join(self.get_path(), 'logs', 'solrvalidation.log')
if common.is_win():
log_file = re.sub("\\\\", "/", log_file)
common.replace_in_file(conf_file, append_pattern, append_pattern + log_file)
append_pattern = 'log4j.appender.A.File='
log_file = os.path.join(self.get_path(), 'logs', 'audit.log')
if common.is_win():
log_file = re.sub("\\\\", "/", log_file)
common.replace_in_file(conf_file, append_pattern, append_pattern + log_file)
append_pattern = 'log4j.appender.B.File='
log_file = os.path.join(self.get_path(), 'logs', 'audit', 'dropped-events.log')
if common.is_win():
log_file = re.sub("\\\\", "/", log_file)
common.replace_in_file(conf_file, append_pattern, append_pattern + log_file)
def __update_yaml(self):
conf_file = os.path.join(self.get_path(), 'resources', 'dse', 'conf', 'dse.yaml')
with open(conf_file, 'r') as f:
data = yaml.load(f)
data['system_key_directory'] = os.path.join(self.get_path(), 'keys')
# Get a map of combined cluster and node configuration with the node
# configuration taking precedence.
full_options = common.merge_configuration(
self.cluster._dse_config_options,
self._dse_config_options, delete_empty=False)
# Merge options with original yaml data.
data = common.merge_configuration(data, full_options)
with open(conf_file, 'w') as f:
yaml.safe_dump(data, f, default_flow_style=False)
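    # Editor's note (hypothetical values, for illustration only): because node options are merged
    # over cluster options above, a key set at both levels resolves to the node value in dse.yaml,
    # e.g. a cluster-level {'audit_logging_options': {'enabled': False}} combined with a node-level
    # set_dse_configuration_options({'audit_logging_options': {'enabled': True}}) ends up enabled.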
def __generate_server_xml(self):
server_xml = os.path.join(self.get_path(), 'resources', 'tomcat', 'conf', 'server.xml')
if os.path.isfile(server_xml):
os.remove(server_xml)
with open(server_xml, 'w+') as f:
f.write('<Server port="8005" shutdown="SHUTDOWN">\n')
f.write(' <Service name="Solr">\n')
f.write(' <Connector port="8983" address="%s" protocol="HTTP/1.1" connectionTimeout="20000" maxThreads = "200" URIEncoding="UTF-8"/>\n' % self.network_interfaces['thrift'][0])
f.write(' <Engine name="Solr" defaultHost="localhost">\n')
f.write(' <Host name="localhost" appBase="../solr/web"\n')
f.write(' unpackWARs="true" autoDeploy="true"\n')
f.write(' xmlValidation="false" xmlNamespaceAware="false">\n')
f.write(' </Host>\n')
f.write(' </Engine>\n')
f.write(' </Service>\n')
f.write('</Server>\n')
f.close()
def __update_gremlin_config_yaml(self):
(node_ip, _) = self.network_interfaces['binary']
conf_file = os.path.join(self.get_path(), 'resources', 'graph', 'gremlin-console', 'conf', 'remote.yaml')
with open(conf_file, 'r') as f:
data = yaml.load(f)
data['hosts'] = [node_ip]
with open(conf_file, 'w') as f:
yaml.safe_dump(data, f, default_flow_style=False)
def _get_directories(self):
dirs = []
for i in ['data', 'commitlogs', 'saved_caches', 'logs', 'bin', 'keys', 'resources', os.path.join('data', 'hints')]:
dirs.append(os.path.join(self.get_path(), i))
return dirs
def _copy_agent(self):
agent_source = os.path.join(self.get_install_dir(), 'datastax-agent')
agent_target = os.path.join(self.get_path(), 'datastax-agent')
if os.path.exists(agent_source) and not os.path.exists(agent_target):
shutil.copytree(agent_source, agent_target)
def _write_agent_address_yaml(self, agent_dir):
address_yaml = os.path.join(agent_dir, 'conf', 'address.yaml')
if not os.path.exists(address_yaml):
with open(address_yaml, 'w+') as f:
(ip, port) = self.network_interfaces['thrift']
jmx = self.jmx_port
f.write('stomp_interface: 127.0.0.1\n')
f.write('local_interface: %s\n' % ip)
f.write('agent_rpc_interface: %s\n' % ip)
f.write('agent_rpc_broadcast_address: %s\n' % ip)
f.write('cassandra_conf: %s\n' % os.path.join(self.get_path(), 'resources', 'cassandra', 'conf', 'cassandra.yaml'))
f.write('cassandra_install: %s\n' % self.get_path())
f.write('cassandra_logs: %s\n' % os.path.join(self.get_path(), 'logs'))
f.write('thrift_port: %s\n' % port)
f.write('jmx_port: %s\n' % jmx)
f.close()
def _write_agent_log4j_properties(self, agent_dir):
log4j_properties = os.path.join(agent_dir, 'conf', 'log4j.properties')
with open(log4j_properties, 'w+') as f:
f.write('log4j.rootLogger=INFO,R\n')
f.write('log4j.logger.org.apache.http=OFF\n')
f.write('log4j.logger.org.eclipse.jetty.util.log=WARN,R\n')
f.write('log4j.appender.R=org.apache.log4j.RollingFileAppender\n')
f.write('log4j.appender.R.maxFileSize=20MB\n')
f.write('log4j.appender.R.maxBackupIndex=5\n')
f.write('log4j.appender.R.layout=org.apache.log4j.PatternLayout\n')
f.write('log4j.appender.R.layout.ConversionPattern=%5p [%t] %d{ISO8601} %m%n\n')
f.write('log4j.appender.R.File=./log/agent.log\n')
f.close()
def _update_spark_env(self):
try:
node_num = re.search(u'node(\d+)', self.name).group(1)
except AttributeError:
node_num = 0
conf_file = os.path.join(self.get_path(), 'resources', 'spark', 'conf', 'spark-env.sh')
env = self.get_env()
content = []
with open(conf_file, 'r') as f:
for line in f.readlines():
for spark_var in env.keys():
if line.startswith('export %s=' % spark_var) or line.startswith('export %s=' % spark_var, 2):
line = 'export %s=%s\n' % (spark_var, env[spark_var])
break
content.append(line)
with open(conf_file, 'w') as f:
f.writelines(content)
# set unique spark.shuffle.service.port for each node; this is only needed for DSE 5.0.x;
# starting in 5.1 this setting is no longer needed
if self.cluster.version() > '5.0' and self.cluster.version() < '5.1':
defaults_file = os.path.join(self.get_path(), 'resources', 'spark', 'conf', 'spark-defaults.conf')
with open(defaults_file, 'a') as f:
port_num = 7737 + int(node_num)
f.write("\nspark.shuffle.service.port %s\n" % port_num)
# create Spark working dirs; starting with DSE 5.0.10/5.1.3 these are no longer automatically created
for e in ["SPARK_WORKER_DIR", "SPARK_LOCAL_DIRS"]:
dir = env[e]
if not os.path.exists(dir):
os.makedirs(dir)
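    # Editor's note (worked example of the port arithmetic above): for a node named 'node3',
    # node_num is 3, so spark.shuffle.service.port becomes 7737 + 3 = 7740; a name that does not
    # match 'node<N>' falls back to node_num = 0 and port 7737.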
|
apache-2.0
| -8,702,722,111,606,709,000
| 46.105932
| 232
| 0.580777
| false
| 3.44073
| true
| false
| false
|
Microvellum/Fluid-Designer
|
win64-vc/2.78/Python/bin/2.78/scripts/addons_contrib/mesh_extra_tools/pkhg_faces.py
|
1
|
32230
|
bl_info = {
"name": "PKHG faces",
"author": " PKHG ",
"version": (0, 0, 5),
"blender": (2, 7, 1),
"location": "View3D > Tools > PKHG (tab)",
"description": "Faces selected will become added faces of different style",
"warning": "not yet finished",
"wiki_url": "",
"category": "Mesh",
}
import bpy
import bmesh
from mathutils import Vector, Matrix
from bpy.props import BoolProperty, StringProperty, IntProperty, FloatProperty, EnumProperty
class AddFaces(bpy.types.Operator):
"""Get parameters and build object with added faces"""
bl_idname = "mesh.add_faces_to_object"
bl_label = "new FACES: add"
bl_options = {'REGISTER', 'UNDO', 'PRESET'}
reverse_faces = BoolProperty(name="reverse_faces", default=False,
description="revert the normal of selected faces")
name_source_object = StringProperty(
name="which MESH",
description="lets you chose a mesh",
default="Cube")
remove_start_faces = BoolProperty(name="remove_start_faces", default=True,
description="make a choice, remove or not")
base_height = FloatProperty(name="base_height faces", min=-20,
soft_max=10, max=20, default=0.2,
description="sets general base_height")
use_relative_base_height = BoolProperty(name="rel.base_height", default=False,
description=" reletive or absolute base_height")
relative_base_height = FloatProperty(name="relative_height", min=-5,
soft_max=5, max=20, default=0.2,
description="PKHG>TODO")
relative_width = FloatProperty(name="relative_width", min=-5,
soft_max=5, max=20, default=0.2,
description="PKHG>TODO")
second_height = FloatProperty(name="2. height", min=-5,
soft_max=5, max=20, default=0.2,
description="2. height for this and that")
width = FloatProperty(name="wds.faces", min=-20, max=20, default=0.5,
description="sets general width")
repeat_extrude = IntProperty(name="repeat", min=1,
soft_max=5, max=20,
description="for longer base")
move_inside = FloatProperty(name="move inside", min=0.0,
max=1.0, default=0.5,
description="how much move to inside")
thickness = FloatProperty(name="thickness", soft_min=0.01, min=0,
soft_max=5.0, max=20.0, default=0)
depth = FloatProperty(name="depth", min=-5,
soft_max=5.0, max=20.0, default=0)
collapse_edges = BoolProperty(name="make point", default=False,
description="collapse vertices of edges")
spike_base_width = FloatProperty(name="spike_base_width", default=0.4,
min=-4.0, soft_max=1, max=20,
description="base width of a spike")
base_height_inset = FloatProperty(name="base_height_inset", default=0.0,
min=-5, max=5,
description="to elevate/or neg the ...")
top_spike = FloatProperty(name="top_spike", default=1.0, min=-10.0, max=10.0,
description=" the base_height of a spike")
top_extra_height = FloatProperty(name="top_extra_height", default=0.0, min=-10.0, max=10.0,
description=" add extra height")
step_with_real_spike = BoolProperty(name="step_with_real_spike", default=False,
description=" in stepped a real spike")
use_relative = BoolProperty(name="use_relative", default=False,
description="change size using area, min of max")
face_types = EnumProperty(
description="different types of faces",
default="no",
items=[
('no', 'choose!', 'choose one of the other possibilies'),
('open inset', 'open inset', 'holes'),
('with base', 'with base', 'base and ...'),
('clsd vertical', 'clsd vertical', 'clsd vertical'),
('open vertical', 'open vertical', 'openvertical'),
('spiked', 'spiked', 'spike'),
('stepped', 'stepped', 'stepped'),
('boxed', 'boxed', 'boxed'),
('bar', 'bar', 'bar'),
])
strange_boxed_effect = BoolProperty(name="strange effect", default=False,
description="do not show one extrusion")
use_boundary = BoolProperty(name="use_boundary", default=True)
use_even_offset = BoolProperty(name="even_offset", default=True)
use_relative_offset = BoolProperty(name="relativ_offset", default=True)
use_edge_rail = BoolProperty(name="edge_rail", default=False)
use_outset = BoolProperty(name="outset", default=False)
use_select_inset = BoolProperty(name="inset", default=False)
use_interpolate = BoolProperty(name="interpolate", default=True)
@classmethod
def poll(cls, context):
result = False
active_object = context.active_object
if active_object:
mesh_objects_name = [el.name for el in bpy.data.objects if el.type ==
"MESH"]
if active_object.name in mesh_objects_name:
result = True
return result
def draw(self, context): # PKHG>INFO Add_Faces_To_Object operator GUI
layout = self.layout
col = layout.column()
col.label(text="ACTIVE object used!")
col.prop(self, "face_types")
col.prop(self, "use_relative")
if self.face_types == "open inset":
col.prop(self, "move_inside")
col.prop(self, "base_height")
elif self.face_types == "with base":
col.prop(self, "move_inside")
col.prop(self, "base_height")
col.prop(self, "second_height")
col.prop(self, "width")
elif self.face_types == "clsd vertical":
col.prop(self, "base_height")
elif self.face_types == "open vertical":
col.prop(self, "base_height")
elif self.face_types == "boxed":
col.prop(self, "move_inside")
col.prop(self, "base_height")
col.prop(self, "top_spike")
col.prop(self, "strange_boxed_effect")
elif self.face_types == "spiked":
col.prop(self, "spike_base_width")
col.prop(self, "base_height_inset")
col.prop(self, "top_spike")
elif self.face_types == "bar":
col.prop(self, "spike_base_width")
col.prop(self, "top_spike")
col.prop(self, "top_extra_height")
elif self.face_types == "stepped":
col.prop(self, "spike_base_width")
col.prop(self, "base_height_inset")
col.prop(self, "top_extra_height")
col.prop(self, "second_height")
col.prop(self, "step_with_real_spike")
def execute(self, context):
bpy.context.scene.objects.active
obj_name = self.name_source_object
face_type = self.face_types
if face_type == "spiked":
Spiked(spike_base_width=self.spike_base_width,
base_height_inset=self.base_height_inset,
top_spike=self.top_spike, top_relative=self.use_relative)
elif face_type == "boxed":
startinfo = prepare(self, context, self.remove_start_faces)
bm = startinfo['bm']
top = self.top_spike
obj = startinfo['obj']
obj_matrix_local = obj.matrix_local
distance = None
base_heights = None
t = self.move_inside
areas = startinfo['areas']
base_height = self.base_height
if self.use_relative:
distance = [min(t * area, 1.0) for i, area in enumerate(areas)]
base_heights = [base_height * area for i, area in enumerate(areas)]
else:
distance = [t] * len(areas)
base_heights = [base_height] * len(areas)
rings = startinfo['rings']
centers = startinfo['centers']
normals = startinfo['normals']
for i in range(len(rings)):
make_one_inset(self, context, bm=bm, ringvectors=rings[i],
center=centers[i], normal=normals[i],
t=distance[i], base_height=base_heights[i])
bpy.ops.mesh.select_mode(type="EDGE")
bpy.ops.mesh.select_more()
bpy.ops.mesh.select_more()
bpy.ops.object.mode_set(mode='OBJECT')
# PKHG>INFO base extrusion done and set to the mesh
            # PKHG>INFO if the extrusion is NOT done ... it looks strange soon!
if not self.strange_boxed_effect:
bpy.ops.object.mode_set(mode='EDIT')
obj = context.active_object
bm = bmesh.from_edit_mesh(obj.data)
bmfaces = [face for face in bm.faces if face.select]
res = extrude_faces(self, context, bm=bm, face_l=bmfaces)
ring_edges = [face.edges[:] for face in res]
bpy.ops.object.mode_set(mode='OBJECT')
            # PKHG>INFO now the extruded faces have to move in the normal direction
bpy.ops.object.mode_set(mode='EDIT')
obj = bpy.context.scene.objects.active
bm = bmesh.from_edit_mesh(obj.data)
todo_faces = [face for face in bm.faces if face.select]
for face in todo_faces:
bmesh.ops.translate(bm, vec=face.normal * top, space=obj_matrix_local,
verts=face.verts)
bpy.ops.object.mode_set(mode='OBJECT')
elif face_type == "stepped":
Stepped(spike_base_width=self.spike_base_width,
base_height_inset=self.base_height_inset,
top_spike=self.second_height,
top_extra_height=self.top_extra_height,
use_relative_offset=self.use_relative, with_spike=self.step_with_real_spike)
elif face_type == "open inset":
startinfo = prepare(self, context, self.remove_start_faces)
bm = startinfo['bm']
# PKHG>INFO adjust for relative, via areas
t = self.move_inside
areas = startinfo['areas']
base_height = self.base_height
base_heights = None
distance = None
if self.use_relative:
distance = [min(t * area, 1.0) for i, area in enumerate(areas)]
base_heights = [base_height * area for i, area in enumerate(areas)]
else:
distance = [t] * len(areas)
base_heights = [base_height] * len(areas)
rings = startinfo['rings']
centers = startinfo['centers']
normals = startinfo['normals']
for i in range(len(rings)):
make_one_inset(self, context, bm=bm, ringvectors=rings[i],
center=centers[i], normal=normals[i],
t=distance[i], base_height=base_heights[i])
bpy.ops.object.mode_set(mode='OBJECT')
elif face_type == "with base":
startinfo = prepare(self, context, self.remove_start_faces)
bm = startinfo['bm']
obj = startinfo['obj']
object_matrix = obj.matrix_local
# PKHG>INFO for relative (using areas)
t = self.move_inside
areas = startinfo['areas']
base_height = self.base_height
distance = None
base_heights = None
if self.use_relative:
distance = [min(t * area, 1.0) for i, area in enumerate(areas)]
base_heights = [base_height * area for i, area in enumerate(areas)]
else:
distance = [t] * len(areas)
base_heights = [base_height] * len(areas)
next_rings = []
rings = startinfo['rings']
centers = startinfo['centers']
normals = startinfo['normals']
for i in range(len(rings)):
next_rings.append(make_one_inset(self, context, bm=bm, ringvectors=rings[i],
center=centers[i], normal=normals[i],
t=distance[i], base_height=base_heights[i]))
prepare_ring = extrude_edges(self, context, bm=bm, edge_l_l=next_rings)
second_height = self.second_height
width = self.width
vectors = [[ele.verts[:] for ele in edge] for edge in prepare_ring]
n_ring_vecs = []
for rings in vectors:
v = []
for edgv in rings:
v.extend(edgv)
                # PKHG>INFO no double verts allowed, coming from two adjacent edges!
bm.verts.ensure_lookup_table()
vv = list(set([ele.index for ele in v]))
vvv = [bm.verts[i].co for i in vv]
n_ring_vecs.append(vvv)
for i, ring in enumerate(n_ring_vecs):
make_one_inset(self, context, bm=bm, ringvectors=ring,
center=centers[i], normal=normals[i],
t=width, base_height=base_heights[i] + second_height)
bpy.ops.object.mode_set(mode='OBJECT')
else:
if face_type == "clsd vertical":
obj_name = context.active_object.name
ClosedVertical(name=obj_name, base_height=self.base_height,
use_relative_base_height=self.use_relative)
elif face_type == "open vertical":
obj_name = context.active_object.name
OpenVertical(name=obj_name, base_height=self.base_height,
use_relative_base_height=self.use_relative)
elif face_type == "bar":
startinfo = prepare(self, context, self.remove_start_faces)
result = []
bm = startinfo['bm']
rings = startinfo['rings']
centers = startinfo['centers']
normals = startinfo['normals']
spike_base_width = self.spike_base_width
for i, ring in enumerate(rings):
result.append(make_one_inset(self, context, bm=bm,
ringvectors=ring, center=centers[i],
normal=normals[i], t=spike_base_width))
next_ring_edges_list = extrude_edges(self, context, bm=bm,
edge_l_l=result)
top_spike = self.top_spike
fac = top_spike
object_matrix = startinfo['obj'].matrix_local
for i in range(len(next_ring_edges_list)):
translate_ONE_ring(self, context, bm=bm,
object_matrix=object_matrix,
ring_edges=next_ring_edges_list[i],
normal=normals[i], distance=fac)
next_ring_edges_list_2 = extrude_edges(self, context, bm=bm,
edge_l_l=next_ring_edges_list)
top_extra_height = self.top_extra_height
for i in range(len(next_ring_edges_list_2)):
move_corner_vecs_outside(self, context, bm=bm,
edge_list=next_ring_edges_list_2[i],
center=centers[i], normal=normals[i],
base_height_erlier=fac + top_extra_height,
distance=fac)
bpy.ops.mesh.select_mode(type="VERT")
bpy.ops.mesh.select_more()
bpy.ops.object.mode_set(mode='OBJECT')
return {'FINISHED'}
class ReverseFacesOperator(bpy.types.Operator):
"""Reverse selected Faces"""
bl_idname = "mesh.revers_selected_faces"
bl_label = "reverse normal of selected faces1"
bl_options = {'REGISTER', 'UNDO', 'PRESET'}
reverse_faces = BoolProperty(name="reverse_faces", default=False,
description="revert the normal of selected faces")
def execute(self, context):
name = context.active_object.name
ReverseFaces(name=name)
return {'FINISHED'}
class pkhg_help(bpy.types.Operator):
bl_idname = 'help.pkhg'
bl_label = ''
def draw(self, context):
layout = self.layout
layout.label('To use:')
layout.label('Make a selection or selection of Faces')
layout.label('Extrude, rotate extrusions & more')
layout.label('Toggle edit mode after use')
def invoke(self, context, event):
return context.window_manager.invoke_popup(self, width=300)
class VIEW3D_Faces_Panel(bpy.types.Panel):
bl_label = "Face Extrude"
bl_space_type = "VIEW_3D"
bl_region_type = "TOOLS"
bl_category = 'Tools'
bl_options = {'DEFAULT_CLOSED'}
@classmethod
def poll(cls, context):
result = False
active_object = context.active_object
if active_object:
mesh_objects_name = [el.name for el in bpy.data.objects if el.type ==
"MESH"]
if active_object.name in mesh_objects_name:
if active_object.mode == "OBJECT":
result = True
return result
def draw(self, context):
layout = self.layout
layout.operator(AddFaces.bl_idname, "Selected Faces!")
layout.label("Use this to Extrude")
layout.label("Selected Faces Only")
layout.label("---------------------------------------")
layout.operator(ReverseFacesOperator.bl_idname, "Reverse faceNormals")
layout.label("Only Use This")
layout.label("After Mesh Creation")
layout.label("To Repair Normals")
layout.label("Save File Often")
def find_one_ring(sel_vertices):
ring0 = sel_vertices.pop(0)
to_delete = []
for i, edge in enumerate(sel_vertices):
len_nu = len(ring0)
if len(ring0 - edge) < len_nu:
to_delete.append(i)
ring0 = ring0.union(edge)
to_delete.reverse()
for el in to_delete:
sel_vertices.pop(el)
return (ring0, sel_vertices)
class Stepped:
def __init__(self, spike_base_width=0.5, base_height_inset=0.0, top_spike=0.2, top_relative=False, top_extra_height=0, use_relative_offset=False, with_spike=False):
obj = bpy.context.active_object
bpy.ops.object.mode_set(mode='EDIT')
bpy.ops.mesh.inset(use_boundary=True, use_even_offset=True, use_relative_offset=False, use_edge_rail=False, thickness=spike_base_width, depth=0, use_outset=True, use_select_inset=False, use_individual=True, use_interpolate=True)
bpy.ops.mesh.inset(use_boundary=True, use_even_offset=True, use_relative_offset=use_relative_offset, use_edge_rail=False, thickness=top_extra_height, depth=base_height_inset, use_outset=True, use_select_inset=False, use_individual=True, use_interpolate=True)
bpy.ops.mesh.inset(use_boundary=True, use_even_offset=True, use_relative_offset=use_relative_offset, use_edge_rail=False, thickness=spike_base_width, depth=0, use_outset=True, use_select_inset=False, use_individual=True, use_interpolate=True)
bpy.ops.mesh.inset(use_boundary=True, use_even_offset=True, use_relative_offset=False, use_edge_rail=False, thickness=0, depth=top_spike, use_outset=True, use_select_inset=False, use_individual=True, use_interpolate=True)
if with_spike:
bpy.ops.mesh.merge(type='COLLAPSE')
bpy.ops.object.mode_set(mode='OBJECT')
class Spiked:
def __init__(self, spike_base_width=0.5, base_height_inset=0.0, top_spike=0.2, top_relative=False):
obj = bpy.context.active_object
bpy.ops.object.mode_set(mode='EDIT')
bpy.ops.mesh.inset(use_boundary=True, use_even_offset=True, use_relative_offset=False, use_edge_rail=False, thickness=spike_base_width, depth=base_height_inset, use_outset=True, use_select_inset=False, use_individual=True, use_interpolate=True)
bpy.ops.mesh.inset(use_boundary=True, use_even_offset=True, use_relative_offset=top_relative, use_edge_rail=False, thickness=0, depth=top_spike, use_outset=True, use_select_inset=False, use_individual=True, use_interpolate=True)
bm = bmesh.from_edit_mesh(obj.data)
selected_faces = [face for face in bm.faces if face.select]
edges_todo = []
bpy.ops.mesh.merge(type='COLLAPSE')
bpy.ops.object.mode_set(mode='OBJECT')
class ClosedVertical:
def __init__(self, name="Plane", base_height=1, use_relative_base_height=False):
obj = bpy.data.objects[name]
bm = bmesh.new()
bm.from_mesh(obj.data)
# PKHG>INFO deselect chosen faces
sel = [f for f in bm.faces if f.select]
for f in sel:
f.select = False
res = bmesh.ops.extrude_discrete_faces(bm, faces=sel)
# PKHG>INFO select extruded faces
for f in res['faces']:
f.select = True
lood = Vector((0, 0, 1))
# PKHG>INFO adjust extrusion by a vector! test just only lood
factor = base_height
for face in res['faces']:
if use_relative_base_height:
area = face.calc_area()
factor = area * base_height
else:
factor = base_height
for el in face.verts:
tmp = el.co + face.normal * factor
el.co = tmp
me = bpy.data.meshes[name]
bm.to_mesh(me)
bm.free()
class ReverseFaces:
def __init__(self, name="Cube"):
obj = bpy.data.objects[name]
me = obj.data
bpy.ops.object.mode_set(mode='EDIT')
bm = bmesh.new()
bm.from_mesh(me)
bpy.ops.object.mode_set(mode='OBJECT')
sel = [f for f in bm.faces if f.select]
bmesh.ops.reverse_faces(bm, faces=sel)
bm.to_mesh(me)
bm.free()
class OpenVertical:
def __init__(self, name="Plane", base_height=1, use_relative_base_height=False):
obj = bpy.data.objects[name]
bm = bmesh.new()
bm.from_mesh(obj.data)
# PKHG>INFO deselect chosen faces
sel = [f for f in bm.faces if f.select]
for f in sel:
f.select = False
res = bmesh.ops.extrude_discrete_faces(bm, faces=sel)
# PKHG>INFO select extruded faces
for f in res['faces']:
f.select = True
# PKHG>INFO adjust extrusion by a vector! test just only lood
factor = base_height
for face in res['faces']:
if use_relative_base_height:
area = face.calc_area()
factor = area * base_height
else:
factor = base_height
for el in face.verts:
tmp = el.co + face.normal * factor
el.co = tmp
me = bpy.data.meshes[name]
bm.to_mesh(me)
bm.free()
bpy.ops.object.editmode_toggle()
bpy.ops.mesh.delete(type='FACE')
bpy.ops.object.editmode_toggle()
class StripFaces:
def __init__(self, use_boundary=True, use_even_offset=True, use_relative_offset=False, use_edge_rail=True, thickness=0.0, depth=0.0, use_outset=False, use_select_inset=False, use_individual=True, use_interpolate=True):
bpy.ops.object.mode_set(mode='EDIT')
bpy.ops.mesh.inset(use_boundary=use_boundary, use_even_offset=True, use_relative_offset=False, use_edge_rail=True, thickness=thickness, depth=depth, use_outset=use_outset, use_select_inset=use_select_inset, use_individual=use_individual, use_interpolate=use_interpolate)
bpy.ops.object.mode_set(mode='OBJECT')
        # PKHG>INFO only 3 parameters incl. execution context supported!!
if False:
bpy.ops.mesh.inset(use_boundary, use_even_offset, use_relative_offset, use_edge_rail, thickness, depth, use_outset, use_select_inset, use_individual, use_interpolate)
elif type == 0:
bpy.ops.mesh.inset(use_boundary=True, use_even_offset=True, use_relative_offset=False, use_edge_rail=True, thickness=thickness, depth=depth, use_outset=False, use_select_inset=False, use_individual=True, use_interpolate=True)
elif type == 1:
bpy.ops.mesh.inset(use_boundary=True, use_even_offset=True, use_relative_offset=False, use_edge_rail=True, thickness=thickness, depth=depth, use_outset=False, use_select_inset=False, use_individual=True, use_interpolate=False)
bpy.ops.mesh.delete(type='FACE')
elif type == 2:
bpy.ops.mesh.inset(use_boundary=True, use_even_offset=False, use_relative_offset=True, use_edge_rail=True, thickness=thickness, depth=depth, use_outset=False, use_select_inset=False, use_individual=True, use_interpolate=False)
bpy.ops.mesh.delete(type='FACE')
elif type == 3:
bpy.ops.mesh.inset(use_boundary=True, use_even_offset=False, use_relative_offset=True, use_edge_rail=True, thickness=depth, depth=thickness, use_outset=False, use_select_inset=False, use_individual=True, use_interpolate=True)
bpy.ops.mesh.delete(type='FACE')
elif type == 4:
bpy.ops.mesh.inset(use_boundary=True, use_even_offset=False, use_relative_offset=True, use_edge_rail=True, thickness=thickness, depth=depth, use_outset=True, use_select_inset=False, use_individual=True, use_interpolate=True)
bpy.ops.mesh.inset(use_boundary=True, use_even_offset=False, use_relative_offset=True, use_edge_rail=True, thickness=thickness, depth=depth, use_outset=True, use_select_inset=False, use_individual=True, use_interpolate=True)
bpy.ops.mesh.delete(type='FACE')
bpy.ops.object.mode_set(mode='OBJECT')
def prepare(self, context, remove_start_faces=True):
"""Start for a face selected change of faces
    select an object of type mesh, with several (or all) faces selected
"""
obj = bpy.context.scene.objects.active
bpy.ops.object.mode_set(mode='OBJECT')
selectedpolygons = [el for el in obj.data.polygons if el.select]
# PKHG>INFO copies of the vectors are needed, otherwise Blender crashes!
centers = [face.center for face in selectedpolygons]
centers_copy = [Vector((el[0], el[1], el[2])) for el in centers]
normals = [face.normal for face in selectedpolygons]
normals_copy = [Vector((el[0], el[1], el[2])) for el in normals]
vertindicesofpolgons = [[vert for vert in face.vertices] for face in selectedpolygons]
vertVectorsOfSelectedFaces = [[obj.data.vertices[ind].co for ind in vertIndiceofface]
for vertIndiceofface in vertindicesofpolgons]
vertVectorsOfSelectedFaces_copy = [[Vector((el[0], el[1], el[2])) for el in listofvecs]
for listofvecs in vertVectorsOfSelectedFaces]
bpy.ops.object.mode_set(mode='EDIT')
bm = bmesh.from_edit_mesh(obj.data)
selected_bm_faces = [ele for ele in bm.faces if ele.select]
selected_edges_per_face_ind = [[ele.index for ele in face.edges] for face in selected_bm_faces]
indices = [el.index for el in selectedpolygons]
selected_faces_areas = [bm.faces[:][i] for i in indices]
tmp_area = [el.calc_area() for el in selected_faces_areas]
# PKHG>INFO, selected faces are removed, only their edges are used!
if remove_start_faces:
bpy.ops.mesh.delete(type='ONLY_FACE')
bpy.ops.object.mode_set(mode='OBJECT')
obj.data.update()
bpy.ops.object.mode_set(mode='EDIT')
bm = bmesh.from_edit_mesh(obj.data)
bm.verts.ensure_lookup_table()
bm.faces.ensure_lookup_table()
start_ring_raw = [[bm.verts[ind].index for ind in vertIndiceofface]
for vertIndiceofface in vertindicesofpolgons]
start_ring = []
for el in start_ring_raw:
start_ring.append(set(el))
bm.edges.ensure_lookup_table()
bm_selected_edges_l_l = [[bm.edges[i] for i in bm_ind_list] for bm_ind_list in selected_edges_per_face_ind]
result = {'obj': obj, 'centers': centers_copy, 'normals': normals_copy,
'rings': vertVectorsOfSelectedFaces_copy, 'bm': bm,
'areas': tmp_area, 'startBMRingVerts': start_ring,
'base_edges': bm_selected_edges_l_l}
return result
def make_one_inset(self, context, bm=None, ringvectors=None, center=None,
normal=None, t=None, base_height=0):
"""a face will get 'inserted' faces to create (normaly)
a hole it t is > 0 and < 1)
"""
tmp = []
for el in ringvectors:
tmp.append((el * (1 - t) + center * t) + normal * base_height)
tmp = [bm.verts.new(v) for v in tmp] # the new corner bmvectors
    # PKHG>INFO so to say sentinels, to use ONE for-loop ...
tmp.append(tmp[0])
vectorsFace_i = [bm.verts.new(v) for v in ringvectors]
vectorsFace_i.append(vectorsFace_i[0])
myres = []
for ii in range(len(vectorsFace_i) - 1):
# PKHG>INFO next line: sequence important! for added edge
bmvecs = [vectorsFace_i[ii], vectorsFace_i[ii + 1], tmp[ii + 1], tmp[ii]]
res = bm.faces.new(bmvecs)
myres.append(res.edges[2])
myres[-1].select = True # PKHG>INFO to be used later selected!
return (myres)
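# Editor's note: a worked illustration of the interpolation in make_one_inset above (values are
# illustrative only). With t = 0.5 and base_height = 0, each new corner lands halfway between the
# original ring vertex and the face center: for a vertex at (2, 0, 0) and a center at (0, 0, 0),
# el * (1 - t) + center * t gives (1, 0, 0); a non-zero base_height then lifts every new corner
# along the face normal.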
def extrude_faces(self, context, bm=None, face_l=None):
"""
to make a ring extrusion!
"""
all_results = []
res = bmesh.ops.extrude_discrete_faces(bm, faces=face_l)['faces']
for face in res:
face.select = True
return res
def extrude_edges(self, context, bm=None, edge_l_l=None):
"""
to make a ring extrusion!
"""
all_results = []
for edge_l in edge_l_l:
for edge in edge_l:
edge.select = False
res = bmesh.ops.extrude_edge_only(bm, edges=edge_l)
tmp = [ele for ele in res['geom'] if isinstance(ele, bmesh.types.BMEdge)]
for edge in tmp:
edge.select = True
all_results.append(tmp)
return all_results
def translate_ONE_ring(self, context, bm=None, object_matrix=None, ring_edges=None,
normal=(0, 0, 1), distance=0.5):
"""
translate a ring in given (normal?!) direction with given (global) amount
"""
tmp = []
for edge in ring_edges:
tmp.extend(edge.verts[:])
# PKHG>INFO no double vertices allowed by bmesh!
tmp = set(tmp)
tmp = list(tmp)
bmesh.ops.translate(bm, vec=normal * distance, space=object_matrix, verts=tmp)
return ring_edges
# PKHG>INFO relevant edges will stay selected
def move_corner_vecs_outside(self, context, bm=None, edge_list=None, center=None, normal=None,
base_height_erlier=0.5, distance=0.5):
"""
move corners (outside meant mostly) dependent on the parameters
"""
tmp = []
for edge in edge_list:
tmp.extend([ele for ele in edge.verts if isinstance(ele, bmesh.types.BMVert)])
    # PKHG>INFO to remove duplicate vertices, they are all used twice in the ring!
tmp = set(tmp)
tmp = list(tmp)
for i in range(len(tmp)):
vec = tmp[i].co
direction = vec + (vec - (normal * base_height_erlier + center)) * distance
tmp[i].co = direction
def register():
bpy.utils.register_module(__name__)
def unregister():
bpy.utils.unregister_module(__name__)
if __name__ == "__main__":
register()
|
gpl-3.0
| -9,101,845,059,198,115,000
| 42.85034
| 278
| 0.57403
| false
| 3.667501
| false
| false
| false
|
laissezfarrell/rl-bitcurator-scripts
|
python/accession-reporter.py
|
1
|
3754
|
#!/usr/bin/env python3
#Script (in progress) to report high-level folder information for offices transferring records to the University Archives.
#Dependencies: argparse, pathlib, python3.6 or above, csv, datetime
#Assumptions:
# 1.
import argparse, csv, datetime
from pathlib import Path, PurePath
from datetime import datetime
def walkPath():
startingPath = Path(inputDir) #uncomment when ready for arguments
#csvOut = Path('/home/bcadmin/Desktop/accession-report-test.csv')
#startingPath = Path('/home/bcadmin/Desktop/test-data/objects') #comment when ready for arguments
spChild = [x for x in startingPath.iterdir() if x.is_dir()] #create a list of the children directories in startingPath.
with open (csvOut, 'w') as m:
writer = csv.writer(m)
writer.writerow(['path','foldersize '+ labelUnits,'Earliest Timestamp','Latest Timestamp'])
for i in spChild:
operatingDirectory = Path(i)
print("the next directory to process is ",operatingDirectory)#sanity check
fileList = list(operatingDirectory.glob('**/*'))
#fileList = [x for x in operatingDirectory.iterdir() if x.is_file()]
#print(fileList) #sanity check
folderSize = 0
fModTime = datetime.now()
oldestTime = fModTime
newestTime = datetime.strptime("Jan 01 1950", "%b %d %Y")
for f in fileList:
fSizeBytes = (Path.stat(f).st_size / repUnits)
folderSize = folderSize + fSizeBytes
fModTime = datetime.fromtimestamp(Path.stat(f).st_mtime)
if fModTime >= oldestTime:
pass
else:
oldestTime = fModTime
if fModTime <= newestTime:
pass
else:
newestTime = fModTime
#print(folderSize)
#print(oldestTime)
#print(newestTime)
writer.writerow([i,folderSize,oldestTime,newestTime])
#end of day May 15: above function calculates the size of the files in a folder, as well as the most recent and oldest date modified. Next steps: 1) add arguments back in and test function. Mostly copied/pasted writer stuff from another script, so potentially doesn't work yet.
# Main body to accept arguments and call the three functions.
parser = argparse.ArgumentParser()
parser.add_argument("output", help="Path to and filename for the CSV to create.")
parser.add_argument("input", help="Path to input directory.")
parser.add_argument("-u", "--units", type=str, choices=["b", "kb", "mb", "gb"], help="Unit of measurement for reporting aggregate size")
args = parser.parse_args()
if args.output:
csvOut = args.output
if args.input:
inputDir = args.input
if args.units == "kb":
repUnits = 1024
labelUnits = "(kilobytes)"
print("Reporting sizes in kilobytes")
elif args.units == "mb":
repUnits = 1024*1024
labelUnits = "(megabytes)"
print("Reporting sizes in megabytes")
elif args.units =="gb":
repUnits = 1024*1024*1024
labelUnits = "(gigabytes)"
print("Reporting sizes in gigabytes")
elif args.units == "b":
repUnits = 1
labelUnits = "(bytes)"
print("Reporting sizes in bytes, the purest way to report sizes.")
else:
repUnits = 1
labelUnits = "(bytes)"
print("Your inattentiveness leads Self to default to reporting size in bytes, the purest yet least human readable way to report sizes. Ha ha, puny human. Bow before Self, the mighty computer. 01000100 01000101 01010011 01010100 01010010 01001111 01011001 00100000 01000001 01001100 01001100 00100000 01001000 01010101 01001101 01000001 01001110 01010011 00101110")
walkPath()
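# Editor's note: example invocation (illustrative paths; adjust for your environment). Positional
# order is the output CSV first, then the input directory, per the argparse setup above:
# python3 accession-reporter.py /path/to/report.csv /path/to/transfer-folder --units mb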
|
gpl-3.0
| -5,533,678,024,818,261,000
| 43.690476
| 368
| 0.660629
| false
| 3.894191
| false
| false
| false
|
alirizakeles/zato
|
code/zato-server/src/zato/server/service/internal/security/openstack.py
|
1
|
6547
|
# -*- coding: utf-8 -*-
"""
Copyright (C) 2013 Dariusz Suchojad <dsuch at zato.io>
Licensed under LGPLv3, see LICENSE.txt for terms and conditions.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
# stdlib
from contextlib import closing
from traceback import format_exc
from uuid import uuid4
# Zato
from zato.common import SEC_DEF_TYPE
from zato.common.broker_message import SECURITY
from zato.common.odb.model import Cluster, OpenStackSecurity
from zato.common.odb.query import openstack_security_list
from zato.server.service.internal import AdminService, AdminSIO, ChangePasswordBase, GetListAdminSIO
class GetList(AdminService):
""" Returns a list of OpenStack definitions available.
"""
_filter_by = OpenStackSecurity.name,
class SimpleIO(GetListAdminSIO):
request_elem = 'zato_security_openstack_get_list_request'
response_elem = 'zato_security_openstack_get_list_response'
input_required = ('cluster_id',)
output_required = ('id', 'name', 'is_active', 'username')
def get_data(self, session):
return self._search(openstack_security_list, session, self.request.input.cluster_id, False)
def handle(self):
with closing(self.odb.session()) as session:
self.response.payload[:] = self.get_data(session)
class Create(AdminService):
""" Creates a new OpenStack definition.
"""
class SimpleIO(AdminSIO):
request_elem = 'zato_security_openstack_create_request'
response_elem = 'zato_security_openstack_create_response'
input_required = ('cluster_id', 'name', 'is_active', 'username')
output_required = ('id', 'name')
def handle(self):
input = self.request.input
input.password = uuid4().hex
with closing(self.odb.session()) as session:
try:
cluster = session.query(Cluster).filter_by(id=input.cluster_id).first()
# Let's see if we already have a definition of that name before committing
# any stuff into the database.
existing_one = session.query(OpenStackSecurity).\
filter(Cluster.id==input.cluster_id).\
filter(OpenStackSecurity.name==input.name).first()
if existing_one:
raise Exception('OpenStack definition [{0}] already exists on this cluster'.format(input.name))
auth = OpenStackSecurity(None, input.name, input.is_active, input.username, input.password, cluster)
session.add(auth)
session.commit()
except Exception, e:
msg = 'Could not create an OpenStack definition, e:[{e}]'.format(e=format_exc(e))
self.logger.error(msg)
session.rollback()
raise
else:
input.action = SECURITY.OPENSTACK_CREATE.value
input.sec_type = SEC_DEF_TYPE.OPENSTACK
self.broker_client.publish(input)
self.response.payload.id = auth.id
self.response.payload.name = auth.name
class Edit(AdminService):
""" Updates an OpenStack definition.
"""
class SimpleIO(AdminSIO):
request_elem = 'zato_security_openstack_edit_request'
response_elem = 'zato_security_openstack_edit_response'
input_required = ('id', 'cluster_id', 'name', 'is_active', 'username')
output_required = ('id', 'name')
def handle(self):
input = self.request.input
with closing(self.odb.session()) as session:
try:
existing_one = session.query(OpenStackSecurity).\
filter(Cluster.id==input.cluster_id).\
filter(OpenStackSecurity.name==input.name).\
filter(OpenStackSecurity.id!=input.id).\
first()
if existing_one:
raise Exception('OpenStack definition [{0}] already exists on this cluster'.format(input.name))
definition = session.query(OpenStackSecurity).filter_by(id=input.id).one()
old_name = definition.name
definition.name = input.name
definition.is_active = input.is_active
definition.username = input.username
session.add(definition)
session.commit()
except Exception, e:
msg = 'Could not update the OpenStack definition, e:[{e}]'.format(e=format_exc(e))
self.logger.error(msg)
session.rollback()
raise
else:
input.action = SECURITY.OPENSTACK_EDIT.value
input.old_name = old_name
input.sec_type = SEC_DEF_TYPE.OPENSTACK
self.broker_client.publish(input)
self.response.payload.id = definition.id
self.response.payload.name = definition.name
class ChangePassword(ChangePasswordBase):
""" Changes the password of an OpenStack definition.
"""
password_required = False
class SimpleIO(ChangePasswordBase.SimpleIO):
request_elem = 'zato_security_openstack_change_password_request'
response_elem = 'zato_security_openstack_change_password_response'
def handle(self):
def _auth(instance, password):
instance.password = password
return self._handle(OpenStackSecurity, _auth, SECURITY.OPENSTACK_CHANGE_PASSWORD.value)
class Delete(AdminService):
""" Deletes an OpenStack definition.
"""
class SimpleIO(AdminSIO):
request_elem = 'zato_security_openstack_delete_request'
response_elem = 'zato_security_openstack_delete_response'
input_required = ('id',)
def handle(self):
with closing(self.odb.session()) as session:
try:
auth = session.query(OpenStackSecurity).\
filter(OpenStackSecurity.id==self.request.input.id).\
one()
session.delete(auth)
session.commit()
except Exception, e:
msg = 'Could not delete the OpenStack definition, e:[{e}]'.format(e=format_exc(e))
self.logger.error(msg)
session.rollback()
raise
else:
self.request.input.action = SECURITY.OPENSTACK_DELETE.value
self.request.input.name = auth.name
self.broker_client.publish(self.request.input)
|
gpl-3.0
| -7,917,064,679,736,714,000
| 36.626437
| 116
| 0.607148
| false
| 4.312912
| false
| false
| false
|
10239847509238470925387z/tmp123
|
app.py
|
1
|
2195
|
#!/usr/bin/env python
import urllib
import json
import os
import constants
import accounts
from flask import Flask
from flask import request
from flask import make_response
# Flask app should start in global layout
app = Flask(__name__)
PERSON = constants.TEST_1
@app.route('/webhook', methods=['POST'])
def webhook():
req = request.get_json(silent=True, force=True)
print("Request:")
print(json.dumps(req, indent=4))
res = makeWebhookResult(req)
res = json.dumps(res, indent=4)
print(res)
r = make_response(res)
r.headers['Content-Type'] = 'application/json'
return r
def makeWebhookResult(req):
if req.get("result").get("action") != "account-balance":
return constants.ERR_DICT(req.get("result").get("action"))
result = req.get("result")
parameters = result.get("parameters")
acct = parameters.get("account-type")
acct = acct.strip()
if acct=='401k':
acct='WI'
qual = parameters.get("qualifier")
speech = str(req.get("result").get("action"))
if acct:
if acct in constants.ACCT_TYPES:
speech = "The value of your {ACCT_TYPE} accounts is {VALU} dollars.".format(VALU=accounts.get_balance(PERSON, acct), ACCT_TYPE=acct)
else:
speech = "You don't have any accounts of that type. The total value of your other accounts is {VALU} dollars.".format(
VALU=accounts.get_balance(PERSON))
elif qual:
speech = "The total value of your accounts is {VALU} dollars.".format(VALU=accounts.get_balance(PERSON))
else:
speech = "The total value of your accounts is {VALU} dollars.".format(VALU=accounts.get_balance(PERSON))
# speech = "The cost of shipping to " + zone + " is " + str(cost[zone]) + " euros."
print("Response:")
print(speech)
speech += "\nAnything else I can help you with today?"
return {
"speech": speech,
"displayText": speech,
#"data": {},
# "contextOut": [],
"source": "home"
}
if __name__ == '__main__':
port = int(os.getenv('PORT', 5000))
print "Starting app on port %d" % port
app.run(debug=True, port=port, host='0.0.0.0')
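# Hedged example of the request this webhook expects (structure inferred from the lookups in
# makeWebhookResult; values illustrative):
#   {"result": {"action": "account-balance",
#               "parameters": {"account-type": "401k", "qualifier": ""}}}
# The response is a JSON object of the form {"speech": "...", "displayText": "...", "source": "home"}.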
|
apache-2.0
| -4,790,441,292,185,734,000
| 26.098765
| 144
| 0.625513
| false
| 3.540323
| false
| false
| false
|
Lind-Project/native_client
|
src/trusted/validator_arm/validation-report.py
|
1
|
3575
|
#!/usr/bin/python2
#
# Copyright (c) 2012 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
import sys
import textwrap
from subprocess import Popen, PIPE
_OBJDUMP = 'arm-linux-gnueabi-objdump'
def _objdump(binary, vaddr, ctx_before, ctx_after):
args = [
_OBJDUMP,
'-d',
'-G',
binary,
'--start-address=0x%08X' % (vaddr - (4 * ctx_before)),
'--stop-address=0x%08X' % (vaddr + 4 + (4 * ctx_after))]
highlight = ctx_before
lines = 0
for line in Popen(args, stdout=PIPE).stdout.read().split('\n'):
if line.startswith(' '):
if highlight == 0:
print '--> ', line
else:
print ' ', line
highlight -= 1
lines += 1
if not lines:
print ' (not found)'
def _problem_info(code):
return {
'kProblemUnsafe': ['Instruction is unsafe', 0, 0],
'kProblemBranchSplitsPattern': ['The destination of this branch is '
'part of an instruction sequence that must be executed in full, '
'or is inline data',
0, 0],
'kProblemPatternCrossesBundle': ['This instruction is part of a '
'sequence that must execute in full, but it spans a bundle edge '
'-- so an indirect branch may target it',
1, 1],
'kProblemBranchInvalidDest': ['This branch targets a location that is '
'outside of the application\'s executable code, and is not a valid '
'trampoline entry point', 0, 0],
'kProblemUnsafeLoadStore': ['This store instruction is not preceded by '
'a valid address mask instruction', 1, 0],
'kProblemUnsafeBranch': ['This indirect branch instruction is not '
'preceded by a valid address mask instruction', 1, 0],
'kProblemUnsafeDataWrite': ['This instruction affects a register that '
'must contain a valid data-region address, but is not followed by '
'a valid address mask instruction', 0, 1],
'kProblemReadOnlyRegister': ['This instruction changes the contents of '
'a read-only register', 0, 0],
'kProblemMisalignedCall': ['This linking branch instruction is not in '
'the last slot of its bundle, so when its LR result is masked, the '
'caller will not return to the next instruction', 0, 0],
}[code]
def _safety_msg(val):
return {
0: 'UNKNOWN', # Should not appear
1: 'is undefined',
2: 'has unpredictable effects',
3: 'is deprecated',
4: 'is forbidden',
5: 'uses forbidden operands',
}[val]
def _explain_problem(binary, vaddr, safety, code, ref_vaddr):
msg, ctx_before, ctx_after = _problem_info(code)
if safety == 6:
msg = "At %08X: %s:" % (vaddr, msg)
else:
msg = ("At %08X: %s (%s):"
% (vaddr, msg, _safety_msg(safety)))
print '\n'.join(textwrap.wrap(msg, 70, subsequent_indent=' '))
_objdump(binary, vaddr, ctx_before, ctx_after)
if ref_vaddr:
print "Destination address %08X:" % ref_vaddr
_objdump(binary, ref_vaddr, 1, 1)
def _parse_report(line):
vaddr_hex, safety, code, ref_vaddr_hex = line.split()
return (int(vaddr_hex, 16), int(safety), code, int(ref_vaddr_hex, 16))
for line in sys.stdin:
if line.startswith('ncval: '):
line = line[7:].strip()
_explain_problem(sys.argv[1], *_parse_report(line))
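# Hedged example of an input line consumed above (addresses and code are illustrative):
#   ncval: 00021f40 4 kProblemUnsafeBranch 00000000
# which reports an indirect branch at 0x00021F40 that is not preceded by a valid address mask
# ("is forbidden"), with no reference address to disassemble.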
|
bsd-3-clause
| 2,495,468,691,190,867,000
| 36.631579
| 80
| 0.59021
| false
| 3.685567
| false
| false
| false
|
core-code/LibVT
|
Dependencies/Core3D/Preprocessing/generateOctreeFromObj.py
|
1
|
10849
|
#!/usr/bin/env python
#
# generateOctreeFromObj.py
# Core3D
#
# Created by Julian Mayer on 16.11.07.
# Copyright (c) 2010 A. Julian Mayer
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import sys, bz2
from struct import *
from vecmath import *
TEXTURING = 1
GENTEX = 0
MAX_FLOAT = 1e+308
MIN_FLOAT = -1e308
MAX_USHORT = 0xFFFF
MAX_FACES_PER_TREELEAF = 1000
MAX_RECURSION_DEPTH = 10
SCALE = 1.0
vertices = []
faces = []
normals = []
texcoords = []
def faceContent(f, i):
if i == 0:
if f.count("/") == 0: return f
else: return f[:f.find("/")]
elif i == 1:
if f.count("/") == 0 or f.count("//") == 1: return 0
else:
if f.count("/") == 2:
return f[f.find("/")+1:f.rfind("/")]
else:
return f[f.find("/")+1:]
else:
if f.count("/") != 2: return 0
else: return f[f.rfind("/")+1:]
def calculateAABB(faces):
mi = [MAX_FLOAT, MAX_FLOAT,MAX_FLOAT]
ma = [MIN_FLOAT, MIN_FLOAT, MIN_FLOAT]
for face in faces:
for i in range(3):
for v in range(3):
ma[i] = max(ma[i], vertices[face[v]][i])
mi[i] = min(mi[i], vertices[face[v]][i])
return mi,ma
def classifyVertex(vertex, splitpoint): #TODO: do splitting or other funny things
if vertex[0] > splitpoint[0] and vertex[1] > splitpoint[1] and vertex[2] > splitpoint[2]: return 0
if vertex[0] <= splitpoint[0] and vertex[1] > splitpoint[1] and vertex[2] > splitpoint[2]: return 1
if vertex[0] > splitpoint[0] and vertex[1] > splitpoint[1] and vertex[2] <= splitpoint[2]: return 2
if vertex[0] > splitpoint[0] and vertex[1] <= splitpoint[1] and vertex[2] > splitpoint[2]: return 3
if vertex[0] <= splitpoint[0] and vertex[1] > splitpoint[1] and vertex[2] <= splitpoint[2]: return 4
if vertex[0] > splitpoint[0] and vertex[1] <= splitpoint[1] and vertex[2] <= splitpoint[2]: return 5
if vertex[0] <= splitpoint[0] and vertex[1] <= splitpoint[1] and vertex[2] > splitpoint[2]: return 6
if vertex[0] <= splitpoint[0] and vertex[1] <= splitpoint[1] and vertex[2] <= splitpoint[2]:return 7
def classifyFace(face, splitpoint):
return max(classifyVertex(vertices[face[0]], splitpoint), classifyVertex(vertices[face[1]], splitpoint), classifyVertex(vertices[face[2]], splitpoint)) #TODO: random instead of max?
def buildOctree(faces, offset, level):
mi,ma = calculateAABB(faces)
ournum = buildOctree.counter
buildOctree.counter += 1
childoffset = offset
if len(faces) > MAX_FACES_PER_TREELEAF and level < MAX_RECURSION_DEPTH:
splitpoint = [mi[0] + (ma[0] - mi[0])/2, mi[1] + (ma[1] - mi[1])/2, mi[2] + (ma[2] - mi[2])/2]
newfaces = [[],[],[],[],[],[],[],[]]
newnodes = []
childnums = []
for face in faces:
x = classifyFace(face, splitpoint)
newfaces[x].append(face)
for newface in newfaces:
a,b = buildOctree(newface, childoffset, level+1)
childoffset += len(newface)
childnums.append(a)
newnodes.extend(b)
faces[:] = newfaces[0]+newfaces[1]+newfaces[2]+newfaces[3]+newfaces[4]+newfaces[5]+newfaces[6]+newfaces[7]
newnodes.insert(0, [offset, len(faces), mi[0], mi[1], mi[2], ma[0] - mi[0], ma[1] - mi[1], ma[2] - mi[2], childnums[0], childnums[1], childnums[2], childnums[3], childnums[4], childnums[5], childnums[6], childnums[7]])
return ournum, newnodes
else:
return ournum, [[offset, len(faces), mi[0], mi[1], mi[2], ma[0] - mi[0], ma[1] - mi[1], ma[2] - mi[2], MAX_USHORT, MAX_USHORT, MAX_USHORT, MAX_USHORT, MAX_USHORT, MAX_USHORT, MAX_USHORT, MAX_USHORT]]
try:
if (len(sys.argv)) == 1: raise Exception('input', 'error')
f = open(sys.argv[len(sys.argv) - 1], 'r')
of = 0
for i in range(1, len(sys.argv) - 1):
if sys.argv[i].startswith("-s="): SCALE = float(sys.argv[i][3:])
elif sys.argv[i].startswith("-t"): TEXTURING = 0
elif sys.argv[i].startswith("-g="): GENTEX = int(sys.argv[i][3:]); TEXTURING = 0
elif sys.argv[i].startswith("-m="): MAX_FACES_PER_TREELEAF = int(sys.argv[i][3:])
elif sys.argv[i].startswith("-r="): MAX_RECURSION_DEPTH = int(sys.argv[i][3:])
elif sys.argv[i].startswith("-o="): of = open(sys.argv[i][3:], 'w')
else: raise Exception('input', 'error')
if of == 0: of = open(sys.argv[len(sys.argv) - 1][:sys.argv[len(sys.argv) - 1].rfind(".")] + ".octree.bz2", 'w')
except:
print """Usage: generateOctreeFromObj [options] obj_file
Options:
-t Ignore texture coordinates, produce an untextured Octree
-s=<scale> Scale all coordinates by <scale>
-m=<max_faces> Limit faces per leafnode to <max_faces> (Default: 1000)
-r=<max_recursion> Limit tree depth to <max_recursion> (Default: 10)
-o=<octree_file> Place the output octree into <octree_file>
-g=<0,1,2,3,4> Texture coordinate generation:
0 = off, 1 = on, 2 = swap X, 3 = swap Y, 4 = swap XY"""
sys.exit()
print "Info: Reading the OBJ-file"
lines = f.readlines()
for line in lines:
i = line.strip().split(" ")[0]
c = line[2:].strip().split(" ")
if i == "v":
vertices.append([float(c[0]) * SCALE, float(c[1]) * SCALE, float(c[2]) * SCALE])
elif i == "vn":
normals.append(normalize([float(c[0]), float(c[1]), float(c[2])]))
elif i == "vt":
texcoords.append([float(c[0]), float(c[1]), 0.0]) #TODO: if we discard W anyway we shouldnt store it
elif i == "f":
if (len(c) > 4):
print "Error: faces with more than 4 edges not supported"
sys.exit()
elif (len(c) == 4): #triangulate
faces.append([int(faceContent(c[0],0))-1, int(faceContent(c[1],0))-1, int(faceContent(c[2],0))-1, int(faceContent(c[0],2))-1, int(faceContent(c[1],2))-1, int(faceContent(c[2],2))-1, int(faceContent(c[0],1))-1, int(faceContent(c[1],1))-1, int(faceContent(c[2],1))-1])
faces.append([int(faceContent(c[0],0))-1, int(faceContent(c[2],0))-1, int(faceContent(c[3],0))-1, int(faceContent(c[0],2))-1, int(faceContent(c[2],2))-1, int(faceContent(c[3],2))-1, int(faceContent(c[0],1))-1, int(faceContent(c[2],1))-1, int(faceContent(c[3],1))-1])
else:
faces.append([int(faceContent(c[0],0))-1, int(faceContent(c[1],0))-1, int(faceContent(c[2],0))-1, int(faceContent(c[0],2))-1, int(faceContent(c[1],2))-1, int(faceContent(c[2],2))-1, int(faceContent(c[0],1))-1, int(faceContent(c[1],1))-1, int(faceContent(c[2],1))-1])
print "Info: Building the Octree"
buildOctree.counter = 0
a,nodes = buildOctree(faces, 0, 0)
if len(nodes) > MAX_USHORT:
print "Error: too many octree nodes generated, increase MAX_FACES_PER_TREELEAF"
sys.exit()
print "Info: Unifying and Uniquing Vertices, Normals and Texcoords"
normalwarning = 0
newvertices = []
newvertices_dict = {} #it's perhaps not the most intuitive way to have the newvertices stored twice, but it prevents a quadratic runtime
for face in faces:
for i in range(3):
if face[i+3] == -1:
normalwarning += 1
normals.append(normalize(crossProduct(substract(vertices[face[0]],vertices[face[1]]), substract(vertices[face[2]],vertices[face[0]]))))
face[i+3] = len(normals)-1
if TEXTURING and face[i+6] == -1:
print "Warning: some face without a texcoord detected, turning texturing off"
TEXTURING = 0
for i in range(3):
if len(vertices[face[i]]) == 3:
vertices[face[i]].extend(normals[face[i+3]])
if TEXTURING:
vertices[face[i]].extend(texcoords[face[i+6]])
elif vertices[face[i]][3:6] != normals[face[i+3]] or (TEXTURING and vertices[face[i]][6:] != texcoords[face[i+6]]): #if this vertex has a different normal/texcoord we have to duplicate it because opengl has only one index list
sf = face[i]
if TEXTURING:
key = vertices[face[i]][0], vertices[face[i]][1], vertices[face[i]][2], normals[face[i+3]][0], normals[face[i+3]][1], normals[face[i+3]][2], texcoords[face[i+6]][0], texcoords[face[i+6]][1], texcoords[face[i+6]][2]
else:
key = vertices[face[i]][0], vertices[face[i]][1], vertices[face[i]][2], normals[face[i+3]][0], normals[face[i+3]][1], normals[face[i+3]][2]
if newvertices_dict.has_key(key):
face[i] = len(vertices)+newvertices_dict[key]
if sf == face[i]: #or create duplicate
newvertices.append(list(key))
newvertices_dict[key] = len(newvertices)-1
face[i] = len(vertices)+len(newvertices)-1 #don't forget to update the index to the duplicated vertex+normal
vertices.extend(newvertices)
if normalwarning:
print "Warning: some face without a normal detected, calculating it (x" + str(normalwarning) +")"
print "Info: Writing the resulting Octree-file"
dummywarning = 0
out = pack('III', 0xDEADBEEF if (TEXTURING or GENTEX) else 0x6D616C62, len(nodes), len(vertices))
for node in nodes:
out += pack('IIffffffHHHHHHHH', node[0], node[1], node[2], node[3], node[4], node[5], node[6], node[7], node[8], node[9], node[10], node[11], node[12], node[13], node[14], node[15])
for vert in vertices:
try:
if TEXTURING:
out += pack('ffffffff', vert[0], vert[1], vert[2], vert[3], vert[4], vert[5], vert[6], vert[7])
elif GENTEX:
xcoord = (vert[0] - nodes[0][2]) / nodes[0][5]
ycoord = (vert[2] - nodes[0][4]) / nodes[0][7]
out += pack('ffffffff', vert[0], vert[1], vert[2], vert[3], vert[4], vert[5], (1.0 - xcoord) if (GENTEX == 2 or GENTEX == 4) else xcoord, (1.0 - ycoord) if (GENTEX == 3 or GENTEX == 4) else ycoord)
else:
out += pack('ffffff', vert[0], vert[1], vert[2], vert[3], vert[4], vert[5]) #the vertex includes the normal now, if not the vertex is unreferenced and this throws
except:
dummywarning += 1
if TEXTURING:
out += pack('ffffffff', 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0)
else:
out += pack('ffffff', 0.0, 0.0, 0.0, 0.0, 0.0, 0.0)
if (len(vertices) <= MAX_USHORT): type = 'HHH'
else: type = 'III'
for face in faces:
out += pack(type, face[0], face[1], face[2])
of.write(bz2.compress(out))
if dummywarning:
print "Warning: unreferenced vertex detected, writing dummy vertex (x" + str(dummywarning) +")"
print "\nSUCCESS:\n\nnodes:\t\t", len(nodes), "\nvertices:\t", len(vertices), "\t( duplicatesWithDifferentNormalsOrTexcoords:", len(newvertices), ")", "\nfaces:\t\t", len(faces), "\n"
|
mit
| 7,716,171,793,022,086,000
| 49.696262
| 462
| 0.660337
| false
| 2.678765
| false
| false
| false
|
Effective-Quadratures/Effective-Quadratures
|
equadratures/scalers.py
|
1
|
7167
|
"""
Classes to scale data.
Some of these classes are called internally by other modules, but they can also be used independently as a pre-processing stage.
Scalers can be fitted to one set of data, then used to transform other data sets with the same number of dimensions.
Examples
--------
Fitting scaler implicitly during transform
>>> # Define some 1D sample data
>>> X = np.random.RandomState(0).normal(2,0.5,200)
>>> (X.mean(),X.std())
>>> (2.0354552465705806, 0.5107113843479977)
>>>
>>> # Scale to zero mean and unit variance
>>> X = eq.scalers.scaler_meanvar().transform(X)
>>> (X.mean(),X.std())
>>> (2.886579864025407e-17, 1.0)
Using the same scaling to transform train and test data
>>> # Define some 5D example data
>>> X = np.random.RandomState(0).uniform(-10,10,size=(50,5))
>>> y = X[:,0]**2 - X[:,4]
>>> # Split into train/test
>>> X_train, X_test,y_train,y_test = eq.datasets.train_test_split(X,y,train=0.7,random_seed=0)
>>> (X_train.min(),X_train.max())
>>> (-9.906090476149059, 9.767476761184525)
>>>
>>> # Define a scaler and fit to training split
>>> scaler = eq.scalers.scaler_minmax()
>>> scaler.fit(X_train)
>>>
>>> # Transform train and test data with same scaler
>>> X_train = scaler.transform(X_train)
>>> X_test = scaler.transform(X_test)
>>> (X_train.min(),X_train.max())
>>> (-1.0, 1.0)
>>>
>>> # Finally, e.g. of transforming data back again
>>> X_train = scaler.untransform(X_train)
>>> (X_train.min(),X_train.max())
>>> (-9.906090476149059, 9.767476761184525)
"""
import numpy as np
class scaler_minmax(object):
""" Scale the data to have a min/max of -1 to 1. """
def __init__(self):
self.fitted = False
def fit(self,X):
""" Fit scaler to data.
Parameters
----------
X : numpy.ndarray
Array with shape (number_of_samples, number_of_dimensions) containing data to fit scaler to.
"""
if X.ndim == 1: X = X.reshape(-1,1)
self.Xmin = np.min(X,axis=0)
self.Xmax = np.max(X,axis=0)
self.fitted = True
def transform(self,X):
""" Transforms data. Calls :meth:`~equadratures.scalers.scaler_minmax.fit` fit internally if scaler not already fitted.
Parameters
----------
X : numpy.ndarray
Array with shape (number_of_samples, number_of_dimensions) containing data to transform.
Returns
-------
numpy.ndarray
Array with shape (number_of_samples, number_of_dimensions) containing transformed data.
"""
if X.ndim == 1: X = X.reshape(-1,1)
if not self.fitted: self.fit(X)
Xtrans = 2.0 * ( (X[:,:]-self.Xmin)/(self.Xmax - self.Xmin) ) - 1.0
return Xtrans
def untransform(self,X):
""" Untransforms data.
Parameters
----------
X : numpy.ndarray
Array with shape (number_of_samples, number_of_dimensions) containing data to untransform.
Returns
-------
numpy.ndarray
Array with shape (number_of_samples, number_of_dimensions) containing untransformed data.
Raises
------
Exception
scaler has not been fitted
"""
if X.ndim == 1: X = X.reshape(-1,1)
if not self.fitted:
raise Exception('scaler has not been fitted')
Xuntrans = 0.5*(X[:,:]+1)*(self.Xmax - self.Xmin) + self.Xmin
return Xuntrans
class scaler_meanvar(object):
"""
Scale the data to have a mean of 0 and variance of 1.
"""
def __init__(self):
self.fitted = False
def fit(self,X):
""" Fit scaler to data.
Parameters
----------
X : numpy.ndarray
Array with shape (number_of_samples, number_of_dimensions) containing data to fit scaler to.
"""
if X.ndim == 1: X = X.reshape(-1,1)
self.Xmean = np.mean(X,axis=0)
self.Xstd = np.std(X,axis=0)
self.fitted = True
def transform(self,X):
""" Transforms data. Calls :meth:`~equadratures.scalers.scaler_meanvar.fit` fit internally if scaler not already fitted.
Parameters
----------
X : numpy.ndarray
Array with shape (number_of_samples, number_of_dimensions) containing data to transform.
Returns
-------
numpy.ndarray
Array with shape (number_of_samples, number_of_dimensions) containing transformed data.
"""
if X.ndim == 1: X = X.reshape(-1,1)
if not self.fitted: self.fit(X)
eps = np.finfo(np.float64).tiny
Xtrans = (X[:,:]-self.Xmean)/(self.Xstd+eps)
return Xtrans
def untransform(self,X):
""" Untransforms data.
Parameters
----------
X : numpy.ndarray
Array with shape (number_of_samples, number_of_dimensions) containing data to untransform.
Returns
-------
numpy.ndarray
Array with shape (number_of_samples, number_of_dimensions) containing untransformed data.
Raises
------
Exception
scaler has not been fitted
"""
if X.ndim == 1: X = X.reshape(-1,1)
if not self.fitted:
raise Exception('scaler has not been fitted')
eps = np.finfo(np.float64).tiny
Xuntrans = X[:,:]*(self.Xstd+eps) + self.Xmean
return Xuntrans
class scaler_custom(object):
""" Scale the data by the provided offset and divisor.
Parameters
----------
offset : float, numpy.ndarray
Offset to subtract from data. Either a float, or array with shape (number_of_samples, number_of_dimensions).
div : float, numpy.ndarray
Divisor to divide data with. Either a float, or array with shape (number_of_samples, number_of_dimensions).
"""
def __init__(self, offset, div):
self.offset = offset
self.div = div
self.fitted = True
def transform(self,X):
""" Transforms data.
Parameters
----------
X : numpy.ndarray
Array with shape (number_of_samples, number_of_dimensions) containing data to transform.
Returns
-------
numpy.ndarray
Array with shape (number_of_samples, number_of_dimensions) containing transformed data.
"""
if X.ndim == 1: X = X.reshape(-1,1)
eps = np.finfo(np.float64).tiny
Xtrans = (X - self.offset)/(self.div + eps)
return Xtrans
def untransform(self,X):
""" Untransforms data.
Parameters
----------
X : numpy.ndarray
Array with shape (number_of_samples, number_of_dimensions) containing data to untransform.
Returns
-------
numpy.ndarray
Array with shape (number_of_samples, number_of_dimensions) containing untransformed data.
"""
if X.ndim == 1: X = X.reshape(-1,1)
eps = np.finfo(np.float64).tiny
Xuntrans = X * (self.div + eps) + self.offset
return Xuntrans
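# Hedged usage sketch of scaler_custom (values are illustrative):
#   sc = scaler_custom(offset=10.0, div=2.0)
#   sc.transform(np.array([10.0, 12.0, 14.0]))       # ~[[0.], [1.], [2.]] (1D input is reshaped to 2D)
#   sc.untransform(sc.transform(np.array([12.0])))   # recovers ~[[12.]]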
|
lgpl-2.1
| 3,159,541,299,082,102,300
| 31.425339
| 128
| 0.571588
| false
| 3.773565
| true
| false
| false
|
cloudconductor/cloud_conductor_gui
|
gui_app/views/applicationDeployViews.py
|
1
|
8093
|
# -*- coding: utf-8 -*-
from django.shortcuts import render, redirect, render_to_response
import ast
from ..forms import selecttForm
from ..forms import applicationForm
from ..utils import ApplicationUtil
from ..utils import ApplicationHistoryUtil
from ..utils import EnvironmentUtil
from ..utils import StringUtil
from ..utils.PathUtil import Path
from ..utils.PathUtil import Html
from ..enum.FunctionCode import FuncCode
from ..enum.ApplicationType import ApplicaionType
from ..enum.ProtocolType import ProtocolType
from ..utils import SessionUtil
from ..utils import SystemUtil
from ..logs import log
def applicationSelect(request):
try:
session = request.session
code = FuncCode.appDep_application.value
token = session.get('auth_token')
project_id = session.get('project_id')
application = ''
list = ''
list = ApplicationUtil.get_application_version(
code, token, project_id)
if request.method == "GET":
application = request.session.get('w_app_select')
return render(request, Html.appdeploy_applicationSelect,
{'list': list, 'application': application,
'message': ''})
elif request.method == "POST":
param = request.POST
# -- Session add
application = selectPut(param)
form = selecttForm(application)
if not form.is_valid():
return render(request, Html.appdeploy_applicationSelect,
{'list': list, 'application': application,
'form': form, 'message': ''})
request.session['w_app_select'] = application
return redirect(Path.appdeploy_environmentSelect)
except Exception as ex:
log.error(FuncCode.appDep_application.value, None, ex)
return render(request, Html.appdeploy_applicationSelect,
{'list': list, 'application': application,
'message': str(ex)})
def applicationCreate(request):
code = FuncCode.applicationCreate.value
apptype = list(ApplicaionType)
protocol = list(ProtocolType)
systems = None
try:
if not SessionUtil.check_login(request):
return redirect(Path.logout)
if not SessionUtil.check_permission(request, 'application', 'create'):
return render_to_response(Html.error_403)
token = request.session['auth_token']
project_id = request.session['project_id']
systems = SystemUtil.get_system_list2(code, token, project_id)
if request.method == "GET":
return render(request, Html.appdeploy_applicationCreate,
{'app': '', 'history': '', 'apptype': apptype,
'protocol': protocol, 'message': '',
'systems': systems, 'save': True})
else:
# -- Get a value from a form
p = request.POST
cpPost = p.copy()
# -- Validate check
form = applicationForm(p)
form.full_clean()
if not form.is_valid():
return render(request, Html.appdeploy_applicationCreate,
{'app': cpPost, 'history': cpPost,
'apptype': apptype,
'protocol': protocol, 'form': form,
'message': '', 'systems': systems,
'save': True})
            # -- 1. Create an application, api call
app = ApplicationUtil.create_application(code, token, form.data)
            # -- 2. Create an application history record, api call
ApplicationHistoryUtil.create_history(
code, token, app.get('id'), form.data)
request.session['w_app_select'] = {"id": app.get("id"),
"name": app.get("name")}
return redirect(Path.appdeploy_environmentSelect)
except Exception as ex:
log.error(FuncCode.applicationCreate.value, None, ex)
return render(request, Html.appdeploy_applicationCreate,
{'app': request.POST, 'history': request.POST,
'apptype': apptype,
'protocol': protocol, 'form': '',
'systems': systems, 'message': str(ex), 'save': True})
def environmentSelect(request):
list = ''
try:
code = FuncCode.appDep_environment.value
session = request.session
environment = session.get('w_env_select')
token = session['auth_token']
project_id = session['project_id']
app = ApplicationUtil.get_application_detail(
code, token, session.get('w_app_select').get('id'))
list = EnvironmentUtil.get_environment_list_system_id(
code, token, project_id, app.get("system_id"))
if request.method == "GET":
return render(request, Html.appdeploy_environmentSelect,
{"list": list, 'environment': environment,
'message': ''})
elif request.method == "POST":
param = request.POST
environment = selectPut(param)
form = selecttForm(environment)
if not form.is_valid():
return render(request, Html.appdeploy_environmentSelect,
{"list": list, 'environment': environment,
'form': form,
'message': ''})
request.session['w_env_select'] = environment
return redirect(Path.appdeploy_confirm)
except Exception as ex:
log.error(FuncCode.appDep_environment.value, None, ex)
return render(request, Html.appdeploy_environmentSelect,
{"list": '', 'environment': '', 'message': str(ex)})
def confirm(request):
try:
code = FuncCode.appDep_confirm.value
session = request.session
app_session = session.get('w_app_select')
env_session = session.get('w_env_select')
if request.method == "GET":
return render(request, Html.appdeploy_confirm,
{'application': app_session,
'environment': env_session, 'message': ''})
elif request.method == "POST":
session = request.session
code = FuncCode.newapp_confirm.value
token = session.get('auth_token')
env_id = env_session.get('id')
app_id = app_session.get('id')
# -- application deploy
ApplicationUtil.deploy_application(code, token, env_id, app_id)
# -- session delete
sessionDelete(session)
return redirect(Path.top)
except Exception as ex:
log.error(FuncCode.newapp_confirm.value, None, ex)
session = request.session
return render(request, Html.appdeploy_confirm,
{"application": session.get('application'),
'environment': session.get('environment'),
'message': str(ex)})
def selectPut(req):
if StringUtil.isEmpty(req):
return None
select_param = req.get('id', None)
if StringUtil.isNotEmpty(select_param):
select_param = ast.literal_eval(select_param)
param = {
'id': str(select_param.get('id')),
'name': select_param.get('name'),
}
return param
else:
return select_param
def putBlueprint(param):
blueprint = param.get('blueprint', None)
if not (blueprint is None) and not (blueprint == ''):
blueprint = ast.literal_eval(blueprint)
param['blueprint_id'] = blueprint.get('id')
param['version'] = blueprint.get('version')
return param
def sessionDelete(session):
if 'w_env_select' in session:
del session['w_env_select']
if 'w_app_select' in session:
del session['w_app_select']
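# Hedged example of the select parameter handled by selectPut() (values illustrative):
#   selectPut({'id': "{'id': 3, 'name': 'demo-app'}"})  ->  {'id': '3', 'name': 'demo-app'}
# i.e. the posted 'id' field carries a stringified dict that is parsed with ast.literal_eval.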
|
apache-2.0
| 7,722,206,676,188,103,000
| 34.034632
| 78
| 0.561596
| false
| 4.503617
| false
| false
| false
|
dan-cristian/haiot
|
gpio/io_common/__init__.py
|
1
|
3453
|
from common import Constant
from storage.model import m
from main.logger_helper import L
import abc
from common import utils
# update in db (without propagating the change by default)
def update_custom_relay(pin_code, pin_value, notify=False, ignore_missing=False):
relay = m.ZoneCustomRelay.find_one({m.ZoneCustomRelay.gpio_pin_code: pin_code,
m.ZoneCustomRelay.gpio_host_name: Constant.HOST_NAME})
if relay is not None:
relay.relay_is_on = pin_value
relay.save_changed_fields(broadcast=notify)
L.l.info('Updated relay {} val={}'.format(pin_code, pin_value))
else:
if not ignore_missing:
L.l.warning('Unable to find relay pin {}'.format(pin_code))
# update in db (without propagating the change by default)
def update_listener_custom_relay(relay, is_on):
relay.relay_is_on = is_on
relay.save_changed_fields(broadcast=True)
L.l.info('Updated listener relay {} val={}'.format(relay, is_on))
class Port:
_port_list = []
type = None
TYPE_GPIO = 'gpio'
TYPE_PIFACE = 'piface'
TYPE_PCF8574 = 'pcf8574'
_types = frozenset([TYPE_GPIO, TYPE_PIFACE, TYPE_PCF8574])
def __init__(self):
pass
class OutputPort(Port):
def __init__(self):
pass
class InputPort(Port):
def __init__(self):
pass
class IOPort(InputPort, OutputPort):
def __init__(self):
pass
class GpioBase:
__metaclass__ = abc.ABCMeta
@staticmethod
@abc.abstractmethod
def get_current_record(record):
return None, None
@staticmethod
@abc.abstractmethod
def get_db_record(key):
return None
def record_update(self, record, changed_fields):
# record = utils.json_to_record(self.obj, json_object)
current_record, key = self.get_current_record(record)
if current_record is not None:
new_record = self.obj()
kwargs = {}
for field in changed_fields:
val = getattr(record, field)
# setattr(new_record, field, val)
kwargs[field] = val
if record.host_name == Constant.HOST_NAME and record.source_host != Constant.HOST_NAME:
# https://stackoverflow.com/questions/1496346/passing-a-list-of-kwargs
self.set(key, **kwargs)
# do nothing, action done already as it was local
# save will be done on model.save
# record.save_changed_fields()
@staticmethod
@abc.abstractmethod
def set(key, values):
pass
@staticmethod
@abc.abstractmethod
def save(key, values):
pass
@staticmethod
@abc.abstractmethod
def get(key):
return None
@staticmethod
@abc.abstractmethod
def sync_to_db(key):
pass
@staticmethod
@abc.abstractmethod
def unload():
pass
def __init__(self, obj):
self.obj = obj
def format_piface_pin_code(board_index, pin_direction, pin_index):
return str(board_index) + ":" + str(pin_direction) + ":" + str(pin_index)
# port format is x:direction:y, e.g. 0:in:3, x=board, direction=in/out, y=pin index (0 based)
def decode_piface_pin(pin_code):
ar = pin_code.split(':')
if len(ar) == 3:
return int(ar[0]), ar[1], int(ar[2])
else:
L.l.error('Invalid piface pin code {}'.format(pin_code))
return None, None, None
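# Hedged example of the piface pin code round-trip described above:
#   format_piface_pin_code(0, 'in', 3)  ->  '0:in:3'
#   decode_piface_pin('0:in:3')         ->  (0, 'in', 3)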
|
gpl-2.0
| 1,524,281,052,770,179,800
| 25.775194
| 99
| 0.609036
| false
| 3.638567
| false
| false
| false
|
pimoroni/unicorn-hat-hd
|
examples/show-png.py
|
1
|
1382
|
#!/usr/bin/env python
import time
from sys import exit
try:
from PIL import Image
except ImportError:
exit('This script requires the pillow module\nInstall with: sudo pip install pillow')
import unicornhathd
print("""Unicorn HAT HD: Show a PNG image!
This basic example shows use of the Python Pillow library.
The tiny 16x16 bosses in lofi.png are from Oddball:
http://forums.tigsource.com/index.php?topic=8834.0
Licensed under Creative Commons Attribution-Noncommercial-Share Alike 3.0
Unported License.
Press Ctrl+C to exit!
""")
unicornhathd.rotation(0)
unicornhathd.brightness(0.6)
width, height = unicornhathd.get_shape()
img = Image.open('lofi.png')
try:
while True:
for o_x in range(int(img.size[0] / width)):
for o_y in range(int(img.size[1] / height)):
valid = False
for x in range(width):
for y in range(height):
pixel = img.getpixel(((o_x * width) + y, (o_y * height) + x))
r, g, b = int(pixel[0]), int(pixel[1]), int(pixel[2])
if r or g or b:
valid = True
unicornhathd.set_pixel(x, y, r, g, b)
if valid:
unicornhathd.show()
time.sleep(0.5)
except KeyboardInterrupt:
unicornhathd.off()
|
mit
| -8,984,369,968,961,083,000
| 24.592593
| 89
| 0.583213
| false
| 3.30622
| false
| false
| false
|
stuartlangridge/raspi-recorder
|
listener_daemon.py
|
1
|
2508
|
import os, threading, time, subprocess
from bluetooth import *
server_sock=BluetoothSocket( RFCOMM )
server_sock.bind(("",PORT_ANY))
server_sock.listen(1)
port = server_sock.getsockname()[1]
uuid = "c3091f5f-7e2f-4908-b628-18231dfb5034"
advertise_service( server_sock, "PiRecorder",
service_id = uuid,
service_classes = [ uuid, SERIAL_PORT_CLASS ],
profiles = [ SERIAL_PORT_PROFILE ],
)
print "Waiting for connection on RFCOMM channel %d" % port
client_sock, client_info = server_sock.accept()
print "Accepted connection from ", client_info
lock = threading.Lock()
def mainthread(sock):
try:
with lock:
sock.send("\r\nWelcome to recorder. [1] start recording, [2] stop.\r\n\r\n")
while True:
data = sock.recv(1)
if len(data) == 0: break
if data == "1":
with lock:
sock.send("Starting sound recorder\r\n")
os.system("supervisorctl -c ./supervisor.conf start sound_recorder")
elif data == "2":
with lock:
sock.send("Stopping sound recorder\r\n")
os.system("supervisorctl -c ./supervisor.conf stop sound_recorder")
else:
print "received [%s]" % data
with lock:
output = "unrecognised [%s]\r\n" % (data,)
sock.send(output)
except IOError:
print "got io error"
def heartbeat(sock):
while True:
time.sleep(5)
o = subprocess.check_output(["supervisorctl", "-c",
os.path.join(os.path.split(__file__)[0], "supervisor.conf"),
"status"])
procs = {}
for parts in [x.split() for x in o.split("\n")]:
if len(parts) > 1:
procs[parts[0]] = parts[1]
sr = procs.get("sound_recorder", "ABSENT")
svfs = os.statvfs(".")
bytes_remaining = svfs.f_frsize * svfs.f_bavail
bytes_total = svfs.f_frsize * svfs.f_blocks
with lock:
sock.send("heartbeat %s %s %s\r\n" % (
sr, bytes_remaining, bytes_total))
mainthread = threading.Thread(target=mainthread, args=(client_sock,))
mainthread.start()
heartbeatthread = threading.Thread(target=heartbeat, args=(client_sock,))
heartbeatthread.setDaemon(True)
heartbeatthread.start()
mainthread.join()
print "disconnected"
client_sock.close()
server_sock.close()
print "all done"
|
mit
| 294,923,898,243,346,560
| 33.356164
| 88
| 0.570175
| false
| 3.582857
| false
| false
| false
|
Azure/azure-sdk-for-python
|
sdk/communication/azure-communication-sms/azure/communication/sms/_models/_models.py
|
1
|
2064
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import msrest.serialization
class SmsSendResult(msrest.serialization.Model):
"""Response for a single recipient.
All required parameters must be populated in order to send to Azure.
:param to: Required. The recipient's phone number in E.164 format.
:type to: str
:param message_id: The identifier of the outgoing Sms message. Only present if message
processed.
:type message_id: str
:param http_status_code: Required. HTTP Status code.
:type http_status_code: int
:param successful: Required. Indicates if the message is processed successfully or not.
:type successful: bool
:param error_message: Optional error message in case of 4xx/5xx/repeatable errors.
:type error_message: str
"""
_validation = {
'to': {'required': True},
'http_status_code': {'required': True},
'successful': {'required': True},
}
_attribute_map = {
'to': {'key': 'to', 'type': 'str'},
'message_id': {'key': 'messageId', 'type': 'str'},
'http_status_code': {'key': 'httpStatusCode', 'type': 'int'},
'successful': {'key': 'successful', 'type': 'bool'},
'error_message': {'key': 'errorMessage', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(SmsSendResult, self).__init__(**kwargs)
self.to = kwargs['to']
self.message_id = kwargs.get('message_id', None)
self.http_status_code = kwargs['http_status_code']
self.successful = kwargs['successful']
self.error_message = kwargs.get('error_message', None)
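# Hedged usage sketch (illustrative values; this comment is not part of the generated model):
#   result = SmsSendResult(to="+14255550123", http_status_code=202, successful=True)
#   result.message_id   # None unless the service processed the message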
|
mit
| 8,139,079,774,969,169,000
| 37.943396
| 94
| 0.593508
| false
| 4.212245
| false
| false
| false
|
twoolie/ProjectNarwhal
|
narwhal/core/profile/admin.py
|
1
|
1124
|
# -*- coding: utf-8 -*-
from django.contrib import admin
from django.utils.translation import ugettext_lazy as _
from sorl.thumbnail.admin import AdminImageMixin
from treebeard.admin import TreeAdmin
from models import Profile
class ProfileAdmin(AdminImageMixin, admin.ModelAdmin):
search_fields = ('user__username', 'extra_data')
list_display = (#'user__username', 'user__date_joined',
'user', 'uploaded', 'downloaded',)
list_filter = ('user__is_staff',)
fields = ('user', 'avatar', 'key', 'downloaded', 'uploaded' )
#fieldsets = (
# (None, {
# 'fields': ( ('title', 'slug'),
# ('user', 'comments_enabled'),
# 'description', )
# }),
# (_('Files'), {
# 'fields': ( ('torrent', 'image'), ),
# }),
#(_('Quick Stats'), {
# 'classes': ('collapse',),
# 'fields': ( ('size', 'files'),
# ('seeders', 'leechers'),
# ('pub_date', 'comment_count'), )
#}),
#)
admin.site.register(Profile, ProfileAdmin)
|
gpl-3.0
| 3,874,579,077,347,851,300
| 31.114286
| 65
| 0.508007
| false
| 3.849315
| false
| false
| false
|
cappatar/knesset-data-pipelines
|
datapackage_pipelines_knesset/members/processors/load_members.py
|
1
|
1419
|
from datapackage_pipelines_knesset.common.base_processors.add_resource import AddResourceBaseProcessor
# only loads members with the following positionId:
SUPPORTED_POSITION_IDS = [43, 61]
class Processor(AddResourceBaseProcessor):
def _get_schema(self, resource_descriptor):
return resource_descriptor.get("schema", {
"fields": [
{"name": "url", "type": "string", "description": "url to download protocol from"},
{
"name": "kns_person_id", "type": "integer",
"description": "primary key from kns_person table"}
],
"primaryKey": ["kns_person_id"]
})
def _get_new_resource(self):
person_table = self.db_meta.tables.get("kns_person")
persontoposition_table = self.db_meta.tables.get("kns_persontoposition")
if person_table is None or persontoposition_table is None:
raise Exception("processor requires kns person tables to exist")
for db_row in self.db_session\
.query(person_table, persontoposition_table)\
            .filter(persontoposition_table.c.PersonID==person_table.c.PersonID)\
            .filter(persontoposition_table.c.PositionID.in_(SUPPORTED_POSITION_IDS))\
.all():
row = db_row._asdict()
yield {"kns_person_id": row["PersonID"]}
if __name__ == "__main__":
Processor.main()
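# Hedged example of what this processor yields (id illustrative): one dict per person holding a
# supported position (43 or 61), e.g. {"kns_person_id": 123}.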
|
mit
| -2,604,610,188,792,806,400
| 40.735294
| 102
| 0.613813
| false
| 3.898352
| false
| false
| false
|
valentinmetraux/hierophis
|
hierophis/maths/statistics/basic.py
|
1
|
2080
|
#!/usr/bin/env python
# -*- coding: utf 8 -*-
"""
Utility functions.
:copyright: 2015 Agile Geoscience
:license: Apache 2.0
"""
import numpy as np
import scipy.signal
def rms(a):
"""
Calculates the RMS of an array.
:param a: An array.
:returns: The RMS of the array.
"""
return np.sqrt(np.sum(a**2.0)/a.size)
def normalize(a, new_min=0.0, new_max=1.0):
"""
Normalize an array to [0,1] or to
arbitrary new min and max.
:param a: An array.
:param new_min: A float to be the new min, default 0.
:param new_max: A float to be the new max, default 1.
:returns: The normalized array.
"""
n = (a - np.amin(a)) / float(np.amax(a - np.amin(a)))
return n * (new_max - new_min) + new_min
def moving_average(a, method = "convolve", length=9, mode='valid'):
"""
Computes the mean in a moving window.
Methods: naive, fft, convolve
Length: Kernel length
Modes: full, valid, same
"""
if method == "fft":
boxcar = np.ones(length)/length
return scipy.signal.fftconvolve(a, boxcar, mode="valid")
elif method == "convolve":
boxcar = np.ones(length)/length
return np.convolve(a, boxcar, mode="valid")
else:
        pad = int(np.floor(length/2))  # integer pad so it can be used as a slice index below
if mode == 'full':
pad *= 2
# Make a padded version, paddding with first and last values
r = np.empty(a.shape[0] + 2*pad)
r[:pad] = a[0]
r[pad:-pad] = a
r[-pad:] = a[-1]
# Cumsum with shifting trick
s = np.cumsum(r, dtype=float)
s[length:] = s[length:] - s[:-length]
out = s[length-1:]/length
# Decide what to return
if mode == 'same':
if out.shape[0] != a.shape[0]:
# If size doesn't match, then interpolate.
out = (out[:-1, ...] + out[1:, ...]) / 2
return out
elif mode == 'valid':
return out[pad:-pad]
else: # mode=='full' and we used a double pad
return out
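# Hedged usage sketch of moving_average (illustrative):
#   a = np.arange(10, dtype=float)
#   moving_average(a, length=3)                                # boxcar mean via np.convolve, 8 'valid' samples
#   moving_average(a, method='naive', length=3, mode='same')   # cumsum path above, same length as input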
|
apache-2.0
| -7,067,858,668,146,052,000
| 23.77381
| 68
| 0.533173
| false
| 3.338684
| false
| false
| false
|
all-of-us/raw-data-repository
|
tests/api_tests/test_ppi_data_check_api.py
|
1
|
2788
|
from rdr_service.model.code import CodeType
from tests.helpers.unittest_base import BaseTestCase
class CheckPpiDataApiTest(BaseTestCase):
def setUp(self):
super(CheckPpiDataApiTest, self).setUp(with_consent_codes=True)
self.participant_summary = self.data_generator.create_database_participant_summary(email='test@example.com')
questions_and_answers = [
('first_question_code', 'first_answer_code'),
('Second_CODE', 'ANOTHER_ANSWER'),
('LAST_CODE', 'Final_Answer|with_additional_option')
]
questionnaire = self.data_generator.create_database_questionnaire_history()
for question_code_value, _ in questions_and_answers:
question_code = self.data_generator.create_database_code(
value=question_code_value,
codeType=CodeType.QUESTION
)
self.data_generator.create_database_questionnaire_question(
questionnaireId=questionnaire.questionnaireId,
questionnaireVersion=questionnaire.version,
codeId=question_code.codeId
)
questionnaire_response = self.data_generator.create_database_questionnaire_response(
participantId=self.participant_summary.participantId,
questionnaireId=questionnaire.questionnaireId,
questionnaireVersion=questionnaire.version
)
for question_index, (_, answer_code_values) in enumerate(questions_and_answers):
question = questionnaire.questions[question_index]
for answer_value in answer_code_values.split('|'):
answer_code = self.data_generator.create_database_code(value=answer_value)
self.data_generator.create_database_questionnaire_response_answer(
questionnaireResponseId=questionnaire_response.questionnaireResponseId,
questionId=question.questionnaireQuestionId,
valueCodeId=answer_code.codeId
)
def test_case_insensitive_answer_code_matching(self):
"""Make sure case doesn't matter when matching answer codes against what the server has"""
ppi_check_payload = {
'ppi_data': {
self.participant_summary.email: {
'fIrSt_QuEsTiOn_CoDe': 'First_Answer_Code',
'SECOND_CODE': 'another_answer',
'last_code': 'Final_ANSWER|WITH_ADDITIONAL_OPTION'
}
}
}
response = self.send_post('CheckPpiData', ppi_check_payload)
response_error_count = response['ppi_results']['test@example.com']['errors_count']
self.assertEqual(0, response_error_count, 'Differences in case should not cause errors')
|
bsd-3-clause
| -8,436,569,744,233,131,000
| 45.466667
| 116
| 0.638451
| false
| 4.173653
| true
| false
| false
|
wooga/airflow
|
airflow/example_dags/example_nested_branch_dag.py
|
1
|
2019
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Example DAG demonstrating a workflow with nested branching. The join tasks are created with the
``none_failed_or_skipped`` trigger rule such that they are skipped whenever their corresponding
``BranchPythonOperator`` tasks are skipped.
"""
from airflow.models import DAG
from airflow.operators.dummy_operator import DummyOperator
from airflow.operators.python import BranchPythonOperator
from airflow.utils.dates import days_ago
with DAG(dag_id="example_nested_branch_dag", start_date=days_ago(2), schedule_interval="@daily") as dag:
branch_1 = BranchPythonOperator(task_id="branch_1", python_callable=lambda: "true_1")
join_1 = DummyOperator(task_id="join_1", trigger_rule="none_failed_or_skipped")
true_1 = DummyOperator(task_id="true_1")
false_1 = DummyOperator(task_id="false_1")
branch_2 = BranchPythonOperator(task_id="branch_2", python_callable=lambda: "true_2")
join_2 = DummyOperator(task_id="join_2", trigger_rule="none_failed_or_skipped")
true_2 = DummyOperator(task_id="true_2")
false_2 = DummyOperator(task_id="false_2")
false_3 = DummyOperator(task_id="false_3")
branch_1 >> true_1 >> join_1
branch_1 >> false_1 >> branch_2 >> [true_2, false_2] >> join_2 >> false_3 >> join_1
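# Hedged sketch of the resulting task graph (read left to right):
#   branch_1 -> true_1 ---------------------------------------------------------> join_1
#   branch_1 -> false_1 -> branch_2 -> [true_2, false_2] -> join_2 -> false_3 -> join_1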
|
apache-2.0
| -1,070,214,598,085,398,900
| 47.071429
| 104
| 0.740961
| false
| 3.631295
| false
| false
| false
|
mojolab/LivingData
|
lib/livdatops.py
|
1
|
1153
|
import pandas
def getColRenameDict(mergersheet,sheet):
colrenamedict={}
originalcolnames=mergersheet[sheet].fillna("NA")
newcolnames=mergersheet[mergersheet.columns[0]]
for i in range(0,len(originalcolnames)):
colrenamedict[originalcolnames[i]]=newcolnames[i]
# if originalcolnames[i]!="NA":
# colrenamedict[originalcolnames[i]]=newcolnames[i]
return colrenamedict
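# Hedged example of getColRenameDict (sheet names and headers illustrative): if the merger sheet's
# first column holds the unified names ['Date', 'Amount'] and its 'donations' column holds that
# sheet's own headers ['dt', 'amt'], the function returns {'dt': 'Date', 'amt': 'Amount'}.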
def createMergedDFList(dflist,mergersheetname):
altereddfs={}
for sheet,matrix in dflist.iteritems():
if sheet == mergersheetname:
altereddfs[sheet]=matrix
mergersheet=matrix
else:
df=matrix
print df.columns
columnrenamedict=getColRenameDict(mergersheet,sheet)
print columnrenamedict
altereddf=df.rename(columns=columnrenamedict)
for key,value in columnrenamedict.iteritems():
if key =="NA":
altereddf[value]=0
print df,altereddf
altereddfs[sheet]=altereddf
finalsheet=[]
for sheet,matrix in altereddfs.iteritems():
if sheet!=mergersheetname:
finalsheet.append(matrix.fillna(0))
finalsheetm=pandas.concat(finalsheet)
finalsheetname=mergersheet.columns.values[0]
altereddfs[finalsheetname]=finalsheetm
return altereddfs
|
apache-2.0
| -1,906,478,291,639,418,000
| 30.162162
| 55
| 0.774501
| false
| 2.987047
| false
| false
| false
|
CSC-IT-Center-for-Science/pouta-blueprints
|
pebbles/views/authorize_instances.py
|
1
|
3153
|
from flask import abort, request, Response, Blueprint
import datetime
import logging
import re
from pebbles.models import InstanceToken
from pebbles.server import restful
authorize_instances = Blueprint('authorize_instances', __name__)
class AuthorizeInstancesView(restful.Resource):
def get(self):
token = ''
instance_id = ''
        # The idea here is to check whether the original-token and instance-id headers are already present,
        # sent by the nginx proxy of the openshift app. If they are, authentication has already taken place
        # and a cookie exists for the openshift app, so obtain the info contained in the headers.
if 'ORIGINAL-TOKEN' in request.headers and 'INSTANCE-ID' in request.headers:
token = request.headers['ORIGINAL-TOKEN']
instance_id = request.headers['INSTANCE-ID']
# otherwise, the x-original-uri consists of the query string info (which is sent by the openshift driver to the nginx of the openshift app)
# The query string has the token info and instance id
# NOTE: This is only used when the authentication is being done for the first time!
elif 'X-ORIGINAL-URI' in request.headers:
h_uri = request.headers['X-ORIGINAL-URI']
regex_query_capture = re.search('.*\\?(.*)=(.*)&(.*)=(.*)', h_uri) # parse the query string
if regex_query_capture and len(regex_query_capture.groups()) == 4:
if regex_query_capture.group(1) == 'token' and regex_query_capture.group(3) == 'instance_id':
token = regex_query_capture.group(2)
instance_id = regex_query_capture.group(4)
elif regex_query_capture.group(1) == 'instance_id' and regex_query_capture.group(3) == 'token':
instance_id = regex_query_capture.group(2)
token = regex_query_capture.group(4)
if not token and not instance_id:
logging.warn('No instance token or id found from the headers')
return abort(401)
instance_token_obj = InstanceToken.query.filter_by(token=token).first()
if not instance_token_obj:
logging.warn("instance token object %s not found" % token)
return abort(401)
curr_time = datetime.datetime.utcnow()
expires_on = instance_token_obj.expires_on
if curr_time > expires_on:
logging.warn("instance token %s has expired" % token)
return abort(403)
if instance_token_obj.instance_id != instance_id:
logging.warn("instance id %s from the token does not match the instance_id %s passed as a parameter" % (instance_token_obj.instance_id, instance_id))
return abort(403)
resp = Response("Authorized")
# send the headers back to nginx proxy running on the openshift based instance,
# which is going to store it as a cookie for the next time, the authorization takes place
resp.headers["TOKEN"] = instance_token_obj.token
resp.headers["INSTANCE-ID"] = instance_id
return resp
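# Hedged example of the first-time flow handled above (values illustrative):
#   X-ORIGINAL-URI: /notebook/?token=abc123&instance_id=42
# parses to token='abc123', instance_id='42'; if the token exists, is unexpired and matches the
# instance, the "Authorized" response carries TOKEN and INSTANCE-ID headers for the openshift
# app's nginx proxy to store as a cookie for subsequent requests.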
|
mit
| 2,302,791,649,978,159,000
| 49.047619
| 161
| 0.64732
| false
| 4.121569
| false
| false
| false
|
weiqiangdragonite/blog_tmp
|
python/baidu/myip.py
|
1
|
1085
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# http://ip.taobao.com/instructions.php
import socket
#
common_headers = \
"Host: ip.taobao.com\r\n" + \
"Connection: Keep-Alive\r\n" + \
"Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\r\n" + \
"User-Agent: Mozilla/5.0 (X11; Linux) AppleWebKit/538.1 (KHTML, like Gecko) Chrome/18.0.1025.133 Safari/538.1 Midori/0.5\r\n" + \
"Accept-Language: en-us;q=0.750\r\n"
# 通过 GET
get_headers = \
"GET /service/getIpInfo.php?ip=myip HTTP/1.1\r\n" + \
common_headers + \
"\r\n"
# 通过 POST
post_headers = \
"POST /service/getIpInfo2.php HTTP/1.1\r\n" + \
common_headers + \
"Content-Length: 7\r\n" + \
"\r\n" + \
"ip=myip";
if __name__ == "__main__":
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect(("ip.taobao.com", 80))
s.send(get_headers)
buffer = []
while True:
d = s.recv(1024)
if d:
buffer.append(d)
else:
break
data = ''.join(buffer)
s.close()
print data
|
gpl-2.0
| -721,930,583,678,637,200
| 21.93617
| 133
| 0.563603
| false
| 2.51049
| false
| false
| false
|
dhanababum/accessdb
|
accessdb/utils.py
|
1
|
8395
|
# -*- coding: utf-8 -*-
# Copyright 2017 Dhana Babu
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import random
import string
import tempfile
import shutil
import pypyodbc as odbc
from .access_api import create
_MS_ACCESS_TYPES = {
'BIT',
'BYTE',
'SHORT',
'LONG',
'CURRENCY',
'SINGLE',
'DOUBLE',
'DATETIME',
'TEXT',
'MEMO',
'PRIMARY', # CUSTOM Type for handling AUTOINCREMENT
}
SCHEMA_FILE = 'schema.ini'
_TEXT_SEPARATORS = {
r',': 'CSVDelimited',
r'\t': 'TabDelimited'
}
def _text_formater(sep):
separator = _TEXT_SEPARATORS.get(sep, 'Delimited({})')
return separator.format(sep)
def _stringify_path(db_path):
dtr, path = os.path.split(db_path)
if dtr == '':
db_path = os.path.join('.', path)
return db_path
def _push_access_db(temp_dir, text_file, data_columns,
header_columns, dtype, path, table_name, sep,
append, overwrite, delete='file'):
table = Table(temp_dir, text_file,
table_name,
data_columns,
header_columns,
dtype, sep, append)
schema_file = os.path.join(temp_dir, SCHEMA_FILE)
try:
with SchemaWriter(temp_dir, text_file, data_columns,
header_columns, dtype, sep, schema_file) as schema:
schema.write()
with AccessDBConnection(path, overwrite) as con:
cursor = con.cursor()
if not append:
cursor.execute(table.create_query())
cursor.execute(table.insert_query())
con.commit()
finally:
if delete == 'folder':
shutil.rmtree(temp_dir)
else:
os.unlink(schema_file)
def _get_random_file():
return ''.join(random.choice(string.ascii_lowercase) for _ in range(10))
class DataTypeNotFound(Exception):
pass
class SchemaWriter(object):
def __init__(self, temp_dir, text_file, df_columns,
columns, dtype, sep, schema_file):
self.temp_dir = temp_dir
self.text_file = text_file
self.df_columns = df_columns
self.columns = columns
self.dtype = dtype
self.sep = sep
self.path = schema_file
def __enter__(self):
self.fp = open(self.path, 'w')
return self
def __exit__(self, *args):
self.fp.close()
def formater(self):
yield '[%s]' % self.text_file
yield 'ColNameHeader=True'
yield 'Format=%s' % _text_formater(self.sep)
self.dcols = {col: ('Col%s' % (i + 1))
for i, col in enumerate(self.df_columns)}
if not isinstance(self.dtype, dict):
self.dtype = {}
for col in self.df_columns:
ctype = self.dtype.get(col, 'text').upper()
if ctype not in _MS_ACCESS_TYPES:
raise DataTypeNotFound(
'Provided Data Type Not Found %s' % ctype)
if ctype == 'PRIMARY':
ctype = 'TEXT'
yield '{c_col}="{d_col}" {c_type}'.format(
c_col=self.dcols[col],
d_col=col,
c_type=ctype.capitalize())
def write(self):
for line in self.formater():
self.fp.write(line)
self.fp.write('\n')
class Table(object):
def __init__(self, temp_dir, text_file,
table_name, df_columns, columns,
dtype, sep, append):
self.temp_dir = temp_dir
self.text_file = text_file
self.df_columns = df_columns
self.table_name = table_name
self.df_columns = df_columns
self.columns = columns
self.dtype = dtype
self.sep = sep
self.append = append
if not isinstance(self.dtype, dict):
self.dtype = {}
def _get_colunm_type(self, col):
ctype = self.dtype.get(col, 'TEXT').upper()
if ctype not in _MS_ACCESS_TYPES:
            raise DataTypeNotFound(
                'Provided Data Type Not Found %s' % ctype)
return ctype
def formater(self):
for col in self.df_columns:
c_type = self._get_colunm_type(col)
if c_type == 'PRIMARY':
c_type = 'AUTOINCREMENT PRIMARY KEY'
if self.columns:
if col not in self.columns:
continue
col = self.columns[col]
yield '`{c_col}` {c_type}'.format(c_col=col,
c_type=c_type)
def insert_formater(self):
for col in self.df_columns:
if self._get_colunm_type(col) == 'PRIMARY':
continue
if not self.columns:
self.columns = dict(zip(self.df_columns, self.df_columns))
if self.columns:
if col not in self.columns:
continue
cus_col = self.columns[col]
yield col, cus_col
def built_columns(self):
return '(%s)' % ','.join(self.formater())
def create_query(self):
return "CREATE TABLE `{table_name}`{columns}".format(
table_name=self.table_name,
columns=self.built_columns())
@staticmethod
def required_columns(cols):
return ','.join('`%s`' % c for c in cols)
def insert_query(self):
custom_columns = []
columns = []
for col1, col2 in self.insert_formater():
columns.append(col1)
custom_columns.append(col2)
return """
INSERT INTO `{table_name}`({columns})
SELECT {required_cols} FROM [TEXT;HDR=YES;FMT={separator};
Database={temp_dir}].{text_file}
""".format(temp_dir=self.temp_dir,
text_file=self.text_file,
columns=self.required_columns(custom_columns),
required_cols=self.required_columns(columns),
table_name=self.table_name,
separator=_text_formater(self.sep))
class AccessDBConnection(object):
def __init__(self, db_path, overwrite):
self.overwrite = overwrite
self.db_path = _stringify_path(db_path)
def __enter__(self):
if not os.path.isfile(self.db_path) or self.overwrite:
create(self.db_path)
odbc_conn_str = '''DRIVER={Microsoft Access Driver (*.mdb, *.accdb)};
DBQ=%s''' % (self.db_path)
self.con = odbc.connect(odbc_conn_str)
return self.con
def __exit__(self, *args):
self.con.close()
def to_accessdb(self, path, table_name,
header_columns=None, dtype='str', engine='text',
sep=',', append=False, overwrite=False):
if self.empty:
return
temp_dir = tempfile.mkdtemp()
text_file = '%s.txt' % _get_random_file()
text_path = os.path.join(temp_dir, text_file)
self.to_csv(text_path, index=False)
_push_access_db(temp_dir, text_file,
self.columns.tolist(),
header_columns, dtype, path, table_name,
sep, append, overwrite, 'folder')
def create_accessdb(path, text_path, table_name,
header_columns=None, dtype='str',
engine='text', sep=',', append=False, overwrite=False):
temp_dir, text_file = os.path.split(os.path.abspath(text_path))
with open(text_path) as fp:
file_columns = fp.readline().strip('\n').split(sep)
_push_access_db(temp_dir, text_file,
file_columns,
header_columns, dtype, path, table_name,
sep, append, overwrite)
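# Illustrative usage sketch, not taken from the project's documentation:
# create_accessdb() builds an .accdb table from a delimited text file, and
# to_accessdb() above reads like a pandas DataFrame method (it calls
# self.to_csv and self.columns). The paths, table name and dtype mapping
# below are made up; running this needs Windows, the Access ODBC driver
# and pypyodbc.
from accessdb.utils import create_accessdb

create_accessdb(
    path=r"C:\data\people.accdb",      # target Access database (assumed path)
    text_path=r"C:\data\people.csv",   # source text file with a header row
    table_name="people",
    dtype={"id": "PRIMARY", "name": "TEXT", "age": "LONG"},
    sep=",",
    overwrite=True,
)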
|
apache-2.0
| 6,640,736,085,775,844,000
| 31.921569
| 79
| 0.534485
| false
| 3.901022
| false
| false
| false
|
jimsize/PySolFC
|
pysollib/games/harp.py
|
1
|
13061
|
#!/usr/bin/env python
# -*- mode: python; coding: utf-8; -*-
# ---------------------------------------------------------------------------##
#
# Copyright (C) 1998-2003 Markus Franz Xaver Johannes Oberhumer
# Copyright (C) 2003 Mt. Hood Playing Card Co.
# Copyright (C) 2005-2009 Skomoroh
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# ---------------------------------------------------------------------------##
# imports
# PySol imports
from pysollib.gamedb import registerGame, GameInfo, GI
from pysollib.mfxutil import kwdefault
from pysollib.game import Game
from pysollib.layout import Layout
from pysollib.hint import CautiousDefaultHint
from pysollib.hint import KlondikeType_Hint
from pysollib.games.spider import Spider_RowStack, Spider_SS_Foundation, \
Spider_Hint
from pysollib.util import ACE, KING
from pysollib.stack import \
AC_RowStack, \
BO_RowStack, \
KingAC_RowStack, \
SS_FoundationStack, \
Spider_SS_RowStack, \
StackWrapper, \
WasteStack, \
WasteTalonStack, \
SS_RowStack
# ************************************************************************
# * Double Klondike (Klondike with 2 decks and 9 rows)
# ************************************************************************
class DoubleKlondike(Game):
Layout_Method = staticmethod(Layout.harpLayout)
Foundation_Class = SS_FoundationStack
RowStack_Class = KingAC_RowStack
Hint_Class = KlondikeType_Hint
def createGame(self, max_rounds=-1, num_deal=1, **layout):
# create layout
l, s = Layout(self), self.s
kwdefault(layout, rows=9, waste=1, texts=1, playcards=19)
self.Layout_Method(l, **layout)
self.setSize(l.size[0], l.size[1])
# create stacks
s.talon = WasteTalonStack(l.s.talon.x, l.s.talon.y, self,
max_rounds=max_rounds, num_deal=num_deal)
s.waste = WasteStack(l.s.waste.x, l.s.waste.y, self)
for r in l.s.foundations:
s.foundations.append(
self.Foundation_Class(r.x, r.y, self, suit=r.suit))
for r in l.s.rows:
s.rows.append(self.RowStack_Class(r.x, r.y, self))
# default
l.defaultAll()
# extra
if max_rounds > 1:
anchor = 'nn'
if layout.get("texts"):
anchor = 'nnn'
l.createRoundText(s.talon, anchor)
return l
def startGame(self, flip=0):
for i in range(len(self.s.rows)):
self.s.talon.dealRow(rows=self.s.rows[i+1:], flip=flip, frames=0)
self._startAndDealRowAndCards()
shallHighlightMatch = Game._shallHighlightMatch_AC
# ************************************************************************
# * Double Klondike by Threes
# ************************************************************************
class DoubleKlondikeByThrees(DoubleKlondike):
def createGame(self):
DoubleKlondike.createGame(self, num_deal=3)
# ************************************************************************
# * Gargantua (Double Klondike with one redeal)
# * Pantagruel
# ************************************************************************
class Gargantua(DoubleKlondike):
def createGame(self):
DoubleKlondike.createGame(self, max_rounds=2)
class Pantagruel(DoubleKlondike):
RowStack_Class = AC_RowStack
def createGame(self):
DoubleKlondike.createGame(self, max_rounds=1)
# ************************************************************************
# * Harp (Double Klondike with 10 non-king rows and no redeal)
# ************************************************************************
class BigHarp(DoubleKlondike):
RowStack_Class = AC_RowStack
def createGame(self):
DoubleKlondike.createGame(self, max_rounds=1, rows=10)
#
# game overrides
#
# no real need to override, but this way the layout
# looks a little bit different
def startGame(self):
for i in range(len(self.s.rows)):
self.s.talon.dealRow(rows=self.s.rows[:i], flip=0, frames=0)
self._startAndDealRowAndCards()
# ************************************************************************
# * Steps (Harp with 7 rows)
# ************************************************************************
class Steps(DoubleKlondike):
RowStack_Class = AC_RowStack
def createGame(self):
DoubleKlondike.createGame(self, max_rounds=2, rows=7)
# ************************************************************************
# * Triple Klondike
# * Triple Klondike by Threes
# * Chinese Klondike
# ************************************************************************
class TripleKlondike(DoubleKlondike):
def createGame(self):
DoubleKlondike.createGame(self, rows=13)
class TripleKlondikeByThrees(DoubleKlondike):
def createGame(self):
DoubleKlondike.createGame(self, rows=13, num_deal=3)
class ChineseKlondike(DoubleKlondike):
RowStack_Class = StackWrapper(BO_RowStack, base_rank=KING)
def createGame(self):
DoubleKlondike.createGame(self, rows=12)
# ************************************************************************
# * Lady Jane
# * Inquisitor
# ************************************************************************
class LadyJane(DoubleKlondike):
Hint_Class = Spider_Hint
RowStack_Class = Spider_SS_RowStack
def createGame(self):
DoubleKlondike.createGame(self, rows=10, max_rounds=2, num_deal=3)
def startGame(self):
DoubleKlondike.startGame(self, flip=1)
shallHighlightMatch = Game._shallHighlightMatch_RK
getQuickPlayScore = Game._getSpiderQuickPlayScore
class Inquisitor(DoubleKlondike):
RowStack_Class = SS_RowStack
def createGame(self):
DoubleKlondike.createGame(self, rows=10, max_rounds=3, num_deal=3)
def startGame(self):
DoubleKlondike.startGame(self, flip=1)
shallHighlightMatch = Game._shallHighlightMatch_SS
# ************************************************************************
# * Arabella
# ************************************************************************
class Arabella(DoubleKlondike):
Hint_Class = Spider_Hint
RowStack_Class = StackWrapper(Spider_SS_RowStack, base_rank=KING)
def createGame(self):
DoubleKlondike.createGame(self, rows=13, max_rounds=1, playcards=24)
def startGame(self):
DoubleKlondike.startGame(self, flip=1)
shallHighlightMatch = Game._shallHighlightMatch_RK
getQuickPlayScore = Game._getSpiderQuickPlayScore
# ************************************************************************
# * Big Deal
# ************************************************************************
class BigDeal(DoubleKlondike):
RowStack_Class = KingAC_RowStack
def createGame(self, rows=12, max_rounds=2, XOFFSET=0):
l, s = Layout(self), self.s
self.setSize(l.XM+(rows+2)*l.XS, l.YM+8*l.YS)
x, y = l.XM, l.YM
for i in range(rows):
s.rows.append(self.RowStack_Class(x, y, self))
x += l.XS
for i in range(2):
y = l.YM
for j in range(8):
s.foundations.append(
SS_FoundationStack(x, y, self, suit=j % 4))
y += l.YS
x += l.XS
x, y = l.XM, self.height-l.YS
s.talon = WasteTalonStack(x, y, self, max_rounds=max_rounds)
l.createText(s.talon, 'n')
x += l.XS
s.waste = WasteStack(x, y, self)
s.waste.CARD_XOFFSET = XOFFSET
l.createText(s.waste, 'n')
if max_rounds > 1:
l.createRoundText(s.talon, 'nnn')
self.setRegion(s.rows, (-999, -999, l.XM+rows*l.XS-l.CW//2, 999999),
priority=1)
l.defaultStackGroups()
# ************************************************************************
# * Delivery
# ************************************************************************
class Delivery(BigDeal):
Hint_Class = CautiousDefaultHint
RowStack_Class = StackWrapper(SS_RowStack, max_move=1)
def createGame(self):
dx = self.app.images.CARDW//10
BigDeal.createGame(self, rows=12, max_rounds=1, XOFFSET=dx)
shallHighlightMatch = Game._shallHighlightMatch_SS
def startGame(self):
self._startDealNumRows(2)
self.s.talon.dealRow()
self.s.talon.dealCards() # deal first card to WasteStack
# ************************************************************************
# * Double Kingsley
# ************************************************************************
class DoubleKingsley(DoubleKlondike):
Foundation_Class = StackWrapper(SS_FoundationStack, base_rank=KING, dir=-1)
RowStack_Class = StackWrapper(KingAC_RowStack, base_rank=ACE, dir=1)
def createGame(self):
DoubleKlondike.createGame(self, max_rounds=1)
# ************************************************************************
# * Thieves of Egypt
# ************************************************************************
class ThievesOfEgypt(DoubleKlondike):
Layout_Method = staticmethod(Layout.klondikeLayout)
def createGame(self):
DoubleKlondike.createGame(self, rows=10, max_rounds=2)
def startGame(self):
# rows: 1 3 5 7 9 10 8 6 4 2
row = 0
for i in (0, 2, 4, 6, 8, 9, 7, 5, 3, 1):
for j in range(i):
self.s.talon.dealRow(rows=[self.s.rows[row]], frames=0)
row += 1
self._startAndDealRowAndCards()
# ************************************************************************
# * Brush
# ************************************************************************
class Brush(DoubleKlondike):
Layout_Method = staticmethod(Layout.klondikeLayout)
Foundation_Class = Spider_SS_Foundation
RowStack_Class = Spider_RowStack
Hint_Class = Spider_Hint
def createGame(self):
DoubleKlondike.createGame(self, rows=10, max_rounds=1)
def startGame(self):
self._startDealNumRows(3)
self.s.talon.dealRow()
self.s.talon.dealCards() # deal first card to WasteStack
shallHighlightMatch = Game._shallHighlightMatch_RK
getQuickPlayScore = Game._getSpiderQuickPlayScore
# register the game
registerGame(GameInfo(21, DoubleKlondike, "Double Klondike",
GI.GT_KLONDIKE, 2, -1, GI.SL_BALANCED))
registerGame(GameInfo(28, DoubleKlondikeByThrees, "Double Klondike by Threes",
GI.GT_KLONDIKE, 2, -1, GI.SL_MOSTLY_LUCK))
registerGame(GameInfo(25, Gargantua, "Gargantua",
GI.GT_KLONDIKE, 2, 1, GI.SL_BALANCED))
registerGame(GameInfo(15, BigHarp, "Big Harp",
GI.GT_KLONDIKE, 2, 0, GI.SL_BALANCED))
registerGame(GameInfo(51, Steps, "Steps",
GI.GT_KLONDIKE, 2, 1, GI.SL_BALANCED))
registerGame(GameInfo(273, TripleKlondike, "Triple Klondike",
GI.GT_KLONDIKE, 3, -1, GI.SL_BALANCED))
registerGame(GameInfo(274, TripleKlondikeByThrees, "Triple Klondike by Threes",
GI.GT_KLONDIKE, 3, -1, GI.SL_MOSTLY_LUCK))
registerGame(GameInfo(495, LadyJane, "Lady Jane",
GI.GT_KLONDIKE, 2, 1, GI.SL_BALANCED))
registerGame(GameInfo(496, Inquisitor, "Inquisitor",
GI.GT_KLONDIKE, 2, 2, GI.SL_BALANCED))
registerGame(GameInfo(497, Arabella, "Arabella",
GI.GT_KLONDIKE, 3, 0, GI.SL_BALANCED))
registerGame(GameInfo(545, BigDeal, "Big Deal",
GI.GT_KLONDIKE | GI.GT_ORIGINAL, 4, 1, GI.SL_BALANCED))
registerGame(GameInfo(562, Delivery, "Delivery",
GI.GT_FORTY_THIEVES | GI.GT_ORIGINAL, 4, 0,
GI.SL_BALANCED))
registerGame(GameInfo(590, ChineseKlondike, "Chinese Klondike",
GI.GT_KLONDIKE, 3, -1, GI.SL_BALANCED,
suits=(0, 1, 2)))
registerGame(GameInfo(591, Pantagruel, "Pantagruel",
GI.GT_KLONDIKE, 2, 0, GI.SL_BALANCED))
registerGame(GameInfo(668, DoubleKingsley, "Double Kingsley",
GI.GT_KLONDIKE, 2, 0, GI.SL_BALANCED))
registerGame(GameInfo(678, ThievesOfEgypt, "Thieves of Egypt",
GI.GT_KLONDIKE, 2, 1, GI.SL_BALANCED))
registerGame(GameInfo(689, Brush, "Brush",
GI.GT_2DECK_TYPE | GI.GT_ORIGINAL, 2, 0,
GI.SL_MOSTLY_SKILL))
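# Quick arithmetic check of the Thieves of Egypt opening deal (illustrative,
# not part of the game module): startGame() pre-deals (0, 2, 4, 6, 8, 9, 7,
# 5, 3, 1) cards to the ten rows, and _startAndDealRowAndCards() appears to
# add one more card to each row, producing the "1 3 5 7 9 10 8 6 4 2" layout
# named in the comment above.
pre_deal = (0, 2, 4, 6, 8, 9, 7, 5, 3, 1)
final_layout = tuple(n + 1 for n in pre_deal)
assert final_layout == (1, 3, 5, 7, 9, 10, 8, 6, 4, 2)
print(sum(final_layout), "cards on the tableau after the opening deal")  # 55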
|
gpl-3.0
| 6,819,752,331,048,072,000
| 34.3
| 79
| 0.537861
| false
| 3.414641
| false
| false
| false
|
etherkit/OpenBeacon2
|
client/linux-arm/venv/lib/python3.6/site-packages/PyInstaller/hooks/hook-gi.repository.GdkPixbuf.py
|
1
|
6760
|
#-----------------------------------------------------------------------------
# Copyright (c) 2005-2020, PyInstaller Development Team.
#
# Distributed under the terms of the GNU General Public License (version 2
# or later) with exception for distributing the bootloader.
#
# The full license is in the file COPYING.txt, distributed with this software.
#
# SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)
#-----------------------------------------------------------------------------
"""
Import hook for PyGObject's "gi.repository.GdkPixbuf" package.
"""
import glob
import os
import subprocess
from PyInstaller.config import CONF
from PyInstaller.compat import (
exec_command_stdout, is_darwin, is_win, is_linux, open_file, which)
from PyInstaller.utils.hooks import (
collect_glib_translations, get_gi_typelibs, get_gi_libdir, logger)
loaders_path = os.path.join('gdk-pixbuf-2.0', '2.10.0', 'loaders')
destpath = "lib/gdk-pixbuf-2.0/2.10.0/loaders"
cachedest = "lib/gdk-pixbuf-2.0/2.10.0"
# If the "gdk-pixbuf-query-loaders" command is not in the current ${PATH}, or
# is not in the GI lib path, GDK and thus GdkPixbuf is unavailable. Return with
# a non-fatal warning.
gdk_pixbuf_query_loaders = None
try:
libdir = get_gi_libdir('GdkPixbuf', '2.0')
except ValueError:
logger.warning(
'"hook-gi.repository.GdkPixbuf" ignored, '
'since GdkPixbuf library not found'
)
libdir = None
if libdir:
# Distributions either package gdk-pixbuf-query-loaders in the GI libs
# directory (not on the path), or on the path with or without a -x64 suffix
# depending on the architecture
cmds = [
os.path.join(libdir, 'gdk-pixbuf-2.0/gdk-pixbuf-query-loaders'),
'gdk-pixbuf-query-loaders-64',
'gdk-pixbuf-query-loaders',
]
for cmd in cmds:
gdk_pixbuf_query_loaders = which(cmd)
if gdk_pixbuf_query_loaders is not None:
break
if gdk_pixbuf_query_loaders is None:
logger.warning(
'"hook-gi.repository.GdkPixbuf" ignored, since '
'"gdk-pixbuf-query-loaders" is not in $PATH or gi lib dir.'
)
# Else, GDK is available. Let's do this.
else:
binaries, datas, hiddenimports = get_gi_typelibs('GdkPixbuf', '2.0')
datas += collect_glib_translations('gdk-pixbuf')
# To add support for a new platform, add a new "elif" branch below with
# the proper is_<platform>() test and glob for finding loaders on that
# platform.
    if is_win:
        ext = "*.dll"
    elif is_darwin or is_linux:
        ext = "*.so"
    else:
        # Unknown platform: fall through to the warning below.
        ext = None
# If loader detection is supported on this platform, bundle all
# detected loaders and an updated loader cache.
if ext:
loader_libs = []
# Bundle all found loaders with this user application.
pattern = os.path.join(libdir, loaders_path, ext)
for f in glob.glob(pattern):
binaries.append((f, destpath))
loader_libs.append(f)
# Sometimes the loaders are stored in a different directory from
# the library (msys2)
if not loader_libs:
pattern = os.path.join(libdir, '..', 'lib', loaders_path, ext)
for f in glob.glob(pattern):
binaries.append((f, destpath))
loader_libs.append(f)
# Filename of the loader cache to be written below.
cachefile = os.path.join(CONF['workpath'], 'loaders.cache')
# Run the "gdk-pixbuf-query-loaders" command and capture its
# standard output providing an updated loader cache; then write
# this output to the loader cache bundled with this frozen
# application.
#
# On OSX we use @executable_path to specify a path relative to the
# generated bundle. However, on non-Windows we need to rewrite the
# loader cache because it isn't relocatable by default. See
# https://bugzilla.gnome.org/show_bug.cgi?id=737523
#
# To make it easier to rewrite, we just always write
# @executable_path, since its significantly easier to find/replace
# at runtime. :)
#
# If we need to rewrite it...
if not is_win:
# To permit string munging, decode the encoded bytes output by
# this command (i.e., enable the "universal_newlines" option).
# Note that:
#
# * Under Python 2.7, "cachedata" will be a decoded "unicode"
# object. * Under Python 3.x, "cachedata" will be a decoded
# "str" object.
#
# On Fedora, the default loaders cache is /usr/lib64, but the
# libdir is actually /lib64. To get around this, we pass the
# path to the loader command, and it will create a cache with
# the right path.
cachedata = exec_command_stdout(gdk_pixbuf_query_loaders,
*loader_libs)
cd = []
prefix = '"' + os.path.join(libdir, 'gdk-pixbuf-2.0', '2.10.0')
plen = len(prefix)
# For each line in the updated loader cache...
for line in cachedata.splitlines():
if line.startswith('#'):
continue
if line.startswith(prefix):
line = '"@executable_path/' + cachedest + line[plen:]
cd.append(line)
# Rejoin these lines in a manner preserving this object's
# "unicode" type under Python 2.
cachedata = u'\n'.join(cd)
# Write the updated loader cache to this file.
with open_file(cachefile, 'w') as fp:
fp.write(cachedata)
# Else, GdkPixbuf will do the right thing on Windows, so no changes
# to the loader cache are required. For efficiency and reliability,
# this command's encoded byte output is written as is without being
# decoded.
else:
with open_file(cachefile, 'wb') as fp:
fp.write(subprocess.check_output(gdk_pixbuf_query_loaders))
# Bundle this loader cache with this frozen application.
datas.append((cachefile, cachedest))
# Else, loader detection is unsupported on this platform.
else:
logger.warning(
'GdkPixbuf loader bundling unsupported on your platform.'
)
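# Standalone sketch of the prefix rewrite described in the comments above,
# applied to a single made-up loaders.cache line; the libdir value and the
# cache line are illustrative, not real gdk-pixbuf-query-loaders output.
import os

libdir = "/usr/lib"                       # assumed GI library directory
cachedest = "lib/gdk-pixbuf-2.0/2.10.0"
prefix = '"' + os.path.join(libdir, 'gdk-pixbuf-2.0', '2.10.0')

line = '"/usr/lib/gdk-pixbuf-2.0/2.10.0/loaders/libpixbufloader-png.so"'
if line.startswith(prefix):
    line = '"@executable_path/' + cachedest + line[len(prefix):]

# prints: "@executable_path/lib/gdk-pixbuf-2.0/2.10.0/loaders/libpixbufloader-png.so"
print(line)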
|
gpl-3.0
| -3,919,858,749,350,087,700
| 39.969697
| 79
| 0.569822
| false
| 4.222361
| false
| false
| false
|
all-of-us/raw-data-repository
|
rdr_service/alembic/versions/72365b7c0037_add_gender_identity_enums.py
|
1
|
1232
|
"""add gender identity enums
Revision ID: 72365b7c0037
Revises: 9c957ce496bf
Create Date: 2019-06-05 08:56:34.278852
"""
import model.utils
import sqlalchemy as sa
from alembic import op
from rdr_service.participant_enums import GenderIdentity
# revision identifiers, used by Alembic.
revision = "72365b7c0037"
down_revision = "9c957ce496bf"
branch_labels = None
depends_on = None
def upgrade(engine_name):
globals()["upgrade_%s" % engine_name]()
def downgrade(engine_name):
globals()["downgrade_%s" % engine_name]()
def upgrade_rdr():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column("participant_summary", sa.Column("gender_identity", model.utils.Enum(GenderIdentity), nullable=True))
# ### end Alembic commands ###
def downgrade_rdr():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column("participant_summary", "gender_identity")
# ### end Alembic commands ###
def upgrade_metrics():
# ### commands auto generated by Alembic - please adjust! ###
pass
# ### end Alembic commands ###
def downgrade_metrics():
# ### commands auto generated by Alembic - please adjust! ###
pass
# ### end Alembic commands ###
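# Minimal sketch (outside Alembic) of the globals()-based dispatch the
# migration uses: each engine name resolves to a module-level
# upgrade_<engine>() function. The engine name here is hypothetical.
def upgrade_example_engine():
    print("running example_engine upgrade")

def dispatch_upgrade(engine_name):
    globals()["upgrade_%s" % engine_name]()

dispatch_upgrade("example_engine")  # -> running example_engine upgrade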
|
bsd-3-clause
| 7,862,067,207,526,346,000
| 23.64
| 119
| 0.685065
| false
| 3.602339
| false
| false
| false
|
joshfriend/memegen
|
tests/test_routes_templates.py
|
1
|
2217
|
# pylint: disable=unused-variable
# pylint: disable=misplaced-comparison-constant
from .conftest import load
def describe_get():
def when_default_text(client):
response = client.get("/templates/iw")
assert 200 == response.status_code
assert dict(
name="Insanity Wolf",
description="http://knowyourmeme.com/memes/insanity-wolf",
aliases=['insanity', 'insanity-wolf', 'iw'],
styles=[],
example="http://localhost/iw/does-testing/in-production",
) == load(response)
def when_no_default_text(client):
response = client.get("/templates/keanu")
assert 200 == response.status_code
assert "http://localhost/keanu/your-text/goes-here" == \
load(response)['example']
    def when_alternate_styles_available(client):
response = client.get("/templates/sad-biden")
assert 200 == response.status_code
assert ['down', 'scowl', 'window'] == load(response)['styles']
def when_dashes_in_key(client):
response = client.get("/templates/awkward-awesome")
assert 200 == response.status_code
def it_returns_list_when_no_key(client):
response = client.get("/templates/")
assert 200 == response.status_code
data = load(response)
assert "http://localhost/templates/iw" == data['Insanity Wolf']
assert len(data) >= 20 # there should be many memes
def it_redirects_when_text_is_provided(client):
response = client.get("/templates/iw/top/bottom")
assert 302 == response.status_code
assert '<a href="/iw/top/bottom">' in load(response, as_json=False)
def it_redirects_when_key_is_an_alias(client):
response = client.get("/templates/insanity-wolf")
assert 302 == response.status_code
assert '<a href="/templates/iw">' in load(response, as_json=False)
def describe_post():
def it_returns_an_error(client):
response = client.post("/templates/")
assert 403 == response.status_code
assert dict(
message="https://raw.githubusercontent.com/jacebrowning/memegen/master/CONTRIBUTING.md"
) == load(response)
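# The tests above import load() from conftest (not shown here). A plausible
# minimal version of that helper, assuming a Flask test client response, is
# sketched below: decode the body and optionally parse it as JSON.
import json

def load(response, as_json=True):
    body = response.data.decode("utf-8")
    return json.loads(body) if as_json else body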
|
mit
| -5,729,464,290,669,171,000
| 31.602941
| 99
| 0.626071
| false
| 3.770408
| false
| false
| false
|
JIC-CSB/dtoolcore
|
docs/source/conf.py
|
1
|
5148
|
# -*- coding: utf-8 -*-
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u"dtoolcore"
copyright = u"2017, Tjelvar Olsson"
author = u"Tjelvar Olsson"
repo_name = u"dtoolcore"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u"3.13.0"
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'default'
# Set the readthedocs theme.
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
print('using readthedocs theme...')
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# otherwise, readthedocs.org uses their theme by default, so no need to specify
# it
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = '{}doc'.format(repo_name)
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, '{}.tex'.format(repo_name),
u'{} Documentation'.format(repo_name),
author, 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, repo_name, u'{} Documentation'.format(repo_name),
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, repo_name, u'{} Documentation'.format(repo_name),
author, repo_name, u'Core API for managing (scientific) data',
'Miscellaneous'),
]
|
mit
| 835,693,816,536,538,400
| 30.012048
| 79
| 0.673271
| false
| 3.833209
| false
| false
| false
|
praekelt/vumi-go
|
go/billing/migrations/0009_auto__chg_field_messagecost_tag_pool__add_index_messagecost_message_di.py
|
1
|
10898
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'MessageCost.tag_pool'
db.alter_column(u'billing_messagecost', 'tag_pool_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['billing.TagPool'], null=True))
# Adding index on 'MessageCost', fields ['message_direction']
db.create_index(u'billing_messagecost', ['message_direction'])
# Adding unique constraint on 'MessageCost', fields ['account', 'tag_pool', 'message_direction']
db.create_unique(u'billing_messagecost', ['account_id', 'tag_pool_id', 'message_direction'])
# Adding index on 'MessageCost', fields ['account', 'tag_pool', 'message_direction']
db.create_index(u'billing_messagecost', ['account_id', 'tag_pool_id', 'message_direction'])
def backwards(self, orm):
# Removing index on 'MessageCost', fields ['account', 'tag_pool', 'message_direction']
db.delete_index(u'billing_messagecost', ['account_id', 'tag_pool_id', 'message_direction'])
# Removing unique constraint on 'MessageCost', fields ['account', 'tag_pool', 'message_direction']
db.delete_unique(u'billing_messagecost', ['account_id', 'tag_pool_id', 'message_direction'])
# Removing index on 'MessageCost', fields ['message_direction']
db.delete_index(u'billing_messagecost', ['message_direction'])
# User chose to not deal with backwards NULL issues for 'MessageCost.tag_pool'
raise RuntimeError("Cannot reverse this migration. 'MessageCost.tag_pool' and its values cannot be restored.")
# The following code is provided here to aid in writing a correct migration
# Changing field 'MessageCost.tag_pool'
db.alter_column(u'billing_messagecost', 'tag_pool_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['billing.TagPool']))
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'base.gouser': {
'Meta': {'object_name': 'GoUser'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '254'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '254'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '254'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'billing.account': {
'Meta': {'object_name': 'Account'},
'account_number': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'alert_credit_balance': ('django.db.models.fields.DecimalField', [], {'default': "'0.0'", 'max_digits': '20', 'decimal_places': '6'}),
'alert_threshold': ('django.db.models.fields.DecimalField', [], {'default': "'0.0'", 'max_digits': '10', 'decimal_places': '2'}),
'credit_balance': ('django.db.models.fields.DecimalField', [], {'default': "'0.0'", 'max_digits': '20', 'decimal_places': '6'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['base.GoUser']"})
},
u'billing.lineitem': {
'Meta': {'object_name': 'LineItem'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message_direction': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '20', 'blank': 'True'}),
'statement': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['billing.Statement']"}),
'tag_name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}),
'tag_pool_name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}),
'total_cost': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
u'billing.messagecost': {
'Meta': {'unique_together': "[['account', 'tag_pool', 'message_direction']]", 'object_name': 'MessageCost', 'index_together': "[['account', 'tag_pool', 'message_direction']]"},
'account': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['billing.Account']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'markup_percent': ('django.db.models.fields.DecimalField', [], {'default': "'0.0'", 'max_digits': '10', 'decimal_places': '2'}),
'message_cost': ('django.db.models.fields.DecimalField', [], {'default': "'0.0'", 'max_digits': '10', 'decimal_places': '3'}),
'message_direction': ('django.db.models.fields.CharField', [], {'max_length': '20', 'db_index': 'True'}),
'session_cost': ('django.db.models.fields.DecimalField', [], {'default': "'0.0'", 'max_digits': '10', 'decimal_places': '3'}),
'tag_pool': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['billing.TagPool']", 'null': 'True', 'blank': 'True'})
},
u'billing.statement': {
'Meta': {'object_name': 'Statement'},
'account': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['billing.Account']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'from_date': ('django.db.models.fields.DateField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'to_date': ('django.db.models.fields.DateField', [], {}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '40'})
},
u'billing.tagpool': {
'Meta': {'object_name': 'TagPool'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'})
},
u'billing.transaction': {
'Meta': {'object_name': 'Transaction'},
'account_number': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'credit_amount': ('django.db.models.fields.DecimalField', [], {'default': "'0.0'", 'max_digits': '20', 'decimal_places': '6'}),
'credit_factor': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'markup_percent': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'message_cost': ('django.db.models.fields.DecimalField', [], {'default': "'0.0'", 'null': 'True', 'max_digits': '10', 'decimal_places': '3'}),
'message_direction': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
'message_id': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'session_cost': ('django.db.models.fields.DecimalField', [], {'default': "'0.0'", 'null': 'True', 'max_digits': '10', 'decimal_places': '3'}),
'session_created': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'Pending'", 'max_length': '20'}),
'tag_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'tag_pool_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['billing']
|
bsd-3-clause
| -5,296,161,936,349,393,000
| 75.216783
| 188
| 0.569279
| false
| 3.626622
| false
| false
| false
|
cajone/pychess
|
lib/pychess/System/TaskQueue.py
|
1
|
2187
|
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/475160
# Was accepted into Python 2.5, but earlier versions still have
# to do stuff manually
import threading
from pychess.compat import Queue
def TaskQueue():
if hasattr(Queue, "task_done"):
return Queue()
return _TaskQueue()
class _TaskQueue(Queue):
def __init__(self):
Queue.__init__(self)
self.all_tasks_done = threading.Condition(self.mutex)
self.unfinished_tasks = 0
def _put(self, item):
Queue._put(self, item)
self.unfinished_tasks += 1
def task_done(self):
"""Indicate that a formerly enqueued task is complete.
Used by Queue consumer threads. For each get() used to fetch a task,
a subsequent call to task_done() tells the queue that the processing
on the task is complete.
If a join() is currently blocking, it will resume when all items
have been processed (meaning that a task_done() call was received
for every item that had been put() into the queue).
Raises a ValueError if called more times than there were items
placed in the queue.
"""
self.all_tasks_done.acquire()
try:
unfinished = self.unfinished_tasks - 1
if unfinished <= 0:
if unfinished < 0:
raise ValueError('task_done() called too many times')
self.all_tasks_done.notifyAll()
self.unfinished_tasks = unfinished
finally:
self.all_tasks_done.release()
def join(self):
"""Blocks until all items in the Queue have been gotten and processed.
The count of unfinished tasks goes up whenever an item is added to the
queue. The count goes down whenever a consumer thread calls task_done()
to indicate the item was retrieved and all work on it is complete.
When the count of unfinished tasks drops to zero, join() unblocks.
"""
self.all_tasks_done.acquire()
try:
while self.unfinished_tasks:
self.all_tasks_done.wait()
finally:
self.all_tasks_done.release()
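# Illustrative producer/consumer usage of the queue above (not part of the
# module), assuming it is importable as pychess.System.TaskQueue: the worker
# calls task_done() for every get(), so join() blocks until all work is done.
import threading
from pychess.System.TaskQueue import TaskQueue

queue = TaskQueue()

def worker():
    while True:
        item = queue.get()
        if item is None:       # sentinel: stop the worker
            queue.task_done()
            break
        print("processing", item)
        queue.task_done()

thread = threading.Thread(target=worker)
thread.start()

for job in range(5):
    queue.put(job)
queue.put(None)                # tell the worker to exit

queue.join()                   # returns once every put() has a matching task_done()
thread.join()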
|
gpl-3.0
| 3,400,890,274,600,280,000
| 33.171875
| 79
| 0.622771
| false
| 4.263158
| false
| false
| false
|
deter-project/magi
|
magi/messaging/transportTCP.py
|
1
|
2821
|
import socket
import logging
import time
from asyncore import dispatcher
from transport import Transport
import transportStream
from magimessage import DefaultCodec
log = logging.getLogger(__name__)
class TCPServer(Transport):
""" Simple TCP Server that returns new TCP clients as 'messages' """
def __init__(self, address = None, port = None):
Transport.__init__(self)
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
self.set_reuse_addr()
self.bind((address, port))
self.listen(5)
def handle_accept(self):
pair = self.accept()
if pair is None:
return
sock, addr = pair
log.info('Incoming connection from %s', repr(addr))
newTrans = TCPTransport(sock)
newTrans.saveHost = addr[0]
newTrans.savePort = addr[1]
self.inmessages.append(newTrans)
def serverOnly(self):
return True
def __repr__(self):
return "TCPServer %s:%d" % (self.addr[0], self.addr[1])
__str__ = __repr__
class TCPTransport(transportStream.StreamTransport):
"""
This class implements a TCP connection that streams MAGI messages back and forth. It
uses the StreamTransport for most work, extending it just for the connecting and reconnecting
portion.
"""
def __init__(self, sock = None, codec=DefaultCodec, address = None, port = None):
"""
Create a new TCP Transport. If sock is provided, it is used, otherwise starts with
an unconnected socket.
"""
transportStream.StreamTransport.__init__(self, sock=sock, codec=codec)
self.closed = False
self.saveHost = ""
self.savePort = -1
if address is not None and port is not None:
self.connect(address, port)
def connect(self, host, port):
"""
Attempt to connect this socket.
"""
self.saveHost = host
self.savePort = port
self.closed = False
log.info("connect %s:%d", self.saveHost, self.savePort)
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
log.info("If connection fails, it will retry shortly.")
dispatcher.connect(self, (self.saveHost, self.savePort))
def reconnect(self):
"""
Attempt a reconnect of a socket that was closed or never fully connected
"""
self.connect(self.saveHost, self.savePort)
def handle_write(self):
"""
Override stream version so we can add hosttime to outgoing packets
"""
if self.txMessage.isDone():
try:
msg = self.outmessages.pop(0)
msg.hosttime = int(time.time())
self.txMessage = transportStream.TXTracker(codec=self.codec, msg=msg)
except IndexError:
return
#keep sending till you can
while not self.txMessage.isDone():
bytesWritten = self.send(self.txMessage.getData())
self.txMessage.sent(bytesWritten)
#if no more can be written, break out
if bytesWritten == 0:
break
def __repr__(self):
return "TCPTransport %s:%d" % (self.saveHost, self.savePort)
__str__ = __repr__
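# Generic sketch of the partial-send bookkeeping that handle_write() relies on
# (the real TXTracker lives in transportStream and is not reproduced here):
# keep a payload plus an offset, send as much as the socket accepts, and stop
# as soon as send() makes no progress.
class SendTracker:
    def __init__(self, payload):
        self._payload = payload
        self._offset = 0

    def isDone(self):
        return self._offset >= len(self._payload)

    def getData(self):
        return self._payload[self._offset:]

    def sent(self, nbytes):
        self._offset += nbytes

def pump(sock, tracker):
    # Mirrors the loop in TCPTransport.handle_write: send until done, or until
    # the socket cannot take more bytes right now.
    while not tracker.isDone():
        written = sock.send(tracker.getData())
        tracker.sent(written)
        if written == 0:
            break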
|
gpl-2.0
| -4,412,502,271,619,851,000
| 25.613208
| 95
| 0.698688
| false
| 3.26883
| false
| false
| false
|
drtuxwang/system-config
|
bin/battery.py
|
1
|
3916
|
#!/usr/bin/env python3
"""
Monitor laptop battery
"""
import argparse
import signal
import sys
from typing import List
import power_mod
class Options:
"""
Options class
"""
def __init__(self) -> None:
self._args: argparse.Namespace = None
self.parse(sys.argv)
def get_summary_flag(self) -> bool:
"""
Return summary flag.
"""
return self._args.summary_flag
def _parse_args(self, args: List[str]) -> None:
parser = argparse.ArgumentParser(description='Monitor laptop battery.')
parser.add_argument(
'-s',
action='store_true',
dest='summary_flag',
help='Show summary'
)
self._args = parser.parse_args(args)
def parse(self, args: List[str]) -> None:
"""
Parse arguments
"""
self._parse_args(args[1:])
class Main:
"""
Main class
"""
def __init__(self) -> None:
try:
self.config()
sys.exit(self.run())
except (EOFError, KeyboardInterrupt):
sys.exit(114)
except SystemExit as exception:
sys.exit(exception)
@staticmethod
def config() -> None:
"""
Configure program
"""
if hasattr(signal, 'SIGPIPE'):
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
@staticmethod
def _show_battery(battery: power_mod.Battery) -> None:
model = (
battery.get_oem() + ' ' + battery.get_name() + ' ' +
battery.get_type() + ' ' + str(battery.get_capacity_max()) +
'mAh/' + str(battery.get_voltage()) + 'mV'
)
if battery.get_charge() == '-':
state = '-'
if battery.get_rate() > 0:
state += str(battery.get_rate()) + 'mA'
if battery.get_voltage() > 0:
power = '{0:4.2f}'.format(float(
battery.get_rate()*battery.get_voltage()) / 1000000)
state += ', ' + str(power) + 'W'
hours = '{0:3.1f}'.format(float(
battery.get_capacity()) / battery.get_rate())
state += ', ' + str(hours) + 'h'
elif battery.get_charge() == '+':
state = '+'
if battery.get_rate() > 0:
state += str(battery.get_rate()) + 'mA'
if battery.get_voltage() > 0:
power = '{0:4.2f}'.format(float(
battery.get_rate()*battery.get_voltage()) / 1000000)
state += ', ' + str(power) + 'W'
else:
state = 'Unused'
print(
model + " = ", battery.get_capacity(),
"mAh [" + state + "]",
sep=""
)
@staticmethod
def _show_summary(batteries: List[power_mod.Battery]) -> None:
capacity = 0
rate = 0
for battery in batteries:
if battery.is_exist():
capacity += battery.get_capacity()
if battery.get_charge() == '-':
rate -= battery.get_rate()
elif battery.get_charge() == '+':
rate += battery.get_rate()
if capacity:
if rate:
print("{0:d}mAh [{1:+d}mAh]".format(capacity, rate))
else:
print("{0:d}mAh [Unused]".format(capacity))
def run(self) -> int:
"""
Start program
"""
options = Options()
batteries = power_mod.Battery.factory()
if options.get_summary_flag():
self._show_summary(batteries)
else:
for battery in batteries:
if battery.is_exist():
self._show_battery(battery)
return 0
if __name__ == '__main__':
if '--pydoc' in sys.argv:
help(__name__)
else:
Main()
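# Worked example of the conversions in _show_battery, with made-up readings:
# rate is in mA and voltage in mV, so watts = (mA * mV) / 1_000_000, and the
# remaining runtime in hours is capacity (mAh) divided by the rate (mA).
rate_ma = 1500          # hypothetical discharge rate
voltage_mv = 11100      # hypothetical pack voltage
capacity_mah = 4200     # hypothetical remaining capacity

watts = (rate_ma * voltage_mv) / 1_000_000
hours = capacity_mah / rate_ma
print('{0:4.2f}W, {1:3.1f}h'.format(watts, hours))  # 16.65W, 2.8h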
|
gpl-2.0
| -5,714,600,251,821,818,000
| 26.384615
| 79
| 0.470123
| false
| 3.904287
| false
| false
| false
|
karllessard/tensorflow
|
tensorflow/python/keras/layers/preprocessing/text_vectorization.py
|
1
|
29394
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras text vectorization preprocessing layer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.keras import backend as K
from tensorflow.python.keras.engine import base_preprocessing_layer
from tensorflow.python.keras.layers.preprocessing import category_encoding
from tensorflow.python.keras.layers.preprocessing import string_lookup
from tensorflow.python.keras.utils import layer_utils
from tensorflow.python.keras.utils import tf_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_string_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.ops.ragged import ragged_functional_ops
from tensorflow.python.ops.ragged import ragged_string_ops
from tensorflow.python.util.tf_export import keras_export
LOWER_AND_STRIP_PUNCTUATION = "lower_and_strip_punctuation"
SPLIT_ON_WHITESPACE = "whitespace"
TFIDF = category_encoding.TFIDF
INT = category_encoding.INT
BINARY = category_encoding.BINARY
COUNT = category_encoding.COUNT
# This is an explicit regex of all the tokens that will be stripped if
# LOWER_AND_STRIP_PUNCTUATION is set. If an application requires other
# stripping, a Callable should be passed into the 'standardize' arg.
DEFAULT_STRIP_REGEX = r'[!"#$%&()\*\+,-\./:;<=>?@\[\\\]^_`{|}~\']'
# The string tokens in the extracted vocabulary
_VOCAB_NAME = "vocab"
# The inverse-document-frequency weights
_IDF_NAME = "idf"
# The IDF data for the OOV token
_OOV_IDF_NAME = "oov_idf"
# The string tokens in the full vocabulary
_ACCUMULATOR_VOCAB_NAME = "vocab"
# The total counts of each token in the vocabulary
_ACCUMULATOR_COUNTS_NAME = "counts"
# The number of documents / examples that each token appears in.
_ACCUMULATOR_DOCUMENT_COUNTS = "document_counts"
# The total number of documents / examples in the dataset.
_ACCUMULATOR_NUM_DOCUMENTS = "num_documents"
@keras_export(
"keras.layers.experimental.preprocessing.TextVectorization", v1=[])
class TextVectorization(base_preprocessing_layer.CombinerPreprocessingLayer):
"""Text vectorization layer.
This layer has basic options for managing text in a Keras model. It
transforms a batch of strings (one sample = one string) into either a list of
token indices (one sample = 1D tensor of integer token indices) or a dense
representation (one sample = 1D tensor of float values representing data about
the sample's tokens).
If desired, the user can call this layer's adapt() method on a dataset.
When this layer is adapted, it will analyze the dataset, determine the
frequency of individual string values, and create a 'vocabulary' from them.
This vocabulary can have unlimited size or be capped, depending on the
configuration options for this layer; if there are more unique values in the
input than the maximum vocabulary size, the most frequent terms will be used
to create the vocabulary.
The processing of each sample contains the following steps:
1. standardize each sample (usually lowercasing + punctuation stripping)
2. split each sample into substrings (usually words)
3. recombine substrings into tokens (usually ngrams)
4. index tokens (associate a unique int value with each token)
5. transform each sample using this index, either into a vector of ints or
a dense float vector.
Some notes on passing Callables to customize splitting and normalization for
this layer:
1. Any callable can be passed to this Layer, but if you want to serialize
this object you should only pass functions that are registered Keras
serializables (see `tf.keras.utils.register_keras_serializable` for more
details).
2. When using a custom callable for `standardize`, the data received
by the callable will be exactly as passed to this layer. The callable
should return a tensor of the same shape as the input.
3. When using a custom callable for `split`, the data received by the
callable will have the 1st dimension squeezed out - instead of
`[["string to split"], ["another string to split"]]`, the Callable will
see `["string to split", "another string to split"]`. The callable should
return a Tensor with the first dimension containing the split tokens -
in this example, we should see something like `[["string", "to", "split],
["another", "string", "to", "split"]]`. This makes the callable site
natively compatible with `tf.strings.split()`.
Attributes:
max_tokens: The maximum size of the vocabulary for this layer. If None,
there is no cap on the size of the vocabulary. Note that this vocabulary
contains 1 OOV token, so the effective number of tokens is `(max_tokens -
1 - (1 if output == "int" else 0))`.
standardize: Optional specification for standardization to apply to the
input text. Values can be None (no standardization),
'lower_and_strip_punctuation' (lowercase and remove punctuation) or a
Callable. Default is 'lower_and_strip_punctuation'.
split: Optional specification for splitting the input text. Values can be
None (no splitting), 'whitespace' (split on ASCII whitespace), or a
Callable. The default is 'whitespace'.
ngrams: Optional specification for ngrams to create from the possibly-split
input text. Values can be None, an integer or tuple of integers; passing
an integer will create ngrams up to that integer, and passing a tuple of
integers will create ngrams for the specified values in the tuple. Passing
None means that no ngrams will be created.
output_mode: Optional specification for the output of the layer. Values can
be "int", "binary", "count" or "tf-idf", configuring the layer as follows:
"int": Outputs integer indices, one integer index per split string
token. When output == "int", 0 is reserved for masked locations;
this reduces the vocab size to max_tokens-2 instead of max_tokens-1
"binary": Outputs a single int array per batch, of either vocab_size or
max_tokens size, containing 1s in all elements where the token mapped
to that index exists at least once in the batch item.
"count": As "binary", but the int array contains a count of the number
of times the token at that index appeared in the batch item.
"tf-idf": As "binary", but the TF-IDF algorithm is applied to find the
value in each token slot.
output_sequence_length: Only valid in INT mode. If set, the output will have
its time dimension padded or truncated to exactly `output_sequence_length`
values, resulting in a tensor of shape [batch_size,
output_sequence_length] regardless of how many tokens resulted from the
splitting step. Defaults to None.
pad_to_max_tokens: Only valid in "binary", "count", and "tf-idf" modes. If
True, the output will have its feature axis padded to `max_tokens` even if
the number of unique tokens in the vocabulary is less than max_tokens,
resulting in a tensor of shape [batch_size, max_tokens] regardless of
vocabulary size. Defaults to True.
vocabulary: An optional list of vocabulary terms, or a path to a text file
containing a vocabulary to load into this layer. The file should contain
one token per line. If the list or file contains the same token multiple
times, an error will be thrown.
Example:
This example instantiates a TextVectorization layer that lowercases text,
splits on whitespace, strips punctuation, and outputs integer vocab indices.
>>> text_dataset = tf.data.Dataset.from_tensor_slices(["foo", "bar", "baz"])
>>> max_features = 5000 # Maximum vocab size.
>>> max_len = 4 # Sequence length to pad the outputs to.
>>> embedding_dims = 2
>>>
>>> # Create the layer.
>>> vectorize_layer = TextVectorization(
... max_tokens=max_features,
... output_mode='int',
... output_sequence_length=max_len)
>>>
>>> # Now that the vocab layer has been created, call `adapt` on the text-only
>>> # dataset to create the vocabulary. You don't have to batch, but for large
>>> # datasets this means we're not keeping spare copies of the dataset.
>>> vectorize_layer.adapt(text_dataset.batch(64))
>>>
>>> # Create the model that uses the vectorize text layer
>>> model = tf.keras.models.Sequential()
>>>
>>> # Start by creating an explicit input layer. It needs to have a shape of
>>> # (1,) (because we need to guarantee that there is exactly one string
>>> # input per batch), and the dtype needs to be 'string'.
>>> model.add(tf.keras.Input(shape=(1,), dtype=tf.string))
>>>
>>> # The first layer in our model is the vectorization layer. After this
>>> # layer, we have a tensor of shape (batch_size, max_len) containing vocab
>>> # indices.
>>> model.add(vectorize_layer)
>>>
>>> # Now, the model can map strings to integers, and you can add an embedding
>>> # layer to map these integers to learned embeddings.
>>> input_data = [["foo qux bar"], ["qux baz"]]
>>> model.predict(input_data)
array([[2, 1, 4, 0],
[1, 3, 0, 0]])
Example:
This example instantiates a TextVectorization layer by passing a list
of vocabulary terms to the layer's __init__ method. With `standardize=None`,
`split=None` (pre-tokenized inputs) and the vocabulary
["earth", "wind", "and", "fire"], the input batch
[["earth", "wind", "and", "fire"], ["fire", "and", "earth", "michigan"]]
maps to the index batch [[2, 3, 4, 5], [5, 4, 2, 1]]; the unknown token
"michigan" falls back to the OOV index 1.
>>> vocab_data = ["earth", "wind", "and", "fire"]
>>> max_len = 4 # Sequence length to pad the outputs to.
>>>
>>> # Create the layer, passing the vocab directly. You can also pass the
>>> # vocabulary arg a path to a file containing one vocabulary word per
>>> # line.
>>> vectorize_layer = TextVectorization(
... max_tokens=max_features,
... output_mode='int',
... output_sequence_length=max_len,
... vocabulary=vocab_data)
>>>
>>> # Because we've passed the vocabulary directly, we don't need to adapt
>>> # the layer - the vocabulary is already set. The vocabulary contains the
>>> # padding token ('') and OOV token ('[UNK]') as well as the passed tokens.
>>> vectorize_layer.get_vocabulary()
['', '[UNK]', 'earth', 'wind', 'and', 'fire']
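For illustration only (this sketch is not part of the original examples), the
same kind of layer configured with `output_mode='binary'` returns one
multi-hot row per batch item instead of a sequence of token indices:
  binary_layer = TextVectorization(max_tokens=max_features,
                                   output_mode='binary')
  binary_layer.adapt(text_dataset.batch(64))
  # binary_layer(["foo bar"]) yields a single row containing 1s at the
  # indices assigned to "foo" and "bar" and 0s everywhere else.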
"""
# TODO(momernick): Add an examples section to the docstring.
def __init__(self,
max_tokens=None,
standardize=LOWER_AND_STRIP_PUNCTUATION,
split=SPLIT_ON_WHITESPACE,
ngrams=None,
output_mode=INT,
output_sequence_length=None,
pad_to_max_tokens=True,
vocabulary=None,
**kwargs):
# This layer only applies to string processing, and so should only have
# a dtype of 'string'.
if "dtype" in kwargs and kwargs["dtype"] != dtypes.string:
raise ValueError("TextVectorization may only have a dtype of string.")
elif "dtype" not in kwargs:
kwargs["dtype"] = dtypes.string
# 'standardize' must be one of (None, LOWER_AND_STRIP_PUNCTUATION, callable)
layer_utils.validate_string_arg(
standardize,
allowable_strings=(LOWER_AND_STRIP_PUNCTUATION),
layer_name="TextVectorization",
arg_name="standardize",
allow_none=True,
allow_callables=True)
# 'split' must be one of (None, SPLIT_ON_WHITESPACE, callable)
layer_utils.validate_string_arg(
split,
allowable_strings=(SPLIT_ON_WHITESPACE),
layer_name="TextVectorization",
arg_name="split",
allow_none=True,
allow_callables=True)
# 'output_mode' must be one of (None, INT, COUNT, BINARY, TFIDF)
layer_utils.validate_string_arg(
output_mode,
allowable_strings=(INT, COUNT, BINARY, TFIDF),
layer_name="TextVectorization",
arg_name="output_mode",
allow_none=True)
# 'ngrams' must be one of (None, int, tuple(int))
if not (ngrams is None or
isinstance(ngrams, int) or
isinstance(ngrams, tuple) and
all(isinstance(item, int) for item in ngrams)):
raise ValueError(("`ngrams` must be None, an integer, or a tuple of "
"integers. Got %s") % (ngrams,))
# 'output_sequence_length' must be one of (None, int) and is only
# set if output_mode is INT.
if (output_mode == INT and not (isinstance(output_sequence_length, int) or
(output_sequence_length is None))):
raise ValueError("`output_sequence_length` must be either None or an "
"integer when `output_mode` is 'int'. "
"Got %s" % output_sequence_length)
if output_mode != INT and output_sequence_length is not None:
raise ValueError("`output_sequence_length` must not be set if "
"`output_mode` is not 'int'.")
# If max_tokens is set, the value must be greater than 1 - otherwise we
# are creating a 0-element vocab, which doesn't make sense.
if max_tokens is not None and max_tokens < 1:
raise ValueError("max_tokens must be > 1.")
self._max_tokens = max_tokens
# In INT mode, the zero value is reserved for padding (per Keras standard
# padding approaches). In non-INT modes, there is no padding so we can set
# the OOV value to zero instead of one.
self._oov_value = 1 if output_mode == INT else 0
self._standardize = standardize
self._split = split
self._ngrams_arg = ngrams
if isinstance(ngrams, int):
self._ngrams = tuple(range(1, ngrams + 1))
else:
self._ngrams = ngrams
self._output_mode = output_mode
self._output_sequence_length = output_sequence_length
self._pad_to_max = pad_to_max_tokens
self._vocab_size = 0
self._called = False
super(TextVectorization, self).__init__(
combiner=None,
**kwargs)
base_preprocessing_layer._kpl_gauge.get_cell("V2").set("TextVectorization")
mask_token = "" if output_mode in [None, INT] else None
self._index_lookup_layer = self._get_index_lookup_class()(
max_tokens=max_tokens, mask_token=mask_token, vocabulary=vocabulary)
# If this layer is configured for string or integer output, we do not
# create a vectorization layer (as the output is not vectorized).
if self._output_mode in [None, INT]:
self._vectorize_layer = None
else:
if max_tokens is not None and self._pad_to_max:
max_elements = max_tokens
else:
max_elements = None
self._vectorize_layer = self._get_vectorization_class()(
max_tokens=max_elements, output_mode=self._output_mode)
# These are V1/V2 shim points. There are V1 implementations in the V1 class.
def _get_vectorization_class(self):
return category_encoding.CategoryEncoding
def _get_index_lookup_class(self):
return string_lookup.StringLookup
# End of V1/V2 shim points.
def _assert_same_type(self, expected_type, values, value_name):
if dtypes.as_dtype(expected_type) != dtypes.as_dtype(values.dtype):
raise RuntimeError("Expected %s type %s, got %s" %
(value_name, expected_type, values.dtype))
def _convert_to_ndarray(self, x):
return np.array(x) if isinstance(x, (list, tuple)) else x
def compute_output_shape(self, input_shape):
if self._output_mode != INT:
return tensor_shape.TensorShape([input_shape[0], self._max_tokens])
if self._output_mode == INT and self._split is None:
if len(input_shape) == 1:
input_shape = tuple(input_shape) + (1,)
return tensor_shape.TensorShape(input_shape)
if self._output_mode == INT and self._split is not None:
input_shape = list(input_shape)
if len(input_shape) == 1:
input_shape = input_shape + [self._output_sequence_length]
else:
input_shape[1] = self._output_sequence_length
return tensor_shape.TensorShape(input_shape)
def compute_output_signature(self, input_spec):
output_shape = self.compute_output_shape(input_spec.shape.as_list())
output_dtype = dtypes.int64 if self._output_mode == INT else K.floatx()
return tensor_spec.TensorSpec(shape=output_shape, dtype=output_dtype)
def adapt(self, data, reset_state=True):
"""Fits the state of the preprocessing layer to the dataset.
Overrides the default adapt method to apply relevant preprocessing to the
inputs before passing to the combiner.
Arguments:
data: The data to train on. It can be passed either as a tf.data Dataset,
as a NumPy array, a string tensor, or as a list of texts.
reset_state: Optional argument specifying whether to clear the state of
the layer at the start of the call to `adapt`. This must be True for
this layer, which does not support repeated calls to `adapt`.
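A minimal usage sketch (illustrative, not part of the original docstring):
  layer = TextVectorization(output_mode='int')
  layer.adapt(tf.data.Dataset.from_tensor_slices(["foo bar", "bar baz"]).batch(2))
  # layer.get_vocabulary() now contains the padding and OOV tokens followed
  # by the adapted tokens.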
"""
if not reset_state:
raise ValueError("TextVectorization does not support streaming adapts.")
# Build the layer explicitly with the original data shape instead of relying
# on an implicit call to `build` in the base layer's `adapt`, since
# preprocessing changes the input shape.
if isinstance(data, (list, tuple, np.ndarray)):
data = ops.convert_to_tensor_v2_with_dispatch(data)
if isinstance(data, ops.Tensor):
if data.shape.rank == 1:
data = array_ops.expand_dims(data, axis=-1)
self.build(data.shape)
preprocessed_inputs = self._preprocess(data)
elif isinstance(data, dataset_ops.DatasetV2):
# TODO(momernick): Replace this with a more V2-friendly API.
shape = dataset_ops.get_legacy_output_shapes(data)
if not isinstance(shape, tensor_shape.TensorShape):
raise ValueError("The dataset passed to 'adapt' must contain a single "
"tensor value.")
if shape.rank == 0:
data = data.map(lambda tensor: array_ops.expand_dims(tensor, 0))
shape = dataset_ops.get_legacy_output_shapes(data)
if shape.rank == 1:
data = data.map(lambda tensor: array_ops.expand_dims(tensor, -1))
self.build(dataset_ops.get_legacy_output_shapes(data))
preprocessed_inputs = data.map(self._preprocess)
else:
raise ValueError(
"adapt() requires a Dataset or an array as input, got {}".format(
type(data)))
self._index_lookup_layer.adapt(preprocessed_inputs)
if self._vectorize_layer:
if isinstance(data, ops.Tensor):
integer_data = self._index_lookup_layer(preprocessed_inputs)
else:
integer_data = preprocessed_inputs.map(self._index_lookup_layer)
self._vectorize_layer.adapt(integer_data)
def get_vocabulary(self):
return self._index_lookup_layer.get_vocabulary()
def get_config(self):
# This does not include the 'vocabulary' arg, since if the vocab was passed
# at init time it's now stored in variable state - we don't need to
# pull it off disk again.
config = {
"max_tokens": self._max_tokens,
"standardize": self._standardize,
"split": self._split,
"ngrams": self._ngrams_arg,
"output_mode": self._output_mode,
"output_sequence_length": self._output_sequence_length,
"pad_to_max_tokens": self._pad_to_max,
}
base_config = super(TextVectorization, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def count_params(self):
# This method counts the number of scalars in the weights of this layer.
# Since this layer doesn't have any /actual/ weights (in that there's
# nothing in this layer that can be trained - we only use the weight
# abstraction for ease of saving!) we return 0.
return 0
def set_vocabulary(self,
vocab,
df_data=None,
oov_df_value=None):
"""Sets vocabulary (and optionally document frequency) data for this layer.
This method sets the vocabulary and DF data for this layer directly, instead
of analyzing a dataset through 'adapt'. It should be used whenever the vocab
(and optionally document frequency) information is already known. If
vocabulary data is already present in the layer, this method will replace
it.
Arguments:
vocab: An array of string tokens.
df_data: An array of document frequency data. Only necessary if the layer
output_mode is TFIDF.
oov_df_value: The document frequency of the OOV token. Only necessary if
output_mode is TFIDF.
Raises:
ValueError: If there are too many inputs, the inputs do not match, or
input data is missing.
RuntimeError: If the vocabulary cannot be set when this function is
called. This happens in "binary", "count", and "tfidf" modes
if "pad_to_max_tokens" is False and the layer itself has already been
called.
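Illustrative sketch (not part of the original docstring) of setting a plain
vocabulary on an 'int'-mode layer:
  layer = TextVectorization(output_mode='int')
  layer.set_vocabulary(vocab=["earth", "wind", "and", "fire"])
For "tf-idf" output, `df_data` and `oov_df_value` must be supplied alongside
the vocab, with one document-frequency value per vocabulary token.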
"""
if self._output_mode != TFIDF and df_data is not None:
raise ValueError("df_data should only be set if output_mode is TFIDF. "
"output_mode is %s." % self._output_mode)
if (self._output_mode in [BINARY, COUNT, TFIDF] and self._called and
not self._pad_to_max):
raise RuntimeError(("When using TextVectorization in {mode} mode and "
"pad_to_max_tokens is False, the vocabulary cannot "
"be changed after the layer is "
"called.").format(mode=self._output_mode))
self._index_lookup_layer.set_vocabulary(vocab)
# When doing raw or integer output, we don't have a Vectorize layer to
# manage. In this case, we can return directly.
if self._output_mode in [None, INT]:
return
if not self._pad_to_max or self._max_tokens is None:
num_tokens = self._index_lookup_layer.vocab_size()
self._vectorize_layer.set_num_elements(num_tokens)
if self._output_mode == TFIDF:
if df_data is None:
raise ValueError("df_data must be set if output_mode is TFIDF")
if len(vocab) != len(df_data):
raise ValueError("df_data must be the same length as vocab. "
"len(df_data) is %s, len(vocab) is %s" %
(len(vocab), len(df_data)))
if oov_df_value is None:
raise ValueError("You must pass an oov_df_value when output_mode is "
"TFIDF.")
df_data = self._convert_to_ndarray(df_data)
if not isinstance(oov_df_value, np.ndarray):
oov_df_value = np.array([oov_df_value])
df_data = np.insert(df_data, 0, oov_df_value)
self._vectorize_layer.set_tfidf_data(df_data)
def build(self, input_shape):
# We have to use 'and not ==' here, because comparing input_shape[-1] with
# != or == can result in None for undefined shape axes. If using 'and !=',
# the expression evaluates to False instead of True when the shape is
# undefined; it needs to evaluate to True in that case.
if self._split is not None:
if input_shape.ndims > 1 and not input_shape[-1] == 1: # pylint: disable=g-comparison-negation
raise RuntimeError(
"When using TextVectorization to tokenize strings, the innermost "
"dimension of the input array must be 1, got shape "
"{}".format(input_shape))
super(TextVectorization, self).build(input_shape)
def _set_state_variables(self, updates):
if not self.built:
raise RuntimeError("_set_state_variables() must be called after build().")
if self._output_mode == TFIDF:
self.set_vocabulary(
updates[_VOCAB_NAME],
updates[_IDF_NAME],
updates[_OOV_IDF_NAME])
else:
self.set_vocabulary(updates[_VOCAB_NAME])
def _preprocess(self, inputs):
if self._standardize == LOWER_AND_STRIP_PUNCTUATION:
if tf_utils.is_ragged(inputs):
lowercase_inputs = ragged_functional_ops.map_flat_values(
gen_string_ops.string_lower, inputs)
# Depending on configuration, we may never touch the non-data tensor
# in the ragged inputs tensor. If that is the case, and this is the
# only layer in the keras model, running it will throw an error.
# To get around this, we wrap the result in an identity.
lowercase_inputs = array_ops.identity(lowercase_inputs)
else:
lowercase_inputs = gen_string_ops.string_lower(inputs)
inputs = string_ops.regex_replace(lowercase_inputs, DEFAULT_STRIP_REGEX,
"")
elif callable(self._standardize):
inputs = self._standardize(inputs)
elif self._standardize is not None:
raise ValueError(("%s is not a supported standardization. "
"TextVectorization supports the following options "
"for `standardize`: None, "
"'lower_and_strip_punctuation', or a "
"Callable.") % self._standardize)
if self._split is not None:
# If we are splitting, we validate that the innermost axis is of dimension 1 and
# so can be squeezed out. We do this here instead of after splitting for
# performance reasons - it's more expensive to squeeze a ragged tensor.
if inputs.shape.ndims > 1:
inputs = array_ops.squeeze(inputs, axis=-1)
if self._split == SPLIT_ON_WHITESPACE:
# This treats multiple whitespaces as one whitespace, and strips leading
# and trailing whitespace.
inputs = ragged_string_ops.string_split_v2(inputs)
elif callable(self._split):
inputs = self._split(inputs)
else:
raise ValueError(
("%s is not a supported splitting. "
"TextVectorization supports the following options "
"for `split`: None, 'whitespace', or a Callable.") % self._split)
# Note that 'inputs' here can be either ragged or dense depending on the
# configuration choices for this Layer. The strings.ngrams op, however, does
# support both ragged and dense inputs.
if self._ngrams is not None:
inputs = ragged_string_ops.ngrams(
inputs, ngram_width=self._ngrams, separator=" ")
return inputs
def call(self, inputs):
if isinstance(inputs, (list, tuple, np.ndarray)):
inputs = ops.convert_to_tensor_v2_with_dispatch(inputs)
self._called = True
inputs = self._preprocess(inputs)
# If we're not doing any output processing, return right away.
if self._output_mode is None:
return inputs
indexed_data = self._index_lookup_layer(inputs)
if self._output_mode == INT:
# Once we have the dense tensor, we can return it if we weren't given a
# fixed output sequence length. If we were, though, we have to dynamically
# choose whether to pad or trim it based on each tensor.
# We need to convert to dense if we have a ragged tensor.
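# Example (added commentary): with output_sequence_length=4, a batch whose
# rows split into 2 tokens is right-padded with zeros to width 4, while a
# batch whose rows split into 6 tokens is truncated to the first 4.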
if tf_utils.is_ragged(indexed_data):
dense_data = indexed_data.to_tensor(default_value=0)
else:
dense_data = indexed_data
if self._output_sequence_length is None:
return dense_data
else:
sequence_len = K.shape(dense_data)[1]
pad_amt = self._output_sequence_length - sequence_len
pad_fn = lambda: array_ops.pad(dense_data, [[0, 0], [0, pad_amt]])
slice_fn = lambda: dense_data[:, :self._output_sequence_length]
output_tensor = control_flow_ops.cond(
sequence_len < self._output_sequence_length,
true_fn=pad_fn,
false_fn=slice_fn)
output_shape = output_tensor.shape.as_list()
output_shape[-1] = self._output_sequence_length
output_tensor.set_shape(tensor_shape.TensorShape(output_shape))
return output_tensor
# If we're not returning integers here, we rely on the vectorization layer
# to create the output.
return self._vectorize_layer(indexed_data)
|
apache-2.0
| 7,755,867,636,308,005,000
| 44.082822
| 101
| 0.666667
| false
| 3.958255
| true
| false
| false
|
entropyx/callme
|
callme/proxy.py
|
1
|
9608
|
# Copyright (c) 2009-2014, Christian Haintz
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# * Neither the name of callme nor the names of its contributors
# may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
import socket
import time
import uuid
import kombu
from callme import base
from callme import exceptions as exc
from callme import protocol as pr
LOG = logging.getLogger(__name__)
REQUEST_TIMEOUT = 60
class Proxy(base.Base):
"""This Proxy class is used to handle the communication with the rpc
server.
:keyword server_exchange_name: the exchange the target server consumes from
(can be changed later, see :func:`use_server`)
:keyword server_queue_name: optional queue name of the target server
(can be changed later, see :func:`use_server`)
:keyword server_routing_key: optional routing key used when publishing requests
:keyword amqp_host: the host of where the AMQP Broker is running
:keyword amqp_user: the username for the AMQP Broker
:keyword amqp_password: the password for the AMQP Broker
:keyword amqp_vhost: the virtual host of the AMQP Broker
:keyword amqp_port: the port of the AMQP Broker
:keyword ssl: use SSL connection for the AMQP Broker
:keyword timeout: default timeout for calls in seconds
:keyword durable: make all exchanges and queues durable
:keyword auto_delete: delete server queues after all connections are closed
not applicable for client queues
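Illustrative usage (a sketch based on this class's signature, assuming a
callme server is consuming from the 'fooserver' exchange):
  proxy = Proxy('fooserver', server_routing_key='fooserver.rpc')
  result = proxy.use_server(timeout=30).add(1, 2)
Attribute access on the proxy returns a `_Method` wrapper, so `add(1, 2)` is
serialized, published to the server exchange, and the call blocks until the
response arrives or the timeout expires.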
"""
def __init__(self,
server_exchange_name,
server_queue_name=None,
server_routing_key=None,
amqp_host='localhost',
amqp_user='guest',
amqp_password='guest',
amqp_vhost='/',
amqp_port=5672,
ssl=False,
timeout=REQUEST_TIMEOUT,
durable=False,
auto_delete=True,
):
super(Proxy, self).__init__(amqp_host, amqp_user, amqp_password,
amqp_vhost, amqp_port, ssl)
self._uuid = str(uuid.uuid4())
self._server_exchange_name = server_exchange_name
self._server_queue_name = server_queue_name
self._server_routing_key = server_routing_key
self._timeout = timeout
self._is_received = False
self._corr_id = None
self._response = None
self._exchange_name = 'client_{0}_ex_{1}'.format(self._server_exchange_name, self._uuid)
self._queue_name = 'client_{0}_queue_{1}'.format(self._server_queue_name, self._uuid) if self._server_queue_name else ''
self._durable = durable
self._auto_delete = auto_delete
# create queue
queue = self._make_queue(self._queue_name, None,
durable=self._durable,
auto_delete=True)
# create consumer
consumer = kombu.Consumer(channel=self._conn,
queues=queue,
callbacks=[self._on_response],
accept=['pickle'])
consumer.consume()
def use_server(self, exchange_name=None, queue_name=None, timeout=None):
"""Use the specified server and set an optional timeout for the method
call.
Typical use:
>>> my_proxy.use_server('foo_exchange', 'foo.receive').a_remote_func()
:keyword exchange_name: the exchange_name where the call will be made
:keyword queue_name: the queue_name where the call will be made
:keyword timeout: set or overrides the call timeout in seconds
:rtype: return `self` to cascade further calls
"""
if exchange_name is not None:
self._server_exchange_name = exchange_name
if queue_name is not None:
self._server_queue_name = queue_name
if timeout is not None:
self._timeout = timeout
return self
def _on_response(self, response, message):
"""This method is automatically called when a response is incoming and
decides if it is the message we are waiting for - the message with the
result.
:param response: the body of the amqp message already deserialized
by kombu
:param message: the plain amqp kombu.message with additional
information
"""
LOG.debug("Got response: {0}".format(response))
try:
message.ack()
except Exception:
LOG.exception("Failed to acknowledge AMQP message.")
else:
LOG.debug("AMQP message acknowledged.")
# check response type
if not isinstance(response, pr.RpcResponse):
LOG.warning("Response is not a `RpcResponse` instance.")
return
# process response
try:
if self._corr_id == message.properties['correlation_id']:
self._response = response
self._is_received = True
except KeyError:
LOG.error("Message has no `correlation_id` property.")
def __request(self, func_name, func_args, func_kwargs):
"""The remote-method-call execution function.
:param func_name: name of the method that should be executed
:param func_args: arguments for the remote-method
:param func_kwargs: keyword arguments for the remote-method
:type func_name: string
:type func_args: list of parameters
:rtype: result of the method
"""
self._corr_id = str(uuid.uuid4())
request = pr.RpcRequest(func_name, func_args, func_kwargs)
LOG.debug("Publish request: {0}".format(request))
# publish request
with kombu.producers[self._conn].acquire(block=True) as producer:
exchange_type = 'topic'
exchange = self._make_exchange(
self._server_exchange_name,
type=exchange_type,
durable=self._durable,
auto_delete=self._auto_delete)
producer.publish(body=request,
serializer='pickle',
exchange=exchange,
reply_to=self._queue_name,
correlation_id=self._corr_id,
routing_key=self._server_routing_key)
# start waiting for the response
self._wait_for_result()
self._is_received = False
# handle the response
result = self._response.result
LOG.debug("Result: {!r}".format(result))
if self._response.is_exception:
raise result
return result
def _wait_for_result(self):
"""Waits for the result from the server, checks every second if
a timeout has occurred. If the timeout is exceeded, a `RpcTimeout`
exception is raised.
"""
start_time = time.time()
while not self._is_received:
try:
self._conn.drain_events(timeout=1)
except socket.timeout:
if self._timeout > 0:
if time.time() - start_time > self._timeout:
raise exc.RpcTimeout("RPC Request timeout")
def __getattr__(self, name):
"""This method is invoked, if a method is being called, which doesn't
exist on Proxy. It is used for RPC to get the function which should
be called on the Server.
"""
# magic method dispatcher
LOG.debug("Recursion: {0}".format(name))
return _Method(self.__request, name)
# ===========================================================================
class _Method(object):
"""This class is used to realize remote-method-calls.
:param send: name of the function that should be executed on Proxy
:param name: name of the method which should be called on the Server
"""
# some magic to bind an XML-RPC method to an RPC server.
# supports "nested" methods (e.g. examples.getStateName)
def __init__(self, send, name):
self._send = send
self._name = name
def __getattr__(self, name):
return _Method(self._send, "{0}.{1}".format(self._name, name))
def __call__(self, *args, **kw):
return self._send(self._name, args, kw)
# ===========================================================================
|
bsd-3-clause
| 4,423,005,917,173,876,700
| 38.216327
| 128
| 0.600125
| false
| 4.508681
| false
| false
| false
|
etamponi/resilient-protocol
|
resilient/ensemble.py
|
1
|
6786
|
import hashlib
import numpy
from sklearn.base import BaseEstimator, ClassifierMixin, clone
from sklearn.tree.tree import DecisionTreeClassifier
from sklearn.utils.fixes import unique
from sklearn import preprocessing
from sklearn.utils.random import check_random_state
from resilient.logger import Logger
from resilient.selection_strategies import SelectBestPercent
from resilient.train_set_generators import RandomCentroidPDFTrainSetGenerator
from resilient.weighting_strategies import CentroidBasedWeightingStrategy
__author__ = 'Emanuele Tamponi <emanuele.tamponi@diee.unica.it>'
MAX_INT = numpy.iinfo(numpy.int32).max
class TrainingStrategy(BaseEstimator):
def __init__(self,
base_estimator=DecisionTreeClassifier(max_features='auto'),
train_set_generator=RandomCentroidPDFTrainSetGenerator(),
random_sample=None):
self.base_estimator = base_estimator
self.train_set_generator = train_set_generator
self.random_sample = random_sample
def train_estimators(self, n, inp, y, weighting_strategy, random_state):
classifiers = []
weight_generator = self.train_set_generator.get_sample_weights(
n, inp, y, random_state
)
for i, weights in enumerate(weight_generator):
if self.random_sample is not None:
ix = random_state.choice(
len(y),
size=int(self.random_sample*len(y)),
p=weights, replace=True
)
weights = numpy.bincount(ix, minlength=len(y))
s = weights.sum()
weights = numpy.array([float(w) / s for w in weights])
Logger.get().write("!Training estimator:", (i+1))
est = self._make_estimator(inp, y, weights, random_state)
weighting_strategy.add_estimator(est, inp, y, weights)
classifiers.append(est)
return classifiers
def _make_estimator(self, inp, y, sample_weights, random_state):
seed = random_state.randint(MAX_INT)
est = clone(self.base_estimator)
est.set_params(random_state=check_random_state(seed))
est.fit(inp, y, sample_weight=sample_weights)
return est
class ResilientEnsemble(BaseEstimator, ClassifierMixin):
def __init__(self,
pipeline=None,
n_estimators=10,
training_strategy=TrainingStrategy(),
weighting_strategy=CentroidBasedWeightingStrategy(),
selection_strategy=SelectBestPercent(),
multiply_by_weight=False,
use_prob=True,
random_state=None):
self.pipeline = pipeline
self.n_estimators = n_estimators
self.training_strategy = training_strategy
self.weighting_strategy = weighting_strategy
self.selection_strategy = selection_strategy
self.multiply_by_weight = multiply_by_weight
self.use_prob = use_prob
self.random_state = random_state
# Training time attributes
self.classes_ = None
self.n_classes_ = None
self.classifiers_ = None
self.precomputed_probs_ = None
self.precomputed_weights_ = None
self.random_state_ = None
def fit(self, inp, y):
self.precomputed_probs_ = None
self.precomputed_weights_ = None
self.classes_, y = unique(y, return_inverse=True)
self.n_classes_ = len(self.classes_)
self.random_state_ = check_random_state(self.random_state)
if self.pipeline is not None:
inp = self.pipeline.fit_transform(inp)
self.weighting_strategy.prepare(inp, y)
self.classifiers_ = self.training_strategy.train_estimators(
self.n_estimators, inp, y,
self.weighting_strategy, self.random_state_
)
# Reset it to null because the previous line uses self.predict
self.precomputed_probs_ = None
self.precomputed_weights_ = None
return self
def predict_proba(self, inp):
# inp is array-like, (N, D), one instance per row
# output is array-like, (N, n_classes_), each row sums to one
if self.precomputed_probs_ is None:
self._precompute(inp)
prob = numpy.zeros((len(inp), self.n_classes_))
for i in range(len(inp)):
active_indices = self.selection_strategy.get_indices(
self.precomputed_weights_[i], self.random_state_
)
prob[i] = self.precomputed_probs_[i][active_indices].sum(axis=0)
preprocessing.normalize(prob, norm='l1', copy=False)
return prob
def predict(self, inp):
# inp is array-like, (N, D), one instance per row
# output is array-like, N, one label per instance
if self.pipeline is not None:
inp = self.pipeline.transform(inp)
p = self.predict_proba(inp)
return self.classes_[numpy.argmax(p, axis=1)]
def _precompute(self, inp):
self.precomputed_probs_ = numpy.zeros(
(len(inp), len(self.classifiers_), self.n_classes_)
)
self.precomputed_weights_ = numpy.zeros(
(len(inp), len(self.classifiers_))
)
for i, x in enumerate(inp):
Logger.get().write(
"!Computing", len(inp), "probabilities and weights:", (i+1)
)
for j, cls in enumerate(self.classifiers_):
prob = cls.predict_proba(x)[0]
if not self.use_prob:
max_index = prob.argmax()
prob = numpy.zeros_like(prob)
prob[max_index] = 1
self.precomputed_probs_[i][j] = prob
self.precomputed_weights_[i] = (
self.weighting_strategy.weight_estimators(x)
)
if self.multiply_by_weight:
for j in range(len(self.classifiers_)):
self.precomputed_probs_[i][j] *= (
self.precomputed_weights_[i][j]
)
def get_directory(self):
current_state = self.random_state
current_selection = self.selection_strategy
self.random_state = None
self.selection_strategy = None
filename = hashlib.md5(str(self)).hexdigest()
self.random_state = current_state
self.selection_strategy = current_selection
return filename
def get_filename(self):
return self.get_directory() + "/ensemble"
def __eq__(self, other):
return isinstance(other, ResilientEnsemble) and (
self.get_directory() == other.get_directory()
)
def __hash__(self):
return hash(self.get_directory())
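# Illustrative usage (added commentary, not part of this module), assuming
# `X` is a numpy array of shape (n_samples, n_features) and `y` a label
# vector of length n_samples:
#   ensemble = ResilientEnsemble(n_estimators=10, random_state=42)
#   ensemble.fit(X, y)
#   labels = ensemble.predict(X)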
|
gpl-2.0
| 4,599,996,119,543,902,700
| 37.338983
| 77
| 0.600796
| false
| 4.110236
| false
| false
| false
|
andrewschaaf/pj-closure
|
js/goog/array.py
|
1
|
3829
|
#<pre>Copyright 2006 The Closure Library Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.</pre>
# TODO the rest
from goog import bind, isString
ARRAY_PROTOTYPE_ = Array.prototype
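# Note (added commentary): this module appears to be Python source meant to be
# transpiled to JavaScript by pj-closure, so names such as Array, arguments and
# Math refer to JavaScript globals rather than Python builtins.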
def concat(var_args):
return ARRAY_PROTOTYPE_.concat.apply(ARRAY_PROTOTYPE_, arguments)
def forEach(arr, f, opt_obj):
#DIFF: goog runs this if-statement at load-time
if ARRAY_PROTOTYPE_.forEach:
# TODO assert(arr.length != None)
ARRAY_PROTOTYPE_.forEach.call(arr, f, opt_obj)
else:
arr2 = (arr.split('') if isString(arr) else arr)
for i in range(len(arr)):
if i in arr2:
f.call(opt_obj, arr2[i], i, arr)
def map(arr, f, opt_obj):
#DIFF: goog runs this if-statement at load-time
if ARRAY_PROTOTYPE_.map:
#TODO goog.asserts.assert(arr.length != null);
return ARRAY_PROTOTYPE_.map.call(arr, f, opt_obj)
else:
l = len(arr)
res = Array(l)
arr2 = (arr.split('') if isString(arr) else arr)
for i in range(l):
if i in arr2:
res[i] = f.call(opt_obj, arr2[i], i, arr)
return res
def reduce(arr, f, val, opt_obj):
if arr.reduce:
if opt_obj:
return arr.reduce(bind(f, opt_obj), val)
else:
return arr.reduce(f, val)
rval = val
def accumulate(val, index):
rval = f.call(opt_obj, rval, val, index, arr)
forEach(arr, accumulate)
return rval
def slice(arr, start, opt_end):
#goog.asserts.assert(arr.length != null);
# passing 1 arg to slice is not the same as passing 2 where the second is
# null or undefined (in that case the second argument is treated as 0).
# we could use slice on the arguments object and then use apply instead of
# testing the length
if arguments.length <= 2:
return ARRAY_PROTOTYPE_.slice.call(arr, start)
else:
return ARRAY_PROTOTYPE_.slice.call(arr, start, opt_end)
def splice(arr, index, howMany, var_args):
#goog.asserts.assert(arr.length != null)
return ARRAY_PROTOTYPE_.splice.apply(
arr, slice(arguments, 1))
def insertAt(arr, obj, opt_i):
splice(arr, opt_i, 0, obj)
def filter(arr, f, opt_obj):
if ARRAY_PROTOTYPE_.filter:
#goog.asserts.assert(arr.length != null);
return ARRAY_PROTOTYPE_.filter.call(arr, f, opt_obj)
else:
res = []
resLength = 0
arr2 = arr.split('') if isString(arr) else arr
for i in range(len(arr)):
if i in arr2:
val = arr2[i]
if f.call(opt_obj, val, i, arr):
# Is this better than .push?
resLength += 1
res[resLength] = val
return res
def indexOf(arr, obj, opt_fromIndex):
if ARRAY_PROTOTYPE_.indexOf:
#goog.asserts.assert(arr.length != null);
return ARRAY_PROTOTYPE_.indexOf.call(arr, obj, opt_fromIndex)
else:
fromIndex = (
0
if opt_fromIndex == None else
(
Math.max(0, arr.length + opt_fromIndex)
if opt_fromIndex < 0 else
opt_fromIndex))
if isString(arr):
# Array.prototype.indexOf uses === so only strings should be found.
if not isString(obj) or len(obj) != 1:
return -1
return arr.indexOf(obj, fromIndex)
for i in range(fromIndex, len(arr)):
if (i in arr) and (arr[i] == obj):
return i
return -1
|
apache-2.0
| -2,837,759,687,464,881,000
| 25.226027
| 76
| 0.628362
| false
| 3.255952
| false
| false
| false
|
ilastik/ilastik-0.5
|
ilastik/modules/unsupervised_decomposition/core/unsupervisedMgr.py
|
1
|
7290
|
from ilastik.core.baseModuleMgr import BaseModuleDataItemMgr, BaseModuleMgr
import numpy
import traceback, sys
from ilastik.core import jobMachine
from PyQt4 import QtCore
import os
import algorithms
from ilastik.core.volume import DataAccessor
from ilastik.core.overlayMgr import OverlayItem
""" Import all algorithm plugins"""
pathext = os.path.dirname(__file__)
try:
for f in os.listdir(os.path.abspath(pathext + '/algorithms')):
module_name, ext = os.path.splitext(f) # Handles no-extension files, etc.
if ext == '.py': # Important, ignore .pyc/other files.
module = __import__('ilastik.modules.unsupervised_decomposition.core.algorithms.' + module_name)
except Exception, e:
print e
traceback.print_exc()
pass
for i, c in enumerate(algorithms.unsupervisedDecompositionBase.UnsupervisedDecompositionBase.__subclasses__()):
print "Loaded unsupervised decomposition algorithm:", c.name
#*******************************************************************************
# U n s u p e r v i s e d I t e m M o d u l e M g r *
#*******************************************************************************
class UnsupervisedItemModuleMgr(BaseModuleDataItemMgr):
name = "Unsupervised_Decomposition"
def __init__(self, dataItemImage):
BaseModuleDataItemMgr.__init__(self, dataItemImage)
self.dataItemImage = dataItemImage
self.overlays = []
self.inputData = None
def setInputData(self, data):
self.inputData = data
#*******************************************************************************
# U n s u p e r v i s e d D e c o m p o s i t i o n M o d u l e M g r *
#*******************************************************************************
class UnsupervisedDecompositionModuleMgr(BaseModuleMgr):
name = "Unsupervised_Decomposition"
def __init__(self, dataMgr):
BaseModuleMgr.__init__(self, dataMgr)
self.dataMgr = dataMgr
self.unsupervisedMethod = algorithms.unsupervisedDecompositionPCA.UnsupervisedDecompositionPCA
if self.dataMgr.module["Unsupervised_Decomposition"] is None:
self.dataMgr.module["Unsupervised_Decomposition"] = self
def computeResults(self, inputOverlays):
self.decompThread = UnsupervisedDecompositionThread(self.dataMgr, inputOverlays, self.dataMgr.module["Unsupervised_Decomposition"].unsupervisedMethod)
self.decompThread.start()
return self.decompThread
def finalizeResults(self):
activeItem = self.dataMgr[self.dataMgr._activeImageNumber]
activeItem._dataVol.unsupervised = self.decompThread.result
#create overlays for unsupervised decomposition:
if self.dataMgr[self.dataMgr._activeImageNumber].overlayMgr["Unsupervised/" + self.dataMgr.module["Unsupervised_Decomposition"].unsupervisedMethod.shortname] is None:
data = self.decompThread.result[:,:,:,:,:]
myColor = OverlayItem.qrgb(0, 0, 0)
for o in range(0, data.shape[4]):
data2 = OverlayItem.normalizeForDisplay(data[:,:,:,:,o:(o+1)])
# for some strange reason we have to invert the data before displaying it
ov = OverlayItem(255 - data2, color = myColor, alpha = 1.0, colorTable = None, autoAdd = True, autoVisible = True)
self.dataMgr[self.dataMgr._activeImageNumber].overlayMgr["Unsupervised/" + self.dataMgr.module["Unsupervised_Decomposition"].unsupervisedMethod.shortname + " component %d" % (o+1)] = ov
# remove outdated overlays (like PCA components 5-10 if a decomposition with 4 components is done)
numOverlaysBefore = len(self.dataMgr[self.dataMgr._activeImageNumber].overlayMgr.keys())
finished = False
while finished != True:
o = o + 1
# assumes consecutive numbering
key = "Unsupervised/" + self.dataMgr.module["Unsupervised_Decomposition"].unsupervisedMethod.shortname + " component %d" % (o+1)
self.dataMgr[self.dataMgr._activeImageNumber].overlayMgr.remove(key)
numOverlaysAfter = len(self.dataMgr[self.dataMgr._activeImageNumber].overlayMgr.keys())
if(numOverlaysBefore == numOverlaysAfter):
finished = True
else:
numOverlaysBefore = numOverlaysAfter
else:
self.dataMgr[self.dataMgr._activeImageNumber].overlayMgr["Unsupervised/" + self.dataMgr.module["Unsupervised_Decomposition"].unsupervisedMethod.shortname]._data = DataAccessor(self.decompThread.result)
#*******************************************************************************
# U n s u p e r v i s e d D e c o m p o s i t i o n T h r e a d *
#*******************************************************************************
class UnsupervisedDecompositionThread(QtCore.QThread):
def __init__(self, dataMgr, overlays, unsupervisedMethod = algorithms.unsupervisedDecompositionPCA.UnsupervisedDecompositionPCA, unsupervisedMethodOptions = None):
QtCore.QThread.__init__(self, None)
self.reshapeToFeatures(overlays)
self.dataMgr = dataMgr
self.count = 0
self.numberOfJobs = 1
self.stopped = False
self.unsupervisedMethod = unsupervisedMethod
self.unsupervisedMethodOptions = unsupervisedMethodOptions
self.jobMachine = jobMachine.JobMachine()
self.result = []
def reshapeToFeatures(self, overlays):
# transform to feature matrix
# ...first find out how many columns and rows the feature matrix will have
numFeatures = 0
numPoints = overlays[0].shape[0] * overlays[0].shape[1] * overlays[0].shape[2] * overlays[0].shape[3]
for overlay in overlays:
numFeatures += overlay.shape[4]
# ... then copy the data
features = numpy.zeros((numPoints, numFeatures), dtype=numpy.float)
currFeature = 0
for overlay in overlays:
currData = overlay[:,:,:,:,:]
features[:, currFeature:currFeature+overlay.shape[4]] = currData.reshape(numPoints, (currData.shape[4]))
currFeature += currData.shape[4]
self.features = features
self.origshape = overlays[0].shape
def decompose(self):
# V contains the component spectra/scores, W contains the projected data
unsupervisedMethod = self.unsupervisedMethod()
V, W = unsupervisedMethod.decompose(self.features)
self.result = (W.T).reshape((self.origshape[0], self.origshape[1], self.origshape[2], self.origshape[3], W.shape[0]))
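# Shape sketch (added commentary): overlays shaped (t, x, y, z, c1) and
# (t, x, y, z, c2) are flattened into one (t*x*y*z, c1 + c2) feature matrix;
# decompose() then reshapes the projected data W back to
# (t, x, y, z, n_components) so each component can be shown as an overlay.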
def run(self):
self.dataMgr.featureLock.acquire()
try:
jobs = []
job = jobMachine.IlastikJob(UnsupervisedDecompositionThread.decompose, [self])
jobs.append(job)
self.jobMachine.process(jobs)
self.dataMgr.featureLock.release()
except Exception, e:
print "######### Exception in UnsupervisedThread ##########"
print e
traceback.print_exc(file=sys.stdout)
self.dataMgr.featureLock.release()
|
bsd-2-clause
| -7,100,022,085,646,028,000
| 48.598639
| 213
| 0.610288
| false
| 4.074902
| false
| false
| false
|
Jason-Gew/Python_modules
|
authenticate.py
|
1
|
2754
|
#!/usr/bin/env python
#
# authenticate.py module was created by Jason/Ge Wu
# Purpose: to quickly set up and verify a username & password
# for system or software access.
from getpass import getpass # Disable password display on console
import base64 # Base64 is only an encoding; if necessary, use real encryption such as AES or a salted hash
encryp_pass = ""
def set_authentication(pass_length, set_timeout):
global encryp_pass
while set_timeout > 0:
select1 = raw_input("\nWould you like to setup a new Password for Login? (Y/n): ")
if select1 == 'Y' or select1 == 'y':
while set_timeout > 0:
buff1 = getpass(prompt = "\nPlease Enter your Password: ")
if not buff1.isspace():
buff2 = getpass(prompt = "Please Enter your Password again: ")
if buff1 == buff2:
if len(buff2) < pass_length:
print "-> Password must have {} characters or more!".format(pass_length)
set_timeout -= 1
print "-> You have {} chance(s)...".format(set_timeout)
continue
else:
encryp_pass = base64.b64encode(buff2)
print "\n ==== Password Setup Success ====\n"
del buff1, buff2
return True
else:
print "-> Password does not match! Please Try Again!\n"
set_timeout -= 1
print "-> You have {} chance(s)...".format(set_timeout)
continue
else:
print "-> Invalid Password!\n"
set_timeout -= 1
print "-> You have {} chance(s)...".format(set_timeout)
continue
elif select1 == 'N' or select1 == 'n':
return False
break
else:
if set_timeout > 0:
print "-> Please enter \'Y\' or \'n\' character only!"
set_timeout -= 1
print "-> You have {} chance(s)...".format(set_timeout)
else:
print "\nTime Out, please re-run the program and Try Carefully!\n"
exit(1)
def console_authenticate(set_timeout):
while set_timeout > 0:
buff = getpass(prompt = "\nPlease enter your Password: ")
encryp_buffer = base64.b64encode(buff)
if encryp_buffer == encryp_pass:
print "\n ==== Authentication Success ==== \n"
del buff, encryp_buffer
return True
elif buff == '':
print "-> Password cannot be empty!\n"
set_timeout -= 1
print "-> You have {} chance(s)...".format(set_timeout)
else:
set_timeout -= 1
if set_timeout > 0:
print "-> Invalid Password, Please Try Again!"
print "-> You still have {} chance(s)...".format(set_timeout)
else:
print "\n ==== Authentication Fail ==== \n"
return False
# For testing purpose...
if __name__ == "__main__":
if set_authentication(6,4):
if console_authenticate(3):
print "Done"
else:
print "Failed"
exit(1)
else:
print "No Authentication!"
exit(0)
|
gpl-3.0
| 333,663,628,608,991,300
| 28.94382
| 84
| 0.603849
| false
| 3.263033
| false
| false
| false
|
uclouvain/osis
|
base/migrations/0054_scoresencoding.py
|
1
|
2269
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-05-02 15:06
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('base', '0053_auto_20160529_2355'),
]
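# The RunSQL operation below (re)creates the app_scores_encoding view, which
# reports, per program manager / offer year / learning unit year, the total
# number of exam enrollments and how many already have a final score or a
# final justification, restricted to offer year calendars open at query time.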
operations = [
migrations.RunSQL(
"""
DROP VIEW IF EXISTS app_scores_encoding;
CREATE OR REPLACE VIEW app_scores_encoding AS
SELECT row_number() OVER () as id,
base_programmanager.id as program_manager_id,
program_manager_person.id as pgm_manager_person_id,
base_offeryear.id as offer_year_id,
base_learningunityear.id as learning_unit_year_id,
count(base_examenrollment.id) as total_exam_enrollments,
sum(case when base_examenrollment.score_final is not null or base_examenrollment.justification_final is not null then 1 else 0 end) exam_enrollments_encoded
from base_examenrollment
join base_sessionexam on base_sessionexam.id = base_examenrollment.session_exam_id
join base_learningunityear on base_learningunityear.id = base_sessionexam.learning_unit_year_id
join base_offeryearcalendar on base_offeryearcalendar.id = base_sessionexam.offer_year_calendar_id
join base_learningunitenrollment on base_learningunitenrollment.id = base_examenrollment.learning_unit_enrollment_id
join base_offerenrollment on base_offerenrollment.id = base_learningunitenrollment.offer_enrollment_id
join base_offeryear on base_offeryear.id = base_offerenrollment.offer_year_id
join base_programmanager on base_programmanager.offer_year_id = base_offeryear.id
join base_person program_manager_person on program_manager_person.id = base_programmanager.person_id
where base_offeryearcalendar.start_date <= CURRENT_TIMESTAMP::date
and base_offeryearcalendar.end_date >= CURRENT_TIMESTAMP::date
group by
base_programmanager.id,
program_manager_person.id,
base_offeryear.id,
base_learningunityear.id
;
""",
elidable=True
),
]
|
agpl-3.0
| 3,287,373,462,270,548,000
| 39.517857
| 172
| 0.654473
| false
| 3.839255
| false
| false
| false
|
314r/joliebulle
|
joliebulle/view/base.py
|
1
|
1815
|
#joliebulle 3.6
#Copyright (C) 2010-2016 Pierre Tavares
#Copyright (C) 2012-2015 joliebulle's authors
#See AUTHORS file.
#This program is free software; you can redistribute it and/or
#modify it under the terms of the GNU General Public License
#as published by the Free Software Foundation; either version 3
#of the License, or (at your option) any later version.
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#You should have received a copy of the GNU General Public License
#along with this program; if not, write to the Free Software
#Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
from PyQt5 import QtGui
from base import ImportBase
from view.yeastview import *
def getFermentablesQtModel():
model = QtGui.QStandardItemModel()
for f in ImportBase().listeFermentables:
item = QtGui.QStandardItem(f.name)
item.setData(f, view.constants.MODEL_DATA_ROLE)
model.appendRow(item)
return model
def getHopsQtModel():
model = QtGui.QStandardItemModel()
for h in ImportBase().listeHops :
item = QtGui.QStandardItem(h.name)
item.setData(h, view.constants.MODEL_DATA_ROLE)
model.appendRow(item)
return model
def getMiscsQtModel():
model = QtGui.QStandardItemModel()
for m in ImportBase().listeMiscs:
item = QtGui.QStandardItem(m.name)
item.setData(m, view.constants.MODEL_DATA_ROLE)
model.appendRow(item)
return model
def getYeastsQtModel():
model = QtGui.QStandardItemModel()
for y in ImportBase().listeYeasts:
item = QtGui.QStandardItem(YeastView(y).yeastDetailDisplay())
item.setData(y, view.constants.MODEL_DATA_ROLE)
model.appendRow(item)
return model
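# Illustrative usage (added commentary, not part of this module): any of these
# item models can back a Qt view or combo box, e.g.
#   combo = QtWidgets.QComboBox()
#   combo.setModel(getHopsQtModel())
# The wrapped ingredient object is then available through
# combo.currentData(view.constants.MODEL_DATA_ROLE).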
|
gpl-3.0
| 553,826,155,992,883,500
| 32.018182
| 76
| 0.770799
| false
| 3.264388
| false
| false
| false
|
shawncaojob/LC
|
QUESTIONS/44_wildcard_matching.py
|
1
|
1859
|
# 44. Wildcard Matching
# Total Accepted: 59032 Total Submissions: 333961 Difficulty: Hard
# Implement wildcard pattern matching with support for '?' and '*'.
#
# '?' Matches any single character.
# '*' Matches any sequence of characters (including the empty sequence).
#
# The matching should cover the entire input string (not partial).
#
# The function prototype should be:
# bool isMatch(const char *s, const char *p)
#
# Some examples:
# isMatch("aa","a") false
# isMatch("aa","aa") true
# isMatch("aaa","aa") false
# isMatch("aa", "*") true
# isMatch("aa", "a*") true
# isMatch("ab", "?*") true
# isMatch("aab", "c*a*b") false
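# DP sketch (added commentary): dp[i][j] is True when s[:i] matches p[:j].
# Base case dp[0][0] = True; a leading run of '*' can match the empty string,
# so dp[0][j] = dp[0][j-1] when p[j-1] == '*'. For the general cell:
# - literal match or '?': dp[i][j] = dp[i-1][j-1]
# - '*': dp[i][j] = dp[i][j-1] (match zero chars) or dp[i-1][j] (absorb s[i-1])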
class Solution(object):
def isMatch(self, s, p):
"""
:type s: str
:type p: str
:rtype: bool
"""
print(s, p)
m, n = len(s), len(p)
dp = [ [ False for y in xrange(n + 1) ] for x in xrange(m + 1) ]
dp[0][0] = True
for j in xrange(1, n + 1):
if p[j-1] == "*" and dp[0][j-1]:
dp[0][j] = True
for i in xrange(1, m + 1):
for j in xrange(1, n + 1):
if (s[i-1] == p[j-1] or p[j-1] == "?") and dp[i-1][j-1]: #Cur char matching
dp[i][j] = True
if p[j-1] == "*":
if dp[i][j-1] or dp[i-1][j]: # Matching 0 or more
dp[i][j] = True
for row in dp:
print(row)
return dp[-1][-1]
if __name__ == "__main__":
print(Solution().isMatch("aa","a"))
print(Solution().isMatch("aa","aa"))
print(Solution().isMatch("aaa","aa"))
print(Solution().isMatch("aa", "*"))
print(Solution().isMatch("aa", "a*"))
print(Solution().isMatch("ab", "?*"))
print(Solution().isMatch("aab", "c*a*b"))
|
gpl-3.0
| -199,543,660,837,520,030
| 30.508475
| 94
| 0.507262
| false
| 3.150847
| false
| false
| false
|
caioariede/pyq
|
sizzle/match.py
|
1
|
3323
|
from .selector import Selector
class MatchEngine(object):
pseudo_fns = {}
selector_class = Selector
def __init__(self):
self.register_pseudo('not', self.pseudo_not)
self.register_pseudo('has', self.pseudo_has)
def register_pseudo(self, name, fn):
self.pseudo_fns[name] = fn
@staticmethod
def pseudo_not(matcher, node, value):
return not matcher.match_node(matcher.parse_selector(value)[0], node)
@staticmethod
def pseudo_has(matcher, node, value):
for node, body in matcher.iter_data([node]):
if body:
return any(
matcher.match_data(matcher.parse_selector(value)[0], body))
def parse_selector(self, selector):
return self.selector_class.parse(selector)
def match(self, selector, data):
selectors = self.parse_selector(selector)
nodeids = {}
for selector in selectors:
for node in self.match_data(selector, data):
nodeid = id(node)
if nodeid not in nodeids:
nodeids[nodeid] = None
yield node
def match_data(self, selector, data):
for node, body in self._iter_data(data):
match = self.match_node(selector, node)
if match:
next_selector = selector.next_selector
if next_selector:
if body:
for node in self.match_data(next_selector, body):
yield node
else:
yield node
if body and not selector.combinator == self.selector_class.CHILD:
for node in self.match_data(selector, body):
yield node
def match_node(self, selector, node):
match = all(self.match_rules(selector, node))
if match and selector.attrs:
match &= all(self.match_attrs(selector, node))
if match and selector.pseudos:
match &= all(self.match_pseudos(selector, node))
return match
def match_rules(self, selector, node):
if selector.typ:
yield self.match_type(selector.typ, node)
if selector.id_:
yield self.match_id(selector.id_, node)
def match_attrs(self, selector, node):
for a in selector.attrs:
lft, op, rgt = a
yield self.match_attr(lft, op, rgt, node)
def match_pseudos(self, selector, d):
for p in selector.pseudos:
name, value = p
if name not in self.pseudo_fns:
raise Exception('Selector not implemented: {}'.format(name))
yield self.pseudo_fns[name](self, d, value)
def _iter_data(self, data):
for tupl in self.iter_data(data):
if len(tupl) != 2:
raise Exception(
'The iter_data method must yield pair tuples containing '
'the node and its body (empty if not available)')
yield tupl
def match_type(self, typ, node):
raise NotImplementedError
def match_id(self, id_, node):
raise NotImplementedError
def match_attr(self, lft, op, rgt, no):
raise NotImplementedError
def iter_data(self, data):
raise NotImplementedError
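# Illustrative only (not part of this module): a minimal MatchEngine subclass
# that matches selectors against nested dicts. The node layout ('type', 'id'
# and 'children' keys) and the simplified attribute test are assumptions made
# for this sketch; a real engine would also honour the attribute operator.
class DictMatchEngine(MatchEngine):
    def iter_data(self, data):
        # Yield the (node, body) pairs required by _iter_data().
        for node in data:
            yield node, node.get('children', [])
    def match_type(self, typ, node):
        return node.get('type') == typ
    def match_id(self, id_, node):
        return node.get('id') == id_
    def match_attr(self, lft, op, rgt, no):
        # Simplified: only equality is checked, `op` is ignored.
        return no.get(lft) == rgt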
|
mit
| 5,607,412,709,546,960,000
| 30.951923
| 79
| 0.563948
| false
| 4.227735
| false
| false
| false
|
beyoungwoo/C_glibc_Sample
|
_Algorithm/ProjectEuler_python/euler_42.py
|
1
|
2038
|
#!/usr/bin/python -Wall
# -*- coding: utf-8 -*-
"""
<div id="content">
<div style="text-align:center;" class="print"><img src="images/print_page_logo.png" alt="projecteuler.net" style="border:none;" /></div>
<h2>Coded triangle numbers</h2><div id="problem_info" class="info"><h3>Problem 42</h3><span>Published on Friday, 25th April 2003, 06:00 pm; Solved by 46003; Difficulty rating: 5%</span></div>
<div class="problem_content" role="problem">
<p>The <i>n</i><sup>th</sup> term of the sequence of triangle numbers is given by, <i>t<sub>n</sub></i> = ½<i>n</i>(<i>n</i>+1); so the first ten triangle numbers are:</p>
<p style="text-align:center;">1, 3, 6, 10, 15, 21, 28, 36, 45, 55, ...</p>
<p>By converting each letter in a word to a number corresponding to its alphabetical position and adding these values we form a word value. For example, the word value for SKY is 19 + 11 + 25 = 55 = <i>t</i><sub>10</sub>. If the word value is a triangle number then we shall call the word a triangle word.</p>
<p>Using <a href="project/resources/p042_words.txt">words.txt</a> (right click and 'Save Link/Target As...'), a 16K text file containing nearly two-thousand common English words, how many are triangle words?</p>
</div><br />
<br /></div>
"""
import re
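# Worked example (added commentary): SKY -> 19 + 11 + 25 = 55 = t10, so "SKY"
# is a triangle word. The code below computes each word value as
# sum(ord(c) for c in word) - 64 * len(word), since ord('A') == 65 maps
# 'A'..'Z' to 1..26, and then checks membership in the precomputed triangle
# numbers t_n = n*(n+1)/2.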
tri=[ ]
for i in range(1, 50):
res = (i*(i+1)/2)
tri.append(res)
def is_triangle(num):
global tri
tri_len = len(tri)
for i in range(0, tri_len):
if (num == tri[i]):
return True
elif (num < tri[i]):
return False
return False
count = 0
fread = open("p42words.txt", "r")
for line in fread:
text = re.split("\"", line)
total_text = list(text)
len_t = len(total_text)
for i in range(0, len_t):
if total_text[i].startswith(','):
continue
ret = [ord(c) for c in total_text[i]]
len_ret = len(ret)
if (is_triangle(sum(ret) - (64 * len_ret)) == True):
count += 1
print total_text[i], sum(ret) - (64 * len_ret)
print "total=", count
#a = 'hi'
#print [ord(c) for c in a]
|
gpl-3.0
| -5,037,118,179,064,823,000
| 37.45283
| 309
| 0.626104
| false
| 2.903134
| false
| false
| false
|
Squishymedia/feedingdb
|
src/feeddb/feed/migrations/0058_muscleowl_emg_sono.py
|
1
|
40048
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'SonoSensor.muscle'
db.add_column(u'feed_sonosensor', 'muscle',
self.gf('django.db.models.fields.related.ForeignKey')(to=orm['feed.MuscleOwl'], null=True),
keep_default=False)
# Adding field 'EmgSensor.muscle'
db.add_column(u'feed_emgsensor', 'muscle',
self.gf('django.db.models.fields.related.ForeignKey')(to=orm['feed.MuscleOwl'], null=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'SonoSensor.muscle'
db.delete_column(u'feed_sonosensor', 'muscle_id')
# Deleting field 'EmgSensor.muscle'
db.delete_column(u'feed_emgsensor', 'muscle_id')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'feed.ageunit': {
'Meta': {'ordering': "['label']", 'object_name': 'AgeUnit'},
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'ageunit_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
u'feed.anatomicallocation': {
'Meta': {'ordering': "['label']", 'object_name': 'AnatomicalLocation'},
'category': ('django.db.models.fields.IntegerField', [], {}),
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'anatomicallocation_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
u'feed.anteriorposterioraxis': {
'Meta': {'object_name': 'AnteriorPosteriorAxis'},
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'anteriorposterioraxis_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
u'feed.behavior': {
'Meta': {'ordering': "['label']", 'object_name': 'Behavior'},
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'behavior_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
u'feed.behaviorowl': {
'Meta': {'object_name': 'BehaviorOwl'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'obo_definition': ('django.db.models.fields.TextField', [], {}),
'rdfs_comment': ('django.db.models.fields.TextField', [], {}),
'rdfs_subClassOf_ancestors': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['feed.BehaviorOwl']", 'symmetrical': 'False'}),
'uri': ('django.db.models.fields.CharField', [], {'max_length': '1500'})
},
u'feed.channel': {
'Meta': {'object_name': 'Channel'},
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'channel_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'rate': ('django.db.models.fields.IntegerField', [], {}),
'setup': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.Setup']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
u'feed.channellineup': {
'Meta': {'ordering': "['position']", 'object_name': 'ChannelLineup'},
'channel': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.Channel']", 'null': 'True', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'channellineup_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'position': ('django.db.models.fields.IntegerField', [], {}),
'session': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.Session']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
u'feed.depthaxis': {
'Meta': {'object_name': 'DepthAxis'},
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'depthaxis_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
u'feed.developmentstage': {
'Meta': {'ordering': "['label']", 'object_name': 'DevelopmentStage'},
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'developmentstage_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
u'feed.dorsalventralaxis': {
'Meta': {'object_name': 'DorsalVentralAxis'},
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'dorsalventralaxis_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
u'feed.electrodetype': {
'Meta': {'ordering': "['label']", 'object_name': 'ElectrodeType'},
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'electrodetype_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
u'feed.emgchannel': {
'Meta': {'object_name': 'EmgChannel', '_ormbases': [u'feed.Channel']},
u'channel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['feed.Channel']", 'unique': 'True', 'primary_key': 'True'}),
'emg_amplification': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'emg_filtering': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.Emgfiltering']"}),
'sensor': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.EmgSensor']"}),
'unit': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.Unit']"})
},
u'feed.emgfiltering': {
'Meta': {'ordering': "['label']", 'object_name': 'Emgfiltering'},
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'emgfiltering_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
u'feed.emgsensor': {
'Meta': {'ordering': "['id']", 'object_name': 'EmgSensor', '_ormbases': [u'feed.Sensor']},
'axisdepth': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.DepthAxis']", 'null': 'True', 'blank': 'True'}),
'electrode_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.ElectrodeType']", 'null': 'True', 'blank': 'True'}),
'location_controlled': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.AnatomicalLocation']"}),
'muscle': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.MuscleOwl']", 'null': 'True'}),
u'sensor_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['feed.Sensor']", 'unique': 'True', 'primary_key': 'True'})
},
u'feed.emgsetup': {
'Meta': {'object_name': 'EmgSetup', '_ormbases': [u'feed.Setup']},
'preamplifier': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
u'setup_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['feed.Setup']", 'unique': 'True', 'primary_key': 'True'})
},
u'feed.eventchannel': {
'Meta': {'object_name': 'EventChannel', '_ormbases': [u'feed.Channel']},
u'channel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['feed.Channel']", 'unique': 'True', 'primary_key': 'True'}),
'unit': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
u'feed.eventsetup': {
'Meta': {'object_name': 'EventSetup', '_ormbases': [u'feed.Setup']},
u'setup_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['feed.Setup']", 'unique': 'True', 'primary_key': 'True'})
},
u'feed.experiment': {
'Meta': {'object_name': 'Experiment'},
'accession': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'bookkeeping': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'experiment_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'end': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'impl_notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'start': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'study': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.Study']"}),
'subj_age': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '19', 'decimal_places': '5', 'blank': 'True'}),
'subj_ageunit': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.AgeUnit']", 'null': 'True', 'blank': 'True'}),
'subj_devstage': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.DevelopmentStage']"}),
'subj_tooth': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'subj_weight': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '19', 'decimal_places': '5', 'blank': 'True'}),
'subject': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.Subject']"}),
'subject_notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
u'feed.forcechannel': {
'Meta': {'object_name': 'ForceChannel', '_ormbases': [u'feed.Channel']},
u'channel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['feed.Channel']", 'unique': 'True', 'primary_key': 'True'}),
'sensor': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.ForceSensor']"}),
'unit': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.Unit']", 'null': 'True'})
},
u'feed.forcesensor': {
'Meta': {'object_name': 'ForceSensor', '_ormbases': [u'feed.Sensor']},
u'sensor_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['feed.Sensor']", 'unique': 'True', 'primary_key': 'True'})
},
u'feed.forcesetup': {
'Meta': {'object_name': 'ForceSetup', '_ormbases': [u'feed.Setup']},
u'setup_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['feed.Setup']", 'unique': 'True', 'primary_key': 'True'})
},
u'feed.illustration': {
'Meta': {'object_name': 'Illustration'},
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'illustration_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
'experiment': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.Experiment']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'picture': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'setup': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.Setup']", 'null': 'True', 'blank': 'True'}),
'subject': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.Subject']", 'null': 'True', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
u'feed.kinematicschannel': {
'Meta': {'object_name': 'KinematicsChannel', '_ormbases': [u'feed.Channel']},
u'channel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['feed.Channel']", 'unique': 'True', 'primary_key': 'True'}),
'sensor': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.KinematicsSensor']"}),
'unit': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.Unit']", 'null': 'True'})
},
u'feed.kinematicssensor': {
'Meta': {'object_name': 'KinematicsSensor', '_ormbases': [u'feed.Sensor']},
u'sensor_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['feed.Sensor']", 'unique': 'True', 'primary_key': 'True'})
},
u'feed.kinematicssetup': {
'Meta': {'object_name': 'KinematicsSetup', '_ormbases': [u'feed.Setup']},
u'setup_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['feed.Setup']", 'unique': 'True', 'primary_key': 'True'})
},
u'feed.mediallateralaxis': {
'Meta': {'object_name': 'MedialLateralAxis'},
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'mediallateralaxis_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
u'feed.muscleowl': {
'Meta': {'object_name': 'MuscleOwl'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'obo_definition': ('django.db.models.fields.TextField', [], {}),
'rdfs_comment': ('django.db.models.fields.TextField', [], {}),
'rdfs_subClassOf_ancestors': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['feed.MuscleOwl']", 'symmetrical': 'False'}),
'uri': ('django.db.models.fields.CharField', [], {'max_length': '1500'})
},
u'feed.pressurechannel': {
'Meta': {'object_name': 'PressureChannel', '_ormbases': [u'feed.Channel']},
u'channel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['feed.Channel']", 'unique': 'True', 'primary_key': 'True'}),
'sensor': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.PressureSensor']"}),
'unit': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.Unit']", 'null': 'True'})
},
u'feed.pressuresensor': {
'Meta': {'object_name': 'PressureSensor', '_ormbases': [u'feed.Sensor']},
u'sensor_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['feed.Sensor']", 'unique': 'True', 'primary_key': 'True'})
},
u'feed.pressuresetup': {
'Meta': {'object_name': 'PressureSetup', '_ormbases': [u'feed.Setup']},
u'setup_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['feed.Setup']", 'unique': 'True', 'primary_key': 'True'})
},
u'feed.proximaldistalaxis': {
'Meta': {'object_name': 'ProximalDistalAxis'},
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'proximaldistalaxis_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
u'feed.restraint': {
'Meta': {'ordering': "['label']", 'object_name': 'Restraint'},
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'restraint_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
u'feed.sensor': {
'Meta': {'object_name': 'Sensor'},
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'sensor_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'loc_ap': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.AnteriorPosteriorAxis']", 'null': 'True', 'blank': 'True'}),
'loc_dv': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.DorsalVentralAxis']", 'null': 'True', 'blank': 'True'}),
'loc_ml': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.MedialLateralAxis']", 'null': 'True', 'blank': 'True'}),
'loc_pd': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.ProximalDistalAxis']", 'null': 'True', 'blank': 'True'}),
'loc_side': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.Side']"}),
'location_freetext': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'setup': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.Setup']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
u'feed.session': {
'Meta': {'ordering': "['position']", 'object_name': 'Session'},
'accession': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'bookkeeping': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'channels': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['feed.Channel']", 'through': u"orm['feed.ChannelLineup']", 'symmetrical': 'False'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'session_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
'end': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'experiment': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.Experiment']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'position': ('django.db.models.fields.IntegerField', [], {}),
'start': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'subj_anesthesia_sedation': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'subj_notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'subj_restraint': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.Restraint']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
u'feed.setup': {
'Meta': {'object_name': 'Setup'},
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'setup_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
'experiment': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.Experiment']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'sampling_rate': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'technique': ('django.db.models.fields.IntegerField', [], {}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
u'feed.side': {
'Meta': {'ordering': "['label']", 'object_name': 'Side'},
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'side_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
u'feed.sonochannel': {
'Meta': {'object_name': 'SonoChannel', '_ormbases': [u'feed.Channel']},
u'channel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['feed.Channel']", 'unique': 'True', 'primary_key': 'True'}),
'crystal1': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'crystals1_related'", 'to': u"orm['feed.SonoSensor']"}),
'crystal2': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'crystals2_related'", 'to': u"orm['feed.SonoSensor']"}),
'unit': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.Unit']"})
},
u'feed.sonosensor': {
'Meta': {'object_name': 'SonoSensor', '_ormbases': [u'feed.Sensor']},
'axisdepth': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.DepthAxis']", 'null': 'True', 'blank': 'True'}),
'location_controlled': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.AnatomicalLocation']"}),
'muscle': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.MuscleOwl']", 'null': 'True'}),
u'sensor_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['feed.Sensor']", 'unique': 'True', 'primary_key': 'True'})
},
u'feed.sonosetup': {
'Meta': {'object_name': 'SonoSetup', '_ormbases': [u'feed.Setup']},
u'setup_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['feed.Setup']", 'unique': 'True', 'primary_key': 'True'}),
'sonomicrometer': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
u'feed.strainchannel': {
'Meta': {'object_name': 'StrainChannel', '_ormbases': [u'feed.Channel']},
u'channel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['feed.Channel']", 'unique': 'True', 'primary_key': 'True'}),
'sensor': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.StrainSensor']"}),
'unit': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.Unit']", 'null': 'True'})
},
u'feed.strainsensor': {
'Meta': {'object_name': 'StrainSensor', '_ormbases': [u'feed.Sensor']},
u'sensor_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['feed.Sensor']", 'unique': 'True', 'primary_key': 'True'})
},
u'feed.strainsetup': {
'Meta': {'object_name': 'StrainSetup', '_ormbases': [u'feed.Setup']},
u'setup_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['feed.Setup']", 'unique': 'True', 'primary_key': 'True'})
},
u'feed.study': {
'Meta': {'ordering': "['title']", 'object_name': 'Study'},
'accession': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'approval_secured': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'bookkeeping': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'study_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
'description': ('django.db.models.fields.TextField', [], {}),
'end': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'funding_agency': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'resources': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'start': ('django.db.models.fields.DateTimeField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
u'feed.studyprivate': {
'Meta': {'object_name': 'StudyPrivate'},
'approval': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'studyprivate_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
'funding': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lab': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'organization': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'pi': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'study': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.Study']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
u'feed.subject': {
'Meta': {'object_name': 'Subject'},
'breed': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'subject_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'sex': ('django.db.models.fields.CharField', [], {'max_length': '2', 'null': 'True', 'blank': 'True'}),
'source': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'study': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.Study']"}),
'taxon': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.Taxon']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
u'feed.taxon': {
'Meta': {'ordering': "['genus']", 'object_name': 'Taxon'},
'common_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'taxon_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
'genus': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'species': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
u'feed.trial': {
'Meta': {'object_name': 'Trial'},
'accession': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'behavior_notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'behavior_primary': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.Behavior']"}),
'behavior_secondary': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'bookkeeping': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'trial_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
'data_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'end': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'estimated_duration': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'food_property': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'food_size': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'food_type': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'position': ('django.db.models.fields.IntegerField', [], {}),
'session': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.Session']"}),
'start': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'subj_notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'subj_treatment': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {}),
'waveform_picture': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'})
},
u'feed.unit': {
'Meta': {'ordering': "['technique', 'label']", 'object_name': 'Unit'},
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'unit_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'technique': ('django.db.models.fields.IntegerField', [], {}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
}
}
complete_apps = ['feed']
|
gpl-3.0
| -4,522,338,344,012,879,000
| 81.40535
| 195
| 0.546519
| false
| 3.521632
| false
| false
| false
|
alphagov/notifications-api
|
migrations/versions/0321_drop_postage_constraints.py
|
1
|
2423
|
"""
Revision ID: 0321_drop_postage_constraints
Revises: 0320_optimise_notifications
Create Date: 2020-06-08 11:48:53.315768
"""
import os
from alembic import op
revision = '0321_drop_postage_constraints'
down_revision = '0320_optimise_notifications'
environment = os.environ['NOTIFY_ENVIRONMENT']
def upgrade():
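    # The postage CHECK constraints are only dropped outside the live/production environments;
    # IF EXISTS makes each statement safe to re-run.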
if environment not in ["live", "production"]:
op.execute('ALTER TABLE notifications DROP CONSTRAINT IF EXISTS chk_notifications_postage_null')
op.execute('ALTER TABLE notification_history DROP CONSTRAINT IF EXISTS chk_notification_history_postage_null')
op.execute('ALTER TABLE templates DROP CONSTRAINT IF EXISTS chk_templates_postage')
op.execute('ALTER TABLE templates_history DROP CONSTRAINT IF EXISTS chk_templates_history_postage')
def downgrade():
# The downgrade command must not be run in production - it will lock the tables for a long time
if environment not in ["live", "production"]:
op.execute("""
ALTER TABLE notifications ADD CONSTRAINT "chk_notifications_postage_null"
CHECK (
CASE WHEN notification_type = 'letter' THEN
postage is not null and postage in ('first', 'second')
ELSE
postage is null
END
)
""")
op.execute("""
ALTER TABLE notification_history ADD CONSTRAINT "chk_notification_history_postage_null"
CHECK (
CASE WHEN notification_type = 'letter' THEN
postage is not null and postage in ('first', 'second')
ELSE
postage is null
END
)
""")
op.execute("""
ALTER TABLE templates ADD CONSTRAINT "chk_templates_postage"
CHECK (
CASE WHEN template_type = 'letter' THEN
postage is not null and postage in ('first', 'second')
ELSE
postage is null
END
)
""")
op.execute("""
ALTER TABLE templates_history ADD CONSTRAINT "chk_templates_history_postage"
CHECK (
CASE WHEN template_type = 'letter' THEN
postage is not null and postage in ('first', 'second')
ELSE
postage is null
END
)
""")
|
mit
| 3,433,414,074,737,906,700
| 34.115942
| 118
| 0.574907
| false
| 4.723197
| false
| false
| false
|
jayc0b0/Projects
|
Python/Security/caesarCypher.py
|
1
|
1545
|
# Jacob Orner (jayc0b0)
# Caesar Cypher script
def main():
# Declare variables and take input
global alphabet
alphabet = ['a', 'b', 'c', 'd', 'e', 'f',
'g', 'h', 'i', 'j', 'k', 'l',
'm', 'n', 'o', 'p', 'q', 'r',
's', 't', 'u', 'v', 'w', 'x',
'y', 'z']
global messageArray
messageArray = []
    choice = int(raw_input("Enter 1 to encode. 2 to decode.\n>> "))
if choice == 1:
encode()
elif choice == 2:
pass # Implement decode() and add here
else:
print "Invalid choice"
main()
def encode():
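    # Prompt for a message and a shift amount, then print the Caesar-shifted text.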
message = raw_input("Enter a string to encode (letters only):\n>> ")
    cypherShift = int(raw_input("Enter an integer from 1-25 for the shift:\n>> "))
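    # Example: with a shift of 3, 'abc' encodes to 'def' and 'z' wraps around to 'c'.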
# Verify input
    if not message.isalpha():
        print "Please enter only letters into your message."
        return main()
    if cypherShift < 1 or cypherShift > 25:
        print "Invalid number. Please enter a valid shift value."
        return main()
    # Break the string into a list of letters and shift each one,
    # wrapping around the 26-letter alphabet with modulo arithmetic
    messageArray = list(message.lower())
    for i, letter in enumerate(messageArray):
        messageArray[i] = alphabet[(alphabet.index(letter) + cypherShift) % 26]
    # Output cyphered text
    message = "".join(messageArray)
print "Your cyphered message is:"
print message
main()
|
mit
| 5,803,349,331,164,338,000
| 26.105263
| 81
| 0.552104
| false
| 3.669834
| false
| false
| false
|
chrispitzer/toucan-sam
|
toucansam/core/models.py
|
1
|
5910
|
import re
from urlparse import urlparse, parse_qs
from datetime import timedelta
from django.core.urlresolvers import reverse
from django.core.validators import MinValueValidator, MaxValueValidator
from django.db import models
from django.utils.safestring import mark_safe
from durationfield.db.models.fields.duration import DurationField
class ActiveSongsManager(models.Manager):
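    # Restricts the default queryset to songs marked active=True.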
def get_query_set(self):
qs = super(ActiveSongsManager, self).get_query_set()
qs = qs.filter(active=True)
return qs
class Song(models.Model):
title = models.CharField(max_length=255, blank=True)
short_title = models.CharField(max_length=30, blank=True)
artist = models.CharField(max_length=255, blank=True)
key = models.CharField(max_length=25, blank=True)
singers = models.CharField(max_length=255, blank=True)
cheat_sheet = models.CharField(max_length=255, blank=True)
lyrics_with_chords = models.TextField(blank=True)
video_link = models.URLField(max_length=255, blank=True)
run_time = DurationField(default=2*60*1000000) # default: two minutes
difficulty = models.IntegerField(default=3,
choices=(
(1, 'Real Easy'),
(2, 'Easy'),
(3, 'Normal'),
(4, 'Hard'),
(5, 'Real Hard'),
),
validators=[
MinValueValidator(1),
MaxValueValidator(5),
])
proposed = models.BooleanField(default=True)
active = models.BooleanField(default=True)
objects = models.Manager()
active_objects = ActiveSongsManager()
def save(self, *args, **kwargs):
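        # Default short_title to the last 30 characters of title when it has not been set.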
if not self.short_title and self.title:
self.short_title = self.title[-30:]
super(Song, self).save(*args, **kwargs)
@property
def milliseconds(self):
return self.run_time.total_seconds() * 1000
@property
def column_width(self):
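        # Length, in characters, of the longest line in lyrics_with_chords.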
return reduce(lambda a, b: max(a, len(b)), re.split("[\r\n]+", self.lyrics_with_chords), 0)
@property
def lyrics_formatted(self):
"""
Assumes that lyrics with chords interleaves lines with chords and lines with lyrics
"""
def tokenize(s):
return re.split(r'(\w+)', s)
def chordify(chord, cssclass="chord"):
return '<span class="{}">{}</span>'.format(cssclass, chord)
def lineify(line):
return u"<p>{}</p>".format(line)
output = []
chord_line = None
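        # A chord line contains nothing but chord tokens: a note letter A-G, an optional
        # flat ('b'), an optional quality (m/min/maj) and an optional number, case-insensitively.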
chord_regex = re.compile(r"^(\W*[ABCDEFG]b?(m|min|maj|maj)?\d*\W*)+$", flags=re.IGNORECASE)
for line in re.split("[\r\n]+", self.lyrics_with_chords):
line = line.rstrip()
if chord_regex.match(line):
if chord_line:
formatted_line = ""
for chord in tokenize(chord_line):
if re.match("\W", chord):
formatted_line += chord
else:
formatted_line += chordify(chord, cssclass="chord inline")
output.append(lineify(formatted_line))
chord_line = line
continue
if chord_line:
formatted_line = ""
#make sure line is as long as chords
line = line.ljust(len(chord_line))
#replace spaces at the beginning & end of the line with -- but not the middle!
frontspaces, line, endspaces = re.split(r"(\S[\s\S]*\S|\S)", line)
space = ' '
line = [space]*len(frontspaces) + list(line) + [space]*len(endspaces)
chords = tokenize(chord_line)
for chord in chords:
l = len(chord)
if not (chord+" ").isspace():
formatted_line += chordify(chord)
formatted_line += "".join(line[:l])
line = line[l:]
line = formatted_line + "".join(line)
chord_line = None
output.append(lineify(line))
return mark_safe(u"\n".join(output)) # todo: sanitize input
def has_no_lyrics(self):
return len(self.lyrics_with_chords) < 50
def youtube_video_id(self):
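        # Pull the 'v' query parameter out of a youtube.com link; returns None for other hosts or unparseable URLs.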
try:
parsed = urlparse(self.video_link)
if parsed.netloc.endswith('youtube.com'):
query = parse_qs(parsed.query)
return query.get('v', [None])[0]
except:
return None
def __unicode__(self):
return self.title
def get_absolute_url(self):
return reverse('song', args=[self.id])
class Meta:
ordering = ["title"]
class Gig(models.Model):
name = models.CharField(max_length=255)
date = models.DateTimeField(null=True)
def __unicode__(self):
return self.name or "undefined"
class SetItem(models.Model):
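    # Through model that records the position of each song within a set list.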
song = models.ForeignKey(Song, related_name='setitems')
set_list = models.ForeignKey("SetList", related_name='setitems')
order = models.IntegerField()
class SetList(models.Model):
gig = models.ForeignKey(Gig)
songs = models.ManyToManyField(Song, related_name="set_lists", through=SetItem)
show_proposed = models.BooleanField(default=False)
@property
def name(self):
return self.gig.name
@property
def ordered_songs(self):
return self.songs.order_by('setitems__order')
@property
def run_time(self):
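        # run_time is stored in microseconds by DurationField, so the Sum aggregate yields total microseconds.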
microseconds = int(self.songs.aggregate(s=models.Sum('run_time'))['s'])
return timedelta(microseconds=microseconds)
def __unicode__(self):
return self.name or "undefined"
|
gpl-2.0
| 8,150,954,551,318,501,000
| 34.39521
| 101
| 0.558545
| false
| 3.998647
| false
| false
| false
|