sentence1
stringlengths 52
3.87M
| sentence2
stringlengths 1
47.2k
| label
stringclasses 1
value |
|---|---|---|
def cublasDgemv(handle, trans, m, n, alpha, A, lda, x, incx, beta, y, incy):
    """Matrix-vector product y = alpha*op(A)*x + beta*y for a real general matrix (double precision)."""
    # Scalars are passed by reference as host pointers (cuBLAS v2 convention).
    alpha_ref = ctypes.byref(ctypes.c_double(alpha))
    beta_ref = ctypes.byref(ctypes.c_double(beta))
    status = _libcublas.cublasDgemv_v2(handle, _CUBLAS_OP[trans], m, n,
                                       alpha_ref, int(A), lda, int(x), incx,
                                       beta_ref, int(y), incy)
    cublasCheckStatus(status)
|
Matrix-vector product for real general matrix.
|
entailment
|
def cublasCgemv(handle, trans, m, n, alpha, A, lda, x, incx, beta, y, incy):
    """Matrix-vector product y = alpha*op(A)*x + beta*y for a complex general matrix (single precision)."""
    alpha_ref = ctypes.byref(cuda.cuFloatComplex(alpha.real, alpha.imag))
    beta_ref = ctypes.byref(cuda.cuFloatComplex(beta.real, beta.imag))
    status = _libcublas.cublasCgemv_v2(handle, _CUBLAS_OP[trans], m, n,
                                       alpha_ref, int(A), lda, int(x), incx,
                                       beta_ref, int(y), incy)
    cublasCheckStatus(status)
|
Matrix-vector product for complex general matrix.
|
entailment
|
def cublasZgemv(handle, trans, m, n, alpha, A, lda, x, incx, beta, y, incy):
    """Matrix-vector product y = alpha*op(A)*x + beta*y for a complex general matrix (double precision)."""
    alpha_ref = ctypes.byref(cuda.cuDoubleComplex(alpha.real, alpha.imag))
    beta_ref = ctypes.byref(cuda.cuDoubleComplex(beta.real, beta.imag))
    status = _libcublas.cublasZgemv_v2(handle, _CUBLAS_OP[trans], m, n,
                                       alpha_ref, int(A), lda, int(x), incx,
                                       beta_ref, int(y), incy)
    cublasCheckStatus(status)
|
Matrix-vector product for complex general matrix.
|
entailment
|
def cublasSger(handle, m, n, alpha, x, incx, y, incy, A, lda):
    """Rank-1 update A = alpha*x*y**T + A on a real general matrix (single precision)."""
    alpha_ref = ctypes.byref(ctypes.c_float(alpha))
    status = _libcublas.cublasSger_v2(handle, m, n, alpha_ref,
                                      int(x), incx, int(y), incy,
                                      int(A), lda)
    cublasCheckStatus(status)
|
Rank-1 operation on real general matrix.
|
entailment
|
def cublasDger(handle, m, n, alpha, x, incx, y, incy, A, lda):
    """Rank-1 update A = alpha*x*y**T + A on a real general matrix (double precision)."""
    alpha_ref = ctypes.byref(ctypes.c_double(alpha))
    status = _libcublas.cublasDger_v2(handle, m, n, alpha_ref,
                                      int(x), incx, int(y), incy,
                                      int(A), lda)
    cublasCheckStatus(status)
|
Rank-1 operation on real general matrix.
|
entailment
|
def cublasCgeru(handle, m, n, alpha, x, incx, y, incy, A, lda):
    """Unconjugated rank-1 update on a complex general matrix (single precision)."""
    alpha_ref = ctypes.byref(cuda.cuFloatComplex(alpha.real, alpha.imag))
    status = _libcublas.cublasCgeru_v2(handle, m, n, alpha_ref,
                                       int(x), incx, int(y), incy,
                                       int(A), lda)
    cublasCheckStatus(status)
|
Rank-1 operation on complex general matrix.
|
entailment
|
def cublasZgerc(handle, m, n, alpha, x, incx, y, incy, A, lda):
    """Conjugated rank-1 update on a complex general matrix (double precision)."""
    alpha_ref = ctypes.byref(cuda.cuDoubleComplex(alpha.real, alpha.imag))
    status = _libcublas.cublasZgerc_v2(handle, m, n, alpha_ref,
                                       int(x), incx, int(y), incy,
                                       int(A), lda)
    cublasCheckStatus(status)
|
Rank-1 operation on complex general matrix.
|
entailment
|
def cublasSsbmv(handle, uplo, n, k, alpha, A, lda, x, incx, beta, y, incy):
    """Matrix-vector product for a real symmetric-banded matrix (single precision)."""
    alpha_ref = ctypes.byref(ctypes.c_float(alpha))
    beta_ref = ctypes.byref(ctypes.c_float(beta))
    status = _libcublas.cublasSsbmv_v2(handle, _CUBLAS_FILL_MODE[uplo], n, k,
                                       alpha_ref, int(A), lda, int(x), incx,
                                       beta_ref, int(y), incy)
    cublasCheckStatus(status)
|
Matrix-vector product for real symmetric-banded matrix.
|
entailment
|
def cublasDsbmv(handle, uplo, n, k, alpha, A, lda, x, incx, beta, y, incy):
    """Matrix-vector product for a real symmetric-banded matrix (double precision)."""
    alpha_ref = ctypes.byref(ctypes.c_double(alpha))
    beta_ref = ctypes.byref(ctypes.c_double(beta))
    status = _libcublas.cublasDsbmv_v2(handle, _CUBLAS_FILL_MODE[uplo], n, k,
                                       alpha_ref, int(A), lda, int(x), incx,
                                       beta_ref, int(y), incy)
    cublasCheckStatus(status)
|
Matrix-vector product for real symmetric-banded matrix.
|
entailment
|
def cublasSspmv(handle, uplo, n, alpha, AP, x, incx, beta, y, incy):
    """
    Matrix-vector product for real symmetric-packed matrix.

    Bug fix: AP is a device pointer to the packed matrix and must be passed
    as an address (int(AP)), not wrapped in ctypes.c_float -- the previous
    code would raise a TypeError (or corrupt the argument) on every call.
    """
    status = _libcublas.cublasSspmv_v2(handle,
                                       _CUBLAS_FILL_MODE[uplo],
                                       n,
                                       ctypes.byref(ctypes.c_float(alpha)),
                                       int(AP),
                                       int(x),
                                       incx,
                                       ctypes.byref(ctypes.c_float(beta)),
                                       int(y),
                                       incy)
    cublasCheckStatus(status)
|
Matrix-vector product for real symmetric-packed matrix.
|
entailment
|
def cublasDspmv(handle, uplo, n, alpha, AP, x, incx, beta, y, incy):
    """
    Matrix-vector product for real symmetric-packed matrix.

    Bug fix: AP is a device pointer to the packed matrix and must be passed
    as an address (int(AP)), not wrapped in ctypes.c_double -- the previous
    code would raise a TypeError (or corrupt the argument) on every call.
    """
    status = _libcublas.cublasDspmv_v2(handle,
                                       _CUBLAS_FILL_MODE[uplo],
                                       n,
                                       ctypes.byref(ctypes.c_double(alpha)),
                                       int(AP),
                                       int(x),
                                       incx,
                                       ctypes.byref(ctypes.c_double(beta)),
                                       int(y),
                                       incy)
    cublasCheckStatus(status)
|
Matrix-vector product for real symmetric-packed matrix.
|
entailment
|
def cublasSspr(handle, uplo, n, alpha, x, incx, AP):
    """Rank-1 update on a real symmetric-packed matrix (single precision)."""
    alpha_ref = ctypes.byref(ctypes.c_float(alpha))
    status = _libcublas.cublasSspr_v2(handle, _CUBLAS_FILL_MODE[uplo], n,
                                      alpha_ref, int(x), incx, int(AP))
    cublasCheckStatus(status)
|
Rank-1 operation on real symmetric-packed matrix.
|
entailment
|
def cublasDspr(handle, uplo, n, alpha, x, incx, AP):
    """Rank-1 update on a real symmetric-packed matrix (double precision)."""
    alpha_ref = ctypes.byref(ctypes.c_double(alpha))
    status = _libcublas.cublasDspr_v2(handle, _CUBLAS_FILL_MODE[uplo], n,
                                      alpha_ref, int(x), incx, int(AP))
    cublasCheckStatus(status)
|
Rank-1 operation on real symmetric-packed matrix.
|
entailment
|
def cublasSspr2(handle, uplo, n, alpha, x, incx, y, incy, AP):
    """Rank-2 update on a real symmetric-packed matrix (single precision)."""
    alpha_ref = ctypes.byref(ctypes.c_float(alpha))
    status = _libcublas.cublasSspr2_v2(handle, _CUBLAS_FILL_MODE[uplo], n,
                                       alpha_ref, int(x), incx,
                                       int(y), incy, int(AP))
    cublasCheckStatus(status)
|
Rank-2 operation on real symmetric-packed matrix.
|
entailment
|
def cublasDspr2(handle, uplo, n, alpha, x, incx, y, incy, AP):
    """Rank-2 update on a real symmetric-packed matrix (double precision)."""
    alpha_ref = ctypes.byref(ctypes.c_double(alpha))
    status = _libcublas.cublasDspr2_v2(handle, _CUBLAS_FILL_MODE[uplo], n,
                                       alpha_ref, int(x), incx,
                                       int(y), incy, int(AP))
    cublasCheckStatus(status)
|
Rank-2 operation on real symmetric-packed matrix.
|
entailment
|
def cublasSsymv(handle, uplo, n, alpha, A, lda, x, incx, beta, y, incy):
    """Matrix-vector product for a real symmetric matrix (single precision)."""
    alpha_ref = ctypes.byref(ctypes.c_float(alpha))
    beta_ref = ctypes.byref(ctypes.c_float(beta))
    status = _libcublas.cublasSsymv_v2(handle, _CUBLAS_FILL_MODE[uplo], n,
                                       alpha_ref, int(A), lda, int(x), incx,
                                       beta_ref, int(y), incy)
    cublasCheckStatus(status)
|
Matrix-vector product for real symmetric matrix.
|
entailment
|
def cublasDsymv(handle, uplo, n, alpha, A, lda, x, incx, beta, y, incy):
    """Matrix-vector product for a real symmetric matrix (double precision)."""
    alpha_ref = ctypes.byref(ctypes.c_double(alpha))
    beta_ref = ctypes.byref(ctypes.c_double(beta))
    status = _libcublas.cublasDsymv_v2(handle, _CUBLAS_FILL_MODE[uplo], n,
                                       alpha_ref, int(A), lda, int(x), incx,
                                       beta_ref, int(y), incy)
    cublasCheckStatus(status)
|
Matrix-vector product for real symmetric matrix.
|
entailment
|
def cublasCsymv(handle, uplo, n, alpha, A, lda, x, incx, beta, y, incy):
    """Matrix-vector product for a complex symmetric matrix (single precision)."""
    alpha_ref = ctypes.byref(cuda.cuFloatComplex(alpha.real, alpha.imag))
    beta_ref = ctypes.byref(cuda.cuFloatComplex(beta.real, beta.imag))
    status = _libcublas.cublasCsymv_v2(handle, _CUBLAS_FILL_MODE[uplo], n,
                                       alpha_ref, int(A), lda, int(x), incx,
                                       beta_ref, int(y), incy)
    cublasCheckStatus(status)
|
Matrix-vector product for complex symmetric matrix.
|
entailment
|
def cublasZsymv(handle, uplo, n, alpha, A, lda, x, incx, beta, y, incy):
    """Matrix-vector product for a complex symmetric matrix (double precision)."""
    alpha_ref = ctypes.byref(cuda.cuDoubleComplex(alpha.real, alpha.imag))
    beta_ref = ctypes.byref(cuda.cuDoubleComplex(beta.real, beta.imag))
    status = _libcublas.cublasZsymv_v2(handle, _CUBLAS_FILL_MODE[uplo], n,
                                       alpha_ref, int(A), lda, int(x), incx,
                                       beta_ref, int(y), incy)
    cublasCheckStatus(status)
|
Matrix-vector product for complex symmetric matrix.
|
entailment
|
def cublasSsyr(handle, uplo, n, alpha, x, incx, A, lda):
    """Rank-1 update on a real symmetric matrix (single precision)."""
    alpha_ref = ctypes.byref(ctypes.c_float(alpha))
    status = _libcublas.cublasSsyr_v2(handle, _CUBLAS_FILL_MODE[uplo], n,
                                      alpha_ref, int(x), incx, int(A), lda)
    cublasCheckStatus(status)
|
Rank-1 operation on real symmetric matrix.
|
entailment
|
def cublasDsyr(handle, uplo, n, alpha, x, incx, A, lda):
    """Rank-1 update on a real symmetric matrix (double precision)."""
    alpha_ref = ctypes.byref(ctypes.c_double(alpha))
    status = _libcublas.cublasDsyr_v2(handle, _CUBLAS_FILL_MODE[uplo], n,
                                      alpha_ref, int(x), incx, int(A), lda)
    cublasCheckStatus(status)
|
Rank-1 operation on real symmetric matrix.
|
entailment
|
def cublasCsyr(handle, uplo, n, alpha, x, incx, A, lda):
    """Rank-1 update on a complex symmetric matrix (single precision)."""
    alpha_ref = ctypes.byref(cuda.cuFloatComplex(alpha.real, alpha.imag))
    status = _libcublas.cublasCsyr_v2(handle, _CUBLAS_FILL_MODE[uplo], n,
                                      alpha_ref, int(x), incx, int(A), lda)
    cublasCheckStatus(status)
|
Rank-1 operation on complex symmetric matrix.
|
entailment
|
def cublasZsyr(handle, uplo, n, alpha, x, incx, A, lda):
    """Rank-1 update on a complex symmetric matrix (double precision)."""
    alpha_ref = ctypes.byref(cuda.cuDoubleComplex(alpha.real, alpha.imag))
    status = _libcublas.cublasZsyr_v2(handle, _CUBLAS_FILL_MODE[uplo], n,
                                      alpha_ref, int(x), incx, int(A), lda)
    cublasCheckStatus(status)
|
Rank-1 operation on complex symmetric matrix.
|
entailment
|
def cublasSsyr2(handle, uplo, n, alpha, x, incx, y, incy, A, lda):
    """Rank-2 update on a real symmetric matrix (single precision)."""
    alpha_ref = ctypes.byref(ctypes.c_float(alpha))
    status = _libcublas.cublasSsyr2_v2(handle, _CUBLAS_FILL_MODE[uplo], n,
                                       alpha_ref, int(x), incx,
                                       int(y), incy, int(A), lda)
    cublasCheckStatus(status)
|
Rank-2 operation on real symmetric matrix.
|
entailment
|
def cublasDsyr2(handle, uplo, n, alpha, x, incx, y, incy, A, lda):
    """Rank-2 update on a real symmetric matrix (double precision)."""
    alpha_ref = ctypes.byref(ctypes.c_double(alpha))
    status = _libcublas.cublasDsyr2_v2(handle, _CUBLAS_FILL_MODE[uplo], n,
                                       alpha_ref, int(x), incx,
                                       int(y), incy, int(A), lda)
    cublasCheckStatus(status)
|
Rank-2 operation on real symmetric matrix.
|
entailment
|
def cublasStbmv(handle, uplo, trans, diag, n, k, A, lda, x, incx):
    """Matrix-vector product for a real triangular-banded matrix (single precision)."""
    fill = _CUBLAS_FILL_MODE[uplo]
    op = _CUBLAS_OP[trans]
    unit = _CUBLAS_DIAG[diag]
    status = _libcublas.cublasStbmv_v2(handle, fill, op, unit,
                                       n, k, int(A), lda, int(x), incx)
    cublasCheckStatus(status)
|
Matrix-vector product for real triangular-banded matrix.
|
entailment
|
def cublasStpmv(handle, uplo, trans, diag, n, AP, x, incx):
    """Matrix-vector product for a real triangular-packed matrix (single precision)."""
    fill = _CUBLAS_FILL_MODE[uplo]
    op = _CUBLAS_OP[trans]
    unit = _CUBLAS_DIAG[diag]
    status = _libcublas.cublasStpmv_v2(handle, fill, op, unit,
                                       n, int(AP), int(x), incx)
    cublasCheckStatus(status)
|
Matrix-vector product for real triangular-packed matrix.
|
entailment
|
def cublasCtpmv(handle, uplo, trans, diag, n, AP, x, incx):
    """Matrix-vector product for a complex triangular-packed matrix (single precision)."""
    fill = _CUBLAS_FILL_MODE[uplo]
    op = _CUBLAS_OP[trans]
    unit = _CUBLAS_DIAG[diag]
    status = _libcublas.cublasCtpmv_v2(handle, fill, op, unit,
                                       n, int(AP), int(x), incx)
    cublasCheckStatus(status)
|
Matrix-vector product for complex triangular-packed matrix.
|
entailment
|
def cublasDtpmv(handle, uplo, trans, diag, n, AP, x, incx):
    """Matrix-vector product for a real triangular-packed matrix (double precision)."""
    fill = _CUBLAS_FILL_MODE[uplo]
    op = _CUBLAS_OP[trans]
    unit = _CUBLAS_DIAG[diag]
    status = _libcublas.cublasDtpmv_v2(handle, fill, op, unit,
                                       n, int(AP), int(x), incx)
    cublasCheckStatus(status)
|
Matrix-vector product for real triangular-packed matrix.
|
entailment
|
def cublasZtpmv(handle, uplo, trans, diag, n, AP, x, incx):
    """Matrix-vector product for a complex triangular-packed matrix (double precision)."""
    fill = _CUBLAS_FILL_MODE[uplo]
    op = _CUBLAS_OP[trans]
    unit = _CUBLAS_DIAG[diag]
    status = _libcublas.cublasZtpmv_v2(handle, fill, op, unit,
                                       n, int(AP), int(x), incx)
    cublasCheckStatus(status)
|
Matrix-vector product for complex triangular-packed matrix.
|
entailment
|
def cublasStpsv(handle, uplo, trans, diag, n, AP, x, incx):
    """Solve a real triangular-packed system with one right-hand side (single precision)."""
    fill = _CUBLAS_FILL_MODE[uplo]
    op = _CUBLAS_OP[trans]
    unit = _CUBLAS_DIAG[diag]
    status = _libcublas.cublasStpsv_v2(handle, fill, op, unit,
                                       n, int(AP), int(x), incx)
    cublasCheckStatus(status)
|
Solve real triangular-packed system with one right-hand side.
|
entailment
|
def cublasDtpsv(handle, uplo, trans, diag, n, AP, x, incx):
    """Solve a real triangular-packed system with one right-hand side (double precision)."""
    fill = _CUBLAS_FILL_MODE[uplo]
    op = _CUBLAS_OP[trans]
    unit = _CUBLAS_DIAG[diag]
    status = _libcublas.cublasDtpsv_v2(handle, fill, op, unit,
                                       n, int(AP), int(x), incx)
    cublasCheckStatus(status)
|
Solve real triangular-packed system with one right-hand side.
|
entailment
|
def cublasCtpsv(handle, uplo, trans, diag, n, AP, x, incx):
    """Solve a complex triangular-packed system with one right-hand side (single precision)."""
    fill = _CUBLAS_FILL_MODE[uplo]
    op = _CUBLAS_OP[trans]
    unit = _CUBLAS_DIAG[diag]
    status = _libcublas.cublasCtpsv_v2(handle, fill, op, unit,
                                       n, int(AP), int(x), incx)
    cublasCheckStatus(status)
|
Solve complex triangular-packed system with one right-hand side.
|
entailment
|
def cublasZtpsv(handle, uplo, trans, diag, n, AP, x, incx):
    """Solve a complex triangular-packed system with one right-hand side (double precision)."""
    fill = _CUBLAS_FILL_MODE[uplo]
    op = _CUBLAS_OP[trans]
    unit = _CUBLAS_DIAG[diag]
    status = _libcublas.cublasZtpsv_v2(handle, fill, op, unit,
                                       n, int(AP), int(x), incx)
    cublasCheckStatus(status)
|
Solve complex triangular-packed system with one right-hand side.
|
entailment
|
def cublasCtrmv(handle, uplo, trans, diag, n, A, lda, x, incx):
    """Matrix-vector product for a complex triangular matrix (single precision)."""
    fill = _CUBLAS_FILL_MODE[uplo]
    op = _CUBLAS_OP[trans]
    unit = _CUBLAS_DIAG[diag]
    status = _libcublas.cublasCtrmv_v2(handle, fill, op, unit,
                                       n, int(A), lda, int(x), incx)
    cublasCheckStatus(status)
|
Matrix-vector product for complex triangular matrix.
|
entailment
|
def cublasDtrmv(handle, uplo, trans, diag, n, A, lda, x, inx):
    """Matrix-vector product for a real triangular matrix (double precision).

    NOTE(review): the stride parameter is named `inx` here rather than the
    `incx` used by the sibling wrappers; kept as-is for keyword-argument
    backward compatibility.
    """
    fill = _CUBLAS_FILL_MODE[uplo]
    op = _CUBLAS_OP[trans]
    unit = _CUBLAS_DIAG[diag]
    status = _libcublas.cublasDtrmv_v2(handle, fill, op, unit,
                                       n, int(A), lda, int(x), inx)
    cublasCheckStatus(status)
|
Matrix-vector product for real triangular matrix.
|
entailment
|
def cublasChpmv(handle, uplo, n, alpha, AP, x, incx, beta, y, incy):
    """Matrix-vector product for a Hermitian-packed matrix (single precision)."""
    alpha_ref = ctypes.byref(cuda.cuFloatComplex(alpha.real, alpha.imag))
    beta_ref = ctypes.byref(cuda.cuFloatComplex(beta.real, beta.imag))
    status = _libcublas.cublasChpmv_v2(handle, _CUBLAS_FILL_MODE[uplo], n,
                                       alpha_ref, int(AP), int(x), incx,
                                       beta_ref, int(y), incy)
    cublasCheckStatus(status)
|
Matrix-vector product for Hermitian-packed matrix.
|
entailment
|
def cublasZhpmv(handle, uplo, n, alpha, AP, x, incx, beta, y, incy):
    """Matrix-vector product for a Hermitian-packed matrix (double precision)."""
    alpha_ref = ctypes.byref(cuda.cuDoubleComplex(alpha.real, alpha.imag))
    beta_ref = ctypes.byref(cuda.cuDoubleComplex(beta.real, beta.imag))
    status = _libcublas.cublasZhpmv_v2(handle, _CUBLAS_FILL_MODE[uplo], n,
                                       alpha_ref, int(AP), int(x), incx,
                                       beta_ref, int(y), incy)
    cublasCheckStatus(status)
|
Matrix-vector product for Hermitian-packed matrix.
|
entailment
|
def cublasCher(handle, uplo, n, alpha, x, incx, A, lda):
    """
    Rank-1 operation on Hermitian matrix.

    Bug fix: cuBLAS expects alpha as a host pointer to a real float
    (the cher alpha scalar is real); previously the raw Python value was
    passed, which ctypes cannot marshal correctly.
    """
    status = _libcublas.cublasCher_v2(handle,
                                      _CUBLAS_FILL_MODE[uplo],
                                      n, ctypes.byref(ctypes.c_float(alpha)),
                                      int(x), incx, int(A), lda)
    cublasCheckStatus(status)
|
Rank-1 operation on Hermitian matrix.
|
entailment
|
def cublasZher(handle, uplo, n, alpha, x, incx, A, lda):
    """
    Rank-1 operation on Hermitian matrix.

    Bug fix: cuBLAS expects alpha as a host pointer to a real double
    (the zher alpha scalar is real); previously the raw Python value was
    passed, which ctypes cannot marshal correctly.
    """
    status = _libcublas.cublasZher_v2(handle,
                                      _CUBLAS_FILL_MODE[uplo],
                                      n, ctypes.byref(ctypes.c_double(alpha)),
                                      int(x), incx, int(A), lda)
    cublasCheckStatus(status)
|
Rank-1 operation on Hermitian matrix.
|
entailment
|
def cublasChpr(handle, uplo, n, alpha, x, incx, AP):
    """Rank-1 update on a Hermitian-packed matrix (single precision; alpha is real)."""
    alpha_ref = ctypes.byref(ctypes.c_float(alpha))
    status = _libcublas.cublasChpr_v2(handle, _CUBLAS_FILL_MODE[uplo], n,
                                      alpha_ref, int(x), incx, int(AP))
    cublasCheckStatus(status)
|
Rank-1 operation on Hermitian-packed matrix.
|
entailment
|
def cublasZhpr(handle, uplo, n, alpha, x, incx, AP):
    """Rank-1 update on a Hermitian-packed matrix (double precision; alpha is real)."""
    alpha_ref = ctypes.byref(ctypes.c_double(alpha))
    status = _libcublas.cublasZhpr_v2(handle, _CUBLAS_FILL_MODE[uplo], n,
                                      alpha_ref, int(x), incx, int(AP))
    cublasCheckStatus(status)
|
Rank-1 operation on Hermitian-packed matrix.
|
entailment
|
def cublasChpr2(handle, uplo, n, alpha, x, incx, y, incy, AP):
    """
    Rank-2 operation on Hermitian-packed matrix.

    Bug fix: the parameter was declared as `inx` while the body referenced
    `incx`, so every call raised NameError. The parameter is now `incx`,
    consistent with the other wrappers.
    """
    status = _libcublas.cublasChpr2_v2(handle,
                                       _CUBLAS_FILL_MODE[uplo],
                                       n, ctypes.byref(cuda.cuFloatComplex(alpha.real,
                                                                           alpha.imag)),
                                       int(x), incx, int(y), incy, int(AP))
    cublasCheckStatus(status)
|
Rank-2 operation on Hermitian-packed matrix.
|
entailment
|
def cublasZhpr2(handle, uplo, n, alpha, x, incx, y, incy, AP):
    """
    Rank-2 operation on Hermitian-packed matrix.

    Bug fix: the parameter was declared as `inx` while the body referenced
    `incx`, so every call raised NameError. The parameter is now `incx`,
    consistent with the other wrappers.
    """
    status = _libcublas.cublasZhpr2_v2(handle,
                                       _CUBLAS_FILL_MODE[uplo],
                                       n, ctypes.byref(cuda.cuDoubleComplex(alpha.real,
                                                                            alpha.imag)),
                                       int(x), incx, int(y), incy, int(AP))
    cublasCheckStatus(status)
|
Rank-2 operation on Hermitian-packed matrix.
|
entailment
|
def cublasSgemm(handle, transa, transb, m, n, k, alpha, A, lda, B, ldb, beta, C, ldc):
    """Matrix-matrix product C = alpha*op(A)*op(B) + beta*C for real matrices (single precision)."""
    alpha_ref = ctypes.byref(ctypes.c_float(alpha))
    beta_ref = ctypes.byref(ctypes.c_float(beta))
    status = _libcublas.cublasSgemm_v2(handle,
                                       _CUBLAS_OP[transa], _CUBLAS_OP[transb],
                                       m, n, k,
                                       alpha_ref, int(A), lda, int(B), ldb,
                                       beta_ref, int(C), ldc)
    cublasCheckStatus(status)
|
Matrix-matrix product for real general matrix.
|
entailment
|
def cublasDgemm(handle, transa, transb, m, n, k, alpha, A, lda, B, ldb, beta, C, ldc):
    """Matrix-matrix product C = alpha*op(A)*op(B) + beta*C for real matrices (double precision)."""
    alpha_ref = ctypes.byref(ctypes.c_double(alpha))
    beta_ref = ctypes.byref(ctypes.c_double(beta))
    status = _libcublas.cublasDgemm_v2(handle,
                                       _CUBLAS_OP[transa], _CUBLAS_OP[transb],
                                       m, n, k,
                                       alpha_ref, int(A), lda, int(B), ldb,
                                       beta_ref, int(C), ldc)
    cublasCheckStatus(status)
|
Matrix-matrix product for real general matrix.
|
entailment
|
def cublasZgemm(handle, transa, transb, m, n, k, alpha, A, lda, B, ldb, beta, C, ldc):
    """Matrix-matrix product C = alpha*op(A)*op(B) + beta*C for complex matrices (double precision)."""
    alpha_ref = ctypes.byref(cuda.cuDoubleComplex(alpha.real, alpha.imag))
    beta_ref = ctypes.byref(cuda.cuDoubleComplex(beta.real, beta.imag))
    status = _libcublas.cublasZgemm_v2(handle,
                                       _CUBLAS_OP[transa], _CUBLAS_OP[transb],
                                       m, n, k,
                                       alpha_ref, int(A), lda, int(B), ldb,
                                       beta_ref, int(C), ldc)
    cublasCheckStatus(status)
|
Matrix-matrix product for complex general matrix.
|
entailment
|
def cublasSsymm(handle, side, uplo, m, n, alpha, A, lda, B, ldb, beta, C, ldc):
    """Matrix-matrix product with a real symmetric matrix (single precision)."""
    alpha_ref = ctypes.byref(ctypes.c_float(alpha))
    beta_ref = ctypes.byref(ctypes.c_float(beta))
    status = _libcublas.cublasSsymm_v2(handle,
                                       _CUBLAS_SIDE_MODE[side],
                                       _CUBLAS_FILL_MODE[uplo],
                                       m, n, alpha_ref,
                                       int(A), lda, int(B), ldb,
                                       beta_ref, int(C), ldc)
    cublasCheckStatus(status)
|
Matrix-matrix product for symmetric matrix.
|
entailment
|
def cublasDsymm(handle, side, uplo, m, n, alpha, A, lda, B, ldb, beta, C, ldc):
    """Matrix-matrix product with a real symmetric matrix (double precision)."""
    alpha_ref = ctypes.byref(ctypes.c_double(alpha))
    beta_ref = ctypes.byref(ctypes.c_double(beta))
    status = _libcublas.cublasDsymm_v2(handle,
                                       _CUBLAS_SIDE_MODE[side],
                                       _CUBLAS_FILL_MODE[uplo],
                                       m, n, alpha_ref,
                                       int(A), lda, int(B), ldb,
                                       beta_ref, int(C), ldc)
    cublasCheckStatus(status)
|
Matrix-matrix product for real symmetric matrix.
|
entailment
|
def cublasCsymm(handle, side, uplo, m, n, alpha, A, lda, B, ldb, beta, C, ldc):
    """Matrix-matrix product with a complex symmetric matrix (single precision)."""
    alpha_ref = ctypes.byref(cuda.cuFloatComplex(alpha.real, alpha.imag))
    beta_ref = ctypes.byref(cuda.cuFloatComplex(beta.real, beta.imag))
    status = _libcublas.cublasCsymm_v2(handle,
                                       _CUBLAS_SIDE_MODE[side],
                                       _CUBLAS_FILL_MODE[uplo],
                                       m, n, alpha_ref,
                                       int(A), lda, int(B), ldb,
                                       beta_ref, int(C), ldc)
    cublasCheckStatus(status)
|
Matrix-matrix product for complex symmetric matrix.
|
entailment
|
def cublasSsyrk(handle, uplo, trans, n, k, alpha, A, lda, beta, C, ldc):
    """Rank-k update on a real symmetric matrix (single precision)."""
    alpha_ref = ctypes.byref(ctypes.c_float(alpha))
    beta_ref = ctypes.byref(ctypes.c_float(beta))
    status = _libcublas.cublasSsyrk_v2(handle,
                                       _CUBLAS_FILL_MODE[uplo],
                                       _CUBLAS_OP[trans],
                                       n, k, alpha_ref, int(A), lda,
                                       beta_ref, int(C), ldc)
    cublasCheckStatus(status)
|
Rank-k operation on real symmetric matrix.
|
entailment
|
def cublasDsyrk(handle, uplo, trans, n, k, alpha, A, lda, beta, C, ldc):
    """
    Rank-k operation on real symmetric matrix.

    Bug fix: this is the double-precision *real* routine, but the scalars
    were wrapped in cuda.cuFloatComplex (wrong type and wrong size, and it
    would also crash on plain float inputs lacking .imag semantics in
    precision). They must be ctypes.c_double, matching cublasDsymm/Dgemm.
    """
    status = _libcublas.cublasDsyrk_v2(handle,
                                       _CUBLAS_FILL_MODE[uplo],
                                       _CUBLAS_OP[trans],
                                       n, k, ctypes.byref(ctypes.c_double(alpha)),
                                       int(A), lda,
                                       ctypes.byref(ctypes.c_double(beta)),
                                       int(C), ldc)
    cublasCheckStatus(status)
|
Rank-k operation on real symmetric matrix.
|
entailment
|
def cublasZsyrk(handle, uplo, trans, n, k, alpha, A, lda, beta, C, ldc):
    """Rank-k update on a complex symmetric matrix (double precision)."""
    alpha_ref = ctypes.byref(cuda.cuDoubleComplex(alpha.real, alpha.imag))
    beta_ref = ctypes.byref(cuda.cuDoubleComplex(beta.real, beta.imag))
    status = _libcublas.cublasZsyrk_v2(handle,
                                       _CUBLAS_FILL_MODE[uplo],
                                       _CUBLAS_OP[trans],
                                       n, k, alpha_ref, int(A), lda,
                                       beta_ref, int(C), ldc)
    cublasCheckStatus(status)
|
Rank-k operation on complex symmetric matrix.
|
entailment
|
def cublasSsyr2k(handle, uplo, trans, n, k, alpha, A, lda, B, ldb, beta, C, ldc):
    """Rank-2k update on a real symmetric matrix (single precision)."""
    alpha_ref = ctypes.byref(ctypes.c_float(alpha))
    beta_ref = ctypes.byref(ctypes.c_float(beta))
    status = _libcublas.cublasSsyr2k_v2(handle,
                                        _CUBLAS_FILL_MODE[uplo],
                                        _CUBLAS_OP[trans],
                                        n, k, alpha_ref,
                                        int(A), lda, int(B), ldb,
                                        beta_ref, int(C), ldc)
    cublasCheckStatus(status)
|
Rank-2k operation on real symmetric matrix.
|
entailment
|
def cublasDsyr2k(handle, uplo, trans, n, k, alpha, A, lda, B, ldb, beta, C, ldc):
    """Rank-2k update on a real symmetric matrix (double precision)."""
    alpha_ref = ctypes.byref(ctypes.c_double(alpha))
    beta_ref = ctypes.byref(ctypes.c_double(beta))
    status = _libcublas.cublasDsyr2k_v2(handle,
                                        _CUBLAS_FILL_MODE[uplo],
                                        _CUBLAS_OP[trans],
                                        n, k, alpha_ref,
                                        int(A), lda, int(B), ldb,
                                        beta_ref, int(C), ldc)
    cublasCheckStatus(status)
|
Rank-2k operation on real symmetric matrix.
|
entailment
|
def cublasStrmm(handle, side, uplo, trans, diag, m, n, alpha, A, lda, B, ldb, C, ldc):
    """Matrix-matrix product with a real triangular matrix (single precision)."""
    alpha_ref = ctypes.byref(ctypes.c_float(alpha))
    status = _libcublas.cublasStrmm_v2(handle,
                                       _CUBLAS_SIDE_MODE[side],
                                       _CUBLAS_FILL_MODE[uplo],
                                       _CUBLAS_OP[trans],
                                       _CUBLAS_DIAG[diag],
                                       m, n, alpha_ref,
                                       int(A), lda, int(B), ldb, int(C), ldc)
    cublasCheckStatus(status)
|
Matrix-matrix product for real triangular matrix.
|
entailment
|
def cublasZtrmm(handle, side, uplo, trans, diag, m, n, alpha, A, lda, B, ldb, C, ldc):
    """Matrix-matrix product with a complex triangular matrix (double precision)."""
    alpha_ref = ctypes.byref(cuda.cuDoubleComplex(alpha.real, alpha.imag))
    status = _libcublas.cublasZtrmm_v2(handle,
                                       _CUBLAS_SIDE_MODE[side],
                                       _CUBLAS_FILL_MODE[uplo],
                                       _CUBLAS_OP[trans],
                                       _CUBLAS_DIAG[diag],
                                       m, n, alpha_ref,
                                       int(A), lda, int(B), ldb, int(C), ldc)
    cublasCheckStatus(status)
|
Matrix-matrix product for complex triangular matrix.
|
entailment
|
def cublasStrsm(handle, side, uplo, trans, diag, m, n, alpha, A, lda, B, ldb):
    """Solve a real triangular system with multiple right-hand sides (single precision)."""
    alpha_ref = ctypes.byref(ctypes.c_float(alpha))
    status = _libcublas.cublasStrsm_v2(handle,
                                       _CUBLAS_SIDE_MODE[side],
                                       _CUBLAS_FILL_MODE[uplo],
                                       _CUBLAS_OP[trans],
                                       _CUBLAS_DIAG[diag],
                                       m, n, alpha_ref,
                                       int(A), lda, int(B), ldb)
    cublasCheckStatus(status)
|
Solve a real triangular system with multiple right-hand sides.
|
entailment
|
def cublasDtrsm(handle, side, uplo, trans, diag, m, n, alpha, A, lda, B, ldb):
    """Solve a real triangular system with multiple right-hand sides (double precision)."""
    alpha_ref = ctypes.byref(ctypes.c_double(alpha))
    status = _libcublas.cublasDtrsm_v2(handle,
                                       _CUBLAS_SIDE_MODE[side],
                                       _CUBLAS_FILL_MODE[uplo],
                                       _CUBLAS_OP[trans],
                                       _CUBLAS_DIAG[diag],
                                       m, n, alpha_ref,
                                       int(A), lda, int(B), ldb)
    cublasCheckStatus(status)
|
Solve a real triangular system with multiple right-hand sides.
|
entailment
|
def cublasZtrsm(handle, side, uplo, transa, diag, m, n, alpha, A, lda, B, ldb):
    """
    Solve complex triangular system with multiple right-hand sides.

    Bug fix: the body referenced an undefined name `trans` while the
    parameter is `transa`, so every call raised NameError. The signature is
    unchanged; the body now uses `transa`.
    """
    status = _libcublas.cublasZtrsm_v2(handle,
                                       _CUBLAS_SIDE_MODE[side],
                                       _CUBLAS_FILL_MODE[uplo],
                                       _CUBLAS_OP[transa],
                                       _CUBLAS_DIAG[diag],
                                       m, n, ctypes.byref(cuda.cuDoubleComplex(alpha.real,
                                                                               alpha.imag)),
                                       int(A), lda, int(B), ldb)
    cublasCheckStatus(status)
|
Solve complex triangular system with multiple right-hand sides.
|
entailment
|
def cublasZherk(handle, uplo, trans, n, k, alpha, A, lda, beta, C, ldc):
    """Rank-k update on a Hermitian matrix (double precision; alpha and beta are real)."""
    alpha_ref = ctypes.byref(ctypes.c_double(alpha))
    beta_ref = ctypes.byref(ctypes.c_double(beta))
    status = _libcublas.cublasZherk_v2(handle,
                                       _CUBLAS_FILL_MODE[uplo],
                                       _CUBLAS_OP[trans],
                                       n, k, alpha_ref, int(A), lda,
                                       beta_ref, int(C), ldc)
    cublasCheckStatus(status)
|
Rank-k operation on Hermitian matrix.
|
entailment
|
def cublasCher2k(handle, uplo, trans, n, k, alpha, A, lda, B, ldb, beta, C, ldc):
    """
    Rank-2k operation on Hermitian matrix.

    Bug fix: per the cuBLAS API, cublasCher2k takes a *real* float beta
    (const float*), not a cuComplex*. Passing a cuFloatComplex only worked
    by accident of struct layout. beta.real is used so existing callers
    that pass a complex beta keep working.
    """
    status = _libcublas.cublasCher2k_v2(handle,
                                        _CUBLAS_FILL_MODE[uplo],
                                        _CUBLAS_OP[trans],
                                        n, k, ctypes.byref(cuda.cuFloatComplex(alpha.real,
                                                                               alpha.imag)),
                                        int(A), lda, int(B), ldb,
                                        ctypes.byref(ctypes.c_float(beta.real)),
                                        int(C), ldc)
    cublasCheckStatus(status)
|
Rank-2k operation on Hermitian matrix.
|
entailment
|
def cublasSdgmm(handle, mode, m, n, A, lda, x, incx, C, ldc):
    """
    Matrix-diagonal matrix product for real general matrix.

    Consistency fix: every other wrapper in this module maps the side
    argument through _CUBLAS_SIDE_MODE; `_CUBLAS_SIDE` appears nowhere else
    and would raise NameError here.
    """
    status = _libcublas.cublasSdgmm(handle,
                                    _CUBLAS_SIDE_MODE[mode],
                                    m, n,
                                    int(A), lda,
                                    int(x), incx,
                                    int(C), ldc)
    cublasCheckStatus(status)
|
Matrix-diagonal matrix product for real general matrix.
|
entailment
|
def load_EROS_lc(filename='lm0010n22323.time'):
    """
    Read an EROS light curve and return its data.

    Parameters
    ----------
    filename : str, optional
        A light-curve filename.

    Returns
    -------
    dates : numpy.ndarray
        An array of dates.
    magnitudes : numpy.ndarray
        An array of magnitudes.
    errors : numpy.ndarray
        An array of magnitude errors.
    """
    file_path = join(dirname(__file__), 'lightcurves', filename)
    data = np.loadtxt(file_path)
    # Columns are: date, magnitude, magnitude error.
    return data[:, 0], data[:, 1], data[:, 2]
|
Read an EROS light curve and return its data.
Parameters
----------
filename : str, optional
A light-curve filename.
Returns
-------
dates : numpy.ndarray
An array of dates.
magnitudes : numpy.ndarray
An array of magnitudes.
errors : numpy.ndarray
An array of magnitudes errors.
|
entailment
|
def load_rf_model():
    """
    Return the UPSILoN random forests classifier.

    The classifier is trained using OGLE and EROS periodic variables
    (Kim et al. 2015).

    Returns
    -------
    clf : sklearn.ensemble.RandomForestClassifier
        The UPSILoN random forests classifier.
    """
    import gzip
    try:
        import cPickle as pickle
    except ImportError:
        # Narrowed from a bare `except:` which would also mask
        # KeyboardInterrupt/SystemExit.
        import pickle

    module_path = dirname(__file__)
    file_path = join(module_path, 'models/rf.model.sub.github.gz')
    # Use a context manager so the gzip handle is always closed.
    with gzip.open(file_path, 'rb') as f:
        if sys.version_info.major >= 3:
            # Model was pickled under Python 2; latin1 round-trips the
            # numpy array bytes safely.
            clf = pickle.load(f, encoding='latin1')
        else:
            clf = pickle.load(f)
    return clf
|
Return the UPSILoN random forests classifier.
The classifier is trained using OGLE and EROS periodic variables
(Kim et al. 2015).
Returns
-------
clf : sklearn.ensemble.RandomForestClassifier
The UPSILoN random forests classifier.
|
entailment
|
def sample_dropout_mask(x, dropout_probability=.5, columns=None, stream=None, target=None,
                        dropout_mask=None, dropout_prob_array=None):
    """ Samples a dropout mask and applies it in place"""
    # The GPU kernel below indexes the array linearly, so x must be
    # C-contiguous.
    assert x.flags.c_contiguous
    if columns is not None:
        # Restrict dropout to a column slice; columns = (start, width)
        # -- presumably, given the extract_columns(x, start, width) call.
        # TODO confirm against extract_columns' signature.
        assert len(columns) == 2
        x_tmp = x
        x = extract_columns(x, columns[0], columns[1])
    shape = x.shape
    if dropout_prob_array is None:
        # Scratch buffer of uniform randoms, one per element of x.
        dropout_prob_array = gpuarray.empty(shape, x.dtype, allocator=memory_pool.allocate)
    sampler.fill_uniform(dropout_prob_array, stream)
    if dropout_mask is None:
        # int8 mask recording which elements were kept/zeroed.
        dropout_mask = gpuarray.empty(shape, np.int8, allocator=memory_pool.allocate)
    # Default target is x itself, i.e. dropout is applied in place.
    if target is None: target = x
    all_kernels['sample_dropout_mask'](
        x, target, dropout_mask, dropout_prob_array,
        np.float32(dropout_probability))
    if columns is not None:
        # Write the (modified) column slice back into the original array.
        insert_columns(x, x_tmp, columns[0])
    # NOTE(review): only the mask is returned; the dropped activations live
    # in `target` (or in x when target is None).
    return dropout_mask
|
Samples a dropout mask and applies it in place
|
entailment
|
def reads(s, filename=None, loader=None, implicit_tuple=True, allow_errors=False):
    """Parse (but do not evaluate) a GCL expression from a string."""
    effective_filename = filename or '<input>'
    effective_loader = loader or default_loader
    return ast.reads(s,
                     filename=effective_filename,
                     loader=effective_loader,
                     implicit_tuple=implicit_tuple,
                     allow_errors=allow_errors)
|
Load but don't evaluate a GCL expression from a string.
|
entailment
|
def read(filename, loader=None, implicit_tuple=True, allow_errors=False):
    """Parse (but do not evaluate) a GCL expression read from a file."""
    with open(filename, 'r') as fp:
        source = fp.read()
    return reads(source,
                 filename=filename,
                 loader=loader,
                 implicit_tuple=implicit_tuple,
                 allow_errors=allow_errors)
|
Load but don't evaluate a GCL expression from a file.
|
entailment
|
def loads(s, filename=None, loader=None, implicit_tuple=True, env=None, schema=None):
    """Load and evaluate a GCL expression from a string.

    Improvements: the mutable default `env={}` is replaced with the None
    sentinel (backward compatible: omitting env behaves identically), and
    the local previously named `ast` no longer shadows the module-level
    `ast` import used elsewhere in this file.
    """
    if env is None:
        env = {}
    tree = reads(s, filename=filename, loader=loader, implicit_tuple=implicit_tuple)
    if not isinstance(env, framework.Environment):
        # For backwards compatibility we accept an Environment object. Otherwise assume it's a dict
        # whose bindings will add/overwrite the default bindings.
        env = framework.Environment(dict(_default_bindings, **env))
    obj = framework.eval(tree, env)
    return mod_schema.validate(obj, schema)
|
Load and evaluate a GCL expression from a string.
|
entailment
|
def load(filename, loader=None, implicit_tuple=True, env={}, schema=None):
    """Load and evaluate a GCL expression read from a file."""
    with open(filename, 'r') as fp:
        source = fp.read()
    return loads(source,
                 filename=filename,
                 loader=loader,
                 implicit_tuple=implicit_tuple,
                 env=env,
                 schema=schema)
|
Load and evaluate a GCL expression from a file.
|
entailment
|
def linear_scheduler_up(init_value, target_value, duration):
    """Generator: increase linearly from init_value to target_value over
    `duration` steps, then stay flat at target_value forever.

    Consistency/bug fix: divide by float(duration) like
    linear_scheduler_up_down does -- without it the ramp collapses to
    integer steps (or zero) under Python 2 integer division.
    """
    value = init_value
    t = 0
    while True:
        yield value
        t += 1
        if t < duration:
            value = init_value + t * (target_value - init_value) / float(duration)
        else:
            value = target_value
|
Increases linearly and then stays flat
|
entailment
|
def linear_scheduler_up_down(init_value, target_value, final_value,
                             duration_up, t_decrease, duration_down):
    """Generator: ramp linearly from init_value to target_value over
    duration_up steps, hold at target_value until step t_decrease, then
    ramp linearly down toward final_value over duration_down steps.
    """
    step = 0
    current = init_value
    while True:
        yield current
        step += 1
        if step < duration_up:
            # Rising phase.
            current = init_value + step * (target_value - init_value) / \
                float(duration_up)
        elif step > t_decrease:
            # Falling phase.
            current = target_value - (step - t_decrease) * \
                (target_value - final_value) / \
                float(duration_down)
        else:
            # Plateau.
            current = target_value
|
Increases linearly to target_value, stays at target_value until
t_decrease and then decreases linearly
|
entailment
|
def load(stream, overrides=None, **kwargs):
    """
    Loads a YAML configuration from a string or file-like object.
    Parameters
    ----------
    stream : str or object
        Either a string containing valid YAML or a file-like object
        supporting the .read() interface.
    overrides : dict, optional
        A dictionary containing overrides to apply. The location of
        the override is specified in the key as a dot-delimited path
        to the desired parameter, e.g. "model.corruptor.corruption_level".
    Returns
    -------
    graph : dict or object
        The dictionary or object (if the top-level element specified an
        Python object to instantiate).
    Notes
    -----
    Other keyword arguments are passed on to `yaml.load`.
    """
    global is_initialized
    if not is_initialized:
        initialize()
    # `basestring` only exists on Python 2; fall back to str on Python 3 so
    # this module works on both (the rest of the file supports both).
    try:
        string_types = basestring
    except NameError:
        string_types = str
    if isinstance(stream, string_types):
        string = stream
    else:
        # Bug fix: '\n'.join(stream.readlines()) doubled every newline,
        # since readlines() keeps the trailing '\n' on each line.
        string = stream.read()
    # NOTE(review): yaml.load on untrusted input is unsafe; callers should
    # pass Loader=yaml.SafeLoader via kwargs where possible.
    proxy_graph = yaml.load(string, **kwargs)
    from . import init
    init_dict = proxy_graph.get('init', {})
    init(**init_dict)
    if overrides is not None:
        handle_overrides(proxy_graph, overrides)
    return instantiate_all(proxy_graph)
|
Loads a YAML configuration from a string or file-like object.
Parameters
----------
stream : str or object
Either a string containing valid YAML or a file-like object
supporting the .read() interface.
overrides : dict, optional
A dictionary containing overrides to apply. The location of
the override is specified in the key as a dot-delimited path
to the desired parameter, e.g. "model.corruptor.corruption_level".
Returns
-------
graph : dict or object
The dictionary or object (if the top-level element specified an
Python object to instantiate).
Notes
-----
Other keyword arguments are passed on to `yaml.load`.
|
entailment
|
def load_path(path, overrides=None, **kwargs):
    """
    Convenience function for loading a YAML configuration from a file.
    Parameters
    ----------
    path : str
        The path to the file to load on disk.
    overrides : dict, optional
        A dictionary containing overrides to apply. The location of
        the override is specified in the key as a dot-delimited path
        to the desired parameter, e.g. "model.corruptor.corruption_level".
    Returns
    -------
    graph : dict or object
        The dictionary or object (if the top-level element specified an
        Python object to instantiate).
    Notes
    -----
    Other keyword arguments are passed on to `yaml.load`.
    """
    # Context manager guarantees the handle is closed even if read() raises.
    with open(path, 'r') as f:
        content = f.read()
    if not isinstance(content, str):
        raise AssertionError("Expected content to be of type str but it is "
                             + str(type(content)))
    # BUG FIX: `overrides` was accepted but silently dropped; forward it so
    # dot-path overrides actually take effect when loading from a file.
    return load(content, overrides=overrides, **kwargs)
|
Convenience function for loading a YAML configuration from a file.
Parameters
----------
path : str
The path to the file to load on disk.
overrides : dict, optional
A dictionary containing overrides to apply. The location of
the override is specified in the key as a dot-delimited path
to the desired parameter, e.g. "model.corruptor.corruption_level".
Returns
-------
graph : dict or object
The dictionary or object (if the top-level element specified an
Python object to instantiate).
Notes
-----
Other keyword arguments are passed on to `yaml.load`.
|
entailment
|
def handle_overrides(graph, overrides):
    """
    Handle any overrides for this model configuration.
    Parameters
    ----------
    graph : dict or object
        A dictionary (or an ObjectProxy) containing the object graph
        loaded from a YAML file.
    overrides : dict
        A dictionary containing overrides to apply. The location of
        the override is specified in the key as a dot-delimited path
        to the desired parameter, e.g. "model.corruptor.corruption_level".
    """
    for key in overrides:
        levels = key.split('.')
        # Walk down the graph following the dotted path, stopping at the
        # parent container of the value we want to override.
        part = graph
        for lvl in levels[:-1]:
            try:
                part = part[lvl]
            except KeyError:
                # BUG FIX: the message was passed as (fmt, args) to KeyError
                # instead of %-interpolated, so it never got formatted.
                raise KeyError("'%s' override failed at '%s'" % (key, lvl))
        try:
            part[levels[-1]] = overrides[key]
        except KeyError:
            raise KeyError("'%s' override failed at '%s'" % (key, levels[-1]))
|
Handle any overrides for this model configuration.
Parameters
----------
graph : dict or object
A dictionary (or an ObjectProxy) containing the object graph
loaded from a YAML file.
overrides : dict
A dictionary containing overrides to apply. The location of
the override is specified in the key as a dot-delimited path
to the desired parameter, e.g. "model.corruptor.corruption_level".
|
entailment
|
def instantiate_all(graph):
    """
    Instantiate all ObjectProxy objects in a nested hierarchy.
    Parameters
    ----------
    graph : dict or object
        A dictionary (or an ObjectProxy) containing the object graph
        loaded from a YAML file.
    Returns
    -------
    graph : dict or object
        The dictionary or object resulting after the recursive instantiation.
    """
    def should_instantiate(obj):
        # Only these containers can hold (or be) ObjectProxy placeholders.
        classes = [ObjectProxy, dict, list]
        return True in [isinstance(obj, cls) for cls in classes]
    if not isinstance(graph, list):
        for key in graph:
            if should_instantiate(graph[key]):
                graph[key] = instantiate_all(graph[key])
    if hasattr(graph, 'keys'):
        # BUG FIX: snapshot the keys first; deleting entries while iterating
        # a live dict view raises RuntimeError on Python 3.
        for key in list(graph.keys()):
            if should_instantiate(key):
                new_key = instantiate_all(key)
                graph[new_key] = graph[key]
                del graph[key]
    if isinstance(graph, ObjectProxy):
        graph = graph.instantiate()
    if isinstance(graph, list):
        for i, elem in enumerate(graph):
            if should_instantiate(elem):
                graph[i] = instantiate_all(elem)
    return graph
|
Instantiate all ObjectProxy objects in a nested hierarchy.
Parameters
----------
graph : dict or object
A dictionary (or an ObjectProxy) containing the object graph
loaded from a YAML file.
Returns
-------
graph : dict or object
The dictionary or object resulting after the recursive instantiation.
|
entailment
|
def multi_constructor(loader, tag_suffix, node):
    """
    Constructor function passed to PyYAML telling it how to construct
    objects from argument descriptions. See PyYAML documentation for
    details on the call signature.
    """
    yaml_src = yaml.serialize(node)
    mapping = loader.construct_mapping(node)
    # A dotted suffix is an importable path; otherwise keep the bare name.
    if '.' in tag_suffix:
        classname = try_to_import(tag_suffix)
    else:
        classname = tag_suffix
    return ObjectProxy(classname, mapping, yaml_src)
|
Constructor function passed to PyYAML telling it how to construct
objects from argument descriptions. See PyYAML documentation for
details on the call signature.
|
entailment
|
def multi_constructor_pkl(loader, tag_suffix, node):
    """
    Constructor function passed to PyYAML telling it how to load
    objects from paths to .pkl files. See PyYAML documentation for
    details on the call signature.
    """
    mapping = loader.construct_yaml_str(node)
    # The !pkl: tag takes no suffix; anything else is a malformed tag.
    if tag_suffix not in ("", u""):
        raise AssertionError('Expected tag_suffix to be "" but it is "'+tag_suffix+'"')
    proxy = ObjectProxy(None, {}, yaml.serialize(node))
    # Eagerly unpickle the referenced file and cache it on the proxy.
    proxy.instance = serial.load(mapping)
    return proxy
|
Constructor function passed to PyYAML telling it how to load
objects from paths to .pkl files. See PyYAML documentation for
details on the call signature.
|
entailment
|
def initialize():
    """
    Initialize the configuration system by installing YAML handlers.
    Automatically done on first call to load() specified in this file.
    """
    global is_initialized
    # Register the custom multi-constructors for our tag prefixes.
    for prefix, constructor in (('!obj:', multi_constructor),
                                ('!pkl:', multi_constructor_pkl),
                                ('!import:', multi_constructor_import),
                                ('!include:', multi_constructor_include)):
        yaml.add_multi_constructor(prefix, constructor)

    def construct_import(loader, node):
        # Scalar payload is a dotted path to import.
        return try_to_import(loader.construct_scalar(node))

    yaml.add_constructor('!import', construct_import)
    # Bare dotted paths (e.g. package.module.name) resolve to !import.
    yaml.add_implicit_resolver(
        '!import',
        re.compile(r'(?:[a-zA-Z_][\w_]+\.)+[a-zA-Z_][\w_]+')
    )
    is_initialized = True
|
Initialize the configuration system by installing YAML handlers.
Automatically done on first call to load() specified in this file.
|
entailment
|
def instantiate(self):
    """
    Instantiate this object with the supplied parameters in `self.kwds`,
    or if already instantiated, return the cached instance.
    """
    if self.instance is None:
        # First request: build the wrapped object and cache it.
        self.instance = checked_call(self.cls, self.kwds)
    obj = self.instance
    try:
        obj.yaml_src = self.yaml_src
    except AttributeError:
        # Some objects (e.g. builtins) reject new attributes; that is fine.
        pass
    return obj
|
Instantiate this object with the supplied parameters in `self.kwds`,
or if already instantiated, return the cached instance.
|
entailment
|
def feed_forward(self, input_data, prediction=False):
    """Propagate forward through the layer.
    **Parameters:**
    input_data : ``GPUArray``
        Input data to compute activations for.
    prediction : bool, optional
        Whether to use prediction model. Only relevant when using
        dropout. If true, then weights are multiplied by
        1 - dropout if the layer uses dropout.
    **Returns:**
    activations : ``GPUArray``
        The activations of the output units.
    """
    n_from_prev = input_data.shape[1]
    n_expected = self.W.shape[0]
    if n_from_prev != n_expected:
        raise ValueError('Number of outputs from previous layer (%d) '
                         'does not match number of inputs to this layer (%d)' %
                         (n_from_prev, n_expected))
    # Affine transform: X @ W + b (bias broadcast across rows, in place).
    out = linalg.dot(input_data, self.W)
    return add_vec_to_mat(out, self.b, inplace=True)
|
Propagate forward through the layer.
**Parameters:**
input_data : ``GPUArray``
Inpute data to compute activations for.
prediction : bool, optional
Whether to use prediction model. Only relevant when using
dropout. If true, then weights are multiplied by
1 - dropout if the layer uses dropout.
**Returns:**
activations : ``GPUArray``
The activations of the output units.
|
entailment
|
def fasper(x, y, ofac, hifac, n_threads, MACC=4):
    """
    Given abscissas x (which need not be equally spaced) and ordinates
    y, and given a desired oversampling factor ofac (a typical value
    being 4 or larger). this routine creates an array wk1 with a
    sequence of nout increasing frequencies (not angular frequencies)
    up to hifac times the "average" Nyquist frequency, and creates
    an array wk2 with the values of the Lomb normalized periodogram at
    those frequencies. The arrays x and y are not altered. This
    routine also returns jmax such that wk2(jmax) is the maximum
    element in wk2, and prob, an estimate of the significance of that
    maximum against the hypothesis of random noise. A small value of prob
    indicates that a significant periodic signal is present.
    Reference:
    Press, W. H. & Rybicki, G. B. 1989
    ApJ vol. 338, p. 277-280.
    Fast algorithm for spectral analysis of unevenly sampled data
    (1989ApJ...338..277P)
    Arguments:
    X : Abscissas array, (e.g. an array of times).
    Y : Ordinates array, (e.g. corresponding counts).
    Ofac : Oversampling factor.
    Hifac : Hifac * "average" Nyquist frequency = highest frequency
    for which values of the Lomb normalized periodogram will
    be calculated.
    n_threads : number of threads to use.
    Returns:
    Wk1 : An array of Lomb periodogram frequencies.
    Wk2 : An array of corresponding values of the Lomb periodogram.
    Nout : Wk1 & Wk2 dimensions (number of calculated frequencies)
    Jmax : The array index corresponding to the MAX( Wk2 ).
    Prob : False Alarm Probability of the largest Periodogram value
    MACC : Number of interpolation points per 1/4 cycle
    of highest frequency
    History:
    02/23/2009, v1.0, MF
    Translation of IDL code (orig. Numerical recipies)
    """
    # Check dimensions of input arrays.
    # BUG FIX: the Python 2-only builtin `long` raises NameError on Python 3;
    # `int` is unbounded on Python 3 and auto-promotes on Python 2, so it is
    # a safe replacement everywhere `long()` was used below.
    n = int(len(x))
    if n != len(y):
        print('Incompatible arrays.')
        return
    nout = int(0.5*ofac*hifac*n)
    nfreqt = int(ofac*hifac*n*MACC)  # Size the FFT as next power
    nfreq = 64                       # of 2 above nfreqt.
    while nfreq < nfreqt:
        nfreq = 2*nfreq
    ndim = int(2*nfreq)
    # Compute the mean, variance
    ave = y.mean()
    # Sample variance: the divisor is N-1.
    var = ((y - y.mean())**2).sum()/(len(y) - 1)
    # and range of the data.
    xmin = x.min()
    xmax = x.max()
    xdif = xmax - xmin
    # Extirpolate the data into the workspaces (16-byte aligned buffers when
    # pyfftw is available, for its SIMD fast paths).
    if is_pyfftw:
        wk1 = pyfftw.n_byte_align_empty(int(ndim), 16, 'complex') * 0.
        wk2 = pyfftw.n_byte_align_empty(int(ndim), 16, 'complex') * 0.
    else:
        wk1 = zeros(ndim, dtype='complex')
        wk2 = zeros(ndim, dtype='complex')
    fac = ndim/(xdif*ofac)
    fndim = ndim
    ck = ((x - xmin)*fac) % fndim
    ckk = (2.0*ck) % fndim
    for j in range(0, n):
        __spread__(y[j] - ave, wk1, ndim, ck[j], MACC)
        __spread__(1.0, wk2, ndim, ckk[j], MACC)
    # Take the Fast Fourier Transforms (ifft scaled back by length to match
    # the NR forward-transform convention).
    if is_pyfftw:
        fft_wk1 = pyfftw.builders.ifft(wk1, planner_effort='FFTW_ESTIMATE',
                                       threads=n_threads)
        wk1 = fft_wk1() * len(wk1)
        fft_wk2 = pyfftw.builders.ifft(wk2, planner_effort='FFTW_ESTIMATE',
                                       threads=n_threads)
        wk2 = fft_wk2() * len(wk2)
    else:
        wk1 = ifft(wk1)*len(wk1)
        wk2 = ifft(wk2)*len(wk1)  # len(wk1) == len(wk2) == ndim here
    wk1 = wk1[1:nout + 1]
    wk2 = wk2[1:nout + 1]
    rwk1 = wk1.real
    iwk1 = wk1.imag
    rwk2 = wk2.real
    iwk2 = wk2.imag
    df = 1.0/(xdif*ofac)
    # Compute the Lomb value for each frequency.
    hypo2 = 2.0*abs(wk2)
    hc2wt = rwk2/hypo2
    hs2wt = iwk2/hypo2
    cwt = sqrt(0.5 + hc2wt)
    swt = sign(hs2wt)*(sqrt(0.5 - hc2wt))
    den = 0.5*n + hc2wt*rwk2 + hs2wt*iwk2
    cterm = (cwt*rwk1 + swt*iwk1)**2./den
    sterm = (cwt*iwk1 - swt*rwk1)**2./(n - den)
    wk1 = df*(arange(nout, dtype='float') + 1.)
    wk2 = (cterm + sterm)/(2.0*var)
    pmax = wk2.max()
    jmax = wk2.argmax()
    # Estimate significance of largest peak value (Horne & Baliunas-style
    # false-alarm probability over effm independent frequencies).
    expy = exp(-pmax)
    effm = 2.0*(nout)/ofac
    prob = effm*expy
    if prob > 0.01:
        prob = 1.0 - (1.0 - expy)**effm
    return wk1, wk2, nout, jmax, prob
|
Given abscissas x (which need not be equally spaced) and ordinates
y, and given a desired oversampling factor ofac (a typical value
being 4 or larger). this routine creates an array wk1 with a
sequence of nout increasing frequencies (not angular frequencies)
up to hifac times the "average" Nyquist frequency, and creates
an array wk2 with the values of the Lomb normalized periodogram at
those frequencies. The arrays x and y are not altered. This
routine also returns jmax such that wk2(jmax) is the maximum
element in wk2, and prob, an estimate of the significance of that
maximum against the hypothesis of random noise. A small value of prob
indicates that a significant periodic signal is present.
Reference:
Press, W. H. & Rybicki, G. B. 1989
ApJ vol. 338, p. 277-280.
Fast algorithm for spectral analysis of unevenly sampled data
(1989ApJ...338..277P)
Arguments:
X : Abscissas array, (e.g. an array of times).
Y : Ordinates array, (e.g. corresponding counts).
Ofac : Oversampling factor.
Hifac : Hifac * "average" Nyquist frequency = highest frequency
for which values of the Lomb normalized periodogram will
be calculated.
n_threads : number of threads to use.
Returns:
Wk1 : An array of Lomb periodogram frequencies.
Wk2 : An array of corresponding values of the Lomb periodogram.
Nout : Wk1 & Wk2 dimensions (number of calculated frequencies)
Jmax : The array index corresponding to the MAX( Wk2 ).
Prob : False Alarm Probability of the largest Periodogram value
MACC : Number of interpolation points per 1/4 cycle
of highest frequency
History:
02/23/2009, v1.0, MF
Translation of IDL code (orig. Numerical recipies)
|
entailment
|
def get_no_validate(self, key):
    """Return an item without validating the schema."""
    thunk, env = self.get_thunk_env(key)
    # Thunks are lazy: force evaluation in their environment before return.
    if isinstance(thunk, framework.Thunk):
        return framework.eval(thunk, env)
    return thunk
|
Return an item without validating the schema.
|
entailment
|
def env(self, current_scope):
    """Return an environment that will look up in current_scope for keys in
    this tuple, and the parent env otherwise.
    """
    # Chain of last resort: resolve 'self', then delegate to the parent env.
    fallback = framework.Environment({'self': current_scope},
                                     parent=self.__parent_env)
    return self.__env_cache.get(
        current_scope.ident,
        framework.Environment, current_scope,
        names=self.keys(),
        parent=fallback)
|
Return an environment that will look up in current_scope for keys in
this tuple, and the parent env otherwise.
|
entailment
|
def get_thunk_env(self, k):
    """Return the thunk AND environment for validating it in for the given key.
    There might be different envs in case the thunk comes from a different (composed) tuple. If the thunk needs its
    environment bound on retrieval, that will be done here.
    """
    if k not in self.__items:
        raise exceptions.EvaluationError('Unknown key: %r in tuple %r' % (k, self))
    thunk = self.__items[k]
    env = self.env(self)
    if isinstance(thunk, framework.BindableThunk):
        # Bind the thunk to this tuple's parent environment first.
        thunk = thunk.bind(self.__parent_env)
    return thunk, env
|
Return the thunk AND environment for validating it in for the given key.
There might be different envs in case the thunk comes from a different (composed) tuple. If the thunk needs its
environment bound on retrieval, that will be done here.
|
entailment
|
def attach_schema(self, schem):
    """Add a tuple schema to this object (externally imposed).

    The new schema is AND-combined with any schema already attached, so
    constraints accumulate rather than replace one another.
    """
    self.tuple_schema = schema.AndSchema.make(self.tuple_schema, schem)
|
Add a tuple schema to this object (externally imposed)
|
entailment
|
def get_schema_spec(self, key):
    """Return the evaluated schema expression from a subkey."""
    node = self._ast_node.member.get(key, None)
    if not node:
        # Undeclared member: no constraints apply.
        return schema.AnySchema()
    evaluated = framework.eval(node.member_schema, self.env(self))
    if not isinstance(evaluated, schema.Schema):
        raise ValueError('Node %r with schema node %r should evaluate to Schema, got %r' % (node, node.member_schema, evaluated))
    return evaluated
|
Return the evaluated schema expression from a subkey.
|
entailment
|
def get_required_fields(self):
    """Return the names of fields that are required according to the schema."""
    required = []
    for member in self._ast_node.members:
        if member.member_schema.required:
            required.append(member.name)
    return required
|
Return the names of fields that are required according to the schema.
|
entailment
|
def get_member_node(self, key):
    """Return the AST node for the given member, from the first tuple that serves it."""
    for tup, _ in self.lookups:
        if key not in tup:
            continue
        return tup.get_member_node(key)
    raise RuntimeError('Key not found in composite tuple: %r' % key)
|
Return the AST node for the given member, from the first tuple that serves it.
|
entailment
|
def exportable_keys(self):
    """Return a list of keys that are exportable from this tuple.
    Returns all keys that are not private in any of the tuples.
    """
    # Collect the privacy flag of every occurrence of each key.
    privacy = collections.defaultdict(list)
    for tup in self._tuples:
        for key, private in tup._keys_and_privacy().items():
            privacy[key].append(private)
    # A key is exportable only if no tuple marks it private.
    return [key for key, flags in privacy.items() if not any(flags)]
|
Return a list of keys that are exportable from this tuple.
Returns all keys that are not private in any of the tuples.
|
entailment
|
def resolve(self, current_file, rel_path):
    """Search the filesystem."""
    # Look next to the current file first, then along the search path.
    candidates = [path.dirname(current_file)] + self.search_path
    for base in candidates:
        candidate = path.join(base, rel_path)
        if self.exists(candidate):
            found = path.normpath(candidate)
            return found, path.abspath(found)
    raise exceptions.EvaluationError('No such file: %r, searched %s' %
                                     (rel_path, ':'.join(candidates)))
|
Search the filesystem.
|
entailment
|
def resolve(self, current_file, rel_path):
    """Search the filesystem."""
    # Fake files live in a flat dict keyed by joined path.
    candidate = path.join(path.dirname(current_file), rel_path)
    if candidate not in self.file_dict:
        raise RuntimeError('No such fake file: %r' % candidate)
    return candidate, candidate
|
Search the filesystem.
|
entailment
|
def check_call_arguments(to_call, kwargs):
    """
    Check the call signature against a dictionary of proposed arguments,
    raising an informative exception in the case of mismatch.
    Parameters
    ----------
    to_call : class or callable
        Function or class to examine (in the case of classes, the
        constructor call signature is analyzed)
    kwargs : dict
        Dictionary mapping parameter names (including positional
        arguments) to proposed values.
    """
    if 'self' in kwargs.keys():
        raise TypeError("Your dictionary includes an entry for 'self', "
                        "which is just asking for trouble")
    orig_to_call = getattr(to_call, '__name__', str(to_call))
    # For classes / callable objects, inspect the constructor or __call__.
    if not isinstance(to_call, types.FunctionType):
        if hasattr(to_call, '__init__'):
            to_call = to_call.__init__
        elif hasattr(to_call, '__call__'):
            to_call = to_call.__call__
    # BUG FIX: inspect.getargspec was deprecated and removed in Python 3.11.
    # Prefer getfullargspec (its `varkw` field replaces `keywords`), falling
    # back to getargspec on interpreters that lack it.
    if hasattr(inspect, 'getfullargspec'):
        spec = inspect.getfullargspec(to_call)
        args, varargs, keywords, defaults = (spec.args, spec.varargs,
                                             spec.varkw, spec.defaults)
    else:
        args, varargs, keywords, defaults = inspect.getargspec(to_call)
    if any(not isinstance(arg, str) for arg in args):
        raise TypeError('%s uses argument unpacking, which is deprecated and '
                        'unsupported by this pylearn2' % orig_to_call)
    if varargs is not None:
        raise TypeError('%s has a variable length argument list, but '
                        'this is not supported by config resolution' %
                        orig_to_call)
    if keywords is None:
        # Without **kwargs every supplied key must be a declared parameter.
        bad_keywords = [arg_name for arg_name in kwargs.keys()
                        if arg_name not in args]
        if len(bad_keywords) > 0:
            bad = ', '.join(bad_keywords)
            args = [arg for arg in args if arg != 'self']
            if len(args) == 0:
                matched_str = '(It does not support any keywords, actually)'
            else:
                # Suggest closest matching parameter names.
                matched = [match(keyword, args) for keyword in bad_keywords]
                matched_str = 'Did you mean %s?' % (', '.join(matched))
            raise TypeError('%s does not support the following '
                            'keywords: %s. %s' %
                            (orig_to_call, bad, matched_str))
    # Arguments without defaults are required.
    num_defaults = len(defaults) if defaults is not None else 0
    required = args[:len(args) - num_defaults]
    missing = [arg for arg in required if arg not in kwargs]
    if len(missing) > 0:
        # If im_self (or __self__) is present this is a bound method, whose
        # 'self' is supplied implicitly rather than through kwargs.
        is_bound = hasattr(to_call, 'im_self') or hasattr(to_call, '__self__')
        if len(missing) > 1 or missing[0] != 'self' or not is_bound:
            if 'self' in missing:
                missing.remove('self')
            missing = ', '.join([str(m) for m in missing])
            raise TypeError('%s did not get these expected '
                            'arguments: %s' % (orig_to_call, missing))
|
Check the call signature against a dictionary of proposed arguments,
raising an informative exception in the case of mismatch.
Parameters
----------
to_call : class or callable
Function or class to examine (in the case of classes, the
constructor call signature is analyzed)
kwargs : dict
Dictionary mapping parameter names (including positional
arguments) to proposed values.
|
entailment
|
def predict(rf_model, features):
    """
    Return label and probability estimated.
    Parameters
    ----------
    rf_model : sklearn.ensemble.RandomForestClassifier
        The UPSILoN random forests model.
    features : array_like
        A list of features estimated by UPSILoN.
    Returns
    -------
    label : str
        A predicted label (i.e. class).
    probability : float
        Class probability.
    flag : int
        Classification flag.
    """
    import numpy as np
    from upsilon.extract_features.feature_set import get_feature_set
    feature_set = get_feature_set()
    # Keep only the features the model was trained on, in sorted name order.
    cols = sorted(name for name in features if name in feature_set)
    filtered_features = np.array([features[col] for col in cols]).reshape(1, -1)
    classes = rf_model.classes_
    # We classify a single source, hence the [0] on the probability matrix.
    probabilities = rf_model.predict_proba(filtered_features)[0]
    # Flag suspicious classifications: weak periodic signal or aliased period.
    if features['period_SNR'] < 20. or is_period_alias(features['period']):
        flag = 1
    else:
        flag = 0
    max_index = np.where(probabilities == np.max(probabilities))
    return classes[max_index][0], probabilities[max_index][0], flag
|
Return label and probability estimated.
Parameters
----------
rf_model : sklearn.ensemble.RandomForestClassifier
The UPSILoN random forests model.
features : array_like
A list of features estimated by UPSILoN.
Returns
-------
label : str
A predicted label (i.e. class).
probability : float
Class probability.
flag : int
Classification flag.
|
entailment
|
def fmt(str, args=None, env=None):
    """fmt(string, [tuple]) -> string
    Interpolate a string, replacing {patterns} with the variables with the same
    name. If given a tuple, use the keys from the tuple to substitute. If not
    given a tuple, uses the current environment as the variable source.
    """
    # Lazy proxies ensure that only the keys actually referenced in the
    # format string get evaluated; a plain str.format(**args) would force
    # evaluation of every value in the tuple.
    source = args or env
    proxies = dict((key, StringInterpolationProxy(source, key))
                   for key in source.keys())
    return str.format(**proxies)
|
fmt(string, [tuple]) -> string
Interpolate a string, replacing {patterns} with the variables with the same
name. If given a tuple, use the keys from the tuple to substitute. If not
given a tuple, uses the current environment as the variable source.
|
entailment
|
def compose_all(tups):
    """Compose all given tuples together."""
    from . import ast  # I weep for humanity
    # Left-fold composition starting from the empty tuple.
    composed = ast.make_tuple({})
    for tup in tups:
        composed = composed.compose(ast.make_tuple(tup))
    return composed
|
Compose all given tuples together.
|
entailment
|
def has_key(tup, key):
    """has(tuple, string) -> bool
    Return whether a given tuple has a key and the key is bound.
    """
    if isinstance(tup, framework.TupleLike):
        return tup.is_bound(key)
    elif isinstance(tup, dict):
        return key in tup
    elif isinstance(tup, list):
        # Lists are "tuples" indexed by integer position.
        if isinstance(key, int):
            return key < len(tup)
        raise ValueError('Key must be integer when checking list index')
    raise ValueError('Not a tuple-like object: %r' % tup)
|
has(tuple, string) -> bool
Return whether a given tuple has a key and the key is bound.
|
entailment
|
def flatten(list_of_lists):
    """flatten([[A]]) -> [A]
    Flatten a list of lists.
    """
    flattened = []
    for sublist in list_of_lists:
        # Reject non-list elements rather than silently iterating them.
        if not isinstance(sublist, list):
            raise ValueError('%r is not a list' % sublist)
        flattened += sublist
    return flattened
|
flatten([[A]]) -> [A]
Flatten a list of lists.
|
entailment
|
def extract_dark(prihdr, scihdu):
    """Extract superdark data from ``DARKFILE`` or ``DRKCFILE``.
    Parameters
    ----------
    prihdr : obj
        FITS primary header HDU.
    scihdu : obj
        Extension HDU of the science image.
        This is only used to extract subarray data.
    Returns
    -------
    dark : ndarray or `None`
        Superdark, if any. Subtract this to apply ``DARKCORR``.
    """
    # CTE-corrected data uses the CTE-corrected dark reference file.
    if prihdr.get('PCTECORR', 'OMIT') == 'COMPLETE':
        darkfile = prihdr.get('DRKCFILE', 'N/A')
    else:
        darkfile = prihdr.get('DARKFILE', 'N/A')
    if darkfile == 'N/A':
        return None
    darkfile = from_irafpath(darkfile)
    ampstring = prihdr['CCDAMP']
    # Calculate DARKTIME: exposure plus post-flash duration.
    exptime = prihdr.get('EXPTIME', 0.0)
    flashdur = prihdr.get('FLASHDUR', 0.0)
    darktime = exptime + flashdur
    if exptime > 0:  # Not BIAS
        # NOTE(review): the extra 3.0 s presumably accounts for readout/flush
        # overhead accumulating dark current -- confirm against instrument docs.
        darktime += 3.0
    with fits.open(darkfile) as hdudark:
        if ampstring == 'ABCD':
            # Full frame through all four amps: stitch the two SCI
            # extensions side by side, second one flipped vertically.
            dark = np.concatenate(
                (hdudark['sci', 1].data,
                 hdudark['sci', 2].data[::-1, :]), axis=1)
        elif ampstring in ('A', 'B', 'AB'):
            dark = extract_ref(scihdu, hdudark['sci', 2])
        else:
            dark = extract_ref(scihdu, hdudark['sci', 1])
    # Scale the dark-rate image by the effective dark accumulation time.
    dark = dark * darktime
    return dark
|
Extract superdark data from ``DARKFILE`` or ``DRKCFILE``.
Parameters
----------
prihdr : obj
FITS primary header HDU.
scihdu : obj
Extension HDU of the science image.
This is only used to extract subarray data.
Returns
-------
dark : ndarray or `None`
Superdark, if any. Subtract this to apply ``DARKCORR``.
|
entailment
|
def extract_flash(prihdr, scihdu):
    """Extract postflash data from ``FLSHFILE``.
    Parameters
    ----------
    prihdr : obj
        FITS primary header HDU.
    scihdu : obj
        Extension HDU of the science image.
        This is only used to extract subarray data.
    Returns
    -------
    flash : ndarray or `None`
        Postflash, if any. Subtract this to apply ``FLSHCORR``.
    """
    flshfile = prihdr.get('FLSHFILE', 'N/A')
    flashsta = prihdr.get('FLASHSTA', 'N/A')
    flashdur = prihdr.get('FLASHDUR', 0.0)
    # Without a reference file or a positive duration there is nothing to do.
    if flshfile == 'N/A' or flashdur <= 0:
        return None
    if flashsta != 'SUCCESSFUL':
        warnings.warn('Flash status is {0}'.format(flashsta),
                      AstropyUserWarning)
    flshfile = from_irafpath(flshfile)
    ampstring = prihdr['CCDAMP']
    with fits.open(flshfile) as hduflash:
        if ampstring == 'ABCD':
            # Four-amp full frame: stitch both SCI extensions, second one
            # flipped vertically.
            flash = np.concatenate(
                (hduflash['sci', 1].data,
                 hduflash['sci', 2].data[::-1, :]), axis=1)
        elif ampstring in ('A', 'B', 'AB'):
            flash = extract_ref(scihdu, hduflash['sci', 2])
        else:
            flash = extract_ref(scihdu, hduflash['sci', 1])
    # Scale the flash-rate image by the flash duration.
    return flash * flashdur
|
Extract postflash data from ``FLSHFILE``.
Parameters
----------
prihdr : obj
FITS primary header HDU.
scihdu : obj
Extension HDU of the science image.
This is only used to extract subarray data.
Returns
-------
flash : ndarray or `None`
Postflash, if any. Subtract this to apply ``FLSHCORR``.
|
entailment
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.