| hip (string · lengths 140–3.32k) | cuda (string · lengths 84–3.33k) |
|---|---|
// !!! This is a file automatically generated by hipify!!!
#include <ATen/hip\HIPContext.h>
#include <hip/hip_runtime.h>
namespace at { namespace hip {
/**
Computes ceil(a / b)
*/
template <typename T>
__host__ __device__ __forceinline__ T ATenCeilDiv(T a, T b) {
return (a + b - 1) / b;
}
namespace {
// Thre... |
#include <ATen/cuda/CUDAContext.h>
#include <cuda_runtime.h>
namespace at { namespace cuda {
/**
Computes ceil(a / b)
*/
template <typename T>
__host__ __device__ __forceinline__ T ATenCeilDiv(T a, T b) {
return (a + b - 1) / b;
}
namespace {
// Threads per block for our apply kernel
// FIXME: use occupancy ... |
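A minimal usage sketch (not part of the dataset) of the `ATenCeilDiv` helper shown in both cells above: `(a + b - 1) / b` rounds an integer quotient up instead of down, which is how launch-grid sizes are computed from an element count and the "threads per block" constant the comment mentions. The constant and element count below are illustrative assumptions.

```cpp
// Sketch: computing a launch grid size with ATenCeilDiv, assuming positive
// integer inputs. (a + b - 1) / b == ceil(a / b) in integer arithmetic,
// e.g. ceil(10 / 4) = (10 + 3) / 4 = 3.
#include <cassert>
#include <cstdint>

template <typename T>
T ATenCeilDiv(T a, T b) {
  return (a + b - 1) / b;
}

int main() {
  constexpr int64_t kThreadsPerBlock = 256;  // hypothetical block size
  int64_t numel = 1000;                      // hypothetical element count
  int64_t blocks = ATenCeilDiv(numel, kThreadsPerBlock);
  assert(blocks == 4);  // 1000 elements need 4 blocks of 256 threads
  return 0;
}
```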
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/core/Tensor.h>
#include <ATen/Dispatch.h>
#include <ATen/hip\HIPContext.h>
#include <ATen/native/Repeat.h>
#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/NativeFunctions.h>... |
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/core/Tensor.h>
#include <ATen/Dispatch.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/native/Repeat.h>
#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/NativeFunctions.h>
#else
#include <ATen/ops/repeat_interleave_native.h>
#endif
template <typename index... |
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/native/UnaryOps.h>
#include <limits>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/Math.h>
#include <ATen/native/TensorIterator.h>
#in... |
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/native/UnaryOps.h>
#include <limits>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/Math.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/cuda/JitLoops.cuh>
#include <ATen/native... |
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/native/UnaryOps.h>
#include <limits>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/Math.h>
#include <ATen/native/TensorIterator.h>
#in... |
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/native/UnaryOps.h>
#include <limits>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/Math.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/cuda/JitLoops.cuh>
#include <ATen/native... |
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/Dispatch.h>
#include <ATen/native/hip\JitLoops.cuh>
#include <ATen/native/hip\Loops.cuh>
#include <ATen/native/BinaryOps.h>
#include <ATen/native/Math.h>
#include <ATen/native/hip\Math.cuh>
#include <ATen/nativ... |
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/Dispatch.h>
#include <ATen/native/cuda/JitLoops.cuh>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/BinaryOps.h>
#include <ATen/native/Math.h>
#include <ATen/native/cuda/Math.cuh>
#include <ATen/native/cuda/jit_utils.h>
namespace at::native {
name... |
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/Dispatch.h>
#include <ATen/native/hip\JitLoops.cuh>
#include <ATen/native/hip\Loops.cuh>
#include <ATen/native/BinaryOps.h>
#include <ATen/native/Math.h>
#include <ATen/native/hip\Math.cuh>
#include <ATen/nativ... |
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/Dispatch.h>
#include <ATen/native/cuda/JitLoops.cuh>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/BinaryOps.h>
#include <ATen/native/Math.h>
#include <ATen/native/cuda/Math.cuh>
#include <ATen/native/cuda/jit_utils.h>
namespace at::native {
name... |
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/Dispatch.h>
#include <ATen/native/hip\JitLoops.cuh>
#include <ATen/native/hip\Loops.cuh>
#include <ATen/native/BinaryOps.h>
#include <ATen/native/Math.h>
#include <ATen/native/hip\Math.cuh>
#include <ATen/nativ... |
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/Dispatch.h>
#include <ATen/native/cuda/JitLoops.cuh>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/BinaryOps.h>
#include <ATen/native/Math.h>
#include <ATen/native/cuda/Math.cuh>
#include <ATen/native/cuda/jit_utils.h>
namespace at::native {
namespace {
... |
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/Dispatch.h>
#include <ATen/native/hip\JitLoops.cuh>
#include <ATen/native/hip\Loops.cuh>
#include <ATen/native/BinaryOps.h>
#include <ATen/native/Math.h>
#include <ATen/native/hip\Math.cuh>
#include <ATen/nativ... |
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/Dispatch.h>
#include <ATen/native/cuda/JitLoops.cuh>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/BinaryOps.h>
#include <ATen/native/Math.h>
#include <ATen/native/cuda/Math.cuh>
#include <ATen/native/cuda/jit_utils.h>
namespace at::native {
name... |
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/core/Tensor.h>
#include <thrust/execution_policy.h>
#include <thrust/sort.h>
namespace at::native {
std::vector<int64_t> infer_dense_strides_dim_last(const Tensor & self, int64_t dim) {
int64_t ndim... |
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/core/Tensor.h>
#include <thrust/execution_policy.h>
#include <thrust/sort.h>
namespace at::native {
std::vector<int64_t> infer_dense_strides_dim_last(const Tensor & self, int64_t dim) {
int64_t ndim = self.dim();
// sort the strides in descending order ac... |
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/core/Tensor.h>
#include <c10/util/Exception.h>
#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/NativeFunctions.h>
#else
#include <ATen/ops/sspaddmm_native.h>
#endif
namespace at::native {
// sparse, sp... |
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/core/Tensor.h>
#include <c10/util/Exception.h>
#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/NativeFunctions.h>
#else
#include <ATen/ops/sspaddmm_native.h>
#endif
namespace at::native {
// sparse, sparse, sparse, dense, real, real -> sparse
Tensor& _sspaddmm... |
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/native/UnaryOps.h>
#include <limits>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/Math.h>
#include <ATen/native/TensorIterator.h>
#in... |
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/native/UnaryOps.h>
#include <limits>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/Math.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/cuda/JitLoops.cuh>
#include <ATen/native... |
// !!! This is a file automatically generated by hipify!!!
#pragma once
#include <ATen/core/TensorBase.h>
#include <ATen/hip/detail\TensorInfo.cuh>
#include <ATen/native/CanUse32BitIndexMath.h>
namespace at {
namespace hip {
namespace detail {
TORCH_HIP_CU_API bool maybeOverlappingIndices(const at::TensorBase &t);
... |
#pragma once
#include <ATen/core/TensorBase.h>
#include <ATen/cuda/detail/TensorInfo.cuh>
#include <ATen/native/CanUse32BitIndexMath.h>
namespace at {
namespace cuda {
namespace detail {
TORCH_CUDA_CU_API bool maybeOverlappingIndices(const at::TensorBase &t);
using at::native::canUse32BitIndexMath;
template <typen... |
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/hip\Loops.cuh>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/BinaryOps.h>
#include <c10/util/BFloat16-math.h>
// NOTE: HIP... |
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/BinaryOps.h>
#include <c10/util/BFloat16-math.h>
// NOTE: CUDA on Windows requires that the enclosing function
// of a ... |
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/OpMathType.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/hip... |
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/OpMathType.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/cuda/JitLoops.cuh>
#include <ATen/native/cuda/Loops.cuh>
#incl... |
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/OpMathType.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/hip... |
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/OpMathType.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/cuda/JitLoops.cuh>
#include <ATen/native/cuda/Loops.cuh>
#incl... |
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/OpMathType.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/hip... |
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/OpMathType.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/cuda/JitLoops.cuh>
#include <ATen/native/cuda/Loops.cuh>
#incl... |
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/OpMathType.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/hip... |
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/OpMathType.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/cuda/JitLoops.cuh>
#include <ATen/native/cuda/Loops.cuh>
#incl... |
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/OpMathType.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/hip... |
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/OpMathType.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/cuda/JitLoops.cuh>
#include <ATen/native/cuda/Loops.cuh>
#incl... |
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/OpMathType.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/hip... |
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/OpMathType.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/cuda/JitLoops.cuh>
#include <ATen/native/cuda/Loops.cuh>
#incl... |
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/OpMathType.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/hip... |
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/OpMathType.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/cuda/JitLoops.cuh>
#include <ATen/native/cuda/Loops.cuh>
#incl... |
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/OpMathType.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/hip... |
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/OpMathType.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/cuda/JitLoops.cuh>
#include <ATen/native/cuda/Loops.cuh>
#incl... |
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/OpMathType.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/hip... |
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/OpMathType.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/cuda/JitLoops.cuh>
#include <ATen/native/cuda/Loops.cuh>
#incl... |
#pragma once
#include <assert.h>
#if defined(__HIP_ARCH__) || defined(__HIP_DEVICE_COMPILE__)
#include <hip/hip_runtime.h>
#endif
namespace at {
namespace hip {
namespace detail {
template <typename Value>
struct DivMod {
Value div, mod;
C10_HOST_DEVICE DivMod(Value div, ... |
#pragma once
#include <assert.h>
#if defined(__CUDA_ARCH__) || defined(__HIP_DEVICE_COMPILE__)
#include <cuda_runtime.h>
#endif
namespace at {
namespace cuda {
namespace detail {
template <typename Value>
struct DivMod {
Value div, mod;
C10_HOST_DEVICE DivMod(Value div, Va... |
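A host-side sketch of the `DivMod` pattern in the row above (the constructor is truncated in both cells, so its shape here is an assumption). Packing quotient and remainder into one struct lets indexing code obtain both from a single division.

```cpp
// Sketch of DivMod, assuming a (div, mod) constructor; the real header also
// pairs it with a fast integer divider, which is outside the visible cell.
#include <cassert>

template <typename Value>
struct DivMod {
  Value div, mod;
  DivMod(Value div, Value mod) : div(div), mod(mod) {}
};

template <typename Value>
DivMod<Value> divmod(Value n, Value d) {
  return DivMod<Value>(n / d, n % d);  // one division, both results
}

int main() {
  auto r = divmod(17, 5);
  assert(r.div == 3 && r.mod == 2);  // 17 = 3 * 5 + 2
  return 0;
}
```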
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/OpMathType.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/hip... |
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/OpMathType.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/cuda/JitLoops.cuh>
#include <ATen/native/cuda/Loops.cuh>
#incl... |
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/OpMathType.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/hip... |
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/OpMathType.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/cuda/JitLoops.cuh>
#include <ATen/native/cuda/Loops.cuh>
#incl... |
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/OpMathType.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/hip... |
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/OpMathType.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/cuda/JitLoops.cuh>
#include <ATen/native/cuda/Loops.cuh>
#incl... |
// !!! This is a file automatically generated by hipify!!!
#include <ATen/core/Tensor.h>
namespace at {
namespace native {
namespace internal {
template <typename scalar_t>
std::tuple<Tensor, Tensor, Tensor> unique_hip_template(
const Tensor& self,
const bool consecutive,
const bool return_inverse,
c... |
#include <ATen/core/Tensor.h>
namespace at {
namespace native {
namespace internal {
template <typename scalar_t>
std::tuple<Tensor, Tensor, Tensor> unique_cuda_template(
const Tensor& self,
const bool consecutive,
const bool return_inverse,
const bool return_counts);
} // namespace internal
} // na... |
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/native/sparse/ValidateCompressedIndicesCommon.h>
#include <ATen/native/hip\Loops.cuh>
namespace at::native {
namespace {
template <typename func_t>
struct HIPKernelLauncher {
static void launch(Ten... |
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/native/sparse/ValidateCompressedIndicesCommon.h>
#include <ATen/native/cuda/Loops.cuh>
namespace at::native {
namespace {
template <typename func_t>
struct CUDAKernelLauncher {
static void launch(TensorIteratorBase& iter, const func_t& f) {
gpu_kernel(... |
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/Dispatch.h>
#include <ATen/native/hip\JitLoops.cuh>
#include <ATen/native/hip\Loops.cuh>
#include <ATen/native/BinaryOps.h>
#include <ATen/native/Math.h>
#include <ATen/native/hip\Math.cuh>
#include <ATen/native... |
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/Dispatch.h>
#include <ATen/native/cuda/JitLoops.cuh>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/BinaryOps.h>
#include <ATen/native/Math.h>
#include <ATen/native/cuda/Math.cuh>
#include <ATen/native/cuda/jit_utils.h>
namespace at::native {
namespace {
... |
// !!! This is a file automatically generated by hipify!!!
#include <ATen/ATen.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/hip\Loops.cuh>
namespace at {
namespace native {
Tensor& relu_quantized_hip_(Tensor& self) {
const auto zero_point = self.q_zero_point();
AT_DISPATCH_QINT_TYPES(
sel... |
#include <ATen/ATen.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/cuda/Loops.cuh>
namespace at {
namespace native {
Tensor& relu_quantized_cuda_(Tensor& self) {
const auto zero_point = self.q_zero_point();
AT_DISPATCH_QINT_TYPES(
self.scalar_type(), "qrelu_cuda", [&]() {
auto iter = ... |
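A hedged sketch of the quantized-ReLU idea in the `relu_quantized_cuda_` row above: for an affine-quantized tensor, the integer value equal to `q_zero_point()` represents real 0, so ReLU clamps the quantized value at the zero point. This is plain scalar C++ for illustration, not the actual `gpu_kernel` lambda from the truncated file.

```cpp
// Sketch: quantized ReLU as a clamp at the zero point. Values below
// zero_point encode negative reals and map to real 0 (== zero_point).
#include <algorithm>
#include <cassert>
#include <cstdint>

int8_t qrelu_value(int8_t q, int8_t zero_point) {
  return std::max(q, zero_point);
}

int main() {
  // with zero_point = 10, quantized 3 (a negative real) clamps to 10 (real 0)
  assert(qrelu_value(3, 10) == 10);
  assert(qrelu_value(42, 10) == 42);  // positive reals pass through
  return 0;
}
```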
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/core/Tensor.h>
#include <ATen/Dispatch.h>
#include <ATen/TensorIterator.h>
#include <ATen/native/hip\Loops.cuh>
#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
#include <ATen/NativeFunction... |
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/core/Tensor.h>
#include <ATen/Dispatch.h>
#include <ATen/TensorIterator.h>
#include <ATen/native/cuda/Loops.cuh>
#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
#include <ATen/NativeFunctions.h>
#else
#include <ATen/ops/empty.h>
#include <ATen/ops/... |
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/core/Tensor.h>
#include <ATen/Dispatch.h>
#include <ATen/TensorIterator.h>
#include <ATen/native/hip\Loops.cuh>
#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
#include <ATen/NativeFunction... |
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/core/Tensor.h>
#include <ATen/Dispatch.h>
#include <ATen/TensorIterator.h>
#include <ATen/native/cuda/Loops.cuh>
#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
#include <ATen/NativeFunctions.h>
#else
#include <ATen/ops/_empty_affine_quantized.h>
#... |
// !!! This is a file automatically generated by hipify!!!
#pragma once
#include <ATen/hip/detail\TensorInfo.cuh>
#include <c10/macros/Macros.h>
namespace at {
class Tensor;
}
namespace c10 {
class Scalar;
}
namespace at { namespace native {
void s_addmm_out_sparse_dense_hip_worker(int64_t nnz, int64_t m, int64_t ... |
#pragma once
#include <ATen/cuda/detail/TensorInfo.cuh>
#include <c10/macros/Macros.h>
namespace at {
class Tensor;
}
namespace c10 {
class Scalar;
}
namespace at { namespace native {
void s_addmm_out_sparse_dense_cuda_worker(int64_t nnz, int64_t m, int64_t n, int64_t k, Tensor& r_, const Scalar& beta, const Tenso... |
// !!! This is a file automatically generated by hipify!!!
// No "#pragma once" because this is a raw definition that can be copied by jit codegen.
// Eager mode clients should not include this file directly, instead,
// they should #include <ATen/hip\HIPGeneratorImpl.h>, which has a #pragma once.
// Stores RNG state... |
// No "#pragma once" because this is a raw definition that can be copied by jit codegen.
// Eager mode clients should not include this file directly, instead,
// they should #include <ATen/cuda/CUDAGeneratorImpl.h>, which has a #pragma once.
// Stores RNG state values. Passed as a kernel argument.
// See Note [CUDA G... |
// !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2022, Tri Dao.
// Splitting the different head dimensions to different files to speed up compilation.
#include <ATen/native/transformers/hip/flash_attn\fmha_bwd_launch_template.h>
void run_fmha_bwd_hdim128(FMHA_dgrad_params ¶ms, hipStre... |
// Copyright (c) 2022, Tri Dao.
// Splitting the different head dimensions to different files to speed up compilation.
#include <ATen/native/transformers/cuda/flash_attn/fmha_bwd_launch_template.h>
void run_fmha_bwd_hdim128(FMHA_dgrad_params ¶ms, cudaStream_t stream, const bool configure) {
FP16_SWITCH(para... |
// !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2022, Tri Dao.
// Splitting the different head dimensions to different files to speed up compilation.
#include <ATen/native/transformers/hip/flash_attn\fmha_bwd_launch_template.h>
void run_fmha_bwd_hdim32(FMHA_dgrad_params ¶ms, hipStrea... |
// Copyright (c) 2022, Tri Dao.
// Splitting the different head dimensions to different files to speed up compilation.
#include <ATen/native/transformers/cuda/flash_attn/fmha_bwd_launch_template.h>
void run_fmha_bwd_hdim32(FMHA_dgrad_params ¶ms, cudaStream_t stream, const bool configure) {
FP16_SWITCH(param... |
// !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2022, Tri Dao.
// Splitting the different head dimensions to different files to speed up compilation.
#include <ATen/native/transformers/hip/flash_attn\fmha_bwd_launch_template.h>
void run_fmha_bwd_hdim64(FMHA_dgrad_params ¶ms, hipStrea... |
// Copyright (c) 2022, Tri Dao.
// Splitting the different head dimensions to different files to speed up compilation.
#include <ATen/native/transformers/cuda/flash_attn/fmha_bwd_launch_template.h>
void run_fmha_bwd_hdim64(FMHA_dgrad_params ¶ms, cudaStream_t stream, const bool configure) {
FP16_SWITCH(param... |
// !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2022, Tri Dao.
// Splitting the different head dimensions to different files to speed up compilation.
#include <ATen/native/transformers/hip/flash_attn\fmha_fwd_launch_template.h>
void run_fmha_fwd_hdim128(Launch_params<FMHA_fprop_params> &... |
// Copyright (c) 2022, Tri Dao.
// Splitting the different head dimensions to different files to speed up compilation.
#include <ATen/native/transformers/cuda/flash_attn/fmha_fwd_launch_template.h>
void run_fmha_fwd_hdim128(Launch_params<FMHA_fprop_params> &launch_params) {
FP16_SWITCH(launch_params.params.is_b... |
// !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2022, Tri Dao.
// Splitting the different head dimensions to different files to speed up compilation.
#include <ATen/native/transformers/hip/flash_attn\fmha_fwd_launch_template.h>
void run_fmha_fwd_hdim32(Launch_params<FMHA_fprop_params> &l... |
// Copyright (c) 2022, Tri Dao.
// Splitting the different head dimensions to different files to speed up compilation.
#include <ATen/native/transformers/cuda/flash_attn/fmha_fwd_launch_template.h>
void run_fmha_fwd_hdim32(Launch_params<FMHA_fprop_params> &launch_params) {
FP16_SWITCH(launch_params.params.is_bf... |
// !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2022, Tri Dao.
// Splitting the different head dimensions to different files to speed up compilation.
#include <ATen/native/transformers/hip/flash_attn\fmha_fwd_launch_template.h>
void run_fmha_fwd_hdim64(Launch_params<FMHA_fprop_params> &l... |
// Copyright (c) 2022, Tri Dao.
// Splitting the different head dimensions to different files to speed up compilation.
#include <ATen/native/transformers/cuda/flash_attn/fmha_fwd_launch_template.h>
void run_fmha_fwd_hdim64(Launch_params<FMHA_fprop_params> &launch_params) {
FP16_SWITCH(launch_params.params.is_bf... |
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
// This fil... |
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
// This file is auto-generated. See "generate_kernels.py"
#include <ATen/native/transformers/cuda/m... |
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
// This fil... |
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
// This file is auto-generated. See "generate_kernels.py"
#include <ATen/native/transformers/cuda/m... |
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
// This fil... |
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
// This file is auto-generated. See "generate_kernels.py"
#include <ATen/native/transformers/cuda/m... |
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
// This fil... |
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
// This file is auto-generated. See "generate_kernels.py"
#include <ATen/native/transformers/cuda/m... |
#pragma once
#include <ATen/CollapseDims.h>
namespace at {
namespace hip {
namespace detail {
#define MAX_TENSORINFO_DIMS 25
template <typename T, typename IndexType>
struct TensorInfo {
TensorInfo();
TensorInfo(T* p, int dim, IndexType sz[MAX_TENSORINFO_DIMS], IndexType st[MAX_TENSORINFO_DIMS]);
void reduc... |
#pragma once
#include <ATen/CollapseDims.h>
namespace at {
namespace cuda {
namespace detail {
#define MAX_TENSORINFO_DIMS 25
template <typename T, typename IndexType>
struct TensorInfo {
TensorInfo();
TensorInfo(T* p, int dim, IndexType sz[MAX_TENSORINFO_DIMS], IndexType st[MAX_TENSORINFO_DIMS]);
void reduc... |
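A sketch of what the `TensorInfo` struct in the row above carries and how offset arithmetic typically uses it. The offset helper itself is outside the visible cell, so this is an illustration of the sizes/strides layout, not the real implementation.

```cpp
// Sketch: converting a flat element index into a data offset using the
// sizes/strides arrays a TensorInfo holds, peeling dimensions innermost-first.
#define MAX_TENSORINFO_DIMS 25

template <typename T, typename IndexType>
struct TensorInfo {
  T* data;
  IndexType sizes[MAX_TENSORINFO_DIMS];
  IndexType strides[MAX_TENSORINFO_DIMS];
  int dims;
};

template <typename T, typename IndexType>
IndexType indexToOffset(const TensorInfo<T, IndexType>& info, IndexType linear) {
  IndexType offset = 0;
  for (int d = info.dims - 1; d >= 0; --d) {
    offset += (linear % info.sizes[d]) * info.strides[d];
    linear /= info.sizes[d];
  }
  return offset;
}
```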
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
// This fil... |
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
// This file is auto-generated. See "generate_kernels.py"
#include <ATen/native/transformers/cuda/m... |
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
// This fil... |
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
// This file is auto-generated. See "generate_kernels.py"
#include <ATen/native/transformers/cuda/m... |
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
// This fil... |
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
// This file is auto-generated. See "generate_kernels.py"
#include <ATen/native/transformers/cuda/m... |
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
// This fil... |
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
// This file is auto-generated. See "generate_kernels.py"
#include <ATen/native/transformers/cuda/m... |
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
// This fil... |
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
// This file is auto-generated. See "generate_kernels.py"
#include <ATen/native/transformers/cuda/m... |
#include "hip/hip_runtime.h"
#include <ATen/native/transformers/hip/mem_eff_attention\kernel_backward.h>
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm50, float, false, false, false, 64, 64, 128>::kNumThreads, AttentionBackwardKernel<cutlass::arch::Sm50, float, false, false, false, 6... |
#include <ATen/native/transformers/cuda/mem_eff_attention/kernel_backward.h>
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm50, float, false, false, false, 64, 64, 128>::kNumThreads, AttentionBackwardKernel<cutlass::arch::Sm50, float, false, false, false, 64, 64, 128>::kMinBlocksPerSm)... |
#include "hip/hip_runtime.h"
#include <ATen/native/transformers/hip/mem_eff_attention\kernel_backward.h>
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm50, float, false, true, false, 64, 64, 128>::kNumThreads, AttentionBackwardKernel<cutlass::arch::Sm50, float, false, true, false, 64,... |
#include <ATen/native/transformers/cuda/mem_eff_attention/kernel_backward.h>
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm50, float, false, true, false, 64, 64, 128>::kNumThreads, AttentionBackwardKernel<cutlass::arch::Sm50, float, false, true, false, 64, 64, 128>::kMinBlocksPerSm)
f... |
#include "hip/hip_runtime.h"
#include <ATen/native/transformers/hip/mem_eff_attention\kernel_backward.h>
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm50, float, false, false, false, 64, 64, 32>::kNumThreads, AttentionBackwardKernel<cutlass::arch::Sm50, float, false, false, false, 64... |
#include <ATen/native/transformers/cuda/mem_eff_attention/kernel_backward.h>
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm50, float, false, false, false, 64, 64, 32>::kNumThreads, AttentionBackwardKernel<cutlass::arch::Sm50, float, false, false, false, 64, 64, 32>::kMinBlocksPerSm)
f... |
#include "hip/hip_runtime.h"
#include <ATen/native/transformers/hip/mem_eff_attention\kernel_backward.h>
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm50, float, false, true, false, 64, 64, 32>::kNumThreads, AttentionBackwardKernel<cutlass::arch::Sm50, float, false, true, false, 64, ... |
#include <ATen/native/transformers/cuda/mem_eff_attention/kernel_backward.h>
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm50, float, false, true, false, 64, 64, 32>::kNumThreads, AttentionBackwardKernel<cutlass::arch::Sm50, float, false, true, false, 64, 64, 32>::kMinBlocksPerSm)
fmh... |
#include "hip/hip_runtime.h"
#include <ATen/native/transformers/hip/mem_eff_attention\kernel_backward.h>
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm50, float, false, false, false, 64, 64, 64>::kNumThreads, AttentionBackwardKernel<cutlass::arch::Sm50, float, false, false, false, 64... |
#include <ATen/native/transformers/cuda/mem_eff_attention/kernel_backward.h>
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm50, float, false, false, false, 64, 64, 64>::kNumThreads, AttentionBackwardKernel<cutlass::arch::Sm50, float, false, false, false, 64, 64, 64>::kMinBlocksPerSm)
f... |
// !!! This is a file automatically generated by hipify!!!
// No "#pragma once" because this is a raw definition that can be copied by jit codegen.
// Eager mode clients should not include this file directly, instead,
// they should #include <ATen/hip\HIPGraphsUtils.cuh>, which has a #pragma once.
namespace at {
name... |
// No "#pragma once" because this is a raw definition that can be copied by jit codegen.
// Eager mode clients should not include this file directly, instead,
// they should #include <ATen/cuda/CUDAGraphsUtils.cuh>, which has a #pragma once.
namespace at {
namespace cuda {
namespace philox {
// In-kernel call to ret... |
#include "hip/hip_runtime.h"
#include <ATen/native/transformers/hip/mem_eff_attention\kernel_backward.h>
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm50, float, false, true, false, 64, 64, 64>::kNumThreads, AttentionBackwardKernel<cutlass::arch::Sm50, float, false, true, false, 64, ... |
#include <ATen/native/transformers/cuda/mem_eff_attention/kernel_backward.h>
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm50, float, false, true, false, 64, 64, 64>::kNumThreads, AttentionBackwardKernel<cutlass::arch::Sm50, float, false, true, false, 64, 64, 64>::kMinBlocksPerSm)
fmh... |
#include "hip/hip_runtime.h"
#include <ATen/native/transformers/hip/mem_eff_attention\kernel_backward.h>
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm50, float, false, false, false, 64, 64, 65536>::kNumThreads, AttentionBackwardKernel<cutlass::arch::Sm50, float, false, false, false,... |
#include <ATen/native/transformers/cuda/mem_eff_attention/kernel_backward.h>
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm50, float, false, false, false, 64, 64, 65536>::kNumThreads, AttentionBackwardKernel<cutlass::arch::Sm50, float, false, false, false, 64, 64, 65536>::kMinBlocksPe... |
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <gtest/gtest.h>
#include <c10/hip/HIPException.h>
int safeDeviceCount() {
int count;
hipError_t err = hipGetDeviceCount(&count);
if (err == hipErrorInsufficientDriver || err == hipErrorNoDevice) {
return 0;
}
... |
#include <gtest/gtest.h>
#include <c10/cuda/CUDAException.h>
int safeDeviceCount() {
int count;
cudaError_t err = cudaGetDeviceCount(&count);
if (err == cudaErrorInsufficientDriver || err == cudaErrorNoDevice) {
return 0;
}
return count;
}
#define SKIP_IF_NO_GPU() \
do { ... |
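The `SKIP_IF_NO_GPU` body is truncated in the cell above; `safeDeviceCount()` is visible in full. Below is a hedged sketch of the guard pattern, with the `GTEST_SKIP()` form an assumption rather than the dataset's actual macro body.

```cpp
// Sketch: skip GPU tests gracefully when no device or driver is present.
// safeDeviceCount() mirrors the visible cell; the macro body is assumed.
#include <gtest/gtest.h>
#include <cuda_runtime.h>

int safeDeviceCount() {
  int count;
  cudaError_t err = cudaGetDeviceCount(&count);
  if (err == cudaErrorInsufficientDriver || err == cudaErrorNoDevice) {
    return 0;  // treat "no driver / no device" as zero GPUs, not a failure
  }
  return count;
}

#define SKIP_IF_NO_GPU()                        \
  do {                                          \
    if (safeDeviceCount() == 0) {               \
      GTEST_SKIP() << "no CUDA device present"; \
    }                                           \
  } while (0)
```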
// !!! This is a file automatically generated by hipify!!!
#include <gtest/gtest.h>
#include <ATen/ATen.h>
#include <ATen/hip\HIPContext.h>
#include <c10/util/Optional.h>
#include <assert.h>
using namespace at;
// optional in cuda files
TEST(OptionalTest, OptionalTestHIP) {
if (!at::cuda::is_available()) return;... |
#include <gtest/gtest.h>
#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <c10/util/Optional.h>
#include <assert.h>
using namespace at;
// optional in cuda files
TEST(OptionalTest, OptionalTestCUDA) {
if (!at::cuda::is_available()) return;
c10::optional<int64_t> trivially_destructible;
c10:... |
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <gtest/gtest.h>
#include <ATen/ATen.h>
#include <ATen/core/TensorAccessor.h>
#include <ATen/hip\HIPContext.h>
#include <assert.h>
using namespace at;
__global__ void test_tensor_packed_accessor_kernel(
PackedTensor... |
#include <gtest/gtest.h>
#include <ATen/ATen.h>
#include <ATen/core/TensorAccessor.h>
#include <ATen/cuda/CUDAContext.h>
#include <assert.h>
using namespace at;
__global__ void test_tensor_packed_accessor_kernel(
PackedTensorAccessor64<float, 1, RestrictPtrTraits> resa,
PackedTensorAccessor64<float, 2, Res... |
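A sketch of the packed-accessor pattern the test row above exercises: a kernel takes `PackedTensorAccessor64` arguments by value and indexes with `[]` instead of raw pointer arithmetic, letting the accessor apply strides. The kernel body and shapes here are illustrative assumptions, not the dataset's test kernel.

```cpp
// Sketch: a CUDA kernel summing each row of a 2-D tensor through packed
// accessors, assuming a 1-D output of length in.size(0).
#include <ATen/ATen.h>
#include <ATen/core/TensorAccessor.h>
using namespace at;

__global__ void sum_rows_kernel(
    PackedTensorAccessor64<float, 1, RestrictPtrTraits> out,
    PackedTensorAccessor64<float, 2, RestrictPtrTraits> in) {
  int64_t row = blockIdx.x * blockDim.x + threadIdx.x;
  if (row < in.size(0)) {
    float acc = 0;
    for (int64_t col = 0; col < in.size(1); ++col) {
      acc += in[row][col];  // accessor applies strides for us
    }
    out[row] = acc;
  }
}
```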
// !!! This is a file automatically generated by hipify!!!
#pragma once
// TODO: Remove once torchvision has been updated to use the ATen header
#include <ATen/hip\Atomic.cuh>
### |
#pragma once
// TODO: Remove once torchvision has been updated to use the ATen header
#include <ATen/cuda/Atomic.cuh>
### |
// !!! This is a file automatically generated by hipify!!!
#pragma once
// TODO: Remove this header
#include <ATen/hip\DeviceUtils.cuh>
### |
#pragma once
// TODO: Remove this header
#include <ATen/cuda/DeviceUtils.cuh>
### |
// !!! This is a file automatically generated by hipify!!!
/**
* Copyright (c) 2016-present, Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.o... |
/**
* Copyright (c) 2016-present, Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable... |
// !!! This is a file automatically generated by hipify!!!
/**
* Copyright (c) 2016-present, Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.o... |
/**
* Copyright (c) 2016-present, Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable... |
#include "hip/hip_runtime.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include <c10/hip/HIPDeviceAssertion.h>
#include <c10/hip/HIPException.h>
#include <c10/hip/HIPFunctions.h>
#include <c10/hip/HIPStream.h>
#include <chrono>
#include <iostream>
#include <string>
#include <thread>
using ::testing::HasSubstr... |
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include <c10/cuda/CUDADeviceAssertion.h>
#include <c10/cuda/CUDAException.h>
#include <c10/cuda/CUDAFunctions.h>
#include <c10/cuda/CUDAStream.h>
#include <chrono>
#include <iostream>
#include <string>
#include <thread>
using ::testing::HasSubstr;
void did_not_fail_di... |
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/native/UnaryOps.h>
#include <ATen/native/hip\Loops.cuh>
#include <ATen/native/hip\JitLoops.cuh>
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
namespace... |
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/native/UnaryOps.h>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/cuda/JitLoops.cuh>
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
namespace at::native {
template<typename scalar_t>
struct AbsFunc... |
#include "hip/hip_runtime.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include <c10/hip/HIPDeviceAssertion.h>
#include <c10/hip/HIPException.h>
#include <c10/hip/HIPFunctions.h>
#include <c10/hip/HIPStream.h>
#include <chrono>
#include <iostream>
#include <string>
#include <thread>
using ::testing::HasSubstr... |
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include <c10/cuda/CUDADeviceAssertion.h>
#include <c10/cuda/CUDAException.h>
#include <c10/cuda/CUDAFunctions.h>
#include <c10/cuda/CUDAStream.h>
#include <chrono>
#include <iostream>
#include <string>
#include <thread>
using ::testing::HasSubstr;
__global__ void cud... |
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include <c10/hip/HIPDeviceAssertion.h>
#include <c10/hip/HIPException.h>
#include <c10/hip/HIPFunctions.h>
#include <c10/hip/HIPStream.h>
#include <chrono>
#include <iostream>
#i... |
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include <c10/cuda/CUDADeviceAssertion.h>
#include <c10/cuda/CUDAException.h>
#include <c10/cuda/CUDAFunctions.h>
#include <c10/cuda/CUDAStream.h>
#include <chrono>
#include <iostream>
#include <string>
#include <thread>
using ::testing::HasSubstr;
/**
* Device ke... |
#include "hip/hip_runtime.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include <c10/hip/HIPDeviceAssertion.h>
#include <c10/hip/HIPException.h>
#include <c10/hip/HIPFunctions.h>
#include <c10/hip/HIPStream.h>
#include <chrono>
#include <iostream>
#include <string>
#include <thread>
using ::testing::HasSubstr... |
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include <c10/cuda/CUDADeviceAssertion.h>
#include <c10/cuda/CUDAException.h>
#include <c10/cuda/CUDAFunctions.h>
#include <c10/cuda/CUDAStream.h>
#include <chrono>
#include <iostream>
#include <string>
#include <thread>
using ::testing::HasSubstr;
const auto max_asser... |
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include <c10/hip/HIPDeviceAssertion.h>
#include <c10/hip/HIPException.h>
#include <c10/hip/HIPFunctions.h>
#include <c10/hip/HIPStream.h>
#include <chrono>
#include <iostream>
#i... |
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include <c10/cuda/CUDADeviceAssertion.h>
#include <c10/cuda/CUDAException.h>
#include <c10/cuda/CUDAFunctions.h>
#include <c10/cuda/CUDAStream.h>
#include <chrono>
#include <iostream>
#include <string>
#include <thread>
using ::testing::HasSubstr;
const auto max_a... |
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include <c10/hip/HIPDeviceAssertion.h>
#include <c10/hip/HIPException.h>
#include <c10/hip/HIPFunctions.h>
#include <c10/hip/HIPStream.h>
#include <chrono>
#include <iostream>
#i... |
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include <c10/cuda/CUDADeviceAssertion.h>
#include <c10/cuda/CUDAException.h>
#include <c10/cuda/CUDAFunctions.h>
#include <c10/cuda/CUDAStream.h>
#include <chrono>
#include <iostream>
#include <string>
#include <thread>
using ::testing::HasSubstr;
const auto max_a... |
// !!! This is a file automatically generated by hipify!!!
#include "caffe2/contrib/aten/aten_op.h"
#include "caffe2/core/hip/context_gpu.h"
namespace caffe2 {
REGISTER_HIP_OPERATOR(ATen, ATenOp<HIPContext>);
template<>
at::Backend ATenOp<HIPContext>::backend() const {
return at::Backend::HIP;
}
}
### |
#include "caffe2/contrib/aten/aten_op.h"
#include "caffe2/core/context_gpu.h"
namespace caffe2 {
REGISTER_CUDA_OPERATOR(ATen, ATenOp<CUDAContext>);
template<>
at::Backend ATenOp<CUDAContext>::backend() const {
return at::Backend::CUDA;
}
}
### |
// !!! This is a file automatically generated by hipify!!!
#include "caffe2/contrib/gloo/broadcast_ops.h"
#include "caffe2/core/hip/context_gpu.h"
#include <gloo/hip_broadcast_one_to_all.h>
namespace caffe2 {
namespace gloo {
template <class Context>
void BroadcastOp<Context>::initializeAlgorithm() {
if (init_.t... |
#include "caffe2/contrib/gloo/broadcast_ops.h"
#include "caffe2/core/context_gpu.h"
#include <gloo/cuda_broadcast_one_to_all.h>
namespace caffe2 {
namespace gloo {
template <class Context>
void BroadcastOp<Context>::initializeAlgorithm() {
if (init_.template IsType<float>()) {
algorithm_.reset(new ::gloo::Cu... |
// !!! This is a file automatically generated by hipify!!!
#include "caffe2/contrib/gloo/common_world_ops.h"
#include "caffe2/core/hip/context_gpu.h"
#include <gloo/hip.h>
#include <gloo/transport/tcp/device.h>
namespace caffe2 {
namespace gloo {
template <>
void CreateCommonWorld<HIPContext>::initializeForContext... |
#include "caffe2/contrib/gloo/common_world_ops.h"
#include "caffe2/core/context_gpu.h"
#include <gloo/cuda.h>
#include <gloo/transport/tcp/device.h>
namespace caffe2 {
namespace gloo {
template <>
void CreateCommonWorld<CUDAContext>::initializeForContext() {
static std::once_flag once;
std::call_once(once, [&]... |
// !!! This is a file automatically generated by hipify!!!
#pragma once
#include <cstddef>
#include "caffe2/core/hip/common_gpu.h"
#include "caffe2/core/hip/context_gpu.h"
#include "caffe2/core/logging.h"
#include <rccl.h>
#include <unordered_map>
#define NCCL_VERSION_MIN(major, minor, patch) \
((NCCL_MAJOR > ma... |
#pragma once
#include <cstddef>
#include "caffe2/core/common_gpu.h"
#include "caffe2/core/context_gpu.h"
#include "caffe2/core/logging.h"
#include <nccl.h>
#include <unordered_map>
#define NCCL_VERSION_MIN(major, minor, patch) \
((NCCL_MAJOR > major) || \
((NCCL_MAJOR == major) && ... |
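The `NCCL_VERSION_MIN` macro is truncated in both cells above. Below is a plausible completion under the conventional lexicographic (major, minor, patch) comparison; the exact upstream text may differ.

```cpp
// Assumed reconstruction: true when the NCCL headers report a version of at
// least major.minor.patch.
#define NCCL_VERSION_MIN(major, minor, patch)                    \
  ((NCCL_MAJOR > major) ||                                       \
   ((NCCL_MAJOR == major) &&                                     \
    ((NCCL_MINOR > minor) ||                                     \
     ((NCCL_MINOR == minor) && (NCCL_PATCH >= patch)))))
```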
// !!! This is a file automatically generated by hipify!!!
#include "caffe2/core/blob.h"
#include "caffe2/core/blob_serialization.h"
#include "caffe2/core/hip/context_gpu.h"
namespace caffe2 {
namespace {
REGISTER_BLOB_DESERIALIZER(TensorHIP, TensorDeserializer);
}
} // namespace caffe2
### |
#include "caffe2/core/blob.h"
#include "caffe2/core/blob_serialization.h"
#include "caffe2/core/context_gpu.h"
namespace caffe2 {
namespace {
REGISTER_BLOB_DESERIALIZER(TensorCUDA, TensorDeserializer);
}
} // namespace caffe2
### |
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#define _USE_MATH_DEFINES
#include <ATen/native/Activation.h>
#include <cmath>
#include <thrust/tuple.h>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/core/TensorBase.h>
#include <c10/core/Scal... |
#define TORCH_ASSERT_NO_OPERATORS
#define _USE_MATH_DEFINES
#include <ATen/native/Activation.h>
#include <cmath>
#include <thrust/tuple.h>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/core/TensorBase.h>
#include <c10/core/Scalar.h>
#include <c10/cuda/CUDAMathCompat.h>
#include <ATen/c... |
// !!! This is a file automatically generated by hipify!!!
#include <gtest/gtest.h>
#include "caffe2/core/context.h"
#include "caffe2/core/hip/context_gpu.h"
#include "caffe2/core/event.h"
namespace caffe2 {
TEST(EventHIPTest, EventBasics) {
if (!HasHipGPU())
return;
DeviceOption device_cpu;
device_cpu.set... |
#include <gtest/gtest.h>
#include "caffe2/core/context.h"
#include "caffe2/core/context_gpu.h"
#include "caffe2/core/event.h"
namespace caffe2 {
TEST(EventCUDATest, EventBasics) {
if (!HasCudaGPU())
return;
DeviceOption device_cpu;
device_cpu.set_device_type(PROTO_CPU);
DeviceOption device_cuda;
device... |
// !!! This is a file automatically generated by hipify!!!
#include <string>
#include <gtest/gtest.h>
#include "caffe2/core/hip/common_gpu.h"
#include "caffe2/core/operator.h"
namespace caffe2 {
class JustTest : public OperatorBase {
public:
using OperatorBase::OperatorBase;
bool Run(int /* unused */ /*stream_... |
#include <string>
#include <gtest/gtest.h>
#include "caffe2/core/common_gpu.h"
#include "caffe2/core/operator.h"
namespace caffe2 {
class JustTest : public OperatorBase {
public:
using OperatorBase::OperatorBase;
bool Run(int /* unused */ /*stream_id*/) override {
return true;
}
virtual std::string typ... |
// !!! This is a file automatically generated by hipify!!!
#include "caffe2/core/hip/context_gpu.h"
#include "caffe2/db/create_db_op.h"
namespace caffe2 {
REGISTER_HIP_OPERATOR(CreateDB, CreateDBOp<HIPContext>);
} // namespace caffe2
### |
#include "caffe2/core/context_gpu.h"
#include "caffe2/db/create_db_op.h"
namespace caffe2 {
REGISTER_CUDA_OPERATOR(CreateDB, CreateDBOp<CUDAContext>);
} // namespace caffe2
### |
// !!! This is a file automatically generated by hipify!!!
#include "caffe2/distributed/file_store_handler_op.h"
#if !defined(USE_ROCM)
#include <caffe2/core/hip/context_gpu.h>
#else
#include <caffe2/core/hip/context_gpu.h>
#endif
namespace caffe2 {
#if !defined(USE_ROCM)
REGISTER_HIP_OPERATOR(
FileStoreHandler... |
#include "caffe2/distributed/file_store_handler_op.h"
#if !defined(USE_ROCM)
#include <caffe2/core/context_gpu.h>
#else
#include <caffe2/core/hip/context_gpu.h>
#endif
namespace caffe2 {
#if !defined(USE_ROCM)
REGISTER_CUDA_OPERATOR(
FileStoreHandlerCreate,
FileStoreHandlerCreateOp<CUDAContext>);
#else
REGI... |
// !!! This is a file automatically generated by hipify!!!
#include "caffe2/distributed/redis_store_handler_op.h"
#if !defined(USE_ROCM)
#include <caffe2/core/hip/context_gpu.h>
#else
#include <caffe2/core/hip/context_gpu.h>
#endif
namespace caffe2 {
#if !defined(USE_ROCM)
REGISTER_HIP_OPERATOR(
RedisStoreHandl... |
#include "caffe2/distributed/redis_store_handler_op.h"
#if !defined(USE_ROCM)
#include <caffe2/core/context_gpu.h>
#else
#include <caffe2/core/hip/context_gpu.h>
#endif
namespace caffe2 {
#if !defined(USE_ROCM)
REGISTER_CUDA_OPERATOR(
RedisStoreHandlerCreate,
RedisStoreHandlerCreateOp<CUDAContext>);
#else
R... |
// !!! This is a file automatically generated by hipify!!!
#include "caffe2/core/hip/common_gpu.h"
#include "caffe2/core/hip/context_gpu.h"
#include "caffe2/image/image_input_op.h"
namespace caffe2 {
template <>
bool ImageInputOp<HIPContext>::ApplyTransformOnGPU(
const std::vector<std::int64_t>& dims,
const ... |
#include "caffe2/core/common_gpu.h"
#include "caffe2/core/context_gpu.h"
#include "caffe2/image/image_input_op.h"
namespace caffe2 {
template <>
bool ImageInputOp<CUDAContext>::ApplyTransformOnGPU(
const std::vector<std::int64_t>& dims,
const c10::Device& type) {
// GPU transform kernel allows explicitly s... |
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe2/core/hip/context_gpu.h"
#include "caffe2/image/transform_gpu.h"
#include "caffe2/utils/conversions.h"
/**
*
* Copyright (c) 2016, NVIDIA CORPORATION, All rights reserved
* Distributed under 2-clause BSD license... |
#include "caffe2/core/context_gpu.h"
#include "caffe2/image/transform_gpu.h"
#include "caffe2/utils/conversions.h"
/**
*
* Copyright (c) 2016, NVIDIA CORPORATION, All rights reserved
* Distributed under 2-clause BSD license; see accompanying LICENSE file
*
**/
namespace caffe2 {
namespace {
// input in (int8,... |
// !!! This is a file automatically generated by hipify!!!
#ifndef CAFFE2_IMAGE_TRANSFORM_GPU_H_
#define CAFFE2_IMAGE_TRANSFORM_GPU_H_
/**
*
* Copyright (c) 2016, NVIDIA CORPORATION, All rights reserved
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided ... |
#ifndef CAFFE2_IMAGE_TRANSFORM_GPU_H_
#define CAFFE2_IMAGE_TRANSFORM_GPU_H_
/**
*
* Copyright (c) 2016, NVIDIA CORPORATION, All rights reserved
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistribut... |
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe2/operators/abs_op.h"
#include <algorithm>
#include <functional>
#include "caffe2/core/hip/context_gpu.h"
namespace caffe2 {
namespace {
template <typename T>
__global__ void
AbsGradientHIPKernel(const int N, co... |
#include "caffe2/operators/abs_op.h"
#include <algorithm>
#include <functional>
#include "caffe2/core/context_gpu.h"
namespace caffe2 {
namespace {
template <typename T>
__global__ void
AbsGradientCUDAKernel(const int N, const T* dY, const T* X, T* dX) {
CUDA_1D_KERNEL_LOOP(i, N) {
#if __CUDA_ARCH__ >= 350
... |
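The `AbsGradientCUDAKernel` body is truncated in the cell above. A hedged completion sketch: d|x|/dx is sign(x), so the gradient is `dY * sign(X)`, and the `__CUDA_ARCH__ >= 350` branch presumably routes loads through `__ldg`. This assumes `CUDA_1D_KERNEL_LOOP` from caffe2's `common_gpu.h`; it is an illustration, not the file's verbatim body.

```cpp
// Sketch: elementwise abs-gradient via a grid-stride loop, assuming the
// caffe2 CUDA_1D_KERNEL_LOOP macro is in scope.
template <typename T>
__global__ void
AbsGradientCUDAKernel(const int N, const T* dY, const T* X, T* dX) {
  CUDA_1D_KERNEL_LOOP(i, N) {
#if __CUDA_ARCH__ >= 350
    dX[i] = __ldg(X + i) == T(0)
        ? T(0)
        : __ldg(dY + i) * (__ldg(X + i) > T(0) ? T(1) : T(-1));
#else
    dX[i] = X[i] == T(0) ? T(0) : dY[i] * (X[i] > T(0) ? T(1) : T(-1));
#endif
  }
}
```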
// !!! This is a file automatically generated by hipify!!!
#include "caffe2/core/hip/context_gpu.h"
#include "caffe2/operators/accumulate_op.h"
namespace caffe2 {
REGISTER_HIP_OPERATOR(Accumulate, AccumulateOp<float, HIPContext>);
} // namespace caffe2
### |
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/accumulate_op.h"
namespace caffe2 {
REGISTER_CUDA_OPERATOR(Accumulate, AccumulateOp<float, CUDAContext>);
} // namespace caffe2
### |
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#define _USE_MATH_DEFINES
#include <ATen/native/Activation.h>
#include <cmath>
#include <thrust/tuple.h>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/core/TensorBase.h>
#include <c10/core/Scal... |
#define TORCH_ASSERT_NO_OPERATORS
#define _USE_MATH_DEFINES
#include <ATen/native/Activation.h>
#include <cmath>
#include <thrust/tuple.h>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/core/TensorBase.h>
#include <c10/core/Scalar.h>
#include <c10/cuda/CUDAMathCompat.h>
#include <ATen/c... |
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe2/core/hip/context_gpu.h"
#include "caffe2/operators/accuracy_op.h"
#include "caffe2/utils/hip/GpuAtomics.cuh"
#include "caffe2/utils/math.h"
#include "caffe2/utils/cub_namespace.cuh"
#include <hipcub/hipcub.hpp>
n... |
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/accuracy_op.h"
#include "caffe2/utils/GpuAtomics.cuh"
#include "caffe2/utils/math.h"
#include "caffe2/utils/cub_namespace.cuh"
#include <cub/block/block_reduce.cuh>
namespace caffe2 {
namespace {
__global__ void AccuracyKernel(
const int N,
con... |
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe2/operators/acos_op.h"
#include <algorithm>
#include <functional>
#include "caffe2/core/hip/context_gpu.h"
namespace caffe2 {
namespace {
__global__ void AcosGradientHIPKernel(
const int N,
const float* ... |
#include "caffe2/operators/acos_op.h"
#include <algorithm>
#include <functional>
#include "caffe2/core/context_gpu.h"
namespace caffe2 {
namespace {
__global__ void AcosGradientCUDAKernel(
const int N,
const float* dY,
const float* X,
float* dX) {
CUDA_1D_KERNEL_LOOP(i, N) {
#if __CUDA_ARCH__ >=... |
// !!! This is a file automatically generated by hipify!!!
#include "caffe2/core/hip/context_gpu.h"
#include "caffe2/operators/alias_with_name.h"
namespace caffe2 {
REGISTER_HIP_OPERATOR(AliasWithName, AliasWithNameOp<HIPContext>);
} // namespace caffe2
C10_EXPORT_CAFFE2_OP_TO_C10_HIP(
AliasWithName,
caffe... |
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/alias_with_name.h"
namespace caffe2 {
REGISTER_CUDA_OPERATOR(AliasWithName, AliasWithNameOp<CUDAContext>);
} // namespace caffe2
C10_EXPORT_CAFFE2_OP_TO_C10_CUDA(
AliasWithName,
caffe2::AliasWithNameOp<caffe2::CUDAContext>);
### |
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe2/operators/asin_op.h"
#include <algorithm>
#include <functional>
#include "caffe2/core/hip/context_gpu.h"
namespace caffe2 {
namespace {
__global__ void AsinGradientHIPKernel(
const int N,
const float* ... |
#include "caffe2/operators/asin_op.h"
#include <algorithm>
#include <functional>
#include "caffe2/core/context_gpu.h"
namespace caffe2 {
namespace {
__global__ void AsinGradientCUDAKernel(
const int N,
const float* dY,
const float* X,
float* dX) {
CUDA_1D_KERNEL_LOOP(i, N) {
#if __CUDA_ARCH__ >=... |
// !!! This is a file automatically generated by hipify!!!
#include "caffe2/core/hip/context_gpu.h"
#include "caffe2/operators/assert_op.h"
namespace caffe2 {
REGISTER_HIP_OPERATOR(Assert, AssertOp<HIPContext>);
} // namespace caffe2
### |
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/assert_op.h"
namespace caffe2 {
REGISTER_CUDA_OPERATOR(Assert, AssertOp<CUDAContext>);
} // namespace caffe2
### |
// !!! This is a file automatically generated by hipify!!!
#include "caffe2/core/hip/context_gpu.h"
#include "caffe2/operators/async_net_barrier_op.h"
namespace caffe2 {
REGISTER_HIP_OPERATOR(AsyncNetBarrier, AsyncNetBarrierOp<HIPContext>);
} // namespace caffe2
### |
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/async_net_barrier_op.h"
namespace caffe2 {
REGISTER_CUDA_OPERATOR(AsyncNetBarrier, AsyncNetBarrierOp<CUDAContext>);
} // namespace caffe2
### |
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe2/operators/atan_op.h"
#include <algorithm>
#include <functional>
#include "caffe2/core/hip/context_gpu.h"
namespace caffe2 {
namespace {
template <typename T>
__global__ void
AtanGradientHIPKernel(const int N, ... |
#include "caffe2/operators/atan_op.h"
#include <algorithm>
#include <functional>
#include "caffe2/core/context_gpu.h"
namespace caffe2 {
namespace {
template <typename T>
__global__ void
AtanGradientCUDAKernel(const int N, const T* dY, const T* X, T* dX) {
CUDA_1D_KERNEL_LOOP(i, N) {
#if __CUDA_ARCH__ >= 350
... |
// !!! This is a file automatically generated by hipify!!!
#include "caffe2/operators/batch_matmul_op.h"
#include "caffe2/core/hip/context_gpu.h"
namespace caffe2 {
template <>
bool BatchMatMulOp<HIPContext, DefaultEngine>::RunOnDevice() {
return DispatchHelper<TensorTypes<float, at::Half>>::call(this, Input(0)... |
#include "caffe2/operators/batch_matmul_op.h"
#include "caffe2/core/context_gpu.h"
namespace caffe2 {
template <>
bool BatchMatMulOp<CUDAContext, DefaultEngine>::RunOnDevice() {
return DispatchHelper<TensorTypes<float, at::Half>>::call(this, Input(0));
}
REGISTER_CUDA_OPERATOR(BatchMatMul, BatchMatMulOp<CUDACo... |
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe2/core/hip/context_gpu.h"
#include "caffe2/operators/bucketize_op.h"
#include <thrust/binary_search.h>
#include <thrust/device_vector.h>
namespace caffe2 {
__global__ void BucketizeOpKernel(
const int N,
c... |
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/bucketize_op.h"
#include <thrust/binary_search.h>
#include <thrust/device_vector.h>
namespace caffe2 {
__global__ void BucketizeOpKernel(
const int N,
const int M,
const float* bounds,
const float* X,
int32_t* out) {
CUDA_1D_KERNE... |
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe2/operators/cbrt_op.h"
#include <algorithm>
#include <functional>
#include "caffe2/core/hip/context_gpu.h"
namespace caffe2 {
namespace {
template <typename T>
__global__ void
CbrtGradientHIPKernel(const int N, ... |
#include "caffe2/operators/cbrt_op.h"
#include <algorithm>
#include <functional>
#include "caffe2/core/context_gpu.h"
namespace caffe2 {
namespace {
template <typename T>
__global__ void
CbrtGradientCUDAKernel(const int N, const T* dY, const T* Y, T* dX) {
CUDA_1D_KERNEL_LOOP(i, N) {
#if __CUDA_ARCH__ >= 350
... |
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#define _USE_MATH_DEFINES
#include <ATen/native/Activation.h>
#include <cmath>
#include <thrust/tuple.h>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/core/TensorBase.h>
#include <c10/core/Scal... |
#define TORCH_ASSERT_NO_OPERATORS
#define _USE_MATH_DEFINES
#include <ATen/native/Activation.h>
#include <cmath>
#include <thrust/tuple.h>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/core/TensorBase.h>
#include <c10/core/Scalar.h>
#include <c10/cuda/CUDAMathCompat.h>
#include <ATen/c... |