|
| 1 | +#pragma once |
| 2 | + |
| 3 | +#ifdef USE_ROCM |
| 4 | +//////////////////////////////////////// |
| 5 | +// For compatibility with CUDA and ROCm |
| 6 | +//////////////////////////////////////// |
| 7 | + #include <hip/hip_runtime_api.h> |
| 8 | + |
| 9 | +extern "C" { |
 #ifndef CUDA_SUCCESS
    // Alias the CUDA driver success code to HIP's so shared code can compare
    // driver-API return values against CUDA_SUCCESS on both platforms.
    #define CUDA_SUCCESS hipSuccess
 #endif // CUDA_SUCCESS

// Driver-API type aliases, per the HIPIFY equivalence tables:
// https://rocm.docs.amd.com/projects/HIPIFY/en/latest/tables/CUDA_Driver_API_functions_supported_by_HIP.html
// NOTE(review): CUDA's CUdevice and HIP's hipDevice_t are both `int`;
// `unsigned long long` here is wider than either — confirm this does not
// produce narrowing warnings when forwarded to hipDevicePrimaryCtxRetain.
typedef unsigned long long CUdevice;
typedef hipDeviceptr_t CUdeviceptr;
typedef hipError_t CUresult;
typedef hipCtx_t CUcontext;
typedef hipStream_t CUstream;
typedef hipMemGenericAllocationHandle_t CUmemGenericAllocationHandle;
typedef hipMemAllocationGranularity_flags CUmemAllocationGranularity_flags;
typedef hipMemAllocationProp CUmemAllocationProp;
typedef hipMemAccessDesc CUmemAccessDesc;

 // Enum-value aliases used when filling CUmemAllocationProp / CUmemAccessDesc.
 #define CU_MEM_ALLOCATION_TYPE_PINNED hipMemAllocationTypePinned
 #define CU_MEM_LOCATION_TYPE_DEVICE hipMemLocationTypeDevice
 #define CU_MEM_ACCESS_FLAGS_PROT_READWRITE hipMemAccessFlagsProtReadWrite
 #define CU_MEM_ALLOC_GRANULARITY_MINIMUM hipMemAllocationGranularityMinimum

 // No compression requested; value taken from the CUDA driver enum
 // CUmemAllocationCompType (CU_MEM_ALLOCATION_COMP_NONE = 0x0).
 // https://docs.nvidia.com/cuda/cuda-driver-api/group__CUDA__TYPES.html
 #define CU_MEM_ALLOCATION_COMP_NONE 0x0
| 32 | + |
// Error Handling
// https://docs.nvidia.com/cuda/archive/11.4.4/cuda-driver-api/group__CUDA__ERROR.html

// CUDA-style wrapper over hipGetErrorString: stores a human-readable string
// for `error` into *pStr and reports success.
// `static inline` is required: a non-inline definition in a #pragma once
// header produces duplicate-symbol link errors (ODR violation) as soon as
// two translation units include this file.
static inline CUresult cuGetErrorString(CUresult error, const char** pStr) {
  *pStr = hipGetErrorString(error);
  return CUDA_SUCCESS;
}
| 39 | + |
// Context Management
// https://docs.nvidia.com/cuda/cuda-driver-api/group__CUDA__CTX.html

// Returns the calling thread's current context via *ctx.
// The hipCtx API is deprecated on the AMD platform; it exists here only to
// mirror the cuCtx driver API on the NVIDIA platform.
// `static inline` avoids duplicate-symbol link errors when this header is
// included from multiple translation units.
static inline CUresult cuCtxGetCurrent(CUcontext* ctx) {
  return hipCtxGetCurrent(ctx);
}
| 47 | + |
// Binds `ctx` as the calling thread's current context.
// The hipCtx API is deprecated on the AMD platform; it exists here only to
// mirror the cuCtx driver API on the NVIDIA platform.
// `static inline` avoids duplicate-symbol link errors when this header is
// included from multiple translation units.
static inline CUresult cuCtxSetCurrent(CUcontext ctx) {
  return hipCtxSetCurrent(ctx);
}
| 53 | + |
// Primary Context Management
// https://docs.nvidia.com/cuda/cuda-driver-api/group__CUDA__PRIMARY__CTX.html

// Retains the primary context for device `dev`, returning it via *ctx.
// `static inline` avoids duplicate-symbol link errors when this header is
// included from multiple translation units.
static inline CUresult cuDevicePrimaryCtxRetain(CUcontext* ctx, CUdevice dev) {
  return hipDevicePrimaryCtxRetain(ctx, dev);
}
| 59 | + |
// Virtual Memory Management
// https://docs.nvidia.com/cuda/cuda-driver-api/group__CUDA__VA.html

// Frees a virtual address range previously reserved with cuMemAddressReserve.
// `size` must match the reserved range.
// `static inline` avoids duplicate-symbol link errors when this header is
// included from multiple translation units.
static inline CUresult cuMemAddressFree(CUdeviceptr ptr, size_t size) {
  return hipMemAddressFree(ptr, size);
}
| 65 | + |
// Reserves a virtual address range of `size` bytes with the given alignment,
// optionally at a fixed starting address `addr` (0 = let the driver choose),
// returning the reserved base via *ptr.
// `static inline` avoids duplicate-symbol link errors when this header is
// included from multiple translation units.
static inline CUresult cuMemAddressReserve(CUdeviceptr* ptr, size_t size,
                                           size_t alignment, CUdeviceptr addr,
                                           unsigned long long flags) {
  return hipMemAddressReserve(ptr, size, alignment, addr, flags);
}
| 70 | + |
// Creates a physical memory allocation of `size` bytes described by `prop`,
// returning an allocation handle via *handle. The handle must be mapped with
// cuMemMap before use and released with cuMemRelease.
// `static inline` avoids duplicate-symbol link errors when this header is
// included from multiple translation units.
static inline CUresult cuMemCreate(CUmemGenericAllocationHandle* handle,
                                   size_t size, const CUmemAllocationProp* prop,
                                   unsigned long long flags) {
  return hipMemCreate(handle, size, prop, flags);
}
| 76 | + |
// Queries the allocation granularity (in bytes) for allocations described by
// `prop`; `option` selects minimum vs. recommended granularity. Sizes passed
// to cuMemCreate / cuMemAddressReserve must be multiples of this value.
// `static inline` avoids duplicate-symbol link errors when this header is
// included from multiple translation units.
static inline CUresult cuMemGetAllocationGranularity(
    size_t* granularity, const CUmemAllocationProp* prop,
    CUmemAllocationGranularity_flags option) {
  return hipMemGetAllocationGranularity(granularity, prop, option);
}
| 82 | + |
// Maps `size` bytes of the physical allocation `handle` (starting at `offset`)
// into the reserved virtual address range beginning at `dptr`. Access must
// then be enabled with cuMemSetAccess before the memory can be touched.
// `static inline` avoids duplicate-symbol link errors when this header is
// included from multiple translation units.
static inline CUresult cuMemMap(CUdeviceptr dptr, size_t size, size_t offset,
                                CUmemGenericAllocationHandle handle,
                                unsigned long long flags) {
  return hipMemMap(dptr, size, offset, handle, flags);
}
| 88 | + |
// Releases a physical allocation handle created with cuMemCreate. The backing
// memory is freed once the handle is released and all mappings are unmapped.
// `static inline` avoids duplicate-symbol link errors when this header is
// included from multiple translation units.
static inline CUresult cuMemRelease(CUmemGenericAllocationHandle handle) {
  return hipMemRelease(handle);
}
| 92 | + |
// Sets access permissions on the mapped range [ptr, ptr + size) using `count`
// access descriptors from `desc` (one per device location).
// `static inline` avoids duplicate-symbol link errors when this header is
// included from multiple translation units.
static inline CUresult cuMemSetAccess(CUdeviceptr ptr, size_t size,
                                      const CUmemAccessDesc* desc,
                                      size_t count) {
  return hipMemSetAccess(ptr, size, desc, count);
}
| 97 | + |
// Unmaps the physical backing from the virtual address range [ptr, ptr + size).
// The range remains reserved and may be remapped or freed afterwards.
// `static inline` avoids duplicate-symbol link errors when this header is
// included from multiple translation units.
static inline CUresult cuMemUnmap(CUdeviceptr ptr, size_t size) {
  return hipMemUnmap(ptr, size);
}
| 101 | +} // extern "C" |
| 102 | + |
| 103 | +#else |
| 104 | +//////////////////////////////////////// |
| 105 | +// Import CUDA headers for NVIDIA GPUs |
| 106 | +//////////////////////////////////////// |
| 107 | + #include <cuda_runtime_api.h> |
| 108 | + #include <cuda.h> |
| 109 | +#endif |
0 commit comments