[Offload][CUDA] Allow CUDA kernels to use LLVM/Offload (#94549)
2024-08-12 17:44:58 -07:00

Through the new `-foffload-via-llvm` flag, CUDA kernels can now be lowered to
the LLVM/Offload API. On the Clang side, this is simply done by using the
OpenMP offload toolchain and emitting calls to `llvm*` functions to orchestrate
the kernel launch rather than `cuda*` functions. These `llvm*` functions are
implemented on top of the existing LLVM/Offload API.

As we are about to redefine the Offload API, this will help us in the design
process as a second offload language.

We do not support any CUDA APIs yet, however, we could:
https://www.osti.gov/servlets/purl/1892137

For proper host execution we need to resurrect/rebase
https://tianshilei.me/wp-content/uploads/2021/12/llpp-2021.pdf
(which was designed for debugging).

```
❯❯❯ cat test.cu
extern "C" {
void *llvm_omp_target_alloc_shared(size_t Size, int DeviceNum);
void llvm_omp_target_free_shared(void *DevicePtr, int DeviceNum);
}

__global__ void square(int *A) { *A = 42; }

int main(int argc, char **argv) {
  int DevNo = 0;
  int *Ptr = reinterpret_cast<int *>(llvm_omp_target_alloc_shared(4, DevNo));
  *Ptr = 7;
  printf("Ptr %p, *Ptr %i\n", Ptr, *Ptr);
  square<<<1, 1>>>(Ptr);
  printf("Ptr %p, *Ptr %i\n", Ptr, *Ptr);
  llvm_omp_target_free_shared(Ptr, DevNo);
}

❯❯❯ clang++ test.cu -O3 -o test123 -foffload-via-llvm --offload-arch=native

❯❯❯ llvm-objdump --offloading test123
test123:        file format elf64-x86-64

OFFLOADING IMAGE [0]:
kind            elf
arch            gfx90a
triple          amdgcn-amd-amdhsa
producer        openmp

❯❯❯ LIBOMPTARGET_INFO=16 ./test123
Ptr 0x155448ac8000, *Ptr 7
Ptr 0x155448ac8000, *Ptr 42
```
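To situate the file below: conceptually, Clang lowers a triple-chevron launch
into a push of the launch configuration, a call to the kernel's stub, and,
inside the stub, a pop of that configuration followed by the actual launch.
The following is a minimal, self-contained sketch of that sequence against the
entry points defined in this file; the helper `launchSquare` and its
`KernelStub` parameter are illustrative placeholders, not part of the API or
of Clang's verbatim output.

```
#include <cstddef>

struct dim3 {
  unsigned x = 0, y = 0, z = 0;
};

extern "C" {
unsigned __llvmPushCallConfiguration(dim3 GridSize, dim3 BlockSize,
                                     size_t SharedMemory, void *Stream);
unsigned __llvmPopCallConfiguration(dim3 *GridSize, dim3 *BlockSize,
                                    size_t *SharedMemory, void *Stream);
unsigned llvmLaunchKernel(const void *Func, dim3 GridDim, dim3 BlockDim,
                          void *Args, size_t SharedMem, void *Stream);
}

// Hypothetical stand-in for what `square<<<1, 1>>>(Ptr)` expands to;
// `KernelStub` is the host-side handle associated with the kernel.
void launchSquare(const void *KernelStub, int *Ptr) {
  // The call site records the launch configuration...
  __llvmPushCallConfiguration({1, 1, 1}, {1, 1, 1}, /*SharedMemory=*/0,
                              /*Stream=*/nullptr);

  // ...and the kernel's stub retrieves it and performs the launch.
  dim3 Grid, Block;
  size_t SharedMem;
  void *Stream;
  __llvmPopCallConfiguration(&Grid, &Block, &SharedMem, &Stream);

  void *KernelArgs[] = {&Ptr};
  llvmLaunchKernel(KernelStub, Grid, Block, KernelArgs, SharedMem, Stream);
}
```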
//===------ API.cpp - Kernel Language (CUDA/HIP) entry points ---*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the host entry points used when CUDA/HIP kernel
// launches are lowered to the LLVM/Offload API via -foffload-via-llvm.
//
//===----------------------------------------------------------------------===//

#include "Shared/APITypes.h"
#include <cstdio>
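
// Host-side equivalent of CUDA's `dim3` launch-dimensions type.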
struct dim3 {
  unsigned x = 0, y = 0, z = 0;
};
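
// Launch configuration recorded by __llvmPushCallConfiguration and consumed
// by __llvmPopCallConfiguration when the kernel is actually launched.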
struct __omp_kernel_t {
  dim3 __grid_size;
  dim3 __block_size;
  size_t __shared_memory;
  void *__stream;
};
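
// One in-flight launch configuration is tracked per host thread.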
static __omp_kernel_t __current_kernel = {};
#pragma omp threadprivate(__current_kernel)

extern "C" {

// TODO: There is little reason we need to keep these names or the way the
// calls are issued. For now we do so to avoid modifying Clang's CUDA codegen.
// It is unclear when we actually need to push/pop configurations.
unsigned __llvmPushCallConfiguration(dim3 __grid_size, dim3 __block_size,
                                     size_t __shared_memory, void *__stream) {
  __omp_kernel_t &__kernel = __current_kernel;
  __kernel.__grid_size = __grid_size;
  __kernel.__block_size = __block_size;
  __kernel.__shared_memory = __shared_memory;
  __kernel.__stream = __stream;
  return 0;
}
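
// Retrieves the configuration recorded by the matching
// __llvmPushCallConfiguration call, right before the actual launch.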
unsigned __llvmPopCallConfiguration(dim3 *__grid_size, dim3 *__block_size,
                                    size_t *__shared_memory, void *__stream) {
  __omp_kernel_t &__kernel = __current_kernel;
  *__grid_size = __kernel.__grid_size;
  *__block_size = __kernel.__block_size;
  *__shared_memory = __kernel.__shared_memory;
  *reinterpret_cast<void **>(__stream) = __kernel.__stream;
  return 0;
}
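
// Kernel launch entry point of the OpenMP offload runtime (libomptarget).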
int __tgt_target_kernel(void *Loc, int64_t DeviceId, int32_t NumTeams,
                        int32_t ThreadLimit, const void *HostPtr,
                        KernelArgsTy *Args);
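
// Launches a kernel via the OpenMP offload runtime: grid dimensions map to
// the number of teams, block dimensions to the thread limit. Note that the
// stream argument is currently unused.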
unsigned llvmLaunchKernel(const void *func, dim3 gridDim, dim3 blockDim,
                          void *args, size_t sharedMem, void *stream) {
  KernelArgsTy Args = {};
  Args.DynCGroupMem = sharedMem;
  Args.NumTeams[0] = gridDim.x;
  Args.NumTeams[1] = gridDim.y;
  Args.NumTeams[2] = gridDim.z;
  Args.ThreadLimit[0] = blockDim.x;
  Args.ThreadLimit[1] = blockDim.y;
  Args.ThreadLimit[2] = blockDim.z;
  Args.ArgPtrs = reinterpret_cast<void **>(args);
  Args.Flags.IsCUDA = true;
  return __tgt_target_kernel(nullptr, 0, gridDim.x, blockDim.x, func, &Args);
}
} // extern "C"