This article is shared from the Huawei Cloud community post "Getting Started with Kernel Launch Invocation of Ascend C Custom Operators", by jackwangcumt.
1 Kernel Launch Overview
According to the official documentation, Ascend C exposes a basic kernel-function invocation (Kernel Launch) interface in order to simplify the development flow of Ascend C custom operators and to provide easier-to-use debugging and tuning. Once the operator kernel function and its Tiling implementation are complete, the operator can be invoked through the AscendCL runtime interfaces to build your own inference application. A lightweight kernel development project is also provided: the developer only needs to supply the kernel-side implementation, and the project framework makes it quick to get a Kernel Launch working. This article assumes that the operator project from the post "Ascend C Custom PRelu Operator" (https://bbs.huaweicloud.com/blogs/425244) has already been completed. Please note:
- In the current version, 8.0.RC1.alpha002, Kernel Launch open programming is a trial feature and is not supported for use in commercial products.
- The current version, 8.0.RC1.alpha002, does not yet support obtaining the user workspace.
2 Kernel Launch Invocation Method
The ACLRT_LAUNCH_KERNEL invocation method is a functional enhancement of the kernel invocation operator (<<<...>>>) method. The kernel function call is asynchronous, and the interface is used as follows:
ACLRT_LAUNCH_KERNEL(kernel_name)(blockDim, stream, argument list);
- kernel_name: name of the operator's kernel function.
- blockDim: specifies how many cores the kernel function will run on. Each core that executes the kernel is assigned a logical ID, block_idx, which can be obtained inside the kernel implementation by calling GetBlockIdx.
- stream: of type aclrtStream. A stream maintains the execution order of asynchronous operations, ensuring they execute on the Device in the order they were issued in the application code.
- argument list: the argument list, which must match the kernel function's parameter list.
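As a minimal sketch of this pattern (the kernel name add_custom, the generated header name, and the device buffers below are illustrative assumptions, not part of this article's PRelu project), an asynchronous launch followed by a stream synchronization looks like this:

#include "acl/acl.h"
#include "aclrtlaunch_add_custom.h" // header generated for the kernel by the build framework (name assumed)

void LaunchAddCustom(aclrtStream stream, uint8_t* xDevice, uint8_t* yDevice, uint8_t* zDevice)
{
    uint32_t blockDim = 8; // run the kernel on 8 cores; each core sees its own block_idx
    // Asynchronous launch: this only enqueues the kernel on the stream and returns immediately
    ACLRT_LAUNCH_KERNEL(add_custom)(blockDim, stream, xDevice, yDevice, zDevice);
    // Wait for the kernel to finish before reading results back from the Device
    aclrtSynchronizeStream(stream);
}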
To help developers quickly debug an operator via Kernel Launch, an official lightweight operator project is provided; you can develop your operator based on the sample code and project framework it contains. The operator project supports the following:
- Debugging features such as PRINTF and DumpTensor.
- The application built by the project can collect and parse performance data via the msprof command line.
You can refer to the sample project at https://gitee.com/ascend/samples/blob/master/operator/AddCustomSample/KernelLaunch/AddKernelInvocationTilingNeo ; its directory structure is as follows:
AddKernelInvocationNeo
|-- cmake                // CMake build files
|-- scripts
|   ├── gen_data.py      // script that generates the input data and the golden (reference) data
|   ├── verify_result.py // script that verifies the output data against the golden data
|-- CMakeLists.txt       // CMake build configuration file
|-- add_custom.cpp       // kernel implementation of the vector operator
|-- data_utils.h         // data read/write helper functions
|-- main.cpp             // main function: the application that invokes the operator, covering both the CPU and NPU domains
|-- run.sh               // script that builds and runs the operator
Based on this operator project, the operator development steps are:
- Implement the kernel side of the operator.
- Write the operator-invoking application main.cpp.
- Write the CMake build configuration file CMakeLists.txt.
- As needed, modify gen_data.py, which generates the input data and the golden data, and verify_result.py, which checks the output data against the golden data.
- As needed, modify the build-and-run script run.sh and execute it to build the operator, run it, and verify the result.
3 Kernel Launch Implementation
Create a new directory named KernelLaunch under the PReluSample directory to hold the project code for the Kernel Launch invocation method. Here I use the official sample project https://gitee.com/ascend/samples/tree/master/operator/LeakyReluCustomSample/KernelLaunch/LeakyReluKernelInvocation as a reference and modify the relevant parameters. The p_relu_custom.cpp code is shown below:
#include "kernel_operator.h" using namespace AscendC; constexpr int32_t BUFFER_NUM = 2; constexpr int32_t TOTAL_LENGTH = 8 * 200 * 1024; constexpr int32_t TILE_NUM = 32; constexpr float alpha = 0.002; class KernelPRelu { public: __aicore__ inline KernelPRelu() {} __aicore__ inline void Init(GM_ADDR x, GM_ADDR y, uint32_t totalLength, uint32_t tileNum, float alpha) { PRINTF("[npu debug] >>> GetBlockNum() %d", GetBlockNum()); ASSERT(GetBlockNum() != 0 && "block dim can not be zero!"); this->blockLength = totalLength / GetBlockNum(); this->tileNum = tileNum; this->alpha = static_cast<float>(alpha); ASSERT(tileNum != 0 && "tile num can not be zero!"); this->tileLength = this->blockLength / tileNum / BUFFER_NUM; // get start index for current core, core parallel xGm.SetGlobalBuffer((__gm__ float*)x + this->blockLength * GetBlockIdx(), this->blockLength); yGm.SetGlobalBuffer((__gm__ float*)y + this->blockLength * GetBlockIdx(), this->blockLength); // pipe alloc memory to queue, the unit is Bytes pipe.InitBuffer(inQueueX, BUFFER_NUM, this->tileLength * sizeof(float)); pipe.InitBuffer(outQueueY, BUFFER_NUM, this->tileLength * sizeof(float)); pipe.InitBuffer(tmpBuffer1, this->tileLength * sizeof(float)); //pipe.InitBuffer(tmpBuffer2, this->tileLength * sizeof(float)); } __aicore__ inline void Process() { // loop count need to be doubled, due to double buffer int32_t loopCount = this->tileNum * BUFFER_NUM; // tiling strategy, pipeline parallel for (int32_t i = 0; i < loopCount; i++) { CopyIn(i); Compute(i); CopyOut(i); } } private: __aicore__ inline void CopyIn(int32_t progress) { // alloc tensor from queue memory LocalTensor<float> xLocal = inQueueX.AllocTensor<float>(); // copy progress_th tile from global tensor to local tensor DataCopy(xLocal, xGm[progress * this->tileLength], this->tileLength); // enque input tensors to VECIN queue inQueueX.EnQue(xLocal); } __aicore__ inline void Compute(int32_t progress) { // deque input tensors from VECIN queue LocalTensor<float> xLocal = inQueueX.DeQue<float>(); LocalTensor<float> yLocal = outQueueY.AllocTensor<float>(); LocalTensor<float> tmpTensor1 = tmpBuffer1.Get<float>(); float inputVal = 0.0; Maxs(tmpTensor1, xLocal, inputVal, this->tileLength); // x >= 0 --> x // x < 0 Mins(xLocal, xLocal, inputVal, this->tileLength); Muls(xLocal, xLocal, this->alpha, this->tileLength); Add(yLocal, xLocal, tmpTensor1, this->tileLength); outQueueY.EnQue<float>(yLocal); // free input tensors for reuse inQueueX.FreeTensor(xLocal); } __aicore__ inline void CopyOut(int32_t progress) { // deque output tensor from VECOUT queue LocalTensor<float> yLocal = outQueueY.DeQue<float>(); // copy progress_th tile from local tensor to global tensor DataCopy(yGm[progress * this->tileLength], yLocal, this->tileLength); // free output tensor for reuse outQueueY.FreeTensor(yLocal); } private: TPipe pipe; TBuf<QuePosition::VECCALC> tmpBuffer1; //TBuf<QuePosition::VECCALC> tmpBuffer1, tmpBuffer2; // create queues for input, in this case depth is equal to buffer num TQue<QuePosition::VECIN, BUFFER_NUM> inQueueX; // create queue for output, in this case depth is equal to buffer num TQue<QuePosition::VECOUT, BUFFER_NUM> outQueueY; GlobalTensor<float> xGm, yGm; uint32_t blockLength; uint32_t tileNum; uint32_t tileLength; float alpha; }; extern "C" __global__ __aicore__ void p_relu_custom(GM_ADDR x, GM_ADDR y) { //GET_TILING_DATA(tiling_data, tiling); // TODO: user kernel impl KernelPRelu op; op.Init(x, y, TOTAL_LENGTH, TILE_NUM, alpha); op.Process(); } #ifndef __CCE_KT_TEST__ // call of kernel 
function void p_relu_custom_do(uint32_t blockDim, void* l2ctrl, void* stream, uint8_t* x, uint8_t* y) { p_relu_custom<<<blockDim, l2ctrl, stream>>>(x, y); } #endif
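Working through the constants above: with the application launching the kernel on 8 cores, each core processes blockLength = 8 * 200 * 1024 / 8 = 204800 floats, and because of double buffering each tile holds tileLength = 204800 / 32 / 2 = 3200 floats (12800 bytes per queue buffer). The Compute stage evaluates PRelu(x) = max(x, 0) + alpha * min(x, 0): Maxs keeps the non-negative part, Mins and Muls produce alpha times the negative part, and Add combines the two.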
The main.cpp code is shown below:
/*
 * Copyright (c) Huawei Technologies Co., Ltd. 2022-2023. All rights reserved.
 * This file contains code for CPU debugging and NPU execution. We read data from a bin file
 * and write the result to a file.
 */
#include "data_utils.h"
#ifndef __CCE_KT_TEST__
#include "acl/acl.h"
extern void p_relu_custom_do(uint32_t coreDim, void* l2ctrl, void* stream, uint8_t* x, uint8_t* y);
#else
#include "tikicpulib.h"
extern "C" __global__ __aicore__ void p_relu_custom(GM_ADDR x, GM_ADDR y);
#endif

int32_t main(int32_t argc, char* argv[])
{
    uint32_t blockDim = 8;
    size_t inputByteSize = 8 * 200 * 1024 * sizeof(float);
    size_t outputByteSize = 8 * 200 * 1024 * sizeof(float);

#ifdef __CCE_KT_TEST__ // CPU
    uint8_t* x = (uint8_t*)AscendC::GmAlloc(inputByteSize);
    uint8_t* y = (uint8_t*)AscendC::GmAlloc(outputByteSize);
    printf("[cpu debug]>>> inputByteSize: %zu\n", inputByteSize);
    ReadFile("./input/input_x.bin", inputByteSize, x, inputByteSize);
    AscendC::SetKernelMode(KernelMode::AIV_MODE);
    ICPU_RUN_KF(p_relu_custom, blockDim, x, y); // use this macro for cpu debug
    WriteFile("./output/output_y.bin", y, outputByteSize);
    AscendC::GmFree((void *)x);
    AscendC::GmFree((void *)y);
#else // NPU
    //CHECK_ACL(aclInit(nullptr));
    CHECK_ACL(aclInit("./acl.json"));
    aclrtContext context;
    int32_t deviceId = 0;
    CHECK_ACL(aclrtSetDevice(deviceId));
    CHECK_ACL(aclrtCreateContext(&context, deviceId));
    aclrtStream stream = nullptr;
    CHECK_ACL(aclrtCreateStream(&stream));

    uint8_t *xHost, *yHost;
    uint8_t *xDevice, *yDevice;
    CHECK_ACL(aclrtMallocHost((void**)(&xHost), inputByteSize));
    CHECK_ACL(aclrtMallocHost((void**)(&yHost), outputByteSize));
    CHECK_ACL(aclrtMalloc((void**)&xDevice, inputByteSize, ACL_MEM_MALLOC_HUGE_FIRST));
    CHECK_ACL(aclrtMalloc((void**)&yDevice, outputByteSize, ACL_MEM_MALLOC_HUGE_FIRST));

    ReadFile("./input/input_x.bin", inputByteSize, xHost, inputByteSize);
    CHECK_ACL(aclrtMemcpy(xDevice, inputByteSize, xHost, inputByteSize, ACL_MEMCPY_HOST_TO_DEVICE));

    p_relu_custom_do(blockDim, nullptr, stream, xDevice, yDevice);
    CHECK_ACL(aclrtSynchronizeStream(stream));

    CHECK_ACL(aclrtMemcpy(yHost, outputByteSize, yDevice, outputByteSize, ACL_MEMCPY_DEVICE_TO_HOST));
    WriteFile("./output/output_y.bin", yHost, outputByteSize);

    CHECK_ACL(aclrtFree(xDevice));
    CHECK_ACL(aclrtFree(yDevice));
    CHECK_ACL(aclrtFreeHost(xHost));
    CHECK_ACL(aclrtFreeHost(yHost));

    CHECK_ACL(aclrtDestroyStream(stream));
    CHECK_ACL(aclrtDestroyContext(context));
    CHECK_ACL(aclrtResetDevice(deviceId));
    CHECK_ACL(aclFinalize());
#endif
    return 0;
}
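The same main.cpp serves both execution paths: when __CCE_KT_TEST__ is defined, the kernel runs in the CPU debug domain via ICPU_RUN_KF on buffers allocated with AscendC::GmAlloc; otherwise the NPU path initializes AscendCL, copies the input to the Device, launches the kernel asynchronously through p_relu_custom_do, and must call aclrtSynchronizeStream before copying the result back, because the launch itself returns immediately.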
Run the following commands for on-board NPU debugging and for CPU debugging:
# npu
bash run.sh Ascend310P1 npu_onboard
# cpu
bash run.sh Ascend310P1 cpu
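In the run.sh used here, the first argument is the target SoC version (Ascend310P1 in this example; adjust it to match your actual device) and the second selects the run mode: cpu runs the kernel in the CPU debug domain, while npu_onboard executes it on the actual device.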