More Examples
Example 1
The following example shows how the math library's kernel-side API is used together with the Tiling API GetXxxMaxMinTmpSize. The flow is as follows:
The host side calls the Tiling API to obtain the required temporary-space size and writes it into the tiling data; the kernel side then reads the tiling data to get that size and allocates the temporary buffer accordingly.
Host-side Tiling API usage example:
#include <vector>
#include "register/op_def_registry.h"
#include "register/tilingdata_base.h"
#include "tiling/tiling_api.h"

namespace optiling {
BEGIN_TILING_DATA_DEF(AsinCustomTilingData)
TILING_DATA_FIELD_DEF(uint32_t, srcSize);
TILING_DATA_FIELD_DEF(uint32_t, tmpBufferSize);
END_TILING_DATA_DEF;
REGISTER_TILING_DATA_CLASS(AsinCustom, AsinCustomTilingData)

static ge::graphStatus TilingFunc(gert::TilingContext* context)
{
    // Input source shape.
    std::vector<int64_t> srcDims = {16, 128};
    uint32_t srcSize = 1;
    for (auto dim : srcDims) {
        srcSize *= dim;
    }
    uint32_t typeSize = 2; // bytes per element (half)
    ge::Shape shape(srcDims);
    uint32_t minValue = 0;
    uint32_t maxValue = 0;
    // Query the maximum and minimum temporary-space requirements of Asin for this shape.
    AscendC::GetAsinMaxMinTmpSize(shape, typeSize, false, maxValue, minValue);
    auto platformInfo = context->GetPlatformInfo();
    auto ascendcPlatform = platform_ascendc::PlatformAscendC(platformInfo);
    uint64_t tailSize = 0; // remaining UB space, in bytes
    ascendcPlatform.GetCoreMemSize(platform_ascendc::CoreMemType::UB, tailSize);
    // This sample treats the whole UB as available; in practice, subtract the UB space
    // the operator already occupies from tailSize.
    uint32_t tmpSize = tailSize >= maxValue ? maxValue : static_cast<uint32_t>(tailSize);
    AsinCustomTilingData tiling;
    tiling.set_srcSize(srcSize);
    tiling.set_tmpBufferSize(tmpSize);
    context->SetBlockDim(1);
    tiling.SaveToBuffer(context->GetRawTilingData()->GetData(), context->GetRawTilingData()->GetCapacity());
    context->GetRawTilingData()->SetDataSize(tiling.GetDataSize());
    context->SetTilingKey(1);
    return ge::GRAPH_SUCCESS;
}
} // namespace optiling
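Note that the sample only clamps the temporary-space size from above. As a hedged addition (the failure path below is our assumption, not part of the original sample), a production tiling function would also check the remaining UB space against minValue, below which the API cannot process the full shape in one pass:

// Hedged sketch: reject or re-tile when even the minimum temporary buffer does not fit.
if (tailSize < minValue) {
    return ge::GRAPH_FAILED; // not enough UB left; split the shape or fail tiling
}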
The kernel side reads the tiling data, obtains the temporary-space size, and allocates the temporary buffer accordingly:
#include "kernel_operator.h" template <typename srcType> class KernelAsin { public: __aicore__ inline KernelAsin() {} __aicore__ inline void Init(GM_ADDR src_gm, GM_ADDR dst_gm, uint32_t srcSize, uint32_t tmpBufferSize) { src_global.SetGlobalBuffer(reinterpret_cast<__gm__ srcType *>(src_gm), srcSize); dst_global.SetGlobalBuffer(reinterpret_cast<__gm__ srcType *>(dst_gm), srcSize); pipe.InitBuffer(inQueue, 1, srcSize * sizeof(srcType)); pipe.InitBuffer(outQueue, 1, srcSize * sizeof(srcType)); pipe.InitBuffer(tmpQueue, 1, tmpBufferSize); bufferSize = srcSize; } __aicore__ inline void Process() { CopyIn(); Compute(); CopyOut(); } private: __aicore__ inline void CopyIn() { LocalTensor<srcType> srcLocal = inQueue.AllocTensor<srcType>(); DataCopy(srcLocal, src_global, bufferSize); inQueue.EnQue(srcLocal); } __aicore__ inline void Compute() { LocalTensor<srcType> dstLocal = outQueue.AllocTensor<srcType>(); LocalTensor<srcType> srcLocal = inQueue.DeQue<srcType>(); LocalTensor<uint8_t> sharedTmpBuffer = tmpQueue.AllocTensor<uint8_t>(); Asin<srcType, false>(dstLocal, srcLocal, sharedTmpBuffer, bufferSize); outQueue.EnQue<srcType>(dstLocal); inQueue.FreeTensor(srcLocal); tmpQueue.FreeTensor(sharedTmpBuffer); } __aicore__ inline void CopyOut() { LocalTensor<srcType> dstLocal = outQueue.DeQue<srcType>(); DataCopy(dst_global, dstLocal, bufferSize); outQueue.FreeTensor(dstLocal); } private: GlobalTensor<srcType> src_global; GlobalTensor<srcType> dst_global; TPipe pipe; TQue<QuePosition::VECIN, 1> inQueue; TQue<QuePosition::VECCALC, 1> tmpQueue; TQue<QuePosition::VECOUT, 1> outQueue; uint32_t bufferSize = 0; }; extern "C" __global__ __aicore__ void kernel_asin_operator(GM_ADDR src_gm, GM_ADDR dst_gm, GM_ADDR tiling) { GET_TILING_DATA(tilingData, tiling); KernelAsin<half> op; op.Init(src_gm, dst_gm, tilingData.srcSize, tilingData.tmpBufferSize); if (TILING_KEY_IS(1)) { op.Process(); } }
Example 2
The following example shows how the math library's kernel-side API is used together with the Tiling API GetXxxTmpBufferFactorSize. The flow is as follows:
The host side calls the Tiling API to obtain maxLiveNodeCnt and extraBuf, derives from them the maximum number of elements the operator can process in one pass, and writes that value into the tiling data; the kernel side then reads it from the tiling data and sizes its temporary buffers from it.
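Before the full sample, here is a minimal sketch of the sizing arithmetic it relies on; the helper name and the zero-return convention are ours, not part of any library API. The factor sizes describe the relationship tmpBytes ≈ maxLiveNodeCnt * calCount * typeSize + extraBuf, which is inverted to get the per-pass element count:

// Hypothetical helper: largest per-pass element count that fits in availBytes of spare UB.
inline uint32_t MaxElemsPerPass(uint64_t availBytes, uint32_t maxLiveNodeCnt, uint32_t extraBuf, uint32_t typeSize)
{
    if (maxLiveNodeCnt == 0 || availBytes <= extraBuf) {
        return 0; // 0 means "no temporary buffer needed" or "does not fit"; the caller decides
    }
    return static_cast<uint32_t>((availBytes - extraBuf) / maxLiveNodeCnt / typeSize);
}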
Host-side Tiling API usage example:
#include <algorithm>
#include <cassert>
#include <vector>
#include "register/op_def_registry.h"
#include "register/tilingdata_base.h"
#include "tiling/tiling_api.h"

namespace optiling {
BEGIN_TILING_DATA_DEF(AsinCustomTilingData)
TILING_DATA_FIELD_DEF(uint32_t, srcSize);
TILING_DATA_FIELD_DEF(uint32_t, srcCurSize);
TILING_DATA_FIELD_DEF(uint32_t, tmpBufferSize);
END_TILING_DATA_DEF;
REGISTER_TILING_DATA_CLASS(AsinCustom, AsinCustomTilingData)

static ge::graphStatus TilingFunc(gert::TilingContext *context)
{
    // Input source shape.
    std::vector<int64_t> srcDims = {16, 128};
    uint32_t srcSize = 1;
    uint32_t srcCurSize = 1;
    for (auto dim : srcDims) {
        srcSize *= dim;
    }
    uint32_t typeSize = 2; // bytes per element (half)
    auto platformInfo = context->GetPlatformInfo();
    auto ascendcPlatform = platform_ascendc::PlatformAscendC(platformInfo);
    uint64_t tailSize = 0; // remaining UB space, in bytes
    ascendcPlatform.GetCoreMemSize(platform_ascendc::CoreMemType::UB, tailSize);
    uint32_t asinMaxLiveNodeCnt = 0;
    uint32_t asinExtraBuf = 0;
    uint32_t acosMaxLiveNodeCnt = 0;
    uint32_t acosExtraBuf = 0;
    AscendC::GetAsinTmpBufferFactorSize(typeSize, asinMaxLiveNodeCnt, asinExtraBuf);
    AscendC::GetAcosTmpBufferFactorSize(typeSize, acosMaxLiveNodeCnt, acosExtraBuf);
    // The temporary space is what remains of UB after the API inputs and outputs.
    // This sample needs the inputs and outputs of both Asin and Acos; since Asin's output
    // feeds Acos as its input, three buffers of the source size are enough.
    uint32_t tmpSize = static_cast<uint32_t>(tailSize - srcSize * typeSize * 3);
    assert(tmpSize >= asinExtraBuf);
    assert(tmpSize >= acosExtraBuf);
    // Maximum number of elements Asin can process per pass.
    uint32_t srcAsinCurSize = 0;
    if (asinMaxLiveNodeCnt != 0) {
        srcAsinCurSize = (tmpSize - asinExtraBuf) / asinMaxLiveNodeCnt / typeSize;
    } else {
        srcAsinCurSize = srcSize;
    }
    // Maximum number of elements Acos can process per pass.
    uint32_t srcAcosCurSize = 0;
    if (acosMaxLiveNodeCnt != 0) {
        srcAcosCurSize = (tmpSize - acosExtraBuf) / acosMaxLiveNodeCnt / typeSize;
    } else {
        srcAcosCurSize = srcSize;
    }
    srcCurSize = std::min(srcAsinCurSize, srcAcosCurSize);
    AsinCustomTilingData tiling;
    tiling.set_srcSize(srcSize);
    tiling.set_srcCurSize(srcCurSize);
    tiling.set_tmpBufferSize(tmpSize);
    context->SetBlockDim(1);
    tiling.SaveToBuffer(context->GetRawTilingData()->GetData(), context->GetRawTilingData()->GetCapacity());
    context->GetRawTilingData()->SetDataSize(tiling.GetDataSize());
    context->SetTilingKey(1);
    return ge::GRAPH_SUCCESS;
}
} // namespace optiling
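One practical caveat, added here as our assumption rather than something the sample enforces: vector instructions expect 32-byte-aligned local-memory offsets, so before srcCurSize is written into the tiling data it is safer to round it down to a whole number of 32-byte blocks, keeping srcLocal[offset] in the kernel loop aligned:

// Hedged adjustment: align the per-pass element count down to a 32-byte block boundary.
constexpr uint32_t BLOCK_BYTES = 32;
uint32_t elemsPerBlock = BLOCK_BYTES / typeSize;
srcCurSize = srcCurSize / elemsPerBlock * elemsPerBlock;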
Kernel-side example:
#include "kernel_operator.h" template <typename srcType> class KernelAsin { public: __aicore__ inline KernelAsin() {} __aicore__ inline void Init(GM_ADDR src_gm, GM_ADDR dst_gm, uint32_t srcSizeIn, uint32_t srcCurSizeIn, uint32_t tmpBufferSize) { srcSize = srcSizeIn; srcCurSize = srcCurSizeIn; src_global.SetGlobalBuffer(reinterpret_cast<__gm__ srcType *>(src_gm), srcSize); dst_global.SetGlobalBuffer(reinterpret_cast<__gm__ srcType *>(dst_gm), srcSize); pipe.InitBuffer(inQueue, 1, srcSize * sizeof(srcType)); pipe.InitBuffer(outQueue, 1, srcSize * sizeof(srcType)); pipe.InitBuffer(tmpQueue1, 1, srcCurSize * sizeof(srcType)); pipe.InitBuffer(tmpQueue, 1, tmpBufferSize); } __aicore__ inline void Process() { CopyIn(); Compute(); CopyOut(); } private: __aicore__ inline void CopyIn() { LocalTensor<srcType> srcLocal = inQueue.AllocTensor<srcType>(); DataCopy(srcLocal, src_global, srcSize); inQueue.EnQue(srcLocal); } __aicore__ inline void Compute() { LocalTensor<srcType> dstLocal = outQueue.AllocTensor<srcType>(); LocalTensor<srcType> srcLocal = inQueue.DeQue<srcType>(); LocalTensor<uint8_t> sharedTmpBuffer = tmpQueue.AllocTensor<uint8_t>(); LocalTensor<srcType> tmpresBuffer = tmpQueue1.AllocTensor<srcType>(); for (int32_t offset = 0; offset < srcSize; offset += srcCurSize) { Asin<srcType, false>(tmpresBuffer, srcLocal[offset], sharedTmpBuffer, srcCurSize); PipeBarrier<PIPE_V>(); Acos<srcType, false>(dstLocal[offset], tmpresBuffer, sharedTmpBuffer, srcCurSize); PipeBarrier<PIPE_V>(); } outQueue.EnQue<srcType>(dstLocal); inQueue.FreeTensor(srcLocal); tmpQueue.FreeTensor(sharedTmpBuffer); } __aicore__ inline void CopyOut() { LocalTensor<srcType> dstLocal = outQueue.DeQue<srcType>(); DataCopy(dst_global, dstLocal, srcSize); outQueue.FreeTensor(dstLocal); } private: GlobalTensor<srcType> src_global; GlobalTensor<srcType> dst_global; TPipe pipe; TQue<QuePosition::VECIN, 1> inQueue; TQue<QuePosition::VECCALC, 1> tmpQueue; TQue<QuePosition::VECCALC, 1> tmpQueue1; TQue<QuePosition::VECOUT, 1> outQueue; uint32_t srcSize, srcCurSize; }; extern "C" __global__ __aicore__ void kernel_asin_operator(GM_ADDR src_gm, GM_ADDR dst_gm, GM_ADDR tiling) { GET_TILING_DATA(tilingData, tiling); KernelAsin<half> op; op.Init(src_gm, dst_gm, tilingData.srcSize, tilingData.srcCurSize, tilingData.tmpBufferSize); if (TILING_KEY_IS(1)) { op.Process(); } }