前置条件和编译命令请参见算子调用示例。当前仅支持量化场景。
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 | #include <iostream> #include <vector> #include <numeric> #include "acl/acl.h" #include "atb/operation.h" #include "atb/types.h" #include <atb/atb_infer.h> #include "demo_util.h" const int32_t DEVICE_ID = 0; const uint32_t XDIM_0 = 2; const uint32_t XDIM_1 = 3; const uint32_t WEIGHTDIM_0 = 3; const uint32_t WEIGHTDIM_1 = 2; /** * @brief 准备atb::VariantPack中的所有输入tensor * @param contextPtr context指针 * @param stream stream * @return atb::SVector<atb::Tensor> atb::VariantPack中的输入tensor * @note 需要传入所有host侧tensor */ atb::SVector<atb::Tensor> PrepareInTensor(atb::Context *contextPtr, aclrtStream stream) { // 创建shape为[2, 3]的输入x tensor atb::Tensor x = CreateTensorFromVector(contextPtr, stream, std::vector<int8_t>{1, 2, 3, 4, 5, 6}, aclDataType::ACL_INT8, aclFormat::ACL_FORMAT_ND, {XDIM_0, XDIM_1}); // 创建shape为[3, 2]的输入weight tensor atb::Tensor weight = CreateTensorFromVector(contextPtr, stream, std::vector<int8_t>{1, 2, 3, 4, 5, 6}, aclDataType::ACL_INT8, aclFormat::ACL_FORMAT_ND, {WEIGHTDIM_0, WEIGHTDIM_1}); // 创建shape为[2]bias tensor atb::Tensor bias = CreateTensorFromVector( contextPtr, stream, std::vector<int32_t>(2, 1), aclDataType::ACL_INT32, aclFormat::ACL_FORMAT_ND, {1, 2}); // 创建shape为[2]的输入deqScale tensor atb::Tensor deqScale = CreateTensorFromVector( contextPtr, stream, std::vector<float>(2, 1), aclDataType::ACL_FLOAT, aclFormat::ACL_FORMAT_ND, {1, 2}); atb::SVector<atb::Tensor> inTensors = {x, weight, bias, deqScale}; return inTensors; } /** * @brief 创建一个linear operation * @return atb::Operation * 返回一个Operation指针 */ atb::Operation *CreateLinearOperation() { atb::infer::LinearParam param; param.transposeA = false; 
param.transposeB = false; param.hasBias = true; param.outDataType = aclDataType::ACL_BF16; param.enAccum = false; atb::Operation *LinearOp = nullptr; CHECK_STATUS(atb::CreateOperation(param, &LinearOp)); return LinearOp; } int main(int argc, char **argv) { // 设置卡号、创建context、设置stream atb::Context *context = nullptr; void *stream = nullptr; CHECK_STATUS(aclInit(nullptr)); CHECK_STATUS(aclrtSetDevice(DEVICE_ID)); CHECK_STATUS(atb::CreateContext(&context)); CHECK_STATUS(aclrtCreateStream(&stream)); context->SetExecuteStream(stream); // 创建op atb::Operation *linearOp = CreateLinearOperation(); // 准备输入tensor atb::VariantPack variantPack; variantPack.inTensors = PrepareInTensor(context, stream); // 放入输入tensor // 准备输出tensor atb::Tensor output = CreateTensor(aclDataType::ACL_BF16, aclFormat::ACL_FORMAT_ND, {XDIM_0, WEIGHTDIM_1}); variantPack.outTensors = {output}; // 放入输出tensor uint64_t workspaceSize = 0; // 计算workspaceSize大小 CHECK_STATUS(linearOp->Setup(variantPack, workspaceSize, context)); uint8_t *workspacePtr = nullptr; if (workspaceSize > 0) { CHECK_STATUS(aclrtMalloc((void **)(&workspacePtr), workspaceSize, ACL_MEM_MALLOC_HUGE_FIRST)); } // linear执行 linearOp->Execute(variantPack, workspacePtr, workspaceSize, context); CHECK_STATUS(aclrtSynchronizeStream(stream)); // 流同步,等待device侧任务计算完成 // 释放资源 for (atb::Tensor &inTensor : variantPack.inTensors) { CHECK_STATUS(aclrtFree(inTensor.deviceData)); } for (atb::Tensor &outTensor : variantPack.outTensors) { CHECK_STATUS(aclrtFree(outTensor.deviceData)); } if (workspaceSize > 0) { CHECK_STATUS(aclrtFree(workspacePtr)); } CHECK_STATUS(atb::DestroyOperation(linearOp)); // operation,对象概念,先释放 CHECK_STATUS(aclrtDestroyStream(stream)); CHECK_STATUS(DestroyContext(context)); // context,全局资源,后释放 CHECK_STATUS(aclFinalize()); std::cout << "Linear dequant demo success!" << std::endl; return 0; } |