aclnnIndexAdd
Interface Prototype
Every operator has a two-phase API. You must first call the "aclnnXxxGetWorkspaceSize" interface, which takes the input parameters and, following the computation flow, calculates the required workspace size; then call the "aclnnXxx" interface to execute the computation. The two-phase interfaces are:
- First-phase interface: aclnnStatus aclnnIndexAddGetWorkspaceSize(const aclTensor *self, int64_t dim, const aclTensor *index, const aclTensor *source, const aclScalar *alpha, aclTensor *out, uint64_t *workspaceSize, aclOpExecutor **executor)
- Second-phase interface: aclnnStatus aclnnIndexAdd(void *workspace, uint64_t workspaceSize, aclOpExecutor *executor, aclrtStream stream)
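In outline, the typical call sequence is sketched below as a hypothetical helper (RunIndexAdd is not part of the library; error handling is condensed, and the complete flow, including tensor creation and cleanup, is shown in the calling example at the end of this page):
#include <cstdint>
#include "acl/acl.h"
#include "aclnnop/aclnn_index_add.h"

// Sketch only: wraps the two-phase call pattern for already-created inputs.
aclnnStatus RunIndexAdd(const aclTensor* self, int64_t dim, const aclTensor* index,
                        const aclTensor* source, const aclScalar* alpha, aclTensor* out,
                        aclrtStream stream) {
  // Phase one: validate the inputs and compute the required workspace size.
  uint64_t workspaceSize = 0;
  aclOpExecutor* executor = nullptr;
  aclnnStatus ret = aclnnIndexAddGetWorkspaceSize(self, dim, index, source, alpha, out,
                                                  &workspaceSize, &executor);
  if (ret != ACL_SUCCESS) {
    return ret;
  }
  // Allocate the workspace on the device only if a non-zero size was returned.
  void* workspaceAddr = nullptr;
  if (workspaceSize > 0) {
    aclrtMalloc(&workspaceAddr, workspaceSize, ACL_MEM_MALLOC_HUGE_FIRST);
  }
  // Phase two: launch the computation on the given stream.
  ret = aclnnIndexAdd(workspaceAddr, workspaceSize, executor, stream);
  // Wait for the asynchronous task to finish before reading the result.
  aclrtSynchronizeStream(stream);
  if (workspaceSize > 0) {
    aclrtFree(workspaceAddr);
  }
  return ret;
}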
Function Description
Operator function: along the specified dimension dim, adds the values of the source tensor source to the values at the corresponding positions of the input tensor self, as selected by the given index tensor index.
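For illustration only (this helper is not part of the operator API), the effect for dim = 0 can be sketched on plain host data as follows, where out starts as a copy of self:
#include <cstddef>
#include <cstdint>
#include <vector>

// Illustrative sketch of the index_add semantics for dim = 0:
// row index[i] of out is incremented by alpha * (row i of source).
void IndexAddDim0Sketch(std::vector<std::vector<float>>& out,          // initially a copy of self
                        const std::vector<int64_t>& index,
                        const std::vector<std::vector<float>>& source,
                        float alpha) {
  for (size_t i = 0; i < index.size(); ++i) {
    for (size_t j = 0; j < source[i].size(); ++j) {
      out[index[i]][j] += alpha * source[i][j];
    }
  }
}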
aclnnIndexAddGetWorkspaceSize
- Interface definition:
aclnnStatus aclnnIndexAddGetWorkspaceSize(const aclTensor *self, int64_t dim, const aclTensor *index, const aclTensor *source, const aclScalar *alpha, aclTensor *out, uint64_t *workspaceSize, aclOpExecutor **executor)
- Parameters:
- self: aclTensor on the device side. Supported data types: FLOAT, FLOAT16, INT32, INT16, INT8, UINT8, DOUBLE. Non-contiguous tensors are supported; supported data format: ND.
- dim: the dimension along which to add. Data type INT64; the value range is [-self.dim(), self.dim()).
- index: aclTensor on the device side. Supported data types: INT64, INT32. Non-contiguous tensors are supported; supported data format: ND.
- source: aclTensor on the device side. Supported data types: FLOAT, FLOAT16, INT32, INT16, INT8, UINT8, DOUBLE. Non-contiguous tensors are supported; supported data format: ND.
- alpha: aclScalar on the host side. Its data type must be convertible to the data type promoted from self and source.
- out: aclTensor on the device side. Supported data types: FLOAT, FLOAT16, INT32, INT16, INT8, UINT8, DOUBLE. Non-contiguous tensors are supported; supported data format: ND.
- workspaceSize: returns the workspace size that the caller must allocate on the device side.
- executor: returns the op executor, which encapsulates the operator's computation flow.
- Return value:
Returns an aclnnStatus status code; see the aclnn return codes for details.
The first-phase interface performs input validation and reports an error in the following cases:
- Returns 161001 (ACLNN_ERR_PARAM_NULLPTR): self, index, source, alpha, or out is a null pointer.
- Returns 161002 (ACLNN_ERR_PARAM_INVALID):
- The data type or data format of self, index, or source is outside the supported range.
- The promoted data type cannot be cast to the data type of the specified output out.
aclnnIndexAdd
- Interface definition:
aclnnStatus aclnnIndexAdd(void *workspace, uint64_t workspaceSize, aclOpExecutor *executor, aclrtStream stream)
- Parameters:
- workspace: start address of the workspace memory allocated on the device side.
- workspaceSize: size of the workspace allocated on the device side, obtained from the first-phase interface aclnnIndexAddGetWorkspaceSize.
- executor: the op executor, which encapsulates the operator's computation flow.
- stream: the AscendCL stream on which the task executes.
- Return value:
Returns an aclnnStatus status code; see the aclnn return codes for details.
Calling Example
#include <iostream>
#include <vector>
#include "acl/acl.h"
#include "aclnnop/aclnn_index_add.h"
#define CHECK_RET(cond, return_expr) \
do { \
if (!(cond)) { \
return_expr; \
} \
} while (0)
#define LOG_PRINT(message, ...) \
do { \
printf(message, ##__VA_ARGS__); \
} while (0)
int64_t GetShapeSize(const std::vector<int64_t>& shape) {
int64_t shapeSize = 1;
for (auto i : shape) {
shapeSize *= i;
}
return shapeSize;
}
int Init(int32_t deviceId, aclrtContext* context, aclrtStream* stream) {
// Fixed pattern: AscendCL initialization
auto ret = aclInit(nullptr);
CHECK_RET(ret == ACL_SUCCESS, LOG_PRINT("aclInit failed. ERROR: %d\n", ret); return ret);
ret = aclrtSetDevice(deviceId);
CHECK_RET(ret == ACL_SUCCESS, LOG_PRINT("aclrtSetDevice failed. ERROR: %d\n", ret); return ret);
ret = aclrtCreateContext(context, deviceId);
CHECK_RET(ret == ACL_SUCCESS, LOG_PRINT("aclrtCreateContext failed. ERROR: %d\n", ret); return ret);
ret = aclrtSetCurrentContext(*context);
CHECK_RET(ret == ACL_SUCCESS, LOG_PRINT("aclrtSetCurrentContext failed. ERROR: %d\n", ret); return ret);
ret = aclrtCreateStream(stream);
CHECK_RET(ret == ACL_SUCCESS, LOG_PRINT("aclrtCreateStream failed. ERROR: %d\n", ret); return ret);
return 0;
}
template <typename T>
int CreateAclTensor(const std::vector<T>& hostData, const std::vector<int64_t>& shape, void** deviceAddr,
aclDataType dataType, aclTensor** tensor) {
auto size = GetShapeSize(shape) * sizeof(T);
// Call aclrtMalloc to allocate device-side memory
auto ret = aclrtMalloc(deviceAddr, size, ACL_MEM_MALLOC_HUGE_FIRST);
CHECK_RET(ret == ACL_SUCCESS, LOG_PRINT("aclrtMalloc failed. ERROR: %d\n", ret); return ret);
// Call aclrtMemcpy to copy the host-side data to the device-side memory
ret = aclrtMemcpy(*deviceAddr, size, hostData.data(), size, ACL_MEMCPY_HOST_TO_DEVICE);
CHECK_RET(ret == ACL_SUCCESS, LOG_PRINT("aclrtMemcpy failed. ERROR: %d\n", ret); return ret);
// Compute the strides of a contiguous tensor
std::vector<int64_t> strides(shape.size(), 1);
for (int64_t i = static_cast<int64_t>(shape.size()) - 2; i >= 0; i--) {
strides[i] = shape[i + 1] * strides[i + 1];
}
// Call aclCreateTensor to create the aclTensor
*tensor = aclCreateTensor(shape.data(), shape.size(), dataType, strides.data(), 0, aclFormat::ACL_FORMAT_ND,
shape.data(), shape.size(), *deviceAddr);
return 0;
}
int main() {
// 1. (Fixed pattern) Initialize device/context/stream; see the list of AscendCL external interfaces
// Set deviceId according to your actual device
int32_t deviceId = 0;
aclrtContext context;
aclrtStream stream;
auto ret = Init(deviceId, &context, &stream);
CHECK_RET(ret == ACL_SUCCESS, LOG_PRINT("Init acl failed. ERROR: %d\n", ret); return ret);
// 2. Construct the inputs and outputs; adapt them to the interface of the specific API
std::vector<int64_t> selfShape = {4, 2};
std::vector<int64_t> indexShape = {4};
std::vector<int64_t> sourceShape = {4, 2};
std::vector<int64_t> outShape = {4, 2};
void* selfDeviceAddr = nullptr;
void* indexDeviceAddr = nullptr;
void* sourceDeviceAddr = nullptr;
void* outDeviceAddr = nullptr;
aclTensor* self = nullptr;
aclTensor* index = nullptr;
aclTensor* source = nullptr;
aclTensor* out = nullptr;
std::vector<float> selfHostData = {0, 1, 2, 3, 4, 5, 6, 7};
std::vector<int32_t> indexHostData = {0, 1, 2, 3};
std::vector<float> sourceHostData = {0, 1, 2, 3, 4, 5, 6, 7};
std::vector<float> outHostData = {0, 1, 2, 3, 4, 5, 6, 7};
int64_t dim = 0;
float alphaValue = 1.0f;
// Create the self aclTensor
ret = CreateAclTensor(selfHostData, selfShape, &selfDeviceAddr, aclDataType::ACL_FLOAT, &self);
CHECK_RET(ret == ACL_SUCCESS, return ret);
ret = CreateAclTensor(indexHostData, indexShape, &indexDeviceAddr, aclDataType::ACL_INT32, &index);
CHECK_RET(ret == ACL_SUCCESS, return ret);
ret = CreateAclTensor(sourceHostData, sourceShape, &sourceDeviceAddr, aclDataType::ACL_FLOAT, &source);
CHECK_RET(ret == ACL_SUCCESS, return ret);
ret = CreateAclTensor(outHostData, outShape, &outDeviceAddr, aclDataType::ACL_FLOAT, &out);
CHECK_RET(ret == ACL_SUCCESS, return ret);
aclScalar* alpha = aclCreateScalar(&alphaValue, aclDataType::ACL_FLOAT);
CHECK_RET(alpha != nullptr, return ret);
// 3. Call the CANN operator library API; replace with the name of the specific API
uint64_t workspaceSize = 0;
aclOpExecutor* executor;
// Call the first-phase interface of aclnnIndexAdd
ret = aclnnIndexAddGetWorkspaceSize(self, dim, index, source, alpha, out, &workspaceSize, &executor);
CHECK_RET(ret == ACL_SUCCESS, LOG_PRINT("aclnnIndexAddGetWorkspaceSize failed. ERROR: %d\n", ret); return ret);
// Allocate device memory based on the workspaceSize computed by the first-phase interface
void* workspaceAddr = nullptr;
if (workspaceSize > 0) {
ret = aclrtMalloc(&workspaceAddr, workspaceSize, ACL_MEM_MALLOC_HUGE_FIRST);
CHECK_RET(ret == ACL_SUCCESS, LOG_PRINT("allocate workspace failed. ERROR: %d\n", ret); return ret);
}
// Call the second-phase interface of aclnnIndexAdd
ret = aclnnIndexAdd(workspaceAddr, workspaceSize, executor, stream);
CHECK_RET(ret == ACL_SUCCESS, LOG_PRINT("aclnnIndexAdd failed. ERROR: %d\n", ret); return ret);
// 4. (Fixed pattern) Synchronize and wait for the task to finish
ret = aclrtSynchronizeStream(stream);
CHECK_RET(ret == ACL_SUCCESS, LOG_PRINT("aclrtSynchronizeStream failed. ERROR: %d\n", ret); return ret);
// 5. Retrieve the output: copy the result from device-side memory back to the host side; adapt to the definition of the specific API
auto size = GetShapeSize(outShape);
std::vector<float> resultData(size, 0);
ret = aclrtMemcpy(resultData.data(), resultData.size() * sizeof(resultData[0]), outDeviceAddr,
size * sizeof(resultData[0]), ACL_MEMCPY_DEVICE_TO_HOST);
CHECK_RET(ret == ACL_SUCCESS, LOG_PRINT("copy result from device to host failed. ERROR: %d\n", ret); return ret);
for (int64_t i = 0; i < size; i++) {
LOG_PRINT("result[%ld] is: %f\n", i, resultData[i]);
}
// 6. Destroy the aclTensor and aclScalar objects; adapt to the definition of the specific API
aclDestroyTensor(self);
aclDestroyTensor(index);
aclDestroyTensor(source);
aclDestroyScalar(alpha);
aclDestroyTensor(out);
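// 7. Free device-side memory and release runtime resources (typical teardown; adjust to your application)
aclrtFree(selfDeviceAddr);
aclrtFree(indexDeviceAddr);
aclrtFree(sourceDeviceAddr);
aclrtFree(outDeviceAddr);
if (workspaceSize > 0) {
aclrtFree(workspaceAddr);
}
aclrtDestroyStream(stream);
aclrtDestroyContext(context);
aclrtResetDevice(deviceId);
aclFinalize();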
return 0;
}
Parent topic: NN operator interfaces