This section describes the operators defined in the Ascend IR. Before using an operator, read the related notes listed in this chapter:
- Format: describes the Format information listed in the operator specifications.
- TensorType: describes in detail the TensorType sets listed in the operator specifications.
- Data type promotion rules: when the input tensors of some operators (such as Add and Mul) have different data types, the data types are automatically promoted during internal computation. This part gives the promotion rules.
- Deterministic computation: lists the operators covered and supported by the deterministic computation feature.
The TensorType sets referenced in the operator specifications are defined as follows:

```cpp
struct TensorType {
  explicit TensorType(DataType dt);
  TensorType(const std::initializer_list<DataType> &types);

  // Every supported data type, including quantized, resource and string types.
  static TensorType ALL() {
    return TensorType{DT_BOOL, DT_COMPLEX128, DT_COMPLEX64, DT_DOUBLE, DT_FLOAT, DT_FLOAT16,
                      DT_INT16, DT_INT32, DT_INT64, DT_INT8, DT_QINT16, DT_QINT32, DT_QINT8,
                      DT_QUINT16, DT_QUINT8, DT_RESOURCE, DT_STRING, DT_UINT16, DT_UINT32,
                      DT_UINT64, DT_UINT8};
  }

  // Quantized types only.
  static TensorType QuantifiedType() {
    return TensorType{DT_QINT16, DT_QINT32, DT_QINT8, DT_QUINT16, DT_QUINT8};
  }

  // Non-quantized types: bool, complex, floating-point and integer types.
  static TensorType OrdinaryType() {
    return TensorType{DT_BOOL, DT_COMPLEX128, DT_COMPLEX64, DT_DOUBLE, DT_FLOAT, DT_FLOAT16,
                      DT_INT16, DT_INT32, DT_INT64, DT_INT8, DT_UINT16, DT_UINT32, DT_UINT64,
                      DT_UINT8};
  }

  // All types except bool, resource and string.
  static TensorType BasicType() {
    return TensorType{DT_COMPLEX128, DT_COMPLEX64, DT_DOUBLE, DT_FLOAT, DT_FLOAT16, DT_INT16,
                      DT_INT32, DT_INT64, DT_INT8, DT_QINT16, DT_QINT32, DT_QINT8, DT_QUINT16,
                      DT_QUINT8, DT_UINT16, DT_UINT32, DT_UINT64, DT_UINT8};
  }

  // Numeric types: complex, floating-point, integer and a subset of quantized types.
  static TensorType NumberType() {
    return TensorType{DT_COMPLEX128, DT_COMPLEX64, DT_DOUBLE, DT_FLOAT, DT_FLOAT16, DT_INT16,
                      DT_INT32, DT_INT64, DT_INT8, DT_QINT32, DT_QINT8, DT_QUINT8, DT_UINT16,
                      DT_UINT32, DT_UINT64, DT_UINT8};
  }

  // Real (non-complex) numeric types.
  static TensorType RealNumberType() {
    return TensorType{DT_DOUBLE, DT_FLOAT, DT_FLOAT16, DT_INT16, DT_INT32, DT_INT64, DT_INT8,
                      DT_UINT16, DT_UINT32, DT_UINT64, DT_UINT8};
  }

  // Complex types.
  static TensorType ComplexDataType() { return TensorType{DT_COMPLEX128, DT_COMPLEX64}; }

  // Signed and unsigned integer types.
  static TensorType IntegerDataType() {
    return TensorType{DT_INT16, DT_INT32, DT_INT64, DT_INT8, DT_UINT16, DT_UINT32, DT_UINT64,
                      DT_UINT8};
  }

  // Signed integer types.
  static TensorType SignedDataType() { return TensorType{DT_INT16, DT_INT32, DT_INT64, DT_INT8}; }

  // Unsigned integer types.
  static TensorType UnsignedDataType() {
    return TensorType{DT_UINT16, DT_UINT32, DT_UINT64, DT_UINT8};
  }

  // Floating-point types.
  static TensorType FloatingDataType() { return TensorType{DT_DOUBLE, DT_FLOAT, DT_FLOAT16}; }

  // Types usable as indices.
  static TensorType IndexNumberType() { return TensorType{DT_INT32, DT_INT64}; }

  // Complex and floating-point types.
  static TensorType UnaryDataType() {
    return TensorType{DT_COMPLEX128, DT_COMPLEX64, DT_DOUBLE, DT_FLOAT, DT_FLOAT16};
  }

  // Single- and half-precision floating-point types.
  static TensorType FLOAT() { return TensorType{DT_FLOAT, DT_FLOAT16}; }

  std::shared_ptr<TensorTypeImpl> tensor_type_impl_;
};
```
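These sets are typically passed to an operator's input and output declarations. The sketch below shows the general pattern, using the element-wise Add operator as an example; it follows the REG_OP registration style of the Ascend IR headers, and the header path and operator prototype shown here are assumptions to be checked against the operator specification of your CANN version.

```cpp
// Sketch of an Ascend IR operator declaration that uses the TensorType sets above.
// Treat the header path and the exact Add prototype as illustrative assumptions.
#include "graph/operator_reg.h"

namespace ge {
REG_OP(Add)
    .INPUT(x1, TensorType::NumberType())   // first input accepts any numeric type
    .INPUT(x2, TensorType::NumberType())   // second input accepts any numeric type
    .OUTPUT(y, TensorType::NumberType())   // output type follows the promotion rules below
    .OP_END_FACTORY_REG(Add)
}  // namespace ge
```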
When the input tensors of some operators (such as Add and Mul) have different data types, the data types are automatically promoted during internal computation. The promotion rules are given in the following table, where the row and column headers are the data types of the two inputs and "×" indicates that the combination is not supported:
| Data type | f32 | f16 | bf16 | s8 | u8 | s16 | u16 | s32 | u32 | s64 | u64 | bool | c32 | c64 |
| --------- | --- | --- | ---- | -- | -- | --- | --- | --- | --- | --- | --- | ---- | --- | --- |
| f32  | f32 | f32 | f32  | f32  | f32  | f32  | ×   | f32  | ×   | f32  | ×   | f32  | c64 | c64 |
| f16  | f32 | f16 | f32  | f16  | f16  | f16  | ×   | f16  | ×   | f16  | ×   | f16  | c32 | c64 |
| bf16 | f32 | f32 | bf16 | bf16 | bf16 | bf16 | ×   | bf16 | ×   | bf16 | ×   | bf16 | c32 | c64 |
| s8   | f32 | f16 | bf16 | s8   | s16  | s16  | ×   | s32  | ×   | s64  | ×   | s8   | c32 | c64 |
| u8   | f32 | f16 | bf16 | s16  | u8   | s16  | ×   | s32  | ×   | s64  | ×   | u8   | c32 | c64 |
| s16  | f32 | f16 | bf16 | s16  | s16  | s16  | ×   | s32  | ×   | s64  | ×   | s16  | c32 | c64 |
| u16  | ×   | ×   | ×    | ×    | ×    | ×    | u16 | ×    | ×   | ×    | ×   | ×    | ×   | ×   |
| s32  | f32 | f16 | bf16 | s32  | s32  | s32  | ×   | s32  | ×   | s64  | ×   | s32  | c32 | c64 |
| u32  | ×   | ×   | ×    | ×    | ×    | ×    | ×   | ×    | u32 | ×    | ×   | ×    | ×   | ×   |
| s64  | f32 | f16 | bf16 | s64  | s64  | s64  | ×   | s64  | ×   | s64  | ×   | s64  | c32 | c64 |
| u64  | ×   | ×   | ×    | ×    | ×    | ×    | ×   | ×    | ×   | ×    | u64 | ×    | ×   | ×   |
| bool | f32 | f16 | bf16 | s8   | u8   | s16  | ×   | s32  | ×   | s64  | ×   | bool | c32 | c64 |
| c32  | c64 | c32 | c32  | c32  | c32  | c32  | ×   | c32  | ×   | c32  | ×   | c32  | c32 | c64 |
| c64  | c64 | c64 | c64  | c64  | c64  | c64  | ×   | c64  | ×   | c64  | ×   | c64  | c64 | c64 |
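For a concrete reading of the table, the self-contained sketch below encodes a representative subset of it as a symmetric lookup. The `Dt` enum and the `Promote` helper are illustrative names only and are not part of any Ascend API.

```cpp
// Illustrative only (not an Ascend API): a symmetric lookup over a subset of the
// promotion table above, so the rule can be checked programmatically.
#include <cstdio>
#include <map>
#include <utility>

enum class Dt { f32, f16, bf16, s8, u8, s16, s32, s64, b1 /* bool */ };

Dt Promote(Dt a, Dt b) {
  // A few representative entries copied from the table; the full table would
  // simply extend this map. The table is symmetric, so both orders are checked.
  static const std::map<std::pair<Dt, Dt>, Dt> kRules = {
      {{Dt::f16, Dt::f32}, Dt::f32},   // f16  + f32 -> f32
      {{Dt::bf16, Dt::f16}, Dt::f32},  // bf16 + f16 -> f32
      {{Dt::s8, Dt::u8}, Dt::s16},     // s8   + u8  -> s16
      {{Dt::s8, Dt::s32}, Dt::s32},    // s8   + s32 -> s32
      {{Dt::b1, Dt::s64}, Dt::s64},    // bool + s64 -> s64
      {{Dt::f16, Dt::s32}, Dt::f16},   // f16  + s32 -> f16
  };
  if (a == b) return a;  // identical types need no promotion
  auto it = kRules.find({a, b});
  if (it == kRules.end()) it = kRules.find({b, a});
  // Pairs not encoded here are either unsupported ("×" in the table) or simply
  // omitted from this sketch; fall back to the first operand's type.
  return it == kRules.end() ? a : it->second;
}

int main() {
  // Example: Add(f16, s32) computes in f16 according to the table.
  std::printf("Promote(f16, s32) == f16: %d\n", Promote(Dt::f16, Dt::s32) == Dt::f16);
  return 0;
}
```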
Because operator implementations use asynchronous multi-threaded execution, the order in which floating-point values are accumulated can vary, so an operator may produce different results across runs even on the same hardware with the same inputs. When the deterministic computation feature is enabled, an operator produces the same output across runs on the same hardware with the same inputs.
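The sketch below is plain C++, not Ascend code; it only illustrates why accumulation order matters: floating-point addition is not associative, so summing the same values in a different order can change the result.

```cpp
// Illustration only: floating-point addition is not associative, so the summation
// order chosen by asynchronous threads can change the final result.
#include <cstdio>

int main() {
  const float big = 1.0e8f;   // large-magnitude value
  const float one = 1.0f;     // small value that is absorbed when added to "big"

  float order_a = (big + one) - big;  // 1.0f is lost to rounding: prints 0.0
  float order_b = (big - big) + one;  // same values, different order: prints 1.0

  std::printf("order_a = %.1f\n", order_a);
  std::printf("order_b = %.1f\n", order_b);
  return 0;
}
```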
The operators covered by the deterministic computation feature are listed below. If a listed operator does not appear in the corresponding specification list, the current chip version does not support that operator.