Model Compilation, Inference, and Saving Samples

To ensure that the cpp file can correctly locate Torch, configure the build as follows:

  • Add the Torch installation path to the -DCMAKE_PREFIX_PATH parameter of the cmake command.
  • Add find_package(Torch REQUIRED) to CMakeLists.txt, as in the sketch below.
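
A minimal CMakeLists.txt sketch is shown here for illustration. find_package(Torch REQUIRED) follows the standard PyTorch CMake integration; the target name, source file, and the torch_aie library name are assumptions that may differ in your installation.

cmake_minimum_required(VERSION 3.10)
project(mindietorch_sample)

# Torch is resolved through the path passed via -DCMAKE_PREFIX_PATH
find_package(Torch REQUIRED)

add_executable(sample main.cpp)
# torch_aie: assumed name of the MindIE Torch C++ plugin library
target_link_libraries(sample "${TORCH_LIBRARIES}" torch_aie)
set_property(TARGET sample PROPERTY CXX_STANDARD 17)

The configure step can then pass the Torch installation path explicitly, for example:

cmake -DCMAKE_PREFIX_PATH="$(python3 -c 'import torch; print(torch.utils.cmake_prefix_path)')" ..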

Python Development Environment

  • Sample for compiling and running inference on a TorchScript model (a combined end-to-end sketch follows these samples):
    # Be sure to import torch first, then mindietorch
    import torch 
    import mindietorch
    
    # 1. Load the original TorchScript (ts) model
    model = torch.jit.load(model_path)
    model.eval() 
    
    # 2. Build the compilation configuration
    # 2.1 Purely static shapes, single input
    inputs = [mindietorch.Input((batchsize, 3, 224, 224))] 
    
    # 2.2 Purely static shapes, multiple inputs
    inputs = [mindietorch.Input((batchsize, 3, 224, 224)), mindietorch.Input((batchsize, 3, 224, 224))] 
    
    # 2.3 Dynamic gears (a fixed set of candidate shapes)
    inputs = []
    inputs_gear_1 = [mindietorch.Input((1, 3, 224, 224))]   # gear 1
    inputs_gear_2 = [mindietorch.Input((8, 3, 224, 224))]   # gear 2
    inputs_gear_3 = [mindietorch.Input((32, 3, 224, 224))]  # gear 3
    inputs.append(inputs_gear_1)
    inputs.append(inputs_gear_2)
    inputs.append(inputs_gear_3) 
    
    # 2.4 input shape range
    min_shape = (1, 3, 224, 224)
    max_shape = (32, 3, 224, 224)
    inputs = []
    inputs.append(mindietorch.Input(min_shape=min_shape, max_shape=max_shape)) 
    
    # 3. Compile and optimize
    compiled_module = mindietorch.compile(    
         model,
         inputs=inputs,
         precision_policy=mindietorch.PrecisionPolicy.FP16,
         truncate_long_and_double=True,
         require_full_compilation=False,
         allow_tensor_replace_int=False,
         min_block_size=3,
         torch_executed_ops=[],
         soc_version="Ascend310P3",
         optimization_level=0)     
    
    # 4. Model inference
    mindietorch.set_device(0)
    input_data = torch.ones([1, 3, 224, 224])
    results = compiled_module.forward(input_data) 
    
    # 5. Save and reload the ts model
    compiled_module.save("CompiledModel.ts")
    reload_module = torch.jit.load("CompiledModel.ts") 
    
    # 6. Export an offline OM model; the framework inference plugin does not support running inference on OM models
    output_engine = mindietorch.export_engine(
         model,
         "forward",
         inputs=inputs,
         precision_policy=mindietorch.PrecisionPolicy.FP16,
         truncate_long_and_double=True,
         require_full_compilation=False,
         allow_tensor_replace_int=False,
         min_block_size=3,
         torch_executed_ops=[],
         soc_version="Ascend310P3",
         optimization_level=0)
    with open('engine.om', 'wb') as file:     
        file.write(output_engine)
  • Sample for compiling and running inference on an ExportedProgram model:
    # Be sure to import torch first, then mindietorch
    import torch
    from torch._export import export, dynamic_dim 
    import mindietorch
    
    # 1. Load the original PyTorch model (nn.Module)
    model = torch.load(model_path)
    model.eval() 
    
    # 2. Build the compilation configuration
    # 2.1 Purely static shapes, single input
    inputs = [mindietorch.Input((batchsize, 3, 224, 224))] 
    
    # 2.2 Purely static shapes, multiple inputs
    inputs = [mindietorch.Input((batchsize, 3, 224, 224)), mindietorch.Input((batchsize, 3, 224, 224))] 
    
    # 2.3 input shape range
    min_shape = (1, 3, 224, 224)
    max_shape = (32, 3, 224, 224)
    inputs = []
    inputs.append(mindietorch.Input(min_shape=min_shape, max_shape=max_shape)) 
    
    # 3. Compile and optimize
    # 3.1 Before compiling with mindietorch, the model can optionally be exported as an ExportedProgram first
    input_data = torch.ones([1, 3, 224, 224])
    # When exporting the model with PyTorch, constraints must also be constructed if the input is dynamic
    constraints = [
        dynamic_dim(input_data, 0) <= 32,
        dynamic_dim(input_data, 0) >= 1,
    ]
    # For more detailed usage of export, see the official PyTorch documentation
    model_ep = export(model, args=(input_data,), constraints=constraints)
    
    # 3.2 Compile by calling mindietorch's compile method
    compiled_module = mindietorch.compile(    
         model_ep,
         inputs=inputs,
         precision_policy=mindietorch.PrecisionPolicy.FP16,
         soc_version="Ascend310P3",
         ir="dynamo")     
    
    # 4. Model inference
    mindietorch.set_device(0)
    results = compiled_module(input_data) 
    
    # 5. The compiled model can be saved and loaded with torch.save and torch.load
    torch.save(compiled_module, "./compiled.pt")        
    reloaded_compiled_model = torch.load("./compiled.pt")
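
As a combined usage illustration, the shape-range configuration lets one compiled module serve every batch size inside the declared range. The following is a minimal sketch, assuming a single-input TorchScript model at an illustrative path; the compile arguments mirror the TorchScript sample above.

import torch  # import torch first, then mindietorch
import mindietorch

# Compile once for batch sizes 1 through 32 (shape-range configuration, as in 2.4 above)
model = torch.jit.load("model.ts")  # illustrative model path
model.eval()
inputs = [mindietorch.Input(min_shape=(1, 3, 224, 224), max_shape=(32, 3, 224, 224))]
compiled_module = mindietorch.compile(
    model,
    inputs=inputs,
    precision_policy=mindietorch.PrecisionPolicy.FP16,
    soc_version="Ascend310P3")

# The same compiled module then accepts any batch size inside the range
mindietorch.set_device(0)
for batch_size in (1, 8, 32):
    results = compiled_module.forward(torch.ones([batch_size, 3, 224, 224]))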

C++ Development Environment

Sample for compiling and running inference on a TorchScript model:
#include <torch/torch.h> 
#include <torch/script.h> 
#include "torch/csrc/jit/api/module.h"  
#include "torch_aie.h"  

// 1. Load the original ts model 
torch::jit::script::Module model = torch::jit::load(model_path); 
model.eval();  

// 2. Build the compilation configuration 
// 2.1 Purely static shapes
std::vector<torch_aie::Input> inputs; 
inputs.emplace_back(torch_aie::Input(dims_1, torch_aie::DataType::INT32, torch_aie::TensorFormat::ND));  // input1 
inputs.emplace_back(torch_aie::Input(dims_2, torch_aie::DataType::INT32, torch_aie::TensorFormat::ND));  // input2 
torch_aie::torchscript::CompileSpec compile_spec(inputs);  

// 2.2 Dynamic gears 
std::vector<torch_aie::InputProfile> inputs_gear; 
torch_aie::InputProfile inputs_gear_1;  // gear 1 
torch_aie::InputProfile inputs_gear_2;  // gear 2 
torch_aie::InputProfile inputs_gear_3;  // gear 3 
inputs_gear_1.emplace_back(torch_aie::Input({1, 3, 640, 640}, torch_aie::DataType::FLOAT, torch_aie::TensorFormat::NCHW));
inputs_gear_2.emplace_back(torch_aie::Input({8, 3, 640, 640}, torch_aie::DataType::FLOAT, torch_aie::TensorFormat::NCHW)); 
inputs_gear_3.emplace_back(torch_aie::Input({32, 3, 640, 640}, torch_aie::DataType::FLOAT, torch_aie::TensorFormat::NCHW)); 
inputs_gear.emplace_back(inputs_gear_1); 
inputs_gear.emplace_back(inputs_gear_2); 
inputs_gear.emplace_back(inputs_gear_3);  

torch_aie::torchscript::CompileSpec compile_spec(inputs_gear);  

// 2.3 input shape range 
std::vector<int64_t> minShape = { 1, 3, 640, 640 }; 
std::vector<int64_t> maxShape = { 32, 3, 640, 640 }; 
std::vector<torch_aie::Input> inputs; 
inputs.emplace_back(torch_aie::Input(minShape, maxShape, torch_aie::DataType::FLOAT, torch_aie::TensorFormat::NCHW)); 
torch_aie::torchscript::CompileSpec compile_spec(inputs);  

// 3. Compile and optimize 
auto compiled_module = compile(model, compile_spec);  

// 4. Model inference 
torch_aie::set_device(1);  // Set the device ID for execution; if not set, the DeviceId used at compile time is kept  

std::vector<torch::jit::IValue> inputs_ivalues; 
inputs_ivalues.emplace_back(at::randn(shape_1, dtype)); 
inputs_ivalues.emplace_back(at::randn(shape_2, dtype)); 
auto results = compiled_module.forward(inputs_ivalues);  

// 5. Save and reload the ts model 
compiled_module.save("CompiledModel.ts"); 
torch::jit::script::Module reload_module = torch::jit::load("CompiledModel.ts");  

// 6. Export an offline OM model; the framework inference plugin does not support running inference on OM models 
export_engine(model, "forward", compile_spec, path);  

// 7. Deinitialize 
torch_aie::finalize(); // Call the deinitialization interface explicitly before the program exits; otherwise the program may exit abnormally
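
For reference, the fragments above assemble into a minimal program along the following lines. This is a sketch, not a definitive implementation: the model path is illustrative, the configuration follows the static single-input case in 2.1, and the fully qualified name torch_aie::torchscript::compile is assumed from the namespace of CompileSpec.

#include <torch/script.h>
#include "torch_aie.h"

int main() {
    // Load the ts model and compile it with one static-shape input
    torch::jit::script::Module model = torch::jit::load("model.ts");  // illustrative path
    model.eval();

    std::vector<torch_aie::Input> inputs;
    inputs.emplace_back(torch_aie::Input({1, 3, 224, 224},
        torch_aie::DataType::FLOAT, torch_aie::TensorFormat::NCHW));
    torch_aie::torchscript::CompileSpec compile_spec(inputs);
    auto compiled_module = torch_aie::torchscript::compile(model, compile_spec);  // assumed qualified name

    // Run inference on random data of the compiled shape
    torch_aie::set_device(0);
    std::vector<torch::jit::IValue> inputs_ivalues;
    inputs_ivalues.emplace_back(at::randn({1, 3, 224, 224}, at::kFloat));
    auto results = compiled_module.forward(inputs_ivalues);

    torch_aie::finalize();  // explicit deinitialization before exit
    return 0;
}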