When the training accuracy of a network does not meet expectations, you can collect the computation results of each operator during training (Data Dump data) and then use an accuracy comparison tool to compare them against the results of industry-standard operators (for example, TensorFlow) and measure the deviation, which helps developers quickly resolve operator accuracy issues. The operator data that can currently be collected mainly includes:
By default, operator dump data is not collected during training. To collect dump data for analysis, choose either of the following two methods. The examples below show how the relevant parameters are configured in the training script for the different APIs.
Example: enabling dump in a training script launched through tf.app.run (NPU resources initialized explicitly):

if __name__ == '__main__':
    session_config = tf.ConfigProto()
    custom_op = session_config.graph_options.rewrite_options.custom_optimizers.add()
    custom_op.name = "NpuOptimizer"
    # enable_dump: whether to enable the dump function
    custom_op.parameter_map["enable_dump"].b = True
    # dump_path: directory for storing dump data. The directory must be created in advance
    # on the environment where training is launched (container or host side), and the run
    # user configured during installation must have read/write permission to it.
    custom_op.parameter_map["dump_path"].s = tf.compat.as_bytes("/home/HwHiAiUser/output")
    # dump_step: iterations for which dump data is collected
    custom_op.parameter_map["dump_step"].s = tf.compat.as_bytes("0|5|10")
    # dump_mode: dump mode; valid values are input, output, and all
    custom_op.parameter_map["dump_mode"].s = tf.compat.as_bytes("all")
    (npu_sess, npu_shutdown) = init_resource(config=session_config)
    tf.app.run()
    shutdown_resource(npu_sess, npu_shutdown)
    close_session(npu_sess)

Estimator mode, original training script:

session_config = tf.ConfigProto(allow_soft_placement=True)
run_config = tf.estimator.RunConfig(
    train_distribute=distribution_strategy,
    session_config=session_config,
    save_checkpoints_secs=60*60*24)
classifier = tf.estimator.Estimator(
    model_fn=model_function, model_dir=flags_obj.model_dir, config=npu_run_config_init(run_config=run_config))

Estimator mode, with dump enabled:

session_config = tf.ConfigProto(allow_soft_placement=True)
custom_op = session_config.graph_options.rewrite_options.custom_optimizers.add()
custom_op.name = 'NpuOptimizer'
# enable_dump: whether to enable the dump function
custom_op.parameter_map["enable_dump"].b = True
# dump_path: directory for storing dump data. The directory must be created in advance
# on the environment where training is launched (container or host side), and the run
# user configured during installation must have read/write permission to it.
custom_op.parameter_map["dump_path"].s = tf.compat.as_bytes("/home/HwHiAiUser/output")
# dump_step: iterations for which dump data is collected
custom_op.parameter_map["dump_step"].s = tf.compat.as_bytes("0|5|10")
# dump_mode: dump mode; valid values are input, output, and all
custom_op.parameter_map["dump_mode"].s = tf.compat.as_bytes("all")
run_config = tf.estimator.RunConfig(
    train_distribute=distribution_strategy,
    session_config=session_config,
    save_checkpoints_secs=60*60*24)
classifier = tf.estimator.Estimator(
    model_fn=model_function, model_dir=flags_obj.model_dir, config=npu_run_config_init(run_config=run_config))

Estimator mode, general template for passing custom_op parameters through session_config:

session_config = tf.ConfigProto(allow_soft_placement=True)
custom_op = session_config.graph_options.rewrite_options.custom_optimizers.add()
custom_op.name = 'NpuOptimizer'
# Enable the required configuration options here, for example the dump parameters above
custom_op.parameter_map["xxx"].x = xxx
run_config = tf.estimator.RunConfig(
    train_distribute=distribution_strategy,
    session_config=session_config,
    save_checkpoints_secs=60*60*24)
classifier = tf.estimator.Estimator(
    model_fn=model_function, model_dir=flags_obj.model_dir, config=npu_run_config_init(run_config=run_config))
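As the dump_path comments above note, the directory passed to dump_path must already exist on the environment where training is launched and must be readable and writable by the configured run user. A minimal pre-flight check along these lines can surface the problem before training starts; the helper name ensure_dump_path is purely illustrative and not part of the TF Adapter API:

import os

def ensure_dump_path(dump_path):
    # Hypothetical helper: fail fast if the dump directory is missing or not
    # readable/writable by the current run user.
    if not os.path.isdir(dump_path):
        raise RuntimeError("dump_path %s does not exist; create it before starting training" % dump_path)
    if not os.access(dump_path, os.R_OK | os.W_OK):
        raise RuntimeError("run user lacks read/write permission on dump_path %s" % dump_path)

ensure_dump_path("/home/HwHiAiUser/output")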
sess.run mode, original training script:

with tf.Session(config=npu_config_proto()) as sess:
    sess.run(tf.global_variables_initializer())
    interaction_table.init.run()

sess.run mode, with dump enabled:

config_proto = tf.ConfigProto()
custom_op = config_proto.graph_options.rewrite_options.custom_optimizers.add()
custom_op.name = 'NpuOptimizer'
custom_op.parameter_map["enable_dump"].b = True
custom_op.parameter_map["dump_path"].s = tf.compat.as_bytes("/home/HwHiAiUser/output")
custom_op.parameter_map["dump_step"].s = tf.compat.as_bytes("0|5|10")
custom_op.parameter_map["dump_mode"].s = tf.compat.as_bytes("all")
config = npu_config_proto(config_proto=config_proto)
with tf.Session(config=config) as sess:
    sess.run(tf.global_variables_initializer())
    interaction_table.init.run()
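The same four dump-related custom_op parameters recur in the Estimator and sess.run examples above. If a training script builds its session configuration in more than one place, a small helper keeps the settings consistent. This is only a sketch built from the configuration calls already shown in this section; the function name set_dump_options is illustrative:

import tensorflow as tf

def set_dump_options(config_proto, dump_path, dump_step, dump_mode="all"):
    # Illustrative helper: attach the NpuOptimizer custom optimizer to an existing
    # tf.ConfigProto and set the dump parameters used throughout this section.
    custom_op = config_proto.graph_options.rewrite_options.custom_optimizers.add()
    custom_op.name = "NpuOptimizer"
    custom_op.parameter_map["enable_dump"].b = True
    custom_op.parameter_map["dump_path"].s = tf.compat.as_bytes(dump_path)
    custom_op.parameter_map["dump_step"].s = tf.compat.as_bytes(dump_step)
    custom_op.parameter_map["dump_mode"].s = tf.compat.as_bytes(dump_mode)
    return config_proto

# Equivalent to the inline configuration shown above.
session_config = set_dump_options(tf.ConfigProto(allow_soft_placement=True),
                                  dump_path="/home/HwHiAiUser/output",
                                  dump_step="0|5|10")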
Keras mode, original training script:

import tensorflow as tf
import tensorflow.python.keras as keras
from tensorflow.python.keras import backend as K
from npu_bridge.npu_init import *
npu_keras_sess = set_keras_session_npu_config()
# Data preprocessing ...
# Model construction ...
# Model compilation ...
# Model training ...

Keras mode, passing a custom session configuration to set_keras_session_npu_config:
import tensorflow as tf
import tensorflow.python.keras as keras
from tensorflow.python.keras import backend as K
from npu_bridge.npu_init import *
config_proto = tf.ConfigProto()
custom_op = config_proto.graph_options.rewrite_options.custom_optimizers.add()
custom_op.name = 'NpuOptimizer'
custom_op.parameter_map["precision_mode"].s = tf.compat.as_bytes("allow_mix_precision")
npu_keras_sess = set_keras_session_npu_config(config=config_proto)
# Data preprocessing ...
# Model construction ...
# Model compilation ...
# Model training ...
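The Keras example above passes precision_mode as its custom option. To collect dump data in Keras mode, the same mechanism can carry the dump parameters from this section instead; the following is a sketch under that assumption, which also releases the session via close_session (as in the tf.app.run example above) once training finishes:

import tensorflow as tf
import tensorflow.python.keras as keras
from tensorflow.python.keras import backend as K
from npu_bridge.npu_init import *

config_proto = tf.ConfigProto()
custom_op = config_proto.graph_options.rewrite_options.custom_optimizers.add()
custom_op.name = 'NpuOptimizer'
# Same dump parameters as in the Estimator and sess.run examples above.
custom_op.parameter_map["enable_dump"].b = True
custom_op.parameter_map["dump_path"].s = tf.compat.as_bytes("/home/HwHiAiUser/output")
custom_op.parameter_map["dump_step"].s = tf.compat.as_bytes("0|5|10")
custom_op.parameter_map["dump_mode"].s = tf.compat.as_bytes("all")
npu_keras_sess = set_keras_session_npu_config(config=config_proto)

# Data preprocessing / model construction / compilation / training ...

# Release the NPU Keras session once training is done; close_session comes from
# npu_bridge.npu_init, as in the tf.app.run example above.
close_session(npu_keras_sess)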