optimizer = tf.compat.v1.train.RMSPropOptimizer(ARGS.learning_rate,
                                                momentum=ARGS.momentum)
train_op = optimizer.minimize(loss)
with tf.compat.v1.Session() as sess:
    sess.run(tf.compat.v1.global_variables_initializer())
    # Retrain the model by running train_op over the training data; the number
    # of iterations and the data feeding are up to the user.
    sess.run(train_op)
    # Save the retrained parameters to a checkpoint file.
    saver_save.save(sess, retrain_ckpt, global_step=0)
variables_to_restore = tf.compat.v1.global_variables()
saver_restore = tf.compat.v1.train.Saver(variables_to_restore)
with tf.compat.v1.Session() as sess:
    sess.run(tf.compat.v1.global_variables_initializer())
    # Restore the retrained parameters from the checkpoint.
    saver_restore.restore(sess, retrain_ckpt)
    # Write the quantization factors to the record file.
    # Note: if no quantization feature was enabled, skip this step and go
    # straight to the next one.
    sess.run(retrain_ops[-1])
    # Freeze the graph into a pb model.
    constant_graph = tf.compat.v1.graph_util.convert_variables_to_constants(
        sess, eval_graph.as_graph_def(),
        [output.name[:-2] for output in outputs])
    with tf.io.gfile.GFile(frozen_quant_eval_pb, 'wb') as f:
        f.write(constant_graph.SerializeToString())
import amct_tensorflow as amct
amct.set_logging_level(print_level="info", save_level="info")
This step is recommended: it confirms that the original model can run inference and that its accuracy is as expected. A subset of the test set can be used here to reduce running time.
user_test_evaluate_model(evaluate_model, test_data)
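evaluate_model and user_test_evaluate_model are user-supplied. A minimal sketch of such an evaluation function, assuming evaluate_model is the path of a frozen .pb file; the tensor names ('input:0', 'logits:0') and the top-1 accuracy metric are only illustrative:

import numpy as np
import tensorflow as tf

def user_test_evaluate_model(model_pb, test_data):
    """Run the original model on (part of) the test set and print its accuracy."""
    graph_def = tf.compat.v1.GraphDef()
    with tf.io.gfile.GFile(model_pb, 'rb') as f:
        graph_def.ParseFromString(f.read())
    graph = tf.Graph()
    with graph.as_default():
        tf.compat.v1.import_graph_def(graph_def, name='')
    input_tensor = graph.get_tensor_by_name('input:0')    # illustrative name
    output_tensor = graph.get_tensor_by_name('logits:0')  # illustrative name
    with tf.compat.v1.Session(graph=graph) as sess:
        correct, total = 0, 0
        for images, labels in test_data:
            logits = sess.run(output_tensor, feed_dict={input_tensor: images})
            correct += np.sum(np.argmax(logits, axis=1) == labels)
            total += len(labels)
    print('top-1 accuracy: %.4f' % (correct / total))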
train_graph = user_load_train_graph()
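user_load_train_graph is also user-defined; it must return the tf.Graph that contains the training network. A minimal sketch, assuming the graph was previously exported as a MetaGraph file (the './model/user_model.meta' path is only an example):

import tensorflow as tf

def user_load_train_graph(meta_file='./model/user_model.meta'):
    """Rebuild the training graph from an exported MetaGraph."""
    graph = tf.Graph()
    with graph.as_default():
        # import_meta_graph restores the graph structure only; the weights
        # are restored later from the checkpoint.
        tf.compat.v1.train.import_meta_graph(meta_file)
    return graph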
record_file = './tmp/record.txt'
retrain_ops = amct.create_compressed_retrain_model(
    graph=train_graph,
    config_defination=simple_cfg,
    outputs=user_model_outputs,
    record_file=record_file)
compressed_model_path = './results/user_model'
amct.save_compressed_retrain_model(
    pb_model=trained_pb,
    outputs=user_model_outputs,
    record_file=record_file,
    save_path=compressed_model_path)
compressed_model = './results/user_model_compressed.pb'
user_do_inference(compressed_model, test_data)
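user_do_inference is likewise user-implemented. Since the compressed model is just another frozen .pb, it can follow the same loading-and-evaluation pattern sketched for user_test_evaluate_model above; a minimal sketch under that assumption:

def user_do_inference(frozen_pb, test_data):
    """Evaluate the compressed frozen model on the test set; only the .pb
    file differs from the original-model evaluation."""
    user_test_evaluate_model(frozen_pb, test_data)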