Calling TensorRT from C++

2021-01-23

The workflow splits into a build phase and an execution phase.

At build time, configure the builder, the network, and the config.

At execution time, load the engine and create a context.

//! Create a global ILogger (assumes <NvInfer.h> and the parser headers are included,
//! with `using namespace nvinfer1;` and `using namespace std;` in effect;
//! INFOE/INFOW are this project's logging macros)
static class Logger : public ILogger {
public:
	void log(Severity severity, const char* msg) override {
		if (severity == Severity::kINTERNAL_ERROR) {
			INFOE("NVInfer INTERNAL_ERROR: %s", msg);
			abort();
		}
		else if (severity == Severity::kERROR) {
			INFOE("NVInfer ERROR: %s", msg);
		}
		else if (severity == Severity::kWARNING) {
			INFOW("NVInfer WARNING: %s", msg);
		}
		else {
			//INFO("%s", msg);
		}
	}
} gLogger;

//! Shared deleter: TensorRT 7 objects are released via destroy(), not delete
template<typename _T>
static void destroyNV(_T* ptr) {
    if (ptr) ptr->destroy();
}

/* 
==== Build phase ====
*/
//! Create the builder
shared_ptr<IBuilder> builder(createInferBuilder(gLogger), destroyNV<IBuilder>);

//! Create the network; each model format has its own parser
shared_ptr<INetworkDefinition> network;
shared_ptr<ICaffeParser> caffeParser;
shared_ptr<nvonnxparser::IParser> onnxParser;
shared_ptr<nvuffparser::IUffParser> uffParser;
//! if onnx
//! Since TensorRT 7.0, ONNX models support dynamic inputs
const auto explicitBatch = 1U << static_cast<uint32_t>(nvinfer1::NetworkDefinitionCreationFlag::kEXPLICIT_BATCH);
network = shared_ptr<INetworkDefinition>(builder->createNetworkV2(explicitBatch), destroyNV<INetworkDefinition>); // create the network
onnxParser.reset(nvonnxparser::createParser(*network, gLogger), destroyNV<nvonnxparser::IParser>); // create the onnxParser
onnxParser->parseFromFile(source.onnxmodel().c_str(), 1); // parse the imported model (second argument is the verbosity level)
//! else if caffe
network = shared_ptr<INetworkDefinition>(builder->createNetworkV2(0U), destroyNV<INetworkDefinition>); // create the network
caffeParser.reset(createCaffeParser(), destroyNV<ICaffeParser>); // create the caffeParser
auto blobNameToTensor = caffeParser->parse(source.prototxt().c_str(), source.caffemodel().c_str(), *network, nvinfer1::DataType::kFLOAT); // parse the imported model
for (auto& output : outputs) {
    network->markOutput(*blobNameToTensor->find(output.c_str())); // mark the network outputs
}
//! else if tf
network = shared_ptr<INetworkDefinition>(builder->createNetworkV2(0U), destroyNV<INetworkDefinition>); // create the network
uffParser.reset(nvuffparser::createUffParser(), destroyNV<nvuffparser::IUffParser>); // create the uffParser
uffParser->registerInput("input", DimsCHW(1, 28, 28), nvuffparser::UffInputOrder::kNCHW); // declare the network inputs and outputs
uffParser->registerOutput("output");
uffParser->parse(source.uffmodel().c_str(), *network, nvinfer1::DataType::kFLOAT); // parse the imported model

shared_ptr<IBuilderConfig> config(builder->createBuilderConfig(), destroyNV<IBuilderConfig>);
//! Builder/config settings
builder->setMaxBatchSize(maxBatchSize); // largest batch of images the model can process at once
size_t _1_GB = 1 << 30;
config->setMaxWorkspaceSize(_1_GB); // cap the temporary workspace each layer may use
if (!builder->platformHasFastFp16()) {
    INFOW("Platform does not have fast fp16 support");
}
config->setFlag(BuilderFlag::kFP16);
//! if onnx: configure the optimization profile for dynamic batch
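//! (sketch) inputTensor and the min/opt/max dims used below are assumed; for a
//! hypothetical [N, 3, 224, 224] input with batch ranging from 1 to 8 they could be:
ITensor* inputTensor = network->getInput(0);  // first network input
Dims4 minDim{ 1, 3, 224, 224 };               // smallest shape the engine must accept
Dims4 optDim{ 4, 3, 224, 224 };               // shape TensorRT tunes its kernels for
Dims4 maxDim{ 8, 3, 224, 224 };               // largest shape the engine must accept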
IOptimizationProfile *profile = builder->createOptimizationProfile();
profile->setDimensions(inputTensor->getName(), OptProfileSelector::kMIN, minDim);
profile->setDimensions(inputTensor->getName(), OptProfileSelector::kOPT, optDim);
profile->setDimensions(inputTensor->getName(), OptProfileSelector::kMAX, maxDim);
config->addOptimizationProfile(profile);
//! Build the engine from the builder, network, and config above
shared_ptr<ICudaEngine> engine(builder->buildEngineWithConfig(*network, *config), destroyNV<ICudaEngine>);
//! Serialize the engine
shared_ptr<IHostMemory> seridata(engine->serialize(), destroyNV<IHostMemory>);
//! Save the serialized engine (model.trt) to disk
//! ...
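//! (sketch) one way to do the save step, assuming a hypothetical path "model.trt"
//! and #include <fstream>:
std::ofstream out("model.trt", std::ios::binary);  // hypothetical output path
out.write(static_cast<const char*>(seridata->data()), seridata->size());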

/*
==== Execution phase ====
*/
//! At runtime, deserialize to recover the engine; running an engine requires a context, and one engine may own several contexts
runtime_ = shared_ptr<IRuntime>(createInferRuntime(gLogger), destroyNV<IRuntime>);
engine_ = shared_ptr<ICudaEngine>(runtime_->deserializeCudaEngine(data.data(), data.size(), pluginFactory_.get()), destroyNV<ICudaEngine>); // deserialize to get the engine
context_ = shared_ptr<IExecutionContext>(engine_->createExecutionContext(), destroyNV<IExecutionContext>); // create the context
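//! (sketch) the `data` buffer above is assumed to hold the engine file contents;
//! it could be loaded, before the deserialize call, like this (assumes <fstream> and <vector>):
std::ifstream in("model.trt", std::ios::binary);  // hypothetical path, matching the save step
std::vector<char> data((std::istreambuf_iterator<char>(in)), std::istreambuf_iterator<char>());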

//! Look up the binding indices of the input and output layers
int inputIndex = engine_->getBindingIndex(INPUT_BLOB_NAME);
int outputIndex = engine_->getBindingIndex(OUTPUT_BLOB_NAME);
void* buffers[2];
buffers[inputIndex] = inputBuffer;
buffers[outputIndex] = outputBuffer;
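//! (sketch) inputBuffer/outputBuffer are assumed to be device pointers allocated
//! before the assignments above; sizes are illustrative (a [N, 3, 224, 224] input
//! and a 1000-class output):
void* inputBuffer = nullptr;
void* outputBuffer = nullptr;
size_t inputBytes = maxBatchSize * 3 * 224 * 224 * sizeof(float);
size_t outputBytes = maxBatchSize * 1000 * sizeof(float);
cudaMalloc(&inputBuffer, inputBytes);    // device memory for the input binding
cudaMalloc(&outputBuffer, outputBytes);  // device memory for the output binding
cudaStream_t stream_ = nullptr;
cudaStreamCreate(&stream_);              // stream used by enqueue/enqueueV2 below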

Dims4 input_dims{ inputs_[0]->num(), inputs_[0]->channel(), inputs_[0]->height(), inputs_[0]->width() };
//! Use setBindingDimensions() and setOptimizationProfile() for dynamic-shape models
context_->setBindingDimensions(0, input_dims);
//! enqueue, enqueueV2: run inference asynchronously
//! execute, executeV2: run inference synchronously
//! enqueue requires an explicit inputBatchSize
bool execute_result = context_->enqueue(inputBatchSize, buffers, stream_, nullptr);
//! enqueueV2 has no inputBatchSize parameter
bool execute_result = context_->enqueueV2(buffers, stream_, nullptr);
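//! (sketch) the output still lives on the device when enqueue/enqueueV2 returns;
//! hostOutput is a hypothetical host buffer of outputBytes bytes:
cudaMemcpyAsync(hostOutput, outputBuffer, outputBytes, cudaMemcpyDeviceToHost, stream_);
cudaStreamSynchronize(stream_);  // wait for inference and the copy to finish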

Dynamic batch: enqueueV2 supports dynamic batching. Export the ONNX model with dynamic_axes set (see ONNX模型解析); then setBindingDimensions(0, input_dims) lets you set the batch size for each inference call, as in the sketch below.
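A minimal sketch of per-call dynamic batching, assuming the hypothetical [N, 3, 224, 224] profile above (the batch must stay within the profile's kMIN..kMAX range):

int batch = 4;  // hypothetical batch size for this call
context_->setBindingDimensions(0, Dims4{ batch, 3, 224, 224 });
context_->enqueueV2(buffers, stream_, nullptr);  // enqueueV2 picks up the new dimensions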

References

TensorRT Developer Guide

