I am trying to create a TensorRT engine from an ONNX model using the TensorRT C++ API. Following the documentation, I have written the code to read the model, serialize the TensorRT engine, and write it to disk. I installed TensorRT 7 on Colab using the Debian installation instructions.
Here is the C++ code, which I compile with g++ rnxt.cpp -o rnxt:
#include <cuda_runtime_api.h>
#include <NvOnnxParser.h>
#include <NvInfer.h>
#include <cstdlib>
#include <fstream>
#include <iostream>
#include <sstream>
#include <iterator>
#include <algorithm>
class Logger : public nvinfer1::ILogger
{
    void log(Severity severity, const char* msg) override
    {
        // suppress info-level messages
        if (severity != Severity::kINFO)
            std::cout << msg << std::endl;
    }
} gLogger;
int main(){
    int maxBatchSize = 32;
    nvinfer1::IBuilder* builder = nvinfer1::createInferBuilder(gLogger);

    // create the network with an explicit batch dimension, as required by the ONNX parser
    const auto explicitBatch = 1U << static_cast<uint32_t>(nvinfer1::NetworkDefinitionCreationFlag::kEXPLICIT_BATCH);
    nvinfer1::INetworkDefinition* network = builder->createNetworkV2(explicitBatch);

    // parse the ONNX model and print any parser errors
    nvonnxparser::IParser* parser = nvonnxparser::createParser(*network, gLogger);
    parser->parseFromFile("saved_resnext.onnx", 1);
    for (int i = 0; i < parser->getNbErrors(); ++i)
    {
        std::cout << parser->getError(i)->desc() << std::endl;
    }

    builder->setMaxBatchSize(maxBatchSize);
    nvinfer1::IBuilderConfig* config = builder->createBuilderConfig();
    config->setMaxWorkspaceSize(1 << 20);
    nvinfer1::ICudaEngine* engine = builder->buildEngineWithConfig(*network, *config);
    parser->destroy();
    network->destroy();
    config->destroy();
    builder->destroy();

    // serialize the engine and write it to disk in binary mode
    nvinfer1::IHostMemory *serializedModel = engine->serialize();
    std::ofstream engine_file("saved_resnext.engine", std::ios::binary);
    engine_file.write((const char*)serializedModel->data(), serializedModel->size());
    serializedModel->destroy();
    return 0;
}
When compiling, I get the following errors:
/tmp/ccJaGxCX.o: In function `nvinfer1::(anonymous namespace)::createInferBuilder(nvinfer1::ILogger&)':
rnxt.cpp:(.text+0x19): undefined reference to `createInferBuilder_INTERNAL'
/tmp/ccJaGxCX.o: In function `nvonnxparser::(anonymous namespace)::createParser(nvinfer1::INetworkDefinition&, nvinfer1::ILogger&)':
rnxt.cpp:(.text+0x43): undefined reference to `createNvOnnxParser_INTERNAL'
collect2: error: ld returned 1 exit status
I also got errors related to <cuda_runtime_api.h>, so I copied the headers from the CUDA include directory (/usr/local/cuda-11.0/targets/x86_64-linux/include) into /usr/include, after which I got the errors above. I don't have much experience with C++, so any help would be appreciated.
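Rather than copying the headers into /usr/include, the CUDA include directory can be passed to g++ directly. A minimal sketch, assuming the CUDA 11.0 path mentioned above:
# pass the CUDA include path instead of copying headers into /usr/include
g++ rnxt.cpp -o rnxt -I/usr/local/cuda-11.0/targets/x86_64-linux/include
This only fixes the header lookup; the undefined references above come from the TensorRT libraries not being passed to the linker (see the answer below).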
Edit: I also installed libnvinfer with the following commands:
!apt-get install -y libnvinfer7=7.1.3-1+cuda11.0
!apt-get install -y libnvinfer-dev=7.1.3-1+cuda11.0
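Note that libnvinfer-dev does not ship the ONNX parser library itself. Assuming the TensorRT 7 Debian repo follows its usual package naming (the names below are an assumption, so verify them first), the parser comes from separate packages:
# assumed package names -- check what the repo actually provides before installing
!apt-cache search nvonnxparser
!apt-get install -y libnvonnxparsers7=7.1.3-1+cuda11.0 libnvonnxparsers-dev=7.1.3-1+cuda11.0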
This problem is caused by nvonnxparser.so not being linked in your build. Just add
target_link_libraries(${TARGET_NAME} nvonnxparser)
to your CMake.
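Since the question compiles with g++ directly rather than through CMake, the equivalent fix is to add the linker flags on the command line. A sketch, assuming the Debian packages install the TensorRT libraries into the default linker search path and CUDA 11.0 lives under the path mentioned in the question:
# link against TensorRT, the ONNX parser, and the CUDA runtime
g++ rnxt.cpp -o rnxt \
    -I/usr/local/cuda-11.0/targets/x86_64-linux/include \
    -L/usr/local/cuda-11.0/targets/x86_64-linux/lib \
    -lnvinfer -lnvonnxparser -lcudart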
https://stackoverflow.com/questions/62573335