首页
学习
活动
专区
圈层
工具
发布
首页
学习
活动
专区
圈层
工具
MCP广场
社区首页 >问答首页 >用YOLOv5和OpenCV加载C++时出错

用YOLOv5和OpenCV加载C++时出错
EN

Stack Overflow用户
提问于 2022-02-08 15:44:55
回答 2查看 1.3K关注 0票数 0

我试图使用YOLOv5与C++和Opencv一起运行对象检测。我主要遵循这个例子:https://github.com/doleron/yolov5-opencv-cpp-python/blob/main/cpp/yolo.cpp

我的代码是:

代码语言:javascript
运行
复制
#include <algorithm>
#include <chrono>
#include <cmath>
#include <cstring>
#include <iomanip>
#include <iostream>
#include <iterator>
#include <sstream>
#include <string>
#include <vector>

#include <opencv2/opencv.hpp>
// Loads the YOLOv5 ONNX model from "yolov5s.onnx" (current working directory)
// into `net` and selects the inference backend.
// @param net      receives the loaded network.
// @param is_cuda  true  -> CUDA backend with FP16 target,
//                 false -> default OpenCV CPU backend.
// NOTE(review): readNetFromONNX throws/aborts if the model contains layers
// this OpenCV build cannot parse (e.g. the "Range" node error in the
// question) — export the model with `--simplify` to avoid that.
void load_net(cv::dnn::Net& net, bool is_cuda)
{
    auto result = cv::dnn::readNetFromONNX("yolov5s.onnx");
    if (is_cuda)
    {
        std::cout << "Attempting to use CUDA\n"; // fixed typo: was "Attempty"
        result.setPreferableBackend(cv::dnn::DNN_BACKEND_CUDA);
        result.setPreferableTarget(cv::dnn::DNN_TARGET_CUDA_FP16);
    }
    else
    {
        std::cout << "Running on CPU\n";
        result.setPreferableBackend(cv::dnn::DNN_BACKEND_OPENCV);
        result.setPreferableTarget(cv::dnn::DNN_TARGET_CPU);
    }
    net = result;
}
// Box colors cycled per class id (class_id % colors.size()).
const std::vector<cv::Scalar> colors = { cv::Scalar(255, 255, 0), cv::Scalar(0, 255, 0), cv::Scalar(0, 255, 255), cv::Scalar(255, 0, 0) };
// Network input resolution the YOLOv5s model was exported at.
const float INPUT_WIDTH = 640.0;
const float INPUT_HEIGHT = 640.0;
// Minimum per-class score for a candidate box to be kept.
const float SCORE_THRESHOLD = 0.2;
// IoU threshold used by non-maximum suppression.
const float NMS_THRESHOLD = 0.4;
// Minimum objectness confidence for a row of the output tensor.
const float CONFIDENCE_THRESHOLD = 0.4;
// One final detection: class index into the class-name list, its confidence,
// and the bounding box in source-image pixel coordinates.
struct Detection
{
    int class_id;
    float confidence;
    cv::Rect box;
};
// Letterboxes `source` onto a square black canvas whose side length is the
// larger of the two image dimensions, with the image placed at the top-left.
// YOLOv5 expects a square input; padding (instead of stretching) preserves
// the aspect ratio.
cv::Mat format_yolov5(const cv::Mat& source) {
    const int side = MAX(source.cols, source.rows);
    cv::Mat squared = cv::Mat::zeros(side, side, CV_8UC3);
    source.copyTo(squared(cv::Rect(0, 0, source.cols, source.rows)));
    return squared;
}
// Runs one inference pass of `net` on `image` and appends the post-processed
// detections to `output`.
// @param image      input frame (BGR, 8-bit, 3 channels).
// @param net        a network loaded by load_net().
// @param output     receives the NMS-filtered detections.
// @param className  class names; its size must equal the model's class count.
void detect(cv::Mat& image, cv::dnn::Net& net, std::vector<Detection>& output, const std::vector<std::string>& className) {
    cv::Mat blob;
    auto input_image = format_yolov5(image);
    // Scale pixels to [0,1], resize to the network input, swap BGR -> RGB.
    cv::dnn::blobFromImage(input_image, blob, 1. / 255., cv::Size(INPUT_WIDTH, INPUT_HEIGHT), cv::Scalar(), true, false);
    net.setInput(blob);
    std::vector<cv::Mat> outputs;
    net.forward(outputs, net.getUnconnectedOutLayersNames());

    // Factors mapping network coordinates back onto the padded source image.
    const float x_factor = input_image.cols / INPUT_WIDTH;
    const float y_factor = input_image.rows / INPUT_HEIGHT;

    // The YOLOv5 output tensor is [1, rows, 5 + num_classes]. Read both
    // dimensions from the tensor instead of hard-coding 25200 x 85, so other
    // input sizes / class counts keep working. (The original declared
    // `dimensions = 85` but then advanced `data` by the literal 85.)
    const int rows = outputs[0].size[1];
    const int dimensions = outputs[0].size[2];
    float* data = (float*)outputs[0].data;

    std::vector<int> class_ids;
    std::vector<float> confidences;
    std::vector<cv::Rect> boxes;
    for (int i = 0; i < rows; ++i) {
        float confidence = data[4]; // objectness score
        if (confidence >= CONFIDENCE_THRESHOLD) {
            // Per-class scores start at offset 5 of each row.
            float* classes_scores = data + 5;
            cv::Mat scores(1, className.size(), CV_32FC1, classes_scores);
            cv::Point class_id;
            double max_class_score;
            minMaxLoc(scores, 0, &max_class_score, 0, &class_id);
            if (max_class_score > SCORE_THRESHOLD) {
                confidences.push_back(confidence);
                class_ids.push_back(class_id.x);
                // Convert (center x, center y, width, height) in network
                // units to a pixel-space rectangle.
                float x = data[0];
                float y = data[1];
                float w = data[2];
                float h = data[3];
                int left = int((x - 0.5 * w) * x_factor);
                int top = int((y - 0.5 * h) * y_factor);
                int width = int(w * x_factor);
                int height = int(h * y_factor);
                boxes.push_back(cv::Rect(left, top, width, height));
            }
        }
        data += dimensions; // advance one row (was a hard-coded 85)
    }

    // Suppress overlapping candidates, then emit the survivors.
    std::vector<int> nms_result;
    cv::dnn::NMSBoxes(boxes, confidences, SCORE_THRESHOLD, NMS_THRESHOLD, nms_result);
    for (size_t i = 0; i < nms_result.size(); i++) {
        int idx = nms_result[i];
        Detection result;
        result.class_id = class_ids[idx];
        result.confidence = confidences[idx];
        result.box = boxes[idx];
        output.push_back(result);
    }
}
int main(int argc, char** argv)
{
    std::vector<std::string> class_list = { "person", "bicycle", "car", "motorbike", "aeroplane", "bus", "train", "truck", "boat", "traffic light", "fire hydrant", "stop sign", "parking meter", "bench", "bird", "cat", "dog", "horse", "sheep", "cow", "elephant", "bear", "zebra", "giraffe", "backpack", "umbrella", "handbag", "tie", "suitcase", "frisbee", "skis", "snowboard", "sports ball", "kite", "baseball bat", "baseball glove", "skateboard", "surfboard", "tennis racket", "bottle", "wine glass", "cup", "fork", "knife", "spoon", "bowl", "banana", "apple", "sandwich", "orange", "broccoli", "carrot", "hot dog", "pizza", "donut", "cake", "chair", "sofa", "pottedplant", "bed", "diningtable", "toilet", "tvmonitor", "laptop", "mouse", "remote", "keyboard", "cell phone", "microwave", "oven", "toaster", "sink", "refrigerator", "book", "clock", "vase", "scissors", "teddy bear", "hair drier", "toothbrush" };
    cv::Mat frame;
    cv::VideoCapture capture(0);
    if (!capture.isOpened())
    {
        std::cerr << "Error opening video file\n";
        return -1;
    }
    bool is_cuda = argc > 1 && strcmp(argv[1], "cuda") == 0;
    cv::dnn::Net net;
    load_net(net, is_cuda);
    auto start = std::chrono::high_resolution_clock::now();
    int frame_count = 0;
    float fps = -1;
    int total_frames = 0;
    while (true)
    {
        capture.read(frame);
        if (frame.empty())
        {
            std::cout << "End of stream\n";
            break;
        }
        std::vector<Detection> output;
        detect(frame, net, output, class_list);
        frame_count++;
        total_frames++;
        int detections = output.size();
        for (int i = 0; i < detections; ++i)
        {
            auto detection = output[i];
            auto box = detection.box;
            auto classId = detection.class_id;
            const auto color = colors[classId % colors.size()];
            cv::rectangle(frame, box, color, 3);
            cv::rectangle(frame, cv::Point(box.x, box.y - 20), cv::Point(box.x + box.width, box.y), color, cv::FILLED);
            cv::putText(frame, class_list[classId].c_str(), cv::Point(box.x, box.y - 5), cv::FONT_HERSHEY_SIMPLEX, 0.5, cv::Scalar(0, 0, 0));
        }
        if (frame_count >= 30)
        {
            auto end = std::chrono::high_resolution_clock::now();
            fps = frame_count * 1000.0 / std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count();
            frame_count = 0;
            start = std::chrono::high_resolution_clock::now();
        }
        if (fps > 0)
        {
            std::ostringstream fps_label;
            fps_label << std::fixed << std::setprecision(2);
            fps_label << "FPS: " << fps;
            std::string fps_label_str = fps_label.str();
            cv::putText(frame, fps_label_str.c_str(), cv::Point(10, 25), cv::FONT_HERSHEY_SIMPLEX, 1, cv::Scalar(0, 0, 255), 2);
        }
        cv::imshow("output", frame);
        if (cv::waitKey(1) != -1)
        {
            capture.release();
            std::cout << "finished by user\n";
            break;
        }
        char c = (char)cv::waitKey(25);//Allowing 25 milliseconds frame processing time and initiating break condition//
        if (c == 27) { //If 'Esc' is entered break the loop//
            break;
        }
    }
    std::cout << "Total frames: " << total_frames << "\n";
    return 0;
}

运行此命令时,读取onnx文件时会出现错误,这是控制台的输出。

代码语言:javascript
运行
复制
OpenCV(4.5.5) Error: Unspecified error (> Node [Range@ai.onnx]:(354) parse error: OpenCV(4.5.5) C:\build\master_winpack-build-win64-vc14\opencv\modules\dnn\src\dnn.cpp:621: error: (-2:Unspecified error) Can't create layer "354" of type "Range" in function 'cv::dnn::dnn4_v20211220::LayerData::getLayerInstance'
> ) in cv::dnn::dnn4_v20211220::ONNXImporter::handleNode, file C:\build\master_winpack-build-win64-vc14\opencv\modules\dnn\src\onnx\onnx_importer.cpp, line 928
OpenCV: terminate handler is called! The last OpenCV error is:
OpenCV(4.5.5) Error: Unspecified error (> Node [Range@ai.onnx]:(354) parse error: OpenCV(4.5.5) C:\build\master_winpack-build-win64-vc14\opencv\modules\dnn\src\dnn.cpp:621: error: (-2:Unspecified error) Can't create layer "354" of type "Range" in function 'cv::dnn::dnn4_v20211220::LayerData::getLayerInstance'
> ) in cv::dnn::dnn4_v20211220::ONNXImporter::handleNode, file C:\build\master_winpack-build-win64-vc14\opencv\modules\dnn\src\onnx\onnx_importer.cpp, line 928

有谁知道这里的问题是什么,以及如何解决它?

编辑:如果您想运行它并自己检查输出,您可以从https://github.com/ultralytics/yolov5/releases下载该模型

EN

回答 2

Stack Overflow用户

发布于 2022-07-26 02:52:59

我要分享我的经验。

我的环境:

  1. Download
    • Windows 10, x64
    • GeForce 1660 Ti
    • Visual Studio 2019, version 16
    • OpenCV 4.6.0
    • Python 3.8
    • CUDA 11.6, cuDNN 8.4
    • PyTorch 1.12.0+cu116
    • OpenCV 源代码(用于带 CUDA 支持的源码编译)。

遵循以下指导方针:

https://machinelearningprojects.net/build-opencv-with-cuda-and-cudnn/

在我的例子中,由于我只需要 Python 和 C++,所以在 CMake 配置时没有勾选以下构建模块:test、objc、java、js。

  1. Git克隆并运行export.py以获得onnx模型,并运行detect.py来检查OpenCV dnn模块.

命令:

代码语言:javascript
运行
复制
python export.py --weights yolov5s.pt --simplify --include onnx
python detect.py --weights yolov5s.onnx --device 0 --dnn

不要忘记添加参数 --simplify,否则当您在 Visual Studio 中使用 OpenCV 和 C++ 加载模型时会失败。第二个命令用于检查 OpenCV dnn 模块:如果在这里失败,它在 Visual Studio 中也会失败。

https://docs.nvidia.com/deeplearning/cudnn/install-guide/index.html

  1. cuDNN安装

implementation

  • 检查后端和目标

下面的代码片段将用于执行此操作。

代码语言:javascript
运行
复制
// Human-readable names for the cv::dnn backend enum values, indexed by the
// enum's integer value.
const string namesBackend[] = {
    "DNN_BACKEND_DEFAULT",
    "DNN_BACKEND_HALIDE",
    "DNN_BACKEND_INFERENCE_ENGINE",
    "DNN_BACKEND_OPENCV",
    "DNN_BACKEND_VKCOM",
    "DNN_BACKEND_CUDA",
    "DNN_BACKEND_WEBNN",
    "DNN_BACKEND_TIMVX"
};
// Human-readable names for the cv::dnn target enum values, indexed by the
// enum's integer value.
const string namesTarget[] = {
    "DNN_TARGET_CPU",
    "DNN_TARGET_OPENCL",
    "DNN_TARGET_OPENCL_FP16",
    "DNN_TARGET_MYRIAD",
    "DNN_TARGET_VULKAN",
    "DNN_TARGET_FPGA",
    "DNN_TARGET_CUDA",
    "DNN_TARGET_CUDA_FP16",
    "DNN_TARGET_HDDL",
    "DNN_TARGET_NPU"
};
// List every (backend, target) pair this OpenCV build supports, then the
// number of CUDA devices OpenCV can see. If DNN_BACKEND_CUDA is absent or
// the device count is 0, CUDA inference will not work.
auto listBackend = cv::dnn::getAvailableBackends();
cout << "########## Available Backends ##########" << endl; // fixed typo: was "Avaliable"
for (const auto& bkend : listBackend) {
    cout << "Backend: " << namesBackend[bkend.first]
         << ", Target: " << namesTarget[bkend.second] << endl; // was wrapped mid-statement
}
int numDevices = cv::cuda::getCudaEnabledDeviceCount();
cout << "Number of cuda device: " << numDevices << endl;

  • 负载模型

代码片段:

代码语言:javascript
运行
复制
// Load the exported YOLOv5s ONNX model and request the CUDA backend with an
// FP16 target.
// NOTE(review): hard-coded absolute Windows path — adjust for your machine.
const string model_path_yolov5s = "d:\\C++\\PedestrianDetector\\models\\yolov5s.onnx";
cv::dnn::Net net = cv::dnn::readNetFromONNX(model_path_yolov5s);
net.setPreferableBackend(cv::dnn::DNN_BACKEND_CUDA);
net.setPreferableTarget(cv::dnn::DNN_TARGET_CUDA_FP16);

  • Detect

代码片段:

代码语言:javascript
运行
复制
// Time a single forward pass: preprocess the frame into a blob, run the
// network, and measure the elapsed wall-clock milliseconds.
chrono::system_clock::time_point startTime = chrono::system_clock::now();
cv::dnn::blobFromImage(frame, blob, 1.0 / 255.0,
                       cv::Size(INPUT_WIDTH, INPUT_HEIGHT),
                       cv::Scalar(0, 0, 0), true, false);
net.setInput(blob);
vector<cv::Mat> detections;
vector<string> outLayerNames = net.getUnconnectedOutLayersNames();
net.forward(detections, outLayerNames);
chrono::system_clock::time_point endTime = chrono::system_clock::now();
chrono::milliseconds millSec = chrono::duration_cast<chrono::milliseconds>(endTime - startTime);
票数 1
EN

Stack Overflow用户

发布于 2022-04-07 07:48:22

您需要使用函数 readNet 来加载 onnx 文件,而不是 readNetFromONNX。我还使用 Ultralytics 的 YOLOv5 仓库将 (.pt) 文件导出为 (.onnx) 文件。将这一行 `auto result = cv::dnn::readNetFromONNX("yolov5s.onnx");` 替换为 `auto result = cv::dnn::readNet("yolov5s.onnx");`。

票数 -1
EN
页面原文内容由Stack Overflow提供。腾讯云小微IT领域专用引擎提供翻译支持
原文链接:

https://stackoverflow.com/questions/71036722

复制
相关文章

相似问题

领券
问题归档专栏文章快讯文章归档关键词归档开发者手册归档开发者手册 Section 归档