OpenCV DNN Module Tutorial (4): Mask-RCNN Instance Segmentation

Author: Color Space
Published: 2020-10-29
Column: OpenCV与AI深度学习

This article extends the official OpenCV DNN module tutorials. It shows how to load a model trained with the TensorFlow Object Detection API in OpenCV and run instance segmentation, using a Mask-RCNN defect-detection model as the example. The TensorFlow Object Detection API is on GitHub at: https://github.com/tensorflow/models/tree/master/research/object_detection

This article targets TensorFlow 1.x (TF 2.x will be covered once OpenCV supports it stably). The steps for running a Mask-RCNN model with the OpenCV DNN module are as follows:

(1) Download, or train and export, a frozen model in .pb format. This article uses a self-trained defect-detection model, frozen_inference_graph.pb:
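If you trained the model yourself with the TF1 Object Detection API, the frozen graph is typically exported with the API's export_inference_graph.py script. A sketch of the command (the paths and checkpoint number are placeholders, not the ones used in this article):

python export_inference_graph.py --input_type image_tensor --pipeline_config_path path\to\mask_rcnn_inception_v2_coco.config --trained_checkpoint_prefix path\to\model.ckpt-XXXX --output_directory path\to\export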

(2) Generate a .pbtxt text graph from the .pb file; for Mask-RCNN, use the tf_text_graph_mask_rcnn.py script. The command is as follows:

There are three main parameters:

--input: full path to the input .pb model file;

--output: full path to the output .pbtxt file;

--config: full path to the training config file.

Full command:

python tf_text_graph_mask_rcnn.py --input E:\Practice\TensorFlow\DataSet\mask_defects2\model\export\frozen_inference_graph.pb --output E:\Practice\TensorFlow\DataSet\mask_defects2\model\export\frozen_inference_graph.pbtxt --config E:\Practice\TensorFlow\DataSet\mask_defects2\model\train\mask_rcnn_inception_v2_coco.config

Result:
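Before building the full demo, a minimal C++ sketch like the one below (file paths are examples) can verify that the generated .pbtxt matches the .pb; if the text graph does not match the weights, readNetFromTensorflow typically throws an exception instead of returning a usable network:

#include <opencv2/dnn.hpp>
#include <iostream>

int main()
{
  // Paths are examples; point them at the .pb/.pbtxt generated above
  cv::dnn::Net net = cv::dnn::readNetFromTensorflow(
      "frozen_inference_graph.pb", "frozen_inference_graph.pbtxt");
  std::cout << (net.empty() ? "Load failed" : "Load OK") << std::endl;
  return 0;
}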

(3) Configure OpenCV 4.4 and load an image for testing. The code is as follows:

#include <fstream>
#include <sstream>
#include <iostream>
#include <string>

#include <opencv2/dnn.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/highgui.hpp>


using namespace cv;
using namespace dnn;
using namespace std;

// Initialize the parameters
float confThreshold = 0.4; // Confidence threshold
float maskThreshold = 0.5; // Mask threshold

vector<string> classes;
vector<Scalar> colors;

// Draw the predicted bounding box, colorize and show the mask on the image
void drawBox(Mat& frame, int classId, float conf, Rect box, Mat& objectMask)
{
  //Draw a rectangle displaying the bounding box
  rectangle(frame, Point(box.x, box.y), Point(box.x + box.width, box.y + box.height), Scalar(255, 178, 50), 5);

  //Get the label for the class name and its confidence
  string label = format("%.2f", conf);
  if (!classes.empty())
  {
    CV_Assert(classId < (int)classes.size());
    label = classes[classId] + ":" + label;
  }

  //Display the label at the top of the bounding box
  int baseLine;
  Size labelSize = getTextSize(label, FONT_HERSHEY_SIMPLEX, 0.5, 1, &baseLine);
  box.y = max(box.y, labelSize.height);
  //rectangle(frame, Point(box.x, box.y - round(1.5*labelSize.height)), Point(box.x + round(1.5*labelSize.width), box.y + baseLine), Scalar(255, 255, 255), FILLED);
  putText(frame, label, Point(box.x, box.y), FONT_HERSHEY_SIMPLEX, 0.75, Scalar(0, 255, 0), 2);

  Scalar color = colors[classId%colors.size()];

  // Resize the mask, threshold, color and apply it on the image
  resize(objectMask, objectMask, Size(box.width, box.height));
  Mat mask = (objectMask > maskThreshold);
  Mat coloredRoi = (0.5 * color + 0.7 * frame(box));
  coloredRoi.convertTo(coloredRoi, CV_8UC3);

  // Draw the contours on the image
  vector<Mat> contours;
  Mat hierarchy;
  mask.convertTo(mask, CV_8U);
  findContours(mask, contours, hierarchy, RETR_CCOMP, CHAIN_APPROX_SIMPLE);
  drawContours(coloredRoi, contours, -1, color, 5, LINE_8, hierarchy, 100);
  coloredRoi.copyTo(frame(box), mask);

}
// For each frame, extract the bounding box and mask for each detected object

void postprocess(Mat& frame, const vector<Mat>& outs)
{
  Mat outDetections = outs[0];
  Mat outMasks = outs[1];

  // Output size of masks is NxCxHxW where
  // N - number of detected boxes
  // C - number of classes (excluding background)
  // HxW - segmentation shape
  const int numDetections = outDetections.size[2];
  const int numClasses = outMasks.size[1];

  outDetections = outDetections.reshape(1, outDetections.total() / 7);
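  // After the reshape, each row holds 7 floats:
  // [batchId, classId, score, left, top, right, bottom] (coordinates normalized to [0,1])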
  for (int i = 0; i < numDetections; ++i)
  {
    float score = outDetections.at<float>(i, 2);
    if (score > confThreshold)
    {
      // Extract the bounding box
      int classId = static_cast<int>(outDetections.at<float>(i, 1));
      int left = static_cast<int>(frame.cols * outDetections.at<float>(i, 3));
      int top = static_cast<int>(frame.rows * outDetections.at<float>(i, 4));
      int right = static_cast<int>(frame.cols * outDetections.at<float>(i, 5));
      int bottom = static_cast<int>(frame.rows * outDetections.at<float>(i, 6));

      left = max(0, min(left, frame.cols - 1));
      top = max(0, min(top, frame.rows - 1));
      right = max(0, min(right, frame.cols - 1));
      bottom = max(0, min(bottom, frame.rows - 1));
      Rect box = Rect(left, top, right - left + 1, bottom - top + 1);

      // Extract the mask for the object
      Mat objectMask(outMasks.size[2], outMasks.size[3], CV_32F, outMasks.ptr<float>(i, classId));

      // Draw bounding box, colorize and show the mask on the image
      drawBox(frame, classId, score, box, objectMask);

    }
  }
}


/***************Image Test****************/
int main()
{
  // Load names of classes
  string classesFile = "./model2/label.names";
  ifstream ifs(classesFile.c_str());
  string line;
  while (getline(ifs, line)) classes.push_back(line);

  // Load the colors
  string colorsFile = "./model2/colors.txt";
  ifstream colorFptr(colorsFile.c_str());
  while (getline(colorFptr, line))
  {
    char* pEnd;
    double r, g, b;
    r = strtod(line.c_str(), &pEnd);
    g = strtod(pEnd, &pEnd); // advance pEnd so the third value is parsed correctly
    b = strtod(pEnd, NULL);
    colors.push_back(Scalar(r, g, b, 255.0));
  }
  // Fall back to a default color if colors.txt is missing or empty
  if (colors.empty())
    colors.push_back(Scalar(0, 255, 0, 255.0));

  // Give the configuration and weight files for the model
  String textGraph = "./model2/defect_label.pbtxt";
  String modelWeights = "./model2/frozen_inference_graph.pb";

  // Load the network
  Net net = readNetFromTensorflow(modelWeights, textGraph);

  Mat frame, blob;

  // Create a window
  static const string kWinName = "OpenCV DNN Mask-RCNN Demo";

  // Read the test image.
  frame = imread("./imgs/4.jpg");

  // Create a 4D blob from the image. The input size is adjustable;
  // it can also match the image size or the size used during training.
  blobFromImage(frame, blob, 1.0, Size(800, 800), Scalar(), true, false);

  //Sets the input to the network
  net.setInput(blob);

  // Runs the forward pass to get output from the output layers
  // Names of the two output layers: detection boxes and masks
  std::vector<String> outNames(2);
  outNames[0] = "detection_out_final";
  outNames[1] = "detection_masks";
  vector<Mat> outs;
  net.forward(outs, outNames);

  // Extract the bounding box and mask for each of the detected objects
  postprocess(frame, outs);

  // Put efficiency information. The function getPerfProfile returns the overall time for inference(t) and the timings for each of the layers(in layersTimes)
  vector<double> layersTimes;
  double freq = getTickFrequency() / 1000;
  double t = net.getPerfProfile(layersTimes) / freq;
  string label = format("Inference time: %.0f ms", t);
  putText(frame, label, Point(10, 20), FONT_HERSHEY_SIMPLEX, 0.8, Scalar(0, 0, 255), 2);

  // Write the frame with the detection boxes and masks
  Mat detectedFrame;
  frame.convertTo(detectedFrame, CV_8U);
  imwrite("result.jpg", detectedFrame);
  //resize(frame, frame, Size(frame.cols / 3, frame.rows / 3));
  imshow(kWinName, frame);
  waitKey(0);
  return 0;
}
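The code above reads two small text files next to the model. The contents below are only an illustration for a hypothetical single-class defect model: label.names holds one class name per line, and colors.txt holds one "R G B" triple per line.

label.names:
defect

colors.txt:
0 255 0

To build on Linux, assuming pkg-config can locate OpenCV 4 and the source is saved as mask_rcnn_demo.cpp (a file name chosen here for illustration), a command like the following works; on Windows, as in this article, set the OpenCV 4.4 include and library directories in the Visual Studio project instead:

g++ mask_rcnn_demo.cpp -o mask_rcnn_demo $(pkg-config --cflags --libs opencv4)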

Test image:

Result:
