
Using C++ and OpenCV to Call a Convolutional Neural Network Trained in TensorFlow

In the OpenCV 3.3 release, the DNN module was promoted from the extra (contrib) modules into the main OpenCV distribution. The DNN module originally came from Tiny-dnn and could load pre-trained Caffe model data; OpenCV later extended it to load model data from the mainstream deep-learning frameworks, most commonly Caffe, TensorFlow, and Torch/PyTorch (a short loading sketch follows the list below). The DNN module already supports these classic network architectures:

AlexNet

GoogLeNet v1 (also referred to as Inception-5h)

ResNet-34/50/...

SqueezeNet v1.1

VGG-based FCN (semantic segmentation network)

ENet (lightweight semantic segmentation network)

VGG-based SSD (object detection network)

MobileNet-based SSD (lightweight object detection network)
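
Whatever the source framework, loading a model into the DNN module is a single call. Below is a minimal Python sketch of the three loaders mentioned above (the file names are placeholders for illustration, not files used in this article):

import cv2

# Caffe: network definition (.prototxt) plus trained weights (.caffemodel)
caffe_net = cv2.dnn.readNetFromCaffe('deploy.prototxt', 'weights.caffemodel')

# TensorFlow: a frozen graph (.pb)
tf_net = cv2.dnn.readNetFromTensorflow('frozen_graph.pb')

# Torch: a serialized .t7 model
torch_net = cv2.dnn.readNetFromTorch('model.t7')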

This article uses a simple example to show how to call a self-built, trained TensorFlow model from OpenCV. First, a simple network is built and trained with TensorFlow; the trained network is then saved to disk so that OpenCV can call it.

Step 1: build a simple convolutional network structure.

# Import packages
import tensorflow as tf
import numpy as np
#import cv2
from matplotlib import pyplot as plt
import time
from tensorflow.python.framework import graph_util  # needed later to freeze the graph

logdir = './output/'

# Load the training and test data stored in .npy format
INPUT_DATA = 'F:\\py\\solder_processed_data2.npy'
BATCH = 5
processed_data = np.load(INPUT_DATA)
inputsize = 224
n_training_example = len(processed_data[0])
n_tr = n_training_example
n_v = len(processed_data[2])
n_t = len(processed_data[4])
training_images = np.reshape(processed_data[0], [n_tr, inputsize, inputsize, 3])
training_labels = np.reshape(processed_data[1], [n_tr, 2])
validation_images = np.reshape(processed_data[2], [n_v, inputsize, inputsize, 3])
validation_labels = np.reshape(processed_data[3], [n_v, 2])
testing_images = np.reshape(processed_data[4], [n_t, inputsize, inputsize, 3])
testing_labels = np.reshape(processed_data[5], [n_t, 2])
print("%d training examples, %d validation examples and %d testing examples." % (
    n_training_example, len(validation_labels), len(testing_labels)))

# Create a session and define the two convolutional layers
sess = tf.InteractiveSession()

def weight_variable(shape):
    initial = tf.truncated_normal(shape, stddev=0.1, dtype=tf.float32)
    return tf.Variable(initial)

def bias_variable(shape):
    initial = tf.constant(0.1, dtype=tf.float32, shape=shape)
    return tf.Variable(initial)

def conv2d(x, w):
    # standard convolution with stride 1 and SAME padding
    return tf.nn.conv2d(x, w, strides=[1, 1, 1, 1], padding='SAME')

def max_pool_2x2(x):
    # 2x2 max pooling with stride 2
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')

x = tf.placeholder(tf.float32, [None, inputsize, inputsize, 3], name="input")
y_ = tf.placeholder(tf.float32, [None, 2], name="labels")

# First convolutional layer (ReLU activation assumed for the hidden layers)
w_conv1 = weight_variable([3, 3, 3, 32])
b_conv1 = bias_variable([32])
h_conv1 = tf.nn.relu(conv2d(x, w_conv1) + b_conv1)
h_pool1 = max_pool_2x2(h_conv1)

# Second convolutional layer
w_conv2 = weight_variable([3, 3, 32, 64])
b_conv2 = bias_variable([64])
h_conv2 = tf.nn.relu(conv2d(h_pool1, w_conv2) + b_conv2)
h_pool2 = max_pool_2x2(h_conv2)

# First fully connected layer (224/2/2 = 56, so the flattened size is 56*56*64)
w_fc1 = weight_variable([56 * 56 * 64, 1024])
b_fc1 = bias_variable([1024])
h_pool2_flat = tf.reshape(h_pool2, [-1, 56 * 56 * 64])
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, w_fc1) + b_fc1)

# Second fully connected layer
w_fc2 = weight_variable([1024, 512])
b_fc2 = bias_variable([512])
h_fc2 = tf.nn.relu(tf.matmul(h_fc1, w_fc2) + b_fc2)

# Output layer; the softmax node is named "output" so the frozen graph and OpenCV can reference it
w_fc3 = weight_variable([512, 2])
b_fc3 = bias_variable([2])
y_conv = tf.nn.softmax(tf.matmul(h_fc2, w_fc3) + b_fc3, name="output")

cross_entropy = tf.reduce_mean(
    -tf.reduce_sum(y_ * tf.log(tf.clip_by_value(y_conv, 1e-10, 1.0)), reduction_indices=[1]))
train_step = tf.train.AdamOptimizer(0.00001).minimize(cross_entropy)
correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
tf.global_variables_initializer().run()

start = 0
end = BATCH
duration = 0

# Start training
for i in range(500):
    if i % 50 == 0:
        # evaluate on the current mini-batch (feed_dict assumed; it was missing in the original)
        train_accuracy = accuracy.eval(feed_dict={x: training_images[start:end], y_: training_labels[start:end]})
        print("step %d, train accuracy %g" % (i, train_accuracy))
        print("step %d, seconds/step %g" % (i, duration))
    start_time = time.time()
    train_step.run(feed_dict={x: training_images[start:end], y_: training_labels[start:end]})
    duration = time.time() - start_time
    start = end
    if start == n_training_example:
        start = 0
    end = start + BATCH
    if end > n_training_example:
        end = n_training_example

# Final evaluation on the test set (feed_dict assumed, as above)
print("test accuracy %g" % accuracy.eval(feed_dict={x: testing_images, y_: testing_labels}))

# Freeze the trained model and save it as a .pb file
constant_graph = graph_util.convert_variables_to_constants(sess, sess.graph_def, ["output"])
with tf.gfile.FastGFile(logdir + 'expert-graph2.pb', mode='wb') as f:
    f.write(constant_graph.SerializeToString())
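
Before moving to C++, it is worth checking that the frozen graph can actually be loaded by OpenCV. Below is a minimal sketch using OpenCV's Python bindings (assuming OpenCV 3.3+ built with the DNN module; the test image path is a placeholder):

import cv2
import numpy as np

net = cv2.dnn.readNetFromTensorflow('./output/expert-graph2.pb')
img = cv2.imread('test.jpg')  # placeholder image path
# Same preprocessing as in the C++ code below: scale by 1/256, resize to 224x224, swap BGR->RGB
blob = cv2.dnn.blobFromImage(img, 0.00390625, (224, 224), (0, 0, 0), swapRB=True, crop=False)
net.setInput(blob, 'input')
prob = net.forward('output')
print('class:', int(np.argmax(prob)), 'probability:', float(np.max(prob)))

If this loads and returns a 1x2 probability vector, the same graph should work from the C++ program in step 2.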

Step 2: use the DNN module in OpenCV 3.4 to call the trained model and output the prediction result.

// Headers required by this program: OpenCV core/DNN/image processing/GUI,
// C++ I/O streams, and windows.h for DWORD / GetTickCount
#include <opencv2/opencv.hpp>
#include <opencv2/dnn.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/highgui.hpp>
#include <iostream>
#include <fstream>
#include <windows.h>

using namespace cv;
using namespace cv::dnn;
using namespace std;

// Create a txt file with the class labels, one label per line
// (for a binary classifier, e.g. "good" on the first line and "bad" on the second)
String labels_txt_file = "F:\\py\\实现卷积神经网络\\output\\expert-graph.txt";
String tf_pb_file = "F:\\py\\实现卷积神经网络\\output\\expert-graph.pb";

vector<String> readClassNames();

int main()
{
    Mat src = imread("F:\\py\\solder\\bad\\20.jpg");
    if (src.empty())
    {
        cout << "could not load the input image" << endl;
        return -1;
    }
    vector<String> labels = readClassNames();

    Mat rgb;
    int w = 224;
    int h = 224;
    resize(src, src, Size(w, h));
    cvtColor(src, rgb, COLOR_BGR2RGB);  // rgb is not used further; blobFromImage below does the BGR->RGB swap itself

    Net net = readNetFromTensorflow(tf_pb_file);
    DWORD timestart = GetTickCount();
    if (net.empty())
    {
        cout << "could not read the .pb model file" << endl;
        return -1;
    }

    // Scale pixel values by 1/256 (0.00390625), resize to 224x224 and swap BGR->RGB,
    // matching the preprocessing used during training
    Mat inputBlob = blobFromImage(src, 0.00390625f, Size(w, h), Scalar(), true, false);
    //inputBlob -= 117.0;

    // Run the classification
    Mat prob;
    net.setInput(inputBlob, "input");
    prob = net.forward("output");
    cout << "network output: " << prob << endl;
    //prob = net.forward("softmax2");

    // Find the class with the highest probability
    Mat probMat = prob.reshape(1, 1);
    Point classNumber;
    double classProb;
    minMaxLoc(probMat, NULL, &classProb, NULL, &classNumber);
    DWORD timeend = GetTickCount();
    int classidx = classNumber.x;
    printf("\n current image classification : %s, possible : %.2f\n", labels.at(classidx).c_str(), classProb);
    cout << "inference time: " << (timeend - timestart) << " ms" << endl;

    // Draw the predicted label on the image
    putText(src, labels.at(classidx), Point(20, 20), FONT_HERSHEY_SIMPLEX, 1.0, Scalar(0, 0, 255), 2, 8);
    imshow("Image Classification", src);
    waitKey(0);
    return 0;
}

vector<String> readClassNames()
{
    vector<String> classNames;
    fstream fp(labels_txt_file.c_str());
    if (!fp.is_open())
    {
        cout << "could not open the label file" << endl;
        exit(-1);
    }
    string name;
    while (!fp.eof())
    {
        getline(fp, name);
        if (name.length())
            classNames.push_back(name);
    }
    fp.close();
    return classNames;
}

Finally, we obtain the prediction result. This article only provides the code; the image data is not provided.
