# 【深度学习】使用tensorflow实现VGG19网络

VGG网络与AlexNet类似，也是一种CNN，VGG在2014年的 ILSVRC localization and classification 两个问题上分别取得了第一名和第二名。VGG网络非常深，通常有16－19层，卷积核大小为 3 x 3，16和19层的区别主要在于后面三个卷积部分卷积层的数量。第二个用tensorflow独立完成的小玩意儿......

```def maxPoolLayer(x, kHeight, kWidth, strideX, strideY, name, padding = "SAME"):
"""max-pooling"""
return tf.nn.max_pool(x, ksize = [1, kHeight, kWidth, 1],
strides = [1, strideX, strideY, 1], padding = padding, name = name)

def dropout(x, keepPro, name = None):
    """Apply dropout, keeping each unit with probability `keepPro`.

    Fix: the original passed `name` as the third positional argument, but in
    tf.nn.dropout that slot is `noise_shape`, not `name` — it must be passed
    by keyword or TF misinterprets the op name as a noise shape.
    """
    return tf.nn.dropout(x, keepPro, name = name)

def fcLayer(x, inputD, outputD, reluFlag, name):
    """Fully-connected layer: x @ w + b, optionally followed by ReLU.

    Variables "w" (inputD x outputD) and "b" (outputD) are created inside
    the variable scope `name`, so pretrained weights can later be assigned
    to them by scope/name.
    """
    with tf.variable_scope(name) as scope:
        weights = tf.get_variable("w", shape = [inputD, outputD], dtype = "float")
        biases = tf.get_variable("b", [outputD], dtype = "float")
        affine = tf.nn.xw_plus_b(x, weights, biases, name = scope.name)
    return tf.nn.relu(affine) if reluFlag else affine

def convLayer(x, kHeight, kWidth, strideX, strideY,
"""convlutional"""
channel = int(x.get_shape()[-1]) #获取channel数
with tf.variable_scope(name) as scope:
w = tf.get_variable("w", shape = [kHeight, kWidth, channel, featureNum])
b = tf.get_variable("b", shape = [featureNum])
featureMap = tf.nn.conv2d(x, w, strides = [1, strideY, strideX, 1], padding = padding)
return tf.nn.relu(tf.reshape(out, featureMap.get_shape().as_list()), name = scope.name)```

```class VGG19(object):
"""VGG model"""
def __init__(self, x, keepPro, classNum, skip, modelPath = "vgg19.npy"):
self.X = x
self.KEEPPRO = keepPro
self.CLASSNUM = classNum
self.SKIP = skip
self.MODELPATH = modelPath
#build CNN
self.buildCNN()

def buildCNN(self):
"""build model"""
conv1_1 = convLayer(self.X, 3, 3, 1, 1, 64, "conv1_1" )
conv1_2 = convLayer(conv1_1, 3, 3, 1, 1, 64, "conv1_2")
pool1 = maxPoolLayer(conv1_2, 2, 2, 2, 2, "pool1")

conv2_1 = convLayer(pool1, 3, 3, 1, 1, 128, "conv2_1")
conv2_2 = convLayer(conv2_1, 3, 3, 1, 1, 128, "conv2_2")
pool2 = maxPoolLayer(conv2_2, 2, 2, 2, 2, "pool2")

conv3_1 = convLayer(pool2, 3, 3, 1, 1, 256, "conv3_1")
conv3_2 = convLayer(conv3_1, 3, 3, 1, 1, 256, "conv3_2")
conv3_3 = convLayer(conv3_2, 3, 3, 1, 1, 256, "conv3_3")
conv3_4 = convLayer(conv3_3, 3, 3, 1, 1, 256, "conv3_4")
pool3 = maxPoolLayer(conv3_4, 2, 2, 2, 2, "pool3")

conv4_1 = convLayer(pool3, 3, 3, 1, 1, 512, "conv4_1")
conv4_2 = convLayer(conv4_1, 3, 3, 1, 1, 512, "conv4_2")
conv4_3 = convLayer(conv4_2, 3, 3, 1, 1, 512, "conv4_3")
conv4_4 = convLayer(conv4_3, 3, 3, 1, 1, 512, "conv4_4")
pool4 = maxPoolLayer(conv4_4, 2, 2, 2, 2, "pool4")

conv5_1 = convLayer(pool4, 3, 3, 1, 1, 512, "conv5_1")
conv5_2 = convLayer(conv5_1, 3, 3, 1, 1, 512, "conv5_2")
conv5_3 = convLayer(conv5_2, 3, 3, 1, 1, 512, "conv5_3")
conv5_4 = convLayer(conv5_3, 3, 3, 1, 1, 512, "conv5_4")
pool5 = maxPoolLayer(conv5_4, 2, 2, 2, 2, "pool5")

fcIn = tf.reshape(pool5, [-1, 7*7*512])
fc6 = fcLayer(fcIn, 7*7*512, 4096, True, "fc6")
dropout1 = dropout(fc6, self.KEEPPRO)

fc7 = fcLayer(dropout1, 4096, 4096, True, "fc7")
dropout2 = dropout(fc7, self.KEEPPRO)

self.fc8 = fcLayer(dropout2, 4096, self.CLASSNUM, True, "fc8")

wDict = np.load(self.MODELPATH, encoding = "bytes").item()
#for layers in model
for name in wDict:
if name not in self.SKIP:
with tf.variable_scope(name, reuse = True):
for p in wDict[name]:
if len(p.shape) == 1:
#bias 只有一维
sess.run(tf.get_variable('b', trainable = False).assign(p))
else:
#weights
sess.run(tf.get_variable('w', trainable = False).assign(p)) ```

buildCNN函数完全按照VGG的结构搭建网络。

ImageNet训练的VGG有很多类，几乎包含所有常见的物体，因此我们随便从网上找几张图片测试。比如我直接用了之前做项目的图片，为了避免审美疲劳，我们不只用渣土车，还要用挖掘机、采沙船：

```parser = argparse.ArgumentParser(description='Classify some images.')
parser.add_argument('path', help='Specify a path [e.g. testModel]')
args = parser.parse_args(sys.argv[1:])

if args.mode == 'folder': #测试方式为本地文件夹
#get testImage
withPath = lambda f: '{}/{}'.format(args.path,f)
testImg = dict((f,cv2.imread(withPath(f))) for f in os.listdir(args.path) if os.path.isfile(withPath(f)))
elif args.mode == 'url': #测试方式为URL
def url2img(url): #获取URL图像
'''url to image'''
resp = urllib.request.urlopen(url)
return image
testImg = {args.path:url2img(args.path)}

if testImg.values():
#some params
dropoutPro = 1
classNum = 1000
skip = []

imgMean = np.array([104, 117, 124], np.float)
x = tf.placeholder("float", [1, 224, 224, 3])

model = vgg19.VGG19(x, dropoutPro, classNum, skip)
score = model.fc8
softmax = tf.nn.softmax(score)

with tf.Session() as sess:
sess.run(tf.global_variables_initializer())

for key,img in testImg.items():
#img preprocess
resized = cv2.resize(img.astype(np.float), (224, 224)) - imgMean #去均值
maxx = np.argmax(sess.run(softmax, feed_dict = {x: resized.reshape((1, 224, 224, 3))})) #网络输入为224*224
res = caffe_classes.class_names[maxx]

font = cv2.FONT_HERSHEY_SIMPLEX
cv2.putText(img, res, (int(img.shape[0]/3), int(img.shape[1]/3)), font, 1, (0, 255, 0), 2) #在图像上绘制结果
print("{}: {}\n----".format(key,res)) #输出测试结果
cv2.imshow("demo", img)
cv2.waitKey(0)```

314 篇文章54 人订阅

0 条评论

## 相关文章

47570

### 生成图片更逼真的 LSGAN（Least Squares GAN，最小二乘GAN）

https://github.com/wiseodd/generative-models

11230

3.7K90

56480

16640

44710

87590

24930

83860

10320