# 基于tensorflow的手写数字分类预测kaggle实战

2018年9月19日笔记

kaggle网站手写数字分类的比赛链接：https://www.kaggle.com/c/digit-recognizer 注册账号后才能参加kaggle比赛，本文作者成绩前2%，如下图所示：

image.png

## 1.下载并解压数据集

MNIST数据集下载链接: https://pan.baidu.com/s/1fPbgMqsEvk2WyM9hy5Em6w 密码: wa9p 下载压缩文件MNIST_data.rar完成后，选择解压到当前文件夹不要选择解压到MNIST_data。 文件夹结构如下图所示：

image.png

## 3.模型训练并保存

import warnings
warnings.filterwarnings('ignore')
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import random
import numpy as np

# Load MNIST with one-hot labels. This line was missing in the original:
# `mnist` is referenced below (and in later sections) but was never defined.
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)

batch_size = 300
# Placeholders for image batches (flattened 784-float vectors) and
# one-hot label batches; shapes are left unspecified to allow any batch size.
X_holder = tf.placeholder(tf.float32)
y_holder = tf.placeholder(tf.float32)
# Stack train/test/validation splits so training can sample from all 70k images.
X = np.vstack([mnist.train.images, mnist.test.images, mnist.validation.images])
y = np.vstack([mnist.train.labels, mnist.test.labels, mnist.validation.labels])
print(X.shape, y.shape)

# Build a 2-conv-layer CNN: 28x28x1 -> conv/pool -> 14x14x32 -> conv/pool -> 7x7x64
# -> fully connected 1024 -> softmax over 10 digit classes.
X_images = tf.reshape(X_holder, [-1, 28, 28, 1])
# convolutional layer 1: 5x5 kernels, 1 input channel, 32 output channels
conv1_Weights = tf.Variable(tf.truncated_normal([5, 5, 1, 32], stddev=0.1), name='conv1_Weights')
conv1_biases = tf.Variable(tf.constant(0.1, shape=[32]), name='conv1_biases')
conv1_conv2d = tf.nn.conv2d(X_images, conv1_Weights, strides=[1, 1, 1, 1], padding='SAME') + conv1_biases
conv1_activated = tf.nn.relu(conv1_conv2d)
# 2x2 max pooling halves spatial size: 28x28 -> 14x14
conv1_pooled = tf.nn.max_pool(conv1_activated, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
# convolutional layer 2: 5x5 kernels, 32 -> 64 channels; pooling gives 7x7x64
conv2_Weights = tf.Variable(tf.truncated_normal([5, 5, 32, 64], stddev=0.1), name='conv2_Weights')
conv2_biases = tf.Variable(tf.constant(0.1, shape=[64]), name='conv2_biases')
conv2_conv2d = tf.nn.conv2d(conv1_pooled, conv2_Weights, strides=[1, 1, 1, 1], padding='SAME') + conv2_biases
conv2_activated = tf.nn.relu(conv2_conv2d)
conv2_pooled = tf.nn.max_pool(conv2_activated, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
# fully connected layer 1
connect1_flat = tf.reshape(conv2_pooled, [-1, 7 * 7 * 64])
connect1_Weights = tf.Variable(tf.truncated_normal([7 * 7 * 64, 1024], stddev=0.1), name='connect1_Weights')
connect1_biases = tf.Variable(tf.constant(0.1, shape=[1024]), name='connect1_biases')
# This matmul line was missing in the original (connect1_Wx_plus_b was undefined).
connect1_Wx_plus_b = tf.matmul(connect1_flat, connect1_Weights) + connect1_biases
connect1_activated = tf.nn.relu(connect1_Wx_plus_b)
# fully connected layer 2 (output logits over 10 classes)
connect2_Weights = tf.Variable(tf.truncated_normal([1024, 10], stddev=0.1), name='connect2_Weights')
connect2_biases = tf.Variable(tf.constant(0.1, shape=[10]), name='connect2_biases')
# This matmul line was missing in the original (connect2_Wx_plus_b was undefined).
connect2_Wx_plus_b = tf.matmul(connect1_activated, connect2_Weights) + connect2_biases
predict_y = tf.nn.softmax(connect2_Wx_plus_b)
# loss and train: cross-entropy over the softmax output.
loss = tf.reduce_mean(-tf.reduce_sum(y_holder * tf.log(predict_y), 1))
# `optimizer` was used but never defined in the original; Adam with lr=1e-4 is
# the standard choice for this tutorial architecture -- TODO confirm against
# the author's original settings.
optimizer = tf.train.AdamOptimizer(1e-4)
train = optimizer.minimize(loss)

init = tf.global_variables_initializer()
session = tf.Session()
session.run(init)
saver = tf.train.Saver()

# Accuracy graph nodes are built ONCE here; the original rebuilt them inside
# the loop every 100 steps, growing the graph on each evaluation.
correct_prediction = tf.equal(tf.argmax(predict_y, 1), tf.argmax(y_holder, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# Train for 20000 steps on random mini-batches; indentation was lost in the
# original listing and is restored here.
for i in range(20000):
    selected_index = random.sample(range(len(y)), k=batch_size)
    selected_X = X[selected_index]
    selected_y = y[selected_index]
    session.run(train, feed_dict={X_holder: selected_X, y_holder: selected_y})
    if i % 100 == 0:
        train_accuracy = session.run(accuracy, feed_dict={X_holder: mnist.train.images, y_holder: mnist.train.labels})
        test_accuracy = session.run(accuracy, feed_dict={X_holder: mnist.test.images, y_holder: mnist.test.labels})
        validation_accuracy = session.run(accuracy, feed_dict={X_holder: mnist.validation.images, y_holder: mnist.validation.labels})
        print('step:%d train accuracy:%.4f test accuracy:%.4f validation accuracy:%.4f' %(i, train_accuracy, test_accuracy, validation_accuracy))
        # Checkpoint only once the model fits all three splits perfectly
        # (the model was trained on the union of all splits, so this is
        # a stopping criterion, not a generalization measure).
        if train_accuracy == 1 and test_accuracy == 1 and validation_accuracy == 1:
            save_path = saver.save(session, 'mnist_cnn_model/mnist_cnn.ckpt')
            print('Save to path:', save_path)

## 4.加载模型

image.png

import warnings
warnings.filterwarnings('ignore')
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

# Load MNIST for the accuracy check below; `mnist` was used but never
# defined in the original listing.
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)

batch_size = 100
X_holder = tf.placeholder(tf.float32)
y_holder = tf.placeholder(tf.float32)

# Rebuild the exact same graph as the training script; variable names must
# match the checkpoint so tf.train.Saver can restore the weights.
X_images = tf.reshape(X_holder, [-1, 28, 28, 1])
# convolutional layer 1
conv1_Weights = tf.Variable(tf.truncated_normal([5, 5, 1, 32], stddev=0.1), name='conv1_Weights')
conv1_biases = tf.Variable(tf.constant(0.1, shape=[32]), name='conv1_biases')
conv1_conv2d = tf.nn.conv2d(X_images, conv1_Weights, strides=[1, 1, 1, 1], padding='SAME') + conv1_biases
conv1_activated = tf.nn.relu(conv1_conv2d)
conv1_pooled = tf.nn.max_pool(conv1_activated, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
# convolutional layer 2
conv2_Weights = tf.Variable(tf.truncated_normal([5, 5, 32, 64], stddev=0.1), name='conv2_Weights')
conv2_biases = tf.Variable(tf.constant(0.1, shape=[64]), name='conv2_biases')
conv2_conv2d = tf.nn.conv2d(conv1_pooled, conv2_Weights, strides=[1, 1, 1, 1], padding='SAME') + conv2_biases
conv2_activated = tf.nn.relu(conv2_conv2d)
conv2_pooled = tf.nn.max_pool(conv2_activated, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
# fully connected layer 1
connect1_flat = tf.reshape(conv2_pooled, [-1, 7 * 7 * 64])
connect1_Weights = tf.Variable(tf.truncated_normal([7 * 7 * 64, 1024], stddev=0.1), name='connect1_Weights')
connect1_biases = tf.Variable(tf.constant(0.1, shape=[1024]), name='connect1_biases')
# Missing matmul restored (connect1_Wx_plus_b was undefined in the original).
connect1_Wx_plus_b = tf.matmul(connect1_flat, connect1_Weights) + connect1_biases
connect1_activated = tf.nn.relu(connect1_Wx_plus_b)
# fully connected layer 2
connect2_Weights = tf.Variable(tf.truncated_normal([1024, 10], stddev=0.1), name='connect2_Weights')
connect2_biases = tf.Variable(tf.constant(0.1, shape=[10]), name='connect2_biases')
# Missing matmul restored (connect2_Wx_plus_b was undefined in the original).
connect2_Wx_plus_b = tf.matmul(connect1_activated, connect2_Weights) + connect2_biases
predict_y = tf.nn.softmax(connect2_Wx_plus_b)
# loss and train (kept so the graph matches the training script; `optimizer`
# was undefined in the original)
loss = tf.reduce_mean(-tf.reduce_sum(y_holder * tf.log(predict_y), 1))
optimizer = tf.train.AdamOptimizer(1e-4)
train = optimizer.minimize(loss)

session = tf.Session()
saver = tf.train.Saver()
# Restore trained weights instead of initializing fresh variables.
saver.restore(session, 'mnist_cnn_model/mnist_cnn.ckpt')
correct_prediction = tf.equal(tf.argmax(predict_y, 1), tf.argmax(y_holder, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# Evaluate on 5000-sample batches rather than the full splits to limit memory.
train_images, train_labels = mnist.train.next_batch(5000)
test_images, test_labels = mnist.test.next_batch(5000)
train_accuracy = session.run(accuracy, feed_dict={X_holder: train_images, y_holder: train_labels})
test_accuracy = session.run(accuracy, feed_dict={X_holder: test_images, y_holder: test_labels})
print('train accuracy:%.4f test accuracy:%.4f' %(train_accuracy, test_accuracy))

Extracting MNIST_data/train-images-idx3-ubyte.gz Extracting MNIST_data/train-labels-idx1-ubyte.gz Extracting MNIST_data/t10k-images-idx3-ubyte.gz Extracting MNIST_data/t10k-labels-idx1-ubyte.gz INFO:tensorflow:Restoring parameters from mnist_cnn_model/mnist_cnn.ckpt load model successful train accuracy:1.0000 test accuracy:1.0000

## 5.模型预测

import pandas as pd
import numpy as np

# Read the Kaggle competition test set; `test_df` was used but never
# defined in the original listing. Assumes test.csv is in the working
# directory, as downloaded from the competition page.
test_df = pd.read_csv('test.csv')
# Kaggle pixel values are 0-255 while the network was trained on MNIST's
# 0-1 floats; without this scaling predictions would be wrong.
X = test_df.values / 255.0
print('特征矩阵的形状：', X.shape)
# Predict in 5000-row chunks (replaces the original's six copy-pasted
# X1..X6 / y1..y6 blocks) to bound per-run memory while covering all rows.
chunk_size = 5000
prediction_chunks = []
for start in range(0, len(X), chunk_size):
    chunk = X[start:start + chunk_size]
    prediction_chunks.append(session.run(predict_y, feed_dict={X_holder: chunk}))

y = np.vstack(prediction_chunks)
# Convert per-class probabilities to the predicted digit label.
y_argmax = np.argmax(y, 1)
print('预测值的形状：', y_argmax.shape)
# Kaggle submission format: ImageId starts at 1.
commit_df = pd.DataFrame({'ImageId': range(1, 1 + len(y_argmax)),
                          'Label': y_argmax})
fileName = 'kaggle_commit3.csv'
commit_df.to_csv(fileName, index=False)
print('预测结果已经保存到文件', fileName)

image.png

image.png

## 7.总结

1.自己电脑配置不足，使用云服务器极大的加快了工程部署和模型训练速度； 2.在kaggle经典入门赛取得前2%的成绩，把简单的事做到极致； 3.本文作者提供可以加载的模型只能取得0.99571的成绩。

0 条评论

• ### 基于tensorflow的一元一次方程回归预测

安装tensorflow命令：pip install tensorflow 下面一段代码能够成功运行，则说明安装tensorflow环境成功。

• ### 基于tensorflow+CNN的MNIST数据集手写数字分类预测

对于一个被研究的物体，它有多个属性(x1, x2, ... xn)和一个值y。线性回归假设y与(x1, x2, ... xn)有线性关系，也就是我们可以把y表示...

• ### tensorflow入门：Logistic Regression

$g(z) = \frac{1}{1+e^{-z}}$

• ### TensorFlow从0到1 - 16 - L2正则化对抗“过拟合”

前面的14 交叉熵损失函数——防止学习缓慢和15 重新思考神经网络初始化从学习缓慢问题入手，尝试改进神经网络的学习。本篇讨论过拟合问题，并引入与之相对的L2正...

• ### TensorFlow从0到1丨第十六篇 L2正则化对抗“过拟合”

前面的第十四篇 交叉熵损失函数——防止学习缓慢和第十五篇 重新思考神经网络初始化从学习缓慢问题入手，尝试改进神经网络的学习。本篇讨论过拟合问题，并引入与之相对的...

• ### Tensorflow解决MNIST手写数字分类问题

官网：https://www.tensorflow.org/tutorials/layers

• ### tensorflow编程: Constants, Sequences, and Random Values

注意： start 和 stop 参数都必须是 浮点型；     取值范围也包括了 stop； tf.lin_space 等同于 tf.lins...