# 连载 | 深度学习入门第六讲

1.6 实现我们的网络来分类数字

git clone https://github.com/mnielsen/neural-networks-and-deep-learning.git

class Network(object):
    """A feedforward neural network; ``sizes`` lists the neuron count per layer."""

    def __init__(self, sizes):
        # Number of layers, including the input layer.
        self.num_layers = len(sizes)
        self.sizes = sizes
        # One bias column vector per non-input layer: the input layer gets
        # no biases, since biases are only used when computing layer outputs.
        self.biases = [np.random.randn(y, 1) for y in sizes[1:]]
        # weights[i] connects layer i (x neurons) to layer i+1 (y neurons),
        # stored as a (y, x) matrix so that np.dot(w, a) works directly.
        self.weights = [np.random.randn(y, x)
                        for x, y in zip(sizes[:-1], sizes[1:])]

# Example: a three-layer network with 2 input, 3 hidden and 1 output neuron.
net = Network([2, 3, 1])

Network 对象中的偏置和权重都是被随机初始化的，使用 Numpy 的 np.random.randn 函数来 生成均值为 0，标准差为 1 的高斯分布。这样的随机初始化给了我们的随机梯度下降算法一个 起点。在后面的章节中我们将会发现更好的初始化权重和偏置的方法，但是目前随机地将其初 始化。注意 Network 初始化代码假设第一层神经元是一个输入层，并对这些神经元不设置任何偏 置，因为偏置仅在后面的层中用于计算输出。

$$a' = \sigma(w a + b) \tag{22}$$

• 以分量形式写出方程 (22)，并验证它和计算S 型神经元输出的规则 (4) 结果相同。 有了这些，很容易写出从一个 Network 实例计算输出的代码。我们从定义 S 型函数开始:

def sigmoid(z):
    """Logistic sigmoid 1 / (1 + e^(-z)); applied elementwise to arrays."""
    denominator = 1.0 + np.exp(-z)
    return 1.0 / denominator

def feedforward(self, a):
    """Return the output of the network if ``a`` is input.

    ``a`` is assumed to be a column vector matching the input layer size
    (TODO confirm against caller); each layer computes a = sigmoid(w.a + b).
    """
    for b, w in zip(self.biases, self.weights):
        a = sigmoid(np.dot(w, a)+b)
    return a

def SGD(self, training_data, epochs, mini_batch_size, eta,
        test_data=None):
    """Train the neural network using mini-batch stochastic gradient descent.

    ``training_data`` is a list of tuples ``(x, y)`` representing the
    training inputs and the desired outputs.  ``epochs`` is the number of
    passes over the data, ``mini_batch_size`` the number of samples per
    update, and ``eta`` the learning rate.  If ``test_data`` is provided
    then the network will be evaluated against the test data after each
    epoch, and partial progress printed out.  This is useful for tracking
    progress, but slows things down substantially.
    """
    if test_data:
        n_test = len(test_data)
    n = len(training_data)
    for j in range(epochs):
        # A fresh in-place shuffle each epoch makes every mini-batch a
        # random sample of the training set.
        random.shuffle(training_data)
        mini_batches = [
            training_data[k:k+mini_batch_size]
            for k in range(0, n, mini_batch_size)]
        for mini_batch in mini_batches:
            self.update_mini_batch(mini_batch, eta)
        if test_data:
            print("Epoch {0}: {1} / {2}".format(
                j, self.evaluate(test_data), n_test))
        else:
            print("Epoch {0} complete".format(j))

training_data 是一个 (x, y) 元组的列表，表示训练输入和其对应的期望输出。变量 epochs 和 mini_batch_size 正如你预料的 —— 迭代期数量，和采样时的小批量数据的大小。eta 是学习速率，η。如果给出了可选参数 test_data，那么程序会在每个训练迭代期后评估网络，并打印出部分进展。这对于追踪进度很有用，但相当拖慢执行速度。

def update_mini_batch(self, mini_batch, eta):
    """Update the network's weights and biases by applying gradient
    descent using backpropagation to a single mini batch.

    ``mini_batch`` is a list of tuples ``(x, y)``, and ``eta`` is the
    learning rate.  The step taken is the mean gradient over the batch.
    """
    nabla_b = [np.zeros(b.shape) for b in self.biases]
    nabla_w = [np.zeros(w.shape) for w in self.weights]
    # Accumulate the per-example gradients over the whole mini batch.
    for x, y in mini_batch:
        delta_nabla_b, delta_nabla_w = self.backprop(x, y)
        nabla_b = [nb+dnb for nb, dnb in zip(nabla_b, delta_nabla_b)]
        nabla_w = [nw+dnw for nw, dnw in zip(nabla_w, delta_nabla_w)]
    # eta/len(mini_batch) turns the accumulated sum into an averaged step.
    self.weights = [w-(eta/len(mini_batch))*nw
                    for w, nw in zip(self.weights, nabla_w)]
    self.biases = [b-(eta/len(mini_batch))*nb
                   for b, nb in zip(self.biases, nabla_b)]

# Excerpt repeated from update_mini_batch: one backprop call returns the
# per-example gradient lists for the biases and weights of every layer.
delta_nabla_b, delta_nabla_w = self.backprop(x, y)

""" network.py ~~~~~~~~~~

A module to implement the stochastic gradient descent learning

algorithm for a feedforward neural network. Gradients are calculated using backpropagation. Note that I have focused on making the code simple, easily readable, and easily modifiable. It is not optimized,

and omits many desirable features. """

#### Libraries

# Standard library import random

# Third-party libraries

import numpy as np

class Network(object):
    """A feedforward neural network trained with mini-batch SGD and backprop."""

    def __init__(self, sizes):
        """The list ``sizes`` contains the number of neurons in the
        respective layers of the network.  For example, if the list
        was [2, 3, 1] then it would be a three-layer network, with the
        first layer containing 2 neurons, the second layer 3 neurons,
        and the third layer 1 neuron.  The biases and weights for the
        network are initialized randomly, using a Gaussian
        distribution with mean 0, and variance 1.  Note that the first
        layer is assumed to be an input layer, and by convention we
        won't set any biases for those neurons, since biases are only
        ever used in computing the outputs from later layers."""
        self.num_layers = len(sizes)
        self.sizes = sizes
        self.biases = [np.random.randn(y, 1) for y in sizes[1:]]
        self.weights = [np.random.randn(y, x)
                        for x, y in zip(sizes[:-1], sizes[1:])]

    def feedforward(self, a):
        """Return the output of the network if ``a`` is input."""
        for b, w in zip(self.biases, self.weights):
            a = sigmoid(np.dot(w, a)+b)
        return a

    def SGD(self, training_data, epochs, mini_batch_size, eta,
            test_data=None):
        """Train the neural network using mini-batch stochastic
        gradient descent.  The ``training_data`` is a list of tuples
        ``(x, y)`` representing the training inputs and the desired
        outputs.  The other non-optional parameters are
        self-explanatory.  If ``test_data`` is provided then the
        network will be evaluated against the test data after each
        epoch, and partial progress printed out.  This is useful for
        tracking progress, but slows things down substantially."""
        if test_data:
            n_test = len(test_data)
        n = len(training_data)
        for j in range(epochs):
            random.shuffle(training_data)
            mini_batches = [
                training_data[k:k+mini_batch_size]
                for k in range(0, n, mini_batch_size)]
            for mini_batch in mini_batches:
                self.update_mini_batch(mini_batch, eta)
            if test_data:
                print("Epoch {0}: {1} / {2}".format(
                    j, self.evaluate(test_data), n_test))
            else:
                print("Epoch {0} complete".format(j))

    def update_mini_batch(self, mini_batch, eta):
        """Update the network's weights and biases by applying
        gradient descent using backpropagation to a single mini batch.
        The ``mini_batch`` is a list of tuples ``(x, y)``, and ``eta``
        is the learning rate."""
        nabla_b = [np.zeros(b.shape) for b in self.biases]
        nabla_w = [np.zeros(w.shape) for w in self.weights]
        for x, y in mini_batch:
            delta_nabla_b, delta_nabla_w = self.backprop(x, y)
            nabla_b = [nb+dnb for nb, dnb in zip(nabla_b, delta_nabla_b)]
            nabla_w = [nw+dnw for nw, dnw in zip(nabla_w, delta_nabla_w)]
        self.weights = [w-(eta/len(mini_batch))*nw
                        for w, nw in zip(self.weights, nabla_w)]
        self.biases = [b-(eta/len(mini_batch))*nb
                       for b, nb in zip(self.biases, nabla_b)]

    def backprop(self, x, y):
        """Return a tuple ``(nabla_b, nabla_w)`` representing the
        gradient for the cost function C_x.  ``nabla_b`` and
        ``nabla_w`` are layer-by-layer lists of numpy arrays, similar
        to ``self.biases`` and ``self.weights``."""
        nabla_b = [np.zeros(b.shape) for b in self.biases]
        nabla_w = [np.zeros(w.shape) for w in self.weights]
        # feedforward
        activation = x
        activations = [x]  # list to store all the activations, layer by layer
        zs = []  # list to store all the z vectors, layer by layer
        for b, w in zip(self.biases, self.weights):
            z = np.dot(w, activation)+b
            zs.append(z)
            activation = sigmoid(z)
            activations.append(activation)
        # backward pass
        delta = self.cost_derivative(activations[-1], y) * \
            sigmoid_prime(zs[-1])
        nabla_b[-1] = delta
        nabla_w[-1] = np.dot(delta, activations[-2].transpose())
        # Note that the variable l in the loop below is used a little
        # differently to the notation in Chapter 2 of the book.  Here,
        # l = 1 means the last layer of neurons, l = 2 is the
        # second-last layer, and so on.  It's a renumbering of the
        # scheme in the book, used here to take advantage of the fact
        # that Python can use negative indices in lists.
        for l in range(2, self.num_layers):
            z = zs[-l]
            sp = sigmoid_prime(z)
            delta = np.dot(self.weights[-l+1].transpose(), delta) * sp
            nabla_b[-l] = delta
            nabla_w[-l] = np.dot(delta, activations[-l-1].transpose())
        return (nabla_b, nabla_w)

    def evaluate(self, test_data):
        """Return the number of test inputs for which the neural
        network outputs the correct result.  Note that the neural
        network's output is assumed to be the index of whichever
        neuron in the final layer has the highest activation."""
        test_results = [(np.argmax(self.feedforward(x)), y)
                        for (x, y) in test_data]
        return sum(int(x == y) for (x, y) in test_results)

    def cost_derivative(self, output_activations, y):
        """Return the vector of partial derivatives
        \\partial C_x / \\partial a for the output activations."""
        return (output_activations-y)

#### Miscellaneous functions
def sigmoid(z):
    """The sigmoid function, 1/(1 + e^(-z)); elementwise on numpy arrays."""
    return 1.0/(1.0+np.exp(-z))

def sigmoid_prime(z):
    """Derivative of the sigmoid function: sigma(z) * (1 - sigma(z))."""
    return sigmoid(z)*(1-sigmoid(z))

>>> import mnist_loader
>>> training_data, validation_data, test_data = \
... mnist_loader.load_data_wrapper()

>>> import network
>>> net = network.Network([784, 30, 10])

>>> net.SGD(training_data, 30, 10, 3.0, test_data=test_data)

Epoch 0: 9129 / 10000 Epoch 1: 9295 / 10000

Epoch 2: 9348 / 10000 ...

Epoch 27: 9528 / 10000 Epoch 28: 9542 / 10000

Epoch 29: 9534 / 10000

>>> net = network.Network([784, 100, 10])
>>> net.SGD(training_data, 30, 10, 3.0, test_data=test_data)

>>> net = network.Network([784, 100, 10])
>>> net.SGD(training_data, 30, 10, 0.001, test_data=test_data)

Epoch 0: 1139 / 10000 Epoch 1: 1136 / 10000

Epoch 2: 1135 / 10000 ...

Epoch 27: 2101 / 10000 Epoch 28: 2123 / 10000

Epoch 29: 2142 / 10000

>>> net = network.Network([784, 30, 10])

>>> net.SGD(training_data, 30, 10, 100.0, test_data=test_data)

Epoch 0: 1009 / 10000

Epoch 1: 1009 / 10000 Epoch 2: 1009 / 10000

Epoch 3: 1009 / 10000 ... Epoch 27: 982 / 10000

Epoch 28: 982 / 10000 Epoch 29: 982 / 10000

• 试着创建一个仅有两层的网络 —— 一个输入层和一个输出层，分别有 784 和 10 个神经元， 没有隐藏层。用随机梯度下降算法训练网络。你能达到多少识别率?

"""

A library to load the MNIST image data. For details of the data

structures that are returned, see the doc strings for load_data and load_data_wrapper. In practice, load_data_wrapper is the function usually called by our neural network code.

"""

#### Libraries
# Standard library
import gzip
import pickle  # Python 3: replaces Python 2's cPickle

# Third-party libraries
import numpy as np

def load_data():"""Return the MNIST data as a tuple containing the training data,

the validation data, and the test data.

The training_data is returned as a tuple with two entries. The first entry contains the actual training images. This is a numpy ndarray with 50,000 entries. Each entry is, in turn, a

numpy ndarray with 784 values, representing the 28 * 28 = 784 pixels in a single MNIST image.

The second entry in the training_data tuple is a numpy ndarray

containing 50,000 entries. Those entries are just the digit values (0...9) for the corresponding images contained in the first entry of the tuple.

The validation_data and test_data are similar, except

each contains only 10,000 images. This is a nice data format, but for use in neural networks it's

helpful to modify the format of the training_data a little. That's done in the wrapper function load_data_wrapper(), see

below. """

f = gzip.open('../data/mnist.pkl.gz', 'rb') training_data, validation_data, test_data = cPickle.load(f) f.close()

return (training_data, validation_data, test_data)

def load_data_wrapper():"""Return a tuple containing (training_data, validation_data, test_data). Based on load_data, but the format is more

convenient for use in our implementation of neural networks.

In particular, training_data is a list containing 50,000 2-tuples (x, y). x is a 784-dimensional numpy.ndarray

containing the input image. y is a 10-dimensional numpy.ndarray representing the unit vector corresponding to the correct digit for x.

validation_data and test_data are lists containing 10,000

2-tuples (x, y). In each case, x is a 784-dimensional numpy.ndarry containing the input image, and y is the corresponding classification, i.e., the digit values (integers)

corresponding to x.

Obviously, this means we're using slightly different formats for the training data and the validation / test data. These formats

turn out to be the most convenient for use in our neural network code."""tr_d, va_d, te_d = load_data()

training_inputs = [np.reshape(x, (784, 1)) for x in tr_d[0]] training_results = [vectorized_result(y) for y in tr_d[1]]

training_data = zip(training_inputs, training_results) validation_inputs = [np.reshape(x, (784, 1)) for x in va_d[0]] validation_data = zip(validation_inputs, va_d[1])

test_inputs = [np.reshape(x, (784, 1)) for x in te_d[0]] test_data = zip(test_inputs, te_d[1])

return (training_data, validation_data, test_data)

def vectorized_result(j):"""Return a 10-dimensional unit vector with a 1.0 in the jth position and zeroes elsewhere. This is used to convert a digit

(0...9) into a corresponding desired output from the neural network."""

e = np.zeros((10, 1)) e[j] = 1.0return e

38 篇文章28 人订阅

0 条评论

## 相关文章

2006

2365

36216

3529

5308

### 【干货】卷积神经网络中的四种基本组件

【导读】当今，卷积神经网络在图像识别等领域取得巨大的成功，那么是什么使其高效而快速呢？本文整理John Olafenwa的一篇博文，主要介绍了卷积神经网络采用的...

6766

3376

3726

2669

6577