# 《neural network and deep learning》题解——ch03 如何选择神经网络的超参数

http://blog.csdn.net/u011239443/article/details/77748116

# 问题一

# Problem 1: weight update with the L2 weight-decay factor removed —
# a plain gradient step w -> w - (eta / m) * nabla_w, where m is the
# mini-batch size. (Compare network2.py, which multiplies w by
# (1 - eta * lmbda / n) first.)
self.weights = [w - (eta / len(mini_batch)) * nw for w, nw in
zip(self.weights, nabla_w)]

在 total_cost 中去掉下面的 L2 正则化项：

cost += 0.5 * (lmbda / len(data)) * sum(np.linalg.norm(w) ** 2 for w in self.weights)

net.SGD(training_data[:1000],30,10,0.5,evaluation_data=validation_data[:100],monitor_evaluation_accuracy=True)

Epoch 30 training complete
Acc on evaluation: 17 / 100

Epoch 30 training complete
Acc on evaluation: 11 / 100

λ = 10.0 时，结果：

Epoch 29 training complete
Acc on evaluation: 11 / 100

λ = 1.0 时，结果：

Epoch 30 training complete
Acc on evaluation: 31 / 100

# 问题二

    # Problem 2: SGD signature extended with `max_try` — the number of
    # consecutive non-improving epochs tolerated before early stopping
    # (body elided in the original post).
    # NOTE(review): `monitor_training_accuray` is misspelled ("accuracy")
    # in the original; kept as-is since it is part of the call interface.
    def SGD(self, training_data, epochs, mini_batch_size, eta,
lmbda=0.0,
evaluation_data=None,
monitor_evaluation_cost=False,
monitor_evaluation_accuracy=False,
monitor_training_cost=False,
monitor_training_accuray=False,max_try = 100):

cnt 记录不提升的次数，如达到max_try，就退出循环。这里用monitor_evaluation_accuracy举例：

        # Early stopping: `cnt` counts consecutive epochs where validation
        # accuracy dropped; training stops once it reaches `max_try`.
        # (Indentation below was flattened by the blog export; shown as pasted.)
        cnt = 0
for j in xrange(epochs):
......
if monitor_evaluation_accuracy:
acc = self.accuracy(evaluation_data)
evaluation_accurary.append(acc)
# NOTE(review): this compares only against the immediately preceding
# epoch and resets `cnt` whenever accuracy does NOT drop, so a long
# plateau never triggers the stop — confirm this matches the intended
# "no improvement for max_try epochs" rule (tracking the best-so-far
# accuracy would be the usual formulation).
if len(evaluation_accurary) > 1 and acc < evaluation_accurary[len(evaluation_accurary)-2]:
cnt += 1
if cnt >= max_try:
break
else:
cnt = 0
# Python 2 print statement (the book's code targets Python 2 / xrange).
print "Acc on evaluation: {} / {}".format(acc, n_data)
......

# 问题三

## 策略与实现

    # Problem 3 variant: stop as soon as the per-epoch accuracy gain,
    # expressed as a fraction of the evaluation set, falls below `min_x`.
    def SGD(self, training_data, epochs, mini_batch_size, eta,
lmbda=0.0,
evaluation_data=None,
monitor_evaluation_cost=False,
monitor_evaluation_accuracy=False,
monitor_training_cost=False,
monitor_training_accuray=False,min_x = 0.01):

            if monitor_evaluation_accuracy:
acc = self.accuracy(evaluation_data)
evaluation_accurary.append(acc)
# NOTE(review): training ends on the FIRST epoch whose improvement over
# the previous epoch is below min_x — a single flat or noisy epoch
# terminates the run; confirm that is the intended rule.
if len(evaluation_accurary) > 1 and \
(acc - evaluation_accurary[len(evaluation_accurary)-2])*1.0/(1.0*n_data) < min_x:
break
print "Acc on evaluation: {} / {}".format(acc, n_data)

## 对比

回合不提升终止策略（下面调用中 max_try=3，即连续 3 回合不提升就终止）：

net.SGD(training_data[:1000],50,10,0.25,5.0,evaluation_data=validation_data[:100],
monitor_evaluation_accuracy=True,max_try=3)

Epoch 32 training complete
Acc on evaluation: 15 / 100

Epoch 3 training complete
Acc on evaluation: 17 / 100

# 问题四

        # Problem 4 variant: each time accuracy fails to improve for
        # `max_try` consecutive epochs, halve the learning rate instead of
        # stopping; terminate after 7 halvings (eta reduced by 2**7 = 128x).
        cnt = 0
del_cnt = 0
for j in xrange(epochs):
......
if monitor_evaluation_accuracy:
acc = self.accuracy(evaluation_data)
evaluation_accurary.append(acc)
if len(evaluation_accurary) > 1 and acc < evaluation_accurary[len(evaluation_accurary)-2]:
cnt += 1
if cnt >= max_try:
del_cnt += 1
if del_cnt >= 7:
break
# NOTE(review): indentation was lost in the export — verify that
# `eta /= 2.0` and the `cnt = 0` reset sit inside the
# `cnt >= max_try` branch, after the `del_cnt >= 7` break check.
eta /= 2.0
cnt = 0
else:
cnt = 0
print "Acc on evaluation: {} / {}".format(acc, n_data)

# 问题五

• 使用梯度下降来确定 λ 的障碍在于，代价函数对 λ 的偏导 ∂C/∂λ = Σ‖w‖²/(2n) 恒为非负，因此对 λ 做梯度下降只会让 λ 单调减小、趋向 0，无法据此搜索到合适的正则化参数。

• 使用梯度下降来确定 η 的障碍在于，η 的最优解不是一个常数，随着迭代次数的增加，η 的最优解会越来越小。

0 条评论