Today we will put the DQN theory covered in the previous article into practice. The setting is still the "explorer reaches heaven" maze game, but starting from the next article we will switch to the OpenAI Gym environment library and play whatever game we like.
The whole algorithm may look complicated, but it is really just the Q-Learning framework plus three additions: an experience replay memory, neural networks to estimate the Q values, and temporarily frozen q_target parameters. Let's now implement the theory from the previous article step by step.
There is not much to explain here; the code simply follows the algorithm's flow.
from maze_env import Maze
from DQN_modified import DeepQNetwork


def run_maze():
    step = 0  # total step counter, used to control when learning happens
    for episode in range(25000):
        # initialize the environment
        observation = env.reset()

        while True:
            # refresh the environment
            env.render()

            # DQN chooses an action based on the observation
            action = RL.choose_action(observation)

            # the environment returns the next state, the reward, and whether the episode is done
            observation_, reward, done = env.step(action)

            # DQN stores the transition
            RL.store_transition(observation, action, reward, observation_)

            # control when learning starts and how often it happens
            # (we wait 200 steps and then learn every 5 steps so that some
            #  memories accumulate before learning begins)
            if (step > 200) and (step % 5 == 0):
                RL.learn()

            # swap observation
            observation = observation_

            # break while loop when end of this episode
            if done:
                break
            step += 1

    # end of game
    print('game over')
    env.destroy()


if __name__ == "__main__":
    # maze game
    env = Maze()
    RL = DeepQNetwork(env.n_actions, env.n_features,
                      learning_rate=0.01,
                      reward_decay=0.9,
                      e_greedy=0.9,
                      replace_target_iter=200,  # replace target_net's parameters every 200 learning steps
                      memory_size=2000,         # upper limit of the replay memory
                      # output_graph=True       # whether to write a TensorBoard file
                      )
    env.after(100, run_maze)
    env.mainloop()
    RL.plot_cost()
As mentioned in the previous article, we introduce two neural networks to reduce the correlation between the current Q value and the target Q value, which makes the algorithm more stable. So next we build these two networks. target_net is used to predict the q_target values and its parameters are not updated immediately, while eval_net is used to predict q_eval and always holds the latest parameters. The two networks have exactly the same structure; only their parameters differ.

The point of having two networks is to freeze the parameters of one of them (target_net). In other words, target_net is a historical version of eval_net: it holds a set of parameters that eval_net had some time ago, and that set stays fixed for a while before being replaced by eval_net's newer parameters. eval_net, in contrast, is improved continuously, so it is the trainable network (trainable=True), while target_net has trainable=False.
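Spelled out, the loss that ties the two networks together is the standard DQN objective (a restatement in common notation, not code from this post):

$$L(\theta) = \mathbb{E}\big[\,(r + \gamma \max_{a'} Q(s', a'; \theta^-) - Q(s, a; \theta))^2\,\big]$$

Here $\theta^-$ are target_net's frozen parameters and $\theta$ are eval_net's trainable ones; the inner max over next-state actions is exactly what the Qmax_s_ node computes in the code below.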
import numpy as np
import tensorflow as tf

np.random.seed(1)
tf.set_random_seed(1)


# Deep Q Network off-policy
class DeepQNetwork:
    def _build_net(self):
        # ------------------ all inputs ------------------------
        self.s = tf.placeholder(tf.float32, [None, self.n_features], name='s')    # input State
        self.s_ = tf.placeholder(tf.float32, [None, self.n_features], name='s_')  # input Next State
        self.r = tf.placeholder(tf.float32, [None, ], name='r')                   # input Reward
        self.a = tf.placeholder(tf.int32, [None, ], name='a')                     # input Action

        w_initializer, b_initializer = tf.random_normal_initializer(0., 0.3), tf.constant_initializer(0.1)

        # ------------------ build evaluate_net ------------------
        with tf.variable_scope('eval_net'):
            e1 = tf.layers.dense(self.s, 20, tf.nn.relu, kernel_initializer=w_initializer,
                                 bias_initializer=b_initializer, name='e1')
            self.q_eval = tf.layers.dense(e1, self.n_actions, kernel_initializer=w_initializer,
                                          bias_initializer=b_initializer, name='q')

        # ------------------ build target_net ------------------
        with tf.variable_scope('target_net'):
            t1 = tf.layers.dense(self.s_, 20, tf.nn.relu, kernel_initializer=w_initializer,
                                 bias_initializer=b_initializer, name='t1')
            self.q_next = tf.layers.dense(t1, self.n_actions, kernel_initializer=w_initializer,
                                          bias_initializer=b_initializer, name='t2')

        with tf.variable_scope('q_target'):
            q_target = self.r + self.gamma * tf.reduce_max(self.q_next, axis=1, name='Qmax_s_')  # shape=(None, )
            self.q_target = tf.stop_gradient(q_target)  # stop_gradient cuts backprop through q_target, which simplifies computing the loss
        with tf.variable_scope('q_eval'):
            a_indices = tf.stack([tf.range(tf.shape(self.a)[0], dtype=tf.int32), self.a], axis=1)
            self.q_eval_wrt_a = tf.gather_nd(params=self.q_eval, indices=a_indices)  # shape=(None, )
        with tf.variable_scope('loss'):
            self.loss = tf.reduce_mean(tf.squared_difference(self.q_target, self.q_eval_wrt_a, name='TD_error'))
        with tf.variable_scope('train'):
            self._train_op = tf.train.RMSPropOptimizer(self.lr).minimize(self.loss)
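The tf.stack/tf.gather_nd pair above just selects, for each transition in the batch, the Q value of the action that was actually taken. Here is a small NumPy sketch (with made-up toy numbers) of the same indexing:

import numpy as np

# toy batch: 3 transitions, 4 actions, as if produced by eval_net
q_eval = np.array([[0.1, 0.5, 0.2, 0.0],
                   [0.3, 0.1, 0.9, 0.4],
                   [0.7, 0.2, 0.1, 0.6]])
a = np.array([1, 2, 0])  # the action taken in each transition

# tf.stack builds (row, action) index pairs; tf.gather_nd then picks
# q_eval[row, action] for each row, which in NumPy is just:
q_eval_wrt_a = q_eval[np.arange(len(a)), a]
print(q_eval_wrt_a)  # [0.5 0.9 0.7]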
With the two networks defined, we now define the rest of the class, including:
class DeepQNetwork:
    # what we just built
    def _build_net(self):

    # what comes next:
    # initialization
    def __init__(self):

    # store a transition
    def store_transition(self, s, a, r, s_):

    # choose an action
    def choose_action(self, observation):

    # learn
    def learn(self):

    # inspect the learning curve (optional)
    def plot_cost(self):
class DeepQNetwork:
    def __init__(
            self,
            n_actions,
            n_features,
            learning_rate=0.01,
            reward_decay=0.9,
            e_greedy=0.9,
            replace_target_iter=300,
            memory_size=500,
            batch_size=32,
            e_greedy_increment=None,
            output_graph=False,
    ):
        self.n_actions = n_actions
        self.n_features = n_features
        self.lr = learning_rate
        self.gamma = reward_decay
        self.epsilon_max = e_greedy                     # maximum value of epsilon
        self.replace_target_iter = replace_target_iter  # number of learning steps between target_net replacements
        self.memory_size = memory_size                  # upper limit of the replay memory
        self.batch_size = batch_size                    # how many transitions to sample from memory per update
        self.epsilon_increment = e_greedy_increment     # increment of epsilon per learning step
        self.epsilon = 0 if e_greedy_increment is not None else self.epsilon_max  # if epsilon grows over time, start fully exploratory and gradually explore less

        # counts learning steps (used to decide when to replace target_net's parameters)
        self.learn_step_counter = 0

        # initialize the memory to all zeros; each row is [s, a, r, s_]
        self.memory = np.zeros((self.memory_size, n_features * 2 + 2))  # n_features is the number of state features (here the x and y coordinates)

        # consist of [target_net, evaluate_net]
        self._build_net()

        t_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='target_net')  # collect target_net's parameters
        e_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='eval_net')    # collect eval_net's parameters

        with tf.variable_scope('hard_replacement'):
            self.target_replace_op = [tf.assign(t, e) for t, e in zip(t_params, e_params)]  # op that overwrites target_net's parameters with eval_net's

        self.sess = tf.Session()

        if output_graph:
            # $ tensorboard --logdir=logs
            tf.summary.FileWriter("logs/", self.sess.graph)

        self.sess.run(tf.global_variables_initializer())
        self.cost_his = []
Now for the essence of DQN: the experience replay buffer. It records every transition the agent has gone through, and those transitions can be learned from repeatedly, which is what makes this an off-policy method.
    def store_transition(self, s, a, r, s_):
        if not hasattr(self, 'memory_counter'):
            self.memory_counter = 0
        # pack one record of [s, a, r, s_]
        transition = np.hstack((s, [a, r], s_))
        # the total memory size is fixed; once exceeded, old memories are overwritten by new ones
        index = self.memory_counter % self.memory_size
        self.memory[index, :] = transition  # the overwrite
        self.memory_counter += 1
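As a sanity check on this row layout, here is a toy example (made-up numbers, with n_features=2 as in the maze) showing how learn() later slices a stored row back apart:

import numpy as np

n_features = 2
s, a, r, s_ = [0.0, 1.0], 3, 1.0, [1.0, 1.0]
transition = np.hstack((s, [a, r], s_))  # array([0., 1., 3., 1., 1., 1.])

batch = transition[np.newaxis, :]        # pretend it is a sampled batch of 1
print(batch[:, :n_features])             # states:      [[0. 1.]]
print(batch[:, n_features])              # actions:     [3.]
print(batch[:, n_features + 1])          # rewards:     [1.]
print(batch[:, -n_features:])            # next states: [[1. 1.]]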
    def choose_action(self, observation):
        # make observation's shape uniform: (1, size_of_observation)
        observation = observation[np.newaxis, :]

        if np.random.uniform() < self.epsilon:
            # let eval_net produce the values of all actions and pick the action with the highest value
            actions_value = self.sess.run(self.q_eval, feed_dict={self.s: observation})
            action = np.argmax(actions_value)
        else:
            action = np.random.randint(0, self.n_actions)
        return action
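A quick standalone check of the epsilon-greedy split this implements (a hypothetical epsilon of 0.9, not part of the class):

import numpy as np

np.random.seed(0)
epsilon = 0.9
draws = [np.random.uniform() < epsilon for _ in range(10000)]
print(sum(draws) / len(draws))  # ~0.9: greedy about 90% of the time, random otherwise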
This is where target_net and eval_net are used together.
    def learn(self):
        # check whether it is time to replace target_net's parameters
        if self.learn_step_counter % self.replace_target_iter == 0:
            self.sess.run(self.target_replace_op)
            print('\ntarget_params_replaced\n')

        # sample batch_size transitions at random from memory
        if self.memory_counter > self.memory_size:
            sample_index = np.random.choice(self.memory_size, size=self.batch_size)
        else:
            sample_index = np.random.choice(self.memory_counter, size=self.batch_size)
        batch_memory = self.memory[sample_index, :]

        # train eval_net
        _, cost = self.sess.run(
            [self._train_op, self.loss],
            feed_dict={
                self.s: batch_memory[:, :self.n_features],
                self.a: batch_memory[:, self.n_features],
                self.r: batch_memory[:, self.n_features + 1],
                self.s_: batch_memory[:, -self.n_features:],
            })

        self.cost_his.append(cost)

        # gradually increase epsilon to make behaviour less random
        self.epsilon = self.epsilon + self.epsilon_increment if self.epsilon < self.epsilon_max else self.epsilon_max
        self.learn_step_counter += 1

Finally, the optional plot_cost plots the cost history recorded during learning:

    def plot_cost(self):
        import matplotlib.pyplot as plt
        plt.plot(np.arange(len(self.cost_his)), self.cost_his)
        plt.ylabel('Cost')
        plt.xlabel('training steps')
        plt.show()
Let's take a look at the cost curve it produces:
Readers with some deep learning background may be surprised: shouldn't the cost curve go down steadily? Why does the cost suddenly rise again later on? The reason is that DQN's input data changes step by step, and the data it collects depends on how the learning itself is going; unlike ordinary supervised learning, DQN's cost curve simply looks different.
So we switch to counting rewards to judge the algorithm: every replace_target_iter=200 learning steps we count how many times the treasure was found. replace_target_iter is measured against learn_step_counter, i.e., we tally once every 200 learning steps; and since the agent learns only once every 5 steps, this amounts to counting how many times the treasure is found in every 1000 steps. Moreover, target_net's parameters are replaced once every 1000 steps, which changes the target that evaluate_net is converging toward and therefore causes fluctuations in performance. The cost is merely an intermediate quantity in evaluate_net's updates; the number of treasures found per 1000 steps is the real performance metric. We can see that performance basically stabilizes by around step 20000.
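For completeness, here is one way such a tally could be wired into run_maze. This is a hypothetical helper, not from the original code, and it assumes env.step() returns reward == 1 exactly when the treasure is found:

# hypothetical instrumentation: count treasure finds per 1000 steps
treasure_finds = 0
find_history = []

def record_step(reward, step):
    global treasure_finds
    if reward == 1:  # assumption: reward 1 marks reaching the treasure
        treasure_finds += 1
    if step > 0 and step % 1000 == 0:
        find_history.append(treasure_finds)  # finds within the last 1000 steps
        treasure_finds = 0

Calling record_step(reward, step) right after env.step(action) in the training loop would produce the per-1000-step curve described above.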