周老师课程推荐的程序解析
一、关键点
一、关于eta
二、关于离散化
离散为40个状态(二维)
三、关于下划线 _
在 Python 中,_ 作为循环变量表示该变量是临时的、其值无关紧要(只关心循环次数,不使用变量本身)
四、关于列表解析
solution_policy_scores = [run_episode(env, solution_policy, False) for _ in range(100)]
二、代码块
import numpy as np
import gym
from gym import wrappers
off_policy = True  # if True use off-policy Q-learning update; if False use on-policy SARSA update
n_states = 40  # number of discretization bins per observation dimension (state grid is n_states x n_states)
iter_max = 5000  # number of training episodes
initial_lr = 1.0  # initial learning rate (decayed every 100 episodes)
min_lr = 0.003  # floor for the decayed learning rate
gamma = 1.0  # discount factor (1.0 = undiscounted return)
t_max = 10000  # maximum steps per episode before giving up
eps = 0.1  # epsilon for epsilon-greedy exploration
测试策略函数
def run_episode(env, policy=None, render=False):
    """Roll out a single episode and return its discounted total reward.

    If ``policy`` is None, actions are sampled uniformly at random;
    otherwise ``policy`` is a 2-D table indexed by the discretized state
    (see ``obs_to_state``). The episode runs for at most ``t_max`` steps.
    """
    obs = env.reset()
    total_reward = 0
    # step index doubles as the exponent for the discount factor
    for step_idx in range(t_max):
        if render:
            env.render()
        if policy is None:
            action = env.action_space.sample()
        else:
            row, col = obs_to_state(env, obs)
            action = policy[row][col]
        obs, reward, done, _ = env.step(action)
        total_reward += (gamma ** step_idx) * reward
        if done:
            break
    return total_reward
离散化状态函数
def obs_to_state(env, obs, n_bins=None):
    """Map a continuous 2-D observation to a discrete grid cell.

    Parameters:
        env: environment exposing ``observation_space.low``/``.high``
             (array-like of length 2).
        obs: observation, indexable as ``obs[0]`` (position) and
             ``obs[1]`` (velocity).
        n_bins: number of bins per dimension; defaults to the
             module-level ``n_states`` for backward compatibility.

    Returns:
        (a, b): integer bin indices, each clamped to ``[0, n_bins - 1]``.
    """
    if n_bins is None:
        n_bins = n_states  # fall back to the module-level bin count
    env_low = env.observation_space.low
    env_high = env.observation_space.high
    env_dx = (env_high - env_low) / n_bins  # bin width per dimension
    # Clamp both indices: an observation exactly at the upper bound
    # (e.g. velocity at its limit) would otherwise yield index n_bins,
    # one past the end of the Q-table.
    a = min(n_bins - 1, max(0, int((obs[0] - env_low[0]) / env_dx[0])))
    b = min(n_bins - 1, max(0, int((obs[1] - env_low[1]) / env_dx[1])))
    return a, b
主函数
if __name__ == '__main__':
    env_name = 'MountainCar-v0'
    env = gym.make(env_name)
    env.seed(0)        # fix the environment's RNG for reproducibility
    np.random.seed(0)  # fix numpy's RNG for reproducibility
    if off_policy:  # idiomatic truth test (was `== True`)
        print ('----- using Q Learning -----')
    else:
        print('------ using SARSA Learning ---')
    # Q-table over the discretized 2-D state grid; MountainCar has 3 actions.
    q_table = np.zeros((n_states, n_states, 3))
    for i in range(iter_max):
        obs = env.reset()
        total_reward = 0
        # eta: learning rate, decayed by 0.85 every 100 episodes, floored at min_lr
        eta = max(min_lr, initial_lr * (0.85 ** (i // 100)))
        for j in range(t_max):
            a, b = obs_to_state(env, obs)  # discretize the current state
            # epsilon-greedy action selection
            if np.random.uniform(0, 1) < eps:
                action = np.random.choice(env.action_space.n)
            else:
                action = np.argmax(q_table[a][b])
            obs, reward, done, _ = env.step(action)
            total_reward += reward
            # discretize the successor state for the TD update
            a_, b_ = obs_to_state(env, obs)
            if off_policy:
                # Q-learning (off-policy): bootstrap from the greedy successor value
                q_table[a][b][action] = q_table[a][b][action] + eta * (reward + gamma * np.max(q_table[a_][b_]) - q_table[a][b][action])
            else:
                # SARSA (on-policy): bootstrap from an epsilon-greedy successor action
                if np.random.uniform(0,1) < eps:
                    action_ = np.random.choice(env.action_space.n)
                else:
                    action_ = np.argmax(q_table[a_][b_])
                q_table[a][b][action] = q_table[a][b][action] + eta * (reward + gamma * q_table[a_][b_][action_] - q_table[a][b][action])
            if done:
                break
        if i % 200 == 0:
            print('Iteration #%d -- Total reward = %d.' %(i+1, total_reward))
    # greedy policy: best action per discretized state
    solution_policy = np.argmax(q_table, axis=2)
    # evaluate the learned policy over 100 episodes without rendering
    solution_policy_scores = [run_episode(env, solution_policy, False) for _ in range(100)]
    print("Average score of solution = ", np.mean(solution_policy_scores))
    # Animate the learned policy twice
    for _ in range(2):
        run_episode(env, solution_policy, True)
    env.close()
主函数的顺序是:
首先建立一个环境然后选择Q-Learning,然后初始化Q表。
循环5000个ep,每个里有10000步。
选择动作,得到状态、奖励和结束标志
再离散化得到下一个状态,用该状态各动作中的最大 Q 值(Q-Learning)或 epsilon-贪心选出动作的 Q 值(SARSA)来更新 Q 表。
每200个ep打印一次当前ep的总奖励。
5000ep过后更新策略,然后展示2次画面。