Environment:
pip install gym
pip install paddlepaddle==1.6.3
pip install parl==1.3.1
The two key DQN tricks: experience replay and a fixed Q-target network.
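Experience replay stores each transition (obs, action, reward, next_obs, done) in a buffer and trains on randomly sampled mini-batches, which breaks the correlation between consecutive samples. The fixed Q-target network is a periodically synced copy of the main network used only to compute the learning target, which keeps that target stable between syncs. Concretely, the target computed in algorithm.py below is:

target = reward + (1 - done) * gamma * max_a' Q_target(next_obs, a')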
Full code
replay_memory.py

import collections
import random

import numpy as np


# Experience replay buffer
class ReplayMemory(object):
    def __init__(self, max_size):
        self.buffer = collections.deque(maxlen=max_size)

    # Add one transition to the buffer
    def append(self, exp):
        self.buffer.append(exp)

    # Randomly sample batch_size transitions from the buffer
    def sample(self, batch_size):
        # Random draw without replacement
        mini_batch = random.sample(self.buffer, batch_size)
        obs_batch, action_batch, reward_batch, next_obs_batch, done_batch = [], [], [], [], []
        # Split the transitions into separate lists
        for exp in mini_batch:
            obs, a, r, n_obs, done = exp
            obs_batch.append(obs)
            action_batch.append(a)
            reward_batch.append(r)
            next_obs_batch.append(n_obs)
            done_batch.append(done)
        sample_data = (self.get_data_np(obs_batch),
                       self.get_data_np(action_batch),
                       self.get_data_np(reward_batch),
                       self.get_data_np(next_obs_batch),
                       self.get_data_np(done_batch))
        return sample_data

    # Convert a list to a float32 numpy array
    @staticmethod
    def get_data_np(data):
        return np.array(data).astype('float32')

    # Current number of stored transitions
    def __len__(self):
        return len(self.buffer)
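A quick sanity check of the buffer (a standalone sketch with made-up transitions; the tuple layout matches what run_episode appends in train.py below):

from replay_memory import ReplayMemory

rpm = ReplayMemory(max_size=2000)
for _ in range(100):
    # (obs, action, reward, next_obs, done), the same layout run_episode uses
    rpm.append(([0.0, 0.0, 0.0, 0.0], 0, 1.0, [0.0, 0.0, 0.0, 0.0], False))
print(len(rpm))         # 100
batch = rpm.sample(32)  # five float32 arrays: obs, action, reward, next_obs, done
print(batch[0].shape)   # (32, 4)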
Network model
model.py

import parl
from parl import layers


# Fully connected network model
class Model(parl.Model):
    def __init__(self, act_dim):
        # Hidden layer sizes
        hid1_size = 128
        hid2_size = 128
        # Three fully connected layers
        self.fc1 = layers.fc(size=hid1_size, act='relu')
        self.fc2 = layers.fc(size=hid2_size, act='relu')
        self.fc3 = layers.fc(size=act_dim, act=None)  # output layer

    # Forward pass
    def value(self, obs):
        # obs: observation from the environment
        h1 = self.fc1(obs)    # hidden layer 1
        h2 = self.fc2(h1)     # hidden layer 2
        q_out = self.fc3(h2)  # output layer: one Q-value per action
        return q_out
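For the CartPole-v0 environment used below, observations have 4 dimensions and there are 2 discrete actions, so value() maps a (batch, 4) observation tensor to a (batch, 2) tensor of Q-values, one per action.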
Algorithm
algorithm.py

import copy

import paddle.fluid as fluid
import parl
from parl import layers


class DQN(parl.Algorithm):
    def __init__(self, model, act_dim=None, gamma=None, lr=None):
        self.model = model
        self.target_model = copy.deepcopy(model)
        assert isinstance(act_dim, int)
        assert isinstance(gamma, float)
        assert isinstance(lr, float)
        self.act_dim = act_dim
        self.gamma = gamma
        self.lr = lr

    # Sync the current network's weights to the target network
    def sync_target(self):
        self.model.sync_weights_to(self.target_model)

    # Predict Q-values with the current network
    def predict(self, obs):
        return self.model.value(obs)

    # Learn from a batch and update the network
    def learn(self, obs, action, reward, next_obs, terminal):
        # Compute target_Q with the target network
        next_predict_value = self.target_model.value(next_obs)
        best_v = layers.reduce_max(next_predict_value, dim=1)
        best_v.stop_gradient = True  # keep gradients from flowing into target_model
        terminal = layers.cast(terminal, dtype='float32')
        target = reward + (1.0 - terminal) * self.gamma * best_v
        # Compute Q(s, a) with the current network
        predict_value = self.model.value(obs)
        action_one_hot = layers.one_hot(action, self.act_dim)
        action_one_hot = layers.cast(action_one_hot, dtype='float32')
        predict_action_value = layers.reduce_sum(
            layers.elementwise_mul(action_one_hot, predict_value), dim=1)
        # Loss: mean squared error between Q(s, a) and target_Q
        cost = layers.square_error_cost(predict_action_value, target)
        cost = layers.reduce_mean(cost)
        # Optimizer
        optimizer = fluid.optimizer.Adam(learning_rate=self.lr)
        optimizer.minimize(cost)
        return cost
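To make the target in learn() concrete, here is the same computation on a toy batch of two transitions in plain numpy (an illustrative sketch only; the values are made up):

import numpy as np

gamma = 0.99
reward = np.array([1.0, 1.0], dtype='float32')
terminal = np.array([0.0, 1.0], dtype='float32')              # second transition ends the episode
next_q = np.array([[0.5, 2.0], [3.0, 1.0]], dtype='float32')  # Q_target(next_obs, a') per action

best_v = next_q.max(axis=1)                       # [2.0, 3.0], like layers.reduce_max(..., dim=1)
target = reward + (1.0 - terminal) * gamma * best_v
print(target)                                     # [2.98, 1.0]: a terminal state keeps only its reward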
Agent
agent.py

import numpy as np
import paddle.fluid as fluid
import parl
from parl import layers


# Agent
class Agent(parl.Agent):
    def __init__(self, algorithm, obs_dim, act_dim,
                 e_greed=0.1, e_greed_decrement=0):
        assert isinstance(obs_dim, int)
        assert isinstance(act_dim, int)
        self.obs_dim = obs_dim  # observation dimension
        self.act_dim = act_dim  # action dimension
        self.alg = algorithm    # algorithm
        self.global_step = 0
        self.update_target_steps = 200  # sync the target network every 200 learn steps
        self.e_greed = e_greed  # exploration probability
        self.e_greed_decrement = e_greed_decrement  # exploration decay per step
        super(Agent, self).__init__(algorithm)

    def build_program(self):
        self.pred_program = fluid.Program()
        self.learn_program = fluid.Program()

        # Static program that runs the algorithm's prediction
        with fluid.program_guard(self.pred_program):
            obs = layers.data(name='obs',
                              shape=[self.obs_dim],
                              dtype='float32')
            self.value = self.alg.predict(obs)

        # Static program that runs the algorithm's learning step
        with fluid.program_guard(self.learn_program):
            # current observation
            obs = layers.data(name='obs',
                              shape=[self.obs_dim],
                              dtype='float32')
            # action
            action = layers.data(name='act',
                                 shape=[1],
                                 dtype='int32')
            # reward
            reward = layers.data(name='reward', shape=[], dtype='float32')
            # next observation
            next_obs = layers.data(name='next_obs',
                                   shape=[self.obs_dim],
                                   dtype='float32')
            # episode-finished flag
            terminal = layers.data(name='terminal', shape=[], dtype='bool')
            self.cost = self.alg.learn(obs, action, reward, next_obs, terminal)

    # Learn from a batch
    def learn(self, obs, act, reward, next_obs, terminal):
        # Periodically sync the target network
        if self.global_step % self.update_target_steps == 0:
            self.alg.sync_target()
        self.global_step += 1
        # Prepare the feed data
        act = np.expand_dims(act, -1)
        feed = {'obs': obs.astype('float32'),
                'act': act.astype('int32'),
                'reward': reward,
                'next_obs': next_obs.astype('float32'),
                'terminal': terminal}
        # Run the static learn program
        cost = self.fluid_executor.run(
            self.learn_program, feed=feed, fetch_list=[self.cost])[0]
        return cost

    # Predict the best action for one observation
    def predict(self, obs):
        obs = np.expand_dims(obs, axis=0)
        # Run the static prediction program
        pred_q = self.fluid_executor.run(
            self.pred_program,                    # static program
            feed={'obs': obs.astype('float32')},  # input data
            fetch_list=[self.value])[0]           # fetched output
        # Pick the action with the highest Q-value
        pred_q = np.squeeze(pred_q, axis=0)
        act = np.argmax(pred_q)
        return act

    # Epsilon-greedy: random exploration + greedy prediction
    def sample(self, obs):
        sample = np.random.rand()
        if sample < self.e_greed:  # explore
            act = np.random.randint(self.act_dim)
        else:
            act = self.predict(obs)  # exploit
        self.e_greed = max(0.01, self.e_greed - self.e_greed_decrement)  # decay exploration
        return act
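With the defaults used in train.py (e_greed=0.1, e_greed_decrement=1e-6), the exploration rate drops by 1e-6 on every sample() call and reaches its floor of 0.01 after (0.1 - 0.01) / 1e-6 = 90,000 steps.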
Training
train.py

import gym
import numpy as np
from parl.utils import logger

from agent import Agent
from algorithm import DQN
from model import Model
from replay_memory import ReplayMemory


# Run one training episode
def run_episode(env, agent, rpm):
    # Reset the environment
    total_reward = 0
    obs = env.reset()
    step = 0
    while True:
        step += 1
        # Choose an action with exploration
        action = agent.sample(obs)
        next_obs, reward, done, _ = env.step(action)  # take one step
        rpm.append((obs, action, reward, next_obs, done))  # store the transition
        # Learn once the buffer is warmed up, every LEARN_FREQ steps
        if (len(rpm) > MEMORY_WARMUP_SIZE) and (step % LEARN_FREQ == 0):
            # Sample a batch from the buffer
            (batch_obs, batch_action, batch_reward,
             batch_next_obs, batch_done) = rpm.sample(BATCH_SIZE)
            # One learning step (updates the weights)
            train_loss = agent.learn(batch_obs, batch_action,
                                     batch_reward, batch_next_obs,
                                     batch_done)
        # Accumulate the total reward
        total_reward += reward
        # Move to the next observation
        obs = next_obs
        if done:  # episode finished
            break
    return total_reward


# Evaluate the agent
def evaluate(env, agent, render=False):
    # Rewards of the evaluation episodes
    eval_reward = []
    for i in range(5):
        # Reset the environment
        obs = env.reset()
        episode_reward = 0
        # Run one episode
        while True:
            # Greedy action, no exploration
            action = agent.predict(obs)
            # Take one step
            obs, reward, done, _ = env.step(action)
            # Accumulate the reward
            episode_reward += reward
            if render:
                env.render()
            if done:
                break
        # Record the episode reward
        eval_reward.append(episode_reward)
    # Mean reward over 5 episodes
    return np.mean(eval_reward)


def main():
    # Create the environment
    env = gym.make('CartPole-v0')
    action_dim = env.action_space.n
    obs_shape = env.observation_space.shape
    # Build the buffer, model, algorithm and agent
    rpm = ReplayMemory(MEMORY_SIZE)
    model = Model(act_dim=action_dim)
    algorithm = DQN(model, act_dim=action_dim,
                    gamma=GAMMA, lr=LEARNING_RATE)
    agent = Agent(algorithm, obs_dim=obs_shape[0],
                  act_dim=action_dim,
                  e_greed=0.1,
                  e_greed_decrement=1e-6)
    # Warm up the replay buffer
    while len(rpm) < MEMORY_WARMUP_SIZE:
        run_episode(env, agent, rpm)
    # Total number of training episodes
    max_episode = 2000
    episode = 0
    while episode < max_episode:
        # Train for 50 episodes
        for i in range(0, 50):
            total_reward = run_episode(env, agent, rpm)
            episode += 1
        # Evaluate
        eval_reward = evaluate(env, agent, render=True)
        # Log the results
        logger.info('episode:{} e_greed:{} test_reward:{}'.format(
            episode, agent.e_greed, eval_reward))
    # Training finished, save the model
    save_path = './dqn_model.ckpt'
    agent.save(save_path)


if __name__ == '__main__':
    LEARN_FREQ = 5  # learn every 5 steps
    MEMORY_SIZE = 2000  # replay buffer size
    MEMORY_WARMUP_SIZE = 200  # minimum buffer size before learning starts
    BATCH_SIZE = 32
    LEARNING_RATE = 0.001  # learning rate
    GAMMA = 0.99  # reward discount factor
    main()
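CartPole-v0 caps each episode at 200 steps with a reward of 1 per step, so test_reward climbing toward 200 indicates the agent has converged. Note that render=True during evaluation opens a window and slows training; set it to False for headless runs.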
Prediction

import gym
import numpy as np
from parl.utils import logger

from agent import Agent
from algorithm import DQN
from model import Model


# Evaluate the agent
def evaluate(env, agent, render=False):
    # Rewards of the evaluation episodes
    eval_reward = []
    for i in range(5):
        # Reset the environment
        obs = env.reset()
        episode_reward = 0
        # Run one episode
        while True:
            # Greedy action, no exploration
            action = agent.predict(obs)
            # Take one step
            obs, reward, done, _ = env.step(action)
            # Accumulate the reward
            episode_reward += reward
            if render:
                env.render()
            if done:
                break
        # Record the episode reward
        eval_reward.append(episode_reward)
    # Mean reward over 5 episodes
    return np.mean(eval_reward)


def main():
    # Create the environment
    env = gym.make('CartPole-v0')
    action_dim = env.action_space.n
    obs_shape = env.observation_space.shape
    # Build the model, algorithm and agent
    model = Model(act_dim=action_dim)
    algorithm = DQN(model, act_dim=action_dim,
                    gamma=GAMMA, lr=LEARNING_RATE)
    agent = Agent(algorithm, obs_dim=obs_shape[0],
                  act_dim=action_dim,
                  e_greed=0.1,
                  e_greed_decrement=1e-6)
    # Load the trained model
    save_path = './dqn_model.ckpt'
    agent.restore(save_path)
    # Number of evaluation rounds
    max_episode = 3
    episode = 0
    while episode < max_episode:
        # Evaluation only
        eval_reward = evaluate(env, agent, render=True)
        # Log the results
        logger.info('episode:{} e_greed:{} test_reward:{}'.format(
            episode, agent.e_greed, eval_reward))
        episode += 1


if __name__ == '__main__':
    LEARNING_RATE = 0.001  # learning rate
    GAMMA = 0.99  # reward discount factor
    main()