Reinforcement Learning (III): Time to Play Some DOOM with the PARL Framework!!! (Part 2)


Phew~~~ we have finally reached the code part.

No more talk; straight to the code.

Training code

Importing libraries and dependencies

# -*- coding:utf-8 -*-
from vizdoom import *
import random
import copy
import numpy as np
import parl
from parl import layers
import paddle.fluid as fluid
from parl.utils import logger
import collections

Setting up some parameters and paths

# initialize our doomgame environment
game = DoomGame()

# path to the scenario file
# game.set_doom_scenario_path(DOOM_PATH)
game.load_config("/usr/local/lib/python3.6/dist-packages/vizdoom/scenarios/basic.cfg")
# path to the map file
# game.set_doom_map(MAP_PATH)

# set the screen resolution and the screen format
game.set_screen_resolution(ScreenResolution.RES_256X160)
game.set_screen_format(ScreenFormat.GRAY8)

# enable or disable the desired rendering elements and effects with True/False
game.set_render_hud(False)
game.set_render_minimal_hud(False)
game.set_render_crosshair(True)
game.set_render_weapon(True)
game.set_render_decals(True)
game.set_render_particles(True)
game.set_render_effects_sprites(True)
game.set_render_messages(True)
game.set_render_corpses(True)
game.set_render_screen_flashes(True)

# set the buttons available to the agent
game.add_available_button(Button.MOVE_LEFT)
game.add_available_button(Button.MOVE_RIGHT)
game.add_available_button(Button.ATTACK)

# build one-hot action vectors, one per available button, e.g. [[1, 0, 0], [0, 1, 0], [0, 0, 1]]
actions = np.zeros((game.get_available_buttons_size(), game.get_available_buttons_size()))
count = 0
for i in actions:
    i[count] = 1
    count += 1
actions = actions.astype(int).tolist()

# add game variables: ammo, damage dealt, and hit count
game.add_available_game_variable(GameVariable.AMMO0)
game.add_available_game_variable(GameVariable.DAMAGECOUNT)
game.add_available_game_variable(GameVariable.HITCOUNT)

# set episode_timeout to terminate the episode after a number of time steps;
# also set episode_start_time, which is useful for skipping the initial events
game.set_episode_timeout(6 * 200)
game.set_episode_start_time(10)
game.set_sound_enabled(False)

# set the living reward to -10 (penalize every time step spent alive)
game.set_living_reward(-10)

# DOOM has several modes: PLAYER, SPECTATOR, ASYNC_PLAYER and ASYNC_SPECTATOR.
# In PLAYER mode the agent actually plays the game, so that is the mode used here.
game.set_mode(Mode.PLAYER)

# initialize the game environment
game.init()
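Note that the training constants referenced later in the script (LEARN_FREQ, MEMORY_SIZE, MEMORY_WARMUP_SIZE, BATCH_SIZE, LEARNING_RATE, GAMMA, max_episode) are never defined anywhere in this post. Below is a minimal sketch with assumed values so the script can run end to end; the numbers are illustrative and are not the settings behind the results shown later.

# assumed training constants -- not defined in the original post; values are illustrative
LEARN_FREQ = 5             # run one training step every 5 environment steps
MEMORY_SIZE = 20000        # replay buffer capacity
MEMORY_WARMUP_SIZE = 200   # number of transitions to collect before training starts
BATCH_SIZE = 1             # ReplayMemory.sample() below effectively returns a single observation
LEARNING_RATE = 0.001      # Adam learning rate
GAMMA = 0.99               # reward discount factor
max_episode = 2000         # total number of training episodes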

Building the model: essentially a CNN plus a fake "RNN" (not a real one)

# Model
class Model(parl.Model):

    def __init__(self, num_actions):
        # define the hyperparameters of the CNN
        # filter size
        self.filter_size = 5
        # number of filters
        self.num_filters = [16, 32, 64]
        # stride size
        self.stride = 2
        # pool size
        self.poolsize = 2
        self.vocab_size = 4000
        self.emb_dim = 256
        # drop out probability
        self.dropout_probability = [0.3, 0.2]
        self.act_dim = num_actions

    def value(self, obs):
        # first convolutional layer
        self.conv1 = fluid.layers.conv2d(obs,
                                        num_filters = self.num_filters[0],
                                        filter_size = self.filter_size,
                                        stride = self.stride,
                                        act='relu')
        self.pool1 = fluid.layers.pool2d(self.conv1,
                                         pool_size = self.poolsize,
                                         pool_type = "max",
                                         pool_stride = self.stride)
        # second convolutional layer
        self.conv2 = fluid.layers.conv2d(self.pool1,
                                        num_filters = self.num_filters[1],
                                        filter_size = self.filter_size,
                                        stride = self.stride,
                                        act='relu')
        self.pool2 = fluid.layers.pool2d(self.conv2,
                                         pool_size = self.poolsize,
                                         pool_type = "max",
                                         pool_stride = self.stride)
        # third convolutional layer
        self.conv3 = fluid.layers.conv2d(self.pool2,
                                        num_filters = self.num_filters[2],
                                        filter_size = self.filter_size,
                                        stride = self.stride,
                                        act='relu')
        self.pool3 = fluid.layers.pool2d(self.conv3,
                                         pool_size = self.poolsize,
                                         pool_type = "max",
                                         pool_stride = self.stride)

        # add dropout and reshape the input
        self.fc1 = fluid.layers.fc(self.pool3, size = 512, act="relu")
        self.drop1 = fluid.layers.dropout(self.fc1, dropout_prob = self.dropout_probability[0])
        self.fc2 = fluid.layers.fc(self.drop1, size = 512, act="relu")
        # build RNN(fake)
        #self.input = fluid.layers.concat(input = [self.fc2, self.c], axis = 0)
        self.tanh = fluid.layers.tanh(self.fc2)
        self.o = fluid.layers.softmax(self.tanh)
        self.drop2 = fluid.layers.dropout(self.o, dropout_prob = self.dropout_probability[1])
        self.prediction = fluid.layers.fc(self.drop2, size = self.act_dim)

        return self.prediction

The DQN algorithm
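The learn() method below implements the standard DQN update: a separate target network supplies the bootstrapped value, and the online network is regressed onto it with a mean squared error loss:

$$\text{target} = r + (1 - \text{done})\,\gamma \max_{a'} Q_{\text{target}}(s', a'), \qquad L = \big(Q(s, a) - \text{target}\big)^2$$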

# Algorithm
class DQN(parl.Algorithm):

    def __init__(self, model, act_dim=None, gamma=None,  lr=None):
        """
        Args:
            model (parl.Model): 定义Q函数的前向网络结构
            act_dim (int): action空间的维度,即有几个action
            gamma (float): reward的衰减因子
            lr (float): learning rate 学习率.
        """
        self.model = model
        self.target_model = copy.deepcopy(model)

        assert isinstance(act_dim, int)
        assert isinstance(gamma, float)
        assert isinstance(lr, float)
        self.act_dim = act_dim
        self.gamma = gamma
        self.lr = lr

    def predict(self, obs):
        """
        使用self.model的value网络来获取 [Q(s,a1),Q(s,a2),...]
        """
        return self.model.value(obs)

    def learn(self, obs, action, reward, next_obs, terminal):
        """
        使用DQN算法更新self.model的value网络
        """
        # 从target_model中获取 max Q' 的值,用于计算target_Q
        next_pred_value = self.target_model.value(next_obs)
        best_v = layers.reduce_max(next_pred_value, dim=1)
        best_v.stop_gradient = True  # 阻止梯度传递
        terminal = layers.cast(terminal, dtype='float32')
        target = reward + (1.0 - terminal) * self.gamma * best_v

        pred_value = self.model.value(obs)  # 获取Q预测值
        # 将action转onehot向量,比如:3 => [0,0,0,1,0]
        action_onehot = layers.one_hot(action, self.act_dim)
        action_onehot = layers.cast(action_onehot, dtype='float32')
        # 下面一行是逐元素相乘,拿到action对应的 Q(s,a)
        # 比如:pred_value = [[2.3, 5.7, 1.2, 3.9, 1.4]], action_onehot = [[0,0,0,1,0]]
        #  ==> pred_action_value = [[3.9]]
        pred_action_value = layers.reduce_sum(
            layers.elementwise_mul(action_onehot, pred_value), dim=1)

        # 计算 Q(s,a) 与 target_Q的均方差,得到loss
        cost = layers.square_error_cost(pred_action_value, target)
        cost = layers.reduce_mean(cost)
        optimizer = fluid.optimizer.Adam(learning_rate=self.lr)  # 使用Adam优化器
        optimizer.minimize(cost)
        return cost

    def sync_target(self):
        """
        把 self.model 的模型参数值同步到 self.target_model
        """
        self.model.sync_weights_to(self.target_model)

Defining the Agent and the replay memory

# Agent
class Agent(parl.Agent):
    def __init__(self,
                 algorithm,
                 obs_dim,
                 act_dim,
                 e_greed=0.1,
                 e_greed_decrement=0):
        assert isinstance(obs_dim, list)
        assert isinstance(act_dim, int)
        self.obs_dim = obs_dim
        self.act_dim = act_dim
        super(Agent, self).__init__(algorithm)

        self.global_step = 0
        self.update_target_steps = 200  # copy the model parameters to target_model every 200 training steps

        self.e_greed = e_greed  # probability of choosing a random action (exploration)
        self.e_greed_decrement = e_greed_decrement  # gradually reduce exploration as training converges

    def build_program(self):
        self.pred_program = fluid.Program()
        self.learn_program = fluid.Program()

        with fluid.program_guard(self.pred_program):  # build the computation graph for predicting actions; define its input/output variables
            obs = layers.data(name='obs', shape=self.obs_dim, dtype='float32')
            self.value = self.alg.predict(obs)

        with fluid.program_guard(self.learn_program):  # build the computation graph for updating the Q network; define its input/output variables
            obs = layers.data(name='obs', shape=self.obs_dim, dtype='float32')
            action = layers.data(name='act', shape=[1], dtype='int32')
            reward = layers.data(name='reward', shape=[], dtype='float32')
            next_obs = layers.data(name='next_obs', shape=self.obs_dim, dtype='float32')
            terminal = layers.data(name='terminal', shape=[], dtype='bool')
            self.cost = self.alg.learn(obs, action, reward, next_obs, terminal)

    def sample(self, obs):
        sample = np.random.rand()  # random float in [0, 1)
        if sample < self.e_greed:
            act = np.random.randint(self.act_dim)  # explore: every action has a chance of being chosen
        else:
            act = self.predict(obs)  # exploit: choose the best action
        self.e_greed = max(
            0.01, self.e_greed - self.e_greed_decrement)  # gradually reduce exploration as training converges
        return act

    def predict(self, obs):  # choose the best action
        obs = np.array(obs)
        if(obs.shape == (3, 160, 256)):  # already CHW
            pass
        elif(obs.shape == (256, 3, 160)):
            obs = obs.transpose(1, 2, 0)  # -> CHW
        elif(obs.shape == (160, 256, 3)):
            obs = obs.transpose(2, 0, 1)  # -> CHW

        obs = np.expand_dims(obs, axis=0)
        pred_Q = self.fluid_executor.run(
            self.pred_program,
            feed={'obs': obs.astype('float32')},
            fetch_list=[self.value])[0]
        pred_Q = np.squeeze(pred_Q, axis=0)
        act = np.argmax(pred_Q)  # index of the largest Q value, i.e. the chosen action
        return act

    def learn(self, obs, act, reward, next_obs, terminal):
        # sync model and target_model every 200 training steps
        if self.global_step % self.update_target_steps == 0:
            self.alg.sync_target()
        self.global_step += 1
        obs = np.array(obs)

        if(obs.shape == (3, 160, 256)):  # already CHW
            pass
        elif(obs.shape == (256, 3, 160)):
            obs = obs.transpose(1, 2, 0)  # -> CHW
        elif(obs.shape == (160, 256, 3)):
            obs = obs.transpose(2, 0, 1)  # -> CHW
        obs = np.expand_dims(obs, axis=0)
        next_obs = np.array(next_obs)
        if(next_obs.shape == (3, 160, 256)):  # already CHW
            pass
        elif(next_obs.shape == (256, 3, 160)):
            next_obs = next_obs.transpose(1, 2, 0)  # -> CHW
        elif(next_obs.shape == (160, 256, 3)):
            next_obs = next_obs.transpose(2, 0, 1)  # -> CHW
        next_obs = np.expand_dims(next_obs, axis=0)
        act = np.expand_dims(act, -1)

        feed = {
            'obs': obs.astype('float32'),
            'act': act.astype('int32'),
            'reward': reward,
            'next_obs': next_obs.astype('float32'),
            'terminal': terminal
        }
        cost = self.fluid_executor.run(
            self.learn_program, feed=feed, fetch_list=[self.cost])[0]  # run one training step
        return cost
        
class ReplayMemory(object):
    def __init__(self, max_size):
        self.buffer = collections.deque(maxlen=max_size)

    # add one piece of experience to the replay buffer
    def append(self, exp):
        self.buffer.append(exp)

    # sample a mini-batch of N experiences from the buffer
    def sample(self, batch_size):
        mini_batch = random.sample(self.buffer, batch_size)
        obs_batch, action_batch, reward_batch, next_obs_batch, done_batch = [], [], [], [], []

        for experience in mini_batch:
            s, a, r, s_p, done = experience
            obs_batch.append(s)
            action_batch.append(a)
            reward_batch.append(r)
            next_obs_batch.append(s_p)
            done_batch.append(done)

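        # note: only the first observation (and first next_obs) of the sampled
        # mini-batch is returned below, so each training call effectively uses
        # a single transition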
        return np.array(obs_batch[0]).astype('float32'), \
            np.array(action_batch).astype('float32'), np.array(reward_batch).astype('float32'),\
            np.array(next_obs_batch[0]).astype('float32'), np.array(done_batch).astype('float32')

    def __len__(self):
        return len(self.buffer)

A little trick: stack three consecutive frames into one input for the CNN so that the model can pick up motion information.


def frame(state):
    # generator that always yields a stack of the three most recent frames
    n = True
    while True:
        if n:
            # first call: grab three consecutive frames
            frame0 = state.screen_buffer
            state = game.get_state()
            frame1 = state.screen_buffer
            state = game.get_state()
            frame2 = state.screen_buffer
        else:
            # later calls: slide the window forward by one frame
            frame0 = frame1
            frame1 = frame2
            new_state = game.get_state()
            if new_state is not None:  # the episode may already be over; reuse the last state
                state = new_state
            frame2 = state.screen_buffer
        obs = np.dstack([frame0, frame1, frame2])
        n = False
        yield obs
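A quick sanity check of the shapes this generator produces, written as a standalone sketch with dummy arrays (assuming the GRAY8 / RES_256X160 settings above, where each screen_buffer is a 160x256 array):

import numpy as np

# three dummy grayscale frames, 160 x 256 (height x width)
f0, f1, f2 = (np.zeros((160, 256), dtype=np.uint8) for _ in range(3))

stacked = np.dstack([f0, f1, f2])  # -> (160, 256, 3), HWC
chw = stacked.transpose(2, 0, 1)   # -> (3, 160, 256), CHW, which is what Agent.predict() expects
print(stacked.shape, chw.shape)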

Training and evaluation functions

def run_episode(game, agent, rpm):
    game.new_episode()
    logger.info('start new episode')
    state = game.get_state()
    frame_gen = frame(state)
    obs = next(frame_gen)
    step = 0
    while not game.is_episode_finished():
        state = game.get_state()
        step += 1
        action = agent.sample(obs)  # sample an action; every action has some chance of being tried
        reward = game.make_action(actions[action])
        next_obs = next(frame_gen)
        next_obs = np.array(next_obs)

        if(next_obs.shape == (3, 160, 256)):  # already CHW
            pass
        elif(next_obs.shape == (256, 3, 160)):
            next_obs = next_obs.transpose(1, 2, 0)  # -> CHW
        elif(next_obs.shape == (160, 256, 3)):
            next_obs = next_obs.transpose(2, 0, 1)  # -> CHW

        done = game.is_episode_finished()
        rpm.append((obs, action, reward, next_obs, done))

        # train model
        if (len(rpm) > MEMORY_WARMUP_SIZE) and (step % LEARN_FREQ == 0):
            (batch_obs, batch_action, batch_reward, batch_next_obs,
             batch_done) = rpm.sample(BATCH_SIZE)
            train_loss = agent.learn(batch_obs, batch_action, batch_reward,
                                     batch_next_obs,
                                     batch_done)  # s,a,r,s',done
        obs = next_obs
    total_reward = game.get_total_reward()
    print('training reward:'+str(total_reward))
    return total_reward

# evaluate the agent: run 5 episodes and average the total reward
def evaluate(game, agent, render=False):
    eval_reward = []
    for i in range(5):
        game.new_episode()
        episode_reward = 0
        while not game.is_episode_finished():
            state = game.get_state()
            frame_gen = frame(state)
            obs = next(frame_gen)
            action = agent.predict(obs)  # greedy: always pick the best action during evaluation
            reward = game.make_action(actions[action])
            done = game.is_episode_finished()
            misc = state.game_variables
            if render:
                game.set_window_visible(render)
        episode_reward = game.get_total_reward()
        print('evaluate reward:'+str(episode_reward))
        eval_reward.append(episode_reward)
    return np.mean(eval_reward)

The main training loop

action_dim = game.get_available_buttons_size()
obs_shape = [-1, 3, 160, 256]
rpm = ReplayMemory(MEMORY_SIZE)

# build the agent with the PARL framework
model = Model(num_actions = action_dim)
algorithm = DQN(model, act_dim = action_dim, gamma=GAMMA, lr=LEARNING_RATE)
agent = Agent(
    algorithm,
    obs_dim=obs_shape,
    act_dim=action_dim,
    e_greed=0.15,  # probability of choosing a random action (exploration)
    e_greed_decrement=1e-5)  # gradually reduce exploration as training converges

# load a previously saved model
# save_path = './DRQN_DOOM.ckpt'
# agent.restore(save_path)

# warm up the replay buffer first, so the earliest training steps have enough sample diversity
while len(rpm) < MEMORY_WARMUP_SIZE:
    run_episode(game, agent, rpm)

# start training
episode = 0
while episode < max_episode:  # train for max_episode episodes; evaluation episodes do not count toward this total

    # train part
    for i in range(0, 5):
        total_reward = run_episode(game, agent, rpm)
        episode += 1
        logger.info('episode:{}    e_greed:{}  '.format(
            episode, agent.e_greed))

    # test part
    eval_reward = evaluate(game, agent, render=False)  # set render=True to watch the agent play
    logger.info('episode:{}    e_greed:{}   test_reward:{}'.format(
        episode, agent.e_greed, eval_reward))

# training is done; save the model
save_path = './DQN_DOOM.ckpt'
agent.save(save_path)
game.close()


Results

(Gameplay screenshots were shown here.)

Things that still need improvement

  1. The RNN is fake: if an LSTM-style structure is added, the network throws errors when the parameters are copied to the target network.
  2. The reward design is very crude; it is most likely an extremely sparse reward, so convergence is a big problem (a possible shaping approach is sketched after this list).
  3. No GPU was available; the model was trained on a pure-CPU netbook, so the parameters were set very conservatively.
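Regarding point 2: one way to make the reward denser would be to use the game variables already requested above (AMMO0, DAMAGECOUNT, HITCOUNT). The helper below is only a sketch of that idea; the name shaped_reward and the weights are made up, it assumes state.game_variables lists the variables in the order they were added, and it is not what produced the results shown above.

def shaped_reward(prev_vars, curr_vars, base_reward):
    """Add dense bonuses on top of the environment reward (illustrative only).

    prev_vars / curr_vars: state.game_variables before and after the action,
    assumed here to be ordered as [ammo, damage dealt, hit count].
    """
    ammo_delta = curr_vars[0] - prev_vars[0]    # negative when ammo is spent
    damage_delta = curr_vars[1] - prev_vars[1]  # damage dealt during this step
    hit_delta = curr_vars[2] - prev_vars[2]     # successful hits during this step
    # reward damage and hits, lightly penalize wasted ammo
    return base_reward + 0.1 * damage_delta + 10.0 * hit_delta + 1.0 * min(ammo_delta, 0.0)

In run_episode one would cache the previous step's state.game_variables and combine them with the value returned by game.make_action before storing the transition.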

In the future I will publish more articles on machine learning models and algorithms as well as automatic control algorithms, and I will also talk about some of my small projects. I hope you enjoy them.
