Reinforcement Learning Algorithm Reproduction (6): Double DQN on the gym Inverted Pendulum (CartPole)

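This post trains a Double DQN agent on gym's CartPole-v1 (inverted pendulum) environment. Compared with vanilla DQN, which bootstraps on the maximum target-network value, Double DQN decouples action selection from action evaluation: the evaluation (online) network picks the greedy next action and the target network scores it, which reduces the overestimation bias of the max operator. The two targets are

$$y^{\text{DQN}} = r + \gamma \max_{a} Q_{\text{target}}(s', a)$$

$$y^{\text{DoubleDQN}} = r + \gamma\, Q_{\text{target}}\big(s',\ \arg\max_{a} Q_{\text{eval}}(s', a)\big)$$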

Create RL_brain.py

import torch
import torch.nn as nn
import torch.nn.functional as F                 # torch.nn.functional (activation functions)
import numpy as np


class Net(nn.Module):  # Q-network
    def __init__(self, N_STATES, N_ACTIONS):
        nn.Module.__init__(self)
        self.input_num = N_STATES
        self.output_num = N_ACTIONS

        self.fc1 = nn.Linear(self.input_num, 50)             # input layer -> hidden layer: state dimension -> 50 neurons
        self.fc1.weight.data.normal_(0, 0.1)           # weight initialization (normal distribution, mean 0, std 0.1)

        self.out = nn.Linear(50, self.output_num)            # hidden layer -> output layer: 50 neurons -> one value per action
        self.out.weight.data.normal_(0, 0.1)           # weight initialization (normal distribution, mean 0, std 0.1)

    def forward(self, state):  # forward pass
        x = self.fc1(state)                            # first layer
        x = F.relu(x)                                  # ReLU activation
        actions_value = self.out(x)                    # hidden layer -> output layer

        return actions_value                           # action values


class DQN(object):  # DQN agent (holds the two networks)
    def __init__(self, N_STATES, N_ACTIONS, MEMORY_CAPACITY = 2000, EPSILON = 0.9, LR = 0.1, BATCH_SIZE = 32, TARGET_REPLACE_ITER = 100, GAMMA = 0.8):
        # hyperparameters
        self.n_states = N_STATES  # number of state dimensions
        self.n_actions = N_ACTIONS  # number of actions
        self.memory_capacity = MEMORY_CAPACITY  # replay memory capacity
        self.epsilon = EPSILON   # epsilon-greedy threshold
        self.batch_size = BATCH_SIZE  # mini-batch size
        self.target_replace_iter = TARGET_REPLACE_ITER  # target network update frequency
        self.gamma = GAMMA  # discount factor

        # two separate networks: the evaluation (online) net and the target net
        self.eval_net, self.target_net = Net(N_STATES, N_ACTIONS), Net(N_STATES, N_ACTIONS)

        self.learn_step_counter = 0  # counts learning steps (drives target network updates)

        self.memory_counter = 0  # counts stored transitions
        self.memory = np.zeros((MEMORY_CAPACITY, N_STATES * 2 + 2), dtype=float)   # replay memory: each row stores (s, a, r, s_)

        self.optimizer = torch.optim.Adam(self.eval_net.parameters(), lr=LR)    # Adam optimizer
        self.loss_func = nn.MSELoss()                                           # mean squared error loss

    def choose_action(self, state):
        state = torch.FloatTensor(state)    # convert to a tensor
        state = torch.unsqueeze(state, 0)   # add a batch dimension

        if np.random.uniform() < self.epsilon:                   # greedy
            actions_value = self.eval_net.forward(state)         # forward pass through the evaluation net -> action values (tensor)
            action = torch.max(actions_value, 1)[1].numpy()      # index of the maximum value in each row, as a numpy array
            action = action[0]                                   # extract the scalar from the array

        else:                                                    # explore: choose a random action
            action = np.random.choice(self.n_actions)

        return action

    def store_transition(self, s, a, r, s_):    # store one transition in the replay memory
        transition = np.hstack((s, a, r, s_))   # concatenate horizontally
        index = self.memory_counter % self.memory_capacity   # wrap around so new data overwrites the oldest entries
        self.memory[index, :] = transition
        self.memory_counter += 1                              # increment the transition counter

    def learn(self):
        # update the target network parameters periodically
        if self.learn_step_counter % self.target_replace_iter == 0:
            self.target_net.load_state_dict(self.eval_net.state_dict())        # copy the evaluation net's parameters into the target net
        self.learn_step_counter += 1                                            # increment the learning step counter

        # sample a random mini-batch from the replay memory
        sample_index = np.random.choice(self.memory_capacity, self.batch_size)
        batch_memory = self.memory[sample_index, :]

        # split the batch into tensors (float / long as appropriate)
        b_s = torch.FloatTensor(batch_memory[:, :self.n_states])
        b_a = torch.LongTensor(batch_memory[:, self.n_states:self.n_states+1].astype(int))
        b_r = torch.FloatTensor(batch_memory[:, self.n_states+1:self.n_states+2])
        b_s_ = torch.FloatTensor(batch_memory[:, -self.n_states:])

        q_eval = self.eval_net(b_s).gather(1, b_a)  # Q(s,a) estimated by the evaluation net for the actions actually taken

        # Double DQN target: the evaluation net selects the greedy next action, the target net evaluates it
        a_next = self.eval_net(b_s_).detach().max(1)[1].view(self.batch_size, 1)
        q_next = self.target_net(b_s_).detach()
        q_target = b_r + self.gamma * q_next.gather(1, a_next)   # y = r + gamma * Q_target(s', argmax_a Q_eval(s', a))

        loss = self.loss_func(q_eval, q_target)

        # backpropagate and update the parameters with the optimizer
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()
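
As a quick stand-alone sanity check (a sketch of my own, not part of the original code; the variable names below are placeholders), you can append the following to the bottom of RL_brain.py and confirm that choose_action returns a valid action index for a random 4-dimensional state:

import numpy as np

# hypothetical smoke test: CartPole has a 4-dimensional state and 2 actions
dqn_check = DQN(N_STATES=4, N_ACTIONS=2)
random_state = np.random.uniform(-0.05, 0.05, size=4)
print(dqn_check.choose_action(random_state))   # should print 0 or 1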

Create the training script (e.g., main.py)

import time
import gym
from RL_brain import *

start = time.time()
env = gym.make('CartPole-v1')

N_ACTIONS = env.action_space.n  # action space of the cart-pole (2 actions: 0 = push left, 1 = push right)
N_STATES = env.observation_space.shape[0]  # state space (4-dimensional)
MEMORY_CAPACITY = 1000

dqn = DQN(N_STATES, N_ACTIONS, MEMORY_CAPACITY)

for i_episode in range(400):  # loop over 400 episodes
    s = env.reset()
    ep_r = 0
    for t in range(1000):
        env.render()
        a = dqn.choose_action(s)  # choose an action with the network
        s_, r, done, info = env.step(a)

        # reshape the reward for better convergence
        x, x_dot, theta, theta_dot = s_
        r1 = (env.x_threshold - abs(x)) / env.x_threshold - 0.8
        r2 = (env.theta_threshold_radians - abs(theta)) / env.theta_threshold_radians - 0.5
        r = r1 + r2

        dqn.store_transition(s, a, r, s_)  # store the transition in the replay memory
        ep_r += r

        if dqn.memory_counter == MEMORY_CAPACITY:
            print("Replay memory is full, start learning")

        if dqn.memory_counter > MEMORY_CAPACITY:
            dqn.learn()
            if done:
                print('Ep: ', i_episode, '| Ep_r: ', round(ep_r, 2))  # round() rounds ep_r to 2 decimal places

        if done:
            break  # end of this episode
        s = s_  # update the state

env.close()
interval = time.time() - start
print("Time: %.4f" % interval)