D3QN 以 DQN 为基础，加入 Double 和 Dueling 两种改进，收敛速度更快、收敛过程也更加稳定。
从实际测试来看，强化学习对网络初值比较敏感，所以如果一开始训练效果不好，可以重新开始训练，可能会得到更好的效果。
demo:
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import gym
import matplotlib.pyplot as plt
import copy
import os
import random
# Allow duplicate OpenMP runtimes — common workaround for the
# "libiomp5 already initialized" crash when torch and matplotlib
# both ship their own OpenMP on Windows.
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"

# hyper-parameters
BATCH_SIZE = 128            # mini-batch size sampled from the replay memory
LR = 0.001                  # optimizer learning rate
GAMMA = 0.995               # discount factor for future rewards
EPISILO = 0.9               # epsilon for epsilon-greedy action selection
                            # (sic: "EPSILON"; name kept for compatibility with the rest of the file)
MEMORY_CAPACITY = 8000      # replay-buffer capacity, in transitions
Q_NETWORK_ITERATION = 100   # learning steps between target-network updates

env = gym.make("MountainCar-v0")
env = env.unwrapped         # strip wrappers (e.g. TimeLimit) so episodes are not truncated

NUM_ACTIONS = env.action_space.n              # number of discrete actions
NUM_STATES = env.observation_space.shape[0]   # dimensionality of the observation vector

# 0 for a discrete (int-valued) action space, otherwise the shape of a sampled action.
# FIX: the original read `.shape` off the bound method `env.action_space.sample`
# instead of off a sampled action; that raises AttributeError for any
# non-int action space. It must be `env.action_space.sample().shape`.
ENV_A_SHAPE = 0 if isinstance(env.action_space.sample(), int) else env.action_space.sample().shape
class Net(nn.Module):