An Example Python Implementation of Deep Q-Network (DQN)

Below is a simple Python implementation of the DQN algorithm:

```python
import random
from collections import deque

import gym
import numpy as np
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.optimizers import Adam

# Define the DQN agent
class DQN:
    def __init__(self, state_size, action_size):
        self.state_size = state_size
        self.action_size = action_size
        self.memory = deque(maxlen=2000)  # experience-replay buffer with a bounded size
        self.gamma = 0.95  # discount factor
        self.epsilon = 1.0  # epsilon of the epsilon-greedy policy
        self.epsilon_decay = 0.995  # decay rate of epsilon
        self.epsilon_min = 0.01  # minimum value of epsilon
        self.learning_rate = 0.001
        self.model = self._build_model()

    # Build the neural-network model that maps a state to one Q-value per action
    def _build_model(self):
        model = Sequential()
        model.add(Dense(24, input_dim=self.state_size, activation='relu'))
        model.add(Dense(24, activation='relu'))
        model.add(Dense(self.action_size, activation='linear'))
        model.compile(loss='mse', optimizer=Adam(learning_rate=self.learning_rate))
        return model

    # Store a transition in the replay buffer
    def remember(self, state, action, reward, next_state, done):
        self.memory.append((state, action, reward, next_state, done))

    # Select an action with an epsilon-greedy policy
    def act(self, state):
        if np.random.rand() <= self.epsilon:
            return np.random.randint(self.action_size)
        else:
            q_values = self.model.predict(state)
            return np.argmax(q_values[0])

    # Train the Q-network on a random mini-batch from the replay buffer
    def train(self, batch_size):
        minibatch = random.sample(self.memory, batch_size)
        for state, action, reward, next_state, done in minibatch:
            target = reward
            if not done:
                # Estimate the next state's Q-values (here with the online network itself)
                next_q_values = self.model.predict(next_state)[0]
                target = reward + self.gamma * np.amax(next_q_values)
            # Update the Q-value of the action that was taken toward the TD target
            q_values = self.model.predict(state)
            q_values[0][action] = target
            self.model.fit(state, q_values, epochs=1, verbose=0)
        # Gradually decay epsilon
        if self.epsilon > self.epsilon_min:
            self.epsilon *= self.epsilon_decay

# Train on the CartPole environment from OpenAI Gym
env = gym.make('CartPole-v1')
state_size = env.observation_space.shape[0]
action_size = env.action_space.n
dqn = DQN(state_size, action_size)

num_episodes = 1000
batch_size = 32

for episode in range(num_episodes):
    state = env.reset()
    state = np.reshape(state, [1, state_size])

    for step in range(500):  # at most 500 steps per episode
        action = dqn.act(state)
        next_state, reward, done, _ = env.step(action)
        next_state = np.reshape(next_state, [1, state_size])
        dqn.remember(state, action, reward, next_state, done)
        state = next_state
        if done:
            break

    if len(dqn.memory) > batch_size:
        dqn.train(batch_size)

    if episode % 50 == 0:
        print("Episode: {}, Steps: {}, Epsilon: {:.2f}".format(episode, step, dqn.epsilon))

# Run the trained DQN in the environment
state = env.reset()
state = np.reshape(state, [1, state_size])

for step in range(500):
    env.render()
    action = dqn.act(state)
    next_state, reward, done, _ = env.step(action)
    next_state = np.reshape(next_state, [1, state_size])
    state = next_state
    if done:
        break

env.close()
```

In this example we train on the CartPole environment from OpenAI Gym. First, we define a DQN class that contains the neural-network model and methods for storing experience, selecting actions, and training the network. Then, in the main loop, we interact with the environment and store each transition in the experience-replay buffer. Once the buffer holds enough transitions, we train the network on a randomly sampled mini-batch. Finally, we run the trained DQN in the environment to see how it performs.
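For concreteness, the TD target that `train()` regresses each Q-value toward works out as follows for a single non-terminal transition (the numbers below are made up purely for illustration):

```python
# Hypothetical transition, illustrative values only
reward = 1.0                  # reward received for this step
gamma = 0.95                  # discount factor, as in the DQN class above
next_q_values = [0.8, 1.2]    # Q-values the network predicts for the next state

# Non-terminal step: bootstrap from the best next Q-value
target = reward + gamma * max(next_q_values)   # 1.0 + 0.95 * 1.2 = 2.14
```

For a terminal transition (`done == True`) the target is simply the reward itself.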

Note that this is only a minimal implementation of DQN, intended to illustrate the basic idea. In practice the algorithm usually needs further improvements, such as a target network, Double DQN, or prioritized experience replay, to make training more stable and effective (a sketch of the first two is given below), and the hyperparameters need to be tuned for the problem at hand.
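As an illustration only, here is a minimal sketch of how a separate target network and a Double DQN target could be added on top of the `DQN` class above. The class `DQNWithTarget` and its methods `update_target_model` and `compute_target` are hypothetical names introduced for this sketch, not part of the original code.

```python
import numpy as np
from tensorflow.keras.models import clone_model


class DQNWithTarget(DQN):
    """Sketch: the DQN agent above, extended with a target network and a Double DQN target."""

    def __init__(self, state_size, action_size):
        super().__init__(state_size, action_size)
        # Separate target network with the same architecture, synchronized only periodically
        self.target_model = clone_model(self.model)
        self.target_model.set_weights(self.model.get_weights())

    def update_target_model(self):
        # Copy the online weights into the target network (e.g. every N episodes)
        self.target_model.set_weights(self.model.get_weights())

    def compute_target(self, reward, next_state, done):
        if done:
            return reward
        # Double DQN: the online network selects the best next action,
        # the target network evaluates it
        best_action = np.argmax(self.model.predict(next_state, verbose=0)[0])
        next_q = self.target_model.predict(next_state, verbose=0)[0][best_action]
        return reward + self.gamma * next_q
```

In `train()`, the target for each sampled transition would then come from `compute_target()` instead of the online network, and `update_target_model()` would be called every fixed number of episodes.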

DQN (Deep Q-Network) is a reinforcement learning algorithm based on deep learning. It uses a neural network to estimate the Q-value of each action, which lets the agent make decisions in its environment. A Python implementation of DQN proceeds in the following steps:

1. Import the required libraries and create the environment

```python
import random
from collections import deque

import gym
import numpy as np
import tensorflow as tf

env = gym.make('CartPole-v0')
state_size = env.observation_space.shape[0]
action_size = env.action_space.n
```

2. Define the DQN model, including the network structure and the training method

```python
class DQNAgent:
    def __init__(self, state_size, action_size):
        self.state_size = state_size
        self.action_size = action_size
        self.memory = deque(maxlen=2000)
        self.gamma = 0.95
        self.epsilon = 1.0
        self.epsilon_decay = 0.995
        self.epsilon_min = 0.01
        self.learning_rate = 0.001
        self.model = self._build_model()

    def _build_model(self):
        model = tf.keras.models.Sequential()
        model.add(tf.keras.layers.Dense(24, input_dim=self.state_size, activation='relu'))
        model.add(tf.keras.layers.Dense(24, activation='relu'))
        model.add(tf.keras.layers.Dense(self.action_size, activation='linear'))
        model.compile(loss='mse',
                      optimizer=tf.keras.optimizers.Adam(learning_rate=self.learning_rate))
        return model

    def remember(self, state, action, reward, next_state, done):
        self.memory.append((state, action, reward, next_state, done))

    def act(self, state):
        if np.random.rand() <= self.epsilon:
            return np.random.choice(self.action_size)
        act_values = self.model.predict(state)
        return np.argmax(act_values[0])

    def replay(self, batch_size):
        minibatch = random.sample(self.memory, batch_size)
        for state, action, reward, next_state, done in minibatch:
            target = reward
            if not done:
                target = reward + self.gamma * np.amax(self.model.predict(next_state)[0])
            target_f = self.model.predict(state)
            target_f[0][action] = target
            self.model.fit(state, target_f, epochs=1, verbose=0)
        if self.epsilon > self.epsilon_min:
            self.epsilon *= self.epsilon_decay

    def load(self, name):
        self.model.load_weights(name)

    def save(self, name):
        self.model.save_weights(name)
```

3. Train the DQN model and test it

```python
agent = DQNAgent(state_size, action_size)
done = False
batch_size = 32
EPISODES = 100

for e in range(EPISODES):
    state = env.reset()
    state = np.reshape(state, [1, state_size])
    for time in range(500):
        action = agent.act(state)
        next_state, reward, done, _ = env.step(action)
        reward = reward if not done else -10
        next_state = np.reshape(next_state, [1, state_size])
        agent.remember(state, action, reward, next_state, done)
        state = next_state
        if done:
            print("episode: {}/{}, score: {}, e: {:.2}"
                  .format(e, EPISODES, time, agent.epsilon))
            break
        if len(agent.memory) > batch_size:
            agent.replay(batch_size)
    if e % 10 == 0:
        agent.save("./dqn.h5")

# test the trained DQN model
agent.load("./dqn.h5")
state = env.reset()
state = np.reshape(state, [1, state_size])
for time in range(500):
    env.render()
    action = agent.act(state)
    next_state, reward, done, _ = env.step(action)
    state = np.reshape(next_state, [1, state_size])
    if done:
        break
env.close()
```