Below is a simple Python implementation of the DQN algorithm:
import random

import gym
import numpy as np
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.optimizers import Adam
# Define the DQN agent
class DQN:
    def __init__(self, state_size, action_size):
        self.state_size = state_size
        self.action_size = action_size
        self.memory = []  # replay buffer (unbounded here; a bounded deque is common in practice)
        self.gamma = 0.95  # discount factor
        self.epsilon = 1.0  # epsilon in the epsilon-greedy policy
        self.epsilon_decay = 0.995  # decay rate of epsilon
        self.epsilon_min = 0.01  # minimum value of epsilon
        self.learning_rate = 0.001
        self.model = self._build_model()
    # Build the neural network model
    def _build_model(self):
        model = Sequential()
        model.add(Dense(24, input_dim=self.state_size, activation='relu'))
        model.add(Dense(24, activation='relu'))
        model.add(Dense(self.action_size, activation='linear'))
        model.compile(loss='mse', optimizer=Adam(learning_rate=self.learning_rate))
        return model
    # Store an experience tuple in the replay buffer
    def remember(self, state, action, reward, next_state, done):
        self.memory.append((state, action, reward, next_state, done))
    # Choose an action with an epsilon-greedy policy
    def act(self, state):
        if np.random.rand() <= self.epsilon:
            return np.random.randint(self.action_size)
        else:
            q_values = self.model.predict(state, verbose=0)
            return np.argmax(q_values[0])
    # Train the network on a random minibatch from the replay buffer
    def train(self, batch_size):
        minibatch = random.sample(self.memory, batch_size)
        for state, action, reward, next_state, done in minibatch:
            target = reward
            if not done:
                # Estimate the next state's Q-values with the online network
                # (full DQN would use a separate target network here)
                next_q_values = self.model.predict(next_state, verbose=0)[0]
                target = reward + self.gamma * np.amax(next_q_values)
            # Update the Q-value of the action actually taken
            q_values = self.model.predict(state, verbose=0)
            q_values[0][action] = target
            self.model.fit(state, q_values, epochs=1, verbose=0)
        # Gradually anneal epsilon
        if self.epsilon > self.epsilon_min:
            self.epsilon *= self.epsilon_decay
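
# A quick sanity check on the epsilon schedule: epsilon decays as
# 1.0 * 0.995**n after n train() calls, so it reaches epsilon_min = 0.01
# after about ln(0.01)/ln(0.995) ≈ 919 calls; with one train() call per
# episode, roughly the last 80 of the 1000 episodes below run at minimum
# exploration.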
# Train on the CartPole environment from OpenAI Gym
# (this assumes the classic gym API, gym < 0.26: env.reset() returns the
# observation and env.step() returns 4 values)
env = gym.make('CartPole-v1')
state_size = env.observation_space.shape[0]
action_size = env.action_space.n
dqn = DQN(state_size, action_size)
num_episodes = 1000
batch_size = 32
for episode in range(num_episodes):
    state = env.reset()
    state = np.reshape(state, [1, state_size])
    for step in range(500):  # cap each episode at 500 steps
        action = dqn.act(state)
        next_state, reward, done, _ = env.step(action)
        next_state = np.reshape(next_state, [1, state_size])
        dqn.remember(state, action, reward, next_state, done)
        state = next_state
        if done:
            break
    if len(dqn.memory) > batch_size:
        dqn.train(batch_size)
    if episode % 50 == 0:
        print("Episode: {}, Steps: {}, Epsilon: {:.2f}".format(episode, step, dqn.epsilon))
# Run the trained DQN in the environment
dqn.epsilon = 0.0  # act greedily during evaluation, without exploration
state = env.reset()
state = np.reshape(state, [1, state_size])
for step in range(500):
    env.render()
    action = dqn.act(state)
    next_state, reward, done, _ = env.step(action)
    next_state = np.reshape(next_state, [1, state_size])
    state = next_state
    if done:
        break
env.close()
In this example we train on the CartPole environment from OpenAI Gym. First we define a DQN class containing the neural network model and methods for storing experiences, selecting actions, and training the network. In the main loop we interact with the environment and store each transition in the experience replay buffer. Once the buffer holds more than a batch worth of experiences, we train the network after each episode on a randomly sampled minibatch. Finally, we run the trained DQN in the environment.
Note that this is only a bare-bones implementation of DQN, meant to illustrate the basic idea. In practice the algorithm usually needs further refinements, such as a target network, double DQN, or prioritized experience replay, to improve performance and stability, and the hyperparameters have to be tuned for the specific problem.
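As one illustration of those refinements, the sketch below shows how a target network could be layered on top of the DQN class from this example. This is a minimal sketch under the assumptions of this example only; the names DQNWithTarget, target_model, update_target_model, and update_every are illustrative choices, not part of the original code or any established API:

class DQNWithTarget(DQN):
    def __init__(self, state_size, action_size, update_every=10):
        super().__init__(state_size, action_size)
        self.target_model = self._build_model()  # slow-moving copy of the online network
        self.update_target_model()               # start with identical weights
        self.update_every = update_every         # sync period, measured in train() calls
        self.train_calls = 0

    # Copy the online network's weights into the target network
    def update_target_model(self):
        self.target_model.set_weights(self.model.get_weights())

    def train(self, batch_size):
        minibatch = random.sample(self.memory, batch_size)
        for state, action, reward, next_state, done in minibatch:
            target = reward
            if not done:
                # Bootstrap from the target network instead of the online one
                # (double DQN would additionally let the online network pick
                # the argmax action and the target network evaluate it)
                next_q_values = self.target_model.predict(next_state, verbose=0)[0]
                target = reward + self.gamma * np.amax(next_q_values)
            q_values = self.model.predict(state, verbose=0)
            q_values[0][action] = target
            self.model.fit(state, q_values, epochs=1, verbose=0)
        if self.epsilon > self.epsilon_min:
            self.epsilon *= self.epsilon_decay
        # Periodically sync the target network with the online network
        self.train_calls += 1
        if self.train_calls % self.update_every == 0:
            self.update_target_model()

Keeping the bootstrapping targets fixed between syncs breaks the feedback loop in which the network chases its own moving predictions, which is the main source of instability in the plain version above.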