MADDPG paper code walkthrough:
Source: Bilibili MADDPG video
① Setting up the environment on Linux
git clone https://github.com/openai/multiagent-particle-envs.git
#this step can also be done by downloading the folder directly
cd multiagent-particle-envs/ #enter the folder
python -m venv maddpg #create a virtual environment named maddpg
source maddpg/bin/activate #activate the virtual environment named maddpg
pip list
pip install gym==0.10.5 #OpenAI Gym (0.10.5)
pip list
pip uninstall numpy #removes the numpy folder from its install path; answer 'y' to confirm
pip install numpy==1.14.5 #install numpy (1.14.5)
pip list #check what is installed
ls #list the contents of the folder
pip install -e . #editable install; note the trailing dot
pip install torch #install torch
vim test.py #open the .py file for editing
#alternatively, drag a pre-written .py file into the folder
##after finishing test.py, run
python test.py
vim maddpg_torch.py
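As an optional sanity check (not part of the original walkthrough), the pinned versions can be verified before writing any code:
python -c "import gym, numpy; print(gym.__version__, numpy.__version__)" #should print 0.10.5 and 1.14.5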
② Contents of test.py
import numpy as np
from make_env import make_env #environment factory from the repo
env = make_env('simple_adversary') #the physical deception scenario
#print('number of agents',env.n) #number of agents (2 cooperators, 1 adversary)
#print('observation space',env.observation_space) #observation spaces (one per agent, 3 agents)
#print('action space',env.action_space) #action spaces (one per agent, 3 agents)
#print('n actions',env.action_space[0].n) #number of actions
observation = env.reset()
#print(observation)
no_op = np.array([0,0.1,0.12,0.33,0.54]) #floats are continuous actions, integers are discrete; these are continuous
#the paper uses discrete actions
action = [no_op,no_op,no_op]
obs_,reward,done,info = env.step(action)
print(reward)
print(done)
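The script above takes a single step. For illustration, a minimal multi-step rollout sketch with random continuous actions (an assumption for demonstration, not part of the original test.py):

import numpy as np
from make_env import make_env

env = make_env('simple_adversary')
obs = env.reset()
for t in range(25):
    #one random length-5 continuous action per agent
    actions = [np.random.uniform(0, 1, env.action_space[i].n) for i in range(env.n)]
    obs, reward, done, info = env.step(actions)
    if all(done): #episodes in this scenario rarely terminate on their own
        obs = env.reset()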
③ maddpg_torch.py
import os
import numpy as np
import torch as T
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from make_env import make_env
class MultiAgentReplayBuffer: #build the replay buffer first
    def __init__(self, max_size, critic_dims, actor_dims, n_actions, n_agents, batch_size):
        self.mem_size = max_size #capacity of the replay buffer
        self.mem_cntr = 0 #counter of stored transitions
        self.n_agents = n_agents #number of agents
        self.actor_dims = actor_dims #per-agent observation dims (needed in init_actor_memory below)
        self.batch_size = batch_size #size of each sampled batch
        self.n_actions = n_actions #number of action components
        #centralized training, so these memories belong to the critic
        self.state_memory = np.zeros((self.mem_size, critic_dims)) #global state s
        self.new_state_memory = np.zeros((self.mem_size, critic_dims)) #next global state
        self.reward_memory = np.zeros((self.mem_size, n_agents)) #rewards
        self.terminal_memory = np.zeros((self.mem_size, n_agents), dtype=bool) #terminal (end-of-episode) flags: Q is always 0 at a terminal state, since there is no future reward
        #calls the function defined below: init_actor_memory
        self.init_actor_memory()
    def init_actor_memory(self): #initialize the per-actor memories
        self.actor_state_memory = [] #observations
        self.actor_new_state_memory = [] #next observations
        self.actor_action_memory = [] #actions
        for i in range(self.n_agents): #one slot per agent, filled from each actor (decentralized execution)
            self.actor_state_memory.append(
                np.zeros((self.mem_size, self.actor_dims[i]))) #observations
            self.actor_new_state_memory.append(
                np.zeros((self.mem_size, self.actor_dims[i]))) #next observations
            self.actor_action_memory.append(
                np.zeros((self.mem_size, self.n_actions))) #actions

    def store_transition(self, raw_obs, state, action, reward,
                         raw_obs_, state_, done): #raw_obs: per-agent observations; state: all observations flattened, passed to the critic; raw_obs_ and state_: their successors
        if self.mem_cntr % self.mem_size == 0 and self.mem_cntr > 0:
            self.init_actor_memory()
        index = self.mem_cntr % self.mem_size
        for agent_idx in range(self.n_agents):
            self.actor_state_memory[agent_idx][index] = raw_obs[agent_idx]
            self.actor_new_state_memory[agent_idx][index] = raw_obs_[agent_idx]
            self.actor_action_memory[agent_idx][index] = action[agent_idx]
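        #the listing stops here; the remainder of store_transition would presumably be
        #the critic-side writes and the counter update (a hedged completion, see the full code linked below)
        self.state_memory[index] = state
        self.new_state_memory[index] = state_
        self.reward_memory[index] = reward
        self.terminal_memory[index] = done
        self.mem_cntr += 1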
To be continued; for the full code, see the PyTorch version of MADDPG.
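The sampling side of the buffer is not shown above. For orientation, here is a minimal sketch of what sampling typically looks like for this buffer layout (the method names sample_buffer and ready are assumptions, not confirmed by the video):

    def sample_buffer(self):
        #sample uniformly, without replacement, from the filled part of the buffer
        max_mem = min(self.mem_cntr, self.mem_size)
        batch = np.random.choice(max_mem, self.batch_size, replace=False)

        states = self.state_memory[batch]
        states_ = self.new_state_memory[batch]
        rewards = self.reward_memory[batch]
        terminal = self.terminal_memory[batch]

        actor_states = []
        actor_new_states = []
        actions = []
        for agent_idx in range(self.n_agents):
            actor_states.append(self.actor_state_memory[agent_idx][batch])
            actor_new_states.append(self.actor_new_state_memory[agent_idx][batch])
            actions.append(self.actor_action_memory[agent_idx][batch])

        return actor_states, states, actions, rewards, actor_new_states, states_, terminal

    def ready(self):
        #only allow learning once a full batch has been stored
        return self.mem_cntr >= self.batch_size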