Reading the MPE Environment Code (MAA2C)

1. Code Architecture

1.1 The env and World objects

make_env.py: contains the code that imports a multi-agent environment as an OpenAI Gym-style object.

Entry point: def make_env(scenario_name, benchmark=False) (see the sketch below).

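For reference, the sketch below shows how make_env is typically implemented in the openai/multiagent-particle-envs repository: it loads the scenario module by name, builds a world, and wraps it in a MultiAgentEnv. This is an abridged reconstruction; the exact set of callbacks passed to MultiAgentEnv can differ slightly between versions.

```python
def make_env(scenario_name, benchmark=False):
    """Create a Gym-style MultiAgentEnv from a scenario name, e.g. make_env('simple_spread')."""
    from multiagent.environment import MultiAgentEnv
    import multiagent.scenarios as scenarios

    # load the scenario module and instantiate its Scenario class
    scenario = scenarios.load(scenario_name + ".py").Scenario()
    # build the world (the simulator state) for this scenario
    world = scenario.make_world()
    # wrap the world in an environment; the scenario supplies the callbacks
    if benchmark:
        env = MultiAgentEnv(world, scenario.reset_world, scenario.reward,
                            scenario.observation, scenario.benchmark_data)
    else:
        env = MultiAgentEnv(world, scenario.reset_world, scenario.reward,
                            scenario.observation)
    return env
```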

core.py: contains the classes for the various objects (Entities, Landmarks, Agents, etc.) used throughout the code.
Classes (an abridged sketch of the state and action classes follows this list):
1. class EntityState(object)  # physical state of all entities
   physical position p_pos and physical velocity p_vel
2. class AgentState(EntityState)  # agent state (including communication and internal/mental state)
   communication utterance c
3. class Action(object)  # an agent's action
   physical action u and communication action c
4. class Entity(object)  # properties and state of a physical-world entity
   movability, collidability, color, velocity, and so on
5. class Landmark(Entity)  # properties of landmark entities
6. class Agent(Entity)  # properties of agent entities
7. class World(object)  # the multi-agent world
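As a quick reference, here is a simplified sketch of the state and action containers, modeled on core.py; only the attributes mentioned above are shown.

```python
class EntityState(object):
    """Physical state shared by all entities."""
    def __init__(self):
        self.p_pos = None   # physical position
        self.p_vel = None   # physical velocity


class AgentState(EntityState):
    """Agent state: physical state plus communication."""
    def __init__(self):
        super(AgentState, self).__init__()
        self.c = None       # communication utterance


class Action(object):
    """An agent's action."""
    def __init__(self):
        self.u = None       # physical (movement) action
        self.c = None       # communication action
```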
rendering.py: displays the agents' behavior on screen.
policy.py: contains code for interactive policies driven by keyboard input.
scenario.py: contains the base scenario object that all scenarios extend.


The world object (an instance of the World class) is the environment simulator: it models the real environment of a particular scenario and is constructed by scenario.make_world(). A world consists of a collection of entities with different roles, which fall into two broad categories:
Landmark entities, whose properties do not change; they correspond to static objects in the physical world, such as landmark, grass, and food objects;
Agent entities, whose properties change continuously; they correspond to moving objects in the physical world, i.e. the agent objects we are interested in.
Agent objects are further divided into two kinds (see the sketch below):
policy_agent: an agent whose actions are controlled by the reinforcement learning algorithm;
scripted_agent: an agent whose actions are controlled by a user-defined script.
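In code, this split is simply a filter on each agent's action_callback attribute. The sketch below follows the World class in core.py, with the physics update and other members omitted for brevity.

```python
class World(object):
    def __init__(self):
        self.agents = []       # Agent entities (dynamic)
        self.landmarks = []    # Landmark entities (static)

    @property
    def entities(self):
        # all entities that make up the world
        return self.agents + self.landmarks

    @property
    def policy_agents(self):
        # agents whose actions are supplied by an external (RL) policy
        return [agent for agent in self.agents if agent.action_callback is None]

    @property
    def scripted_agents(self):
        # agents whose actions come from a scripted action_callback
        return [agent for agent in self.agents if agent.action_callback is not None]
```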


environment.py: contains the code for the environment simulation (interaction physics, the _step() function, and so on).
An env object is made up of env.world and env.agents: world is the environment simulator introduced above, and env.agents holds the agents in the world that are controlled by reinforcement learning, i.e. world.policy_agents.
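This wiring is visible at the top of the MultiAgentEnv constructor; the excerpt below is abridged and the real constructor carries additional optional arguments.

```python
import gym

class MultiAgentEnv(gym.Env):
    def __init__(self, world, reset_callback=None, reward_callback=None,
                 observation_callback=None, info_callback=None,
                 done_callback=None, shared_viewer=True):
        self.world = world
        # only the RL-controlled agents are exposed through the env
        self.agents = self.world.policy_agents
        self.n = len(world.policy_agents)
```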


The three most important methods of the env object are:
def reset(self): resets the environment simulator world;
def step(self, action_n): advances the environment simulator world by one step;
def render(self, mode='human'): renders the current state of the environment simulator.
The following methods can be called to obtain a single agent's observation, reward, info, and done flag:
def _get_obs(self, agent);
def _get_reward(self, agent);
def _get_done(self, agent);
def _get_info(self, agent).
A minimal interaction loop built on these methods is sketched after this list.
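Below is a minimal usage sketch. It assumes the simple_spread scenario and the default discrete action setting of MultiAgentEnv, where each agent's action is passed as a one-hot-style vector over env.action_space[i].n choices; adjust the action format if your configuration differs.

```python
import numpy as np
from make_env import make_env

env = make_env("simple_spread")
obs_n = env.reset()                          # list of per-agent observations

for _ in range(25):
    # one random one-hot action per RL-controlled agent
    act_n = []
    for space in env.action_space:
        a = np.zeros(space.n)
        a[np.random.randint(space.n)] = 1.0
        act_n.append(a)
    # step returns per-agent lists assembled from _get_obs/_get_reward/_get_done/_get_info
    obs_n, reward_n, done_n, info_n = env.step(act_n)
    env.render()
```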

1.2 The scenario object

MPE ships with a number of different particle scenarios, all of which live in the scenarios folder (a minimal sketch of the scenario interface follows the list):
simple, simple_adversary, simple_crypto, simple_push, simple_reference, simple_speaker_listener, simple_spread, simple_tag, simple_world_comm
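Every scenario subclasses the base scenario object and provides the callbacks that MultiAgentEnv calls (make_world, reset_world, reward, observation). The skeleton below is a simplified illustration of that interface, not the code of any particular scenario; the agent/landmark counts and the observation contents are placeholders.

```python
import numpy as np
from multiagent.core import World, Agent, Landmark

class BaseScenario(object):
    def make_world(self):            # build and return a World
        raise NotImplementedError()
    def reset_world(self, world):    # reset the World to an initial state
        raise NotImplementedError()

class Scenario(BaseScenario):
    """Skeleton of a concrete scenario (along the lines of simple_spread)."""
    def make_world(self):
        world = World()
        world.agents = [Agent() for _ in range(3)]
        world.landmarks = [Landmark() for _ in range(3)]
        self.reset_world(world)
        return world

    def reset_world(self, world):
        # scenario-specific initial conditions; here: random positions, zero velocity
        for entity in world.entities:
            entity.state.p_pos = np.random.uniform(-1, +1, world.dim_p)
            entity.state.p_vel = np.zeros(world.dim_p)

    def reward(self, agent, world):
        # per-agent reward (scenario specific)
        return 0.0

    def observation(self, agent, world):
        # per-agent observation vector, e.g. own velocity and position
        return np.concatenate([agent.state.p_vel, agent.state.p_pos])
```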


Below is a TensorFlow implementation of the MADDPG algorithm combined with prioritized experience replay. Note that the environment-interaction part of this listing is written against the legacy Unity ML-Agents API rather than the MPE wrapper described above; comments are included to aid understanding.

```python
import numpy as np
import tensorflow as tf
from collections import deque
from mlagents.envs import UnityEnvironment

# Hyperparameters
BUFFER_SIZE = int(1e6)   # replay buffer size
BATCH_SIZE = 128         # minibatch size
GAMMA = 0.99             # discount factor
TAU = 1e-3               # soft-update rate for the target networks
LR_ACTOR = 1e-3          # actor learning rate
LR_CRITIC = 1e-3         # critic learning rate
UPDATE_EVERY = 2         # environment steps between learning phases
NUM_UPDATES = 10         # gradient updates per learning phase

# Network models
class Actor(tf.keras.Model):
    def __init__(self, state_size, action_size):
        super(Actor, self).__init__()
        self.fc1 = tf.keras.layers.Dense(256, activation='relu')
        self.fc2 = tf.keras.layers.Dense(128, activation='relu')
        self.fc3 = tf.keras.layers.Dense(action_size, activation='tanh')

    def call(self, state):
        x = self.fc1(state)
        x = self.fc2(x)
        return self.fc3(x)

class Critic(tf.keras.Model):
    """Twin-headed centralized critic: two Q estimates over (joint state, joint action)."""
    def __init__(self, state_size, action_size):
        super(Critic, self).__init__()
        self.fc1 = tf.keras.layers.Dense(256, activation='relu')
        self.fc2 = tf.keras.layers.Dense(128, activation='relu')
        self.fc3 = tf.keras.layers.Dense(1, activation=None)
        self.fc4 = tf.keras.layers.Dense(256, activation='relu')
        self.fc5 = tf.keras.layers.Dense(128, activation='relu')
        self.fc6 = tf.keras.layers.Dense(1, activation=None)

    def call(self, state, action):
        xs = tf.concat([state, action], axis=1)
        q1 = self.fc3(self.fc2(self.fc1(xs)))
        q2 = self.fc6(self.fc5(self.fc4(xs)))
        return q1, q2

# Prioritized experience replay
class PrioritizedReplay:
    def __init__(self, buffer_size, batch_size):
        self.buffer_size = buffer_size
        self.batch_size = batch_size
        self.buffer = []
        self.priorities = np.zeros((buffer_size,), dtype=np.float32)
        self.pos = 0
        self.alpha = 0.5
        self.beta = 0.5
        self.beta_increment_per_sampling = 0.001

    def add(self, state, action, reward, next_state, done):
        # new transitions get the current maximum priority
        max_priority = np.max(self.priorities) if self.buffer else 1.0
        experience = (state, action, reward, next_state, done)
        if len(self.buffer) < self.buffer_size:
            self.buffer.append(experience)
        else:
            self.buffer[self.pos] = experience
        self.priorities[self.pos] = max_priority
        self.pos = (self.pos + 1) % self.buffer_size

    def sample(self):
        if len(self.buffer) == self.buffer_size:
            priorities = self.priorities
        else:
            priorities = self.priorities[:self.pos]
        probs = priorities ** self.alpha
        probs /= probs.sum()
        indices = np.random.choice(len(self.buffer), self.batch_size, p=probs)
        samples = [self.buffer[idx] for idx in indices]
        # importance-sampling weights, annealing beta towards 1
        total = len(self.buffer)
        weights = (total * probs[indices]) ** (-self.beta)
        weights /= weights.max()
        self.beta = np.min([1., self.beta + self.beta_increment_per_sampling])
        return indices, samples, weights

    def update_priorities(self, batch_indices, batch_priorities):
        for idx, priority in zip(batch_indices, batch_priorities):
            self.priorities[idx] = priority

# MADDPG agent
class MADDPG:
    def __init__(self, state_size, action_size, num_agents):
        self.state_size = state_size
        self.action_size = action_size
        self.num_agents = num_agents
        self.actors = [Actor(state_size, action_size) for _ in range(num_agents)]
        self.critics = [Critic(state_size * num_agents, action_size * num_agents) for _ in range(num_agents)]
        self.target_actors = [Actor(state_size, action_size) for _ in range(num_agents)]
        self.target_critics = [Critic(state_size * num_agents, action_size * num_agents) for _ in range(num_agents)]
        # build every network once with dummy inputs so its weights exist,
        # then hard-copy the online weights into the target networks
        dummy_state = tf.zeros((1, state_size))
        dummy_joint_state = tf.zeros((1, state_size * num_agents))
        dummy_joint_action = tf.zeros((1, action_size * num_agents))
        for i in range(num_agents):
            self.actors[i](dummy_state)
            self.target_actors[i](dummy_state)
            self.critics[i](dummy_joint_state, dummy_joint_action)
            self.target_critics[i](dummy_joint_state, dummy_joint_action)
            self.target_actors[i].set_weights(self.actors[i].get_weights())
            self.target_critics[i].set_weights(self.critics[i].get_weights())
        self.buffer = PrioritizedReplay(BUFFER_SIZE, BATCH_SIZE)
        self.actor_optimizer = [tf.keras.optimizers.Adam(LR_ACTOR) for _ in range(num_agents)]
        self.critic_optimizer = [tf.keras.optimizers.Adam(LR_CRITIC) for _ in range(num_agents)]
        self.t_step = 0

    def act(self, obs):
        # obs: (num_agents, state_size) -> actions: (num_agents, action_size)
        obs = np.asarray(obs, dtype=np.float32)
        actions = []
        for i in range(self.num_agents):
            action = self.actors[i](obs[i][np.newaxis, :])
            actions.append(action.numpy())
        return np.concatenate(actions, axis=0)

    def step(self, state, action, reward, next_state, done):
        self.buffer.add(state, action, reward, next_state, done)
        self.t_step = (self.t_step + 1) % UPDATE_EVERY
        if self.t_step == 0 and len(self.buffer.buffer) > BATCH_SIZE:
            for _ in range(NUM_UPDATES):
                indices, samples, weights = self.buffer.sample()
                td_errors = self.learn(samples, weights)
                self.update_targets()
                # new priority = |TD error| (plus a small constant to keep it non-zero)
                self.buffer.update_priorities(indices, td_errors + 1e-5)

    def learn(self, samples, weights):
        # arrays shaped (batch, num_agents, ...) / (batch, num_agents)
        states = np.array([s[0] for s in samples], dtype=np.float32)
        actions = np.array([s[1] for s in samples], dtype=np.float32)
        rewards = np.array([s[2] for s in samples], dtype=np.float32)
        next_states = np.array([s[3] for s in samples], dtype=np.float32)
        dones = np.array([s[4] for s in samples], dtype=np.float32)
        w = weights.astype(np.float32)[:, np.newaxis]

        joint_states = states.reshape(-1, self.state_size * self.num_agents)
        joint_next_states = next_states.reshape(-1, self.state_size * self.num_agents)
        joint_actions = actions.reshape(-1, self.action_size * self.num_agents)
        td_error_sum = np.zeros((len(samples), 1), dtype=np.float32)

        for i in range(self.num_agents):
            # target joint action from the target actors
            target_actions = tf.concat(
                [self.target_actors[j](next_states[:, j, :]) for j in range(self.num_agents)], axis=1)
            tq1, tq2 = self.target_critics[i](joint_next_states, target_actions)
            target_q = tf.minimum(tq1, tq2)  # clipped double-Q target
            # q_target = gamma * (1 - done_i) * minQ' + r_i
            q_targets = GAMMA * target_q * (1 - dones[:, i:i + 1]) + rewards[:, i:i + 1]

            # critic update: importance-weighted MSE on both heads
            with tf.GradientTape() as tape:
                q1, q2 = self.critics[i](joint_states, joint_actions)
                critic_loss = (tf.reduce_mean(tf.square(q_targets - q1) * w) +
                               tf.reduce_mean(tf.square(q_targets - q2) * w))
            critic_grads = tape.gradient(critic_loss, self.critics[i].trainable_variables)
            self.critic_optimizer[i].apply_gradients(zip(critic_grads, self.critics[i].trainable_variables))
            td_error_sum += np.abs((q_targets - q1).numpy())

            # actor update: maximize Q_i of the joint action where agent i acts greedily
            with tf.GradientTape() as tape:
                actor_actions = tf.concat(
                    [self.actors[j](states[:, j, :]) for j in range(self.num_agents)], axis=1)
                q1, _ = self.critics[i](joint_states, actor_actions)
                actor_loss = -tf.reduce_mean(q1)
            actor_grads = tape.gradient(actor_loss, self.actors[i].trainable_variables)
            self.actor_optimizer[i].apply_gradients(zip(actor_grads, self.actors[i].trainable_variables))

        # mean |TD error| over agents, one value per sampled transition
        return (td_error_sum / self.num_agents).flatten()

    def update_targets(self):
        # Polyak averaging of online weights into the target networks
        for i in range(self.num_agents):
            self.target_actors[i].set_weights(
                [TAU * w + (1 - TAU) * tw for w, tw in
                 zip(self.actors[i].get_weights(), self.target_actors[i].get_weights())])
            self.target_critics[i].set_weights(
                [TAU * w + (1 - TAU) * tw for w, tw in
                 zip(self.critics[i].get_weights(), self.target_critics[i].get_weights())])

# Environment (legacy Unity ML-Agents API)
env_name = "MPE/3DBall"
env = UnityEnvironment(file_name=env_name)
brain_name = env.brain_names[0]
brain = env.brains[brain_name]
env_info = env.reset()[brain_name]
state_size = env_info.vector_observations.shape[1]
action_size = brain.vector_action_space_size
num_agents = len(env_info.agents)

maddpg = MADDPG(state_size, action_size, num_agents)
scores = []
scores_window = deque(maxlen=100)
for i_episode in range(10000):
    env_info = env.reset()[brain_name]
    obs = env_info.vector_observations
    score = np.zeros(num_agents)
    while True:
        actions = maddpg.act(obs)
        env_info = env.step(actions)[brain_name]
        next_obs = env_info.vector_observations
        rewards = env_info.rewards
        dones = env_info.local_done
        maddpg.step(obs, actions, rewards, next_obs, dones)
        obs = next_obs
        score += rewards
        if np.any(dones):
            break
    scores_window.append(np.max(score))
    scores.append(np.max(score))
    print('\rEpisode {}\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_window)), end="")
    if i_episode % 100 == 0:
        print('\rEpisode {}\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_window)))
    if np.mean(scores_window) >= 0.5:
        print('\nEnvironment solved in {:d} episodes!\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_window)))
        break
env.close()
```
