Advantages and Disadvantages of the Actor Model

Reference: https://blog.csdn.net/FX677588/article/details/74359823

Advantages:

  At this point the biggest benefit of the Actor model's message-passing style of thread coordination should be clear: it is non-blocking. Many actors can run concurrently, and a sender never has to wait for the callee to finish executing and return a response. Of course, this raises an obvious question: what if the code that follows needs that response immediately? This is in fact one of the Actor model's weaknesses, and it means that before designing a multi-threaded program around this mechanism you should ask whether the problem actually suits it; we return to this in more detail below.
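
  A minimal sketch of this non-blocking style, using only the Python standard library (the Actor class, its queue-based mailbox, and the greeter example are illustrative names for this article, not any particular framework's API):

```python
import queue
import threading
import time


class Actor:
    """A tiny actor: one thread draining one private mailbox, one message at a time."""

    def __init__(self, handler):
        self._mailbox = queue.Queue()
        self._handler = handler
        threading.Thread(target=self._run, daemon=True).start()

    def send(self, message):
        # Non-blocking: the sender only enqueues the message and moves on;
        # it never waits for the receiver to process it.
        self._mailbox.put(message)

    def _run(self):
        while True:
            self._handler(self._mailbox.get())


# A toy greeter actor: both send() calls return immediately, even while
# the actor is still working through earlier messages.
greeter = Actor(lambda name: print(f"hello, {name}"))
greeter.send("world")
greeter.send("actor model")
time.sleep(0.1)  # give the daemon worker thread a moment to drain the mailbox
```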

Disadvantages:

  ① Although each actor runs independently, it can still process only one message at a time even when several arrive at once; messages are handled sequentially from the actor's mailbox (message queue).
  ② Actors run asynchronously and cooperate only through message passing, so an error inside one actor can easily break the whole program, for example by leaving one or more other actors waiting forever for a reply that never arrives. Actor failures must therefore be handled carefully, with a fault-tolerance mechanism that promptly reports the error back to the waiting actors (see the sketch after this list).
  ③ The Actor model can also deadlock when actors end up waiting on each other's messages; a timeout on each request is a common way to break such a deadlock (also illustrated below).
  ④ As the points above suggest, not every application is a good fit for the Actor model. It works well only when the problem can be split into small modules that run largely independently and need to communicate in just a few places. If the threads must interact frequently and their work overlaps heavily, the model's efficiency advantage disappears, and message-based parallel execution loses its point.
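
  For ② and ③, one common mitigation is to put a deadline on every request-reply interaction and to send an explicit error message back to the waiting actor instead of failing silently. Below is a hedged sketch of that idea using the same toy Actor class as above; the ask helper and its timeout parameter are illustrative, not a standard API:

```python
import queue
import threading


class Actor:
    """Same toy actor as above: one thread draining one private mailbox."""

    def __init__(self, handler):
        self._mailbox = queue.Queue()
        self._handler = handler
        threading.Thread(target=self._run, daemon=True).start()

    def send(self, message):
        self._mailbox.put(message)

    def _run(self):
        while True:
            self._handler(self._mailbox.get())


def ask(actor, request, timeout=2.0):
    """Request-reply with a deadline, so the caller never waits forever."""
    reply_box = queue.Queue(maxsize=1)          # one-shot reply channel
    actor.send((request, reply_box))
    try:
        return reply_box.get(timeout=timeout)
    except queue.Empty:
        raise TimeoutError(f"no reply to {request!r} within {timeout}s") from None


def worker_handler(message):
    request, reply_box = message
    try:
        if request == "boom":
            raise ValueError("cannot process this request")
        reply_box.put(("ok", request.upper()))   # normal reply
    except Exception as exc:
        reply_box.put(("error", str(exc)))       # report the failure instead of going silent


worker = Actor(worker_handler)
print(ask(worker, "ping"))   # ('ok', 'PING')
print(ask(worker, "boom"))   # ('error', 'cannot process this request')
```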
