Reinforcement Learning

An actor-critic agent for CartPole-v0, implemented with PaddlePaddle and OpenAI Gym. The full training script is below.

import os
from itertools import count

import gym
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
import paddle.optimizer as optim
from paddle.distribution import Categorical

print(paddle.__version__)

device = paddle.get_device()
env = gym.make("CartPole-v0")

state_size = env.observation_space.shape[0]
action_size = env.action_space.n
lr = 0.001


class Actor(nn.Layer):
    """Policy network: maps a state to a categorical distribution over actions."""

    def __init__(self, state_size, action_size):
        super(Actor, self).__init__()
        self.state_size = state_size
        self.action_size = action_size
        self.linear1 = nn.Linear(self.state_size, 128)
        self.linear2 = nn.Linear(128, 256)
        self.linear3 = nn.Linear(256, self.action_size)

    def forward(self, state):
        output = F.relu(self.linear1(state))
        output = F.relu(self.linear2(output))
        output = self.linear3(output)
        distribution = Categorical(F.softmax(output, axis=-1))
        return distribution


class Critic(nn.Layer):
    """Value network: maps a state to a scalar state-value estimate V(s)."""

    def __init__(self, state_size, action_size):
        super(Critic, self).__init__()
        self.state_size = state_size
        self.action_size = action_size
        self.linear1 = nn.Linear(self.state_size, 128)
        self.linear2 = nn.Linear(128, 256)
        self.linear3 = nn.Linear(256, 1)

    def forward(self, state):
        output = F.relu(self.linear1(state))
        output = F.relu(self.linear2(output))
        value = self.linear3(output)
        return value


def compute_returns(next_value, rewards, masks, gamma=0.99):
    """Discounted returns R_t = r_t + gamma * m_t * R_{t+1}, bootstrapping from
    the critic's value of the final state; the mask zeroes the bootstrap at
    episode end."""
    R = next_value
    returns = []
    for step in reversed(range(len(rewards))):
        R = rewards[step] + gamma * R * masks[step]
        returns.insert(0, R)
    return returns


def trainIters(actor, critic, n_iters):
    optimizerA = optim.Adam(lr, parameters=actor.parameters())
    optimizerC = optim.Adam(lr, parameters=critic.parameters())
    for iter in range(n_iters):
        state = env.reset()
        log_probs = []
        values = []
        rewards = []
        masks = []

        # Roll out one episode, recording log-probs, values, rewards and masks.
        for i in count():
            state = paddle.to_tensor(state, dtype="float32", place=device)
            dist, value = actor(state), critic(state)

            action = dist.sample([1])
            next_state, reward, done, _ = env.step(action.cpu().squeeze(0).numpy())

            log_prob = dist.log_prob(action)
            log_probs.append(log_prob)
            values.append(value)
            rewards.append(paddle.to_tensor([reward], dtype="float32", place=device))
            masks.append(paddle.to_tensor([1 - done], dtype="float32", place=device))

            state = next_state

            if done:
                if iter % 10 == 0:
                    print('Iteration: {}, Score: {}'.format(iter, i))
                break

        # Bootstrap from the value of the last state, then compute returns.
        next_state = paddle.to_tensor(next_state, dtype="float32", place=device)
        next_value = critic(next_state)
        returns = compute_returns(next_value, rewards, masks)

        log_probs = paddle.concat(log_probs)
        returns = paddle.concat(returns).detach()
        values = paddle.concat(values)

        # Advantage A_t = R_t - V(s_t); detached in the actor loss so policy
        # gradients do not flow into the critic.
        advantage = returns - values

        actor_loss = -(log_probs * advantage.detach()).mean()
        critic_loss = advantage.pow(2).mean()

        optimizerA.clear_grad()
        optimizerC.clear_grad()
        actor_loss.backward()
        critic_loss.backward()
        optimizerA.step()
        optimizerC.step()

    paddle.save(actor.state_dict(), 'model/actor.pdparams')
    paddle.save(critic.state_dict(), 'model/critic.pdparams')
    env.close()


if __name__ == '__main__':
    actor = Actor(state_size, action_size)
    if os.path.exists('model/actor.pdparams'):
        actor.set_state_dict(paddle.load('model/actor.pdparams'))
        print('Actor Model loaded')
    critic = Critic(state_size, action_size)
    if os.path.exists('model/critic.pdparams'):
        critic.set_state_dict(paddle.load('model/critic.pdparams'))
        print('Critic Model loaded')
    trainIters(actor, critic, n_iters=201)
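For reference, the update in trainIters implements the standard advantage actor-critic objective. In the notation of the code, with mask m_t (zero at episode end) and gamma = 0.99:

$$
R_t = r_t + \gamma\, m_t\, R_{t+1}, \qquad A_t = R_t - V(s_t)
$$

$$
L_{\text{actor}} = -\frac{1}{T}\sum_{t} \log \pi(a_t \mid s_t)\, A_t, \qquad
L_{\text{critic}} = \frac{1}{T}\sum_{t} A_t^2
$$

where R_T is bootstrapped from the critic's estimate of the last state, and A_t is detached in the actor loss.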
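A small worked example of compute_returns (illustrative numbers, not from the original post): three steps with reward 1, the episode ending at the last step, and a bootstrap value of 0.5.

import paddle

rewards = [paddle.to_tensor([1.0]) for _ in range(3)]
masks = [paddle.to_tensor([1.0]), paddle.to_tensor([1.0]), paddle.to_tensor([0.0])]
next_value = paddle.to_tensor([0.5])

returns = compute_returns(next_value, rewards, masks, gamma=0.99)
# R_2 = 1 + 0.99 * 0.5 * 0 = 1.0     (the mask kills the bootstrap at episode end)
# R_1 = 1 + 0.99 * 1.0     = 1.99
# R_0 = 1 + 0.99 * 1.99    = 2.9701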
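Finally, a minimal inference sketch (an assumption, not part of the original post): it reuses the Actor class, state_size and action_size defined above, assumes the script has already saved 'model/actor.pdparams', and samples actions from the actor's distribution exactly as during training.

import gym
import paddle

env = gym.make("CartPole-v0")
actor = Actor(state_size, action_size)
actor.set_state_dict(paddle.load('model/actor.pdparams'))
actor.eval()

state = env.reset()
score = 0.0
done = False
while not done:
    state = paddle.to_tensor(state, dtype="float32")
    dist = actor(state)
    action = dist.sample([1])
    state, reward, done, _ = env.step(action.cpu().squeeze(0).numpy())
    score += reward
env.close()
print('Episode score: {}'.format(score))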