Placeholder post for DQN / Policy Gradient, to be filled in later.

Problems

1. Backpropagation did not work at first: the action probabilities were copied out as numpy lists before the loss was built, which detaches them from the autograd graph. The code below stores the probability tensors themselves, so backward() reaches the policy parameters. (A minimal sketch of the usual log-probability formulation follows this list.)
2. Should the computed loss be summed or averaged? The code below sums over the episode and divides by the step count; see the short note after the code.
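
A minimal sketch, for reference, of how the REINFORCE loss is usually written with log-probabilities. The function name reinforce_loss and the variable names here are placeholders, not part of the original code:

import torch

def reinforce_loss(log_probs, returns):
    # log_probs: list of 0-dim tensors, each from Categorical(prob).log_prob(action)
    # returns:   list of floats, the discounted return G_t of each step
    log_prob_tensor = torch.stack(log_probs)   # shape (T,), still on the autograd graph
    return_tensor = torch.tensor(returns)      # shape (T,)
    # Minimize the negative return-weighted log-likelihood; mean() averages over the episode.
    return -(log_prob_tensor * return_tensor).mean()

Inside the episode loop the inputs would be collected roughly as: m = Categorical(policy(state)); action = m.sample(); log_probs.append(m.log_prob(action)).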

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import gym
from torch.distributions import Categorical

class Policy(nn.Module):
    def __init__(self):
        super(Policy, self).__init__()
        self.l1 = nn.Linear(N_STATES, 32)
        self.l2 = nn.Linear(32, N_ACTIONS)

    def forward(self, x):
        out = F.relu(self.l1(x))
        # Feed the raw logits to softmax; a ReLU here would clamp negative logits to zero.
        out = self.l2(out)
        return F.softmax(out, dim=-1)

def calc_future_reward(reward_list):
    # Turn per-step rewards into discounted returns, in place and back to front:
    # G_t = r_t + gamma * G_{t+1}.
    for i in range(len(reward_list) - 2, -1, -1):
        reward_list[i] += gamma * reward_list[i + 1]
    return reward_list
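
# Worked example (hypothetical numbers, not from the original post), with gamma = 0.9:
# rewards [1.0, 1.0, 1.0] become returns [2.71, 1.9, 1.0], since
# G_2 = 1, G_1 = 1 + 0.9 * 1 = 1.9, G_0 = 1 + 0.9 * 1.9 = 2.71.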

env = gym.make('CartPole-v0')
env = env.unwrapped
N_ACTIONS = env.action_space.n
N_STATES = env.observation_space.shape[0]
print(N_ACTIONS)
print(N_STATES)

lr = 0.01
gamma = 0.9
policy = Policy()
optimizer = optim.Adam(policy.parameters(), lr=lr)
print('\nCollecting experience...')
for i_episode in range(1):
    s = env.reset()

    step_cnt = 0
    a_list = []
    r_list = []
    prob_list = []  # probability tensors, kept on the computation graph

    while True:
        step_cnt += 1
        env.render()

        prob = policy(torch.FloatTensor(s))
        print(prob)
        # Store the tensor itself (not a detached numpy copy) so gradients can flow.
        prob_list.append(prob)
        m = Categorical(prob)
        action = m.sample()

        a = action.item()

        s_, r, done, _ = env.step(a)
        a_list.append(a)
        r_list.append(r)

        if done:
            a_tensor = torch.LongTensor(a_list).view(-1, 1)
            G_list = calc_future_reward(r_list)
            G_tensor = torch.FloatTensor(G_list).view(-1, 1)
            # Stack the stored probability tensors; this keeps the autograd graph,
            # so backward() can reach the policy parameters.
            prob_tensor = torch.stack(prob_list)
            # One-hot mask picks out the log-probability of the action actually taken.
            one_hot = torch.zeros(step_cnt, N_ACTIONS).scatter_(1, a_tensor, 1)
            loss = -1.0 * (prob_tensor.log() * one_hot).sum(dim=1).view(-1, 1) * G_tensor
            # Average over the episode so the update size does not grow with its length.
            mean_loss = loss.sum() / step_cnt
            optimizer.zero_grad()
            mean_loss.backward()
            optimizer.step()
            print(mean_loss.item())
            break
        s = s_
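
On question 2 above: summing versus averaging the per-step loss terms only rescales the gradient. Dividing by step_cnt keeps the update size roughly independent of episode length, whereas a plain sum makes long episodes produce proportionally larger gradients. For comparison, reusing the loss tensor from the code above (which has one entry per step):

total_loss = loss.sum()            # gradient scale grows with episode length
mean_loss = loss.sum() / step_cnt  # what the code uses; equal to loss.mean()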


