[Reinforcement Learning from Scratch] DQN-based autonomous driving in highway-env


More code on my Gitee homepage: https://gitee.com/GZHzzz
CSDN blog homepage: https://blog.csdn.net/gzhzzaa

Preface

show me code, no bb

import random
from collections import namedtuple

import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim

# A single replay-buffer entry
Transition = namedtuple('Transition', ('state', 'next_state', 'action', 'reward'))

# Hyperparameters
GAMMA = 0.9                           # discount factor
TARGET_NETWORK_REPLACE_FREQ = 40      # how often the target network is synced with the online network
MEMORY_CAPACITY = 100                 # replay buffer capacity
BATCH_SIZE = 80
LR = 0.01                             # learning rate

class DQNNet(nn.Module):
    # Small two-layer network: flattened 35-dimensional observation -> 5 action values
    def __init__(self):
        super(DQNNet, self).__init__()
        self.linear1 = nn.Linear(35, 35)
        self.linear2 = nn.Linear(35, 5)

    def forward(self, s):
        s = torch.FloatTensor(s)
        s = s.view(s.size(0), 1, 35)    # flatten the (vehicles, features) observation
        s = self.linear1(s)
        s = self.linear2(s)
        return s
                         
class DQN(object):
    def __init__(self):
        # online network and target network share the same architecture
        self.net, self.target_net = DQNNet(), DQNNet()
        self.learn_step_counter = 0
        self.memory = []
        self.position = 0                # write pointer into the replay buffer
        self.capacity = MEMORY_CAPACITY
        self.optimizer = torch.optim.Adam(self.net.parameters(), lr=LR)
        self.loss_func = nn.MSELoss()

    def choose_action(self, s, e):
        # epsilon-greedy: act greedily with probability 1 - e, otherwise pick a random action
        x = np.expand_dims(s, axis=0)
        if np.random.uniform() < 1 - e:
            actions_value = self.net.forward(x)
            action = torch.max(actions_value, -1)[1].data.numpy()
            action = action.max()
        else:
            action = np.random.randint(0, 5)
        return action

    def push_memory(self, s, a, r, s_):
        # store one transition in the circular replay buffer
        if len(self.memory) < self.capacity:
            self.memory.append(None)
        self.memory[self.position] = Transition(torch.unsqueeze(torch.FloatTensor(s), 0),
                                                torch.unsqueeze(torch.FloatTensor(s_), 0),
                                                torch.from_numpy(np.array([a])),
                                                torch.from_numpy(np.array([r], dtype='float32')))
        self.position = (self.position + 1) % self.capacity

    def get_sample(self, batch_size):
        # uniformly sample a mini-batch of stored transitions
        sample = random.sample(self.memory, batch_size)
        return sample
      
    def learn(self):
        # periodically copy the online network's weights into the target network
        if self.learn_step_counter % TARGET_NETWORK_REPLACE_FREQ == 0:
            self.target_net.load_state_dict(self.net.state_dict())
        self.learn_step_counter += 1

        # sample a mini-batch of transitions and regroup them column-wise
        transitions = self.get_sample(BATCH_SIZE)
        batch = Transition(*zip(*transitions))

        b_s = torch.cat(batch.state)
        b_s_ = torch.cat(batch.next_state)
        b_a = torch.cat(batch.action)
        b_r = torch.cat(batch.reward)

        # Q(s, a) for the actions that were actually taken
        q_eval = self.net.forward(b_s).squeeze(1).gather(1, b_a.unsqueeze(1).to(torch.int64))
        # max_a' Q_target(s', a'), detached so no gradients flow into the target network
        q_next = self.target_net.forward(b_s_).detach()
        q_target = b_r + GAMMA * q_next.squeeze(1).max(1)[0].view(BATCH_SIZE, 1).t()
        loss = self.loss_func(q_eval, q_target.t())

        self.optimizer.zero_grad()   # reset gradients
        loss.backward()              # backpropagate the TD error
        self.optimizer.step()        # one optimizer step
        return loss
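For reference, learn() above is the standard DQN update: the online network Q_θ is regressed toward a bootstrapped target built from the periodically synced target network Q_{θ⁻},

$$ y = r + \gamma \max_{a'} Q_{\theta^-}(s', a'), \qquad \mathcal{L}(\theta) = \big( Q_\theta(s, a) - y \big)^2 $$

with γ = GAMMA = 0.9, the squared error computed by loss_func (MSELoss), and θ⁻ refreshed from θ every TARGET_NETWORK_REPLACE_FREQ = 40 learning steps.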


import gym
import highway_env
from matplotlib import pyplot as plt
import numpy as np
import time
import os
os.environ['KMP_DUPLICATE_LIB_OK'] = 'TRUE'   # work around duplicate OpenMP runtime errors on some systems

config = \
    {
    "observation": 
         {
        "type": "Kinematics",
        "vehicles_count": 5,
        "features": ["presence", "x", "y", "vx", "vy", "cos_h", "sin_h"],
        "features_range": 
            {
            "x": [-100, 100],
            "y": [-100, 100],
            "vx": [-20, 20],
            "vy": [-20, 20]
            },
        "absolute": False,
        "order": "sorted"
        },
    "simulation_frequency": 8,  # [Hz]
    "policy_frequency": 2,  # [Hz]
    }
    
env = gym.make("highway-v0")
env.configure(config)
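As a quick sanity check of the observation size (a minimal sketch, assuming the older gym API used throughout this post, where reset() returns only the observation array): the Kinematics observation configured above is a 5 x 7 array (5 observed vehicles, 7 features each), which flattens to the 35-dimensional input that DQNNet expects.

obs = env.reset()
print(obs.shape)                   # expected: (5, 7)
print(obs.reshape(1, -1).shape)    # (1, 35), matching nn.Linear(35, 35) in DQNNet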


dqn=DQN()
count=0

reward=[]
avg_reward=0
all_reward=[]

time_=[]
all_time=[]

collision_his=[]
all_collision=[]
while True:
    done = False    
    start_time=time.time()
    s = env.reset()
    
    while not done:
        e = np.exp(-count / 300)   # exploration probability, decays as training goes on
        a = dqn.choose_action(s, e)
        s_, r, done, info = env.step(a)
        env.render()

        dqn.push_memory(s, a, r, s_)

        # learn once per pass through the replay buffer (when the write pointer reaches 99)
        if (dqn.position != 0) and (dqn.position % 99 == 0):
            loss_=dqn.learn()
            count+=1
            print('trained times:',count)
            if (count%40==0):
                avg_reward=np.mean(reward)
                avg_time=np.mean(time_)
                collision_rate=np.mean(collision_his)
                                
                all_reward.append(avg_reward)
                all_time.append(avg_time)
                all_collision.append(collision_rate)
                                
                # plt.plot(all_reward)
                # plt.show()
                # plt.plot(all_time)
                # plt.show()
                # plt.plot(all_collision)
                # plt.show()
                
                reward=[]
                time_=[]
                collision_his=[]
                
        s = s_
        reward.append(r)      
    
    end_time=time.time()
    episode_time=end_time-start_time
    time_.append(episode_time)
        
    is_collision = 1 if info['crashed'] else 0
    collision_his.append(is_collision)
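After the training loop has been interrupted, the learned policy can be inspected by rolling it out greedily. The sketch below (same gym-API assumption as above) simply passes e = 0 to choose_action so the agent always takes the argmax action.

s = env.reset()
done = False
total_reward = 0.0
while not done:
    a = dqn.choose_action(s, 0)     # e = 0: purely greedy, no random actions
    s, r, done, info = env.step(a)
    total_reward += r
    env.render()
print('greedy episode reward:', total_reward)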

  • All of the code here has been run by me personally, you know! 😝

Results

(result figure)

Final Words

Ten years to sharpen one sword; let's keep encouraging each other!
More code on my Gitee homepage: https://gitee.com/GZHzzz
CSDN blog homepage: https://blog.csdn.net/gzhzzaa

  • Fighting!😎

Classic agent models implemented in PyTorch
Classic reinforcement learning papers

while True:
	Go life


Thanks for your likes and comments! (❁´◡`❁)
