ROS Development Note (8) set up the environment for running the DQN algorithm in ROS. Building on that, this note analyses the algorithm code and makes two simple modifications:
Modification 1: moved the model-saving code within the training loop. Originally, in every episode whose number is a multiple of 10, the model was saved at every single step (i.e., episode_step times per episode); now it is saved only once per such episode (see the sketch after the snippet below).
# if e % 10 == 0:
#     agent.model.save(agent.dirPath + str(e) + '.h5')
#     with open(agent.dirPath + str(e) + '.json', 'w') as outfile:
#         param_keys = ['epsilon']
#         param_values = [agent.epsilon]
#         param_dictionary = dict(zip(param_keys, param_values))
#         json.dump(param_dictionary, outfile)
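For reference, here is a minimal sketch of the new placement. The loop scaffolding (the for loops, env.reset(), the step body) and the attributes agent.load_episode / agent.episode_step are assumed to match the original node and are only shown to indicate where the save block now sits; only the save block itself is taken from the snippet above.
# Sketch only: scaffolding assumed, not copied from the script.
for e in range(agent.load_episode + 1, EPISODES):
    done = False
    state = env.reset()
    for t in range(agent.episode_step):
        # ... select an action, step the environment, store the transition, train ...
        if done:
            break
    # save the model weights and epsilon once at the end of every 10th episode
    if e % 10 == 0:
        agent.model.save(agent.dirPath + str(e) + '.h5')
        with open(agent.dirPath + str(e) + '.json', 'w') as outfile:
            param_keys = ['epsilon']
            param_values = [agent.epsilon]
            param_dictionary = dict(zip(param_keys, param_values))
            json.dump(param_dictionary, outfile)
Saving once per episode avoids rewriting the same .h5 file episode_step times in a row, which noticeably reduces disk I/O during training.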
Modification 2: moved the call to agent.updateTargetModel(). Originally the target network was updated every time done became True; now it is updated once every target_update steps (see the sketch after the snippet below).
# if global_step % agent.target_update == 0:
#     agent.updateTargetModel()
#     rospy.loginfo("UPDATE TARGET NETWORK")
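Again as a sketch (the surrounding loop and agent attributes are assumed; only the target_update check mirrors the snippet above), the per-step placement looks like this: instead of calling agent.updateTargetModel() in the done branch, it is called whenever global_step reaches a multiple of agent.target_update.
# Sketch only: loop scaffolding assumed; the target network is now synchronised
# every agent.target_update environment steps.
global_step = 0
for e in range(agent.load_episode + 1, EPISODES):
    done = False
    state = env.reset()
    for t in range(agent.episode_step):
        # ... select an action, step the environment, train the online network ...
        global_step += 1
        if global_step % agent.target_update == 0:
            agent.updateTargetModel()
            rospy.loginfo("UPDATE TARGET NETWORK")
        if done:
            break
Updating the target network on a fixed step interval is the standard DQN recipe; tying it to episode termination makes the update frequency depend on episode length.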
The results are as follows:
The modified code, with comments, is shown below:
#!/usr/bin/env python
#-*- coding:utf-8 -*-
#################################################################################
# Copyright 2018 ROBOTIS CO., LTD.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#################################################################################
# Original author
# Authors: Gilbert #
import rospy
import os
import json
import numpy as np
import random
import time
import sys
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from collections import deque
from std_msgs.msg import Float32MultiArray
from keras.models import Sequential, load_model
from keras.optimizers import RMSprop
from keras.layers import Dense, Dropout, Activation
# import the Env environment class
from src.turtlebot3_dqn.environment_stage_1 import Env
# maximum number of episodes
EPISODES = 3000
# the reinforcement learning agent
class ReinforceAgent():
# initialization function
def __init__(self, state_size, action_size):
# create the 'result' topic publisher
self.pub_