CartPole control with a TensorFlow-based policy network

#%%
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

import numpy as np
import tensorflow as tf

import gym
env = gym.make('CartPole-v0')

env.reset()
random_episodes = 0
reward_sum = 0
while random_episodes < 10:
    env.render()
    observation, reward, done, _ = env.step(np.random.randint(0,2))
    reward_sum += reward
    if done:
        random_episodes += 1
        print("Reward for this episode was:",reward_sum)
        reward_sum = 0
        env.reset()
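# In this run the random policy keeps the pole up for only about 20 steps per
# episode on average (see the per-episode rewards in the console output below),
# which is the baseline the policy network has to beat.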
        
# hyperparameters
H = 50 # number of hidden layer neurons
batch_size = 25 # every how many episodes to do a param update?
learning_rate = 1e-1 # feel free to play with this to train faster or more stably.
gamma = 0.99 # discount factor for reward

D = 4 # input dimensionality        


tf.reset_default_graph()

#This defines the network as it goes from taking an observation of the environment to
#giving a probability of choosing the action of moving left or right.
observations = tf.placeholder(tf.float32, [None,D] , name="input_x")
W1 = tf.get_variable("W1", shape=[D, H],
           initializer=tf.contrib.layers.xavier_initializer())
layer1 = tf.nn.relu(tf.matmul(observations,W1))
W2 = tf.get_variable("W2", shape=[H, 1],
           initializer=tf.contrib.layers.xavier_initializer())
score = tf.matmul(layer1,W2)
probability = tf.nn.sigmoid(score)
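# The network maps the 4-dimensional observation through a 50-unit ReLU hidden
# layer to a single sigmoid output, interpreted as the probability of choosing
# action 1; action 0 is chosen with the complementary probability.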

#From here we define the parts of the network needed for learning a good policy.
tvars = tf.trainable_variables()
input_y = tf.placeholder(tf.float32,[None,1], name="input_y")
advantages = tf.placeholder(tf.float32,name="reward_signal")

# The loss function. This sends the weights in the direction of making actions 
# that gave good advantage (reward over time) more likely, and actions that didn't less likely.
loglik = tf.log(input_y*(input_y - probability) + (1 - input_y)*(input_y + probability))
loss = -tf.reduce_mean(loglik * advantages) 
newGrads = tf.gradients(loss,tvars)
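# Note on the loglik expression: input_y is 1 when action 0 was taken and 0 when
# action 1 was taken (the "fake label" below), while probability is P(action=1).
# Substituting gives log(1 - probability) for action 0 and log(probability) for
# action 1, i.e. the log-probability of the action actually taken, so the loss is
# the usual policy-gradient objective weighted by the advantage.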

# Once we have collected a series of gradients from multiple episodes, we apply them.
# We don't just apply gradients after every episode in order to account for noise in the reward signal.
adam = tf.train.AdamOptimizer(learning_rate=learning_rate) # Our optimizer
W1Grad = tf.placeholder(tf.float32,name="batch_grad1") # Placeholders to send the final gradients through when we update.
W2Grad = tf.placeholder(tf.float32,name="batch_grad2")
batchGrad = [W1Grad,W2Grad]
updateGrads = adam.apply_gradients(zip(batchGrad,tvars))
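# zip(batchGrad, tvars) pairs each gradient placeholder with the variable it
# updates (W1Grad -> W1, W2Grad -> W2), so running updateGrads applies a single
# Adam step using the gradients accumulated over a whole batch of episodes.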


def discount_rewards(r):
    """ take 1D float array of rewards and compute discounted reward """
    discounted_r = np.zeros_like(r)
    running_add = 0
    for t in reversed(range(r.size)):
        running_add = running_add * gamma + r[t]
        discounted_r[t] = running_add
    return discounted_r
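# Worked example: with gamma = 0.99 and r = np.array([1., 1., 1.]), the backward
# loop returns [1 + 0.99 + 0.99**2, 1 + 0.99, 1] = [2.9701, 1.99, 1.0], so each
# step is credited with its own reward plus the discounted rewards that follow it.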
    
    
    
xs,ys,drs = [],[],[]
#running_reward = None
reward_sum = 0
episode_number = 1
total_episodes = 10000
init = tf.global_variables_initializer()

# Launch the graph
with tf.Session() as sess:
    rendering = False
    sess.run(init)
    observation = env.reset() # Obtain an initial observation of the environment

    # Reset the gradient placeholder. We will collect gradients in 
    # gradBuffer until we are ready to update our policy network. 
    gradBuffer = sess.run(tvars)
    for ix,grad in enumerate(gradBuffer):
        gradBuffer[ix] = grad * 0
    
    while episode_number <= total_episodes:
        
        # Rendering the environment slows things down, 
        # so let's only look at it once our agent is doing a good job.
        if reward_sum/batch_size > 100 or rendering:
            env.render()
            rendering = True
            
        # Make sure the observation is in a shape the network can handle.
        x = np.reshape(observation,[1,D])
        
        # Run the policy network and get an action to take. 
        tfprob = sess.run(probability,feed_dict={observations: x})
        action = 1 if np.random.uniform() < tfprob else 0
        
        xs.append(x) # observation
        y = 1 if action == 0 else 0 # a "fake label"
        ys.append(y)
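        # y is 1 when action 0 was taken and 0 when action 1 was taken, matching
        # the encoding the loglik expression above expects.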

        # step the environment and get new measurements
        observation, reward, done, info = env.step(action)
        reward_sum += reward

        drs.append(reward) # record reward (has to be done after we call step() to get reward for previous action)

        if done: 
            episode_number += 1
            # stack together all inputs, hidden states, action gradients, and rewards for this episode
            epx = np.vstack(xs)
            epy = np.vstack(ys)
            epr = np.vstack(drs)
            xs,ys,drs = [],[],[] # reset array memory

            # compute the discounted reward backwards through time
            discounted_epr = discount_rewards(epr)
            # size the rewards to be unit normal (helps control the gradient estimator variance)
            discounted_epr -= np.mean(discounted_epr)
            discounted_epr /= np.std(discounted_epr)
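            # After normalization the advantages have zero mean and unit variance within
            # the episode, so actions taken shortly before a failure receive negative
            # advantage and are made less likely by the update.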
            
            # Get the gradient for this episode, and save it in the gradBuffer
            tGrad = sess.run(newGrads,feed_dict={observations: epx, input_y: epy, advantages: discounted_epr})
            for ix,grad in enumerate(tGrad):
                gradBuffer[ix] += grad
                
            # If we have completed enough episodes, then update the policy network with our gradients.
            if episode_number % batch_size == 0: 
                sess.run(updateGrads,feed_dict={W1Grad: gradBuffer[0],W2Grad:gradBuffer[1]})
                for ix,grad in enumerate(gradBuffer):
                    gradBuffer[ix] = grad * 0
                
                # Give a summary of how well our network is doing for each batch of episodes.
                #running_reward = reward_sum if running_reward is None else running_reward * 0.99 + reward_sum * 0.01
                print('Average reward for episode %d : %f.' % (episode_number,reward_sum/batch_size))
                
                if reward_sum/batch_size > 200: 
                    print("Task solved in",episode_number,'episodes!')
                    break
                    
                reward_sum = 0
            
            observation = env.reset()
        
   

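Console output from a sample run (Python 3.6 on Windows, TensorFlow with GPU support on a GeForce GTX 1060):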
C:\Users\Administrator\AppData\Local\Programs\Python\Python36\python.exe D:/Deeplearning/projects/tensorflow_projects/8_2_PolicyNetwork.py
C:\Users\Administrator\AppData\Local\Programs\Python\Python36\lib\site-packages\tensorflow\python\framework\dtypes.py:469: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
  _np_qint8 = np.dtype([("qint8", np.int8, 1)])
C:\Users\Administrator\AppData\Local\Programs\Python\Python36\lib\site-packages\tensorflow\python\framework\dtypes.py:470: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
  _np_quint8 = np.dtype([("quint8", np.uint8, 1)])
C:\Users\Administrator\AppData\Local\Programs\Python\Python36\lib\site-packages\tensorflow\python\framework\dtypes.py:471: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
  _np_qint16 = np.dtype([("qint16", np.int16, 1)])
C:\Users\Administrator\AppData\Local\Programs\Python\Python36\lib\site-packages\tensorflow\python\framework\dtypes.py:472: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
  _np_quint16 = np.dtype([("quint16", np.uint16, 1)])
C:\Users\Administrator\AppData\Local\Programs\Python\Python36\lib\site-packages\tensorflow\python\framework\dtypes.py:473: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
  _np_qint32 = np.dtype([("qint32", np.int32, 1)])
C:\Users\Administrator\AppData\Local\Programs\Python\Python36\lib\site-packages\tensorflow\python\framework\dtypes.py:476: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
  np_resource = np.dtype([("resource", np.ubyte, 1)])
Reward for this episode was: 22.0
Reward for this episode was: 26.0
Reward for this episode was: 22.0
Reward for this episode was: 13.0
Reward for this episode was: 14.0
Reward for this episode was: 37.0
Reward for this episode was: 16.0
Reward for this episode was: 13.0
Reward for this episode was: 38.0
Reward for this episode was: 17.0
2020-05-12 17:10:47.554601: I C:\tf_jenkins\home\workspace\rel-win\M\windows-gpu\PY\36\tensorflow\core\platform\cpu_feature_guard.cc:137] Your CPU supports instructions that this TensorFlow binary was not compiled to use: AVX AVX2
2020-05-12 17:10:47.656601: I C:\tf_jenkins\home\workspace\rel-win\M\windows-gpu\PY\36\tensorflow\core\common_runtime\gpu\gpu_device.cc:1030] Found device 0 with properties: 
name: GeForce GTX 1060 6GB major: 6 minor: 1 memoryClockRate(GHz): 1.7085
pciBusID: 0000:01:00.0
totalMemory: 6.00GiB freeMemory: 5.70GiB
2020-05-12 17:10:47.656601: I C:\tf_jenkins\home\workspace\rel-win\M\windows-gpu\PY\36\tensorflow\core\common_runtime\gpu\gpu_device.cc:1120] Creating TensorFlow device (/device:GPU:0) -> (device: 0, name: GeForce GTX 1060 6GB, pci bus id: 0000:01:00.0, compute capability: 6.1)
Average reward for episode 25 : 22.280000.
Average reward for episode 50 : 44.600000.
Average reward for episode 75 : 59.920000.
Average reward for episode 100 : 82.200000.
Average reward for episode 125 : 126.520000.
Average reward for episode 150 : 180.000000.
Average reward for episode 175 : 198.240000.
Average reward for episode 200 : 187.080000.
Average reward for episode 225 : 170.600000.
Average reward for episode 250 : 162.960000.
Average reward for episode 275 : 135.800000.
Average reward for episode 300 : 119.000000.
Average reward for episode 325 : 103.480000.
Average reward for episode 350 : 85.800000.
Average reward for episode 375 : 84.280000.
Average reward for episode 400 : 88.720000.
Average reward for episode 425 : 93.520000.
Average reward for episode 450 : 79.920000.
Average reward for episode 475 : 73.360000.
Average reward for episode 500 : 72.200000.
Average reward for episode 525 : 63.280000.
Average reward for episode 550 : 73.720000.
Average reward for episode 575 : 68.840000.
Average reward for episode 600 : 74.200000.
Average reward for episode 625 : 82.520000.
Average reward for episode 650 : 94.760000.
Average reward for episode 675 : 93.480000.
Average reward for episode 700 : 90.440000.
Average reward for episode 725 : 103.360000.
Average reward for episode 750 : 105.720000.
Average reward for episode 775 : 105.280000.
Average reward for episode 800 : 115.080000.
Average reward for episode 825 : 113.080000.
Average reward for episode 850 : 94.320000.
Average reward for episode 875 : 96.800000.
Average reward for episode 900 : 94.200000.
Average reward for episode 925 : 109.000000.
Average reward for episode 950 : 109.720000.
Average reward for episode 975 : 122.960000.
Average reward for episode 1000 : 98.400000.
Average reward for episode 1025 : 115.200000.
Average reward for episode 1050 : 120.280000.
Average reward for episode 1075 : 107.160000.
Average reward for episode 1100 : 126.640000.
Average reward for episode 1125 : 131.960000.
Average reward for episode 1150 : 132.640000.
Average reward for episode 1175 : 145.000000.
Average reward for episode 1200 : 138.000000.
Average reward for episode 1225 : 141.360000.
Average reward for episode 1250 : 121.240000.
Average reward for episode 1275 : 136.080000.
Average reward for episode 1300 : 153.080000.
Average reward for episode 1325 : 152.600000.
Average reward for episode 1350 : 154.720000.
Average reward for episode 1375 : 151.160000.
Average reward for episode 1400 : 176.800000.
Average reward for episode 1425 : 174.640000.
Average reward for episode 1450 : 172.120000.
Average reward for episode 1475 : 177.920000.
Average reward for episode 1500 : 185.480000.
Average reward for episode 1525 : 189.640000.
Average reward for episode 1550 : 189.080000.
Average reward for episode 1575 : 185.000000.
Average reward for episode 1600 : 188.560000.
Average reward for episode 1625 : 188.280000.
Average reward for episode 1650 : 177.920000.
Average reward for episode 1675 : 196.960000.
Average reward for episode 1700 : 191.560000.
Average reward for episode 1725 : 188.720000.
Average reward for episode 1750 : 194.040000.
Average reward for episode 1775 : 190.680000.
Average reward for episode 1800 : 196.400000.
Average reward for episode 1825 : 186.680000.
Average reward for episode 1850 : 191.200000.
Average reward for episode 1875 : 200.000000.
Average reward for episode 1900 : 190.840000.
Average reward for episode 1925 : 197.440000.
Average reward for episode 1950 : 197.120000.
Average reward for episode 1975 : 193.080000.
Average reward for episode 2000 : 194.320000.
Average reward for episode 2025 : 200.000000.
Average reward for episode 2050 : 195.760000.
Average reward for episode 2075 : 196.200000.
Average reward for episode 2100 : 198.200000.
Average reward for episode 2125 : 200.000000.
Average reward for episode 2150 : 200.000000.
Average reward for episode 2175 : 199.760000.
Average reward for episode 2200 : 200.000000.
Average reward for episode 2225 : 200.000000.
Average reward for episode 2250 : 200.000000.
Average reward for episode 2275 : 200.000000.
Average reward for episode 2300 : 199.840000.
Average reward for episode 2325 : 200.000000.
Average reward for episode 2350 : 199.600000.
Average reward for episode 2375 : 200.000000.
Average reward for episode 2400 : 200.000000.
Average reward for episode 2425 : 199.040000.
Average reward for episode 2450 : 200.000000.
Average reward for episode 2475 : 200.000000.
Average reward for episode 2500 : 200.000000.
Average reward for episode 2525 : 197.960000.
Average reward for episode 2550 : 198.800000.
Average reward for episode 2575 : 196.120000.
Average reward for episode 2600 : 200.000000.
Average reward for episode 2625 : 197.560000.
Average reward for episode 2650 : 198.880000.
Average reward for episode 2675 : 199.880000.
Average reward for episode 2700 : 200.000000.
Average reward for episode 2725 : 199.800000.
Average reward for episode 2750 : 200.000000.
Average reward for episode 2775 : 199.240000.
Average reward for episode 2800 : 200.000000.
Average reward for episode 2825 : 197.960000.
Average reward for episode 2850 : 200.000000.
Average reward for episode 2875 : 199.160000.
Average reward for episode 2900 : 200.000000.
Average reward for episode 2925 : 198.560000.
Average reward for episode 2950 : 199.800000.
Average reward for episode 2975 : 197.880000.
Average reward for episode 3000 : 195.600000.
Average reward for episode 3025 : 188.440000.
Average reward for episode 3050 : 176.920000.
Average reward for episode 3075 : 173.560000.
Average reward for episode 3100 : 160.000000.
Average reward for episode 3125 : 151.040000.
Average reward for episode 3150 : 138.680000.
Average reward for episode 3175 : 124.160000.
Average reward for episode 3200 : 149.240000.
Average reward for episode 3225 : 157.920000.
Average reward for episode 3250 : 149.040000.
Average reward for episode 3275 : 154.720000.
Average reward for episode 3300 : 153.280000.
Average reward for episode 3325 : 148.120000.
Average reward for episode 3350 : 172.880000.
Average reward for episode 3375 : 157.760000.
Average reward for episode 3400 : 166.080000.
Average reward for episode 3425 : 177.240000.
Average reward for episode 3450 : 168.240000.
Average reward for episode 3475 : 180.600000.
Average reward for episode 3500 : 188.080000.
Average reward for episode 3525 : 176.400000.
Average reward for episode 3550 : 190.120000.
Average reward for episode 3575 : 187.520000.
Average reward for episode 3600 : 190.440000.
Average reward for episode 3625 : 184.600000.
Average reward for episode 3650 : 176.840000.
Average reward for episode 3675 : 191.720000.
Average reward for episode 3700 : 172.920000.
Average reward for episode 3725 : 190.880000.

Process finished with exit code -1

 
