# -*- coding: utf-8 -*-
"""
Created on Mon Oct 30 20:11:38 2017


@author: Administrator
"""
# 06_batch_stochastic_training.py
# Batch and Stochastic Training
#----------------------------------
#
#  This script illustrates two different training methods:
#  batch and stochastic training.  Both use the same simple
#  regression model, y_hat = A * x, with one trainable parameter A.


import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from tensorflow.python.framework import ops
ops.reset_default_graph()


# We will implement a regression example in stochastic and batch training


# Stochastic Training:
# Create session
sess = tf.Session()


# Create data
x_vals = np.random.normal(1, 0.1, 100)
y_vals = np.repeat(10., 100)
x_data = tf.placeholder(shape=[1], dtype=tf.float32)
y_target = tf.placeholder(shape=[1], dtype=tf.float32)


# Create variable (one model parameter = A)
A = tf.Variable(tf.random_normal(shape=[1]))


# Add operation to graph: the model predicts y_hat = A * x
my_output = tf.multiply(x_data, A)


# Add L2 loss operation to graph
loss = tf.square(my_output - y_target)


# Initialize variables
init = tf.global_variables_initializer()
sess.run(init)


# Create Optimizer
my_opt = tf.train.GradientDescentOptimizer(0.02)
train_step = my_opt.minimize(loss)
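

# For intuition: with loss (A*x - y)^2, the gradient with respect to A is
# 2*x*(A*x - y), and minimize() moves A one step against that gradient.
# A plain-Python sketch of the same update (illustrative only; this helper
# is hypothetical and not used by the graph above):
def manual_sgd_step(a, x, y, lr=0.02):
    grad = 2. * x * (a * x - y)   # d/dA of the squared error
    return a - lr * grad

# e.g. manual_sgd_step(0., 1., 10.) returns 0.4: one step from A=0 toward 10.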


loss_stochastic = []
# Run Loop
for i in range(100):
    rand_index = np.random.choice(100)
    rand_x = [x_vals[rand_index]]
    rand_y = [y_vals[rand_index]]
    sess.run(train_step, feed_dict={x_data: rand_x, y_target: rand_y})
    if (i+1)%5==0:
        print('Step #' + str(i+1) + ' A = ' + str(sess.run(A)))
        temp_loss = sess.run(loss, feed_dict={x_data: rand_x, y_target: rand_y})
        print('Loss = ' + str(temp_loss))
        loss_stochastic.append(temp_loss)
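

# Sanity check: the targets are all 10 and x ~ N(1, 0.1), so the
# least-squares optimum is A* = 10*E[x]/E[x^2] = 10/1.01 ≈ 9.9; the trained
# A should therefore land near 10 (printed here as a quick check):
print('Final A (stochastic): ' + str(sess.run(A)))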




# Batch Training:
# Re-initialize graph
ops.reset_default_graph()
sess = tf.Session()


# Declare batch size
batch_size = 20


# Create data
x_vals = np.random.normal(1, 0.1, 100)
y_vals = np.repeat(10., 100)
x_data = tf.placeholder(shape=[None, 1], dtype=tf.float32)
y_target = tf.placeholder(shape=[None, 1], dtype=tf.float32)


# Create variable (one model parameter = A)
A = tf.Variable(tf.random_normal(shape=[1,1]))


# Add operation to graph (tf.matmul now instead of tf.multiply, since
# x_data is a batch of rows and A is a 1x1 matrix)
my_output = tf.matmul(x_data, A)
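
# Quick NumPy shape check of the same contraction (illustrative only, with
# a hypothetical throwaway name): a (batch_size, 1) input times a (1, 1)
# matrix yields one prediction per batch row.
_demo = np.matmul(np.ones((batch_size, 1)), np.ones((1, 1)))
assert _demo.shape == (batch_size, 1)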


# Add L2 loss operation to graph: mean of the squared errors over the batch
loss = tf.reduce_mean(tf.square(my_output - y_target))


# Initialize variables
init = tf.global_variables_initializer()
sess.run(init)


# Create Optimizer
my_opt = tf.train.GradientDescentOptimizer(0.02)
train_step = my_opt.minimize(loss)


loss_batch = []
# Run Loop
for i in range(100):
    rand_index = np.random.choice(100, size=batch_size)
    rand_x = np.transpose([x_vals[rand_index]])
    rand_y = np.transpose([y_vals[rand_index]])
    sess.run(train_step, feed_dict={x_data: rand_x, y_target: rand_y})
    if (i+1)%5==0:
        print('Step #' + str(i+1) + ' A = ' + str(sess.run(A)))
        temp_loss = sess.run(loss, feed_dict={x_data: rand_x, y_target: rand_y})
        print('Loss = ' + str(temp_loss))
        loss_batch.append(temp_loss)
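

# As in the stochastic run, A should finish near 10; batching mainly changes
# how noisy each update is, not where it converges (printed for illustration):
print('Final A (batch): ' + str(sess.run(A)))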


plt.plot(range(0, 100, 5), loss_stochastic, 'b-', label='Stochastic Loss')
plt.plot(range(0, 100, 5), loss_batch, 'r--', label='Batch Loss, size=20')
plt.legend(loc='upper right', prop={'size': 11})

plt.show()
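

In the resulting plot, the batch loss curve is noticeably smoother: each of
its points averages the squared error over 20 examples, while each stochastic
point comes from a single randomly drawn example and therefore jumps around
far more. Sample console output from one run: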





Stochastic training:

Step #5 A = [ 1.51993215]
Loss = [ 73.81479645]
Step #10 A = [ 3.09589863]
Loss = [ 51.48274994]
Step #15 A = [ 4.36641359]
Loss = [ 30.00927925]
Step #20 A = [ 5.40667868]
Loss = [ 22.28380966]
Step #25 A = [ 6.26504755]
Loss = [ 13.09992886]
Step #30 A = [ 6.96014833]
Loss = [ 12.11621857]
Step #35 A = [ 7.51340199]
Loss = [ 4.77271843]
Step #40 A = [ 7.99291182]
Loss = [ 5.10676479]
Step #45 A = [ 8.43360043]
Loss = [ 2.47526813]
Step #50 A = [ 8.72731495]
Loss = [ 4.33505058]
Step #55 A = [ 8.98287106]
Loss = [ 0.48608229]
Step #60 A = [ 9.07200241]
Loss = [ 1.42206562]
Step #65 A = [ 9.23262882]
Loss = [ 2.63616991]
Step #70 A = [ 9.4445343]
Loss = [ 0.06758822]
Step #75 A = [ 9.54142189]
Loss = [ 0.12193415]
Step #80 A = [ 9.65256786]
Loss = [ 5.97040606]
Step #85 A = [ 9.74796867]
Loss = [ 0.7266131]
Step #90 A = [ 9.73230743]
Loss = [ 0.38649938]
Step #95 A = [ 9.71143532]
Loss = [ 0.30241525]
Step #100 A = [ 9.77708054]
Loss = [ 0.0378577]

Batch training:

Step #5 A = [[ 1.56158876]]
Loss = 71.7626
Step #10 A = [[ 3.12683678]]
Loss = 45.9927
Step #15 A = [[ 4.39508104]]
Loss = 32.2249
Step #20 A = [[ 5.42329502]]
Loss = 22.1835
Step #25 A = [[ 6.26193619]]
Loss = 15.2151
Step #30 A = [[ 6.93760014]]
Loss = 8.65269
Step #35 A = [[ 7.49225473]]
Loss = 6.58717
Step #40 A = [[ 7.94064379]]
Loss = 5.59499
Step #45 A = [[ 8.28280163]]
Loss = 2.30695
Step #50 A = [[ 8.58835697]]
Loss = 2.86667
Step #55 A = [[ 8.79910851]]
Loss = 2.92512
Step #60 A = [[ 9.00835991]]
Loss = 1.84608
Step #65 A = [[ 9.16416931]]
Loss = 1.02726
Step #70 A = [[ 9.32333183]]
Loss = 1.70024
Step #75 A = [[ 9.40606117]]
Loss = 1.26627
Step #80 A = [[ 9.48334122]]
Loss = 0.838435
Step #85 A = [[ 9.58439541]]
Loss = 1.25768
Step #90 A = [[ 9.64402676]]
Loss = 0.921342
Step #95 A = [[ 9.69995499]]
Loss = 1.68079
Step #100 A = [[ 9.74739265]]
Loss = 0.733745
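

For readers on TensorFlow 2.x, where tf.Session and placeholders no longer
exist, the same stochastic loop can be written with a GradientTape. The
following is a minimal sketch assuming TF 2.x defaults, not part of the
original script:

import numpy as np
import tensorflow as tf

x_vals = np.random.normal(1, 0.1, 100).astype(np.float32)
y_vals = np.repeat(10., 100).astype(np.float32)
A = tf.Variable(tf.random.normal(shape=[1]))
opt = tf.keras.optimizers.SGD(learning_rate=0.02)
for i in range(100):
    idx = np.random.choice(100)
    x, y = x_vals[idx:idx + 1], y_vals[idx:idx + 1]
    with tf.GradientTape() as tape:
        loss = tf.square(A * x - y)   # same single-example L2 loss
    grads = tape.gradient(loss, [A])
    opt.apply_gradients(zip(grads, [A]))
    if (i + 1) % 5 == 0:
        print('Step #' + str(i + 1) + ' A = ' + str(A.numpy()))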


