# Boston housing price example (波士顿房价案例)
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.utils import shuffle
import pickle as p

# '%matplotlib inline' is IPython/Jupyter magic, not valid Python syntax;
# keep it commented out so the file also runs as a plain script.
# %matplotlib inline
# Load the Boston housing dataset: 12 feature columns followed by the
# target column (column 12).
df = pd.read_csv('D:/data/boston.csv', header=0)
df = np.array(df)

# Min-max scale the 12 feature columns to [0, 1] in one vectorized step
# instead of a Python loop over columns; the label column is left unscaled.
col_min = df[:, :12].min(axis=0)
col_max = df[:, :12].max(axis=0)
df[:, :12] = (df[:, :12] - col_min) / (col_max - col_min)

x_data = df[:, :12]   # features
y_data = df[:, 12]    # target (median house value)
# Graph inputs: a batch of 12-dimensional feature rows and 1-dim targets.
x = tf.placeholder(tf.float32, [None, 12], name='X')
y = tf.placeholder(tf.float32, [None, 1], name='Y')

with tf.name_scope('Model'):
    # Trainable parameters: weight vector (12 -> 1) and a scalar bias.
    w = tf.Variable(tf.random_normal([12, 1], stddev=0.01), name='W')
    b = tf.Variable(1.0, name='b')

    def model(x, w, b):
        """Linear regression hypothesis: y_hat = x @ w + b."""
        return tf.matmul(x, w) + b

    # Prediction node used by the loss and at inference time.
    pred = model(x, w, b)
# Training hyperparameters.
train_epochs = 50
learning_rate = 0.01

with tf.name_scope('LossFunction'):
    # Mean squared error between labels and predictions.
    loss_function = tf.reduce_mean(tf.pow(y - pred, 2))

optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss_function)

sess = tf.Session()
init = tf.global_variables_initializer()
sess.run(init)

# Raw string: '\d' in 'D:\data' is an invalid escape sequence
# (DeprecationWarning today, SyntaxError in future Python versions).
# The resulting path string is unchanged.
logdir = r'D:\data'
sum_loss_op = tf.summary.scalar('loss', loss_function)  # per-step loss summary
merged = tf.summary.merge_all()
writer = tf.summary.FileWriter(logdir, sess.graph)      # for TensorBoard
for epoch in range(train_epochs):
    epoch_loss = 0
    # Stochastic gradient descent: feed one (features, label) pair at a time.
    for xs, ys in zip(x_data, y_data):
        xs = xs.reshape(1, 12)
        ys = ys.reshape(1, 1)
        _, summary_str, loss = sess.run(
            [optimizer, sum_loss_op, loss_function],
            feed_dict={x: xs, y: ys})
        writer.add_summary(summary_str, epoch)
        epoch_loss += loss

    # Reshuffle so each epoch visits the samples in a different order.
    x_data, y_data = shuffle(x_data, y_data)

    # Snapshot current parameters and report the epoch's mean loss.
    b0temp = b.eval(session=sess)
    w0temp = w.eval(session=sess)
    loss_average = epoch_loss / len(y_data)
    print('epoch=', epoch + 1, 'loss=', loss_average, 'b=', b0temp, 'w=', w0temp)
# Sanity-check the trained model on one sample. Note: x_data/y_data were
# shuffled during training, so index 348 is an arbitrary (but consistent)
# feature/label pair, not the original CSV row 348.
n = 348
x_test = x_data[n]
x_test = x_test.reshape(1, 12)
predict = sess.run(pred, feed_dict={x: x_test})
# predict has shape (1, 1); extract the scalar explicitly — formatting a
# size-1 ndarray with %f is deprecated and raises TypeError on modern NumPy.
print('预测值:%f' % predict[0, 0])
target = y_data[n]
print('标签值:%f' % target)