First Steps with TensorFlow
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
# Train / test / validation splits: 55000, 10000, and 5000 examples.
print(mnist.train.images.shape, mnist.train.labels.shape)
print(mnist.test.images.shape, mnist.test.labels.shape)
print(mnist.validation.images.shape, mnist.validation.labels.shape)
import tensorflow as tf
sess = tf.InteractiveSession()
x = tf.placeholder(tf.float32, [None, 784])            # flattened 28x28 inputs
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))
y = tf.nn.softmax(tf.matmul(x, W) + b)                 # softmax regression
y_ = tf.placeholder(tf.float32, [None, 10])            # one-hot labels
cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)
tf.global_variables_initializer().run()
for i in range(1000):
    batch_xs, batch_ys = mnist.train.next_batch(100)
    train_step.run({x: batch_xs, y_: batch_ys})
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
print(accuracy.eval({x: mnist.test.images, y_: mnist.test.labels}))
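An optional aside (my addition, not part of the original notes): taking log(y) of an explicit softmax can underflow when the softmax saturates; TF 1.x's tf.nn.softmax_cross_entropy_with_logits fuses the two steps and is numerically safer. A minimal sketch using the same x, W, b, y_ as above:

# Numerically safer loss sketch: compute cross-entropy from raw logits.
logits = tf.matmul(x, W) + b
stable_cross_entropy = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=logits))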
Autoencoder
import numpy as np
import sklearn.preprocessing as prep
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

def xavier_init(fan_in, fan_out, constant=1):
    # Xavier/Glorot uniform initialization: bounds are +-sqrt(6 / (fan_in + fan_out)).
    low = -constant * np.sqrt(6.0 / (fan_in + fan_out))
    high = constant * np.sqrt(6.0 / (fan_in + fan_out))
    return tf.random_uniform((fan_in, fan_out), minval=low, maxval=high, dtype=tf.float32)
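The bound sqrt(6/(fan_in+fan_out)) is chosen so activation variance stays roughly constant across layers (Glorot & Bengio). A quick sanity-check sketch (my addition):

# Sampled weights should stay within the Xavier bound, sqrt(6/984) ~= 0.0781 for 784->200.
with tf.Session() as s:
    w = s.run(xavier_init(784, 200))
print(w.shape, abs(w).max())   # (784, 200), at most ~0.0781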
class AdditiveGaussianNoiseAutoencoder(object):
    def __init__(self, n_input, n_hidden, transfer_function=tf.nn.softplus,
                 optimizer=tf.train.AdamOptimizer(), scale=0.1):
        self.n_input = n_input
        self.n_hidden = n_hidden
        self.transfer = transfer_function
        self.scale = tf.placeholder(tf.float32)    # noise level, fed at run time
        self.training_scale = scale
        network_weights = self._initialize_weights()
        self.weights = network_weights
        self.x = tf.placeholder(tf.float32, [None, self.n_input])
        # Encoder: corrupt the input with additive Gaussian noise, then project + softplus.
        # Note: the placeholder self.scale must be used here (not the Python constant),
        # otherwise the noise level fed through feed_dict below would have no effect.
        self.hidden = self.transfer(tf.add(tf.matmul(
            self.x + self.scale * tf.random_normal((n_input,)),
            self.weights['w1']), self.weights['b1']))
        # Decoder: linear reconstruction of the input.
        self.reconstruction = tf.add(tf.matmul(self.hidden, self.weights['w2']), self.weights['b2'])
        # Squared-error reconstruction cost.
        self.cost = 0.5 * tf.reduce_sum(tf.pow(tf.subtract(self.reconstruction, self.x), 2.0))
        self.optimizer = optimizer.minimize(self.cost)
        init = tf.global_variables_initializer()
        self.sess = tf.Session()
        self.sess.run(init)

    def _initialize_weights(self):
        all_weights = dict()
        all_weights['w1'] = tf.Variable(xavier_init(self.n_input, self.n_hidden))
        all_weights['b1'] = tf.Variable(tf.zeros([self.n_hidden], dtype=tf.float32))
        all_weights['w2'] = tf.Variable(tf.zeros([self.n_hidden, self.n_input], dtype=tf.float32))
        all_weights['b2'] = tf.Variable(tf.zeros([self.n_input], dtype=tf.float32))
        return all_weights

    def partial_fit(self, X):
        # One training step on a batch; returns the batch cost.
        cost, opt = self.sess.run((self.cost, self.optimizer),
                                  feed_dict={self.x: X, self.scale: self.training_scale})
        return cost

    def calc_total_cost(self, X):
        return self.sess.run(self.cost, feed_dict={self.x: X, self.scale: self.training_scale})

    def transform(self, X):
        # Hidden-layer activations: the learned features.
        return self.sess.run(self.hidden, feed_dict={self.x: X, self.scale: self.training_scale})

    def generate(self, hidden=None):
        # Decode from a given (or random) hidden representation.
        if hidden is None:
            hidden = np.random.normal(size=(1, self.n_hidden))
        return self.sess.run(self.reconstruction, feed_dict={self.hidden: hidden})

    def reconstruct(self, X):
        # Full pass: transform + generate.
        return self.sess.run(self.reconstruction,
                             feed_dict={self.x: X, self.scale: self.training_scale})

    def getWeights(self):
        return self.sess.run(self.weights['w1'])

    def getBiases(self):
        return self.sess.run(self.weights['b1'])
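A quick smoke test of the class before the MNIST run (my sketch; the sizes match those used below):

# An untrained model should still produce shape-preserving reconstructions.
ae = AdditiveGaussianNoiseAutoencoder(n_input=784, n_hidden=200)
dummy = np.random.normal(size=(2, 784)).astype(np.float32)
print(ae.reconstruct(dummy).shape)   # expected: (2, 784)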
mnist = input_data.read_data_sets('MNIST_data/', one_hot=True)

def standard_scale(X_train, X_test):
    # Fit the scaler on training data only, then apply the same transform to both sets.
    preprocessor = prep.StandardScaler().fit(X_train)
    X_train = preprocessor.transform(X_train)
    X_test = preprocessor.transform(X_test)
    return X_train, X_test

def get_random_block_from_data(data, batch_size):
    # Sample a random contiguous block; used by the training loop below.
    start_index = np.random.randint(0, len(data) - batch_size)
    return data[start_index:(start_index + batch_size)]

X_train, X_test = standard_scale(mnist.train.images, mnist.test.images)
n_samples = int(mnist.train.num_examples)
training_epochs = 20
batch_size = 128
display_step = 1
autoencoder = AdditiveGaussianNoiseAutoencoder(n_input=784, n_hidden=200,
                                               transfer_function=tf.nn.softplus,
                                               optimizer=tf.train.AdamOptimizer(learning_rate=0.001),
                                               scale=0.01)
for epoch in range(training_epochs):
    avg_cost = 0.
    total_batch = int(n_samples / batch_size)
    for i in range(total_batch):
        batch_xs = get_random_block_from_data(X_train, batch_size)
        cost = autoencoder.partial_fit(batch_xs)
        avg_cost += cost / n_samples * batch_size
    if epoch % display_step == 0:
        print("Epoch:", '%04d' % (epoch + 1), "cost=", "{:.9f}".format(avg_cost))
print("Total cost: " + str(autoencoder.calc_total_cost(X_test)))
Implementing a Multilayer Perceptron
from tensorflow.examples.tutorials.mnist import input_data
import tensorflow as tf
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
sess = tf.InteractiveSession()
in_units = 784
h1_units = 300
# Hidden weights use truncated normal noise to break symmetry; the output layer can start at zero.
W1 = tf.Variable(tf.truncated_normal([in_units, h1_units], stddev=0.1))
b1 = tf.Variable(tf.zeros([h1_units]))
W2 = tf.Variable(tf.zeros([h1_units, 10]))
b2 = tf.Variable(tf.zeros([10]))
x = tf.placeholder(tf.float32, [None, in_units])
keep_prob = tf.placeholder(tf.float32)        # dropout keep probability
hidden1 = tf.nn.relu(tf.matmul(x, W1) + b1)
hidden1_drop = tf.nn.dropout(hidden1, keep_prob)
y = tf.nn.softmax(tf.matmul(hidden1_drop, W2) + b2)
y_ = tf.placeholder(tf.float32, [None, 10])
cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))
train_step = tf.train.AdagradOptimizer(0.3).minimize(cross_entropy)
tf.global_variables_initializer().run()
for i in range(3000):
    batch_xs, batch_ys = mnist.train.next_batch(100)
    train_step.run({x: batch_xs, y_: batch_ys, keep_prob: 0.75})
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# Dropout is disabled at evaluation time by feeding keep_prob = 1.0.
print(accuracy.eval({x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0}))
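A hedged extra (not in the original notes): tf.confusion_matrix, a standard TF 1.x op, shows which digits the MLP confuses rather than a single accuracy number.

# Per-class confusion matrix on the test set; rows = true labels, columns = predictions.
conf_mat = tf.confusion_matrix(tf.argmax(y_, 1), tf.argmax(y, 1), num_classes=10)
print(conf_mat.eval({x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0}))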
A Simple Convolutional Network
from tensorflow.examples.tutorials.mnist import input_data
import tensorflow as tf
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
sess = tf.InteractiveSession()

def weight_variable(shape):
    # Small truncated-normal noise breaks symmetry.
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial)

def bias_variable(shape):
    # Slightly positive bias helps avoid dead ReLU units.
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)

def conv2d(x, W):
    # Stride-1 convolution with SAME padding keeps the spatial size unchanged.
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')

def max_pool_2x2(x):
    # 2x2 max-pooling with stride 2 halves the spatial size.
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
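Shape bookkeeping for the layers below (my sketch): stride-1 SAME convolutions preserve the spatial size, and each SAME-padded stride-2 pool computes ceil(n/2), so 28 -> 14 -> 7, which is where the 7*7*64 flatten comes from.

# Spatial-size arithmetic for two conv+pool stages.
size = 28
for _ in range(2):
    size = (size + 1) // 2   # SAME-padded stride-2 pool: ceil(n / 2)
print(size)                  # 7, hence the 7*7*64 flatten below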
x = tf.placeholder(tf.float32, [None, 784])
y_ = tf.placeholder(tf.float32, [None, 10])
x_image = tf.reshape(x, [-1, 28, 28, 1])       # back to 2-D images, one channel
W_conv1 = weight_variable([5, 5, 1, 32])       # 5x5 kernels, 1 -> 32 channels
b_conv1 = bias_variable([32])
h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
h_pool1 = max_pool_2x2(h_conv1)                # 28x28 -> 14x14
W_conv2 = weight_variable([5, 5, 32, 64])      # 32 -> 64 channels
b_conv2 = bias_variable([64])
h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
h_pool2 = max_pool_2x2(h_conv2)                # 14x14 -> 7x7
W_fc1 = weight_variable([7 * 7 * 64, 1024])
b_fc1 = bias_variable([1024])
h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64])
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
keep_prob = tf.placeholder(tf.float32)
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
W_fc2 = weight_variable([1024, 10])
b_fc2 = bias_variable([10])
y_conv = tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)
cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y_conv), reduction_indices=[1]))
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
tf.global_variables_initializer().run()
for i in range(20000):
    batch = mnist.train.next_batch(50)
    if i % 100 == 0:
        train_accuracy = accuracy.eval(feed_dict={x: batch[0], y_: batch[1], keep_prob: 1.0})
        print("step %d, training accuracy %g" % (i, train_accuracy))
    train_step.run(feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5})
print("test accuracy %g" % accuracy.eval(feed_dict={x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0}))
Distributed Estimator
import tensorflow as tf
from tensorflow.contrib import layers
from tensorflow.contrib import learn

def my_model(features, target):
    # One-hot encode the 3 iris classes.
    target = tf.one_hot(target, 3, 1, 0)
    # Stack three fully connected layers of 10, 20, and 10 units.
    features = layers.stack(features, layers.fully_connected, [10, 20, 10])
    prediction, loss = tf.contrib.learn.models.logistic_regression_zero_init(features, target)
    train_op = tf.contrib.layers.optimize_loss(
        loss, tf.contrib.framework.get_global_step(),
        optimizer='Adagrad', learning_rate=0.1)
    return {'class': tf.argmax(prediction, 1), 'prob': prediction}, loss, train_op

from sklearn import datasets, cross_validation
iris = datasets.load_iris()
x_train, x_test, y_train, y_test = cross_validation.train_test_split(
    iris.data, iris.target, test_size=0.2, random_state=32)
classifier = learn.Estimator(model_fn=my_model)
classifier.fit(x_train, y_train, steps=700)
predictions = classifier.predict(x_test)
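To score the predictions (a hedged sketch: depending on the contrib.learn version, predict() may return an iterable of per-example dicts matching the model_fn's output; this assumes that form):

import numpy as np
# Compare predicted classes against the held-out labels.
pred_classes = np.array([p['class'] for p in predictions])
print("accuracy:", np.mean(pred_classes == y_test))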