Getting Started with Theano: Summary Notes

A simple regression example: fitting a quadratic curve with a two-layer network

from theano import function, shared
import theano.tensor as T
import numpy as np
import matplotlib.pyplot as plt

class Layer(object):
	# a fully connected layer: outputs = activation(inputs . W + b)
	def __init__(self, inputs, in_size, out_size, activation_function=None):
		# weights drawn from N(0, 1); biases start slightly positive
		self.W = shared(np.random.normal(0, 1, (in_size, out_size)))
		self.b = shared(np.zeros((out_size,)) + 0.1)
		self.Wx_plus_b = T.dot(inputs, self.W) + self.b
		self.activation_function = activation_function
		if activation_function is None:
			self.outputs = self.Wx_plus_b
		else:
			self.outputs = self.activation_function(self.Wx_plus_b)

# make up some fake data: y = x^2 - 0.5 plus gaussian noise
x_data = np.linspace(-1, 1, 300)[:, np.newaxis]
noise = np.random.normal(0, 0.05, x_data.shape)
y_data = np.square(x_data) - 0.5 + noise

# show the fake data
# plt.scatter(x_data, y_data)
# plt.show()

# define the symbolic inputs (double-precision matrices)
x = T.dmatrix('x')
y = T.dmatrix('y')

# add layers: 1 input -> 10 hidden units (ReLU) -> 1 linear output
l1 = Layer(x, 1, 10, T.nnet.relu)
l2 = Layer(l1.outputs, 10, 1, None)

# compute the cost (mean squared error)
cost = T.mean(T.square(l2.outputs - y))

# compute the gradients of the cost w.r.t. all four parameters
gW1, gb1, gW2, gb2 = T.grad(cost, [l1.W, l1.b, l2.W, l2.b])

# apply gradient descent
learning_rate = 0.05
train = function(
	inputs=[x, y],
	outputs=cost,
	updates=[(l1.W, l1.W - learning_rate * gW1),
		(l1.b, l1.b - learning_rate * gb1),
		(l2.W, l2.W - learning_rate * gW2),
		(l2.b, l2.b - learning_rate * gb2)])

# predict: a compiled function that runs only the forward pass
predict = function(inputs=[x], outputs=l2.outputs)

# plot the fake data
fig = plt.figure()
ax = fig.add_subplot(111)
ax.scatter(x_data, y_data)
plt.ion()   # interactive mode so that plt.show() does not block
plt.show()



for i in range(1000):
	err = train(x_data, y_data)
	if i % 50 == 0:
		# visualize the current fit
		try:
			ax.lines.remove(lines[0])   # drop the previous fitted line, if any
		except Exception:
			pass
		predict_value = predict(x_data)
		# plot the prediction
		lines = ax.plot(x_data, predict_value, 'r-', lw=5)
		plt.pause(1)
		print(err)   # current training cost
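Once training finishes, the compiled predict function can be queried on fresh inputs. A quick sanity check (not part of the original video; the probe points are arbitrary):

# hypothetical probe: the trained network should approximate y = x^2 - 0.5
x_new = np.linspace(-1, 1, 5)[:, np.newaxis]
print(predict(x_new))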

Result plot: the red fitted curve gradually matching the scattered fake data (image not reproduced here).


Classification: a logistic-regression classifier on randomly generated data

import numpy as np
import theano
import theano.tensor as T
import pickle

def compute_accuracy(y_target, y_predict):
	# fraction of predictions that match the targets
	correct_prediction = np.equal(y_predict, y_target)
	return np.mean(correct_prediction)

rng = np.random

N = 400      # number of samples
feats = 784  # number of input features

# generate a dataset: D = (input features, binary target labels)
D = (rng.randn(N, feats), rng.randint(size=N, low=0, high=2))

x = T.dmatrix('x')
y = T.dvector('y')

# initialize the weights and bias as shared variables
W = theano.shared(rng.randn(feats), name='W')
b = theano.shared(0.1, name='b')

p_1 = T.nnet.sigmoid(T.dot(x, W) + b)               # probability that target = 1
prediction = p_1 > 0.5                              # thresholded class prediction
xent = -y * T.log(p_1) - (1 - y) * T.log(1 - p_1)   # cross-entropy cost
cost = xent.mean() + 0.01 * (W ** 2).sum()          # add an L2 penalty on the weights
gW, gb = T.grad(cost, [W, b])

learning_rate = 0.1
train = theano.function(
	inputs=[x, y],
	outputs=[prediction, xent.mean()],
	updates=((W, W - learning_rate * gW),
		(b, b - learning_rate * gb)))

predict = theano.function(inputs=[x], outputs=prediction)
# training
for i in range(500):
	pred, err = train(D[0], D[1])

	if i % 50 == 0:
		print('cost:', err)   # err is the numeric cost, not the symbolic graph
		print('accuracy:', compute_accuracy(D[1], predict(D[0])))

	# print('target values for D:', D[1])
	# print('prediction on D:', predict(D[0]))
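pickle is imported above but otherwise unused; a minimal sketch of saving and restoring the trained parameters (the file name is made up; get_value/set_value are Theano's standard shared-variable accessors):

# hypothetical save/restore of the trained parameters
with open('logistic_model.pickle', 'wb') as f:
	pickle.dump([W.get_value(), b.get_value()], f)

with open('logistic_model.pickle', 'rb') as f:
	W_value, b_value = pickle.load(f)
W.set_value(W_value)
b.set_value(b_value)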

Regularization L1 and L2

import theano
import theano.tensor as T
from sklearn.datasets import load_boston
import numpy as np
import matplotlib.pyplot as plt
import pickle

# theano.config.compute_test_value = 'warn'

class Layer(object):
	# a fully connected layer: outputs = activation(inputs . W + b)
	def __init__(self, inputs, in_size, out_size, activation_function=None):
		self.W = theano.shared(np.random.normal(0, 1, (in_size, out_size)))
		self.b = theano.shared(np.zeros((out_size,)) + 0.1)
		self.Wx_plus_b = T.dot(inputs, self.W) + self.b
		self.activation_function = activation_function
		if activation_function is None:
			self.outputs = self.Wx_plus_b
		else:
			self.outputs = self.activation_function(self.Wx_plus_b)

def minmax_normalization(data):
	# scale each feature column to the [0, 1] range
	xs_max = np.max(data, axis=0)
	xs_min = np.min(data, axis=0)
	return (data - xs_min) / (xs_max - xs_min)

np.random.seed(100)
boston = load_boston()
x_data = minmax_normalization(boston.data)
y_data = boston.target[:, np.newaxis]

print(x_data.shape)   # (506, 13)

# split: the first 400 samples for training, the remaining 106 for testing
x_train, y_train = x_data[:400], y_data[:400]
x_test, y_test = x_data[400:], y_data[400:]


x = T.dmatrix('x')
y = T.dmatrix('y')

l1 = Layer(x, 13, 50, T.tanh)
l2 = Layer(l1.outputs, 50, 1, None)

# plain cost:
# cost = T.mean(T.square(l2.outputs - y))
# cost with an L2 penalty on the weights:
# cost = T.mean(T.square(l2.outputs - y)) + 0.1 * ((l1.W ** 2).sum() + (l2.W ** 2).sum())
# cost with an L1 penalty on the weights:
cost = T.mean(T.square(l2.outputs - y)) + 0.1 * (T.abs_(l1.W).sum() + T.abs_(l2.W).sum())
gW1, gb1, gW2, gb2 = T.grad(cost, [l1.W, l1.b, l2.W, l2.b])

learning_rate = 0.01

train = theano.function(
	inputs=[x, y],
	updates=[(l1.W, l1.W - learning_rate * gW1),
		(l1.b, l1.b - learning_rate * gb1),
		(l2.W, l2.W - learning_rate * gW2),
		(l2.b, l2.b - learning_rate * gb2)])

compute_cost = theano.function(inputs=[x, y], outputs=cost)

train_err_list = []
test_err_list = []
learning_time = []

for i in range(1000):
	train(x_train, y_train)
	if i % 10 == 0:
		# record training and test cost every 10 iterations
		train_err_list.append(compute_cost(x_train, y_train))
		test_err_list.append(compute_cost(x_test, y_test))
		learning_time.append(i)


# save model: left as a stub in the original; a sketch follows this script

# plot the training and test cost curves
plt.plot(learning_time, train_err_list, 'r-')    # training cost (red, solid)
plt.plot(learning_time, test_err_list, 'b--')    # test cost (blue, dashed)
plt.show()
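For the "save model" stub above, one minimal option (a sketch, not from the original video; the file name is made up) is to pickle the raw parameter arrays, mirroring the classification example:

# hypothetical: persist the four trained parameter arrays
with open('regression_model.pickle', 'wb') as f:
	pickle.dump([p.get_value() for p in [l1.W, l1.b, l2.W, l2.b]], f)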


Excerpted from Morvan Zhou's (莫烦) introductory Theano videos.
