The theory behind neural networks (NeuralNetwork) will be covered in later posts. To build an intuitive understanding first, this post implements a handwritten digit recognition project with a small neural network, using the logistic function as the activation function.
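For reference, the logistic (sigmoid) function used below is logistic(x) = 1 / (1 + e^(-x)), and its derivative, which the weight-update step depends on, is logistic'(x) = logistic(x) * (1 - logistic(x)); the tanh alternative uses tanh'(x) = 1 - tanh(x)^2. The full code is below; the comments are fairly clear, so I won't go through it line by line: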
#!/usr/bin/env python
# coding=utf-8
# A simple feed-forward neural network implemented with NumPy
import numpy as np
from sklearn.datasets import load_digits
from sklearn.metrics import confusion_matrix, classification_report
from sklearn.preprocessing import LabelBinarizer
from sklearn.model_selection import train_test_split  # sklearn.cross_validation was removed in scikit-learn 0.20
# tanh activation function
def tanh(x):
    return np.tanh(x)

# derivative of tanh
def tanh_deriv(x):
    return 1.0 - np.tanh(x) * np.tanh(x)

# logistic (sigmoid) activation function
def logistic(x):
    return 1 / (1 + np.exp(-x))

# derivative of the logistic function
def logistic_derivative(x):
    return logistic(x) * (1 - logistic(x))
# Define the neural network as a class
class NeuralNetwork:
    # __init__ is the constructor; self refers to the current instance, much like Java's this
    def __init__(self, layers, activation='tanh'):
        """
        :param layers: A list containing the number of units in each layer.
        Should be at least two values
        :param activation: The activation function to be used. Can be
        "logistic" or "tanh"
        """
        if activation == 'logistic':
            self.activation = logistic
            self.activation_deriv = logistic_derivative
        elif activation == 'tanh':
            self.activation = tanh
            self.activation_deriv = tanh_deriv
        self.weights = []
        for i in range(1, len(layers) - 1):
            # layers[i] is the current layer, layers[i - 1] the previous one, layers[i + 1] the next one
            self.weights.append((2 * np.random.random((layers[i - 1] + 1, layers[i] + 1)) - 1) * 0.25)
            self.weights.append((2 * np.random.random((layers[i] + 1, layers[i + 1])) - 1) * 0.25)
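        # Note (added for clarity): for the [64, 100, 10] network built further below,
        # this loop creates weights[0] with shape (65, 101) and weights[1] with shape (101, 10).
        # The +1s account for the column of ones appended to the input in fit()/predict()
        # and for one extra hidden unit that plays the role of a bias.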
    # each epoch draws one random sample and performs a single update
    def fit(self, X, y, learning_rate=0.2, epochs=10000):
        X = np.atleast_2d(X)
        # build a matrix of ones with the same number of rows as X and one extra column
        temp = np.ones([X.shape[0], X.shape[1] + 1])
        # every column except the last is copied from X
        temp[:, 0:-1] = X  # adding the bias unit to the input layer
        X = temp
        # convert the label list into an np.array
        y = np.array(y)
        for k in range(epochs):
            i = np.random.randint(X.shape[0])
            # a holds the activations, starting from one randomly drawn sample
            a = [X[i]]
            for l in range(len(self.weights)):  # going forward through the network, layer by layer
                # compute the output of each layer (O_i) using the activation function
                a.append(self.activation(np.dot(a[l], self.weights[l])))
            error = y[i] - a[-1]  # compute the error at the output layer
            deltas = [error * self.activation_deriv(a[-1])]  # delta (updated error) for the output layer
            # start backpropagation
            # walk from layer len(a) - 2 down to layer 1, stepping by -1
            for l in range(len(a) - 2, 0, -1):  # we need to begin at the second to last layer
                # compute the delta for each node, going from the top layer toward the input layer
                deltas.append(deltas[-1].dot(self.weights[l].T) * self.activation_deriv(a[l]))
            deltas.reverse()
            for j in range(len(self.weights)):  # j rather than i, so the sample index is not shadowed
                layer = np.atleast_2d(a[j])
                delta = np.atleast_2d(deltas[j])
                self.weights[j] += learning_rate * layer.T.dot(delta)
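    # Note (added for clarity): the update loop above is plain stochastic gradient
    # descent on the squared error, W_l += learning_rate * a_l^T . delta_l for each
    # layer l; no minus sign is needed because error was computed as y[i] - a[-1].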
    def predict(self, x):
        x = np.array(x)
        # append the bias unit to the input vector
        temp = np.ones(x.shape[0] + 1)
        temp[0:-1] = x
        a = temp
        # forward pass through all layers
        for l in range(0, len(self.weights)):
            a = self.activation(np.dot(a, self.weights[l]))
        return a
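Before moving on to digits, here is a quick sanity check that is not in the original post: the class above can learn the classic XOR function, which a single-layer perceptron cannot represent. A minimal sketch, assuming the NeuralNetwork class defined above is in scope:

# Sanity check (illustrative, not part of the original script): learn XOR
nn = NeuralNetwork([2, 2, 1], 'tanh')
X_xor = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
y_xor = np.array([0, 1, 1, 0])
nn.fit(X_xor, y_xor, epochs=20000)
for sample in [[0, 0], [0, 1], [1, 0], [1, 1]]:
    print(sample, nn.predict(sample))  # the four outputs should be close to 0, 1, 1, 0

With tanh activation and a hidden layer of two units (plus the extra bias-like unit), the predictions typically land near the target values after a few tens of thousands of single-sample updates.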
# handwritten digit recognition
# load the dataset
digits = load_digits()
X = digits.data
y = digits.target
# preprocessing: scale X into the range [0, 1]
X -= X.min()  # normalize the values to bring them into the range 0-1
X /= X.max()
# layer sizes: the 8x8 images give 64 inputs, and the digits 0-9 give 10 output classes;
# the hidden layer size is flexible and often chosen somewhat larger than the input layer, here 100
nn2 = NeuralNetwork([64, 100, 10], 'logistic')
X_train, X_test, y_train, y_test = train_test_split(X, y)
# one-hot encode the labels, e.g. 3 -> [0, 0, 0, 1, 0, 0, 0, 0, 0, 0]
labels_train = LabelBinarizer().fit_transform(y_train)
labels_test = LabelBinarizer().fit_transform(y_test)
# print("start fitting")
nn2.fit(X_train, labels_train, epochs=3000)
predictions = []
for i in range(X_test.shape[0]):
    o = nn2.predict(X_test[i])
    predictions.append(np.argmax(o))  # the predicted class is the output unit with the largest activation
print('confusion_matrix...')
print(confusion_matrix(y_test, predictions))
print('classification_report...')
print(classification_report(y_test, predictions))
The output is as follows:
confusion_matrix...
[[48  0  0  0  0  0  0  0  0  0]
 [ 0 39  0  0  0  1  0  0  2  3]
 [ 0  0 43  0  0  0  0  0  0  0]
 [ 0  0  1 45  0  0  0  1  2  1]
 [ 0  0  0  0 38  0  0  0  2  1]
 [ 0  0  0  0  1 45  1  0  0  2]
 [ 0  1  0  0  0  0 39  0  0  0]
 [ 0  0  0  0  0  0  0 45  3  0]
 [ 0  3  0  0  0  0  0  0 28  2]
 [ 0  0  0  0  2  1  0  0  2 48]]
classification_report...
             precision    recall  f1-score   support

          0       1.00      1.00      1.00        48
          1       0.91      0.87      0.89        45
          2       0.98      1.00      0.99        43
          3       1.00      0.90      0.95        50
          4       0.93      0.93      0.93        41
          5       0.96      0.92      0.94        49
          6       0.97      0.97      0.97        40
          7       0.98      0.94      0.96        48
          8       0.72      0.85      0.78        33
          9       0.84      0.91      0.87        53

avg / total       0.93      0.93      0.93       450
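For comparison, and not part of the original post, scikit-learn ships its own multi-layer perceptron. A roughly equivalent model can be trained in a few lines; the hyperparameters below are illustrative choices rather than tuned values:

# Comparison sketch: the same task with sklearn's built-in MLPClassifier
from sklearn.datasets import load_digits
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from sklearn.neural_network import MLPClassifier

digits = load_digits()
X = digits.data / digits.data.max()  # same [0, 1] scaling as above (the minimum is already 0)
X_train, X_test, y_train, y_test = train_test_split(X, digits.target)
# 100 hidden units with logistic activation, mirroring the hand-rolled network
clf = MLPClassifier(hidden_layer_sizes=(100,), activation='logistic', max_iter=1000)
clf.fit(X_train, y_train)  # integer labels are accepted directly; no LabelBinarizer needed
print(classification_report(y_test, clf.predict(X_test)))

Scores comparable to, or slightly better than, the report above are typical for this dataset, so the hand-written implementation holds up reasonably well.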