此项目及使用方法已上传至Github,请移步至https://github.com/choutsugi/handwritten-number-recognizer查看。
1.简述
编程语言:Python3.6
界面设计:PyQt5
识别方法:BP神经网络
训练数据:Mnist训练集
手写数字获取:画图板
2.环境配置
此项目使用python3.6.3版本,请做参考,更高或更低版本可能不适用本项目(无法安装依赖或程序异常退出)。
项目依赖:requirement.txt
numpy==1.12.0
Pillow==4.2.0
PyQt5==5.11.3
PyQt5-sip==4.19.19
PyQt5-stubs==5.11.3.0
sip==4.19.8
3.文件目录
项目文件列表:
| 文件/文件夹 | 用途 |
| --- | --- |
Mnist文件夹 | 手写数字训练集 |
Resource文件夹 | 界面设计相关图像 |
tmp文件夹 | 图像处理临时文件 |
bp_train.py | 神经网络训练 |
Main.py | 主程序 |
MainWidget.py | 交互界面 |
NN.py | 神经网络定义 |
PaintBoard.py | 画板 |
recognize.py | 识别算法 |
weights.npy | 权重 |
4.主程序
Main.py
from MainWidget import MainWidget
from PyQt5.QtWidgets import QApplication
from PyQt5.Qt import *
import sys
def main():
    """Application entry point: create the Qt application and show the main window."""
    app = QApplication(sys.argv)
    app.setWindowIcon(QIcon('./Resource/icon/xmf.ico'))
    mainWidget = MainWidget()  # create the main window
    mainWidget.show()          # display it
    # Fix: use sys.exit() rather than the builtin exit() — the builtin is
    # injected by site.py for interactive use and may be absent when the
    # script runs under -S or is frozen; sys is already imported here.
    sys.exit(app.exec_())      # enter the Qt event loop


if __name__ == '__main__':
    main()
5.神经网络定义
NN.py
import numpy as np
def LabelBinarizer(label):
    """One-hot encode a sequence of digit labels.

    Parameters
    ----------
    label : sequence of int
        Class indices, each in the range [0, 10).

    Returns
    -------
    numpy.ndarray
        Array of shape (len(label), 10) and dtype int32 where row i has a 1
        in column label[i] and 0 elsewhere.
    """
    label = np.asarray(label, dtype=np.intp)
    relabel = np.zeros((len(label), 10), dtype=np.int32)
    # Vectorized fancy-index assignment replaces the original per-row
    # Python loop — one C-level pass instead of len(label) iterations.
    relabel[np.arange(len(label)), label] = 1
    return relabel
def tanh(x):
    """Hyperbolic-tangent activation; thin wrapper around numpy's tanh."""
    return np.tanh(x)
def tanh_deriv(x):
    """Derivative of tanh: 1 - tanh(x)^2.

    Hoists the tanh evaluation so it is computed once instead of twice
    (the original called np.tanh(x) two times per invocation).
    """
    t = np.tanh(x)
    return 1.0 - t * t
def logistic(x):
    """Sigmoid activation: 1 / (1 + e^(-x))."""
    return 1.0 / (1.0 + np.exp(-x))
def logistic_derivative(x):
    """Derivative of the sigmoid: s(x) * (1 - s(x)).

    Computes the sigmoid once (inline) instead of calling logistic() twice
    as the original did — same value, half the exp() work.
    """
    s = 1.0 / (1.0 + np.exp(-x))
    return s * (1.0 - s)
class NeuralNetwork:
def __init__(self, layers, activation='tanh'):
    """Build the network.

    layers: list of layer sizes, e.g. [784, 100, 10].
    activation: 'tanh' (default) or 'logistic'; selects the nonlinearity
    and its derivative used during training.
    """
    if activation == 'logistic':
        self.activation = logistic
        self.activation_deriv = logistic_derivative
    elif activation == 'tanh':
        self.activation = tanh
        self.activation_deriv = tanh_deriv

    # Small random weights in (-0.25, 0.25). The "+ 1" on the input/hidden
    # dimensions reserves an extra unit for the bias term.
    self.weights = []
    scale = 0.25
    for idx in range(1, len(layers) - 1):
        fan_in = layers[idx - 1] + 1
        fan_hidden = layers[idx] + 1
        fan_out = layers[idx + 1]
        self.weights.append((2 * np.random.random((fan_in, fan_hidden)) - 1) * scale)
        self.weights.append((2 * np.random.random((fan_hidden, fan_out)) - 1) * scale)
def fit(self, X, y, learning_rate=0.15, epochs=60000):
    """Train the network with stochastic (one-sample) backpropagation.

    X: training samples, one per row; y: one-hot target rows.
    learning_rate: gradient step size. epochs: number of single-sample
    update iterations. The learned weights are saved to "weights.npy"
    when training finishes.
    """
    # Append a bias column of ones to every input sample.
    X = np.atleast_2d(X)
    biased = np.ones((X.shape[0], X.shape[1] + 1))
    biased[:, :-1] = X
    X = biased
    y = np.array(y)

    for epoch in range(epochs):
        print(epoch + 1)  # progress trace: one line per iteration
        sample = np.random.randint(X.shape[0])

        # Forward pass: keep each layer's activation.
        activations = [X[sample]]
        for w in self.weights:
            activations.append(self.activation(np.dot(activations[-1], w)))

        # Backward pass: output delta, then propagate through hidden layers.
        error = y[sample] - activations[-1]
        deltas = [error * self.activation_deriv(activations[-1])]
        for layer in range(len(activations) - 2, 0, -1):
            deltas.append(deltas[-1].dot(self.weights[layer].T)
                          * self.activation_deriv(activations[layer]))
        deltas.reverse()

        # Gradient step on every weight matrix.
        for w_idx, (act, delta) in enumerate(zip(activations, deltas)):
            self.weights[w_idx] += learning_rate * np.atleast_2d(act).T.dot(np.atleast_2d(delta))

    # Persist the trained weights for later use by predict().
    np.save("weights", self.weights)
def predict(self, x):
self.weights=np.load("weights.npy")
x = np.array(x)
temp = np.ones(x.shape[0]+