# The full dataset has 62 folders (0-9, A-Z, a-z), each with 55 sample images:
# 链接: https://pan.baidu.com/s/18vteIfuOf1XLdg9ZI4_1ZA
# 提取码: gt3d
# My machine cannot train on all 62 classes, so only a subset is used. The
# a-z training subset that the code below actually reads:
# 链接: https://pan.baidu.com/s/14RhEYplLDqAnVKsgqnU-_g
# 提取码: 1r82
import os
import tensorflow as tf
from tensorflow.keras.utils import to_categorical
import numpy as np
import tensorflow.python.keras as keras
from tensorflow.python.keras import layers
import cv2
import matplotlib.pyplot as plt
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D
# Batch-read the images. Each source image is (1200, 900); it is resized to
# (28, 28) on load to reduce memory use.
# Root directory with one sub-folder per character class (a-z), each
# holding that class's sample images.
dir1 = 'Img'
sub_dir_and_files = os.listdir(dir1)
# Keep only the sub-directories (the 26 class folders a-z); skip loose files.
sub_dirs = [x for x in sub_dir_and_files if os.path.isdir(os.path.join(dir1, x))]
print(sub_dirs)
# N: total number of sample images across every class folder.
N = sum(len(os.listdir(os.path.join(dir1, subdir))) for subdir in sub_dirs)
print(N)
#X放所有图片
X = []
#y所有图片的真实内容标签a-z
y = ['']*N
i = 0
#遍历每个文件夹读取图片放入X
for subdir in sub_dirs:
image_files = os.listdir(dir1+'/'+subdir)
#print(image_files)
for image in image_files:
filename = dir1+'/'+subdir+'/'+image
#图片灰度化
img = cv2.imread(filename,cv2.IMREAD_GRAYSCALE)
#缩小图片由(1200,900)到(28,28)
img = cv2.resize(img,(28,28),)
X.append(img)
#label用数字0-25代替字符串a-z
y[i] = ord(subdir)-97
i +=1
#X,y格式list->numpy.ndarray
X = np.array(X)
y = np.array(y)
#划分训练集和测试集
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=10)
# 归一化
X_train = X_train / 255.0
X_test = X_test / 255.0
X_train = X_train.reshape((728, 28, 28, 1))
X_test = X_test.reshape((312, 28, 28, 1))
#将类别向量转换为二进制(只有0和1)的矩阵类型表示(将原有的类别向量转换为独热编码的形式)
y_trainOnehot = to_categorical(y_train)
y_testOnehot = to_categorical(y_test)
# 建立模型
model = Sequential()
# 卷积层
model.add(
Conv2D(
filters=256,
kernel_size=(5, 5),
padding='same', # 保证卷积核大小,不够补零
input_shape=(28, 28, 1),
activation='relu'))
# 池化层
model.add(MaxPool2D(pool_size=(3, 3)))
model.add(Dropout(0.25))
#卷积层
model.add(
Conv2D(filters=256, kernel_size=(5, 5), padding='same', activation='relu'))
model.add(
Conv2D(filters=256, kernel_size=(5, 5), padding='same', activation='relu'))
model.add(MaxPool2D(pool_size=(3, 3)))
model.add(Dropout(0.25))
# 扁平层
model.add(Flatten())
# 全连接层激活函数relu
model.add(Dense(256, activation='relu'))
model.add(Dropout(0.25))
# 全连接层激活函数softmax
model.add(Dense(26, activation='softmax'))
#输出模型各层的参数状况
model.summary()
# 训练模型
model.compile(loss='categorical_crossentropy',optimizer='adam',metrics=['accuracy'])
model.fit(X_train, y_trainOnehot, epochs=20)
# 返回损失和精度
res = model.evaluate(X_test, y_testOnehot)
print(model.metrics_names)
print(res)
# 随机选取图片测试
i = np.random.randint(0, len(X_test))
print(i)
img_random = X_test[i]
plt.imshow(img_random)
plt.show()
# 模型预测
img_random = (np.expand_dims(img_random, 0))
prob = model.predict(img_random)
print("预测值:",chr(np.argmax(prob)+97))
print("真实值:",chr(y_test[i]+97))