[Untitled]

This post covers image-recognition tasks with TensorFlow and OpenCV, including training a convolutional neural network on the MNIST handwritten-digit dataset and doing face recognition. It walks through model construction, data preprocessing, training, and evaluation, and applies dropout to improve the model's generalization.
import numpy as np
import tensorflow as tf

# Model hyperparameters
img_row, img_cols = 28, 28
num_class = 10
batch_size = 128
epochs = 10
# Number of filters in each convolutional layer
conv1_filters = 32
conv2_filters = 64
kernel_size = (3, 3)
# Pooling window size
pool_size = (2, 2)
dropout_ratio = 0.25
# Load the MNIST dataset
mnist = tf.keras.datasets.mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
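Nothing else happens in this first snippet, so it helps to sanity-check what load_data() returned; a minimal sketch (the shapes below are the standard MNIST split):

print(x_train.shape, y_train.shape)  # (60000, 28, 28) (60000,)
print(x_test.shape, y_test.shape)    # (10000, 28, 28) (10000,)
print(x_train.dtype)                 # uint8, pixel values 0-255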

import cv2
# Face detection with Haar cascades
# Read the input image
img = cv2.imread("./test3.png")
def face_demo():
    # OpenCV reads images in BGR order, so convert with COLOR_BGR2GRAY (not RGB)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    face_img = cv2.CascadeClassifier(r'F:\w11_opencv\venv\Lib\site-packages\cv2\data\haarcascade_frontalface_alt.xml')
    eye_img = cv2.CascadeClassifier(r'F:\w11_opencv\venv\Lib\site-packages\cv2\data\haarcascade_eye.xml')
    # detectMultiScale parameters:
    # image - CV_8U matrix containing the image to search.
    # scaleFactor - how much the image shrinks at each pyramid step (must be > 1).
    #   Lower values catch more objects but take longer; 1.05-1.4 works well
    #   (1.05 shrinks the image by 5% per step).
    # minNeighbors - minimum number of neighbouring detections a candidate rectangle
    #   must collect to be kept. Larger values give fewer but higher-quality hits;
    #   3-6 works well.
    # flags - same meaning as in the legacy cvHaarDetectObjects; unused by new cascades.
    # minSize - smallest possible object size; smaller objects are ignored, e.g. (30, 30).
    # maxSize - largest possible object size; larger objects are ignored, e.g. (50, 50).
    faces = face_img.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5, minSize=(30, 30))
    eyes = eye_img.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=6, maxSize=(50, 50))
    # Draw a red (BGR) rectangle around each detected face and eye
    for x, y, w, h in faces:
        cv2.rectangle(img, (x, y), (x + w, y + h), color=(0, 0, 255), thickness=10)
    for x, y, w, h in eyes:
        cv2.rectangle(img, (x, y), (x + w, y + h), color=(0, 0, 255), thickness=10)
    cv2.imshow("result", img)
face_demo()
# Keep the window open until 'q' is pressed
while True:
    if cv2.waitKey(0)==ord('q'):
        break
cv2.destroyAllWindows()
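The cascade paths above are hard-coded to one machine's virtualenv. The opencv-python wheel bundles the same XML files and exposes their directory as cv2.data.haarcascades, so a portable version of the classifier setup looks like this (a minimal sketch):

import cv2

# cv2.data.haarcascades is the directory containing the bundled cascade files
face_img = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_alt.xml')
eye_img = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_eye.xml')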
import cv2
import face_recognition
import numpy as np

image = face_recognition.load_image_file('F:/88.jpg')
# Resize the image
image = cv2.resize(image, (1000, 800))
# Extract landmark data: model='large' returns every facial feature,
# model='small' returns only the nose tip, left eye, and right eye
face_landmarks_list = face_recognition.face_landmarks(image, face_locations=None, model='large')
print("循环遍历")
for face_landmarks in face_landmarks_list:
    face_s = [
        'chin',
        'left_eyebrow',
        'right_eyebrow',
        'nose_bridge',
        'nose_tip',
        'left_eye',
        'right_eye',
        'top_lip',
        'bottom_lip'
    ]
    # Draw each feature as a polyline
    for landmarks in face_s:
        # polylines requires integer point coordinates
        points = np.array(face_landmarks[landmarks], np.int32)
        points = points.reshape(-1, 1, 2)
        cv2.polylines(image, [points], False, (0, 0, 200), 2)
# face_recognition uses RGB order; reverse the channels to BGR for cv2.imshow
image = image[:, :, ::-1]
cv2.imshow('image', image)
cv2.waitKey(0)
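Landmarks only outline a face; the same library also does recognition proper by reducing each face to a 128-dimensional encoding and comparing distances. A minimal sketch, assuming two hypothetical files known.jpg and unknown.jpg:

import face_recognition

known = face_recognition.load_image_file('known.jpg')      # hypothetical reference photo
unknown = face_recognition.load_image_file('unknown.jpg')  # hypothetical photo to identify
# face_encodings returns one 128-d vector per detected face
known_enc = face_recognition.face_encodings(known)[0]
unknown_enc = face_recognition.face_encodings(unknown)[0]
# compare_faces thresholds the euclidean distance (default tolerance 0.6)
print(face_recognition.compare_faces([known_enc], unknown_enc))  # [True] or [False]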

import tensorflow as tf

import input_data

tf.__version__

mnist = input_data.read_data_sets('MNIST_data/', one_hot=True)

print('Training examples:', mnist.train.num_examples)         # 55000
print('Validation examples:', mnist.validation.num_examples)  # 5000
print('Test examples:', mnist.test.num_examples)              # 10000

print('Training images shape:', mnist.train.images.shape)  # (55000, 784)
print('Training labels shape:', mnist.train.labels.shape)  # (55000, 10)

import matplotlib.pyplot as plt


def plot_images(image):
    # Each flattened 784-vector is reshaped back to 28x28 for display
    plt.imshow(image.reshape(28, 28), cmap='binary')
    plt.show()


plot_images(mnist.train.images[10])  # display the 11th training image

print(mnist.train.labels[10])

# Decode the one-hot labels: print "example index: digit" for each row
for i in range(1, 10001):
    index = 0
    for n in mnist.train.labels[i]:
        if n == 1:
            print(str(i) + ":" + str(index))
        index = index + 1
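The nested loop above just decodes one-hot rows back into digit classes; numpy's argmax does the same in one vectorized call:

import numpy as np

# argmax along axis 1 returns the position of the 1 in each one-hot row
digits = np.argmax(mnist.train.labels[1:10001], axis=1)
print(digits[:10])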



mnist = tf.keras.datasets.mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
# Normalize pixel values to [0, 1]
x_train = x_train / 255.0
x_test = x_test / 255.0
model = tf.keras.models.Sequential([
    tf.keras.layers.Flatten(input_shape=(28, 28)),
    tf.keras.layers.Dense(128, activation='relu'),
    tf.keras.layers.Dropout(0.2),
    tf.keras.layers.Dense(10, activation='softmax')
])
optimizer = tf.keras.optimizers.Adam()
# Compile the model
model.compile(optimizer=optimizer,
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
model.fit(x_train, y_train, epochs=5)
# Evaluate on the test set; returns [loss, accuracy]
model.evaluate(x_test, y_test, verbose=2)
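evaluate only reports aggregate metrics; to see individual predictions, run predict and take the argmax of each softmax row (a minimal sketch):

import numpy as np

probs = model.predict(x_test[:5])  # shape (5, 10), one probability distribution per image
print(np.argmax(probs, axis=1))    # predicted digits
print(y_test[:5])                  # ground-truth labels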




import numpy as np
import tensorflow as tf
from tensorflow.keras import models, layers

img_row, img_cols = 28, 28
num_class = 10
batch_size = 128
epochs = 10
# Number of filters in each convolutional layer
conv1_filters = 32
conv2_filters = 64
kernel_size = (3, 3)
# Pooling window size
pool_size = (2, 2)
dropout_ratio = 0.25
# Load the MNIST dataset and reshape to (N, 28, 28, 1)
mnist = tf.keras.datasets.mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train.reshape(x_train.shape[0], img_row, img_cols, 1)
x_train = x_train.astype('float32')
x_train /= 255.0
x_test = x_test.reshape(x_test.shape[0], img_row, img_cols, 1)
x_test = x_test.astype('float32')
x_test /= 255.0
# One-hot encode the labels
from tensorflow.keras.utils import to_categorical

y_train = to_categorical(y_train, num_class)
y_test = to_categorical(y_test, num_class)
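To see concretely what to_categorical produces: label 3 becomes a one-hot row of length num_class:

print(to_categorical([3], num_class))
# [[0. 0. 0. 1. 0. 0. 0. 0. 0. 0.]]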
# Build the model
model = models.Sequential()
# First convolutional layer (the first layer declares the input shape)
model.add(layers.Conv2D(conv1_filters, kernel_size, activation='relu',
                        input_shape=(img_row, img_cols, 1)))
# Max pooling
model.add(layers.MaxPooling2D(pool_size=pool_size))
model.add(layers.Dropout(dropout_ratio))
# Second convolutional layer
model.add(layers.Conv2D(conv2_filters, kernel_size, activation='relu'))
# Max pooling
model.add(layers.MaxPooling2D(pool_size=pool_size))
model.add(layers.Dropout(dropout_ratio))
# Third convolutional layer
model.add(layers.Conv2D(conv2_filters, kernel_size, activation='relu'))
# Max pooling
model.add(layers.MaxPooling2D(pool_size=pool_size))
model.add(layers.Dropout(dropout_ratio))
# Fully connected layers
model.add(layers.Flatten())
model.add(layers.Dense(128, activation='relu'))
model.add(layers.Dropout(dropout_ratio))
model.add(layers.Dense(num_class, activation='softmax'))
# Compile the model
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
# Train the model
model.fit(x_train,
          y_train,
          epochs=epochs,
          batch_size=batch_size,
          verbose=1,
          validation_data=(x_test, y_test))
# Evaluate the model
score = model.evaluate(x_test, y_test)
print('Test loss', score[0])
print('Test accuracy', score[1])
# Save the model weights
model.save_weights('model_data/model')





# An equivalent dense-only model defined inline; note the final Dense(10) outputs raw logits (no softmax)
inline_model = tf.keras.models.Sequential(
    [tf.keras.layers.Flatten(input_shape=(28, 28)), tf.keras.layers.Dense(128, activation='relu'),
     tf.keras.layers.Dropout(0.2), tf.keras.layers.Dense(10)])
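Because this variant ends in raw logits rather than probabilities, it would be compiled with a from_logits loss; a minimal sketch:

inline_model.compile(optimizer='adam',
                     loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
                     metrics=['accuracy'])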

import numpy as np
import tensorflow as tf
from tensorflow.keras import models, layers
from tensorflow.keras.utils import to_categorical

# Model hyperparameters
img_row, img_cols = 28, 28
num_class = 10
batch_size = 128
epochs = 10
# Number of filters in each convolutional layer
conv1_filters = 32
conv2_filters = 64
kernel_size = (3, 3)
# Pooling window size
pool_size = (2, 2)
dropout_ratio = 0.25
# Load the MNIST dataset
mnist = tf.keras.datasets.mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train.reshape(x_train.shape[0], img_row, img_cols, 1)
x_test = x_test.reshape(x_test.shape[0], img_row, img_cols, 1)
input_shape = (img_row, img_cols, 1)
# Convert image data to float32 and normalize to [0, 1]
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train = x_train / 255.0
x_test = x_test / 255.0
# One-hot encode the labels
y_train = to_categorical(y_train, num_class)
y_test = to_categorical(y_test, num_class)
# Build the model
model = models.Sequential()
# First convolutional layer
model.add(layers.Conv2D(conv1_filters, kernel_size=kernel_size, activation='relu', input_shape=input_shape))
model.add(layers.MaxPooling2D(pool_size=pool_size))
model.add(layers.Dropout(dropout_ratio))
# Second convolutional layer (input_shape is only needed on the first layer)
model.add(layers.Conv2D(conv2_filters, kernel_size=kernel_size, activation='relu'))
model.add(layers.MaxPooling2D(pool_size=pool_size))
model.add(layers.Dropout(dropout_ratio))
# Fully connected layers
model.add(layers.Flatten())
model.add(layers.Dense(128, activation='relu'))
model.add(layers.Dropout(dropout_ratio))
model.add(layers.Dense(num_class, activation='softmax'))
# Compile the model
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
# Train the model
model.fit(x_train, y_train, epochs=epochs, batch_size=batch_size, validation_data=(x_test, y_test))
# Evaluate the model
score = model.evaluate(x_test, y_test, verbose=2)
print('Test loss', score[0])
print('Test accuracy', score[1])
# Save the model weights (TF 2.x)
model.save_weights('model_data/model')
# Under TF 1.x a Saver was used instead:
# saver = tf.train.Saver()
# saver.save(sess, 'model_data/model')
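save_weights stores only the parameters, not the architecture, so restoring means rebuilding the same Sequential stack and then loading; a minimal sketch reusing the model object above:

model.load_weights('model_data/model')
loss, acc = model.evaluate(x_test, y_test, verbose=0)
print('Restored test accuracy:', acc)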
