首先介绍一下数据集:
Hand Gesture Recognition Database(手势识别数据库)是一个用于手势识别研究的公开数据集。该数据集由马德里理工大学(Universidad Politécnica de Madrid)的GTI图像处理研究组创建,旨在为研究人员提供可用于训练和测试机器学习算法的手势图像。
该数据集包含约20000张手部姿势的近红外图像,这些图像由10位参与者(5男5女)使用Leap Motion传感器采集。这些手势图像涵盖10种不同的手势(与下文代码中 num_classes = 10 相对应)。
该数据集还提供了详细的注释信息,包括每个手势的类别标签、手势图像的文件名以及手势图像的时间戳。
此外,该数据集还提供了基于SIFT(尺度不变特征变换)算法的手势描述符。这些描述符可以用于比较不同手势之间的相似性,并且可以帮助研究人员分析手势识别算法的性能。
下载链接:https://www.kaggle.com/datasets/gti-upm/leapgestrecog
开发环境:TensorFlow_gpu-2.4.0,numpy==1.19.2(TensorFlow版本和numpy的版本也是有些对应关系在的) Flask Pillow python3.8
基本的CUDA 和CUDNN我就不再多说,很多大佬的相关教程可以自行搜索
数据清洗我也就不过多阐述,若实在有需要可以联系我
直接贴代码(划分训练集)
import os
import random
import shutil
from shutil import copy2
def data_set_split(src_data_folder, target_data_folder, train_scale=0.5, val_scale=0.2, test_scale=0.3):
    '''
    Read the source data folder (one sub-folder per class) and copy its images
    into train/val/test folders under the target folder, split per class
    according to the given ratios.

    Target layout: target_data_folder/{train,val,test}/<class_name>/.

    :param src_data_folder: source root, e.g. E:/biye/gogogo/note_book/torch_note/data/utils_test/data_split/src_data
    :param target_data_folder: target root, e.g. E:/biye/gogogo/note_book/torch_note/data/utils_test/data_split/target_data
    :param train_scale: fraction of each class assigned to the train split
    :param val_scale: fraction of each class assigned to the val split
    :param test_scale: nominal test fraction (reporting only — the test split
        simply receives whatever remains after train and val)
    :return: None
    '''
    print("开始数据集划分")
    # Only sub-directories are classes; ignore stray files in the source root.
    class_names = [d for d in os.listdir(src_data_folder)
                   if os.path.isdir(os.path.join(src_data_folder, d))]
    # Create train/val/test folders, each with one sub-folder per class.
    # makedirs(exist_ok=True) also creates target_data_folder itself if it
    # does not exist yet (os.mkdir would raise in that case).
    for split_name in ('train', 'val', 'test'):
        for class_name in class_names:
            os.makedirs(os.path.join(target_data_folder, split_name, class_name),
                        exist_ok=True)
    # Split each class independently so every class keeps the same ratios.
    for class_name in class_names:
        current_class_data_path = os.path.join(src_data_folder, class_name)
        current_all_data = os.listdir(current_class_data_path)
        current_data_length = len(current_all_data)
        current_data_index_list = list(range(current_data_length))
        random.shuffle(current_data_index_list)
        train_folder = os.path.join(target_data_folder, 'train', class_name)
        val_folder = os.path.join(target_data_folder, 'val', class_name)
        test_folder = os.path.join(target_data_folder, 'test', class_name)
        # Shuffled positions in [0, train_stop) go to train, [train_stop,
        # val_stop) to val, the rest to test.  Strict '<' fixes the original
        # '<=' off-by-one, which pushed one extra image into train (and val)
        # whenever the boundary landed on an exact integer.
        train_stop_flag = current_data_length * train_scale
        val_stop_flag = current_data_length * (train_scale + val_scale)
        train_num = 0
        val_num = 0
        test_num = 0
        for current_idx, i in enumerate(current_data_index_list):
            src_img_path = os.path.join(current_class_data_path, current_all_data[i])
            if current_idx < train_stop_flag:
                copy2(src_img_path, train_folder)
                train_num += 1
            elif current_idx < val_stop_flag:
                copy2(src_img_path, val_folder)
                val_num += 1
            else:
                copy2(src_img_path, test_folder)
                test_num += 1
        # Per-class summary.
        print("*********************************{}*************************************".format(class_name))
        print(
            "{}类按照{}:{}:{}的比例划分完成,一共{}张图片".format(class_name, train_scale, val_scale, test_scale, current_data_length))
        print("训练集{}:{}张".format(train_folder, train_num))
        print("验证集{}:{}张".format(val_folder, val_num))
        print("测试集{}:{}张".format(test_folder, test_num))
if __name__ == '__main__':
    # Machine-specific source / destination roots for the split.
    source_root = r"D:\2023\cvsx\date\new_date"
    destination_root = r"D:\2023\cvsx\date\new_dates"
    data_set_split(source_root, destination_root)
训练模型:
import tensorflow as tf
from tensorflow.keras import layers, models
import numpy as np
import os
# Ask TensorFlow to register XLA devices (harmless no-op if XLA is unavailable).
os.environ['TF_XLA_FLAGS'] = '--tf_xla_enable_xla_devices'
# Dataset roots produced by the train/val/test split script above.
train_data_path = "D:/2023/cvsx/date/new_dates/train"
test_data_path = "D:/2023/cvsx/date/new_dates/test"
val_data_path = "D:/2023/cvsx/date/new_dates/val"
# Hyper-parameters.
img_size = (64, 64)   # every image is resized to 64x64 on load
num_classes = 10      # number of gesture classes (sub-folders per split)
batch_size = 53
epochs = 25
# Load the three splits.  Labels are inferred from sub-folder names and
# emitted as integer class indices, hence the sparse loss below.
train_data = tf.keras.preprocessing.image_dataset_from_directory(
    train_data_path,
    image_size=img_size,
    batch_size=batch_size)
test_data = tf.keras.preprocessing.image_dataset_from_directory(
    test_data_path,
    image_size=img_size,
    batch_size=batch_size)
val_data = tf.keras.preprocessing.image_dataset_from_directory(
    val_data_path,
    image_size=img_size,
    batch_size=batch_size)
# Build a small CNN.  BUGFIX: in a Sequential model, input_shape is only
# honored on the FIRST layer; the original passed it to the second layer
# (Conv2D), where Keras silently ignores it.  It is declared on Rescaling
# instead so the model is built with an explicit input shape.
model = models.Sequential([
    layers.experimental.preprocessing.Rescaling(
        1./255, input_shape=(img_size[0], img_size[1], 3)),
    layers.Conv2D(32, (3, 3), activation='relu'),
    layers.MaxPooling2D((2, 2)),
    layers.Conv2D(64, (3, 3), activation='relu'),
    layers.MaxPooling2D((2, 2)),
    layers.Conv2D(64, (3, 3), activation='relu'),
    layers.Flatten(),
    layers.Dense(64, activation='relu'),
    layers.Dense(num_classes)  # raw logits; the loss applies softmax itself
])
# Compile: sparse (integer-label) cross-entropy over logits.
model.compile(optimizer='adam',
              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
              metrics=['accuracy'])
# Train with per-epoch validation.
history = model.fit(train_data, epochs=epochs, validation_data=val_data)
# Final evaluation on the held-out test split.
test_loss, test_acc = model.evaluate(test_data, verbose=2)
print('\nTest Accuracy:', test_acc)
# Persist the trained model in HDF5 format.
model.save('hand_gesture_model.h5')