训练数据在最下方的网盘链接里。代码格式最好用 ipynb,方便一块一块地运行;当然 py 格式的也可以运行。
首先创建自定义数据集类 ImageDataset,它继承自 torch.utils.data 的 Dataset。
import os
import cv2
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader
# 定义数据集类
# 定义数据集类
class ImageDataset(Dataset):
    """Image classification dataset read from class sub-folders.

    Expects ``folder_path`` to contain one sub-folder per class whose name
    starts with the integer label, e.g. ``0.bandian`` (the numeric prefix is
    parsed as the label). Every image is resized to 64x64 and stored as a
    float32 CHW tensor.
    """

    def __init__(self, folder_path):
        self.dataset = []
        self.labels = []
        for class_name in os.listdir(folder_path):
            class_folder = os.path.join(folder_path, class_name)
            if not os.path.isdir(class_folder):
                continue
            # Folder names look like '0.bandian'; the numeric prefix is the label.
            label = int(class_name.split('.')[0])
            for image_file in os.listdir(class_folder):
                image_path = os.path.join(class_folder, image_file)
                image = cv2.imread(image_path)
                if image is None:
                    # Bug fix: imread returns None for unreadable/non-image
                    # files; skip them instead of crashing inside cv2.resize.
                    continue
                image = cv2.resize(image, (64, 64))
                image = np.transpose(image, (2, 0, 1))  # HWC -> CHW for PyTorch
                self.dataset.append(image)
                self.labels.append(label)
        # Stack once and convert to tensors up front so __getitem__ is cheap.
        self.dataset = torch.tensor(np.array(self.dataset), dtype=torch.float32)
        self.labels = torch.tensor(np.array(self.labels), dtype=torch.long)

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, index):
        return self.dataset[index], self.labels[index]
# Build one dataset per split.
train_dataset = ImageDataset('shuju/train')
test_dataset = ImageDataset('shuju/test')
val_dataset = ImageDataset('shuju/validation')

# Wrap each split in a DataLoader; only the training loader shuffles,
# evaluation order stays fixed.
batch_size = 32
train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False)
val_loader = DataLoader(val_dataset, batch_size=batch_size, shuffle=False)
然后要创建卷积神经网络模型
class CNN(nn.Module):
    """Four conv/pool stages followed by two fully-connected layers.

    Input: (N, 3, 64, 64) float tensor. Each stage halves the spatial size,
    so the last feature map is 128 x 4 x 4; the head maps it to 8 logits.
    """

    def __init__(self):
        super(CNN, self).__init__()

        def _stage(in_ch, out_ch):
            # conv (3x3, same padding) -> ReLU -> 2x2 max-pool (halves H and W)
            return nn.Sequential(
                nn.Conv2d(in_ch, out_ch, kernel_size=3, stride=1, padding=1),
                nn.ReLU(),
                nn.MaxPool2d(kernel_size=2, stride=2),
            )

        # Attribute names are kept so saved state_dicts stay loadable.
        self.conv1 = _stage(3, 32)
        self.conv2 = _stage(32, 64)
        self.conv3 = _stage(64, 128)
        self.conv4 = _stage(128, 128)
        self.fc1 = nn.Linear(128 * 4 * 4, 128)
        self.fc2 = nn.Linear(128, 8)

    def forward(self, x):
        for stage in (self.conv1, self.conv2, self.conv3, self.conv4):
            x = stage(x)
        x = torch.flatten(x, 1)  # (N, 128, 4, 4) -> (N, 2048)
        return self.fc2(self.fc1(x))
# Instantiate the network, its cross-entropy loss, and an Adam optimizer
# over all of its parameters.
model = CNN()
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=1e-3)
之后就开始训练模型了
def calculate_loss(model, data_loader, criterion):
    """Return the average per-sample loss of ``model`` over ``data_loader``.

    Batch losses are weighted by batch size, so the result is exact even when
    the last batch is smaller.
    """
    total_loss = 0.0
    # Bug fix: remember the current mode and restore it afterwards. The
    # original left the model permanently in eval mode, which would silently
    # disable dropout/batchnorm updates in the training loop that calls this
    # between epochs.
    was_training = model.training
    model.eval()  # evaluation mode
    try:
        with torch.no_grad():
            for images, labels in data_loader:
                outputs = model(images)
                loss = criterion(outputs, labels)
                # Weight by batch size for an exact dataset-wide mean.
                total_loss += loss.item() * images.size(0)
    finally:
        model.train(was_training)
    return total_loss / len(data_loader.dataset)
import matplotlib.pyplot as plt

num_epochs = 10
# Checkpoint whenever the final batch loss of an epoch dips below this value.
save_threshold = 3
train_losses = []
test_losses = []

for epoch in range(num_epochs):
    for step, (images, labels) in enumerate(train_loader, start=1):
        optimizer.zero_grad()
        outputs = model(images)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()
        # Progress report every 50 batches.
        if step % 50 == 0:
            print('Epoch [{}/{}], Batch [{}/{}], Loss: {:.4f}'
                  .format(epoch + 1, num_epochs, step, len(train_loader), loss.item()))
    # Epoch-level losses for the curves below.
    train_losses.append(calculate_loss(model, train_loader, criterion))
    test_losses.append(calculate_loss(model, test_loader, criterion))
    if loss.item() < save_threshold:
        torch.save(model.state_dict(), 'model4.pth')

# Plot train/test loss per epoch.
epochs_axis = range(1, num_epochs + 1)
plt.plot(epochs_axis, train_losses, label='Train Loss')
plt.plot(epochs_axis, test_losses, label='Test Loss')
plt.xlabel('轮数')
plt.ylabel('Loss')
plt.title('Loss')
plt.legend()
plt.show()
等模型训练好之后,就可以用训练好的模型来预测验证集数据的分类了。
# Evaluate on the validation split with gradient tracking disabled.
with torch.no_grad():
    correct = 0
    total = 0
    flag = 0
    for images, labels in val_loader:
        outputs = model(images)
        # Predicted class = index of the largest logit.
        _, predicted = torch.max(outputs.data, 1)
        total += labels.size(0)
        if flag == 0:
            # Debug peek at the first batch only: ground truth vs predictions.
            print(labels)
            flag = 1
            print(predicted)
            print((predicted == labels).sum().item())
        correct += (predicted == labels).sum().item()
# Bug fix: this loop runs on val_loader, so the message must say
# "validation images", not "train images".
print('Accuracy of the model on the validation images: {} %'.format(100 * correct / total))
也可以保存或者加载模型
# Persist only the weights (state_dict), not the whole module object.
torch.save(model.state_dict(), 'model.pth')

# Load a model: rebuild the architecture first, then restore a checkpoint
# and switch to inference mode.
model = CNN()
state_dict = torch.load('model4.pth')
model.load_state_dict(state_dict)
model.eval()
也可以一个一个地去测试属于哪个分类
# Classify a single image file.
image_path = "./shuju/test/0.bandian/20210715100920794.bmp"
image = cv2.imread(image_path)
if image is None:
    # Bug fix: imread returns None for a missing/unreadable file; fail fast
    # with a clear message instead of crashing inside cv2.resize.
    raise FileNotFoundError('cannot read image: {}'.format(image_path))
# Same preprocessing as training: 64x64, HWC -> CHW, float32, batch dim.
image = cv2.resize(image, (64, 64))
image = np.transpose(image, (2, 0, 1))
image = torch.tensor(image, dtype=torch.float32)
image = image.unsqueeze(0)
class_names = ['bandian', 'famei', 'faya', 'hongpi', 'qipao', 'youwu', 'zhengchang', 'baiban']
# Run inference without tracking gradients.
with torch.no_grad():
    outputs = model(image)
    _, predicted = torch.max(outputs.data, 1)
class_name = class_names[predicted.item()]
print(class_name)
接下来就是用计算机视觉opencv处理一张大图片中的花生米分类了,这一步也是比较核心的.
将花生米按轮廓截取出来,然后用蓝色填充成 122×220 大小的图片(因为训练时的数据就是这样的),之后对每个截取区域进行推理,并在原图上画框和标注文字。
import sys
import cv2
import numpy as np

# Class names indexed by the model's output label.
class_names = ['bandian', 'famei', 'faya', 'hongpi', 'qipao', 'youwu', 'zhengchang', 'baiban']

def ShowImage(name, image):
    """Show ``image`` in a window titled ``name`` and block until a key is pressed."""
    cv2.imshow(name, image)
    cv2.waitKey(0)
    cv2.destroyAllWindows()

img = cv2.imread('huasheng4.jpg')
if img is None:
    # Bug fix: imread returns None for a missing/unreadable file; without this
    # check the script only crashes later inside cvtColor with a cryptic error.
    print('cannot read huasheng4.jpg')
    sys.exit()
# img=cv2.resize(img,(img.shape[1]//3, img.shape[0]//3))
img2 = img.copy()
image = img.copy()
hsv_image = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
lower_color = np.array([90, 50, 50])     # lower HSV bound of the target (blue) color
upper_color = np.array([130, 255, 255])  # upper HSV bound of the target color
# Mask selecting the blue background pixels.
mask = cv2.inRange(hsv_image, lower_color, upper_color)
# Blacken the background in `image` so thresholding isolates the peanuts;
# `img2` gets a clean pure-blue background and is used for the crops.
replacement_color = (0, 0, 0)
image[mask == 255] = replacement_color
img2[mask == 255] = (255, 0, 0)
# Grayscale + binary threshold to get peanut silhouettes.
image_gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
_, binary_image = cv2.threshold(image_gray, 11, 255, cv2.THRESH_BINARY)
# NOTE: old OpenCV versions return 3 values here, new ones return 2.
contours, hierarchy = cv2.findContours(binary_image, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
# contours: list of point sets, one per outline; hierarchy: nesting information.
area_threshold = 500
# Drop contours too small to be a peanut.
contours2 = [con for con in contours if cv2.contourArea(con) > area_threshold]
if len(contours2) == 0:
    print("没有花生.")
    sys.exit()
print(len(contours) - len(contours2))
print(len(contours2))
target_width, target_height = 122, 220  # canvas size used by the training data
for con in contours2:
    x, y, w, h = cv2.boundingRect(con)
    i = img2[y:y + h, x:x + w]
    height, width, depth = i.shape
    # Bug fix: clamp to >= 0 — a crop larger than the 122x220 canvas would
    # produce negative borders and make cv2.copyMakeBorder raise.
    padding_width = max(target_width - width, 0)
    padding_height = max(target_height - height, 0)
    # Center the crop on the canvas.
    left = padding_width // 2
    right = padding_width - left
    top = padding_height // 2
    bottom = padding_height - top
    # Pad with blue, matching the training images' background.
    i = cv2.copyMakeBorder(i, top, bottom, left, right, cv2.BORDER_CONSTANT, value=[255, 0, 0])
    # Same preprocessing as training: 64x64, HWC -> CHW, float32, batch dim.
    i = cv2.resize(i, (64, 64))
    i = np.transpose(i, (2, 0, 1))
    i = torch.tensor(i, dtype=torch.float32)
    i = i.unsqueeze(0)
    with torch.no_grad():
        outputs = model(i)
        _, predicted = torch.max(outputs.data, 1)
    class_name = class_names[predicted.item()]
    # Draw the bounding box and the predicted class on the original image.
    cv2.rectangle(img, (x, y), (x + w, y + h), (0, 0, 255), 2)
    cv2.putText(img, class_name, (x, y), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (0, 255, 255), 2)
ShowImage('img', img)
下图就是预测出来的结果啦
链接:https://pan.baidu.com/s/1ynZjdeoq3VJ_qvnn6WnDIg?pwd=qwer
提取码:qwer