# LeNet / MNIST training and image-augmentation scripts (article code; sections C, B2, B1, B21 below)

import torch
from torchvision import transforms,datasets
from torch.utils.data import DataLoader
import torch.nn.functional as F
from torch import nn
import torch.optim as optim
#
# Preprocessing: resize to LeNet's 32x32 input, convert to tensor, then
# normalize; Normalize(0.5, 0.5) broadcasts over channels and maps [0,1] -> [-1,1].
transform=transforms.Compose([transforms.Resize((32,32)),
                              transforms.ToTensor(),
                              transforms.Normalize(0.5,0.5) # alternative: transforms.Normalize((0.5,), (0.5,))  # normalize to [-1, 1]
                              ])# alternative MNIST stats: transforms.Normalize(0.1307,0.3081)

num_workers=2  # NOTE(review): defined but unused — the DataLoaders below are built without it
device=("cuda:0" if torch.cuda.is_available() else "cpu")  # prefer GPU when available
epochs=20
# Holders for the best model found by the grid search (main() keeps its own copies)
best_model = None
best_params = {}
# # Full tuning grid (disabled)
# param_grid = {
#     'learning_rate': [0.1, 0.01, 0.001],
#     'batch_size': [32, 64, 128],
#     'optimizer': ['SGD', 'Adam', 'RMSProp']
# }
# Hyperparameter grid actually searched
param_grid = {
    'learning_rate': [0.01],
    'batch_size': [128],
    'optimizer': ['Adam']
}
# Load the local dataset (ImageFolder expects one sub-directory per class)
train_data=datasets.ImageFolder(root='D:/prj/prjend/mnist_images/mnist_images/train',transform=transform)
test_data=datasets.ImageFolder(root='D:/prj/prjend/mnist_images/mnist_images/test',transform=transform)
# 定义 LeNet 网络结构
# LeNet network definition
class LeNet(nn.Module):
    """Classic LeNet-5-style CNN: 3-channel 32x32 input -> 10 class logits."""

    def __init__(self):
        super(LeNet, self).__init__()
        # Feature extractor: two conv layers, each followed by 2x2 max pooling.
        self.conv1 = nn.Conv2d(3, 6, kernel_size=5, stride=1, padding=2)  # 32x32 -> 32x32
        self.pool = nn.MaxPool2d(kernel_size=2, stride=2)                 # halves H and W
        self.conv2 = nn.Conv2d(6, 16, kernel_size=5)                      # 16x16 -> 12x12
        # Classifier head; 16 * 6 * 6 matches the flattened conv output.
        self.fc1 = nn.Linear(16 * 6 * 6, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        """Return raw class logits of shape (batch, 10)."""
        feats = self.pool(torch.relu(self.conv1(x)))
        feats = self.pool(torch.relu(self.conv2(feats)))
        flat = torch.flatten(feats, 1)  # flatten everything but the batch dim
        hidden = torch.relu(self.fc2(torch.relu(self.fc1(flat))))
        return self.fc3(hidden)

# 训练函数
# Training function
def train_model(model, train_loader, criterion, optimizer, device):
    """Run one training epoch; return (mean per-batch loss, accuracy %)."""
    model.train()
    total_loss = 0.0
    n_correct = 0
    n_seen = 0
    for inputs, labels in train_loader:
        inputs = inputs.to(device)
        labels = labels.to(device)

        # Standard step: forward, loss, backward, parameter update.
        optimizer.zero_grad()
        logits = model(inputs)
        batch_loss = criterion(logits, labels)
        batch_loss.backward()
        optimizer.step()

        total_loss += batch_loss.item()
        preds = logits.argmax(dim=1)
        n_seen += labels.size(0)
        n_correct += (preds == labels).sum().item()

    return total_loss / len(train_loader), 100 * n_correct / n_seen
# 测试函数
def test_model(model, test_loader, criterion, device):
    model.eval()
    running_loss = 0.0
    correct = 0
    total = 0
    with torch.no_grad():
        for inputs, labels in test_loader:
            inputs, labels = inputs.to(device), labels.to(device)
            outputs = model(inputs)
            loss = criterion(outputs, labels)

            running_loss += loss.item()
            _, predicted = torch.max(outputs, 1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()

    accuracy = 100 * correct / total
    return running_loss / len(test_loader), accuracy
def main():
    """Grid-search over param_grid; train a fresh LeNet per configuration.

    Saves the state_dict with the highest test accuracy observed across ALL
    epochs and configurations to 'best_lenet_model_new2.pth'.
    """
    best_acc = 0
    for lr in param_grid['learning_rate']:
        for batch_size in param_grid['batch_size']:
            for opt_name in param_grid['optimizer']:
                print(f"Training with LR: {lr}, Batch Size: {batch_size}, Optimizer: {opt_name}")
                # Fresh loaders so batch_size can vary per configuration
                train_loader = DataLoader(train_data, batch_size=batch_size, shuffle=True)
                test_loader = DataLoader(test_data, batch_size=batch_size, shuffle=False)
                # A fresh model per configuration
                model = LeNet().to(device)
                # NOTE(review): opt_name is printed but ignored — Adam is always
                # used (matches the active single-entry grid).
                optimizer = optim.Adam(model.parameters(), lr=lr)
                # Loss function
                criterion = nn.CrossEntropyLoss()

                for epoch in range(epochs):
                    train_loss, train_acc = train_model(model, train_loader, criterion, optimizer, device)
                    test_loss, test_acc = test_model(model, test_loader, criterion, device)
                    print(
                        f"Epoch {epoch + 1}/{epochs}, Train Loss: {train_loss:.4f}, Train Acc: {train_acc:.2f}%, Test Loss: {test_loss:.4f}, Test Acc: {test_acc:.2f}%")

                    # Bug fix: check for a new best EVERY epoch. The original
                    # compared only the final epoch's accuracy, so the saved
                    # "best" checkpoint was really the last-epoch model.
                    if test_acc > best_acc:
                        best_acc = test_acc
                        best_model = model
                        best_params = {'learning_rate': lr, 'batch_size': batch_size, 'optimizer': opt_name}
                        torch.save(best_model.state_dict(), 'best_lenet_model_new2.pth')
                        print(f"New Best Model Saved with Accuracy: {best_acc:.2f}% and Params: {best_params}")

# Run the grid search only when executed as a script.
if __name__ == '__main__':
    main()
# --- End of section C (grid-search training script) ---
# --- Section B2: LeNet training with fixed hyperparameters ---
import torch
from torchvision import transforms,datasets
from torch.utils.data import DataLoader
import torch.nn.functional as F
from torch import nn
import torch.optim as optim
#
# Preprocessing: resize to 32x32, to-tensor, then normalize with the standard
# MNIST statistics (mean 0.1307, std 0.3081, broadcast over channels).
transform=transforms.Compose([transforms.Resize((32,32)),
                              transforms.ToTensor(),
                              transforms.Normalize(0.1307,0.3081)
                              ])
# Training hyperparameters
batch_size=64
num_workers=2  # NOTE(review): unused — the loaders below do not pass num_workers
device=("cuda:0" if torch.cuda.is_available() else "cpu")
epochs=10
best_acc=0 # best test accuracy so far (not updated in this section's main loop)
# Load the local dataset (ImageFolder expects one sub-directory per class)
train_data=datasets.ImageFolder(root='D:/prj/prjend/mnist_images/mnist_images/train',transform=transform)
test_data=datasets.ImageFolder(root='D:/prj/prjend/mnist_images/mnist_images/test',transform=transform)
# Data loaders
train_loader = DataLoader(train_data, batch_size=batch_size, shuffle=True)
test_loader = DataLoader(test_data, batch_size=batch_size, shuffle=False)
# 定义 LeNet 网络结构
# LeNet network definition
class LeNet(nn.Module):
    """LeNet-5 variant mapping a 3-channel 32x32 image to 10 class logits."""

    def __init__(self):
        super(LeNet, self).__init__()
        self.conv1 = nn.Conv2d(3, 6, kernel_size=5, stride=1, padding=2)
        self.pool = nn.MaxPool2d(kernel_size=2, stride=2)
        self.conv2 = nn.Conv2d(6, 16, kernel_size=5)
        # After the conv/pool stack the feature map is 16 channels of 6x6.
        self.fc1 = nn.Linear(16 * 6 * 6, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        """Compute class logits; no softmax (CrossEntropyLoss expects raw logits)."""
        out = self.pool(self.conv1(x).relu())
        out = self.pool(self.conv2(out).relu())
        out = out.reshape(out.size(0), -1)  # flatten per sample
        for fc in (self.fc1, self.fc2):
            out = fc(out).relu()
        return self.fc3(out)
# Initialize the model, optimizer and loss function
model = LeNet().to(device)
# Adam optimizer with its default learning rate (1e-3)
optimizer = optim.Adam(model.parameters())
# Cross-entropy over the 10 class logits
criterion = nn.CrossEntropyLoss()

# 训练函数
# Training function
def train_model(model, train_loader, criterion, optimizer, device):
    """Train `model` for a single epoch.

    Returns:
        (average per-batch loss, accuracy in percent) over the epoch.
    """
    model.train()
    stats = {"loss": 0.0, "correct": 0, "seen": 0}
    for batch in train_loader:
        x, y = (t.to(device) for t in batch)

        optimizer.zero_grad()
        scores = model(x)
        loss = criterion(scores, y)
        loss.backward()
        optimizer.step()

        stats["loss"] += loss.item()
        _, top1 = torch.max(scores, 1)
        stats["correct"] += (top1 == y).sum().item()
        stats["seen"] += y.size(0)

    accuracy = 100 * stats["correct"] / stats["seen"]
    return stats["loss"] / len(train_loader), accuracy
# 测试函数
def test_model(model, test_loader, criterion, device):
    model.eval()
    running_loss = 0.0
    correct = 0
    total = 0
    with torch.no_grad():
        for inputs, labels in test_loader:
            inputs, labels = inputs.to(device), labels.to(device)
            outputs = model(inputs)
            loss = criterion(outputs, labels)

            running_loss += loss.item()
            _, predicted = torch.max(outputs, 1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()

    accuracy = 100 * correct / total
    return running_loss / len(test_loader), accuracy

def main():
    """Run the fixed-hyperparameter training loop for `epochs` epochs."""
    for epoch in range(1, epochs + 1):
        tr_loss, tr_acc = train_model(model, train_loader, criterion, optimizer, device)
        te_loss, te_acc = test_model(model, test_loader, criterion, device)
        print(f"Epoch {epoch}/{epochs}, Train Loss: {tr_loss:.4f}, Train Acc: {tr_acc:.2f}%, Test Loss: {te_loss:.4f}, Test Acc: {te_acc:.2f}%")
  
if __name__ == '__main__':
    main()
    # Bug fix: save only when run as a script. The original torch.save sat at
    # module level, so it executed (and overwrote the checkpoint) on import too.
    torch.save(model.state_dict(), "best_model_new.pth")
# --- Section B1: single-image augmentation demo ---
import torch
import random
from PIL import Image
from torchvision import transforms

import os

# Source image and output directory for the augmented copies
images_path = "D:/prjtest/资源文件/task1/datasets/test/5/gesture_2055.jpg"
output_dir = "./augmentation/"
# Bug fix: create the output directory instead of requiring it to exist
# beforehand (the original crashed with FileNotFoundError if it was missing).
os.makedirs(output_dir, exist_ok=True)
# Load the source image
img = Image.open(images_path)
# Keep a copy of the original for comparison
img.save(output_dir + "origin.png")
# Resize
img_resize = transforms.Resize((150, 150))(img)
img_resize.save(output_dir + "resize.png")
# Horizontal flip (p=1.0 guarantees the flip is applied)
img_flip = transforms.RandomHorizontalFlip(p=1.0)(img)
img_flip.save(output_dir + "flip.png")

# Random crop
img_randomcrop = transforms.RandomCrop((50, 50))(img)
img_randomcrop.save(output_dir + "random_crop.png")
# Additional augmentations
# Random rotation (up to ±45 degrees)
img_rotate = transforms.RandomRotation(degrees=45)(img)
img_rotate.save(output_dir + "rotate.png")
# Color jitter
img_color_jitter = transforms.ColorJitter(brightness=0.5, contrast=0.5, saturation=0.5, hue=0.1)(img)
img_color_jitter.save(output_dir + "color_jitter.png")
# Random affine transform
img_affine = transforms.RandomAffine(degrees=30, translate=(0.1, 0.1), scale=(0.8, 1.2), shear=10)(img)
img_affine.save(output_dir + "affine.png")
# Vertical flip
img_vertical_flip = transforms.RandomVerticalFlip(p=1.0)(img)
img_vertical_flip.save(output_dir + "vertical_flip.png")
# Grayscale (3 output channels so downstream RGB pipelines still work)
img_grayscale = transforms.Grayscale(num_output_channels=3)(img)
img_grayscale.save(output_dir + "grayscale.png")
# Center crop
img_center_crop = transforms.CenterCrop((100, 100))(img)
img_center_crop.save(output_dir + "center_crop.png")
# Gaussian blur
img_gaussian_blur = transforms.GaussianBlur(kernel_size=(5, 5), sigma=(0.1, 2.0))(img)
img_gaussian_blur.save(output_dir + "gaussian_blur.png")
print("图像扩增完成,已保存至指定文件夹。")

# --- Section B21: LeNet training on torchvision MNIST ---
import os

import torch
from torchvision import transforms,datasets
from torch.utils.data import DataLoader
import torch.nn.functional as F
from torch import nn
import torch.optim as optim
#
# Preprocessing: resize to 32x32, to-tensor, then normalize with the standard
# MNIST statistics (mean 0.1307, std 0.3081).
transform=transforms.Compose([transforms.Resize((32,32)),
                              transforms.ToTensor(),
                              transforms.Normalize(0.1307,0.3081)
                              ])
# Training hyperparameters
batch_size=128
num_workers=2  # NOTE(review): unused — the loaders below do not pass num_workers
device=("cuda:0" if torch.cuda.is_available() else "cpu")
epochs=5
best_acc=0 # best test accuracy so far (referenced via `global` in main)
# ##########################################################################################
# train_data=datasets.ImageFolder(root='D:/prjtest/prj1/mnist_images/MNIST/raw',train=True,download=True,transform=transform)
# test_data=datasets.ImageFolder(root='D:/prjtest/prj1/mnist_images/MNIST/raw',train=False,download=True,transform=transform)
#
# train_loader=DataLoader(train_data,batch_size=batch_size,shuffle=True,num_workers=num_workers)
# test_loader=DataLoader(test_data,batch_size=batch_size,shuffle=True,num_workers=num_workers)

# data_dir = 'D:/prj/prjend/mnist_images/mnist_images'
# train_data=datasets.ImageFolder(root=f'{data_dir}/train',transform=transform)
# test_data=datasets.ImageFolder(root=f'{data_dir}/test',transform=transform)
#
# # 定义 LeNet 网络结构
# class LeNet(nn.Module):
#     def __init__(self):
#         super(LeNet, self).__init__()
#         self.conv1 = nn.Conv2d(1, 6, kernel_size=5, padding=2)
#         self.conv2 = nn.Conv2d(6, 16, kernel_size=5)
#         self.fc1 = nn.Linear(16 * 5 * 5, 120)
#         self.fc2 = nn.Linear(120, 84)
#         self.fc3 = nn.Linear(84, 10)
#         self.pool = nn.AvgPool2d(kernel_size=2, stride=2)
#
#     def forward(self, x):
#         x = self.pool(F.relu(self.conv1(x)))
#         x = self.pool(F.relu(self.conv2(x)))
#         x = x.view(-1, 16 * 5 * 5)  # 展平
#         x = F.relu(self.fc1(x))
#         x = F.relu(self.fc2(x))
#         x = self.fc3(x)  # 输出 logits
#         return x
# # 训练模型
# def train_model(epoch):
#     model.train() #将模型设置为训练模式
#     for batch_index,(X,y) in enumerate(train_loader): #batch_index是当前批次的索引,X是输入数据,y是对应的标签
#         X,y = X.to(device),y.to(device)             #将数据和标签移动到指定的设备(如CPU或GPU)上
#         pre_y = model(X)                            #第一次向前转播(通过模型model对输入数据X进行预测,得到预测结果pre_y)
#         # loss = F.cross_entropy(pre_y,y)           #计算损失函数(计算预测结果y_hat和真实标签y之间的交叉熵损失)
#         loss = nn.CrossEntropyLoss(pre_y, y)        #计算损失函数(计算预测结果y_hat和真实标签y之间的交叉熵损失)
#         optimizer.zero_grad()                       #清空梯度(清除,归零)
#         loss.backward()                             #第一次反向传播
#         optimizer.step()                            #优化内部参数
#         if batch_index % 20 == 0:
#             print("Train Epoch : {} \t Loss : {:.6f}".format(epoch, loss.item())) #打印当前训练轮次epoch和当前批次的损失值loss.item()

# # 测试模型 定义测试函数并保存最佳模型
# def test_model():
#     model.eval()
#     correct = 0.0
#     test_loss = 0.0
#
#     with torch.no_grad():
#         for X,y in test_loader:
#             X,y = X.to(device),y.to(device)
#             pre_y = model(X)
#             test_loss += F.cross_entropy(pre_y,y).item()
#             pred = pre_y.argmax(dim=1)
#             correct += pred.eq(y.view_as(pred)).sum().item()
#         test_loss /= len(test_loader.dataset)
#         print("Test —— Average loss : {:.4f}, Accuracy : {:.3f}\n".format(test_loss,
#                                                                       100.0 * correct / len(test_loader.dataset)))
# #
# ##########################################################################################
# 数据集路径
data_dir = 'D:/prj/prjend/MNIST'

# 检查本地数据是否存在
if not os.path.exists(data_dir):
    raise FileNotFoundError(f"数据集路径 {data_dir} 不存在,请确保数据已下载并放置在该目录下。")
# 加载本地数据集
train_data = datasets.MNIST(root=data_dir, train=True, transform=transform, download=False)
test_data = datasets.MNIST(root=data_dir, train=False, transform=transform, download=False)
# 创建数据加载器
train_loader = DataLoader(train_data, batch_size=batch_size, shuffle=True)
test_loader = DataLoader(test_data, batch_size=batch_size, shuffle=False)
# 定义 LeNet 网络结构
# LeNet for single-channel MNIST input (32x32 after the Resize transform)
class LeNet(nn.Module):
    """LeNet-5 style CNN producing 10 class logits.

    Bug fix: conv1 previously declared 3 input channels, but this section
    trains on datasets.MNIST, which yields 1-channel tensors (the transform
    does no channel conversion) — the forward pass crashed at conv1.
    """

    def __init__(self):
        super(LeNet, self).__init__()
        # in_channels=1 to match MNIST's grayscale images (was 3)
        self.conv1 = nn.Conv2d(1, 6, kernel_size=5, stride=1, padding=2)  # 32x32 -> 32x32
        self.pool = nn.MaxPool2d(kernel_size=2, stride=2)                 # halves H and W
        self.conv2 = nn.Conv2d(6, 16, kernel_size=5)                      # 16x16 -> 12x12
        self.fc1 = nn.Linear(16 * 6 * 6, 120)  # 16 channels of 6x6 after pooling
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        """Return raw logits of shape (batch, 10)."""
        x = self.pool(torch.relu(self.conv1(x)))
        x = self.pool(torch.relu(self.conv2(x)))
        x = x.view(x.size(0), -1)  # flatten all but the batch dimension
        x = torch.relu(self.fc1(x))
        x = torch.relu(self.fc2(x))
        x = self.fc3(x)
        return x
# Initialize the model, optimizer and loss function
model = LeNet().to(device)
# Adam optimizer with its default learning rate (1e-3)
optimizer = optim.Adam(model.parameters())
# Cross-entropy loss over the class logits
criterion = nn.CrossEntropyLoss()

# 训练函数
# Training function
def train_model(model, train_loader, criterion, optimizer, device):
    """Run one training epoch and report (mean per-batch loss, accuracy %)."""
    model.train()
    batch_losses = []
    correct = 0
    seen = 0
    for features, targets in train_loader:
        features, targets = features.to(device), targets.to(device)

        # forward / loss / backward / update
        optimizer.zero_grad()
        predictions = model(features)
        loss = criterion(predictions, targets)
        loss.backward()
        optimizer.step()

        batch_losses.append(loss.item())
        correct += (predictions.argmax(dim=1) == targets).sum().item()
        seen += targets.size(0)

    return sum(batch_losses) / len(train_loader), 100 * correct / seen
# 测试函数
def test_model(model, test_loader, criterion, device):
    model.eval()
    running_loss = 0.0
    correct = 0
    total = 0
    with torch.no_grad():
        for inputs, labels in test_loader:
            inputs, labels = inputs.to(device), labels.to(device)
            outputs = model(inputs)
            loss = criterion(outputs, labels)

            running_loss += loss.item()
            _, predicted = torch.max(outputs, 1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()

    accuracy = 100 * correct / total
    return running_loss / len(test_loader), accuracy

def main():
    """Train for `epochs` epochs, printing train/test loss and accuracy each epoch."""
    global best_acc  # module-level best accuracy (only used by the disabled code below)
    for epoch in range(epochs):
        train_loss,train_acc =train_model(model, train_loader, criterion, optimizer, device)
        test_loss,test_acc = test_model(model, test_loader, criterion, device)
        print(f"Epoch {epoch + 1}/{epochs}, Train Loss: {train_loss:.4f}, Train Acc: {train_acc:.2f}%, Test Loss: {test_loss:.4f}, Test Acc: {test_acc:.2f}%")

    # # Save the best model (disabled — NOTE(review): `lr`, `opt_name` are
    # # undefined in this scope, so enabling this as-is would raise NameError)
    # if test_acc > best_acc:
    #     best_acc = test_acc
    #     best_model = model
    #     best_params = {'learning_rate': lr, 'batch_size': batch_size, 'optimizer': opt_name}
    #     torch.save(model.state_dict(), 'best_lenet_model.pth')
    #     print(f"New Best Model Saved with Accuracy: {best_acc:.2f}% and Params: {best_params}")

if __name__ == '__main__':
    main()
    # Bug fix: save only when executed as a script. The original torch.save sat
    # at module level, so it also ran (and overwrote the file) on import.
    torch.save(model.state_dict(), "best_model.pth")
# model.load_state_dict(torch.load("best_model.pth"))
# model.eval()
# ---------------------------------------------

# train_loader = DataLoader(train_data, batch_size=batch_size, shuffle=True, num_workers=num_workers)
# test_loader = DataLoader(test_data, batch_size=batch_size, shuffle=False, num_workers=num_workers)
#
# # 定义 LeNet 网络结构
# class LeNet(nn.Module):
#     def __init__(self):
#         super(LeNet, self).__init__()
#         self.conv1 = nn.Conv2d(1, 6, kernel_size=5, padding=2)
#         self.conv2 = nn.Conv2d(6, 16, kernel_size=5)
#         self.fc1 = nn.Linear(16 * 5 * 5, 120)
#         self.fc2 = nn.Linear(120, 84)
#         self.fc3 = nn.Linear(84, 10)
#         self.pool = nn.AvgPool2d(kernel_size=2, stride=2)
#
#     def forward(self, x):
#         x = self.pool(F.relu(self.conv1(x)))
#         x = self.pool(F.relu(self.conv2(x)))
#         x = x.view(-1, 16 * 5 * 5)  # 展平
#         x = F.relu(self.fc1(x))
#         x = F.relu(self.fc2(x))
#         x = self.fc3(x)  # 输出 logits
#         return x
#
# # 初始化模型、优化器和损失函数
# model = LeNet().to(device)
# optimizer = optim.Adam(model.parameters())
# criterion = nn.CrossEntropyLoss()
#
# # 定义训练函数
# def train_model(epoch):
#     model.train()
#     total_loss = 0
#     for batch_idx, (X, y) in enumerate(train_loader):
#         X, y = X.to(device), y.to(device)
#         # optimizer.zero_grad()
#         y_hat = model(X)
#         loss = criterion(y_hat, y)
#         optimizer.zero_grad()
#         loss.backward()
#         optimizer.step()
#         total_loss += loss.item()
#         if batch_idx % 100 == 0:
#             print(f"Train Epoch: {epoch} [{batch_idx * len(X)}/{len(train_loader.dataset)}] Loss: {loss.item():.6f}")
#     print(f"Epoch {epoch} Training Loss: {total_loss / len(train_loader):.6f}")
#
# # 定义测试函数并保存最佳模型
# def test_model():
#     global best_accuracy
#     model.eval()
#     test_loss = 0
#     correct = 0
#     with torch.no_grad():
#         for X, y in test_loader:
#             X, y = X.to(device), y.to(device)
#             y_hat = model(X)
#             test_loss += criterion(y_hat, y).item()
#             pred = y_hat.argmax(dim=1)
#             correct += pred.eq(y).sum().item()
#     test_loss /= len(test_loader)
#     accuracy = 100.0 * correct / len(test_loader.dataset)
#     print(f"Test Loss: {test_loss:.6f}, Accuracy: {accuracy:.2f}%")
#
#     # 保存最佳模型
#     if accuracy > best_accuracy:
#         best_accuracy = accuracy
#         torch.save(model.state_dict(), "best_lenet_model.pth")
#         print(f"Saved Best Model with Accuracy: {accuracy:.2f}%")
#
# # 主函数
# def main():
#     for epoch in range(1, epochs + 1):
#         train_model(epoch)
#         test_model()
#     print("Training complete. Best model saved as 'best_lenet_model.pth'.")
#
#
# # 加载模型并测试新数据
# def load_and_test_model():
#     model = LeNet().to(device)
#     model.load_state_dict(torch.load("best_lenet_model.pth"))
#     model.eval()
#     correct = 0
#     with torch.no_grad():
#         for X, y in test_loader:
#             X, y = X.to(device), y.to(device)
#             y_hat = model(X)
#             pred = y_hat.argmax(dim=1)
#             correct += pred.eq(y).sum().item()
#     accuracy = 100.0 * correct / len(test_loader.dataset)
#     print(f"Loaded Model Test Accuracy: {accuracy:.2f}%")
#
# # 运行程序
# if __name__ == "__main__":
#     main()
#     load_and_test_model()
 

评论 1
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值