声明(Statement):本代码未经系统测试及专业测评,仅作示例参考。
示例1
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision.transforms as transforms
import torchvision.datasets as datasets
# Preprocessing pipeline: resize to the network's expected 224x224 input,
# convert to a tensor, and normalize with the ImageNet channel statistics.
transform = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406],
                         std=[0.229, 0.224, 0.225]),
])

# Image folders with one subdirectory per class; labels come from the folders.
train_dataset = datasets.ImageFolder('path/to/train/dataset', transform=transform)
test_dataset = datasets.ImageFolder('path/to/test/dataset', transform=transform)
# VGG-style CNN for 224x224 RGB input with a 2-class head.
# Every conv keeps the spatial size (3x3, pad 1); every pool halves it:
# 224 -> 112 -> 56 -> 28, so the flattened feature size is 256 * 28 * 28.
_layers = [
    nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1),
    nn.ReLU(),
    nn.MaxPool2d(kernel_size=2, stride=2),

    nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1),
    nn.ReLU(),
    nn.MaxPool2d(kernel_size=2, stride=2),

    nn.Conv2d(128, 256, kernel_size=3, stride=1, padding=1),
    nn.ReLU(),
    nn.MaxPool2d(kernel_size=2, stride=2),

    nn.Flatten(),
    nn.Linear(256 * 28 * 28, 1024),
    nn.ReLU(),
    nn.Linear(1024, 512),
    nn.ReLU(),
    nn.Linear(512, 2),
]
model = nn.Sequential(*_layers)
# Cross-entropy over the two output logits; Adam with default betas.
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=1e-3)
# Train for 10 epochs.
# NOTE(fix): the original used the invalid literal `running_loss = .` and
# iterated the Dataset directly, which yields unbatched tensors with plain
# int labels and breaks CrossEntropyLoss; batches now come from a DataLoader.
from torch.utils.data import DataLoader

train_loader = DataLoader(train_dataset, batch_size=32, shuffle=True)

for epoch in range(10):
    running_loss = 0.0
    for i, (inputs, labels) in enumerate(train_loader):
        optimizer.zero_grad()          # clear gradients from the previous step
        outputs = model(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
        if i % 100 == 99:              # report the mean loss every 100 batches
            print('[%d, %5d] loss: %.3f' % (epoch + 1, i + 1, running_loss / 100))
            running_loss = 0.0
# Evaluate accuracy on the held-out set.
# NOTE(fix): the original `correct =` / `total =` had no right-hand side,
# `labels.size()` needs a dimension argument, and the Dataset was iterated
# without batching; a DataLoader now supplies batched tensors.
from torch.utils.data import DataLoader

test_loader = DataLoader(test_dataset, batch_size=32, shuffle=False)

correct = 0
total = 0
with torch.no_grad():                  # inference only, no gradient tracking
    for images, labels in test_loader:
        outputs = model(images)
        _, predicted = torch.max(outputs.data, 1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()
print('Accuracy of the network on the test images: %d %%' % (100 * correct / total))
示例2
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
from torch.utils.data import Dataset
from PIL import Image
# Dataset over (image_path, label) pairs; images are loaded lazily on access.
class FaceDataset(Dataset):
    """Map-style dataset backed by a list of (path, label) tuples."""

    def __init__(self, data_list, transform=None):
        self.data_list = data_list    # list of (image path, integer label)
        self.transform = transform    # optional torchvision transform

    def __len__(self):
        return len(self.data_list)

    def __getitem__(self, index):
        path, label = self.data_list[index]
        image = Image.open(path).convert('RGB')   # force 3 channels
        if self.transform is not None:
            image = self.transform(image)
        return image, label
# LeNet-style CNN: two conv/pool stages, then three fully-connected layers.
class Net(nn.Module):
    """CNN for 3x32x32 input producing 2 logits.

    Spatial sizes: 32 -> 28 (conv1) -> 14 (pool) -> 10 (conv2) -> 5 (pool),
    hence the flattened size 16 * 5 * 5.
    """

    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(3, 6, 5)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(6, 16, 5)
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 2)

    def forward(self, x):
        for conv in (self.conv1, self.conv2):
            x = self.pool(torch.relu(conv(x)))
        x = x.view(-1, 16 * 5 * 5)     # flatten for the classifier head
        for fc in (self.fc1, self.fc2):
            x = torch.relu(fc(x))
        return self.fc3(x)             # raw logits; loss applies softmax
# One full pass over the training data.
def train(model, train_loader, criterion, optimizer):
    """Run one training epoch and return the mean per-batch loss."""
    model.train()
    total_loss = 0.0
    for inputs, labels in train_loader:
        optimizer.zero_grad()                      # reset accumulated grads
        loss = criterion(model(inputs), labels)
        loss.backward()
        optimizer.step()
        total_loss += loss.item()
    return total_loss / len(train_loader)
# 定义测试函数
def test(model, test_loader, criterion):
model.eval()
correct = 0
total = 0
running_loss = 0.0
with torch.no_grad():
for i, (inputs, labels) in enumerate(test_loader, 0):
outputs = model(inputs)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
loss = criterion(outputs, labels)
running_loss += loss.item()
return running_loss / len(test_loader), correct / total
# Entry point: wire up data, model, and optimizer; train and save weights.
def main():
    """Train the face classifier for 10 epochs and persist the weights."""
    # Preprocessing: 32x32 input, each channel normalized to roughly [-1, 1].
    transform = transforms.Compose([transforms.Resize((32, 32)),
                                    transforms.ToTensor(),
                                    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])

    # Placeholder (path, label) lists -- replace with real annotations.
    train_list = [('path/to/face1.jpg', 0), ('path/to/face2.jpg', 1), ...]
    test_list = [('path/to/face3.jpg', 0), ('path/to/face4.jpg', 1), ...]
    train_dataset = FaceDataset(train_list, transform=transform)
    test_dataset = FaceDataset(test_list, transform=transform)

    # Batched loaders; only the training data is shuffled.
    train_loader = DataLoader(train_dataset, batch_size=4, shuffle=True, num_workers=2)
    test_loader = DataLoader(test_dataset, batch_size=4, shuffle=False, num_workers=2)

    # Model, loss, and SGD-with-momentum optimizer.
    net = Net()
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)

    # One train/eval round per epoch, reporting losses and accuracy.
    for epoch in range(10):
        train_loss = train(net, train_loader, criterion, optimizer)
        test_loss, test_acc = test(net, test_loader, criterion)
        print('Epoch %d, Train Loss: %.3f, Test Loss: %.3f, Test Acc: %.3f' % (epoch+1, train_loss, test_loss, test_acc))

    # Persist the trained parameters only (state dict, not the module).
    torch.save(net.state_dict(), 'path/to/model.pt')


if __name__ == '__main__':
    main()
这个示例代码中,我们首先定义了一个数据集类 FaceDataset,它继承自 torch.utils.data.Dataset,并重写了 __len__ 和 __getitem__ 方法,用于获取数据集中的样本。然后我们定义了一个卷积神经网络模型 Net,它继承自 nn.Module,并实现了 forward 方法,用于前向计算。接着我们定义了训练函数 train 和测试函数 test,它们分别用于模型的训练和测试。最后我们定义了一个主函数 main,它首先定义了数据预处理和数据加载器,然后定义了模型、损失函数和优化器,接着训练模型并保存。在训练过程中,我们使用了交叉熵损失函数和随机梯度下降优化器,每个 epoch 训练和测试一次,并输出训练和测试的损失和准确率。
示例3
import torch
from torch.utils.data import DataLoader
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torch.nn as nn
import torch.optim as optim
# Preprocessing: resize to the network's expected 224x224 input and
# normalize with the standard ImageNet channel statistics.
transform = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406],
                         std=[0.229, 0.224, 0.225]),
])

# One subdirectory per class under ./train and ./test.
train_dataset = datasets.ImageFolder('./train', transform=transform)
test_dataset = datasets.ImageFolder('./test', transform=transform)

# Mini-batch size shared by both loaders.
batch_size = 8

# Shuffle only the training data; keep the test order fixed.
train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False)
# Two conv/pool stages followed by a two-layer classifier head.
class Net(nn.Module):
    """CNN for 3x224x224 input producing 2 logits.

    The 5x5 convs use padding=2, so only the pools shrink the map:
    224 -> 112 -> 56, giving a flattened size of 64 * 56 * 56.
    """

    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(3, 32, 5, padding=2)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(32, 64, 5, padding=2)
        self.fc1 = nn.Linear(64 * 56 * 56, 1024)
        self.fc2 = nn.Linear(1024, 2)

    def forward(self, x):
        for conv in (self.conv1, self.conv2):
            x = self.pool(torch.relu(conv(x)))
        x = x.view(-1, 64 * 56 * 56)   # flatten for the classifier head
        return self.fc2(torch.relu(self.fc1(x)))
# Instantiate the network with cross-entropy loss and momentum SGD.
net = Net()
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)
# Train for 10 epochs, logging the mean loss every 100 mini-batches.
for epoch in range(10):
    running_loss = 0.0
    for i, (inputs, labels) in enumerate(train_loader):
        optimizer.zero_grad()          # clear gradients from the previous step
        outputs = net(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
        if i % 100 == 99:
            print('[%d, %5d] loss: %.3f' % (epoch + 1, i + 1, running_loss / 100))
            running_loss = 0.0
print('Finished Training')
# Report overall test-set accuracy; no gradients needed for inference.
correct = 0
total = 0
with torch.no_grad():
    for images, labels in test_loader:
        outputs = net(images)
        _, predicted = torch.max(outputs.data, 1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()
print('Accuracy of the network on the test images: %d %%' % (100 * correct / total))