import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
from torch.autograd import Variable
from torch.utils.data import Dataset
import os
import glob
import matplotlib.pyplot as plt
import numpy as np
# Hyperparameters for training.
batch_size = 50
iterations = 20

# File paths for the two classes of right-eye crops (open vs. closed).
imgs_open = glob.glob('datasets/openRightEyes/*.jpg')
imgs_closed = glob.glob('datasets/closedRightEyes/*.jpg')
class Train_Dataset(Dataset):
    """Training split: up to the first 1000 open-eye and first 1000
    closed-eye images. Label 0 = open, 1 = closed.

    All images are loaded eagerly into two tensors:
    images (N, 1, 24, 24) float32, labels (N,) int64.
    """

    def __init__(self):
        images = []
        labels = []
        # Fold the two duplicated per-class loops into one; slicing with
        # [:1000] truncates safely instead of raising IndexError when a
        # class has fewer than 1000 files.
        for paths, label in ((imgs_open, 0), (imgs_closed, 1)):
            for path in paths[:1000]:
                # assumes each .jpg decodes to a single-channel 24x24
                # array so it reshapes to (1, 24, 24) — TODO confirm
                image = np.reshape(plt.imread(path), [1, 24, 24])
                images.append(image)
                labels.append(label)
        self.images = torch.from_numpy(np.array(images, dtype=np.float32))
        self.labels = torch.from_numpy(np.array(labels, dtype=np.int64))

    def __getitem__(self, index):
        """Return the (image, label) pair at *index*."""
        return self.images[index], self.labels[index]

    def __len__(self):
        # Derive the length from the data instead of hard-coding 2000,
        # so the class stays correct if the split sizes change.
        return len(self.images)
class Test_Dataset(Dataset):
    """Held-out split: every image beyond the first 1000 of each class.
    Label 0 = open, 1 = closed.

    All images are loaded eagerly into two tensors:
    images (N, 1, 24, 24) float32, labels (N,) int64.
    """

    def __init__(self):
        images = []
        labels = []
        # Single loop over both classes; slicing with [1000:] yields an
        # empty list (not an error) when a class has <= 1000 files.
        for paths, label in ((imgs_open, 0), (imgs_closed, 1)):
            for path in paths[1000:]:
                # assumes each .jpg decodes to a single-channel 24x24
                # array so it reshapes to (1, 24, 24) — TODO confirm
                image = np.reshape(plt.imread(path), [1, 24, 24])
                images.append(image)
                labels.append(label)
        self.images = torch.from_numpy(np.array(images, dtype=np.float32))
        self.labels = torch.from_numpy(np.array(labels, dtype=np.int64))

    def __getitem__(self, index):
        """Return the (image, label) pair at *index*."""
        return self.images[index], self.labels[index]

    def __len__(self):
        # Derive the length from the data instead of hard-coding 423 —
        # that magic number silently breaks if the dataset changes.
        return len(self.images)
# Materialize both splits once, then wrap them in mini-batch loaders.
train_dataset = Train_Dataset()
test_dataset = Test_Dataset()

# Shuffle only the training data; evaluation order does not matter.
train_loader = torch.utils.data.DataLoader(
    dataset=train_dataset, batch_size=batch_size, shuffle=True)
test_loader = torch.utils.data.DataLoader(
    dataset=test_dataset, batch_size=batch_size, shuffle=False)
class Net(nn.Module):
    """Small CNN for binary open/closed eye classification.

    Expects input of shape (batch, 1, 24, 24); returns per-class
    log-probabilities of shape (batch, 2).
    """

    def __init__(self):
        super(Net, self).__init__()
        # Input 1 channel, output 32 channels, 3x3 kernel; padding=1
        # keeps the 24x24 spatial size through each conv.
        self.conv1 = nn.Conv2d(1, 32, kernel_size=3, padding=1)
        self.conv2 = nn.Conv2d(32, 50, kernel_size=3, padding=1)
        self.mp = nn.MaxPool2d(2)
        # After two 2x2 poolings 24 -> 12 -> 6, so the flattened
        # feature count is 50 * 6 * 6 = 1800.
        self.fc1 = nn.Linear(1800, 1024)
        self.fc2 = nn.Linear(1024, 2)

    def forward(self, x):
        x = self.mp(F.relu(self.conv1(x)))
        x = self.mp(F.relu(self.conv2(x)))
        b_n = x.size(0)
        x = x.view(b_n, -1)
        x = F.relu(self.fc1(x))
        x = self.fc2(x)
        # Fix: pass dim explicitly — the implicit-dim form of
        # log_softmax is deprecated and warns/errors on modern PyTorch.
        return F.log_softmax(x, dim=1)
net = Net()
optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)

for epoch in range(iterations):
    # ---- Train one epoch ----
    # Note: Variable wrapping is deprecated since PyTorch 0.4 — plain
    # tensors from the DataLoader track gradients as needed.
    net.train()
    for batch_idx, (data, target) in enumerate(train_loader):
        optimizer.zero_grad()
        output = net(data)
        loss = F.nll_loss(output, target)
        loss.backward()
        optimizer.step()
        # argmax over the class dimension; keepdim so view_as lines up.
        pred = output.argmax(dim=1, keepdim=True)
        accuracy = pred.eq(target.view_as(pred)).float().mean().item()
        if batch_idx % 5 == 0:
            print('Train Epoch: %d [%d/%d ] %f \tTrainLoss: %f\tTrainAcc:%f' % (
                epoch, batch_idx * len(data), len(train_loader.dataset),
                1.0 * batch_idx / len(train_loader), loss.item(), accuracy))
    # ---- Evaluate on the held-out split ----
    net.eval()
    correct = 0
    # Fix: evaluation previously ran with gradient tracking enabled,
    # wasting memory/compute; no_grad disables it.
    with torch.no_grad():
        for data, target in test_loader:
            output = net(data)
            pred = output.argmax(dim=1, keepdim=True)
            correct += pred.eq(target.view_as(pred)).sum().item()
    accuracy_mean = correct / len(test_loader.dataset)
    print('Train Epoch: %d\tTestAcc: %f' % (
        epoch, accuracy_mean))
# 数据集已经上传到了我的CSDN的资料库里 (the dataset has been uploaded to my CSDN resource library)