x = self.conv1(x)
x = self.conv2(x)
x = x.view(x.size(0), -1) # flatten the output of conv2 to (batch_size, 32 * 7 * 7)
output = self.out(x)
return output, x # return x for visualization
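For context, the forward pass above pairs with an __init__ along these lines. This is a minimal sketch inferred from the 32 * 7 * 7 flatten and the 10-class output, not copied from the post:

import torch.nn as nn

class CNN(nn.Module):
    def __init__(self):
        super(CNN, self).__init__()
        self.conv1 = nn.Sequential(           # input shape (1, 28, 28)
            nn.Conv2d(1, 16, kernel_size=5, stride=1, padding=2),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2),      # -> (16, 14, 14)
        )
        self.conv2 = nn.Sequential(
            nn.Conv2d(16, 32, kernel_size=5, stride=1, padding=2),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2),      # -> (32, 7, 7)
        )
        self.out = nn.Linear(32 * 7 * 7, 10)  # 10 digit classes

    def forward(self, x):
        # mirrors the forward body shown above
        x = self.conv1(x)
        x = self.conv2(x)
        x = x.view(x.size(0), -1)
        output = self.out(x)
        return output, x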
cnn = CNN()
print(cnn) # net architecture
optimizer = torch.optim.Adam(cnn.parameters(), lr=LR) # optimize all cnn parameters
loss_func = nn.CrossEntropyLoss() # the target label is not one-hotted
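As the comment notes, CrossEntropyLoss expects raw logits plus integer class indices rather than one-hot targets. A tiny standalone check (the values here are made up for illustration):

import torch
import torch.nn as nn

logits = torch.randn(4, 10)                    # batch of 4 samples, 10 classes
targets = torch.tensor([3, 7, 0, 9])           # class indices, not one-hot
print(nn.CrossEntropyLoss()(logits, targets))  # scalar loss tensor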
# the following function (plot_with_labels) is for visualization; it can be skipped if you are not interested
from matplotlib import cm
try:
    from sklearn.manifold import TSNE
    HAS_SK = True
except ImportError:
    HAS_SK = False
    print('Please install sklearn for layer visualization')
def plot_with_labels(lowDWeights, labels):
    plt.cla()
    X, Y = lowDWeights[:, 0], lowDWeights[:, 1]
    for x, y, s in zip(X, Y, labels):
        c = cm.rainbow(int(255 * s / 9))    # map digit 0-9 to a rainbow color
        plt.text(x, y, s, backgroundcolor=c, fontsize=9)
    plt.xlim(X.min(), X.max())
    plt.ylim(Y.min(), Y.max())
    plt.title('Visualize last layer')
    plt.show()
    plt.pause(0.01)
plt.ion()
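The loop below evaluates on test_x and test_y, which this excerpt never defines. A minimal preparation sketch, assuming the same torchvision MNIST layout the post uses for train_data (the 2000-sample cut mirrors the "pick 2000 samples" comment in the next part):

# build fixed test tensors once, outside the training loop
test_data = torchvision.datasets.MNIST(root='./mnist/', train=False)
# shape (2000, 1, 28, 28), scaled to [0, 1] like ToTensor() does for train batches
test_x = torch.unsqueeze(test_data.test_data, dim=1).type(torch.FloatTensor)[:2000] / 255.
test_y = test_data.test_labels[:2000]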
# training and testing
for epoch in range(EPOCH):
    losses = []
    acc = []
    for step, (b_x, b_y) in enumerate(train_loader):   # gives batch data; normalizes x when iterating train_loader
        output = cnn(b_x)[0]            # cnn output
        loss = loss_func(output, b_y)   # cross entropy loss
        optimizer.zero_grad()           # clear gradients for this training step
        loss.backward()                 # backpropagation, compute gradients
        optimizer.step()                # apply gradients

        if step % 50 == 0:
            test_output, last_layer = cnn(test_x)
            pred_y = torch.max(test_output, 1)[1].data.numpy()
            accuracy = float((pred_y == test_y.data.numpy()).astype(int).sum()) / float(test_y.size(0))
            print('Epoch: ', epoch, 'Step: ', step // 50,
                  '| train loss: %.4f' % loss.data.numpy(),
                  '| test accuracy: %.2f' % accuracy)
            losses.append(loss.data.numpy())
            acc.append(accuracy)
            if HAS_SK:
                # Visualization of trained flatten layer (T-SNE)
                tsne = TSNE(perplexity=30, n_components=2, init='pca', n_iter=5000)
                plot_only = 500
                low_dim_embs = tsne.fit_transform(last_layer.data.numpy()[:plot_only, :])
                labels = test_y.numpy()[:plot_only]
                plot_with_labels(low_dim_embs, labels)
plt.ioff()

# plot the recorded training losses and show the best test accuracy in a table
f, axes = plt.subplots(1, 1)
axes.plot(range(len(losses)), losses)
axes.set_xlabel('evaluation step (one per 50 training batches)')
axes.set_ylabel('loss')
axes.set_title('MNIST')
axes.set_ylim((0, max(losses)))
axes.set_xlim((0, len(losses) - 1))
row_labels = ['accuracy:']
col_labels = ['value']
value = max(acc)
table_vals = [['{:.2f}%'.format(value * 100)]]
row_colors = ['gold']
my_table = plt.table(cellText=table_vals, colWidths=[0.1],
                     rowLabels=row_labels, colLabels=col_labels,
                     rowColours=row_colors, loc='best')
plt.savefig('CNN.png')
plt.show()
# print 10 predictions from test data
test_output, _ = cnn(test_x[:10])
pred_y = torch.max(test_output, 1)[1].data.numpy()
print(pred_y, 'prediction number')
print(test_y[:10].numpy(), 'real number')
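If you also want to see how confident each prediction is, the logits can be pushed through a softmax. This is a hypothetical extension, not part of the original script:

import torch.nn.functional as F

probs = F.softmax(test_output, dim=1)   # per-class probabilities for the 10 samples
conf, pred = torch.max(probs, dim=1)    # pred matches pred_y above
print(conf.data.numpy(), 'confidence of each prediction')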
3. KNN implementation
from __future__ import print_function
import os
# third-party libraries
import torch
import torch.nn as nn
import torch.utils.data as Data
import torchvision
import time
import matplotlib.pyplot as plt
localtime = time.asctime(time.localtime(time.time()))
print('local time:', localtime)
torch.manual_seed(1) # reproducible
# Hyper Parameters
EPOCH = 1 # train the training data n times, to save time, we just train 1 epoch
BATCH_SIZE = 50
LR = 0.001 # learning rate
DOWNLOAD_MNIST = False
# MNIST digits dataset
if not (os.path.exists('./mnist/')) or not os.listdir('./mnist/'):
    # no mnist dir, or the mnist dir is empty
    DOWNLOAD_MNIST = True
train_data = torchvision.datasets.MNIST(
    root='./mnist/',
    train=True,                                     # this is training data
    transform=torchvision.transforms.ToTensor(),    # converts a PIL.Image or numpy.ndarray to a
                                                    # torch.FloatTensor of shape (C x H x W) and
                                                    # normalizes it to the range [0.0, 1.0]
    download=DOWNLOAD_MNIST,
)
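If this section also needs batched iteration like the CNN section's train_loader, the usual wrapper is a one-line sketch using the Data alias and BATCH_SIZE defined above:

train_loader = Data.DataLoader(dataset=train_data, batch_size=BATCH_SIZE, shuffle=True)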
# plot one example
print(train_data.train_data.size()) # (60000, 28, 28)
print(train_data.train_labels.size()) # (60000)
plt.imshow(train_data.train_data[0].numpy(), cmap='gray')
plt.title('%i' % train_data.train_labels[0])
plt.show()
# pick 2000 samples to speed up testing
train_data = torchvision.datasets.MNIST
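The heading of this section promises a KNN classifier. The excerpt ends during data preparation, but the core of KNN on flattened MNIST images can be sketched in a few lines of torch; knn_predict is a hypothetical helper, not the post's own code:

def knn_predict(train_x, train_y, test_x, k=5):
    # train_x: (N, 784) floats, train_y: (N,) longs, test_x: (M, 784) floats
    dists = torch.cdist(test_x, train_x)             # (M, N) pairwise L2 distances
    knn_idx = dists.topk(k, largest=False).indices   # indices of the k nearest training samples
    knn_labels = train_y[knn_idx]                    # (M, k) neighbor labels
    return knn_labels.mode(dim=1).values             # majority vote per test sample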