Simple Regression Problem
Implemented with numpy only; the code is commented throughout.
import numpy as np
# y = wx + b
# Compute the mean squared error
# loss = (1/N) * Σ (w*x_i + b - y_i)^2
def compute_error_for_line_given_points(b, w, points):
    totalError = 0
    for i in range(0, len(points)):
        x = points[i, 0]
        y = points[i, 1]
        totalError += (y - (w * x + b)) ** 2
    return totalError / float(len(points))
# Compute the gradients for the intercept b and slope w, and take one update step
# w' = w - lr * (∂loss/∂w),  b' = b - lr * (∂loss/∂b)
# loss = (1/N) * Σ (w*x_i + b - y_i)^2
# ∂loss/∂w = (2/N) * Σ x_i * (w*x_i + b - y_i),  ∂loss/∂b = (2/N) * Σ (w*x_i + b - y_i)
def step_gradient(b_current, w_current, points, learningRate):
    b_gradient = 0
    w_gradient = 0
    N = float(len(points))
    for i in range(0, len(points)):
        x = points[i, 0]
        y = points[i, 1]
        b_gradient += -(2 / N) * (y - ((w_current * x) + b_current))
        w_gradient += -(2 / N) * x * (y - ((w_current * x) + b_current))
    new_b = b_current - (learningRate * b_gradient)
    new_w = w_current - (learningRate * w_gradient)
    return [new_b, new_w]
# Iteratively update the intercept and slope
def gradient_descent_runner(points, starting_b, starting_w, learning_rate, num_iterations):
    b = starting_b
    w = starting_w
    for i in range(num_iterations):
        b, w = step_gradient(b, w, np.array(points), learning_rate)
    return [b, w]
def run():
    points = np.genfromtxt("data.csv", delimiter=",")
    learning_rate = 0.0001
    initial_b = 0  # initial y-intercept
    initial_w = 0  # initial slope
    num_iterations = 1000  # run 1000 iterations
    print("Starting gradient descent at b = {0}, w = {1}, error = {2}"
          .format(initial_b, initial_w,
                  compute_error_for_line_given_points(initial_b, initial_w, points))
          )
    print("Running...")
    [b, w] = gradient_descent_runner(points, initial_b, initial_w, learning_rate, num_iterations)
    print("After {0} iterations b = {1}, w = {2}, error = {3}"
          .format(num_iterations, b, w,
                  compute_error_for_line_given_points(b, w, points))
          )

if __name__ == '__main__':
    run()
Result:
Starting gradient descent at b = 0, w = 0, error = 5565.107834483211
Running...
After 1000 iterations b = 0.08893651993741346, w = 1.4777440851894448, error = 112.61481011613473
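As a quick sanity check (a sketch, not part of the original code), the same line can be fitted in closed form with numpy's least-squares routine. Assuming data.csv has x in the first column and y in the second, this gives the exact least-squares intercept and slope, which the gradient-descent result approaches as the number of iterations grows:

import numpy as np

points = np.genfromtxt("data.csv", delimiter=",")
x, y = points[:, 0], points[:, 1]
# np.polyfit with degree 1 solves the least-squares line directly;
# it returns the coefficients highest degree first, i.e. [w, b]
w_ls, b_ls = np.polyfit(x, y, 1)
mse = np.mean((w_ls * x + b_ls - y) ** 2)
print("closed-form fit: b = {0}, w = {1}, error = {2}".format(b_ls, w_ls, mse))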
Handwritten Digit Recognition
Code, with comments:
import torch
from torch import nn
from torch.nn import functional as F
from torch import optim
import torchvision
from matplotlib import pyplot as plt
from utils import plot_image, plot_curve, one_hot
batch_size = 512
# step1. load dataset
train_loader = torch.utils.data.DataLoader(
    torchvision.datasets.MNIST('mnist_data', train=True, download=True,
                               transform=torchvision.transforms.Compose([
                                   torchvision.transforms.ToTensor(),
                                   torchvision.transforms.Normalize(
                                       (0.1307,), (0.3081,))
                               ])),
    batch_size=batch_size, shuffle=True)
test_loader = torch.utils.data.DataLoader(
    torchvision.datasets.MNIST('mnist_data/', train=False, download=True,
                               transform=torchvision.transforms.Compose([
                                   torchvision.transforms.ToTensor(),
                                   torchvision.transforms.Normalize(
                                       (0.1307,), (0.3081,))
                               ])),
    batch_size=batch_size, shuffle=False)
# After loading, take one batch of data for a preview
x, y = next(iter(train_loader))
print(x.shape, y.shape, x.min(), x.max())
plot_image(x, y, 'image sample')
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        # xw + b
        self.fc1 = nn.Linear(28 * 28, 256)
        self.fc2 = nn.Linear(256, 64)
        self.fc3 = nn.Linear(64, 10)

    def forward(self, x):
        # x: [b, 1, 28, 28]
        # h1 = relu(xw1 + b1)
        x = F.relu(self.fc1(x))
        # h2 = relu(h1w2 + b2)
        x = F.relu(self.fc2(x))
        # h3 = h2w3 + b3
        x = self.fc3(x)
        return x
net = Net()
# [w1, b1, w2, b2, w3, b3]
optimizer = optim.SGD(net.parameters(), lr=0.01, momentum=0.9)
train_loss = []
for epoch in range(3):
    for batch_idx, (x, y) in enumerate(train_loader):
        # x: [b, 1, 28, 28], y: [512]
        # [b, 1, 28, 28] => [b, 784]
        x = x.view(x.size(0), 28 * 28)  # flatten the multi-dimensional tensor; x.size(0) = b
        # => [b, 10]
        out = net(x)
        # [b, 10]
        y_onehot = one_hot(y)
        # loss = mse(out, y_onehot)
        loss = F.mse_loss(out, y_onehot)
        # zero the gradients first
        optimizer.zero_grad()
        loss.backward()
        # w' = w - lr * grad
        optimizer.step()
        train_loss.append(loss.item())
        if batch_idx % 10 == 0:
            print(epoch, batch_idx, loss.item())

plot_curve(train_loss)
# we get optimal [w1, b1, w2, b2, w3, b3]
total_correct = 0
for x, y in test_loader:
    x = x.view(x.size(0), 28 * 28)
    out = net(x)
    # out: [b, 10] => pred: [b]
    pred = out.argmax(dim=1)
    correct = pred.eq(y).sum().float().item()
    total_correct += correct
total_num = len(test_loader.dataset)
acc = total_correct / total_num
print('test acc:', acc)
x, y = next(iter(test_loader))
out = net(x.view(x.size(0), 28 * 28))
pred = out.argmax(dim=1)
plot_image(x, pred, 'test')
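The helpers plot_image, plot_curve and one_hot are imported from a local utils module that is not listed here. As an illustration only, a minimal one_hot could be written as follows (the depth=10 default is an assumption matching the 10 MNIST classes):

import torch

def one_hot(label, depth=10):
    # label: [b] integer class indices => out: [b, depth] one-hot vectors
    out = torch.zeros(label.size(0), depth)
    idx = label.view(-1, 1)
    out.scatter_(dim=1, index=idx, value=1)
    return out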
Result:
torch.Size([512, 1, 28, 28]) torch.Size([512]) tensor(-0.4242) tensor(2.8215)
0 0 0.10631299018859863
0 10 0.09327627718448639
0 20 0.08357524871826172
0 30 0.07842864841222763
0 40 0.07161600142717361
0 50 0.06780468672513962
0 60 0.06343945860862732
0 70 0.061706673353910446
0 80 0.056804824620485306
0 90 0.05541609972715378
0 100 0.052139125764369965
0 110 0.05142538994550705
1 0 0.05038641765713692
1 10 0.04735875129699707
1 20 0.04581252112984657
1 30 0.046893153339624405
1 40 0.04484379291534424
1 50 0.045751187950372696
1 60 0.044139545410871506
1 70 0.04007252678275108
1 80 0.04167523235082626
1 90 0.041462428867816925
1 100 0.039175406098365784
1 110 0.0402270182967186
2 0 0.04034041985869408
2 10 0.03800373896956444
2 20 0.03578241541981697
2 30 0.03599751740694046
2 40 0.03725225850939751
2 50 0.034895461052656174
2 60 0.03328505530953407
2 70 0.033005014061927795
2 80 0.036702465265989304
2 90 0.03656462952494621
2 100 0.03397378325462341
2 110 0.030807316303253174
test acc: 0.8842
The accuracy is not high. With plain gradient descent on an MSE loss and only a 3-layer fully connected network, the result is understandably modest. A convolutional neural network (CNN) can be used instead; with such a model the test accuracy can later reach around 99.7%.
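As an illustration of that direction (a sketch, not part of the original code), a small convolutional network for MNIST could look like the following. The layer sizes here are assumptions, and the loss would typically be switched from MSE on one-hot targets to F.cross_entropy on the raw logits:

import torch
from torch import nn
from torch.nn import functional as F

class ConvNet(nn.Module):
    def __init__(self):
        super(ConvNet, self).__init__()
        # two conv blocks, then a small fully connected classifier head
        self.conv1 = nn.Conv2d(1, 32, kernel_size=3, padding=1)
        self.conv2 = nn.Conv2d(32, 64, kernel_size=3, padding=1)
        self.fc1 = nn.Linear(64 * 7 * 7, 128)
        self.fc2 = nn.Linear(128, 10)

    def forward(self, x):
        # x: [b, 1, 28, 28] => [b, 32, 14, 14]
        x = F.max_pool2d(F.relu(self.conv1(x)), 2)
        # => [b, 64, 7, 7]
        x = F.max_pool2d(F.relu(self.conv2(x)), 2)
        # flatten => [b, 64*7*7]
        x = x.view(x.size(0), -1)
        x = F.relu(self.fc1(x))
        return self.fc2(x)

# The training loop above can be reused almost unchanged: keep x as [b, 1, 28, 28]
# (no flattening before the network) and use loss = F.cross_entropy(out, y).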