感知机:
二分类模型,无法处理XOR函数
训练感知机:
初始化w=0,b=0
Repeat:
If yi*[<w,xi>+b]<=0 then
w <- w + yi*xi and b <- b + yi
End if
直到所有的类分类正确
等价于批量大小为1的梯度下降
多层感知机:
单隐藏层-----单分类
$h=\sigma(W_1 x + b_1)$
$o = W_2^T h + b_2$
隐藏层不能使用线性激活函数,需要激活函数σ,按元素做运算
激活函数
$\mathrm{sigmoid}(x)=\frac{1}{1+\exp(-x)}$
将输入投影到(0,1)
$\tanh(x)=\frac{1-\exp(-2x)}{1+\exp(-2x)}$
将输入投影到(-1,1)
$\mathrm{ReLU}(x)=\max(x,0)$,最常用也是速度最快的激活函数
多类分类:
与softmax相似
多隐藏层:
$h_1=\sigma(W_1 x + b_1)$
$h_2=\sigma(W_2 h_1 + b_2)$
$h_3=\sigma(W_3 h_2 + b_3)$
$o = W_4 h_3 + b_4$
以下是从头实现的代码,在上一节softmax的代码基础上,修改了一部分算法,其余部分都相同
# NOTE(review): torch and d2l are each imported twice below (harmless but
# redundant) — presumably a copy-paste from the previous notebook; could be
# deduplicated.
import torch
from torch import nn
from d2l import torch as d2l
import torch
import torchvision
from torch.utils import data
from torchvision import transforms
from d2l import torch as d2l
from IPython import display
# Render matplotlib figures as SVG in the notebook.
d2l.use_svg_display()
def get_dataloader_workers():
    """Number of worker processes used to read the dataset in parallel."""
    return 4
def load_data_fashion_mnist(batch_size, resize=None):
    """Load Fashion-MNIST and return a (train, test) DataLoader pair.

    batch_size: samples per mini-batch.
    resize: optional target image size; when given, images are resized
        before being converted to tensors.
    """
    transform_steps = [transforms.ToTensor()]
    if resize:
        # Resizing must happen while the sample is still a PIL image,
        # so it is inserted before ToTensor.
        transform_steps.insert(0, transforms.Resize(resize))
    pipeline = transforms.Compose(transform_steps)
    train_set = torchvision.datasets.FashionMNIST(
        root="./data", train=True, transform=pipeline, download=False)
    test_set = torchvision.datasets.FashionMNIST(
        root="./data", train=False, transform=pipeline, download=False)
    train_loader = data.DataLoader(
        train_set, batch_size, shuffle=True,
        num_workers=get_dataloader_workers())
    test_loader = data.DataLoader(
        test_set, batch_size, shuffle=False,
        num_workers=get_dataloader_workers())
    return (train_loader, test_loader)
# Mini-batch size shared by the training and test loaders.
batch_size=256
# Build the Fashion-MNIST iterators (reads from ./data; download=False above).
train_iter,test_iter=load_data_fashion_mnist(batch_size)
# Network dimensions: 28*28 = 784 flattened pixels in, 10 classes out,
# one hidden layer of 256 units.
num_inputs, num_outputs, num_hiddens = 784, 10, 256
# Hidden- and output-layer parameters.
# FIX: the original used plain torch.randn (std=1), which makes the initial
# logits huge and the training loss explode in the first epochs — the very
# problem noted at the end of these notes. Scaling to std=0.01 matches the
# concise implementation below (nn.init.normal_(..., std=0.01)).
# nn.Parameter sets requires_grad=True by default.
w1 = nn.Parameter(torch.randn(num_inputs, num_hiddens) * 0.01)
b1 = nn.Parameter(torch.zeros(num_hiddens))
w2 = nn.Parameter(torch.randn(num_hiddens, num_outputs) * 0.01)
b2 = nn.Parameter(torch.zeros(num_outputs))
# Collected for the optimizer.
params = [w1, b1, w2, b2]
# Hand-rolled ReLU activation.
def relu(x):
    """Element-wise max(x, 0)."""
    return torch.clamp(x, min=0)
# The MLP model: flatten -> linear -> ReLU -> linear.
def net(x):
    """Forward pass; returns unnormalized class scores (logits)."""
    flat = x.reshape((-1, num_inputs))
    hidden = relu(torch.matmul(flat, w1) + b1)
    return torch.matmul(hidden, w2) + b2
loss=nn.CrossEntropyLoss()
def accuracy(y_hat, y):
    """Return the number of predictions in y_hat that match the labels y."""
    predictions = y_hat
    if predictions.ndim > 1 and predictions.shape[1] > 1:
        # y_hat holds per-class scores: pick the highest-scoring class.
        predictions = predictions.argmax(axis=1)
    # Cast to the label dtype before comparing, then count the matches.
    matches = predictions.type(y.dtype) == y
    return float(matches.type(y.dtype).sum())
class Accumulator:
    """Accumulate n running sums (e.g. correct count and sample count)."""

    def __init__(self, n):
        # One float slot per tracked quantity.
        self.data = [0.0] * n

    def add(self, *args):
        """Add each positional argument to the corresponding slot."""
        updated = []
        for total, value in zip(self.data, args):
            updated.append(total + float(value))
        self.data = updated

    def reset(self):
        """Zero every slot, keeping the slot count."""
        self.data = [0.0] * len(self.data)

    def __getitem__(self, idx):
        return self.data[idx]
def evaluate_accuracy(net, data_iter):
    """Average classification accuracy of `net` over `data_iter`."""
    if isinstance(net, torch.nn.Module):
        # Evaluation mode (disables dropout / freezes batch-norm stats).
        net.eval()
    totals = Accumulator(2)  # slot 0: correct predictions, slot 1: samples
    for features, labels in data_iter:
        totals.add(accuracy(net(features), labels), labels.numel())
    return totals[0] / totals[1]
def train_epoch_ch3(net, train_iter, loss, updater):
    """Train `net` for one epoch; return (average loss, accuracy).

    `updater` is either a torch.optim.Optimizer or a custom callable
    taking the batch size (the from-scratch SGD variant).
    """
    if isinstance(net, torch.nn.Module):
        net.train()
    # Slots: summed loss, number of correct predictions, number of samples.
    metric = Accumulator(3)
    for x, y in train_iter:
        y_hat = net(x)
        l = loss(y_hat, y)
        if isinstance(updater, torch.optim.Optimizer):
            updater.zero_grad()
            l.backward()
            updater.step()
            # l is the batch mean (CrossEntropyLoss default), so scale by
            # the batch size to accumulate a sum.
            # FIX: use y.numel() directly instead of the obscure
            # y.size().numel() — same value, consistent with the branch below.
            metric.add(float(l) * len(y), accuracy(y_hat, y), y.numel())
        else:
            # From-scratch updater: l is assumed to hold per-sample losses.
            l.sum().backward()
            updater(x.shape[0])
            metric.add(float(l.sum()), accuracy(y_hat, y), y.numel())
    # Per-sample average loss and accuracy for the epoch.
    return metric[0] / metric[2], metric[1] / metric[2]
class Animator:
    """Incrementally redraw training curves in a notebook (display helper)."""
    def __init__(self, xlabel=None, ylabel=None, legend=None, xlim=None,
                 ylim=None, xscale='linear', yscale='linear',
                 fmts=('-', 'm--', 'g-.', 'r:'), nrows=1, ncols=1,
                 figsize=(3.5, 2.5)):
        if legend is None:
            legend = []
        d2l.use_svg_display()
        self.fig, self.axes = d2l.plt.subplots(nrows, ncols, figsize=figsize)
        if nrows * ncols == 1:
            self.axes = [self.axes, ]
        # Capture axis configuration so it can be re-applied after each cla().
        self.config_axes = lambda: d2l.set_axes(
            self.axes[0], xlabel, ylabel, xlim, ylim, xscale, yscale, legend)
        self.x, self.y, self.fmts = None, None, fmts

    def add(self, x, y):
        """Append one data point per curve and redraw the figure.

        FIX: the original tested hasattr(y, "_len_") (single underscores),
        which is False even for lists/tuples — a tuple of several curve
        values got wrapped into one nested list, so only a single curve was
        tracked. The special method is spelled "__len__".
        """
        if not hasattr(y, "__len__"):
            y = [y]
        n = len(y)
        if not hasattr(x, "__len__"):
            x = [x] * n
        if not self.x:
            self.x = [[] for _ in range(n)]
        if not self.y:
            self.y = [[] for _ in range(n)]
        for i, (a, b) in enumerate(zip(x, y)):
            if a is not None and b is not None:
                self.x[i].append(a)
                self.y[i].append(b)
        self.axes[0].cla()
        for x, y, fmt in zip(self.x, self.y, self.fmts):
            self.axes[0].plot(x, y, fmt)
        self.config_axes()
        display.display(self.fig)
        display.clear_output(wait=True)
def train_ch3(net, train_iter, test_iter, loss, num_epochs, updater):
    """Run the full training loop, plotting metrics after every epoch."""
    animator = Animator(xlabel='epoch', xlim=[1, num_epochs], ylim=[0.3, 0.9],
                        legend=['trans loss', 'trans acc', 'test acc'])
    for epoch in range(num_epochs):
        train_metrics = train_epoch_ch3(net, train_iter, loss, updater)
        test_acc = evaluate_accuracy(net, test_iter)
        animator.add(epoch + 1, train_metrics + (test_acc,))
    train_loss, train_acc = train_metrics
    # Sanity checks on the final epoch's numbers.
    assert train_loss < 0.5, train_loss
    assert 0.7 < train_acc <= 1, train_acc
    assert 0.7 < test_acc <= 1, test_acc
# Hyperparameters: 10 epochs, learning rate 0.1.
num_epochs,lr=10,0.1
# Plain SGD over the manually defined parameters.
updater=torch.optim.SGD(params,lr=lr)
# NOTE(review): this calls d2l's packaged train_ch3, not the train_ch3
# defined above — presumably equivalent, but confirm which one is intended.
d2l.train_ch3(net,train_iter,test_iter,loss,num_epochs,updater)
不知道为啥train loss一开始突破天际了,但是老师视频里面是正常的,有人说训练更多次数就可以了。
更可能的原因是权重直接用torch.randn(std=1)初始化,初始值过大导致开始时损失爆炸;像简洁实现那样用std=0.01的小初始化即可避免。也可能是数据集版本不一样之类的?
但是感知机思路是这样子的没错
简单实现是把net和relu那两个函数改一改
# Concise version: the same MLP built from framework layers.
net = nn.Sequential(nn.Flatten(), nn.Linear(784, 256), nn.ReLU(),
                    nn.Linear(256, 10))

def init_weight(m):
    """Initialize every Linear layer's weights from N(0, 0.01^2)."""
    if type(m) == nn.Linear:
        # FIX: the original called nn.init.normall_ (double 'l'), which does
        # not exist and raises AttributeError at runtime.
        nn.init.normal_(m.weight, std=0.01)

net.apply(init_weight)