import numpy
import torch
import matplotlib.pyplot as plt
import numpy as np
# Sanity check: show which PyTorch build is in use.
print(torch.__version__)
# Generate training data
def GenerateSamples(n, offset=2.0):
    """Generate a linearly separable 2-D binary classification data set.

    Args:
        n: number of samples per class (total is 2*n).
        offset: distance of each class center from the origin along both
            axes (default 2.0, matching the original hard-coded value).

    Returns:
        x: (2, 2*n) float tensor of features; the first n columns belong
            to the positive class, the last n to the negative class.
        y: (2*n,) float tensor of labels — 1.0 then 0.0, matching x's columns.
    """
    x1 = torch.randn((2, n)) + offset  # positive class around (+offset, +offset)
    x2 = torch.randn((2, n)) - offset  # negative class around (-offset, -offset)
    x = torch.cat((x1, x2), dim=1)
    y = torch.cat((torch.ones(n), torch.zeros(n)))
    return x, y
# Draw a training set and visualize the two classes in the plane.
X, Y = GenerateSamples(30)
print(X.shape)
pos_mask = Y == 1
neg_mask = Y == 0
plt.plot(X[0, pos_mask], X[1, pos_mask], 'r+')  # positive class: red crosses
plt.plot(X[0, neg_mask], X[1, neg_mask], 'bo')  # negative class: blue dots
plt.show()
# Define the model and train it
class Perceptron():
    """Logistic-regression classifier (a sigmoid "perceptron") trained by
    full-batch gradient ascent on the Bernoulli log-likelihood.

    Inputs are expected as columns: x has shape (2, n).

    Attributes:
        w: (1, 2) weight row vector, initialized to zeros.
        b: (1,) bias, initialized to zero.
    """

    def __init__(self):
        self.w = torch.zeros((1, 2))
        self.b = torch.zeros(1)

    def __transfer__(self, x):
        """Linear score z = w @ x + b; x is (2, n), result is (1, n)."""
        return self.w @ x + self.b

    def __update__(self, dw, db, lr):
        """Gradient-ascent step (dw/db already point uphill on likelihood)."""
        self.w = self.w + lr * dw
        self.b = self.b + lr * db

    def __calc_loss__(self, Y, rho):
        """Mean binary cross-entropy over the batch.

        rho is clamped away from exactly 0/1 so a perfectly confident
        prediction cannot produce log(0) = -inf.
        """
        eps = 1e-12
        rho = torch.clamp(rho, eps, 1.0 - eps)
        loss = -torch.log(rho[Y == 1]).sum() - torch.log(1 - rho[Y == 0]).sum()
        return loss / Y.shape[0]

    def __backward__(self, X, Y, rho):
        """Likelihood gradient w.r.t. w and b for batch X (shape (2, n)).

        Bug fix: the original read the module-level global ``X`` instead of
        the batch passed to ``fit``; the batch is now an explicit argument.
        """
        err = Y - rho                 # (n,) residuals; positive => push score up
        dw = err @ X.T / Y.shape[0]   # (2,) mean gradient, broadcasts onto (1, 2) w
        db = err.mean()
        return dw, db

    def predict(self, x):
        """Return P(y=1 | x) for inputs x of shape (2, n); result is (1, n)."""
        z = self.__transfer__(x)
        return torch.sigmoid(z)

    def fit(self, X, Y, max_iter=500, lr=0.1, verbose=True):
        """Train with full-batch gradient ascent.

        Args:
            X: (2, n) feature tensor (one sample per column).
            Y: (n,) 0/1 label tensor.
            max_iter: number of gradient steps.
            lr: learning rate.
            verbose: print the loss each iteration (original behavior).
        """
        for it in range(max_iter):
            rho = self.predict(X).squeeze()
            if verbose:
                loss = self.__calc_loss__(Y, rho)
                print('iter=', it, ',loss=', loss.item())
            dw, db = self.__backward__(X, Y, rho)
            self.__update__(dw, db, lr)
# Train the model and KEEP the fitted instance.
# Bug fix: the original called Perceptron().fit(...) and threw the trained
# model away, then evaluated a brand-new UNTRAINED Perceptron() on the test
# set — the reported accuracy was that of zero weights, not of training.
model = Perceptron()
model.fit(X, Y, 500, 0.1)
# Evaluate the fitted model on a fresh held-out sample.
X_test, Y_test = GenerateSamples(20)
Y_hat = torch.where(model.predict(X_test).squeeze() > 0.5, 1, 0)
acc = torch.mean((Y_hat == Y_test).to(torch.float32)).item()
print("测试集精度=", acc)