PyTorch Minimal Introduction Tutorial (4): Logistic Regression and Data Processing

import torch
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from torch import nn

data = pd.read_csv("dataset/credit-a.csv", header=None)  # the file has no header row
print(data.info())
# inspect the first 5 rows of the data
print("data.head():\t", data.head())
X = data.iloc[:, :-1]  # every column except the last: the 15 features

Y = data.iloc[:, -1]   # the last column: the label
# For a 1-D array or list, np.unique removes duplicate elements and returns a
# new array of the unique values, sorted in ascending order

print("Y.unique:\t", np.unique(Y))  # Y.unique() returns the same unique values
# .values gives the underlying NumPy array of a DataFrame/Series
X = torch.from_numpy(X.values).type(torch.float32)
print("X.shape:\t", X.shape)
print("X.size():\t", X.size())

# BCELoss expects targets in {0, 1}, so map the -1 labels to 0 first
Y = Y.replace(-1, 0)
# in torch, dtype conversion is done with .type(); reshape to (N, 1) so the
# target's shape matches the model output
Y = torch.from_numpy(Y.values.reshape(-1, 1)).type(torch.float32)

# Sequential() stacks multiple layers to be run in order, suited to simple feed-forward models
model = nn.Sequential(
    nn.Linear(15, 1),  # 15 input features -> 1 output
    nn.Sigmoid()
)
print("model", model)
# binary cross-entropy loss
loss_fn = nn.BCELoss()
# optimizer
opt = torch.optim.Adam(model.parameters(), lr=0.0001)
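# Side note: nn.BCEWithLogitsLoss fuses the Sigmoid and BCELoss into one
# numerically more stable step; a sketch of the equivalent setup:
# model = nn.Sequential(nn.Linear(15, 1))  # no Sigmoid layer here
# loss_fn = nn.BCEWithLogitsLoss()         # sigmoid applied inside the loss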

# Training
"""
Mini-batch training: the weights are updated once per batch of 16 samples.
"""
batches = 16                   # batch size
no_of_batch = 653 // batches   # number of full batches covering the dataset
epoches = 1000                 # one epoch = one full pass over the dataset
for epoch in range(epoches):     # train for 1000 epochs
    for i in range(no_of_batch): # loop over the batches
        start = i * batches      # i = 0, 1, 2, 3  ->  start = 0, 16, 32, 48
        end = start + batches    # end = 16, 32, 48, 64
        x = X[start: end]
        y = Y[start: end]
        y_pred = model(x)
        loss = loss_fn(y_pred, y)   # signature: forward(self, input, target)
        opt.zero_grad()             # zero every gradient held by the optimizer
        loss.backward()             # backpropagate to compute each parameter's gradient
        opt.step()                  # update every parameter
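# The slicing above drops the last 653 - 40*16 = 13 samples each epoch and
# never shuffles. The idiomatic alternative is TensorDataset + DataLoader;
# a sketch (uncomment the loop to use it instead of the manual one above):
from torch.utils.data import TensorDataset, DataLoader
loader = DataLoader(TensorDataset(X, Y), batch_size=batches, shuffle=True)
# for epoch in range(epoches):
#     for x, y in loader:
#         y_pred = model(x)
#         loss = loss_fn(y_pred, y)
#         opt.zero_grad()
#         loss.backward()
#         opt.step()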

# the learned parameters implement sigmoid(w1*x1 + w2*x2 + ... + w15*x15 + b)
print("model.state_dict():\t", model.state_dict())
#astype("int") 转换成int型
predict = ((model(x).data.numpy() > 0.5).astype("int") == Y.numpy()).mean()
print("predict:\t", predict)
Output:

<class 'pandas.core.frame.DataFrame'>
RangeIndex: 653 entries, 0 to 652
Data columns (total 16 columns):
 #   Column  Non-Null Count  Dtype  
---  ------  --------------  -----  
 0   0       653 non-null    int64  
 1   1       653 non-null    float64
 2   2       653 non-null    float64
 3   3       653 non-null    int64  
 4   4       653 non-null    int64  
 5   5       653 non-null    int64  
 6   6       653 non-null    int64  
 7   7       653 non-null    float64
 8   8       653 non-null    int64  
 9   9       653 non-null    int64  
 10  10      653 non-null    int64  
 11  11      653 non-null    int64  
 12  12      653 non-null    int64  
 13  13      653 non-null    int64  
 14  14      653 non-null    float64
 15  15      653 non-null    int64  
dtypes: float64(4), int64(12)
memory usage: 81.8 KB
None
data.head():	    0      1      2   3   4   5   6     7   8   9   10  11  12   13     14  15
0   0  30.83  0.000   0   0   9   0  1.25   0   0   1   1   0  202    0.0  -1
1   1  58.67  4.460   0   0   8   1  3.04   0   0   6   1   0   43  560.0  -1
2   1  24.50  0.500   0   0   8   1  1.50   0   1   0   1   0  280  824.0  -1
3   0  27.83  1.540   0   0   9   0  3.75   0   0   5   0   0  100    3.0  -1
4   0  20.17  5.625   0   0   9   0  1.71   0   1   0   1   2  120    0.0  -1
Y.unique:	 [-1  1]
X.shape:	 torch.Size([653, 15])
X.size():	 torch.Size([653, 15])
model Sequential(
  (0): Linear(in_features=15, out_features=1, bias=True)
  (1): Sigmoid()
)
model.state_dict():	 OrderedDict([('0.weight', tensor([[ 0.1030, -0.0765, -0.2319,  0.5685,  0.5918, -0.0065,  0.5697, -0.9154,
          2.0562,  0.7506, -1.4317,  0.5080,  0.0578,  0.0038, -0.0282]])), ('0.bias', tensor([0.1462]))])
predict:	 0.20501531393568148