import torch
import numpy as np
import re
import torch. nn as nn
import torch. optim as optim
Load the data
# Load the Boston housing dataset: whitespace-separated rows of 14 floats
# (13 features, plus the median home value as the last column).
house_data = []
with open("./housing.data") as f:
    data = f.readlines()
for line in data:
    # str.split() with no argument already splits on any run of whitespace
    # and drops leading/trailing blanks — no regex preprocessing needed.
    house_data.append(line.split())
house_data = np.array(house_data).astype(np.float32)
house_data, house_data.shape
(array([[6.3200e-03, 1.8000e+01, 2.3100e+00, ..., 3.9690e+02, 4.9800e+00,
2.4000e+01],
[2.7310e-02, 0.0000e+00, 7.0700e+00, ..., 3.9690e+02, 9.1400e+00,
2.1600e+01],
[2.7290e-02, 0.0000e+00, 7.0700e+00, ..., 3.9283e+02, 4.0300e+00,
3.4700e+01],
...,
[6.0760e-02, 0.0000e+00, 1.1930e+01, ..., 3.9690e+02, 5.6400e+00,
2.3900e+01],
[1.0959e-01, 0.0000e+00, 1.1930e+01, ..., 3.9345e+02, 6.4800e+00,
2.2000e+01],
[4.7410e-02, 0.0000e+00, 1.1930e+01, ..., 3.9690e+02, 7.8800e+00,
1.1900e+01]], dtype=float32),
(506, 14))
Split into training and test sets
# Hold out the final 10 rows for testing; the first 496 are for training.
x = house_data[:, :-1]  # 13 feature columns
y = house_data[:, -1]   # target: median home value
x_train = x[:496]
y_train = y[:496]
x_test = x[496:]
y_test = y[496:]
x_test.shape, y_test
((10, 13),
array([19.7, 18.3, 21.2, 17.5, 16.8, 22.4, 20.6, 23.9, 22. , 11.9],
dtype=float32))
Build the model
# Prefer the GPU when one is available; otherwise fall back to the CPU.
use_cuda = torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
class Net(nn.Module):
    """Two-layer MLP for regression: Linear -> ReLU -> Linear.

    Args:
        inputs: number of input features per sample.
        outputs: number of regression targets (1 for this dataset).
    """

    def __init__(self, inputs, outputs):
        super(Net, self).__init__()
        self.hidden = nn.Linear(inputs, 100)
        self.predict = nn.Linear(100, outputs)

    def forward(self, x):
        # BUG FIX: the nonlinearity belongs BETWEEN the two linear layers.
        # The original applied relu to the final output, which (a) left the
        # network a purely affine map (two stacked Linears with no activation
        # in between) and (b) clamped every prediction to >= 0.
        x = torch.relu(self.hidden(x))
        out = self.predict(x)
        return out
# Instantiate the 13-feature -> 1-target network and place it on the
# selected device (nn.Module.to returns the module itself, so this chains).
model = Net(13, 1).to(device)
model
Net(
(hidden): Linear(in_features=13, out_features=100, bias=True)
(predict): Linear(in_features=100, out_features=1, bias=True)
)
Define the loss function and optimizer
# Mean-squared error — the standard criterion for scalar regression.
loss_func = nn.MSELoss()
# NOTE(review): rebinding `optim` shadows the `torch.optim` module imported
# at the top of the file, so `optim.Adam` is unreachable after this line.
# Renaming to `optimizer` would be cleaner, but the later training cells
# call `optim.zero_grad()` / `optim.step()` on this object, so the name is
# kept for compatibility.
optim = optim.Adam(model.parameters(), lr=0.00001)
Feed the data into the model and train
# Train for `epochs` full-batch passes, evaluating on the hold-out split and
# logging both losses every 10 epochs.
epochs = 1000
acc_loss = []   # per-epoch training loss (detached tensors)
test_loss = []  # per-epoch hold-out loss

# The datasets never change, so convert them to device tensors ONCE instead
# of re-allocating on every iteration (the original rebuilt all four tensors
# inside the loop).
x_train_t = torch.tensor(x_train, dtype=torch.float32).to(device)
y_train_t = torch.tensor(y_train, dtype=torch.float32).to(device)
x_test_t = torch.tensor(x_test, dtype=torch.float32).to(device)
y_test_t = torch.tensor(y_test, dtype=torch.float32).to(device)

for i in range(epochs):
    # --- training step (full batch) ---
    pred = torch.squeeze(model(x_train_t))
    loss = loss_func(pred, y_train_t)
    # detach() so the stored history does not keep every epoch's autograd
    # graph alive (otherwise memory grows without bound over training).
    acc_loss.append(loss.detach())
    optim.zero_grad()
    loss.backward()
    optim.step()

    # --- evaluation on the hold-out split ---
    with torch.no_grad():  # no gradient bookkeeping needed for evaluation
        pred = torch.squeeze(model(x_test_t))
        loss_test = loss_func(pred, y_test_t)
    test_loss.append(loss_test)

    if (i + 1) % 10 == 0:
        # Use the `epochs` variable rather than the hard-coded /1000.
        print(f"epochs[{i+1}/{epochs}],loss:{loss.item():.4f},test_loss:{loss_test.item():.4f}")
epochs[10/1000],loss:595.3217,test_loss:388.3850
epochs[20/1000],loss:589.6299,test_loss:388.3850
epochs[30/1000],loss:585.1714,test_loss:388.3850
epochs[40/1000],loss:581.7561,test_loss:388.3850
epochs[50/1000],loss:579.0131,test_loss:388.3850
epochs[60/1000],loss:576.6849,test_loss:388.3850
epochs[70/1000],loss:574.5705,test_loss:388.3850
epochs[80/1000],loss:572.6195,test_loss:388.3850
epochs[90/1000],loss:570.7507,test_loss:388.3850
epochs[100/1000],loss:568.9516,test_loss:388.3850
epochs[110/1000],loss:567.2042,test_loss:388.3850
epochs[120/1000],loss:565.4830,test_loss:388.3850
epochs[130/1000],loss:563.7838,test_loss:388.3850
epochs[140/1000],loss:562.0828,test_loss:388.3850
epochs[150/1000],loss:560.3568,test_loss:388.3850
epochs[160/1000],loss:558.5772,test_loss:388.3850
epochs[170/1000],loss:556.4716,test_loss:388.3850
epochs[180/1000],loss:553.7165,test_loss:388.3850
epochs[190/1000],loss:549.7405,test_loss:388.3850
epochs[200/1000],loss:544.6332,test_loss:382.1937
epochs[210/1000],loss:538.4864,test_loss:357.3943
epochs[220/1000],loss:531.9700,test_loss:335.0782
epochs[230/1000],loss:525.4945,test_loss:317.0957
epochs[240/1000],loss:514.0722,test_loss:296.1143
epochs[250/1000],loss:493.4310,test_loss:255.5118
epochs[260/1000],loss:459.2870,test_loss:182.6697
epochs[270/1000],loss:425.1219,test_loss:126.4568
epochs[280/1000],loss:392.8506,test_loss:88.6110
epochs[290/1000],loss:367.0310,test_loss:67.3971
epochs[300/1000],loss:345.1929,test_loss:56.1764
epochs[310/1000],loss:325.9217,test_loss:49.6663
epochs[320/1000],loss:308.6973,test_loss:45.1484
epochs[330/1000],loss:293.1958,test_loss:41.4564
epochs[340/1000],loss:279.1291,test_loss:38.1813
epochs[350/1000],loss:266.2664,test_loss:35.2274
epochs[360/1000],loss:254.4335,test_loss:32.5905
epochs[370/1000],loss:243.4969,test_loss:30.2693
epochs[380/1000],loss:233.3503,test_loss:28.2452
epochs[390/1000],loss:223.9073,test_loss:26.4883
epochs[400/1000],loss:215.0960,test_loss:24.9657
epochs[410/1000],loss:206.8561,test_loss:23.6476
epochs[420/1000],loss:199.1365,test_loss:22.5092
epochs[430/1000],loss:191.8931,test_loss:21.5302
epochs[440/1000],loss:185.0880,test_loss:20.6939
epochs[450/1000],loss:178.6874,test_loss:19.9862
epochs[460/1000],loss:172.6620,test_loss:19.3950
epochs[470/1000],loss:166.9856,test_loss:18.9096
epochs[480/1000],loss:161.6345,test_loss:18.5206
epochs[490/1000],loss:156.5875,test_loss:18.2194
epochs[500/1000],loss:151.8254,test_loss:17.9985
epochs[510/1000],loss:147.3306,test_loss:17.8510
epochs[520/1000],loss:143.0870,test_loss:17.7706
epochs[530/1000],loss:139.0799,test_loss:17.7516
epochs[540/1000],loss:135.2956,test_loss:17.7887
epochs[550/1000],loss:131.7216,test_loss:17.8773
epochs[560/1000],loss:128.3459,test_loss:18.0127
epochs[570/1000],loss:125.1577,test_loss:18.1910
epochs[580/1000],loss:122.1468,test_loss:18.4083
epochs[590/1000],loss:119.3036,test_loss:18.6610
epochs[600/1000],loss:116.6190,test_loss:18.9459
epochs[610/1000],loss:114.0846,test_loss:19.2597
epochs[620/1000],loss:111.6925,test_loss:19.5998
epochs[630/1000],loss:109.4350,test_loss:19.9631
epochs[640/1000],loss:107.3052,test_loss:20.3473
epochs[650/1000],loss:105.2962,test_loss:20.7500
epochs[660/1000],loss:103.4017,test_loss:21.1689
epochs[670/1000],loss:101.6156,test_loss:21.6019
epochs[680/1000],loss:99.9322,test_loss:22.0470
epochs[690/1000],loss:98.3460,test_loss:22.5024
epochs[700/1000],loss:96.8519,test_loss:22.9662
epochs[710/1000],loss:95.4449,test_loss:23.4369
epochs[720/1000],loss:94.1203,test_loss:23.9129
epochs[730/1000],loss:92.8736,test_loss:24.3927
epochs[740/1000],loss:91.7008,test_loss:24.8750
epochs[750/1000],loss:90.5976,test_loss:25.3585
epochs[760/1000],loss:89.5603,test_loss:25.8420
epochs[770/1000],loss:88.5853,test_loss:26.3245
epochs[780/1000],loss:87.6689,test_loss:26.8049
epochs[790/1000],loss:86.8080,test_loss:27.2822
epochs[800/1000],loss:85.9993,test_loss:27.7556
epochs[810/1000],loss:85.2399,test_loss:28.2242
epochs[820/1000],loss:84.5269,test_loss:28.6872
epochs[830/1000],loss:83.8577,test_loss:29.1441
epochs[840/1000],loss:83.2296,test_loss:29.5940
epochs[850/1000],loss:82.6402,test_loss:30.0365
epochs[860/1000],loss:82.0872,test_loss:30.4710
epochs[870/1000],loss:81.5683,test_loss:30.8970
epochs[880/1000],loss:81.0816,test_loss:31.3142
epochs[890/1000],loss:80.6249,test_loss:31.7221
epochs[900/1000],loss:80.1909,test_loss:32.0994
epochs[910/1000],loss:79.7756,test_loss:32.4198
epochs[920/1000],loss:79.3841,test_loss:32.7463
epochs[930/1000],loss:79.0159,test_loss:33.0976
epochs[940/1000],loss:78.6698,test_loss:33.4625
epochs[950/1000],loss:78.3446,test_loss:33.8245
epochs[960/1000],loss:78.0390,test_loss:34.1734
epochs[970/1000],loss:77.7517,test_loss:34.5071
epochs[980/1000],loss:77.4815,test_loss:34.8269
epochs[990/1000],loss:77.2273,test_loss:35.1353
epochs[1000/1000],loss:76.9880,test_loss:35.4333
Plot the loss curves
import matplotlib.pyplot as plt


def _to_host(values):
    # Detach each recorded loss from autograd and move it to the CPU so it
    # can be converted to a plain numpy value for plotting.
    return [v.cpu().detach().numpy() for v in values]


test_loss = _to_host(test_loss)
acc_loss = _to_host(acc_loss)
Save the model and run inference
import os

# BUG FIX: exist_ok=True — re-running this cell must not crash just because
# the checkpoint directory already exists (bare makedirs raises FileExistsError).
os.makedirs("checkpoints/regression", exist_ok=True)

# NOTE(review): this pickles the entire model object; saving
# model.state_dict() is the more portable convention, but the load below
# expects the full object, so the format is kept.
torch.save(model, "checkpoints/regression/simple_model")
model = torch.load("checkpoints/regression/simple_model")

# Sanity-check the reloaded model on the hold-out split.
x_data = torch.tensor(x_test, dtype=torch.float32).to(device)
y_data = torch.tensor(y_test, dtype=torch.float32).to(device)
with torch.no_grad():  # inference only — no gradients needed
    pred = torch.squeeze(model(x_data))
    loss_test = loss_func(pred, y_data)
print(f"loss_test:{loss_test}")
loss_test:35.433345794677734