import torch

# Training data: a one-feature binary-classification toy set
x_data = torch.Tensor([[1.0], [2.0], [3.0]])
y_data = torch.Tensor([[0.0], [0.0], [1.0]])

class LogisticRegressionModel(torch.nn.Module):
    def __init__(self):
        super(LogisticRegressionModel, self).__init__()
        self.linear = torch.nn.Linear(1, 1)

    def forward(self, x):
        # torch.sigmoid replaces the deprecated F.sigmoid
        y_pred = torch.sigmoid(self.linear(x))
        return y_pred

model = LogisticRegressionModel()

# reduction='sum' replaces the deprecated size_average=False
criterion = torch.nn.BCELoss(reduction='sum')
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)

for epoch in range(1000):
    y_pred = model(x_data)
    loss = criterion(y_pred, y_data)
    print(epoch, loss.item())

    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
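
Once trained, the model maps any input to a probability of the positive class. A minimal sketch of querying it (the test value 4.0 is an assumed example, not from the original):

x_test = torch.Tensor([[4.0]])
with torch.no_grad():
    y_test = model(x_test)
# Probability that x = 4.0 belongs to class 1
print('P(y=1 | x=4.0) =', y_test.item())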




import numpy as np
import torch

# Load the diabetes dataset: 8 feature columns, last column is the 0/1 label
xy = np.loadtxt('diabetes.csv.gz', delimiter=',', dtype=np.float32)
x_data = torch.from_numpy(xy[:, :-1])
y_data = torch.from_numpy(xy[:, [-1]])

class Model(torch.nn.Module):
    def __init__(self):
        super(Model, self).__init__()
        # Three fully connected layers: 8 -> 6 -> 4 -> 1
        self.linear1 = torch.nn.Linear(8, 6)
        self.linear2 = torch.nn.Linear(6, 4)
        self.linear3 = torch.nn.Linear(4, 1)
        self.sigmoid = torch.nn.Sigmoid()
        # Other activation functions can be tried for the hidden layers,
        # for example: self.activate = torch.nn.ReLU()

    def forward(self, x):
        x = self.sigmoid(self.linear1(x))
        x = self.sigmoid(self.linear2(x))
        x = self.sigmoid(self.linear3(x))
        return x

model = Model()

# Construct loss and optimizer
# reduction='mean' replaces the deprecated size_average=True
criterion = torch.nn.BCELoss(reduction='mean')
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)

# Training cycle
for epoch in range(100):
    # Forward pass; note this program does not use mini-batches:
    # the whole dataset is fed in at once (a mini-batch variant is
    # sketched after this program)
    y_pred = model(x_data)
    loss = criterion(y_pred, y_data)
    print(epoch, loss.item())

    # Backward pass
    optimizer.zero_grad()
    loss.backward()

    # Update parameters
    optimizer.step()
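
As the comment notes, every epoch trains on the full dataset. A mini-batch variant is a common refinement; this is a minimal sketch reusing model, criterion, and optimizer from above (the batch size of 32 is an assumed choice, not from the original):

from torch.utils.data import TensorDataset, DataLoader

# Wrap the tensors in a dataset and iterate over shuffled mini-batches
dataset = TensorDataset(x_data, y_data)
loader = DataLoader(dataset, batch_size=32, shuffle=True)

for epoch in range(100):
    for x_batch, y_batch in loader:
        y_pred = model(x_batch)
        loss = criterion(y_pred, y_batch)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()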

Run output (one line per epoch: epoch index and loss):
0 0.8402661681175232
1 0.8195154070854187
2 0.8008943200111389
3 0.784209668636322
4 0.7692796587944031
5 0.7559344172477722
6 0.7440169453620911
7 0.7333827018737793
8 0.723899245262146
9 0.7154463529586792
10 0.7079148292541504
11 0.7012061476707458
12 0.6952313780784607
13 0.6899109482765198
14 0.6851732730865479
15 0.6809546947479248
16 0.6771978735923767
17 0.6738520860671997
18 0.6708719730377197
19 0.6682170629501343
20 0.6658517122268677
21 0.6637434363365173
22 0.6618644595146179
23 0.6601890921592712
24 0.6586951017379761
25 0.6573625206947327
26 0.656173586845398
27 0.655112624168396
28 0.6541655659675598
29 0.6533200144767761
30 0.6525649428367615
31 0.6518905162811279
32 0.6512879133224487
33 0.6507494449615479
34 0.6502680778503418
35 0.6498377919197083
36 0.6494529247283936
37 0.6491087079048157
38 0.648800790309906
39 0.6485252976417542
40 0.6482785940170288
41 0.6480578184127808
42 0.6478601098060608
43 0.6476830840110779
44 0.6475244760513306
45 0.6473823189735413
46 0.6472548842430115
47 0.6471408605575562
48 0.6470383405685425
49 0.6469465494155884
50 0.6468641757965088
51 0.6467902064323425
52 0.6467238664627075
53 0.6466642618179321
54 0.646610677242279
55 0.6465626358985901
56 0.6465193629264832
57 0.6464804410934448
58 0.6464454531669617
59 0.6464139819145203
60 0.6463856101036072
61 0.6463600397109985
62 0.6463369727134705
63 0.6463161706924438
64 0.6462973952293396
65 0.6462804675102234
66 0.6462650299072266
67 0.6462511420249939
68 0.6462385058403015
69 0.6462270617485046
70 0.6462166905403137
71 0.6462072730064392
72 0.6461986303329468
73 0.6461907625198364
74 0.6461836099624634
75 0.6461770534515381
76 0.6461710333824158
77 0.6461654305458069
78 0.646160364151001
79 0.646155595779419
80 0.6461513638496399
81 0.6461472511291504
82 0.6461434364318848
83 0.6461400389671326
84 0.6461367011070251
85 0.6461336016654968
86 0.6461307406425476
87 0.6461280584335327
88 0.6461254954338074
89 0.6461230516433716
90 0.6461207866668701
91 0.6461185216903687
92 0.6461164355278015
93 0.6461144089698792
94 0.6461124420166016
95 0.6461105942726135
96 0.6461087465286255
97 0.646107017993927
98 0.6461053490638733
99 0.6461036205291748
Process finished with exit code 0
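
The loss drops quickly over the first ~30 epochs and then plateaus around 0.646, so the model stops improving under these settings. To see what that loss means as a classifier, the predicted probabilities can be thresholded at 0.5 and compared with the labels. A minimal sketch reusing model, x_data, and y_data from above (not part of the original program):

# Evaluate training-set accuracy by thresholding probabilities at 0.5
with torch.no_grad():
    y_prob = model(x_data)
    y_label = (y_prob >= 0.5).float()
    accuracy = (y_label == y_data).float().mean().item()
print('training accuracy:', accuracy)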
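The code comments suggest trying other activation functions in the hidden layers. A minimal sketch of a ReLU variant, keeping sigmoid on the output layer so that BCELoss still receives probabilities in (0, 1) (the layer sizes match the model above; the variant itself is illustrative, not from the original):

import torch

class ReLUModel(torch.nn.Module):
    def __init__(self):
        super(ReLUModel, self).__init__()
        self.linear1 = torch.nn.Linear(8, 6)
        self.linear2 = torch.nn.Linear(6, 4)
        self.linear3 = torch.nn.Linear(4, 1)
        self.activate = torch.nn.ReLU()
        self.sigmoid = torch.nn.Sigmoid()

    def forward(self, x):
        # ReLU in the hidden layers, sigmoid only on the output
        x = self.activate(self.linear1(x))
        x = self.activate(self.linear2(x))
        x = self.sigmoid(self.linear3(x))
        return x

model = ReLUModel()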




