nn.TrainOneStepCell 的用法:它将「前向网络 + 损失函数」与优化器封装在一起,每次调用只执行一个训练 step。
使用 MindSpore 自带的数据集形式
import mindspore.dataset as ds
import numpy as np
import mindspore.nn as nn
from mindspore.common.initializer import Normal
class LinearNet(nn.Cell):
    """Single-neuron linear model: one Dense(1 -> 1) layer.

    Both the weight and the bias are initialized from Normal(0.02).
    """

    def __init__(self):
        super().__init__()
        # One input feature -> one output; Normal(0.02) init for weight and bias.
        self.fc = nn.Dense(1, 1, Normal(0.02), Normal(0.02))

    def construct(self, x):
        """Forward pass: apply the dense layer to x."""
        output = self.fc(x)
        return output
def get_data(num):
    """Yield `num` (feature, label) pairs for the line y = 2x + 1.

    Each yielded element is a pair of float32 arrays of shape (1,):
    a single feature x drawn uniformly from [0, 1) and its noise-free
    label y = 2*x + 1.  One scalar sample per yield — the original drew
    a whole (num, 1) batch inside the loop and re-wrapped it, producing
    rank-3 (1, num, 1) "samples" that do not match Dense(1, 1).

    Args:
        num: number of samples to generate.

    Yields:
        (x, y): float32 np.ndarrays, each of shape (1,).
    """
    for _ in range(num):
        x = np.random.rand()  # one scalar feature per sample
        y = x * 2 + 1
        yield (np.array([x]).astype(np.float32),
               np.array([y]).astype(np.float32))
def create_dataset(num_data, batch_size=1):
    """Materialize samples from get_data() into a batched GeneratorDataset.

    Args:
        num_data: number of (data, label) samples to generate.
        batch_size: batch size applied to the dataset (default 1).

    Returns:
        A MindSpore dataset with columns 'data' and 'label'.
    """
    samples = list(get_data(num_data))
    return ds.GeneratorDataset(samples, column_names=['data', 'label']).batch(batch_size)
# Build the dataset, the network, and the one-step training cell, then train.
train_dataset = create_dataset(num_data=5)

# Instantiate the forward network.
net = LinearNet()
# Attach the loss function to the forward network.
crit = nn.MSELoss()
net_with_criterion = nn.WithLossCell(net, crit)
# Optimizer over the network's trainable parameters.
opt = nn.Momentum(net.trainable_params(), learning_rate=0.005, momentum=0.9)
# Wrap loss cell + optimizer: one call of train_net == one training step.
train_net = nn.TrainOneStepCell(net_with_criterion, opt)
# Switch the network to training mode.
train_net.set_train()

epochs = 10
for epoch in range(epochs):
    for batch in train_dataset.create_dict_iterator():
        # Each call runs forward + backward + update and returns the loss.
        result = train_net(batch["data"], batch["label"])
        print(result)
5.1566205
3.5033524
3.7321503
3.0552125
2.6895077
2.5464785
2.111021
2.9512393
1.6424942
1.5643072
1.7818321
0.8898671
0.77646303
0.39888993
0.3576911
0.4943265
0.099797666
0.12688538
0.16317035
0.07054947
0.08752558
0.05382672
0.18979765
0.09188278
0.11046805
0.13484702
0.14702769
0.08012586
0.30975002
0.24582061
0.097704545
0.32322106
0.20437805
0.24186614
0.17246039
0.0821461
0.20730102
0.25953925
0.13468163
0.1144672
0.10252868
0.13516097
0.039622754
0.17106774
0.056725293
0.059788816
0.14130223
0.039064743
0.028482627
0.069743
不使用 MindSpore 自带的数据集形式
import numpy as np
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.common.initializer import Normal
import mindspore as ms
def train_data(num):
    """Return `num` samples of the line y = 2x + 1 as a (num, 2) array.

    Column 0 holds features x drawn uniformly from [0, 1);
    column 1 holds the noise-free labels y = 2x + 1.
    """
    features = np.random.rand(num, 1)
    labels = features * 2 + 1
    return np.hstack((features, labels))
class LinearNet(nn.Cell):
    """Minimal linear regression model: a single Dense(1 -> 1) layer.

    Weight and bias are both drawn from a Normal(0.02) initializer.
    """

    def __init__(self):
        super().__init__()
        # Map one input feature to one output value.
        self.fc = nn.Dense(1, 1, Normal(0.02), Normal(0.02))

    def construct(self, x):
        """Forward pass through the dense layer."""
        return self.fc(x)
# Instantiate the forward network.
net = LinearNet()
# Attach the loss function to the forward network.
crit = nn.MSELoss()
net_with_criterion = nn.WithLossCell(net, crit)
# Optimizer over the network's trainable parameters.
opt = nn.Momentum(net.trainable_params(), learning_rate=0.005, momentum=0.9)
# Wrap loss cell + optimizer: one call of train_net == one training step.
train_net = nn.TrainOneStepCell(net_with_criterion, opt)
# Switch the network to training mode.
train_net.set_train()

num = 5
# Bind the generated array to a fresh name: the original rebound
# `train_data`, shadowing the data-generation function above.
samples = train_data(num).reshape(num, 2, 1, 1)

# Run the training loop; each call returns the step's loss value.
epochs = 10
for epoch in range(epochs):
    for data in samples:
        # data[0] is the feature, data[1] the label, each shaped (1, 1).
        result = train_net(Tensor(data[0], dtype=ms.float32),
                           Tensor(data[1], dtype=ms.float32))
        print(result)
1.328676
6.8571067
4.870447
1.20143
2.2300339
0.83028597
4.847509
3.0727391
0.48210886
1.0133752
0.20800468
2.414973
1.2901465
0.041479196
0.21248206
0.000278302
0.84305495
0.31890616
0.04953823
6.864396e-05
0.14955498
0.1885423
0.023113843
0.2585973
0.08018551
0.36805892
0.022249248
0.0053038215
0.41212934
0.16867524
0.4671257
0.0019663011
0.018585904
0.4240828
0.16885154
0.4284188
0.0044514323
0.0084109595
0.33865896
0.11230515
0.32114375
0.02414243
1.9585277e-05
0.22992054
0.053520534
0.21287921
0.06707646
0.011384166
0.1432423
0.01808984