```
for epoch in range(n_epochs):
    tr_loss = 0.0
    for step, batch in enumerate(trnloader):
        # ... forward pass, loss, backward pass, optimizer step ...
        tr_loss += loss.item()  # accumulate the per-batch mean losses
    epoch_loss = tr_loss / len(trnloader)  # average over the batches
```
> Link: https://www.zhihu.com/question/335724798/answer/1369014657
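The snippet above elides the forward and backward passes. A minimal runnable sketch of the same accumulate-and-average pattern; the toy data, model, and optimizer here are assumptions for illustration:

```
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset

# Hypothetical setup: a toy regression problem and a linear model.
X, y = torch.randn(256, 10), torch.randn(256, 1)
trnloader = DataLoader(TensorDataset(X, y), batch_size=32)
model = nn.Linear(10, 1)
criterion = nn.MSELoss()  # default reduction='mean'
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)

n_epochs = 5
for epoch in range(n_epochs):
    tr_loss = 0.0
    for step, batch in enumerate(trnloader):
        inputs, targets = batch
        optimizer.zero_grad()
        loss = criterion(model(inputs), targets)
        loss.backward()
        optimizer.step()
        # .item() returns a Python float, so the accumulator
        # does not keep the computation graph alive
        tr_loss += loss.item()
    # average of the per-batch mean losses
    epoch_loss = tr_loss / len(trnloader)
    print(f"epoch {epoch + 1}: loss = {epoch_loss:.6f}")
```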
```
for epoch in range(epochs):
    loss = 0
    for batch_features, _ in train_loader:
        # reshape mini-batch data to [N, 784] matrix
        # load it to the active device
        batch_features = batch_features.view(-1, 784).to(device)

        # reset the gradients back to zero;
        # PyTorch accumulates gradients on subsequent backward passes
        optimizer.zero_grad()

        # compute reconstructions
        outputs = model(batch_features)

        # compute training reconstruction loss
        train_loss = criterion(outputs, batch_features)

        # compute accumulated gradients
        train_loss.backward()

        # perform parameter update based on current gradients
        optimizer.step()

        # add the mini-batch training loss to epoch loss
        loss += train_loss.item()

    # compute the epoch training loss
    loss = loss / len(train_loader)

    # display the epoch training loss
    print("epoch : {}/{}, loss = {:.6f}".format(epoch + 1, epochs, loss))
```
Source: https://medium.com/pytorch/implementing-an-autoencoder-in-pytorch-19baa22647d1
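The loop assumes `epochs`, `device`, `model`, `criterion`, `optimizer`, and `train_loader` are already defined. A plausible setup in the spirit of the linked MNIST autoencoder tutorial; the layer sizes, activations, and hyperparameters here are assumptions, not the tutorial's exact model:

```
import torch
from torch import nn, optim
from torchvision import datasets, transforms

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Hypothetical autoencoder over flattened 28x28 MNIST images.
model = nn.Sequential(
    nn.Linear(784, 128), nn.ReLU(),  # encoder
    nn.Linear(128, 784), nn.ReLU(),  # decoder
).to(device)

criterion = nn.MSELoss()
optimizer = optim.Adam(model.parameters(), lr=1e-3)
epochs = 10

train_dataset = datasets.MNIST(
    root="~/torch_datasets", train=True,
    transform=transforms.ToTensor(), download=True,
)
train_loader = torch.utils.data.DataLoader(
    train_dataset, batch_size=128, shuffle=True,
)
```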
```
running_loss = 0.0
for inputs, labels in dataloader:
    # ... forward pass, loss, backward pass, optimizer step ...
    # statistics: weight the batch-mean loss by the batch size
    running_loss += loss.item() * inputs.size(0)
scheduler.step()
# divide by the total number of samples, not the number of batches
epoch_loss = running_loss / dataset_sizes
print(f'Loss: {epoch_loss:.4f}')
```
Source: https://androidkt.com/how-to-calculate-running-loss-using-loss-item-in-pytorch/
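Weighting by `inputs.size(0)` matters when the last batch is smaller than the rest: averaging the per-batch means gives every batch equal weight, while weighting each batch mean by its batch size and dividing by the dataset size gives the true per-sample average. A self-contained illustration with made-up numbers:

```
import torch

# Per-batch mean losses for batches of size 32, 32, and 8.
batch_means = torch.tensor([0.50, 0.40, 1.00])
batch_sizes = torch.tensor([32.0, 32.0, 8.0])

# Naive average of batch means: over-weights the small last batch.
naive = batch_means.mean()                                        # 0.6333
# Sample-weighted average, as in the snippet above.
weighted = (batch_means * batch_sizes).sum() / batch_sizes.sum()  # ~0.5111

print(f"mean of batch means: {naive:.4f}")
print(f"per-sample average:  {weighted:.4f}")
```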
The default `reduction='mean'` in PyTorch loss functions returns the mean loss over the batch. With `reduction='sum'` you get the sum of the per-element losses instead; dividing that sum by the number of elements recovers the same mean. If you need the individual losses within each batch, disable the reduction with `reduction='none'`.
Source: https://discuss.pytorch.org/t/compute-loss-for-each-batch/56673
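A quick self-contained check of the three reduction modes with `nn.MSELoss` (values chosen arbitrarily):

```
import torch
from torch import nn

pred = torch.tensor([1.0, 2.0, 3.0])
target = torch.tensor([1.5, 2.0, 2.0])

mean_loss = nn.MSELoss(reduction='mean')(pred, target)  # (0.25 + 0.00 + 1.00) / 3
sum_loss = nn.MSELoss(reduction='sum')(pred, target)    # 0.25 + 0.00 + 1.00
none_loss = nn.MSELoss(reduction='none')(pred, target)  # per-element losses

print(mean_loss.item())  # ~0.4167
print(sum_loss.item())   # 1.25
print(none_loss)         # tensor([0.2500, 0.0000, 1.0000])
# the mean is just the sum divided by the number of elements
print(torch.allclose(sum_loss / pred.numel(), mean_loss))  # True
```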